// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	NET3	Protocol independent device support routines.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <linux/uaccess.h>
#include <linux/bitmap.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/isolation.h>
#include <linux/sched/mm.h>
#include <linux/smpboot.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dsa.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/gro.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <net/tcx.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <trace/events/qdisc.h>
#include <trace/events/xdp.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_netdev.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <net/devlink.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/once_lite.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/types.h>
#include <net/page_pool/helpers.h>
#include <net/rps.h>

#include "dev.h"
#include "net-sysfs.h"

static DEFINE_SPINLOCK(ptype_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack);

static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static DECLARE_RWSEM(devnet_rename_sem);

static inline void dev_base_seq_inc(struct net *net)
{
	unsigned int val = net->dev_base_seq + 1;

	WRITE_ONCE(net->dev_base_seq, val ?: 1);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

#ifndef CONFIG_PREEMPT_RT

static DEFINE_STATIC_KEY_FALSE(use_backlog_threads_key);

static int __init setup_backlog_napi_threads(char *arg)
{
	static_branch_enable(&use_backlog_threads_key);
	return 0;
}
early_param("thread_backlog_napi", setup_backlog_napi_threads);

static bool use_backlog_threads(void)
{
	return static_branch_unlikely(&use_backlog_threads_key);
}

#else

static bool use_backlog_threads(void)
{
	return true;
}

#endif

static inline void backlog_lock_irq_save(struct softnet_data *sd,
					 unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
		spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
	else
		local_irq_save(*flags);
}

static inline void backlog_lock_irq_disable(struct softnet_data *sd)
{
	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
		spin_lock_irq(&sd->input_pkt_queue.lock);
	else
		local_irq_disable();
}

static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
					      unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
		spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
	else
		local_irq_restore(*flags);
}

static inline void backlog_unlock_irq_enable(struct softnet_data *sd)
{
	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
		spin_unlock_irq(&sd->input_pkt_queue.lock);
	else
		local_irq_enable();
}

static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
							const char *name)
{
	struct netdev_name_node *name_node;

	name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
	if (!name_node)
		return NULL;
	INIT_HLIST_NODE(&name_node->hlist);
	name_node->dev = dev;
	name_node->name = name;
	return name_node;
}

static struct netdev_name_node *
netdev_name_node_head_alloc(struct net_device *dev)
{
	struct netdev_name_node *name_node;

	name_node = netdev_name_node_alloc(dev, dev->name);
	if (!name_node)
		return NULL;
	INIT_LIST_HEAD(&name_node->list);
	return name_node;
}

static void netdev_name_node_free(struct netdev_name_node *name_node)
{
	kfree(name_node);
}

static void netdev_name_node_add(struct net *net,
				 struct netdev_name_node *name_node)
{
	hlist_add_head_rcu(&name_node->hlist,
			   dev_name_hash(net, name_node->name));
}

static void netdev_name_node_del(struct netdev_name_node *name_node)
{
	hlist_del_rcu(&name_node->hlist);
}

static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
							 const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}

static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
							     const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry_rcu(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}

bool netdev_name_in_use(struct net *net, const char *name)
{
	return netdev_name_node_lookup(net, name);
}
EXPORT_SYMBOL(netdev_name_in_use);

int netdev_name_node_alt_create(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (name_node)
		return -EEXIST;
	name_node = netdev_name_node_alloc(dev, name);
	if (!name_node)
		return -ENOMEM;
	netdev_name_node_add(net, name_node);
	/* The node that holds dev->name acts as the head of the per-device list. */
	list_add_tail_rcu(&name_node->list, &dev->name_node->list);

	return 0;
}

static void netdev_name_node_alt_free(struct rcu_head *head)
{
	struct netdev_name_node *name_node =
		container_of(head, struct netdev_name_node, rcu);

	kfree(name_node->name);
	netdev_name_node_free(name_node);
}

static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
{
	netdev_name_node_del(name_node);
	list_del(&name_node->list);
	call_rcu(&name_node->rcu, netdev_name_node_alt_free);
}

int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (!name_node)
		return -ENOENT;
	/* lookup might have found our primary name or a name belonging
	 * to another device.
	 */
	if (name_node == dev->name_node || name_node->dev != dev)
		return -EINVAL;

	__netdev_name_node_alt_destroy(name_node);
	return 0;
}

static void netdev_name_node_alt_flush(struct net_device *dev)
{
	struct netdev_name_node *name_node, *tmp;

	list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list) {
		list_del(&name_node->list);
		netdev_name_node_alt_free(&name_node->rcu);
	}
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	netdev_name_node_add(net, dev->name_node);
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));

	netdev_for_each_altname(dev, name_node)
		netdev_name_node_add(net, name_node);

	/* We reserved the ifindex, this can't fail */
	WARN_ON(xa_store(&net->dev_by_index, dev->ifindex, dev, GFP_KERNEL));

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect an RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	xa_erase(&net->dev_by_index, dev->ifindex);

	netdev_for_each_altname(dev, name_node)
		netdev_name_node_del(name_node);

	/* Unlink dev from the device chain */
	list_del_rcu(&dev->dev_list);
	netdev_name_node_del(dev->name_node);
	hlist_del_rcu(&dev->index_hlist);

	dev_base_seq_inc(dev_net(dev));
}

/*
 * Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 * Device drivers call our routines to queue packets here. We empty the
 * queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data) = {
	.process_queue_bh_lock = INIT_LOCAL_LOCK(process_queue_bh_lock),
};
EXPORT_PER_CPU_SYMBOL(softnet_data);

/* Page_pool has a lockless array/stack to alloc/recycle pages.
 * PP consumers must pay attention to run APIs in the appropriate context
 * (e.g. NAPI context).
 */
static DEFINE_PER_CPU(struct page_pool *, system_page_pool);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/


/*
 * Add a protocol ID to the list. Now that the input handler is
 * smarter we can dispense with all the messy stuff that used to be
 * here.
 *
 * BEWARE!!! Protocol handlers, mangling input packets,
 * MUST BE last in hash buckets and checking protocol handlers
 * MUST start from promiscuous ptype_all chain in net_bh.
 * It is true now, do not change it.
 * Explanation follows: if a protocol handler that mangles packets is
 * first on the list, it has no way to sense that the packet is cloned
 * and should be copied-on-write, so it will change it and subsequent
 * readers will get a broken packet.
 * --ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &net_hotdata.ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 * dev_add_pack - add packet handler
 * @pt: packet type declaration
 *
 * Add a protocol handler to the networking stack. The passed &packet_type
 * is linked into kernel lists and may not be freed until it has been
 * removed from the kernel lists.
 *
 * This call does not sleep, therefore it cannot guarantee that all
 * CPUs that are in the middle of receiving packets will see the new
 * packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 * __dev_remove_pack - remove packet handler
 * @pt: packet type declaration
 *
 * Remove a protocol handler that was previously added to the kernel
 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
 * from the kernel lists and can be freed or reused once this function
 * returns.
 *
 * The packet type might still be in use by receivers
 * and must not be freed until after all CPUs have gone
 * through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 * dev_remove_pack - remove packet handler
 * @pt: packet type declaration
 *
 * Remove a protocol handler that was previously added to the kernel
 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
 * from the kernel lists and can be freed or reused once this function
 * returns.
 *
 * This call sleeps to guarantee that no CPU is looking at the packet
 * type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
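
/*
 * Minimal usage sketch for the pair above; illustrative only, not in-tree
 * code. Everything named "example_*" and the EtherType value are assumptions.
 * A handler registered via dev_add_pack() owns the skb it is given and must
 * consume or free it.
 */
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* Inspect the packet here, then release our reference. */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_pt __read_mostly = {
	.type = cpu_to_be16(0x88b5),	/* IEEE 802 local experimental */
	.func = example_rcv,
};

static void __maybe_unused example_pt_attach(void)
{
	dev_add_pack(&example_pt);
}

static void __maybe_unused example_pt_detach(void)
{
	/* Sleeps in synchronize_net(); call from process context only. */
	dev_remove_pack(&example_pt);
}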


/*******************************************************************************
 *
 *		Device Interface Subroutines
 *
 *******************************************************************************/

/**
 * dev_get_iflink - get 'iflink' value of an interface
 * @dev: targeted interface
 *
 * Indicates the ifindex the interface is linked to.
 * Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return READ_ONCE(dev->ifindex);
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 * dev_fill_metadata_dst - Retrieve tunnel egress information.
 * @dev: targeted interface
 * @skb: The packet.
 *
 * For better visibility of tunnel traffic OVS needs to retrieve
 * egress tunnel information for a packet. The following API allows
 * a user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);

static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack)
{
	int k = stack->num_paths++;

	if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX))
		return NULL;

	return &stack->path[k];
}

int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
			  struct net_device_path_stack *stack)
{
	const struct net_device *last_dev;
	struct net_device_path_ctx ctx = {
		.dev	= dev,
	};
	struct net_device_path *path;
	int ret = 0;

	memcpy(ctx.daddr, daddr, sizeof(ctx.daddr));
	stack->num_paths = 0;
	while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
		last_dev = ctx.dev;
		path = dev_fwd_path(stack);
		if (!path)
			return -1;

		memset(path, 0, sizeof(struct net_device_path));
		ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path);
		if (ret < 0)
			return -1;

		if (WARN_ON_ONCE(last_dev == ctx.dev))
			return -1;
	}

	if (!ctx.dev)
		return ret;

	path = dev_fwd_path(stack);
	if (!path)
		return -1;
	path->type = DEV_PATH_ETHERNET;
	path->dev = ctx.dev;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_fill_forward_path);

/**
 * __dev_get_by_name - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name. Must be called under RTNL semaphore.
 * If the name is found a pointer to the device is returned.
 * If the name is not found then %NULL is returned. The
 * reference counters are not incremented so the caller must be
 * careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 * dev_get_by_name_rcu - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name.
 * If the name is found a pointer to the device is returned.
 * If the name is not found then %NULL is returned.
 * The reference counters are not incremented so the caller must be
 * careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup_rcu(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/* Deprecated for new users, call netdev_get_by_name() instead */
struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

/**
 * netdev_get_by_name() - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 * @tracker: tracking object for the acquired reference
 * @gfp: allocation flags for the tracker
 *
 * Find an interface by name. This can be called from any
 * context and does its own locking. The returned handle has
 * the usage count incremented and the caller must use netdev_put() to
 * release it when it is no longer needed. %NULL is returned if no
 * matching device is found.
 */
struct net_device *netdev_get_by_name(struct net *net, const char *name,
				      netdevice_tracker *tracker, gfp_t gfp)
{
	struct net_device *dev;

	dev = dev_get_by_name(net, name);
	if (dev)
		netdev_tracker_alloc(dev, tracker, gfp);
	return dev;
}
EXPORT_SYMBOL(netdev_get_by_name);
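
/*
 * Minimal usage sketch (illustrative, not in-tree): look up a device by a
 * name that is assumed to exist, keep a tracked reference while using it,
 * and drop it with netdev_put() when done.
 */
static void __maybe_unused example_use_by_name(struct net *net)
{
	netdevice_tracker tracker;
	struct net_device *dev;

	dev = netdev_get_by_name(net, "eth0", &tracker, GFP_KERNEL);
	if (!dev)
		return;
	netdev_info(dev, "found, mtu %u\n", READ_ONCE(dev->mtu));
	netdev_put(dev, &tracker);
}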

/**
 * __dev_get_by_index - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 *
 * Search for an interface by index. Returns a pointer to the device,
 * or %NULL if the device is not found. The device has not
 * had its reference counter increased so the caller must be careful
 * about locking. The caller must hold the RTNL semaphore.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 * dev_get_by_index_rcu - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 *
 * Search for an interface by index. Returns a pointer to the device,
 * or %NULL if the device is not found. The device has not
 * had its reference counter increased so the caller must be careful
 * about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);

/* Deprecated for new users, call netdev_get_by_index() instead */
struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 * netdev_get_by_index() - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 * @tracker: tracking object for the acquired reference
 * @gfp: allocation flags for the tracker
 *
 * Search for an interface by index. Returns a pointer to the device,
 * or NULL if the device is not found. The device returned has
 * had a reference added and the pointer is safe until the user calls
 * netdev_put() to indicate they have finished with it.
 */
struct net_device *netdev_get_by_index(struct net *net, int ifindex,
				       netdevice_tracker *tracker, gfp_t gfp)
{
	struct net_device *dev;

	dev = dev_get_by_index(net, ifindex);
	if (dev)
		netdev_tracker_alloc(dev, tracker, gfp);
	return dev;
}
EXPORT_SYMBOL(netdev_get_by_index);

/**
 * dev_get_by_napi_id - find a device by napi_id
 * @napi_id: ID of the NAPI struct
 *
 * Search for an interface by NAPI ID. Returns a pointer to the device,
 * or %NULL if the device is not found. The device has not had
 * its reference counter increased so the caller must be careful
 * about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (napi_id < MIN_NAPI_ID)
		return NULL;

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);
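
/*
 * Minimal usage sketch (illustrative, not in-tree): resolve an ifindex to
 * its MTU entirely under RCU, without taking a reference.
 */
static int __maybe_unused example_ifindex_to_mtu(struct net *net, int ifindex)
{
	struct net_device *dev;
	int mtu = -ENODEV;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		mtu = READ_ONCE(dev->mtu);	/* dev only valid inside this section */
	rcu_read_unlock();

	return mtu;
}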

static DEFINE_SEQLOCK(netdev_rename_lock);

void netdev_copy_name(struct net_device *dev, char *name)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&netdev_rename_lock);
		strscpy(name, dev->name, IFNAMSIZ);
	} while (read_seqretry(&netdev_rename_lock, seq));
}

/**
 * netdev_get_name - get a netdevice name, knowing its ifindex.
 * @net: network namespace
 * @name: a pointer to the buffer where the name will be stored.
 * @ifindex: the ifindex of the interface to get the name from.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	int ret;

	rcu_read_lock();

	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		ret = -ENODEV;
		goto out;
	}

	netdev_copy_name(dev, name);

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

/**
 * dev_getbyhwaddr_rcu - find a device by its hardware address
 * @net: the applicable net namespace
 * @type: media type of device
 * @ha: hardware address
 *
 * Search for an interface by MAC address. Returns a pointer to the
 * device, or NULL if the device is not found.
 * The caller must hold RCU or RTNL.
 * The returned device has not had its ref count increased
 * and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
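
/*
 * Minimal usage sketch (illustrative, not in-tree): find an Ethernet device
 * by MAC address and take a plain reference before leaving the RCU section.
 */
static struct net_device *__maybe_unused example_find_by_mac(struct net *net,
							     const char *mac)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, mac);
	if (dev)
		dev_hold(dev);	/* keep dev alive past rcu_read_unlock() */
	rcu_read_unlock();

	return dev;	/* caller releases with dev_put() */
}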

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 * __dev_get_by_flags - find any device with given flags
 * @net: the applicable net namespace
 * @if_flags: IFF_* values
 * @mask: bitmask of bits in if_flags to check
 *
 * Search for any interface with the given flags. Returns a pointer to
 * the first matching device, or NULL if no device is found. Must be
 * called inside rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 * dev_valid_name - check if name is okay for network device
 * @name: name string
 *
 * Network device names need to be valid file names to
 * allow sysfs to work. We also disallow any kind of
 * whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
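
/*
 * Illustrative check (not in-tree): "eth0", "wlp3s0" and "br-lan" pass;
 * the empty string, ".", "..", names containing '/', ':' or whitespace,
 * and names of IFNAMSIZ (16) characters or more are rejected.
 */
static bool __maybe_unused example_check_name(const char *candidate)
{
	return dev_valid_name(candidate);
}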

/**
 * __dev_alloc_name - allocate a name for a device
 * @net: network namespace to allocate the device name in
 * @name: name format string
 * @res: result name string
 *
 * Passed a format string, e.g. "lt%d", it will try to find a suitable
 * id. It scans the list of devices to build up a free map, then chooses
 * the first empty slot. The caller must hold the dev_base or rtnl lock
 * while allocating the name and adding the device in order to avoid
 * duplicates.
 * Limited to bits_per_byte * page size devices (i.e. 32K on most platforms).
 * Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *res)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;
	char buf[IFNAMSIZ];

	/* Verify the string as this thing may have come from the user.
	 * There must be one "%d" and no other "%" characters.
	 */
	p = strchr(name, '%');
	if (!p || p[1] != 'd' || strchr(p + 2, '%'))
		return -EINVAL;

	/* Use one page as a bit array of possible slots */
	inuse = bitmap_zalloc(max_netdevices, GFP_ATOMIC);
	if (!inuse)
		return -ENOMEM;

	for_each_netdev(net, d) {
		struct netdev_name_node *name_node;

		netdev_for_each_altname(d, name_node) {
			if (!sscanf(name_node->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, name_node->name, IFNAMSIZ))
				__set_bit(i, inuse);
		}
		if (!sscanf(d->name, name, &i))
			continue;
		if (i < 0 || i >= max_netdevices)
			continue;

		/* avoid cases where sscanf is not exact inverse of printf */
		snprintf(buf, IFNAMSIZ, name, i);
		if (!strncmp(buf, d->name, IFNAMSIZ))
			__set_bit(i, inuse);
	}

	i = find_first_zero_bit(inuse, max_netdevices);
	bitmap_free(inuse);
	if (i == max_netdevices)
		return -ENFILE;

	/* 'res' and 'name' could overlap, use 'buf' as an intermediate buffer */
	strscpy(buf, name, IFNAMSIZ);
	snprintf(res, IFNAMSIZ, buf, i);
	return i;
}

/* Returns negative errno or allocated unit id (see __dev_alloc_name()) */
static int dev_prep_valid_name(struct net *net, struct net_device *dev,
			       const char *want_name, char *out_name,
			       int dup_errno)
{
	if (!dev_valid_name(want_name))
		return -EINVAL;

	if (strchr(want_name, '%'))
		return __dev_alloc_name(net, want_name, out_name);

	if (netdev_name_in_use(net, want_name))
		return -dup_errno;
	if (out_name != want_name)
		strscpy(out_name, want_name, IFNAMSIZ);
	return 0;
}

/**
 * dev_alloc_name - allocate a name for a device
 * @dev: device
 * @name: name format string
 *
 * Passed a format string, e.g. "lt%d", it will try to find a suitable
 * id. It scans the list of devices to build up a free map, then chooses
 * the first empty slot. The caller must hold the dev_base or rtnl lock
 * while allocating the name and adding the device in order to avoid
 * duplicates.
 * Limited to bits_per_byte * page size devices (i.e. 32K on most platforms).
 * Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	return dev_prep_valid_name(dev_net(dev), dev, name, dev->name, ENFILE);
}
EXPORT_SYMBOL(dev_alloc_name);
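
/*
 * Minimal usage sketch (illustrative, not in-tree): pick the first free
 * "veth%d" slot for a freshly allocated device, under RTNL.
 */
static int __maybe_unused example_pick_name(struct net_device *dev)
{
	int unit;

	ASSERT_RTNL();
	unit = dev_alloc_name(dev, "veth%d");	/* fills dev->name, e.g. "veth3" */

	return unit < 0 ? unit : 0;
}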

static int dev_get_valid_name(struct net *net, struct net_device *dev,
			      const char *name)
{
	int ret;

	ret = dev_prep_valid_name(net, dev, name, dev->name, EEXIST);
	return ret < 0 ? ret : 0;
}

/**
 * dev_change_name - change name of a device
 * @dev: device
 * @newname: name (or format string) must be at least IFNAMSIZ
 *
 * Change the name of a device. A format string such as "eth%d"
 * can be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);

	down_write(&devnet_rename_sem);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		up_write(&devnet_rename_sem);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	write_seqlock_bh(&netdev_rename_lock);
	err = dev_get_valid_name(net, dev, newname);
	write_sequnlock_bh(&netdev_rename_lock);

	if (err < 0) {
		up_write(&devnet_rename_sem);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s%s\n", oldname,
			    dev->flags & IFF_UP ? " (while UP)" : "");

	old_assign_type = dev->name_assign_type;
	WRITE_ONCE(dev->name_assign_type, NET_NAME_RENAMED);

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		WRITE_ONCE(dev->name_assign_type, old_assign_type);
		up_write(&devnet_rename_sem);
		return ret;
	}

	up_write(&devnet_rename_sem);

	netdev_adjacent_rename_links(dev, oldname);

	netdev_name_node_del(dev->name_node);

	synchronize_net();

	netdev_name_node_add(net, dev->name_node);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			down_write(&devnet_rename_sem);
			write_seqlock_bh(&netdev_rename_lock);
			memcpy(dev->name, oldname, IFNAMSIZ);
			write_sequnlock_bh(&netdev_rename_lock);
			memcpy(oldname, newname, IFNAMSIZ);
			WRITE_ONCE(dev->name_assign_type, old_assign_type);
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			netdev_err(dev, "name change rollback failed: %d\n",
				   ret);
		}
	}

	return err;
}
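
/*
 * Minimal usage sketch (illustrative, not in-tree): rename a device under
 * RTNL; the "%d" wildcard picks a free unit, e.g. "mgmt0".
 */
static int __maybe_unused example_rename(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_name(dev, "mgmt%d");
	rtnl_unlock();

	return err;
}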

/**
 * dev_set_alias - change ifalias of a device
 * @dev: device
 * @alias: name up to IFALIASZ
 * @len: limit of bytes to copy from info
 *
 * Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	struct dev_ifalias *new_alias = NULL;

	if (len >= IFALIASZ)
		return -EINVAL;

	if (len) {
		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
		if (!new_alias)
			return -ENOMEM;

		memcpy(new_alias->ifalias, alias, len);
		new_alias->ifalias[len] = 0;
	}

	mutex_lock(&ifalias_mutex);
	new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
					mutex_is_locked(&ifalias_mutex));
	mutex_unlock(&ifalias_mutex);

	if (new_alias)
		kfree_rcu(new_alias, rcuhead);

	return len;
}
EXPORT_SYMBOL(dev_set_alias);

/**
 * dev_get_alias - get ifalias of a device
 * @dev: device
 * @name: buffer to store name of ifalias
 * @len: size of buffer
 *
 * Get ifalias for a device. The caller must make sure dev cannot go
 * away, e.g. hold the RCU read lock or own a reference count to the device.
 */
int dev_get_alias(const struct net_device *dev, char *name, size_t len)
{
	const struct dev_ifalias *alias;
	int ret = 0;

	rcu_read_lock();
	alias = rcu_dereference(dev->ifalias);
	if (alias)
		ret = snprintf(name, len, "%s", alias->ifalias);
	rcu_read_unlock();

	return ret;
}
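
/*
 * Minimal usage sketch (illustrative, not in-tree): attach a human-readable
 * description to a device and read it back.
 */
static void __maybe_unused example_alias(struct net_device *dev)
{
	static const char desc[] = "uplink to core switch";
	char buf[IFALIASZ];

	if (dev_set_alias(dev, desc, strlen(desc)) < 0)
		return;
	if (dev_get_alias(dev, buf, sizeof(buf)) > 0)
		netdev_info(dev, "alias: %s\n", buf);
}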

/**
 * netdev_features_change - device changes features
 * @dev: device to cause notification
 *
 * Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 * netdev_state_change - device changes state
 * @dev: device to cause notification
 *
 * Called to indicate a device has changed state. This function calls
 * the notifier chains for netdev_chain and sends a NEWLINK message
 * to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info = {
			.info.dev = dev,
		};

		call_netdevice_notifiers_info(NETDEV_CHANGE,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL, 0, NULL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 * __netdev_notify_peers - notify network peers about existence of @dev,
 * to be called when rtnl lock is already held.
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void __netdev_notify_peers(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
}
EXPORT_SYMBOL(__netdev_notify_peers);

/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	__netdev_notify_peers(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

static int napi_threaded_poll(void *data);

static int napi_kthread_create(struct napi_struct *n)
{
	int err = 0;

	/* Create and wake up the kthread once to put it in
	 * TASK_INTERRUPTIBLE mode to avoid the blocked task
	 * warning and work with loadavg.
	 */
	n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
				n->dev->name, n->napi_id);
	if (IS_ERR(n->thread)) {
		err = PTR_ERR(n->thread);
		pr_err("kthread_run failed with err %d\n", err);
		n->thread = NULL;
	}

	return err;
}

static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();
	dev_addr_check(dev);

	if (!netif_device_present(dev)) {
		/* may be detached because parent is runtime-suspended */
		if (dev->dev.parent)
			pm_runtime_resume(dev->dev.parent);
		if (!netif_device_present(dev))
			return -ENODEV;
	}

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 * dev_open - prepare an interface for use.
 * @dev: device to open
 * @extack: netlink extended ack
 *
 * Takes a device from down to up state. The device's private open
 * function is invoked and then the multicast lists are loaded. Finally
 * the device is moved into the up state and a %NETDEV_UP message is
 * sent to the netdev notifier chain.
 *
 * Calling this function on an active interface is a nop. On a failure
 * a negative errno code is returned.
 */
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev, extack);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
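
/*
 * Minimal usage sketch (illustrative, not in-tree): bring an interface up
 * from process context. dev_open() requires RTNL; a NULL extack simply
 * forgoes extended error reporting.
 */
static int __maybe_unused example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_open(dev, NULL);
	rtnl_unlock();

	return err;
}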

static void __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 * Call the device specific close. This cannot fail.
		 * Only if device is UP
		 *
		 * We allow it to be called even after a DETACH hot-plug
		 * event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}
}

static void __dev_close(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	__dev_close_many(&single);
	list_del(&single);
}

void dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}
}
EXPORT_SYMBOL(dev_close_many);

/**
 * dev_close - shutdown an interface.
 * @dev: device to shutdown
 *
 * This function moves an active device into down state. A
 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 * chain.
 */
void dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
}
EXPORT_SYMBOL(dev_close);


/**
 * dev_disable_lro - disable Large Receive Offload on a device
 * @dev: device
 *
 * Disable Large Receive Offload (LRO) on a net device. Must be
 * called under RTNL. This is needed if received packets may be
 * forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);

/**
 * dev_disable_gro_hw - disable HW Generic Receive Offload on a device
 * @dev: device
 *
 * Disable HW Generic Receive Offload (GRO_HW) on a net device. Must be
 * called under RTNL. This is needed if Generic XDP is installed on
 * the device.
 */
static void dev_disable_gro_hw(struct net_device *dev)
{
	dev->wanted_features &= ~NETIF_F_GRO_HW;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_GRO_HW))
		netdev_WARN(dev, "failed to disable GRO_HW!\n");
}

const char *netdev_cmd_to_name(enum netdev_cmd cmd)
{
#define N(val)						\
	case NETDEV_##val:				\
		return "NETDEV_" __stringify(val);
	switch (cmd) {
	N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
	N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
	N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
	N(POST_INIT) N(PRE_UNINIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN)
	N(CHANGEUPPER) N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA)
	N(BONDING_INFO) N(PRECHANGEUPPER) N(CHANGELOWERSTATE)
	N(UDP_TUNNEL_PUSH_INFO) N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
	N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
	N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
	N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE)
	N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA)
	N(XDP_FEAT_CHANGE)
	}
#undef N
	return "UNKNOWN_NETDEV_EVENT";
}
EXPORT_SYMBOL_GPL(netdev_cmd_to_name);

static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info = {
		.dev = dev,
	};

	return nb->notifier_call(nb, val, &info);
}

static int call_netdevice_register_notifiers(struct notifier_block *nb,
					     struct net_device *dev)
{
	int err;

	err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
	err = notifier_to_errno(err);
	if (err)
		return err;

	if (!(dev->flags & IFF_UP))
		return 0;

	call_netdevice_notifier(nb, NETDEV_UP, dev);
	return 0;
}

static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
						struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
					dev);
		call_netdevice_notifier(nb, NETDEV_DOWN, dev);
	}
	call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
}

static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
						 struct net *net)
{
	struct net_device *dev;
	int err;

	for_each_netdev(net, dev) {
		err = call_netdevice_register_notifiers(nb, dev);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for_each_netdev_continue_reverse(net, dev)
		call_netdevice_unregister_notifiers(nb, dev);
	return err;
}

static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
						    struct net *net)
{
	struct net_device *dev;

	for_each_netdev(net, dev)
		call_netdevice_unregister_notifiers(nb, dev);
}

static int dev_boot_phase = 1;

/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered, all registration and up events are replayed
 * to the new notifier to allow it a race-free view of the network
 * device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		err = call_netdevice_register_net_notifiers(nb, net);
		if (err)
			goto rollback;
	}

unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;

rollback:
	for_each_net_continue_reverse(net)
		call_netdevice_unregister_net_notifiers(nb, net);

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
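
/*
 * Minimal usage sketch (illustrative, not in-tree): a notifier block that
 * logs device events by name. Registration replays NETDEV_REGISTER and
 * NETDEV_UP for devices that already exist, as described above.
 */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	pr_info("%s: %s\n", dev->name, netdev_cmd_to_name(event));
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_netdev_event,
};

static int __maybe_unused example_nb_register(void)
{
	return register_netdevice_notifier(&example_nb);
}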
1795
1796/**
1797 * unregister_netdevice_notifier - unregister a network notifier block
1798 * @nb: notifier
1799 *
1800 * Unregister a notifier previously registered by
1801 * register_netdevice_notifier(). The notifier is unlinked into the
1802 * kernel structures and may then be reused. A negative errno code
1803 * is returned on a failure.
1804 *
1805 * After unregistering unregister and down device events are synthesized
1806 * for all devices on the device list to the removed notifier to remove
1807 * the need for special case cleanup code.
1808 */
1809
1810int unregister_netdevice_notifier(struct notifier_block *nb)
1811{
1812 struct net *net;
1813 int err;
1814
1815 /* Close race with setup_net() and cleanup_net() */
1816 down_write(&pernet_ops_rwsem);
1817 rtnl_lock();
1818 err = raw_notifier_chain_unregister(&netdev_chain, nb);
1819 if (err)
1820 goto unlock;
1821
1822 for_each_net(net)
1823 call_netdevice_unregister_net_notifiers(nb, net);
1824
1825unlock:
1826 rtnl_unlock();
1827 up_write(&pernet_ops_rwsem);
1828 return err;
1829}
1830EXPORT_SYMBOL(unregister_netdevice_notifier);
1831
1832static int __register_netdevice_notifier_net(struct net *net,
1833 struct notifier_block *nb,
1834 bool ignore_call_fail)
1835{
1836 int err;
1837
1838 err = raw_notifier_chain_register(&net->netdev_chain, nb);
1839 if (err)
1840 return err;
1841 if (dev_boot_phase)
1842 return 0;
1843
1844 err = call_netdevice_register_net_notifiers(nb, net);
1845 if (err && !ignore_call_fail)
1846 goto chain_unregister;
1847
1848 return 0;
1849
1850chain_unregister:
1851 raw_notifier_chain_unregister(&net->netdev_chain, nb);
1852 return err;
1853}
1854
1855static int __unregister_netdevice_notifier_net(struct net *net,
1856 struct notifier_block *nb)
1857{
1858 int err;
1859
1860 err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
1861 if (err)
1862 return err;
1863
1864 call_netdevice_unregister_net_notifiers(nb, net);
1865 return 0;
1866}
1867
1868/**
1869 * register_netdevice_notifier_net - register a per-netns network notifier block
1870 * @net: network namespace
1871 * @nb: notifier
1872 *
1873 * Register a notifier to be called when network device events occur.
1874 * The notifier passed is linked into the kernel structures and must
1875 * not be reused until it has been unregistered. A negative errno code
1876 * is returned on a failure.
1877 *
1878 * When registered all registration and up events are replayed
1879 * to the new notifier to allow device to have a race free
1880 * view of the network device list.
1881 */
1882
1883int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
1884{
1885 int err;
1886
1887 rtnl_lock();
1888 err = __register_netdevice_notifier_net(net, nb, false);
1889 rtnl_unlock();
1890 return err;
1891}
1892EXPORT_SYMBOL(register_netdevice_notifier_net);
1893
1894/**
1895 * unregister_netdevice_notifier_net - unregister a per-netns
1896 * network notifier block
1897 * @net: network namespace
1898 * @nb: notifier
1899 *
1900 * Unregister a notifier previously registered by
1901 * register_netdevice_notifier_net(). The notifier is unlinked from the
1902 * kernel structures and may then be reused. A negative errno code
1903 * is returned on a failure.
1904 *
1905 * After unregistering, unregister and down device events are synthesized
1906 * for all devices on the device list and sent to the removed notifier,
1907 * removing the need for special-case cleanup code.
1908 */
1909
1910int unregister_netdevice_notifier_net(struct net *net,
1911 struct notifier_block *nb)
1912{
1913 int err;
1914
1915 rtnl_lock();
1916 err = __unregister_netdevice_notifier_net(net, nb);
1917 rtnl_unlock();
1918 return err;
1919}
1920EXPORT_SYMBOL(unregister_netdevice_notifier_net);
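
/*
 * Illustrative sketch (reusing the hypothetical my_nb from above): the
 * per-netns variants scope the notifier to a single namespace, so a caller
 * interested only in init_net is never invoked for devices elsewhere:
 *
 *	err = register_netdevice_notifier_net(&init_net, &my_nb);
 *	...
 *	unregister_netdevice_notifier_net(&init_net, &my_nb);
 */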
1921
1922static void __move_netdevice_notifier_net(struct net *src_net,
1923 struct net *dst_net,
1924 struct notifier_block *nb)
1925{
1926 __unregister_netdevice_notifier_net(src_net, nb);
1927 __register_netdevice_notifier_net(dst_net, nb, true);
1928}
1929
1930int register_netdevice_notifier_dev_net(struct net_device *dev,
1931 struct notifier_block *nb,
1932 struct netdev_net_notifier *nn)
1933{
1934 int err;
1935
1936 rtnl_lock();
1937 err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
1938 if (!err) {
1939 nn->nb = nb;
1940 list_add(&nn->list, &dev->net_notifier_list);
1941 }
1942 rtnl_unlock();
1943 return err;
1944}
1945EXPORT_SYMBOL(register_netdevice_notifier_dev_net);
1946
1947int unregister_netdevice_notifier_dev_net(struct net_device *dev,
1948 struct notifier_block *nb,
1949 struct netdev_net_notifier *nn)
1950{
1951 int err;
1952
1953 rtnl_lock();
1954 list_del(&nn->list);
1955 err = __unregister_netdevice_notifier_net(dev_net(dev), nb);
1956 rtnl_unlock();
1957 return err;
1958}
1959EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net);
1960
1961static void move_netdevice_notifiers_dev_net(struct net_device *dev,
1962 struct net *net)
1963{
1964 struct netdev_net_notifier *nn;
1965
1966 list_for_each_entry(nn, &dev->net_notifier_list, list)
1967 __move_netdevice_notifier_net(dev_net(dev), net, nn->nb);
1968}
1969
1970/**
1971 * call_netdevice_notifiers_info - call all network notifier blocks
1972 * @val: value passed unmodified to notifier function
1973 * @info: notifier information data
1974 *
1975 * Call all network notifier blocks. Parameters and return value
1976 * are as for raw_notifier_call_chain().
1977 */
1978
1979int call_netdevice_notifiers_info(unsigned long val,
1980 struct netdev_notifier_info *info)
1981{
1982 struct net *net = dev_net(info->dev);
1983 int ret;
1984
1985 ASSERT_RTNL();
1986
1987 /* Run per-netns notifier block chain first, then run the global one.
1988 * Hopefully, one day, the global one is going to be removed after
1989 * all notifier block registrants have been converted to be per-netns.
1990 */
1991 ret = raw_notifier_call_chain(&net->netdev_chain, val, info);
1992 if (ret & NOTIFY_STOP_MASK)
1993 return ret;
1994 return raw_notifier_call_chain(&netdev_chain, val, info);
1995}
1996
1997/**
1998 * call_netdevice_notifiers_info_robust - call per-netns notifier blocks
1999 * and roll back on error
2000 * @val_up: value passed unmodified to notifier function
2001 * @val_down: value passed unmodified to the notifier function when
2002 * recovering from an error on @val_up
2003 * @info: notifier information data
2004 *
2005 * Call all per-netns network notifier blocks, but not notifier blocks on
2006 * the global notifier chain. Parameters and return value are as for
2007 * raw_notifier_call_chain_robust().
2008 */
2009
2010static int
2011call_netdevice_notifiers_info_robust(unsigned long val_up,
2012 unsigned long val_down,
2013 struct netdev_notifier_info *info)
2014{
2015 struct net *net = dev_net(info->dev);
2016
2017 ASSERT_RTNL();
2018
2019 return raw_notifier_call_chain_robust(&net->netdev_chain,
2020 val_up, val_down, info);
2021}
2022
2023static int call_netdevice_notifiers_extack(unsigned long val,
2024 struct net_device *dev,
2025 struct netlink_ext_ack *extack)
2026{
2027 struct netdev_notifier_info info = {
2028 .dev = dev,
2029 .extack = extack,
2030 };
2031
2032 return call_netdevice_notifiers_info(val, &info);
2033}
2034
2035/**
2036 * call_netdevice_notifiers - call all network notifier blocks
2037 * @val: value passed unmodified to notifier function
2038 * @dev: net_device pointer passed unmodified to notifier function
2039 *
2040 * Call all network notifier blocks. Parameters and return value
2041 * are as for raw_notifier_call_chain().
2042 */
2043
2044int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
2045{
2046 return call_netdevice_notifiers_extack(val, dev, NULL);
2047}
2048EXPORT_SYMBOL(call_netdevice_notifiers);
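
/*
 * Illustrative sketch: most callers go through small wrappers; e.g. a
 * device announcing an out-of-band state change does, under RTNL:
 *
 *	call_netdevice_notifiers(NETDEV_CHANGE, dev);
 */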
2049
2050/**
2051 * call_netdevice_notifiers_mtu - call all network notifier blocks
2052 * @val: value passed unmodified to notifier function
2053 * @dev: net_device pointer passed unmodified to notifier function
2054 * @arg: additional u32 argument passed to the notifier function
2055 *
2056 * Call all network notifier blocks. Parameters and return value
2057 * are as for raw_notifier_call_chain().
2058 */
2059static int call_netdevice_notifiers_mtu(unsigned long val,
2060 struct net_device *dev, u32 arg)
2061{
2062 struct netdev_notifier_info_ext info = {
2063 .info.dev = dev,
2064 .ext.mtu = arg,
2065 };
2066
2067 BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
2068
2069 return call_netdevice_notifiers_info(val, &info.info);
2070}
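
/*
 * Illustrative sketch (info_ext is a hypothetical local): because the
 * BUILD_BUG_ON above pins the embedded netdev_notifier_info at offset 0, a
 * notifier callback handling an MTU event can recover the extended data
 * from the opaque pointer it receives:
 *
 *	struct netdev_notifier_info_ext *info_ext = ptr;
 *	u32 new_mtu = info_ext->ext.mtu;
 */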
2071
2072#ifdef CONFIG_NET_INGRESS
2073static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
2074
2075void net_inc_ingress_queue(void)
2076{
2077 static_branch_inc(&ingress_needed_key);
2078}
2079EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
2080
2081void net_dec_ingress_queue(void)
2082{
2083 static_branch_dec(&ingress_needed_key);
2084}
2085EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
2086#endif
2087
2088#ifdef CONFIG_NET_EGRESS
2089static DEFINE_STATIC_KEY_FALSE(egress_needed_key);
2090
2091void net_inc_egress_queue(void)
2092{
2093 static_branch_inc(&egress_needed_key);
2094}
2095EXPORT_SYMBOL_GPL(net_inc_egress_queue);
2096
2097void net_dec_egress_queue(void)
2098{
2099 static_branch_dec(&egress_needed_key);
2100}
2101EXPORT_SYMBOL_GPL(net_dec_egress_queue);
2102#endif
2103
2104#ifdef CONFIG_NET_CLS_ACT
2105DEFINE_STATIC_KEY_FALSE(tcf_bypass_check_needed_key);
2106EXPORT_SYMBOL(tcf_bypass_check_needed_key);
2107#endif
2108
2109DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
2110EXPORT_SYMBOL(netstamp_needed_key);
2111#ifdef CONFIG_JUMP_LABEL
2112static atomic_t netstamp_needed_deferred;
2113static atomic_t netstamp_wanted;
2114static void netstamp_clear(struct work_struct *work)
2115{
2116 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
2117 int wanted;
2118
2119 wanted = atomic_add_return(deferred, &netstamp_wanted);
2120 if (wanted > 0)
2121 static_branch_enable(&netstamp_needed_key);
2122 else
2123 static_branch_disable(&netstamp_needed_key);
2124}
2125static DECLARE_WORK(netstamp_work, netstamp_clear);
2126#endif
2127
2128void net_enable_timestamp(void)
2129{
2130#ifdef CONFIG_JUMP_LABEL
2131 int wanted = atomic_read(&netstamp_wanted);
2132
2133 while (wanted > 0) {
2134 if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted + 1))
2135 return;
2136 }
2137 atomic_inc(&netstamp_needed_deferred);
2138 schedule_work(&netstamp_work);
2139#else
2140 static_branch_inc(&netstamp_needed_key);
2141#endif
2142}
2143EXPORT_SYMBOL(net_enable_timestamp);
2144
2145void net_disable_timestamp(void)
2146{
2147#ifdef CONFIG_JUMP_LABEL
2148 int wanted = atomic_read(&netstamp_wanted);
2149
2150 while (wanted > 1) {
2151 if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted - 1))
2152 return;
2153 }
2154 atomic_dec(&netstamp_needed_deferred);
2155 schedule_work(&netstamp_work);
2156#else
2157 static_branch_dec(&netstamp_needed_key);
2158#endif
2159}
2160EXPORT_SYMBOL(net_disable_timestamp);
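
/*
 * Illustrative sketch: users such as socket timestamping options bump the
 * key in matched pairs:
 *
 *	net_enable_timestamp();
 *	...
 *	net_disable_timestamp();
 *
 * With CONFIG_JUMP_LABEL the static key flip itself may sleep, so only the
 * atomic counters are touched here and the flip is deferred to
 * netstamp_work, keeping both helpers callable from atomic context.
 */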
2161
2162static inline void net_timestamp_set(struct sk_buff *skb)
2163{
2164 skb->tstamp = 0;
2165 skb->tstamp_type = SKB_CLOCK_REALTIME;
2166 if (static_branch_unlikely(&netstamp_needed_key))
2167 skb->tstamp = ktime_get_real();
2168}
2169
2170#define net_timestamp_check(COND, SKB) \
2171 if (static_branch_unlikely(&netstamp_needed_key)) { \
2172 if ((COND) && !(SKB)->tstamp) \
2173 (SKB)->tstamp = ktime_get_real(); \
2174 } \
2175
2176bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
2177{
2178 return __is_skb_forwardable(dev, skb, true);
2179}
2180EXPORT_SYMBOL_GPL(is_skb_forwardable);
2181
2182static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb,
2183 bool check_mtu)
2184{
2185 int ret = ____dev_forward_skb(dev, skb, check_mtu);
2186
2187 if (likely(!ret)) {
2188 skb->protocol = eth_type_trans(skb, dev);
2189 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
2190 }
2191
2192 return ret;
2193}
2194
2195int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2196{
2197 return __dev_forward_skb2(dev, skb, true);
2198}
2199EXPORT_SYMBOL_GPL(__dev_forward_skb);
2200
2201/**
2202 * dev_forward_skb - loopback an skb to another netif
2203 *
2204 * @dev: destination network device
2205 * @skb: buffer to forward
2206 *
2207 * return values:
2208 * NET_RX_SUCCESS (no congestion)
2209 * NET_RX_DROP (packet was dropped, but freed)
2210 *
2211 * dev_forward_skb can be used for injecting an skb from the
2212 * start_xmit function of one device into the receive queue
2213 * of another device.
2214 *
2215 * The receiving device may be in another namespace, so
2216 * we have to clear all information in the skb that could
2217 * impact namespace isolation.
2218 */
2219int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2220{
2221 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
2222}
2223EXPORT_SYMBOL_GPL(dev_forward_skb);
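
/*
 * Illustrative sketch (my_xmit and my_get_peer are hypothetical): a
 * veth-style driver can implement ndo_start_xmit by injecting the skb
 * straight into its peer's receive path:
 *
 *	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct net_device *peer = my_get_peer(dev);
 *
 *		if (dev_forward_skb(peer, skb) == NET_RX_DROP)
 *			dev_core_stats_tx_dropped_inc(dev);
 *		return NETDEV_TX_OK;
 *	}
 */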
2224
2225int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb)
2226{
2227 return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb);
2228}
2229
2230static inline int deliver_skb(struct sk_buff *skb,
2231 struct packet_type *pt_prev,
2232 struct net_device *orig_dev)
2233{
2234 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
2235 return -ENOMEM;
2236 refcount_inc(&skb->users);
2237 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2238}
2239
2240static inline void deliver_ptype_list_skb(struct sk_buff *skb,
2241 struct packet_type **pt,
2242 struct net_device *orig_dev,
2243 __be16 type,
2244 struct list_head *ptype_list)
2245{
2246 struct packet_type *ptype, *pt_prev = *pt;
2247
2248 list_for_each_entry_rcu(ptype, ptype_list, list) {
2249 if (ptype->type != type)
2250 continue;
2251 if (pt_prev)
2252 deliver_skb(skb, pt_prev, orig_dev);
2253 pt_prev = ptype;
2254 }
2255 *pt = pt_prev;
2256}
2257
2258static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
2259{
2260 if (!ptype->af_packet_priv || !skb->sk)
2261 return false;
2262
2263 if (ptype->id_match)
2264 return ptype->id_match(ptype, skb->sk);
2265 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
2266 return true;
2267
2268 return false;
2269}
2270
2271/**
2272 * dev_nit_active - return true if any network interface taps are in use
2273 *
2274 * @dev: network device to check for the presence of taps
2275 */
2276bool dev_nit_active(struct net_device *dev)
2277{
2278 return !list_empty(&net_hotdata.ptype_all) ||
2279 !list_empty(&dev->ptype_all);
2280}
2281EXPORT_SYMBOL_GPL(dev_nit_active);
2282
2283/*
2284 * Support routine. Sends outgoing frames to any network
2285 * taps currently in use.
2286 */
2287
2288void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
2289{
2290 struct list_head *ptype_list = &net_hotdata.ptype_all;
2291 struct packet_type *ptype, *pt_prev = NULL;
2292 struct sk_buff *skb2 = NULL;
2293
2294 rcu_read_lock();
2295again:
2296 list_for_each_entry_rcu(ptype, ptype_list, list) {
2297 if (READ_ONCE(ptype->ignore_outgoing))
2298 continue;
2299
2300 /* Never send packets back to the socket
2301 * they originated from - MvS (miquels@drinkel.ow.org)
2302 */
2303 if (skb_loop_sk(ptype, skb))
2304 continue;
2305
2306 if (pt_prev) {
2307 deliver_skb(skb2, pt_prev, skb->dev);
2308 pt_prev = ptype;
2309 continue;
2310 }
2311
2312 /* need to clone skb, done only once */
2313 skb2 = skb_clone(skb, GFP_ATOMIC);
2314 if (!skb2)
2315 goto out_unlock;
2316
2317 net_timestamp_set(skb2);
2318
2319 /* skb->nh should be correctly
2320 * set by sender, so that the second statement is
2321 * just protection against buggy protocols.
2322 */
2323 skb_reset_mac_header(skb2);
2324
2325 if (skb_network_header(skb2) < skb2->data ||
2326 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
2327 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
2328 ntohs(skb2->protocol),
2329 dev->name);
2330 skb_reset_network_header(skb2);
2331 }
2332
2333 skb2->transport_header = skb2->network_header;
2334 skb2->pkt_type = PACKET_OUTGOING;
2335 pt_prev = ptype;
2336 }
2337
2338 if (ptype_list == &net_hotdata.ptype_all) {
2339 ptype_list = &dev->ptype_all;
2340 goto again;
2341 }
2342out_unlock:
2343 if (pt_prev) {
2344 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
2345 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
2346 else
2347 kfree_skb(skb2);
2348 }
2349 rcu_read_unlock();
2350}
2351EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
2352
2353/**
2354 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
2355 * @dev: Network device
2356 * @txq: number of queues available
2357 *
2358 * If real_num_tx_queues is changed the tc mappings may no longer be
2359 * valid. To resolve this verify that each tc mapping remains valid and,
2360 * if not, null the mapping. With no priorities mapping to an
2361 * offset/count pair, that pair will no longer be used. In the worst case,
2362 * if TC0 is invalid, nothing can be done, so priority mappings are
2363 * disabled entirely. It is expected that drivers will fix this mapping
2364 * if they can before calling netif_set_real_num_tx_queues.
2365 */
2366static void netif_setup_tc(struct net_device *dev, unsigned int txq)
2367{
2368 int i;
2369 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2370
2371 /* If TC0 is invalidated disable TC mapping */
2372 if (tc->offset + tc->count > txq) {
2373 netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
2374 dev->num_tc = 0;
2375 return;
2376 }
2377
2378 /* Invalidated prio to tc mappings set to TC0 */
2379 for (i = 1; i < TC_BITMASK + 1; i++) {
2380 int q = netdev_get_prio_tc_map(dev, i);
2381
2382 tc = &dev->tc_to_txq[q];
2383 if (tc->offset + tc->count > txq) {
2384 netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
2385 i, q);
2386 netdev_set_prio_tc_map(dev, i, 0);
2387 }
2388 }
2389}
2390
2391int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
2392{
2393 if (dev->num_tc) {
2394 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2395 int i;
2396
2397 /* walk through the TCs and see if it falls into any of them */
2398 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
2399 if ((txq - tc->offset) < tc->count)
2400 return i;
2401 }
2402
2403 /* didn't find it, just return -1 to indicate no match */
2404 return -1;
2405 }
2406
2407 return 0;
2408}
2409EXPORT_SYMBOL(netdev_txq_to_tc);
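
/*
 * Worked example: with two TCs of 4 queues each (TC0 at offset 0, TC1 at
 * offset 4), txq 5 satisfies (5 - 4) < 4 in TC1's range, so
 * netdev_txq_to_tc() returns 1; txq 9 matches no range and yields -1.
 */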
2410
2411#ifdef CONFIG_XPS
2412static struct static_key xps_needed __read_mostly;
2413static struct static_key xps_rxqs_needed __read_mostly;
2414static DEFINE_MUTEX(xps_map_mutex);
2415#define xmap_dereference(P) \
2416 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2417
2418static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
2419 struct xps_dev_maps *old_maps, int tci, u16 index)
2420{
2421 struct xps_map *map = NULL;
2422 int pos;
2423
2424 map = xmap_dereference(dev_maps->attr_map[tci]);
2425 if (!map)
2426 return false;
2427
2428 for (pos = map->len; pos--;) {
2429 if (map->queues[pos] != index)
2430 continue;
2431
2432 if (map->len > 1) {
2433 map->queues[pos] = map->queues[--map->len];
2434 break;
2435 }
2436
2437 if (old_maps)
2438 RCU_INIT_POINTER(old_maps->attr_map[tci], NULL);
2439 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2440 kfree_rcu(map, rcu);
2441 return false;
2442 }
2443
2444 return true;
2445}
2446
2447static bool remove_xps_queue_cpu(struct net_device *dev,
2448 struct xps_dev_maps *dev_maps,
2449 int cpu, u16 offset, u16 count)
2450{
2451 int num_tc = dev_maps->num_tc;
2452 bool active = false;
2453 int tci;
2454
2455 for (tci = cpu * num_tc; num_tc--; tci++) {
2456 int i, j;
2457
2458 for (i = count, j = offset; i--; j++) {
2459 if (!remove_xps_queue(dev_maps, NULL, tci, j))
2460 break;
2461 }
2462
2463 active |= i < 0;
2464 }
2465
2466 return active;
2467}
2468
2469static void reset_xps_maps(struct net_device *dev,
2470 struct xps_dev_maps *dev_maps,
2471 enum xps_map_type type)
2472{
2473 static_key_slow_dec_cpuslocked(&xps_needed);
2474 if (type == XPS_RXQS)
2475 static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
2476
2477 RCU_INIT_POINTER(dev->xps_maps[type], NULL);
2478
2479 kfree_rcu(dev_maps, rcu);
2480}
2481
2482static void clean_xps_maps(struct net_device *dev, enum xps_map_type type,
2483 u16 offset, u16 count)
2484{
2485 struct xps_dev_maps *dev_maps;
2486 bool active = false;
2487 int i, j;
2488
2489 dev_maps = xmap_dereference(dev->xps_maps[type]);
2490 if (!dev_maps)
2491 return;
2492
2493 for (j = 0; j < dev_maps->nr_ids; j++)
2494 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, count);
2495 if (!active)
2496 reset_xps_maps(dev, dev_maps, type);
2497
2498 if (type == XPS_CPUS) {
2499 for (i = offset + (count - 1); count--; i--)
2500 netdev_queue_numa_node_write(
2501 netdev_get_tx_queue(dev, i), NUMA_NO_NODE);
2502 }
2503}
2504
2505static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2506 u16 count)
2507{
2508 if (!static_key_false(&xps_needed))
2509 return;
2510
2511 cpus_read_lock();
2512 mutex_lock(&xps_map_mutex);
2513
2514 if (static_key_false(&xps_rxqs_needed))
2515 clean_xps_maps(dev, XPS_RXQS, offset, count);
2516
2517 clean_xps_maps(dev, XPS_CPUS, offset, count);
2518
2519 mutex_unlock(&xps_map_mutex);
2520 cpus_read_unlock();
2521}
2522
2523static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2524{
2525 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2526}
2527
2528static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
2529 u16 index, bool is_rxqs_map)
2530{
2531 struct xps_map *new_map;
2532 int alloc_len = XPS_MIN_MAP_ALLOC;
2533 int i, pos;
2534
2535 for (pos = 0; map && pos < map->len; pos++) {
2536 if (map->queues[pos] != index)
2537 continue;
2538 return map;
2539 }
2540
2541 /* Need to add tx-queue to this CPU's/rx-queue's existing map */
2542 if (map) {
2543 if (pos < map->alloc_len)
2544 return map;
2545
2546 alloc_len = map->alloc_len * 2;
2547 }
2548
2549 /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
2550 * map
2551 */
2552 if (is_rxqs_map)
2553 new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
2554 else
2555 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2556 cpu_to_node(attr_index));
2557 if (!new_map)
2558 return NULL;
2559
2560 for (i = 0; i < pos; i++)
2561 new_map->queues[i] = map->queues[i];
2562 new_map->alloc_len = alloc_len;
2563 new_map->len = pos;
2564
2565 return new_map;
2566}
2567
2568/* Copy xps maps at a given index */
2569static void xps_copy_dev_maps(struct xps_dev_maps *dev_maps,
2570 struct xps_dev_maps *new_dev_maps, int index,
2571 int tc, bool skip_tc)
2572{
2573 int i, tci = index * dev_maps->num_tc;
2574 struct xps_map *map;
2575
2576 /* copy maps belonging to foreign traffic classes */
2577 for (i = 0; i < dev_maps->num_tc; i++, tci++) {
2578 if (i == tc && skip_tc)
2579 continue;
2580
2581 /* fill in the new device map from the old device map */
2582 map = xmap_dereference(dev_maps->attr_map[tci]);
2583 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2584 }
2585}
2586
2587/* Must be called under cpus_read_lock */
2588int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
2589 u16 index, enum xps_map_type type)
2590{
2591 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL, *old_dev_maps = NULL;
2592 const unsigned long *online_mask = NULL;
2593 bool active = false, copy = false;
2594 int i, j, tci, numa_node_id = -2;
2595 int maps_sz, num_tc = 1, tc = 0;
2596 struct xps_map *map, *new_map;
2597 unsigned int nr_ids;
2598
2599 WARN_ON_ONCE(index >= dev->num_tx_queues);
2600
2601 if (dev->num_tc) {
2602 /* Do not allow XPS on subordinate device directly */
2603 num_tc = dev->num_tc;
2604 if (num_tc < 0)
2605 return -EINVAL;
2606
2607 /* If queue belongs to subordinate dev use its map */
2608 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
2609
2610 tc = netdev_txq_to_tc(dev, index);
2611 if (tc < 0)
2612 return -EINVAL;
2613 }
2614
2615 mutex_lock(&xps_map_mutex);
2616
2617 dev_maps = xmap_dereference(dev->xps_maps[type]);
2618 if (type == XPS_RXQS) {
2619 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
2620 nr_ids = dev->num_rx_queues;
2621 } else {
2622 maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
2623 if (num_possible_cpus() > 1)
2624 online_mask = cpumask_bits(cpu_online_mask);
2625 nr_ids = nr_cpu_ids;
2626 }
2627
2628 if (maps_sz < L1_CACHE_BYTES)
2629 maps_sz = L1_CACHE_BYTES;
2630
2631 /* The old dev_maps could be larger or smaller than the one we're
2632 * setting up now, as dev->num_tc or nr_ids could have been updated in
2633 * between. We could try to be smart, but let's be safe instead and only
2634 * copy foreign traffic classes if the two map sizes match.
2635 */
2636 if (dev_maps &&
2637 dev_maps->num_tc == num_tc && dev_maps->nr_ids == nr_ids)
2638 copy = true;
2639
2640 /* allocate memory for queue storage */
2641 for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
2642 j < nr_ids;) {
2643 if (!new_dev_maps) {
2644 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2645 if (!new_dev_maps) {
2646 mutex_unlock(&xps_map_mutex);
2647 return -ENOMEM;
2648 }
2649
2650 new_dev_maps->nr_ids = nr_ids;
2651 new_dev_maps->num_tc = num_tc;
2652 }
2653
2654 tci = j * num_tc + tc;
2655 map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL;
2656
2657 map = expand_xps_map(map, j, index, type == XPS_RXQS);
2658 if (!map)
2659 goto error;
2660
2661 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2662 }
2663
2664 if (!new_dev_maps)
2665 goto out_no_new_maps;
2666
2667 if (!dev_maps) {
2668 /* Increment static keys at most once per type */
2669 static_key_slow_inc_cpuslocked(&xps_needed);
2670 if (type == XPS_RXQS)
2671 static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
2672 }
2673
2674 for (j = 0; j < nr_ids; j++) {
2675 bool skip_tc = false;
2676
2677 tci = j * num_tc + tc;
2678 if (netif_attr_test_mask(j, mask, nr_ids) &&
2679 netif_attr_test_online(j, online_mask, nr_ids)) {
2680 /* add tx-queue to CPU/rx-queue maps */
2681 int pos = 0;
2682
2683 skip_tc = true;
2684
2685 map = xmap_dereference(new_dev_maps->attr_map[tci]);
2686 while ((pos < map->len) && (map->queues[pos] != index))
2687 pos++;
2688
2689 if (pos == map->len)
2690 map->queues[map->len++] = index;
2691#ifdef CONFIG_NUMA
2692 if (type == XPS_CPUS) {
2693 if (numa_node_id == -2)
2694 numa_node_id = cpu_to_node(j);
2695 else if (numa_node_id != cpu_to_node(j))
2696 numa_node_id = -1;
2697 }
2698#endif
2699 }
2700
2701 if (copy)
2702 xps_copy_dev_maps(dev_maps, new_dev_maps, j, tc,
2703 skip_tc);
2704 }
2705
2706 rcu_assign_pointer(dev->xps_maps[type], new_dev_maps);
2707
2708 /* Cleanup old maps */
2709 if (!dev_maps)
2710 goto out_no_old_maps;
2711
2712 for (j = 0; j < dev_maps->nr_ids; j++) {
2713 for (i = num_tc, tci = j * dev_maps->num_tc; i--; tci++) {
2714 map = xmap_dereference(dev_maps->attr_map[tci]);
2715 if (!map)
2716 continue;
2717
2718 if (copy) {
2719 new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2720 if (map == new_map)
2721 continue;
2722 }
2723
2724 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2725 kfree_rcu(map, rcu);
2726 }
2727 }
2728
2729 old_dev_maps = dev_maps;
2730
2731out_no_old_maps:
2732 dev_maps = new_dev_maps;
2733 active = true;
2734
2735out_no_new_maps:
2736 if (type == XPS_CPUS)
2737 /* update Tx queue numa node */
2738 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2739 (numa_node_id >= 0) ?
2740 numa_node_id : NUMA_NO_NODE);
2741
2742 if (!dev_maps)
2743 goto out_no_maps;
2744
2745 /* removes tx-queue from unused CPUs/rx-queues */
2746 for (j = 0; j < dev_maps->nr_ids; j++) {
2747 tci = j * dev_maps->num_tc;
2748
2749 for (i = 0; i < dev_maps->num_tc; i++, tci++) {
2750 if (i == tc &&
2751 netif_attr_test_mask(j, mask, dev_maps->nr_ids) &&
2752 netif_attr_test_online(j, online_mask, dev_maps->nr_ids))
2753 continue;
2754
2755 active |= remove_xps_queue(dev_maps,
2756 copy ? old_dev_maps : NULL,
2757 tci, index);
2758 }
2759 }
2760
2761 if (old_dev_maps)
2762 kfree_rcu(old_dev_maps, rcu);
2763
2764 /* free map if not active */
2765 if (!active)
2766 reset_xps_maps(dev, dev_maps, type);
2767
2768out_no_maps:
2769 mutex_unlock(&xps_map_mutex);
2770
2771 return 0;
2772error:
2773 /* remove any maps that we added */
2774 for (j = 0; j < nr_ids; j++) {
2775 for (i = num_tc, tci = j * num_tc; i--; tci++) {
2776 new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2777 map = copy ?
2778 xmap_dereference(dev_maps->attr_map[tci]) :
2779 NULL;
2780 if (new_map && new_map != map)
2781 kfree(new_map);
2782 }
2783 }
2784
2785 mutex_unlock(&xps_map_mutex);
2786
2787 kfree(new_dev_maps);
2788 return -ENOMEM;
2789}
2790EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
2791
2792int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2793 u16 index)
2794{
2795 int ret;
2796
2797 cpus_read_lock();
2798 ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS);
2799 cpus_read_unlock();
2800
2801 return ret;
2802}
2803EXPORT_SYMBOL(netif_set_xps_queue);
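
/*
 * Illustrative sketch: a driver pinning Tx queue i to CPU i during setup
 * could build the mask like this (error handling elided):
 *
 *	cpumask_var_t mask;
 *
 *	if (zalloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		cpumask_set_cpu(i, mask);
 *		netif_set_xps_queue(dev, mask, i);
 *		free_cpumask_var(mask);
 *	}
 *
 * This is the same mapping user space can request through
 * /sys/class/net/<iface>/queues/tx-<i>/xps_cpus.
 */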
2804
2805#endif
2806static void netdev_unbind_all_sb_channels(struct net_device *dev)
2807{
2808 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2809
2810 /* Unbind any subordinate channels */
2811 while (txq-- != &dev->_tx[0]) {
2812 if (txq->sb_dev)
2813 netdev_unbind_sb_channel(dev, txq->sb_dev);
2814 }
2815}
2816
2817void netdev_reset_tc(struct net_device *dev)
2818{
2819#ifdef CONFIG_XPS
2820 netif_reset_xps_queues_gt(dev, 0);
2821#endif
2822 netdev_unbind_all_sb_channels(dev);
2823
2824 /* Reset TC configuration of device */
2825 dev->num_tc = 0;
2826 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2827 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2828}
2829EXPORT_SYMBOL(netdev_reset_tc);
2830
2831int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2832{
2833 if (tc >= dev->num_tc)
2834 return -EINVAL;
2835
2836#ifdef CONFIG_XPS
2837 netif_reset_xps_queues(dev, offset, count);
2838#endif
2839 dev->tc_to_txq[tc].count = count;
2840 dev->tc_to_txq[tc].offset = offset;
2841 return 0;
2842}
2843EXPORT_SYMBOL(netdev_set_tc_queue);
2844
2845int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2846{
2847 if (num_tc > TC_MAX_QUEUE)
2848 return -EINVAL;
2849
2850#ifdef CONFIG_XPS
2851 netif_reset_xps_queues_gt(dev, 0);
2852#endif
2853 netdev_unbind_all_sb_channels(dev);
2854
2855 dev->num_tc = num_tc;
2856 return 0;
2857}
2858EXPORT_SYMBOL(netdev_set_num_tc);
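
/*
 * Illustrative sketch: a driver exposing two traffic classes over eight
 * queues, with queues 0-3 backing TC0 and queues 4-7 backing TC1, would
 * configure the mapping roughly as:
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 4, 0);
 *	netdev_set_tc_queue(dev, 1, 4, 4);
 *	netdev_set_prio_tc_map(dev, 0, 0);
 *	netdev_set_prio_tc_map(dev, 1, 1);
 */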
2859
2860void netdev_unbind_sb_channel(struct net_device *dev,
2861 struct net_device *sb_dev)
2862{
2863 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2864
2865#ifdef CONFIG_XPS
2866 netif_reset_xps_queues_gt(sb_dev, 0);
2867#endif
2868 memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
2869 memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
2870
2871 while (txq-- != &dev->_tx[0]) {
2872 if (txq->sb_dev == sb_dev)
2873 txq->sb_dev = NULL;
2874 }
2875}
2876EXPORT_SYMBOL(netdev_unbind_sb_channel);
2877
2878int netdev_bind_sb_channel_queue(struct net_device *dev,
2879 struct net_device *sb_dev,
2880 u8 tc, u16 count, u16 offset)
2881{
2882 /* Make certain the sb_dev and dev are already configured */
2883 if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
2884 return -EINVAL;
2885
2886 /* We cannot hand out queues we don't have */
2887 if ((offset + count) > dev->real_num_tx_queues)
2888 return -EINVAL;
2889
2890 /* Record the mapping */
2891 sb_dev->tc_to_txq[tc].count = count;
2892 sb_dev->tc_to_txq[tc].offset = offset;
2893
2894 /* Provide a way for Tx queue to find the tc_to_txq map or
2895 * XPS map for itself.
2896 */
2897 while (count--)
2898 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
2899
2900 return 0;
2901}
2902EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
2903
2904int netdev_set_sb_channel(struct net_device *dev, u16 channel)
2905{
2906 /* Do not use a multiqueue device to represent a subordinate channel */
2907 if (netif_is_multiqueue(dev))
2908 return -ENODEV;
2909
2910 /* We allow channels 1 - 32767 to be used for subordinate channels.
2911 * Channel 0 is meant to be "native" mode and used only to represent
2912 * the main root device. We allow writing 0 to reset the device back
2913 * to normal mode after being used as a subordinate channel.
2914 */
2915 if (channel > S16_MAX)
2916 return -EINVAL;
2917
2918 dev->num_tc = -channel;
2919
2920 return 0;
2921}
2922EXPORT_SYMBOL(netdev_set_sb_channel);
2923
2924/*
2925 * Routine to help set real_num_tx_queues. To avoid using skbs mapped to
2926 * queues greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2927 */
2928int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2929{
2930 bool disabling;
2931 int rc;
2932
2933 disabling = txq < dev->real_num_tx_queues;
2934
2935 if (txq < 1 || txq > dev->num_tx_queues)
2936 return -EINVAL;
2937
2938 if (dev->reg_state == NETREG_REGISTERED ||
2939 dev->reg_state == NETREG_UNREGISTERING) {
2940 ASSERT_RTNL();
2941
2942 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2943 txq);
2944 if (rc)
2945 return rc;
2946
2947 if (dev->num_tc)
2948 netif_setup_tc(dev, txq);
2949
2950 dev_qdisc_change_real_num_tx(dev, txq);
2951
2952 dev->real_num_tx_queues = txq;
2953
2954 if (disabling) {
2955 synchronize_net();
2956 qdisc_reset_all_tx_gt(dev, txq);
2957#ifdef CONFIG_XPS
2958 netif_reset_xps_queues_gt(dev, txq);
2959#endif
2960 }
2961 } else {
2962 dev->real_num_tx_queues = txq;
2963 }
2964
2965 return 0;
2966}
2967EXPORT_SYMBOL(netif_set_real_num_tx_queues);
2968
2969#ifdef CONFIG_SYSFS
2970/**
2971 * netif_set_real_num_rx_queues - set actual number of RX queues used
2972 * @dev: Network device
2973 * @rxq: Actual number of RX queues
2974 *
2975 * This must be called either with the rtnl_lock held or before
2976 * registration of the net device. Returns 0 on success, or a
2977 * negative error code. If called before registration, it always
2978 * succeeds.
2979 */
2980int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2981{
2982 int rc;
2983
2984 if (rxq < 1 || rxq > dev->num_rx_queues)
2985 return -EINVAL;
2986
2987 if (dev->reg_state == NETREG_REGISTERED) {
2988 ASSERT_RTNL();
2989
2990 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2991 rxq);
2992 if (rc)
2993 return rc;
2994 }
2995
2996 dev->real_num_rx_queues = rxq;
2997 return 0;
2998}
2999EXPORT_SYMBOL(netif_set_real_num_rx_queues);
3000#endif
3001
3002/**
3003 * netif_set_real_num_queues - set actual number of RX and TX queues used
3004 * @dev: Network device
3005 * @txq: Actual number of TX queues
3006 * @rxq: Actual number of RX queues
3007 *
3008 * Set the real number of both TX and RX queues.
3009 * Does nothing if the number of queues is already correct.
3010 */
3011int netif_set_real_num_queues(struct net_device *dev,
3012 unsigned int txq, unsigned int rxq)
3013{
3014 unsigned int old_rxq = dev->real_num_rx_queues;
3015 int err;
3016
3017 if (txq < 1 || txq > dev->num_tx_queues ||
3018 rxq < 1 || rxq > dev->num_rx_queues)
3019 return -EINVAL;
3020
3021 /* Start from increases, so the error path only does decreases -
3022 * decreases can't fail.
3023 */
3024 if (rxq > dev->real_num_rx_queues) {
3025 err = netif_set_real_num_rx_queues(dev, rxq);
3026 if (err)
3027 return err;
3028 }
3029 if (txq > dev->real_num_tx_queues) {
3030 err = netif_set_real_num_tx_queues(dev, txq);
3031 if (err)
3032 goto undo_rx;
3033 }
3034 if (rxq < dev->real_num_rx_queues)
3035 WARN_ON(netif_set_real_num_rx_queues(dev, rxq));
3036 if (txq < dev->real_num_tx_queues)
3037 WARN_ON(netif_set_real_num_tx_queues(dev, txq));
3038
3039 return 0;
3040undo_rx:
3041 WARN_ON(netif_set_real_num_rx_queues(dev, old_rxq));
3042 return err;
3043}
3044EXPORT_SYMBOL(netif_set_real_num_queues);
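
/*
 * Illustrative sketch: an ethtool-style channel reconfiguration can change
 * both directions in one call and rely on the rollback above:
 *
 *	err = netif_set_real_num_queues(dev, new_txq, new_rxq);
 *	if (err)
 *		return err;
 *
 * On failure neither count has changed: increases are applied first, and
 * the error path undoes the RX increase before returning.
 */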
3045
3046/**
3047 * netif_set_tso_max_size() - set the max size of TSO frames supported
3048 * @dev: netdev to update
3049 * @size: max skb->len of a TSO frame
3050 *
3051 * Set the limit on the size of TSO super-frames the device can handle.
3052 * Unless explicitly set the stack will assume the value of
3053 * %GSO_LEGACY_MAX_SIZE.
3054 */
3055void netif_set_tso_max_size(struct net_device *dev, unsigned int size)
3056{
3057 dev->tso_max_size = min(GSO_MAX_SIZE, size);
3058 if (size < READ_ONCE(dev->gso_max_size))
3059 netif_set_gso_max_size(dev, size);
3060 if (size < READ_ONCE(dev->gso_ipv4_max_size))
3061 netif_set_gso_ipv4_max_size(dev, size);
3062}
3063EXPORT_SYMBOL(netif_set_tso_max_size);
3064
3065/**
3066 * netif_set_tso_max_segs() - set the max number of segs supported for TSO
3067 * @dev: netdev to update
3068 * @segs: max number of TCP segments
3069 *
3070 * Set the limit on the number of TCP segments the device can generate from
3071 * a single TSO super-frame.
3072 * Unless explicitly set the stack will assume the value of %GSO_MAX_SEGS.
3073 */
3074void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs)
3075{
3076 dev->tso_max_segs = segs;
3077 if (segs < READ_ONCE(dev->gso_max_segs))
3078 netif_set_gso_max_segs(dev, segs);
3079}
3080EXPORT_SYMBOL(netif_set_tso_max_segs);
3081
3082/**
3083 * netif_inherit_tso_max() - copy all TSO limits from a lower device to an upper
3084 * @to: netdev to update
3085 * @from: netdev from which to copy the limits
3086 */
3087void netif_inherit_tso_max(struct net_device *to, const struct net_device *from)
3088{
3089 netif_set_tso_max_size(to, from->tso_max_size);
3090 netif_set_tso_max_segs(to, from->tso_max_segs);
3091}
3092EXPORT_SYMBOL(netif_inherit_tso_max);
3093
3094/**
3095 * netif_get_num_default_rss_queues - default number of RSS queues
3096 *
3097 * The default is the number of physical cores if there are only 1 or 2, or
3098 * half that number (rounded up) if there are more.
3099 */
3100int netif_get_num_default_rss_queues(void)
3101{
3102 cpumask_var_t cpus;
3103 int cpu, count = 0;
3104
3105 if (unlikely(is_kdump_kernel() || !zalloc_cpumask_var(&cpus, GFP_KERNEL)))
3106 return 1;
3107
3108 cpumask_copy(cpus, cpu_online_mask);
3109 for_each_cpu(cpu, cpus) {
3110 ++count;
3111 cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
3112 }
3113 free_cpumask_var(cpus);
3114
3115 return count > 2 ? DIV_ROUND_UP(count, 2) : count;
3116}
3117EXPORT_SYMBOL(netif_get_num_default_rss_queues);
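
/*
 * Worked example: on a 16-core/32-thread machine the sibling-mask walk
 * above counts 16 physical cores, so this returns DIV_ROUND_UP(16, 2) = 8;
 * a dual-core box gets 2, and a kdump kernel always gets 1.
 */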
3118
3119static void __netif_reschedule(struct Qdisc *q)
3120{
3121 struct softnet_data *sd;
3122 unsigned long flags;
3123
3124 local_irq_save(flags);
3125 sd = this_cpu_ptr(&softnet_data);
3126 q->next_sched = NULL;
3127 *sd->output_queue_tailp = q;
3128 sd->output_queue_tailp = &q->next_sched;
3129 raise_softirq_irqoff(NET_TX_SOFTIRQ);
3130 local_irq_restore(flags);
3131}
3132
3133void __netif_schedule(struct Qdisc *q)
3134{
3135 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
3136 __netif_reschedule(q);
3137}
3138EXPORT_SYMBOL(__netif_schedule);
3139
3140struct dev_kfree_skb_cb {
3141 enum skb_drop_reason reason;
3142};
3143
3144static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
3145{
3146 return (struct dev_kfree_skb_cb *)skb->cb;
3147}
3148
3149void netif_schedule_queue(struct netdev_queue *txq)
3150{
3151 rcu_read_lock();
3152 if (!netif_xmit_stopped(txq)) {
3153 struct Qdisc *q = rcu_dereference(txq->qdisc);
3154
3155 __netif_schedule(q);
3156 }
3157 rcu_read_unlock();
3158}
3159EXPORT_SYMBOL(netif_schedule_queue);
3160
3161void netif_tx_wake_queue(struct netdev_queue *dev_queue)
3162{
3163 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
3164 struct Qdisc *q;
3165
3166 rcu_read_lock();
3167 q = rcu_dereference(dev_queue->qdisc);
3168 __netif_schedule(q);
3169 rcu_read_unlock();
3170 }
3171}
3172EXPORT_SYMBOL(netif_tx_wake_queue);
3173
3174void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason)
3175{
3176 unsigned long flags;
3177
3178 if (unlikely(!skb))
3179 return;
3180
3181 if (likely(refcount_read(&skb->users) == 1)) {
3182 smp_rmb();
3183 refcount_set(&skb->users, 0);
3184 } else if (likely(!refcount_dec_and_test(&skb->users))) {
3185 return;
3186 }
3187 get_kfree_skb_cb(skb)->reason = reason;
3188 local_irq_save(flags);
3189 skb->next = __this_cpu_read(softnet_data.completion_queue);
3190 __this_cpu_write(softnet_data.completion_queue, skb);
3191 raise_softirq_irqoff(NET_TX_SOFTIRQ);
3192 local_irq_restore(flags);
3193}
3194EXPORT_SYMBOL(dev_kfree_skb_irq_reason);
3195
3196void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason)
3197{
3198 if (in_hardirq() || irqs_disabled())
3199 dev_kfree_skb_irq_reason(skb, reason);
3200 else
3201 kfree_skb_reason(skb, reason);
3202}
3203EXPORT_SYMBOL(dev_kfree_skb_any_reason);
3204
3205
3206/**
3207 * netif_device_detach - mark device as removed
3208 * @dev: network device
3209 *
3210 * Mark device as removed from the system and therefore no longer available.
3211 */
3212void netif_device_detach(struct net_device *dev)
3213{
3214 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
3215 netif_running(dev)) {
3216 netif_tx_stop_all_queues(dev);
3217 }
3218}
3219EXPORT_SYMBOL(netif_device_detach);
3220
3221/**
3222 * netif_device_attach - mark device as attached
3223 * @dev: network device
3224 *
3225 * Mark device as attached to the system and restart it if needed.
3226 */
3227void netif_device_attach(struct net_device *dev)
3228{
3229 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
3230 netif_running(dev)) {
3231 netif_tx_wake_all_queues(dev);
3232 __netdev_watchdog_up(dev);
3233 }
3234}
3235EXPORT_SYMBOL(netif_device_attach);
3236
3237/*
3238 * Returns a Tx hash based on the given packet descriptor and the number of
3239 * Tx queues to be used as a distribution range.
3240 */
3241static u16 skb_tx_hash(const struct net_device *dev,
3242 const struct net_device *sb_dev,
3243 struct sk_buff *skb)
3244{
3245 u32 hash;
3246 u16 qoffset = 0;
3247 u16 qcount = dev->real_num_tx_queues;
3248
3249 if (dev->num_tc) {
3250 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
3251
3252 qoffset = sb_dev->tc_to_txq[tc].offset;
3253 qcount = sb_dev->tc_to_txq[tc].count;
3254 if (unlikely(!qcount)) {
3255 net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n",
3256 sb_dev->name, qoffset, tc);
3257 qoffset = 0;
3258 qcount = dev->real_num_tx_queues;
3259 }
3260 }
3261
3262 if (skb_rx_queue_recorded(skb)) {
3263 DEBUG_NET_WARN_ON_ONCE(qcount == 0);
3264 hash = skb_get_rx_queue(skb);
3265 if (hash >= qoffset)
3266 hash -= qoffset;
3267 while (unlikely(hash >= qcount))
3268 hash -= qcount;
3269 return hash + qoffset;
3270 }
3271
3272 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
3273}
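
/*
 * Worked note on the last line above: reciprocal_scale(h, qcount) maps the
 * 32-bit flow hash h into [0, qcount) as (u64)h * qcount >> 32, avoiding a
 * modulo on this fast path; adding qoffset then lands the result inside the
 * selected traffic class's queue range.
 */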
3274
3275void skb_warn_bad_offload(const struct sk_buff *skb)
3276{
3277 static const netdev_features_t null_features;
3278 struct net_device *dev = skb->dev;
3279 const char *name = "";
3280
3281 if (!net_ratelimit())
3282 return;
3283
3284 if (dev) {
3285 if (dev->dev.parent)
3286 name = dev_driver_string(dev->dev.parent);
3287 else
3288 name = netdev_name(dev);
3289 }
3290 skb_dump(KERN_WARNING, skb, false);
3291 WARN(1, "%s: caps=(%pNF, %pNF)\n",
3292 name, dev ? &dev->features : &null_features,
3293 skb->sk ? &skb->sk->sk_route_caps : &null_features);
3294}
3295
3296/*
3297 * Invalidate hardware checksum when packet is to be mangled, and
3298 * complete checksum manually on outgoing path.
3299 */
3300int skb_checksum_help(struct sk_buff *skb)
3301{
3302 __wsum csum;
3303 int ret = 0, offset;
3304
3305 if (skb->ip_summed == CHECKSUM_COMPLETE)
3306 goto out_set_summed;
3307
3308 if (unlikely(skb_is_gso(skb))) {
3309 skb_warn_bad_offload(skb);
3310 return -EINVAL;
3311 }
3312
3313 /* Before computing a checksum, we should make sure no frag could
3314 * be modified by an external entity: the checksum could be wrong.
3315 */
3316 if (skb_has_shared_frag(skb)) {
3317 ret = __skb_linearize(skb);
3318 if (ret)
3319 goto out;
3320 }
3321
3322 offset = skb_checksum_start_offset(skb);
3323 ret = -EINVAL;
3324 if (unlikely(offset >= skb_headlen(skb))) {
3325 DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
3326 WARN_ONCE(true, "offset (%d) >= skb_headlen() (%u)\n",
3327 offset, skb_headlen(skb));
3328 goto out;
3329 }
3330 csum = skb_checksum(skb, offset, skb->len - offset, 0);
3331
3332 offset += skb->csum_offset;
3333 if (unlikely(offset + sizeof(__sum16) > skb_headlen(skb))) {
3334 DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
3335 WARN_ONCE(true, "offset+2 (%zu) > skb_headlen() (%u)\n",
3336 offset + sizeof(__sum16), skb_headlen(skb));
3337 goto out;
3338 }
3339 ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
3340 if (ret)
3341 goto out;
3342
3343 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
3344out_set_summed:
3345 skb->ip_summed = CHECKSUM_NONE;
3346out:
3347 return ret;
3348}
3349EXPORT_SYMBOL(skb_checksum_help);
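
/*
 * Note on the final store in skb_checksum_help(): csum_fold() compresses
 * the 32-bit partial sum into 16-bit ones'-complement form, and a folded
 * result of zero is replaced with CSUM_MANGLED_0 (0xffff), since an
 * all-zero checksum field means "checksum absent" for protocols such as
 * UDP over IPv4.
 */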
3350
3351int skb_crc32c_csum_help(struct sk_buff *skb)
3352{
3353 __le32 crc32c_csum;
3354 int ret = 0, offset, start;
3355
3356 if (skb->ip_summed != CHECKSUM_PARTIAL)
3357 goto out;
3358
3359 if (unlikely(skb_is_gso(skb)))
3360 goto out;
3361
3362 /* Before computing a checksum, we should make sure no frag could
3363 * be modified by an external entity: the checksum could be wrong.
3364 */
3365 if (unlikely(skb_has_shared_frag(skb))) {
3366 ret = __skb_linearize(skb);
3367 if (ret)
3368 goto out;
3369 }
3370 start = skb_checksum_start_offset(skb);
3371 offset = start + offsetof(struct sctphdr, checksum);
3372 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
3373 ret = -EINVAL;
3374 goto out;
3375 }
3376
3377 ret = skb_ensure_writable(skb, offset + sizeof(__le32));
3378 if (ret)
3379 goto out;
3380
3381 crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
3382 skb->len - start, ~(__u32)0,
3383 crc32c_csum_stub));
3384 *(__le32 *)(skb->data + offset) = crc32c_csum;
3385 skb_reset_csum_not_inet(skb);
3386out:
3387 return ret;
3388}
3389
3390__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
3391{
3392 __be16 type = skb->protocol;
3393
3394 /* Tunnel gso handlers can set protocol to ethernet. */
3395 if (type == htons(ETH_P_TEB)) {
3396 struct ethhdr *eth;
3397
3398 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
3399 return 0;
3400
3401 eth = (struct ethhdr *)skb->data;
3402 type = eth->h_proto;
3403 }
3404
3405 return vlan_get_protocol_and_depth(skb, type, depth);
3406}
3407
3408
3409/* Take action when hardware reception checksum errors are detected. */
3410#ifdef CONFIG_BUG
3411static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3412{
3413 netdev_err(dev, "hw csum failure\n");
3414 skb_dump(KERN_ERR, skb, true);
3415 dump_stack();
3416}
3417
3418void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3419{
3420 DO_ONCE_LITE(do_netdev_rx_csum_fault, dev, skb);
3421}
3422EXPORT_SYMBOL(netdev_rx_csum_fault);
3423#endif
3424
3425/* XXX: check that highmem exists at all on the given machine. */
3426static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
3427{
3428#ifdef CONFIG_HIGHMEM
3429 int i;
3430
3431 if (!(dev->features & NETIF_F_HIGHDMA)) {
3432 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3433 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3434
3435 if (PageHighMem(skb_frag_page(frag)))
3436 return 1;
3437 }
3438 }
3439#endif
3440 return 0;
3441}
3442
3443/* If MPLS offload request, verify we are testing hardware MPLS features
3444 * instead of standard features for the netdev.
3445 */
3446#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3447static netdev_features_t net_mpls_features(struct sk_buff *skb,
3448 netdev_features_t features,
3449 __be16 type)
3450{
3451 if (eth_p_mpls(type))
3452 features &= skb->dev->mpls_features;
3453
3454 return features;
3455}
3456#else
3457static netdev_features_t net_mpls_features(struct sk_buff *skb,
3458 netdev_features_t features,
3459 __be16 type)
3460{
3461 return features;
3462}
3463#endif
3464
3465static netdev_features_t harmonize_features(struct sk_buff *skb,
3466 netdev_features_t features)
3467{
3468 __be16 type;
3469
3470 type = skb_network_protocol(skb, NULL);
3471 features = net_mpls_features(skb, features, type);
3472
3473 if (skb->ip_summed != CHECKSUM_NONE &&
3474 !can_checksum_protocol(features, type)) {
3475 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3476 }
3477 if (illegal_highdma(skb->dev, skb))
3478 features &= ~NETIF_F_SG;
3479
3480 return features;
3481}
3482
3483netdev_features_t passthru_features_check(struct sk_buff *skb,
3484 struct net_device *dev,
3485 netdev_features_t features)
3486{
3487 return features;
3488}
3489EXPORT_SYMBOL(passthru_features_check);
3490
3491static netdev_features_t dflt_features_check(struct sk_buff *skb,
3492 struct net_device *dev,
3493 netdev_features_t features)
3494{
3495 return vlan_features_check(skb, features);
3496}
3497
3498static netdev_features_t gso_features_check(const struct sk_buff *skb,
3499 struct net_device *dev,
3500 netdev_features_t features)
3501{
3502 u16 gso_segs = skb_shinfo(skb)->gso_segs;
3503
3504 if (gso_segs > READ_ONCE(dev->gso_max_segs))
3505 return features & ~NETIF_F_GSO_MASK;
3506
3507 if (unlikely(skb->len >= READ_ONCE(dev->gso_max_size)))
3508 return features & ~NETIF_F_GSO_MASK;
3509
3510 if (!skb_shinfo(skb)->gso_type) {
3511 skb_warn_bad_offload(skb);
3512 return features & ~NETIF_F_GSO_MASK;
3513 }
3514
3515 /* Support for GSO partial features requires software
3516 * intervention before we can actually process the packets
3517 * so we need to strip support for any partial features now
3518 * and we can pull them back in after we have partially
3519 * segmented the frame.
3520 */
3521 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
3522 features &= ~dev->gso_partial_features;
3523
3524 /* Make sure to clear the IPv4 ID mangling feature if the
3525 * IPv4 header has the potential to be fragmented.
3526 */
3527 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
3528 struct iphdr *iph = skb->encapsulation ?
3529 inner_ip_hdr(skb) : ip_hdr(skb);
3530
3531 if (!(iph->frag_off & htons(IP_DF)))
3532 features &= ~NETIF_F_TSO_MANGLEID;
3533 }
3534
3535 return features;
3536}
3537
3538netdev_features_t netif_skb_features(struct sk_buff *skb)
3539{
3540 struct net_device *dev = skb->dev;
3541 netdev_features_t features = dev->features;
3542
3543 if (skb_is_gso(skb))
3544 features = gso_features_check(skb, dev, features);
3545
3546 /* If encapsulation offload request, verify we are testing
3547 * hardware encapsulation features instead of standard
3548 * features for the netdev
3549 */
3550 if (skb->encapsulation)
3551 features &= dev->hw_enc_features;
3552
3553 if (skb_vlan_tagged(skb))
3554 features = netdev_intersect_features(features,
3555 dev->vlan_features |
3556 NETIF_F_HW_VLAN_CTAG_TX |
3557 NETIF_F_HW_VLAN_STAG_TX);
3558
3559 if (dev->netdev_ops->ndo_features_check)
3560 features &= dev->netdev_ops->ndo_features_check(skb, dev,
3561 features);
3562 else
3563 features &= dflt_features_check(skb, dev, features);
3564
3565 return harmonize_features(skb, features);
3566}
3567EXPORT_SYMBOL(netif_skb_features);
3568
3569static int xmit_one(struct sk_buff *skb, struct net_device *dev,
3570 struct netdev_queue *txq, bool more)
3571{
3572 unsigned int len;
3573 int rc;
3574
3575 if (dev_nit_active(dev))
3576 dev_queue_xmit_nit(skb, dev);
3577
3578 len = skb->len;
3579 trace_net_dev_start_xmit(skb, dev);
3580 rc = netdev_start_xmit(skb, dev, txq, more);
3581 trace_net_dev_xmit(skb, rc, dev, len);
3582
3583 return rc;
3584}
3585
3586struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
3587 struct netdev_queue *txq, int *ret)
3588{
3589 struct sk_buff *skb = first;
3590 int rc = NETDEV_TX_OK;
3591
3592 while (skb) {
3593 struct sk_buff *next = skb->next;
3594
3595 skb_mark_not_on_list(skb);
3596 rc = xmit_one(skb, dev, txq, next != NULL);
3597 if (unlikely(!dev_xmit_complete(rc))) {
3598 skb->next = next;
3599 goto out;
3600 }
3601
3602 skb = next;
3603 if (netif_tx_queue_stopped(txq) && skb) {
3604 rc = NETDEV_TX_BUSY;
3605 break;
3606 }
3607 }
3608
3609out:
3610 *ret = rc;
3611 return skb;
3612}
3613
3614static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3615 netdev_features_t features)
3616{
3617 if (skb_vlan_tag_present(skb) &&
3618 !vlan_hw_offload_capable(features, skb->vlan_proto))
3619 skb = __vlan_hwaccel_push_inside(skb);
3620 return skb;
3621}
3622
3623int skb_csum_hwoffload_help(struct sk_buff *skb,
3624 const netdev_features_t features)
3625{
3626 if (unlikely(skb_csum_is_sctp(skb)))
3627 return !!(features & NETIF_F_SCTP_CRC) ? 0 :
3628 skb_crc32c_csum_help(skb);
3629
3630 if (features & NETIF_F_HW_CSUM)
3631 return 0;
3632
3633 if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
3634 switch (skb->csum_offset) {
3635 case offsetof(struct tcphdr, check):
3636 case offsetof(struct udphdr, check):
3637 return 0;
3638 }
3639 }
3640
3641 return skb_checksum_help(skb);
3642}
3643EXPORT_SYMBOL(skb_csum_hwoffload_help);
3644
3645static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
3646{
3647 netdev_features_t features;
3648
3649 features = netif_skb_features(skb);
3650 skb = validate_xmit_vlan(skb, features);
3651 if (unlikely(!skb))
3652 goto out_null;
3653
3654 skb = sk_validate_xmit_skb(skb, dev);
3655 if (unlikely(!skb))
3656 goto out_null;
3657
3658 if (netif_needs_gso(skb, features)) {
3659 struct sk_buff *segs;
3660
3661 segs = skb_gso_segment(skb, features);
3662 if (IS_ERR(segs)) {
3663 goto out_kfree_skb;
3664 } else if (segs) {
3665 consume_skb(skb);
3666 skb = segs;
3667 }
3668 } else {
3669 if (skb_needs_linearize(skb, features) &&
3670 __skb_linearize(skb))
3671 goto out_kfree_skb;
3672
3673 /* If packet is not checksummed and device does not
3674 * support checksumming for this protocol, complete
3675 * checksumming here.
3676 */
3677 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3678 if (skb->encapsulation)
3679 skb_set_inner_transport_header(skb,
3680 skb_checksum_start_offset(skb));
3681 else
3682 skb_set_transport_header(skb,
3683 skb_checksum_start_offset(skb));
3684 if (skb_csum_hwoffload_help(skb, features))
3685 goto out_kfree_skb;
3686 }
3687 }
3688
3689 skb = validate_xmit_xfrm(skb, features, again);
3690
3691 return skb;
3692
3693out_kfree_skb:
3694 kfree_skb(skb);
3695out_null:
3696 dev_core_stats_tx_dropped_inc(dev);
3697 return NULL;
3698}
3699
3700struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
3701{
3702 struct sk_buff *next, *head = NULL, *tail;
3703
3704 for (; skb != NULL; skb = next) {
3705 next = skb->next;
3706 skb_mark_not_on_list(skb);
3707
3708 /* in case skb won't be segmented, point to itself */
3709 skb->prev = skb;
3710
3711 skb = validate_xmit_skb(skb, dev, again);
3712 if (!skb)
3713 continue;
3714
3715 if (!head)
3716 head = skb;
3717 else
3718 tail->next = skb;
3719 /* If skb was segmented, skb->prev points to
3720 * the last segment. If not, it still contains skb.
3721 */
3722 tail = skb->prev;
3723 }
3724 return head;
3725}
3726EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
3727
3728static void qdisc_pkt_len_init(struct sk_buff *skb)
3729{
3730 const struct skb_shared_info *shinfo = skb_shinfo(skb);
3731
3732 qdisc_skb_cb(skb)->pkt_len = skb->len;
3733
3734 /* To get a more precise estimation of bytes sent on the wire,
3735 * we add the header size of all segments to pkt_len
3736 */
3737 if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
3738 u16 gso_segs = shinfo->gso_segs;
3739 unsigned int hdr_len;
3740
3741 /* mac layer + network layer */
3742 hdr_len = skb_transport_offset(skb);
3743
3744 /* + transport layer */
3745 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3746 const struct tcphdr *th;
3747 struct tcphdr _tcphdr;
3748
3749 th = skb_header_pointer(skb, hdr_len,
3750 sizeof(_tcphdr), &_tcphdr);
3751 if (likely(th))
3752 hdr_len += __tcp_hdrlen(th);
3753 } else {
3754 struct udphdr _udphdr;
3755
3756 if (skb_header_pointer(skb, hdr_len,
3757 sizeof(_udphdr), &_udphdr))
3758 hdr_len += sizeof(struct udphdr);
3759 }
3760
3761 if (shinfo->gso_type & SKB_GSO_DODGY)
3762 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3763 shinfo->gso_size);
3764
3765 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
3766 }
3767}
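
/*
 * Worked example: a TSO skb with skb->len = 65226, gso_size = 1448 and a
 * 66-byte header (14 Ethernet + 20 IPv4 + 32 TCP) carries 45 segments, so
 * pkt_len is increased by (45 - 1) * 66 bytes to approximate what will
 * actually hit the wire after segmentation.
 */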
3768
3769static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
3770 struct sk_buff **to_free,
3771 struct netdev_queue *txq)
3772{
3773 int rc;
3774
3775 rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK;
3776 if (rc == NET_XMIT_SUCCESS)
3777 trace_qdisc_enqueue(q, txq, skb);
3778 return rc;
3779}
3780
3781static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3782 struct net_device *dev,
3783 struct netdev_queue *txq)
3784{
3785 spinlock_t *root_lock = qdisc_lock(q);
3786 struct sk_buff *to_free = NULL;
3787 bool contended;
3788 int rc;
3789
3790 qdisc_calculate_pkt_len(skb, q);
3791
3792 tcf_set_drop_reason(skb, SKB_DROP_REASON_QDISC_DROP);
3793
3794 if (q->flags & TCQ_F_NOLOCK) {
3795 if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) &&
3796 qdisc_run_begin(q)) {
3797 /* Retest nolock_qdisc_is_empty() within the protection
3798 * of q->seqlock to protect from racing with requeuing.
3799 */
3800 if (unlikely(!nolock_qdisc_is_empty(q))) {
3801 rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3802 __qdisc_run(q);
3803 qdisc_run_end(q);
3804
3805 goto no_lock_out;
3806 }
3807
3808 qdisc_bstats_cpu_update(q, skb);
3809 if (sch_direct_xmit(skb, q, dev, txq, NULL, true) &&
3810 !nolock_qdisc_is_empty(q))
3811 __qdisc_run(q);
3812
3813 qdisc_run_end(q);
3814 return NET_XMIT_SUCCESS;
3815 }
3816
3817 rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3818 qdisc_run(q);
3819
3820no_lock_out:
3821 if (unlikely(to_free))
3822 kfree_skb_list_reason(to_free,
3823 tcf_get_drop_reason(to_free));
3824 return rc;
3825 }
3826
3827 if (unlikely(READ_ONCE(q->owner) == smp_processor_id())) {
3828 kfree_skb_reason(skb, SKB_DROP_REASON_TC_RECLASSIFY_LOOP);
3829 return NET_XMIT_DROP;
3830 }
3831 /*
3832 * Heuristic to force contended enqueues to serialize on a
3833 * separate lock before trying to get the qdisc main lock.
3834 * This permits the qdisc->running owner to get the lock more
3835 * often and dequeue packets faster.
3836 * On PREEMPT_RT it is possible to preempt the qdisc owner during xmit,
3837 * after which other tasks will only enqueue packets. The packets will be
3838 * sent after the qdisc owner is scheduled again. To prevent this
3839 * scenario, the task always serializes on the lock.
3840 */
3841 contended = qdisc_is_running(q) || IS_ENABLED(CONFIG_PREEMPT_RT);
3842 if (unlikely(contended))
3843 spin_lock(&q->busylock);
3844
3845 spin_lock(root_lock);
3846 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3847 __qdisc_drop(skb, &to_free);
3848 rc = NET_XMIT_DROP;
3849 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
3850 qdisc_run_begin(q)) {
3851 /*
3852 * This is a work-conserving queue; there are no old skbs
3853 * waiting to be sent out; and the qdisc is not running -
3854 * xmit the skb directly.
3855 */
3856
3857 qdisc_bstats_update(q, skb);
3858
3859 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
3860 if (unlikely(contended)) {
3861 spin_unlock(&q->busylock);
3862 contended = false;
3863 }
3864 __qdisc_run(q);
3865 }
3866
3867 qdisc_run_end(q);
3868 rc = NET_XMIT_SUCCESS;
3869 } else {
3870 WRITE_ONCE(q->owner, smp_processor_id());
3871 rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3872 WRITE_ONCE(q->owner, -1);
3873 if (qdisc_run_begin(q)) {
3874 if (unlikely(contended)) {
3875 spin_unlock(&q->busylock);
3876 contended = false;
3877 }
3878 __qdisc_run(q);
3879 qdisc_run_end(q);
3880 }
3881 }
3882 spin_unlock(root_lock);
3883 if (unlikely(to_free))
3884 kfree_skb_list_reason(to_free,
3885 tcf_get_drop_reason(to_free));
3886 if (unlikely(contended))
3887 spin_unlock(&q->busylock);
3888 return rc;
3889}
3890
3891#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3892static void skb_update_prio(struct sk_buff *skb)
3893{
3894 const struct netprio_map *map;
3895 const struct sock *sk;
3896 unsigned int prioidx;
3897
3898 if (skb->priority)
3899 return;
3900 map = rcu_dereference_bh(skb->dev->priomap);
3901 if (!map)
3902 return;
3903 sk = skb_to_full_sk(skb);
3904 if (!sk)
3905 return;
3906
3907 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
3908
3909 if (prioidx < map->priomap_len)
3910 skb->priority = map->priomap[prioidx];
3911}
3912#else
3913#define skb_update_prio(skb)
3914#endif
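
/* Usage note (illustrative, not part of this file): with
 * CONFIG_CGROUP_NET_PRIO, skb_update_prio() takes skb->priority from the
 * net_prio cgroup map of the owning socket. An administrator might
 * populate that map along these lines (paths are examples):
 *
 *	# mkdir /sys/fs/cgroup/net_prio/bulk
 *	# echo "eth0 2" > /sys/fs/cgroup/net_prio/bulk/net_prio.ifpriomap
 *
 * Sockets of tasks in that cgroup then get priority 2 on eth0 for skbs
 * that do not already carry a priority.
 */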
3915
3916/**
3917 * dev_loopback_xmit - loop back @skb
3918 * @net: network namespace this loopback is happening in
3919 * @sk: the socket; passed so this function can be used as a netfilter okfn
3920 * @skb: buffer to transmit
3921 */
3922int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3923{
3924 skb_reset_mac_header(skb);
3925 __skb_pull(skb, skb_network_offset(skb));
3926 skb->pkt_type = PACKET_LOOPBACK;
3927 if (skb->ip_summed == CHECKSUM_NONE)
3928 skb->ip_summed = CHECKSUM_UNNECESSARY;
3929 DEBUG_NET_WARN_ON_ONCE(!skb_dst(skb));
3930 skb_dst_force(skb);
3931 netif_rx(skb);
3932 return 0;
3933}
3934EXPORT_SYMBOL(dev_loopback_xmit);
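
/* Example (illustrative sketch): dev_loopback_xmit() matches the netfilter
 * okfn signature, so a caller that wants a multicast/broadcast copy looped
 * back to the local stack can invoke it through NF_HOOK(), much like the
 * IPv4 multicast output path does:
 *
 *	NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 *		net, sk, skb, NULL, skb->dev,
 *		dev_loopback_xmit);
 */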
3935
3936#ifdef CONFIG_NET_EGRESS
3937static struct netdev_queue *
3938netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb)
3939{
3940 int qm = skb_get_queue_mapping(skb);
3941
3942 return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm));
3943}
3944
3945#ifndef CONFIG_PREEMPT_RT
3946static bool netdev_xmit_txqueue_skipped(void)
3947{
3948 return __this_cpu_read(softnet_data.xmit.skip_txqueue);
3949}
3950
3951void netdev_xmit_skip_txqueue(bool skip)
3952{
3953 __this_cpu_write(softnet_data.xmit.skip_txqueue, skip);
3954}
3955EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
3956
3957#else
3958static bool netdev_xmit_txqueue_skipped(void)
3959{
3960 return current->net_xmit.skip_txqueue;
3961}
3962
3963void netdev_xmit_skip_txqueue(bool skip)
3964{
3965 current->net_xmit.skip_txqueue = skip;
3966}
3967EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
3968#endif
3969#endif /* CONFIG_NET_EGRESS */
3970
3971#ifdef CONFIG_NET_XGRESS
3972static int tc_run(struct tcx_entry *entry, struct sk_buff *skb,
3973 enum skb_drop_reason *drop_reason)
3974{
3975 int ret = TC_ACT_UNSPEC;
3976#ifdef CONFIG_NET_CLS_ACT
3977 struct mini_Qdisc *miniq = rcu_dereference_bh(entry->miniq);
3978 struct tcf_result res;
3979
3980 if (!miniq)
3981 return ret;
3982
3983 if (static_branch_unlikely(&tcf_bypass_check_needed_key)) {
3984 if (tcf_block_bypass_sw(miniq->block))
3985 return ret;
3986 }
3987
3988 tc_skb_cb(skb)->mru = 0;
3989 tc_skb_cb(skb)->post_ct = false;
3990 tcf_set_drop_reason(skb, *drop_reason);
3991
3992 mini_qdisc_bstats_cpu_update(miniq, skb);
3993 ret = tcf_classify(skb, miniq->block, miniq->filter_list, &res, false);
3994 /* Only tcf-related quirks below. */
3995 switch (ret) {
3996 case TC_ACT_SHOT:
3997 *drop_reason = tcf_get_drop_reason(skb);
3998 mini_qdisc_qstats_cpu_drop(miniq);
3999 break;
4000 case TC_ACT_OK:
4001 case TC_ACT_RECLASSIFY:
4002 skb->tc_index = TC_H_MIN(res.classid);
4003 break;
4004 }
4005#endif /* CONFIG_NET_CLS_ACT */
4006 return ret;
4007}
4008
4009static DEFINE_STATIC_KEY_FALSE(tcx_needed_key);
4010
4011void tcx_inc(void)
4012{
4013 static_branch_inc(&tcx_needed_key);
4014}
4015
4016void tcx_dec(void)
4017{
4018 static_branch_dec(&tcx_needed_key);
4019}
4020
4021static __always_inline enum tcx_action_base
4022tcx_run(const struct bpf_mprog_entry *entry, struct sk_buff *skb,
4023 const bool needs_mac)
4024{
4025 const struct bpf_mprog_fp *fp;
4026 const struct bpf_prog *prog;
4027 int ret = TCX_NEXT;
4028
4029 if (needs_mac)
4030 __skb_push(skb, skb->mac_len);
4031 bpf_mprog_foreach_prog(entry, fp, prog) {
4032 bpf_compute_data_pointers(skb);
4033 ret = bpf_prog_run(prog, skb);
4034 if (ret != TCX_NEXT)
4035 break;
4036 }
4037 if (needs_mac)
4038 __skb_pull(skb, skb->mac_len);
4039 return tcx_action_code(skb, ret);
4040}
4041
4042static __always_inline struct sk_buff *
4043sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
4044 struct net_device *orig_dev, bool *another)
4045{
4046 struct bpf_mprog_entry *entry = rcu_dereference_bh(skb->dev->tcx_ingress);
4047 enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_INGRESS;
4048 struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
4049 int sch_ret;
4050
4051 if (!entry)
4052 return skb;
4053
4054 bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
4055 if (*pt_prev) {
4056 *ret = deliver_skb(skb, *pt_prev, orig_dev);
4057 *pt_prev = NULL;
4058 }
4059
4060 qdisc_skb_cb(skb)->pkt_len = skb->len;
4061 tcx_set_ingress(skb, true);
4062
4063 if (static_branch_unlikely(&tcx_needed_key)) {
4064 sch_ret = tcx_run(entry, skb, true);
4065 if (sch_ret != TC_ACT_UNSPEC)
4066 goto ingress_verdict;
4067 }
4068 sch_ret = tc_run(tcx_entry(entry), skb, &drop_reason);
4069ingress_verdict:
4070 switch (sch_ret) {
4071 case TC_ACT_REDIRECT:
4072 /* skb_mac_header check was done by BPF, so we can safely
4073 * push the L2 header back before redirecting to another
4074 * netdev.
4075 */
4076 __skb_push(skb, skb->mac_len);
4077 if (skb_do_redirect(skb) == -EAGAIN) {
4078 __skb_pull(skb, skb->mac_len);
4079 *another = true;
4080 break;
4081 }
4082 *ret = NET_RX_SUCCESS;
4083 bpf_net_ctx_clear(bpf_net_ctx);
4084 return NULL;
4085 case TC_ACT_SHOT:
4086 kfree_skb_reason(skb, drop_reason);
4087 *ret = NET_RX_DROP;
4088 bpf_net_ctx_clear(bpf_net_ctx);
4089 return NULL;
4090 /* used by tc_run */
4091 case TC_ACT_STOLEN:
4092 case TC_ACT_QUEUED:
4093 case TC_ACT_TRAP:
4094 consume_skb(skb);
4095 fallthrough;
4096 case TC_ACT_CONSUMED:
4097 *ret = NET_RX_SUCCESS;
4098 bpf_net_ctx_clear(bpf_net_ctx);
4099 return NULL;
4100 }
4101 bpf_net_ctx_clear(bpf_net_ctx);
4102
4103 return skb;
4104}
4105
4106static __always_inline struct sk_buff *
4107sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
4108{
4109 struct bpf_mprog_entry *entry = rcu_dereference_bh(dev->tcx_egress);
4110 enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_EGRESS;
4111 struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
4112 int sch_ret;
4113
4114 if (!entry)
4115 return skb;
4116
4117 bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
4118
4119 /* qdisc_skb_cb(skb)->pkt_len was already set and tcx_set_ingress()
4120 * already called by the caller.
4121 */
4122 if (static_branch_unlikely(&tcx_needed_key)) {
4123 sch_ret = tcx_run(entry, skb, false);
4124 if (sch_ret != TC_ACT_UNSPEC)
4125 goto egress_verdict;
4126 }
4127 sch_ret = tc_run(tcx_entry(entry), skb, &drop_reason);
4128egress_verdict:
4129 switch (sch_ret) {
4130 case TC_ACT_REDIRECT:
4131 /* No need to push/pop skb's mac_header here on egress! */
4132 skb_do_redirect(skb);
4133 *ret = NET_XMIT_SUCCESS;
4134 bpf_net_ctx_clear(bpf_net_ctx);
4135 return NULL;
4136 case TC_ACT_SHOT:
4137 kfree_skb_reason(skb, drop_reason);
4138 *ret = NET_XMIT_DROP;
4139 bpf_net_ctx_clear(bpf_net_ctx);
4140 return NULL;
4141 /* used by tc_run */
4142 case TC_ACT_STOLEN:
4143 case TC_ACT_QUEUED:
4144 case TC_ACT_TRAP:
4145 consume_skb(skb);
4146 fallthrough;
4147 case TC_ACT_CONSUMED:
4148 *ret = NET_XMIT_SUCCESS;
4149 bpf_net_ctx_clear(bpf_net_ctx);
4150 return NULL;
4151 }
4152 bpf_net_ctx_clear(bpf_net_ctx);
4153
4154 return skb;
4155}
4156#else
4157static __always_inline struct sk_buff *
4158sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
4159 struct net_device *orig_dev, bool *another)
4160{
4161 return skb;
4162}
4163
4164static __always_inline struct sk_buff *
4165sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
4166{
4167 return skb;
4168}
4169#endif /* CONFIG_NET_XGRESS */
4170
4171#ifdef CONFIG_XPS
4172static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
4173 struct xps_dev_maps *dev_maps, unsigned int tci)
4174{
4175 int tc = netdev_get_prio_tc_map(dev, skb->priority);
4176 struct xps_map *map;
4177 int queue_index = -1;
4178
4179 if (tc >= dev_maps->num_tc || tci >= dev_maps->nr_ids)
4180 return queue_index;
4181
4182 tci *= dev_maps->num_tc;
4183 tci += tc;
4184
4185 map = rcu_dereference(dev_maps->attr_map[tci]);
4186 if (map) {
4187 if (map->len == 1)
4188 queue_index = map->queues[0];
4189 else
4190 queue_index = map->queues[reciprocal_scale(
4191 skb_get_hash(skb), map->len)];
4192 if (unlikely(queue_index >= dev->real_num_tx_queues))
4193 queue_index = -1;
4194 }
4195 return queue_index;
4196}
4197#endif
4198
4199static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
4200 struct sk_buff *skb)
4201{
4202#ifdef CONFIG_XPS
4203 struct xps_dev_maps *dev_maps;
4204 struct sock *sk = skb->sk;
4205 int queue_index = -1;
4206
4207 if (!static_key_false(&xps_needed))
4208 return -1;
4209
4210 rcu_read_lock();
4211 if (!static_key_false(&xps_rxqs_needed))
4212 goto get_cpus_map;
4213
4214 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_RXQS]);
4215 if (dev_maps) {
4216 int tci = sk_rx_queue_get(sk);
4217
4218 if (tci >= 0)
4219 queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4220 tci);
4221 }
4222
4223get_cpus_map:
4224 if (queue_index < 0) {
4225 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_CPUS]);
4226 if (dev_maps) {
4227 unsigned int tci = skb->sender_cpu - 1;
4228
4229 queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4230 tci);
4231 }
4232 }
4233 rcu_read_unlock();
4234
4235 return queue_index;
4236#else
4237 return -1;
4238#endif
4239}
4240
4241u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
4242 struct net_device *sb_dev)
4243{
4244 return 0;
4245}
4246EXPORT_SYMBOL(dev_pick_tx_zero);
4247
4248u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
4249 struct net_device *sb_dev)
4250{
4251 return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
4252}
4253EXPORT_SYMBOL(dev_pick_tx_cpu_id);
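
/* Example (illustrative sketch): a driver that wants one of these trivial
 * queue-selection policies can wire it up as its ndo_select_queue callback;
 * the my_* names are hypothetical:
 *
 *	static const struct net_device_ops my_netdev_ops = {
 *		.ndo_start_xmit   = my_start_xmit,
 *		.ndo_select_queue = dev_pick_tx_cpu_id,
 *	};
 */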
4254
4255u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
4256 struct net_device *sb_dev)
4257{
4258 struct sock *sk = skb->sk;
4259 int queue_index = sk_tx_queue_get(sk);
4260
4261 sb_dev = sb_dev ? : dev;
4262
4263 if (queue_index < 0 || skb->ooo_okay ||
4264 queue_index >= dev->real_num_tx_queues) {
4265 int new_index = get_xps_queue(dev, sb_dev, skb);
4266
4267 if (new_index < 0)
4268 new_index = skb_tx_hash(dev, sb_dev, skb);
4269
4270 if (queue_index != new_index && sk &&
4271 sk_fullsock(sk) &&
4272 rcu_access_pointer(sk->sk_dst_cache))
4273 sk_tx_queue_set(sk, new_index);
4274
4275 queue_index = new_index;
4276 }
4277
4278 return queue_index;
4279}
4280EXPORT_SYMBOL(netdev_pick_tx);
4281
4282struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
4283 struct sk_buff *skb,
4284 struct net_device *sb_dev)
4285{
4286 int queue_index = 0;
4287
4288#ifdef CONFIG_XPS
4289 u32 sender_cpu = skb->sender_cpu - 1;
4290
4291 if (sender_cpu >= (u32)NR_CPUS)
4292 skb->sender_cpu = raw_smp_processor_id() + 1;
4293#endif
4294
4295 if (dev->real_num_tx_queues != 1) {
4296 const struct net_device_ops *ops = dev->netdev_ops;
4297
4298 if (ops->ndo_select_queue)
4299 queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
4300 else
4301 queue_index = netdev_pick_tx(dev, skb, sb_dev);
4302
4303 queue_index = netdev_cap_txqueue(dev, queue_index);
4304 }
4305
4306 skb_set_queue_mapping(skb, queue_index);
4307 return netdev_get_tx_queue(dev, queue_index);
4308}
4309
4310/**
4311 * __dev_queue_xmit() - transmit a buffer
4312 * @skb: buffer to transmit
4313 * @sb_dev: subordinate device used for L2 forwarding offload
4314 *
4315 * Queue a buffer for transmission to a network device. The caller must
4316 * have set the device and priority and built the buffer before calling
4317 * this function. The function can be called from an interrupt.
4318 *
4319 * When calling this method, interrupts MUST be enabled. This is because
4320 * the BH enable code must have IRQs enabled so that it will not deadlock.
4321 *
4322 * Regardless of the return value, the skb is consumed, so it is currently
4323 * difficult to retry a failed send to this function. (You can bump the ref
4324 * count before sending to hold a reference for a retry if you are careful.)
4325 *
4326 * Return:
4327 * * 0 - buffer successfully transmitted
4328 * * positive qdisc return code - NET_XMIT_DROP etc.
4329 * * negative errno - other errors
4330 */
4331int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
4332{
4333 struct net_device *dev = skb->dev;
4334 struct netdev_queue *txq = NULL;
4335 struct Qdisc *q;
4336 int rc = -ENOMEM;
4337 bool again = false;
4338
4339 skb_reset_mac_header(skb);
4340 skb_assert_len(skb);
4341
4342 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
4343 __skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED);
4344
4345 /* Disable soft irqs for various locks below. Also
4346 * stops preemption for RCU.
4347 */
4348 rcu_read_lock_bh();
4349
4350 skb_update_prio(skb);
4351
4352 qdisc_pkt_len_init(skb);
4353 tcx_set_ingress(skb, false);
4354#ifdef CONFIG_NET_EGRESS
4355 if (static_branch_unlikely(&egress_needed_key)) {
4356 if (nf_hook_egress_active()) {
4357 skb = nf_hook_egress(skb, &rc, dev);
4358 if (!skb)
4359 goto out;
4360 }
4361
4362 netdev_xmit_skip_txqueue(false);
4363
4364 nf_skip_egress(skb, true);
4365 skb = sch_handle_egress(skb, &rc, dev);
4366 if (!skb)
4367 goto out;
4368 nf_skip_egress(skb, false);
4369
4370 if (netdev_xmit_txqueue_skipped())
4371 txq = netdev_tx_queue_mapping(dev, skb);
4372 }
4373#endif
4374 /* If device/qdisc don't need skb->dst, release it right now while
4375 * it's hot in this CPU's cache.
4376 */
4377 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
4378 skb_dst_drop(skb);
4379 else
4380 skb_dst_force(skb);
4381
4382 if (!txq)
4383 txq = netdev_core_pick_tx(dev, skb, sb_dev);
4384
4385 q = rcu_dereference_bh(txq->qdisc);
4386
4387 trace_net_dev_queue(skb);
4388 if (q->enqueue) {
4389 rc = __dev_xmit_skb(skb, q, dev, txq);
4390 goto out;
4391 }
4392
4393 /* The device has no queue. Common case for software devices:
4394 * loopback, all sorts of tunnels...
4395 *
4396 * Really, it is unlikely that netif_tx_lock protection is necessary
4397 * here. (e.g. loopback and IP tunnels are clean, ignoring statistics
4398 * counters.)
4399 * However, it is possible that they rely on the protection
4400 * made by us here.
4401 *
4402 * Check this and take the lock. It is not prone to deadlocks.
4403 * Or take the noqueue qdisc path, which is even simpler 8)
4404 */
4405 if (dev->flags & IFF_UP) {
4406 int cpu = smp_processor_id(); /* ok because BHs are off */
4407
4408 /* Other cpus might concurrently change txq->xmit_lock_owner
4409 * to -1 or to their cpu id, but not to our id.
4410 */
4411 if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
4412 if (dev_xmit_recursion())
4413 goto recursion_alert;
4414
4415 skb = validate_xmit_skb(skb, dev, &again);
4416 if (!skb)
4417 goto out;
4418
4419 HARD_TX_LOCK(dev, txq, cpu);
4420
4421 if (!netif_xmit_stopped(txq)) {
4422 dev_xmit_recursion_inc();
4423 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
4424 dev_xmit_recursion_dec();
4425 if (dev_xmit_complete(rc)) {
4426 HARD_TX_UNLOCK(dev, txq);
4427 goto out;
4428 }
4429 }
4430 HARD_TX_UNLOCK(dev, txq);
4431 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
4432 dev->name);
4433 } else {
4434 /* Recursion is detected! It is possible,
4435 * unfortunately
4436 */
4437recursion_alert:
4438 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
4439 dev->name);
4440 }
4441 }
4442
4443 rc = -ENETDOWN;
4444 rcu_read_unlock_bh();
4445
4446 dev_core_stats_tx_dropped_inc(dev);
4447 kfree_skb_list(skb);
4448 return rc;
4449out:
4450 rcu_read_unlock_bh();
4451 return rc;
4452}
4453EXPORT_SYMBOL(__dev_queue_xmit);
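
/* Example (illustrative sketch): a typical in-kernel sender sets skb->dev
 * and the payload, then hands the skb over via the dev_queue_xmit() wrapper
 * (which calls __dev_queue_xmit(skb, NULL)). ETH_P_802_EX1 is just an
 * example protocol; error handling is deliberately minimal:
 *
 *	skb->dev = dev;
 *	skb->protocol = htons(ETH_P_802_EX1);
 *	if (dev_queue_xmit(skb) != NET_XMIT_SUCCESS)
 *		;	// skb already consumed; only update counters here
 */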
4454
4455int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
4456{
4457 struct net_device *dev = skb->dev;
4458 struct sk_buff *orig_skb = skb;
4459 struct netdev_queue *txq;
4460 int ret = NETDEV_TX_BUSY;
4461 bool again = false;
4462
4463 if (unlikely(!netif_running(dev) ||
4464 !netif_carrier_ok(dev)))
4465 goto drop;
4466
4467 skb = validate_xmit_skb_list(skb, dev, &again);
4468 if (skb != orig_skb)
4469 goto drop;
4470
4471 skb_set_queue_mapping(skb, queue_id);
4472 txq = skb_get_tx_queue(dev, skb);
4473
4474 local_bh_disable();
4475
4476 dev_xmit_recursion_inc();
4477 HARD_TX_LOCK(dev, txq, smp_processor_id());
4478 if (!netif_xmit_frozen_or_drv_stopped(txq))
4479 ret = netdev_start_xmit(skb, dev, txq, false);
4480 HARD_TX_UNLOCK(dev, txq);
4481 dev_xmit_recursion_dec();
4482
4483 local_bh_enable();
4484 return ret;
4485drop:
4486 dev_core_stats_tx_dropped_inc(dev);
4487 kfree_skb_list(skb);
4488 return NET_XMIT_DROP;
4489}
4490EXPORT_SYMBOL(__dev_direct_xmit);
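
/* Example (illustrative sketch): AF_XDP-style users transmit on an exact
 * queue through the dev_direct_xmit() wrapper in <linux/netdevice.h>,
 * which calls __dev_direct_xmit() and frees the skb when the driver did
 * not accept it:
 *
 *	err = dev_direct_xmit(skb, queue_id);
 *	if (err)
 *		;	// transmit failed; the skb has been freed
 */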
4491
4492/*************************************************************************
4493 * Receiver routines
4494 *************************************************************************/
4495static DEFINE_PER_CPU(struct task_struct *, backlog_napi);
4496
4497int weight_p __read_mostly = 64; /* old backlog weight */
4498int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */
4499int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */
4500
4501/* Called with irq disabled */
4502static inline void ____napi_schedule(struct softnet_data *sd,
4503 struct napi_struct *napi)
4504{
4505 struct task_struct *thread;
4506
4507 lockdep_assert_irqs_disabled();
4508
4509 if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
4510 /* Paired with smp_mb__before_atomic() in
4511 * napi_enable()/dev_set_threaded().
4512 * Use READ_ONCE() to guarantee a complete
4513 * read on napi->thread. Only call
4514 * wake_up_process() when it's not NULL.
4515 */
4516 thread = READ_ONCE(napi->thread);
4517 if (thread) {
4518 if (use_backlog_threads() && thread == raw_cpu_read(backlog_napi))
4519 goto use_local_napi;
4520
4521 set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
4522 wake_up_process(thread);
4523 return;
4524 }
4525 }
4526
4527use_local_napi:
4528 list_add_tail(&napi->poll_list, &sd->poll_list);
4529 WRITE_ONCE(napi->list_owner, smp_processor_id());
4530 /* If not called from net_rx_action()
4531 * we have to raise NET_RX_SOFTIRQ.
4532 */
4533 if (!sd->in_net_rx_action)
4534 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4535}
4536
4537#ifdef CONFIG_RPS
4538
4539struct static_key_false rps_needed __read_mostly;
4540EXPORT_SYMBOL(rps_needed);
4541struct static_key_false rfs_needed __read_mostly;
4542EXPORT_SYMBOL(rfs_needed);
4543
4544static struct rps_dev_flow *
4545set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4546 struct rps_dev_flow *rflow, u16 next_cpu)
4547{
4548 if (next_cpu < nr_cpu_ids) {
4549 u32 head;
4550#ifdef CONFIG_RFS_ACCEL
4551 struct netdev_rx_queue *rxqueue;
4552 struct rps_dev_flow_table *flow_table;
4553 struct rps_dev_flow *old_rflow;
4554 u16 rxq_index;
4555 u32 flow_id;
4556 int rc;
4557
4558 /* Should we steer this flow to a different hardware queue? */
4559 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
4560 !(dev->features & NETIF_F_NTUPLE))
4561 goto out;
4562 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
4563 if (rxq_index == skb_get_rx_queue(skb))
4564 goto out;
4565
4566 rxqueue = dev->_rx + rxq_index;
4567 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4568 if (!flow_table)
4569 goto out;
4570 flow_id = skb_get_hash(skb) & flow_table->mask;
4571 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
4572 rxq_index, flow_id);
4573 if (rc < 0)
4574 goto out;
4575 old_rflow = rflow;
4576 rflow = &flow_table->flows[flow_id];
4577 WRITE_ONCE(rflow->filter, rc);
4578 if (old_rflow->filter == rc)
4579 WRITE_ONCE(old_rflow->filter, RPS_NO_FILTER);
4580 out:
4581#endif
4582 head = READ_ONCE(per_cpu(softnet_data, next_cpu).input_queue_head);
4583 rps_input_queue_tail_save(&rflow->last_qtail, head);
4584 }
4585
4586 WRITE_ONCE(rflow->cpu, next_cpu);
4587 return rflow;
4588}
4589
4590/*
4591 * get_rps_cpu is called from netif_receive_skb and returns the target
4592 * CPU from the RPS map of the receiving queue for a given skb.
4593 * rcu_read_lock must be held on entry.
4594 */
4595static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4596 struct rps_dev_flow **rflowp)
4597{
4598 const struct rps_sock_flow_table *sock_flow_table;
4599 struct netdev_rx_queue *rxqueue = dev->_rx;
4600 struct rps_dev_flow_table *flow_table;
4601 struct rps_map *map;
4602 int cpu = -1;
4603 u32 tcpu;
4604 u32 hash;
4605
4606 if (skb_rx_queue_recorded(skb)) {
4607 u16 index = skb_get_rx_queue(skb);
4608
4609 if (unlikely(index >= dev->real_num_rx_queues)) {
4610 WARN_ONCE(dev->real_num_rx_queues > 1,
4611 "%s received packet on queue %u, but number "
4612 "of RX queues is %u\n",
4613 dev->name, index, dev->real_num_rx_queues);
4614 goto done;
4615 }
4616 rxqueue += index;
4617 }
4618
4619 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
4620
4621 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4622 map = rcu_dereference(rxqueue->rps_map);
4623 if (!flow_table && !map)
4624 goto done;
4625
4626 skb_reset_network_header(skb);
4627 hash = skb_get_hash(skb);
4628 if (!hash)
4629 goto done;
4630
4631 sock_flow_table = rcu_dereference(net_hotdata.rps_sock_flow_table);
4632 if (flow_table && sock_flow_table) {
4633 struct rps_dev_flow *rflow;
4634 u32 next_cpu;
4635 u32 ident;
4636
4637 /* First, check the global flow table for a match.
4638 * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
4639 */
4640 ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
4641 if ((ident ^ hash) & ~net_hotdata.rps_cpu_mask)
4642 goto try_rps;
4643
4644 next_cpu = ident & net_hotdata.rps_cpu_mask;
4645
4646 /* OK, now we know there is a match;
4647 * we can look at the local (per-receive-queue) flow table
4648 */
4649 rflow = &flow_table->flows[hash & flow_table->mask];
4650 tcpu = rflow->cpu;
4651
4652 /*
4653 * If the desired CPU (where last recvmsg was done) is
4654 * different from current CPU (one in the rx-queue flow
4655 * table entry), switch if one of the following holds:
4656 * - Current CPU is unset (>= nr_cpu_ids).
4657 * - Current CPU is offline.
4658 * - The current CPU's queue tail has advanced beyond the
4659 * last packet that was enqueued using this table entry.
4660 * This guarantees that all previous packets for the flow
4661 * have been dequeued, thus preserving in-order delivery.
4662 */
4663 if (unlikely(tcpu != next_cpu) &&
4664 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
4665 ((int)(READ_ONCE(per_cpu(softnet_data, tcpu).input_queue_head) -
4666 rflow->last_qtail)) >= 0)) {
4667 tcpu = next_cpu;
4668 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
4669 }
4670
4671 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
4672 *rflowp = rflow;
4673 cpu = tcpu;
4674 goto done;
4675 }
4676 }
4677
4678try_rps:
4679
4680 if (map) {
4681 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
4682 if (cpu_online(tcpu)) {
4683 cpu = tcpu;
4684 goto done;
4685 }
4686 }
4687
4688done:
4689 return cpu;
4690}
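
/* Usage note (illustrative): RPS/RFS are enabled from userspace through
 * the standard sysfs/procfs knobs; the values below are only examples:
 *
 *	# echo f > /sys/class/net/eth0/queues/rx-0/rps_cpus
 *	# echo 32768 > /proc/sys/net/core/rps_sock_flow_entries
 *	# echo 2048 > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt
 *
 * The first line steers rx queue 0 to CPUs 0-3; the other two size the
 * global and per-queue flow tables that get_rps_cpu() consults above.
 */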
4691
4692#ifdef CONFIG_RFS_ACCEL
4693
4694/**
4695 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
4696 * @dev: Device on which the filter was set
4697 * @rxq_index: RX queue index
4698 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
4699 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
4700 *
4701 * Drivers that implement ndo_rx_flow_steer() should periodically call
4702 * this function for each installed filter and remove the filters for
4703 * which it returns %true.
4704 */
4705bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
4706 u32 flow_id, u16 filter_id)
4707{
4708 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
4709 struct rps_dev_flow_table *flow_table;
4710 struct rps_dev_flow *rflow;
4711 bool expire = true;
4712 unsigned int cpu;
4713
4714 rcu_read_lock();
4715 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4716 if (flow_table && flow_id <= flow_table->mask) {
4717 rflow = &flow_table->flows[flow_id];
4718 cpu = READ_ONCE(rflow->cpu);
4719 if (READ_ONCE(rflow->filter) == filter_id && cpu < nr_cpu_ids &&
4720 ((int)(READ_ONCE(per_cpu(softnet_data, cpu).input_queue_head) -
4721 READ_ONCE(rflow->last_qtail)) <
4722 (int)(10 * flow_table->mask)))
4723 expire = false;
4724 }
4725 rcu_read_unlock();
4726 return expire;
4727}
4728EXPORT_SYMBOL(rps_may_expire_flow);
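
/* Example (illustrative sketch): a driver implementing ndo_rx_flow_steer()
 * might expire stale filters from a periodic work item; the my_* names are
 * hypothetical driver state:
 *
 *	for (i = 0; i < my_nfilters; i++) {
 *		struct my_filter *f = &my_filters[i];
 *
 *		if (f->in_use &&
 *		    rps_may_expire_flow(dev, f->rxq_index,
 *					f->flow_id, f->filter_id))
 *			my_remove_hw_filter(f);	// hypothetical helper
 *	}
 */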
4729
4730#endif /* CONFIG_RFS_ACCEL */
4731
4732/* Called from hardirq (IPI) context */
4733static void rps_trigger_softirq(void *data)
4734{
4735 struct softnet_data *sd = data;
4736
4737 ____napi_schedule(sd, &sd->backlog);
4738 sd->received_rps++;
4739}
4740
4741#endif /* CONFIG_RPS */
4742
4743/* Called from hardirq (IPI) context */
4744static void trigger_rx_softirq(void *data)
4745{
4746 struct softnet_data *sd = data;
4747
4748 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4749 smp_store_release(&sd->defer_ipi_scheduled, 0);
4750}
4751
4752/*
4753 * After we have queued a packet into sd->input_pkt_queue,
4754 * we need to make sure this queue is serviced soon.
4755 *
4756 * - If this is another CPU's queue, link it to our rps_ipi_list,
4757 * and make sure we will process rps_ipi_list from net_rx_action().
4758 *
4759 * - If this is our own queue, schedule NAPI on our backlog.
4760 * Note that this also raises NET_RX_SOFTIRQ.
4761 */
4762static void napi_schedule_rps(struct softnet_data *sd)
4763{
4764 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
4765
4766#ifdef CONFIG_RPS
4767 if (sd != mysd) {
4768 if (use_backlog_threads()) {
4769 __napi_schedule_irqoff(&sd->backlog);
4770 return;
4771 }
4772
4773 sd->rps_ipi_next = mysd->rps_ipi_list;
4774 mysd->rps_ipi_list = sd;
4775
4776 /* If not called from net_rx_action() or napi_threaded_poll()
4777 * we have to raise NET_RX_SOFTIRQ.
4778 */
4779 if (!mysd->in_net_rx_action && !mysd->in_napi_threaded_poll)
4780 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4781 return;
4782 }
4783#endif /* CONFIG_RPS */
4784 __napi_schedule_irqoff(&mysd->backlog);
4785}
4786
4787void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu)
4788{
4789 unsigned long flags;
4790
4791 if (use_backlog_threads()) {
4792 backlog_lock_irq_save(sd, &flags);
4793
4794 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
4795 __napi_schedule_irqoff(&sd->backlog);
4796
4797 backlog_unlock_irq_restore(sd, &flags);
4798
4799 } else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
4800 smp_call_function_single_async(cpu, &sd->defer_csd);
4801 }
4802}
4803
4804#ifdef CONFIG_NET_FLOW_LIMIT
4805int netdev_flow_limit_table_len __read_mostly = (1 << 12);
4806#endif
4807
4808static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
4809{
4810#ifdef CONFIG_NET_FLOW_LIMIT
4811 struct sd_flow_limit *fl;
4812 struct softnet_data *sd;
4813 unsigned int old_flow, new_flow;
4814
4815 if (qlen < (READ_ONCE(net_hotdata.max_backlog) >> 1))
4816 return false;
4817
4818 sd = this_cpu_ptr(&softnet_data);
4819
4820 rcu_read_lock();
4821 fl = rcu_dereference(sd->flow_limit);
4822 if (fl) {
4823 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
4824 old_flow = fl->history[fl->history_head];
4825 fl->history[fl->history_head] = new_flow;
4826
4827 fl->history_head++;
4828 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
4829
4830 if (likely(fl->buckets[old_flow]))
4831 fl->buckets[old_flow]--;
4832
4833 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
4834 fl->count++;
4835 rcu_read_unlock();
4836 return true;
4837 }
4838 }
4839 rcu_read_unlock();
4840#endif
4841 return false;
4842}
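
/* Usage note (illustrative): flow limits are enabled per CPU and sized via
 * the documented sysctls, for example:
 *
 *	# echo 1 > /proc/sys/net/core/flow_limit_cpu_bitmap
 *	# echo 8192 > /proc/sys/net/core/flow_limit_table_len
 *
 * With this, a flow dominating a nearly-full backlog is dropped in
 * preference to lower-rate flows, as implemented above.
 */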
4843
4844/*
4845 * enqueue_to_backlog is called to queue an skb to a per-CPU backlog
4846 * queue (which may be a remote CPU's queue).
4847 */
4848static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
4849 unsigned int *qtail)
4850{
4851 enum skb_drop_reason reason;
4852 struct softnet_data *sd;
4853 unsigned long flags;
4854 unsigned int qlen;
4855 int max_backlog;
4856 u32 tail;
4857
4858 reason = SKB_DROP_REASON_DEV_READY;
4859 if (!netif_running(skb->dev))
4860 goto bad_dev;
4861
4862 reason = SKB_DROP_REASON_CPU_BACKLOG;
4863 sd = &per_cpu(softnet_data, cpu);
4864
4865 qlen = skb_queue_len_lockless(&sd->input_pkt_queue);
4866 max_backlog = READ_ONCE(net_hotdata.max_backlog);
4867 if (unlikely(qlen > max_backlog))
4868 goto cpu_backlog_drop;
4869 backlog_lock_irq_save(sd, &flags);
4870 qlen = skb_queue_len(&sd->input_pkt_queue);
4871 if (qlen <= max_backlog && !skb_flow_limit(skb, qlen)) {
4872 if (!qlen) {
4873 /* Schedule NAPI for the backlog device. We can use
4874 * a non-atomic operation as we own the queue lock.
4875 */
4876 if (!__test_and_set_bit(NAPI_STATE_SCHED,
4877 &sd->backlog.state))
4878 napi_schedule_rps(sd);
4879 }
4880 __skb_queue_tail(&sd->input_pkt_queue, skb);
4881 tail = rps_input_queue_tail_incr(sd);
4882 backlog_unlock_irq_restore(sd, &flags);
4883
4884 /* save the tail outside of the critical section */
4885 rps_input_queue_tail_save(qtail, tail);
4886 return NET_RX_SUCCESS;
4887 }
4888
4889 backlog_unlock_irq_restore(sd, &flags);
4890
4891cpu_backlog_drop:
4892 atomic_inc(&sd->dropped);
4893bad_dev:
4894 dev_core_stats_rx_dropped_inc(skb->dev);
4895 kfree_skb_reason(skb, reason);
4896 return NET_RX_DROP;
4897}
4898
4899static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
4900{
4901 struct net_device *dev = skb->dev;
4902 struct netdev_rx_queue *rxqueue;
4903
4904 rxqueue = dev->_rx;
4905
4906 if (skb_rx_queue_recorded(skb)) {
4907 u16 index = skb_get_rx_queue(skb);
4908
4909 if (unlikely(index >= dev->real_num_rx_queues)) {
4910 WARN_ONCE(dev->real_num_rx_queues > 1,
4911 "%s received packet on queue %u, but number "
4912 "of RX queues is %u\n",
4913 dev->name, index, dev->real_num_rx_queues);
4914
4915 return rxqueue; /* Return first rxqueue */
4916 }
4917 rxqueue += index;
4918 }
4919 return rxqueue;
4920}
4921
4922u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
4923 struct bpf_prog *xdp_prog)
4924{
4925 void *orig_data, *orig_data_end, *hard_start;
4926 struct netdev_rx_queue *rxqueue;
4927 bool orig_bcast, orig_host;
4928 u32 mac_len, frame_sz;
4929 __be16 orig_eth_type;
4930 struct ethhdr *eth;
4931 u32 metalen, act;
4932 int off;
4933
4934 /* The XDP program wants to see the packet starting at the MAC
4935 * header.
4936 */
4937 mac_len = skb->data - skb_mac_header(skb);
4938 hard_start = skb->data - skb_headroom(skb);
4939
4940 /* SKB "head" area always has tailroom for skb_shared_info */
4941 frame_sz = (void *)skb_end_pointer(skb) - hard_start;
4942 frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4943
4944 rxqueue = netif_get_rxqueue(skb);
4945 xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
4946 xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
4947 skb_headlen(skb) + mac_len, true);
4948 if (skb_is_nonlinear(skb)) {
4949 skb_shinfo(skb)->xdp_frags_size = skb->data_len;
4950 xdp_buff_set_frags_flag(xdp);
4951 } else {
4952 xdp_buff_clear_frags_flag(xdp);
4953 }
4954
4955 orig_data_end = xdp->data_end;
4956 orig_data = xdp->data;
4957 eth = (struct ethhdr *)xdp->data;
4958 orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr);
4959 orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
4960 orig_eth_type = eth->h_proto;
4961
4962 act = bpf_prog_run_xdp(xdp_prog, xdp);
4963
4964 /* check if bpf_xdp_adjust_head was used */
4965 off = xdp->data - orig_data;
4966 if (off) {
4967 if (off > 0)
4968 __skb_pull(skb, off);
4969 else if (off < 0)
4970 __skb_push(skb, -off);
4971
4972 skb->mac_header += off;
4973 skb_reset_network_header(skb);
4974 }
4975
4976 /* check if bpf_xdp_adjust_tail was used */
4977 off = xdp->data_end - orig_data_end;
4978 if (off != 0) {
4979 skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
4980 skb->len += off; /* positive on grow, negative on shrink */
4981 }
4982
4983 /* XDP frag metadata (e.g. nr_frags) is updated by eBPF helpers
4984 * (e.g. bpf_xdp_adjust_tail), so we need to update data_len here.
4985 */
4986 if (xdp_buff_has_frags(xdp))
4987 skb->data_len = skb_shinfo(skb)->xdp_frags_size;
4988 else
4989 skb->data_len = 0;
4990
4991 /* check if XDP changed the eth hdr such that the SKB needs an update */
4992 eth = (struct ethhdr *)xdp->data;
4993 if ((orig_eth_type != eth->h_proto) ||
4994 (orig_host != ether_addr_equal_64bits(eth->h_dest,
4995 skb->dev->dev_addr)) ||
4996 (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
4997 __skb_push(skb, ETH_HLEN);
4998 skb->pkt_type = PACKET_HOST;
4999 skb->protocol = eth_type_trans(skb, skb->dev);
5000 }
5001
5002 /* Redirect/Tx gives an L2 packet; code that will reuse the skb must
5003 * __skb_pull it before calling us again on the redirect path. We do not
5004 * call do_redirect, as we leave that up to the caller.
5005 *
5006 * Caller is responsible for managing lifetime of skb (i.e. calling
5007 * kfree_skb in response to actions it cannot handle/XDP_DROP).
5008 */
5009 switch (act) {
5010 case XDP_REDIRECT:
5011 case XDP_TX:
5012 __skb_push(skb, mac_len);
5013 break;
5014 case XDP_PASS:
5015 metalen = xdp->data - xdp->data_meta;
5016 if (metalen)
5017 skb_metadata_set(skb, metalen);
5018 break;
5019 }
5020
5021 return act;
5022}
5023
5024static int
5025netif_skb_check_for_xdp(struct sk_buff **pskb, struct bpf_prog *prog)
5026{
5027 struct sk_buff *skb = *pskb;
5028 int err, hroom, troom;
5029
5030 if (!skb_cow_data_for_xdp(this_cpu_read(system_page_pool), pskb, prog))
5031 return 0;
5032
5033 /* In case we have to go down this path and also linearize,
5034 * let's do the pskb_expand_head() work just once here.
5035 */
5036 hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
5037 troom = skb->tail + skb->data_len - skb->end;
5038 err = pskb_expand_head(skb,
5039 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
5040 troom > 0 ? troom + 128 : 0, GFP_ATOMIC);
5041 if (err)
5042 return err;
5043
5044 return skb_linearize(skb);
5045}
5046
5047static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
5048 struct xdp_buff *xdp,
5049 struct bpf_prog *xdp_prog)
5050{
5051 struct sk_buff *skb = *pskb;
5052 u32 mac_len, act = XDP_DROP;
5053
5054 /* Reinjected packets coming from act_mirred or similar should
5055 * not get XDP generic processing.
5056 */
5057 if (skb_is_redirected(skb))
5058 return XDP_PASS;
5059
5060 /* XDP packets must have sufficient headroom of XDP_PACKET_HEADROOM
5061 * bytes. This is the guarantee that native XDP also provides,
5062 * so we need to ensure it here as well.
5063 */
5064 mac_len = skb->data - skb_mac_header(skb);
5065 __skb_push(skb, mac_len);
5066
5067 if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
5068 skb_headroom(skb) < XDP_PACKET_HEADROOM) {
5069 if (netif_skb_check_for_xdp(pskb, xdp_prog))
5070 goto do_drop;
5071 }
5072
5073 __skb_pull(*pskb, mac_len);
5074
5075 act = bpf_prog_run_generic_xdp(*pskb, xdp, xdp_prog);
5076 switch (act) {
5077 case XDP_REDIRECT:
5078 case XDP_TX:
5079 case XDP_PASS:
5080 break;
5081 default:
5082 bpf_warn_invalid_xdp_action((*pskb)->dev, xdp_prog, act);
5083 fallthrough;
5084 case XDP_ABORTED:
5085 trace_xdp_exception((*pskb)->dev, xdp_prog, act);
5086 fallthrough;
5087 case XDP_DROP:
5088 do_drop:
5089 kfree_skb(*pskb);
5090 break;
5091 }
5092
5093 return act;
5094}
5095
5096/* When doing generic XDP we have to bypass the qdisc layer and the
5097 * network taps in order to match in-driver XDP behavior. This also means
5098 * that XDP packets are able to starve other packets going through a qdisc,
5099 * and DDoS attacks will be more effective. In-driver XDP uses dedicated TX
5100 * queues, so it does not have this starvation issue.
5101 */
5102void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
5103{
5104 struct net_device *dev = skb->dev;
5105 struct netdev_queue *txq;
5106 bool free_skb = true;
5107 int cpu, rc;
5108
5109 txq = netdev_core_pick_tx(dev, skb, NULL);
5110 cpu = smp_processor_id();
5111 HARD_TX_LOCK(dev, txq, cpu);
5112 if (!netif_xmit_frozen_or_drv_stopped(txq)) {
5113 rc = netdev_start_xmit(skb, dev, txq, 0);
5114 if (dev_xmit_complete(rc))
5115 free_skb = false;
5116 }
5117 HARD_TX_UNLOCK(dev, txq);
5118 if (free_skb) {
5119 trace_xdp_exception(dev, xdp_prog, XDP_TX);
5120 dev_core_stats_tx_dropped_inc(dev);
5121 kfree_skb(skb);
5122 }
5123}
5124
5125static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
5126
5127int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb)
5128{
5129 struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
5130
5131 if (xdp_prog) {
5132 struct xdp_buff xdp;
5133 u32 act;
5134 int err;
5135
5136 bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
5137 act = netif_receive_generic_xdp(pskb, &xdp, xdp_prog);
5138 if (act != XDP_PASS) {
5139 switch (act) {
5140 case XDP_REDIRECT:
5141 err = xdp_do_generic_redirect((*pskb)->dev, *pskb,
5142 &xdp, xdp_prog);
5143 if (err)
5144 goto out_redir;
5145 break;
5146 case XDP_TX:
5147 generic_xdp_tx(*pskb, xdp_prog);
5148 break;
5149 }
5150 bpf_net_ctx_clear(bpf_net_ctx);
5151 return XDP_DROP;
5152 }
5153 bpf_net_ctx_clear(bpf_net_ctx);
5154 }
5155 return XDP_PASS;
5156out_redir:
5157 bpf_net_ctx_clear(bpf_net_ctx);
5158 kfree_skb_reason(*pskb, SKB_DROP_REASON_XDP);
5159 return XDP_DROP;
5160}
5161EXPORT_SYMBOL_GPL(do_xdp_generic);
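
/* Example (illustrative sketch): a software driver without native XDP
 * support can run the generic hook itself, roughly as drivers like tun
 * do; error handling is trimmed:
 *
 *	rcu_read_lock();
 *	xdp_prog = rcu_dereference(dev->xdp_prog);
 *	if (xdp_prog && do_xdp_generic(xdp_prog, &skb) != XDP_PASS) {
 *		rcu_read_unlock();
 *		return;		// skb consumed by the XDP program
 *	}
 *	rcu_read_unlock();
 *	netif_rx(skb);
 */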
5162
5163static int netif_rx_internal(struct sk_buff *skb)
5164{
5165 int ret;
5166
5167 net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb);
5168
5169 trace_netif_rx(skb);
5170
5171#ifdef CONFIG_RPS
5172 if (static_branch_unlikely(&rps_needed)) {
5173 struct rps_dev_flow voidflow, *rflow = &voidflow;
5174 int cpu;
5175
5176 rcu_read_lock();
5177
5178 cpu = get_rps_cpu(skb->dev, skb, &rflow);
5179 if (cpu < 0)
5180 cpu = smp_processor_id();
5181
5182 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5183
5184 rcu_read_unlock();
5185 } else
5186#endif
5187 {
5188 unsigned int qtail;
5189
5190 ret = enqueue_to_backlog(skb, smp_processor_id(), &qtail);
5191 }
5192 return ret;
5193}
5194
5195/**
5196 * __netif_rx - Slightly optimized version of netif_rx
5197 * @skb: buffer to post
5198 *
5199 * This behaves as netif_rx except that it does not disable bottom halves.
5200 * As a result this function may only be invoked from the interrupt context
5201 * (either hard or soft interrupt).
5202 */
5203int __netif_rx(struct sk_buff *skb)
5204{
5205 int ret;
5206
5207 lockdep_assert_once(hardirq_count() | softirq_count());
5208
5209 trace_netif_rx_entry(skb);
5210 ret = netif_rx_internal(skb);
5211 trace_netif_rx_exit(ret);
5212 return ret;
5213}
5214EXPORT_SYMBOL(__netif_rx);
5215
5216/**
5217 * netif_rx - post buffer to the network code
5218 * @skb: buffer to post
5219 *
5220 * This function receives a packet from a device driver and queues it for
5221 * the upper (protocol) levels to process via the backlog NAPI device. It
5222 * always succeeds. The buffer may be dropped during processing for
5223 * congestion control or by the protocol layers.
5224 * The network buffer is passed via the backlog NAPI device. Modern NIC
5225 * drivers should use NAPI and GRO.
5226 * This function can be used from interrupt and from process context. A
5227 * caller from process context must not disable interrupts before invoking
5228 * this function.
5229 *
5230 * return values:
5231 * NET_RX_SUCCESS (no congestion)
5232 * NET_RX_DROP (packet was dropped)
5233 *
5234 */
5235int netif_rx(struct sk_buff *skb)
5236{
5237 bool need_bh_off = !(hardirq_count() | softirq_count());
5238 int ret;
5239
5240 if (need_bh_off)
5241 local_bh_disable();
5242 trace_netif_rx_entry(skb);
5243 ret = netif_rx_internal(skb);
5244 trace_netif_rx_exit(ret);
5245 if (need_bh_off)
5246 local_bh_enable();
5247 return ret;
5248}
5249EXPORT_SYMBOL(netif_rx);
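
/* Example (illustrative sketch): the classic non-NAPI receive pattern in
 * a driver's interrupt handler; len and data stand in for hardware
 * specifics:
 *
 *	skb = netdev_alloc_skb_ip_align(dev, len);
 *	if (!skb)
 *		return;		// rx_dropped accounting omitted
 *	skb_put_data(skb, data, len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */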
5250
5251static __latent_entropy void net_tx_action(struct softirq_action *h)
5252{
5253 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
5254
5255 if (sd->completion_queue) {
5256 struct sk_buff *clist;
5257
5258 local_irq_disable();
5259 clist = sd->completion_queue;
5260 sd->completion_queue = NULL;
5261 local_irq_enable();
5262
5263 while (clist) {
5264 struct sk_buff *skb = clist;
5265
5266 clist = clist->next;
5267
5268 WARN_ON(refcount_read(&skb->users));
5269 if (likely(get_kfree_skb_cb(skb)->reason == SKB_CONSUMED))
5270 trace_consume_skb(skb, net_tx_action);
5271 else
5272 trace_kfree_skb(skb, net_tx_action,
5273 get_kfree_skb_cb(skb)->reason, NULL);
5274
5275 if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
5276 __kfree_skb(skb);
5277 else
5278 __napi_kfree_skb(skb,
5279 get_kfree_skb_cb(skb)->reason);
5280 }
5281 }
5282
5283 if (sd->output_queue) {
5284 struct Qdisc *head;
5285
5286 local_irq_disable();
5287 head = sd->output_queue;
5288 sd->output_queue = NULL;
5289 sd->output_queue_tailp = &sd->output_queue;
5290 local_irq_enable();
5291
5292 rcu_read_lock();
5293
5294 while (head) {
5295 struct Qdisc *q = head;
5296 spinlock_t *root_lock = NULL;
5297
5298 head = head->next_sched;
5299
5300 /* We need to make sure head->next_sched is read
5301 * before clearing __QDISC_STATE_SCHED
5302 */
5303 smp_mb__before_atomic();
5304
5305 if (!(q->flags & TCQ_F_NOLOCK)) {
5306 root_lock = qdisc_lock(q);
5307 spin_lock(root_lock);
5308 } else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
5309 &q->state))) {
5310 /* There is a synchronize_net() between
5311 * STATE_DEACTIVATED flag being set and
5312 * qdisc_reset()/some_qdisc_is_busy() in
5313 * dev_deactivate(), so we can safely bail out
5314 * early here to avoid data race between
5315 * qdisc_deactivate() and some_qdisc_is_busy()
5316 * for lockless qdisc.
5317 */
5318 clear_bit(__QDISC_STATE_SCHED, &q->state);
5319 continue;
5320 }
5321
5322 clear_bit(__QDISC_STATE_SCHED, &q->state);
5323 qdisc_run(q);
5324 if (root_lock)
5325 spin_unlock(root_lock);
5326 }
5327
5328 rcu_read_unlock();
5329 }
5330
5331 xfrm_dev_backlog(sd);
5332}
5333
5334#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
5335/* This hook is defined here for ATM LANE */
5336int (*br_fdb_test_addr_hook)(struct net_device *dev,
5337 unsigned char *addr) __read_mostly;
5338EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
5339#endif
5340
5341/**
5342 * netdev_is_rx_handler_busy - check if receive handler is registered
5343 * @dev: device to check
5344 *
5345 * Check if a receive handler is already registered for a given device.
5346 * Return true if there is one.
5347 *
5348 * The caller must hold the rtnl_mutex.
5349 */
5350bool netdev_is_rx_handler_busy(struct net_device *dev)
5351{
5352 ASSERT_RTNL();
5353 return dev && rtnl_dereference(dev->rx_handler);
5354}
5355EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
5356
5357/**
5358 * netdev_rx_handler_register - register receive handler
5359 * @dev: device to register a handler for
5360 * @rx_handler: receive handler to register
5361 * @rx_handler_data: data pointer that is used by rx handler
5362 *
5363 * Register a receive handler for a device. This handler will then be
5364 * called from __netif_receive_skb. A negative errno code is returned
5365 * on a failure.
5366 *
5367 * The caller must hold the rtnl_mutex.
5368 *
5369 * For a general description of rx_handler, see enum rx_handler_result.
5370 */
5371int netdev_rx_handler_register(struct net_device *dev,
5372 rx_handler_func_t *rx_handler,
5373 void *rx_handler_data)
5374{
5375 if (netdev_is_rx_handler_busy(dev))
5376 return -EBUSY;
5377
5378 if (dev->priv_flags & IFF_NO_RX_HANDLER)
5379 return -EINVAL;
5380
5381 /* Note: rx_handler_data must be set before rx_handler */
5382 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
5383 rcu_assign_pointer(dev->rx_handler, rx_handler);
5384
5385 return 0;
5386}
5387EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
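
/* Example (illustrative sketch): a minimal rx_handler in the style of
 * bridge/team/bonding; the my_* names are hypothetical:
 *
 *	static rx_handler_result_t my_rx_handler(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *
 *		if (my_should_steal(skb)) {	// hypothetical predicate
 *			kfree_skb(skb);
 *			return RX_HANDLER_CONSUMED;
 *		}
 *		return RX_HANDLER_PASS;
 *	}
 *
 *	rtnl_lock();
 *	err = netdev_rx_handler_register(dev, my_rx_handler, NULL);
 *	rtnl_unlock();
 */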
5388
5389/**
5390 * netdev_rx_handler_unregister - unregister receive handler
5391 * @dev: device to unregister a handler from
5392 *
5393 * Unregister a receive handler from a device.
5394 *
5395 * The caller must hold the rtnl_mutex.
5396 */
5397void netdev_rx_handler_unregister(struct net_device *dev)
5398{
5399
5400 ASSERT_RTNL();
5401 RCU_INIT_POINTER(dev->rx_handler, NULL);
5402 /* a reader seeing a non-NULL rx_handler in an rcu_read_lock()
5403 * section is guaranteed to see a non-NULL rx_handler_data
5404 * as well.
5405 */
5406 synchronize_net();
5407 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
5408}
5409EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
5410
5411/*
5412 * Limit the use of PFMEMALLOC reserves to those protocols that implement
5413 * the special handling of PFMEMALLOC skbs.
5414 */
5415static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
5416{
5417 switch (skb->protocol) {
5418 case htons(ETH_P_ARP):
5419 case htons(ETH_P_IP):
5420 case htons(ETH_P_IPV6):
5421 case htons(ETH_P_8021Q):
5422 case htons(ETH_P_8021AD):
5423 return true;
5424 default:
5425 return false;
5426 }
5427}
5428
5429static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
5430 int *ret, struct net_device *orig_dev)
5431{
5432 if (nf_hook_ingress_active(skb)) {
5433 int ingress_retval;
5434
5435 if (*pt_prev) {
5436 *ret = deliver_skb(skb, *pt_prev, orig_dev);
5437 *pt_prev = NULL;
5438 }
5439
5440 rcu_read_lock();
5441 ingress_retval = nf_hook_ingress(skb);
5442 rcu_read_unlock();
5443 return ingress_retval;
5444 }
5445 return 0;
5446}
5447
5448static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
5449 struct packet_type **ppt_prev)
5450{
5451 struct packet_type *ptype, *pt_prev;
5452 rx_handler_func_t *rx_handler;
5453 struct sk_buff *skb = *pskb;
5454 struct net_device *orig_dev;
5455 bool deliver_exact = false;
5456 int ret = NET_RX_DROP;
5457 __be16 type;
5458
5459 net_timestamp_check(!READ_ONCE(net_hotdata.tstamp_prequeue), skb);
5460
5461 trace_netif_receive_skb(skb);
5462
5463 orig_dev = skb->dev;
5464
5465 skb_reset_network_header(skb);
5466 if (!skb_transport_header_was_set(skb))
5467 skb_reset_transport_header(skb);
5468 skb_reset_mac_len(skb);
5469
5470 pt_prev = NULL;
5471
5472another_round:
5473 skb->skb_iif = skb->dev->ifindex;
5474
5475 __this_cpu_inc(softnet_data.processed);
5476
5477 if (static_branch_unlikely(&generic_xdp_needed_key)) {
5478 int ret2;
5479
5480 migrate_disable();
5481 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog),
5482 &skb);
5483 migrate_enable();
5484
5485 if (ret2 != XDP_PASS) {
5486 ret = NET_RX_DROP;
5487 goto out;
5488 }
5489 }
5490
5491 if (eth_type_vlan(skb->protocol)) {
5492 skb = skb_vlan_untag(skb);
5493 if (unlikely(!skb))
5494 goto out;
5495 }
5496
5497 if (skb_skip_tc_classify(skb))
5498 goto skip_classify;
5499
5500 if (pfmemalloc)
5501 goto skip_taps;
5502
5503 list_for_each_entry_rcu(ptype, &net_hotdata.ptype_all, list) {
5504 if (pt_prev)
5505 ret = deliver_skb(skb, pt_prev, orig_dev);
5506 pt_prev = ptype;
5507 }
5508
5509 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
5510 if (pt_prev)
5511 ret = deliver_skb(skb, pt_prev, orig_dev);
5512 pt_prev = ptype;
5513 }
5514
5515skip_taps:
5516#ifdef CONFIG_NET_INGRESS
5517 if (static_branch_unlikely(&ingress_needed_key)) {
5518 bool another = false;
5519
5520 nf_skip_egress(skb, true);
5521 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev,
5522 &another);
5523 if (another)
5524 goto another_round;
5525 if (!skb)
5526 goto out;
5527
5528 nf_skip_egress(skb, false);
5529 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
5530 goto out;
5531 }
5532#endif
5533 skb_reset_redirect(skb);
5534skip_classify:
5535 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
5536 goto drop;
5537
5538 if (skb_vlan_tag_present(skb)) {
5539 if (pt_prev) {
5540 ret = deliver_skb(skb, pt_prev, orig_dev);
5541 pt_prev = NULL;
5542 }
5543 if (vlan_do_receive(&skb))
5544 goto another_round;
5545 else if (unlikely(!skb))
5546 goto out;
5547 }
5548
5549 rx_handler = rcu_dereference(skb->dev->rx_handler);
5550 if (rx_handler) {
5551 if (pt_prev) {
5552 ret = deliver_skb(skb, pt_prev, orig_dev);
5553 pt_prev = NULL;
5554 }
5555 switch (rx_handler(&skb)) {
5556 case RX_HANDLER_CONSUMED:
5557 ret = NET_RX_SUCCESS;
5558 goto out;
5559 case RX_HANDLER_ANOTHER:
5560 goto another_round;
5561 case RX_HANDLER_EXACT:
5562 deliver_exact = true;
5563 break;
5564 case RX_HANDLER_PASS:
5565 break;
5566 default:
5567 BUG();
5568 }
5569 }
5570
5571 if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) {
5572check_vlan_id:
5573 if (skb_vlan_tag_get_id(skb)) {
5574 /* VLAN id is non-zero and vlan_do_receive() above couldn't
5575 * find the vlan device.
5576 */
5577 skb->pkt_type = PACKET_OTHERHOST;
5578 } else if (eth_type_vlan(skb->protocol)) {
5579 /* Outer header is 802.1P with vlan 0, inner header is
5580 * 802.1Q or 802.1AD, and vlan_do_receive() above could
5581 * not find a vlan dev for vlan id 0.
5582 */
5583 __vlan_hwaccel_clear_tag(skb);
5584 skb = skb_vlan_untag(skb);
5585 if (unlikely(!skb))
5586 goto out;
5587 if (vlan_do_receive(&skb))
5588 /* After stripping off the 802.1P header with vlan 0,
5589 * a vlan dev is found for the inner header.
5590 */
5591 goto another_round;
5592 else if (unlikely(!skb))
5593 goto out;
5594 else
5595 /* We have stripped the outer 802.1P vlan 0 header
5596 * but could not find a vlan dev.
5597 * Check the vlan id again to set OTHERHOST.
5598 */
5599 goto check_vlan_id;
5600 }
5601 /* Note: we might in the future use prio bits
5602 * and set skb->priority like in vlan_do_receive().
5603 * For the time being, just ignore the Priority Code Point.
5604 */
5605 __vlan_hwaccel_clear_tag(skb);
5606 }
5607
5608 type = skb->protocol;
5609
5610 /* deliver only exact match when indicated */
5611 if (likely(!deliver_exact)) {
5612 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5613 &ptype_base[ntohs(type) &
5614 PTYPE_HASH_MASK]);
5615 }
5616
5617 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5618 &orig_dev->ptype_specific);
5619
5620 if (unlikely(skb->dev != orig_dev)) {
5621 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5622 &skb->dev->ptype_specific);
5623 }
5624
5625 if (pt_prev) {
5626 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
5627 goto drop;
5628 *ppt_prev = pt_prev;
5629 } else {
5630drop:
5631 if (!deliver_exact)
5632 dev_core_stats_rx_dropped_inc(skb->dev);
5633 else
5634 dev_core_stats_rx_nohandler_inc(skb->dev);
5635 kfree_skb_reason(skb, SKB_DROP_REASON_UNHANDLED_PROTO);
5636 /* Jamal, now you will not be able to escape explaining
5637 * to me how you were going to use this. :-)
5638 */
5639 ret = NET_RX_DROP;
5640 }
5641
5642out:
5643 /* The invariant here is that if *ppt_prev is not NULL
5644 * then skb must also be non-NULL.
5645 *
5646 * The *ppt_prev assignment above maintains this invariant
5647 * because of the skb dereference near it.
5648 */
5649 *pskb = skb;
5650 return ret;
5651}
5652
5653static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
5654{
5655 struct net_device *orig_dev = skb->dev;
5656 struct packet_type *pt_prev = NULL;
5657 int ret;
5658
5659 ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5660 if (pt_prev)
5661 ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
5662 skb->dev, pt_prev, orig_dev);
5663 return ret;
5664}
5665
5666/**
5667 * netif_receive_skb_core - special purpose version of netif_receive_skb
5668 * @skb: buffer to process
5669 *
5670 * More direct receive version of netif_receive_skb(). It should
5671 * only be used by callers that need to skip RPS and generic XDP.
5672 * The caller must also take care of handling ``(page_is_)pfmemalloc`` skbs.
5673 *
5674 * This function may only be called from softirq context and interrupts
5675 * should be enabled.
5676 *
5677 * Return values (usually ignored):
5678 * NET_RX_SUCCESS: no congestion
5679 * NET_RX_DROP: packet was dropped
5680 */
5681int netif_receive_skb_core(struct sk_buff *skb)
5682{
5683 int ret;
5684
5685 rcu_read_lock();
5686 ret = __netif_receive_skb_one_core(skb, false);
5687 rcu_read_unlock();
5688
5689 return ret;
5690}
5691EXPORT_SYMBOL(netif_receive_skb_core);
5692
5693static inline void __netif_receive_skb_list_ptype(struct list_head *head,
5694 struct packet_type *pt_prev,
5695 struct net_device *orig_dev)
5696{
5697 struct sk_buff *skb, *next;
5698
5699 if (!pt_prev)
5700 return;
5701 if (list_empty(head))
5702 return;
5703 if (pt_prev->list_func != NULL)
5704 INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
5705 ip_list_rcv, head, pt_prev, orig_dev);
5706 else
5707 list_for_each_entry_safe(skb, next, head, list) {
5708 skb_list_del_init(skb);
5709 pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
5710 }
5711}
5712
5713static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
5714{
5715 /* Fast-path assumptions:
5716 * - There is no RX handler.
5717 * - Only one packet_type matches.
5718 * If either of these fails, we will end up doing some per-packet
5719 * processing in-line, then handling the 'last ptype' for the whole
5720 * sublist. This can't cause out-of-order delivery to any single ptype,
5721 * because the 'last ptype' must be constant across the sublist, and all
5722 * other ptypes are handled per-packet.
5723 */
5724 /* Current (common) ptype of sublist */
5725 struct packet_type *pt_curr = NULL;
5726 /* Current (common) orig_dev of sublist */
5727 struct net_device *od_curr = NULL;
5728 struct list_head sublist;
5729 struct sk_buff *skb, *next;
5730
5731 INIT_LIST_HEAD(&sublist);
5732 list_for_each_entry_safe(skb, next, head, list) {
5733 struct net_device *orig_dev = skb->dev;
5734 struct packet_type *pt_prev = NULL;
5735
5736 skb_list_del_init(skb);
5737 __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5738 if (!pt_prev)
5739 continue;
5740 if (pt_curr != pt_prev || od_curr != orig_dev) {
5741 /* dispatch old sublist */
5742 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5743 /* start new sublist */
5744 INIT_LIST_HEAD(&sublist);
5745 pt_curr = pt_prev;
5746 od_curr = orig_dev;
5747 }
5748 list_add_tail(&skb->list, &sublist);
5749 }
5750
5751 /* dispatch final sublist */
5752 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5753}
5754
5755static int __netif_receive_skb(struct sk_buff *skb)
5756{
5757 int ret;
5758
5759 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
5760 unsigned int noreclaim_flag;
5761
5762 /*
5763 * PFMEMALLOC skbs are special, they should
5764 * - be delivered to SOCK_MEMALLOC sockets only
5765 * - stay away from userspace
5766 * - have bounded memory usage
5767 *
5768 * Use PF_MEMALLOC as this saves us from propagating the allocation
5769 * context down to all allocation sites.
5770 */
5771 noreclaim_flag = memalloc_noreclaim_save();
5772 ret = __netif_receive_skb_one_core(skb, true);
5773 memalloc_noreclaim_restore(noreclaim_flag);
5774 } else
5775 ret = __netif_receive_skb_one_core(skb, false);
5776
5777 return ret;
5778}
5779
5780static void __netif_receive_skb_list(struct list_head *head)
5781{
5782 unsigned long noreclaim_flag = 0;
5783 struct sk_buff *skb, *next;
5784 bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */
5785
5786 list_for_each_entry_safe(skb, next, head, list) {
5787 if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
5788 struct list_head sublist;
5789
5790 /* Handle the previous sublist */
5791 list_cut_before(&sublist, head, &skb->list);
5792 if (!list_empty(&sublist))
5793 __netif_receive_skb_list_core(&sublist, pfmemalloc);
5794 pfmemalloc = !pfmemalloc;
5795 /* See comments in __netif_receive_skb */
5796 if (pfmemalloc)
5797 noreclaim_flag = memalloc_noreclaim_save();
5798 else
5799 memalloc_noreclaim_restore(noreclaim_flag);
5800 }
5801 }
5802 /* Handle the remaining sublist */
5803 if (!list_empty(head))
5804 __netif_receive_skb_list_core(head, pfmemalloc);
5805 /* Restore pflags */
5806 if (pfmemalloc)
5807 memalloc_noreclaim_restore(noreclaim_flag);
5808}
5809
5810static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
5811{
5812 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
5813 struct bpf_prog *new = xdp->prog;
5814 int ret = 0;
5815
5816 switch (xdp->command) {
5817 case XDP_SETUP_PROG:
5818 rcu_assign_pointer(dev->xdp_prog, new);
5819 if (old)
5820 bpf_prog_put(old);
5821
5822 if (old && !new) {
5823 static_branch_dec(&generic_xdp_needed_key);
5824 } else if (new && !old) {
5825 static_branch_inc(&generic_xdp_needed_key);
5826 dev_disable_lro(dev);
5827 dev_disable_gro_hw(dev);
5828 }
5829 break;
5830
5831 default:
5832 ret = -EINVAL;
5833 break;
5834 }
5835
5836 return ret;
5837}
5838
5839static int netif_receive_skb_internal(struct sk_buff *skb)
5840{
5841 int ret;
5842
5843 net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb);
5844
5845 if (skb_defer_rx_timestamp(skb))
5846 return NET_RX_SUCCESS;
5847
5848 rcu_read_lock();
5849#ifdef CONFIG_RPS
5850 if (static_branch_unlikely(&rps_needed)) {
5851 struct rps_dev_flow voidflow, *rflow = &voidflow;
5852 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5853
5854 if (cpu >= 0) {
5855 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5856 rcu_read_unlock();
5857 return ret;
5858 }
5859 }
5860#endif
5861 ret = __netif_receive_skb(skb);
5862 rcu_read_unlock();
5863 return ret;
5864}
5865
5866void netif_receive_skb_list_internal(struct list_head *head)
5867{
5868 struct sk_buff *skb, *next;
5869 struct list_head sublist;
5870
5871 INIT_LIST_HEAD(&sublist);
5872 list_for_each_entry_safe(skb, next, head, list) {
5873 net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue),
5874 skb);
5875 skb_list_del_init(skb);
5876 if (!skb_defer_rx_timestamp(skb))
5877 list_add_tail(&skb->list, &sublist);
5878 }
5879 list_splice_init(&sublist, head);
5880
5881 rcu_read_lock();
5882#ifdef CONFIG_RPS
5883 if (static_branch_unlikely(&rps_needed)) {
5884 list_for_each_entry_safe(skb, next, head, list) {
5885 struct rps_dev_flow voidflow, *rflow = &voidflow;
5886 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5887
5888 if (cpu >= 0) {
5889 /* Will be handled; remove from list */
5890 skb_list_del_init(skb);
5891 enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5892 }
5893 }
5894 }
5895#endif
5896 __netif_receive_skb_list(head);
5897 rcu_read_unlock();
5898}
5899
5900/**
5901 * netif_receive_skb - process receive buffer from network
5902 * @skb: buffer to process
5903 *
5904 * netif_receive_skb() is the main receive data processing function.
5905 * It always succeeds. The buffer may be dropped during processing
5906 * for congestion control or by the protocol layers.
5907 *
5908 * This function may only be called from softirq context and interrupts
5909 * should be enabled.
5910 *
5911 * Return values (usually ignored):
5912 * NET_RX_SUCCESS: no congestion
5913 * NET_RX_DROP: packet was dropped
5914 */
5915int netif_receive_skb(struct sk_buff *skb)
5916{
5917 int ret;
5918
5919 trace_netif_receive_skb_entry(skb);
5920
5921 ret = netif_receive_skb_internal(skb);
5922 trace_netif_receive_skb_exit(ret);
5923
5924 return ret;
5925}
5926EXPORT_SYMBOL(netif_receive_skb);
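
/*
 * Illustrative sketch (not part of the original file): the minimal RX
 * path a driver could use to feed one received frame to
 * netif_receive_skb(). DMA/descriptor handling is elided; the helper
 * name is an assumption. eth_type_trans() sets skb->protocol before
 * the buffer is handed to the stack, and the call must run in softirq
 * context as the kerneldoc above requires.
 */
static int __maybe_unused example_rx_one(struct net_device *dev,
 const void *data, unsigned int len)
{
 struct sk_buff *skb = netdev_alloc_skb(dev, len);

 if (!skb)
 return NET_RX_DROP;
 skb_put_data(skb, data, len);
 skb->protocol = eth_type_trans(skb, dev);
 return netif_receive_skb(skb);
}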
5927
5928/**
5929 * netif_receive_skb_list - process many receive buffers from network
5930 * @head: list of skbs to process.
5931 *
5932 * Since the return value of netif_receive_skb() is normally ignored, and
5933 * wouldn't be meaningful for a list, this function returns void.
5934 *
5935 * This function may only be called from softirq context and interrupts
5936 * should be enabled.
5937 */
5938void netif_receive_skb_list(struct list_head *head)
5939{
5940 struct sk_buff *skb;
5941
5942 if (list_empty(head))
5943 return;
5944 if (trace_netif_receive_skb_list_entry_enabled()) {
5945 list_for_each_entry(skb, head, list)
5946 trace_netif_receive_skb_list_entry(skb);
5947 }
5948 netif_receive_skb_list_internal(head);
5949 trace_netif_receive_skb_list_exit(0);
5950}
5951EXPORT_SYMBOL(netif_receive_skb_list);
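
/*
 * Illustrative sketch (not part of the original file): batching skbs on
 * a list before a single netif_receive_skb_list() call, which is how
 * NAPI drivers amortize the per-packet cost of entering the stack. The
 * helper name and the caller-provided array are assumptions.
 */
static void __maybe_unused example_rx_batch(struct sk_buff **skbs, int n)
{
 LIST_HEAD(head);
 int i;

 for (i = 0; i < n; i++)
 list_add_tail(&skbs[i]->list, &head);
 netif_receive_skb_list(&head); /* consumes every skb on the list */
}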
5952
5953static DEFINE_PER_CPU(struct work_struct, flush_works);
5954
5955/* Network device is going away; flush any packets still pending. */
5956static void flush_backlog(struct work_struct *work)
5957{
5958 struct sk_buff *skb, *tmp;
5959 struct softnet_data *sd;
5960
5961 local_bh_disable();
5962 sd = this_cpu_ptr(&softnet_data);
5963
5964 backlog_lock_irq_disable(sd);
5965 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
5966 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5967 __skb_unlink(skb, &sd->input_pkt_queue);
5968 dev_kfree_skb_irq(skb);
5969 rps_input_queue_head_incr(sd);
5970 }
5971 }
5972 backlog_unlock_irq_enable(sd);
5973
5974 local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
5975 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
5976 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5977 __skb_unlink(skb, &sd->process_queue);
5978 kfree_skb(skb);
5979 rps_input_queue_head_incr(sd);
5980 }
5981 }
5982 local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
5983 local_bh_enable();
5984}
5985
5986static bool flush_required(int cpu)
5987{
5988#if IS_ENABLED(CONFIG_RPS)
5989 struct softnet_data *sd = &per_cpu(softnet_data, cpu);
5990 bool do_flush;
5991
5992 backlog_lock_irq_disable(sd);
5993
5994 /* As insertion into process_queue happens with the RPS lock held,
5995 * process_queue access may race only with dequeue.
5996 */
5997 do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
5998 !skb_queue_empty_lockless(&sd->process_queue);
5999 backlog_unlock_irq_enable(sd);
6000
6001 return do_flush;
6002#endif
6003 /* Without RPS we can't safely check input_pkt_queue: during a
6004 * concurrent remote skb_queue_splice() we can detect as empty both
6005 * input_pkt_queue and process_queue even if the latter could end up
6006 * containing a lot of packets.
6007 */
6008 return true;
6009}
6010
6011static void flush_all_backlogs(void)
6012{
6013 static cpumask_t flush_cpus;
6014 unsigned int cpu;
6015
6016 /* Since we are under RTNL lock protection we can use static data
6017 * for the cpumask and avoid allocating the possibly large mask
6018 * on the stack
6019 */
6020 ASSERT_RTNL();
6021
6022 cpus_read_lock();
6023
6024 cpumask_clear(&flush_cpus);
6025 for_each_online_cpu(cpu) {
6026 if (flush_required(cpu)) {
6027 queue_work_on(cpu, system_highpri_wq,
6028 per_cpu_ptr(&flush_works, cpu));
6029 cpumask_set_cpu(cpu, &flush_cpus);
6030 }
6031 }
6032
6033 /* We can have in-flight packets on the CPUs we are not flushing;
6034 * synchronize_net() in unregister_netdevice_many() will take care of
6035 * them.
6036 */
6037 for_each_cpu(cpu, &flush_cpus)
6038 flush_work(per_cpu_ptr(&flush_works, cpu));
6039
6040 cpus_read_unlock();
6041}
6042
6043static void net_rps_send_ipi(struct softnet_data *remsd)
6044{
6045#ifdef CONFIG_RPS
6046 while (remsd) {
6047 struct softnet_data *next = remsd->rps_ipi_next;
6048
6049 if (cpu_online(remsd->cpu))
6050 smp_call_function_single_async(remsd->cpu, &remsd->csd);
6051 remsd = next;
6052 }
6053#endif
6054}
6055
6056/*
6057 * net_rps_action_and_irq_enable() sends any pending IPIs for RPS.
6058 * Note: called with local irq disabled, but exits with local irq enabled.
6059 */
6060static void net_rps_action_and_irq_enable(struct softnet_data *sd)
6061{
6062#ifdef CONFIG_RPS
6063 struct softnet_data *remsd = sd->rps_ipi_list;
6064
6065 if (!use_backlog_threads() && remsd) {
6066 sd->rps_ipi_list = NULL;
6067
6068 local_irq_enable();
6069
6070 /* Send pending IPIs to kick RPS processing on remote CPUs. */
6071 net_rps_send_ipi(remsd);
6072 } else
6073#endif
6074 local_irq_enable();
6075}
6076
6077static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
6078{
6079#ifdef CONFIG_RPS
6080 return !use_backlog_threads() && sd->rps_ipi_list;
6081#else
6082 return false;
6083#endif
6084}
6085
6086static int process_backlog(struct napi_struct *napi, int quota)
6087{
6088 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
6089 bool again = true;
6090 int work = 0;
6091
6092 /* Check if we have pending IPIs; it's better to send them now
6093 * than to wait for net_rx_action() to end.
6094 */
6095 if (sd_has_rps_ipi_waiting(sd)) {
6096 local_irq_disable();
6097 net_rps_action_and_irq_enable(sd);
6098 }
6099
6100 napi->weight = READ_ONCE(net_hotdata.dev_rx_weight);
6101 while (again) {
6102 struct sk_buff *skb;
6103
6104 local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
6105 while ((skb = __skb_dequeue(&sd->process_queue))) {
6106 local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
6107 rcu_read_lock();
6108 __netif_receive_skb(skb);
6109 rcu_read_unlock();
6110 if (++work >= quota) {
6111 rps_input_queue_head_add(sd, work);
6112 return work;
6113 }
6114
6115 local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
6116 }
6117 local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
6118
6119 backlog_lock_irq_disable(sd);
6120 if (skb_queue_empty(&sd->input_pkt_queue)) {
6121 /*
6122 * Inline a custom version of __napi_complete().
6123 * Only the current CPU owns and manipulates this napi,
6124 * and NAPI_STATE_SCHED is the only possible flag set
6125 * on backlog.
6126 * We can use a plain write instead of clear_bit(),
6127 * and we don't need an smp_mb() memory barrier.
6128 */
6129 napi->state &= NAPIF_STATE_THREADED;
6130 again = false;
6131 } else {
6132 local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
6133 skb_queue_splice_tail_init(&sd->input_pkt_queue,
6134 &sd->process_queue);
6135 local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
6136 }
6137 backlog_unlock_irq_enable(sd);
6138 }
6139
6140 if (work)
6141 rps_input_queue_head_add(sd, work);
6142 return work;
6143}
6144
6145/**
6146 * __napi_schedule - schedule for receive
6147 * @n: entry to schedule
6148 *
6149 * The entry's receive function will be scheduled to run.
6150 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
6151 */
6152void __napi_schedule(struct napi_struct *n)
6153{
6154 unsigned long flags;
6155
6156 local_irq_save(flags);
6157 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
6158 local_irq_restore(flags);
6159}
6160EXPORT_SYMBOL(__napi_schedule);
6161
6162/**
6163 * napi_schedule_prep - check if napi can be scheduled
6164 * @n: napi context
6165 *
6166 * Test if the NAPI routine is already running, and if not mark
6167 * it as running. This is used as a condition variable to
6168 * ensure only one NAPI poll instance runs. We also make
6169 * sure there is no pending NAPI disable.
6170 */
6171bool napi_schedule_prep(struct napi_struct *n)
6172{
6173 unsigned long new, val = READ_ONCE(n->state);
6174
6175 do {
6176 if (unlikely(val & NAPIF_STATE_DISABLE))
6177 return false;
6178 new = val | NAPIF_STATE_SCHED;
6179
6180 /* Set the STATE_MISSED bit if STATE_SCHED was already set.
6181 * This was suggested by Alexander Duyck, as the compiler
6182 * emits better code than:
6183 * if (val & NAPIF_STATE_SCHED)
6184 * new |= NAPIF_STATE_MISSED;
6185 */
6186 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
6187 NAPIF_STATE_MISSED;
6188 } while (!try_cmpxchg(&n->state, &val, new));
6189
6190 return !(val & NAPIF_STATE_SCHED);
6191}
6192EXPORT_SYMBOL(napi_schedule_prep);
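
/*
 * Worked example (not part of the original file) of the branchless
 * MISSED update in napi_schedule_prep() above, with illustrative mask
 * values NAPIF_STATE_SCHED == 0x1 and NAPIF_STATE_MISSED == 0x2
 * assumed purely for the arithmetic:
 *
 *   val & NAPIF_STATE_SCHED  -> 0x1 if SCHED was set, else 0
 *   ... / NAPIF_STATE_SCHED  -> 1 or 0
 *   ... * NAPIF_STATE_MISSED -> 0x2 or 0
 *
 * i.e. "new |= MISSED iff SCHED was already set". Both masks are
 * compile-time powers of two, so the divide and multiply reduce to
 * shifts and no conditional branch is emitted.
 */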
6193
6194/**
6195 * __napi_schedule_irqoff - schedule for receive
6196 * @n: entry to schedule
6197 *
6198 * Variant of __napi_schedule() assuming hard irqs are masked.
6199 *
6200 * On PREEMPT_RT enabled kernels this maps to __napi_schedule()
6201 * because the interrupt disabled assumption might not be true
6202 * due to force-threaded interrupts and spinlock substitution.
6203 */
6204void __napi_schedule_irqoff(struct napi_struct *n)
6205{
6206 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
6207 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
6208 else
6209 __napi_schedule(n);
6210}
6211EXPORT_SYMBOL(__napi_schedule_irqoff);
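
/*
 * Illustrative sketch (not part of the original file): the usual
 * hard-irq half of a NAPI driver, pairing napi_schedule_prep() with
 * __napi_schedule_irqoff(). The handler name is an assumption; the
 * point is that the irq handler only masks the device interrupt and
 * defers all packet work to the poll callback.
 */
static irqreturn_t __maybe_unused example_isr(int irq, void *dev_id)
{
 struct napi_struct *napi = dev_id;

 if (napi_schedule_prep(napi)) {
 /* the driver would mask its device interrupt here */
 __napi_schedule_irqoff(napi);
 }
 return IRQ_HANDLED;
}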
6212
6213bool napi_complete_done(struct napi_struct *n, int work_done)
6214{
6215 unsigned long flags, val, new, timeout = 0;
6216 bool ret = true;
6217
6218 /*
6219 * 1) Don't let napi dequeue from the CPU poll list
6220 * just in case it's running on a different CPU.
6221 * 2) If we are busy polling, do nothing here; we have
6222 * the guarantee we will be called later.
6223 */
6224 if (unlikely(n->state & (NAPIF_STATE_NPSVC |
6225 NAPIF_STATE_IN_BUSY_POLL)))
6226 return false;
6227
6228 if (work_done) {
6229 if (n->gro_bitmask)
6230 timeout = READ_ONCE(n->dev->gro_flush_timeout);
6231 n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs);
6232 }
6233 if (n->defer_hard_irqs_count > 0) {
6234 n->defer_hard_irqs_count--;
6235 timeout = READ_ONCE(n->dev->gro_flush_timeout);
6236 if (timeout)
6237 ret = false;
6238 }
6239 if (n->gro_bitmask) {
6240 /* When the NAPI instance uses a timeout and keeps postponing
6241 * it, we need to somehow bound the time packets are kept in
6242 * the GRO layer.
6243 */
6244 napi_gro_flush(n, !!timeout);
6245 }
6246
6247 gro_normal_list(n);
6248
6249 if (unlikely(!list_empty(&n->poll_list))) {
6250 /* If n->poll_list is not empty, we need to mask irqs */
6251 local_irq_save(flags);
6252 list_del_init(&n->poll_list);
6253 local_irq_restore(flags);
6254 }
6255 WRITE_ONCE(n->list_owner, -1);
6256
6257 val = READ_ONCE(n->state);
6258 do {
6259 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
6260
6261 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED |
6262 NAPIF_STATE_SCHED_THREADED |
6263 NAPIF_STATE_PREFER_BUSY_POLL);
6264
6265 /* If STATE_MISSED was set, leave STATE_SCHED set,
6266 * because we will call napi->poll() one more time.
6267 * This C code was suggested by Alexander Duyck to help gcc.
6268 */
6269 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
6270 NAPIF_STATE_SCHED;
6271 } while (!try_cmpxchg(&n->state, &val, new));
6272
6273 if (unlikely(val & NAPIF_STATE_MISSED)) {
6274 __napi_schedule(n);
6275 return false;
6276 }
6277
6278 if (timeout)
6279 hrtimer_start(&n->timer, ns_to_ktime(timeout),
6280 HRTIMER_MODE_REL_PINNED);
6281 return ret;
6282}
6283EXPORT_SYMBOL(napi_complete_done);
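
/*
 * Illustrative sketch (not part of the original file): the canonical
 * shape of a driver poll callback built around napi_complete_done().
 * The descriptor-ring processing is elided and the function name is an
 * assumption.
 */
static int __maybe_unused example_poll(struct napi_struct *napi, int budget)
{
 int work_done = 0;

 /* ... clean up to 'budget' RX descriptors here, incrementing
  * work_done for each processed packet ...
  */

 /* Re-arm device interrupts only when the budget was not exhausted
  * and napi_complete_done() agreed to stop polling (i.e. no timeout
  * or irq-deferral kept the instance scheduled).
  */
 if (work_done < budget && napi_complete_done(napi, work_done)) {
 /* the driver would unmask its device interrupt here */
 }
 return work_done;
}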
6284
6285/* must be called under rcu_read_lock(), as we don't take a reference */
6286struct napi_struct *napi_by_id(unsigned int napi_id)
6287{
6288 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
6289 struct napi_struct *napi;
6290
6291 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
6292 if (napi->napi_id == napi_id)
6293 return napi;
6294
6295 return NULL;
6296}
6297
6298static void skb_defer_free_flush(struct softnet_data *sd)
6299{
6300 struct sk_buff *skb, *next;
6301
6302 /* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
6303 if (!READ_ONCE(sd->defer_list))
6304 return;
6305
6306 spin_lock(&sd->defer_lock);
6307 skb = sd->defer_list;
6308 sd->defer_list = NULL;
6309 sd->defer_count = 0;
6310 spin_unlock(&sd->defer_lock);
6311
6312 while (skb != NULL) {
6313 next = skb->next;
6314 napi_consume_skb(skb, 1);
6315 skb = next;
6316 }
6317}
6318
6319#if defined(CONFIG_NET_RX_BUSY_POLL)
6320
6321static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
6322{
6323 if (!skip_schedule) {
6324 gro_normal_list(napi);
6325 __napi_schedule(napi);
6326 return;
6327 }
6328
6329 if (napi->gro_bitmask) {
6330 /* Flush packets that are too old.
6331 * If HZ < 1000, flush all packets.
6332 */
6333 napi_gro_flush(napi, HZ >= 1000);
6334 }
6335
6336 gro_normal_list(napi);
6337 clear_bit(NAPI_STATE_SCHED, &napi->state);
6338}
6339
6340enum {
6341 NAPI_F_PREFER_BUSY_POLL = 1,
6342 NAPI_F_END_ON_RESCHED = 2,
6343};
6344
6345static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock,
6346 unsigned flags, u16 budget)
6347{
6348 struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
6349 bool skip_schedule = false;
6350 unsigned long timeout;
6351 int rc;
6352
6353 /* Busy polling means there is a high chance the device driver's hard
6354 * irq could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
6355 * set in napi_schedule_prep().
6356 * Since we are about to call napi->poll() once more, we can safely
6357 * clear NAPI_STATE_MISSED.
6358 *
6359 * Note: x86 could use a single "lock and ..." instruction
6360 * to perform these two clear_bit() calls.
6361 */
6362 clear_bit(NAPI_STATE_MISSED, &napi->state);
6363 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
6364
6365 local_bh_disable();
6366 bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
6367
6368 if (flags & NAPI_F_PREFER_BUSY_POLL) {
6369 napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs);
6370 timeout = READ_ONCE(napi->dev->gro_flush_timeout);
6371 if (napi->defer_hard_irqs_count && timeout) {
6372 hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED);
6373 skip_schedule = true;
6374 }
6375 }
6376
6377 /* All we really want here is to re-enable device interrupts.
6378 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
6379 */
6380 rc = napi->poll(napi, budget);
6381 /* We can't gro_normal_list() here, because napi->poll() might have
6382 * rearmed the napi (napi_complete_done()) in which case it could
6383 * already be running on another CPU.
6384 */
6385 trace_napi_poll(napi, rc, budget);
6386 netpoll_poll_unlock(have_poll_lock);
6387 if (rc == budget)
6388 __busy_poll_stop(napi, skip_schedule);
6389 bpf_net_ctx_clear(bpf_net_ctx);
6390 local_bh_enable();
6391}
6392
6393static void __napi_busy_loop(unsigned int napi_id,
6394 bool (*loop_end)(void *, unsigned long),
6395 void *loop_end_arg, unsigned flags, u16 budget)
6396{
6397 unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
6398 int (*napi_poll)(struct napi_struct *napi, int budget);
6399 struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
6400 void *have_poll_lock = NULL;
6401 struct napi_struct *napi;
6402
6403 WARN_ON_ONCE(!rcu_read_lock_held());
6404
6405restart:
6406 napi_poll = NULL;
6407
6408 napi = napi_by_id(napi_id);
6409 if (!napi)
6410 return;
6411
6412 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
6413 preempt_disable();
6414 for (;;) {
6415 int work = 0;
6416
6417 local_bh_disable();
6418 bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
6419 if (!napi_poll) {
6420 unsigned long val = READ_ONCE(napi->state);
6421
6422 /* If multiple threads are competing for this napi,
6423 * we avoid dirtying napi->state as much as we can.
6424 */
6425 if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
6426 NAPIF_STATE_IN_BUSY_POLL)) {
6427 if (flags & NAPI_F_PREFER_BUSY_POLL)
6428 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6429 goto count;
6430 }
6431 if (cmpxchg(&napi->state, val,
6432 val | NAPIF_STATE_IN_BUSY_POLL |
6433 NAPIF_STATE_SCHED) != val) {
6434 if (flags & NAPI_F_PREFER_BUSY_POLL)
6435 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6436 goto count;
6437 }
6438 have_poll_lock = netpoll_poll_lock(napi);
6439 napi_poll = napi->poll;
6440 }
6441 work = napi_poll(napi, budget);
6442 trace_napi_poll(napi, work, budget);
6443 gro_normal_list(napi);
6444count:
6445 if (work > 0)
6446 __NET_ADD_STATS(dev_net(napi->dev),
6447 LINUX_MIB_BUSYPOLLRXPACKETS, work);
6448 skb_defer_free_flush(this_cpu_ptr(&softnet_data));
6449 bpf_net_ctx_clear(bpf_net_ctx);
6450 local_bh_enable();
6451
6452 if (!loop_end || loop_end(loop_end_arg, start_time))
6453 break;
6454
6455 if (unlikely(need_resched())) {
6456 if (flags & NAPI_F_END_ON_RESCHED)
6457 break;
6458 if (napi_poll)
6459 busy_poll_stop(napi, have_poll_lock, flags, budget);
6460 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
6461 preempt_enable();
6462 rcu_read_unlock();
6463 cond_resched();
6464 rcu_read_lock();
6465 if (loop_end(loop_end_arg, start_time))
6466 return;
6467 goto restart;
6468 }
6469 cpu_relax();
6470 }
6471 if (napi_poll)
6472 busy_poll_stop(napi, have_poll_lock, flags, budget);
6473 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
6474 preempt_enable();
6475}
6476
6477void napi_busy_loop_rcu(unsigned int napi_id,
6478 bool (*loop_end)(void *, unsigned long),
6479 void *loop_end_arg, bool prefer_busy_poll, u16 budget)
6480{
6481 unsigned flags = NAPI_F_END_ON_RESCHED;
6482
6483 if (prefer_busy_poll)
6484 flags |= NAPI_F_PREFER_BUSY_POLL;
6485
6486 __napi_busy_loop(napi_id, loop_end, loop_end_arg, flags, budget);
6487}
6488
6489void napi_busy_loop(unsigned int napi_id,
6490 bool (*loop_end)(void *, unsigned long),
6491 void *loop_end_arg, bool prefer_busy_poll, u16 budget)
6492{
6493 unsigned flags = prefer_busy_poll ? NAPI_F_PREFER_BUSY_POLL : 0;
6494
6495 rcu_read_lock();
6496 __napi_busy_loop(napi_id, loop_end, loop_end_arg, flags, budget);
6497 rcu_read_unlock();
6498}
6499EXPORT_SYMBOL(napi_busy_loop);
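
/*
 * Illustrative sketch (not part of the original file): a loop_end
 * callback for napi_busy_loop(), bounding the poll by a time budget in
 * the spirit of the socket busy-poll code. The helper name and the 50
 * unit budget (roughly microseconds, given busy_loop_current_time()'s
 * scale) are assumptions.
 */
static bool __maybe_unused example_loop_end(void *arg, unsigned long start_time)
{
 unsigned long budget = 50; /* arbitrary example value */

 return time_after(busy_loop_current_time(), start_time + budget) ||
 need_resched();
}
/* A caller would then busy poll with, e.g.:
 * napi_busy_loop(napi_id, example_loop_end, NULL, false, 64);
 */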
6500
6501#endif /* CONFIG_NET_RX_BUSY_POLL */
6502
6503static void napi_hash_add(struct napi_struct *napi)
6504{
6505 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state))
6506 return;
6507
6508 spin_lock(&napi_hash_lock);
6509
6510 /* The 0..NR_CPUS range is reserved for sender_cpu use. */
6511 do {
6512 if (unlikely(++napi_gen_id < MIN_NAPI_ID))
6513 napi_gen_id = MIN_NAPI_ID;
6514 } while (napi_by_id(napi_gen_id));
6515 napi->napi_id = napi_gen_id;
6516
6517 hlist_add_head_rcu(&napi->napi_hash_node,
6518 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
6519
6520 spin_unlock(&napi_hash_lock);
6521}
6522
6523/* Warning: the caller is responsible for making sure an RCU grace
6524 * period is respected before freeing the memory containing @napi
6525 */
6526static void napi_hash_del(struct napi_struct *napi)
6527{
6528 spin_lock(&napi_hash_lock);
6529
6530 hlist_del_init_rcu(&napi->napi_hash_node);
6531
6532 spin_unlock(&napi_hash_lock);
6533}
6534
6535static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
6536{
6537 struct napi_struct *napi;
6538
6539 napi = container_of(timer, struct napi_struct, timer);
6540
6541 /* Note: we use a relaxed variant of napi_schedule_prep() that does not set
6542 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
6543 */
6544 if (!napi_disable_pending(napi) &&
6545 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) {
6546 clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6547 __napi_schedule_irqoff(napi);
6548 }
6549
6550 return HRTIMER_NORESTART;
6551}
6552
6553static void init_gro_hash(struct napi_struct *napi)
6554{
6555 int i;
6556
6557 for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6558 INIT_LIST_HEAD(&napi->gro_hash[i].list);
6559 napi->gro_hash[i].count = 0;
6560 }
6561 napi->gro_bitmask = 0;
6562}
6563
6564int dev_set_threaded(struct net_device *dev, bool threaded)
6565{
6566 struct napi_struct *napi;
6567 int err = 0;
6568
6569 if (dev->threaded == threaded)
6570 return 0;
6571
6572 if (threaded) {
6573 list_for_each_entry(napi, &dev->napi_list, dev_list) {
6574 if (!napi->thread) {
6575 err = napi_kthread_create(napi);
6576 if (err) {
6577 threaded = false;
6578 break;
6579 }
6580 }
6581 }
6582 }
6583
6584 WRITE_ONCE(dev->threaded, threaded);
6585
6586 /* Make sure the kthread is created before the THREADED bit
6587 * is set.
6588 */
6589 smp_mb__before_atomic();
6590
6591 /* Setting/unsetting threaded mode on a napi might not take effect
6592 * immediately if the current napi instance is actively being
6593 * polled. In this case, the switch between threaded mode and
6594 * softirq mode will happen in the next round of napi_schedule().
6595 * This should not cause hiccups or stalls in live traffic.
6596 */
6597 list_for_each_entry(napi, &dev->napi_list, dev_list)
6598 assign_bit(NAPI_STATE_THREADED, &napi->state, threaded);
6599
6600 return err;
6601}
6602EXPORT_SYMBOL(dev_set_threaded);
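
/*
 * Illustrative sketch (not part of the original file): flipping a
 * device into threaded NAPI mode, mirroring what writing 1 to the
 * sysfs "threaded" attribute does. dev_set_threaded() walks
 * dev->napi_list, so RTNL is taken around it; the helper name is an
 * assumption.
 */
static int __maybe_unused example_enable_threaded(struct net_device *dev)
{
 int err;

 rtnl_lock();
 err = dev_set_threaded(dev, true);
 rtnl_unlock();
 return err;
}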
6603
6604/**
6605 * netif_queue_set_napi - Associate queue with the napi
6606 * @dev: device to which NAPI and queue belong
6607 * @queue_index: Index of queue
6608 * @type: queue type as RX or TX
6609 * @napi: NAPI context, pass NULL to clear previously set NAPI
6610 *
6611 * Associate the queue with its corresponding napi context. This should be
6612 * done after the NAPI handler for the queue vector has been registered and
6613 * the queues have been mapped to the corresponding interrupt vector.
6614 */
6615void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
6616 enum netdev_queue_type type, struct napi_struct *napi)
6617{
6618 struct netdev_rx_queue *rxq;
6619 struct netdev_queue *txq;
6620
6621 if (WARN_ON_ONCE(napi && !napi->dev))
6622 return;
6623 if (dev->reg_state >= NETREG_REGISTERED)
6624 ASSERT_RTNL();
6625
6626 switch (type) {
6627 case NETDEV_QUEUE_TYPE_RX:
6628 rxq = __netif_get_rx_queue(dev, queue_index);
6629 rxq->napi = napi;
6630 return;
6631 case NETDEV_QUEUE_TYPE_TX:
6632 txq = netdev_get_tx_queue(dev, queue_index);
6633 txq->napi = napi;
6634 return;
6635 default:
6636 return;
6637 }
6638}
6639EXPORT_SYMBOL(netif_queue_set_napi);
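
/*
 * Illustrative sketch (not part of the original file): a driver mapping
 * a combined RX/TX queue pair to one NAPI instance after interrupt
 * setup, as the kerneldoc above prescribes. Sharing one index for both
 * queue types is a common but driver-specific assumption, as is the
 * helper name.
 */
static void __maybe_unused example_map_queue(struct net_device *dev,
 unsigned int idx,
 struct napi_struct *napi)
{
 rtnl_lock(); /* required once the device is registered */
 netif_queue_set_napi(dev, idx, NETDEV_QUEUE_TYPE_RX, napi);
 netif_queue_set_napi(dev, idx, NETDEV_QUEUE_TYPE_TX, napi);
 rtnl_unlock();
}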
6640
6641void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
6642 int (*poll)(struct napi_struct *, int), int weight)
6643{
6644 if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
6645 return;
6646
6647 INIT_LIST_HEAD(&napi->poll_list);
6648 INIT_HLIST_NODE(&napi->napi_hash_node);
6649 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6650 napi->timer.function = napi_watchdog;
6651 init_gro_hash(napi);
6652 napi->skb = NULL;
6653 INIT_LIST_HEAD(&napi->rx_list);
6654 napi->rx_count = 0;
6655 napi->poll = poll;
6656 if (weight > NAPI_POLL_WEIGHT)
6657 netdev_err_once(dev, "%s() called with weight %d\n", __func__,
6658 weight);
6659 napi->weight = weight;
6660 napi->dev = dev;
6661#ifdef CONFIG_NETPOLL
6662 napi->poll_owner = -1;
6663#endif
6664 napi->list_owner = -1;
6665 set_bit(NAPI_STATE_SCHED, &napi->state);
6666 set_bit(NAPI_STATE_NPSVC, &napi->state);
6667 list_add_rcu(&napi->dev_list, &dev->napi_list);
6668 napi_hash_add(napi);
6669 napi_get_frags_check(napi);
6670 /* Create a kthread for this napi if dev->threaded is set.
6671 * Clear dev->threaded if kthread creation fails so that
6672 * threaded mode will not be enabled in napi_enable().
6673 */
6674 if (dev->threaded && napi_kthread_create(napi))
6675 dev->threaded = false;
6676 netif_napi_set_irq(napi, -1);
6677}
6678EXPORT_SYMBOL(netif_napi_add_weight);
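
/*
 * Illustrative sketch (not part of the original file): typical
 * driver-side setup around netif_napi_add_weight(). Most drivers use
 * the netif_napi_add() wrapper, which passes the default
 * NAPI_POLL_WEIGHT; enabling usually happens later, in ndo_open. The
 * private struct and helper name are assumptions.
 */
struct example_priv {
 struct napi_struct napi;
};

static void __maybe_unused example_setup_napi(struct net_device *dev,
 struct example_priv *priv,
 int (*poll)(struct napi_struct *, int))
{
 netif_napi_add_weight(dev, &priv->napi, poll, NAPI_POLL_WEIGHT);
 napi_enable(&priv->napi); /* clears the SCHED/NPSVC bits set at add time */
}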
6679
6680void napi_disable(struct napi_struct *n)
6681{
6682 unsigned long val, new;
6683
6684 might_sleep();
6685 set_bit(NAPI_STATE_DISABLE, &n->state);
6686
6687 val = READ_ONCE(n->state);
6688 do {
6689 while (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) {
6690 usleep_range(20, 200);
6691 val = READ_ONCE(n->state);
6692 }
6693
6694 new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC;
6695 new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL);
6696 } while (!try_cmpxchg(&n->state, &val, new));
6697
6698 hrtimer_cancel(&n->timer);
6699
6700 clear_bit(NAPI_STATE_DISABLE, &n->state);
6701}
6702EXPORT_SYMBOL(napi_disable);
6703
6704/**
6705 * napi_enable - enable NAPI scheduling
6706 * @n: NAPI context
6707 *
6708 * Resume NAPI from being scheduled on this context.
6709 * Must be paired with napi_disable().
6710 */
6711void napi_enable(struct napi_struct *n)
6712{
6713 unsigned long new, val = READ_ONCE(n->state);
6714
6715 do {
6716 BUG_ON(!test_bit(NAPI_STATE_SCHED, &val));
6717
6718 new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC);
6719 if (n->dev->threaded && n->thread)
6720 new |= NAPIF_STATE_THREADED;
6721 } while (!try_cmpxchg(&n->state, &val, new));
6722}
6723EXPORT_SYMBOL(napi_enable);
6724
6725static void flush_gro_hash(struct napi_struct *napi)
6726{
6727 int i;
6728
6729 for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6730 struct sk_buff *skb, *n;
6731
6732 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
6733 kfree_skb(skb);
6734 napi->gro_hash[i].count = 0;
6735 }
6736}
6737
6738/* Must be called in process context */
6739void __netif_napi_del(struct napi_struct *napi)
6740{
6741 if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state))
6742 return;
6743
6744 napi_hash_del(napi);
6745 list_del_rcu(&napi->dev_list);
6746 napi_free_frags(napi);
6747
6748 flush_gro_hash(napi);
6749 napi->gro_bitmask = 0;
6750
6751 if (napi->thread) {
6752 kthread_stop(napi->thread);
6753 napi->thread = NULL;
6754 }
6755}
6756EXPORT_SYMBOL(__netif_napi_del);
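
/*
 * Illustrative sketch (not part of the original file): the teardown
 * side, pairing napi_disable() with netif_napi_del() (the wrapper
 * around __netif_napi_del() that also synchronizes RCU) before the
 * memory holding the napi_struct goes away. The helper name is an
 * assumption.
 */
static void __maybe_unused example_teardown_napi(struct napi_struct *napi)
{
 napi_disable(napi); /* waits for any in-flight poll to finish */
 netif_napi_del(napi);
}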
6757
6758static int __napi_poll(struct napi_struct *n, bool *repoll)
6759{
6760 int work, weight;
6761
6762 weight = n->weight;
6763
6764 /* This NAPI_STATE_SCHED test is for avoiding a race
6765 * with netpoll's poll_napi(). Only the entity which
6766 * obtains the lock and sees NAPI_STATE_SCHED set will
6767 * actually make the ->poll() call. Therefore we avoid
6768 * accidentally calling ->poll() when NAPI is not scheduled.
6769 */
6770 work = 0;
6771 if (napi_is_scheduled(n)) {
6772 work = n->poll(n, weight);
6773 trace_napi_poll(n, work, weight);
6774
6775 xdp_do_check_flushed(n);
6776 }
6777
6778 if (unlikely(work > weight))
6779 netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
6780 n->poll, work, weight);
6781
6782 if (likely(work < weight))
6783 return work;
6784
6785 /* Drivers must not modify the NAPI state if they
6786 * consume the entire weight. In such cases this code
6787 * still "owns" the NAPI instance and therefore can
6788 * move the instance around on the list at-will.
6789 */
6790 if (unlikely(napi_disable_pending(n))) {
6791 napi_complete(n);
6792 return work;
6793 }
6794
6795 /* The NAPI context has more processing work, but busy-polling
6796 * is preferred. Exit early.
6797 */
6798 if (napi_prefer_busy_poll(n)) {
6799 if (napi_complete_done(n, work)) {
6800 /* If timeout is not set, we need to make sure
6801 * that the NAPI is re-scheduled.
6802 */
6803 napi_schedule(n);
6804 }
6805 return work;
6806 }
6807
6808 if (n->gro_bitmask) {
6809 /* Flush packets that are too old.
6810 * If HZ < 1000, flush all packets.
6811 */
6812 napi_gro_flush(n, HZ >= 1000);
6813 }
6814
6815 gro_normal_list(n);
6816
6817 /* Some drivers may have called napi_schedule
6818 * prior to exhausting their budget.
6819 */
6820 if (unlikely(!list_empty(&n->poll_list))) {
6821 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
6822 n->dev ? n->dev->name : "backlog");
6823 return work;
6824 }
6825
6826 *repoll = true;
6827
6828 return work;
6829}
6830
6831static int napi_poll(struct napi_struct *n, struct list_head *repoll)
6832{
6833 bool do_repoll = false;
6834 void *have;
6835 int work;
6836
6837 list_del_init(&n->poll_list);
6838
6839 have = netpoll_poll_lock(n);
6840
6841 work = __napi_poll(n, &do_repoll);
6842
6843 if (do_repoll)
6844 list_add_tail(&n->poll_list, repoll);
6845
6846 netpoll_poll_unlock(have);
6847
6848 return work;
6849}
6850
6851static int napi_thread_wait(struct napi_struct *napi)
6852{
6853 set_current_state(TASK_INTERRUPTIBLE);
6854
6855 while (!kthread_should_stop()) {
6856 /* Test the SCHED_THREADED bit here to make sure the current
6857 * kthread owns this napi and can poll it.
6858 * Testing the SCHED bit is not enough because it might be
6859 * set by some other busy-poll thread or by napi_disable().
6860 */
6861 if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state)) {
6862 WARN_ON(!list_empty(&napi->poll_list));
6863 __set_current_state(TASK_RUNNING);
6864 return 0;
6865 }
6866
6867 schedule();
6868 set_current_state(TASK_INTERRUPTIBLE);
6869 }
6870 __set_current_state(TASK_RUNNING);
6871
6872 return -1;
6873}
6874
6875static void napi_threaded_poll_loop(struct napi_struct *napi)
6876{
6877 struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
6878 struct softnet_data *sd;
6879 unsigned long last_qs = jiffies;
6880
6881 for (;;) {
6882 bool repoll = false;
6883 void *have;
6884
6885 local_bh_disable();
6886 bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
6887
6888 sd = this_cpu_ptr(&softnet_data);
6889 sd->in_napi_threaded_poll = true;
6890
6891 have = netpoll_poll_lock(napi);
6892 __napi_poll(napi, &repoll);
6893 netpoll_poll_unlock(have);
6894
6895 sd->in_napi_threaded_poll = false;
6896 barrier();
6897
6898 if (sd_has_rps_ipi_waiting(sd)) {
6899 local_irq_disable();
6900 net_rps_action_and_irq_enable(sd);
6901 }
6902 skb_defer_free_flush(sd);
6903 bpf_net_ctx_clear(bpf_net_ctx);
6904 local_bh_enable();
6905
6906 if (!repoll)
6907 break;
6908
6909 rcu_softirq_qs_periodic(last_qs);
6910 cond_resched();
6911 }
6912}
6913
6914static int napi_threaded_poll(void *data)
6915{
6916 struct napi_struct *napi = data;
6917
6918 while (!napi_thread_wait(napi))
6919 napi_threaded_poll_loop(napi);
6920
6921 return 0;
6922}
6923
6924static __latent_entropy void net_rx_action(struct softirq_action *h)
6925{
6926 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
6927 unsigned long time_limit = jiffies +
6928 usecs_to_jiffies(READ_ONCE(net_hotdata.netdev_budget_usecs));
6929 struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
6930 int budget = READ_ONCE(net_hotdata.netdev_budget);
6931 LIST_HEAD(list);
6932 LIST_HEAD(repoll);
6933
6934 bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
6935start:
6936 sd->in_net_rx_action = true;
6937 local_irq_disable();
6938 list_splice_init(&sd->poll_list, &list);
6939 local_irq_enable();
6940
6941 for (;;) {
6942 struct napi_struct *n;
6943
6944 skb_defer_free_flush(sd);
6945
6946 if (list_empty(&list)) {
6947 if (list_empty(&repoll)) {
6948 sd->in_net_rx_action = false;
6949 barrier();
6950 /* We need to check if ____napi_schedule()
6951 * had refilled poll_list while
6952 * sd->in_net_rx_action was true.
6953 */
6954 if (!list_empty(&sd->poll_list))
6955 goto start;
6956 if (!sd_has_rps_ipi_waiting(sd))
6957 goto end;
6958 }
6959 break;
6960 }
6961
6962 n = list_first_entry(&list, struct napi_struct, poll_list);
6963 budget -= napi_poll(n, &repoll);
6964
6965 /* If the softirq window is exhausted then punt.
6966 * Allow this to run for 2 jiffies, which allows
6967 * an average latency of 1.5/HZ.
6968 */
6969 if (unlikely(budget <= 0 ||
6970 time_after_eq(jiffies, time_limit))) {
6971 sd->time_squeeze++;
6972 break;
6973 }
6974 }
6975
6976 local_irq_disable();
6977
6978 list_splice_tail_init(&sd->poll_list, &list);
6979 list_splice_tail(&repoll, &list);
6980 list_splice(&list, &sd->poll_list);
6981 if (!list_empty(&sd->poll_list))
6982 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
6983 else
6984 sd->in_net_rx_action = false;
6985
6986 net_rps_action_and_irq_enable(sd);
6987end:
6988 bpf_net_ctx_clear(bpf_net_ctx);
6989}
6990
6991struct netdev_adjacent {
6992 struct net_device *dev;
6993 netdevice_tracker dev_tracker;
6994
6995 /* upper master flag, there can only be one master device per list */
6996 bool master;
6997
6998 /* lookup ignore flag */
6999 bool ignore;
7000
7001 /* counter for the number of times this device was added to us */
7002 u16 ref_nr;
7003
7004 /* private field for the users */
7005 void *private;
7006
7007 struct list_head list;
7008 struct rcu_head rcu;
7009};
7010
7011static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
7012 struct list_head *adj_list)
7013{
7014 struct netdev_adjacent *adj;
7015
7016 list_for_each_entry(adj, adj_list, list) {
7017 if (adj->dev == adj_dev)
7018 return adj;
7019 }
7020 return NULL;
7021}
7022
7023static int ____netdev_has_upper_dev(struct net_device *upper_dev,
7024 struct netdev_nested_priv *priv)
7025{
7026 struct net_device *dev = (struct net_device *)priv->data;
7027
7028 return upper_dev == dev;
7029}
7030
7031/**
7032 * netdev_has_upper_dev - Check if device is linked to an upper device
7033 * @dev: device
7034 * @upper_dev: upper device to check
7035 *
7036 * Find out if a device is linked to the specified upper device and return
7037 * true in case it is. Note that this checks only the immediate upper device,
7038 * not the complete stack of devices. The caller must hold the RTNL lock.
7039 */
7040bool netdev_has_upper_dev(struct net_device *dev,
7041 struct net_device *upper_dev)
7042{
7043 struct netdev_nested_priv priv = {
7044 .data = (void *)upper_dev,
7045 };
7046
7047 ASSERT_RTNL();
7048
7049 return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
7050 &priv);
7051}
7052EXPORT_SYMBOL(netdev_has_upper_dev);
7053
7054/**
7055 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
7056 * @dev: device
7057 * @upper_dev: upper device to check
7058 *
7059 * Find out if a device is linked to the specified upper device and return
7060 * true in case it is. Note that this checks the entire upper device chain.
7061 * The caller must hold the RCU read lock.
7062 */
7063
7064bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
7065 struct net_device *upper_dev)
7066{
7067 struct netdev_nested_priv priv = {
7068 .data = (void *)upper_dev,
7069 };
7070
7071 return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
7072 &priv);
7073}
7074EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
7075
7076/**
7077 * netdev_has_any_upper_dev - Check if device is linked to some device
7078 * @dev: device
7079 *
7080 * Find out if a device is linked to an upper device and return true in case
7081 * it is. The caller must hold the RTNL lock.
7082 */
7083bool netdev_has_any_upper_dev(struct net_device *dev)
7084{
7085 ASSERT_RTNL();
7086
7087 return !list_empty(&dev->adj_list.upper);
7088}
7089EXPORT_SYMBOL(netdev_has_any_upper_dev);
7090
7091/**
7092 * netdev_master_upper_dev_get - Get master upper device
7093 * @dev: device
7094 *
7095 * Find a master upper device and return pointer to it or NULL in case
7096 * it's not there. The caller must hold the RTNL lock.
7097 */
7098struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
7099{
7100 struct netdev_adjacent *upper;
7101
7102 ASSERT_RTNL();
7103
7104 if (list_empty(&dev->adj_list.upper))
7105 return NULL;
7106
7107 upper = list_first_entry(&dev->adj_list.upper,
7108 struct netdev_adjacent, list);
7109 if (likely(upper->master))
7110 return upper->dev;
7111 return NULL;
7112}
7113EXPORT_SYMBOL(netdev_master_upper_dev_get);
7114
7115static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
7116{
7117 struct netdev_adjacent *upper;
7118
7119 ASSERT_RTNL();
7120
7121 if (list_empty(&dev->adj_list.upper))
7122 return NULL;
7123
7124 upper = list_first_entry(&dev->adj_list.upper,
7125 struct netdev_adjacent, list);
7126 if (likely(upper->master) && !upper->ignore)
7127 return upper->dev;
7128 return NULL;
7129}
7130
7131/**
7132 * netdev_has_any_lower_dev - Check if device is linked to some device
7133 * @dev: device
7134 *
7135 * Find out if a device is linked to a lower device and return true in case
7136 * it is. The caller must hold the RTNL lock.
7137 */
7138static bool netdev_has_any_lower_dev(struct net_device *dev)
7139{
7140 ASSERT_RTNL();
7141
7142 return !list_empty(&dev->adj_list.lower);
7143}
7144
7145void *netdev_adjacent_get_private(struct list_head *adj_list)
7146{
7147 struct netdev_adjacent *adj;
7148
7149 adj = list_entry(adj_list, struct netdev_adjacent, list);
7150
7151 return adj->private;
7152}
7153EXPORT_SYMBOL(netdev_adjacent_get_private);
7154
7155/**
7156 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
7157 * @dev: device
7158 * @iter: list_head ** of the current position
7159 *
7160 * Gets the next device from the dev's upper list, starting from iter
7161 * position. The caller must hold RCU read lock.
7162 */
7163struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
7164 struct list_head **iter)
7165{
7166 struct netdev_adjacent *upper;
7167
7168 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
7169
7170 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7171
7172 if (&upper->list == &dev->adj_list.upper)
7173 return NULL;
7174
7175 *iter = &upper->list;
7176
7177 return upper->dev;
7178}
7179EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
7180
7181static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
7182 struct list_head **iter,
7183 bool *ignore)
7184{
7185 struct netdev_adjacent *upper;
7186
7187 upper = list_entry((*iter)->next, struct netdev_adjacent, list);
7188
7189 if (&upper->list == &dev->adj_list.upper)
7190 return NULL;
7191
7192 *iter = &upper->list;
7193 *ignore = upper->ignore;
7194
7195 return upper->dev;
7196}
7197
7198static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
7199 struct list_head **iter)
7200{
7201 struct netdev_adjacent *upper;
7202
7203 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
7204
7205 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7206
7207 if (&upper->list == &dev->adj_list.upper)
7208 return NULL;
7209
7210 *iter = &upper->list;
7211
7212 return upper->dev;
7213}
7214
7215static int __netdev_walk_all_upper_dev(struct net_device *dev,
7216 int (*fn)(struct net_device *dev,
7217 struct netdev_nested_priv *priv),
7218 struct netdev_nested_priv *priv)
7219{
7220 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7221 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7222 int ret, cur = 0;
7223 bool ignore;
7224
7225 now = dev;
7226 iter = &dev->adj_list.upper;
7227
7228 while (1) {
7229 if (now != dev) {
7230 ret = fn(now, priv);
7231 if (ret)
7232 return ret;
7233 }
7234
7235 next = NULL;
7236 while (1) {
7237 udev = __netdev_next_upper_dev(now, &iter, &ignore);
7238 if (!udev)
7239 break;
7240 if (ignore)
7241 continue;
7242
7243 next = udev;
7244 niter = &udev->adj_list.upper;
7245 dev_stack[cur] = now;
7246 iter_stack[cur++] = iter;
7247 break;
7248 }
7249
7250 if (!next) {
7251 if (!cur)
7252 return 0;
7253 next = dev_stack[--cur];
7254 niter = iter_stack[cur];
7255 }
7256
7257 now = next;
7258 iter = niter;
7259 }
7260
7261 return 0;
7262}
7263
7264int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
7265 int (*fn)(struct net_device *dev,
7266 struct netdev_nested_priv *priv),
7267 struct netdev_nested_priv *priv)
7268{
7269 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7270 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7271 int ret, cur = 0;
7272
7273 now = dev;
7274 iter = &dev->adj_list.upper;
7275
7276 while (1) {
7277 if (now != dev) {
7278 ret = fn(now, priv);
7279 if (ret)
7280 return ret;
7281 }
7282
7283 next = NULL;
7284 while (1) {
7285 udev = netdev_next_upper_dev_rcu(now, &iter);
7286 if (!udev)
7287 break;
7288
7289 next = udev;
7290 niter = &udev->adj_list.upper;
7291 dev_stack[cur] = now;
7292 iter_stack[cur++] = iter;
7293 break;
7294 }
7295
7296 if (!next) {
7297 if (!cur)
7298 return 0;
7299 next = dev_stack[--cur];
7300 niter = iter_stack[cur];
7301 }
7302
7303 now = next;
7304 iter = niter;
7305 }
7306
7307 return 0;
7308}
7309EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
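
/*
 * Illustrative sketch (not part of the original file): a walker
 * callback in the style of ____netdev_has_upper_dev() above, here
 * counting every device stacked above @dev. Returning 0 keeps the walk
 * going; any non-zero value stops it and is propagated to the caller.
 * Both helper names are assumptions.
 */
static int example_count_upper(struct net_device *dev,
 struct netdev_nested_priv *priv)
{
 (*(int *)priv->data)++;
 return 0;
}

static int __maybe_unused example_upper_count(struct net_device *dev)
{
 int count = 0;
 struct netdev_nested_priv priv = {
 .data = &count,
 };

 rcu_read_lock();
 netdev_walk_all_upper_dev_rcu(dev, example_count_upper, &priv);
 rcu_read_unlock();
 return count;
}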
7310
7311static bool __netdev_has_upper_dev(struct net_device *dev,
7312 struct net_device *upper_dev)
7313{
7314 struct netdev_nested_priv priv = {
7315 .flags = 0,
7316 .data = (void *)upper_dev,
7317 };
7318
7319 ASSERT_RTNL();
7320
7321 return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
7322 &priv);
7323}
7324
7325/**
7326 * netdev_lower_get_next_private - Get the next ->private from the
7327 * lower neighbour list
7328 * @dev: device
7329 * @iter: list_head ** of the current position
7330 *
7331 * Gets the next netdev_adjacent->private from the dev's lower neighbour
7332 * list, starting from iter position. The caller must either hold the
7333 * RTNL lock or use its own locking that guarantees that the neighbour
7334 * lower list will remain unchanged.
7335 */
7336void *netdev_lower_get_next_private(struct net_device *dev,
7337 struct list_head **iter)
7338{
7339 struct netdev_adjacent *lower;
7340
7341 lower = list_entry(*iter, struct netdev_adjacent, list);
7342
7343 if (&lower->list == &dev->adj_list.lower)
7344 return NULL;
7345
7346 *iter = lower->list.next;
7347
7348 return lower->private;
7349}
7350EXPORT_SYMBOL(netdev_lower_get_next_private);
7351
7352/**
7353 * netdev_lower_get_next_private_rcu - Get the next ->private from the
7354 * lower neighbour list, RCU
7355 * variant
7356 * @dev: device
7357 * @iter: list_head ** of the current position
7358 *
7359 * Gets the next netdev_adjacent->private from the dev's lower neighbour
7360 * list, starting from iter position. The caller must hold RCU read lock.
7361 */
7362void *netdev_lower_get_next_private_rcu(struct net_device *dev,
7363 struct list_head **iter)
7364{
7365 struct netdev_adjacent *lower;
7366
7367 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
7368
7369 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7370
7371 if (&lower->list == &dev->adj_list.lower)
7372 return NULL;
7373
7374 *iter = &lower->list;
7375
7376 return lower->private;
7377}
7378EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
7379
7380/**
7381 * netdev_lower_get_next - Get the next device from the lower neighbour
7382 * list
7383 * @dev: device
7384 * @iter: list_head ** of the current position
7385 *
7386 * Gets the next netdev_adjacent from the dev's lower neighbour
7387 * list, starting from iter position. The caller must hold the RTNL lock or
7388 * its own locking that guarantees that the neighbour lower
7389 * list will remain unchanged.
7390 */
7391void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
7392{
7393 struct netdev_adjacent *lower;
7394
7395 lower = list_entry(*iter, struct netdev_adjacent, list);
7396
7397 if (&lower->list == &dev->adj_list.lower)
7398 return NULL;
7399
7400 *iter = lower->list.next;
7401
7402 return lower->dev;
7403}
7404EXPORT_SYMBOL(netdev_lower_get_next);
7405
7406static struct net_device *netdev_next_lower_dev(struct net_device *dev,
7407 struct list_head **iter)
7408{
7409 struct netdev_adjacent *lower;
7410
7411 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7412
7413 if (&lower->list == &dev->adj_list.lower)
7414 return NULL;
7415
7416 *iter = &lower->list;
7417
7418 return lower->dev;
7419}
7420
7421static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
7422 struct list_head **iter,
7423 bool *ignore)
7424{
7425 struct netdev_adjacent *lower;
7426
7427 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7428
7429 if (&lower->list == &dev->adj_list.lower)
7430 return NULL;
7431
7432 *iter = &lower->list;
7433 *ignore = lower->ignore;
7434
7435 return lower->dev;
7436}
7437
7438int netdev_walk_all_lower_dev(struct net_device *dev,
7439 int (*fn)(struct net_device *dev,
7440 struct netdev_nested_priv *priv),
7441 struct netdev_nested_priv *priv)
7442{
7443 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7444 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7445 int ret, cur = 0;
7446
7447 now = dev;
7448 iter = &dev->adj_list.lower;
7449
7450 while (1) {
7451 if (now != dev) {
7452 ret = fn(now, priv);
7453 if (ret)
7454 return ret;
7455 }
7456
7457 next = NULL;
7458 while (1) {
7459 ldev = netdev_next_lower_dev(now, &iter);
7460 if (!ldev)
7461 break;
7462
7463 next = ldev;
7464 niter = &ldev->adj_list.lower;
7465 dev_stack[cur] = now;
7466 iter_stack[cur++] = iter;
7467 break;
7468 }
7469
7470 if (!next) {
7471 if (!cur)
7472 return 0;
7473 next = dev_stack[--cur];
7474 niter = iter_stack[cur];
7475 }
7476
7477 now = next;
7478 iter = niter;
7479 }
7480
7481 return 0;
7482}
7483EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
7484
7485static int __netdev_walk_all_lower_dev(struct net_device *dev,
7486 int (*fn)(struct net_device *dev,
7487 struct netdev_nested_priv *priv),
7488 struct netdev_nested_priv *priv)
7489{
7490 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7491 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7492 int ret, cur = 0;
7493 bool ignore;
7494
7495 now = dev;
7496 iter = &dev->adj_list.lower;
7497
7498 while (1) {
7499 if (now != dev) {
7500 ret = fn(now, priv);
7501 if (ret)
7502 return ret;
7503 }
7504
7505 next = NULL;
7506 while (1) {
7507 ldev = __netdev_next_lower_dev(now, &iter, &ignore);
7508 if (!ldev)
7509 break;
7510 if (ignore)
7511 continue;
7512
7513 next = ldev;
7514 niter = &ldev->adj_list.lower;
7515 dev_stack[cur] = now;
7516 iter_stack[cur++] = iter;
7517 break;
7518 }
7519
7520 if (!next) {
7521 if (!cur)
7522 return 0;
7523 next = dev_stack[--cur];
7524 niter = iter_stack[cur];
7525 }
7526
7527 now = next;
7528 iter = niter;
7529 }
7530
7531 return 0;
7532}
7533
7534struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
7535 struct list_head **iter)
7536{
7537 struct netdev_adjacent *lower;
7538
7539 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7540 if (&lower->list == &dev->adj_list.lower)
7541 return NULL;
7542
7543 *iter = &lower->list;
7544
7545 return lower->dev;
7546}
7547EXPORT_SYMBOL(netdev_next_lower_dev_rcu);
7548
7549static u8 __netdev_upper_depth(struct net_device *dev)
7550{
7551 struct net_device *udev;
7552 struct list_head *iter;
7553 u8 max_depth = 0;
7554 bool ignore;
7555
7556 for (iter = &dev->adj_list.upper,
7557 udev = __netdev_next_upper_dev(dev, &iter, &ignore);
7558 udev;
7559 udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
7560 if (ignore)
7561 continue;
7562 if (max_depth < udev->upper_level)
7563 max_depth = udev->upper_level;
7564 }
7565
7566 return max_depth;
7567}
7568
7569static u8 __netdev_lower_depth(struct net_device *dev)
7570{
7571 struct net_device *ldev;
7572 struct list_head *iter;
7573 u8 max_depth = 0;
7574 bool ignore;
7575
7576 for (iter = &dev->adj_list.lower,
7577 ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
7578 ldev;
7579 ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
7580 if (ignore)
7581 continue;
7582 if (max_depth < ldev->lower_level)
7583 max_depth = ldev->lower_level;
7584 }
7585
7586 return max_depth;
7587}
7588
7589static int __netdev_update_upper_level(struct net_device *dev,
7590 struct netdev_nested_priv *__unused)
7591{
7592 dev->upper_level = __netdev_upper_depth(dev) + 1;
7593 return 0;
7594}
7595
7596#ifdef CONFIG_LOCKDEP
7597static LIST_HEAD(net_unlink_list);
7598
7599static void net_unlink_todo(struct net_device *dev)
7600{
7601 if (list_empty(&dev->unlink_list))
7602 list_add_tail(&dev->unlink_list, &net_unlink_list);
7603}
7604#endif
7605
7606static int __netdev_update_lower_level(struct net_device *dev,
7607 struct netdev_nested_priv *priv)
7608{
7609 dev->lower_level = __netdev_lower_depth(dev) + 1;
7610
7611#ifdef CONFIG_LOCKDEP
7612 if (!priv)
7613 return 0;
7614
7615 if (priv->flags & NESTED_SYNC_IMM)
7616 dev->nested_level = dev->lower_level - 1;
7617 if (priv->flags & NESTED_SYNC_TODO)
7618 net_unlink_todo(dev);
7619#endif
7620 return 0;
7621}
7622
7623int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
7624 int (*fn)(struct net_device *dev,
7625 struct netdev_nested_priv *priv),
7626 struct netdev_nested_priv *priv)
7627{
7628 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7629 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7630 int ret, cur = 0;
7631
7632 now = dev;
7633 iter = &dev->adj_list.lower;
7634
7635 while (1) {
7636 if (now != dev) {
7637 ret = fn(now, priv);
7638 if (ret)
7639 return ret;
7640 }
7641
7642 next = NULL;
7643 while (1) {
7644 ldev = netdev_next_lower_dev_rcu(now, &iter);
7645 if (!ldev)
7646 break;
7647
7648 next = ldev;
7649 niter = &ldev->adj_list.lower;
7650 dev_stack[cur] = now;
7651 iter_stack[cur++] = iter;
7652 break;
7653 }
7654
7655 if (!next) {
7656 if (!cur)
7657 return 0;
7658 next = dev_stack[--cur];
7659 niter = iter_stack[cur];
7660 }
7661
7662 now = next;
7663 iter = niter;
7664 }
7665
7666 return 0;
7667}
7668EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
7669
7670/**
7671 * netdev_lower_get_first_private_rcu - Get the first ->private from the
7672 * lower neighbour list, RCU
7673 * variant
7674 * @dev: device
7675 *
7676 * Gets the first netdev_adjacent->private from the dev's lower neighbour
7677 * list. The caller must hold RCU read lock.
7678 */
7679void *netdev_lower_get_first_private_rcu(struct net_device *dev)
7680{
7681 struct netdev_adjacent *lower;
7682
7683 lower = list_first_or_null_rcu(&dev->adj_list.lower,
7684 struct netdev_adjacent, list);
7685 if (lower)
7686 return lower->private;
7687 return NULL;
7688}
7689EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
7690
7691/**
7692 * netdev_master_upper_dev_get_rcu - Get master upper device
7693 * @dev: device
7694 *
7695 * Find a master upper device and return pointer to it or NULL in case
7696 * it's not there. The caller must hold the RCU read lock.
7697 */
7698struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
7699{
7700 struct netdev_adjacent *upper;
7701
7702 upper = list_first_or_null_rcu(&dev->adj_list.upper,
7703 struct netdev_adjacent, list);
7704 if (upper && likely(upper->master))
7705 return upper->dev;
7706 return NULL;
7707}
7708EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
7709
7710static int netdev_adjacent_sysfs_add(struct net_device *dev,
7711 struct net_device *adj_dev,
7712 struct list_head *dev_list)
7713{
7714 char linkname[IFNAMSIZ+7];
7715
7716 sprintf(linkname, dev_list == &dev->adj_list.upper ?
7717 "upper_%s" : "lower_%s", adj_dev->name);
7718 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
7719 linkname);
7720}
7721static void netdev_adjacent_sysfs_del(struct net_device *dev,
7722 char *name,
7723 struct list_head *dev_list)
7724{
7725 char linkname[IFNAMSIZ+7];
7726
7727 sprintf(linkname, dev_list == &dev->adj_list.upper ?
7728 "upper_%s" : "lower_%s", name);
7729 sysfs_remove_link(&(dev->dev.kobj), linkname);
7730}
7731
7732static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
7733 struct net_device *adj_dev,
7734 struct list_head *dev_list)
7735{
7736 return (dev_list == &dev->adj_list.upper ||
7737 dev_list == &dev->adj_list.lower) &&
7738 net_eq(dev_net(dev), dev_net(adj_dev));
7739}
7740
7741static int __netdev_adjacent_dev_insert(struct net_device *dev,
7742 struct net_device *adj_dev,
7743 struct list_head *dev_list,
7744 void *private, bool master)
7745{
7746 struct netdev_adjacent *adj;
7747 int ret;
7748
7749 adj = __netdev_find_adj(adj_dev, dev_list);
7750
7751 if (adj) {
7752 adj->ref_nr += 1;
7753 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
7754 dev->name, adj_dev->name, adj->ref_nr);
7755
7756 return 0;
7757 }
7758
7759 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
7760 if (!adj)
7761 return -ENOMEM;
7762
7763 adj->dev = adj_dev;
7764 adj->master = master;
7765 adj->ref_nr = 1;
7766 adj->private = private;
7767 adj->ignore = false;
7768 netdev_hold(adj_dev, &adj->dev_tracker, GFP_KERNEL);
7769
7770 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
7771 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
7772
7773 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
7774 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
7775 if (ret)
7776 goto free_adj;
7777 }
7778
7779 /* Ensure that the master link is always the first item in the list. */
7780 if (master) {
7781 ret = sysfs_create_link(&(dev->dev.kobj),
7782 &(adj_dev->dev.kobj), "master");
7783 if (ret)
7784 goto remove_symlinks;
7785
7786 list_add_rcu(&adj->list, dev_list);
7787 } else {
7788 list_add_tail_rcu(&adj->list, dev_list);
7789 }
7790
7791 return 0;
7792
7793remove_symlinks:
7794 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
7795 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
7796free_adj:
7797 netdev_put(adj_dev, &adj->dev_tracker);
7798 kfree(adj);
7799
7800 return ret;
7801}
7802
7803static void __netdev_adjacent_dev_remove(struct net_device *dev,
7804 struct net_device *adj_dev,
7805 u16 ref_nr,
7806 struct list_head *dev_list)
7807{
7808 struct netdev_adjacent *adj;
7809
7810 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
7811 dev->name, adj_dev->name, ref_nr);
7812
7813 adj = __netdev_find_adj(adj_dev, dev_list);
7814
7815 if (!adj) {
7816 pr_err("Adjacency does not exist for device %s from %s\n",
7817 dev->name, adj_dev->name);
7818 WARN_ON(1);
7819 return;
7820 }
7821
7822 if (adj->ref_nr > ref_nr) {
7823 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
7824 dev->name, adj_dev->name, ref_nr,
7825 adj->ref_nr - ref_nr);
7826 adj->ref_nr -= ref_nr;
7827 return;
7828 }
7829
7830 if (adj->master)
7831 sysfs_remove_link(&(dev->dev.kobj), "master");
7832
7833 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
7834 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
7835
7836 list_del_rcu(&adj->list);
7837 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
7838 adj_dev->name, dev->name, adj_dev->name);
7839 netdev_put(adj_dev, &adj->dev_tracker);
7840 kfree_rcu(adj, rcu);
7841}
7842
7843static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
7844 struct net_device *upper_dev,
7845 struct list_head *up_list,
7846 struct list_head *down_list,
7847 void *private, bool master)
7848{
7849 int ret;
7850
7851 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
7852 private, master);
7853 if (ret)
7854 return ret;
7855
7856 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
7857 private, false);
7858 if (ret) {
7859 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
7860 return ret;
7861 }
7862
7863 return 0;
7864}
7865
7866static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
7867 struct net_device *upper_dev,
7868 u16 ref_nr,
7869 struct list_head *up_list,
7870 struct list_head *down_list)
7871{
7872 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
7873 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
7874}
7875
7876static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
7877 struct net_device *upper_dev,
7878 void *private, bool master)
7879{
7880 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
7881 &dev->adj_list.upper,
7882 &upper_dev->adj_list.lower,
7883 private, master);
7884}
7885
7886static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
7887 struct net_device *upper_dev)
7888{
7889 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
7890 &dev->adj_list.upper,
7891 &upper_dev->adj_list.lower);
7892}
7893
7894static int __netdev_upper_dev_link(struct net_device *dev,
7895 struct net_device *upper_dev, bool master,
7896 void *upper_priv, void *upper_info,
7897 struct netdev_nested_priv *priv,
7898 struct netlink_ext_ack *extack)
7899{
7900 struct netdev_notifier_changeupper_info changeupper_info = {
7901 .info = {
7902 .dev = dev,
7903 .extack = extack,
7904 },
7905 .upper_dev = upper_dev,
7906 .master = master,
7907 .linking = true,
7908 .upper_info = upper_info,
7909 };
7910 struct net_device *master_dev;
7911 int ret = 0;
7912
7913 ASSERT_RTNL();
7914
7915 if (dev == upper_dev)
7916 return -EBUSY;
7917
7918 /* To prevent loops, check that dev is not already an upper device of upper_dev. */
7919 if (__netdev_has_upper_dev(upper_dev, dev))
7920 return -EBUSY;
7921
7922 if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
7923 return -EMLINK;
7924
7925 if (!master) {
7926 if (__netdev_has_upper_dev(dev, upper_dev))
7927 return -EEXIST;
7928 } else {
7929 master_dev = __netdev_master_upper_dev_get(dev);
7930 if (master_dev)
7931 return master_dev == upper_dev ? -EEXIST : -EBUSY;
7932 }
7933
7934 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
7935 &changeupper_info.info);
7936 ret = notifier_to_errno(ret);
7937 if (ret)
7938 return ret;
7939
7940 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
7941 master);
7942 if (ret)
7943 return ret;
7944
7945 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
7946 &changeupper_info.info);
7947 ret = notifier_to_errno(ret);
7948 if (ret)
7949 goto rollback;
7950
7951 __netdev_update_upper_level(dev, NULL);
7952 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
7953
7954 __netdev_update_lower_level(upper_dev, priv);
7955 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
7956 priv);
7957
7958 return 0;
7959
7960rollback:
7961 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7962
7963 return ret;
7964}
7965
7966/**
7967 * netdev_upper_dev_link - Add a link to the upper device
7968 * @dev: device
7969 * @upper_dev: new upper device
7970 * @extack: netlink extended ack
7971 *
7972 * Adds a link to a device which is upper to this one. The caller must hold
7973 * the RTNL lock. On a failure a negative errno code is returned.
7974 * On success the reference counts are adjusted and the function
7975 * returns zero.
7976 */
7977int netdev_upper_dev_link(struct net_device *dev,
7978 struct net_device *upper_dev,
7979 struct netlink_ext_ack *extack)
7980{
7981 struct netdev_nested_priv priv = {
7982 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
7983 .data = NULL,
7984 };
7985
7986 return __netdev_upper_dev_link(dev, upper_dev, false,
7987 NULL, NULL, &priv, extack);
7988}
7989EXPORT_SYMBOL(netdev_upper_dev_link);
7990
7991/**
7992 * netdev_master_upper_dev_link - Add a master link to the upper device
7993 * @dev: device
7994 * @upper_dev: new upper device
7995 * @upper_priv: upper device private
7996 * @upper_info: upper info to be passed down via notifier
7997 * @extack: netlink extended ack
7998 *
7999 * Adds a link to a device which is upper to this one. In this case, only
8000 * one master upper device can be linked, although other non-master devices
8001 * might be linked as well. The caller must hold the RTNL lock.
8002 * On a failure a negative errno code is returned. On success the reference
8003 * counts are adjusted and the function returns zero.
8004 */
8005int netdev_master_upper_dev_link(struct net_device *dev,
8006 struct net_device *upper_dev,
8007 void *upper_priv, void *upper_info,
8008 struct netlink_ext_ack *extack)
8009{
8010 struct netdev_nested_priv priv = {
8011 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
8012 .data = NULL,
8013 };
8014
8015 return __netdev_upper_dev_link(dev, upper_dev, true,
8016 upper_priv, upper_info, &priv, extack);
8017}
8018EXPORT_SYMBOL(netdev_master_upper_dev_link);
8019
8020static void __netdev_upper_dev_unlink(struct net_device *dev,
8021 struct net_device *upper_dev,
8022 struct netdev_nested_priv *priv)
8023{
8024 struct netdev_notifier_changeupper_info changeupper_info = {
8025 .info = {
8026 .dev = dev,
8027 },
8028 .upper_dev = upper_dev,
8029 .linking = false,
8030 };
8031
8032 ASSERT_RTNL();
8033
8034 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
8035
8036 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
8037 &changeupper_info.info);
8038
8039 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
8040
8041 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
8042 &changeupper_info.info);
8043
8044 __netdev_update_upper_level(dev, NULL);
8045 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
8046
8047 __netdev_update_lower_level(upper_dev, priv);
8048 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
8049 priv);
8050}
8051
8052/**
8053 * netdev_upper_dev_unlink - Removes a link to an upper device
8054 * @dev: device
8055 * @upper_dev: upper device to unlink
8056 *
8057 * Removes a link to a device which is upper to this one. The caller must hold
8058 * the RTNL lock.
8059 */
8060void netdev_upper_dev_unlink(struct net_device *dev,
8061 struct net_device *upper_dev)
8062{
8063 struct netdev_nested_priv priv = {
8064 .flags = NESTED_SYNC_TODO,
8065 .data = NULL,
8066 };
8067
8068 __netdev_upper_dev_unlink(dev, upper_dev, &priv);
8069}
8070EXPORT_SYMBOL(netdev_upper_dev_unlink);
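
/*
 * Illustrative sketch (not part of this file): a virtual device driver
 * stacking itself on top of a lower device would pair the two calls above
 * under RTNL. "vdev" and "lower" are hypothetical net_device pointers.
 *
 *	static int vdev_bind(struct net_device *vdev, struct net_device *lower,
 *			     struct netlink_ext_ack *extack)
 *	{
 *		ASSERT_RTNL();
 *		return netdev_upper_dev_link(lower, vdev, extack);
 *	}
 *
 *	static void vdev_unbind(struct net_device *vdev, struct net_device *lower)
 *	{
 *		ASSERT_RTNL();
 *		netdev_upper_dev_unlink(lower, vdev);
 *	}
 */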
8071
8072static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
8073 struct net_device *lower_dev,
8074 bool val)
8075{
8076 struct netdev_adjacent *adj;
8077
8078 adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
8079 if (adj)
8080 adj->ignore = val;
8081
8082 adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
8083 if (adj)
8084 adj->ignore = val;
8085}
8086
8087static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
8088 struct net_device *lower_dev)
8089{
8090 __netdev_adjacent_dev_set(upper_dev, lower_dev, true);
8091}
8092
8093static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
8094 struct net_device *lower_dev)
8095{
8096 __netdev_adjacent_dev_set(upper_dev, lower_dev, false);
8097}
8098
8099int netdev_adjacent_change_prepare(struct net_device *old_dev,
8100 struct net_device *new_dev,
8101 struct net_device *dev,
8102 struct netlink_ext_ack *extack)
8103{
8104 struct netdev_nested_priv priv = {
8105 .flags = 0,
8106 .data = NULL,
8107 };
8108 int err;
8109
8110 if (!new_dev)
8111 return 0;
8112
8113 if (old_dev && new_dev != old_dev)
8114 netdev_adjacent_dev_disable(dev, old_dev);
8115 err = __netdev_upper_dev_link(new_dev, dev, false, NULL, NULL, &priv,
8116 extack);
8117 if (err) {
8118 if (old_dev && new_dev != old_dev)
8119 netdev_adjacent_dev_enable(dev, old_dev);
8120 return err;
8121 }
8122
8123 return 0;
8124}
8125EXPORT_SYMBOL(netdev_adjacent_change_prepare);
8126
8127void netdev_adjacent_change_commit(struct net_device *old_dev,
8128 struct net_device *new_dev,
8129 struct net_device *dev)
8130{
8131 struct netdev_nested_priv priv = {
8132 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
8133 .data = NULL,
8134 };
8135
8136 if (!new_dev || !old_dev)
8137 return;
8138
8139 if (new_dev == old_dev)
8140 return;
8141
8142 netdev_adjacent_dev_enable(dev, old_dev);
8143 __netdev_upper_dev_unlink(old_dev, dev, &priv);
8144}
8145EXPORT_SYMBOL(netdev_adjacent_change_commit);
8146
8147void netdev_adjacent_change_abort(struct net_device *old_dev,
8148 struct net_device *new_dev,
8149 struct net_device *dev)
8150{
8151 struct netdev_nested_priv priv = {
8152 .flags = 0,
8153 .data = NULL,
8154 };
8155
8156 if (!new_dev)
8157 return;
8158
8159 if (old_dev && new_dev != old_dev)
8160 netdev_adjacent_dev_enable(dev, old_dev);
8161
8162 __netdev_upper_dev_unlink(new_dev, dev, &priv);
8163}
8164EXPORT_SYMBOL(netdev_adjacent_change_abort);
8165
8166/**
8167 * netdev_bonding_info_change - Dispatch event about slave change
8168 * @dev: device
8169 * @bonding_info: info to dispatch
8170 *
8171 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
8172 * The caller must hold the RTNL lock.
8173 */
8174void netdev_bonding_info_change(struct net_device *dev,
8175 struct netdev_bonding_info *bonding_info)
8176{
8177 struct netdev_notifier_bonding_info info = {
8178 .info.dev = dev,
8179 };
8180
8181 memcpy(&info.bonding_info, bonding_info,
8182 sizeof(struct netdev_bonding_info));
8183 call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
8184 &info.info);
8185}
8186EXPORT_SYMBOL(netdev_bonding_info_change);
8187
8188static int netdev_offload_xstats_enable_l3(struct net_device *dev,
8189 struct netlink_ext_ack *extack)
8190{
8191 struct netdev_notifier_offload_xstats_info info = {
8192 .info.dev = dev,
8193 .info.extack = extack,
8194 .type = NETDEV_OFFLOAD_XSTATS_TYPE_L3,
8195 };
8196 int err;
8197 int rc;
8198
8199 dev->offload_xstats_l3 = kzalloc(sizeof(*dev->offload_xstats_l3),
8200 GFP_KERNEL);
8201 if (!dev->offload_xstats_l3)
8202 return -ENOMEM;
8203
8204 rc = call_netdevice_notifiers_info_robust(NETDEV_OFFLOAD_XSTATS_ENABLE,
8205 NETDEV_OFFLOAD_XSTATS_DISABLE,
8206 &info.info);
8207 err = notifier_to_errno(rc);
8208 if (err)
8209 goto free_stats;
8210
8211 return 0;
8212
8213free_stats:
8214 kfree(dev->offload_xstats_l3);
8215 dev->offload_xstats_l3 = NULL;
8216 return err;
8217}
8218
8219int netdev_offload_xstats_enable(struct net_device *dev,
8220 enum netdev_offload_xstats_type type,
8221 struct netlink_ext_ack *extack)
8222{
8223 ASSERT_RTNL();
8224
8225 if (netdev_offload_xstats_enabled(dev, type))
8226 return -EALREADY;
8227
8228 switch (type) {
8229 case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
8230 return netdev_offload_xstats_enable_l3(dev, extack);
8231 }
8232
8233 WARN_ON(1);
8234 return -EINVAL;
8235}
8236EXPORT_SYMBOL(netdev_offload_xstats_enable);
8237
8238static void netdev_offload_xstats_disable_l3(struct net_device *dev)
8239{
8240 struct netdev_notifier_offload_xstats_info info = {
8241 .info.dev = dev,
8242 .type = NETDEV_OFFLOAD_XSTATS_TYPE_L3,
8243 };
8244
8245 call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_DISABLE,
8246 &info.info);
8247 kfree(dev->offload_xstats_l3);
8248 dev->offload_xstats_l3 = NULL;
8249}
8250
8251int netdev_offload_xstats_disable(struct net_device *dev,
8252 enum netdev_offload_xstats_type type)
8253{
8254 ASSERT_RTNL();
8255
8256 if (!netdev_offload_xstats_enabled(dev, type))
8257 return -EALREADY;
8258
8259 switch (type) {
8260 case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
8261 netdev_offload_xstats_disable_l3(dev);
8262 return 0;
8263 }
8264
8265 WARN_ON(1);
8266 return -EINVAL;
8267}
8268EXPORT_SYMBOL(netdev_offload_xstats_disable);
8269
8270static void netdev_offload_xstats_disable_all(struct net_device *dev)
8271{
8272 netdev_offload_xstats_disable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3);
8273}
8274
8275static struct rtnl_hw_stats64 *
8276netdev_offload_xstats_get_ptr(const struct net_device *dev,
8277 enum netdev_offload_xstats_type type)
8278{
8279 switch (type) {
8280 case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
8281 return dev->offload_xstats_l3;
8282 }
8283
8284 WARN_ON(1);
8285 return NULL;
8286}
8287
8288bool netdev_offload_xstats_enabled(const struct net_device *dev,
8289 enum netdev_offload_xstats_type type)
8290{
8291 ASSERT_RTNL();
8292
8293 return netdev_offload_xstats_get_ptr(dev, type);
8294}
8295EXPORT_SYMBOL(netdev_offload_xstats_enabled);
8296
8297struct netdev_notifier_offload_xstats_ru {
8298 bool used;
8299};
8300
8301struct netdev_notifier_offload_xstats_rd {
8302 struct rtnl_hw_stats64 stats;
8303 bool used;
8304};
8305
8306static void netdev_hw_stats64_add(struct rtnl_hw_stats64 *dest,
8307 const struct rtnl_hw_stats64 *src)
8308{
8309 dest->rx_packets += src->rx_packets;
8310 dest->tx_packets += src->tx_packets;
8311 dest->rx_bytes += src->rx_bytes;
8312 dest->tx_bytes += src->tx_bytes;
8313 dest->rx_errors += src->rx_errors;
8314 dest->tx_errors += src->tx_errors;
8315 dest->rx_dropped += src->rx_dropped;
8316 dest->tx_dropped += src->tx_dropped;
8317 dest->multicast += src->multicast;
8318}
8319
8320static int netdev_offload_xstats_get_used(struct net_device *dev,
8321 enum netdev_offload_xstats_type type,
8322 bool *p_used,
8323 struct netlink_ext_ack *extack)
8324{
8325 struct netdev_notifier_offload_xstats_ru report_used = {};
8326 struct netdev_notifier_offload_xstats_info info = {
8327 .info.dev = dev,
8328 .info.extack = extack,
8329 .type = type,
8330 .report_used = &report_used,
8331 };
8332 int rc;
8333
8334 WARN_ON(!netdev_offload_xstats_enabled(dev, type));
8335 rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_USED,
8336 &info.info);
8337 *p_used = report_used.used;
8338 return notifier_to_errno(rc);
8339}
8340
8341static int netdev_offload_xstats_get_stats(struct net_device *dev,
8342 enum netdev_offload_xstats_type type,
8343 struct rtnl_hw_stats64 *p_stats,
8344 bool *p_used,
8345 struct netlink_ext_ack *extack)
8346{
8347 struct netdev_notifier_offload_xstats_rd report_delta = {};
8348 struct netdev_notifier_offload_xstats_info info = {
8349 .info.dev = dev,
8350 .info.extack = extack,
8351 .type = type,
8352 .report_delta = &report_delta,
8353 };
8354 struct rtnl_hw_stats64 *stats;
8355 int rc;
8356
8357 stats = netdev_offload_xstats_get_ptr(dev, type);
8358 if (WARN_ON(!stats))
8359 return -EINVAL;
8360
8361 rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
8362 &info.info);
8363
8364 /* Cache whatever we got, even if there was an error, otherwise
8365 * earlier successful stats retrievals would get lost.
8366 */
8367 netdev_hw_stats64_add(stats, &report_delta.stats);
8368
8369 if (p_stats)
8370 *p_stats = *stats;
8371 *p_used = report_delta.used;
8372
8373 return notifier_to_errno(rc);
8374}
8375
8376int netdev_offload_xstats_get(struct net_device *dev,
8377 enum netdev_offload_xstats_type type,
8378 struct rtnl_hw_stats64 *p_stats, bool *p_used,
8379 struct netlink_ext_ack *extack)
8380{
8381 ASSERT_RTNL();
8382
8383 if (p_stats)
8384 return netdev_offload_xstats_get_stats(dev, type, p_stats,
8385 p_used, extack);
8386 else
8387 return netdev_offload_xstats_get_used(dev, type, p_used,
8388 extack);
8389}
8390EXPORT_SYMBOL(netdev_offload_xstats_get);
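
/*
 * Illustrative sketch (hedged, not taken from an in-tree caller): the
 * intended L3-stats lifecycle is enable -> get -> disable, all under RTNL.
 * "dev", "stats" and "extack" are assumed to be supplied by the caller.
 *
 *	static int read_l3_stats(struct net_device *dev,
 *				 struct rtnl_hw_stats64 *stats,
 *				 struct netlink_ext_ack *extack)
 *	{
 *		bool used;
 *		int err;
 *
 *		err = netdev_offload_xstats_enable(dev,
 *						   NETDEV_OFFLOAD_XSTATS_TYPE_L3,
 *						   extack);
 *		if (err && err != -EALREADY)
 *			return err;
 *
 *		err = netdev_offload_xstats_get(dev,
 *						NETDEV_OFFLOAD_XSTATS_TYPE_L3,
 *						stats, &used, extack);
 *		if (err)
 *			return err;
 *		return used ? 0 : -ENODATA;
 *	}
 */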
8391
8392void
8393netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *report_delta,
8394 const struct rtnl_hw_stats64 *stats)
8395{
8396 report_delta->used = true;
8397 netdev_hw_stats64_add(&report_delta->stats, stats);
8398}
8399EXPORT_SYMBOL(netdev_offload_xstats_report_delta);
8400
8401void
8402netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *report_used)
8403{
8404 report_used->used = true;
8405}
8406EXPORT_SYMBOL(netdev_offload_xstats_report_used);
8407
8408void netdev_offload_xstats_push_delta(struct net_device *dev,
8409 enum netdev_offload_xstats_type type,
8410 const struct rtnl_hw_stats64 *p_stats)
8411{
8412 struct rtnl_hw_stats64 *stats;
8413
8414 ASSERT_RTNL();
8415
8416 stats = netdev_offload_xstats_get_ptr(dev, type);
8417 if (WARN_ON(!stats))
8418 return;
8419
8420 netdev_hw_stats64_add(stats, p_stats);
8421}
8422EXPORT_SYMBOL(netdev_offload_xstats_push_delta);
8423
8424/**
8425 * netdev_get_xmit_slave - Get the xmit slave of master device
8426 * @dev: device
8427 * @skb: The packet
8428 * @all_slaves: assume all the slaves are active
8429 *
8430 * The reference counters are not incremented so the caller must be
8431 * careful with locks. The caller must hold the RCU read lock.
8432 * %NULL is returned if no slave is found.
8433 */
8434
8435struct net_device *netdev_get_xmit_slave(struct net_device *dev,
8436 struct sk_buff *skb,
8437 bool all_slaves)
8438{
8439 const struct net_device_ops *ops = dev->netdev_ops;
8440
8441 if (!ops->ndo_get_xmit_slave)
8442 return NULL;
8443 return ops->ndo_get_xmit_slave(dev, skb, all_slaves);
8444}
8445EXPORT_SYMBOL(netdev_get_xmit_slave);
8446
8447static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev,
8448 struct sock *sk)
8449{
8450 const struct net_device_ops *ops = dev->netdev_ops;
8451
8452 if (!ops->ndo_sk_get_lower_dev)
8453 return NULL;
8454 return ops->ndo_sk_get_lower_dev(dev, sk);
8455}
8456
8457/**
8458 * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket
8459 * @dev: device
8460 * @sk: the socket
8461 *
8462 * If no lower device is found, @dev itself is returned.
8463 */
8464
8465struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
8466 struct sock *sk)
8467{
8468 struct net_device *lower;
8469
8470 lower = netdev_sk_get_lower_dev(dev, sk);
8471 while (lower) {
8472 dev = lower;
8473 lower = netdev_sk_get_lower_dev(dev, sk);
8474 }
8475
8476 return dev;
8477}
8478EXPORT_SYMBOL(netdev_sk_get_lowest_dev);
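
/*
 * Illustrative sketch: a caller that wants the device a socket's traffic
 * will actually leave through (e.g. the active slave under a bond) walks
 * down from the top. "bond_dev" and "sk" are hypothetical; the ndo
 * implementations expect the RCU read lock to be held.
 *
 *	rcu_read_lock();
 *	lowest = netdev_sk_get_lowest_dev(bond_dev, sk);
 *	rcu_read_unlock();
 *
 * If no lower device resolves, "lowest" is simply "bond_dev" itself.
 */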
8479
8480static void netdev_adjacent_add_links(struct net_device *dev)
8481{
8482 struct netdev_adjacent *iter;
8483
8484 struct net *net = dev_net(dev);
8485
8486 list_for_each_entry(iter, &dev->adj_list.upper, list) {
8487 if (!net_eq(net, dev_net(iter->dev)))
8488 continue;
8489 netdev_adjacent_sysfs_add(iter->dev, dev,
8490 &iter->dev->adj_list.lower);
8491 netdev_adjacent_sysfs_add(dev, iter->dev,
8492 &dev->adj_list.upper);
8493 }
8494
8495 list_for_each_entry(iter, &dev->adj_list.lower, list) {
8496 if (!net_eq(net, dev_net(iter->dev)))
8497 continue;
8498 netdev_adjacent_sysfs_add(iter->dev, dev,
8499 &iter->dev->adj_list.upper);
8500 netdev_adjacent_sysfs_add(dev, iter->dev,
8501 &dev->adj_list.lower);
8502 }
8503}
8504
8505static void netdev_adjacent_del_links(struct net_device *dev)
8506{
8507 struct netdev_adjacent *iter;
8508
8509 struct net *net = dev_net(dev);
8510
8511 list_for_each_entry(iter, &dev->adj_list.upper, list) {
8512 if (!net_eq(net, dev_net(iter->dev)))
8513 continue;
8514 netdev_adjacent_sysfs_del(iter->dev, dev->name,
8515 &iter->dev->adj_list.lower);
8516 netdev_adjacent_sysfs_del(dev, iter->dev->name,
8517 &dev->adj_list.upper);
8518 }
8519
8520 list_for_each_entry(iter, &dev->adj_list.lower, list) {
8521 if (!net_eq(net, dev_net(iter->dev)))
8522 continue;
8523 netdev_adjacent_sysfs_del(iter->dev, dev->name,
8524 &iter->dev->adj_list.upper);
8525 netdev_adjacent_sysfs_del(dev, iter->dev->name,
8526 &dev->adj_list.lower);
8527 }
8528}
8529
8530void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
8531{
8532 struct netdev_adjacent *iter;
8533
8534 struct net *net = dev_net(dev);
8535
8536 list_for_each_entry(iter, &dev->adj_list.upper, list) {
8537 if (!net_eq(net, dev_net(iter->dev)))
8538 continue;
8539 netdev_adjacent_sysfs_del(iter->dev, oldname,
8540 &iter->dev->adj_list.lower);
8541 netdev_adjacent_sysfs_add(iter->dev, dev,
8542 &iter->dev->adj_list.lower);
8543 }
8544
8545 list_for_each_entry(iter, &dev->adj_list.lower, list) {
8546 if (!net_eq(net, dev_net(iter->dev)))
8547 continue;
8548 netdev_adjacent_sysfs_del(iter->dev, oldname,
8549 &iter->dev->adj_list.upper);
8550 netdev_adjacent_sysfs_add(iter->dev, dev,
8551 &iter->dev->adj_list.upper);
8552 }
8553}
8554
8555void *netdev_lower_dev_get_private(struct net_device *dev,
8556 struct net_device *lower_dev)
8557{
8558 struct netdev_adjacent *lower;
8559
8560 if (!lower_dev)
8561 return NULL;
8562 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
8563 if (!lower)
8564 return NULL;
8565
8566 return lower->private;
8567}
8568EXPORT_SYMBOL(netdev_lower_dev_get_private);
8569
8570
8571/**
8572 * netdev_lower_state_changed - Dispatch event about lower device state change
8573 * @lower_dev: device
8574 * @lower_state_info: state to dispatch
8575 *
8576 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
8577 * The caller must hold the RTNL lock.
8578 */
8579void netdev_lower_state_changed(struct net_device *lower_dev,
8580 void *lower_state_info)
8581{
8582 struct netdev_notifier_changelowerstate_info changelowerstate_info = {
8583 .info.dev = lower_dev,
8584 };
8585
8586 ASSERT_RTNL();
8587 changelowerstate_info.lower_state_info = lower_state_info;
8588 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
8589 &changelowerstate_info.info);
8590}
8591EXPORT_SYMBOL(netdev_lower_state_changed);
8592
8593static void dev_change_rx_flags(struct net_device *dev, int flags)
8594{
8595 const struct net_device_ops *ops = dev->netdev_ops;
8596
8597 if (ops->ndo_change_rx_flags)
8598 ops->ndo_change_rx_flags(dev, flags);
8599}
8600
8601static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
8602{
8603 unsigned int old_flags = dev->flags;
8604 unsigned int promiscuity, flags;
8605 kuid_t uid;
8606 kgid_t gid;
8607
8608 ASSERT_RTNL();
8609
8610 promiscuity = dev->promiscuity + inc;
8611 if (promiscuity == 0) {
8612 /*
8613 * Avoid overflow.
8614 * If inc causes an overflow, leave the promiscuity count untouched and return an error.
8615 */
8616 if (unlikely(inc > 0)) {
8617 netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n");
8618 return -EOVERFLOW;
8619 }
8620 flags = old_flags & ~IFF_PROMISC;
8621 } else {
8622 flags = old_flags | IFF_PROMISC;
8623 }
8624 WRITE_ONCE(dev->promiscuity, promiscuity);
8625 if (flags != old_flags) {
8626 WRITE_ONCE(dev->flags, flags);
8627 netdev_info(dev, "%s promiscuous mode\n",
8628 dev->flags & IFF_PROMISC ? "entered" : "left");
8629 if (audit_enabled) {
8630 current_uid_gid(&uid, &gid);
8631 audit_log(audit_context(), GFP_ATOMIC,
8632 AUDIT_ANOM_PROMISCUOUS,
8633 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
8634 dev->name, (dev->flags & IFF_PROMISC),
8635 (old_flags & IFF_PROMISC),
8636 from_kuid(&init_user_ns, audit_get_loginuid(current)),
8637 from_kuid(&init_user_ns, uid),
8638 from_kgid(&init_user_ns, gid),
8639 audit_get_sessionid(current));
8640 }
8641
8642 dev_change_rx_flags(dev, IFF_PROMISC);
8643 }
8644 if (notify)
8645 __dev_notify_flags(dev, old_flags, IFF_PROMISC, 0, NULL);
8646 return 0;
8647}
8648
8649/**
8650 * dev_set_promiscuity - update promiscuity count on a device
8651 * @dev: device
8652 * @inc: modifier
8653 *
8654 * Add or remove promiscuity from a device. While the count in the device
8655 * remains above zero the interface remains promiscuous. Once it hits zero
8656 * the device reverts to normal filtering operation. A negative inc
8657 * value is used to drop promiscuity on the device.
8658 * Return 0 if successful or a negative errno code on error.
8659 */
8660int dev_set_promiscuity(struct net_device *dev, int inc)
8661{
8662 unsigned int old_flags = dev->flags;
8663 int err;
8664
8665 err = __dev_set_promiscuity(dev, inc, true);
8666 if (err < 0)
8667 return err;
8668 if (dev->flags != old_flags)
8669 dev_set_rx_mode(dev);
8670 return err;
8671}
8672EXPORT_SYMBOL(dev_set_promiscuity);
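
/*
 * Illustrative sketch (hypothetical caller): a capture component takes a
 * promiscuity reference while sniffing and drops it when done, both under
 * RTNL. The counter makes this safe against other concurrent users.
 *
 *	static int capture_start(struct net_device *dev)
 *	{
 *		ASSERT_RTNL();
 *		return dev_set_promiscuity(dev, 1);
 *	}
 *
 *	static void capture_stop(struct net_device *dev)
 *	{
 *		ASSERT_RTNL();
 *		dev_set_promiscuity(dev, -1);
 *	}
 */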
8673
8674static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
8675{
8676 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
8677 unsigned int allmulti, flags;
8678
8679 ASSERT_RTNL();
8680
8681 allmulti = dev->allmulti + inc;
8682 if (allmulti == 0) {
8683 /*
8684 * Avoid overflow.
8685 * If inc causes an overflow, leave the allmulti count untouched and return an error.
8686 */
8687 if (unlikely(inc > 0)) {
8688 netdev_warn(dev, "allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n");
8689 return -EOVERFLOW;
8690 }
8691 flags = old_flags & ~IFF_ALLMULTI;
8692 } else {
8693 flags = old_flags | IFF_ALLMULTI;
8694 }
8695 WRITE_ONCE(dev->allmulti, allmulti);
8696 if (flags != old_flags) {
8697 WRITE_ONCE(dev->flags, flags);
8698 netdev_info(dev, "%s allmulticast mode\n",
8699 dev->flags & IFF_ALLMULTI ? "entered" : "left");
8700 dev_change_rx_flags(dev, IFF_ALLMULTI);
8701 dev_set_rx_mode(dev);
8702 if (notify)
8703 __dev_notify_flags(dev, old_flags,
8704 dev->gflags ^ old_gflags, 0, NULL);
8705 }
8706 return 0;
8707}
8708
8709/**
8710 * dev_set_allmulti - update allmulti count on a device
8711 * @dev: device
8712 * @inc: modifier
8713 *
8714 * Add or remove reception of all multicast frames to a device. While the
8715 * count in the device remains above zero the interface remains listening
8716 * to all multicast frames. Once it hits zero the device reverts to normal
8717 * filtering operation. A negative @inc value is used to drop the counter
8718 * when releasing a resource needing all multicasts.
8719 * Return 0 if successful or a negative errno code on error.
8720 */
8721
8722int dev_set_allmulti(struct net_device *dev, int inc)
8723{
8724 return __dev_set_allmulti(dev, inc, true);
8725}
8726EXPORT_SYMBOL(dev_set_allmulti);
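
/*
 * Illustrative sketch: a multicast routing component follows the same
 * counted pattern as promiscuity above; the pairing of +1 and -1 calls
 * (under RTNL) is what matters.
 *
 *	err = dev_set_allmulti(dev, 1);
 *	if (err < 0)
 *		return err;
 *
 * and later, when the resource is released:
 *
 *	dev_set_allmulti(dev, -1);
 */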
8727
8728/*
8729 * Upload unicast and multicast address lists to device and
8730 * configure RX filtering. When the device doesn't support unicast
8731 * filtering it is put in promiscuous mode while unicast addresses
8732 * are present.
8733 */
8734void __dev_set_rx_mode(struct net_device *dev)
8735{
8736 const struct net_device_ops *ops = dev->netdev_ops;
8737
8738 /* dev_open will call this function so the list will stay sane. */
8739 if (!(dev->flags&IFF_UP))
8740 return;
8741
8742 if (!netif_device_present(dev))
8743 return;
8744
8745 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
8746 /* Unicast address changes may only happen under the rtnl,
8747 * therefore calling __dev_set_promiscuity here is safe.
8748 */
8749 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
8750 __dev_set_promiscuity(dev, 1, false);
8751 dev->uc_promisc = true;
8752 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
8753 __dev_set_promiscuity(dev, -1, false);
8754 dev->uc_promisc = false;
8755 }
8756 }
8757
8758 if (ops->ndo_set_rx_mode)
8759 ops->ndo_set_rx_mode(dev);
8760}
8761
8762void dev_set_rx_mode(struct net_device *dev)
8763{
8764 netif_addr_lock_bh(dev);
8765 __dev_set_rx_mode(dev);
8766 netif_addr_unlock_bh(dev);
8767}
8768
8769/**
8770 * dev_get_flags - get flags reported to userspace
8771 * @dev: device
8772 *
8773 * Get the combination of flag bits exported through APIs to userspace.
8774 */
8775unsigned int dev_get_flags(const struct net_device *dev)
8776{
8777 unsigned int flags;
8778
8779 flags = (READ_ONCE(dev->flags) & ~(IFF_PROMISC |
8780 IFF_ALLMULTI |
8781 IFF_RUNNING |
8782 IFF_LOWER_UP |
8783 IFF_DORMANT)) |
8784 (READ_ONCE(dev->gflags) & (IFF_PROMISC |
8785 IFF_ALLMULTI));
8786
8787 if (netif_running(dev)) {
8788 if (netif_oper_up(dev))
8789 flags |= IFF_RUNNING;
8790 if (netif_carrier_ok(dev))
8791 flags |= IFF_LOWER_UP;
8792 if (netif_dormant(dev))
8793 flags |= IFF_DORMANT;
8794 }
8795
8796 return flags;
8797}
8798EXPORT_SYMBOL(dev_get_flags);
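
/*
 * Illustrative sketch: because dev_get_flags() folds the synthesized
 * state bits (IFF_RUNNING etc.) into the result, "administratively up and
 * operationally running" is a single test on its return value:
 *
 *	unsigned int flags = dev_get_flags(dev);
 *
 *	if ((flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING))
 *		pr_info("%s is up and running\n", dev->name);
 */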
8799
8800int __dev_change_flags(struct net_device *dev, unsigned int flags,
8801 struct netlink_ext_ack *extack)
8802{
8803 unsigned int old_flags = dev->flags;
8804 int ret;
8805
8806 ASSERT_RTNL();
8807
8808 /*
8809 * Set the flags on our device.
8810 */
8811
8812 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
8813 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
8814 IFF_AUTOMEDIA)) |
8815 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
8816 IFF_ALLMULTI));
8817
8818 /*
8819 * Load in the correct multicast list now the flags have changed.
8820 */
8821
8822 if ((old_flags ^ flags) & IFF_MULTICAST)
8823 dev_change_rx_flags(dev, IFF_MULTICAST);
8824
8825 dev_set_rx_mode(dev);
8826
8827 /*
8828 * Have we downed the interface? We handle IFF_UP ourselves
8829 * according to user attempts to set it, rather than blindly
8830 * setting it.
8831 */
8832
8833 ret = 0;
8834 if ((old_flags ^ flags) & IFF_UP) {
8835 if (old_flags & IFF_UP)
8836 __dev_close(dev);
8837 else
8838 ret = __dev_open(dev, extack);
8839 }
8840
8841 if ((flags ^ dev->gflags) & IFF_PROMISC) {
8842 int inc = (flags & IFF_PROMISC) ? 1 : -1;
8843 unsigned int old_flags = dev->flags;
8844
8845 dev->gflags ^= IFF_PROMISC;
8846
8847 if (__dev_set_promiscuity(dev, inc, false) >= 0)
8848 if (dev->flags != old_flags)
8849 dev_set_rx_mode(dev);
8850 }
8851
8852 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
8853 * is important. Some (broken) drivers set IFF_PROMISC when
8854 * IFF_ALLMULTI is requested, without asking us and without reporting it.
8855 */
8856 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
8857 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
8858
8859 dev->gflags ^= IFF_ALLMULTI;
8860 __dev_set_allmulti(dev, inc, false);
8861 }
8862
8863 return ret;
8864}
8865
8866void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
8867 unsigned int gchanges, u32 portid,
8868 const struct nlmsghdr *nlh)
8869{
8870 unsigned int changes = dev->flags ^ old_flags;
8871
8872 if (gchanges)
8873 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC, portid, nlh);
8874
8875 if (changes & IFF_UP) {
8876 if (dev->flags & IFF_UP)
8877 call_netdevice_notifiers(NETDEV_UP, dev);
8878 else
8879 call_netdevice_notifiers(NETDEV_DOWN, dev);
8880 }
8881
8882 if (dev->flags & IFF_UP &&
8883 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
8884 struct netdev_notifier_change_info change_info = {
8885 .info = {
8886 .dev = dev,
8887 },
8888 .flags_changed = changes,
8889 };
8890
8891 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
8892 }
8893}
8894
8895/**
8896 * dev_change_flags - change device settings
8897 * @dev: device
8898 * @flags: device state flags
8899 * @extack: netlink extended ack
8900 *
8901 * Change settings on a device based on state flags. The flags are
8902 * in the userspace exported format.
8903 */
8904int dev_change_flags(struct net_device *dev, unsigned int flags,
8905 struct netlink_ext_ack *extack)
8906{
8907 int ret;
8908 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
8909
8910 ret = __dev_change_flags(dev, flags, extack);
8911 if (ret < 0)
8912 return ret;
8913
8914 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
8915 __dev_notify_flags(dev, old_flags, changes, 0, NULL);
8916 return ret;
8917}
8918EXPORT_SYMBOL(dev_change_flags);
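
/*
 * Illustrative sketch: bringing a device administratively up from kernel
 * code mirrors what "ip link set DEV up" triggers via rtnetlink. RTNL must
 * be held; "extack" may be NULL here.
 *
 *	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP, NULL);
 */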
8919
8920int __dev_set_mtu(struct net_device *dev, int new_mtu)
8921{
8922 const struct net_device_ops *ops = dev->netdev_ops;
8923
8924 if (ops->ndo_change_mtu)
8925 return ops->ndo_change_mtu(dev, new_mtu);
8926
8927 /* Pairs with all the lockless reads of dev->mtu in the stack */
8928 WRITE_ONCE(dev->mtu, new_mtu);
8929 return 0;
8930}
8931EXPORT_SYMBOL(__dev_set_mtu);
8932
8933int dev_validate_mtu(struct net_device *dev, int new_mtu,
8934 struct netlink_ext_ack *extack)
8935{
8936 /* MTU must be positive, and in range */
8937 if (new_mtu < 0 || new_mtu < dev->min_mtu) {
8938 NL_SET_ERR_MSG(extack, "mtu less than device minimum");
8939 return -EINVAL;
8940 }
8941
8942 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
8943 NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
8944 return -EINVAL;
8945 }
8946 return 0;
8947}
8948
8949/**
8950 * dev_set_mtu_ext - Change maximum transfer unit
8951 * @dev: device
8952 * @new_mtu: new transfer unit
8953 * @extack: netlink extended ack
8954 *
8955 * Change the maximum transfer size of the network device.
8956 */
8957int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
8958 struct netlink_ext_ack *extack)
8959{
8960 int err, orig_mtu;
8961
8962 if (new_mtu == dev->mtu)
8963 return 0;
8964
8965 err = dev_validate_mtu(dev, new_mtu, extack);
8966 if (err)
8967 return err;
8968
8969 if (!netif_device_present(dev))
8970 return -ENODEV;
8971
8972 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
8973 err = notifier_to_errno(err);
8974 if (err)
8975 return err;
8976
8977 orig_mtu = dev->mtu;
8978 err = __dev_set_mtu(dev, new_mtu);
8979
8980 if (!err) {
8981 err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8982 orig_mtu);
8983 err = notifier_to_errno(err);
8984 if (err) {
8985 /* setting mtu back and notifying everyone again,
8986 * so that they have a chance to revert changes.
8987 */
8988 __dev_set_mtu(dev, orig_mtu);
8989 call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8990 new_mtu);
8991 }
8992 }
8993 return err;
8994}
8995
8996int dev_set_mtu(struct net_device *dev, int new_mtu)
8997{
8998 struct netlink_ext_ack extack;
8999 int err;
9000
9001 memset(&extack, 0, sizeof(extack));
9002 err = dev_set_mtu_ext(dev, new_mtu, &extack);
9003 if (err && extack._msg)
9004 net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
9005 return err;
9006}
9007EXPORT_SYMBOL(dev_set_mtu);
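
/*
 * Illustrative sketch: a caller simply asks for the new MTU and lets
 * validation, the notifier chain and ndo_change_mtu() decide. RTNL must
 * be held; 9000 here is just an example jumbo-frame value.
 *
 *	err = dev_set_mtu(dev, 9000);
 *	if (err)
 *		return err;
 */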
9008
9009/**
9010 * dev_change_tx_queue_len - Change TX queue length of a netdevice
9011 * @dev: device
9012 * @new_len: new tx queue length
9013 */
9014int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
9015{
9016 unsigned int orig_len = dev->tx_queue_len;
9017 int res;
9018
9019 if (new_len != (unsigned int)new_len)
9020 return -ERANGE;
9021
9022 if (new_len != orig_len) {
9023 WRITE_ONCE(dev->tx_queue_len, new_len);
9024 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
9025 res = notifier_to_errno(res);
9026 if (res)
9027 goto err_rollback;
9028 res = dev_qdisc_change_tx_queue_len(dev);
9029 if (res)
9030 goto err_rollback;
9031 }
9032
9033 return 0;
9034
9035err_rollback:
9036 netdev_err(dev, "refused to change device tx_queue_len\n");
9037 WRITE_ONCE(dev->tx_queue_len, orig_len);
9038 return res;
9039}
9040
9041/**
9042 * dev_set_group - Change group this device belongs to
9043 * @dev: device
9044 * @new_group: group this device should belong to
9045 */
9046void dev_set_group(struct net_device *dev, int new_group)
9047{
9048 dev->group = new_group;
9049}
9050
9051/**
9052 * dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
9053 * @dev: device
9054 * @addr: new address
9055 * @extack: netlink extended ack
9056 */
9057int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
9058 struct netlink_ext_ack *extack)
9059{
9060 struct netdev_notifier_pre_changeaddr_info info = {
9061 .info.dev = dev,
9062 .info.extack = extack,
9063 .dev_addr = addr,
9064 };
9065 int rc;
9066
9067 rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
9068 return notifier_to_errno(rc);
9069}
9070EXPORT_SYMBOL(dev_pre_changeaddr_notify);
9071
9072/**
9073 * dev_set_mac_address - Change Media Access Control Address
9074 * @dev: device
9075 * @sa: new address
9076 * @extack: netlink extended ack
9077 *
9078 * Change the hardware (MAC) address of the device
9079 */
9080int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
9081 struct netlink_ext_ack *extack)
9082{
9083 const struct net_device_ops *ops = dev->netdev_ops;
9084 int err;
9085
9086 if (!ops->ndo_set_mac_address)
9087 return -EOPNOTSUPP;
9088 if (sa->sa_family != dev->type)
9089 return -EINVAL;
9090 if (!netif_device_present(dev))
9091 return -ENODEV;
9092 err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
9093 if (err)
9094 return err;
9095 if (memcmp(dev->dev_addr, sa->sa_data, dev->addr_len)) {
9096 err = ops->ndo_set_mac_address(dev, sa);
9097 if (err)
9098 return err;
9099 }
9100 dev->addr_assign_type = NET_ADDR_SET;
9101 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
9102 add_device_randomness(dev->dev_addr, dev->addr_len);
9103 return 0;
9104}
9105EXPORT_SYMBOL(dev_set_mac_address);
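
/*
 * Illustrative sketch: programming a new hardware address from kernel
 * code. The address family must match dev->type (ARPHRD_ETHER for
 * Ethernet) and RTNL must be held. "new_addr" is a hypothetical
 * ETH_ALEN-byte buffer.
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_addr, ETH_ALEN);
 *	err = dev_set_mac_address(dev, &sa, NULL);
 */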
9106
9107DECLARE_RWSEM(dev_addr_sem);
9108
9109int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
9110 struct netlink_ext_ack *extack)
9111{
9112 int ret;
9113
9114 down_write(&dev_addr_sem);
9115 ret = dev_set_mac_address(dev, sa, extack);
9116 up_write(&dev_addr_sem);
9117 return ret;
9118}
9119EXPORT_SYMBOL(dev_set_mac_address_user);
9120
9121int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name)
9122{
9123 size_t size = sizeof(sa->sa_data_min);
9124 struct net_device *dev;
9125 int ret = 0;
9126
9127 down_read(&dev_addr_sem);
9128 rcu_read_lock();
9129
9130 dev = dev_get_by_name_rcu(net, dev_name);
9131 if (!dev) {
9132 ret = -ENODEV;
9133 goto unlock;
9134 }
9135 if (!dev->addr_len)
9136 memset(sa->sa_data, 0, size);
9137 else
9138 memcpy(sa->sa_data, dev->dev_addr,
9139 min_t(size_t, size, dev->addr_len));
9140 sa->sa_family = dev->type;
9141
9142unlock:
9143 rcu_read_unlock();
9144 up_read(&dev_addr_sem);
9145 return ret;
9146}
9147EXPORT_SYMBOL(dev_get_mac_address);
9148
9149/**
9150 * dev_change_carrier - Change device carrier
9151 * @dev: device
9152 * @new_carrier: new value
9153 *
9154 * Change device carrier
9155 */
9156int dev_change_carrier(struct net_device *dev, bool new_carrier)
9157{
9158 const struct net_device_ops *ops = dev->netdev_ops;
9159
9160 if (!ops->ndo_change_carrier)
9161 return -EOPNOTSUPP;
9162 if (!netif_device_present(dev))
9163 return -ENODEV;
9164 return ops->ndo_change_carrier(dev, new_carrier);
9165}
9166
9167/**
9168 * dev_get_phys_port_id - Get device physical port ID
9169 * @dev: device
9170 * @ppid: port ID
9171 *
9172 * Get device physical port ID
9173 */
9174int dev_get_phys_port_id(struct net_device *dev,
9175 struct netdev_phys_item_id *ppid)
9176{
9177 const struct net_device_ops *ops = dev->netdev_ops;
9178
9179 if (!ops->ndo_get_phys_port_id)
9180 return -EOPNOTSUPP;
9181 return ops->ndo_get_phys_port_id(dev, ppid);
9182}
9183
9184/**
9185 * dev_get_phys_port_name - Get device physical port name
9186 * @dev: device
9187 * @name: port name
9188 * @len: limit of bytes to copy to name
9189 *
9190 * Get device physical port name
9191 */
9192int dev_get_phys_port_name(struct net_device *dev,
9193 char *name, size_t len)
9194{
9195 const struct net_device_ops *ops = dev->netdev_ops;
9196 int err;
9197
9198 if (ops->ndo_get_phys_port_name) {
9199 err = ops->ndo_get_phys_port_name(dev, name, len);
9200 if (err != -EOPNOTSUPP)
9201 return err;
9202 }
9203 return devlink_compat_phys_port_name_get(dev, name, len);
9204}
9205
9206/**
9207 * dev_get_port_parent_id - Get the device's port parent identifier
9208 * @dev: network device
9209 * @ppid: pointer to a storage for the port's parent identifier
9210 * @recurse: allow/disallow recursion to lower devices
9211 *
9212 * Get the device's port parent identifier
9213 */
9214int dev_get_port_parent_id(struct net_device *dev,
9215 struct netdev_phys_item_id *ppid,
9216 bool recurse)
9217{
9218 const struct net_device_ops *ops = dev->netdev_ops;
9219 struct netdev_phys_item_id first = { };
9220 struct net_device *lower_dev;
9221 struct list_head *iter;
9222 int err;
9223
9224 if (ops->ndo_get_port_parent_id) {
9225 err = ops->ndo_get_port_parent_id(dev, ppid);
9226 if (err != -EOPNOTSUPP)
9227 return err;
9228 }
9229
9230 err = devlink_compat_switch_id_get(dev, ppid);
9231 if (!recurse || err != -EOPNOTSUPP)
9232 return err;
9233
9234 netdev_for_each_lower_dev(dev, lower_dev, iter) {
9235 err = dev_get_port_parent_id(lower_dev, ppid, true);
9236 if (err)
9237 break;
9238 if (!first.id_len)
9239 first = *ppid;
9240 else if (memcmp(&first, ppid, sizeof(*ppid)))
9241 return -EOPNOTSUPP;
9242 }
9243
9244 return err;
9245}
9246EXPORT_SYMBOL(dev_get_port_parent_id);
9247
9248/**
9249 * netdev_port_same_parent_id - Indicate if two network devices have
9250 * the same port parent identifier
9251 * @a: first network device
9252 * @b: second network device
9253 */
9254bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
9255{
9256 struct netdev_phys_item_id a_id = { };
9257 struct netdev_phys_item_id b_id = { };
9258
9259 if (dev_get_port_parent_id(a, &a_id, true) ||
9260 dev_get_port_parent_id(b, &b_id, true))
9261 return false;
9262
9263 return netdev_phys_item_id_same(&a_id, &b_id);
9264}
9265EXPORT_SYMBOL(netdev_port_same_parent_id);
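
/*
 * Illustrative sketch: switchdev-aware code uses this to tell whether two
 * ports sit on the same physical switch, e.g. when deciding whether
 * forwarding between them can be offloaded:
 *
 *	if (netdev_port_same_parent_id(port_a, port_b))
 *		offload = true;
 *
 * "port_a", "port_b" and "offload" are hypothetical.
 */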
9266
9267/**
9268 * dev_change_proto_down - set carrier according to proto_down.
9269 *
9270 * @dev: device
9271 * @proto_down: new value
9272 */
9273int dev_change_proto_down(struct net_device *dev, bool proto_down)
9274{
9275 if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN))
9276 return -EOPNOTSUPP;
9277 if (!netif_device_present(dev))
9278 return -ENODEV;
9279 if (proto_down)
9280 netif_carrier_off(dev);
9281 else
9282 netif_carrier_on(dev);
9283 WRITE_ONCE(dev->proto_down, proto_down);
9284 return 0;
9285}
9286
9287/**
9288 * dev_change_proto_down_reason - proto down reason
9289 *
9290 * @dev: device
9291 * @mask: proto down mask
9292 * @value: proto down value
9293 */
9294void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
9295 u32 value)
9296{
9297 u32 proto_down_reason;
9298 int b;
9299
9300 if (!mask) {
9301 proto_down_reason = value;
9302 } else {
9303 proto_down_reason = dev->proto_down_reason;
9304 for_each_set_bit(b, &mask, 32) {
9305 if (value & (1 << b))
9306 proto_down_reason |= BIT(b);
9307 else
9308 proto_down_reason &= ~BIT(b);
9309 }
9310 }
9311 WRITE_ONCE(dev->proto_down_reason, proto_down_reason);
9312}
9313
9314struct bpf_xdp_link {
9315 struct bpf_link link;
9316 struct net_device *dev; /* protected by rtnl_lock, no refcnt held */
9317 int flags;
9318};
9319
9320static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags)
9321{
9322 if (flags & XDP_FLAGS_HW_MODE)
9323 return XDP_MODE_HW;
9324 if (flags & XDP_FLAGS_DRV_MODE)
9325 return XDP_MODE_DRV;
9326 if (flags & XDP_FLAGS_SKB_MODE)
9327 return XDP_MODE_SKB;
9328 return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB;
9329}
9330
9331static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode)
9332{
9333 switch (mode) {
9334 case XDP_MODE_SKB:
9335 return generic_xdp_install;
9336 case XDP_MODE_DRV:
9337 case XDP_MODE_HW:
9338 return dev->netdev_ops->ndo_bpf;
9339 default:
9340 return NULL;
9341 }
9342}
9343
9344static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev,
9345 enum bpf_xdp_mode mode)
9346{
9347 return dev->xdp_state[mode].link;
9348}
9349
9350static struct bpf_prog *dev_xdp_prog(struct net_device *dev,
9351 enum bpf_xdp_mode mode)
9352{
9353 struct bpf_xdp_link *link = dev_xdp_link(dev, mode);
9354
9355 if (link)
9356 return link->link.prog;
9357 return dev->xdp_state[mode].prog;
9358}
9359
9360u8 dev_xdp_prog_count(struct net_device *dev)
9361{
9362 u8 count = 0;
9363 int i;
9364
9365 for (i = 0; i < __MAX_XDP_MODE; i++)
9366 if (dev->xdp_state[i].prog || dev->xdp_state[i].link)
9367 count++;
9368 return count;
9369}
9370EXPORT_SYMBOL_GPL(dev_xdp_prog_count);
9371
9372u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
9373{
9374 struct bpf_prog *prog = dev_xdp_prog(dev, mode);
9375
9376 return prog ? prog->aux->id : 0;
9377}
9378
9379static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode,
9380 struct bpf_xdp_link *link)
9381{
9382 dev->xdp_state[mode].link = link;
9383 dev->xdp_state[mode].prog = NULL;
9384}
9385
9386static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode,
9387 struct bpf_prog *prog)
9388{
9389 dev->xdp_state[mode].link = NULL;
9390 dev->xdp_state[mode].prog = prog;
9391}
9392
9393static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode,
9394 bpf_op_t bpf_op, struct netlink_ext_ack *extack,
9395 u32 flags, struct bpf_prog *prog)
9396{
9397 struct netdev_bpf xdp;
9398 int err;
9399
9400 memset(&xdp, 0, sizeof(xdp));
9401 xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG;
9402 xdp.extack = extack;
9403 xdp.flags = flags;
9404 xdp.prog = prog;
9405
9406 /* Drivers assume refcnt is already incremented (i.e., prog pointer is
9407 * "moved" into driver), so they don't increment it on their own, but
9408 * they do decrement refcnt when program is detached or replaced.
9409 * Given net_device also owns link/prog, we need to bump refcnt here
9410 * to prevent drivers from underflowing it.
9411 */
9412 if (prog)
9413 bpf_prog_inc(prog);
9414 err = bpf_op(dev, &xdp);
9415 if (err) {
9416 if (prog)
9417 bpf_prog_put(prog);
9418 return err;
9419 }
9420
9421 if (mode != XDP_MODE_HW)
9422 bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog);
9423
9424 return 0;
9425}
9426
9427static void dev_xdp_uninstall(struct net_device *dev)
9428{
9429 struct bpf_xdp_link *link;
9430 struct bpf_prog *prog;
9431 enum bpf_xdp_mode mode;
9432 bpf_op_t bpf_op;
9433
9434 ASSERT_RTNL();
9435
9436 for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) {
9437 prog = dev_xdp_prog(dev, mode);
9438 if (!prog)
9439 continue;
9440
9441 bpf_op = dev_xdp_bpf_op(dev, mode);
9442 if (!bpf_op)
9443 continue;
9444
9445 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
9446
9447 /* auto-detach link from net device */
9448 link = dev_xdp_link(dev, mode);
9449 if (link)
9450 link->dev = NULL;
9451 else
9452 bpf_prog_put(prog);
9453
9454 dev_xdp_set_link(dev, mode, NULL);
9455 }
9456}
9457
9458static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack,
9459 struct bpf_xdp_link *link, struct bpf_prog *new_prog,
9460 struct bpf_prog *old_prog, u32 flags)
9461{
9462 unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES);
9463 struct bpf_prog *cur_prog;
9464 struct net_device *upper;
9465 struct list_head *iter;
9466 enum bpf_xdp_mode mode;
9467 bpf_op_t bpf_op;
9468 int err;
9469
9470 ASSERT_RTNL();
9471
9472 /* either link or prog attachment, never both */
9473 if (link && (new_prog || old_prog))
9474 return -EINVAL;
9475 /* link supports only XDP mode flags */
9476 if (link && (flags & ~XDP_FLAGS_MODES)) {
9477 NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment");
9478 return -EINVAL;
9479 }
9480 /* just one XDP mode bit should be set, zero defaults to drv/skb mode */
9481 if (num_modes > 1) {
9482 NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set");
9483 return -EINVAL;
9484 }
9485 /* avoid ambiguity if offload + drv/skb mode progs are both loaded */
9486 if (!num_modes && dev_xdp_prog_count(dev) > 1) {
9487 NL_SET_ERR_MSG(extack,
9488 "More than one program loaded, unset mode is ambiguous");
9489 return -EINVAL;
9490 }
9491 /* old_prog != NULL implies XDP_FLAGS_REPLACE is set */
9492 if (old_prog && !(flags & XDP_FLAGS_REPLACE)) {
9493 NL_SET_ERR_MSG(extack, "XDP_FLAGS_REPLACE is not specified");
9494 return -EINVAL;
9495 }
9496
9497 mode = dev_xdp_mode(dev, flags);
9498 /* can't replace attached link */
9499 if (dev_xdp_link(dev, mode)) {
9500 NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link");
9501 return -EBUSY;
9502 }
9503
9504 /* don't allow if an upper device already has a program */
9505 netdev_for_each_upper_dev_rcu(dev, upper, iter) {
9506 if (dev_xdp_prog_count(upper) > 0) {
9507 NL_SET_ERR_MSG(extack, "Cannot attach when an upper device already has a program");
9508 return -EEXIST;
9509 }
9510 }
9511
9512 cur_prog = dev_xdp_prog(dev, mode);
9513 /* can't replace attached prog with link */
9514 if (link && cur_prog) {
9515 NL_SET_ERR_MSG(extack, "Can't replace active XDP program with BPF link");
9516 return -EBUSY;
9517 }
9518 if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) {
9519 NL_SET_ERR_MSG(extack, "Active program does not match expected");
9520 return -EEXIST;
9521 }
9522
9523 /* put effective new program into new_prog */
9524 if (link)
9525 new_prog = link->link.prog;
9526
9527 if (new_prog) {
9528 bool offload = mode == XDP_MODE_HW;
9529 enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB
9530 ? XDP_MODE_DRV : XDP_MODE_SKB;
9531
9532 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
9533 NL_SET_ERR_MSG(extack, "XDP program already attached");
9534 return -EBUSY;
9535 }
9536 if (!offload && dev_xdp_prog(dev, other_mode)) {
9537 NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time");
9538 return -EEXIST;
9539 }
9540 if (!offload && bpf_prog_is_offloaded(new_prog->aux)) {
9541 NL_SET_ERR_MSG(extack, "Using offloaded program without HW_MODE flag is not supported");
9542 return -EINVAL;
9543 }
9544 if (bpf_prog_is_dev_bound(new_prog->aux) && !bpf_offload_dev_match(new_prog, dev)) {
9545 NL_SET_ERR_MSG(extack, "Program bound to different device");
9546 return -EINVAL;
9547 }
9548 if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) {
9549 NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device");
9550 return -EINVAL;
9551 }
9552 if (new_prog->expected_attach_type == BPF_XDP_CPUMAP) {
9553 NL_SET_ERR_MSG(extack, "BPF_XDP_CPUMAP programs can not be attached to a device");
9554 return -EINVAL;
9555 }
9556 }
9557
9558 /* don't call drivers if the effective program didn't change */
9559 if (new_prog != cur_prog) {
9560 bpf_op = dev_xdp_bpf_op(dev, mode);
9561 if (!bpf_op) {
9562 NL_SET_ERR_MSG(extack, "Underlying driver does not support XDP in native mode");
9563 return -EOPNOTSUPP;
9564 }
9565
9566 err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog);
9567 if (err)
9568 return err;
9569 }
9570
9571 if (link)
9572 dev_xdp_set_link(dev, mode, link);
9573 else
9574 dev_xdp_set_prog(dev, mode, new_prog);
9575 if (cur_prog)
9576 bpf_prog_put(cur_prog);
9577
9578 return 0;
9579}
9580
9581static int dev_xdp_attach_link(struct net_device *dev,
9582 struct netlink_ext_ack *extack,
9583 struct bpf_xdp_link *link)
9584{
9585 return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags);
9586}
9587
9588static int dev_xdp_detach_link(struct net_device *dev,
9589 struct netlink_ext_ack *extack,
9590 struct bpf_xdp_link *link)
9591{
9592 enum bpf_xdp_mode mode;
9593 bpf_op_t bpf_op;
9594
9595 ASSERT_RTNL();
9596
9597 mode = dev_xdp_mode(dev, link->flags);
9598 if (dev_xdp_link(dev, mode) != link)
9599 return -EINVAL;
9600
9601 bpf_op = dev_xdp_bpf_op(dev, mode);
9602 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
9603 dev_xdp_set_link(dev, mode, NULL);
9604 return 0;
9605}
9606
9607static void bpf_xdp_link_release(struct bpf_link *link)
9608{
9609 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9610
9611 rtnl_lock();
9612
9613 /* if racing with the net_device's teardown, xdp_link->dev might
9614 * already be NULL, in which case the link was already auto-detached
9615 */
9616 if (xdp_link->dev) {
9617 WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link));
9618 xdp_link->dev = NULL;
9619 }
9620
9621 rtnl_unlock();
9622}
9623
9624static int bpf_xdp_link_detach(struct bpf_link *link)
9625{
9626 bpf_xdp_link_release(link);
9627 return 0;
9628}
9629
9630static void bpf_xdp_link_dealloc(struct bpf_link *link)
9631{
9632 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9633
9634 kfree(xdp_link);
9635}
9636
9637static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link,
9638 struct seq_file *seq)
9639{
9640 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9641 u32 ifindex = 0;
9642
9643 rtnl_lock();
9644 if (xdp_link->dev)
9645 ifindex = xdp_link->dev->ifindex;
9646 rtnl_unlock();
9647
9648 seq_printf(seq, "ifindex:\t%u\n", ifindex);
9649}
9650
9651static int bpf_xdp_link_fill_link_info(const struct bpf_link *link,
9652 struct bpf_link_info *info)
9653{
9654 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9655 u32 ifindex = 0;
9656
9657 rtnl_lock();
9658 if (xdp_link->dev)
9659 ifindex = xdp_link->dev->ifindex;
9660 rtnl_unlock();
9661
9662 info->xdp.ifindex = ifindex;
9663 return 0;
9664}
9665
9666static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog,
9667 struct bpf_prog *old_prog)
9668{
9669 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9670 enum bpf_xdp_mode mode;
9671 bpf_op_t bpf_op;
9672 int err = 0;
9673
9674 rtnl_lock();
9675
9676 /* link might have been auto-released already, so fail */
9677 if (!xdp_link->dev) {
9678 err = -ENOLINK;
9679 goto out_unlock;
9680 }
9681
9682 if (old_prog && link->prog != old_prog) {
9683 err = -EPERM;
9684 goto out_unlock;
9685 }
9686 old_prog = link->prog;
9687 if (old_prog->type != new_prog->type ||
9688 old_prog->expected_attach_type != new_prog->expected_attach_type) {
9689 err = -EINVAL;
9690 goto out_unlock;
9691 }
9692
9693 if (old_prog == new_prog) {
9694 /* no-op, don't disturb drivers */
9695 bpf_prog_put(new_prog);
9696 goto out_unlock;
9697 }
9698
9699 mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags);
9700 bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode);
9701 err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL,
9702 xdp_link->flags, new_prog);
9703 if (err)
9704 goto out_unlock;
9705
9706 old_prog = xchg(&link->prog, new_prog);
9707 bpf_prog_put(old_prog);
9708
9709out_unlock:
9710 rtnl_unlock();
9711 return err;
9712}
9713
9714static const struct bpf_link_ops bpf_xdp_link_lops = {
9715 .release = bpf_xdp_link_release,
9716 .dealloc = bpf_xdp_link_dealloc,
9717 .detach = bpf_xdp_link_detach,
9718 .show_fdinfo = bpf_xdp_link_show_fdinfo,
9719 .fill_link_info = bpf_xdp_link_fill_link_info,
9720 .update_prog = bpf_xdp_link_update,
9721};
9722
9723int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
9724{
9725 struct net *net = current->nsproxy->net_ns;
9726 struct bpf_link_primer link_primer;
9727 struct netlink_ext_ack extack = {};
9728 struct bpf_xdp_link *link;
9729 struct net_device *dev;
9730 int err, fd;
9731
9732 rtnl_lock();
9733 dev = dev_get_by_index(net, attr->link_create.target_ifindex);
9734 if (!dev) {
9735 rtnl_unlock();
9736 return -EINVAL;
9737 }
9738
9739 link = kzalloc(sizeof(*link), GFP_USER);
9740 if (!link) {
9741 err = -ENOMEM;
9742 goto unlock;
9743 }
9744
9745 bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog);
9746 link->dev = dev;
9747 link->flags = attr->link_create.flags;
9748
9749 err = bpf_link_prime(&link->link, &link_primer);
9750 if (err) {
9751 kfree(link);
9752 goto unlock;
9753 }
9754
9755 err = dev_xdp_attach_link(dev, &extack, link);
9756 rtnl_unlock();
9757
9758 if (err) {
9759 link->dev = NULL;
9760 bpf_link_cleanup(&link_primer);
9761 trace_bpf_xdp_link_attach_failed(extack._msg);
9762 goto out_put_dev;
9763 }
9764
9765 fd = bpf_link_settle(&link_primer);
9766 /* the link itself doesn't hold dev's refcnt, to avoid complicating shutdown */
9767 dev_put(dev);
9768 return fd;
9769
9770unlock:
9771 rtnl_unlock();
9772
9773out_put_dev:
9774 dev_put(dev);
9775 return err;
9776}
9777
9778/**
9779 * dev_change_xdp_fd - set or clear a bpf program for a device rx path
9780 * @dev: device
9781 * @extack: netlink extended ack
9782 * @fd: new program fd or negative value to clear
9783 * @expected_fd: old program fd that userspace expects to replace or clear
9784 * @flags: xdp-related flags
9785 *
9786 * Set or clear a bpf program for a device
9787 */
9788int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
9789 int fd, int expected_fd, u32 flags)
9790{
9791 enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags);
9792 struct bpf_prog *new_prog = NULL, *old_prog = NULL;
9793 int err;
9794
9795 ASSERT_RTNL();
9796
9797 if (fd >= 0) {
9798 new_prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
9799 mode != XDP_MODE_SKB);
9800 if (IS_ERR(new_prog))
9801 return PTR_ERR(new_prog);
9802 }
9803
9804 if (expected_fd >= 0) {
9805 old_prog = bpf_prog_get_type_dev(expected_fd, BPF_PROG_TYPE_XDP,
9806 mode != XDP_MODE_SKB);
9807 if (IS_ERR(old_prog)) {
9808 err = PTR_ERR(old_prog);
9809 old_prog = NULL;
9810 goto err_out;
9811 }
9812 }
9813
9814 err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags);
9815
9816err_out:
9817 if (err && new_prog)
9818 bpf_prog_put(new_prog);
9819 if (old_prog)
9820 bpf_prog_put(old_prog);
9821 return err;
9822}
9823
9824/**
9825 * dev_index_reserve() - allocate an ifindex in a namespace
9826 * @net: the applicable net namespace
9827 * @ifindex: requested ifindex, pass %0 to get one allocated
9828 *
9829 * Allocate an ifindex for a new device. The caller must either use the ifindex
9830 * to store the device (via list_netdevice()) or call dev_index_release()
9831 * to give the index up.
9832 *
9833 * Return: a suitable unique value for a new device interface number or -errno.
9834 */
9835static int dev_index_reserve(struct net *net, u32 ifindex)
9836{
9837 int err;
9838
9839 if (ifindex > INT_MAX) {
9840 DEBUG_NET_WARN_ON_ONCE(1);
9841 return -EINVAL;
9842 }
9843
9844 if (!ifindex)
9845 err = xa_alloc_cyclic(&net->dev_by_index, &ifindex, NULL,
9846 xa_limit_31b, &net->ifindex, GFP_KERNEL);
9847 else
9848 err = xa_insert(&net->dev_by_index, ifindex, NULL, GFP_KERNEL);
9849 if (err < 0)
9850 return err;
9851
9852 return ifindex;
9853}
9854
9855static void dev_index_release(struct net *net, int ifindex)
9856{
9857 /* Expect only unused indexes; unlist_netdevice() removes used ones */
9858 WARN_ON(xa_erase(&net->dev_by_index, ifindex));
9859}
9860
9861 /* Delayed registration/unregistration */
9862LIST_HEAD(net_todo_list);
9863DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
9864atomic_t dev_unreg_count = ATOMIC_INIT(0);
9865
9866static void net_set_todo(struct net_device *dev)
9867{
9868 list_add_tail(&dev->todo_list, &net_todo_list);
9869}
9870
9871static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
9872 struct net_device *upper, netdev_features_t features)
9873{
9874 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
9875 netdev_features_t feature;
9876 int feature_bit;
9877
9878 for_each_netdev_feature(upper_disables, feature_bit) {
9879 feature = __NETIF_F_BIT(feature_bit);
9880 if (!(upper->wanted_features & feature)
9881 && (features & feature)) {
9882 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
9883 &feature, upper->name);
9884 features &= ~feature;
9885 }
9886 }
9887
9888 return features;
9889}
9890
9891static void netdev_sync_lower_features(struct net_device *upper,
9892 struct net_device *lower, netdev_features_t features)
9893{
9894 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
9895 netdev_features_t feature;
9896 int feature_bit;
9897
9898 for_each_netdev_feature(upper_disables, feature_bit) {
9899 feature = __NETIF_F_BIT(feature_bit);
9900 if (!(features & feature) && (lower->features & feature)) {
9901 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
9902 &feature, lower->name);
9903 lower->wanted_features &= ~feature;
9904 __netdev_update_features(lower);
9905
9906 if (unlikely(lower->features & feature))
9907 netdev_WARN(upper, "failed to disable %pNF on %s!\n",
9908 &feature, lower->name);
9909 else
9910 netdev_features_change(lower);
9911 }
9912 }
9913}
9914
9915static bool netdev_has_ip_or_hw_csum(netdev_features_t features)
9916{
9917 netdev_features_t ip_csum_mask = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
9918 bool ip_csum = (features & ip_csum_mask) == ip_csum_mask;
9919 bool hw_csum = features & NETIF_F_HW_CSUM;
9920
9921 return ip_csum || hw_csum;
9922}
9923
9924static netdev_features_t netdev_fix_features(struct net_device *dev,
9925 netdev_features_t features)
9926{
9927 /* Fix illegal checksum combinations */
9928 if ((features & NETIF_F_HW_CSUM) &&
9929 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
9930 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
9931 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
9932 }
9933
9934 /* TSO requires that SG is present as well. */
9935 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
9936 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
9937 features &= ~NETIF_F_ALL_TSO;
9938 }
9939
9940 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
9941 !(features & NETIF_F_IP_CSUM)) {
9942 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
9943 features &= ~NETIF_F_TSO;
9944 features &= ~NETIF_F_TSO_ECN;
9945 }
9946
9947 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
9948 !(features & NETIF_F_IPV6_CSUM)) {
9949 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
9950 features &= ~NETIF_F_TSO6;
9951 }
9952
9953 /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
9954 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
9955 features &= ~NETIF_F_TSO_MANGLEID;
9956
9957 /* TSO ECN requires that TSO is present as well. */
9958 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
9959 features &= ~NETIF_F_TSO_ECN;
9960
9961 /* Software GSO depends on SG. */
9962 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
9963 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
9964 features &= ~NETIF_F_GSO;
9965 }
9966
9967 /* GSO partial features require GSO partial be set */
9968 if ((features & dev->gso_partial_features) &&
9969 !(features & NETIF_F_GSO_PARTIAL)) {
9970 netdev_dbg(dev,
9971 "Dropping partially supported GSO features since no GSO partial.\n");
9972 features &= ~dev->gso_partial_features;
9973 }
9974
9975 if (!(features & NETIF_F_RXCSUM)) {
9976 /* NETIF_F_GRO_HW implies doing RXCSUM since every packet
9977 * successfully merged by hardware must also have the
9978 * checksum verified by hardware. If the user does not
9979 * want to enable RXCSUM, logically, we should disable GRO_HW.
9980 */
9981 if (features & NETIF_F_GRO_HW) {
9982 netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
9983 features &= ~NETIF_F_GRO_HW;
9984 }
9985 }
9986
9987 /* LRO/HW-GRO features cannot be combined with RX-FCS */
9988 if (features & NETIF_F_RXFCS) {
9989 if (features & NETIF_F_LRO) {
9990 netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
9991 features &= ~NETIF_F_LRO;
9992 }
9993
9994 if (features & NETIF_F_GRO_HW) {
9995 netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
9996 features &= ~NETIF_F_GRO_HW;
9997 }
9998 }
9999
10000 if ((features & NETIF_F_GRO_HW) && (features & NETIF_F_LRO)) {
10001 netdev_dbg(dev, "Dropping LRO feature since HW-GRO is requested.\n");
10002 features &= ~NETIF_F_LRO;
10003 }
10004
10005 if ((features & NETIF_F_HW_TLS_TX) && !netdev_has_ip_or_hw_csum(features)) {
10006 netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
10007 features &= ~NETIF_F_HW_TLS_TX;
10008 }
10009
10010 if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
10011 netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
10012 features &= ~NETIF_F_HW_TLS_RX;
10013 }
10014
10015 if ((features & NETIF_F_GSO_UDP_L4) && !netdev_has_ip_or_hw_csum(features)) {
10016 netdev_dbg(dev, "Dropping USO feature since no CSUM feature.\n");
10017 features &= ~NETIF_F_GSO_UDP_L4;
10018 }
10019
10020 return features;
10021}
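
/* Drivers can impose their own constraints before the core fixups above run,
 * via the ndo_fix_features callback (see __netdev_update_features() below).
 * A hedged sketch, with hypothetical foo_* names, dropping TSO on large-MTU
 * configurations:
 *
 *	static netdev_features_t foo_fix_features(struct net_device *dev,
 *						  netdev_features_t features)
 *	{
 *		if (dev->mtu > FOO_TSO_MTU_LIMIT)
 *			features &= ~NETIF_F_ALL_TSO;
 *		return features;
 *	}
 */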
10022
10023int __netdev_update_features(struct net_device *dev)
10024{
10025 struct net_device *upper, *lower;
10026 netdev_features_t features;
10027 struct list_head *iter;
10028 int err = -1;
10029
10030 ASSERT_RTNL();
10031
10032 features = netdev_get_wanted_features(dev);
10033
10034 if (dev->netdev_ops->ndo_fix_features)
10035 features = dev->netdev_ops->ndo_fix_features(dev, features);
10036
10037 /* driver might be less strict about feature dependencies */
10038 features = netdev_fix_features(dev, features);
10039
10040 /* some features can't be enabled if they're off on an upper device */
10041 netdev_for_each_upper_dev_rcu(dev, upper, iter)
10042 features = netdev_sync_upper_features(dev, upper, features);
10043
10044 if (dev->features == features)
10045 goto sync_lower;
10046
10047 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
10048 &dev->features, &features);
10049
10050 if (dev->netdev_ops->ndo_set_features)
10051 err = dev->netdev_ops->ndo_set_features(dev, features);
10052 else
10053 err = 0;
10054
10055 if (unlikely(err < 0)) {
10056 netdev_err(dev,
10057 "set_features() failed (%d); wanted %pNF, left %pNF\n",
10058 err, &features, &dev->features);
10059 /* return non-0 since some features might have changed and
10060 * it's better to fire a spurious notification than miss it
10061 */
10062 return -1;
10063 }
10064
10065sync_lower:
10066 /* some features must be disabled on lower devices when disabled
10067 * on an upper device (think: bonding master or bridge)
10068 */
10069 netdev_for_each_lower_dev(dev, lower, iter)
10070 netdev_sync_lower_features(dev, lower, features);
10071
10072 if (!err) {
10073 netdev_features_t diff = features ^ dev->features;
10074
10075 if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
10076 /* udp_tunnel_{get,drop}_rx_info both need
10077 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
10078 * device, or they won't do anything.
10079 * Thus we need to update dev->features
10080 * *before* calling udp_tunnel_get_rx_info,
10081 * but *after* calling udp_tunnel_drop_rx_info.
10082 */
10083 if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
10084 dev->features = features;
10085 udp_tunnel_get_rx_info(dev);
10086 } else {
10087 udp_tunnel_drop_rx_info(dev);
10088 }
10089 }
10090
10091 if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
10092 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
10093 dev->features = features;
10094 err |= vlan_get_rx_ctag_filter_info(dev);
10095 } else {
10096 vlan_drop_rx_ctag_filter_info(dev);
10097 }
10098 }
10099
10100 if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
10101 if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
10102 dev->features = features;
10103 err |= vlan_get_rx_stag_filter_info(dev);
10104 } else {
10105 vlan_drop_rx_stag_filter_info(dev);
10106 }
10107 }
10108
10109 dev->features = features;
10110 }
10111
10112 return err < 0 ? 0 : 1;
10113}
10114
10115/**
10116 * netdev_update_features - recalculate device features
10117 * @dev: the device to check
10118 *
10119 * Recalculate the dev->features set and send notifications if it
10120 * has changed. Should be called after driver- or hardware-dependent
10121 * conditions that influence the features might have changed.
10122 */
10123void netdev_update_features(struct net_device *dev)
10124{
10125 if (__netdev_update_features(dev))
10126 netdev_features_change(dev);
10127}
10128EXPORT_SYMBOL(netdev_update_features);
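
/* Typical call-site sketch (hypothetical foo_* names): a driver re-evaluates
 * features after an MTU change; ndo_change_mtu already runs under the rtnl
 * lock, as netdev_update_features() requires:
 *
 *	static int foo_change_mtu(struct net_device *dev, int new_mtu)
 *	{
 *		WRITE_ONCE(dev->mtu, new_mtu);
 *		netdev_update_features(dev);
 *		return 0;
 *	}
 */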
10129
10130/**
10131 * netdev_change_features - recalculate device features
10132 * @dev: the device to check
10133 *
10134 * Recalculate the dev->features set and send notifications even
10135 * if they have not changed. Should be called instead of
10136 * netdev_update_features() if dev->vlan_features might also
10137 * have changed, to allow the changes to be propagated to stacked
10138 * VLAN devices.
10139 */
10140void netdev_change_features(struct net_device *dev)
10141{
10142 __netdev_update_features(dev);
10143 netdev_features_change(dev);
10144}
10145EXPORT_SYMBOL(netdev_change_features);
10146
10147/**
10148 * netif_stacked_transfer_operstate - transfer operstate
10149 * @rootdev: the root or lower level device to transfer state from
10150 * @dev: the device to transfer operstate to
10151 *
10152 * Transfer operational state from root to device. This is normally
10153 * called when a stacking relationship exists between the root
10154 * device and the device (a leaf device).
10155 */
10156void netif_stacked_transfer_operstate(const struct net_device *rootdev,
10157 struct net_device *dev)
10158{
10159 if (rootdev->operstate == IF_OPER_DORMANT)
10160 netif_dormant_on(dev);
10161 else
10162 netif_dormant_off(dev);
10163
10164 if (rootdev->operstate == IF_OPER_TESTING)
10165 netif_testing_on(dev);
10166 else
10167 netif_testing_off(dev);
10168
10169 if (netif_carrier_ok(rootdev))
10170 netif_carrier_on(dev);
10171 else
10172 netif_carrier_off(dev);
10173}
10174EXPORT_SYMBOL(netif_stacked_transfer_operstate);
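
/* Sketch of a stacked driver (e.g. a VLAN-like upper device) propagating the
 * lower device's state from its netdevice notifier; lower_dev/upper_dev are
 * illustrative names inside a switch on the notifier event:
 *
 *	case NETDEV_CHANGE:
 *		netif_stacked_transfer_operstate(lower_dev, upper_dev);
 *		break;
 */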
10175
10176static int netif_alloc_rx_queues(struct net_device *dev)
10177{
10178 unsigned int i, count = dev->num_rx_queues;
10179 struct netdev_rx_queue *rx;
10180 size_t sz = count * sizeof(*rx);
10181 int err = 0;
10182
10183 BUG_ON(count < 1);
10184
10185 rx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
10186 if (!rx)
10187 return -ENOMEM;
10188
10189 dev->_rx = rx;
10190
10191 for (i = 0; i < count; i++) {
10192 rx[i].dev = dev;
10193
10194 /* XDP RX-queue setup */
10195 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0);
10196 if (err < 0)
10197 goto err_rxq_info;
10198 }
10199 return 0;
10200
10201err_rxq_info:
10202	/* Roll back successful registrations and free other resources */
10203 while (i--)
10204 xdp_rxq_info_unreg(&rx[i].xdp_rxq);
10205 kvfree(dev->_rx);
10206 dev->_rx = NULL;
10207 return err;
10208}
10209
10210static void netif_free_rx_queues(struct net_device *dev)
10211{
10212 unsigned int i, count = dev->num_rx_queues;
10213
10214	/* If netif_alloc_rx_queues() failed, resources have already been unregistered */
10215 if (!dev->_rx)
10216 return;
10217
10218 for (i = 0; i < count; i++)
10219 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
10220
10221 kvfree(dev->_rx);
10222}
10223
10224static void netdev_init_one_queue(struct net_device *dev,
10225 struct netdev_queue *queue, void *_unused)
10226{
10227 /* Initialize queue lock */
10228 spin_lock_init(&queue->_xmit_lock);
10229 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
10230 queue->xmit_lock_owner = -1;
10231 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
10232 queue->dev = dev;
10233#ifdef CONFIG_BQL
10234 dql_init(&queue->dql, HZ);
10235#endif
10236}
10237
10238static void netif_free_tx_queues(struct net_device *dev)
10239{
10240 kvfree(dev->_tx);
10241}
10242
10243static int netif_alloc_netdev_queues(struct net_device *dev)
10244{
10245 unsigned int count = dev->num_tx_queues;
10246 struct netdev_queue *tx;
10247 size_t sz = count * sizeof(*tx);
10248
10249 if (count < 1 || count > 0xffff)
10250 return -EINVAL;
10251
10252 tx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
10253 if (!tx)
10254 return -ENOMEM;
10255
10256 dev->_tx = tx;
10257
10258 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
10259 spin_lock_init(&dev->tx_global_lock);
10260
10261 return 0;
10262}
10263
10264void netif_tx_stop_all_queues(struct net_device *dev)
10265{
10266 unsigned int i;
10267
10268 for (i = 0; i < dev->num_tx_queues; i++) {
10269 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
10270
10271 netif_tx_stop_queue(txq);
10272 }
10273}
10274EXPORT_SYMBOL(netif_tx_stop_all_queues);
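
/* Sketch (hypothetical foo_* names): drivers typically stop all TX queues in
 * their ndo_stop() before disabling hardware, so the stack stops handing
 * them packets:
 *
 *	static int foo_stop(struct net_device *dev)
 *	{
 *		netif_tx_stop_all_queues(dev);
 *		foo_hw_disable(dev);
 *		return 0;
 *	}
 */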
10275
10276static int netdev_do_alloc_pcpu_stats(struct net_device *dev)
10277{
10278 void __percpu *v;
10279
10280 /* Drivers implementing ndo_get_peer_dev must support tstat
10281 * accounting, so that skb_do_redirect() can bump the dev's
10282 * RX stats upon network namespace switch.
10283 */
10284 if (dev->netdev_ops->ndo_get_peer_dev &&
10285 dev->pcpu_stat_type != NETDEV_PCPU_STAT_TSTATS)
10286 return -EOPNOTSUPP;
10287
10288 switch (dev->pcpu_stat_type) {
10289 case NETDEV_PCPU_STAT_NONE:
10290 return 0;
10291 case NETDEV_PCPU_STAT_LSTATS:
10292 v = dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
10293 break;
10294 case NETDEV_PCPU_STAT_TSTATS:
10295 v = dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
10296 break;
10297 case NETDEV_PCPU_STAT_DSTATS:
10298 v = dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
10299 break;
10300 default:
10301 return -EINVAL;
10302 }
10303
10304 return v ? 0 : -ENOMEM;
10305}
10306
10307static void netdev_do_free_pcpu_stats(struct net_device *dev)
10308{
10309 switch (dev->pcpu_stat_type) {
10310 case NETDEV_PCPU_STAT_NONE:
10311 return;
10312 case NETDEV_PCPU_STAT_LSTATS:
10313 free_percpu(dev->lstats);
10314 break;
10315 case NETDEV_PCPU_STAT_TSTATS:
10316 free_percpu(dev->tstats);
10317 break;
10318 case NETDEV_PCPU_STAT_DSTATS:
10319 free_percpu(dev->dstats);
10320 break;
10321 }
10322}
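
/* Sketch: a driver opts into core-managed per-cpu stats by setting
 * pcpu_stat_type in its setup callback, before register_netdevice(); the
 * core then allocates and frees dev->tstats (or lstats/dstats) for it.
 * foo_setup is a hypothetical name:
 *
 *	static void foo_setup(struct net_device *dev)
 *	{
 *		dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
 *	}
 */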
10323
10324/**
10325 * register_netdevice() - register a network device
10326 * @dev: device to register
10327 *
10328 * Take a prepared network device structure and make it externally accessible.
10329 * A %NETDEV_REGISTER message is sent to the netdev notifier chain.
10330 * Callers must hold the rtnl lock - you may want register_netdev()
10331 * instead of this.
10332 */
10333int register_netdevice(struct net_device *dev)
10334{
10335 int ret;
10336 struct net *net = dev_net(dev);
10337
10338 BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
10339 NETDEV_FEATURE_COUNT);
10340 BUG_ON(dev_boot_phase);
10341 ASSERT_RTNL();
10342
10343 might_sleep();
10344
10345	/* When net_devices are persistent, this will be fatal. */
10346 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
10347 BUG_ON(!net);
10348
10349 ret = ethtool_check_ops(dev->ethtool_ops);
10350 if (ret)
10351 return ret;
10352
10353 /* rss ctx ID 0 is reserved for the default context, start from 1 */
10354 xa_init_flags(&dev->ethtool->rss_ctx, XA_FLAGS_ALLOC1);
10355 mutex_init(&dev->ethtool->rss_lock);
10356
10357 spin_lock_init(&dev->addr_list_lock);
10358 netdev_set_addr_lockdep_class(dev);
10359
10360 ret = dev_get_valid_name(net, dev, dev->name);
10361 if (ret < 0)
10362 goto out;
10363
10364 ret = -ENOMEM;
10365 dev->name_node = netdev_name_node_head_alloc(dev);
10366 if (!dev->name_node)
10367 goto out;
10368
10369 /* Init, if this function is available */
10370 if (dev->netdev_ops->ndo_init) {
10371 ret = dev->netdev_ops->ndo_init(dev);
10372 if (ret) {
10373 if (ret > 0)
10374 ret = -EIO;
10375 goto err_free_name;
10376 }
10377 }
10378
10379 if (((dev->hw_features | dev->features) &
10380 NETIF_F_HW_VLAN_CTAG_FILTER) &&
10381 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
10382 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
10383 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
10384 ret = -EINVAL;
10385 goto err_uninit;
10386 }
10387
10388 ret = netdev_do_alloc_pcpu_stats(dev);
10389 if (ret)
10390 goto err_uninit;
10391
10392 ret = dev_index_reserve(net, dev->ifindex);
10393 if (ret < 0)
10394 goto err_free_pcpu;
10395 dev->ifindex = ret;
10396
10397 /* Transfer changeable features to wanted_features and enable
10398 * software offloads (GSO and GRO).
10399 */
10400 dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF);
10401 dev->features |= NETIF_F_SOFT_FEATURES;
10402
10403 if (dev->udp_tunnel_nic_info) {
10404 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
10405 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
10406 }
10407
10408 dev->wanted_features = dev->features & dev->hw_features;
10409
10410 if (!(dev->flags & IFF_LOOPBACK))
10411 dev->hw_features |= NETIF_F_NOCACHE_COPY;
10412
10413 /* If IPv4 TCP segmentation offload is supported we should also
10414 * allow the device to enable segmenting the frame with the option
10415 * of ignoring a static IP ID value. This doesn't enable the
10416 * feature itself but allows the user to enable it later.
10417 */
10418 if (dev->hw_features & NETIF_F_TSO)
10419 dev->hw_features |= NETIF_F_TSO_MANGLEID;
10420 if (dev->vlan_features & NETIF_F_TSO)
10421 dev->vlan_features |= NETIF_F_TSO_MANGLEID;
10422 if (dev->mpls_features & NETIF_F_TSO)
10423 dev->mpls_features |= NETIF_F_TSO_MANGLEID;
10424 if (dev->hw_enc_features & NETIF_F_TSO)
10425 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
10426
10427 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
10428 */
10429 dev->vlan_features |= NETIF_F_HIGHDMA;
10430
10431 /* Make NETIF_F_SG inheritable to tunnel devices.
10432 */
10433 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
10434
10435 /* Make NETIF_F_SG inheritable to MPLS.
10436 */
10437 dev->mpls_features |= NETIF_F_SG;
10438
10439 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
10440 ret = notifier_to_errno(ret);
10441 if (ret)
10442 goto err_ifindex_release;
10443
10444 ret = netdev_register_kobject(dev);
10445
10446 WRITE_ONCE(dev->reg_state, ret ? NETREG_UNREGISTERED : NETREG_REGISTERED);
10447
10448 if (ret)
10449 goto err_uninit_notify;
10450
10451 __netdev_update_features(dev);
10452
10453 /*
10454	 * Default initial state at registration is that the
10455 * device is present.
10456 */
10457
10458 set_bit(__LINK_STATE_PRESENT, &dev->state);
10459
10460 linkwatch_init_dev(dev);
10461
10462 dev_init_scheduler(dev);
10463
10464 netdev_hold(dev, &dev->dev_registered_tracker, GFP_KERNEL);
10465 list_netdevice(dev);
10466
10467 add_device_randomness(dev->dev_addr, dev->addr_len);
10468
10469	/* If the device has a permanent device address, the driver should
10470	 * set dev_addr and leave addr_assign_type at NET_ADDR_PERM
10471	 * (the default value).
10472 */
10473 if (dev->addr_assign_type == NET_ADDR_PERM)
10474 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10475
10476 /* Notify protocols, that a new device appeared. */
10477 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
10478 ret = notifier_to_errno(ret);
10479 if (ret) {
10480 /* Expect explicit free_netdev() on failure */
10481 dev->needs_free_netdev = false;
10482 unregister_netdevice_queue(dev, NULL);
10483 goto out;
10484 }
10485 /*
10486 * Prevent userspace races by waiting until the network
10487 * device is fully setup before sending notifications.
10488 */
10489 if (!dev->rtnl_link_ops ||
10490 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
10491 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL);
10492
10493out:
10494 return ret;
10495
10496err_uninit_notify:
10497 call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
10498err_ifindex_release:
10499 dev_index_release(net, dev->ifindex);
10500err_free_pcpu:
10501 netdev_do_free_pcpu_stats(dev);
10502err_uninit:
10503 if (dev->netdev_ops->ndo_uninit)
10504 dev->netdev_ops->ndo_uninit(dev);
10505 if (dev->priv_destructor)
10506 dev->priv_destructor(dev);
10507err_free_name:
10508 netdev_name_node_free(dev->name_node);
10509 goto out;
10510}
10511EXPORT_SYMBOL(register_netdevice);
10512
10513/* Initialize the core of a dummy net device.
10514 * This is useful if you are calling this function after alloc_netdev(),
10515 * since it does not memset the net_device fields.
10516 */
10517static void init_dummy_netdev_core(struct net_device *dev)
10518{
10519 /* make sure we BUG if trying to hit standard
10520 * register/unregister code path
10521 */
10522 dev->reg_state = NETREG_DUMMY;
10523
10524 /* NAPI wants this */
10525 INIT_LIST_HEAD(&dev->napi_list);
10526
10527 /* a dummy interface is started by default */
10528 set_bit(__LINK_STATE_PRESENT, &dev->state);
10529 set_bit(__LINK_STATE_START, &dev->state);
10530
10531 /* napi_busy_loop stats accounting wants this */
10532 dev_net_set(dev, &init_net);
10533
10534	/* Note: We don't allocate pcpu_refcnt for dummy devices,
10535	 * because users of this 'device' don't need to change
10536 * its refcount.
10537 */
10538}
10539
10540/**
10541 * init_dummy_netdev - init a dummy network device for NAPI
10542 * @dev: device to init
10543 *
10544 * This takes a network device structure and initializes the minimum
10545 * amount of fields so it can be used to schedule NAPI polls without
10546 * registering a full-blown interface. This is to be used by drivers
10547 * that need to tie several hardware interfaces to a single NAPI
10548 * poll scheduler due to HW limitations.
10549 */
10550void init_dummy_netdev(struct net_device *dev)
10551{
10552 /* Clear everything. Note we don't initialize spinlocks
10553 * as they aren't supposed to be taken by any of the
10554 * NAPI code and this dummy netdev is supposed to be
10555 * only ever used for NAPI polls
10556 */
10557 memset(dev, 0, sizeof(struct net_device));
10558 init_dummy_netdev_core(dev);
10559}
10560EXPORT_SYMBOL_GPL(init_dummy_netdev);
10561
10562/**
10563 * register_netdev - register a network device
10564 * @dev: device to register
10565 *
10566 * Take a completed network device structure and add it to the kernel
10567 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
10568 * chain. 0 is returned on success. A negative errno code is returned
10569 * on a failure to set up the device, or if the name is a duplicate.
10570 *
10571 * This is a wrapper around register_netdevice that takes the rtnl semaphore
10572 * and expands the device name if you passed a format string to
10573 * alloc_netdev.
10574 */
10575int register_netdev(struct net_device *dev)
10576{
10577 int err;
10578
10579 if (rtnl_lock_killable())
10580 return -EINTR;
10581 err = register_netdevice(dev);
10582 rtnl_unlock();
10583 return err;
10584}
10585EXPORT_SYMBOL(register_netdev);
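
/* End-to-end lifecycle sketch for a simple driver (all foo_* names are
 * hypothetical; error handling trimmed):
 *
 *	dev = alloc_netdev(sizeof(struct foo_priv), "foo%d",
 *			   NET_NAME_UNKNOWN, foo_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *	...
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */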
10586
10587int netdev_refcnt_read(const struct net_device *dev)
10588{
10589#ifdef CONFIG_PCPU_DEV_REFCNT
10590 int i, refcnt = 0;
10591
10592 for_each_possible_cpu(i)
10593 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
10594 return refcnt;
10595#else
10596 return refcount_read(&dev->dev_refcnt);
10597#endif
10598}
10599EXPORT_SYMBOL(netdev_refcnt_read);
10600
10601int netdev_unregister_timeout_secs __read_mostly = 10;
10602
10603#define WAIT_REFS_MIN_MSECS 1
10604#define WAIT_REFS_MAX_MSECS 250
10605/**
10606 * netdev_wait_allrefs_any - wait until all references are gone.
10607 * @list: list of net_devices to wait on
10608 *
10609 * This is called when unregistering network devices.
10610 *
10611 * Any protocol or device that holds a reference should register
10612 * for netdevice notification, and cleanup and put back the
10613 * reference if they receive an UNREGISTER event.
10614 * We can get stuck here if buggy protocols don't correctly
10615 * call dev_put.
10616 */
10617static struct net_device *netdev_wait_allrefs_any(struct list_head *list)
10618{
10619 unsigned long rebroadcast_time, warning_time;
10620 struct net_device *dev;
10621 int wait = 0;
10622
10623 rebroadcast_time = warning_time = jiffies;
10624
10625 list_for_each_entry(dev, list, todo_list)
10626 if (netdev_refcnt_read(dev) == 1)
10627 return dev;
10628
10629 while (true) {
10630 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
10631 rtnl_lock();
10632
10633 /* Rebroadcast unregister notification */
10634 list_for_each_entry(dev, list, todo_list)
10635 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
10636
10637 __rtnl_unlock();
10638 rcu_barrier();
10639 rtnl_lock();
10640
10641 list_for_each_entry(dev, list, todo_list)
10642 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
10643 &dev->state)) {
10644 /* We must not have linkwatch events
10645 * pending on unregister. If this
10646 * happens, we simply run the queue
10647 * unscheduled, resulting in a noop
10648 * for this device.
10649 */
10650 linkwatch_run_queue();
10651 break;
10652 }
10653
10654 __rtnl_unlock();
10655
10656 rebroadcast_time = jiffies;
10657 }
10658
10659 rcu_barrier();
10660
10661 if (!wait) {
10662 wait = WAIT_REFS_MIN_MSECS;
10663 } else {
10664 msleep(wait);
10665 wait = min(wait << 1, WAIT_REFS_MAX_MSECS);
10666 }
10667
10668 list_for_each_entry(dev, list, todo_list)
10669 if (netdev_refcnt_read(dev) == 1)
10670 return dev;
10671
10672 if (time_after(jiffies, warning_time +
10673 READ_ONCE(netdev_unregister_timeout_secs) * HZ)) {
10674 list_for_each_entry(dev, list, todo_list) {
10675 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
10676 dev->name, netdev_refcnt_read(dev));
10677 ref_tracker_dir_print(&dev->refcnt_tracker, 10);
10678 }
10679
10680 warning_time = jiffies;
10681 }
10682 }
10683}
10684
10685/* The sequence is:
10686 *
10687 * rtnl_lock();
10688 * ...
10689 * register_netdevice(x1);
10690 * register_netdevice(x2);
10691 * ...
10692 * unregister_netdevice(y1);
10693 * unregister_netdevice(y2);
10694 * ...
10695 * rtnl_unlock();
10696 * free_netdev(y1);
10697 * free_netdev(y2);
10698 *
10699 * We are invoked by rtnl_unlock().
10700 * This allows us to deal with problems:
10701 * 1) We can delete sysfs objects which invoke hotplug
10702 * without deadlocking with linkwatch via keventd.
10703 * 2) Since we run with the RTNL semaphore not held, we can sleep
10704 * safely in order to wait for the netdev refcnt to drop to zero.
10705 *
10706 * We must not return until all unregister events added during
10707 * the interval the lock was held have been completed.
10708 */
10709void netdev_run_todo(void)
10710{
10711 struct net_device *dev, *tmp;
10712 struct list_head list;
10713 int cnt;
10714#ifdef CONFIG_LOCKDEP
10715 struct list_head unlink_list;
10716
10717 list_replace_init(&net_unlink_list, &unlink_list);
10718
10719 while (!list_empty(&unlink_list)) {
10720 struct net_device *dev = list_first_entry(&unlink_list,
10721 struct net_device,
10722 unlink_list);
10723 list_del_init(&dev->unlink_list);
10724 dev->nested_level = dev->lower_level - 1;
10725 }
10726#endif
10727
10728 /* Snapshot list, allow later requests */
10729 list_replace_init(&net_todo_list, &list);
10730
10731 __rtnl_unlock();
10732
10733 /* Wait for rcu callbacks to finish before next phase */
10734 if (!list_empty(&list))
10735 rcu_barrier();
10736
10737 list_for_each_entry_safe(dev, tmp, &list, todo_list) {
10738 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
10739 netdev_WARN(dev, "run_todo but not unregistering\n");
10740 list_del(&dev->todo_list);
10741 continue;
10742 }
10743
10744 WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERED);
10745 linkwatch_sync_dev(dev);
10746 }
10747
10748 cnt = 0;
10749 while (!list_empty(&list)) {
10750 dev = netdev_wait_allrefs_any(&list);
10751 list_del(&dev->todo_list);
10752
10753 /* paranoia */
10754 BUG_ON(netdev_refcnt_read(dev) != 1);
10755 BUG_ON(!list_empty(&dev->ptype_all));
10756 BUG_ON(!list_empty(&dev->ptype_specific));
10757 WARN_ON(rcu_access_pointer(dev->ip_ptr));
10758 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
10759
10760 netdev_do_free_pcpu_stats(dev);
10761 if (dev->priv_destructor)
10762 dev->priv_destructor(dev);
10763 if (dev->needs_free_netdev)
10764 free_netdev(dev);
10765
10766 cnt++;
10767
10768 /* Free network device */
10769 kobject_put(&dev->dev.kobj);
10770 }
10771 if (cnt && atomic_sub_and_test(cnt, &dev_unreg_count))
10772 wake_up(&netdev_unregistering_wq);
10773}
10774
10775/* Collate per-cpu network dstats statistics
10776 *
10777 * Read per-cpu network statistics from dev->dstats and populate the related
10778 * fields in @s.
10779 */
10780static void dev_fetch_dstats(struct rtnl_link_stats64 *s,
10781 const struct pcpu_dstats __percpu *dstats)
10782{
10783 int cpu;
10784
10785 for_each_possible_cpu(cpu) {
10786 u64 rx_packets, rx_bytes, rx_drops;
10787 u64 tx_packets, tx_bytes, tx_drops;
10788 const struct pcpu_dstats *stats;
10789 unsigned int start;
10790
10791 stats = per_cpu_ptr(dstats, cpu);
10792 do {
10793 start = u64_stats_fetch_begin(&stats->syncp);
10794 rx_packets = u64_stats_read(&stats->rx_packets);
10795 rx_bytes = u64_stats_read(&stats->rx_bytes);
10796 rx_drops = u64_stats_read(&stats->rx_drops);
10797 tx_packets = u64_stats_read(&stats->tx_packets);
10798 tx_bytes = u64_stats_read(&stats->tx_bytes);
10799 tx_drops = u64_stats_read(&stats->tx_drops);
10800 } while (u64_stats_fetch_retry(&stats->syncp, start));
10801
10802 s->rx_packets += rx_packets;
10803 s->rx_bytes += rx_bytes;
10804 s->rx_dropped += rx_drops;
10805 s->tx_packets += tx_packets;
10806 s->tx_bytes += tx_bytes;
10807 s->tx_dropped += tx_drops;
10808 }
10809}
10810
10811/* ndo_get_stats64 implementation for dstats-based accounting.
10812 *
10813 * Populate @s from dev->stats and dev->dstats. This is used internally by the
10814 * core for NETDEV_PCPU_STAT_DSTATS-type stats collection.
10815 */
10816static void dev_get_dstats64(const struct net_device *dev,
10817 struct rtnl_link_stats64 *s)
10818{
10819 netdev_stats_to_stats64(s, &dev->stats);
10820 dev_fetch_dstats(s, dev->dstats);
10821}
10822
10823/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
10824 * all the same fields in the same order as net_device_stats, with only
10825 * the type differing, but rtnl_link_stats64 may have additional fields
10826 * at the end for newer counters.
10827 */
10828void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
10829 const struct net_device_stats *netdev_stats)
10830{
10831 size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t);
10832 const atomic_long_t *src = (atomic_long_t *)netdev_stats;
10833 u64 *dst = (u64 *)stats64;
10834
10835 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
10836 for (i = 0; i < n; i++)
10837 dst[i] = (unsigned long)atomic_long_read(&src[i]);
10838 /* zero out counters that only exist in rtnl_link_stats64 */
10839 memset((char *)stats64 + n * sizeof(u64), 0,
10840 sizeof(*stats64) - n * sizeof(u64));
10841}
10842EXPORT_SYMBOL(netdev_stats_to_stats64);
10843
10844static __cold struct net_device_core_stats __percpu *netdev_core_stats_alloc(
10845 struct net_device *dev)
10846{
10847 struct net_device_core_stats __percpu *p;
10848
10849 p = alloc_percpu_gfp(struct net_device_core_stats,
10850 GFP_ATOMIC | __GFP_NOWARN);
10851
10852 if (p && cmpxchg(&dev->core_stats, NULL, p))
10853 free_percpu(p);
10854
10855 /* This READ_ONCE() pairs with the cmpxchg() above */
10856 return READ_ONCE(dev->core_stats);
10857	/* Expect only unused indexes; unlist_netdevice() removes the used ones */
10858
10859noinline void netdev_core_stats_inc(struct net_device *dev, u32 offset)
10860{
10861 /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
10862 struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats);
10863 unsigned long __percpu *field;
10864
10865 if (unlikely(!p)) {
10866 p = netdev_core_stats_alloc(dev);
10867 if (!p)
10868 return;
10869 }
10870
10871 field = (__force unsigned long __percpu *)((__force void *)p + offset);
10872 this_cpu_inc(*field);
10873}
10874EXPORT_SYMBOL_GPL(netdev_core_stats_inc);
10875
10876/**
10877 * dev_get_stats - get network device statistics
10878 * @dev: device to get statistics from
10879 * @storage: place to store stats
10880 *
10881 * Get network statistics from device. Return @storage.
10882 * The device driver may provide its own method by setting
10883 * dev->netdev_ops->ndo_get_stats64 or dev->netdev_ops->ndo_get_stats;
10884 * otherwise the internal statistics structure is used.
10885 */
10886struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
10887 struct rtnl_link_stats64 *storage)
10888{
10889 const struct net_device_ops *ops = dev->netdev_ops;
10890 const struct net_device_core_stats __percpu *p;
10891
10892 if (ops->ndo_get_stats64) {
10893 memset(storage, 0, sizeof(*storage));
10894 ops->ndo_get_stats64(dev, storage);
10895 } else if (ops->ndo_get_stats) {
10896 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
10897 } else if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_TSTATS) {
10898 dev_get_tstats64(dev, storage);
10899 } else if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_DSTATS) {
10900 dev_get_dstats64(dev, storage);
10901 } else {
10902 netdev_stats_to_stats64(storage, &dev->stats);
10903 }
10904
10905 /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
10906 p = READ_ONCE(dev->core_stats);
10907 if (p) {
10908 const struct net_device_core_stats *core_stats;
10909 int i;
10910
10911 for_each_possible_cpu(i) {
10912 core_stats = per_cpu_ptr(p, i);
10913 storage->rx_dropped += READ_ONCE(core_stats->rx_dropped);
10914 storage->tx_dropped += READ_ONCE(core_stats->tx_dropped);
10915 storage->rx_nohandler += READ_ONCE(core_stats->rx_nohandler);
10916 storage->rx_otherhost_dropped += READ_ONCE(core_stats->rx_otherhost_dropped);
10917 }
10918 }
10919 return storage;
10920}
10921EXPORT_SYMBOL(dev_get_stats);
10922
10923/**
10924 * dev_fetch_sw_netstats - get per-cpu network device statistics
10925 * @s: place to store stats
10926 * @netstats: per-cpu network stats to read from
10927 *
10928 * Read per-cpu network statistics and populate the related fields in @s.
10929 */
10930void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
10931 const struct pcpu_sw_netstats __percpu *netstats)
10932{
10933 int cpu;
10934
10935 for_each_possible_cpu(cpu) {
10936 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
10937 const struct pcpu_sw_netstats *stats;
10938 unsigned int start;
10939
10940 stats = per_cpu_ptr(netstats, cpu);
10941 do {
10942 start = u64_stats_fetch_begin(&stats->syncp);
10943 rx_packets = u64_stats_read(&stats->rx_packets);
10944 rx_bytes = u64_stats_read(&stats->rx_bytes);
10945 tx_packets = u64_stats_read(&stats->tx_packets);
10946 tx_bytes = u64_stats_read(&stats->tx_bytes);
10947 } while (u64_stats_fetch_retry(&stats->syncp, start));
10948
10949 s->rx_packets += rx_packets;
10950 s->rx_bytes += rx_bytes;
10951 s->tx_packets += tx_packets;
10952 s->tx_bytes += tx_bytes;
10953 }
10954}
10955EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats);
10956
10957/**
10958 * dev_get_tstats64 - ndo_get_stats64 implementation
10959 * @dev: device to get statistics from
10960 * @s: place to store stats
10961 *
10962 * Populate @s from dev->stats and dev->tstats. Can be used as
10963 * ndo_get_stats64() callback.
10964 */
10965void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s)
10966{
10967 netdev_stats_to_stats64(s, &dev->stats);
10968 dev_fetch_sw_netstats(s, dev->tstats);
10969}
10970EXPORT_SYMBOL_GPL(dev_get_tstats64);
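
/* Sketch: a tstats-based driver can either point its ops directly at the
 * helper above, or set pcpu_stat_type and let dev_get_stats() pick it up.
 * foo_netdev_ops is a hypothetical name:
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		...
 *		.ndo_get_stats64 = dev_get_tstats64,
 *	};
 */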
10971
10972struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
10973{
10974 struct netdev_queue *queue = dev_ingress_queue(dev);
10975
10976#ifdef CONFIG_NET_CLS_ACT
10977 if (queue)
10978 return queue;
10979 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
10980 if (!queue)
10981 return NULL;
10982 netdev_init_one_queue(dev, queue, NULL);
10983 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
10984 RCU_INIT_POINTER(queue->qdisc_sleeping, &noop_qdisc);
10985 rcu_assign_pointer(dev->ingress_queue, queue);
10986#endif
10987 return queue;
10988}
10989
10990static const struct ethtool_ops default_ethtool_ops;
10991
10992void netdev_set_default_ethtool_ops(struct net_device *dev,
10993 const struct ethtool_ops *ops)
10994{
10995 if (dev->ethtool_ops == &default_ethtool_ops)
10996 dev->ethtool_ops = ops;
10997}
10998EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
10999
11000/**
11001 * netdev_sw_irq_coalesce_default_on() - enable SW IRQ coalescing by default
11002 * @dev: netdev to enable the IRQ coalescing on
11003 *
11004 * Sets a conservative default for SW IRQ coalescing. Users can use
11005 * sysfs attributes to override the default values.
11006 */
11007void netdev_sw_irq_coalesce_default_on(struct net_device *dev)
11008{
11009 WARN_ON(dev->reg_state == NETREG_REGISTERED);
11010
11011 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
11012 dev->gro_flush_timeout = 20000;
11013 dev->napi_defer_hard_irqs = 1;
11014 }
11015}
11016EXPORT_SYMBOL_GPL(netdev_sw_irq_coalesce_default_on);
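
/* Sketch (foo_priv is hypothetical): called from a driver's probe path,
 * after alloc_netdev*() and before register_netdev(), per the WARN_ON above:
 *
 *	dev = alloc_etherdev(sizeof(struct foo_priv));
 *	...
 *	netdev_sw_irq_coalesce_default_on(dev);
 *	err = register_netdev(dev);
 */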
11017
11018/**
11019 * alloc_netdev_mqs - allocate network device
11020 * @sizeof_priv: size of private data to allocate space for
11021 * @name: device name format string
11022 * @name_assign_type: origin of device name
11023 * @setup: callback to initialize device
11024 * @txqs: the number of TX subqueues to allocate
11025 * @rxqs: the number of RX subqueues to allocate
11026 *
11027 * Allocates a struct net_device with private data area for driver use
11028 * and performs basic initialization. Also allocates subqueue structs
11029 * for each queue on the device.
11030 */
11031struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
11032 unsigned char name_assign_type,
11033 void (*setup)(struct net_device *),
11034 unsigned int txqs, unsigned int rxqs)
11035{
11036 struct net_device *dev;
11037
11038 BUG_ON(strlen(name) >= sizeof(dev->name));
11039
11040 if (txqs < 1) {
11041 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
11042 return NULL;
11043 }
11044
11045 if (rxqs < 1) {
11046 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
11047 return NULL;
11048 }
11049
11050 dev = kvzalloc(struct_size(dev, priv, sizeof_priv),
11051 GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
11052 if (!dev)
11053 return NULL;
11054
11055 dev->priv_len = sizeof_priv;
11056
11057 ref_tracker_dir_init(&dev->refcnt_tracker, 128, name);
11058#ifdef CONFIG_PCPU_DEV_REFCNT
11059 dev->pcpu_refcnt = alloc_percpu(int);
11060 if (!dev->pcpu_refcnt)
11061 goto free_dev;
11062 __dev_hold(dev);
11063#else
11064 refcount_set(&dev->dev_refcnt, 1);
11065#endif
11066
11067 if (dev_addr_init(dev))
11068 goto free_pcpu;
11069
11070 dev_mc_init(dev);
11071 dev_uc_init(dev);
11072
11073 dev_net_set(dev, &init_net);
11074
11075 dev->gso_max_size = GSO_LEGACY_MAX_SIZE;
11076 dev->xdp_zc_max_segs = 1;
11077 dev->gso_max_segs = GSO_MAX_SEGS;
11078 dev->gro_max_size = GRO_LEGACY_MAX_SIZE;
11079 dev->gso_ipv4_max_size = GSO_LEGACY_MAX_SIZE;
11080 dev->gro_ipv4_max_size = GRO_LEGACY_MAX_SIZE;
11081 dev->tso_max_size = TSO_LEGACY_MAX_SIZE;
11082 dev->tso_max_segs = TSO_MAX_SEGS;
11083 dev->upper_level = 1;
11084 dev->lower_level = 1;
11085#ifdef CONFIG_LOCKDEP
11086 dev->nested_level = 0;
11087 INIT_LIST_HEAD(&dev->unlink_list);
11088#endif
11089
11090 INIT_LIST_HEAD(&dev->napi_list);
11091 INIT_LIST_HEAD(&dev->unreg_list);
11092 INIT_LIST_HEAD(&dev->close_list);
11093 INIT_LIST_HEAD(&dev->link_watch_list);
11094 INIT_LIST_HEAD(&dev->adj_list.upper);
11095 INIT_LIST_HEAD(&dev->adj_list.lower);
11096 INIT_LIST_HEAD(&dev->ptype_all);
11097 INIT_LIST_HEAD(&dev->ptype_specific);
11098 INIT_LIST_HEAD(&dev->net_notifier_list);
11099#ifdef CONFIG_NET_SCHED
11100 hash_init(dev->qdisc_hash);
11101#endif
11102 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
11103 setup(dev);
11104
11105 if (!dev->tx_queue_len) {
11106 dev->priv_flags |= IFF_NO_QUEUE;
11107 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
11108 }
11109
11110 dev->num_tx_queues = txqs;
11111 dev->real_num_tx_queues = txqs;
11112 if (netif_alloc_netdev_queues(dev))
11113 goto free_all;
11114
11115 dev->num_rx_queues = rxqs;
11116 dev->real_num_rx_queues = rxqs;
11117 if (netif_alloc_rx_queues(dev))
11118 goto free_all;
11119 dev->ethtool = kzalloc(sizeof(*dev->ethtool), GFP_KERNEL_ACCOUNT);
11120 if (!dev->ethtool)
11121 goto free_all;
11122
11123 strcpy(dev->name, name);
11124 dev->name_assign_type = name_assign_type;
11125 dev->group = INIT_NETDEV_GROUP;
11126 if (!dev->ethtool_ops)
11127 dev->ethtool_ops = &default_ethtool_ops;
11128
11129 nf_hook_netdev_init(dev);
11130
11131 return dev;
11132
11133free_all:
11134 free_netdev(dev);
11135 return NULL;
11136
11137free_pcpu:
11138#ifdef CONFIG_PCPU_DEV_REFCNT
11139 free_percpu(dev->pcpu_refcnt);
11140free_dev:
11141#endif
11142 kvfree(dev);
11143 return NULL;
11144}
11145EXPORT_SYMBOL(alloc_netdev_mqs);
11146
11147/**
11148 * free_netdev - free network device
11149 * @dev: device
11150 *
11151 * This function does the last stage of destroying an allocated device
11152 * interface. The reference to the device object is released. If this
11153 * is the last reference then it will be freed. Must be called in process
11154 * context.
11155 */
11156void free_netdev(struct net_device *dev)
11157{
11158 struct napi_struct *p, *n;
11159
11160 might_sleep();
11161
11162 /* When called immediately after register_netdevice() failed the unwind
11163 * handling may still be dismantling the device. Handle that case by
11164 * deferring the free.
11165 */
11166 if (dev->reg_state == NETREG_UNREGISTERING) {
11167 ASSERT_RTNL();
11168 dev->needs_free_netdev = true;
11169 return;
11170 }
11171
11172 kfree(dev->ethtool);
11173 netif_free_tx_queues(dev);
11174 netif_free_rx_queues(dev);
11175
11176 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
11177
11178 /* Flush device addresses */
11179 dev_addr_flush(dev);
11180
11181 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
11182 netif_napi_del(p);
11183
11184 ref_tracker_dir_exit(&dev->refcnt_tracker);
11185#ifdef CONFIG_PCPU_DEV_REFCNT
11186 free_percpu(dev->pcpu_refcnt);
11187 dev->pcpu_refcnt = NULL;
11188#endif
11189 free_percpu(dev->core_stats);
11190 dev->core_stats = NULL;
11191 free_percpu(dev->xdp_bulkq);
11192 dev->xdp_bulkq = NULL;
11193
11194 /* Compatibility with error handling in drivers */
11195 if (dev->reg_state == NETREG_UNINITIALIZED ||
11196 dev->reg_state == NETREG_DUMMY) {
11197 kvfree(dev);
11198 return;
11199 }
11200
11201 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
11202 WRITE_ONCE(dev->reg_state, NETREG_RELEASED);
11203
11204 /* will free via device release */
11205 put_device(&dev->dev);
11206}
11207EXPORT_SYMBOL(free_netdev);
11208
11209/**
11210 * alloc_netdev_dummy - Allocate and initialize a dummy net device.
11211 * @sizeof_priv: size of private data to allocate space for
11212 *
11213 * Return: the allocated net_device on success, NULL otherwise
11214 */
11215struct net_device *alloc_netdev_dummy(int sizeof_priv)
11216{
11217 return alloc_netdev(sizeof_priv, "dummy#", NET_NAME_UNKNOWN,
11218 init_dummy_netdev_core);
11219}
11220EXPORT_SYMBOL_GPL(alloc_netdev_dummy);
11221
11222/**
11223 * synchronize_net - Synchronize with packet receive processing
11224 *
11225 * Wait for packets currently being received to be done.
11226 * Does not block later packets from starting.
11227 */
11228void synchronize_net(void)
11229{
11230 might_sleep();
11231 if (rtnl_is_locked())
11232 synchronize_rcu_expedited();
11233 else
11234 synchronize_rcu();
11235}
11236EXPORT_SYMBOL(synchronize_net);
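
/* Typical pattern sketch: publish a new RCU-protected pointer, wait for
 * in-flight receive paths with synchronize_net(), then free the old data.
 * dev_cfg, old_cfg and new_cfg are hypothetical names:
 *
 *	old_cfg = rtnl_dereference(dev_cfg);
 *	rcu_assign_pointer(dev_cfg, new_cfg);
 *	synchronize_net();
 *	kfree(old_cfg);
 */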
11237
11238static void netdev_rss_contexts_free(struct net_device *dev)
11239{
11240 struct ethtool_rxfh_context *ctx;
11241 unsigned long context;
11242
11243 mutex_lock(&dev->ethtool->rss_lock);
11244 xa_for_each(&dev->ethtool->rss_ctx, context, ctx) {
11245 struct ethtool_rxfh_param rxfh;
11246
11247 rxfh.indir = ethtool_rxfh_context_indir(ctx);
11248 rxfh.key = ethtool_rxfh_context_key(ctx);
11249 rxfh.hfunc = ctx->hfunc;
11250 rxfh.input_xfrm = ctx->input_xfrm;
11251 rxfh.rss_context = context;
11252 rxfh.rss_delete = true;
11253
11254 xa_erase(&dev->ethtool->rss_ctx, context);
11255 if (dev->ethtool_ops->create_rxfh_context)
11256 dev->ethtool_ops->remove_rxfh_context(dev, ctx,
11257 context, NULL);
11258 else
11259 dev->ethtool_ops->set_rxfh(dev, &rxfh, NULL);
11260 kfree(ctx);
11261 }
11262 xa_destroy(&dev->ethtool->rss_ctx);
11263 mutex_unlock(&dev->ethtool->rss_lock);
11264}
11265
11266/**
11267 * unregister_netdevice_queue - remove device from the kernel
11268 * @dev: device
11269 * @head: list
11270 *
11271 * This function shuts down a device interface and removes it
11272 * from the kernel tables.
11273 * If @head is not NULL, the device is queued to be unregistered later.
11274 *
11275 * Callers must hold the rtnl semaphore. You may want
11276 * unregister_netdev() instead of this.
11277 */
11278
11279void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
11280{
11281 ASSERT_RTNL();
11282
11283 if (head) {
11284 list_move_tail(&dev->unreg_list, head);
11285 } else {
11286 LIST_HEAD(single);
11287
11288 list_add(&dev->unreg_list, &single);
11289 unregister_netdevice_many(&single);
11290 }
11291}
11292EXPORT_SYMBOL(unregister_netdevice_queue);
11293
11294void unregister_netdevice_many_notify(struct list_head *head,
11295 u32 portid, const struct nlmsghdr *nlh)
11296{
11297 struct net_device *dev, *tmp;
11298 LIST_HEAD(close_head);
11299 int cnt = 0;
11300
11301 BUG_ON(dev_boot_phase);
11302 ASSERT_RTNL();
11303
11304 if (list_empty(head))
11305 return;
11306
11307 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
11308		/* Some devices call this without having been registered,
11309		 * as part of initialization unwind. Remove those
11310		 * devices and proceed with the remaining ones.
11311 */
11312 if (dev->reg_state == NETREG_UNINITIALIZED) {
11313 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
11314 dev->name, dev);
11315
11316 WARN_ON(1);
11317 list_del(&dev->unreg_list);
11318 continue;
11319 }
11320 dev->dismantle = true;
11321 BUG_ON(dev->reg_state != NETREG_REGISTERED);
11322 }
11323
11324 /* If device is running, close it first. */
11325 list_for_each_entry(dev, head, unreg_list)
11326 list_add_tail(&dev->close_list, &close_head);
11327 dev_close_many(&close_head, true);
11328
11329 list_for_each_entry(dev, head, unreg_list) {
11330 /* And unlink it from device chain. */
11331 unlist_netdevice(dev);
11332 WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERING);
11333 }
11334 flush_all_backlogs();
11335
11336 synchronize_net();
11337
11338 list_for_each_entry(dev, head, unreg_list) {
11339 struct sk_buff *skb = NULL;
11340
11341 /* Shutdown queueing discipline. */
11342 dev_shutdown(dev);
11343 dev_tcx_uninstall(dev);
11344 dev_xdp_uninstall(dev);
11345 bpf_dev_bound_netdev_unregister(dev);
11346
11347 netdev_offload_xstats_disable_all(dev);
11348
11349 /* Notify protocols, that we are about to destroy
11350 * this device. They should clean all the things.
11351 */
11352 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
11353
11354 if (!dev->rtnl_link_ops ||
11355 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
11356 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
11357 GFP_KERNEL, NULL, 0,
11358 portid, nlh);
11359
11360 /*
11361 * Flush the unicast and multicast chains
11362 */
11363 dev_uc_flush(dev);
11364 dev_mc_flush(dev);
11365
11366 netdev_name_node_alt_flush(dev);
11367 netdev_name_node_free(dev->name_node);
11368
11369 netdev_rss_contexts_free(dev);
11370
11371 call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
11372
11373 if (dev->netdev_ops->ndo_uninit)
11374 dev->netdev_ops->ndo_uninit(dev);
11375
11376 mutex_destroy(&dev->ethtool->rss_lock);
11377
11378 if (skb)
11379 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL, portid, nlh);
11380
11381 /* Notifier chain MUST detach us all upper devices. */
11382 WARN_ON(netdev_has_any_upper_dev(dev));
11383 WARN_ON(netdev_has_any_lower_dev(dev));
11384
11385 /* Remove entries from kobject tree */
11386 netdev_unregister_kobject(dev);
11387#ifdef CONFIG_XPS
11388 /* Remove XPS queueing entries */
11389 netif_reset_xps_queues_gt(dev, 0);
11390#endif
11391 }
11392
11393 synchronize_net();
11394
11395 list_for_each_entry(dev, head, unreg_list) {
11396 netdev_put(dev, &dev->dev_registered_tracker);
11397 net_set_todo(dev);
11398 cnt++;
11399 }
11400 atomic_add(cnt, &dev_unreg_count);
11401
11402 list_del(head);
11403}
11404
11405/**
11406 * unregister_netdevice_many - unregister many devices
11407 * @head: list of devices
11408 *
11409 * Note: As most callers use a stack-allocated list_head,
11410 * we force a list_del() to make sure the stack won't be corrupted later.
11411 */
11412void unregister_netdevice_many(struct list_head *head)
11413{
11414 unregister_netdevice_many_notify(head, 0, NULL);
11415}
11416EXPORT_SYMBOL(unregister_netdevice_many);
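
/* Batching sketch: queue several devices, then tear them all down in one
 * rtnl-held pass, as rtnl_link_ops->dellink callers typically do.
 * dev1/dev2 are placeholders:
 *
 *	LIST_HEAD(list);
 *
 *	rtnl_lock();
 *	unregister_netdevice_queue(dev1, &list);
 *	unregister_netdevice_queue(dev2, &list);
 *	unregister_netdevice_many(&list);
 *	rtnl_unlock();
 */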
11417
11418/**
11419 * unregister_netdev - remove device from the kernel
11420 * @dev: device
11421 *
11422 * This function shuts down a device interface and removes it
11423 * from the kernel tables.
11424 *
11425 * This is just a wrapper for unregister_netdevice that takes
11426 * the rtnl semaphore. In general you want to use this and not
11427 * unregister_netdevice.
11428 */
11429void unregister_netdev(struct net_device *dev)
11430{
11431 rtnl_lock();
11432 unregister_netdevice(dev);
11433 rtnl_unlock();
11434}
11435EXPORT_SYMBOL(unregister_netdev);
11436
11437/**
11438 * __dev_change_net_namespace - move device to a different network namespace
11439 * @dev: device
11440 * @net: network namespace
11441 * @pat: If not NULL name pattern to try if the current device name
11442 * is already taken in the destination network namespace.
11443 * @new_ifindex: If not zero, specifies device index in the target
11444 * namespace.
11445 *
11446 * This function shuts down a device interface and moves it
11447 * to a new network namespace. On success 0 is returned, on
11448 * a failure a negative errno code is returned.
11449 *
11450 * Callers must hold the rtnl semaphore.
11451 */
11452
11453int __dev_change_net_namespace(struct net_device *dev, struct net *net,
11454 const char *pat, int new_ifindex)
11455{
11456 struct netdev_name_node *name_node;
11457 struct net *net_old = dev_net(dev);
11458 char new_name[IFNAMSIZ] = {};
11459 int err, new_nsid;
11460
11461 ASSERT_RTNL();
11462
11463	/* Don't allow namespace-local devices to be moved. */
11464 err = -EINVAL;
11465 if (dev->features & NETIF_F_NETNS_LOCAL)
11466 goto out;
11467
11468	/* Ensure the device has been registered */
11469 if (dev->reg_state != NETREG_REGISTERED)
11470 goto out;
11471
11472	/* Get out if there is nothing to do */
11473 err = 0;
11474 if (net_eq(net_old, net))
11475 goto out;
11476
11477 /* Pick the destination device name, and ensure
11478 * we can use it in the destination network namespace.
11479 */
11480 err = -EEXIST;
11481 if (netdev_name_in_use(net, dev->name)) {
11482 /* We get here if we can't use the current device name */
11483 if (!pat)
11484 goto out;
11485 err = dev_prep_valid_name(net, dev, pat, new_name, EEXIST);
11486 if (err < 0)
11487 goto out;
11488 }
11489 /* Check that none of the altnames conflicts. */
11490 err = -EEXIST;
11491 netdev_for_each_altname(dev, name_node)
11492 if (netdev_name_in_use(net, name_node->name))
11493 goto out;
11494
11495 /* Check that new_ifindex isn't used yet. */
11496 if (new_ifindex) {
11497 err = dev_index_reserve(net, new_ifindex);
11498 if (err < 0)
11499 goto out;
11500 } else {
11501 /* If there is an ifindex conflict assign a new one */
11502 err = dev_index_reserve(net, dev->ifindex);
11503 if (err == -EBUSY)
11504 err = dev_index_reserve(net, 0);
11505 if (err < 0)
11506 goto out;
11507 new_ifindex = err;
11508 }
11509
11510 /*
11511	 * And now a mini version of register_netdevice() and unregister_netdevice().
11512 */
11513
11514 /* If device is running close it first. */
11515 dev_close(dev);
11516
11517 /* And unlink it from device chain */
11518 unlist_netdevice(dev);
11519
11520 synchronize_net();
11521
11522 /* Shutdown queueing discipline. */
11523 dev_shutdown(dev);
11524
11525 /* Notify protocols, that we are about to destroy
11526 * this device. They should clean all the things.
11527 *
11528 * Note that dev->reg_state stays at NETREG_REGISTERED.
11529 * This is wanted because this way 8021q and macvlan know
11530 * the device is just moving and can keep their slaves up.
11531 */
11532 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
11533 rcu_barrier();
11534
11535 new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
11536
11537 rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
11538 new_ifindex);
11539
11540 /*
11541 * Flush the unicast and multicast chains
11542 */
11543 dev_uc_flush(dev);
11544 dev_mc_flush(dev);
11545
11546 /* Send a netdev-removed uevent to the old namespace */
11547 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
11548 netdev_adjacent_del_links(dev);
11549
11550 /* Move per-net netdevice notifiers that are following the netdevice */
11551 move_netdevice_notifiers_dev_net(dev, net);
11552
11553 /* Actually switch the network namespace */
11554 dev_net_set(dev, net);
11555 dev->ifindex = new_ifindex;
11556
11557 if (new_name[0]) {
11558 /* Rename the netdev to prepared name */
11559 write_seqlock_bh(&netdev_rename_lock);
11560 strscpy(dev->name, new_name, IFNAMSIZ);
11561 write_sequnlock_bh(&netdev_rename_lock);
11562 }
11563
11564 /* Fixup kobjects */
11565 dev_set_uevent_suppress(&dev->dev, 1);
11566 err = device_rename(&dev->dev, dev->name);
11567 dev_set_uevent_suppress(&dev->dev, 0);
11568 WARN_ON(err);
11569
11570 /* Send a netdev-add uevent to the new namespace */
11571 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
11572 netdev_adjacent_add_links(dev);
11573
11574 /* Adapt owner in case owning user namespace of target network
11575 * namespace is different from the original one.
11576 */
11577 err = netdev_change_owner(dev, net_old, net);
11578 WARN_ON(err);
11579
11580 /* Add the device back in the hashes */
11581 list_netdevice(dev);
11582
11583 /* Notify protocols, that a new device appeared. */
11584 call_netdevice_notifiers(NETDEV_REGISTER, dev);
11585
11586 /*
11587 * Prevent userspace races by waiting until the network
11588 * device is fully setup before sending notifications.
11589 */
11590 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL);
11591
11592 synchronize_net();
11593 err = 0;
11594out:
11595 return err;
11596}
11597EXPORT_SYMBOL_GPL(__dev_change_net_namespace);
11598
11599static int dev_cpu_dead(unsigned int oldcpu)
11600{
11601 struct sk_buff **list_skb;
11602 struct sk_buff *skb;
11603 unsigned int cpu;
11604 struct softnet_data *sd, *oldsd, *remsd = NULL;
11605
11606 local_irq_disable();
11607 cpu = smp_processor_id();
11608 sd = &per_cpu(softnet_data, cpu);
11609 oldsd = &per_cpu(softnet_data, oldcpu);
11610
11611 /* Find end of our completion_queue. */
11612 list_skb = &sd->completion_queue;
11613 while (*list_skb)
11614 list_skb = &(*list_skb)->next;
11615 /* Append completion queue from offline CPU. */
11616 *list_skb = oldsd->completion_queue;
11617 oldsd->completion_queue = NULL;
11618
11619 /* Append output queue from offline CPU. */
11620 if (oldsd->output_queue) {
11621 *sd->output_queue_tailp = oldsd->output_queue;
11622 sd->output_queue_tailp = oldsd->output_queue_tailp;
11623 oldsd->output_queue = NULL;
11624 oldsd->output_queue_tailp = &oldsd->output_queue;
11625 }
11626	/* Append NAPI poll list from offline CPU, with one exception:
11627 * process_backlog() must be called by cpu owning percpu backlog.
11628 * We properly handle process_queue & input_pkt_queue later.
11629 */
11630 while (!list_empty(&oldsd->poll_list)) {
11631 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
11632 struct napi_struct,
11633 poll_list);
11634
11635 list_del_init(&napi->poll_list);
11636 if (napi->poll == process_backlog)
11637 napi->state &= NAPIF_STATE_THREADED;
11638 else
11639 ____napi_schedule(sd, napi);
11640 }
11641
11642 raise_softirq_irqoff(NET_TX_SOFTIRQ);
11643 local_irq_enable();
11644
11645 if (!use_backlog_threads()) {
11646#ifdef CONFIG_RPS
11647 remsd = oldsd->rps_ipi_list;
11648 oldsd->rps_ipi_list = NULL;
11649#endif
11650		/* send out pending IPIs on offline CPU */
11651 net_rps_send_ipi(remsd);
11652 }
11653
11654 /* Process offline CPU's input_pkt_queue */
11655 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
11656 netif_rx(skb);
11657 rps_input_queue_head_incr(oldsd);
11658 }
11659 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
11660 netif_rx(skb);
11661 rps_input_queue_head_incr(oldsd);
11662 }
11663
11664 return 0;
11665}

/**
 * netdev_increment_features - increment feature set by one
 * @all: current feature set
 * @one: new feature set
 * @mask: mask feature set
 *
 * Computes a new feature set after adding a device with feature set
 * @one to the master device with current feature set @all. Will not
 * enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_HW_CSUM)
		mask |= NETIF_F_CSUM_MASK;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_HW_CSUM)
		all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
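
/*
 * Illustrative sketch (editorial, not kernel code): how an aggregating
 * driver such as a bonding-style master might fold the feature sets of
 * its lower devices into one via netdev_increment_features(). The
 * helper name and the seed value are assumptions for the example;
 * netdev_for_each_lower_dev() requires the caller to hold RTNL.
 */
static netdev_features_t __maybe_unused
example_fold_lower_features(struct net_device *master,
			    netdev_features_t mask)
{
	netdev_features_t features = mask & NETIF_F_ALL_FOR_ALL;
	struct net_device *lower;
	struct list_head *iter;

	/* Fold each lower device's features into the running set. */
	netdev_for_each_lower_dev(master, lower, iter)
		features = netdev_increment_features(features,
						     lower->features, mask);
	return features;
}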

static struct hlist_head * __net_init netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	BUILD_BUG_ON(GRO_HASH_BUCKETS >
		     8 * sizeof_field(struct napi_struct, gro_bitmask));

	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	xa_init_flags(&net->dev_by_index, XA_FLAGS_ALLOC1);

	RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain);

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 * netdev_drivername - network driver for the device
 * @dev: network device
 *
 * Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}
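
/*
 * Usage sketch (editorial): netdev_drivername() never returns NULL, so
 * its result can be fed straight to printk-style helpers. The function
 * below is a hypothetical diagnostic helper, not kernel code.
 */
static void __maybe_unused example_report_driver(struct net_device *dev)
{
	pr_info("%s is handled by driver \"%s\"\n",
		dev->name, netdev_drivername(dev));
}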

static void __netdev_printk(const char *level, const struct net_device *dev,
			    struct va_format *vaf)
{
	if (dev && dev->dev.parent) {
		dev_printk_emit(level[1] - '0',
				dev->dev.parent,
				"%s %s %s%s: %pV",
				dev_driver_string(dev->dev.parent),
				dev_name(dev->dev.parent),
				netdev_name(dev), netdev_reg_state(dev),
				vaf);
	} else if (dev) {
		printk("%s%s%s: %pV",
		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
	} else {
		printk("%s(NULL net_device): %pV", level, vaf);
	}
}

void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	__netdev_printk(level, dev, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(netdev_printk);

#define define_netdev_printk_level(func, level)			\
void func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__netdev_printk(level, dev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
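
/*
 * Usage sketch (editorial): the helpers generated above take the device
 * first and a printf-style format second, and __netdev_printk() prefixes
 * the message with driver, bus and interface names. The function below
 * is a hypothetical example, not kernel code.
 */
static void __maybe_unused example_log_link_state(struct net_device *dev,
						  bool up)
{
	if (up)
		netdev_info(dev, "link is up\n");
	else
		netdev_warn(dev, "link is down\n");
}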

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
	xa_destroy(&net->dev_by_index);
	if (net != &init_net)
		WARN_ON_ONCE(!list_empty(&net->dev_base_head));
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit_net(struct net *net)
{
	struct netdev_name_node *name_node, *tmp;
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	ASSERT_RTNL();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (e.g. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
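		/* A "dev%d" pattern lets dev_change_net_namespace() pick
		 * the next free name in init_net when "dev<ifindex>" is
		 * already taken there.
		 */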
		if (netdev_name_in_use(&init_net, fb_name))
			snprintf(fb_name, IFNAMSIZ, "dev%%d");

		netdev_for_each_altname_safe(dev, name_node, tmp)
			if (netdev_name_in_use(&init_net, name_node->name))
				__netdev_name_node_alt_destroy(name_node);

		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace. Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		default_device_exit_net(net);
		cond_resched();
	}

	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit_batch = default_device_exit_batch,
};
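
/*
 * Editorial note: only an .exit_batch handler is provided here. The
 * pernet core then calls default_device_exit_batch() once for a whole
 * list of dying namespaces rather than once per namespace, which lets
 * unregister_netdevice_many() amortize rtnl and RCU synchronization
 * costs across every device being removed.
 */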

static void __init net_dev_struct_check(void)
{
	/* TX read-mostly hotpath */
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, priv_flags);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, netdev_ops);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, header_ops);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, _tx);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, real_num_tx_queues);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_max_size);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_ipv4_max_size);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_max_segs);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_partial_features);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, num_tc);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, mtu);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, needed_headroom);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, tc_to_txq);
#ifdef CONFIG_XPS
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, xps_maps);
#endif
#ifdef CONFIG_NETFILTER_EGRESS
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, nf_hooks_egress);
#endif
#ifdef CONFIG_NET_XGRESS
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, tcx_egress);
#endif
	CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_tx, 160);

	/* TXRX read-mostly hotpath */
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, lstats);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, state);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, flags);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, hard_header_len);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, features);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, ip6_ptr);
	CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_txrx, 46);

	/* RX read-mostly hotpath */
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, ptype_specific);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, ifindex);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, real_num_rx_queues);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, _rx);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_flush_timeout);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, napi_defer_hard_irqs);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_max_size);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_ipv4_max_size);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, rx_handler);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, rx_handler_data);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, nd_net);
#ifdef CONFIG_NETPOLL
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, npinfo);
#endif
#ifdef CONFIG_NET_XGRESS
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, tcx_ingress);
#endif
	CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_rx, 104);
}
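
/*
 * Editorial note: the assertions above turn any accidental reshuffling
 * of these read-mostly hot-path members into a build failure, instead
 * of a silent cacheline-layout (and therefore performance) regression.
 * The group sizes (160, 46 and 104 bytes) must be updated in lockstep
 * when members are added to a group.
 */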

/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 */

/* We allocate 256 pages for each CPU if PAGE_SHIFT is 12 */
#define SYSTEM_PERCPU_PAGE_POOL_SIZE	((1 << 20) / PAGE_SIZE)

static int net_page_pool_create(int cpuid)
{
#if IS_ENABLED(CONFIG_PAGE_POOL)
	struct page_pool_params page_pool_params = {
		.pool_size = SYSTEM_PERCPU_PAGE_POOL_SIZE,
		.flags = PP_FLAG_SYSTEM_POOL,
		.nid = cpu_to_mem(cpuid),
	};
	struct page_pool *pp_ptr;

	pp_ptr = page_pool_create_percpu(&page_pool_params, cpuid);
	if (IS_ERR(pp_ptr))
		return -ENOMEM;

	per_cpu(system_page_pool, cpuid) = pp_ptr;
#endif
	return 0;
}
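
/*
 * Illustrative sketch (editorial): drawing a page from the per-CPU
 * system page pool created above. page_pool_alloc_pages() is the real
 * page_pool allocation API; the helper itself is hypothetical and, like
 * the pool, only meaningful under CONFIG_PAGE_POOL.
 */
#if IS_ENABLED(CONFIG_PAGE_POOL)
static struct page * __maybe_unused example_system_pp_alloc(void)
{
	struct page_pool *pp = this_cpu_read(system_page_pool);

	/* GFP_ATOMIC: the system pool is meant for softirq/NAPI use. */
	return page_pool_alloc_pages(pp, GFP_ATOMIC);
}
#endif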

static int backlog_napi_should_run(unsigned int cpu)
{
	struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
	struct napi_struct *napi = &sd->backlog;

	return test_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
}

static void run_backlog_napi(unsigned int cpu)
{
	struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);

	napi_threaded_poll_loop(&sd->backlog);
}

static void backlog_napi_setup(unsigned int cpu)
{
	struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
	struct napi_struct *napi = &sd->backlog;

	napi->thread = this_cpu_read(backlog_napi);
	set_bit(NAPI_STATE_THREADED, &napi->state);
}

static struct smp_hotplug_thread backlog_threads = {
	.store = &backlog_napi,
	.thread_should_run = backlog_napi_should_run,
	.thread_fn = run_backlog_napi,
	.thread_comm = "backlog_napi/%u",
	.setup = backlog_napi_setup,
};
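
/*
 * Editorial note: net_dev_init() registers these per-CPU threads only
 * when use_backlog_threads() is true; otherwise backlog packets keep
 * being processed from NET_RX_SOFTIRQ context via process_backlog().
 */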

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	net_dev_struct_check();

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 * Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		INIT_WORK(flush, flush_backlog);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
#ifdef CONFIG_XFRM_OFFLOAD
		skb_queue_head_init(&sd->xfrm_backlog);
#endif
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
		sd->cpu = i;
#endif
		INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
		spin_lock_init(&sd->defer_lock);

		init_gro_hash(&sd->backlog);
		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
		INIT_LIST_HEAD(&sd->backlog.poll_list);

		if (net_page_pool_create(i))
			goto out;
	}
	if (use_backlog_threads())
		smpboot_register_percpu_thread(&backlog_threads);

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device is
	 * present in a network namespace, the loopback device must be
	 * present too. Since we now dynamically allocate and free the
	 * loopback device, ensure this invariant is maintained by keeping
	 * the loopback device as the first device on the list of network
	 * devices: the first device that appears and the last network
	 * device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
				       NULL, dev_cpu_dead);
	WARN_ON(rc < 0);
	rc = 0;

	/* avoid static key IPIs to isolated CPUs */
	if (housekeeping_enabled(HK_TYPE_MISC))
		net_enable_timestamp();
out:
	if (rc < 0) {
		for_each_possible_cpu(i) {
			struct page_pool *pp_ptr;

			pp_ptr = per_cpu(system_page_pool, i);
			if (!pp_ptr)
				continue;

			page_pool_destroy(pp_ptr);
			per_cpu(system_page_pool, i) = NULL;
		}
	}

	return rc;
}

subsys_initcall(net_dev_init);
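
/*
 * Editorial note: subsys_initcall() runs net_dev_init() before device-
 * and driver-level initcalls, so the networking core (softnet state,
 * softirqs, pernet hooks) is ready before NIC drivers start registering
 * net_devices.
 */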