1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * NET3 Protocol independent device support routines.
4 *
5 * Derived from the non IP parts of dev.c 1.0.19
6 * Authors: Ross Biro
7 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
8 * Mark Evans, <evansmp@uhura.aston.ac.uk>
9 *
10 * Additional Authors:
11 * Florian la Roche <rzsfl@rz.uni-sb.de>
12 * Alan Cox <gw4pts@gw4pts.ampr.org>
13 * David Hinds <dahinds@users.sourceforge.net>
14 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
15 * Adam Sulmicki <adam@cfar.umd.edu>
16 * Pekka Riikonen <priikone@poesidon.pspt.fi>
17 *
18 * Changes:
19 * D.J. Barrow : Fixed bug where dev->refcnt gets set
20 * to 2 if register_netdev gets called
21 * before net_dev_init & also removed a
22 * few lines of code in the process.
23 * Alan Cox : device private ioctl copies fields back.
24 * Alan Cox : Transmit queue code does relevant
25 * stunts to keep the queue safe.
26 * Alan Cox : Fixed double lock.
27 * Alan Cox : Fixed promisc NULL pointer trap
28 * ???????? : Support the full private ioctl range
29 * Alan Cox : Moved ioctl permission check into
30 * drivers
31 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
32 * Alan Cox : 100 backlog just doesn't cut it when
33 * you start doing multicast video 8)
34 * Alan Cox : Rewrote net_bh and list manager.
35 * Alan Cox : Fix ETH_P_ALL echoback lengths.
36 * Alan Cox : Took out transmit every packet pass
37 * Saved a few bytes in the ioctl handler
38 * Alan Cox : Network driver sets packet type before
39 * calling netif_rx. Saves a function
40 * call a packet.
41 * Alan Cox : Hashed net_bh()
42 * Richard Kooijman: Timestamp fixes.
43 * Alan Cox : Wrong field in SIOCGIFDSTADDR
44 * Alan Cox : Device lock protection.
45 * Alan Cox : Fixed nasty side effect of device close
46 * changes.
47 * Rudi Cilibrasi : Pass the right thing to
48 * set_mac_address()
49 * Dave Miller : 32bit quantity for the device lock to
50 * make it work out on a Sparc.
51 * Bjorn Ekwall : Added KERNELD hack.
52 * Alan Cox : Cleaned up the backlog initialise.
53 * Craig Metz : SIOCGIFCONF fix if space for under
54 * 1 device.
55 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
56 * is no device open function.
57 * Andi Kleen : Fix error reporting for SIOCGIFCONF
58 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
59 * Cyrus Durgin : Cleaned for KMOD
60 * Adam Sulmicki : Bug Fix : Network Device Unload
61 * A network device unload needs to purge
62 * the backlog queue.
63 * Paul Rusty Russell : SIOCSIFNAME
64 * Pekka Riikonen : Netdev boot-time settings code
65 * Andrew Morton : Make unregister_netdevice wait
66 * indefinitely on dev->refcnt
67 * J Hadi Salim : - Backlog queue sampling
68 * - netif_rx() feedback
69 */
70
71#include <linux/uaccess.h>
72#include <linux/bitmap.h>
73#include <linux/capability.h>
74#include <linux/cpu.h>
75#include <linux/types.h>
76#include <linux/kernel.h>
77#include <linux/hash.h>
78#include <linux/slab.h>
79#include <linux/sched.h>
80#include <linux/sched/mm.h>
81#include <linux/mutex.h>
82#include <linux/rwsem.h>
83#include <linux/string.h>
84#include <linux/mm.h>
85#include <linux/socket.h>
86#include <linux/sockios.h>
87#include <linux/errno.h>
88#include <linux/interrupt.h>
89#include <linux/if_ether.h>
90#include <linux/netdevice.h>
91#include <linux/etherdevice.h>
92#include <linux/ethtool.h>
93#include <linux/skbuff.h>
94#include <linux/kthread.h>
95#include <linux/bpf.h>
96#include <linux/bpf_trace.h>
97#include <net/net_namespace.h>
98#include <net/sock.h>
99#include <net/busy_poll.h>
100#include <linux/rtnetlink.h>
101#include <linux/stat.h>
102#include <net/dsa.h>
103#include <net/dst.h>
104#include <net/dst_metadata.h>
105#include <net/gro.h>
106#include <net/pkt_sched.h>
107#include <net/pkt_cls.h>
108#include <net/checksum.h>
109#include <net/xfrm.h>
110#include <net/tcx.h>
111#include <linux/highmem.h>
112#include <linux/init.h>
113#include <linux/module.h>
114#include <linux/netpoll.h>
115#include <linux/rcupdate.h>
116#include <linux/delay.h>
117#include <net/iw_handler.h>
118#include <asm/current.h>
119#include <linux/audit.h>
120#include <linux/dmaengine.h>
121#include <linux/err.h>
122#include <linux/ctype.h>
123#include <linux/if_arp.h>
124#include <linux/if_vlan.h>
125#include <linux/ip.h>
126#include <net/ip.h>
127#include <net/mpls.h>
128#include <linux/ipv6.h>
129#include <linux/in.h>
130#include <linux/jhash.h>
131#include <linux/random.h>
132#include <trace/events/napi.h>
133#include <trace/events/net.h>
134#include <trace/events/skb.h>
135#include <trace/events/qdisc.h>
136#include <trace/events/xdp.h>
137#include <linux/inetdevice.h>
138#include <linux/cpu_rmap.h>
139#include <linux/static_key.h>
140#include <linux/hashtable.h>
141#include <linux/vmalloc.h>
142#include <linux/if_macvlan.h>
143#include <linux/errqueue.h>
144#include <linux/hrtimer.h>
145#include <linux/netfilter_netdev.h>
146#include <linux/crash_dump.h>
147#include <linux/sctp.h>
148#include <net/udp_tunnel.h>
149#include <linux/net_namespace.h>
150#include <linux/indirect_call_wrapper.h>
151#include <net/devlink.h>
152#include <linux/pm_runtime.h>
153#include <linux/prandom.h>
154#include <linux/once_lite.h>
155#include <net/netdev_rx_queue.h>
156#include <net/page_pool/types.h>
157#include <net/page_pool/helpers.h>
158#include <net/rps.h>
159
160#include "dev.h"
161#include "net-sysfs.h"
162
163static DEFINE_SPINLOCK(ptype_lock);
164struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
165
166static int netif_rx_internal(struct sk_buff *skb);
167static int call_netdevice_notifiers_extack(unsigned long val,
168 struct net_device *dev,
169 struct netlink_ext_ack *extack);
170
171static DEFINE_MUTEX(ifalias_mutex);
172
173/* protects napi_hash addition/deletion and napi_gen_id */
174static DEFINE_SPINLOCK(napi_hash_lock);
175
176static unsigned int napi_gen_id = NR_CPUS;
177static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
178
179static DECLARE_RWSEM(devnet_rename_sem);
180
181static inline void dev_base_seq_inc(struct net *net)
182{
183 unsigned int val = net->dev_base_seq + 1;
184
185 WRITE_ONCE(net->dev_base_seq, val ?: 1);
186}
187
188static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
189{
190 unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));
191
192 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
193}
194
195static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
196{
197 return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
198}
199
200static inline void rps_lock_irqsave(struct softnet_data *sd,
201 unsigned long *flags)
202{
203 if (IS_ENABLED(CONFIG_RPS))
204 spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
205 else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
206 local_irq_save(*flags);
207}
208
209static inline void rps_lock_irq_disable(struct softnet_data *sd)
210{
211 if (IS_ENABLED(CONFIG_RPS))
212 spin_lock_irq(&sd->input_pkt_queue.lock);
213 else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
214 local_irq_disable();
215}
216
217static inline void rps_unlock_irq_restore(struct softnet_data *sd,
218 unsigned long *flags)
219{
220 if (IS_ENABLED(CONFIG_RPS))
221 spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
222 else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
223 local_irq_restore(*flags);
224}
225
226static inline void rps_unlock_irq_enable(struct softnet_data *sd)
227{
228 if (IS_ENABLED(CONFIG_RPS))
229 spin_unlock_irq(&sd->input_pkt_queue.lock);
230 else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
231 local_irq_enable();
232}
233
234static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
235 const char *name)
236{
237 struct netdev_name_node *name_node;
238
239 name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
240 if (!name_node)
241 return NULL;
242 INIT_HLIST_NODE(&name_node->hlist);
243 name_node->dev = dev;
244 name_node->name = name;
245 return name_node;
246}
247
248static struct netdev_name_node *
249netdev_name_node_head_alloc(struct net_device *dev)
250{
251 struct netdev_name_node *name_node;
252
253 name_node = netdev_name_node_alloc(dev, dev->name);
254 if (!name_node)
255 return NULL;
256 INIT_LIST_HEAD(&name_node->list);
257 return name_node;
258}
259
260static void netdev_name_node_free(struct netdev_name_node *name_node)
261{
262 kfree(name_node);
263}
264
265static void netdev_name_node_add(struct net *net,
266 struct netdev_name_node *name_node)
267{
268 hlist_add_head_rcu(&name_node->hlist,
269 dev_name_hash(net, name_node->name));
270}
271
272static void netdev_name_node_del(struct netdev_name_node *name_node)
273{
274 hlist_del_rcu(&name_node->hlist);
275}
276
277static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
278 const char *name)
279{
280 struct hlist_head *head = dev_name_hash(net, name);
281 struct netdev_name_node *name_node;
282
283 hlist_for_each_entry(name_node, head, hlist)
284 if (!strcmp(name_node->name, name))
285 return name_node;
286 return NULL;
287}
288
289static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
290 const char *name)
291{
292 struct hlist_head *head = dev_name_hash(net, name);
293 struct netdev_name_node *name_node;
294
295 hlist_for_each_entry_rcu(name_node, head, hlist)
296 if (!strcmp(name_node->name, name))
297 return name_node;
298 return NULL;
299}
300
301bool netdev_name_in_use(struct net *net, const char *name)
302{
303 return netdev_name_node_lookup(net, name);
304}
305EXPORT_SYMBOL(netdev_name_in_use);
306
307int netdev_name_node_alt_create(struct net_device *dev, const char *name)
308{
309 struct netdev_name_node *name_node;
310 struct net *net = dev_net(dev);
311
312 name_node = netdev_name_node_lookup(net, name);
313 if (name_node)
314 return -EEXIST;
315 name_node = netdev_name_node_alloc(dev, name);
316 if (!name_node)
317 return -ENOMEM;
318 netdev_name_node_add(net, name_node);
319 /* The node that holds dev->name acts as a head of per-device list. */
320 list_add_tail_rcu(&name_node->list, &dev->name_node->list);
321
322 return 0;
323}
324
325static void netdev_name_node_alt_free(struct rcu_head *head)
326{
327 struct netdev_name_node *name_node =
328 container_of(head, struct netdev_name_node, rcu);
329
330 kfree(name_node->name);
331 netdev_name_node_free(name_node);
332}
333
334static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
335{
336 netdev_name_node_del(name_node);
337 list_del(&name_node->list);
338 call_rcu(&name_node->rcu, netdev_name_node_alt_free);
339}
340
341int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
342{
343 struct netdev_name_node *name_node;
344 struct net *net = dev_net(dev);
345
346 name_node = netdev_name_node_lookup(net, name);
347 if (!name_node)
348 return -ENOENT;
349 /* lookup might have found our primary name or a name belonging
350 * to another device.
351 */
352 if (name_node == dev->name_node || name_node->dev != dev)
353 return -EINVAL;
354
355 __netdev_name_node_alt_destroy(name_node);
356 return 0;
357}
358
359static void netdev_name_node_alt_flush(struct net_device *dev)
360{
361 struct netdev_name_node *name_node, *tmp;
362
363 list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list) {
364 list_del(&name_node->list);
365 netdev_name_node_alt_free(&name_node->rcu);
366 }
367}
368
369/* Device list insertion */
370static void list_netdevice(struct net_device *dev)
371{
372 struct netdev_name_node *name_node;
373 struct net *net = dev_net(dev);
374
375 ASSERT_RTNL();
376
377 list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
378 netdev_name_node_add(net, dev->name_node);
379 hlist_add_head_rcu(&dev->index_hlist,
380 dev_index_hash(net, dev->ifindex));
381
382 netdev_for_each_altname(dev, name_node)
383 netdev_name_node_add(net, name_node);
384
385 /* We reserved the ifindex, this can't fail */
386 WARN_ON(xa_store(&net->dev_by_index, dev->ifindex, dev, GFP_KERNEL));
387
388 dev_base_seq_inc(net);
389}
390
391/* Device list removal
392 * caller must respect a RCU grace period before freeing/reusing dev
393 */
394static void unlist_netdevice(struct net_device *dev)
395{
396 struct netdev_name_node *name_node;
397 struct net *net = dev_net(dev);
398
399 ASSERT_RTNL();
400
401 xa_erase(&net->dev_by_index, dev->ifindex);
402
403 netdev_for_each_altname(dev, name_node)
404 netdev_name_node_del(name_node);
405
406 /* Unlink dev from the device chain */
407 list_del_rcu(&dev->dev_list);
408 netdev_name_node_del(dev->name_node);
409 hlist_del_rcu(&dev->index_hlist);
410
411 dev_base_seq_inc(dev_net(dev));
412}
413
414/*
415 * Our notifier list
416 */
417
418static RAW_NOTIFIER_HEAD(netdev_chain);
419
420/*
421 * Device drivers call our routines to queue packets here. We empty the
422 * queue in the local softnet handler.
423 */
424
425DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
426EXPORT_PER_CPU_SYMBOL(softnet_data);
427
428/* Page_pool has a lockless array/stack to alloc/recycle pages.
429 * PP consumers must pay attention to run APIs in the appropriate context
430 * (e.g. NAPI context).
431 */
432static DEFINE_PER_CPU_ALIGNED(struct page_pool *, system_page_pool);
433
434#ifdef CONFIG_LOCKDEP
435/*
436 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
437 * according to dev->type
438 */
439static const unsigned short netdev_lock_type[] = {
440 ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
441 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
442 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
443 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
444 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
445 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
446 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
447 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
448 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
449 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
450 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
451 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
452 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
453 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
454 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
455
456static const char *const netdev_lock_name[] = {
457 "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
458 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
459 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
460 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
461 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
462 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
463 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
464 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
465 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
466 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
467 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
468 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
469 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
470 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
471 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
472
473static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
474static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
475
476static inline unsigned short netdev_lock_pos(unsigned short dev_type)
477{
478 int i;
479
480 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
481 if (netdev_lock_type[i] == dev_type)
482 return i;
483 /* the last key is used by default */
484 return ARRAY_SIZE(netdev_lock_type) - 1;
485}
486
487static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
488 unsigned short dev_type)
489{
490 int i;
491
492 i = netdev_lock_pos(dev_type);
493 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
494 netdev_lock_name[i]);
495}
496
497static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
498{
499 int i;
500
501 i = netdev_lock_pos(dev->type);
502 lockdep_set_class_and_name(&dev->addr_list_lock,
503 &netdev_addr_lock_key[i],
504 netdev_lock_name[i]);
505}
506#else
507static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
508 unsigned short dev_type)
509{
510}
511
512static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
513{
514}
515#endif
516
517/*******************************************************************************
518 *
519 * Protocol management and registration routines
520 *
521 *******************************************************************************/
522
523
524/*
525 * Add a protocol ID to the list. Now that the input handler is
526 * smarter we can dispense with all the messy stuff that used to be
527 * here.
528 *
529 * BEWARE!!! Protocol handlers that mangle input packets
530 * MUST BE last in the hash buckets, and the checking of protocol handlers
531 * MUST start from the promiscuous ptype_all chain in net_bh.
532 * This holds today; do not change it.
533 * Explanation: if a packet-mangling protocol handler were
534 * first on the list, it could not tell that the packet
535 * is cloned and should be copied-on-write, so it would
536 * modify it in place and subsequent readers would see a broken packet.
537 * --ANK (980803)
538 */
539
540static inline struct list_head *ptype_head(const struct packet_type *pt)
541{
542 if (pt->type == htons(ETH_P_ALL))
543 return pt->dev ? &pt->dev->ptype_all : &net_hotdata.ptype_all;
544 else
545 return pt->dev ? &pt->dev->ptype_specific :
546 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
547}
548
549/**
550 * dev_add_pack - add packet handler
551 * @pt: packet type declaration
552 *
553 * Add a protocol handler to the networking stack. The passed &packet_type
554 * is linked into kernel lists and may not be freed until it has been
555 * removed from the kernel lists.
556 *
557 * This call does not sleep, therefore it cannot
558 * guarantee that all CPUs that are in the middle of receiving packets
559 * will see the new packet type (until the next received packet).
560 */
561
562void dev_add_pack(struct packet_type *pt)
563{
564 struct list_head *head = ptype_head(pt);
565
566 spin_lock(&ptype_lock);
567 list_add_rcu(&pt->list, head);
568 spin_unlock(&ptype_lock);
569}
570EXPORT_SYMBOL(dev_add_pack);
571
572/**
573 * __dev_remove_pack - remove packet handler
574 * @pt: packet type declaration
575 *
576 * Remove a protocol handler that was previously added to the kernel
577 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
578 * from the kernel lists and can be freed or reused once this function
579 * returns.
580 *
581 * The packet type might still be in use by receivers
582 * and must not be freed until after all the CPUs have gone
583 * through a quiescent state.
584 */
585void __dev_remove_pack(struct packet_type *pt)
586{
587 struct list_head *head = ptype_head(pt);
588 struct packet_type *pt1;
589
590 spin_lock(&ptype_lock);
591
592 list_for_each_entry(pt1, head, list) {
593 if (pt == pt1) {
594 list_del_rcu(&pt->list);
595 goto out;
596 }
597 }
598
599 pr_warn("dev_remove_pack: %p not found\n", pt);
600out:
601 spin_unlock(&ptype_lock);
602}
603EXPORT_SYMBOL(__dev_remove_pack);
604
605/**
606 * dev_remove_pack - remove packet handler
607 * @pt: packet type declaration
608 *
609 * Remove a protocol handler that was previously added to the kernel
610 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
611 * from the kernel lists and can be freed or reused once this function
612 * returns.
613 *
614 * This call sleeps to guarantee that no CPU is looking at the packet
615 * type after return.
616 */
617void dev_remove_pack(struct packet_type *pt)
618{
619 __dev_remove_pack(pt);
620
621 synchronize_net();
622}
623EXPORT_SYMBOL(dev_remove_pack);
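/*
 * Illustrative sketch (example only, hence the #if 0): a minimal module
 * that taps every received packet by registering a &packet_type with
 * dev_add_pack() and tearing it down with dev_remove_pack().  The
 * example_* names are hypothetical.
 */
#if 0
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
                       struct packet_type *pt, struct net_device *orig_dev)
{
        /* The handler owns this reference; release it when done. */
        kfree_skb(skb);
        return NET_RX_SUCCESS;
}

static struct packet_type example_pt __read_mostly = {
        .type = htons(ETH_P_ALL),       /* tap all protocols */
        .func = example_rcv,            /* .dev left NULL: all devices */
};

static int __init example_init(void)
{
        dev_add_pack(&example_pt);
        return 0;
}
module_init(example_init);

static void __exit example_exit(void)
{
        /* dev_remove_pack() sleeps until no CPU still sees the handler. */
        dev_remove_pack(&example_pt);
}
module_exit(example_exit);
#endif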
624
625
626/*******************************************************************************
627 *
628 * Device Interface Subroutines
629 *
630 *******************************************************************************/
631
632/**
633 * dev_get_iflink - get 'iflink' value of an interface
634 * @dev: targeted interface
635 *
636 * Indicates the ifindex the interface is linked to.
637 * Physical interfaces have the same 'ifindex' and 'iflink' values.
638 */
639
640int dev_get_iflink(const struct net_device *dev)
641{
642 if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
643 return dev->netdev_ops->ndo_get_iflink(dev);
644
645 return READ_ONCE(dev->ifindex);
646}
647EXPORT_SYMBOL(dev_get_iflink);
648
649/**
650 * dev_fill_metadata_dst - Retrieve tunnel egress information.
651 * @dev: targeted interface
652 * @skb: The packet.
653 *
654 * For better visibility of tunnel traffic, OVS needs to retrieve
655 * egress tunnel information for a packet. The following API allows
656 * the user to get this info.
657 */
658int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
659{
660 struct ip_tunnel_info *info;
661
662 if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
663 return -EINVAL;
664
665 info = skb_tunnel_info_unclone(skb);
666 if (!info)
667 return -ENOMEM;
668 if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
669 return -EINVAL;
670
671 return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
672}
673EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
674
675static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack)
676{
677 int k = stack->num_paths++;
678
679 if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX))
680 return NULL;
681
682 return &stack->path[k];
683}
684
685int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
686 struct net_device_path_stack *stack)
687{
688 const struct net_device *last_dev;
689 struct net_device_path_ctx ctx = {
690 .dev = dev,
691 };
692 struct net_device_path *path;
693 int ret = 0;
694
695 memcpy(ctx.daddr, daddr, sizeof(ctx.daddr));
696 stack->num_paths = 0;
697 while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
698 last_dev = ctx.dev;
699 path = dev_fwd_path(stack);
700 if (!path)
701 return -1;
702
703 memset(path, 0, sizeof(struct net_device_path));
704 ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path);
705 if (ret < 0)
706 return -1;
707
708 if (WARN_ON_ONCE(last_dev == ctx.dev))
709 return -1;
710 }
711
712 if (!ctx.dev)
713 return ret;
714
715 path = dev_fwd_path(stack);
716 if (!path)
717 return -1;
718 path->type = DEV_PATH_ETHERNET;
719 path->dev = ctx.dev;
720
721 return ret;
722}
723EXPORT_SYMBOL_GPL(dev_fill_forward_path);
724
725/**
726 * __dev_get_by_name - find a device by its name
727 * @net: the applicable net namespace
728 * @name: name to find
729 *
730 * Find an interface by name. Must be called under RTNL semaphore.
731 * If the name is found a pointer to the device is returned.
732 * If the name is not found then %NULL is returned. The
733 * reference counters are not incremented so the caller must be
734 * careful with locks.
735 */
736
737struct net_device *__dev_get_by_name(struct net *net, const char *name)
738{
739 struct netdev_name_node *node_name;
740
741 node_name = netdev_name_node_lookup(net, name);
742 return node_name ? node_name->dev : NULL;
743}
744EXPORT_SYMBOL(__dev_get_by_name);
745
746/**
747 * dev_get_by_name_rcu - find a device by its name
748 * @net: the applicable net namespace
749 * @name: name to find
750 *
751 * Find an interface by name.
752 * If the name is found a pointer to the device is returned.
753 * If the name is not found then %NULL is returned.
754 * The reference counters are not incremented so the caller must be
755 * careful with locks. The caller must hold RCU lock.
756 */
757
758struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
759{
760 struct netdev_name_node *node_name;
761
762 node_name = netdev_name_node_lookup_rcu(net, name);
763 return node_name ? node_name->dev : NULL;
764}
765EXPORT_SYMBOL(dev_get_by_name_rcu);
766
767/* Deprecated for new users, call netdev_get_by_name() instead */
768struct net_device *dev_get_by_name(struct net *net, const char *name)
769{
770 struct net_device *dev;
771
772 rcu_read_lock();
773 dev = dev_get_by_name_rcu(net, name);
774 dev_hold(dev);
775 rcu_read_unlock();
776 return dev;
777}
778EXPORT_SYMBOL(dev_get_by_name);
779
780/**
781 * netdev_get_by_name() - find a device by its name
782 * @net: the applicable net namespace
783 * @name: name to find
784 * @tracker: tracking object for the acquired reference
785 * @gfp: allocation flags for the tracker
786 *
787 * Find an interface by name. This can be called from any
788 * context and does its own locking. The returned handle has
789 * the usage count incremented and the caller must use netdev_put() to
790 * release it when it is no longer needed. %NULL is returned if no
791 * matching device is found.
792 */
793struct net_device *netdev_get_by_name(struct net *net, const char *name,
794 netdevice_tracker *tracker, gfp_t gfp)
795{
796 struct net_device *dev;
797
798 dev = dev_get_by_name(net, name);
799 if (dev)
800 netdev_tracker_alloc(dev, tracker, gfp);
801 return dev;
802}
803EXPORT_SYMBOL(netdev_get_by_name);
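/*
 * Illustrative sketch (example only): name lookup with a reference
 * tracker.  netdev_get_by_name() takes a tracked reference which must be
 * released with netdev_put().  "eth0" and example_use_by_name() are
 * hypothetical.
 */
#if 0
static void example_use_by_name(struct net *net)
{
        netdevice_tracker tracker;
        struct net_device *dev;

        dev = netdev_get_by_name(net, "eth0", &tracker, GFP_KERNEL);
        if (!dev)
                return;

        netdev_info(dev, "mtu is %u\n", dev->mtu);

        /* Every netdev_get_by_name() is paired with a netdev_put(). */
        netdev_put(dev, &tracker);
}
#endif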
804
805/**
806 * __dev_get_by_index - find a device by its ifindex
807 * @net: the applicable net namespace
808 * @ifindex: index of device
809 *
810 * Search for an interface by index. Returns %NULL if the device
811 * is not found or a pointer to the device. The device has not
812 * had its reference counter increased so the caller must be careful
813 * about locking. The caller must hold the RTNL semaphore.
814 */
815
816struct net_device *__dev_get_by_index(struct net *net, int ifindex)
817{
818 struct net_device *dev;
819 struct hlist_head *head = dev_index_hash(net, ifindex);
820
821 hlist_for_each_entry(dev, head, index_hlist)
822 if (dev->ifindex == ifindex)
823 return dev;
824
825 return NULL;
826}
827EXPORT_SYMBOL(__dev_get_by_index);
828
829/**
830 * dev_get_by_index_rcu - find a device by its ifindex
831 * @net: the applicable net namespace
832 * @ifindex: index of device
833 *
834 * Search for an interface by index. Returns %NULL if the device
835 * is not found or a pointer to the device. The device has not
836 * had its reference counter increased so the caller must be careful
837 * about locking. The caller must hold RCU lock.
838 */
839
840struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
841{
842 struct net_device *dev;
843 struct hlist_head *head = dev_index_hash(net, ifindex);
844
845 hlist_for_each_entry_rcu(dev, head, index_hlist)
846 if (dev->ifindex == ifindex)
847 return dev;
848
849 return NULL;
850}
851EXPORT_SYMBOL(dev_get_by_index_rcu);
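/*
 * Illustrative sketch (example only): an RCU-side ifindex lookup.  The
 * returned pointer is only valid inside the read-side critical section;
 * copy out what is needed (or take a reference) before unlocking.
 * example_ifindex_to_name() is hypothetical.
 */
#if 0
static int example_ifindex_to_name(struct net *net, int ifindex,
                                   char buf[IFNAMSIZ])
{
        struct net_device *dev;
        int ret = -ENODEV;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (dev) {
                strscpy(buf, dev->name, IFNAMSIZ);
                ret = 0;
        }
        rcu_read_unlock();

        return ret;
}
#endif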
852
853/* Deprecated for new users, call netdev_get_by_index() instead */
854struct net_device *dev_get_by_index(struct net *net, int ifindex)
855{
856 struct net_device *dev;
857
858 rcu_read_lock();
859 dev = dev_get_by_index_rcu(net, ifindex);
860 dev_hold(dev);
861 rcu_read_unlock();
862 return dev;
863}
864EXPORT_SYMBOL(dev_get_by_index);
865
866/**
867 * netdev_get_by_index() - find a device by its ifindex
868 * @net: the applicable net namespace
869 * @ifindex: index of device
870 * @tracker: tracking object for the acquired reference
871 * @gfp: allocation flags for the tracker
872 *
873 * Search for an interface by index. Returns NULL if the device
874 * is not found or a pointer to the device. The device returned has
875 * had a reference added and the pointer is safe until the user calls
876 * netdev_put() to indicate they have finished with it.
877 */
878struct net_device *netdev_get_by_index(struct net *net, int ifindex,
879 netdevice_tracker *tracker, gfp_t gfp)
880{
881 struct net_device *dev;
882
883 dev = dev_get_by_index(net, ifindex);
884 if (dev)
885 netdev_tracker_alloc(dev, tracker, gfp);
886 return dev;
887}
888EXPORT_SYMBOL(netdev_get_by_index);
889
890/**
891 * dev_get_by_napi_id - find a device by napi_id
892 * @napi_id: ID of the NAPI struct
893 *
894 * Search for an interface by NAPI ID. Returns %NULL if the device
895 * is not found or a pointer to the device. The device has not had
896 * its reference counter increased so the caller must be careful
897 * about locking. The caller must hold RCU lock.
898 */
899
900struct net_device *dev_get_by_napi_id(unsigned int napi_id)
901{
902 struct napi_struct *napi;
903
904 WARN_ON_ONCE(!rcu_read_lock_held());
905
906 if (napi_id < MIN_NAPI_ID)
907 return NULL;
908
909 napi = napi_by_id(napi_id);
910
911 return napi ? napi->dev : NULL;
912}
913EXPORT_SYMBOL(dev_get_by_napi_id);
914
915/**
916 * netdev_get_name - get a netdevice name, knowing its ifindex.
917 * @net: network namespace
918 * @name: a pointer to the buffer where the name will be stored.
919 * @ifindex: the ifindex of the interface to get the name from.
920 */
921int netdev_get_name(struct net *net, char *name, int ifindex)
922{
923 struct net_device *dev;
924 int ret;
925
926 down_read(&devnet_rename_sem);
927 rcu_read_lock();
928
929 dev = dev_get_by_index_rcu(net, ifindex);
930 if (!dev) {
931 ret = -ENODEV;
932 goto out;
933 }
934
935 strcpy(name, dev->name);
936
937 ret = 0;
938out:
939 rcu_read_unlock();
940 up_read(&devnet_rename_sem);
941 return ret;
942}
943
944/**
945 * dev_getbyhwaddr_rcu - find a device by its hardware address
946 * @net: the applicable net namespace
947 * @type: media type of device
948 * @ha: hardware address
949 *
950 * Search for an interface by MAC address. Returns NULL if the device
951 * is not found or a pointer to the device.
952 * The caller must hold RCU or RTNL.
953 * The returned device has not had its ref count increased
954 * and the caller must therefore be careful about locking
955 *
956 */
957
958struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
959 const char *ha)
960{
961 struct net_device *dev;
962
963 for_each_netdev_rcu(net, dev)
964 if (dev->type == type &&
965 !memcmp(dev->dev_addr, ha, dev->addr_len))
966 return dev;
967
968 return NULL;
969}
970EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
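/*
 * Illustrative sketch (example only): looking up an Ethernet device by
 * MAC address under RCU.  example_find_by_mac() is hypothetical.
 */
#if 0
static struct net_device *example_find_by_mac(struct net *net,
                                              const char *mac /* ETH_ALEN bytes */)
{
        /* Caller holds RCU or RTNL; no reference is taken on the result. */
        return dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, mac);
}
#endif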
971
972struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
973{
974 struct net_device *dev, *ret = NULL;
975
976 rcu_read_lock();
977 for_each_netdev_rcu(net, dev)
978 if (dev->type == type) {
979 dev_hold(dev);
980 ret = dev;
981 break;
982 }
983 rcu_read_unlock();
984 return ret;
985}
986EXPORT_SYMBOL(dev_getfirstbyhwtype);
987
988/**
989 * __dev_get_by_flags - find any device with given flags
990 * @net: the applicable net namespace
991 * @if_flags: IFF_* values
992 * @mask: bitmask of bits in if_flags to check
993 *
994 * Search for any interface with the given flags. Returns NULL if a device
995 * is not found or a pointer to the device. Must be called inside
996 * rtnl_lock(), and result refcount is unchanged.
997 */
998
999struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
1000 unsigned short mask)
1001{
1002 struct net_device *dev, *ret;
1003
1004 ASSERT_RTNL();
1005
1006 ret = NULL;
1007 for_each_netdev(net, dev) {
1008 if (((dev->flags ^ if_flags) & mask) == 0) {
1009 ret = dev;
1010 break;
1011 }
1012 }
1013 return ret;
1014}
1015EXPORT_SYMBOL(__dev_get_by_flags);
1016
1017/**
1018 * dev_valid_name - check if name is okay for network device
1019 * @name: name string
1020 *
1021 * Network device names need to be valid file names to
1022 * allow sysfs to work. We also disallow any kind of
1023 * whitespace.
1024 */
1025bool dev_valid_name(const char *name)
1026{
1027 if (*name == '\0')
1028 return false;
1029 if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
1030 return false;
1031 if (!strcmp(name, ".") || !strcmp(name, ".."))
1032 return false;
1033
1034 while (*name) {
1035 if (*name == '/' || *name == ':' || isspace(*name))
1036 return false;
1037 name++;
1038 }
1039 return true;
1040}
1041EXPORT_SYMBOL(dev_valid_name);
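/*
 * Illustrative sketch (example only): what dev_valid_name() accepts and
 * rejects.  example_check_names() is hypothetical.
 */
#if 0
static void example_check_names(void)
{
        WARN_ON(!dev_valid_name("eth0"));       /* plain name: ok */
        WARN_ON(!dev_valid_name("wan%d"));      /* template: ok */
        WARN_ON(dev_valid_name(""));            /* empty: rejected */
        WARN_ON(dev_valid_name(".."));          /* "..": rejected */
        WARN_ON(dev_valid_name("a b"));         /* whitespace: rejected */
        WARN_ON(dev_valid_name("x/y"));         /* '/': rejected */
}
#endif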
1042
1043/**
1044 * __dev_alloc_name - allocate a name for a device
1045 * @net: network namespace to allocate the device name in
1046 * @name: name format string
1047 * @res: result name string
1048 *
1049 * Passed a format string - e.g. "lt%d" - it will try to find a suitable
1050 * id. It scans the list of devices to build up a free map, then chooses
1051 * the first empty slot. The caller must hold the dev_base or rtnl lock
1052 * while allocating the name and adding the device in order to avoid
1053 * duplicates.
1054 * Limited to bits_per_byte * page size devices (i.e. 32K on most platforms).
1055 * Returns the number of the unit assigned or a negative errno code.
1056 */
1057
1058static int __dev_alloc_name(struct net *net, const char *name, char *res)
1059{
1060 int i = 0;
1061 const char *p;
1062 const int max_netdevices = 8*PAGE_SIZE;
1063 unsigned long *inuse;
1064 struct net_device *d;
1065 char buf[IFNAMSIZ];
1066
1067 /* Verify the string as this thing may have come from the user.
1068 * There must be one "%d" and no other "%" characters.
1069 */
1070 p = strchr(name, '%');
1071 if (!p || p[1] != 'd' || strchr(p + 2, '%'))
1072 return -EINVAL;
1073
1074 /* Use one page as a bit array of possible slots */
1075 inuse = bitmap_zalloc(max_netdevices, GFP_ATOMIC);
1076 if (!inuse)
1077 return -ENOMEM;
1078
1079 for_each_netdev(net, d) {
1080 struct netdev_name_node *name_node;
1081
1082 netdev_for_each_altname(d, name_node) {
1083 if (!sscanf(name_node->name, name, &i))
1084 continue;
1085 if (i < 0 || i >= max_netdevices)
1086 continue;
1087
1088 /* avoid cases where sscanf is not exact inverse of printf */
1089 snprintf(buf, IFNAMSIZ, name, i);
1090 if (!strncmp(buf, name_node->name, IFNAMSIZ))
1091 __set_bit(i, inuse);
1092 }
1093 if (!sscanf(d->name, name, &i))
1094 continue;
1095 if (i < 0 || i >= max_netdevices)
1096 continue;
1097
1098 /* avoid cases where sscanf is not exact inverse of printf */
1099 snprintf(buf, IFNAMSIZ, name, i);
1100 if (!strncmp(buf, d->name, IFNAMSIZ))
1101 __set_bit(i, inuse);
1102 }
1103
1104 i = find_first_zero_bit(inuse, max_netdevices);
1105 bitmap_free(inuse);
1106 if (i == max_netdevices)
1107 return -ENFILE;
1108
1109 /* 'res' and 'name' could overlap, use 'buf' as an intermediate buffer */
1110 strscpy(buf, name, IFNAMSIZ);
1111 snprintf(res, IFNAMSIZ, buf, i);
1112 return i;
1113}
1114
1115/* Returns negative errno or allocated unit id (see __dev_alloc_name()) */
1116static int dev_prep_valid_name(struct net *net, struct net_device *dev,
1117 const char *want_name, char *out_name,
1118 int dup_errno)
1119{
1120 if (!dev_valid_name(want_name))
1121 return -EINVAL;
1122
1123 if (strchr(want_name, '%'))
1124 return __dev_alloc_name(net, want_name, out_name);
1125
1126 if (netdev_name_in_use(net, want_name))
1127 return -dup_errno;
1128 if (out_name != want_name)
1129 strscpy(out_name, want_name, IFNAMSIZ);
1130 return 0;
1131}
1132
1133/**
1134 * dev_alloc_name - allocate a name for a device
1135 * @dev: device
1136 * @name: name format string
1137 *
1138 * Passed a format string - e.g. "lt%d" - it will try to find a suitable
1139 * id. It scans the list of devices to build up a free map, then chooses
1140 * the first empty slot. The caller must hold the dev_base or rtnl lock
1141 * while allocating the name and adding the device in order to avoid
1142 * duplicates.
1143 * Limited to bits_per_byte * page size devices (i.e. 32K on most platforms).
1144 * Returns the number of the unit assigned or a negative errno code.
1145 */
1146
1147int dev_alloc_name(struct net_device *dev, const char *name)
1148{
1149 return dev_prep_valid_name(dev_net(dev), dev, name, dev->name, ENFILE);
1150}
1151EXPORT_SYMBOL(dev_alloc_name);
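/*
 * Illustrative sketch (example only): a driver asking for the next free
 * "foo%d" unit before registering its device.  The "foo%d" template and
 * example_pick_name() are hypothetical.
 */
#if 0
static int example_pick_name(struct net_device *dev)
{
        int unit;

        /* Under RTNL: fills dev->name with "foo0", "foo1", ... */
        unit = dev_alloc_name(dev, "foo%d");
        if (unit < 0)
                return unit;    /* -EINVAL, -ENFILE, -ENOMEM, ... */

        return 0;
}
#endif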
1152
1153static int dev_get_valid_name(struct net *net, struct net_device *dev,
1154 const char *name)
1155{
1156 int ret;
1157
1158 ret = dev_prep_valid_name(net, dev, name, dev->name, EEXIST);
1159 return ret < 0 ? ret : 0;
1160}
1161
1162/**
1163 * dev_change_name - change name of a device
1164 * @dev: device
1165 * @newname: name (or format string) must be at least IFNAMSIZ
1166 *
1167 * Change the name of a device; format strings such as "eth%d"
1168 * may be passed for wildcarding.
1169 */
1170int dev_change_name(struct net_device *dev, const char *newname)
1171{
1172 unsigned char old_assign_type;
1173 char oldname[IFNAMSIZ];
1174 int err = 0;
1175 int ret;
1176 struct net *net;
1177
1178 ASSERT_RTNL();
1179 BUG_ON(!dev_net(dev));
1180
1181 net = dev_net(dev);
1182
1183 down_write(&devnet_rename_sem);
1184
1185 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
1186 up_write(&devnet_rename_sem);
1187 return 0;
1188 }
1189
1190 memcpy(oldname, dev->name, IFNAMSIZ);
1191
1192 err = dev_get_valid_name(net, dev, newname);
1193 if (err < 0) {
1194 up_write(&devnet_rename_sem);
1195 return err;
1196 }
1197
1198 if (oldname[0] && !strchr(oldname, '%'))
1199 netdev_info(dev, "renamed from %s%s\n", oldname,
1200 dev->flags & IFF_UP ? " (while UP)" : "");
1201
1202 old_assign_type = dev->name_assign_type;
1203 WRITE_ONCE(dev->name_assign_type, NET_NAME_RENAMED);
1204
1205rollback:
1206 ret = device_rename(&dev->dev, dev->name);
1207 if (ret) {
1208 memcpy(dev->name, oldname, IFNAMSIZ);
1209 WRITE_ONCE(dev->name_assign_type, old_assign_type);
1210 up_write(&devnet_rename_sem);
1211 return ret;
1212 }
1213
1214 up_write(&devnet_rename_sem);
1215
1216 netdev_adjacent_rename_links(dev, oldname);
1217
1218 netdev_name_node_del(dev->name_node);
1219
1220 synchronize_net();
1221
1222 netdev_name_node_add(net, dev->name_node);
1223
1224 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1225 ret = notifier_to_errno(ret);
1226
1227 if (ret) {
1228 /* err >= 0 after dev_alloc_name() or stores the first errno */
1229 if (err >= 0) {
1230 err = ret;
1231 down_write(&devnet_rename_sem);
1232 memcpy(dev->name, oldname, IFNAMSIZ);
1233 memcpy(oldname, newname, IFNAMSIZ);
1234 WRITE_ONCE(dev->name_assign_type, old_assign_type);
1235 old_assign_type = NET_NAME_RENAMED;
1236 goto rollback;
1237 } else {
1238 netdev_err(dev, "name change rollback failed: %d\n",
1239 ret);
1240 }
1241 }
1242
1243 return err;
1244}
1245
1246/**
1247 * dev_set_alias - change ifalias of a device
1248 * @dev: device
1249 * @alias: name up to IFALIASZ
1250 * @len: limit of bytes to copy from info
1251 *
1252 * Set ifalias for a device.
1253 */
1254int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1255{
1256 struct dev_ifalias *new_alias = NULL;
1257
1258 if (len >= IFALIASZ)
1259 return -EINVAL;
1260
1261 if (len) {
1262 new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
1263 if (!new_alias)
1264 return -ENOMEM;
1265
1266 memcpy(new_alias->ifalias, alias, len);
1267 new_alias->ifalias[len] = 0;
1268 }
1269
1270 mutex_lock(&ifalias_mutex);
1271 new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
1272 mutex_is_locked(&ifalias_mutex));
1273 mutex_unlock(&ifalias_mutex);
1274
1275 if (new_alias)
1276 kfree_rcu(new_alias, rcuhead);
1277
1278 return len;
1279}
1280EXPORT_SYMBOL(dev_set_alias);
1281
1282/**
1283 * dev_get_alias - get ifalias of a device
1284 * @dev: device
1285 * @name: buffer to store name of ifalias
1286 * @len: size of buffer
1287 *
1288 * Get ifalias for a device. The caller must make sure dev cannot go
1289 * away, e.g. by holding the RCU read lock or a reference to the device.
1290 */
1291int dev_get_alias(const struct net_device *dev, char *name, size_t len)
1292{
1293 const struct dev_ifalias *alias;
1294 int ret = 0;
1295
1296 rcu_read_lock();
1297 alias = rcu_dereference(dev->ifalias);
1298 if (alias)
1299 ret = snprintf(name, len, "%s", alias->ifalias);
1300 rcu_read_unlock();
1301
1302 return ret;
1303}
1304
1305/**
1306 * netdev_features_change - device changes features
1307 * @dev: device to cause notification
1308 *
1309 * Called to indicate a device has changed features.
1310 */
1311void netdev_features_change(struct net_device *dev)
1312{
1313 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1314}
1315EXPORT_SYMBOL(netdev_features_change);
1316
1317/**
1318 * netdev_state_change - device changes state
1319 * @dev: device to cause notification
1320 *
1321 * Called to indicate a device has changed state. This function calls
1322 * the notifier chains for netdev_chain and sends a NEWLINK message
1323 * to the routing socket.
1324 */
1325void netdev_state_change(struct net_device *dev)
1326{
1327 if (dev->flags & IFF_UP) {
1328 struct netdev_notifier_change_info change_info = {
1329 .info.dev = dev,
1330 };
1331
1332 call_netdevice_notifiers_info(NETDEV_CHANGE,
1333 &change_info.info);
1334 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL, 0, NULL);
1335 }
1336}
1337EXPORT_SYMBOL(netdev_state_change);
1338
1339/**
1340 * __netdev_notify_peers - notify network peers about existence of @dev,
1341 * to be called when rtnl lock is already held.
1342 * @dev: network device
1343 *
1344 * Generate traffic such that interested network peers are aware of
1345 * @dev, such as by generating a gratuitous ARP. This may be used when
1346 * a device wants to inform the rest of the network about some sort of
1347 * reconfiguration such as a failover event or virtual machine
1348 * migration.
1349 */
1350void __netdev_notify_peers(struct net_device *dev)
1351{
1352 ASSERT_RTNL();
1353 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1354 call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
1355}
1356EXPORT_SYMBOL(__netdev_notify_peers);
1357
1358/**
1359 * netdev_notify_peers - notify network peers about existence of @dev
1360 * @dev: network device
1361 *
1362 * Generate traffic such that interested network peers are aware of
1363 * @dev, such as by generating a gratuitous ARP. This may be used when
1364 * a device wants to inform the rest of the network about some sort of
1365 * reconfiguration such as a failover event or virtual machine
1366 * migration.
1367 */
1368void netdev_notify_peers(struct net_device *dev)
1369{
1370 rtnl_lock();
1371 __netdev_notify_peers(dev);
1372 rtnl_unlock();
1373}
1374EXPORT_SYMBOL(netdev_notify_peers);
1375
1376static int napi_threaded_poll(void *data);
1377
1378static int napi_kthread_create(struct napi_struct *n)
1379{
1380 int err = 0;
1381
1382 /* Create and wake up the kthread once to put it in
1383 * TASK_INTERRUPTIBLE mode to avoid the blocked task
1384 * warning and work with loadavg.
1385 */
1386 n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
1387 n->dev->name, n->napi_id);
1388 if (IS_ERR(n->thread)) {
1389 err = PTR_ERR(n->thread);
1390 pr_err("kthread_run failed with err %d\n", err);
1391 n->thread = NULL;
1392 }
1393
1394 return err;
1395}
1396
1397static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
1398{
1399 const struct net_device_ops *ops = dev->netdev_ops;
1400 int ret;
1401
1402 ASSERT_RTNL();
1403 dev_addr_check(dev);
1404
1405 if (!netif_device_present(dev)) {
1406 /* may be detached because parent is runtime-suspended */
1407 if (dev->dev.parent)
1408 pm_runtime_resume(dev->dev.parent);
1409 if (!netif_device_present(dev))
1410 return -ENODEV;
1411 }
1412
1413 /* Block netpoll from trying to do any rx path servicing.
1414 * If we don't do this there is a chance ndo_poll_controller
1415 * or ndo_poll may be running while we open the device
1416 */
1417 netpoll_poll_disable(dev);
1418
1419 ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
1420 ret = notifier_to_errno(ret);
1421 if (ret)
1422 return ret;
1423
1424 set_bit(__LINK_STATE_START, &dev->state);
1425
1426 if (ops->ndo_validate_addr)
1427 ret = ops->ndo_validate_addr(dev);
1428
1429 if (!ret && ops->ndo_open)
1430 ret = ops->ndo_open(dev);
1431
1432 netpoll_poll_enable(dev);
1433
1434 if (ret)
1435 clear_bit(__LINK_STATE_START, &dev->state);
1436 else {
1437 dev->flags |= IFF_UP;
1438 dev_set_rx_mode(dev);
1439 dev_activate(dev);
1440 add_device_randomness(dev->dev_addr, dev->addr_len);
1441 }
1442
1443 return ret;
1444}
1445
1446/**
1447 * dev_open - prepare an interface for use.
1448 * @dev: device to open
1449 * @extack: netlink extended ack
1450 *
1451 * Takes a device from down to up state. The device's private open
1452 * function is invoked and then the multicast lists are loaded. Finally
1453 * the device is moved into the up state and a %NETDEV_UP message is
1454 * sent to the netdev notifier chain.
1455 *
1456 * Calling this function on an active interface is a nop. On a failure
1457 * a negative errno code is returned.
1458 */
1459int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
1460{
1461 int ret;
1462
1463 if (dev->flags & IFF_UP)
1464 return 0;
1465
1466 ret = __dev_open(dev, extack);
1467 if (ret < 0)
1468 return ret;
1469
1470 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
1471 call_netdevice_notifiers(NETDEV_UP, dev);
1472
1473 return ret;
1474}
1475EXPORT_SYMBOL(dev_open);
1476
1477static void __dev_close_many(struct list_head *head)
1478{
1479 struct net_device *dev;
1480
1481 ASSERT_RTNL();
1482 might_sleep();
1483
1484 list_for_each_entry(dev, head, close_list) {
1485 /* Temporarily disable netpoll until the interface is down */
1486 netpoll_poll_disable(dev);
1487
1488 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1489
1490 clear_bit(__LINK_STATE_START, &dev->state);
1491
1492 * Synchronize to scheduled poll. We cannot touch the poll list; it
1493 * may even be on a different CPU. So just clear netif_running().
1494 *
1495 * dev->stop() will invoke napi_disable() on all of its
1496 * napi_struct instances on this device.
1497 */
1498 smp_mb__after_atomic(); /* Commit netif_running(). */
1499 }
1500
1501 dev_deactivate_many(head);
1502
1503 list_for_each_entry(dev, head, close_list) {
1504 const struct net_device_ops *ops = dev->netdev_ops;
1505
1506 /*
1507 * Call the device specific close. This cannot fail.
1508 * Only if device is UP
1509 *
1510 * We allow it to be called even after a DETACH hot-plug
1511 * event.
1512 */
1513 if (ops->ndo_stop)
1514 ops->ndo_stop(dev);
1515
1516 dev->flags &= ~IFF_UP;
1517 netpoll_poll_enable(dev);
1518 }
1519}
1520
1521static void __dev_close(struct net_device *dev)
1522{
1523 LIST_HEAD(single);
1524
1525 list_add(&dev->close_list, &single);
1526 __dev_close_many(&single);
1527 list_del(&single);
1528}
1529
1530void dev_close_many(struct list_head *head, bool unlink)
1531{
1532 struct net_device *dev, *tmp;
1533
1534 /* Remove the devices that don't need to be closed */
1535 list_for_each_entry_safe(dev, tmp, head, close_list)
1536 if (!(dev->flags & IFF_UP))
1537 list_del_init(&dev->close_list);
1538
1539 __dev_close_many(head);
1540
1541 list_for_each_entry_safe(dev, tmp, head, close_list) {
1542 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
1543 call_netdevice_notifiers(NETDEV_DOWN, dev);
1544 if (unlink)
1545 list_del_init(&dev->close_list);
1546 }
1547}
1548EXPORT_SYMBOL(dev_close_many);
1549
1550/**
1551 * dev_close - shutdown an interface.
1552 * @dev: device to shutdown
1553 *
1554 * This function moves an active device into down state. A
1555 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1556 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1557 * chain.
1558 */
1559void dev_close(struct net_device *dev)
1560{
1561 if (dev->flags & IFF_UP) {
1562 LIST_HEAD(single);
1563
1564 list_add(&dev->close_list, &single);
1565 dev_close_many(&single, true);
1566 list_del(&single);
1567 }
1568}
1569EXPORT_SYMBOL(dev_close);
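/*
 * Illustrative sketch (example only): bouncing an interface from kernel
 * code.  Both calls require RTNL; dev is assumed to be held by the
 * caller.  example_cycle_device() is hypothetical.
 */
#if 0
static void example_cycle_device(struct net_device *dev)
{
        rtnl_lock();
        dev_close(dev);                 /* NETDEV_GOING_DOWN, then NETDEV_DOWN */
        if (dev_open(dev, NULL) < 0)    /* NETDEV_PRE_UP, then NETDEV_UP */
                netdev_warn(dev, "could not be reopened\n");
        rtnl_unlock();
}
#endif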
1570
1571
1572/**
1573 * dev_disable_lro - disable Large Receive Offload on a device
1574 * @dev: device
1575 *
1576 * Disable Large Receive Offload (LRO) on a net device. Must be
1577 * called under RTNL. This is needed if received packets may be
1578 * forwarded to another interface.
1579 */
1580void dev_disable_lro(struct net_device *dev)
1581{
1582 struct net_device *lower_dev;
1583 struct list_head *iter;
1584
1585 dev->wanted_features &= ~NETIF_F_LRO;
1586 netdev_update_features(dev);
1587
1588 if (unlikely(dev->features & NETIF_F_LRO))
1589 netdev_WARN(dev, "failed to disable LRO!\n");
1590
1591 netdev_for_each_lower_dev(dev, lower_dev, iter)
1592 dev_disable_lro(lower_dev);
1593}
1594EXPORT_SYMBOL(dev_disable_lro);
1595
1596/**
1597 * dev_disable_gro_hw - disable HW Generic Receive Offload on a device
1598 * @dev: device
1599 *
1600 * Disable HW Generic Receive Offload (GRO_HW) on a net device. Must be
1601 * called under RTNL. This is needed if Generic XDP is installed on
1602 * the device.
1603 */
1604static void dev_disable_gro_hw(struct net_device *dev)
1605{
1606 dev->wanted_features &= ~NETIF_F_GRO_HW;
1607 netdev_update_features(dev);
1608
1609 if (unlikely(dev->features & NETIF_F_GRO_HW))
1610 netdev_WARN(dev, "failed to disable GRO_HW!\n");
1611}
1612
1613const char *netdev_cmd_to_name(enum netdev_cmd cmd)
1614{
1615#define N(val) \
1616 case NETDEV_##val: \
1617 return "NETDEV_" __stringify(val);
1618 switch (cmd) {
1619 N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
1620 N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
1621 N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
1622 N(POST_INIT) N(PRE_UNINIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN)
1623 N(CHANGEUPPER) N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA)
1624 N(BONDING_INFO) N(PRECHANGEUPPER) N(CHANGELOWERSTATE)
1625 N(UDP_TUNNEL_PUSH_INFO) N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
1626 N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
1627 N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
1628 N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE)
1629 N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA)
1630 N(XDP_FEAT_CHANGE)
1631 }
1632#undef N
1633 return "UNKNOWN_NETDEV_EVENT";
1634}
1635EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
1636
1637static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1638 struct net_device *dev)
1639{
1640 struct netdev_notifier_info info = {
1641 .dev = dev,
1642 };
1643
1644 return nb->notifier_call(nb, val, &info);
1645}
1646
1647static int call_netdevice_register_notifiers(struct notifier_block *nb,
1648 struct net_device *dev)
1649{
1650 int err;
1651
1652 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1653 err = notifier_to_errno(err);
1654 if (err)
1655 return err;
1656
1657 if (!(dev->flags & IFF_UP))
1658 return 0;
1659
1660 call_netdevice_notifier(nb, NETDEV_UP, dev);
1661 return 0;
1662}
1663
1664static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
1665 struct net_device *dev)
1666{
1667 if (dev->flags & IFF_UP) {
1668 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1669 dev);
1670 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1671 }
1672 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1673}
1674
1675static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
1676 struct net *net)
1677{
1678 struct net_device *dev;
1679 int err;
1680
1681 for_each_netdev(net, dev) {
1682 err = call_netdevice_register_notifiers(nb, dev);
1683 if (err)
1684 goto rollback;
1685 }
1686 return 0;
1687
1688rollback:
1689 for_each_netdev_continue_reverse(net, dev)
1690 call_netdevice_unregister_notifiers(nb, dev);
1691 return err;
1692}
1693
1694static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
1695 struct net *net)
1696{
1697 struct net_device *dev;
1698
1699 for_each_netdev(net, dev)
1700 call_netdevice_unregister_notifiers(nb, dev);
1701}
1702
1703static int dev_boot_phase = 1;
1704
1705/**
1706 * register_netdevice_notifier - register a network notifier block
1707 * @nb: notifier
1708 *
1709 * Register a notifier to be called when network device events occur.
1710 * The notifier passed is linked into the kernel structures and must
1711 * not be reused until it has been unregistered. A negative errno code
1712 * is returned on a failure.
1713 *
1714 * When registered, all registration and up events are replayed
1715 * to the new notifier to give it a race-free
1716 * view of the network device list.
1717 */
1718
1719int register_netdevice_notifier(struct notifier_block *nb)
1720{
1721 struct net *net;
1722 int err;
1723
1724 /* Close race with setup_net() and cleanup_net() */
1725 down_write(&pernet_ops_rwsem);
1726 rtnl_lock();
1727 err = raw_notifier_chain_register(&netdev_chain, nb);
1728 if (err)
1729 goto unlock;
1730 if (dev_boot_phase)
1731 goto unlock;
1732 for_each_net(net) {
1733 err = call_netdevice_register_net_notifiers(nb, net);
1734 if (err)
1735 goto rollback;
1736 }
1737
1738unlock:
1739 rtnl_unlock();
1740 up_write(&pernet_ops_rwsem);
1741 return err;
1742
1743rollback:
1744 for_each_net_continue_reverse(net)
1745 call_netdevice_unregister_net_notifiers(nb, net);
1746
1747 raw_notifier_chain_unregister(&netdev_chain, nb);
1748 goto unlock;
1749}
1750EXPORT_SYMBOL(register_netdevice_notifier);
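/*
 * Illustrative sketch (example only): a minimal netdevice notifier.
 * netdev_notifier_info_to_dev() recovers the net_device from the opaque
 * pointer.  The example_* names are hypothetical.
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        switch (event) {
        case NETDEV_UP:
                netdev_info(dev, "is up\n");
                break;
        case NETDEV_GOING_DOWN:
                netdev_info(dev, "is going down\n");
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
        .notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_netdev_nb) replays REGISTER/UP for
 * existing devices; unregister_netdevice_notifier() synthesizes DOWN and
 * UNREGISTER on removal.
 */
#endif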
1751
1752/**
1753 * unregister_netdevice_notifier - unregister a network notifier block
1754 * @nb: notifier
1755 *
1756 * Unregister a notifier previously registered by
1757 * register_netdevice_notifier(). The notifier is unlinked from the
1758 * kernel structures and may then be reused. A negative errno code
1759 * is returned on a failure.
1760 *
1761 * After unregistering, unregister and down device events are synthesized
1762 * for all devices on the device list to the removed notifier to remove
1763 * the need for special case cleanup code.
1764 */
1765
1766int unregister_netdevice_notifier(struct notifier_block *nb)
1767{
1768 struct net *net;
1769 int err;
1770
1771 /* Close race with setup_net() and cleanup_net() */
1772 down_write(&pernet_ops_rwsem);
1773 rtnl_lock();
1774 err = raw_notifier_chain_unregister(&netdev_chain, nb);
1775 if (err)
1776 goto unlock;
1777
1778 for_each_net(net)
1779 call_netdevice_unregister_net_notifiers(nb, net);
1780
1781unlock:
1782 rtnl_unlock();
1783 up_write(&pernet_ops_rwsem);
1784 return err;
1785}
1786EXPORT_SYMBOL(unregister_netdevice_notifier);
1787
1788static int __register_netdevice_notifier_net(struct net *net,
1789 struct notifier_block *nb,
1790 bool ignore_call_fail)
1791{
1792 int err;
1793
1794 err = raw_notifier_chain_register(&net->netdev_chain, nb);
1795 if (err)
1796 return err;
1797 if (dev_boot_phase)
1798 return 0;
1799
1800 err = call_netdevice_register_net_notifiers(nb, net);
1801 if (err && !ignore_call_fail)
1802 goto chain_unregister;
1803
1804 return 0;
1805
1806chain_unregister:
1807 raw_notifier_chain_unregister(&net->netdev_chain, nb);
1808 return err;
1809}
1810
1811static int __unregister_netdevice_notifier_net(struct net *net,
1812 struct notifier_block *nb)
1813{
1814 int err;
1815
1816 err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
1817 if (err)
1818 return err;
1819
1820 call_netdevice_unregister_net_notifiers(nb, net);
1821 return 0;
1822}
1823
1824/**
1825 * register_netdevice_notifier_net - register a per-netns network notifier block
1826 * @net: network namespace
1827 * @nb: notifier
1828 *
1829 * Register a notifier to be called when network device events occur.
1830 * The notifier passed is linked into the kernel structures and must
1831 * not be reused until it has been unregistered. A negative errno code
1832 * is returned on a failure.
1833 *
1834 * When registered, all registration and up events are replayed
1835 * to the new notifier to give it a race-free
1836 * view of the network device list.
1837 */
1838
1839int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
1840{
1841 int err;
1842
1843 rtnl_lock();
1844 err = __register_netdevice_notifier_net(net, nb, false);
1845 rtnl_unlock();
1846 return err;
1847}
1848EXPORT_SYMBOL(register_netdevice_notifier_net);
1849
1850/**
1851 * unregister_netdevice_notifier_net - unregister a per-netns
1852 * network notifier block
1853 * @net: network namespace
1854 * @nb: notifier
1855 *
1856 * Unregister a notifier previously registered by
1857 * register_netdevice_notifier_net(). The notifier is unlinked from the
1858 * kernel structures and may then be reused. A negative errno code
1859 * is returned on a failure.
1860 *
1861 * After unregistering, unregister and down device events are synthesized
1862 * for all devices on the device list to the removed notifier to remove
1863 * the need for special case cleanup code.
1864 */
1865
1866int unregister_netdevice_notifier_net(struct net *net,
1867 struct notifier_block *nb)
1868{
1869 int err;
1870
1871 rtnl_lock();
1872 err = __unregister_netdevice_notifier_net(net, nb);
1873 rtnl_unlock();
1874 return err;
1875}
1876EXPORT_SYMBOL(unregister_netdevice_notifier_net);
1877
1878static void __move_netdevice_notifier_net(struct net *src_net,
1879 struct net *dst_net,
1880 struct notifier_block *nb)
1881{
1882 __unregister_netdevice_notifier_net(src_net, nb);
1883 __register_netdevice_notifier_net(dst_net, nb, true);
1884}
1885
1886int register_netdevice_notifier_dev_net(struct net_device *dev,
1887 struct notifier_block *nb,
1888 struct netdev_net_notifier *nn)
1889{
1890 int err;
1891
1892 rtnl_lock();
1893 err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
1894 if (!err) {
1895 nn->nb = nb;
1896 list_add(&nn->list, &dev->net_notifier_list);
1897 }
1898 rtnl_unlock();
1899 return err;
1900}
1901EXPORT_SYMBOL(register_netdevice_notifier_dev_net);
1902
1903int unregister_netdevice_notifier_dev_net(struct net_device *dev,
1904 struct notifier_block *nb,
1905 struct netdev_net_notifier *nn)
1906{
1907 int err;
1908
1909 rtnl_lock();
1910 list_del(&nn->list);
1911 err = __unregister_netdevice_notifier_net(dev_net(dev), nb);
1912 rtnl_unlock();
1913 return err;
1914}
1915EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net);
1916
1917static void move_netdevice_notifiers_dev_net(struct net_device *dev,
1918 struct net *net)
1919{
1920 struct netdev_net_notifier *nn;
1921
1922 list_for_each_entry(nn, &dev->net_notifier_list, list)
1923 __move_netdevice_notifier_net(dev_net(dev), net, nn->nb);
1924}
1925
1926/**
1927 * call_netdevice_notifiers_info - call all network notifier blocks
1928 * @val: value passed unmodified to notifier function
1929 * @info: notifier information data
1930 *
1931 * Call all network notifier blocks. Parameters and return value
1932 * are as for raw_notifier_call_chain().
1933 */
1934
1935int call_netdevice_notifiers_info(unsigned long val,
1936 struct netdev_notifier_info *info)
1937{
1938 struct net *net = dev_net(info->dev);
1939 int ret;
1940
1941 ASSERT_RTNL();
1942
1943 /* Run per-netns notifier block chain first, then run the global one.
1944 * Hopefully, one day, the global one is going to be removed after
1945 * all notifier block registrants are converted to be per-netns.
1946 */
1947 ret = raw_notifier_call_chain(&net->netdev_chain, val, info);
1948 if (ret & NOTIFY_STOP_MASK)
1949 return ret;
1950 return raw_notifier_call_chain(&netdev_chain, val, info);
1951}
1952
1953/**
1954 * call_netdevice_notifiers_info_robust - call per-netns notifier blocks
1955 * and roll back on error
1956 * @val_up: value passed unmodified to notifier function
1957 * @val_down: value passed unmodified to the notifier function when
1958 * recovering from an error on @val_up
1959 * @info: notifier information data
1960 *
1961 * Call all per-netns network notifier blocks, but not notifier blocks on
1962 * the global notifier chain. Parameters and return value are as for
1963 * raw_notifier_call_chain_robust().
1964 */
1965
1966static int
1967call_netdevice_notifiers_info_robust(unsigned long val_up,
1968 unsigned long val_down,
1969 struct netdev_notifier_info *info)
1970{
1971 struct net *net = dev_net(info->dev);
1972
1973 ASSERT_RTNL();
1974
1975 return raw_notifier_call_chain_robust(&net->netdev_chain,
1976 val_up, val_down, info);
1977}
1978
1979static int call_netdevice_notifiers_extack(unsigned long val,
1980 struct net_device *dev,
1981 struct netlink_ext_ack *extack)
1982{
1983 struct netdev_notifier_info info = {
1984 .dev = dev,
1985 .extack = extack,
1986 };
1987
1988 return call_netdevice_notifiers_info(val, &info);
1989}
1990
1991/**
1992 * call_netdevice_notifiers - call all network notifier blocks
1993 * @val: value passed unmodified to notifier function
1994 * @dev: net_device pointer passed unmodified to notifier function
1995 *
1996 * Call all network notifier blocks. Parameters and return value
1997 * are as for raw_notifier_call_chain().
1998 */
1999
2000int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
2001{
2002 return call_netdevice_notifiers_extack(val, dev, NULL);
2003}
2004EXPORT_SYMBOL(call_netdevice_notifiers);
2005
2006/**
2007 * call_netdevice_notifiers_mtu - call all network notifier blocks
2008 * @val: value passed unmodified to notifier function
2009 * @dev: net_device pointer passed unmodified to notifier function
2010 * @arg: additional u32 argument passed to the notifier function
2011 *
2012 * Call all network notifier blocks. Parameters and return value
2013 * are as for raw_notifier_call_chain().
2014 */
2015static int call_netdevice_notifiers_mtu(unsigned long val,
2016 struct net_device *dev, u32 arg)
2017{
2018 struct netdev_notifier_info_ext info = {
2019 .info.dev = dev,
2020 .ext.mtu = arg,
2021 };
2022
2023 BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
2024
2025 return call_netdevice_notifiers_info(val, &info.info);
2026}
2027
2028#ifdef CONFIG_NET_INGRESS
2029static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
2030
2031void net_inc_ingress_queue(void)
2032{
2033 static_branch_inc(&ingress_needed_key);
2034}
2035EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
2036
2037void net_dec_ingress_queue(void)
2038{
2039 static_branch_dec(&ingress_needed_key);
2040}
2041EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
2042#endif
2043
2044#ifdef CONFIG_NET_EGRESS
2045static DEFINE_STATIC_KEY_FALSE(egress_needed_key);
2046
2047void net_inc_egress_queue(void)
2048{
2049 static_branch_inc(&egress_needed_key);
2050}
2051EXPORT_SYMBOL_GPL(net_inc_egress_queue);
2052
2053void net_dec_egress_queue(void)
2054{
2055 static_branch_dec(&egress_needed_key);
2056}
2057EXPORT_SYMBOL_GPL(net_dec_egress_queue);
2058#endif
2059
2060DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
2061EXPORT_SYMBOL(netstamp_needed_key);
2062#ifdef CONFIG_JUMP_LABEL
2063static atomic_t netstamp_needed_deferred;
2064static atomic_t netstamp_wanted;
2065static void netstamp_clear(struct work_struct *work)
2066{
2067 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
2068 int wanted;
2069
2070 wanted = atomic_add_return(deferred, &netstamp_wanted);
2071 if (wanted > 0)
2072 static_branch_enable(&netstamp_needed_key);
2073 else
2074 static_branch_disable(&netstamp_needed_key);
2075}
2076static DECLARE_WORK(netstamp_work, netstamp_clear);
2077#endif
2078
2079void net_enable_timestamp(void)
2080{
2081#ifdef CONFIG_JUMP_LABEL
2082 int wanted = atomic_read(&netstamp_wanted);
2083
2084 while (wanted > 0) {
2085 if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted + 1))
2086 return;
2087 }
2088 atomic_inc(&netstamp_needed_deferred);
2089 schedule_work(&netstamp_work);
2090#else
2091 static_branch_inc(&netstamp_needed_key);
2092#endif
2093}
2094EXPORT_SYMBOL(net_enable_timestamp);
2095
2096void net_disable_timestamp(void)
2097{
2098#ifdef CONFIG_JUMP_LABEL
2099 int wanted = atomic_read(&netstamp_wanted);
2100
2101 while (wanted > 1) {
2102 if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted - 1))
2103 return;
2104 }
2105 atomic_dec(&netstamp_needed_deferred);
2106 schedule_work(&netstamp_work);
2107#else
2108 static_branch_dec(&netstamp_needed_key);
2109#endif
2110}
2111EXPORT_SYMBOL(net_disable_timestamp);
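/* Illustrative sketch (not part of this file): any code path that needs RX
 * software timestamps keeps the static key enabled for as long as needed and
 * must balance the calls, as the socket layer does via sock_enable_timestamp()
 * and sock_disable_timestamp(). A hypothetical feature toggle would do:
 *
 *        static void foo_set_rx_tstamp(bool on)
 *        {
 *                if (on)
 *                        net_enable_timestamp();
 *                else
 *                        net_disable_timestamp();
 *        }
 */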
2112
2113static inline void net_timestamp_set(struct sk_buff *skb)
2114{
2115 skb->tstamp = 0;
2116 skb->mono_delivery_time = 0;
2117 if (static_branch_unlikely(&netstamp_needed_key))
2118 skb->tstamp = ktime_get_real();
2119}
2120
2121#define net_timestamp_check(COND, SKB) \
2122 if (static_branch_unlikely(&netstamp_needed_key)) { \
2123 if ((COND) && !(SKB)->tstamp) \
2124 (SKB)->tstamp = ktime_get_real(); \
2125 } \
2126
2127bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
2128{
2129 return __is_skb_forwardable(dev, skb, true);
2130}
2131EXPORT_SYMBOL_GPL(is_skb_forwardable);
2132
2133static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb,
2134 bool check_mtu)
2135{
2136 int ret = ____dev_forward_skb(dev, skb, check_mtu);
2137
2138 if (likely(!ret)) {
2139 skb->protocol = eth_type_trans(skb, dev);
2140 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
2141 }
2142
2143 return ret;
2144}
2145
2146int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2147{
2148 return __dev_forward_skb2(dev, skb, true);
2149}
2150EXPORT_SYMBOL_GPL(__dev_forward_skb);
2151
2152/**
2153 * dev_forward_skb - loopback an skb to another netif
2154 *
2155 * @dev: destination network device
2156 * @skb: buffer to forward
2157 *
2158 * return values:
2159 * NET_RX_SUCCESS (no congestion)
2160 * NET_RX_DROP (packet was dropped, but freed)
2161 *
2162 * dev_forward_skb can be used for injecting an skb from the
2163 * start_xmit function of one device into the receive queue
2164 * of another device.
2165 *
2166 * The receiving device may be in another namespace, so
2167 * we have to clear all information in the skb that could
2168 * impact namespace isolation.
2169 */
2170int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2171{
2172 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
2173}
2174EXPORT_SYMBOL_GPL(dev_forward_skb);
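/* Illustrative sketch (not part of this file): a veth-like driver can hand
 * frames from its ndo_start_xmit() directly to the peer device's receive
 * path. foo_xmit() and struct foo_priv are hypothetical; a real driver would
 * likely use per-CPU counters instead of dev->stats.
 *
 *        static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
 *        {
 *                struct foo_priv *priv = netdev_priv(dev);
 *                struct net_device *peer = priv->peer;
 *
 *                if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
 *                        dev->stats.tx_dropped++;
 *
 *                return NETDEV_TX_OK;
 *        }
 */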
2175
2176int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb)
2177{
2178 return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb);
2179}
2180
2181static inline int deliver_skb(struct sk_buff *skb,
2182 struct packet_type *pt_prev,
2183 struct net_device *orig_dev)
2184{
2185 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
2186 return -ENOMEM;
2187 refcount_inc(&skb->users);
2188 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2189}
2190
2191static inline void deliver_ptype_list_skb(struct sk_buff *skb,
2192 struct packet_type **pt,
2193 struct net_device *orig_dev,
2194 __be16 type,
2195 struct list_head *ptype_list)
2196{
2197 struct packet_type *ptype, *pt_prev = *pt;
2198
2199 list_for_each_entry_rcu(ptype, ptype_list, list) {
2200 if (ptype->type != type)
2201 continue;
2202 if (pt_prev)
2203 deliver_skb(skb, pt_prev, orig_dev);
2204 pt_prev = ptype;
2205 }
2206 *pt = pt_prev;
2207}
2208
2209static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
2210{
2211 if (!ptype->af_packet_priv || !skb->sk)
2212 return false;
2213
2214 if (ptype->id_match)
2215 return ptype->id_match(ptype, skb->sk);
2216 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
2217 return true;
2218
2219 return false;
2220}
2221
2222/**
2223 * dev_nit_active - return true if any network interface taps are in use
2224 *
2225 * @dev: network device to check for the presence of taps
2226 */
2227bool dev_nit_active(struct net_device *dev)
2228{
2229 return !list_empty(&net_hotdata.ptype_all) ||
2230 !list_empty(&dev->ptype_all);
2231}
2232EXPORT_SYMBOL_GPL(dev_nit_active);
2233
2234/*
2235 * Support routine. Sends outgoing frames to any network
2236 * taps currently in use.
2237 */
2238
2239void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
2240{
2241 struct list_head *ptype_list = &net_hotdata.ptype_all;
2242 struct packet_type *ptype, *pt_prev = NULL;
2243 struct sk_buff *skb2 = NULL;
2244
2245 rcu_read_lock();
2246again:
2247 list_for_each_entry_rcu(ptype, ptype_list, list) {
2248 if (READ_ONCE(ptype->ignore_outgoing))
2249 continue;
2250
2251 /* Never send packets back to the socket
2252 * they originated from - MvS (miquels@drinkel.ow.org)
2253 */
2254 if (skb_loop_sk(ptype, skb))
2255 continue;
2256
2257 if (pt_prev) {
2258 deliver_skb(skb2, pt_prev, skb->dev);
2259 pt_prev = ptype;
2260 continue;
2261 }
2262
2263 /* need to clone skb, done only once */
2264 skb2 = skb_clone(skb, GFP_ATOMIC);
2265 if (!skb2)
2266 goto out_unlock;
2267
2268 net_timestamp_set(skb2);
2269
2270 /* skb->nh should be correctly
2271 * set by sender, so that the second statement is
2272 * just protection against buggy protocols.
2273 */
2274 skb_reset_mac_header(skb2);
2275
2276 if (skb_network_header(skb2) < skb2->data ||
2277 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
2278 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
2279 ntohs(skb2->protocol),
2280 dev->name);
2281 skb_reset_network_header(skb2);
2282 }
2283
2284 skb2->transport_header = skb2->network_header;
2285 skb2->pkt_type = PACKET_OUTGOING;
2286 pt_prev = ptype;
2287 }
2288
2289 if (ptype_list == &net_hotdata.ptype_all) {
2290 ptype_list = &dev->ptype_all;
2291 goto again;
2292 }
2293out_unlock:
2294 if (pt_prev) {
2295 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
2296 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
2297 else
2298 kfree_skb(skb2);
2299 }
2300 rcu_read_unlock();
2301}
2302EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
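/* Illustrative sketch (not part of this file): a packet tap registered with
 * dev_add_pack() for ETH_P_ALL receives the outgoing clones delivered above,
 * marked PACKET_OUTGOING. foo_tap_rcv() and foo_tap are hypothetical names.
 *
 *        static int foo_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *                               struct packet_type *pt,
 *                               struct net_device *orig_dev)
 *        {
 *                if (skb->pkt_type == PACKET_OUTGOING)
 *                        netdev_dbg(dev, "saw outgoing frame, len %u\n",
 *                                   skb->len);
 *                kfree_skb(skb);
 *                return NET_RX_SUCCESS;
 *        }
 *
 *        static struct packet_type foo_tap __read_mostly = {
 *                .type = cpu_to_be16(ETH_P_ALL),
 *                .func = foo_tap_rcv,
 *        };
 *
 *        dev_add_pack(&foo_tap);
 *        ...
 *        dev_remove_pack(&foo_tap);
 */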
2303
2304/**
2305 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
2306 * @dev: Network device
2307 * @txq: number of queues available
2308 *
2309 * If real_num_tx_queues is changed the tc mappings may no longer be
2310 * valid. To resolve this verify the tc mapping remains valid and if
2311 * not, NULL the mapping. Once no priorities map to an offset/count
2312 * pair it will no longer be used. In the worst case, if TC0 is
2313 * invalid, nothing can be done, so priority mappings are disabled
2314 * entirely. It is expected that drivers will fix this mapping if they
2315 * can before calling netif_set_real_num_tx_queues.
2316 */
2317static void netif_setup_tc(struct net_device *dev, unsigned int txq)
2318{
2319 int i;
2320 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2321
2322 /* If TC0 is invalidated disable TC mapping */
2323 if (tc->offset + tc->count > txq) {
2324 netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
2325 dev->num_tc = 0;
2326 return;
2327 }
2328
2329 /* Invalidated prio to tc mappings set to TC0 */
2330 for (i = 1; i < TC_BITMASK + 1; i++) {
2331 int q = netdev_get_prio_tc_map(dev, i);
2332
2333 tc = &dev->tc_to_txq[q];
2334 if (tc->offset + tc->count > txq) {
2335 netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
2336 i, q);
2337 netdev_set_prio_tc_map(dev, i, 0);
2338 }
2339 }
2340}
2341
2342int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
2343{
2344 if (dev->num_tc) {
2345 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2346 int i;
2347
2348 /* walk through the TCs and see if it falls into any of them */
2349 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
2350 if ((txq - tc->offset) < tc->count)
2351 return i;
2352 }
2353
2354 /* didn't find it, just return -1 to indicate no match */
2355 return -1;
2356 }
2357
2358 return 0;
2359}
2360EXPORT_SYMBOL(netdev_txq_to_tc);
2361
2362#ifdef CONFIG_XPS
2363static struct static_key xps_needed __read_mostly;
2364static struct static_key xps_rxqs_needed __read_mostly;
2365static DEFINE_MUTEX(xps_map_mutex);
2366#define xmap_dereference(P) \
2367 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2368
2369static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
2370 struct xps_dev_maps *old_maps, int tci, u16 index)
2371{
2372 struct xps_map *map = NULL;
2373 int pos;
2374
2375 map = xmap_dereference(dev_maps->attr_map[tci]);
2376 if (!map)
2377 return false;
2378
2379 for (pos = map->len; pos--;) {
2380 if (map->queues[pos] != index)
2381 continue;
2382
2383 if (map->len > 1) {
2384 map->queues[pos] = map->queues[--map->len];
2385 break;
2386 }
2387
2388 if (old_maps)
2389 RCU_INIT_POINTER(old_maps->attr_map[tci], NULL);
2390 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2391 kfree_rcu(map, rcu);
2392 return false;
2393 }
2394
2395 return true;
2396}
2397
2398static bool remove_xps_queue_cpu(struct net_device *dev,
2399 struct xps_dev_maps *dev_maps,
2400 int cpu, u16 offset, u16 count)
2401{
2402 int num_tc = dev_maps->num_tc;
2403 bool active = false;
2404 int tci;
2405
2406 for (tci = cpu * num_tc; num_tc--; tci++) {
2407 int i, j;
2408
2409 for (i = count, j = offset; i--; j++) {
2410 if (!remove_xps_queue(dev_maps, NULL, tci, j))
2411 break;
2412 }
2413
2414 active |= i < 0;
2415 }
2416
2417 return active;
2418}
2419
2420static void reset_xps_maps(struct net_device *dev,
2421 struct xps_dev_maps *dev_maps,
2422 enum xps_map_type type)
2423{
2424 static_key_slow_dec_cpuslocked(&xps_needed);
2425 if (type == XPS_RXQS)
2426 static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
2427
2428 RCU_INIT_POINTER(dev->xps_maps[type], NULL);
2429
2430 kfree_rcu(dev_maps, rcu);
2431}
2432
2433static void clean_xps_maps(struct net_device *dev, enum xps_map_type type,
2434 u16 offset, u16 count)
2435{
2436 struct xps_dev_maps *dev_maps;
2437 bool active = false;
2438 int i, j;
2439
2440 dev_maps = xmap_dereference(dev->xps_maps[type]);
2441 if (!dev_maps)
2442 return;
2443
2444 for (j = 0; j < dev_maps->nr_ids; j++)
2445 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, count);
2446 if (!active)
2447 reset_xps_maps(dev, dev_maps, type);
2448
2449 if (type == XPS_CPUS) {
2450 for (i = offset + (count - 1); count--; i--)
2451 netdev_queue_numa_node_write(
2452 netdev_get_tx_queue(dev, i), NUMA_NO_NODE);
2453 }
2454}
2455
2456static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2457 u16 count)
2458{
2459 if (!static_key_false(&xps_needed))
2460 return;
2461
2462 cpus_read_lock();
2463 mutex_lock(&xps_map_mutex);
2464
2465 if (static_key_false(&xps_rxqs_needed))
2466 clean_xps_maps(dev, XPS_RXQS, offset, count);
2467
2468 clean_xps_maps(dev, XPS_CPUS, offset, count);
2469
2470 mutex_unlock(&xps_map_mutex);
2471 cpus_read_unlock();
2472}
2473
2474static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2475{
2476 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2477}
2478
2479static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
2480 u16 index, bool is_rxqs_map)
2481{
2482 struct xps_map *new_map;
2483 int alloc_len = XPS_MIN_MAP_ALLOC;
2484 int i, pos;
2485
2486 for (pos = 0; map && pos < map->len; pos++) {
2487 if (map->queues[pos] != index)
2488 continue;
2489 return map;
2490 }
2491
2492 /* Need to add tx-queue to this CPU's/rx-queue's existing map */
2493 if (map) {
2494 if (pos < map->alloc_len)
2495 return map;
2496
2497 alloc_len = map->alloc_len * 2;
2498 }
2499
2500 /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
2501 * map
2502 */
2503 if (is_rxqs_map)
2504 new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
2505 else
2506 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2507 cpu_to_node(attr_index));
2508 if (!new_map)
2509 return NULL;
2510
2511 for (i = 0; i < pos; i++)
2512 new_map->queues[i] = map->queues[i];
2513 new_map->alloc_len = alloc_len;
2514 new_map->len = pos;
2515
2516 return new_map;
2517}
2518
2519/* Copy xps maps at a given index */
2520static void xps_copy_dev_maps(struct xps_dev_maps *dev_maps,
2521 struct xps_dev_maps *new_dev_maps, int index,
2522 int tc, bool skip_tc)
2523{
2524 int i, tci = index * dev_maps->num_tc;
2525 struct xps_map *map;
2526
2527 /* copy maps belonging to foreign traffic classes */
2528 for (i = 0; i < dev_maps->num_tc; i++, tci++) {
2529 if (i == tc && skip_tc)
2530 continue;
2531
2532 /* fill in the new device map from the old device map */
2533 map = xmap_dereference(dev_maps->attr_map[tci]);
2534 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2535 }
2536}
2537
2538/* Must be called under cpus_read_lock */
2539int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
2540 u16 index, enum xps_map_type type)
2541{
2542 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL, *old_dev_maps = NULL;
2543 const unsigned long *online_mask = NULL;
2544 bool active = false, copy = false;
2545 int i, j, tci, numa_node_id = -2;
2546 int maps_sz, num_tc = 1, tc = 0;
2547 struct xps_map *map, *new_map;
2548 unsigned int nr_ids;
2549
2550 WARN_ON_ONCE(index >= dev->num_tx_queues);
2551
2552 if (dev->num_tc) {
2553 /* Do not allow XPS on subordinate device directly */
2554 num_tc = dev->num_tc;
2555 if (num_tc < 0)
2556 return -EINVAL;
2557
2558 /* If queue belongs to subordinate dev use its map */
2559 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
2560
2561 tc = netdev_txq_to_tc(dev, index);
2562 if (tc < 0)
2563 return -EINVAL;
2564 }
2565
2566 mutex_lock(&xps_map_mutex);
2567
2568 dev_maps = xmap_dereference(dev->xps_maps[type]);
2569 if (type == XPS_RXQS) {
2570 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
2571 nr_ids = dev->num_rx_queues;
2572 } else {
2573 maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
2574 if (num_possible_cpus() > 1)
2575 online_mask = cpumask_bits(cpu_online_mask);
2576 nr_ids = nr_cpu_ids;
2577 }
2578
2579 if (maps_sz < L1_CACHE_BYTES)
2580 maps_sz = L1_CACHE_BYTES;
2581
2582 /* The old dev_maps could be larger or smaller than the one we're
2583 * setting up now, as dev->num_tc or nr_ids could have been updated in
2584 * between. We could try to be smart, but let's be safe instead and only
2585 * copy foreign traffic classes if the two map sizes match.
2586 */
2587 if (dev_maps &&
2588 dev_maps->num_tc == num_tc && dev_maps->nr_ids == nr_ids)
2589 copy = true;
2590
2591 /* allocate memory for queue storage */
2592 for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
2593 j < nr_ids;) {
2594 if (!new_dev_maps) {
2595 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2596 if (!new_dev_maps) {
2597 mutex_unlock(&xps_map_mutex);
2598 return -ENOMEM;
2599 }
2600
2601 new_dev_maps->nr_ids = nr_ids;
2602 new_dev_maps->num_tc = num_tc;
2603 }
2604
2605 tci = j * num_tc + tc;
2606 map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL;
2607
2608 map = expand_xps_map(map, j, index, type == XPS_RXQS);
2609 if (!map)
2610 goto error;
2611
2612 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2613 }
2614
2615 if (!new_dev_maps)
2616 goto out_no_new_maps;
2617
2618 if (!dev_maps) {
2619 /* Increment static keys at most once per type */
2620 static_key_slow_inc_cpuslocked(&xps_needed);
2621 if (type == XPS_RXQS)
2622 static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
2623 }
2624
2625 for (j = 0; j < nr_ids; j++) {
2626 bool skip_tc = false;
2627
2628 tci = j * num_tc + tc;
2629 if (netif_attr_test_mask(j, mask, nr_ids) &&
2630 netif_attr_test_online(j, online_mask, nr_ids)) {
2631 /* add tx-queue to CPU/rx-queue maps */
2632 int pos = 0;
2633
2634 skip_tc = true;
2635
2636 map = xmap_dereference(new_dev_maps->attr_map[tci]);
2637 while ((pos < map->len) && (map->queues[pos] != index))
2638 pos++;
2639
2640 if (pos == map->len)
2641 map->queues[map->len++] = index;
2642#ifdef CONFIG_NUMA
2643 if (type == XPS_CPUS) {
2644 if (numa_node_id == -2)
2645 numa_node_id = cpu_to_node(j);
2646 else if (numa_node_id != cpu_to_node(j))
2647 numa_node_id = -1;
2648 }
2649#endif
2650 }
2651
2652 if (copy)
2653 xps_copy_dev_maps(dev_maps, new_dev_maps, j, tc,
2654 skip_tc);
2655 }
2656
2657 rcu_assign_pointer(dev->xps_maps[type], new_dev_maps);
2658
2659 /* Cleanup old maps */
2660 if (!dev_maps)
2661 goto out_no_old_maps;
2662
2663 for (j = 0; j < dev_maps->nr_ids; j++) {
2664 for (i = num_tc, tci = j * dev_maps->num_tc; i--; tci++) {
2665 map = xmap_dereference(dev_maps->attr_map[tci]);
2666 if (!map)
2667 continue;
2668
2669 if (copy) {
2670 new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2671 if (map == new_map)
2672 continue;
2673 }
2674
2675 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2676 kfree_rcu(map, rcu);
2677 }
2678 }
2679
2680 old_dev_maps = dev_maps;
2681
2682out_no_old_maps:
2683 dev_maps = new_dev_maps;
2684 active = true;
2685
2686out_no_new_maps:
2687 if (type == XPS_CPUS)
2688 /* update Tx queue numa node */
2689 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2690 (numa_node_id >= 0) ?
2691 numa_node_id : NUMA_NO_NODE);
2692
2693 if (!dev_maps)
2694 goto out_no_maps;
2695
2696 /* removes tx-queue from unused CPUs/rx-queues */
2697 for (j = 0; j < dev_maps->nr_ids; j++) {
2698 tci = j * dev_maps->num_tc;
2699
2700 for (i = 0; i < dev_maps->num_tc; i++, tci++) {
2701 if (i == tc &&
2702 netif_attr_test_mask(j, mask, dev_maps->nr_ids) &&
2703 netif_attr_test_online(j, online_mask, dev_maps->nr_ids))
2704 continue;
2705
2706 active |= remove_xps_queue(dev_maps,
2707 copy ? old_dev_maps : NULL,
2708 tci, index);
2709 }
2710 }
2711
2712 if (old_dev_maps)
2713 kfree_rcu(old_dev_maps, rcu);
2714
2715 /* free map if not active */
2716 if (!active)
2717 reset_xps_maps(dev, dev_maps, type);
2718
2719out_no_maps:
2720 mutex_unlock(&xps_map_mutex);
2721
2722 return 0;
2723error:
2724 /* remove any maps that we added */
2725 for (j = 0; j < nr_ids; j++) {
2726 for (i = num_tc, tci = j * num_tc; i--; tci++) {
2727 new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2728 map = copy ?
2729 xmap_dereference(dev_maps->attr_map[tci]) :
2730 NULL;
2731 if (new_map && new_map != map)
2732 kfree(new_map);
2733 }
2734 }
2735
2736 mutex_unlock(&xps_map_mutex);
2737
2738 kfree(new_dev_maps);
2739 return -ENOMEM;
2740}
2741EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
2742
2743int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2744 u16 index)
2745{
2746 int ret;
2747
2748 cpus_read_lock();
2749 ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS);
2750 cpus_read_unlock();
2751
2752 return ret;
2753}
2754EXPORT_SYMBOL(netif_set_xps_queue);
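/* Illustrative sketch (not part of this file): a multiqueue driver typically
 * pins each Tx queue to the CPU servicing its completion interrupt, e.g. from
 * its queue setup path. foo_setup_txq() is a hypothetical name.
 *
 *        static void foo_setup_txq(struct net_device *dev, u16 qid, int cpu)
 *        {
 *                netif_set_xps_queue(dev, cpumask_of(cpu), qid);
 *        }
 */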
2755
2756#endif
2757static void netdev_unbind_all_sb_channels(struct net_device *dev)
2758{
2759 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2760
2761 /* Unbind any subordinate channels */
2762 while (txq-- != &dev->_tx[0]) {
2763 if (txq->sb_dev)
2764 netdev_unbind_sb_channel(dev, txq->sb_dev);
2765 }
2766}
2767
2768void netdev_reset_tc(struct net_device *dev)
2769{
2770#ifdef CONFIG_XPS
2771 netif_reset_xps_queues_gt(dev, 0);
2772#endif
2773 netdev_unbind_all_sb_channels(dev);
2774
2775 /* Reset TC configuration of device */
2776 dev->num_tc = 0;
2777 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2778 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2779}
2780EXPORT_SYMBOL(netdev_reset_tc);
2781
2782int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2783{
2784 if (tc >= dev->num_tc)
2785 return -EINVAL;
2786
2787#ifdef CONFIG_XPS
2788 netif_reset_xps_queues(dev, offset, count);
2789#endif
2790 dev->tc_to_txq[tc].count = count;
2791 dev->tc_to_txq[tc].offset = offset;
2792 return 0;
2793}
2794EXPORT_SYMBOL(netdev_set_tc_queue);
2795
2796int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2797{
2798 if (num_tc > TC_MAX_QUEUE)
2799 return -EINVAL;
2800
2801#ifdef CONFIG_XPS
2802 netif_reset_xps_queues_gt(dev, 0);
2803#endif
2804 netdev_unbind_all_sb_channels(dev);
2805
2806 dev->num_tc = num_tc;
2807 return 0;
2808}
2809EXPORT_SYMBOL(netdev_set_num_tc);
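/* Illustrative sketch (not part of this file): a driver exposing two traffic
 * classes over eight Tx queues would typically configure the mapping like
 * this (the values are hypothetical):
 *
 *        netdev_set_num_tc(dev, 2);
 *        netdev_set_tc_queue(dev, 0, 4, 0);        TC0 -> queues 0-3
 *        netdev_set_tc_queue(dev, 1, 4, 4);        TC1 -> queues 4-7
 *        netdev_set_prio_tc_map(dev, 0, 0);        priority 0 -> TC0
 *        netdev_set_prio_tc_map(dev, 1, 1);        priority 1 -> TC1
 */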
2810
2811void netdev_unbind_sb_channel(struct net_device *dev,
2812 struct net_device *sb_dev)
2813{
2814 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2815
2816#ifdef CONFIG_XPS
2817 netif_reset_xps_queues_gt(sb_dev, 0);
2818#endif
2819 memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
2820 memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
2821
2822 while (txq-- != &dev->_tx[0]) {
2823 if (txq->sb_dev == sb_dev)
2824 txq->sb_dev = NULL;
2825 }
2826}
2827EXPORT_SYMBOL(netdev_unbind_sb_channel);
2828
2829int netdev_bind_sb_channel_queue(struct net_device *dev,
2830 struct net_device *sb_dev,
2831 u8 tc, u16 count, u16 offset)
2832{
2833 /* Make certain the sb_dev and dev are already configured */
2834 if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
2835 return -EINVAL;
2836
2837 /* We cannot hand out queues we don't have */
2838 if ((offset + count) > dev->real_num_tx_queues)
2839 return -EINVAL;
2840
2841 /* Record the mapping */
2842 sb_dev->tc_to_txq[tc].count = count;
2843 sb_dev->tc_to_txq[tc].offset = offset;
2844
2845 /* Provide a way for Tx queue to find the tc_to_txq map or
2846 * XPS map for itself.
2847 */
2848 while (count--)
2849 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
2850
2851 return 0;
2852}
2853EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
2854
2855int netdev_set_sb_channel(struct net_device *dev, u16 channel)
2856{
2857 /* Do not use a multiqueue device to represent a subordinate channel */
2858 if (netif_is_multiqueue(dev))
2859 return -ENODEV;
2860
2861 /* We allow channels 1 - 32767 to be used for subordinate channels.
2862 * Channel 0 is meant to be "native" mode and used only to represent
2863 * the main root device. We allow writing 0 to reset the device back
2864 * to normal mode after being used as a subordinate channel.
2865 */
2866 if (channel > S16_MAX)
2867 return -EINVAL;
2868
2869 dev->num_tc = -channel;
2870
2871 return 0;
2872}
2873EXPORT_SYMBOL(netdev_set_sb_channel);
2874
2875/*
2876 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2877 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2878 */
2879int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2880{
2881 bool disabling;
2882 int rc;
2883
2884 disabling = txq < dev->real_num_tx_queues;
2885
2886 if (txq < 1 || txq > dev->num_tx_queues)
2887 return -EINVAL;
2888
2889 if (dev->reg_state == NETREG_REGISTERED ||
2890 dev->reg_state == NETREG_UNREGISTERING) {
2891 ASSERT_RTNL();
2892
2893 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2894 txq);
2895 if (rc)
2896 return rc;
2897
2898 if (dev->num_tc)
2899 netif_setup_tc(dev, txq);
2900
2901 dev_qdisc_change_real_num_tx(dev, txq);
2902
2903 dev->real_num_tx_queues = txq;
2904
2905 if (disabling) {
2906 synchronize_net();
2907 qdisc_reset_all_tx_gt(dev, txq);
2908#ifdef CONFIG_XPS
2909 netif_reset_xps_queues_gt(dev, txq);
2910#endif
2911 }
2912 } else {
2913 dev->real_num_tx_queues = txq;
2914 }
2915
2916 return 0;
2917}
2918EXPORT_SYMBOL(netif_set_real_num_tx_queues);
2919
2920#ifdef CONFIG_SYSFS
2921/**
2922 * netif_set_real_num_rx_queues - set actual number of RX queues used
2923 * @dev: Network device
2924 * @rxq: Actual number of RX queues
2925 *
2926 * This must be called either with the rtnl_lock held or before
2927 * registration of the net device. Returns 0 on success, or a
2928 * negative error code. If called before registration, it always
2929 * succeeds.
2930 */
2931int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2932{
2933 int rc;
2934
2935 if (rxq < 1 || rxq > dev->num_rx_queues)
2936 return -EINVAL;
2937
2938 if (dev->reg_state == NETREG_REGISTERED) {
2939 ASSERT_RTNL();
2940
2941 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2942 rxq);
2943 if (rc)
2944 return rc;
2945 }
2946
2947 dev->real_num_rx_queues = rxq;
2948 return 0;
2949}
2950EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2951#endif
2952
2953/**
2954 * netif_set_real_num_queues - set actual number of RX and TX queues used
2955 * @dev: Network device
2956 * @txq: Actual number of TX queues
2957 * @rxq: Actual number of RX queues
2958 *
2959 * Set the real number of both TX and RX queues.
2960 * Does nothing if the number of queues is already correct.
2961 */
2962int netif_set_real_num_queues(struct net_device *dev,
2963 unsigned int txq, unsigned int rxq)
2964{
2965 unsigned int old_rxq = dev->real_num_rx_queues;
2966 int err;
2967
2968 if (txq < 1 || txq > dev->num_tx_queues ||
2969 rxq < 1 || rxq > dev->num_rx_queues)
2970 return -EINVAL;
2971
2972 /* Start from increases, so the error path only does decreases -
2973 * decreases can't fail.
2974 */
2975 if (rxq > dev->real_num_rx_queues) {
2976 err = netif_set_real_num_rx_queues(dev, rxq);
2977 if (err)
2978 return err;
2979 }
2980 if (txq > dev->real_num_tx_queues) {
2981 err = netif_set_real_num_tx_queues(dev, txq);
2982 if (err)
2983 goto undo_rx;
2984 }
2985 if (rxq < dev->real_num_rx_queues)
2986 WARN_ON(netif_set_real_num_rx_queues(dev, rxq));
2987 if (txq < dev->real_num_tx_queues)
2988 WARN_ON(netif_set_real_num_tx_queues(dev, txq));
2989
2990 return 0;
2991undo_rx:
2992 WARN_ON(netif_set_real_num_rx_queues(dev, old_rxq));
2993 return err;
2994}
2995EXPORT_SYMBOL(netif_set_real_num_queues);
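/* Illustrative sketch (not part of this file): a driver resizing its channel
 * count at runtime (e.g. from an ethtool .set_channels handler, which already
 * runs under RTNL) can adjust both directions in one call:
 *
 *        err = netif_set_real_num_queues(dev, new_channels, new_channels);
 *        if (err)
 *                return err;
 */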
2996
2997/**
2998 * netif_set_tso_max_size() - set the max size of TSO frames supported
2999 * @dev: netdev to update
3000 * @size: max skb->len of a TSO frame
3001 *
3002 * Set the limit on the size of TSO super-frames the device can handle.
3003 * Unless explicitly set the stack will assume the value of
3004 * %GSO_LEGACY_MAX_SIZE.
3005 */
3006void netif_set_tso_max_size(struct net_device *dev, unsigned int size)
3007{
3008 dev->tso_max_size = min(GSO_MAX_SIZE, size);
3009 if (size < READ_ONCE(dev->gso_max_size))
3010 netif_set_gso_max_size(dev, size);
3011 if (size < READ_ONCE(dev->gso_ipv4_max_size))
3012 netif_set_gso_ipv4_max_size(dev, size);
3013}
3014EXPORT_SYMBOL(netif_set_tso_max_size);
3015
3016/**
3017 * netif_set_tso_max_segs() - set the max number of segs supported for TSO
3018 * @dev: netdev to update
3019 * @segs: max number of TCP segments
3020 *
3021 * Set the limit on the number of TCP segments the device can generate from
3022 * a single TSO super-frame.
3023 * Unless explicitly set the stack will assume the value of %GSO_MAX_SEGS.
3024 */
3025void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs)
3026{
3027 dev->tso_max_segs = segs;
3028 if (segs < READ_ONCE(dev->gso_max_segs))
3029 netif_set_gso_max_segs(dev, segs);
3030}
3031EXPORT_SYMBOL(netif_set_tso_max_segs);
3032
3033/**
3034 * netif_inherit_tso_max() - copy all TSO limits from a lower device to an upper
3035 * @to: netdev to update
3036 * @from: netdev from which to copy the limits
3037 */
3038void netif_inherit_tso_max(struct net_device *to, const struct net_device *from)
3039{
3040 netif_set_tso_max_size(to, from->tso_max_size);
3041 netif_set_tso_max_segs(to, from->tso_max_segs);
3042}
3043EXPORT_SYMBOL(netif_inherit_tso_max);
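/* Illustrative sketch (not part of this file): a driver whose DMA engine is
 * limited to, say, 64 descriptors and 256 KB per TSO job would advertise the
 * limits at probe time (the values are hypothetical):
 *
 *        netif_set_tso_max_size(dev, SZ_256K);
 *        netif_set_tso_max_segs(dev, 64);
 *
 * Stacked devices such as bonding or VLANs can instead propagate the limits
 * of their lower device with netif_inherit_tso_max(upper, lower).
 */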
3044
3045/**
3046 * netif_get_num_default_rss_queues - default number of RSS queues
3047 *
3048 * Default value is the number of physical cores if there are only 1 or 2, or
3049 * half that number (rounded up) if there are more.
3050 */
3051int netif_get_num_default_rss_queues(void)
3052{
3053 cpumask_var_t cpus;
3054 int cpu, count = 0;
3055
3056 if (unlikely(is_kdump_kernel() || !zalloc_cpumask_var(&cpus, GFP_KERNEL)))
3057 return 1;
3058
3059 cpumask_copy(cpus, cpu_online_mask);
3060 for_each_cpu(cpu, cpus) {
3061 ++count;
3062 cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
3063 }
3064 free_cpumask_var(cpus);
3065
3066 return count > 2 ? DIV_ROUND_UP(count, 2) : count;
3067}
3068EXPORT_SYMBOL(netif_get_num_default_rss_queues);
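/* Illustrative sketch (not part of this file): drivers typically clamp the
 * hint above to what the hardware supports when sizing their RSS indirection
 * table. foo_max_rss_queues() and adapter are hypothetical names.
 *
 *        num_rss = min_t(int, netif_get_num_default_rss_queues(),
 *                        foo_max_rss_queues(adapter));
 */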
3069
3070static void __netif_reschedule(struct Qdisc *q)
3071{
3072 struct softnet_data *sd;
3073 unsigned long flags;
3074
3075 local_irq_save(flags);
3076 sd = this_cpu_ptr(&softnet_data);
3077 q->next_sched = NULL;
3078 *sd->output_queue_tailp = q;
3079 sd->output_queue_tailp = &q->next_sched;
3080 raise_softirq_irqoff(NET_TX_SOFTIRQ);
3081 local_irq_restore(flags);
3082}
3083
3084void __netif_schedule(struct Qdisc *q)
3085{
3086 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
3087 __netif_reschedule(q);
3088}
3089EXPORT_SYMBOL(__netif_schedule);
3090
3091struct dev_kfree_skb_cb {
3092 enum skb_drop_reason reason;
3093};
3094
3095static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
3096{
3097 return (struct dev_kfree_skb_cb *)skb->cb;
3098}
3099
3100void netif_schedule_queue(struct netdev_queue *txq)
3101{
3102 rcu_read_lock();
3103 if (!netif_xmit_stopped(txq)) {
3104 struct Qdisc *q = rcu_dereference(txq->qdisc);
3105
3106 __netif_schedule(q);
3107 }
3108 rcu_read_unlock();
3109}
3110EXPORT_SYMBOL(netif_schedule_queue);
3111
3112void netif_tx_wake_queue(struct netdev_queue *dev_queue)
3113{
3114 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
3115 struct Qdisc *q;
3116
3117 rcu_read_lock();
3118 q = rcu_dereference(dev_queue->qdisc);
3119 __netif_schedule(q);
3120 rcu_read_unlock();
3121 }
3122}
3123EXPORT_SYMBOL(netif_tx_wake_queue);
3124
3125void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason)
3126{
3127 unsigned long flags;
3128
3129 if (unlikely(!skb))
3130 return;
3131
3132 if (likely(refcount_read(&skb->users) == 1)) {
3133 smp_rmb();
3134 refcount_set(&skb->users, 0);
3135 } else if (likely(!refcount_dec_and_test(&skb->users))) {
3136 return;
3137 }
3138 get_kfree_skb_cb(skb)->reason = reason;
3139 local_irq_save(flags);
3140 skb->next = __this_cpu_read(softnet_data.completion_queue);
3141 __this_cpu_write(softnet_data.completion_queue, skb);
3142 raise_softirq_irqoff(NET_TX_SOFTIRQ);
3143 local_irq_restore(flags);
3144}
3145EXPORT_SYMBOL(dev_kfree_skb_irq_reason);
3146
3147void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason)
3148{
3149 if (in_hardirq() || irqs_disabled())
3150 dev_kfree_skb_irq_reason(skb, reason);
3151 else
3152 kfree_skb_reason(skb, reason);
3153}
3154EXPORT_SYMBOL(dev_kfree_skb_any_reason);
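/* Illustrative sketch (not part of this file): Tx completion handlers that may
 * run in either hard-irq or process context use the _any variants above via
 * the dev_kfree_skb_any()/dev_consume_skb_any() wrappers:
 *
 *        if (unlikely(tx_error))
 *                dev_kfree_skb_any(skb);         dropped, counted as an error
 *        else
 *                dev_consume_skb_any(skb);       transmitted successfully
 */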
3155
3156
3157/**
3158 * netif_device_detach - mark device as removed
3159 * @dev: network device
3160 *
3161 * Mark device as removed from the system and therefore no longer available.
3162 */
3163void netif_device_detach(struct net_device *dev)
3164{
3165 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
3166 netif_running(dev)) {
3167 netif_tx_stop_all_queues(dev);
3168 }
3169}
3170EXPORT_SYMBOL(netif_device_detach);
3171
3172/**
3173 * netif_device_attach - mark device as attached
3174 * @dev: network device
3175 *
3176 * Mark device as attached to the system and restart if needed.
3177 */
3178void netif_device_attach(struct net_device *dev)
3179{
3180 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
3181 netif_running(dev)) {
3182 netif_tx_wake_all_queues(dev);
3183 __netdev_watchdog_up(dev);
3184 }
3185}
3186EXPORT_SYMBOL(netif_device_attach);
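/* Illustrative sketch (not part of this file): the usual pairing in a driver's
 * suspend/resume path. foo_suspend() and foo_resume() are hypothetical names.
 *
 *        static int foo_suspend(struct device *d)
 *        {
 *                struct net_device *netdev = dev_get_drvdata(d);
 *
 *                netif_device_detach(netdev);
 *                return 0;
 *        }
 *
 *        static int foo_resume(struct device *d)
 *        {
 *                struct net_device *netdev = dev_get_drvdata(d);
 *
 *                netif_device_attach(netdev);
 *                return 0;
 *        }
 */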
3187
3188/*
3189 * Returns a Tx hash based on the given packet descriptor and the number of
3190 * Tx queues to be used as a distribution range.
3191 */
3192static u16 skb_tx_hash(const struct net_device *dev,
3193 const struct net_device *sb_dev,
3194 struct sk_buff *skb)
3195{
3196 u32 hash;
3197 u16 qoffset = 0;
3198 u16 qcount = dev->real_num_tx_queues;
3199
3200 if (dev->num_tc) {
3201 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
3202
3203 qoffset = sb_dev->tc_to_txq[tc].offset;
3204 qcount = sb_dev->tc_to_txq[tc].count;
3205 if (unlikely(!qcount)) {
3206 net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n",
3207 sb_dev->name, qoffset, tc);
3208 qoffset = 0;
3209 qcount = dev->real_num_tx_queues;
3210 }
3211 }
3212
3213 if (skb_rx_queue_recorded(skb)) {
3214 DEBUG_NET_WARN_ON_ONCE(qcount == 0);
3215 hash = skb_get_rx_queue(skb);
3216 if (hash >= qoffset)
3217 hash -= qoffset;
3218 while (unlikely(hash >= qcount))
3219 hash -= qcount;
3220 return hash + qoffset;
3221 }
3222
3223 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
3224}
3225
3226void skb_warn_bad_offload(const struct sk_buff *skb)
3227{
3228 static const netdev_features_t null_features;
3229 struct net_device *dev = skb->dev;
3230 const char *name = "";
3231
3232 if (!net_ratelimit())
3233 return;
3234
3235 if (dev) {
3236 if (dev->dev.parent)
3237 name = dev_driver_string(dev->dev.parent);
3238 else
3239 name = netdev_name(dev);
3240 }
3241 skb_dump(KERN_WARNING, skb, false);
3242 WARN(1, "%s: caps=(%pNF, %pNF)\n",
3243 name, dev ? &dev->features : &null_features,
3244 skb->sk ? &skb->sk->sk_route_caps : &null_features);
3245}
3246
3247/*
3248 * Invalidate hardware checksum when packet is to be mangled, and
3249 * complete checksum manually on outgoing path.
3250 */
3251int skb_checksum_help(struct sk_buff *skb)
3252{
3253 __wsum csum;
3254 int ret = 0, offset;
3255
3256 if (skb->ip_summed == CHECKSUM_COMPLETE)
3257 goto out_set_summed;
3258
3259 if (unlikely(skb_is_gso(skb))) {
3260 skb_warn_bad_offload(skb);
3261 return -EINVAL;
3262 }
3263
3264 /* Before computing a checksum, we should make sure no frag could
3265 * be modified by an external entity : checksum could be wrong.
3266 */
3267 if (skb_has_shared_frag(skb)) {
3268 ret = __skb_linearize(skb);
3269 if (ret)
3270 goto out;
3271 }
3272
3273 offset = skb_checksum_start_offset(skb);
3274 ret = -EINVAL;
3275 if (unlikely(offset >= skb_headlen(skb))) {
3276 DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
3277 WARN_ONCE(true, "offset (%d) >= skb_headlen() (%u)\n",
3278 offset, skb_headlen(skb));
3279 goto out;
3280 }
3281 csum = skb_checksum(skb, offset, skb->len - offset, 0);
3282
3283 offset += skb->csum_offset;
3284 if (unlikely(offset + sizeof(__sum16) > skb_headlen(skb))) {
3285 DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
3286 WARN_ONCE(true, "offset+2 (%zu) > skb_headlen() (%u)\n",
3287 offset + sizeof(__sum16), skb_headlen(skb));
3288 goto out;
3289 }
3290 ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
3291 if (ret)
3292 goto out;
3293
3294 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
3295out_set_summed:
3296 skb->ip_summed = CHECKSUM_NONE;
3297out:
3298 return ret;
3299}
3300EXPORT_SYMBOL(skb_checksum_help);
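/* Illustrative sketch (not part of this file): a driver whose hardware cannot
 * checksum a given packet falls back to the helper above from its xmit path.
 * foo_hw_can_csum() is a hypothetical capability check.
 *
 *        if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *            !foo_hw_can_csum(skb) &&
 *            skb_checksum_help(skb))
 *                goto drop;
 */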
3301
3302int skb_crc32c_csum_help(struct sk_buff *skb)
3303{
3304 __le32 crc32c_csum;
3305 int ret = 0, offset, start;
3306
3307 if (skb->ip_summed != CHECKSUM_PARTIAL)
3308 goto out;
3309
3310 if (unlikely(skb_is_gso(skb)))
3311 goto out;
3312
3313 /* Before computing a checksum, we should make sure no frag could
3314 * be modified by an external entity : checksum could be wrong.
3315 */
3316 if (unlikely(skb_has_shared_frag(skb))) {
3317 ret = __skb_linearize(skb);
3318 if (ret)
3319 goto out;
3320 }
3321 start = skb_checksum_start_offset(skb);
3322 offset = start + offsetof(struct sctphdr, checksum);
3323 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
3324 ret = -EINVAL;
3325 goto out;
3326 }
3327
3328 ret = skb_ensure_writable(skb, offset + sizeof(__le32));
3329 if (ret)
3330 goto out;
3331
3332 crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
3333 skb->len - start, ~(__u32)0,
3334 crc32c_csum_stub));
3335 *(__le32 *)(skb->data + offset) = crc32c_csum;
3336 skb_reset_csum_not_inet(skb);
3337out:
3338 return ret;
3339}
3340
3341__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
3342{
3343 __be16 type = skb->protocol;
3344
3345 /* Tunnel gso handlers can set protocol to ethernet. */
3346 if (type == htons(ETH_P_TEB)) {
3347 struct ethhdr *eth;
3348
3349 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
3350 return 0;
3351
3352 eth = (struct ethhdr *)skb->data;
3353 type = eth->h_proto;
3354 }
3355
3356 return vlan_get_protocol_and_depth(skb, type, depth);
3357}
3358
3359
3360/* Take action when hardware reception checksum errors are detected. */
3361#ifdef CONFIG_BUG
3362static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3363{
3364 netdev_err(dev, "hw csum failure\n");
3365 skb_dump(KERN_ERR, skb, true);
3366 dump_stack();
3367}
3368
3369void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3370{
3371 DO_ONCE_LITE(do_netdev_rx_csum_fault, dev, skb);
3372}
3373EXPORT_SYMBOL(netdev_rx_csum_fault);
3374#endif
3375
3376/* XXX: check that highmem exists at all on the given machine. */
3377static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
3378{
3379#ifdef CONFIG_HIGHMEM
3380 int i;
3381
3382 if (!(dev->features & NETIF_F_HIGHDMA)) {
3383 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3384 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3385
3386 if (PageHighMem(skb_frag_page(frag)))
3387 return 1;
3388 }
3389 }
3390#endif
3391 return 0;
3392}
3393
3394/* If MPLS offload request, verify we are testing hardware MPLS features
3395 * instead of standard features for the netdev.
3396 */
3397#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3398static netdev_features_t net_mpls_features(struct sk_buff *skb,
3399 netdev_features_t features,
3400 __be16 type)
3401{
3402 if (eth_p_mpls(type))
3403 features &= skb->dev->mpls_features;
3404
3405 return features;
3406}
3407#else
3408static netdev_features_t net_mpls_features(struct sk_buff *skb,
3409 netdev_features_t features,
3410 __be16 type)
3411{
3412 return features;
3413}
3414#endif
3415
3416static netdev_features_t harmonize_features(struct sk_buff *skb,
3417 netdev_features_t features)
3418{
3419 __be16 type;
3420
3421 type = skb_network_protocol(skb, NULL);
3422 features = net_mpls_features(skb, features, type);
3423
3424 if (skb->ip_summed != CHECKSUM_NONE &&
3425 !can_checksum_protocol(features, type)) {
3426 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3427 }
3428 if (illegal_highdma(skb->dev, skb))
3429 features &= ~NETIF_F_SG;
3430
3431 return features;
3432}
3433
3434netdev_features_t passthru_features_check(struct sk_buff *skb,
3435 struct net_device *dev,
3436 netdev_features_t features)
3437{
3438 return features;
3439}
3440EXPORT_SYMBOL(passthru_features_check);
3441
3442static netdev_features_t dflt_features_check(struct sk_buff *skb,
3443 struct net_device *dev,
3444 netdev_features_t features)
3445{
3446 return vlan_features_check(skb, features);
3447}
3448
3449static netdev_features_t gso_features_check(const struct sk_buff *skb,
3450 struct net_device *dev,
3451 netdev_features_t features)
3452{
3453 u16 gso_segs = skb_shinfo(skb)->gso_segs;
3454
3455 if (gso_segs > READ_ONCE(dev->gso_max_segs))
3456 return features & ~NETIF_F_GSO_MASK;
3457
3458 if (unlikely(skb->len >= READ_ONCE(dev->gso_max_size)))
3459 return features & ~NETIF_F_GSO_MASK;
3460
3461 if (!skb_shinfo(skb)->gso_type) {
3462 skb_warn_bad_offload(skb);
3463 return features & ~NETIF_F_GSO_MASK;
3464 }
3465
3466 /* Support for GSO partial features requires software
3467 * intervention before we can actually process the packets
3468 * so we need to strip support for any partial features now
3469 * and we can pull them back in after we have partially
3470 * segmented the frame.
3471 */
3472 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
3473 features &= ~dev->gso_partial_features;
3474
3475 /* Make sure to clear the IPv4 ID mangling feature if the
3476 * IPv4 header has the potential to be fragmented.
3477 */
3478 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
3479 struct iphdr *iph = skb->encapsulation ?
3480 inner_ip_hdr(skb) : ip_hdr(skb);
3481
3482 if (!(iph->frag_off & htons(IP_DF)))
3483 features &= ~NETIF_F_TSO_MANGLEID;
3484 }
3485
3486 return features;
3487}
3488
3489netdev_features_t netif_skb_features(struct sk_buff *skb)
3490{
3491 struct net_device *dev = skb->dev;
3492 netdev_features_t features = dev->features;
3493
3494 if (skb_is_gso(skb))
3495 features = gso_features_check(skb, dev, features);
3496
3497 /* If encapsulation offload request, verify we are testing
3498 * hardware encapsulation features instead of standard
3499 * features for the netdev
3500 */
3501 if (skb->encapsulation)
3502 features &= dev->hw_enc_features;
3503
3504 if (skb_vlan_tagged(skb))
3505 features = netdev_intersect_features(features,
3506 dev->vlan_features |
3507 NETIF_F_HW_VLAN_CTAG_TX |
3508 NETIF_F_HW_VLAN_STAG_TX);
3509
3510 if (dev->netdev_ops->ndo_features_check)
3511 features &= dev->netdev_ops->ndo_features_check(skb, dev,
3512 features);
3513 else
3514 features &= dflt_features_check(skb, dev, features);
3515
3516 return harmonize_features(skb, features);
3517}
3518EXPORT_SYMBOL(netif_skb_features);
3519
3520static int xmit_one(struct sk_buff *skb, struct net_device *dev,
3521 struct netdev_queue *txq, bool more)
3522{
3523 unsigned int len;
3524 int rc;
3525
3526 if (dev_nit_active(dev))
3527 dev_queue_xmit_nit(skb, dev);
3528
3529 len = skb->len;
3530 trace_net_dev_start_xmit(skb, dev);
3531 rc = netdev_start_xmit(skb, dev, txq, more);
3532 trace_net_dev_xmit(skb, rc, dev, len);
3533
3534 return rc;
3535}
3536
3537struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
3538 struct netdev_queue *txq, int *ret)
3539{
3540 struct sk_buff *skb = first;
3541 int rc = NETDEV_TX_OK;
3542
3543 while (skb) {
3544 struct sk_buff *next = skb->next;
3545
3546 skb_mark_not_on_list(skb);
3547 rc = xmit_one(skb, dev, txq, next != NULL);
3548 if (unlikely(!dev_xmit_complete(rc))) {
3549 skb->next = next;
3550 goto out;
3551 }
3552
3553 skb = next;
3554 if (netif_tx_queue_stopped(txq) && skb) {
3555 rc = NETDEV_TX_BUSY;
3556 break;
3557 }
3558 }
3559
3560out:
3561 *ret = rc;
3562 return skb;
3563}
3564
3565static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3566 netdev_features_t features)
3567{
3568 if (skb_vlan_tag_present(skb) &&
3569 !vlan_hw_offload_capable(features, skb->vlan_proto))
3570 skb = __vlan_hwaccel_push_inside(skb);
3571 return skb;
3572}
3573
3574int skb_csum_hwoffload_help(struct sk_buff *skb,
3575 const netdev_features_t features)
3576{
3577 if (unlikely(skb_csum_is_sctp(skb)))
3578 return !!(features & NETIF_F_SCTP_CRC) ? 0 :
3579 skb_crc32c_csum_help(skb);
3580
3581 if (features & NETIF_F_HW_CSUM)
3582 return 0;
3583
3584 if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
3585 switch (skb->csum_offset) {
3586 case offsetof(struct tcphdr, check):
3587 case offsetof(struct udphdr, check):
3588 return 0;
3589 }
3590 }
3591
3592 return skb_checksum_help(skb);
3593}
3594EXPORT_SYMBOL(skb_csum_hwoffload_help);
3595
3596static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
3597{
3598 netdev_features_t features;
3599
3600 features = netif_skb_features(skb);
3601 skb = validate_xmit_vlan(skb, features);
3602 if (unlikely(!skb))
3603 goto out_null;
3604
3605 skb = sk_validate_xmit_skb(skb, dev);
3606 if (unlikely(!skb))
3607 goto out_null;
3608
3609 if (netif_needs_gso(skb, features)) {
3610 struct sk_buff *segs;
3611
3612 segs = skb_gso_segment(skb, features);
3613 if (IS_ERR(segs)) {
3614 goto out_kfree_skb;
3615 } else if (segs) {
3616 consume_skb(skb);
3617 skb = segs;
3618 }
3619 } else {
3620 if (skb_needs_linearize(skb, features) &&
3621 __skb_linearize(skb))
3622 goto out_kfree_skb;
3623
3624 /* If packet is not checksummed and device does not
3625 * support checksumming for this protocol, complete
3626 * checksumming here.
3627 */
3628 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3629 if (skb->encapsulation)
3630 skb_set_inner_transport_header(skb,
3631 skb_checksum_start_offset(skb));
3632 else
3633 skb_set_transport_header(skb,
3634 skb_checksum_start_offset(skb));
3635 if (skb_csum_hwoffload_help(skb, features))
3636 goto out_kfree_skb;
3637 }
3638 }
3639
3640 skb = validate_xmit_xfrm(skb, features, again);
3641
3642 return skb;
3643
3644out_kfree_skb:
3645 kfree_skb(skb);
3646out_null:
3647 dev_core_stats_tx_dropped_inc(dev);
3648 return NULL;
3649}
3650
3651struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
3652{
3653 struct sk_buff *next, *head = NULL, *tail;
3654
3655 for (; skb != NULL; skb = next) {
3656 next = skb->next;
3657 skb_mark_not_on_list(skb);
3658
3659 /* in case skb won't be segmented, point to itself */
3660 skb->prev = skb;
3661
3662 skb = validate_xmit_skb(skb, dev, again);
3663 if (!skb)
3664 continue;
3665
3666 if (!head)
3667 head = skb;
3668 else
3669 tail->next = skb;
3670 /* If skb was segmented, skb->prev points to
3671 * the last segment. If not, it still contains skb.
3672 */
3673 tail = skb->prev;
3674 }
3675 return head;
3676}
3677EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
3678
3679static void qdisc_pkt_len_init(struct sk_buff *skb)
3680{
3681 const struct skb_shared_info *shinfo = skb_shinfo(skb);
3682
3683 qdisc_skb_cb(skb)->pkt_len = skb->len;
3684
3685 /* To get a more precise estimate of bytes sent on the wire,
3686 * we add to pkt_len the header size of all segments
3687 */
3688 if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
3689 u16 gso_segs = shinfo->gso_segs;
3690 unsigned int hdr_len;
3691
3692 /* mac layer + network layer */
3693 hdr_len = skb_transport_offset(skb);
3694
3695 /* + transport layer */
3696 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3697 const struct tcphdr *th;
3698 struct tcphdr _tcphdr;
3699
3700 th = skb_header_pointer(skb, hdr_len,
3701 sizeof(_tcphdr), &_tcphdr);
3702 if (likely(th))
3703 hdr_len += __tcp_hdrlen(th);
3704 } else {
3705 struct udphdr _udphdr;
3706
3707 if (skb_header_pointer(skb, hdr_len,
3708 sizeof(_udphdr), &_udphdr))
3709 hdr_len += sizeof(struct udphdr);
3710 }
3711
3712 if (shinfo->gso_type & SKB_GSO_DODGY)
3713 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3714 shinfo->gso_size);
3715
3716 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
3717 }
3718}
3719
3720static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
3721 struct sk_buff **to_free,
3722 struct netdev_queue *txq)
3723{
3724 int rc;
3725
3726 rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK;
3727 if (rc == NET_XMIT_SUCCESS)
3728 trace_qdisc_enqueue(q, txq, skb);
3729 return rc;
3730}
3731
3732static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3733 struct net_device *dev,
3734 struct netdev_queue *txq)
3735{
3736 spinlock_t *root_lock = qdisc_lock(q);
3737 struct sk_buff *to_free = NULL;
3738 bool contended;
3739 int rc;
3740
3741 qdisc_calculate_pkt_len(skb, q);
3742
3743 tcf_set_drop_reason(skb, SKB_DROP_REASON_QDISC_DROP);
3744
3745 if (q->flags & TCQ_F_NOLOCK) {
3746 if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) &&
3747 qdisc_run_begin(q)) {
3748 /* Retest nolock_qdisc_is_empty() within the protection
3749 * of q->seqlock to protect from racing with requeuing.
3750 */
3751 if (unlikely(!nolock_qdisc_is_empty(q))) {
3752 rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3753 __qdisc_run(q);
3754 qdisc_run_end(q);
3755
3756 goto no_lock_out;
3757 }
3758
3759 qdisc_bstats_cpu_update(q, skb);
3760 if (sch_direct_xmit(skb, q, dev, txq, NULL, true) &&
3761 !nolock_qdisc_is_empty(q))
3762 __qdisc_run(q);
3763
3764 qdisc_run_end(q);
3765 return NET_XMIT_SUCCESS;
3766 }
3767
3768 rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3769 qdisc_run(q);
3770
3771no_lock_out:
3772 if (unlikely(to_free))
3773 kfree_skb_list_reason(to_free,
3774 tcf_get_drop_reason(to_free));
3775 return rc;
3776 }
3777
3778 /*
3779 * Heuristic to force contended enqueues to serialize on a
3780 * separate lock before trying to get qdisc main lock.
3781 * This permits qdisc->running owner to get the lock more
3782 * often and dequeue packets faster.
3783 * On PREEMPT_RT it is possible to preempt the qdisc owner during xmit
3784 * and then other tasks will only enqueue packets. The packets will be
3785 * sent after the qdisc owner is scheduled again. To prevent this
3786 * scenario the task always serialize on the lock.
3787 */
3788 contended = qdisc_is_running(q) || IS_ENABLED(CONFIG_PREEMPT_RT);
3789 if (unlikely(contended))
3790 spin_lock(&q->busylock);
3791
3792 spin_lock(root_lock);
3793 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3794 __qdisc_drop(skb, &to_free);
3795 rc = NET_XMIT_DROP;
3796 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
3797 qdisc_run_begin(q)) {
3798 /*
3799 * This is a work-conserving queue; there are no old skbs
3800 * waiting to be sent out; and the qdisc is not running -
3801 * xmit the skb directly.
3802 */
3803
3804 qdisc_bstats_update(q, skb);
3805
3806 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
3807 if (unlikely(contended)) {
3808 spin_unlock(&q->busylock);
3809 contended = false;
3810 }
3811 __qdisc_run(q);
3812 }
3813
3814 qdisc_run_end(q);
3815 rc = NET_XMIT_SUCCESS;
3816 } else {
3817 rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3818 if (qdisc_run_begin(q)) {
3819 if (unlikely(contended)) {
3820 spin_unlock(&q->busylock);
3821 contended = false;
3822 }
3823 __qdisc_run(q);
3824 qdisc_run_end(q);
3825 }
3826 }
3827 spin_unlock(root_lock);
3828 if (unlikely(to_free))
3829 kfree_skb_list_reason(to_free,
3830 tcf_get_drop_reason(to_free));
3831 if (unlikely(contended))
3832 spin_unlock(&q->busylock);
3833 return rc;
3834}
3835
3836#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3837static void skb_update_prio(struct sk_buff *skb)
3838{
3839 const struct netprio_map *map;
3840 const struct sock *sk;
3841 unsigned int prioidx;
3842
3843 if (skb->priority)
3844 return;
3845 map = rcu_dereference_bh(skb->dev->priomap);
3846 if (!map)
3847 return;
3848 sk = skb_to_full_sk(skb);
3849 if (!sk)
3850 return;
3851
3852 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
3853
3854 if (prioidx < map->priomap_len)
3855 skb->priority = map->priomap[prioidx];
3856}
3857#else
3858#define skb_update_prio(skb)
3859#endif
3860
3861/**
3862 * dev_loopback_xmit - loop back @skb
3863 * @net: network namespace this loopback is happening in
3864 * @sk: sk needed to be a netfilter okfn
3865 * @skb: buffer to transmit
3866 */
3867int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3868{
3869 skb_reset_mac_header(skb);
3870 __skb_pull(skb, skb_network_offset(skb));
3871 skb->pkt_type = PACKET_LOOPBACK;
3872 if (skb->ip_summed == CHECKSUM_NONE)
3873 skb->ip_summed = CHECKSUM_UNNECESSARY;
3874 DEBUG_NET_WARN_ON_ONCE(!skb_dst(skb));
3875 skb_dst_force(skb);
3876 netif_rx(skb);
3877 return 0;
3878}
3879EXPORT_SYMBOL(dev_loopback_xmit);
3880
3881#ifdef CONFIG_NET_EGRESS
3882static struct netdev_queue *
3883netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb)
3884{
3885 int qm = skb_get_queue_mapping(skb);
3886
3887 return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm));
3888}
3889
3890static bool netdev_xmit_txqueue_skipped(void)
3891{
3892 return __this_cpu_read(softnet_data.xmit.skip_txqueue);
3893}
3894
3895void netdev_xmit_skip_txqueue(bool skip)
3896{
3897 __this_cpu_write(softnet_data.xmit.skip_txqueue, skip);
3898}
3899EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
3900#endif /* CONFIG_NET_EGRESS */
3901
3902#ifdef CONFIG_NET_XGRESS
3903static int tc_run(struct tcx_entry *entry, struct sk_buff *skb,
3904 enum skb_drop_reason *drop_reason)
3905{
3906 int ret = TC_ACT_UNSPEC;
3907#ifdef CONFIG_NET_CLS_ACT
3908 struct mini_Qdisc *miniq = rcu_dereference_bh(entry->miniq);
3909 struct tcf_result res;
3910
3911 if (!miniq)
3912 return ret;
3913
3914 tc_skb_cb(skb)->mru = 0;
3915 tc_skb_cb(skb)->post_ct = false;
3916 tcf_set_drop_reason(skb, *drop_reason);
3917
3918 mini_qdisc_bstats_cpu_update(miniq, skb);
3919 ret = tcf_classify(skb, miniq->block, miniq->filter_list, &res, false);
3920 /* Only tcf related quirks below. */
3921 switch (ret) {
3922 case TC_ACT_SHOT:
3923 *drop_reason = tcf_get_drop_reason(skb);
3924 mini_qdisc_qstats_cpu_drop(miniq);
3925 break;
3926 case TC_ACT_OK:
3927 case TC_ACT_RECLASSIFY:
3928 skb->tc_index = TC_H_MIN(res.classid);
3929 break;
3930 }
3931#endif /* CONFIG_NET_CLS_ACT */
3932 return ret;
3933}
3934
3935static DEFINE_STATIC_KEY_FALSE(tcx_needed_key);
3936
3937void tcx_inc(void)
3938{
3939 static_branch_inc(&tcx_needed_key);
3940}
3941
3942void tcx_dec(void)
3943{
3944 static_branch_dec(&tcx_needed_key);
3945}
3946
3947static __always_inline enum tcx_action_base
3948tcx_run(const struct bpf_mprog_entry *entry, struct sk_buff *skb,
3949 const bool needs_mac)
3950{
3951 const struct bpf_mprog_fp *fp;
3952 const struct bpf_prog *prog;
3953 int ret = TCX_NEXT;
3954
3955 if (needs_mac)
3956 __skb_push(skb, skb->mac_len);
3957 bpf_mprog_foreach_prog(entry, fp, prog) {
3958 bpf_compute_data_pointers(skb);
3959 ret = bpf_prog_run(prog, skb);
3960 if (ret != TCX_NEXT)
3961 break;
3962 }
3963 if (needs_mac)
3964 __skb_pull(skb, skb->mac_len);
3965 return tcx_action_code(skb, ret);
3966}
3967
3968static __always_inline struct sk_buff *
3969sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
3970 struct net_device *orig_dev, bool *another)
3971{
3972 struct bpf_mprog_entry *entry = rcu_dereference_bh(skb->dev->tcx_ingress);
3973 enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_INGRESS;
3974 int sch_ret;
3975
3976 if (!entry)
3977 return skb;
3978 if (*pt_prev) {
3979 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3980 *pt_prev = NULL;
3981 }
3982
3983 qdisc_skb_cb(skb)->pkt_len = skb->len;
3984 tcx_set_ingress(skb, true);
3985
3986 if (static_branch_unlikely(&tcx_needed_key)) {
3987 sch_ret = tcx_run(entry, skb, true);
3988 if (sch_ret != TC_ACT_UNSPEC)
3989 goto ingress_verdict;
3990 }
3991 sch_ret = tc_run(tcx_entry(entry), skb, &drop_reason);
3992ingress_verdict:
3993 switch (sch_ret) {
3994 case TC_ACT_REDIRECT:
3995 /* skb_mac_header check was done by BPF, so we can safely
3996 * push the L2 header back before redirecting to another
3997 * netdev.
3998 */
3999 __skb_push(skb, skb->mac_len);
4000 if (skb_do_redirect(skb) == -EAGAIN) {
4001 __skb_pull(skb, skb->mac_len);
4002 *another = true;
4003 break;
4004 }
4005 *ret = NET_RX_SUCCESS;
4006 return NULL;
4007 case TC_ACT_SHOT:
4008 kfree_skb_reason(skb, drop_reason);
4009 *ret = NET_RX_DROP;
4010 return NULL;
4011 /* used by tc_run */
4012 case TC_ACT_STOLEN:
4013 case TC_ACT_QUEUED:
4014 case TC_ACT_TRAP:
4015 consume_skb(skb);
4016 fallthrough;
4017 case TC_ACT_CONSUMED:
4018 *ret = NET_RX_SUCCESS;
4019 return NULL;
4020 }
4021
4022 return skb;
4023}
4024
4025static __always_inline struct sk_buff *
4026sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
4027{
4028 struct bpf_mprog_entry *entry = rcu_dereference_bh(dev->tcx_egress);
4029 enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_EGRESS;
4030 int sch_ret;
4031
4032 if (!entry)
4033 return skb;
4034
4035	/* qdisc_skb_cb(skb)->pkt_len was already set and tcx_set_ingress()
4036	 * already called by the caller.
4037 */
4038 if (static_branch_unlikely(&tcx_needed_key)) {
4039 sch_ret = tcx_run(entry, skb, false);
4040 if (sch_ret != TC_ACT_UNSPEC)
4041 goto egress_verdict;
4042 }
4043 sch_ret = tc_run(tcx_entry(entry), skb, &drop_reason);
4044egress_verdict:
4045 switch (sch_ret) {
4046 case TC_ACT_REDIRECT:
4047 /* No need to push/pop skb's mac_header here on egress! */
4048 skb_do_redirect(skb);
4049 *ret = NET_XMIT_SUCCESS;
4050 return NULL;
4051 case TC_ACT_SHOT:
4052 kfree_skb_reason(skb, drop_reason);
4053 *ret = NET_XMIT_DROP;
4054 return NULL;
4055 /* used by tc_run */
4056 case TC_ACT_STOLEN:
4057 case TC_ACT_QUEUED:
4058 case TC_ACT_TRAP:
4059 consume_skb(skb);
4060 fallthrough;
4061 case TC_ACT_CONSUMED:
4062 *ret = NET_XMIT_SUCCESS;
4063 return NULL;
4064 }
4065
4066 return skb;
4067}
4068#else
4069static __always_inline struct sk_buff *
4070sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
4071 struct net_device *orig_dev, bool *another)
4072{
4073 return skb;
4074}
4075
4076static __always_inline struct sk_buff *
4077sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
4078{
4079 return skb;
4080}
4081#endif /* CONFIG_NET_XGRESS */
4082
4083#ifdef CONFIG_XPS
4084static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
4085 struct xps_dev_maps *dev_maps, unsigned int tci)
4086{
4087 int tc = netdev_get_prio_tc_map(dev, skb->priority);
4088 struct xps_map *map;
4089 int queue_index = -1;
4090
4091 if (tc >= dev_maps->num_tc || tci >= dev_maps->nr_ids)
4092 return queue_index;
4093
4094 tci *= dev_maps->num_tc;
4095 tci += tc;
4096
4097 map = rcu_dereference(dev_maps->attr_map[tci]);
4098 if (map) {
4099 if (map->len == 1)
4100 queue_index = map->queues[0];
4101 else
4102 queue_index = map->queues[reciprocal_scale(
4103 skb_get_hash(skb), map->len)];
4104 if (unlikely(queue_index >= dev->real_num_tx_queues))
4105 queue_index = -1;
4106 }
4107 return queue_index;
4108}
4109#endif
4110
4111static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
4112 struct sk_buff *skb)
4113{
4114#ifdef CONFIG_XPS
4115 struct xps_dev_maps *dev_maps;
4116 struct sock *sk = skb->sk;
4117 int queue_index = -1;
4118
4119 if (!static_key_false(&xps_needed))
4120 return -1;
4121
4122 rcu_read_lock();
4123 if (!static_key_false(&xps_rxqs_needed))
4124 goto get_cpus_map;
4125
4126 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_RXQS]);
4127 if (dev_maps) {
4128 int tci = sk_rx_queue_get(sk);
4129
4130 if (tci >= 0)
4131 queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4132 tci);
4133 }
4134
4135get_cpus_map:
4136 if (queue_index < 0) {
4137 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_CPUS]);
4138 if (dev_maps) {
4139 unsigned int tci = skb->sender_cpu - 1;
4140
4141 queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4142 tci);
4143 }
4144 }
4145 rcu_read_unlock();
4146
4147 return queue_index;
4148#else
4149 return -1;
4150#endif
4151}
4152
4153u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
4154 struct net_device *sb_dev)
4155{
4156 return 0;
4157}
4158EXPORT_SYMBOL(dev_pick_tx_zero);
4159
4160u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
4161 struct net_device *sb_dev)
4162{
4163 return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
4164}
4165EXPORT_SYMBOL(dev_pick_tx_cpu_id);
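
/* Illustrative sketch (not part of the original file): dev_pick_tx_zero()
 * and dev_pick_tx_cpu_id() are trivial queue selectors a driver may plug
 * into its ndo_select_queue hook; both match the ndo_select_queue()
 * signature. "example_netdev_ops" and "example_start_xmit" are hypothetical.
 *
 *	static const struct net_device_ops example_netdev_ops = {
 *		.ndo_start_xmit   = example_start_xmit,
 *		.ndo_select_queue = dev_pick_tx_cpu_id,
 *	};
 */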
4166
4167u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
4168 struct net_device *sb_dev)
4169{
4170 struct sock *sk = skb->sk;
4171 int queue_index = sk_tx_queue_get(sk);
4172
4173 sb_dev = sb_dev ? : dev;
4174
4175 if (queue_index < 0 || skb->ooo_okay ||
4176 queue_index >= dev->real_num_tx_queues) {
4177 int new_index = get_xps_queue(dev, sb_dev, skb);
4178
4179 if (new_index < 0)
4180 new_index = skb_tx_hash(dev, sb_dev, skb);
4181
4182 if (queue_index != new_index && sk &&
4183 sk_fullsock(sk) &&
4184 rcu_access_pointer(sk->sk_dst_cache))
4185 sk_tx_queue_set(sk, new_index);
4186
4187 queue_index = new_index;
4188 }
4189
4190 return queue_index;
4191}
4192EXPORT_SYMBOL(netdev_pick_tx);
4193
4194struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
4195 struct sk_buff *skb,
4196 struct net_device *sb_dev)
4197{
4198 int queue_index = 0;
4199
4200#ifdef CONFIG_XPS
4201 u32 sender_cpu = skb->sender_cpu - 1;
4202
4203 if (sender_cpu >= (u32)NR_CPUS)
4204 skb->sender_cpu = raw_smp_processor_id() + 1;
4205#endif
4206
4207 if (dev->real_num_tx_queues != 1) {
4208 const struct net_device_ops *ops = dev->netdev_ops;
4209
4210 if (ops->ndo_select_queue)
4211 queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
4212 else
4213 queue_index = netdev_pick_tx(dev, skb, sb_dev);
4214
4215 queue_index = netdev_cap_txqueue(dev, queue_index);
4216 }
4217
4218 skb_set_queue_mapping(skb, queue_index);
4219 return netdev_get_tx_queue(dev, queue_index);
4220}
4221
4222/**
4223 * __dev_queue_xmit() - transmit a buffer
4224 * @skb: buffer to transmit
4225 * @sb_dev: subordinate device used for L2 forwarding offload
4226 *
4227 * Queue a buffer for transmission to a network device. The caller must
4228 * have set the device and priority and built the buffer before calling
4229 * this function. The function can be called from an interrupt.
4230 *
4231 * When calling this method, interrupts MUST be enabled. This is because
4232 * the BH enable code must have IRQs enabled so that it will not deadlock.
4233 *
4234 * Regardless of the return value, the skb is consumed, so it is currently
4235 * difficult to retry a send to this method. (You can bump the ref count
4236 * before sending to hold a reference for retry if you are careful.)
4237 *
4238 * Return:
4239 * * 0 - buffer successfully transmitted
4240 * * positive qdisc return code - NET_XMIT_DROP etc.
4241 * * negative errno - other errors
4242 */
4243int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
4244{
4245 struct net_device *dev = skb->dev;
4246 struct netdev_queue *txq = NULL;
4247 struct Qdisc *q;
4248 int rc = -ENOMEM;
4249 bool again = false;
4250
4251 skb_reset_mac_header(skb);
4252 skb_assert_len(skb);
4253
4254 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
4255 __skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED);
4256
4257 /* Disable soft irqs for various locks below. Also
4258 * stops preemption for RCU.
4259 */
4260 rcu_read_lock_bh();
4261
4262 skb_update_prio(skb);
4263
4264 qdisc_pkt_len_init(skb);
4265 tcx_set_ingress(skb, false);
4266#ifdef CONFIG_NET_EGRESS
4267 if (static_branch_unlikely(&egress_needed_key)) {
4268 if (nf_hook_egress_active()) {
4269 skb = nf_hook_egress(skb, &rc, dev);
4270 if (!skb)
4271 goto out;
4272 }
4273
4274 netdev_xmit_skip_txqueue(false);
4275
4276 nf_skip_egress(skb, true);
4277 skb = sch_handle_egress(skb, &rc, dev);
4278 if (!skb)
4279 goto out;
4280 nf_skip_egress(skb, false);
4281
4282 if (netdev_xmit_txqueue_skipped())
4283 txq = netdev_tx_queue_mapping(dev, skb);
4284 }
4285#endif
4286	/* If the device/qdisc doesn't need skb->dst, release it right now while
4287	 * it's hot in this CPU's cache.
4288 */
4289 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
4290 skb_dst_drop(skb);
4291 else
4292 skb_dst_force(skb);
4293
4294 if (!txq)
4295 txq = netdev_core_pick_tx(dev, skb, sb_dev);
4296
4297 q = rcu_dereference_bh(txq->qdisc);
4298
4299 trace_net_dev_queue(skb);
4300 if (q->enqueue) {
4301 rc = __dev_xmit_skb(skb, q, dev, txq);
4302 goto out;
4303 }
4304
4305	/* The device has no queue. Common case for software devices:
4306	 * loopback, all the sorts of tunnels...
4307	 *
4308	 * Really, it is unlikely that netif_tx_lock protection is necessary
4309	 * here. (f.e. loopback and IP tunnels are clean ignoring statistics
4310	 * counters.)
4311	 * However, it is possible that they rely on the protection
4312	 * made by us here.
4313	 *
4314	 * Check this and take the lock anyway; it is not prone to deadlocks.
4315	 * Either way, the noqueue qdisc case is even simpler 8)
4316	 */
4317 if (dev->flags & IFF_UP) {
4318 int cpu = smp_processor_id(); /* ok because BHs are off */
4319
4320 /* Other cpus might concurrently change txq->xmit_lock_owner
4321 * to -1 or to their cpu id, but not to our id.
4322 */
4323 if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
4324 if (dev_xmit_recursion())
4325 goto recursion_alert;
4326
4327 skb = validate_xmit_skb(skb, dev, &again);
4328 if (!skb)
4329 goto out;
4330
4331 HARD_TX_LOCK(dev, txq, cpu);
4332
4333 if (!netif_xmit_stopped(txq)) {
4334 dev_xmit_recursion_inc();
4335 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
4336 dev_xmit_recursion_dec();
4337 if (dev_xmit_complete(rc)) {
4338 HARD_TX_UNLOCK(dev, txq);
4339 goto out;
4340 }
4341 }
4342 HARD_TX_UNLOCK(dev, txq);
4343 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
4344 dev->name);
4345 } else {
4346 /* Recursion is detected! It is possible,
4347 * unfortunately
4348 */
4349recursion_alert:
4350 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
4351 dev->name);
4352 }
4353 }
4354
4355 rc = -ENETDOWN;
4356 rcu_read_unlock_bh();
4357
4358 dev_core_stats_tx_dropped_inc(dev);
4359 kfree_skb_list(skb);
4360 return rc;
4361out:
4362 rcu_read_unlock_bh();
4363 return rc;
4364}
4365EXPORT_SYMBOL(__dev_queue_xmit);
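
/* Illustrative sketch (not part of the original file): a module that builds
 * a complete Ethernet frame and hands it to the stack through
 * dev_queue_xmit(), the usual wrapper around __dev_queue_xmit(skb, NULL).
 * "example_xmit_frame" and its arguments are hypothetical.
 *
 *	static int example_xmit_frame(struct net_device *dev,
 *				      const void *frame, unsigned int len)
 *	{
 *		struct sk_buff *skb;
 *
 *		skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
 *		if (!skb)
 *			return -ENOMEM;
 *		skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *		skb_put_data(skb, frame, len);
 *		skb->dev = dev;
 *		skb_reset_mac_header(skb);
 *		skb->protocol = eth_hdr(skb)->h_proto;
 *		return dev_queue_xmit(skb);
 *	}
 */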
4366
4367int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
4368{
4369 struct net_device *dev = skb->dev;
4370 struct sk_buff *orig_skb = skb;
4371 struct netdev_queue *txq;
4372 int ret = NETDEV_TX_BUSY;
4373 bool again = false;
4374
4375 if (unlikely(!netif_running(dev) ||
4376 !netif_carrier_ok(dev)))
4377 goto drop;
4378
4379 skb = validate_xmit_skb_list(skb, dev, &again);
4380 if (skb != orig_skb)
4381 goto drop;
4382
4383 skb_set_queue_mapping(skb, queue_id);
4384 txq = skb_get_tx_queue(dev, skb);
4385
4386 local_bh_disable();
4387
4388 dev_xmit_recursion_inc();
4389 HARD_TX_LOCK(dev, txq, smp_processor_id());
4390 if (!netif_xmit_frozen_or_drv_stopped(txq))
4391 ret = netdev_start_xmit(skb, dev, txq, false);
4392 HARD_TX_UNLOCK(dev, txq);
4393 dev_xmit_recursion_dec();
4394
4395 local_bh_enable();
4396 return ret;
4397drop:
4398 dev_core_stats_tx_dropped_inc(dev);
4399 kfree_skb_list(skb);
4400 return NET_XMIT_DROP;
4401}
4402EXPORT_SYMBOL(__dev_direct_xmit);
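
/* Illustrative sketch (not part of the original file): __dev_direct_xmit()
 * sends an already built skb on a specific TX queue, bypassing the qdisc
 * layer (AF_XDP-style usage). On NETDEV_TX_BUSY the skb has not been
 * consumed, so the caller may free or requeue it. Variable names here are
 * hypothetical.
 *
 *	skb->dev = dev;
 *	ret = __dev_direct_xmit(skb, queue_id);
 *	if (ret == NETDEV_TX_BUSY)
 *		kfree_skb(skb);
 */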
4403
4404/*************************************************************************
4405 * Receiver routines
4406 *************************************************************************/
4407
4408unsigned int sysctl_skb_defer_max __read_mostly = 64;
4409int weight_p __read_mostly = 64; /* old backlog weight */
4410int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */
4411int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */
4412
4413/* Called with irq disabled */
4414static inline void ____napi_schedule(struct softnet_data *sd,
4415 struct napi_struct *napi)
4416{
4417 struct task_struct *thread;
4418
4419 lockdep_assert_irqs_disabled();
4420
4421 if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
4422 /* Paired with smp_mb__before_atomic() in
4423 * napi_enable()/dev_set_threaded().
4424 * Use READ_ONCE() to guarantee a complete
4425 * read on napi->thread. Only call
4426 * wake_up_process() when it's not NULL.
4427 */
4428 thread = READ_ONCE(napi->thread);
4429 if (thread) {
4430 /* Avoid doing set_bit() if the thread is in
4431	 * INTERRUPTIBLE state, because napi_thread_wait()
4432 * makes sure to proceed with napi polling
4433 * if the thread is explicitly woken from here.
4434 */
4435 if (READ_ONCE(thread->__state) != TASK_INTERRUPTIBLE)
4436 set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
4437 wake_up_process(thread);
4438 return;
4439 }
4440 }
4441
4442 list_add_tail(&napi->poll_list, &sd->poll_list);
4443 WRITE_ONCE(napi->list_owner, smp_processor_id());
4444 /* If not called from net_rx_action()
4445 * we have to raise NET_RX_SOFTIRQ.
4446 */
4447 if (!sd->in_net_rx_action)
4448 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4449}
4450
4451#ifdef CONFIG_RPS
4452
4453struct static_key_false rps_needed __read_mostly;
4454EXPORT_SYMBOL(rps_needed);
4455struct static_key_false rfs_needed __read_mostly;
4456EXPORT_SYMBOL(rfs_needed);
4457
4458static struct rps_dev_flow *
4459set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4460 struct rps_dev_flow *rflow, u16 next_cpu)
4461{
4462 if (next_cpu < nr_cpu_ids) {
4463#ifdef CONFIG_RFS_ACCEL
4464 struct netdev_rx_queue *rxqueue;
4465 struct rps_dev_flow_table *flow_table;
4466 struct rps_dev_flow *old_rflow;
4467 u32 flow_id;
4468 u16 rxq_index;
4469 int rc;
4470
4471 /* Should we steer this flow to a different hardware queue? */
4472 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
4473 !(dev->features & NETIF_F_NTUPLE))
4474 goto out;
4475 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
4476 if (rxq_index == skb_get_rx_queue(skb))
4477 goto out;
4478
4479 rxqueue = dev->_rx + rxq_index;
4480 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4481 if (!flow_table)
4482 goto out;
4483 flow_id = skb_get_hash(skb) & flow_table->mask;
4484 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
4485 rxq_index, flow_id);
4486 if (rc < 0)
4487 goto out;
4488 old_rflow = rflow;
4489 rflow = &flow_table->flows[flow_id];
4490 rflow->filter = rc;
4491 if (old_rflow->filter == rflow->filter)
4492 old_rflow->filter = RPS_NO_FILTER;
4493 out:
4494#endif
4495 rflow->last_qtail =
4496 per_cpu(softnet_data, next_cpu).input_queue_head;
4497 }
4498
4499 rflow->cpu = next_cpu;
4500 return rflow;
4501}
4502
4503/*
4504 * get_rps_cpu is called from netif_receive_skb and returns the target
4505 * CPU from the RPS map of the receiving queue for a given skb.
4506 * rcu_read_lock must be held on entry.
4507 */
4508static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4509 struct rps_dev_flow **rflowp)
4510{
4511 const struct rps_sock_flow_table *sock_flow_table;
4512 struct netdev_rx_queue *rxqueue = dev->_rx;
4513 struct rps_dev_flow_table *flow_table;
4514 struct rps_map *map;
4515 int cpu = -1;
4516 u32 tcpu;
4517 u32 hash;
4518
4519 if (skb_rx_queue_recorded(skb)) {
4520 u16 index = skb_get_rx_queue(skb);
4521
4522 if (unlikely(index >= dev->real_num_rx_queues)) {
4523 WARN_ONCE(dev->real_num_rx_queues > 1,
4524 "%s received packet on queue %u, but number "
4525 "of RX queues is %u\n",
4526 dev->name, index, dev->real_num_rx_queues);
4527 goto done;
4528 }
4529 rxqueue += index;
4530 }
4531
4532 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
4533
4534 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4535 map = rcu_dereference(rxqueue->rps_map);
4536 if (!flow_table && !map)
4537 goto done;
4538
4539 skb_reset_network_header(skb);
4540 hash = skb_get_hash(skb);
4541 if (!hash)
4542 goto done;
4543
4544 sock_flow_table = rcu_dereference(net_hotdata.rps_sock_flow_table);
4545 if (flow_table && sock_flow_table) {
4546 struct rps_dev_flow *rflow;
4547 u32 next_cpu;
4548 u32 ident;
4549
4550		/* First check the global flow table for a match.
4551 * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
4552 */
4553 ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
4554 if ((ident ^ hash) & ~net_hotdata.rps_cpu_mask)
4555 goto try_rps;
4556
4557 next_cpu = ident & net_hotdata.rps_cpu_mask;
4558
4559 /* OK, now we know there is a match,
4560 * we can look at the local (per receive queue) flow table
4561 */
4562 rflow = &flow_table->flows[hash & flow_table->mask];
4563 tcpu = rflow->cpu;
4564
4565 /*
4566 * If the desired CPU (where last recvmsg was done) is
4567 * different from current CPU (one in the rx-queue flow
4568 * table entry), switch if one of the following holds:
4569 * - Current CPU is unset (>= nr_cpu_ids).
4570 * - Current CPU is offline.
4571 * - The current CPU's queue tail has advanced beyond the
4572 * last packet that was enqueued using this table entry.
4573 * This guarantees that all previous packets for the flow
4574	 * have been dequeued, thus preserving in-order delivery.
4575 */
4576 if (unlikely(tcpu != next_cpu) &&
4577 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
4578 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
4579 rflow->last_qtail)) >= 0)) {
4580 tcpu = next_cpu;
4581 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
4582 }
4583
4584 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
4585 *rflowp = rflow;
4586 cpu = tcpu;
4587 goto done;
4588 }
4589 }
4590
4591try_rps:
4592
4593 if (map) {
4594 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
4595 if (cpu_online(tcpu)) {
4596 cpu = tcpu;
4597 goto done;
4598 }
4599 }
4600
4601done:
4602 return cpu;
4603}
4604
4605#ifdef CONFIG_RFS_ACCEL
4606
4607/**
4608 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
4609 * @dev: Device on which the filter was set
4610 * @rxq_index: RX queue index
4611 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
4612 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
4613 *
4614 * Drivers that implement ndo_rx_flow_steer() should periodically call
4615 * this function for each installed filter and remove the filters for
4616 * which it returns %true.
4617 */
4618bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
4619 u32 flow_id, u16 filter_id)
4620{
4621 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
4622 struct rps_dev_flow_table *flow_table;
4623 struct rps_dev_flow *rflow;
4624 bool expire = true;
4625 unsigned int cpu;
4626
4627 rcu_read_lock();
4628 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4629 if (flow_table && flow_id <= flow_table->mask) {
4630 rflow = &flow_table->flows[flow_id];
4631 cpu = READ_ONCE(rflow->cpu);
4632 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
4633 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
4634 rflow->last_qtail) <
4635 (int)(10 * flow_table->mask)))
4636 expire = false;
4637 }
4638 rcu_read_unlock();
4639 return expire;
4640}
4641EXPORT_SYMBOL(rps_may_expire_flow);
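
/* Illustrative sketch (not part of the original file): a driver that
 * programmed hardware flow steering filters from ndo_rx_flow_steer() might
 * expire them from a periodic service task like this. The "mydrv_*" types
 * and helpers are hypothetical; only rps_may_expire_flow() is real.
 *
 *	static void mydrv_expire_flow_filters(struct mydrv_adapter *ad)
 *	{
 *		struct mydrv_filter *f, *tmp;
 *
 *		list_for_each_entry_safe(f, tmp, &ad->flow_filters, node) {
 *			if (!rps_may_expire_flow(ad->netdev, f->rxq_index,
 *						 f->flow_id, f->filter_id))
 *				continue;
 *			mydrv_remove_hw_filter(ad, f);
 *			list_del(&f->node);
 *			kfree(f);
 *		}
 *	}
 */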
4642
4643#endif /* CONFIG_RFS_ACCEL */
4644
4645/* Called from hardirq (IPI) context */
4646static void rps_trigger_softirq(void *data)
4647{
4648 struct softnet_data *sd = data;
4649
4650 ____napi_schedule(sd, &sd->backlog);
4651 sd->received_rps++;
4652}
4653
4654#endif /* CONFIG_RPS */
4655
4656/* Called from hardirq (IPI) context */
4657static void trigger_rx_softirq(void *data)
4658{
4659 struct softnet_data *sd = data;
4660
4661 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4662 smp_store_release(&sd->defer_ipi_scheduled, 0);
4663}
4664
4665/*
4666 * After we queued a packet into sd->input_pkt_queue,
4667 * we need to make sure this queue is serviced soon.
4668 *
4669 * - If this is another cpu queue, link it to our rps_ipi_list,
4670 * and make sure we will process rps_ipi_list from net_rx_action().
4671 *
4672 * - If this is our own queue, NAPI schedule our backlog.
4673 * Note that this also raises NET_RX_SOFTIRQ.
4674 */
4675static void napi_schedule_rps(struct softnet_data *sd)
4676{
4677 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
4678
4679#ifdef CONFIG_RPS
4680 if (sd != mysd) {
4681 sd->rps_ipi_next = mysd->rps_ipi_list;
4682 mysd->rps_ipi_list = sd;
4683
4684 /* If not called from net_rx_action() or napi_threaded_poll()
4685 * we have to raise NET_RX_SOFTIRQ.
4686 */
4687 if (!mysd->in_net_rx_action && !mysd->in_napi_threaded_poll)
4688 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4689 return;
4690 }
4691#endif /* CONFIG_RPS */
4692 __napi_schedule_irqoff(&mysd->backlog);
4693}
4694
4695#ifdef CONFIG_NET_FLOW_LIMIT
4696int netdev_flow_limit_table_len __read_mostly = (1 << 12);
4697#endif
4698
4699static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
4700{
4701#ifdef CONFIG_NET_FLOW_LIMIT
4702 struct sd_flow_limit *fl;
4703 struct softnet_data *sd;
4704 unsigned int old_flow, new_flow;
4705
4706 if (qlen < (READ_ONCE(net_hotdata.max_backlog) >> 1))
4707 return false;
4708
4709 sd = this_cpu_ptr(&softnet_data);
4710
4711 rcu_read_lock();
4712 fl = rcu_dereference(sd->flow_limit);
4713 if (fl) {
4714 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
4715 old_flow = fl->history[fl->history_head];
4716 fl->history[fl->history_head] = new_flow;
4717
4718 fl->history_head++;
4719 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
4720
4721 if (likely(fl->buckets[old_flow]))
4722 fl->buckets[old_flow]--;
4723
4724 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
4725 fl->count++;
4726 rcu_read_unlock();
4727 return true;
4728 }
4729 }
4730 rcu_read_unlock();
4731#endif
4732 return false;
4733}
4734
4735/*
4736 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
4737 * queue (may be a remote CPU queue).
4738 */
4739static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
4740 unsigned int *qtail)
4741{
4742 enum skb_drop_reason reason;
4743 struct softnet_data *sd;
4744 unsigned long flags;
4745 unsigned int qlen;
4746
4747 reason = SKB_DROP_REASON_NOT_SPECIFIED;
4748 sd = &per_cpu(softnet_data, cpu);
4749
4750 rps_lock_irqsave(sd, &flags);
4751 if (!netif_running(skb->dev))
4752 goto drop;
4753 qlen = skb_queue_len(&sd->input_pkt_queue);
4754 if (qlen <= READ_ONCE(net_hotdata.max_backlog) &&
4755 !skb_flow_limit(skb, qlen)) {
4756 if (qlen) {
4757enqueue:
4758 __skb_queue_tail(&sd->input_pkt_queue, skb);
4759 input_queue_tail_incr_save(sd, qtail);
4760 rps_unlock_irq_restore(sd, &flags);
4761 return NET_RX_SUCCESS;
4762 }
4763
4764 /* Schedule NAPI for backlog device
4765		 * We can use a non-atomic operation since we own the queue lock
4766 */
4767 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
4768 napi_schedule_rps(sd);
4769 goto enqueue;
4770 }
4771 reason = SKB_DROP_REASON_CPU_BACKLOG;
4772
4773drop:
4774 sd->dropped++;
4775 rps_unlock_irq_restore(sd, &flags);
4776
4777 dev_core_stats_rx_dropped_inc(skb->dev);
4778 kfree_skb_reason(skb, reason);
4779 return NET_RX_DROP;
4780}
4781
4782static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
4783{
4784 struct net_device *dev = skb->dev;
4785 struct netdev_rx_queue *rxqueue;
4786
4787 rxqueue = dev->_rx;
4788
4789 if (skb_rx_queue_recorded(skb)) {
4790 u16 index = skb_get_rx_queue(skb);
4791
4792 if (unlikely(index >= dev->real_num_rx_queues)) {
4793 WARN_ONCE(dev->real_num_rx_queues > 1,
4794 "%s received packet on queue %u, but number "
4795 "of RX queues is %u\n",
4796 dev->name, index, dev->real_num_rx_queues);
4797
4798 return rxqueue; /* Return first rxqueue */
4799 }
4800 rxqueue += index;
4801 }
4802 return rxqueue;
4803}
4804
4805u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
4806 struct bpf_prog *xdp_prog)
4807{
4808 void *orig_data, *orig_data_end, *hard_start;
4809 struct netdev_rx_queue *rxqueue;
4810 bool orig_bcast, orig_host;
4811 u32 mac_len, frame_sz;
4812 __be16 orig_eth_type;
4813 struct ethhdr *eth;
4814 u32 metalen, act;
4815 int off;
4816
4817 /* The XDP program wants to see the packet starting at the MAC
4818 * header.
4819 */
4820 mac_len = skb->data - skb_mac_header(skb);
4821 hard_start = skb->data - skb_headroom(skb);
4822
4823	/* SKB "head" area always has tailroom for skb_shared_info */
4824 frame_sz = (void *)skb_end_pointer(skb) - hard_start;
4825 frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4826
4827 rxqueue = netif_get_rxqueue(skb);
4828 xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
4829 xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
4830 skb_headlen(skb) + mac_len, true);
4831 if (skb_is_nonlinear(skb)) {
4832 skb_shinfo(skb)->xdp_frags_size = skb->data_len;
4833 xdp_buff_set_frags_flag(xdp);
4834 } else {
4835 xdp_buff_clear_frags_flag(xdp);
4836 }
4837
4838 orig_data_end = xdp->data_end;
4839 orig_data = xdp->data;
4840 eth = (struct ethhdr *)xdp->data;
4841 orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr);
4842 orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
4843 orig_eth_type = eth->h_proto;
4844
4845 act = bpf_prog_run_xdp(xdp_prog, xdp);
4846
4847 /* check if bpf_xdp_adjust_head was used */
4848 off = xdp->data - orig_data;
4849 if (off) {
4850 if (off > 0)
4851 __skb_pull(skb, off);
4852 else if (off < 0)
4853 __skb_push(skb, -off);
4854
4855 skb->mac_header += off;
4856 skb_reset_network_header(skb);
4857 }
4858
4859 /* check if bpf_xdp_adjust_tail was used */
4860 off = xdp->data_end - orig_data_end;
4861 if (off != 0) {
4862 skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
4863 skb->len += off; /* positive on grow, negative on shrink */
4864 }
4865
4866 /* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers
4867	 * (e.g. bpf_xdp_adjust_tail), so we need to update data_len here.
4868 */
4869 if (xdp_buff_has_frags(xdp))
4870 skb->data_len = skb_shinfo(skb)->xdp_frags_size;
4871 else
4872 skb->data_len = 0;
4873
4874	/* check if XDP changed the eth hdr such that the SKB needs an update */
4875 eth = (struct ethhdr *)xdp->data;
4876 if ((orig_eth_type != eth->h_proto) ||
4877 (orig_host != ether_addr_equal_64bits(eth->h_dest,
4878 skb->dev->dev_addr)) ||
4879 (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
4880 __skb_push(skb, ETH_HLEN);
4881 skb->pkt_type = PACKET_HOST;
4882 skb->protocol = eth_type_trans(skb, skb->dev);
4883 }
4884
4885 /* Redirect/Tx gives L2 packet, code that will reuse skb must __skb_pull
4886 * before calling us again on redirect path. We do not call do_redirect
4887 * as we leave that up to the caller.
4888 *
4889 * Caller is responsible for managing lifetime of skb (i.e. calling
4890 * kfree_skb in response to actions it cannot handle/XDP_DROP).
4891 */
4892 switch (act) {
4893 case XDP_REDIRECT:
4894 case XDP_TX:
4895 __skb_push(skb, mac_len);
4896 break;
4897 case XDP_PASS:
4898 metalen = xdp->data - xdp->data_meta;
4899 if (metalen)
4900 skb_metadata_set(skb, metalen);
4901 break;
4902 }
4903
4904 return act;
4905}
4906
4907static int
4908netif_skb_check_for_xdp(struct sk_buff **pskb, struct bpf_prog *prog)
4909{
4910 struct sk_buff *skb = *pskb;
4911 int err, hroom, troom;
4912
4913 if (!skb_cow_data_for_xdp(this_cpu_read(system_page_pool), pskb, prog))
4914 return 0;
4915
4916 /* In case we have to go down the path and also linearize,
4917	 * then let's do the pskb_expand_head() work just once here.
4918 */
4919 hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
4920 troom = skb->tail + skb->data_len - skb->end;
4921 err = pskb_expand_head(skb,
4922 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
4923 troom > 0 ? troom + 128 : 0, GFP_ATOMIC);
4924 if (err)
4925 return err;
4926
4927 return skb_linearize(skb);
4928}
4929
4930static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
4931 struct xdp_buff *xdp,
4932 struct bpf_prog *xdp_prog)
4933{
4934 struct sk_buff *skb = *pskb;
4935 u32 mac_len, act = XDP_DROP;
4936
4937 /* Reinjected packets coming from act_mirred or similar should
4938 * not get XDP generic processing.
4939 */
4940 if (skb_is_redirected(skb))
4941 return XDP_PASS;
4942
4943 /* XDP packets must have sufficient headroom of XDP_PACKET_HEADROOM
4944	 * bytes. This is the guarantee that native XDP also provides,
4945 * thus we need to do it here as well.
4946 */
4947 mac_len = skb->data - skb_mac_header(skb);
4948 __skb_push(skb, mac_len);
4949
4950 if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
4951 skb_headroom(skb) < XDP_PACKET_HEADROOM) {
4952 if (netif_skb_check_for_xdp(pskb, xdp_prog))
4953 goto do_drop;
4954 }
4955
4956 __skb_pull(*pskb, mac_len);
4957
4958 act = bpf_prog_run_generic_xdp(*pskb, xdp, xdp_prog);
4959 switch (act) {
4960 case XDP_REDIRECT:
4961 case XDP_TX:
4962 case XDP_PASS:
4963 break;
4964 default:
4965 bpf_warn_invalid_xdp_action((*pskb)->dev, xdp_prog, act);
4966 fallthrough;
4967 case XDP_ABORTED:
4968 trace_xdp_exception((*pskb)->dev, xdp_prog, act);
4969 fallthrough;
4970 case XDP_DROP:
4971 do_drop:
4972 kfree_skb(*pskb);
4973 break;
4974 }
4975
4976 return act;
4977}
4978
4979/* When doing generic XDP we have to bypass the qdisc layer and the
4980 * network taps in order to match in-driver-XDP behavior. This also means
4981 * that XDP packets are able to starve other packets going through a qdisc,
4982 * and DDOS attacks will be more effective. In-driver XDP uses dedicated TX
4983 * queues, so it does not have this starvation issue.
4984 */
4985void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
4986{
4987 struct net_device *dev = skb->dev;
4988 struct netdev_queue *txq;
4989 bool free_skb = true;
4990 int cpu, rc;
4991
4992 txq = netdev_core_pick_tx(dev, skb, NULL);
4993 cpu = smp_processor_id();
4994 HARD_TX_LOCK(dev, txq, cpu);
4995 if (!netif_xmit_frozen_or_drv_stopped(txq)) {
4996 rc = netdev_start_xmit(skb, dev, txq, 0);
4997 if (dev_xmit_complete(rc))
4998 free_skb = false;
4999 }
5000 HARD_TX_UNLOCK(dev, txq);
5001 if (free_skb) {
5002 trace_xdp_exception(dev, xdp_prog, XDP_TX);
5003 dev_core_stats_tx_dropped_inc(dev);
5004 kfree_skb(skb);
5005 }
5006}
5007
5008static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
5009
5010int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb)
5011{
5012 if (xdp_prog) {
5013 struct xdp_buff xdp;
5014 u32 act;
5015 int err;
5016
5017 act = netif_receive_generic_xdp(pskb, &xdp, xdp_prog);
5018 if (act != XDP_PASS) {
5019 switch (act) {
5020 case XDP_REDIRECT:
5021 err = xdp_do_generic_redirect((*pskb)->dev, *pskb,
5022 &xdp, xdp_prog);
5023 if (err)
5024 goto out_redir;
5025 break;
5026 case XDP_TX:
5027 generic_xdp_tx(*pskb, xdp_prog);
5028 break;
5029 }
5030 return XDP_DROP;
5031 }
5032 }
5033 return XDP_PASS;
5034out_redir:
5035 kfree_skb_reason(*pskb, SKB_DROP_REASON_XDP);
5036 return XDP_DROP;
5037}
5038EXPORT_SYMBOL_GPL(do_xdp_generic);
5039
5040static int netif_rx_internal(struct sk_buff *skb)
5041{
5042 int ret;
5043
5044 net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb);
5045
5046 trace_netif_rx(skb);
5047
5048#ifdef CONFIG_RPS
5049 if (static_branch_unlikely(&rps_needed)) {
5050 struct rps_dev_flow voidflow, *rflow = &voidflow;
5051 int cpu;
5052
5053 rcu_read_lock();
5054
5055 cpu = get_rps_cpu(skb->dev, skb, &rflow);
5056 if (cpu < 0)
5057 cpu = smp_processor_id();
5058
5059 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5060
5061 rcu_read_unlock();
5062 } else
5063#endif
5064 {
5065 unsigned int qtail;
5066
5067 ret = enqueue_to_backlog(skb, smp_processor_id(), &qtail);
5068 }
5069 return ret;
5070}
5071
5072/**
5073 * __netif_rx - Slightly optimized version of netif_rx
5074 * @skb: buffer to post
5075 *
5076 * This behaves as netif_rx except that it does not disable bottom halves.
5077 * As a result this function may only be invoked from the interrupt context
5078 * (either hard or soft interrupt).
5079 */
5080int __netif_rx(struct sk_buff *skb)
5081{
5082 int ret;
5083
5084 lockdep_assert_once(hardirq_count() | softirq_count());
5085
5086 trace_netif_rx_entry(skb);
5087 ret = netif_rx_internal(skb);
5088 trace_netif_rx_exit(ret);
5089 return ret;
5090}
5091EXPORT_SYMBOL(__netif_rx);
5092
5093/**
5094 * netif_rx - post buffer to the network code
5095 * @skb: buffer to post
5096 *
5097 * This function receives a packet from a device driver and queues it for
5098 * the upper (protocol) levels to process via the backlog NAPI device. It
5099 * always succeeds. The buffer may be dropped during processing for
5100 * congestion control or by the protocol layers.
5101 * The network buffer is passed via the backlog NAPI device. Modern NIC
5102 * drivers should use NAPI and GRO.
5103 * This function can be used from interrupt and from process context. The
5104 * caller from process context must not disable interrupts before invoking
5105 * this function.
5106 *
5107 * return values:
5108 * NET_RX_SUCCESS (no congestion)
5109 * NET_RX_DROP (packet was dropped)
5110 *
5111 */
5112int netif_rx(struct sk_buff *skb)
5113{
5114 bool need_bh_off = !(hardirq_count() | softirq_count());
5115 int ret;
5116
5117 if (need_bh_off)
5118 local_bh_disable();
5119 trace_netif_rx_entry(skb);
5120 ret = netif_rx_internal(skb);
5121 trace_netif_rx_exit(ret);
5122 if (need_bh_off)
5123 local_bh_enable();
5124 return ret;
5125}
5126EXPORT_SYMBOL(netif_rx);
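
/* Illustrative sketch (not part of the original file): a simple (non-NAPI)
 * driver receive path that builds an skb from a received buffer and posts
 * it with netif_rx(). "mydrv_rx_frame" and its arguments are hypothetical.
 *
 *	static void mydrv_rx_frame(struct net_device *dev,
 *				   const void *buf, unsigned int len)
 *	{
 *		struct sk_buff *skb;
 *
 *		skb = netdev_alloc_skb(dev, len);
 *		if (!skb) {
 *			dev->stats.rx_dropped++;
 *			return;
 *		}
 *		skb_put_data(skb, buf, len);
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 *	}
 */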
5127
5128static __latent_entropy void net_tx_action(struct softirq_action *h)
5129{
5130 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
5131
5132 if (sd->completion_queue) {
5133 struct sk_buff *clist;
5134
5135 local_irq_disable();
5136 clist = sd->completion_queue;
5137 sd->completion_queue = NULL;
5138 local_irq_enable();
5139
5140 while (clist) {
5141 struct sk_buff *skb = clist;
5142
5143 clist = clist->next;
5144
5145 WARN_ON(refcount_read(&skb->users));
5146 if (likely(get_kfree_skb_cb(skb)->reason == SKB_CONSUMED))
5147 trace_consume_skb(skb, net_tx_action);
5148 else
5149 trace_kfree_skb(skb, net_tx_action,
5150 get_kfree_skb_cb(skb)->reason);
5151
5152 if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
5153 __kfree_skb(skb);
5154 else
5155 __napi_kfree_skb(skb,
5156 get_kfree_skb_cb(skb)->reason);
5157 }
5158 }
5159
5160 if (sd->output_queue) {
5161 struct Qdisc *head;
5162
5163 local_irq_disable();
5164 head = sd->output_queue;
5165 sd->output_queue = NULL;
5166 sd->output_queue_tailp = &sd->output_queue;
5167 local_irq_enable();
5168
5169 rcu_read_lock();
5170
5171 while (head) {
5172 struct Qdisc *q = head;
5173 spinlock_t *root_lock = NULL;
5174
5175 head = head->next_sched;
5176
5177 /* We need to make sure head->next_sched is read
5178 * before clearing __QDISC_STATE_SCHED
5179 */
5180 smp_mb__before_atomic();
5181
5182 if (!(q->flags & TCQ_F_NOLOCK)) {
5183 root_lock = qdisc_lock(q);
5184 spin_lock(root_lock);
5185 } else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
5186 &q->state))) {
5187 /* There is a synchronize_net() between
5188 * STATE_DEACTIVATED flag being set and
5189 * qdisc_reset()/some_qdisc_is_busy() in
5190 * dev_deactivate(), so we can safely bail out
5191 * early here to avoid data race between
5192 * qdisc_deactivate() and some_qdisc_is_busy()
5193 * for lockless qdisc.
5194 */
5195 clear_bit(__QDISC_STATE_SCHED, &q->state);
5196 continue;
5197 }
5198
5199 clear_bit(__QDISC_STATE_SCHED, &q->state);
5200 qdisc_run(q);
5201 if (root_lock)
5202 spin_unlock(root_lock);
5203 }
5204
5205 rcu_read_unlock();
5206 }
5207
5208 xfrm_dev_backlog(sd);
5209}
5210
5211#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
5212/* This hook is defined here for ATM LANE */
5213int (*br_fdb_test_addr_hook)(struct net_device *dev,
5214 unsigned char *addr) __read_mostly;
5215EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
5216#endif
5217
5218/**
5219 * netdev_is_rx_handler_busy - check if receive handler is registered
5220 * @dev: device to check
5221 *
5222 * Check if a receive handler is already registered for a given device.
5223 * Return true if there is one.
5224 *
5225 * The caller must hold the rtnl_mutex.
5226 */
5227bool netdev_is_rx_handler_busy(struct net_device *dev)
5228{
5229 ASSERT_RTNL();
5230 return dev && rtnl_dereference(dev->rx_handler);
5231}
5232EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
5233
5234/**
5235 * netdev_rx_handler_register - register receive handler
5236 * @dev: device to register a handler for
5237 * @rx_handler: receive handler to register
5238 * @rx_handler_data: data pointer that is used by rx handler
5239 *
5240 * Register a receive handler for a device. This handler will then be
5241 * called from __netif_receive_skb. A negative errno code is returned
5242 * on a failure.
5243 *
5244 * The caller must hold the rtnl_mutex.
5245 *
5246 * For a general description of rx_handler, see enum rx_handler_result.
5247 */
5248int netdev_rx_handler_register(struct net_device *dev,
5249 rx_handler_func_t *rx_handler,
5250 void *rx_handler_data)
5251{
5252 if (netdev_is_rx_handler_busy(dev))
5253 return -EBUSY;
5254
5255 if (dev->priv_flags & IFF_NO_RX_HANDLER)
5256 return -EINVAL;
5257
5258 /* Note: rx_handler_data must be set before rx_handler */
5259 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
5260 rcu_assign_pointer(dev->rx_handler, rx_handler);
5261
5262 return 0;
5263}
5264EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
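
/* Illustrative sketch (not part of the original file): how a virtual device
 * (bridge/bond/team style) might claim a lower device's receive path. The
 * "myrelay_*" names are hypothetical; the rx_handler prototype and return
 * codes are the real ones.
 *
 *	static rx_handler_result_t myrelay_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *		struct myrelay_port *port;
 *
 *		port = rcu_dereference(skb->dev->rx_handler_data);
 *		if (myrelay_forward(port, skb))
 *			return RX_HANDLER_CONSUMED;
 *		return RX_HANDLER_PASS;
 *	}
 *
 *	With rtnl held:
 *		err = netdev_rx_handler_register(lower_dev,
 *						 myrelay_handle_frame, port);
 */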
5265
5266/**
5267 * netdev_rx_handler_unregister - unregister receive handler
5268 * @dev: device to unregister a handler from
5269 *
5270 * Unregister a receive handler from a device.
5271 *
5272 * The caller must hold the rtnl_mutex.
5273 */
5274void netdev_rx_handler_unregister(struct net_device *dev)
5275{
5276
5277 ASSERT_RTNL();
5278 RCU_INIT_POINTER(dev->rx_handler, NULL);
5279	/* a reader seeing a non-NULL rx_handler in a rcu_read_lock()
5280	 * section is guaranteed to see a non-NULL rx_handler_data
5281	 * as well.
5282 */
5283 synchronize_net();
5284 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
5285}
5286EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
5287
5288/*
5289 * Limit the use of PFMEMALLOC reserves to those protocols that implement
5290 * the special handling of PFMEMALLOC skbs.
5291 */
5292static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
5293{
5294 switch (skb->protocol) {
5295 case htons(ETH_P_ARP):
5296 case htons(ETH_P_IP):
5297 case htons(ETH_P_IPV6):
5298 case htons(ETH_P_8021Q):
5299 case htons(ETH_P_8021AD):
5300 return true;
5301 default:
5302 return false;
5303 }
5304}
5305
5306static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
5307 int *ret, struct net_device *orig_dev)
5308{
5309 if (nf_hook_ingress_active(skb)) {
5310 int ingress_retval;
5311
5312 if (*pt_prev) {
5313 *ret = deliver_skb(skb, *pt_prev, orig_dev);
5314 *pt_prev = NULL;
5315 }
5316
5317 rcu_read_lock();
5318 ingress_retval = nf_hook_ingress(skb);
5319 rcu_read_unlock();
5320 return ingress_retval;
5321 }
5322 return 0;
5323}
5324
5325static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
5326 struct packet_type **ppt_prev)
5327{
5328 struct packet_type *ptype, *pt_prev;
5329 rx_handler_func_t *rx_handler;
5330 struct sk_buff *skb = *pskb;
5331 struct net_device *orig_dev;
5332 bool deliver_exact = false;
5333 int ret = NET_RX_DROP;
5334 __be16 type;
5335
5336 net_timestamp_check(!READ_ONCE(net_hotdata.tstamp_prequeue), skb);
5337
5338 trace_netif_receive_skb(skb);
5339
5340 orig_dev = skb->dev;
5341
5342 skb_reset_network_header(skb);
5343 if (!skb_transport_header_was_set(skb))
5344 skb_reset_transport_header(skb);
5345 skb_reset_mac_len(skb);
5346
5347 pt_prev = NULL;
5348
5349another_round:
5350 skb->skb_iif = skb->dev->ifindex;
5351
5352 __this_cpu_inc(softnet_data.processed);
5353
5354 if (static_branch_unlikely(&generic_xdp_needed_key)) {
5355 int ret2;
5356
5357 migrate_disable();
5358 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog),
5359 &skb);
5360 migrate_enable();
5361
5362 if (ret2 != XDP_PASS) {
5363 ret = NET_RX_DROP;
5364 goto out;
5365 }
5366 }
5367
5368 if (eth_type_vlan(skb->protocol)) {
5369 skb = skb_vlan_untag(skb);
5370 if (unlikely(!skb))
5371 goto out;
5372 }
5373
5374 if (skb_skip_tc_classify(skb))
5375 goto skip_classify;
5376
5377 if (pfmemalloc)
5378 goto skip_taps;
5379
5380 list_for_each_entry_rcu(ptype, &net_hotdata.ptype_all, list) {
5381 if (pt_prev)
5382 ret = deliver_skb(skb, pt_prev, orig_dev);
5383 pt_prev = ptype;
5384 }
5385
5386 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
5387 if (pt_prev)
5388 ret = deliver_skb(skb, pt_prev, orig_dev);
5389 pt_prev = ptype;
5390 }
5391
5392skip_taps:
5393#ifdef CONFIG_NET_INGRESS
5394 if (static_branch_unlikely(&ingress_needed_key)) {
5395 bool another = false;
5396
5397 nf_skip_egress(skb, true);
5398 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev,
5399 &another);
5400 if (another)
5401 goto another_round;
5402 if (!skb)
5403 goto out;
5404
5405 nf_skip_egress(skb, false);
5406 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
5407 goto out;
5408 }
5409#endif
5410 skb_reset_redirect(skb);
5411skip_classify:
5412 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
5413 goto drop;
5414
5415 if (skb_vlan_tag_present(skb)) {
5416 if (pt_prev) {
5417 ret = deliver_skb(skb, pt_prev, orig_dev);
5418 pt_prev = NULL;
5419 }
5420 if (vlan_do_receive(&skb))
5421 goto another_round;
5422 else if (unlikely(!skb))
5423 goto out;
5424 }
5425
5426 rx_handler = rcu_dereference(skb->dev->rx_handler);
5427 if (rx_handler) {
5428 if (pt_prev) {
5429 ret = deliver_skb(skb, pt_prev, orig_dev);
5430 pt_prev = NULL;
5431 }
5432 switch (rx_handler(&skb)) {
5433 case RX_HANDLER_CONSUMED:
5434 ret = NET_RX_SUCCESS;
5435 goto out;
5436 case RX_HANDLER_ANOTHER:
5437 goto another_round;
5438 case RX_HANDLER_EXACT:
5439 deliver_exact = true;
5440 break;
5441 case RX_HANDLER_PASS:
5442 break;
5443 default:
5444 BUG();
5445 }
5446 }
5447
5448 if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) {
5449check_vlan_id:
5450 if (skb_vlan_tag_get_id(skb)) {
5451 /* Vlan id is non 0 and vlan_do_receive() above couldn't
5452 * find vlan device.
5453 */
5454 skb->pkt_type = PACKET_OTHERHOST;
5455 } else if (eth_type_vlan(skb->protocol)) {
5456 /* Outer header is 802.1P with vlan 0, inner header is
5457 * 802.1Q or 802.1AD and vlan_do_receive() above could
5458 * not find vlan dev for vlan id 0.
5459 */
5460 __vlan_hwaccel_clear_tag(skb);
5461 skb = skb_vlan_untag(skb);
5462 if (unlikely(!skb))
5463 goto out;
5464 if (vlan_do_receive(&skb))
5465 /* After stripping off 802.1P header with vlan 0
5466 * vlan dev is found for inner header.
5467 */
5468 goto another_round;
5469 else if (unlikely(!skb))
5470 goto out;
5471 else
5472 /* We have stripped outer 802.1P vlan 0 header.
5473 * But could not find vlan dev.
5474 * check again for vlan id to set OTHERHOST.
5475 */
5476 goto check_vlan_id;
5477 }
5478 /* Note: we might in the future use prio bits
5479 * and set skb->priority like in vlan_do_receive()
5480 * For the time being, just ignore Priority Code Point
5481 */
5482 __vlan_hwaccel_clear_tag(skb);
5483 }
5484
5485 type = skb->protocol;
5486
5487 /* deliver only exact match when indicated */
5488 if (likely(!deliver_exact)) {
5489 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5490 &ptype_base[ntohs(type) &
5491 PTYPE_HASH_MASK]);
5492 }
5493
5494 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5495 &orig_dev->ptype_specific);
5496
5497 if (unlikely(skb->dev != orig_dev)) {
5498 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5499 &skb->dev->ptype_specific);
5500 }
5501
5502 if (pt_prev) {
5503 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
5504 goto drop;
5505 *ppt_prev = pt_prev;
5506 } else {
5507drop:
5508 if (!deliver_exact)
5509 dev_core_stats_rx_dropped_inc(skb->dev);
5510 else
5511 dev_core_stats_rx_nohandler_inc(skb->dev);
5512 kfree_skb_reason(skb, SKB_DROP_REASON_UNHANDLED_PROTO);
5513		/* Jamal, now you will not be able to escape explaining
5514		 * to me how you were going to use this. :-)
5515 */
5516 ret = NET_RX_DROP;
5517 }
5518
5519out:
5520 /* The invariant here is that if *ppt_prev is not NULL
5521 * then skb should also be non-NULL.
5522 *
5523 * Apparently *ppt_prev assignment above holds this invariant due to
5524 * skb dereferencing near it.
5525 */
5526 *pskb = skb;
5527 return ret;
5528}
5529
5530static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
5531{
5532 struct net_device *orig_dev = skb->dev;
5533 struct packet_type *pt_prev = NULL;
5534 int ret;
5535
5536 ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5537 if (pt_prev)
5538 ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
5539 skb->dev, pt_prev, orig_dev);
5540 return ret;
5541}
5542
5543/**
5544 * netif_receive_skb_core - special purpose version of netif_receive_skb
5545 * @skb: buffer to process
5546 *
5547 * More direct receive version of netif_receive_skb(). It should
5548 * only be used by callers that have a need to skip RPS and Generic XDP.
5549 * Caller must also take care of handling if ``(page_is_)pfmemalloc``.
5550 *
5551 * This function may only be called from softirq context and interrupts
5552 * should be enabled.
5553 *
5554 * Return values (usually ignored):
5555 * NET_RX_SUCCESS: no congestion
5556 * NET_RX_DROP: packet was dropped
5557 */
5558int netif_receive_skb_core(struct sk_buff *skb)
5559{
5560 int ret;
5561
5562 rcu_read_lock();
5563 ret = __netif_receive_skb_one_core(skb, false);
5564 rcu_read_unlock();
5565
5566 return ret;
5567}
5568EXPORT_SYMBOL(netif_receive_skb_core);
5569
5570static inline void __netif_receive_skb_list_ptype(struct list_head *head,
5571 struct packet_type *pt_prev,
5572 struct net_device *orig_dev)
5573{
5574 struct sk_buff *skb, *next;
5575
5576 if (!pt_prev)
5577 return;
5578 if (list_empty(head))
5579 return;
5580 if (pt_prev->list_func != NULL)
5581 INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
5582 ip_list_rcv, head, pt_prev, orig_dev);
5583 else
5584 list_for_each_entry_safe(skb, next, head, list) {
5585 skb_list_del_init(skb);
5586 pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
5587 }
5588}
5589
5590static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
5591{
5592 /* Fast-path assumptions:
5593 * - There is no RX handler.
5594 * - Only one packet_type matches.
5595 * If either of these fails, we will end up doing some per-packet
5596 * processing in-line, then handling the 'last ptype' for the whole
5597 * sublist. This can't cause out-of-order delivery to any single ptype,
5598 * because the 'last ptype' must be constant across the sublist, and all
5599 * other ptypes are handled per-packet.
5600 */
5601 /* Current (common) ptype of sublist */
5602 struct packet_type *pt_curr = NULL;
5603 /* Current (common) orig_dev of sublist */
5604 struct net_device *od_curr = NULL;
5605 struct list_head sublist;
5606 struct sk_buff *skb, *next;
5607
5608 INIT_LIST_HEAD(&sublist);
5609 list_for_each_entry_safe(skb, next, head, list) {
5610 struct net_device *orig_dev = skb->dev;
5611 struct packet_type *pt_prev = NULL;
5612
5613 skb_list_del_init(skb);
5614 __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5615 if (!pt_prev)
5616 continue;
5617 if (pt_curr != pt_prev || od_curr != orig_dev) {
5618 /* dispatch old sublist */
5619 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5620 /* start new sublist */
5621 INIT_LIST_HEAD(&sublist);
5622 pt_curr = pt_prev;
5623 od_curr = orig_dev;
5624 }
5625 list_add_tail(&skb->list, &sublist);
5626 }
5627
5628 /* dispatch final sublist */
5629 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5630}
5631
5632static int __netif_receive_skb(struct sk_buff *skb)
5633{
5634 int ret;
5635
5636 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
5637 unsigned int noreclaim_flag;
5638
5639 /*
5640 * PFMEMALLOC skbs are special, they should
5641 * - be delivered to SOCK_MEMALLOC sockets only
5642 * - stay away from userspace
5643 * - have bounded memory usage
5644 *
5645 * Use PF_MEMALLOC as this saves us from propagating the allocation
5646 * context down to all allocation sites.
5647 */
5648 noreclaim_flag = memalloc_noreclaim_save();
5649 ret = __netif_receive_skb_one_core(skb, true);
5650 memalloc_noreclaim_restore(noreclaim_flag);
5651 } else
5652 ret = __netif_receive_skb_one_core(skb, false);
5653
5654 return ret;
5655}
5656
5657static void __netif_receive_skb_list(struct list_head *head)
5658{
5659 unsigned long noreclaim_flag = 0;
5660 struct sk_buff *skb, *next;
5661 bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */
5662
5663 list_for_each_entry_safe(skb, next, head, list) {
5664 if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
5665 struct list_head sublist;
5666
5667 /* Handle the previous sublist */
5668 list_cut_before(&sublist, head, &skb->list);
5669 if (!list_empty(&sublist))
5670 __netif_receive_skb_list_core(&sublist, pfmemalloc);
5671 pfmemalloc = !pfmemalloc;
5672 /* See comments in __netif_receive_skb */
5673 if (pfmemalloc)
5674 noreclaim_flag = memalloc_noreclaim_save();
5675 else
5676 memalloc_noreclaim_restore(noreclaim_flag);
5677 }
5678 }
5679 /* Handle the remaining sublist */
5680 if (!list_empty(head))
5681 __netif_receive_skb_list_core(head, pfmemalloc);
5682 /* Restore pflags */
5683 if (pfmemalloc)
5684 memalloc_noreclaim_restore(noreclaim_flag);
5685}
5686
5687static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
5688{
5689 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
5690 struct bpf_prog *new = xdp->prog;
5691 int ret = 0;
5692
5693 switch (xdp->command) {
5694 case XDP_SETUP_PROG:
5695 rcu_assign_pointer(dev->xdp_prog, new);
5696 if (old)
5697 bpf_prog_put(old);
5698
5699 if (old && !new) {
5700 static_branch_dec(&generic_xdp_needed_key);
5701 } else if (new && !old) {
5702 static_branch_inc(&generic_xdp_needed_key);
5703 dev_disable_lro(dev);
5704 dev_disable_gro_hw(dev);
5705 }
5706 break;
5707
5708 default:
5709 ret = -EINVAL;
5710 break;
5711 }
5712
5713 return ret;
5714}
5715
5716static int netif_receive_skb_internal(struct sk_buff *skb)
5717{
5718 int ret;
5719
5720 net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb);
5721
5722 if (skb_defer_rx_timestamp(skb))
5723 return NET_RX_SUCCESS;
5724
5725 rcu_read_lock();
5726#ifdef CONFIG_RPS
5727 if (static_branch_unlikely(&rps_needed)) {
5728 struct rps_dev_flow voidflow, *rflow = &voidflow;
5729 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5730
5731 if (cpu >= 0) {
5732 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5733 rcu_read_unlock();
5734 return ret;
5735 }
5736 }
5737#endif
5738 ret = __netif_receive_skb(skb);
5739 rcu_read_unlock();
5740 return ret;
5741}
5742
5743void netif_receive_skb_list_internal(struct list_head *head)
5744{
5745 struct sk_buff *skb, *next;
5746 struct list_head sublist;
5747
5748 INIT_LIST_HEAD(&sublist);
5749 list_for_each_entry_safe(skb, next, head, list) {
5750 net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue),
5751 skb);
5752 skb_list_del_init(skb);
5753 if (!skb_defer_rx_timestamp(skb))
5754 list_add_tail(&skb->list, &sublist);
5755 }
5756 list_splice_init(&sublist, head);
5757
5758 rcu_read_lock();
5759#ifdef CONFIG_RPS
5760 if (static_branch_unlikely(&rps_needed)) {
5761 list_for_each_entry_safe(skb, next, head, list) {
5762 struct rps_dev_flow voidflow, *rflow = &voidflow;
5763 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5764
5765 if (cpu >= 0) {
5766 /* Will be handled, remove from list */
5767 skb_list_del_init(skb);
5768 enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5769 }
5770 }
5771 }
5772#endif
5773 __netif_receive_skb_list(head);
5774 rcu_read_unlock();
5775}
5776
5777/**
5778 * netif_receive_skb - process receive buffer from network
5779 * @skb: buffer to process
5780 *
5781 * netif_receive_skb() is the main receive data processing function.
5782 * It always succeeds. The buffer may be dropped during processing
5783 * for congestion control or by the protocol layers.
5784 *
5785 * This function may only be called from softirq context and interrupts
5786 * should be enabled.
5787 *
5788 * Return values (usually ignored):
5789 * NET_RX_SUCCESS: no congestion
5790 * NET_RX_DROP: packet was dropped
5791 */
5792int netif_receive_skb(struct sk_buff *skb)
5793{
5794 int ret;
5795
5796 trace_netif_receive_skb_entry(skb);
5797
5798 ret = netif_receive_skb_internal(skb);
5799 trace_netif_receive_skb_exit(ret);
5800
5801 return ret;
5802}
5803EXPORT_SYMBOL(netif_receive_skb);
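
/* Illustrative sketch (not part of the original file): netif_receive_skb()
 * is typically invoked from a driver's NAPI poll callback, i.e. in softirq
 * context with interrupts enabled. The "mydrv_*" names are hypothetical;
 * many drivers would call napi_gro_receive() here instead.
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct mydrv_ring *ring = container_of(napi, struct mydrv_ring, napi);
 *		struct sk_buff *skb;
 *		int done = 0;
 *
 *		while (done < budget && (skb = mydrv_next_rx_skb(ring))) {
 *			skb->protocol = eth_type_trans(skb, ring->netdev);
 *			netif_receive_skb(skb);
 *			done++;
 *		}
 *		if (done < budget)
 *			napi_complete_done(napi, done);
 *		return done;
 *	}
 */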
5804
5805/**
5806 * netif_receive_skb_list - process many receive buffers from network
5807 * @head: list of skbs to process.
5808 *
5809 * Since return value of netif_receive_skb() is normally ignored, and
5810 * wouldn't be meaningful for a list, this function returns void.
5811 *
5812 * This function may only be called from softirq context and interrupts
5813 * should be enabled.
5814 */
5815void netif_receive_skb_list(struct list_head *head)
5816{
5817 struct sk_buff *skb;
5818
5819 if (list_empty(head))
5820 return;
5821 if (trace_netif_receive_skb_list_entry_enabled()) {
5822 list_for_each_entry(skb, head, list)
5823 trace_netif_receive_skb_list_entry(skb);
5824 }
5825 netif_receive_skb_list_internal(head);
5826 trace_netif_receive_skb_list_exit(0);
5827}
5828EXPORT_SYMBOL(netif_receive_skb_list);
5829
5830static DEFINE_PER_CPU(struct work_struct, flush_works);
5831
5832/* Network device is going away, flush any packets still pending */
5833static void flush_backlog(struct work_struct *work)
5834{
5835 struct sk_buff *skb, *tmp;
5836 struct softnet_data *sd;
5837
5838 local_bh_disable();
5839 sd = this_cpu_ptr(&softnet_data);
5840
5841 rps_lock_irq_disable(sd);
5842 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
5843 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5844 __skb_unlink(skb, &sd->input_pkt_queue);
5845 dev_kfree_skb_irq(skb);
5846 input_queue_head_incr(sd);
5847 }
5848 }
5849 rps_unlock_irq_enable(sd);
5850
5851 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
5852 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5853 __skb_unlink(skb, &sd->process_queue);
5854 kfree_skb(skb);
5855 input_queue_head_incr(sd);
5856 }
5857 }
5858 local_bh_enable();
5859}
5860
5861static bool flush_required(int cpu)
5862{
5863#if IS_ENABLED(CONFIG_RPS)
5864 struct softnet_data *sd = &per_cpu(softnet_data, cpu);
5865 bool do_flush;
5866
5867 rps_lock_irq_disable(sd);
5868
5869 /* as insertion into process_queue happens with the rps lock held,
5870 * process_queue access may race only with dequeue
5871 */
5872 do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
5873 !skb_queue_empty_lockless(&sd->process_queue);
5874 rps_unlock_irq_enable(sd);
5875
5876 return do_flush;
5877#endif
5878 /* Without RPS we can't safely check input_pkt_queue: during a
5879 * concurrent remote skb_queue_splice() we can detect both
5880 * input_pkt_queue and process_queue as empty even though the latter
5881 * could end up containing a lot of packets.
5882 */
5883 return true;
5884}
5885
5886static void flush_all_backlogs(void)
5887{
5888 static cpumask_t flush_cpus;
5889 unsigned int cpu;
5890
5891 /* Since we are under RTNL lock protection we can use static data
5892 * for the cpumask and avoid allocating the possibly large mask
5893 * on the stack.
5894 */
5895 ASSERT_RTNL();
5896
5897 cpus_read_lock();
5898
5899 cpumask_clear(&flush_cpus);
5900 for_each_online_cpu(cpu) {
5901 if (flush_required(cpu)) {
5902 queue_work_on(cpu, system_highpri_wq,
5903 per_cpu_ptr(&flush_works, cpu));
5904 cpumask_set_cpu(cpu, &flush_cpus);
5905 }
5906 }
5907
5908 /* We can have in-flight packets on the CPUs we are not flushing;
5909 * synchronize_net() in unregister_netdevice_many() will take care
5910 * of them.
5911 */
5912 for_each_cpu(cpu, &flush_cpus)
5913 flush_work(per_cpu_ptr(&flush_works, cpu));
5914
5915 cpus_read_unlock();
5916}
5917
5918static void net_rps_send_ipi(struct softnet_data *remsd)
5919{
5920#ifdef CONFIG_RPS
5921 while (remsd) {
5922 struct softnet_data *next = remsd->rps_ipi_next;
5923
5924 if (cpu_online(remsd->cpu))
5925 smp_call_function_single_async(remsd->cpu, &remsd->csd);
5926 remsd = next;
5927 }
5928#endif
5929}
5930
5931/*
5932 * net_rps_action_and_irq_enable() sends any pending IPIs for RPS.
5933 * Note: called with local irq disabled, but exits with local irq enabled.
5934 */
5935static void net_rps_action_and_irq_enable(struct softnet_data *sd)
5936{
5937#ifdef CONFIG_RPS
5938 struct softnet_data *remsd = sd->rps_ipi_list;
5939
5940 if (remsd) {
5941 sd->rps_ipi_list = NULL;
5942
5943 local_irq_enable();
5944
5945 /* Send pending IPIs to kick RPS processing on remote CPUs. */
5946 net_rps_send_ipi(remsd);
5947 } else
5948#endif
5949 local_irq_enable();
5950}
5951
5952static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
5953{
5954#ifdef CONFIG_RPS
5955 return sd->rps_ipi_list != NULL;
5956#else
5957 return false;
5958#endif
5959}
5960
5961static int process_backlog(struct napi_struct *napi, int quota)
5962{
5963 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
5964 bool again = true;
5965 int work = 0;
5966
5967 /* Check if we have pending IPIs; it's better to send them now
5968 * rather than waiting for net_rx_action() to end.
5969 */
5970 if (sd_has_rps_ipi_waiting(sd)) {
5971 local_irq_disable();
5972 net_rps_action_and_irq_enable(sd);
5973 }
5974
5975 napi->weight = READ_ONCE(net_hotdata.dev_rx_weight);
5976 while (again) {
5977 struct sk_buff *skb;
5978
5979 while ((skb = __skb_dequeue(&sd->process_queue))) {
5980 rcu_read_lock();
5981 __netif_receive_skb(skb);
5982 rcu_read_unlock();
5983 input_queue_head_incr(sd);
5984 if (++work >= quota)
5985 return work;
5987 }
5988
5989 rps_lock_irq_disable(sd);
5990 if (skb_queue_empty(&sd->input_pkt_queue)) {
5991 /*
5992 * Inline a custom version of __napi_complete().
5993 * Only the current CPU owns and manipulates this napi,
5994 * and NAPI_STATE_SCHED is the only possible flag set
5995 * on backlog.
5996 * We can use a plain write instead of clear_bit(),
5997 * and we don't need an smp_mb() memory barrier.
5998 */
5999 napi->state = 0;
6000 again = false;
6001 } else {
6002 skb_queue_splice_tail_init(&sd->input_pkt_queue,
6003 &sd->process_queue);
6004 }
6005 rps_unlock_irq_enable(sd);
6006 }
6007
6008 return work;
6009}
6010
6011/**
6012 * __napi_schedule - schedule for receive
6013 * @n: entry to schedule
6014 *
6015 * The entry's receive function will be scheduled to run.
6016 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
6017 */
6018void __napi_schedule(struct napi_struct *n)
6019{
6020 unsigned long flags;
6021
6022 local_irq_save(flags);
6023 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
6024 local_irq_restore(flags);
6025}
6026EXPORT_SYMBOL(__napi_schedule);
6027
6028/**
6029 * napi_schedule_prep - check if napi can be scheduled
6030 * @n: napi context
6031 *
6032 * Test if the NAPI routine is already running, and if not mark
6033 * it as running. This is used as a condition variable to
6034 * ensure only one NAPI poll instance runs. We also make
6035 * sure there is no pending NAPI disable.
6036 */
6037bool napi_schedule_prep(struct napi_struct *n)
6038{
6039 unsigned long new, val = READ_ONCE(n->state);
6040
6041 do {
6042 if (unlikely(val & NAPIF_STATE_DISABLE))
6043 return false;
6044 new = val | NAPIF_STATE_SCHED;
6045
6046 /* Sets the STATE_MISSED bit if STATE_SCHED was already set.
6047 * This was suggested by Alexander Duyck, as the compiler
6048 * emits better code than:
6049 * if (val & NAPIF_STATE_SCHED)
6050 * new |= NAPIF_STATE_MISSED;
6051 */
6052 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
6053 NAPIF_STATE_MISSED;
6054 } while (!try_cmpxchg(&n->state, &val, new));
6055
6056 return !(val & NAPIF_STATE_SCHED);
6057}
6058EXPORT_SYMBOL(napi_schedule_prep);
6059
6060/**
6061 * __napi_schedule_irqoff - schedule for receive
6062 * @n: entry to schedule
6063 *
6064 * Variant of __napi_schedule() assuming hard irqs are masked.
6065 *
6066 * On PREEMPT_RT enabled kernels this maps to __napi_schedule()
6067 * because the interrupt disabled assumption might not be true
6068 * due to force-threaded interrupts and spinlock substitution.
6069 */
6070void __napi_schedule_irqoff(struct napi_struct *n)
6071{
6072 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
6073 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
6074 else
6075 __napi_schedule(n);
6076}
6077EXPORT_SYMBOL(__napi_schedule_irqoff);
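/*
 * Editorial example (not part of the original source): the usual pattern in
 * a device's hard interrupt handler is to mask the device's RX interrupt and
 * schedule NAPI.  napi_schedule_prep() guards against double scheduling, and
 * __napi_schedule_irqoff() is appropriate because hard irqs are masked here.
 * "mydrv_mask_rx_irq" and "mydrv_priv" are hypothetical names.
 *
 *	static irqreturn_t mydrv_irq(int irq, void *data)
 *	{
 *		struct mydrv_priv *priv = data;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			mydrv_mask_rx_irq(priv);
 *			__napi_schedule_irqoff(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */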
6078
6079bool napi_complete_done(struct napi_struct *n, int work_done)
6080{
6081 unsigned long flags, val, new, timeout = 0;
6082 bool ret = true;
6083
6084 /*
6085 * 1) Don't let napi dequeue from the CPU poll list
6086 * just in case it's running on a different CPU.
6087 * 2) If we are busy polling, do nothing here; we have
6088 * the guarantee we will be called later.
6089 */
6090 if (unlikely(n->state & (NAPIF_STATE_NPSVC |
6091 NAPIF_STATE_IN_BUSY_POLL)))
6092 return false;
6093
6094 if (work_done) {
6095 if (n->gro_bitmask)
6096 timeout = READ_ONCE(n->dev->gro_flush_timeout);
6097 n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs);
6098 }
6099 if (n->defer_hard_irqs_count > 0) {
6100 n->defer_hard_irqs_count--;
6101 timeout = READ_ONCE(n->dev->gro_flush_timeout);
6102 if (timeout)
6103 ret = false;
6104 }
6105 if (n->gro_bitmask) {
6106 /* When the NAPI instance uses a timeout and keeps postponing
6107 * it, we need to bound somehow the time packets are kept in
6108 * the GRO layer
6109 */
6110 napi_gro_flush(n, !!timeout);
6111 }
6112
6113 gro_normal_list(n);
6114
6115 if (unlikely(!list_empty(&n->poll_list))) {
6116 /* If n->poll_list is not empty, we need to mask irqs */
6117 local_irq_save(flags);
6118 list_del_init(&n->poll_list);
6119 local_irq_restore(flags);
6120 }
6121 WRITE_ONCE(n->list_owner, -1);
6122
6123 val = READ_ONCE(n->state);
6124 do {
6125 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
6126
6127 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED |
6128 NAPIF_STATE_SCHED_THREADED |
6129 NAPIF_STATE_PREFER_BUSY_POLL);
6130
6131 /* If STATE_MISSED was set, leave STATE_SCHED set,
6132 * because we will call napi->poll() one more time.
6133 * This C code was suggested by Alexander Duyck to help gcc.
6134 */
6135 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
6136 NAPIF_STATE_SCHED;
6137 } while (!try_cmpxchg(&n->state, &val, new));
6138
6139 if (unlikely(val & NAPIF_STATE_MISSED)) {
6140 __napi_schedule(n);
6141 return false;
6142 }
6143
6144 if (timeout)
6145 hrtimer_start(&n->timer, ns_to_ktime(timeout),
6146 HRTIMER_MODE_REL_PINNED);
6147 return ret;
6148}
6149EXPORT_SYMBOL(napi_complete_done);
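/*
 * Editorial example (not part of the original source): a sketch of how a
 * driver's poll callback pairs with napi_complete_done().  Device interrupts
 * are re-enabled only when napi_complete_done() returns true, i.e. no
 * gro_flush_timeout/defer_hard_irqs deferral is pending.  "mydrv_clean_rx"
 * and "mydrv_unmask_rx_irq" are hypothetical helpers.
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct mydrv_priv *priv = container_of(napi, struct mydrv_priv, napi);
 *		int done = mydrv_clean_rx(priv, budget);
 *
 *		if (done < budget && napi_complete_done(napi, done))
 *			mydrv_unmask_rx_irq(priv);
 *		return done;
 *	}
 */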
6150
6151/* must be called under rcu_read_lock(), as we don't take a reference */
6152struct napi_struct *napi_by_id(unsigned int napi_id)
6153{
6154 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
6155 struct napi_struct *napi;
6156
6157 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
6158 if (napi->napi_id == napi_id)
6159 return napi;
6160
6161 return NULL;
6162}
6163
6164static void skb_defer_free_flush(struct softnet_data *sd)
6165{
6166 struct sk_buff *skb, *next;
6167
6168 /* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
6169 if (!READ_ONCE(sd->defer_list))
6170 return;
6171
6172 spin_lock(&sd->defer_lock);
6173 skb = sd->defer_list;
6174 sd->defer_list = NULL;
6175 sd->defer_count = 0;
6176 spin_unlock(&sd->defer_lock);
6177
6178 while (skb != NULL) {
6179 next = skb->next;
6180 napi_consume_skb(skb, 1);
6181 skb = next;
6182 }
6183}
6184
6185#if defined(CONFIG_NET_RX_BUSY_POLL)
6186
6187static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
6188{
6189 if (!skip_schedule) {
6190 gro_normal_list(napi);
6191 __napi_schedule(napi);
6192 return;
6193 }
6194
6195 if (napi->gro_bitmask) {
6196 /* Flush too-old packets.
6197 * If HZ < 1000, flush all packets.
6198 */
6199 napi_gro_flush(napi, HZ >= 1000);
6200 }
6201
6202 gro_normal_list(napi);
6203 clear_bit(NAPI_STATE_SCHED, &napi->state);
6204}
6205
6206enum {
6207 NAPI_F_PREFER_BUSY_POLL = 1,
6208 NAPI_F_END_ON_RESCHED = 2,
6209};
6210
6211static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock,
6212 unsigned flags, u16 budget)
6213{
6214 bool skip_schedule = false;
6215 unsigned long timeout;
6216 int rc;
6217
6218 /* Busy polling means there is a high chance device driver hard irq
6219 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
6220 * set in napi_schedule_prep().
6221 * Since we are about to call napi->poll() once more, we can safely
6222 * clear NAPI_STATE_MISSED.
6223 *
6224 * Note: x86 could use a single "lock and ..." instruction
6225 * to perform these two clear_bit() operations.
6226 */
6227 clear_bit(NAPI_STATE_MISSED, &napi->state);
6228 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
6229
6230 local_bh_disable();
6231
6232 if (flags & NAPI_F_PREFER_BUSY_POLL) {
6233 napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs);
6234 timeout = READ_ONCE(napi->dev->gro_flush_timeout);
6235 if (napi->defer_hard_irqs_count && timeout) {
6236 hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED);
6237 skip_schedule = true;
6238 }
6239 }
6240
6241 /* All we really want here is to re-enable device interrupts.
6242 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
6243 */
6244 rc = napi->poll(napi, budget);
6245 /* We can't gro_normal_list() here, because napi->poll() might have
6246 * rearmed the napi (napi_complete_done()) in which case it could
6247 * already be running on another CPU.
6248 */
6249 trace_napi_poll(napi, rc, budget);
6250 netpoll_poll_unlock(have_poll_lock);
6251 if (rc == budget)
6252 __busy_poll_stop(napi, skip_schedule);
6253 local_bh_enable();
6254}
6255
6256static void __napi_busy_loop(unsigned int napi_id,
6257 bool (*loop_end)(void *, unsigned long),
6258 void *loop_end_arg, unsigned flags, u16 budget)
6259{
6260 unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
6261 int (*napi_poll)(struct napi_struct *napi, int budget);
6262 void *have_poll_lock = NULL;
6263 struct napi_struct *napi;
6264
6265 WARN_ON_ONCE(!rcu_read_lock_held());
6266
6267restart:
6268 napi_poll = NULL;
6269
6270 napi = napi_by_id(napi_id);
6271 if (!napi)
6272 return;
6273
6274 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
6275 preempt_disable();
6276 for (;;) {
6277 int work = 0;
6278
6279 local_bh_disable();
6280 if (!napi_poll) {
6281 unsigned long val = READ_ONCE(napi->state);
6282
6283 /* If multiple threads are competing for this napi,
6284 * we avoid dirtying napi->state as much as we can.
6285 */
6286 if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
6287 NAPIF_STATE_IN_BUSY_POLL)) {
6288 if (flags & NAPI_F_PREFER_BUSY_POLL)
6289 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6290 goto count;
6291 }
6292 if (cmpxchg(&napi->state, val,
6293 val | NAPIF_STATE_IN_BUSY_POLL |
6294 NAPIF_STATE_SCHED) != val) {
6295 if (flags & NAPI_F_PREFER_BUSY_POLL)
6296 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6297 goto count;
6298 }
6299 have_poll_lock = netpoll_poll_lock(napi);
6300 napi_poll = napi->poll;
6301 }
6302 work = napi_poll(napi, budget);
6303 trace_napi_poll(napi, work, budget);
6304 gro_normal_list(napi);
6305count:
6306 if (work > 0)
6307 __NET_ADD_STATS(dev_net(napi->dev),
6308 LINUX_MIB_BUSYPOLLRXPACKETS, work);
6309 skb_defer_free_flush(this_cpu_ptr(&softnet_data));
6310 local_bh_enable();
6311
6312 if (!loop_end || loop_end(loop_end_arg, start_time))
6313 break;
6314
6315 if (unlikely(need_resched())) {
6316 if (flags & NAPI_F_END_ON_RESCHED)
6317 break;
6318 if (napi_poll)
6319 busy_poll_stop(napi, have_poll_lock, flags, budget);
6320 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
6321 preempt_enable();
6322 rcu_read_unlock();
6323 cond_resched();
6324 rcu_read_lock();
6325 if (loop_end(loop_end_arg, start_time))
6326 return;
6327 goto restart;
6328 }
6329 cpu_relax();
6330 }
6331 if (napi_poll)
6332 busy_poll_stop(napi, have_poll_lock, flags, budget);
6333 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
6334 preempt_enable();
6335}
6336
6337void napi_busy_loop_rcu(unsigned int napi_id,
6338 bool (*loop_end)(void *, unsigned long),
6339 void *loop_end_arg, bool prefer_busy_poll, u16 budget)
6340{
6341 unsigned flags = NAPI_F_END_ON_RESCHED;
6342
6343 if (prefer_busy_poll)
6344 flags |= NAPI_F_PREFER_BUSY_POLL;
6345
6346 __napi_busy_loop(napi_id, loop_end, loop_end_arg, flags, budget);
6347}
6348
6349void napi_busy_loop(unsigned int napi_id,
6350 bool (*loop_end)(void *, unsigned long),
6351 void *loop_end_arg, bool prefer_busy_poll, u16 budget)
6352{
6353 unsigned flags = prefer_busy_poll ? NAPI_F_PREFER_BUSY_POLL : 0;
6354
6355 rcu_read_lock();
6356 __napi_busy_loop(napi_id, loop_end, loop_end_arg, flags, budget);
6357 rcu_read_unlock();
6358}
6359EXPORT_SYMBOL(napi_busy_loop);
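/*
 * Editorial example (not part of the original source): napi_busy_loop()
 * keeps polling the NAPI instance identified by @napi_id until @loop_end
 * returns true (it is passed @loop_end_arg and the start time captured by
 * busy_loop_current_time()).  A caller-defined termination condition could
 * look like this; "my_poll_ctx" is hypothetical.
 *
 *	struct my_poll_ctx {
 *		bool done;
 *	};
 *
 *	static bool my_loop_end(void *arg, unsigned long start_time)
 *	{
 *		struct my_poll_ctx *ctx = arg;
 *
 *		return ctx->done;	// stop once our condition is met
 *	}
 *
 *	struct my_poll_ctx ctx = { .done = false };
 *
 *	napi_busy_loop(napi_id, my_loop_end, &ctx, false, 64);
 */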
6360
6361#endif /* CONFIG_NET_RX_BUSY_POLL */
6362
6363static void napi_hash_add(struct napi_struct *napi)
6364{
6365 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state))
6366 return;
6367
6368 spin_lock(&napi_hash_lock);
6369
6370 /* 0..NR_CPUS range is reserved for sender_cpu use */
6371 do {
6372 if (unlikely(++napi_gen_id < MIN_NAPI_ID))
6373 napi_gen_id = MIN_NAPI_ID;
6374 } while (napi_by_id(napi_gen_id));
6375 napi->napi_id = napi_gen_id;
6376
6377 hlist_add_head_rcu(&napi->napi_hash_node,
6378 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
6379
6380 spin_unlock(&napi_hash_lock);
6381}
6382
6383/* Warning: the caller is responsible for making sure an RCU grace period
6384 * elapses before freeing the memory containing @napi.
6385 */
6386static void napi_hash_del(struct napi_struct *napi)
6387{
6388 spin_lock(&napi_hash_lock);
6389
6390 hlist_del_init_rcu(&napi->napi_hash_node);
6391
6392 spin_unlock(&napi_hash_lock);
6393}
6394
6395static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
6396{
6397 struct napi_struct *napi;
6398
6399 napi = container_of(timer, struct napi_struct, timer);
6400
6401 /* Note: we use a relaxed variant of napi_schedule_prep() that does not
6402 * set NAPI_STATE_MISSED, since we do not react to a device IRQ.
6403 */
6404 if (!napi_disable_pending(napi) &&
6405 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) {
6406 clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6407 __napi_schedule_irqoff(napi);
6408 }
6409
6410 return HRTIMER_NORESTART;
6411}
6412
6413static void init_gro_hash(struct napi_struct *napi)
6414{
6415 int i;
6416
6417 for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6418 INIT_LIST_HEAD(&napi->gro_hash[i].list);
6419 napi->gro_hash[i].count = 0;
6420 }
6421 napi->gro_bitmask = 0;
6422}
6423
6424int dev_set_threaded(struct net_device *dev, bool threaded)
6425{
6426 struct napi_struct *napi;
6427 int err = 0;
6428
6429 if (dev->threaded == threaded)
6430 return 0;
6431
6432 if (threaded) {
6433 list_for_each_entry(napi, &dev->napi_list, dev_list) {
6434 if (!napi->thread) {
6435 err = napi_kthread_create(napi);
6436 if (err) {
6437 threaded = false;
6438 break;
6439 }
6440 }
6441 }
6442 }
6443
6444 dev->threaded = threaded;
6445
6446 /* Make sure the kthread is created before the THREADED bit
6447 * is set.
6448 */
6449 smp_mb__before_atomic();
6450
6451 /* Setting/unsetting threaded mode on a napi might not take effect
6452 * immediately if the current napi instance is actively being polled.
6453 * In this case, the switch between threaded mode and softirq mode
6454 * will happen in the next round of napi_schedule().
6455 * This should not cause hiccups or stalls in the live traffic.
6456 */
6457 list_for_each_entry(napi, &dev->napi_list, dev_list)
6458 assign_bit(NAPI_STATE_THREADED, &napi->state, threaded);
6459
6460 return err;
6461}
6462EXPORT_SYMBOL(dev_set_threaded);
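/*
 * Editorial example (not part of the original source, and an assumption
 * about the usual calling context): a driver or configuration path can opt
 * a device into threaded NAPI, typically with RTNL held so dev->napi_list
 * is stable.  This mirrors what writing 1 to the device's "threaded" sysfs
 * attribute does.
 *
 *	rtnl_lock();
 *	err = dev_set_threaded(dev, true);
 *	rtnl_unlock();
 */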
6463
6464/**
6465 * netif_queue_set_napi - Associate queue with the napi
6466 * @dev: device to which NAPI and queue belong
6467 * @queue_index: Index of queue
6468 * @type: queue type as RX or TX
6469 * @napi: NAPI context, pass NULL to clear previously set NAPI
6470 *
6471 * Associate a queue with its NAPI context. This should be done after the
6472 * NAPI handler for the queue vector has been registered and the queues have
6473 * been mapped to the corresponding interrupt vector.
6474 */
6475void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
6476 enum netdev_queue_type type, struct napi_struct *napi)
6477{
6478 struct netdev_rx_queue *rxq;
6479 struct netdev_queue *txq;
6480
6481 if (WARN_ON_ONCE(napi && !napi->dev))
6482 return;
6483 if (dev->reg_state >= NETREG_REGISTERED)
6484 ASSERT_RTNL();
6485
6486 switch (type) {
6487 case NETDEV_QUEUE_TYPE_RX:
6488 rxq = __netif_get_rx_queue(dev, queue_index);
6489 rxq->napi = napi;
6490 return;
6491 case NETDEV_QUEUE_TYPE_TX:
6492 txq = netdev_get_tx_queue(dev, queue_index);
6493 txq->napi = napi;
6494 return;
6495 default:
6496 return;
6497 }
6498}
6499EXPORT_SYMBOL(netif_queue_set_napi);
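/*
 * Editorial example (not part of the original source): after registering its
 * NAPI instances and mapping queues to interrupt vectors, a driver records
 * the queue/NAPI association.  "priv->vec[i].napi" is a hypothetical layout
 * with one RX and one TX queue per vector.
 *
 *	for (i = 0; i < priv->num_vecs; i++) {
 *		netif_queue_set_napi(dev, i, NETDEV_QUEUE_TYPE_RX, &priv->vec[i].napi);
 *		netif_queue_set_napi(dev, i, NETDEV_QUEUE_TYPE_TX, &priv->vec[i].napi);
 *	}
 */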
6500
6501void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
6502 int (*poll)(struct napi_struct *, int), int weight)
6503{
6504 if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
6505 return;
6506
6507 INIT_LIST_HEAD(&napi->poll_list);
6508 INIT_HLIST_NODE(&napi->napi_hash_node);
6509 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6510 napi->timer.function = napi_watchdog;
6511 init_gro_hash(napi);
6512 napi->skb = NULL;
6513 INIT_LIST_HEAD(&napi->rx_list);
6514 napi->rx_count = 0;
6515 napi->poll = poll;
6516 if (weight > NAPI_POLL_WEIGHT)
6517 netdev_err_once(dev, "%s() called with weight %d\n", __func__,
6518 weight);
6519 napi->weight = weight;
6520 napi->dev = dev;
6521#ifdef CONFIG_NETPOLL
6522 napi->poll_owner = -1;
6523#endif
6524 napi->list_owner = -1;
6525 set_bit(NAPI_STATE_SCHED, &napi->state);
6526 set_bit(NAPI_STATE_NPSVC, &napi->state);
6527 list_add_rcu(&napi->dev_list, &dev->napi_list);
6528 napi_hash_add(napi);
6529 napi_get_frags_check(napi);
6530 /* Create kthread for this napi if dev->threaded is set.
6531 * Clear dev->threaded if kthread creation failed so that
6532 * threaded mode will not be enabled in napi_enable().
6533 */
6534 if (dev->threaded && napi_kthread_create(napi))
6535 dev->threaded = 0;
6536 netif_napi_set_irq(napi, -1);
6537}
6538EXPORT_SYMBOL(netif_napi_add_weight);
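/*
 * Editorial note and example (not part of the original source): most drivers
 * do not call netif_napi_add_weight() directly; they use the
 * netif_napi_add() wrapper from <linux/netdevice.h>, which supplies the
 * default weight.  "mydrv_poll" is the hypothetical poll callback sketched
 * near napi_complete_done() above.
 *
 *	netif_napi_add(dev, &priv->napi, mydrv_poll);
 */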
6539
6540void napi_disable(struct napi_struct *n)
6541{
6542 unsigned long val, new;
6543
6544 might_sleep();
6545 set_bit(NAPI_STATE_DISABLE, &n->state);
6546
6547 val = READ_ONCE(n->state);
6548 do {
6549 while (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) {
6550 usleep_range(20, 200);
6551 val = READ_ONCE(n->state);
6552 }
6553
6554 new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC;
6555 new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL);
6556 } while (!try_cmpxchg(&n->state, &val, new));
6557
6558 hrtimer_cancel(&n->timer);
6559
6560 clear_bit(NAPI_STATE_DISABLE, &n->state);
6561}
6562EXPORT_SYMBOL(napi_disable);
6563
6564/**
6565 * napi_enable - enable NAPI scheduling
6566 * @n: NAPI context
6567 *
6568 * Re-enable NAPI scheduling on this context.
6569 * Must be paired with napi_disable().
6570 */
6571void napi_enable(struct napi_struct *n)
6572{
6573 unsigned long new, val = READ_ONCE(n->state);
6574
6575 do {
6576 BUG_ON(!test_bit(NAPI_STATE_SCHED, &val));
6577
6578 new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC);
6579 if (n->dev->threaded && n->thread)
6580 new |= NAPIF_STATE_THREADED;
6581 } while (!try_cmpxchg(&n->state, &val, new));
6582}
6583EXPORT_SYMBOL(napi_enable);
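/*
 * Editorial example (not part of the original source): napi_enable() and
 * napi_disable() are typically paired in a driver's ndo_open/ndo_stop
 * callbacks, after netif_napi_add() at probe time.  "mydrv_priv" is
 * hypothetical.
 *
 *	static int mydrv_open(struct net_device *dev)
 *	{
 *		struct mydrv_priv *priv = netdev_priv(dev);
 *
 *		napi_enable(&priv->napi);	// allow scheduling again
 *		return 0;
 *	}
 *
 *	static int mydrv_stop(struct net_device *dev)
 *	{
 *		struct mydrv_priv *priv = netdev_priv(dev);
 *
 *		napi_disable(&priv->napi);	// waits for any in-flight poll
 *		return 0;
 *	}
 */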
6584
6585static void flush_gro_hash(struct napi_struct *napi)
6586{
6587 int i;
6588
6589 for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6590 struct sk_buff *skb, *n;
6591
6592 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
6593 kfree_skb(skb);
6594 napi->gro_hash[i].count = 0;
6595 }
6596}
6597
6598/* Must be called in process context */
6599void __netif_napi_del(struct napi_struct *napi)
6600{
6601 if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state))
6602 return;
6603
6604 napi_hash_del(napi);
6605 list_del_rcu(&napi->dev_list);
6606 napi_free_frags(napi);
6607
6608 flush_gro_hash(napi);
6609 napi->gro_bitmask = 0;
6610
6611 if (napi->thread) {
6612 kthread_stop(napi->thread);
6613 napi->thread = NULL;
6614 }
6615}
6616EXPORT_SYMBOL(__netif_napi_del);
6617
6618static int __napi_poll(struct napi_struct *n, bool *repoll)
6619{
6620 int work, weight;
6621
6622 weight = n->weight;
6623
6624 /* This NAPI_STATE_SCHED test is for avoiding a race
6625 * with netpoll's poll_napi(). Only the entity which
6626 * obtains the lock and sees NAPI_STATE_SCHED set will
6627 * actually make the ->poll() call. Therefore we avoid
6628 * accidentally calling ->poll() when NAPI is not scheduled.
6629 */
6630 work = 0;
6631 if (napi_is_scheduled(n)) {
6632 work = n->poll(n, weight);
6633 trace_napi_poll(n, work, weight);
6634
6635 xdp_do_check_flushed(n);
6636 }
6637
6638 if (unlikely(work > weight))
6639 netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
6640 n->poll, work, weight);
6641
6642 if (likely(work < weight))
6643 return work;
6644
6645 /* Drivers must not modify the NAPI state if they
6646 * consume the entire weight. In such cases this code
6647 * still "owns" the NAPI instance and therefore can
6648 * move the instance around on the list at-will.
6649 */
6650 if (unlikely(napi_disable_pending(n))) {
6651 napi_complete(n);
6652 return work;
6653 }
6654
6655 /* The NAPI context has more processing work, but busy-polling
6656 * is preferred. Exit early.
6657 */
6658 if (napi_prefer_busy_poll(n)) {
6659 if (napi_complete_done(n, work)) {
6660 /* If timeout is not set, we need to make sure
6661 * that the NAPI is re-scheduled.
6662 */
6663 napi_schedule(n);
6664 }
6665 return work;
6666 }
6667
6668 if (n->gro_bitmask) {
6669 /* Flush too-old packets.
6670 * If HZ < 1000, flush all packets.
6671 */
6672 napi_gro_flush(n, HZ >= 1000);
6673 }
6674
6675 gro_normal_list(n);
6676
6677 /* Some drivers may have called napi_schedule
6678 * prior to exhausting their budget.
6679 */
6680 if (unlikely(!list_empty(&n->poll_list))) {
6681 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
6682 n->dev ? n->dev->name : "backlog");
6683 return work;
6684 }
6685
6686 *repoll = true;
6687
6688 return work;
6689}
6690
6691static int napi_poll(struct napi_struct *n, struct list_head *repoll)
6692{
6693 bool do_repoll = false;
6694 void *have;
6695 int work;
6696
6697 list_del_init(&n->poll_list);
6698
6699 have = netpoll_poll_lock(n);
6700
6701 work = __napi_poll(n, &do_repoll);
6702
6703 if (do_repoll)
6704 list_add_tail(&n->poll_list, repoll);
6705
6706 netpoll_poll_unlock(have);
6707
6708 return work;
6709}
6710
6711static int napi_thread_wait(struct napi_struct *napi)
6712{
6713 bool woken = false;
6714
6715 set_current_state(TASK_INTERRUPTIBLE);
6716
6717 while (!kthread_should_stop()) {
6718 /* Testing SCHED_THREADED bit here to make sure the current
6719 * kthread owns this napi and could poll on this napi.
6720 * Testing SCHED bit is not enough because SCHED bit might be
6721 * set by some other busy poll thread or by napi_disable().
6722 */
6723 if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) {
6724 WARN_ON(!list_empty(&napi->poll_list));
6725 __set_current_state(TASK_RUNNING);
6726 return 0;
6727 }
6728
6729 schedule();
6730 /* woken being true indicates this thread owns this napi. */
6731 woken = true;
6732 set_current_state(TASK_INTERRUPTIBLE);
6733 }
6734 __set_current_state(TASK_RUNNING);
6735
6736 return -1;
6737}
6738
6739static int napi_threaded_poll(void *data)
6740{
6741 struct napi_struct *napi = data;
6742 struct softnet_data *sd;
6743 void *have;
6744
6745 while (!napi_thread_wait(napi)) {
6746 unsigned long last_qs = jiffies;
6747
6748 for (;;) {
6749 bool repoll = false;
6750
6751 local_bh_disable();
6752 sd = this_cpu_ptr(&softnet_data);
6753 sd->in_napi_threaded_poll = true;
6754
6755 have = netpoll_poll_lock(napi);
6756 __napi_poll(napi, &repoll);
6757 netpoll_poll_unlock(have);
6758
6759 sd->in_napi_threaded_poll = false;
6760 barrier();
6761
6762 if (sd_has_rps_ipi_waiting(sd)) {
6763 local_irq_disable();
6764 net_rps_action_and_irq_enable(sd);
6765 }
6766 skb_defer_free_flush(sd);
6767 local_bh_enable();
6768
6769 if (!repoll)
6770 break;
6771
6772 rcu_softirq_qs_periodic(last_qs);
6773 cond_resched();
6774 }
6775 }
6776 return 0;
6777}
6778
6779static __latent_entropy void net_rx_action(struct softirq_action *h)
6780{
6781 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
6782 unsigned long time_limit = jiffies +
6783 usecs_to_jiffies(READ_ONCE(net_hotdata.netdev_budget_usecs));
6784 int budget = READ_ONCE(net_hotdata.netdev_budget);
6785 LIST_HEAD(list);
6786 LIST_HEAD(repoll);
6787
6788start:
6789 sd->in_net_rx_action = true;
6790 local_irq_disable();
6791 list_splice_init(&sd->poll_list, &list);
6792 local_irq_enable();
6793
6794 for (;;) {
6795 struct napi_struct *n;
6796
6797 skb_defer_free_flush(sd);
6798
6799 if (list_empty(&list)) {
6800 if (list_empty(&repoll)) {
6801 sd->in_net_rx_action = false;
6802 barrier();
6803 /* We need to check if ____napi_schedule()
6804 * had refilled poll_list while
6805 * sd->in_net_rx_action was true.
6806 */
6807 if (!list_empty(&sd->poll_list))
6808 goto start;
6809 if (!sd_has_rps_ipi_waiting(sd))
6810 goto end;
6811 }
6812 break;
6813 }
6814
6815 n = list_first_entry(&list, struct napi_struct, poll_list);
6816 budget -= napi_poll(n, &repoll);
6817
6818 /* If the softirq window is exhausted then punt.
6819 * Allow this to run for up to 2 jiffies, since that allows
6820 * an average latency of 1.5/HZ.
6821 */
6822 if (unlikely(budget <= 0 ||
6823 time_after_eq(jiffies, time_limit))) {
6824 sd->time_squeeze++;
6825 break;
6826 }
6827 }
6828
6829 local_irq_disable();
6830
6831 list_splice_tail_init(&sd->poll_list, &list);
6832 list_splice_tail(&repoll, &list);
6833 list_splice(&list, &sd->poll_list);
6834 if (!list_empty(&sd->poll_list))
6835 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
6836 else
6837 sd->in_net_rx_action = false;
6838
6839 net_rps_action_and_irq_enable(sd);
6840end:;
6841}
6842
6843struct netdev_adjacent {
6844 struct net_device *dev;
6845 netdevice_tracker dev_tracker;
6846
6847 /* upper master flag, there can only be one master device per list */
6848 bool master;
6849
6850 /* lookup ignore flag */
6851 bool ignore;
6852
6853 /* counter for the number of times this device was added to us */
6854 u16 ref_nr;
6855
6856 /* private field for the users */
6857 void *private;
6858
6859 struct list_head list;
6860 struct rcu_head rcu;
6861};
6862
6863static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
6864 struct list_head *adj_list)
6865{
6866 struct netdev_adjacent *adj;
6867
6868 list_for_each_entry(adj, adj_list, list) {
6869 if (adj->dev == adj_dev)
6870 return adj;
6871 }
6872 return NULL;
6873}
6874
6875static int ____netdev_has_upper_dev(struct net_device *upper_dev,
6876 struct netdev_nested_priv *priv)
6877{
6878 struct net_device *dev = (struct net_device *)priv->data;
6879
6880 return upper_dev == dev;
6881}
6882
6883/**
6884 * netdev_has_upper_dev - Check if device is linked to an upper device
6885 * @dev: device
6886 * @upper_dev: upper device to check
6887 *
6888 * Find out if a device is linked to the specified upper device and return
6889 * true if it is. Note that this checks only the immediate upper device,
6890 * not the complete stack of devices. The caller must hold the RTNL lock.
6891 */
6892bool netdev_has_upper_dev(struct net_device *dev,
6893 struct net_device *upper_dev)
6894{
6895 struct netdev_nested_priv priv = {
6896 .data = (void *)upper_dev,
6897 };
6898
6899 ASSERT_RTNL();
6900
6901 return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
6902 &priv);
6903}
6904EXPORT_SYMBOL(netdev_has_upper_dev);
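/*
 * Editorial example (not part of the original source): checking whether a
 * port is already directly linked to a given upper device, under RTNL.
 * The -EEXIST policy is hypothetical.
 *
 *	ASSERT_RTNL();
 *	if (netdev_has_upper_dev(port_dev, master_dev))
 *		return -EEXIST;
 */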
6905
6906/**
6907 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
6908 * @dev: device
6909 * @upper_dev: upper device to check
6910 *
6911 * Find out if a device is linked to the specified upper device and return
6912 * true if it is. Note that this checks the entire upper device chain.
6913 * The caller must hold the RCU read lock.
6914 */
6915
6916bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
6917 struct net_device *upper_dev)
6918{
6919 struct netdev_nested_priv priv = {
6920 .data = (void *)upper_dev,
6921 };
6922
6923 return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
6924 &priv);
6925}
6926EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
6927
6928/**
6929 * netdev_has_any_upper_dev - Check if device is linked to some device
6930 * @dev: device
6931 *
6932 * Find out if a device is linked to an upper device and return true in case
6933 * it is. The caller must hold the RTNL lock.
6934 */
6935bool netdev_has_any_upper_dev(struct net_device *dev)
6936{
6937 ASSERT_RTNL();
6938
6939 return !list_empty(&dev->adj_list.upper);
6940}
6941EXPORT_SYMBOL(netdev_has_any_upper_dev);
6942
6943/**
6944 * netdev_master_upper_dev_get - Get master upper device
6945 * @dev: device
6946 *
6947 * Find a master upper device and return pointer to it or NULL in case
6948 * it's not there. The caller must hold the RTNL lock.
6949 */
6950struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
6951{
6952 struct netdev_adjacent *upper;
6953
6954 ASSERT_RTNL();
6955
6956 if (list_empty(&dev->adj_list.upper))
6957 return NULL;
6958
6959 upper = list_first_entry(&dev->adj_list.upper,
6960 struct netdev_adjacent, list);
6961 if (likely(upper->master))
6962 return upper->dev;
6963 return NULL;
6964}
6965EXPORT_SYMBOL(netdev_master_upper_dev_get);
6966
6967static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
6968{
6969 struct netdev_adjacent *upper;
6970
6971 ASSERT_RTNL();
6972
6973 if (list_empty(&dev->adj_list.upper))
6974 return NULL;
6975
6976 upper = list_first_entry(&dev->adj_list.upper,
6977 struct netdev_adjacent, list);
6978 if (likely(upper->master) && !upper->ignore)
6979 return upper->dev;
6980 return NULL;
6981}
6982
6983/**
6984 * netdev_has_any_lower_dev - Check if device is linked to some device
6985 * @dev: device
6986 *
6987 * Find out if a device is linked to a lower device and return true in case
6988 * it is. The caller must hold the RTNL lock.
6989 */
6990static bool netdev_has_any_lower_dev(struct net_device *dev)
6991{
6992 ASSERT_RTNL();
6993
6994 return !list_empty(&dev->adj_list.lower);
6995}
6996
6997void *netdev_adjacent_get_private(struct list_head *adj_list)
6998{
6999 struct netdev_adjacent *adj;
7000
7001 adj = list_entry(adj_list, struct netdev_adjacent, list);
7002
7003 return adj->private;
7004}
7005EXPORT_SYMBOL(netdev_adjacent_get_private);
7006
7007/**
7008 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
7009 * @dev: device
7010 * @iter: list_head ** of the current position
7011 *
7012 * Gets the next device from the dev's upper list, starting from iter
7013 * position. The caller must hold RCU read lock.
7014 */
7015struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
7016 struct list_head **iter)
7017{
7018 struct netdev_adjacent *upper;
7019
7020 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
7021
7022 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7023
7024 if (&upper->list == &dev->adj_list.upper)
7025 return NULL;
7026
7027 *iter = &upper->list;
7028
7029 return upper->dev;
7030}
7031EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
7032
7033static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
7034 struct list_head **iter,
7035 bool *ignore)
7036{
7037 struct netdev_adjacent *upper;
7038
7039 upper = list_entry((*iter)->next, struct netdev_adjacent, list);
7040
7041 if (&upper->list == &dev->adj_list.upper)
7042 return NULL;
7043
7044 *iter = &upper->list;
7045 *ignore = upper->ignore;
7046
7047 return upper->dev;
7048}
7049
7050static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
7051 struct list_head **iter)
7052{
7053 struct netdev_adjacent *upper;
7054
7055 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
7056
7057 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7058
7059 if (&upper->list == &dev->adj_list.upper)
7060 return NULL;
7061
7062 *iter = &upper->list;
7063
7064 return upper->dev;
7065}
7066
7067static int __netdev_walk_all_upper_dev(struct net_device *dev,
7068 int (*fn)(struct net_device *dev,
7069 struct netdev_nested_priv *priv),
7070 struct netdev_nested_priv *priv)
7071{
7072 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7073 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7074 int ret, cur = 0;
7075 bool ignore;
7076
7077 now = dev;
7078 iter = &dev->adj_list.upper;
7079
7080 while (1) {
7081 if (now != dev) {
7082 ret = fn(now, priv);
7083 if (ret)
7084 return ret;
7085 }
7086
7087 next = NULL;
7088 while (1) {
7089 udev = __netdev_next_upper_dev(now, &iter, &ignore);
7090 if (!udev)
7091 break;
7092 if (ignore)
7093 continue;
7094
7095 next = udev;
7096 niter = &udev->adj_list.upper;
7097 dev_stack[cur] = now;
7098 iter_stack[cur++] = iter;
7099 break;
7100 }
7101
7102 if (!next) {
7103 if (!cur)
7104 return 0;
7105 next = dev_stack[--cur];
7106 niter = iter_stack[cur];
7107 }
7108
7109 now = next;
7110 iter = niter;
7111 }
7112
7113 return 0;
7114}
7115
7116int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
7117 int (*fn)(struct net_device *dev,
7118 struct netdev_nested_priv *priv),
7119 struct netdev_nested_priv *priv)
7120{
7121 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7122 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7123 int ret, cur = 0;
7124
7125 now = dev;
7126 iter = &dev->adj_list.upper;
7127
7128 while (1) {
7129 if (now != dev) {
7130 ret = fn(now, priv);
7131 if (ret)
7132 return ret;
7133 }
7134
7135 next = NULL;
7136 while (1) {
7137 udev = netdev_next_upper_dev_rcu(now, &iter);
7138 if (!udev)
7139 break;
7140
7141 next = udev;
7142 niter = &udev->adj_list.upper;
7143 dev_stack[cur] = now;
7144 iter_stack[cur++] = iter;
7145 break;
7146 }
7147
7148 if (!next) {
7149 if (!cur)
7150 return 0;
7151 next = dev_stack[--cur];
7152 niter = iter_stack[cur];
7153 }
7154
7155 now = next;
7156 iter = niter;
7157 }
7158
7159 return 0;
7160}
7161EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
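/*
 * Editorial example (not part of the original source): the walker visits
 * every device stacked above @dev (depth first, bounded by MAX_NEST_DEV) and
 * stops as soon as @fn returns non-zero.  For instance, counting the upper
 * devices with a caller-owned counter passed through priv->data:
 *
 *	static int count_upper(struct net_device *upper,
 *			       struct netdev_nested_priv *priv)
 *	{
 *		(*(int *)priv->data)++;
 *		return 0;		// keep walking
 *	}
 *
 *	int n = 0;
 *	struct netdev_nested_priv priv = { .data = &n };
 *
 *	rcu_read_lock();
 *	netdev_walk_all_upper_dev_rcu(dev, count_upper, &priv);
 *	rcu_read_unlock();
 */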
7162
7163static bool __netdev_has_upper_dev(struct net_device *dev,
7164 struct net_device *upper_dev)
7165{
7166 struct netdev_nested_priv priv = {
7167 .flags = 0,
7168 .data = (void *)upper_dev,
7169 };
7170
7171 ASSERT_RTNL();
7172
7173 return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
7174 &priv);
7175}
7176
7177/**
7178 * netdev_lower_get_next_private - Get the next ->private from the
7179 * lower neighbour list
7180 * @dev: device
7181 * @iter: list_head ** of the current position
7182 *
7183 * Gets the next netdev_adjacent->private from the dev's lower neighbour
7184 * list, starting from the iter position. The caller must either hold the
7185 * RTNL lock or use its own locking that guarantees that the neighbour lower
7186 * list will remain unchanged.
7187 */
7188void *netdev_lower_get_next_private(struct net_device *dev,
7189 struct list_head **iter)
7190{
7191 struct netdev_adjacent *lower;
7192
7193 lower = list_entry(*iter, struct netdev_adjacent, list);
7194
7195 if (&lower->list == &dev->adj_list.lower)
7196 return NULL;
7197
7198 *iter = lower->list.next;
7199
7200 return lower->private;
7201}
7202EXPORT_SYMBOL(netdev_lower_get_next_private);
7203
7204/**
7205 * netdev_lower_get_next_private_rcu - Get the next ->private from the
7206 * lower neighbour list, RCU
7207 * variant
7208 * @dev: device
7209 * @iter: list_head ** of the current position
7210 *
7211 * Gets the next netdev_adjacent->private from the dev's lower neighbour
7212 * list, starting from iter position. The caller must hold RCU read lock.
7213 */
7214void *netdev_lower_get_next_private_rcu(struct net_device *dev,
7215 struct list_head **iter)
7216{
7217 struct netdev_adjacent *lower;
7218
7219 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
7220
7221 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7222
7223 if (&lower->list == &dev->adj_list.lower)
7224 return NULL;
7225
7226 *iter = &lower->list;
7227
7228 return lower->private;
7229}
7230EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
7231
7232/**
7233 * netdev_lower_get_next - Get the next device from the lower neighbour
7234 * list
7235 * @dev: device
7236 * @iter: list_head ** of the current position
7237 *
7238 * Gets the next netdev_adjacent from the dev's lower neighbour
7239 * list, starting from iter position. The caller must hold RTNL lock or
7240 * its own locking that guarantees that the neighbour lower
7241 * list will remain unchanged.
7242 */
7243void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
7244{
7245 struct netdev_adjacent *lower;
7246
7247 lower = list_entry(*iter, struct netdev_adjacent, list);
7248
7249 if (&lower->list == &dev->adj_list.lower)
7250 return NULL;
7251
7252 *iter = lower->list.next;
7253
7254 return lower->dev;
7255}
7256EXPORT_SYMBOL(netdev_lower_get_next);
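/*
 * Editorial example (not part of the original source): iterating the direct
 * lower devices with this cursor API while holding the RTNL lock (or
 * equivalent locking).  <linux/netdevice.h> provides the
 * netdev_for_each_lower_dev() helper that wraps the same pattern.
 *
 *	struct list_head *iter = dev->adj_list.lower.next;
 *	struct net_device *ldev;
 *
 *	while ((ldev = netdev_lower_get_next(dev, &iter)) != NULL)
 *		netdev_info(ldev, "lower device of %s\n", dev->name);
 */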
7257
7258static struct net_device *netdev_next_lower_dev(struct net_device *dev,
7259 struct list_head **iter)
7260{
7261 struct netdev_adjacent *lower;
7262
7263 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7264
7265 if (&lower->list == &dev->adj_list.lower)
7266 return NULL;
7267
7268 *iter = &lower->list;
7269
7270 return lower->dev;
7271}
7272
7273static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
7274 struct list_head **iter,
7275 bool *ignore)
7276{
7277 struct netdev_adjacent *lower;
7278
7279 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7280
7281 if (&lower->list == &dev->adj_list.lower)
7282 return NULL;
7283
7284 *iter = &lower->list;
7285 *ignore = lower->ignore;
7286
7287 return lower->dev;
7288}
7289
7290int netdev_walk_all_lower_dev(struct net_device *dev,
7291 int (*fn)(struct net_device *dev,
7292 struct netdev_nested_priv *priv),
7293 struct netdev_nested_priv *priv)
7294{
7295 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7296 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7297 int ret, cur = 0;
7298
7299 now = dev;
7300 iter = &dev->adj_list.lower;
7301
7302 while (1) {
7303 if (now != dev) {
7304 ret = fn(now, priv);
7305 if (ret)
7306 return ret;
7307 }
7308
7309 next = NULL;
7310 while (1) {
7311 ldev = netdev_next_lower_dev(now, &iter);
7312 if (!ldev)
7313 break;
7314
7315 next = ldev;
7316 niter = &ldev->adj_list.lower;
7317 dev_stack[cur] = now;
7318 iter_stack[cur++] = iter;
7319 break;
7320 }
7321
7322 if (!next) {
7323 if (!cur)
7324 return 0;
7325 next = dev_stack[--cur];
7326 niter = iter_stack[cur];
7327 }
7328
7329 now = next;
7330 iter = niter;
7331 }
7332
7333 return 0;
7334}
7335EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
7336
7337static int __netdev_walk_all_lower_dev(struct net_device *dev,
7338 int (*fn)(struct net_device *dev,
7339 struct netdev_nested_priv *priv),
7340 struct netdev_nested_priv *priv)
7341{
7342 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7343 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7344 int ret, cur = 0;
7345 bool ignore;
7346
7347 now = dev;
7348 iter = &dev->adj_list.lower;
7349
7350 while (1) {
7351 if (now != dev) {
7352 ret = fn(now, priv);
7353 if (ret)
7354 return ret;
7355 }
7356
7357 next = NULL;
7358 while (1) {
7359 ldev = __netdev_next_lower_dev(now, &iter, &ignore);
7360 if (!ldev)
7361 break;
7362 if (ignore)
7363 continue;
7364
7365 next = ldev;
7366 niter = &ldev->adj_list.lower;
7367 dev_stack[cur] = now;
7368 iter_stack[cur++] = iter;
7369 break;
7370 }
7371
7372 if (!next) {
7373 if (!cur)
7374 return 0;
7375 next = dev_stack[--cur];
7376 niter = iter_stack[cur];
7377 }
7378
7379 now = next;
7380 iter = niter;
7381 }
7382
7383 return 0;
7384}
7385
7386struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
7387 struct list_head **iter)
7388{
7389 struct netdev_adjacent *lower;
7390
7391 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7392 if (&lower->list == &dev->adj_list.lower)
7393 return NULL;
7394
7395 *iter = &lower->list;
7396
7397 return lower->dev;
7398}
7399EXPORT_SYMBOL(netdev_next_lower_dev_rcu);
7400
7401static u8 __netdev_upper_depth(struct net_device *dev)
7402{
7403 struct net_device *udev;
7404 struct list_head *iter;
7405 u8 max_depth = 0;
7406 bool ignore;
7407
7408 for (iter = &dev->adj_list.upper,
7409 udev = __netdev_next_upper_dev(dev, &iter, &ignore);
7410 udev;
7411 udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
7412 if (ignore)
7413 continue;
7414 if (max_depth < udev->upper_level)
7415 max_depth = udev->upper_level;
7416 }
7417
7418 return max_depth;
7419}
7420
7421static u8 __netdev_lower_depth(struct net_device *dev)
7422{
7423 struct net_device *ldev;
7424 struct list_head *iter;
7425 u8 max_depth = 0;
7426 bool ignore;
7427
7428 for (iter = &dev->adj_list.lower,
7429 ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
7430 ldev;
7431 ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
7432 if (ignore)
7433 continue;
7434 if (max_depth < ldev->lower_level)
7435 max_depth = ldev->lower_level;
7436 }
7437
7438 return max_depth;
7439}
7440
7441static int __netdev_update_upper_level(struct net_device *dev,
7442 struct netdev_nested_priv *__unused)
7443{
7444 dev->upper_level = __netdev_upper_depth(dev) + 1;
7445 return 0;
7446}
7447
7448#ifdef CONFIG_LOCKDEP
7449static LIST_HEAD(net_unlink_list);
7450
7451static void net_unlink_todo(struct net_device *dev)
7452{
7453 if (list_empty(&dev->unlink_list))
7454 list_add_tail(&dev->unlink_list, &net_unlink_list);
7455}
7456#endif
7457
7458static int __netdev_update_lower_level(struct net_device *dev,
7459 struct netdev_nested_priv *priv)
7460{
7461 dev->lower_level = __netdev_lower_depth(dev) + 1;
7462
7463#ifdef CONFIG_LOCKDEP
7464 if (!priv)
7465 return 0;
7466
7467 if (priv->flags & NESTED_SYNC_IMM)
7468 dev->nested_level = dev->lower_level - 1;
7469 if (priv->flags & NESTED_SYNC_TODO)
7470 net_unlink_todo(dev);
7471#endif
7472 return 0;
7473}
7474
7475int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
7476 int (*fn)(struct net_device *dev,
7477 struct netdev_nested_priv *priv),
7478 struct netdev_nested_priv *priv)
7479{
7480 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7481 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7482 int ret, cur = 0;
7483
7484 now = dev;
7485 iter = &dev->adj_list.lower;
7486
7487 while (1) {
7488 if (now != dev) {
7489 ret = fn(now, priv);
7490 if (ret)
7491 return ret;
7492 }
7493
7494 next = NULL;
7495 while (1) {
7496 ldev = netdev_next_lower_dev_rcu(now, &iter);
7497 if (!ldev)
7498 break;
7499
7500 next = ldev;
7501 niter = &ldev->adj_list.lower;
7502 dev_stack[cur] = now;
7503 iter_stack[cur++] = iter;
7504 break;
7505 }
7506
7507 if (!next) {
7508 if (!cur)
7509 return 0;
7510 next = dev_stack[--cur];
7511 niter = iter_stack[cur];
7512 }
7513
7514 now = next;
7515 iter = niter;
7516 }
7517
7518 return 0;
7519}
7520EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
7521
7522/**
7523 * netdev_lower_get_first_private_rcu - Get the first ->private from the
7524 * lower neighbour list, RCU
7525 * variant
7526 * @dev: device
7527 *
7528 * Gets the first netdev_adjacent->private from the dev's lower neighbour
7529 * list. The caller must hold RCU read lock.
7530 */
7531void *netdev_lower_get_first_private_rcu(struct net_device *dev)
7532{
7533 struct netdev_adjacent *lower;
7534
7535 lower = list_first_or_null_rcu(&dev->adj_list.lower,
7536 struct netdev_adjacent, list);
7537 if (lower)
7538 return lower->private;
7539 return NULL;
7540}
7541EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
7542
7543/**
7544 * netdev_master_upper_dev_get_rcu - Get master upper device
7545 * @dev: device
7546 *
7547 * Find a master upper device and return pointer to it or NULL in case
7548 * it's not there. The caller must hold the RCU read lock.
7549 */
7550struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
7551{
7552 struct netdev_adjacent *upper;
7553
7554 upper = list_first_or_null_rcu(&dev->adj_list.upper,
7555 struct netdev_adjacent, list);
7556 if (upper && likely(upper->master))
7557 return upper->dev;
7558 return NULL;
7559}
7560EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
7561
7562static int netdev_adjacent_sysfs_add(struct net_device *dev,
7563 struct net_device *adj_dev,
7564 struct list_head *dev_list)
7565{
7566 char linkname[IFNAMSIZ+7];
7567
7568 sprintf(linkname, dev_list == &dev->adj_list.upper ?
7569 "upper_%s" : "lower_%s", adj_dev->name);
7570 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
7571 linkname);
7572}
7573static void netdev_adjacent_sysfs_del(struct net_device *dev,
7574 char *name,
7575 struct list_head *dev_list)
7576{
7577 char linkname[IFNAMSIZ+7];
7578
7579 sprintf(linkname, dev_list == &dev->adj_list.upper ?
7580 "upper_%s" : "lower_%s", name);
7581 sysfs_remove_link(&(dev->dev.kobj), linkname);
7582}
7583
7584static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
7585 struct net_device *adj_dev,
7586 struct list_head *dev_list)
7587{
7588 return (dev_list == &dev->adj_list.upper ||
7589 dev_list == &dev->adj_list.lower) &&
7590 net_eq(dev_net(dev), dev_net(adj_dev));
7591}
7592
7593static int __netdev_adjacent_dev_insert(struct net_device *dev,
7594 struct net_device *adj_dev,
7595 struct list_head *dev_list,
7596 void *private, bool master)
7597{
7598 struct netdev_adjacent *adj;
7599 int ret;
7600
7601 adj = __netdev_find_adj(adj_dev, dev_list);
7602
7603 if (adj) {
7604 adj->ref_nr += 1;
7605 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
7606 dev->name, adj_dev->name, adj->ref_nr);
7607
7608 return 0;
7609 }
7610
7611 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
7612 if (!adj)
7613 return -ENOMEM;
7614
7615 adj->dev = adj_dev;
7616 adj->master = master;
7617 adj->ref_nr = 1;
7618 adj->private = private;
7619 adj->ignore = false;
7620 netdev_hold(adj_dev, &adj->dev_tracker, GFP_KERNEL);
7621
7622 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
7623 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
7624
7625 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
7626 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
7627 if (ret)
7628 goto free_adj;
7629 }
7630
7631 /* Ensure that master link is always the first item in list. */
7632 if (master) {
7633 ret = sysfs_create_link(&(dev->dev.kobj),
7634 &(adj_dev->dev.kobj), "master");
7635 if (ret)
7636 goto remove_symlinks;
7637
7638 list_add_rcu(&adj->list, dev_list);
7639 } else {
7640 list_add_tail_rcu(&adj->list, dev_list);
7641 }
7642
7643 return 0;
7644
7645remove_symlinks:
7646 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
7647 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
7648free_adj:
7649 netdev_put(adj_dev, &adj->dev_tracker);
7650 kfree(adj);
7651
7652 return ret;
7653}
7654
7655static void __netdev_adjacent_dev_remove(struct net_device *dev,
7656 struct net_device *adj_dev,
7657 u16 ref_nr,
7658 struct list_head *dev_list)
7659{
7660 struct netdev_adjacent *adj;
7661
7662 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
7663 dev->name, adj_dev->name, ref_nr);
7664
7665 adj = __netdev_find_adj(adj_dev, dev_list);
7666
7667 if (!adj) {
7668 pr_err("Adjacency does not exist for device %s from %s\n",
7669 dev->name, adj_dev->name);
7670 WARN_ON(1);
7671 return;
7672 }
7673
7674 if (adj->ref_nr > ref_nr) {
7675 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
7676 dev->name, adj_dev->name, ref_nr,
7677 adj->ref_nr - ref_nr);
7678 adj->ref_nr -= ref_nr;
7679 return;
7680 }
7681
7682 if (adj->master)
7683 sysfs_remove_link(&(dev->dev.kobj), "master");
7684
7685 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
7686 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
7687
7688 list_del_rcu(&adj->list);
7689 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
7690 adj_dev->name, dev->name, adj_dev->name);
7691 netdev_put(adj_dev, &adj->dev_tracker);
7692 kfree_rcu(adj, rcu);
7693}
7694
7695static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
7696 struct net_device *upper_dev,
7697 struct list_head *up_list,
7698 struct list_head *down_list,
7699 void *private, bool master)
7700{
7701 int ret;
7702
7703 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
7704 private, master);
7705 if (ret)
7706 return ret;
7707
7708 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
7709 private, false);
7710 if (ret) {
7711 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
7712 return ret;
7713 }
7714
7715 return 0;
7716}
7717
7718static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
7719 struct net_device *upper_dev,
7720 u16 ref_nr,
7721 struct list_head *up_list,
7722 struct list_head *down_list)
7723{
7724 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
7725 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
7726}
7727
7728static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
7729 struct net_device *upper_dev,
7730 void *private, bool master)
7731{
7732 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
7733 &dev->adj_list.upper,
7734 &upper_dev->adj_list.lower,
7735 private, master);
7736}
7737
7738static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
7739 struct net_device *upper_dev)
7740{
7741 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
7742 &dev->adj_list.upper,
7743 &upper_dev->adj_list.lower);
7744}
7745
7746static int __netdev_upper_dev_link(struct net_device *dev,
7747 struct net_device *upper_dev, bool master,
7748 void *upper_priv, void *upper_info,
7749 struct netdev_nested_priv *priv,
7750 struct netlink_ext_ack *extack)
7751{
7752 struct netdev_notifier_changeupper_info changeupper_info = {
7753 .info = {
7754 .dev = dev,
7755 .extack = extack,
7756 },
7757 .upper_dev = upper_dev,
7758 .master = master,
7759 .linking = true,
7760 .upper_info = upper_info,
7761 };
7762 struct net_device *master_dev;
7763 int ret = 0;
7764
7765 ASSERT_RTNL();
7766
7767 if (dev == upper_dev)
7768 return -EBUSY;
7769
7770 /* To prevent loops, check if dev is not upper device to upper_dev. */
7771 if (__netdev_has_upper_dev(upper_dev, dev))
7772 return -EBUSY;
7773
7774 if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
7775 return -EMLINK;
7776
7777 if (!master) {
7778 if (__netdev_has_upper_dev(dev, upper_dev))
7779 return -EEXIST;
7780 } else {
7781 master_dev = __netdev_master_upper_dev_get(dev);
7782 if (master_dev)
7783 return master_dev == upper_dev ? -EEXIST : -EBUSY;
7784 }
7785
7786 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
7787 &changeupper_info.info);
7788 ret = notifier_to_errno(ret);
7789 if (ret)
7790 return ret;
7791
7792 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
7793 master);
7794 if (ret)
7795 return ret;
7796
7797 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
7798 &changeupper_info.info);
7799 ret = notifier_to_errno(ret);
7800 if (ret)
7801 goto rollback;
7802
7803 __netdev_update_upper_level(dev, NULL);
7804 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
7805
7806 __netdev_update_lower_level(upper_dev, priv);
7807 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
7808 priv);
7809
7810 return 0;
7811
7812rollback:
7813 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7814
7815 return ret;
7816}
7817
7818/**
7819 * netdev_upper_dev_link - Add a link to the upper device
7820 * @dev: device
7821 * @upper_dev: new upper device
7822 * @extack: netlink extended ack
7823 *
7824 * Adds a link to a device which is upper to this one. The caller must hold
7825 * the RTNL lock. On failure a negative errno code is returned.
7826 * On success the reference counts are adjusted and the function
7827 * returns zero.
7828 */
7829int netdev_upper_dev_link(struct net_device *dev,
7830 struct net_device *upper_dev,
7831 struct netlink_ext_ack *extack)
7832{
7833 struct netdev_nested_priv priv = {
7834 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
7835 .data = NULL,
7836 };
7837
7838 return __netdev_upper_dev_link(dev, upper_dev, false,
7839 NULL, NULL, &priv, extack);
7840}
7841EXPORT_SYMBOL(netdev_upper_dev_link);
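/*
 * Editorial example (not part of the original source): stacking one device
 * on top of another (e.g. a virtual upper over a physical lower) and tearing
 * the link down again.  Both calls require the RTNL lock; extack may be NULL.
 *
 *	rtnl_lock();
 *	err = netdev_upper_dev_link(lower_dev, upper_dev, extack);
 *	if (!err)
 *		netdev_upper_dev_unlink(lower_dev, upper_dev);
 *	rtnl_unlock();
 */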
7842
7843/**
7844 * netdev_master_upper_dev_link - Add a master link to the upper device
7845 * @dev: device
7846 * @upper_dev: new upper device
7847 * @upper_priv: upper device private
7848 * @upper_info: upper info to be passed down via notifier
7849 * @extack: netlink extended ack
7850 *
7851 * Adds a link to a device which is upper to this one. In this case, only
7852 * one master upper device can be linked, although other non-master devices
7853 * might be linked as well. The caller must hold the RTNL lock.
7854 * On failure a negative errno code is returned. On success the reference
7855 * counts are adjusted and the function returns zero.
7856 */
7857int netdev_master_upper_dev_link(struct net_device *dev,
7858 struct net_device *upper_dev,
7859 void *upper_priv, void *upper_info,
7860 struct netlink_ext_ack *extack)
7861{
7862 struct netdev_nested_priv priv = {
7863 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
7864 .data = NULL,
7865 };
7866
7867 return __netdev_upper_dev_link(dev, upper_dev, true,
7868 upper_priv, upper_info, &priv, extack);
7869}
7870EXPORT_SYMBOL(netdev_master_upper_dev_link);
7871
7872static void __netdev_upper_dev_unlink(struct net_device *dev,
7873 struct net_device *upper_dev,
7874 struct netdev_nested_priv *priv)
7875{
7876 struct netdev_notifier_changeupper_info changeupper_info = {
7877 .info = {
7878 .dev = dev,
7879 },
7880 .upper_dev = upper_dev,
7881 .linking = false,
7882 };
7883
7884 ASSERT_RTNL();
7885
7886 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
7887
7888 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
7889 &changeupper_info.info);
7890
7891 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7892
7893 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
7894 &changeupper_info.info);
7895
7896 __netdev_update_upper_level(dev, NULL);
7897 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
7898
7899 __netdev_update_lower_level(upper_dev, priv);
7900 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
7901 priv);
7902}
7903
7904/**
7905 * netdev_upper_dev_unlink - Removes a link to upper device
7906 * @dev: device
7907 * @upper_dev: upper device to unlink
7908 *
7909 * Removes the link to a device which is upper to this one. The caller must hold
7910 * the RTNL lock.
7911 */
7912void netdev_upper_dev_unlink(struct net_device *dev,
7913 struct net_device *upper_dev)
7914{
7915 struct netdev_nested_priv priv = {
7916 .flags = NESTED_SYNC_TODO,
7917 .data = NULL,
7918 };
7919
7920 __netdev_upper_dev_unlink(dev, upper_dev, &priv);
7921}
7922EXPORT_SYMBOL(netdev_upper_dev_unlink);
7923
7924static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
7925 struct net_device *lower_dev,
7926 bool val)
7927{
7928 struct netdev_adjacent *adj;
7929
7930 adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
7931 if (adj)
7932 adj->ignore = val;
7933
7934 adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
7935 if (adj)
7936 adj->ignore = val;
7937}
7938
7939static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
7940 struct net_device *lower_dev)
7941{
7942 __netdev_adjacent_dev_set(upper_dev, lower_dev, true);
7943}
7944
7945static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
7946 struct net_device *lower_dev)
7947{
7948 __netdev_adjacent_dev_set(upper_dev, lower_dev, false);
7949}
7950
7951int netdev_adjacent_change_prepare(struct net_device *old_dev,
7952 struct net_device *new_dev,
7953 struct net_device *dev,
7954 struct netlink_ext_ack *extack)
7955{
7956 struct netdev_nested_priv priv = {
7957 .flags = 0,
7958 .data = NULL,
7959 };
7960 int err;
7961
7962 if (!new_dev)
7963 return 0;
7964
7965 if (old_dev && new_dev != old_dev)
7966 netdev_adjacent_dev_disable(dev, old_dev);
7967 err = __netdev_upper_dev_link(new_dev, dev, false, NULL, NULL, &priv,
7968 extack);
7969 if (err) {
7970 if (old_dev && new_dev != old_dev)
7971 netdev_adjacent_dev_enable(dev, old_dev);
7972 return err;
7973 }
7974
7975 return 0;
7976}
7977EXPORT_SYMBOL(netdev_adjacent_change_prepare);
7978
7979void netdev_adjacent_change_commit(struct net_device *old_dev,
7980 struct net_device *new_dev,
7981 struct net_device *dev)
7982{
7983 struct netdev_nested_priv priv = {
7984 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
7985 .data = NULL,
7986 };
7987
7988 if (!new_dev || !old_dev)
7989 return;
7990
7991 if (new_dev == old_dev)
7992 return;
7993
7994 netdev_adjacent_dev_enable(dev, old_dev);
7995 __netdev_upper_dev_unlink(old_dev, dev, &priv);
7996}
7997EXPORT_SYMBOL(netdev_adjacent_change_commit);
7998
7999void netdev_adjacent_change_abort(struct net_device *old_dev,
8000 struct net_device *new_dev,
8001 struct net_device *dev)
8002{
8003 struct netdev_nested_priv priv = {
8004 .flags = 0,
8005 .data = NULL,
8006 };
8007
8008 if (!new_dev)
8009 return;
8010
8011 if (old_dev && new_dev != old_dev)
8012 netdev_adjacent_dev_enable(dev, old_dev);
8013
8014 __netdev_upper_dev_unlink(new_dev, dev, &priv);
8015}
8016EXPORT_SYMBOL(netdev_adjacent_change_abort);
8017
8018/**
8019 * netdev_bonding_info_change - Dispatch event about slave change
8020 * @dev: device
8021 * @bonding_info: info to dispatch
8022 *
8023 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
8024 * The caller must hold the RTNL lock.
8025 */
8026void netdev_bonding_info_change(struct net_device *dev,
8027 struct netdev_bonding_info *bonding_info)
8028{
8029 struct netdev_notifier_bonding_info info = {
8030 .info.dev = dev,
8031 };
8032
8033 memcpy(&info.bonding_info, bonding_info,
8034 sizeof(struct netdev_bonding_info));
8035 call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
8036 &info.info);
8037}
8038EXPORT_SYMBOL(netdev_bonding_info_change);
8039
8040static int netdev_offload_xstats_enable_l3(struct net_device *dev,
8041 struct netlink_ext_ack *extack)
8042{
8043 struct netdev_notifier_offload_xstats_info info = {
8044 .info.dev = dev,
8045 .info.extack = extack,
8046 .type = NETDEV_OFFLOAD_XSTATS_TYPE_L3,
8047 };
8048 int err;
8049 int rc;
8050
8051 dev->offload_xstats_l3 = kzalloc(sizeof(*dev->offload_xstats_l3),
8052 GFP_KERNEL);
8053 if (!dev->offload_xstats_l3)
8054 return -ENOMEM;
8055
8056 rc = call_netdevice_notifiers_info_robust(NETDEV_OFFLOAD_XSTATS_ENABLE,
8057 NETDEV_OFFLOAD_XSTATS_DISABLE,
8058 &info.info);
8059 err = notifier_to_errno(rc);
8060 if (err)
8061 goto free_stats;
8062
8063 return 0;
8064
8065free_stats:
8066 kfree(dev->offload_xstats_l3);
8067 dev->offload_xstats_l3 = NULL;
8068 return err;
8069}
8070
8071int netdev_offload_xstats_enable(struct net_device *dev,
8072 enum netdev_offload_xstats_type type,
8073 struct netlink_ext_ack *extack)
8074{
8075 ASSERT_RTNL();
8076
8077 if (netdev_offload_xstats_enabled(dev, type))
8078 return -EALREADY;
8079
8080 switch (type) {
8081 case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
8082 return netdev_offload_xstats_enable_l3(dev, extack);
8083 }
8084
8085 WARN_ON(1);
8086 return -EINVAL;
8087}
8088EXPORT_SYMBOL(netdev_offload_xstats_enable);
8089
8090static void netdev_offload_xstats_disable_l3(struct net_device *dev)
8091{
8092 struct netdev_notifier_offload_xstats_info info = {
8093 .info.dev = dev,
8094 .type = NETDEV_OFFLOAD_XSTATS_TYPE_L3,
8095 };
8096
8097 call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_DISABLE,
8098 &info.info);
8099 kfree(dev->offload_xstats_l3);
8100 dev->offload_xstats_l3 = NULL;
8101}
8102
8103int netdev_offload_xstats_disable(struct net_device *dev,
8104 enum netdev_offload_xstats_type type)
8105{
8106 ASSERT_RTNL();
8107
8108 if (!netdev_offload_xstats_enabled(dev, type))
8109 return -EALREADY;
8110
8111 switch (type) {
8112 case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
8113 netdev_offload_xstats_disable_l3(dev);
8114 return 0;
8115 }
8116
8117 WARN_ON(1);
8118 return -EINVAL;
8119}
8120EXPORT_SYMBOL(netdev_offload_xstats_disable);
8121
8122static void netdev_offload_xstats_disable_all(struct net_device *dev)
8123{
8124 netdev_offload_xstats_disable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3);
8125}
8126
8127static struct rtnl_hw_stats64 *
8128netdev_offload_xstats_get_ptr(const struct net_device *dev,
8129 enum netdev_offload_xstats_type type)
8130{
8131 switch (type) {
8132 case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
8133 return dev->offload_xstats_l3;
8134 }
8135
8136 WARN_ON(1);
8137 return NULL;
8138}
8139
8140bool netdev_offload_xstats_enabled(const struct net_device *dev,
8141 enum netdev_offload_xstats_type type)
8142{
8143 ASSERT_RTNL();
8144
8145 return netdev_offload_xstats_get_ptr(dev, type);
8146}
8147EXPORT_SYMBOL(netdev_offload_xstats_enabled);
8148
8149struct netdev_notifier_offload_xstats_ru {
8150 bool used;
8151};
8152
8153struct netdev_notifier_offload_xstats_rd {
8154 struct rtnl_hw_stats64 stats;
8155 bool used;
8156};
8157
8158static void netdev_hw_stats64_add(struct rtnl_hw_stats64 *dest,
8159 const struct rtnl_hw_stats64 *src)
8160{
8161 dest->rx_packets += src->rx_packets;
8162 dest->tx_packets += src->tx_packets;
8163 dest->rx_bytes += src->rx_bytes;
8164 dest->tx_bytes += src->tx_bytes;
8165 dest->rx_errors += src->rx_errors;
8166 dest->tx_errors += src->tx_errors;
8167 dest->rx_dropped += src->rx_dropped;
8168 dest->tx_dropped += src->tx_dropped;
8169 dest->multicast += src->multicast;
8170}
8171
8172static int netdev_offload_xstats_get_used(struct net_device *dev,
8173 enum netdev_offload_xstats_type type,
8174 bool *p_used,
8175 struct netlink_ext_ack *extack)
8176{
8177 struct netdev_notifier_offload_xstats_ru report_used = {};
8178 struct netdev_notifier_offload_xstats_info info = {
8179 .info.dev = dev,
8180 .info.extack = extack,
8181 .type = type,
8182 .report_used = &report_used,
8183 };
8184 int rc;
8185
8186 WARN_ON(!netdev_offload_xstats_enabled(dev, type));
8187 rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_USED,
8188 &info.info);
8189 *p_used = report_used.used;
8190 return notifier_to_errno(rc);
8191}
8192
8193static int netdev_offload_xstats_get_stats(struct net_device *dev,
8194 enum netdev_offload_xstats_type type,
8195 struct rtnl_hw_stats64 *p_stats,
8196 bool *p_used,
8197 struct netlink_ext_ack *extack)
8198{
8199 struct netdev_notifier_offload_xstats_rd report_delta = {};
8200 struct netdev_notifier_offload_xstats_info info = {
8201 .info.dev = dev,
8202 .info.extack = extack,
8203 .type = type,
8204 .report_delta = &report_delta,
8205 };
8206 struct rtnl_hw_stats64 *stats;
8207 int rc;
8208
8209 stats = netdev_offload_xstats_get_ptr(dev, type);
8210 if (WARN_ON(!stats))
8211 return -EINVAL;
8212
8213 rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
8214 &info.info);
8215
8216 /* Cache whatever we got, even if there was an error, otherwise the
8217 * successful stats retrievals would get lost.
8218 */
8219 netdev_hw_stats64_add(stats, &report_delta.stats);
8220
8221 if (p_stats)
8222 *p_stats = *stats;
8223 *p_used = report_delta.used;
8224
8225 return notifier_to_errno(rc);
8226}
8227
8228int netdev_offload_xstats_get(struct net_device *dev,
8229 enum netdev_offload_xstats_type type,
8230 struct rtnl_hw_stats64 *p_stats, bool *p_used,
8231 struct netlink_ext_ack *extack)
8232{
8233 ASSERT_RTNL();
8234
8235 if (p_stats)
8236 return netdev_offload_xstats_get_stats(dev, type, p_stats,
8237 p_used, extack);
8238 else
8239 return netdev_offload_xstats_get_used(dev, type, p_used,
8240 extack);
8241}
8242EXPORT_SYMBOL(netdev_offload_xstats_get);
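
/*
 * Illustrative sketch, not part of the kernel tree: a hypothetical
 * control-path helper that enables L3 hardware stats on @dev, samples the
 * accumulated counters once and disables collection again. All three calls
 * above require RTNL; "example_sample_l3_stats" is an invented name.
 */
static int __maybe_unused example_sample_l3_stats(struct net_device *dev,
						  struct rtnl_hw_stats64 *stats,
						  bool *used,
						  struct netlink_ext_ack *extack)
{
	int err;

	ASSERT_RTNL();

	err = netdev_offload_xstats_enable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3,
					   extack);
	if (err)
		return err;

	/* Drivers feed their deltas in via NETDEV_OFFLOAD_XSTATS_REPORT_DELTA;
	 * the running total lands in *stats, *used tells whether any driver
	 * actually tracks this device.
	 */
	err = netdev_offload_xstats_get(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3,
					stats, used, extack);

	netdev_offload_xstats_disable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3);
	return err;
}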
8243
8244void
8245netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *report_delta,
8246 const struct rtnl_hw_stats64 *stats)
8247{
8248 report_delta->used = true;
8249 netdev_hw_stats64_add(&report_delta->stats, stats);
8250}
8251EXPORT_SYMBOL(netdev_offload_xstats_report_delta);
8252
8253void
8254netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *report_used)
8255{
8256 report_used->used = true;
8257}
8258EXPORT_SYMBOL(netdev_offload_xstats_report_used);
8259
8260void netdev_offload_xstats_push_delta(struct net_device *dev,
8261 enum netdev_offload_xstats_type type,
8262 const struct rtnl_hw_stats64 *p_stats)
8263{
8264 struct rtnl_hw_stats64 *stats;
8265
8266 ASSERT_RTNL();
8267
8268 stats = netdev_offload_xstats_get_ptr(dev, type);
8269 if (WARN_ON(!stats))
8270 return;
8271
8272 netdev_hw_stats64_add(stats, p_stats);
8273}
8274EXPORT_SYMBOL(netdev_offload_xstats_push_delta);
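
/*
 * Illustrative sketch, not part of the kernel tree: the driver side of the
 * offload xstats protocol. A hypothetical offloading driver answers the
 * REPORT_USED/REPORT_DELTA notifiers by calling the report helpers above;
 * reading the real hardware counters is assumed and left out.
 */
static int __maybe_unused
example_offload_xstats_event(struct notifier_block *nb, unsigned long event,
			     void *ptr)
{
	struct netdev_notifier_offload_xstats_info *info = ptr;
	struct rtnl_hw_stats64 stats = {};

	if (event != NETDEV_OFFLOAD_XSTATS_REPORT_USED &&
	    event != NETDEV_OFFLOAD_XSTATS_REPORT_DELTA)
		return NOTIFY_DONE;

	if (info->type != NETDEV_OFFLOAD_XSTATS_TYPE_L3)
		return NOTIFY_DONE;

	if (event == NETDEV_OFFLOAD_XSTATS_REPORT_USED) {
		/* Tell the core this driver does track the device. */
		netdev_offload_xstats_report_used(info->report_used);
	} else {
		/* stats would be filled from hardware here (assumed). */
		netdev_offload_xstats_report_delta(info->report_delta, &stats);
	}
	return NOTIFY_OK;
}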
8275
8276/**
8277 * netdev_get_xmit_slave - Get the xmit slave of master device
8278 * @dev: device
8279 * @skb: The packet
8280 * @all_slaves: assume all the slaves are active
8281 *
8282 * The reference counters are not incremented so the caller must be
8283 * careful with locks. The caller must hold the RCU read lock.
8284 * %NULL is returned if no slave is found.
8285 */
8286
8287struct net_device *netdev_get_xmit_slave(struct net_device *dev,
8288 struct sk_buff *skb,
8289 bool all_slaves)
8290{
8291 const struct net_device_ops *ops = dev->netdev_ops;
8292
8293 if (!ops->ndo_get_xmit_slave)
8294 return NULL;
8295 return ops->ndo_get_xmit_slave(dev, skb, all_slaves);
8296}
8297EXPORT_SYMBOL(netdev_get_xmit_slave);
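
/*
 * Illustrative sketch, not part of the kernel tree: resolving which lower
 * device a LAG master would transmit @skb on, e.g. before programming an
 * offload against that specific port. The helper name is hypothetical; the
 * caller gets a reference and must dev_put() the result.
 */
static __maybe_unused struct net_device *
example_resolve_tx_port(struct net_device *lag_dev, struct sk_buff *skb)
{
	struct net_device *slave;

	rcu_read_lock();
	slave = netdev_get_xmit_slave(lag_dev, skb, false);
	if (slave)
		dev_hold(slave);	/* keep it alive past the RCU section */
	rcu_read_unlock();

	return slave;
}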
8298
8299static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev,
8300 struct sock *sk)
8301{
8302 const struct net_device_ops *ops = dev->netdev_ops;
8303
8304 if (!ops->ndo_sk_get_lower_dev)
8305 return NULL;
8306 return ops->ndo_sk_get_lower_dev(dev, sk);
8307}
8308
8309/**
8310 * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket
8311 * @dev: device
8312 * @sk: the socket
8313 *
8314 * If no lower device is found, @dev itself is returned.
8315 */
8316
8317struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
8318 struct sock *sk)
8319{
8320 struct net_device *lower;
8321
8322 lower = netdev_sk_get_lower_dev(dev, sk);
8323 while (lower) {
8324 dev = lower;
8325 lower = netdev_sk_get_lower_dev(dev, sk);
8326 }
8327
8328 return dev;
8329}
8330EXPORT_SYMBOL(netdev_sk_get_lowest_dev);
8331
8332static void netdev_adjacent_add_links(struct net_device *dev)
8333{
8334 struct netdev_adjacent *iter;
8335
8336 struct net *net = dev_net(dev);
8337
8338 list_for_each_entry(iter, &dev->adj_list.upper, list) {
8339 if (!net_eq(net, dev_net(iter->dev)))
8340 continue;
8341 netdev_adjacent_sysfs_add(iter->dev, dev,
8342 &iter->dev->adj_list.lower);
8343 netdev_adjacent_sysfs_add(dev, iter->dev,
8344 &dev->adj_list.upper);
8345 }
8346
8347 list_for_each_entry(iter, &dev->adj_list.lower, list) {
8348 if (!net_eq(net, dev_net(iter->dev)))
8349 continue;
8350 netdev_adjacent_sysfs_add(iter->dev, dev,
8351 &iter->dev->adj_list.upper);
8352 netdev_adjacent_sysfs_add(dev, iter->dev,
8353 &dev->adj_list.lower);
8354 }
8355}
8356
8357static void netdev_adjacent_del_links(struct net_device *dev)
8358{
8359 struct netdev_adjacent *iter;
8360
8361 struct net *net = dev_net(dev);
8362
8363 list_for_each_entry(iter, &dev->adj_list.upper, list) {
8364 if (!net_eq(net, dev_net(iter->dev)))
8365 continue;
8366 netdev_adjacent_sysfs_del(iter->dev, dev->name,
8367 &iter->dev->adj_list.lower);
8368 netdev_adjacent_sysfs_del(dev, iter->dev->name,
8369 &dev->adj_list.upper);
8370 }
8371
8372 list_for_each_entry(iter, &dev->adj_list.lower, list) {
8373 if (!net_eq(net, dev_net(iter->dev)))
8374 continue;
8375 netdev_adjacent_sysfs_del(iter->dev, dev->name,
8376 &iter->dev->adj_list.upper);
8377 netdev_adjacent_sysfs_del(dev, iter->dev->name,
8378 &dev->adj_list.lower);
8379 }
8380}
8381
8382void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
8383{
8384 struct netdev_adjacent *iter;
8385
8386 struct net *net = dev_net(dev);
8387
8388 list_for_each_entry(iter, &dev->adj_list.upper, list) {
8389 if (!net_eq(net, dev_net(iter->dev)))
8390 continue;
8391 netdev_adjacent_sysfs_del(iter->dev, oldname,
8392 &iter->dev->adj_list.lower);
8393 netdev_adjacent_sysfs_add(iter->dev, dev,
8394 &iter->dev->adj_list.lower);
8395 }
8396
8397 list_for_each_entry(iter, &dev->adj_list.lower, list) {
8398 if (!net_eq(net, dev_net(iter->dev)))
8399 continue;
8400 netdev_adjacent_sysfs_del(iter->dev, oldname,
8401 &iter->dev->adj_list.upper);
8402 netdev_adjacent_sysfs_add(iter->dev, dev,
8403 &iter->dev->adj_list.upper);
8404 }
8405}
8406
8407void *netdev_lower_dev_get_private(struct net_device *dev,
8408 struct net_device *lower_dev)
8409{
8410 struct netdev_adjacent *lower;
8411
8412 if (!lower_dev)
8413 return NULL;
8414 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
8415 if (!lower)
8416 return NULL;
8417
8418 return lower->private;
8419}
8420EXPORT_SYMBOL(netdev_lower_dev_get_private);
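
/*
 * Illustrative sketch, not part of the kernel tree: an upper/master driver
 * fetching the per-lower private pointer it handed to
 * netdev_master_upper_dev_link() as @upper_priv. "example_slave_state" is a
 * hypothetical type; the caller must hold RTNL so the adjacency stays put.
 */
struct example_slave_state;		/* hypothetical per-slave state */

static __maybe_unused struct example_slave_state *
example_get_slave_state(struct net_device *master, struct net_device *slave)
{
	ASSERT_RTNL();
	return netdev_lower_dev_get_private(master, slave);
}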
8421
8422
8423/**
8424 * netdev_lower_state_changed - Dispatch event about lower device state change
8425 * @lower_dev: device
8426 * @lower_state_info: state to dispatch
8427 *
8428 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
8429 * The caller must hold the RTNL lock.
8430 */
8431void netdev_lower_state_changed(struct net_device *lower_dev,
8432 void *lower_state_info)
8433{
8434 struct netdev_notifier_changelowerstate_info changelowerstate_info = {
8435 .info.dev = lower_dev,
8436 };
8437
8438 ASSERT_RTNL();
8439 changelowerstate_info.lower_state_info = lower_state_info;
8440 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
8441 &changelowerstate_info.info);
8442}
8443EXPORT_SYMBOL(netdev_lower_state_changed);
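
/*
 * Illustrative sketch, not part of the kernel tree: a LAG master propagating
 * the state of one of its lower devices, using
 * struct netdev_lag_lower_state_info as the driver-specific payload the way
 * bonding/team do. The helper name is hypothetical; RTNL must be held.
 */
static void __maybe_unused example_update_lower_state(struct net_device *lower,
						      bool link_up,
						      bool tx_enabled)
{
	struct netdev_lag_lower_state_info info = {
		.link_up	= link_up,
		.tx_enabled	= tx_enabled,
	};

	ASSERT_RTNL();
	netdev_lower_state_changed(lower, &info);
}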
8444
8445static void dev_change_rx_flags(struct net_device *dev, int flags)
8446{
8447 const struct net_device_ops *ops = dev->netdev_ops;
8448
8449 if (ops->ndo_change_rx_flags)
8450 ops->ndo_change_rx_flags(dev, flags);
8451}
8452
8453static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
8454{
8455 unsigned int old_flags = dev->flags;
8456 kuid_t uid;
8457 kgid_t gid;
8458
8459 ASSERT_RTNL();
8460
8461 dev->flags |= IFF_PROMISC;
8462 dev->promiscuity += inc;
8463 if (dev->promiscuity == 0) {
8464 /*
8465 * Avoid overflow.
8466 * If inc causes overflow, leave promiscuity untouched and return an error.
8467 */
8468 if (inc < 0)
8469 dev->flags &= ~IFF_PROMISC;
8470 else {
8471 dev->promiscuity -= inc;
8472 netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n");
8473 return -EOVERFLOW;
8474 }
8475 }
8476 if (dev->flags != old_flags) {
8477 netdev_info(dev, "%s promiscuous mode\n",
8478 dev->flags & IFF_PROMISC ? "entered" : "left");
8479 if (audit_enabled) {
8480 current_uid_gid(&uid, &gid);
8481 audit_log(audit_context(), GFP_ATOMIC,
8482 AUDIT_ANOM_PROMISCUOUS,
8483 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
8484 dev->name, (dev->flags & IFF_PROMISC),
8485 (old_flags & IFF_PROMISC),
8486 from_kuid(&init_user_ns, audit_get_loginuid(current)),
8487 from_kuid(&init_user_ns, uid),
8488 from_kgid(&init_user_ns, gid),
8489 audit_get_sessionid(current));
8490 }
8491
8492 dev_change_rx_flags(dev, IFF_PROMISC);
8493 }
8494 if (notify)
8495 __dev_notify_flags(dev, old_flags, IFF_PROMISC, 0, NULL);
8496 return 0;
8497}
8498
8499/**
8500 * dev_set_promiscuity - update promiscuity count on a device
8501 * @dev: device
8502 * @inc: modifier
8503 *
8504 * Add or remove promiscuity from a device. While the count in the device
8505 * remains above zero the interface remains promiscuous. Once it hits zero
8506 * the device reverts to normal filtering operation. A negative @inc
8507 * value is used to drop promiscuity on the device.
8508 * Return 0 if successful or a negative errno code on error.
8509 */
8510int dev_set_promiscuity(struct net_device *dev, int inc)
8511{
8512 unsigned int old_flags = dev->flags;
8513 int err;
8514
8515 err = __dev_set_promiscuity(dev, inc, true);
8516 if (err < 0)
8517 return err;
8518 if (dev->flags != old_flags)
8519 dev_set_rx_mode(dev);
8520 return err;
8521}
8522EXPORT_SYMBOL(dev_set_promiscuity);
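
/*
 * Illustrative sketch, not part of the kernel tree: promiscuity is a
 * counter, so every successful +1 must eventually be paired with a -1.
 * A packet-capture style user could bracket its session like this
 * (hypothetical helpers, RTNL held by the caller).
 */
static int __maybe_unused example_capture_start(struct net_device *dev)
{
	ASSERT_RTNL();
	return dev_set_promiscuity(dev, 1);	/* take one reference */
}

static void __maybe_unused example_capture_stop(struct net_device *dev)
{
	ASSERT_RTNL();
	dev_set_promiscuity(dev, -1);		/* and drop it again */
}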
8523
8524static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
8525{
8526 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
8527
8528 ASSERT_RTNL();
8529
8530 dev->flags |= IFF_ALLMULTI;
8531 dev->allmulti += inc;
8532 if (dev->allmulti == 0) {
8533 /*
8534 * Avoid overflow.
8535 * If inc causes overflow, untouch allmulti and return error.
8536 */
8537 if (inc < 0)
8538 dev->flags &= ~IFF_ALLMULTI;
8539 else {
8540 dev->allmulti -= inc;
8541 netdev_warn(dev, "allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n");
8542 return -EOVERFLOW;
8543 }
8544 }
8545 if (dev->flags ^ old_flags) {
8546 netdev_info(dev, "%s allmulticast mode\n",
8547 dev->flags & IFF_ALLMULTI ? "entered" : "left");
8548 dev_change_rx_flags(dev, IFF_ALLMULTI);
8549 dev_set_rx_mode(dev);
8550 if (notify)
8551 __dev_notify_flags(dev, old_flags,
8552 dev->gflags ^ old_gflags, 0, NULL);
8553 }
8554 return 0;
8555}
8556
8557/**
8558 * dev_set_allmulti - update allmulti count on a device
8559 * @dev: device
8560 * @inc: modifier
8561 *
8562 * Add or remove reception of all multicast frames to a device. While the
8563 * count in the device remains above zero the interface remains listening
8564 * to all multicast frames. Once it hits zero the device reverts to normal
8565 * filtering operation. A negative @inc value is used to drop the counter
8566 * when releasing a resource needing all multicasts.
8567 * Return 0 if successful or a negative errno code on error.
8568 */
8569
8570int dev_set_allmulti(struct net_device *dev, int inc)
8571{
8572 return __dev_set_allmulti(dev, inc, true);
8573}
8574EXPORT_SYMBOL(dev_set_allmulti);
8575
8576/*
8577 * Upload unicast and multicast address lists to device and
8578 * configure RX filtering. When the device doesn't support unicast
8579 * filtering it is put in promiscuous mode while unicast addresses
8580 * are present.
8581 */
8582void __dev_set_rx_mode(struct net_device *dev)
8583{
8584 const struct net_device_ops *ops = dev->netdev_ops;
8585
8586 /* dev_open will call this function so the list will stay sane. */
8587 if (!(dev->flags&IFF_UP))
8588 return;
8589
8590 if (!netif_device_present(dev))
8591 return;
8592
8593 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
8594 /* Unicast address changes may only happen under the rtnl,
8595 * therefore calling __dev_set_promiscuity here is safe.
8596 */
8597 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
8598 __dev_set_promiscuity(dev, 1, false);
8599 dev->uc_promisc = true;
8600 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
8601 __dev_set_promiscuity(dev, -1, false);
8602 dev->uc_promisc = false;
8603 }
8604 }
8605
8606 if (ops->ndo_set_rx_mode)
8607 ops->ndo_set_rx_mode(dev);
8608}
8609
8610void dev_set_rx_mode(struct net_device *dev)
8611{
8612 netif_addr_lock_bh(dev);
8613 __dev_set_rx_mode(dev);
8614 netif_addr_unlock_bh(dev);
8615}
8616
8617/**
8618 * dev_get_flags - get flags reported to userspace
8619 * @dev: device
8620 *
8621 * Get the combination of flag bits exported through APIs to userspace.
8622 */
8623unsigned int dev_get_flags(const struct net_device *dev)
8624{
8625 unsigned int flags;
8626
8627 flags = (READ_ONCE(dev->flags) & ~(IFF_PROMISC |
8628 IFF_ALLMULTI |
8629 IFF_RUNNING |
8630 IFF_LOWER_UP |
8631 IFF_DORMANT)) |
8632 (READ_ONCE(dev->gflags) & (IFF_PROMISC |
8633 IFF_ALLMULTI));
8634
8635 if (netif_running(dev)) {
8636 if (netif_oper_up(dev))
8637 flags |= IFF_RUNNING;
8638 if (netif_carrier_ok(dev))
8639 flags |= IFF_LOWER_UP;
8640 if (netif_dormant(dev))
8641 flags |= IFF_DORMANT;
8642 }
8643
8644 return flags;
8645}
8646EXPORT_SYMBOL(dev_get_flags);
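
/*
 * Illustrative sketch, not part of the kernel tree: dev_get_flags() folds
 * the operational state (RUNNING/LOWER_UP/DORMANT) into the userspace view
 * of the flags, so an "admin up and carrier present" test can be written as
 * below (hypothetical helper name).
 */
static bool __maybe_unused example_link_is_usable(const struct net_device *dev)
{
	unsigned int flags = dev_get_flags(dev);

	return (flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING);
}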
8647
8648int __dev_change_flags(struct net_device *dev, unsigned int flags,
8649 struct netlink_ext_ack *extack)
8650{
8651 unsigned int old_flags = dev->flags;
8652 int ret;
8653
8654 ASSERT_RTNL();
8655
8656 /*
8657 * Set the flags on our device.
8658 */
8659
8660 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
8661 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
8662 IFF_AUTOMEDIA)) |
8663 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
8664 IFF_ALLMULTI));
8665
8666 /*
8667 * Load in the correct multicast list now the flags have changed.
8668 */
8669
8670 if ((old_flags ^ flags) & IFF_MULTICAST)
8671 dev_change_rx_flags(dev, IFF_MULTICAST);
8672
8673 dev_set_rx_mode(dev);
8674
8675 /*
8676 * Have we downed the interface? We handle IFF_UP ourselves
8677 * according to user attempts to set it, rather than blindly
8678 * setting it.
8679 */
8680
8681 ret = 0;
8682 if ((old_flags ^ flags) & IFF_UP) {
8683 if (old_flags & IFF_UP)
8684 __dev_close(dev);
8685 else
8686 ret = __dev_open(dev, extack);
8687 }
8688
8689 if ((flags ^ dev->gflags) & IFF_PROMISC) {
8690 int inc = (flags & IFF_PROMISC) ? 1 : -1;
8691 unsigned int old_flags = dev->flags;
8692
8693 dev->gflags ^= IFF_PROMISC;
8694
8695 if (__dev_set_promiscuity(dev, inc, false) >= 0)
8696 if (dev->flags != old_flags)
8697 dev_set_rx_mode(dev);
8698 }
8699
8700 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
8701 * is important. Some (broken) drivers set IFF_PROMISC when
8702 * IFF_ALLMULTI is requested, without asking us and without reporting it.
8703 */
8704 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
8705 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
8706
8707 dev->gflags ^= IFF_ALLMULTI;
8708 __dev_set_allmulti(dev, inc, false);
8709 }
8710
8711 return ret;
8712}
8713
8714void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
8715 unsigned int gchanges, u32 portid,
8716 const struct nlmsghdr *nlh)
8717{
8718 unsigned int changes = dev->flags ^ old_flags;
8719
8720 if (gchanges)
8721 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC, portid, nlh);
8722
8723 if (changes & IFF_UP) {
8724 if (dev->flags & IFF_UP)
8725 call_netdevice_notifiers(NETDEV_UP, dev);
8726 else
8727 call_netdevice_notifiers(NETDEV_DOWN, dev);
8728 }
8729
8730 if (dev->flags & IFF_UP &&
8731 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
8732 struct netdev_notifier_change_info change_info = {
8733 .info = {
8734 .dev = dev,
8735 },
8736 .flags_changed = changes,
8737 };
8738
8739 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
8740 }
8741}
8742
8743/**
8744 * dev_change_flags - change device settings
8745 * @dev: device
8746 * @flags: device state flags
8747 * @extack: netlink extended ack
8748 *
8749 * Change settings on the device based on state flags. The flags are
8750 * in the userspace exported format.
8751 */
8752int dev_change_flags(struct net_device *dev, unsigned int flags,
8753 struct netlink_ext_ack *extack)
8754{
8755 int ret;
8756 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
8757
8758 ret = __dev_change_flags(dev, flags, extack);
8759 if (ret < 0)
8760 return ret;
8761
8762 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
8763 __dev_notify_flags(dev, old_flags, changes, 0, NULL);
8764 return ret;
8765}
8766EXPORT_SYMBOL(dev_change_flags);
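
/*
 * Illustrative sketch, not part of the kernel tree: administratively
 * bringing an interface up much like "ip link set dev ... up", by feeding
 * the userspace-format flags back through dev_change_flags() under RTNL.
 * The helper name is hypothetical.
 */
static int __maybe_unused example_set_admin_up(struct net_device *dev,
					       struct netlink_ext_ack *extack)
{
	ASSERT_RTNL();
	return dev_change_flags(dev, dev_get_flags(dev) | IFF_UP, extack);
}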
8767
8768int __dev_set_mtu(struct net_device *dev, int new_mtu)
8769{
8770 const struct net_device_ops *ops = dev->netdev_ops;
8771
8772 if (ops->ndo_change_mtu)
8773 return ops->ndo_change_mtu(dev, new_mtu);
8774
8775 /* Pairs with all the lockless reads of dev->mtu in the stack */
8776 WRITE_ONCE(dev->mtu, new_mtu);
8777 return 0;
8778}
8779EXPORT_SYMBOL(__dev_set_mtu);
8780
8781int dev_validate_mtu(struct net_device *dev, int new_mtu,
8782 struct netlink_ext_ack *extack)
8783{
8784 /* MTU must be positive, and in range */
8785 if (new_mtu < 0 || new_mtu < dev->min_mtu) {
8786 NL_SET_ERR_MSG(extack, "mtu less than device minimum");
8787 return -EINVAL;
8788 }
8789
8790 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
8791 NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
8792 return -EINVAL;
8793 }
8794 return 0;
8795}
8796
8797/**
8798 * dev_set_mtu_ext - Change the maximum transmission unit (MTU)
8799 * @dev: device
8800 * @new_mtu: new transfer unit
8801 * @extack: netlink extended ack
8802 *
8803 * Change the maximum transfer size of the network device.
8804 */
8805int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
8806 struct netlink_ext_ack *extack)
8807{
8808 int err, orig_mtu;
8809
8810 if (new_mtu == dev->mtu)
8811 return 0;
8812
8813 err = dev_validate_mtu(dev, new_mtu, extack);
8814 if (err)
8815 return err;
8816
8817 if (!netif_device_present(dev))
8818 return -ENODEV;
8819
8820 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
8821 err = notifier_to_errno(err);
8822 if (err)
8823 return err;
8824
8825 orig_mtu = dev->mtu;
8826 err = __dev_set_mtu(dev, new_mtu);
8827
8828 if (!err) {
8829 err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8830 orig_mtu);
8831 err = notifier_to_errno(err);
8832 if (err) {
8833 /* setting mtu back and notifying everyone again,
8834 * so that they have a chance to revert changes.
8835 */
8836 __dev_set_mtu(dev, orig_mtu);
8837 call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8838 new_mtu);
8839 }
8840 }
8841 return err;
8842}
8843
8844int dev_set_mtu(struct net_device *dev, int new_mtu)
8845{
8846 struct netlink_ext_ack extack;
8847 int err;
8848
8849 memset(&extack, 0, sizeof(extack));
8850 err = dev_set_mtu_ext(dev, new_mtu, &extack);
8851 if (err && extack._msg)
8852 net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
8853 return err;
8854}
8855EXPORT_SYMBOL(dev_set_mtu);
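
/*
 * Illustrative sketch, not part of the kernel tree: a tunnel-style driver
 * growing the MTU of an underlying device so a given payload MTU still fits
 * after EXAMPLE_ENCAP_LEN bytes of encapsulation overhead. Both the helper
 * and the constant are hypothetical; RTNL must be held.
 */
#define EXAMPLE_ENCAP_LEN	8	/* hypothetical encap overhead */

static int __maybe_unused example_grow_lower_mtu(struct net_device *lower,
						 int payload_mtu)
{
	ASSERT_RTNL();

	if (lower->mtu >= payload_mtu + EXAMPLE_ENCAP_LEN)
		return 0;	/* already large enough */

	/* The new value is checked against min_mtu/max_mtu internally. */
	return dev_set_mtu(lower, payload_mtu + EXAMPLE_ENCAP_LEN);
}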
8856
8857/**
8858 * dev_change_tx_queue_len - Change TX queue length of a netdevice
8859 * @dev: device
8860 * @new_len: new tx queue length
8861 */
8862int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
8863{
8864 unsigned int orig_len = dev->tx_queue_len;
8865 int res;
8866
8867 if (new_len != (unsigned int)new_len)
8868 return -ERANGE;
8869
8870 if (new_len != orig_len) {
8871 dev->tx_queue_len = new_len;
8872 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
8873 res = notifier_to_errno(res);
8874 if (res)
8875 goto err_rollback;
8876 res = dev_qdisc_change_tx_queue_len(dev);
8877 if (res)
8878 goto err_rollback;
8879 }
8880
8881 return 0;
8882
8883err_rollback:
8884 netdev_err(dev, "refused to change device tx_queue_len\n");
8885 dev->tx_queue_len = orig_len;
8886 return res;
8887}
8888
8889/**
8890 * dev_set_group - Change group this device belongs to
8891 * @dev: device
8892 * @new_group: group this device should belong to
8893 */
8894void dev_set_group(struct net_device *dev, int new_group)
8895{
8896 dev->group = new_group;
8897}
8898
8899/**
8900 * dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
8901 * @dev: device
8902 * @addr: new address
8903 * @extack: netlink extended ack
8904 */
8905int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
8906 struct netlink_ext_ack *extack)
8907{
8908 struct netdev_notifier_pre_changeaddr_info info = {
8909 .info.dev = dev,
8910 .info.extack = extack,
8911 .dev_addr = addr,
8912 };
8913 int rc;
8914
8915 rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
8916 return notifier_to_errno(rc);
8917}
8918EXPORT_SYMBOL(dev_pre_changeaddr_notify);
8919
8920/**
8921 * dev_set_mac_address - Change Media Access Control Address
8922 * @dev: device
8923 * @sa: new address
8924 * @extack: netlink extended ack
8925 *
8926 * Change the hardware (MAC) address of the device
8927 */
8928int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
8929 struct netlink_ext_ack *extack)
8930{
8931 const struct net_device_ops *ops = dev->netdev_ops;
8932 int err;
8933
8934 if (!ops->ndo_set_mac_address)
8935 return -EOPNOTSUPP;
8936 if (sa->sa_family != dev->type)
8937 return -EINVAL;
8938 if (!netif_device_present(dev))
8939 return -ENODEV;
8940 err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
8941 if (err)
8942 return err;
8943 if (memcmp(dev->dev_addr, sa->sa_data, dev->addr_len)) {
8944 err = ops->ndo_set_mac_address(dev, sa);
8945 if (err)
8946 return err;
8947 }
8948 dev->addr_assign_type = NET_ADDR_SET;
8949 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
8950 add_device_randomness(dev->dev_addr, dev->addr_len);
8951 return 0;
8952}
8953EXPORT_SYMBOL(dev_set_mac_address);
8954
8955DECLARE_RWSEM(dev_addr_sem);
8956
8957int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
8958 struct netlink_ext_ack *extack)
8959{
8960 int ret;
8961
8962 down_write(&dev_addr_sem);
8963 ret = dev_set_mac_address(dev, sa, extack);
8964 up_write(&dev_addr_sem);
8965 return ret;
8966}
8967EXPORT_SYMBOL(dev_set_mac_address_user);
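
/*
 * Illustrative sketch, not part of the kernel tree: setting a unicast MAC
 * address on an Ethernet device from kernel code. dev_set_mac_address_user()
 * takes dev_addr_sem itself, so the caller only needs RTNL; the helper name
 * is hypothetical.
 */
static int __maybe_unused example_set_mac(struct net_device *dev,
					  const u8 *addr,
					  struct netlink_ext_ack *extack)
{
	struct sockaddr sa;

	ASSERT_RTNL();

	if (!is_valid_ether_addr(addr))
		return -EADDRNOTAVAIL;

	sa.sa_family = dev->type;	/* must match, see dev_set_mac_address() */
	memcpy(sa.sa_data, addr, ETH_ALEN);

	return dev_set_mac_address_user(dev, &sa, extack);
}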
8968
8969int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name)
8970{
8971 size_t size = sizeof(sa->sa_data_min);
8972 struct net_device *dev;
8973 int ret = 0;
8974
8975 down_read(&dev_addr_sem);
8976 rcu_read_lock();
8977
8978 dev = dev_get_by_name_rcu(net, dev_name);
8979 if (!dev) {
8980 ret = -ENODEV;
8981 goto unlock;
8982 }
8983 if (!dev->addr_len)
8984 memset(sa->sa_data, 0, size);
8985 else
8986 memcpy(sa->sa_data, dev->dev_addr,
8987 min_t(size_t, size, dev->addr_len));
8988 sa->sa_family = dev->type;
8989
8990unlock:
8991 rcu_read_unlock();
8992 up_read(&dev_addr_sem);
8993 return ret;
8994}
8995EXPORT_SYMBOL(dev_get_mac_address);
8996
8997/**
8998 * dev_change_carrier - Change device carrier
8999 * @dev: device
9000 * @new_carrier: new value
9001 *
9002 * Change device carrier
9003 */
9004int dev_change_carrier(struct net_device *dev, bool new_carrier)
9005{
9006 const struct net_device_ops *ops = dev->netdev_ops;
9007
9008 if (!ops->ndo_change_carrier)
9009 return -EOPNOTSUPP;
9010 if (!netif_device_present(dev))
9011 return -ENODEV;
9012 return ops->ndo_change_carrier(dev, new_carrier);
9013}
9014
9015/**
9016 * dev_get_phys_port_id - Get device physical port ID
9017 * @dev: device
9018 * @ppid: port ID
9019 *
9020 * Get device physical port ID
9021 */
9022int dev_get_phys_port_id(struct net_device *dev,
9023 struct netdev_phys_item_id *ppid)
9024{
9025 const struct net_device_ops *ops = dev->netdev_ops;
9026
9027 if (!ops->ndo_get_phys_port_id)
9028 return -EOPNOTSUPP;
9029 return ops->ndo_get_phys_port_id(dev, ppid);
9030}
9031
9032/**
9033 * dev_get_phys_port_name - Get device physical port name
9034 * @dev: device
9035 * @name: port name
9036 * @len: limit of bytes to copy to name
9037 *
9038 * Get device physical port name
9039 */
9040int dev_get_phys_port_name(struct net_device *dev,
9041 char *name, size_t len)
9042{
9043 const struct net_device_ops *ops = dev->netdev_ops;
9044 int err;
9045
9046 if (ops->ndo_get_phys_port_name) {
9047 err = ops->ndo_get_phys_port_name(dev, name, len);
9048 if (err != -EOPNOTSUPP)
9049 return err;
9050 }
9051 return devlink_compat_phys_port_name_get(dev, name, len);
9052}
9053
9054/**
9055 * dev_get_port_parent_id - Get the device's port parent identifier
9056 * @dev: network device
9057 * @ppid: pointer to a storage for the port's parent identifier
9058 * @recurse: allow/disallow recursion to lower devices
9059 *
9060 * Get the device's port parent identifier
9061 */
9062int dev_get_port_parent_id(struct net_device *dev,
9063 struct netdev_phys_item_id *ppid,
9064 bool recurse)
9065{
9066 const struct net_device_ops *ops = dev->netdev_ops;
9067 struct netdev_phys_item_id first = { };
9068 struct net_device *lower_dev;
9069 struct list_head *iter;
9070 int err;
9071
9072 if (ops->ndo_get_port_parent_id) {
9073 err = ops->ndo_get_port_parent_id(dev, ppid);
9074 if (err != -EOPNOTSUPP)
9075 return err;
9076 }
9077
9078 err = devlink_compat_switch_id_get(dev, ppid);
9079 if (!recurse || err != -EOPNOTSUPP)
9080 return err;
9081
9082 netdev_for_each_lower_dev(dev, lower_dev, iter) {
9083 err = dev_get_port_parent_id(lower_dev, ppid, true);
9084 if (err)
9085 break;
9086 if (!first.id_len)
9087 first = *ppid;
9088 else if (memcmp(&first, ppid, sizeof(*ppid)))
9089 return -EOPNOTSUPP;
9090 }
9091
9092 return err;
9093}
9094EXPORT_SYMBOL(dev_get_port_parent_id);
9095
9096/**
9097 * netdev_port_same_parent_id - Indicate if two network devices have
9098 * the same port parent identifier
9099 * @a: first network device
9100 * @b: second network device
9101 */
9102bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
9103{
9104 struct netdev_phys_item_id a_id = { };
9105 struct netdev_phys_item_id b_id = { };
9106
9107 if (dev_get_port_parent_id(a, &a_id, true) ||
9108 dev_get_port_parent_id(b, &b_id, true))
9109 return false;
9110
9111 return netdev_phys_item_id_same(&a_id, &b_id);
9112}
9113EXPORT_SYMBOL(netdev_port_same_parent_id);
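
/*
 * Illustrative sketch, not part of the kernel tree: a switchdev driver
 * checking that two netdevs hang off the same switch ASIC before offloading
 * a bridge or LAG that spans them. The helper name is hypothetical;
 * dev_get_port_parent_id() recurses through lower devices here, so stacked
 * VLAN/LAG uppers resolve to their physical ports.
 */
static bool __maybe_unused example_same_asic(struct net_device *a,
					     struct net_device *b)
{
	return netdev_port_same_parent_id(a, b);
}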
9114
9115/**
9116 * dev_change_proto_down - set carrier according to proto_down.
9117 *
9118 * @dev: device
9119 * @proto_down: new value
9120 */
9121int dev_change_proto_down(struct net_device *dev, bool proto_down)
9122{
9123 if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN))
9124 return -EOPNOTSUPP;
9125 if (!netif_device_present(dev))
9126 return -ENODEV;
9127 if (proto_down)
9128 netif_carrier_off(dev);
9129 else
9130 netif_carrier_on(dev);
9131 dev->proto_down = proto_down;
9132 return 0;
9133}
9134
9135/**
9136 * dev_change_proto_down_reason - update the proto_down reason bits
9137 *
9138 * @dev: device
9139 * @mask: bitmask selecting which reason bits to update; 0 replaces them all
9140 * @value: new values for the selected reason bits
9141 */
9142void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
9143 u32 value)
9144{
9145 int b;
9146
9147 if (!mask) {
9148 dev->proto_down_reason = value;
9149 } else {
9150 for_each_set_bit(b, &mask, 32) {
9151 if (value & (1 << b))
9152 dev->proto_down_reason |= BIT(b);
9153 else
9154 dev->proto_down_reason &= ~BIT(b);
9155 }
9156 }
9157}
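
/*
 * Illustrative sketch, not part of the kernel tree: how a health-monitoring
 * component might take a device protocol-down for one reason without
 * clobbering reason bits owned by others. EXAMPLE_REASON_BIT and the helper
 * are hypothetical; RTNL must be held.
 */
#define EXAMPLE_REASON_BIT	0	/* hypothetical reason bit number */

static int __maybe_unused example_proto_down(struct net_device *dev, bool down)
{
	ASSERT_RTNL();

	/* Only the bit selected by the mask is touched. */
	dev_change_proto_down_reason(dev, BIT(EXAMPLE_REASON_BIT),
				     down ? BIT(EXAMPLE_REASON_BIT) : 0);
	return dev_change_proto_down(dev, down);
}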
9158
9159struct bpf_xdp_link {
9160 struct bpf_link link;
9161 struct net_device *dev; /* protected by rtnl_lock, no refcnt held */
9162 int flags;
9163};
9164
9165static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags)
9166{
9167 if (flags & XDP_FLAGS_HW_MODE)
9168 return XDP_MODE_HW;
9169 if (flags & XDP_FLAGS_DRV_MODE)
9170 return XDP_MODE_DRV;
9171 if (flags & XDP_FLAGS_SKB_MODE)
9172 return XDP_MODE_SKB;
9173 return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB;
9174}
9175
9176static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode)
9177{
9178 switch (mode) {
9179 case XDP_MODE_SKB:
9180 return generic_xdp_install;
9181 case XDP_MODE_DRV:
9182 case XDP_MODE_HW:
9183 return dev->netdev_ops->ndo_bpf;
9184 default:
9185 return NULL;
9186 }
9187}
9188
9189static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev,
9190 enum bpf_xdp_mode mode)
9191{
9192 return dev->xdp_state[mode].link;
9193}
9194
9195static struct bpf_prog *dev_xdp_prog(struct net_device *dev,
9196 enum bpf_xdp_mode mode)
9197{
9198 struct bpf_xdp_link *link = dev_xdp_link(dev, mode);
9199
9200 if (link)
9201 return link->link.prog;
9202 return dev->xdp_state[mode].prog;
9203}
9204
9205u8 dev_xdp_prog_count(struct net_device *dev)
9206{
9207 u8 count = 0;
9208 int i;
9209
9210 for (i = 0; i < __MAX_XDP_MODE; i++)
9211 if (dev->xdp_state[i].prog || dev->xdp_state[i].link)
9212 count++;
9213 return count;
9214}
9215EXPORT_SYMBOL_GPL(dev_xdp_prog_count);
9216
9217u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
9218{
9219 struct bpf_prog *prog = dev_xdp_prog(dev, mode);
9220
9221 return prog ? prog->aux->id : 0;
9222}
9223
9224static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode,
9225 struct bpf_xdp_link *link)
9226{
9227 dev->xdp_state[mode].link = link;
9228 dev->xdp_state[mode].prog = NULL;
9229}
9230
9231static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode,
9232 struct bpf_prog *prog)
9233{
9234 dev->xdp_state[mode].link = NULL;
9235 dev->xdp_state[mode].prog = prog;
9236}
9237
9238static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode,
9239 bpf_op_t bpf_op, struct netlink_ext_ack *extack,
9240 u32 flags, struct bpf_prog *prog)
9241{
9242 struct netdev_bpf xdp;
9243 int err;
9244
9245 memset(&xdp, 0, sizeof(xdp));
9246 xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG;
9247 xdp.extack = extack;
9248 xdp.flags = flags;
9249 xdp.prog = prog;
9250
9251 /* Drivers assume refcnt is already incremented (i.e., prog pointer is
9252 * "moved" into driver), so they don't increment it on their own, but
9253 * they do decrement refcnt when program is detached or replaced.
9254 * Given net_device also owns link/prog, we need to bump refcnt here
9255 * to prevent drivers from underflowing it.
9256 */
9257 if (prog)
9258 bpf_prog_inc(prog);
9259 err = bpf_op(dev, &xdp);
9260 if (err) {
9261 if (prog)
9262 bpf_prog_put(prog);
9263 return err;
9264 }
9265
9266 if (mode != XDP_MODE_HW)
9267 bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog);
9268
9269 return 0;
9270}
9271
9272static void dev_xdp_uninstall(struct net_device *dev)
9273{
9274 struct bpf_xdp_link *link;
9275 struct bpf_prog *prog;
9276 enum bpf_xdp_mode mode;
9277 bpf_op_t bpf_op;
9278
9279 ASSERT_RTNL();
9280
9281 for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) {
9282 prog = dev_xdp_prog(dev, mode);
9283 if (!prog)
9284 continue;
9285
9286 bpf_op = dev_xdp_bpf_op(dev, mode);
9287 if (!bpf_op)
9288 continue;
9289
9290 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
9291
9292 /* auto-detach link from net device */
9293 link = dev_xdp_link(dev, mode);
9294 if (link)
9295 link->dev = NULL;
9296 else
9297 bpf_prog_put(prog);
9298
9299 dev_xdp_set_link(dev, mode, NULL);
9300 }
9301}
9302
9303static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack,
9304 struct bpf_xdp_link *link, struct bpf_prog *new_prog,
9305 struct bpf_prog *old_prog, u32 flags)
9306{
9307 unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES);
9308 struct bpf_prog *cur_prog;
9309 struct net_device *upper;
9310 struct list_head *iter;
9311 enum bpf_xdp_mode mode;
9312 bpf_op_t bpf_op;
9313 int err;
9314
9315 ASSERT_RTNL();
9316
9317 /* either link or prog attachment, never both */
9318 if (link && (new_prog || old_prog))
9319 return -EINVAL;
9320 /* link supports only XDP mode flags */
9321 if (link && (flags & ~XDP_FLAGS_MODES)) {
9322 NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment");
9323 return -EINVAL;
9324 }
9325 /* just one XDP mode bit should be set, zero defaults to drv/skb mode */
9326 if (num_modes > 1) {
9327 NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set");
9328 return -EINVAL;
9329 }
9330 /* avoid ambiguity if offload + drv/skb mode progs are both loaded */
9331 if (!num_modes && dev_xdp_prog_count(dev) > 1) {
9332 NL_SET_ERR_MSG(extack,
9333 "More than one program loaded, unset mode is ambiguous");
9334 return -EINVAL;
9335 }
9336 /* old_prog != NULL implies XDP_FLAGS_REPLACE is set */
9337 if (old_prog && !(flags & XDP_FLAGS_REPLACE)) {
9338 NL_SET_ERR_MSG(extack, "XDP_FLAGS_REPLACE is not specified");
9339 return -EINVAL;
9340 }
9341
9342 mode = dev_xdp_mode(dev, flags);
9343 /* can't replace attached link */
9344 if (dev_xdp_link(dev, mode)) {
9345 NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link");
9346 return -EBUSY;
9347 }
9348
9349 /* don't allow if an upper device already has a program */
9350 netdev_for_each_upper_dev_rcu(dev, upper, iter) {
9351 if (dev_xdp_prog_count(upper) > 0) {
9352 NL_SET_ERR_MSG(extack, "Cannot attach when an upper device already has a program");
9353 return -EEXIST;
9354 }
9355 }
9356
9357 cur_prog = dev_xdp_prog(dev, mode);
9358 /* can't replace attached prog with link */
9359 if (link && cur_prog) {
9360 NL_SET_ERR_MSG(extack, "Can't replace active XDP program with BPF link");
9361 return -EBUSY;
9362 }
9363 if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) {
9364 NL_SET_ERR_MSG(extack, "Active program does not match expected");
9365 return -EEXIST;
9366 }
9367
9368 /* put effective new program into new_prog */
9369 if (link)
9370 new_prog = link->link.prog;
9371
9372 if (new_prog) {
9373 bool offload = mode == XDP_MODE_HW;
9374 enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB
9375 ? XDP_MODE_DRV : XDP_MODE_SKB;
9376
9377 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
9378 NL_SET_ERR_MSG(extack, "XDP program already attached");
9379 return -EBUSY;
9380 }
9381 if (!offload && dev_xdp_prog(dev, other_mode)) {
9382 NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time");
9383 return -EEXIST;
9384 }
9385 if (!offload && bpf_prog_is_offloaded(new_prog->aux)) {
9386 NL_SET_ERR_MSG(extack, "Using offloaded program without HW_MODE flag is not supported");
9387 return -EINVAL;
9388 }
9389 if (bpf_prog_is_dev_bound(new_prog->aux) && !bpf_offload_dev_match(new_prog, dev)) {
9390 NL_SET_ERR_MSG(extack, "Program bound to different device");
9391 return -EINVAL;
9392 }
9393 if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) {
9394 NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device");
9395 return -EINVAL;
9396 }
9397 if (new_prog->expected_attach_type == BPF_XDP_CPUMAP) {
9398 NL_SET_ERR_MSG(extack, "BPF_XDP_CPUMAP programs can not be attached to a device");
9399 return -EINVAL;
9400 }
9401 }
9402
9403 /* don't call drivers if the effective program didn't change */
9404 if (new_prog != cur_prog) {
9405 bpf_op = dev_xdp_bpf_op(dev, mode);
9406 if (!bpf_op) {
9407 NL_SET_ERR_MSG(extack, "Underlying driver does not support XDP in native mode");
9408 return -EOPNOTSUPP;
9409 }
9410
9411 err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog);
9412 if (err)
9413 return err;
9414 }
9415
9416 if (link)
9417 dev_xdp_set_link(dev, mode, link);
9418 else
9419 dev_xdp_set_prog(dev, mode, new_prog);
9420 if (cur_prog)
9421 bpf_prog_put(cur_prog);
9422
9423 return 0;
9424}
9425
9426static int dev_xdp_attach_link(struct net_device *dev,
9427 struct netlink_ext_ack *extack,
9428 struct bpf_xdp_link *link)
9429{
9430 return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags);
9431}
9432
9433static int dev_xdp_detach_link(struct net_device *dev,
9434 struct netlink_ext_ack *extack,
9435 struct bpf_xdp_link *link)
9436{
9437 enum bpf_xdp_mode mode;
9438 bpf_op_t bpf_op;
9439
9440 ASSERT_RTNL();
9441
9442 mode = dev_xdp_mode(dev, link->flags);
9443 if (dev_xdp_link(dev, mode) != link)
9444 return -EINVAL;
9445
9446 bpf_op = dev_xdp_bpf_op(dev, mode);
9447 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
9448 dev_xdp_set_link(dev, mode, NULL);
9449 return 0;
9450}
9451
9452static void bpf_xdp_link_release(struct bpf_link *link)
9453{
9454 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9455
9456 rtnl_lock();
9457
9458 /* if racing with net_device's teardown, xdp_link->dev might already
9459 * be NULL, in which case the link was already auto-detached
9460 */
9461 if (xdp_link->dev) {
9462 WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link));
9463 xdp_link->dev = NULL;
9464 }
9465
9466 rtnl_unlock();
9467}
9468
9469static int bpf_xdp_link_detach(struct bpf_link *link)
9470{
9471 bpf_xdp_link_release(link);
9472 return 0;
9473}
9474
9475static void bpf_xdp_link_dealloc(struct bpf_link *link)
9476{
9477 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9478
9479 kfree(xdp_link);
9480}
9481
9482static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link,
9483 struct seq_file *seq)
9484{
9485 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9486 u32 ifindex = 0;
9487
9488 rtnl_lock();
9489 if (xdp_link->dev)
9490 ifindex = xdp_link->dev->ifindex;
9491 rtnl_unlock();
9492
9493 seq_printf(seq, "ifindex:\t%u\n", ifindex);
9494}
9495
9496static int bpf_xdp_link_fill_link_info(const struct bpf_link *link,
9497 struct bpf_link_info *info)
9498{
9499 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9500 u32 ifindex = 0;
9501
9502 rtnl_lock();
9503 if (xdp_link->dev)
9504 ifindex = xdp_link->dev->ifindex;
9505 rtnl_unlock();
9506
9507 info->xdp.ifindex = ifindex;
9508 return 0;
9509}
9510
9511static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog,
9512 struct bpf_prog *old_prog)
9513{
9514 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9515 enum bpf_xdp_mode mode;
9516 bpf_op_t bpf_op;
9517 int err = 0;
9518
9519 rtnl_lock();
9520
9521 /* link might have been auto-released already, so fail */
9522 if (!xdp_link->dev) {
9523 err = -ENOLINK;
9524 goto out_unlock;
9525 }
9526
9527 if (old_prog && link->prog != old_prog) {
9528 err = -EPERM;
9529 goto out_unlock;
9530 }
9531 old_prog = link->prog;
9532 if (old_prog->type != new_prog->type ||
9533 old_prog->expected_attach_type != new_prog->expected_attach_type) {
9534 err = -EINVAL;
9535 goto out_unlock;
9536 }
9537
9538 if (old_prog == new_prog) {
9539 /* no-op, don't disturb drivers */
9540 bpf_prog_put(new_prog);
9541 goto out_unlock;
9542 }
9543
9544 mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags);
9545 bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode);
9546 err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL,
9547 xdp_link->flags, new_prog);
9548 if (err)
9549 goto out_unlock;
9550
9551 old_prog = xchg(&link->prog, new_prog);
9552 bpf_prog_put(old_prog);
9553
9554out_unlock:
9555 rtnl_unlock();
9556 return err;
9557}
9558
9559static const struct bpf_link_ops bpf_xdp_link_lops = {
9560 .release = bpf_xdp_link_release,
9561 .dealloc = bpf_xdp_link_dealloc,
9562 .detach = bpf_xdp_link_detach,
9563 .show_fdinfo = bpf_xdp_link_show_fdinfo,
9564 .fill_link_info = bpf_xdp_link_fill_link_info,
9565 .update_prog = bpf_xdp_link_update,
9566};
9567
9568int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
9569{
9570 struct net *net = current->nsproxy->net_ns;
9571 struct bpf_link_primer link_primer;
9572 struct netlink_ext_ack extack = {};
9573 struct bpf_xdp_link *link;
9574 struct net_device *dev;
9575 int err, fd;
9576
9577 rtnl_lock();
9578 dev = dev_get_by_index(net, attr->link_create.target_ifindex);
9579 if (!dev) {
9580 rtnl_unlock();
9581 return -EINVAL;
9582 }
9583
9584 link = kzalloc(sizeof(*link), GFP_USER);
9585 if (!link) {
9586 err = -ENOMEM;
9587 goto unlock;
9588 }
9589
9590 bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog);
9591 link->dev = dev;
9592 link->flags = attr->link_create.flags;
9593
9594 err = bpf_link_prime(&link->link, &link_primer);
9595 if (err) {
9596 kfree(link);
9597 goto unlock;
9598 }
9599
9600 err = dev_xdp_attach_link(dev, &extack, link);
9601 rtnl_unlock();
9602
9603 if (err) {
9604 link->dev = NULL;
9605 bpf_link_cleanup(&link_primer);
9606 trace_bpf_xdp_link_attach_failed(extack._msg);
9607 goto out_put_dev;
9608 }
9609
9610 fd = bpf_link_settle(&link_primer);
9611 /* the link itself doesn't hold dev's refcnt, to avoid complicating shutdown */
9612 dev_put(dev);
9613 return fd;
9614
9615unlock:
9616 rtnl_unlock();
9617
9618out_put_dev:
9619 dev_put(dev);
9620 return err;
9621}
9622
9623/**
9624 * dev_change_xdp_fd - set or clear a bpf program for a device rx path
9625 * @dev: device
9626 * @extack: netlink extended ack
9627 * @fd: new program fd or negative value to clear
9628 * @expected_fd: old program fd that userspace expects to replace or clear
9629 * @flags: xdp-related flags
9630 *
9631 * Set or clear a bpf program for a device
9632 */
9633int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
9634 int fd, int expected_fd, u32 flags)
9635{
9636 enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags);
9637 struct bpf_prog *new_prog = NULL, *old_prog = NULL;
9638 int err;
9639
9640 ASSERT_RTNL();
9641
9642 if (fd >= 0) {
9643 new_prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
9644 mode != XDP_MODE_SKB);
9645 if (IS_ERR(new_prog))
9646 return PTR_ERR(new_prog);
9647 }
9648
9649 if (expected_fd >= 0) {
9650 old_prog = bpf_prog_get_type_dev(expected_fd, BPF_PROG_TYPE_XDP,
9651 mode != XDP_MODE_SKB);
9652 if (IS_ERR(old_prog)) {
9653 err = PTR_ERR(old_prog);
9654 old_prog = NULL;
9655 goto err_out;
9656 }
9657 }
9658
9659 err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags);
9660
9661err_out:
9662 if (err && new_prog)
9663 bpf_prog_put(new_prog);
9664 if (old_prog)
9665 bpf_prog_put(old_prog);
9666 return err;
9667}
9668
9669/**
9670 * dev_index_reserve() - allocate an ifindex in a namespace
9671 * @net: the applicable net namespace
9672 * @ifindex: requested ifindex, pass %0 to get one allocated
9673 *
9674 * Allocate an ifindex for a new device. The caller must either use the ifindex
9675 * to store the device (via list_netdevice()) or call dev_index_release()
9676 * to give the index up.
9677 *
9678 * Return: a suitable unique value for a new device interface number or -errno.
9679 */
9680static int dev_index_reserve(struct net *net, u32 ifindex)
9681{
9682 int err;
9683
9684 if (ifindex > INT_MAX) {
9685 DEBUG_NET_WARN_ON_ONCE(1);
9686 return -EINVAL;
9687 }
9688
9689 if (!ifindex)
9690 err = xa_alloc_cyclic(&net->dev_by_index, &ifindex, NULL,
9691 xa_limit_31b, &net->ifindex, GFP_KERNEL);
9692 else
9693 err = xa_insert(&net->dev_by_index, ifindex, NULL, GFP_KERNEL);
9694 if (err < 0)
9695 return err;
9696
9697 return ifindex;
9698}
9699
9700static void dev_index_release(struct net *net, int ifindex)
9701{
9702 /* Expect only unused indexes; unlist_netdevice() removes the used ones */
9703 WARN_ON(xa_erase(&net->dev_by_index, ifindex));
9704}
9705
9706 /* Delayed registration/unregistration */
9707LIST_HEAD(net_todo_list);
9708DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
9709atomic_t dev_unreg_count = ATOMIC_INIT(0);
9710
9711static void net_set_todo(struct net_device *dev)
9712{
9713 list_add_tail(&dev->todo_list, &net_todo_list);
9714}
9715
9716static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
9717 struct net_device *upper, netdev_features_t features)
9718{
9719 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
9720 netdev_features_t feature;
9721 int feature_bit;
9722
9723 for_each_netdev_feature(upper_disables, feature_bit) {
9724 feature = __NETIF_F_BIT(feature_bit);
9725 if (!(upper->wanted_features & feature)
9726 && (features & feature)) {
9727 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
9728 &feature, upper->name);
9729 features &= ~feature;
9730 }
9731 }
9732
9733 return features;
9734}
9735
9736static void netdev_sync_lower_features(struct net_device *upper,
9737 struct net_device *lower, netdev_features_t features)
9738{
9739 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
9740 netdev_features_t feature;
9741 int feature_bit;
9742
9743 for_each_netdev_feature(upper_disables, feature_bit) {
9744 feature = __NETIF_F_BIT(feature_bit);
9745 if (!(features & feature) && (lower->features & feature)) {
9746 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
9747 &feature, lower->name);
9748 lower->wanted_features &= ~feature;
9749 __netdev_update_features(lower);
9750
9751 if (unlikely(lower->features & feature))
9752 netdev_WARN(upper, "failed to disable %pNF on %s!\n",
9753 &feature, lower->name);
9754 else
9755 netdev_features_change(lower);
9756 }
9757 }
9758}
9759
9760static netdev_features_t netdev_fix_features(struct net_device *dev,
9761 netdev_features_t features)
9762{
9763 /* Fix illegal checksum combinations */
9764 if ((features & NETIF_F_HW_CSUM) &&
9765 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
9766 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
9767 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
9768 }
9769
9770 /* TSO requires that SG is present as well. */
9771 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
9772 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
9773 features &= ~NETIF_F_ALL_TSO;
9774 }
9775
9776 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
9777 !(features & NETIF_F_IP_CSUM)) {
9778 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
9779 features &= ~NETIF_F_TSO;
9780 features &= ~NETIF_F_TSO_ECN;
9781 }
9782
9783 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
9784 !(features & NETIF_F_IPV6_CSUM)) {
9785 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
9786 features &= ~NETIF_F_TSO6;
9787 }
9788
9789 /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
9790 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
9791 features &= ~NETIF_F_TSO_MANGLEID;
9792
9793 /* TSO ECN requires that TSO is present as well. */
9794 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
9795 features &= ~NETIF_F_TSO_ECN;
9796
9797 /* Software GSO depends on SG. */
9798 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
9799 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
9800 features &= ~NETIF_F_GSO;
9801 }
9802
9803 /* GSO partial features require GSO partial be set */
9804 if ((features & dev->gso_partial_features) &&
9805 !(features & NETIF_F_GSO_PARTIAL)) {
9806 netdev_dbg(dev,
9807 "Dropping partially supported GSO features since no GSO partial.\n");
9808 features &= ~dev->gso_partial_features;
9809 }
9810
9811 if (!(features & NETIF_F_RXCSUM)) {
9812 /* NETIF_F_GRO_HW implies doing RXCSUM since every packet
9813 * successfully merged by hardware must also have the
9814 * checksum verified by hardware. If the user does not
9815 * want to enable RXCSUM, logically, we should disable GRO_HW.
9816 */
9817 if (features & NETIF_F_GRO_HW) {
9818 netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
9819 features &= ~NETIF_F_GRO_HW;
9820 }
9821 }
9822
9823 /* LRO/HW-GRO features cannot be combined with RX-FCS */
9824 if (features & NETIF_F_RXFCS) {
9825 if (features & NETIF_F_LRO) {
9826 netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
9827 features &= ~NETIF_F_LRO;
9828 }
9829
9830 if (features & NETIF_F_GRO_HW) {
9831 netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
9832 features &= ~NETIF_F_GRO_HW;
9833 }
9834 }
9835
9836 if ((features & NETIF_F_GRO_HW) && (features & NETIF_F_LRO)) {
9837 netdev_dbg(dev, "Dropping LRO feature since HW-GRO is requested.\n");
9838 features &= ~NETIF_F_LRO;
9839 }
9840
9841 if (features & NETIF_F_HW_TLS_TX) {
9842 bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) ==
9843 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
9844 bool hw_csum = features & NETIF_F_HW_CSUM;
9845
9846 if (!ip_csum && !hw_csum) {
9847 netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
9848 features &= ~NETIF_F_HW_TLS_TX;
9849 }
9850 }
9851
9852 if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
9853 netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
9854 features &= ~NETIF_F_HW_TLS_RX;
9855 }
9856
9857 return features;
9858}
9859
9860int __netdev_update_features(struct net_device *dev)
9861{
9862 struct net_device *upper, *lower;
9863 netdev_features_t features;
9864 struct list_head *iter;
9865 int err = -1;
9866
9867 ASSERT_RTNL();
9868
9869 features = netdev_get_wanted_features(dev);
9870
9871 if (dev->netdev_ops->ndo_fix_features)
9872 features = dev->netdev_ops->ndo_fix_features(dev, features);
9873
9874 /* driver might be less strict about feature dependencies */
9875 features = netdev_fix_features(dev, features);
9876
9877 /* some features can't be enabled if they're off on an upper device */
9878 netdev_for_each_upper_dev_rcu(dev, upper, iter)
9879 features = netdev_sync_upper_features(dev, upper, features);
9880
9881 if (dev->features == features)
9882 goto sync_lower;
9883
9884 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
9885 &dev->features, &features);
9886
9887 if (dev->netdev_ops->ndo_set_features)
9888 err = dev->netdev_ops->ndo_set_features(dev, features);
9889 else
9890 err = 0;
9891
9892 if (unlikely(err < 0)) {
9893 netdev_err(dev,
9894 "set_features() failed (%d); wanted %pNF, left %pNF\n",
9895 err, &features, &dev->features);
9896 /* return non-0 since some features might have changed and
9897 * it's better to fire a spurious notification than miss it
9898 */
9899 return -1;
9900 }
9901
9902sync_lower:
9903 /* some features must be disabled on lower devices when disabled
9904 * on an upper device (think: bonding master or bridge)
9905 */
9906 netdev_for_each_lower_dev(dev, lower, iter)
9907 netdev_sync_lower_features(dev, lower, features);
9908
9909 if (!err) {
9910 netdev_features_t diff = features ^ dev->features;
9911
9912 if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
9913 /* udp_tunnel_{get,drop}_rx_info both need
9914 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
9915 * device, or they won't do anything.
9916 * Thus we need to update dev->features
9917 * *before* calling udp_tunnel_get_rx_info,
9918 * but *after* calling udp_tunnel_drop_rx_info.
9919 */
9920 if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
9921 dev->features = features;
9922 udp_tunnel_get_rx_info(dev);
9923 } else {
9924 udp_tunnel_drop_rx_info(dev);
9925 }
9926 }
9927
9928 if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
9929 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
9930 dev->features = features;
9931 err |= vlan_get_rx_ctag_filter_info(dev);
9932 } else {
9933 vlan_drop_rx_ctag_filter_info(dev);
9934 }
9935 }
9936
9937 if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
9938 if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
9939 dev->features = features;
9940 err |= vlan_get_rx_stag_filter_info(dev);
9941 } else {
9942 vlan_drop_rx_stag_filter_info(dev);
9943 }
9944 }
9945
9946 dev->features = features;
9947 }
9948
9949 return err < 0 ? 0 : 1;
9950}
9951
9952/**
9953 * netdev_update_features - recalculate device features
9954 * @dev: the device to check
9955 *
 9956 * Recalculate the dev->features set and send notifications if it
 9957 * has changed. Should be called whenever driver- or hardware-dependent
 9958 * conditions that influence the features might have changed.
9959 */
9960void netdev_update_features(struct net_device *dev)
9961{
9962 if (__netdev_update_features(dev))
9963 netdev_features_change(dev);
9964}
9965EXPORT_SYMBOL(netdev_update_features);
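
/*
 * A minimal usage sketch (hypothetical "foo" driver): a caller would take
 * RTNL and invoke netdev_update_features() whenever state consulted by its
 * ndo_fix_features() callback changes, so the fix/sync/notify sequence above
 * re-runs. The foo_priv/foo_fw_csum_event names are invented for this sketch.
 *
 *	static void foo_fw_csum_event(struct net_device *dev, bool csum_ok)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		rtnl_lock();
 *		priv->hw_csum_ok = csum_ok;	// read back by foo_fix_features()
 *		netdev_update_features(dev);	// recompute and notify if changed
 *		rtnl_unlock();
 *	}
 */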
9966
9967/**
9968 * netdev_change_features - recalculate device features
9969 * @dev: the device to check
9970 *
 9971 * Recalculate the dev->features set and send notifications even
 9972 * if they have not changed. Should be called instead of
 9973 * netdev_update_features() if dev->vlan_features might also have
 9974 * changed, so that the changes can be propagated to stacked
 9975 * VLAN devices.
9976 */
9977void netdev_change_features(struct net_device *dev)
9978{
9979 __netdev_update_features(dev);
9980 netdev_features_change(dev);
9981}
9982EXPORT_SYMBOL(netdev_change_features);
9983
9984/**
9985 * netif_stacked_transfer_operstate - transfer operstate
9986 * @rootdev: the root or lower level device to transfer state from
9987 * @dev: the device to transfer operstate to
9988 *
9989 * Transfer operational state from root to device. This is normally
9990 * called when a stacking relationship exists between the root
 9991 * device and the device (a leaf device).
9992 */
9993void netif_stacked_transfer_operstate(const struct net_device *rootdev,
9994 struct net_device *dev)
9995{
9996 if (rootdev->operstate == IF_OPER_DORMANT)
9997 netif_dormant_on(dev);
9998 else
9999 netif_dormant_off(dev);
10000
10001 if (rootdev->operstate == IF_OPER_TESTING)
10002 netif_testing_on(dev);
10003 else
10004 netif_testing_off(dev);
10005
10006 if (netif_carrier_ok(rootdev))
10007 netif_carrier_on(dev);
10008 else
10009 netif_carrier_off(dev);
10010}
10011EXPORT_SYMBOL(netif_stacked_transfer_operstate);
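
/*
 * A minimal sketch of a typical caller, assuming a hypothetical "foo"
 * stacking driver: the upper device mirrors the lower device's operstate
 * from a netdevice notifier, so carrier/dormant/testing state follows the
 * real hardware. foo_get_upper() is an invented lookup helper.
 *
 *	static int foo_device_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct net_device *lower = netdev_notifier_info_to_dev(ptr);
 *		struct net_device *upper = foo_get_upper(lower);
 *
 *		if (upper && (event == NETDEV_CHANGE || event == NETDEV_UP))
 *			netif_stacked_transfer_operstate(lower, upper);
 *		return NOTIFY_DONE;
 *	}
 */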
10012
10013static int netif_alloc_rx_queues(struct net_device *dev)
10014{
10015 unsigned int i, count = dev->num_rx_queues;
10016 struct netdev_rx_queue *rx;
10017 size_t sz = count * sizeof(*rx);
10018 int err = 0;
10019
10020 BUG_ON(count < 1);
10021
10022 rx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
10023 if (!rx)
10024 return -ENOMEM;
10025
10026 dev->_rx = rx;
10027
10028 for (i = 0; i < count; i++) {
10029 rx[i].dev = dev;
10030
10031 /* XDP RX-queue setup */
10032 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0);
10033 if (err < 0)
10034 goto err_rxq_info;
10035 }
10036 return 0;
10037
10038err_rxq_info:
10039 /* Roll back successful registrations and free other resources */
10040 while (i--)
10041 xdp_rxq_info_unreg(&rx[i].xdp_rxq);
10042 kvfree(dev->_rx);
10043 dev->_rx = NULL;
10044 return err;
10045}
10046
10047static void netif_free_rx_queues(struct net_device *dev)
10048{
10049 unsigned int i, count = dev->num_rx_queues;
10050
10051 /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
10052 if (!dev->_rx)
10053 return;
10054
10055 for (i = 0; i < count; i++)
10056 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
10057
10058 kvfree(dev->_rx);
10059}
10060
10061static void netdev_init_one_queue(struct net_device *dev,
10062 struct netdev_queue *queue, void *_unused)
10063{
10064 /* Initialize queue lock */
10065 spin_lock_init(&queue->_xmit_lock);
10066 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
10067 queue->xmit_lock_owner = -1;
10068 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
10069 queue->dev = dev;
10070#ifdef CONFIG_BQL
10071 dql_init(&queue->dql, HZ);
10072#endif
10073}
10074
10075static void netif_free_tx_queues(struct net_device *dev)
10076{
10077 kvfree(dev->_tx);
10078}
10079
10080static int netif_alloc_netdev_queues(struct net_device *dev)
10081{
10082 unsigned int count = dev->num_tx_queues;
10083 struct netdev_queue *tx;
10084 size_t sz = count * sizeof(*tx);
10085
10086 if (count < 1 || count > 0xffff)
10087 return -EINVAL;
10088
10089 tx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
10090 if (!tx)
10091 return -ENOMEM;
10092
10093 dev->_tx = tx;
10094
10095 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
10096 spin_lock_init(&dev->tx_global_lock);
10097
10098 return 0;
10099}
10100
10101void netif_tx_stop_all_queues(struct net_device *dev)
10102{
10103 unsigned int i;
10104
10105 for (i = 0; i < dev->num_tx_queues; i++) {
10106 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
10107
10108 netif_tx_stop_queue(txq);
10109 }
10110}
10111EXPORT_SYMBOL(netif_tx_stop_all_queues);
10112
10113static int netdev_do_alloc_pcpu_stats(struct net_device *dev)
10114{
10115 void __percpu *v;
10116
10117 /* Drivers implementing ndo_get_peer_dev must support tstat
10118 * accounting, so that skb_do_redirect() can bump the dev's
10119 * RX stats upon network namespace switch.
10120 */
10121 if (dev->netdev_ops->ndo_get_peer_dev &&
10122 dev->pcpu_stat_type != NETDEV_PCPU_STAT_TSTATS)
10123 return -EOPNOTSUPP;
10124
10125 switch (dev->pcpu_stat_type) {
10126 case NETDEV_PCPU_STAT_NONE:
10127 return 0;
10128 case NETDEV_PCPU_STAT_LSTATS:
10129 v = dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
10130 break;
10131 case NETDEV_PCPU_STAT_TSTATS:
10132 v = dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
10133 break;
10134 case NETDEV_PCPU_STAT_DSTATS:
10135 v = dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
10136 break;
10137 default:
10138 return -EINVAL;
10139 }
10140
10141 return v ? 0 : -ENOMEM;
10142}
10143
10144static void netdev_do_free_pcpu_stats(struct net_device *dev)
10145{
10146 switch (dev->pcpu_stat_type) {
10147 case NETDEV_PCPU_STAT_NONE:
10148 return;
10149 case NETDEV_PCPU_STAT_LSTATS:
10150 free_percpu(dev->lstats);
10151 break;
10152 case NETDEV_PCPU_STAT_TSTATS:
10153 free_percpu(dev->tstats);
10154 break;
10155 case NETDEV_PCPU_STAT_DSTATS:
10156 free_percpu(dev->dstats);
10157 break;
10158 }
10159}
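
/*
 * A minimal sketch of how a driver opts in to core-managed per-CPU stats:
 * picking a pcpu_stat_type before registration is enough, since
 * register_netdevice() calls netdev_do_alloc_pcpu_stats() and the free path
 * calls netdev_do_free_pcpu_stats(). The foo_setup name is hypothetical.
 *
 *	static void foo_setup(struct net_device *dev)
 *	{
 *		ether_setup(dev);
 *		dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
 *	}
 */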
10160
10161/**
10162 * register_netdevice() - register a network device
10163 * @dev: device to register
10164 *
10165 * Take a prepared network device structure and make it externally accessible.
10166 * A %NETDEV_REGISTER message is sent to the netdev notifier chain.
10167 * Callers must hold the rtnl lock - you may want register_netdev()
10168 * instead of this.
10169 */
10170int register_netdevice(struct net_device *dev)
10171{
10172 int ret;
10173 struct net *net = dev_net(dev);
10174
10175 BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
10176 NETDEV_FEATURE_COUNT);
10177 BUG_ON(dev_boot_phase);
10178 ASSERT_RTNL();
10179
10180 might_sleep();
10181
10182 /* When net_device's are persistent, this will be fatal. */
10183 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
10184 BUG_ON(!net);
10185
10186 ret = ethtool_check_ops(dev->ethtool_ops);
10187 if (ret)
10188 return ret;
10189
10190 spin_lock_init(&dev->addr_list_lock);
10191 netdev_set_addr_lockdep_class(dev);
10192
10193 ret = dev_get_valid_name(net, dev, dev->name);
10194 if (ret < 0)
10195 goto out;
10196
10197 ret = -ENOMEM;
10198 dev->name_node = netdev_name_node_head_alloc(dev);
10199 if (!dev->name_node)
10200 goto out;
10201
10202 /* Init, if this function is available */
10203 if (dev->netdev_ops->ndo_init) {
10204 ret = dev->netdev_ops->ndo_init(dev);
10205 if (ret) {
10206 if (ret > 0)
10207 ret = -EIO;
10208 goto err_free_name;
10209 }
10210 }
10211
10212 if (((dev->hw_features | dev->features) &
10213 NETIF_F_HW_VLAN_CTAG_FILTER) &&
10214 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
10215 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
10216 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
10217 ret = -EINVAL;
10218 goto err_uninit;
10219 }
10220
10221 ret = netdev_do_alloc_pcpu_stats(dev);
10222 if (ret)
10223 goto err_uninit;
10224
10225 ret = dev_index_reserve(net, dev->ifindex);
10226 if (ret < 0)
10227 goto err_free_pcpu;
10228 dev->ifindex = ret;
10229
10230 /* Transfer changeable features to wanted_features and enable
10231 * software offloads (GSO and GRO).
10232 */
10233 dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF);
10234 dev->features |= NETIF_F_SOFT_FEATURES;
10235
10236 if (dev->udp_tunnel_nic_info) {
10237 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
10238 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
10239 }
10240
10241 dev->wanted_features = dev->features & dev->hw_features;
10242
10243 if (!(dev->flags & IFF_LOOPBACK))
10244 dev->hw_features |= NETIF_F_NOCACHE_COPY;
10245
10246 /* If IPv4 TCP segmentation offload is supported we should also
10247 * allow the device to enable segmenting the frame with the option
10248 * of ignoring a static IP ID value. This doesn't enable the
10249 * feature itself but allows the user to enable it later.
10250 */
10251 if (dev->hw_features & NETIF_F_TSO)
10252 dev->hw_features |= NETIF_F_TSO_MANGLEID;
10253 if (dev->vlan_features & NETIF_F_TSO)
10254 dev->vlan_features |= NETIF_F_TSO_MANGLEID;
10255 if (dev->mpls_features & NETIF_F_TSO)
10256 dev->mpls_features |= NETIF_F_TSO_MANGLEID;
10257 if (dev->hw_enc_features & NETIF_F_TSO)
10258 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
10259
10260 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
10261 */
10262 dev->vlan_features |= NETIF_F_HIGHDMA;
10263
10264 /* Make NETIF_F_SG inheritable to tunnel devices.
10265 */
10266 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
10267
10268 /* Make NETIF_F_SG inheritable to MPLS.
10269 */
10270 dev->mpls_features |= NETIF_F_SG;
10271
10272 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
10273 ret = notifier_to_errno(ret);
10274 if (ret)
10275 goto err_ifindex_release;
10276
10277 ret = netdev_register_kobject(dev);
10278
10279 WRITE_ONCE(dev->reg_state, ret ? NETREG_UNREGISTERED : NETREG_REGISTERED);
10280
10281 if (ret)
10282 goto err_uninit_notify;
10283
10284 __netdev_update_features(dev);
10285
10286 /*
10287 * Default initial state at registration is that the
10288 * device is present.
10289 */
10290
10291 set_bit(__LINK_STATE_PRESENT, &dev->state);
10292
10293 linkwatch_init_dev(dev);
10294
10295 dev_init_scheduler(dev);
10296
10297 netdev_hold(dev, &dev->dev_registered_tracker, GFP_KERNEL);
10298 list_netdevice(dev);
10299
10300 add_device_randomness(dev->dev_addr, dev->addr_len);
10301
10302 /* If the device has a permanent device address, the driver should
10303 * set dev_addr, and addr_assign_type should be left at
10304 * NET_ADDR_PERM (the default value).
10305 */
10306 if (dev->addr_assign_type == NET_ADDR_PERM)
10307 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10308
10309 /* Notify protocols that a new device appeared. */
10310 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
10311 ret = notifier_to_errno(ret);
10312 if (ret) {
10313 /* Expect explicit free_netdev() on failure */
10314 dev->needs_free_netdev = false;
10315 unregister_netdevice_queue(dev, NULL);
10316 goto out;
10317 }
10318 /*
10319 * Prevent userspace races by waiting until the network
10320 * device is fully setup before sending notifications.
10321 */
10322 if (!dev->rtnl_link_ops ||
10323 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
10324 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL);
10325
10326out:
10327 return ret;
10328
10329err_uninit_notify:
10330 call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
10331err_ifindex_release:
10332 dev_index_release(net, dev->ifindex);
10333err_free_pcpu:
10334 netdev_do_free_pcpu_stats(dev);
10335err_uninit:
10336 if (dev->netdev_ops->ndo_uninit)
10337 dev->netdev_ops->ndo_uninit(dev);
10338 if (dev->priv_destructor)
10339 dev->priv_destructor(dev);
10340err_free_name:
10341 netdev_name_node_free(dev->name_node);
10342 goto out;
10343}
10344EXPORT_SYMBOL(register_netdevice);
10345
10346/**
10347 * init_dummy_netdev - init a dummy network device for NAPI
10348 * @dev: device to init
10349 *
10350 * This takes a network device structure and initializes the minimum
10351 * number of fields so it can be used to schedule NAPI polls without
10352 * registering a full blown interface. This is to be used by drivers
10353 * that need to tie several hardware interfaces to a single NAPI
10354 * poll scheduler due to HW limitations.
10355 */
10356void init_dummy_netdev(struct net_device *dev)
10357{
10358 /* Clear everything. Note we don't initialize spinlocks
10359 * as they aren't supposed to be taken by any of the
10360 * NAPI code and this dummy netdev is supposed to be
10361 * only ever used for NAPI polls
10362 */
10363 memset(dev, 0, sizeof(struct net_device));
10364
10365 /* make sure we BUG if trying to hit standard
10366 * register/unregister code path
10367 */
10368 dev->reg_state = NETREG_DUMMY;
10369
10370 /* NAPI wants this */
10371 INIT_LIST_HEAD(&dev->napi_list);
10372
10373 /* a dummy interface is started by default */
10374 set_bit(__LINK_STATE_PRESENT, &dev->state);
10375 set_bit(__LINK_STATE_START, &dev->state);
10376
10377 /* napi_busy_loop stats accounting wants this */
10378 dev_net_set(dev, &init_net);
10379
10380 /* Note: We don't allocate pcpu_refcnt for dummy devices,
10381 * because users of this 'device' don't need to change
10382 * its refcount.
10383 */
10384}
10385EXPORT_SYMBOL_GPL(init_dummy_netdev);
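
/*
 * A minimal sketch of the intended use, with hypothetical "foo" names: a
 * driver embeds a never-registered dummy netdev purely to host NAPI contexts
 * for several hardware channels.
 *
 *	struct foo_hw {
 *		struct net_device napi_dev;	// dummy, never registered
 *		struct napi_struct napi;
 *	};
 *
 *	static void foo_init_napi(struct foo_hw *hw)
 *	{
 *		init_dummy_netdev(&hw->napi_dev);
 *		netif_napi_add(&hw->napi_dev, &hw->napi, foo_poll);
 *		napi_enable(&hw->napi);
 *	}
 */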
10386
10387
10388/**
10389 * register_netdev - register a network device
10390 * @dev: device to register
10391 *
10392 * Take a completed network device structure and add it to the kernel
10393 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
10394 * chain. 0 is returned on success. A negative errno code is returned
10395 * on a failure to set up the device, or if the name is a duplicate.
10396 *
10397 * This is a wrapper around register_netdevice that takes the rtnl semaphore
10398 * and expands the device name if you passed a format string to
10399 * alloc_netdev.
10400 */
10401int register_netdev(struct net_device *dev)
10402{
10403 int err;
10404
10405 if (rtnl_lock_killable())
10406 return -EINTR;
10407 err = register_netdevice(dev);
10408 rtnl_unlock();
10409 return err;
10410}
10411EXPORT_SYMBOL(register_netdev);
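
/*
 * A minimal probe-time sketch (hypothetical "foo" driver): allocate, fill in
 * ops and offload capabilities, then publish the device last, since after a
 * successful register_netdev() it is immediately visible to userspace and
 * notifiers.
 *
 *	static int foo_probe(struct foo_hw *hw)
 *	{
 *		struct net_device *dev;
 *		int err;
 *
 *		dev = alloc_etherdev(sizeof(struct foo_priv));
 *		if (!dev)
 *			return -ENOMEM;
 *
 *		dev->netdev_ops = &foo_netdev_ops;
 *		dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
 *
 *		err = register_netdev(dev);
 *		if (err)
 *			free_netdev(dev);
 *		return err;
 *	}
 */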
10412
10413int netdev_refcnt_read(const struct net_device *dev)
10414{
10415#ifdef CONFIG_PCPU_DEV_REFCNT
10416 int i, refcnt = 0;
10417
10418 for_each_possible_cpu(i)
10419 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
10420 return refcnt;
10421#else
10422 return refcount_read(&dev->dev_refcnt);
10423#endif
10424}
10425EXPORT_SYMBOL(netdev_refcnt_read);
10426
10427int netdev_unregister_timeout_secs __read_mostly = 10;
10428
10429#define WAIT_REFS_MIN_MSECS 1
10430#define WAIT_REFS_MAX_MSECS 250
10431/**
10432 * netdev_wait_allrefs_any - wait until all references are gone.
10433 * @list: list of net_devices to wait on
10434 *
10435 * This is called when unregistering network devices.
10436 *
10437 * Any protocol or device that holds a reference should register
10438 * for netdevice notification, and cleanup and put back the
10439 * reference if they receive an UNREGISTER event.
10440 * We can get stuck here if buggy protocols don't correctly
10441 * call dev_put.
10442 */
10443static struct net_device *netdev_wait_allrefs_any(struct list_head *list)
10444{
10445 unsigned long rebroadcast_time, warning_time;
10446 struct net_device *dev;
10447 int wait = 0;
10448
10449 rebroadcast_time = warning_time = jiffies;
10450
10451 list_for_each_entry(dev, list, todo_list)
10452 if (netdev_refcnt_read(dev) == 1)
10453 return dev;
10454
10455 while (true) {
10456 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
10457 rtnl_lock();
10458
10459 /* Rebroadcast unregister notification */
10460 list_for_each_entry(dev, list, todo_list)
10461 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
10462
10463 __rtnl_unlock();
10464 rcu_barrier();
10465 rtnl_lock();
10466
10467 list_for_each_entry(dev, list, todo_list)
10468 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
10469 &dev->state)) {
10470 /* We must not have linkwatch events
10471 * pending on unregister. If this
10472 * happens, we simply run the queue
10473 * unscheduled, resulting in a noop
10474 * for this device.
10475 */
10476 linkwatch_run_queue();
10477 break;
10478 }
10479
10480 __rtnl_unlock();
10481
10482 rebroadcast_time = jiffies;
10483 }
10484
10485 if (!wait) {
10486 rcu_barrier();
10487 wait = WAIT_REFS_MIN_MSECS;
10488 } else {
10489 msleep(wait);
10490 wait = min(wait << 1, WAIT_REFS_MAX_MSECS);
10491 }
10492
10493 list_for_each_entry(dev, list, todo_list)
10494 if (netdev_refcnt_read(dev) == 1)
10495 return dev;
10496
10497 if (time_after(jiffies, warning_time +
10498 READ_ONCE(netdev_unregister_timeout_secs) * HZ)) {
10499 list_for_each_entry(dev, list, todo_list) {
10500 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
10501 dev->name, netdev_refcnt_read(dev));
10502 ref_tracker_dir_print(&dev->refcnt_tracker, 10);
10503 }
10504
10505 warning_time = jiffies;
10506 }
10507 }
10508}
10509
10510/* The sequence is:
10511 *
10512 * rtnl_lock();
10513 * ...
10514 * register_netdevice(x1);
10515 * register_netdevice(x2);
10516 * ...
10517 * unregister_netdevice(y1);
10518 * unregister_netdevice(y2);
10519 * ...
10520 * rtnl_unlock();
10521 * free_netdev(y1);
10522 * free_netdev(y2);
10523 *
10524 * We are invoked by rtnl_unlock().
10525 * This allows us to deal with problems:
10526 * 1) We can delete sysfs objects which invoke hotplug
10527 * without deadlocking with linkwatch via keventd.
10528 * 2) Since we run with the RTNL semaphore not held, we can sleep
10529 * safely in order to wait for the netdev refcnt to drop to zero.
10530 *
10531 * We must not return until all unregister events added during
10532 * the interval the lock was held have been completed.
10533 */
10534void netdev_run_todo(void)
10535{
10536 struct net_device *dev, *tmp;
10537 struct list_head list;
10538 int cnt;
10539#ifdef CONFIG_LOCKDEP
10540 struct list_head unlink_list;
10541
10542 list_replace_init(&net_unlink_list, &unlink_list);
10543
10544 while (!list_empty(&unlink_list)) {
10545 struct net_device *dev = list_first_entry(&unlink_list,
10546 struct net_device,
10547 unlink_list);
10548 list_del_init(&dev->unlink_list);
10549 dev->nested_level = dev->lower_level - 1;
10550 }
10551#endif
10552
10553 /* Snapshot list, allow later requests */
10554 list_replace_init(&net_todo_list, &list);
10555
10556 __rtnl_unlock();
10557
10558 /* Wait for rcu callbacks to finish before next phase */
10559 if (!list_empty(&list))
10560 rcu_barrier();
10561
10562 list_for_each_entry_safe(dev, tmp, &list, todo_list) {
10563 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
10564 netdev_WARN(dev, "run_todo but not unregistering\n");
10565 list_del(&dev->todo_list);
10566 continue;
10567 }
10568
10569 WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERED);
10570 linkwatch_sync_dev(dev);
10571 }
10572
10573 cnt = 0;
10574 while (!list_empty(&list)) {
10575 dev = netdev_wait_allrefs_any(&list);
10576 list_del(&dev->todo_list);
10577
10578 /* paranoia */
10579 BUG_ON(netdev_refcnt_read(dev) != 1);
10580 BUG_ON(!list_empty(&dev->ptype_all));
10581 BUG_ON(!list_empty(&dev->ptype_specific));
10582 WARN_ON(rcu_access_pointer(dev->ip_ptr));
10583 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
10584
10585 netdev_do_free_pcpu_stats(dev);
10586 if (dev->priv_destructor)
10587 dev->priv_destructor(dev);
10588 if (dev->needs_free_netdev)
10589 free_netdev(dev);
10590
10591 cnt++;
10592
10593 /* Free network device */
10594 kobject_put(&dev->dev.kobj);
10595 }
10596 if (cnt && atomic_sub_and_test(cnt, &dev_unreg_count))
10597 wake_up(&netdev_unregistering_wq);
10598}
10599
10600/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
10601 * all the same fields in the same order as net_device_stats, with only
10602 * the type differing, but rtnl_link_stats64 may have additional fields
10603 * at the end for newer counters.
10604 */
10605void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
10606 const struct net_device_stats *netdev_stats)
10607{
10608 size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t);
10609 const atomic_long_t *src = (atomic_long_t *)netdev_stats;
10610 u64 *dst = (u64 *)stats64;
10611
10612 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
10613 for (i = 0; i < n; i++)
10614 dst[i] = (unsigned long)atomic_long_read(&src[i]);
10615 /* zero out counters that only exist in rtnl_link_stats64 */
10616 memset((char *)stats64 + n * sizeof(u64), 0,
10617 sizeof(*stats64) - n * sizeof(u64));
10618}
10619EXPORT_SYMBOL(netdev_stats_to_stats64);
10620
10621static __cold struct net_device_core_stats __percpu *netdev_core_stats_alloc(
10622 struct net_device *dev)
10623{
10624 struct net_device_core_stats __percpu *p;
10625
10626 p = alloc_percpu_gfp(struct net_device_core_stats,
10627 GFP_ATOMIC | __GFP_NOWARN);
10628
10629 if (p && cmpxchg(&dev->core_stats, NULL, p))
10630 free_percpu(p);
10631
10632 /* This READ_ONCE() pairs with the cmpxchg() above */
10633 return READ_ONCE(dev->core_stats);
10634}
10635
10636noinline void netdev_core_stats_inc(struct net_device *dev, u32 offset)
10637{
10638 /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
10639 struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats);
10640 unsigned long __percpu *field;
10641
10642 if (unlikely(!p)) {
10643 p = netdev_core_stats_alloc(dev);
10644 if (!p)
10645 return;
10646 }
10647
10648 field = (__force unsigned long __percpu *)((__force void *)p + offset);
10649 this_cpu_inc(*field);
10650}
10651EXPORT_SYMBOL_GPL(netdev_core_stats_inc);
10652
10653/**
10654 * dev_get_stats - get network device statistics
10655 * @dev: device to get statistics from
10656 * @storage: place to store stats
10657 *
10658 * Get network statistics from device. Return @storage.
10659 * The device driver may provide its own method by setting
10660 * dev->netdev_ops->ndo_get_stats64 or dev->netdev_ops->ndo_get_stats;
10661 * otherwise the internal statistics structure is used.
10662 */
10663struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
10664 struct rtnl_link_stats64 *storage)
10665{
10666 const struct net_device_ops *ops = dev->netdev_ops;
10667 const struct net_device_core_stats __percpu *p;
10668
10669 if (ops->ndo_get_stats64) {
10670 memset(storage, 0, sizeof(*storage));
10671 ops->ndo_get_stats64(dev, storage);
10672 } else if (ops->ndo_get_stats) {
10673 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
10674 } else if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_TSTATS) {
10675 dev_get_tstats64(dev, storage);
10676 } else {
10677 netdev_stats_to_stats64(storage, &dev->stats);
10678 }
10679
10680 /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
10681 p = READ_ONCE(dev->core_stats);
10682 if (p) {
10683 const struct net_device_core_stats *core_stats;
10684 int i;
10685
10686 for_each_possible_cpu(i) {
10687 core_stats = per_cpu_ptr(p, i);
10688 storage->rx_dropped += READ_ONCE(core_stats->rx_dropped);
10689 storage->tx_dropped += READ_ONCE(core_stats->tx_dropped);
10690 storage->rx_nohandler += READ_ONCE(core_stats->rx_nohandler);
10691 storage->rx_otherhost_dropped += READ_ONCE(core_stats->rx_otherhost_dropped);
10692 }
10693 }
10694 return storage;
10695}
10696EXPORT_SYMBOL(dev_get_stats);
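
/*
 * A minimal caller sketch: readers such as rtnetlink or bonding pass a
 * caller-provided buffer and use the returned pointer directly; the device
 * must be kept reachable (e.g. under RTNL or while holding a reference).
 * The slave_dev/tx_total/rx_total names are hypothetical.
 *
 *	struct rtnl_link_stats64 temp;
 *	const struct rtnl_link_stats64 *stats = dev_get_stats(slave_dev, &temp);
 *
 *	tx_total += stats->tx_bytes;
 *	rx_total += stats->rx_bytes;
 */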
10697
10698/**
10699 * dev_fetch_sw_netstats - get per-cpu network device statistics
10700 * @s: place to store stats
10701 * @netstats: per-cpu network stats to read from
10702 *
10703 * Read per-cpu network statistics and populate the related fields in @s.
10704 */
10705void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
10706 const struct pcpu_sw_netstats __percpu *netstats)
10707{
10708 int cpu;
10709
10710 for_each_possible_cpu(cpu) {
10711 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
10712 const struct pcpu_sw_netstats *stats;
10713 unsigned int start;
10714
10715 stats = per_cpu_ptr(netstats, cpu);
10716 do {
10717 start = u64_stats_fetch_begin(&stats->syncp);
10718 rx_packets = u64_stats_read(&stats->rx_packets);
10719 rx_bytes = u64_stats_read(&stats->rx_bytes);
10720 tx_packets = u64_stats_read(&stats->tx_packets);
10721 tx_bytes = u64_stats_read(&stats->tx_bytes);
10722 } while (u64_stats_fetch_retry(&stats->syncp, start));
10723
10724 s->rx_packets += rx_packets;
10725 s->rx_bytes += rx_bytes;
10726 s->tx_packets += tx_packets;
10727 s->tx_bytes += tx_bytes;
10728 }
10729}
10730EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats);
10731
10732/**
10733 * dev_get_tstats64 - ndo_get_stats64 implementation
10734 * @dev: device to get statistics from
10735 * @s: place to store stats
10736 *
10737 * Populate @s from dev->stats and dev->tstats. Can be used as
10738 * ndo_get_stats64() callback.
10739 */
10740void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s)
10741{
10742 netdev_stats_to_stats64(s, &dev->stats);
10743 dev_fetch_sw_netstats(s, dev->tstats);
10744}
10745EXPORT_SYMBOL_GPL(dev_get_tstats64);
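
/*
 * A minimal sketch, assuming a driver that maintains dev->tstats (either
 * allocated itself or via NETDEV_PCPU_STAT_TSTATS): it can export those
 * counters verbatim by wiring this helper into its ops. The foo_* names are
 * hypothetical.
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_start_xmit	 = foo_xmit,
 *		.ndo_get_stats64 = dev_get_tstats64,
 *	};
 */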
10746
10747struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
10748{
10749 struct netdev_queue *queue = dev_ingress_queue(dev);
10750
10751#ifdef CONFIG_NET_CLS_ACT
10752 if (queue)
10753 return queue;
10754 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
10755 if (!queue)
10756 return NULL;
10757 netdev_init_one_queue(dev, queue, NULL);
10758 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
10759 RCU_INIT_POINTER(queue->qdisc_sleeping, &noop_qdisc);
10760 rcu_assign_pointer(dev->ingress_queue, queue);
10761#endif
10762 return queue;
10763}
10764
10765static const struct ethtool_ops default_ethtool_ops;
10766
10767void netdev_set_default_ethtool_ops(struct net_device *dev,
10768 const struct ethtool_ops *ops)
10769{
10770 if (dev->ethtool_ops == &default_ethtool_ops)
10771 dev->ethtool_ops = ops;
10772}
10773EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
10774
10775/**
10776 * netdev_sw_irq_coalesce_default_on() - enable SW IRQ coalescing by default
10777 * @dev: netdev to enable the IRQ coalescing on
10778 *
10779 * Sets a conservative default for SW IRQ coalescing. Users can use
10780 * sysfs attributes to override the default values.
10781 */
10782void netdev_sw_irq_coalesce_default_on(struct net_device *dev)
10783{
10784 WARN_ON(dev->reg_state == NETREG_REGISTERED);
10785
10786 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
10787 dev->gro_flush_timeout = 20000;
10788 dev->napi_defer_hard_irqs = 1;
10789 }
10790}
10791EXPORT_SYMBOL_GPL(netdev_sw_irq_coalesce_default_on);
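
/*
 * A minimal sketch: a driver that wants these defaults calls the helper
 * between allocation and registration, e.g. in probe (foo_priv is a
 * hypothetical name).
 *
 *	dev = alloc_etherdev(sizeof(struct foo_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	netdev_sw_irq_coalesce_default_on(dev);	// must precede register_netdev()
 */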
10792
10793void netdev_freemem(struct net_device *dev)
10794{
10795 char *addr = (char *)dev - dev->padded;
10796
10797 kvfree(addr);
10798}
10799
10800/**
10801 * alloc_netdev_mqs - allocate network device
10802 * @sizeof_priv: size of private data to allocate space for
10803 * @name: device name format string
10804 * @name_assign_type: origin of device name
10805 * @setup: callback to initialize device
10806 * @txqs: the number of TX subqueues to allocate
10807 * @rxqs: the number of RX subqueues to allocate
10808 *
10809 * Allocates a struct net_device with private data area for driver use
10810 * and performs basic initialization. Also allocates subqueue structs
10811 * for each queue on the device.
10812 */
10813struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
10814 unsigned char name_assign_type,
10815 void (*setup)(struct net_device *),
10816 unsigned int txqs, unsigned int rxqs)
10817{
10818 struct net_device *dev;
10819 unsigned int alloc_size;
10820 struct net_device *p;
10821
10822 BUG_ON(strlen(name) >= sizeof(dev->name));
10823
10824 if (txqs < 1) {
10825 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
10826 return NULL;
10827 }
10828
10829 if (rxqs < 1) {
10830 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
10831 return NULL;
10832 }
10833
10834 alloc_size = sizeof(struct net_device);
10835 if (sizeof_priv) {
10836 /* ensure 32-byte alignment of private area */
10837 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
10838 alloc_size += sizeof_priv;
10839 }
10840 /* ensure 32-byte alignment of whole construct */
10841 alloc_size += NETDEV_ALIGN - 1;
10842
10843 p = kvzalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
10844 if (!p)
10845 return NULL;
10846
10847 dev = PTR_ALIGN(p, NETDEV_ALIGN);
10848 dev->padded = (char *)dev - (char *)p;
10849
10850 ref_tracker_dir_init(&dev->refcnt_tracker, 128, name);
10851#ifdef CONFIG_PCPU_DEV_REFCNT
10852 dev->pcpu_refcnt = alloc_percpu(int);
10853 if (!dev->pcpu_refcnt)
10854 goto free_dev;
10855 __dev_hold(dev);
10856#else
10857 refcount_set(&dev->dev_refcnt, 1);
10858#endif
10859
10860 if (dev_addr_init(dev))
10861 goto free_pcpu;
10862
10863 dev_mc_init(dev);
10864 dev_uc_init(dev);
10865
10866 dev_net_set(dev, &init_net);
10867
10868 dev->gso_max_size = GSO_LEGACY_MAX_SIZE;
10869 dev->xdp_zc_max_segs = 1;
10870 dev->gso_max_segs = GSO_MAX_SEGS;
10871 dev->gro_max_size = GRO_LEGACY_MAX_SIZE;
10872 dev->gso_ipv4_max_size = GSO_LEGACY_MAX_SIZE;
10873 dev->gro_ipv4_max_size = GRO_LEGACY_MAX_SIZE;
10874 dev->tso_max_size = TSO_LEGACY_MAX_SIZE;
10875 dev->tso_max_segs = TSO_MAX_SEGS;
10876 dev->upper_level = 1;
10877 dev->lower_level = 1;
10878#ifdef CONFIG_LOCKDEP
10879 dev->nested_level = 0;
10880 INIT_LIST_HEAD(&dev->unlink_list);
10881#endif
10882
10883 INIT_LIST_HEAD(&dev->napi_list);
10884 INIT_LIST_HEAD(&dev->unreg_list);
10885 INIT_LIST_HEAD(&dev->close_list);
10886 INIT_LIST_HEAD(&dev->link_watch_list);
10887 INIT_LIST_HEAD(&dev->adj_list.upper);
10888 INIT_LIST_HEAD(&dev->adj_list.lower);
10889 INIT_LIST_HEAD(&dev->ptype_all);
10890 INIT_LIST_HEAD(&dev->ptype_specific);
10891 INIT_LIST_HEAD(&dev->net_notifier_list);
10892#ifdef CONFIG_NET_SCHED
10893 hash_init(dev->qdisc_hash);
10894#endif
10895 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
10896 setup(dev);
10897
10898 if (!dev->tx_queue_len) {
10899 dev->priv_flags |= IFF_NO_QUEUE;
10900 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
10901 }
10902
10903 dev->num_tx_queues = txqs;
10904 dev->real_num_tx_queues = txqs;
10905 if (netif_alloc_netdev_queues(dev))
10906 goto free_all;
10907
10908 dev->num_rx_queues = rxqs;
10909 dev->real_num_rx_queues = rxqs;
10910 if (netif_alloc_rx_queues(dev))
10911 goto free_all;
10912
10913 strcpy(dev->name, name);
10914 dev->name_assign_type = name_assign_type;
10915 dev->group = INIT_NETDEV_GROUP;
10916 if (!dev->ethtool_ops)
10917 dev->ethtool_ops = &default_ethtool_ops;
10918
10919 nf_hook_netdev_init(dev);
10920
10921 return dev;
10922
10923free_all:
10924 free_netdev(dev);
10925 return NULL;
10926
10927free_pcpu:
10928#ifdef CONFIG_PCPU_DEV_REFCNT
10929 free_percpu(dev->pcpu_refcnt);
10930free_dev:
10931#endif
10932 netdev_freemem(dev);
10933 return NULL;
10934}
10935EXPORT_SYMBOL(alloc_netdev_mqs);
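
/*
 * A minimal allocation sketch for a multi-queue driver (the FOO_* constants
 * and foo_priv are hypothetical): the queue counts passed here become
 * num_tx_queues/num_rx_queues and cannot grow after allocation, only be
 * trimmed at runtime via the real_num_*_queues setters.
 *
 *	dev = alloc_netdev_mqs(sizeof(struct foo_priv), "foo%d",
 *			       NET_NAME_ENUM, ether_setup,
 *			       FOO_MAX_TX_QUEUES, FOO_MAX_RX_QUEUES);
 *	if (!dev)
 *		return -ENOMEM;
 */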
10936
10937/**
10938 * free_netdev - free network device
10939 * @dev: device
10940 *
10941 * This function does the last stage of destroying an allocated device
10942 * interface. The reference to the device object is released. If this
10943 * is the last reference then it will be freed. Must be called in process
10944 * context.
10945 */
10946void free_netdev(struct net_device *dev)
10947{
10948 struct napi_struct *p, *n;
10949
10950 might_sleep();
10951
10952 /* When called immediately after register_netdevice() has failed, the unwind
10953 * handling may still be dismantling the device. Handle that case by
10954 * deferring the free.
10955 */
10956 if (dev->reg_state == NETREG_UNREGISTERING) {
10957 ASSERT_RTNL();
10958 dev->needs_free_netdev = true;
10959 return;
10960 }
10961
10962 netif_free_tx_queues(dev);
10963 netif_free_rx_queues(dev);
10964
10965 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
10966
10967 /* Flush device addresses */
10968 dev_addr_flush(dev);
10969
10970 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
10971 netif_napi_del(p);
10972
10973 ref_tracker_dir_exit(&dev->refcnt_tracker);
10974#ifdef CONFIG_PCPU_DEV_REFCNT
10975 free_percpu(dev->pcpu_refcnt);
10976 dev->pcpu_refcnt = NULL;
10977#endif
10978 free_percpu(dev->core_stats);
10979 dev->core_stats = NULL;
10980 free_percpu(dev->xdp_bulkq);
10981 dev->xdp_bulkq = NULL;
10982
10983 /* Compatibility with error handling in drivers */
10984 if (dev->reg_state == NETREG_UNINITIALIZED) {
10985 netdev_freemem(dev);
10986 return;
10987 }
10988
10989 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
10990 WRITE_ONCE(dev->reg_state, NETREG_RELEASED);
10991
10992 /* will free via device release */
10993 put_device(&dev->dev);
10994}
10995EXPORT_SYMBOL(free_netdev);
10996
10997/**
10998 * synchronize_net - Synchronize with packet receive processing
10999 *
11000 * Wait for packets currently being received to be done.
11001 * Does not block later packets from starting.
11002 */
11003void synchronize_net(void)
11004{
11005 might_sleep();
11006 if (rtnl_is_locked())
11007 synchronize_rcu_expedited();
11008 else
11009 synchronize_rcu();
11010}
11011EXPORT_SYMBOL(synchronize_net);
11012
11013/**
11014 * unregister_netdevice_queue - remove device from the kernel
11015 * @dev: device
11016 * @head: list
11017 *
11018 * This function shuts down a device interface and removes it
11019 * from the kernel tables.
11020 * If @head is not NULL, the device is queued to be unregistered later.
11021 *
11022 * Callers must hold the rtnl semaphore. You may want
11023 * unregister_netdev() instead of this.
11024 */
11025
11026void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
11027{
11028 ASSERT_RTNL();
11029
11030 if (head) {
11031 list_move_tail(&dev->unreg_list, head);
11032 } else {
11033 LIST_HEAD(single);
11034
11035 list_add(&dev->unreg_list, &single);
11036 unregister_netdevice_many(&single);
11037 }
11038}
11039EXPORT_SYMBOL(unregister_netdevice_queue);
11040
11041void unregister_netdevice_many_notify(struct list_head *head,
11042 u32 portid, const struct nlmsghdr *nlh)
11043{
11044 struct net_device *dev, *tmp;
11045 LIST_HEAD(close_head);
11046 int cnt = 0;
11047
11048 BUG_ON(dev_boot_phase);
11049 ASSERT_RTNL();
11050
11051 if (list_empty(head))
11052 return;
11053
11054 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
11055 /* Some devices reach this point without ever having been
11056 * registered, as part of initialization unwind. Remove those
11057 * devices and proceed with the remaining ones.
11058 */
11059 if (dev->reg_state == NETREG_UNINITIALIZED) {
11060 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
11061 dev->name, dev);
11062
11063 WARN_ON(1);
11064 list_del(&dev->unreg_list);
11065 continue;
11066 }
11067 dev->dismantle = true;
11068 BUG_ON(dev->reg_state != NETREG_REGISTERED);
11069 }
11070
11071 /* If device is running, close it first. */
11072 list_for_each_entry(dev, head, unreg_list)
11073 list_add_tail(&dev->close_list, &close_head);
11074 dev_close_many(&close_head, true);
11075
11076 list_for_each_entry(dev, head, unreg_list) {
11077 /* And unlink it from device chain. */
11078 unlist_netdevice(dev);
11079 WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERING);
11080 }
11081 flush_all_backlogs();
11082
11083 synchronize_net();
11084
11085 list_for_each_entry(dev, head, unreg_list) {
11086 struct sk_buff *skb = NULL;
11087
11088 /* Shutdown queueing discipline. */
11089 dev_shutdown(dev);
11090 dev_tcx_uninstall(dev);
11091 dev_xdp_uninstall(dev);
11092 bpf_dev_bound_netdev_unregister(dev);
11093
11094 netdev_offload_xstats_disable_all(dev);
11095
11096 /* Notify protocols that we are about to destroy
11097 * this device. They should clean up all their state.
11098 */
11099 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
11100
11101 if (!dev->rtnl_link_ops ||
11102 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
11103 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
11104 GFP_KERNEL, NULL, 0,
11105 portid, nlh);
11106
11107 /*
11108 * Flush the unicast and multicast chains
11109 */
11110 dev_uc_flush(dev);
11111 dev_mc_flush(dev);
11112
11113 netdev_name_node_alt_flush(dev);
11114 netdev_name_node_free(dev->name_node);
11115
11116 call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
11117
11118 if (dev->netdev_ops->ndo_uninit)
11119 dev->netdev_ops->ndo_uninit(dev);
11120
11121 if (skb)
11122 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL, portid, nlh);
11123
11124 /* The notifier chain MUST have detached all our upper/lower devices. */
11125 WARN_ON(netdev_has_any_upper_dev(dev));
11126 WARN_ON(netdev_has_any_lower_dev(dev));
11127
11128 /* Remove entries from kobject tree */
11129 netdev_unregister_kobject(dev);
11130#ifdef CONFIG_XPS
11131 /* Remove XPS queueing entries */
11132 netif_reset_xps_queues_gt(dev, 0);
11133#endif
11134 }
11135
11136 synchronize_net();
11137
11138 list_for_each_entry(dev, head, unreg_list) {
11139 netdev_put(dev, &dev->dev_registered_tracker);
11140 net_set_todo(dev);
11141 cnt++;
11142 }
11143 atomic_add(cnt, &dev_unreg_count);
11144
11145 list_del(head);
11146}
11147
11148/**
11149 * unregister_netdevice_many - unregister many devices
11150 * @head: list of devices
11151 *
11152 * Note: As most callers use a stack-allocated list_head,
11153 * we force a list_del() to make sure the stack won't be corrupted later.
11154 */
11155void unregister_netdevice_many(struct list_head *head)
11156{
11157 unregister_netdevice_many_notify(head, 0, NULL);
11158}
11159EXPORT_SYMBOL(unregister_netdevice_many);
11160
11161/**
11162 * unregister_netdev - remove device from the kernel
11163 * @dev: device
11164 *
11165 * This function shuts down a device interface and removes it
11166 * from the kernel tables.
11167 *
11168 * This is just a wrapper for unregister_netdevice that takes
11169 * the rtnl semaphore. In general you want to use this and not
11170 * unregister_netdevice.
11171 */
11172void unregister_netdev(struct net_device *dev)
11173{
11174 rtnl_lock();
11175 unregister_netdevice(dev);
11176 rtnl_unlock();
11177}
11178EXPORT_SYMBOL(unregister_netdev);
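
/*
 * A minimal teardown sketch mirroring register_netdev() in a driver's remove
 * path (foo_remove/foo_hw are hypothetical): unregister first, which may
 * sleep and fires the notifier chain, then release the structure.
 *
 *	static void foo_remove(struct foo_hw *hw)
 *	{
 *		unregister_netdev(hw->dev);
 *		free_netdev(hw->dev);
 *	}
 */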
11179
11180/**
11181 * __dev_change_net_namespace - move device to a different network namespace
11182 * @dev: device
11183 * @net: network namespace
11184 * @pat: If not NULL name pattern to try if the current device name
11185 * is already taken in the destination network namespace.
11186 * @new_ifindex: If not zero, specifies device index in the target
11187 * namespace.
11188 *
11189 * This function shuts down a device interface and moves it
11190 * to a new network namespace. On success 0 is returned, on
11191 * a failure a negative errno code is returned.
11192 *
11193 * Callers must hold the rtnl semaphore.
11194 */
11195
11196int __dev_change_net_namespace(struct net_device *dev, struct net *net,
11197 const char *pat, int new_ifindex)
11198{
11199 struct netdev_name_node *name_node;
11200 struct net *net_old = dev_net(dev);
11201 char new_name[IFNAMSIZ] = {};
11202 int err, new_nsid;
11203
11204 ASSERT_RTNL();
11205
11206 /* Don't allow namespace local devices to be moved. */
11207 err = -EINVAL;
11208 if (dev->features & NETIF_F_NETNS_LOCAL)
11209 goto out;
11210
11211 /* Ensure the device has been registered */
11212 if (dev->reg_state != NETREG_REGISTERED)
11213 goto out;
11214
11215 /* Get out if there is nothing to do */
11216 err = 0;
11217 if (net_eq(net_old, net))
11218 goto out;
11219
11220 /* Pick the destination device name, and ensure
11221 * we can use it in the destination network namespace.
11222 */
11223 err = -EEXIST;
11224 if (netdev_name_in_use(net, dev->name)) {
11225 /* We get here if we can't use the current device name */
11226 if (!pat)
11227 goto out;
11228 err = dev_prep_valid_name(net, dev, pat, new_name, EEXIST);
11229 if (err < 0)
11230 goto out;
11231 }
11232 /* Check that none of the altnames conflicts. */
11233 err = -EEXIST;
11234 netdev_for_each_altname(dev, name_node)
11235 if (netdev_name_in_use(net, name_node->name))
11236 goto out;
11237
11238 /* Check that new_ifindex isn't used yet. */
11239 if (new_ifindex) {
11240 err = dev_index_reserve(net, new_ifindex);
11241 if (err < 0)
11242 goto out;
11243 } else {
11244 /* If there is an ifindex conflict assign a new one */
11245 err = dev_index_reserve(net, dev->ifindex);
11246 if (err == -EBUSY)
11247 err = dev_index_reserve(net, 0);
11248 if (err < 0)
11249 goto out;
11250 new_ifindex = err;
11251 }
11252
11253 /*
11254 * And now a mini version of register_netdevice() and unregister_netdevice().
11255 */
11256
11257 /* If device is running close it first. */
11258 dev_close(dev);
11259
11260 /* And unlink it from device chain */
11261 unlist_netdevice(dev);
11262
11263 synchronize_net();
11264
11265 /* Shutdown queueing discipline. */
11266 dev_shutdown(dev);
11267
11268 /* Notify protocols that we are about to destroy
11269 * this device. They should clean up all their state.
11270 *
11271 * Note that dev->reg_state stays at NETREG_REGISTERED.
11272 * This is intentional, so that 8021q and macvlan know
11273 * the device is just moving and can keep their slaves up.
11274 */
11275 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
11276 rcu_barrier();
11277
11278 new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
11279
11280 rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
11281 new_ifindex);
11282
11283 /*
11284 * Flush the unicast and multicast chains
11285 */
11286 dev_uc_flush(dev);
11287 dev_mc_flush(dev);
11288
11289 /* Send a netdev-removed uevent to the old namespace */
11290 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
11291 netdev_adjacent_del_links(dev);
11292
11293 /* Move per-net netdevice notifiers that are following the netdevice */
11294 move_netdevice_notifiers_dev_net(dev, net);
11295
11296 /* Actually switch the network namespace */
11297 dev_net_set(dev, net);
11298 dev->ifindex = new_ifindex;
11299
11300 if (new_name[0]) /* Rename the netdev to prepared name */
11301 strscpy(dev->name, new_name, IFNAMSIZ);
11302
11303 /* Fixup kobjects */
11304 dev_set_uevent_suppress(&dev->dev, 1);
11305 err = device_rename(&dev->dev, dev->name);
11306 dev_set_uevent_suppress(&dev->dev, 0);
11307 WARN_ON(err);
11308
11309 /* Send a netdev-add uevent to the new namespace */
11310 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
11311 netdev_adjacent_add_links(dev);
11312
11313 /* Adapt owner in case owning user namespace of target network
11314 * namespace is different from the original one.
11315 */
11316 err = netdev_change_owner(dev, net_old, net);
11317 WARN_ON(err);
11318
11319 /* Add the device back in the hashes */
11320 list_netdevice(dev);
11321
11322 /* Notify protocols that a new device appeared. */
11323 call_netdevice_notifiers(NETDEV_REGISTER, dev);
11324
11325 /*
11326 * Prevent userspace races by waiting until the network
11327 * device is fully setup before sending notifications.
11328 */
11329 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL);
11330
11331 synchronize_net();
11332 err = 0;
11333out:
11334 return err;
11335}
11336EXPORT_SYMBOL_GPL(__dev_change_net_namespace);
11337
11338static int dev_cpu_dead(unsigned int oldcpu)
11339{
11340 struct sk_buff **list_skb;
11341 struct sk_buff *skb;
11342 unsigned int cpu;
11343 struct softnet_data *sd, *oldsd, *remsd = NULL;
11344
11345 local_irq_disable();
11346 cpu = smp_processor_id();
11347 sd = &per_cpu(softnet_data, cpu);
11348 oldsd = &per_cpu(softnet_data, oldcpu);
11349
11350 /* Find end of our completion_queue. */
11351 list_skb = &sd->completion_queue;
11352 while (*list_skb)
11353 list_skb = &(*list_skb)->next;
11354 /* Append completion queue from offline CPU. */
11355 *list_skb = oldsd->completion_queue;
11356 oldsd->completion_queue = NULL;
11357
11358 /* Append output queue from offline CPU. */
11359 if (oldsd->output_queue) {
11360 *sd->output_queue_tailp = oldsd->output_queue;
11361 sd->output_queue_tailp = oldsd->output_queue_tailp;
11362 oldsd->output_queue = NULL;
11363 oldsd->output_queue_tailp = &oldsd->output_queue;
11364 }
11365 /* Append NAPI poll list from offline CPU, with one exception:
11366 * process_backlog() must be called by the CPU owning the per-CPU backlog.
11367 * We properly handle process_queue & input_pkt_queue later.
11368 */
11369 while (!list_empty(&oldsd->poll_list)) {
11370 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
11371 struct napi_struct,
11372 poll_list);
11373
11374 list_del_init(&napi->poll_list);
11375 if (napi->poll == process_backlog)
11376 napi->state = 0;
11377 else
11378 ____napi_schedule(sd, napi);
11379 }
11380
11381 raise_softirq_irqoff(NET_TX_SOFTIRQ);
11382 local_irq_enable();
11383
11384#ifdef CONFIG_RPS
11385 remsd = oldsd->rps_ipi_list;
11386 oldsd->rps_ipi_list = NULL;
11387#endif
11388 /* send out pending IPI's on offline CPU */
11389 net_rps_send_ipi(remsd);
11390
11391 /* Process offline CPU's input_pkt_queue */
11392 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
11393 netif_rx(skb);
11394 input_queue_head_incr(oldsd);
11395 }
11396 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
11397 netif_rx(skb);
11398 input_queue_head_incr(oldsd);
11399 }
11400
11401 return 0;
11402}
11403
11404/**
11405 * netdev_increment_features - increment feature set by one
11406 * @all: current feature set
11407 * @one: new feature set
11408 * @mask: mask feature set
11409 *
11410 * Computes a new feature set after adding a device with feature set
11411 * @one to the master device with current feature set @all. Will not
11412 * enable anything that is off in @mask. Returns the new feature set.
11413 */
11414netdev_features_t netdev_increment_features(netdev_features_t all,
11415 netdev_features_t one, netdev_features_t mask)
11416{
11417 if (mask & NETIF_F_HW_CSUM)
11418 mask |= NETIF_F_CSUM_MASK;
11419 mask |= NETIF_F_VLAN_CHALLENGED;
11420
11421 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
11422 all &= one | ~NETIF_F_ALL_FOR_ALL;
11423
11424 /* If one device supports hw checksumming, set for all. */
11425 if (all & NETIF_F_HW_CSUM)
11426 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
11427
11428 return all;
11429}
11430EXPORT_SYMBOL(netdev_increment_features);
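
/*
 * A minimal sketch of the intended caller pattern, in the style of bonding
 * or team (the foo_* names and FOO_FEATURE_MASK are hypothetical): the
 * master starts from a mask, folds in each lower device's features, and then
 * propagates the result.
 *
 *	netdev_features_t vlan_features = FOO_FEATURE_MASK & NETIF_F_ALL_FOR_ALL;
 *	struct foo_slave *slave;
 *
 *	list_for_each_entry(slave, &priv->slaves, list)
 *		vlan_features = netdev_increment_features(vlan_features,
 *							  slave->dev->vlan_features,
 *							  FOO_FEATURE_MASK);
 *	master->vlan_features = vlan_features;
 *	netdev_change_features(master);
 */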
11431
11432static struct hlist_head * __net_init netdev_create_hash(void)
11433{
11434 int i;
11435 struct hlist_head *hash;
11436
11437 hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
11438 if (hash != NULL)
11439 for (i = 0; i < NETDEV_HASHENTRIES; i++)
11440 INIT_HLIST_HEAD(&hash[i]);
11441
11442 return hash;
11443}
11444
11445/* Initialize per network namespace state */
11446static int __net_init netdev_init(struct net *net)
11447{
11448 BUILD_BUG_ON(GRO_HASH_BUCKETS >
11449 8 * sizeof_field(struct napi_struct, gro_bitmask));
11450
11451 INIT_LIST_HEAD(&net->dev_base_head);
11452
11453 net->dev_name_head = netdev_create_hash();
11454 if (net->dev_name_head == NULL)
11455 goto err_name;
11456
11457 net->dev_index_head = netdev_create_hash();
11458 if (net->dev_index_head == NULL)
11459 goto err_idx;
11460
11461 xa_init_flags(&net->dev_by_index, XA_FLAGS_ALLOC1);
11462
11463 RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain);
11464
11465 return 0;
11466
11467err_idx:
11468 kfree(net->dev_name_head);
11469err_name:
11470 return -ENOMEM;
11471}
11472
11473/**
11474 * netdev_drivername - network driver for the device
11475 * @dev: network device
11476 *
11477 * Determine network driver for device.
11478 */
11479const char *netdev_drivername(const struct net_device *dev)
11480{
11481 const struct device_driver *driver;
11482 const struct device *parent;
11483 const char *empty = "";
11484
11485 parent = dev->dev.parent;
11486 if (!parent)
11487 return empty;
11488
11489 driver = parent->driver;
11490 if (driver && driver->name)
11491 return driver->name;
11492 return empty;
11493}
11494
11495static void __netdev_printk(const char *level, const struct net_device *dev,
11496 struct va_format *vaf)
11497{
11498 if (dev && dev->dev.parent) {
11499 dev_printk_emit(level[1] - '0',
11500 dev->dev.parent,
11501 "%s %s %s%s: %pV",
11502 dev_driver_string(dev->dev.parent),
11503 dev_name(dev->dev.parent),
11504 netdev_name(dev), netdev_reg_state(dev),
11505 vaf);
11506 } else if (dev) {
11507 printk("%s%s%s: %pV",
11508 level, netdev_name(dev), netdev_reg_state(dev), vaf);
11509 } else {
11510 printk("%s(NULL net_device): %pV", level, vaf);
11511 }
11512}
11513
11514void netdev_printk(const char *level, const struct net_device *dev,
11515 const char *format, ...)
11516{
11517 struct va_format vaf;
11518 va_list args;
11519
11520 va_start(args, format);
11521
11522 vaf.fmt = format;
11523 vaf.va = &args;
11524
11525 __netdev_printk(level, dev, &vaf);
11526
11527 va_end(args);
11528}
11529EXPORT_SYMBOL(netdev_printk);
11530
11531#define define_netdev_printk_level(func, level) \
11532void func(const struct net_device *dev, const char *fmt, ...) \
11533{ \
11534 struct va_format vaf; \
11535 va_list args; \
11536 \
11537 va_start(args, fmt); \
11538 \
11539 vaf.fmt = fmt; \
11540 vaf.va = &args; \
11541 \
11542 __netdev_printk(level, dev, &vaf); \
11543 \
11544 va_end(args); \
11545} \
11546EXPORT_SYMBOL(func);
11547
11548define_netdev_printk_level(netdev_emerg, KERN_EMERG);
11549define_netdev_printk_level(netdev_alert, KERN_ALERT);
11550define_netdev_printk_level(netdev_crit, KERN_CRIT);
11551define_netdev_printk_level(netdev_err, KERN_ERR);
11552define_netdev_printk_level(netdev_warn, KERN_WARNING);
11553define_netdev_printk_level(netdev_notice, KERN_NOTICE);
11554define_netdev_printk_level(netdev_info, KERN_INFO);
11555
11556static void __net_exit netdev_exit(struct net *net)
11557{
11558 kfree(net->dev_name_head);
11559 kfree(net->dev_index_head);
11560 xa_destroy(&net->dev_by_index);
11561 if (net != &init_net)
11562 WARN_ON_ONCE(!list_empty(&net->dev_base_head));
11563}
11564
11565static struct pernet_operations __net_initdata netdev_net_ops = {
11566 .init = netdev_init,
11567 .exit = netdev_exit,
11568};
11569
11570static void __net_exit default_device_exit_net(struct net *net)
11571{
11572 struct netdev_name_node *name_node, *tmp;
11573 struct net_device *dev, *aux;
11574 /*
11575 * Push all migratable network devices back to the
11576 * initial network namespace
11577 */
11578 ASSERT_RTNL();
11579 for_each_netdev_safe(net, dev, aux) {
11580 int err;
11581 char fb_name[IFNAMSIZ];
11582
11583 /* Ignore unmovable devices (e.g. loopback) */
11584 if (dev->features & NETIF_F_NETNS_LOCAL)
11585 continue;
11586
11587 /* Leave virtual devices for the generic cleanup */
11588 if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
11589 continue;
11590
11591 /* Push remaining network devices to init_net */
11592 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
11593 if (netdev_name_in_use(&init_net, fb_name))
11594 snprintf(fb_name, IFNAMSIZ, "dev%%d");
11595
11596 netdev_for_each_altname_safe(dev, name_node, tmp)
11597 if (netdev_name_in_use(&init_net, name_node->name))
11598 __netdev_name_node_alt_destroy(name_node);
11599
11600 err = dev_change_net_namespace(dev, &init_net, fb_name);
11601 if (err) {
11602 pr_emerg("%s: failed to move %s to init_net: %d\n",
11603 __func__, dev->name, err);
11604 BUG();
11605 }
11606 }
11607}
11608
11609static void __net_exit default_device_exit_batch(struct list_head *net_list)
11610{
11611 /* At exit all network devices must be removed from a network
11612 * namespace. Do this in the reverse order of registration.
11613 * Do this across as many network namespaces as possible to
11614 * improve batching efficiency.
11615 */
11616 struct net_device *dev;
11617 struct net *net;
11618 LIST_HEAD(dev_kill_list);
11619
11620 rtnl_lock();
11621 list_for_each_entry(net, net_list, exit_list) {
11622 default_device_exit_net(net);
11623 cond_resched();
11624 }
11625
11626 list_for_each_entry(net, net_list, exit_list) {
11627 for_each_netdev_reverse(net, dev) {
11628 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
11629 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
11630 else
11631 unregister_netdevice_queue(dev, &dev_kill_list);
11632 }
11633 }
11634 unregister_netdevice_many(&dev_kill_list);
11635 rtnl_unlock();
11636}
11637
11638static struct pernet_operations __net_initdata default_device_ops = {
11639 .exit_batch = default_device_exit_batch,
11640};
11641
static void __init net_dev_struct_check(void)
{
	/* TX read-mostly hotpath */
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, priv_flags);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, netdev_ops);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, header_ops);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, _tx);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, real_num_tx_queues);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_max_size);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_ipv4_max_size);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_max_segs);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_partial_features);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, num_tc);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, mtu);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, needed_headroom);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, tc_to_txq);
#ifdef CONFIG_XPS
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, xps_maps);
#endif
#ifdef CONFIG_NETFILTER_EGRESS
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, nf_hooks_egress);
#endif
#ifdef CONFIG_NET_XGRESS
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, tcx_egress);
#endif
	CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_tx, 160);

	/* TXRX read-mostly hotpath */
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, lstats);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, state);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, flags);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, hard_header_len);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, features);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, ip6_ptr);
	CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_txrx, 46);

	/* RX read-mostly hotpath */
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, ptype_specific);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, ifindex);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, real_num_rx_queues);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, _rx);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_flush_timeout);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, napi_defer_hard_irqs);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_max_size);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_ipv4_max_size);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, rx_handler);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, rx_handler_data);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, nd_net);
#ifdef CONFIG_NETPOLL
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, npinfo);
#endif
#ifdef CONFIG_NET_XGRESS
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, tcx_ingress);
#endif
	CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_rx, 104);
}

/*
 * Initialize the DEV module.  This runs once at boot: it sets up the
 * per-CPU packet receive queues and backlog NAPI contexts, registers the
 * procfs/sysfs hooks and pernet operations, and opens the NET_TX and
 * NET_RX softirqs that drive packet transmission and reception.
 */

/* We allocate 256 pages for each CPU if PAGE_SHIFT is 12 */
#define SYSTEM_PERCPU_PAGE_POOL_SIZE	((1 << 20) / PAGE_SIZE)

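/*
 * Create the per-CPU system page pool for @cpuid.  Each pool is sized to
 * SYSTEM_PERCPU_PAGE_POOL_SIZE pages, i.e. a 1 MiB budget per CPU: with
 * PAGE_SHIFT == 12 that is (1 << 20) / 4096 == 256 pages, and only 16
 * pages on a 64 KiB-page kernel.  When CONFIG_PAGE_POOL is disabled this
 * is a no-op that reports success.
 */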
static int net_page_pool_create(int cpuid)
{
#if IS_ENABLED(CONFIG_PAGE_POOL)
	struct page_pool_params page_pool_params = {
		.pool_size = SYSTEM_PERCPU_PAGE_POOL_SIZE,
		.flags = PP_FLAG_SYSTEM_POOL,
		.nid = NUMA_NO_NODE,
	};
	struct page_pool *pp_ptr;

	pp_ptr = page_pool_create_percpu(&page_pool_params, cpuid);
	if (IS_ERR(pp_ptr))
		return -ENOMEM;

	per_cpu(system_page_pool, cpuid) = pp_ptr;
#endif
	return 0;
}

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
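/*
 * net_dev_init() wires up the networking core in order: procfs and sysfs
 * entries, the ptype hash, the per-netns netdev state, one softnet_data
 * (backlog queues, backlog NAPI, per-CPU page pool) per possible CPU, the
 * loopback and default pernet device handlers, the NET_TX/NET_RX softirqs,
 * and finally the CPU-hotplug "dead" callback.
 */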
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	net_dev_struct_check();

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 * Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		INIT_WORK(flush, flush_backlog);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
#ifdef CONFIG_XFRM_OFFLOAD
		skb_queue_head_init(&sd->xfrm_backlog);
#endif
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
		sd->cpu = i;
#endif
		INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
		spin_lock_init(&sd->defer_lock);

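		/*
		 * The backlog is a software NAPI instance: packets queued by
		 * netif_rx() land on input_pkt_queue and are later drained by
		 * process_backlog(), with weight_p bounding how many packets
		 * are handled per poll round.
		 */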
		init_gro_hash(&sd->backlog);
		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;

		if (net_page_pool_create(i))
			goto out;
	}

	dev_boot_phase = 0;
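	/*
	 * Past this point netdevice notifiers registered via
	 * register_netdevice_notifier() are replayed against any devices
	 * that already exist; during the boot phase there are none, so the
	 * replay is skipped.
	 */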

	/* The loopback device is special.  If any other network device is
	 * present in a network namespace, the loopback device must be
	 * present too.  Since we now dynamically allocate and free the
	 * loopback device, ensure this invariant is maintained by keeping
	 * the loopback device as the first device on the list of network
	 * devices: it is the first device that appears and the last network
	 * device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

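	/*
	 * NET_TX_SOFTIRQ frees completed skbs and kicks qdisc transmission
	 * via net_tx_action(); NET_RX_SOFTIRQ runs net_rx_action(), which
	 * polls the NAPI instances queued on each CPU's poll_list.
	 */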
	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

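	/*
	 * When a CPU goes offline, dev_cpu_dead() migrates its pending
	 * skbs, output queue and NAPI poll list onto an online CPU so no
	 * packets are stranded.
	 */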
	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
				       NULL, dev_cpu_dead);
	WARN_ON(rc < 0);
	rc = 0;
out:
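	/*
	 * Error unwind: destroy any per-CPU system page pools that were
	 * created before the failure and clear their per-CPU pointers.
	 */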
	if (rc < 0) {
		for_each_possible_cpu(i) {
			struct page_pool *pp_ptr;

			pp_ptr = per_cpu(system_page_pool, i);
			if (!pp_ptr)
				continue;

			page_pool_destroy(pp_ptr);
			per_cpu(system_page_pool, i) = NULL;
		}
	}

	return rc;
}

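/*
 * subsys_initcall() runs this before device and late initcalls, so the
 * core is fully initialised by the time network drivers start calling
 * register_netdev().
 */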
subsys_initcall(net_dev_init);