/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>
#include <net/xdp.h>

#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
#include <linux/hashtable.h>

struct netpoll_info;
struct device;
struct phy_device;
struct dsa_port;
struct ip_tunnel_parm;
struct macsec_context;
struct macsec_ops;

struct sfp_bus;
/* 802.11 specific */
struct wireless_dev;
/* 802.15.4 specific */
struct wpan_dev;
struct mpls_dev;
/* UDP Tunnel offloads */
struct udp_tunnel_info;
struct udp_tunnel_nic_info;
struct udp_tunnel_nic;
struct bpf_prog;
struct xdp_buff;

void synchronize_net(void);
void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops);

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

#define MAX_NEST_DEV 8

/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped */
#define NET_XMIT_CN		0x02	/* congestion notification */
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
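
/*
 * Example (illustrative sketch, not part of the original header): a caller
 * that hands a packet to dev_queue_xmit() can use net_xmit_eval() to fold
 * NET_XMIT_CN into success, since a CN result means the skb was still
 * queued. my_proto_xmit() is a hypothetical name.
 *
 *	static int my_proto_xmit(struct sk_buff *skb)
 *	{
 *		int rc = dev_queue_xmit(skb);
 *
 *		return net_xmit_eval(rc);	(CN becomes 0; real errors pass through)
 *	}
 */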

/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}
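
/*
 * Example (sketch): callers that invoke a driver's xmit path directly can
 * use dev_xmit_complete() to decide whether they still own the skb. The
 * requeue helper here is hypothetical; netdev_start_xmit() is the stock
 * wrapper around ndo_start_xmit().
 *
 *	rc = netdev_start_xmit(skb, dev, txq, false);
 *	if (!dev_xmit_complete(rc)) {
 *		(driver returned NETDEV_TX_BUSY: the skb was not consumed
 *		 and the caller must requeue or free it)
 *		my_requeue_or_drop(skb);
 *	}
 */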

/*
 * Compute the worst-case header length according to the protocols
 * used.
 */

#if defined(CONFIG_HYPERV_NET)
# define LL_MAX_HEADER 128
#elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

/*
 * Old network device statistics. Fields are native words
 * (unsigned long) so they can be read and written atomically.
 */

struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};


#include <linux/cache.h>
#include <linux/skbuff.h>

#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key_false rps_needed;
extern struct static_key_false rfs_needed;
#endif

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_UNICAST	3
#define NETDEV_HW_ADDR_T_MULTICAST	4
	bool			global_use;
	int			sync_cnt;
	int			refcount;
	int			synced;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
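
/*
 * Example (sketch): a driver's ndo_set_rx_mode() implementation can walk
 * the current multicast list with netdev_for_each_mc_addr(). The filter
 * helpers here are hypothetical device-specific routines.
 *
 *	static void my_set_rx_mode(struct net_device *dev)
 *	{
 *		struct netdev_hw_addr *ha;
 *
 *		my_hw_clear_mc_filter(dev);
 *		netdev_for_each_mc_addr(ha, dev)
 *			my_hw_add_mc_filter(dev, ha->addr);
 *	}
 */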

struct hh_cache {
	unsigned int	hh_len;
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len) + (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len + (dev)->needed_headroom) & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len + (dev)->needed_headroom + (extra)) & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
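
/*
 * Example (sketch): when building an output packet by hand, reserve
 * LL_RESERVED_SPACE(dev) of headroom so the hard header can be pushed
 * later without reallocation; payload_len is a hypothetical length.
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return NULL;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */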

struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
	bool	(*validate)(const char *ll_header, unsigned int len);
	__be16	(*parse_protocol)(const struct sk_buff *skb);
};

/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
	__LINK_STATE_TESTING,
};


/*
 * This structure holds boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

int __init netdev_boot_setup(char *str);

struct gro_list {
	struct list_head	list;
	int			count;
};
/*
 * size of gro hash buckets, must be less than the number of bits in
 * napi_struct::gro_bitmask
 */
#define GRO_HASH_BUCKETS	8

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit. This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-CPU poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			defer_hard_irqs_count;
	unsigned long		gro_bitmask;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	int			poll_owner;
#endif
	struct net_device	*dev;
	struct gro_list		gro_hash[GRO_HASH_BUCKETS];
	struct sk_buff		*skb;
	struct list_head	rx_list; /* Pending GRO_NORMAL skbs */
	int			rx_count; /* length of rx_list */
	struct hrtimer		timer;
	struct list_head	dev_list;
	struct hlist_node	napi_hash_node;
	unsigned int		napi_id;
};
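
/*
 * Example (sketch): a driver typically embeds a napi_struct in its private
 * per-queue state and registers it with netif_napi_add() at probe time;
 * priv and my_poll are hypothetical.
 *
 *	netif_napi_add(dev, &priv->napi, my_poll, NAPI_POLL_WEIGHT);
 */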

enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_MISSED,	/* reschedule a napi */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_LISTED,	/* NAPI added to system lists */
	NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
	NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */
};

enum {
	NAPIF_STATE_SCHED	 = BIT(NAPI_STATE_SCHED),
	NAPIF_STATE_MISSED	 = BIT(NAPI_STATE_MISSED),
	NAPIF_STATE_DISABLE	 = BIT(NAPI_STATE_DISABLE),
	NAPIF_STATE_NPSVC	 = BIT(NAPI_STATE_NPSVC),
	NAPIF_STATE_LISTED	 = BIT(NAPI_STATE_LISTED),
	NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
	NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
	GRO_CONSUMED,
};
typedef enum gro_result gro_result_t;

/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister a rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler decides the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
 * delivered, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */

enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
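
/*
 * Example (sketch): a minimal rx_handler that steals frames for a private
 * protocol and passes everything else along; registration goes through
 * netdev_rx_handler_register() under RTNL. Names other than the kernel
 * APIs are hypothetical.
 *
 *	static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *
 *		if (!my_protocol_match(skb))
 *			return RX_HANDLER_PASS;
 *
 *		my_consume(skb);
 *		return RX_HANDLER_CONSUMED;
 *	}
 *
 *	err = netdev_rx_handler_register(dev, my_handle_frame, priv);
 */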

void __napi_schedule(struct napi_struct *n);
void __napi_schedule_irqoff(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

bool napi_schedule_prep(struct napi_struct *n);

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: NAPI context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}
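
/*
 * Example (sketch): the canonical use of napi_schedule() is from the
 * device's RX interrupt handler, which masks further device interrupts
 * and defers the work to the poll routine. my_* names are hypothetical.
 *
 *	static irqreturn_t my_rx_interrupt(int irq, void *data)
 *	{
 *		struct my_priv *priv = data;
 *
 *		my_disable_rx_irq(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 */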

/**
 *	napi_schedule_irqoff - schedule NAPI poll
 *	@n: NAPI context
 *
 * Variant of napi_schedule(), assuming hard irqs are masked.
 */
static inline void napi_schedule_irqoff(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule_irqoff(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}

bool napi_complete_done(struct napi_struct *n, int work_done);
/**
 *	napi_complete - NAPI processing complete
 *	@n: NAPI context
 *
 * Mark NAPI processing as complete.
 * Consider using napi_complete_done() instead.
 * Return false if device should avoid rearming interrupts.
 */
static inline bool napi_complete(struct napi_struct *n)
{
	return napi_complete_done(n, 0);
}
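
/*
 * Example (sketch): a poll routine processes up to @budget packets and
 * calls napi_complete_done() only when it did less work than allowed,
 * re-enabling device interrupts if the completion succeeded. Apart from
 * the hypothetical my_* helpers, this is the standard NAPI pattern.
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv, napi);
 *		int work = my_process_rx(priv, budget);
 *
 *		if (work < budget && napi_complete_done(napi, work))
 *			my_enable_rx_irq(priv);
 *		return work;
 *	}
 */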

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: NAPI context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
void napi_disable(struct napi_struct *n);

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: NAPI context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_atomic();
	clear_bit(NAPI_STATE_SCHED, &n->state);
	clear_bit(NAPI_STATE_NPSVC, &n->state);
}

/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: NAPI context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	if (IS_ENABLED(CONFIG_SMP))
		while (test_bit(NAPI_STATE_SCHED, &n->state))
			msleep(1);
	else
		barrier();
}

/**
 *	napi_if_scheduled_mark_missed - if napi is running, set the
 *	NAPIF_STATE_MISSED
 *	@n: NAPI context
 *
 * If napi is running, set the NAPIF_STATE_MISSED, and return true if
 * NAPI is scheduled.
 **/
static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
{
	unsigned long val, new;

	do {
		val = READ_ONCE(n->state);
		if (val & NAPIF_STATE_DISABLE)
			return true;

		if (!(val & NAPIF_STATE_SCHED))
			return false;

		new = val | NAPIF_STATE_MISSED;
	} while (cmpxchg(&n->state, val, new) != val);

	return true;
}

enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
};

#define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
#define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)

#define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
					QUEUE_STATE_FROZEN)
#define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
					QUEUE_STATE_FROZEN)

/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
 * netif_tx_* functions below are used to manipulate this flag. The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently. The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or stack (either
 * of the XOFF bits are set in the state). Drivers should not need to call
 * netif_xmit*stopped functions, they should only be using netif_tx_*.
 */

struct netdev_queue {
/*
 * read-mostly part
 */
	struct net_device	*dev;
	struct Qdisc __rcu	*qdisc;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
	unsigned long		tx_maxrate;
	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	unsigned long		trans_timeout;

	/* Subordinate device that the queue has been assigned to */
	struct net_device	*sb_dev;
#ifdef CONFIG_XDP_SOCKETS
	struct xsk_buff_pool	*pool;
#endif
/*
 * write-mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * Time (in jiffies) of last Tx
	 */
	unsigned long		trans_start;

	unsigned long		state;

#ifdef CONFIG_BQL
	struct dql		dql;
#endif
} ____cacheline_aligned_in_smp;

extern int sysctl_fb_tunnels_only_for_init_net;
extern int sysctl_devconf_inherit_init_net;

/*
 * sysctl_fb_tunnels_only_for_init_net == 0 : For all netns
 *                                     == 1 : For init_net only
 *                                     == 2 : For none.
 */
static inline bool net_has_fallback_tunnels(const struct net *net)
{
	return !IS_ENABLED(CONFIG_SYSCTL) ||
	       !sysctl_fb_tunnels_only_for_init_net ||
	       (net == &init_net && sysctl_fb_tunnels_only_for_init_net == 1);
}

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}

#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length. The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int	len;
	struct rcu_head	rcu;
	u16		cpus[];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16		cpu;
	u16		filter;
	unsigned int	last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int		mask;
	struct rcu_head		rcu;
	struct rps_dev_flow	flows[];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
    ((_num) * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 * Each entry is a 32bit value. Upper part is the high-order bits
 * of flow hash, lower part is CPU number.
 * rps_cpu_mask is used to partition the space, depending on number of
 * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
 * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
 * meaning we use 32-6=26 bits for the hash.
 */
struct rps_sock_flow_table {
	u32	mask;

	u32	ents[] ____cacheline_aligned_in_smp;
};
#define	RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))

#define RPS_NO_CPU 0xffff

extern u32 rps_cpu_mask;
extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;

static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int index = hash & table->mask;
		u32 val = hash & ~rps_cpu_mask;

		/* We only give a hint, preemption can change CPU under us */
		val |= raw_smp_processor_id();

		if (table->ents[index] != val)
			table->ents[index] = val;
	}
}

#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
			 u16 filter_id);
#endif
#endif /* CONFIG_RPS */

/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
#ifdef CONFIG_RPS
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
#endif
	struct kobject			kobj;
	struct net_device		*dev;
	struct xdp_rxq_info		xdp_rxq;
#ifdef CONFIG_XDP_SOCKETS
	struct xsk_buff_pool		*pool;
#endif
} ____cacheline_aligned_in_smp;

/*
 * RX queue sysfs structures and functions.
 */
struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
			 const char *buf, size_t len);
};

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length. The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int	len;
	unsigned int	alloc_len;
	struct rcu_head	rcu;
	u16		queues[];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
       - sizeof(struct xps_map)) / sizeof(u16))

/*
 * This structure holds all XPS maps for device. Maps are indexed by CPU.
 */
struct xps_dev_maps {
	struct rcu_head	rcu;
	struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */
};

#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) +	\
	(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))

#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
	(_rxqs * (_tcs) * sizeof(struct xps_map *)))

#endif /* CONFIG_XPS */

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure is to hold information about the device
 * configured to run FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

#define MAX_PHYS_ITEM_ID_LEN 32

/* This structure holds a unique identifier to identify some
 * physical item (port for example) used by a netdevice.
 */
struct netdev_phys_item_id {
	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
	unsigned char id_len;
};

static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
					    struct netdev_phys_item_id *b)
{
	return a->id_len == b->id_len &&
	       memcmp(a->id, b->id, a->id_len) == 0;
}

typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
				       struct sk_buff *skb,
				       struct net_device *sb_dev);

enum tc_setup_type {
	TC_SETUP_QDISC_MQPRIO,
	TC_SETUP_CLSU32,
	TC_SETUP_CLSFLOWER,
	TC_SETUP_CLSMATCHALL,
	TC_SETUP_CLSBPF,
	TC_SETUP_BLOCK,
	TC_SETUP_QDISC_CBS,
	TC_SETUP_QDISC_RED,
	TC_SETUP_QDISC_PRIO,
	TC_SETUP_QDISC_MQ,
	TC_SETUP_QDISC_ETF,
	TC_SETUP_ROOT_QDISC,
	TC_SETUP_QDISC_GRED,
	TC_SETUP_QDISC_TAPRIO,
	TC_SETUP_FT,
	TC_SETUP_QDISC_ETS,
	TC_SETUP_QDISC_TBF,
	TC_SETUP_QDISC_FIFO,
};

/* These structures hold the attributes of bpf state that are being passed
 * to the netdevice through the bpf op.
 */
enum bpf_netdev_command {
	/* Set or clear a bpf program used in the earliest stages of packet
	 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
	 * is responsible for calling bpf_prog_put on any old progs that are
	 * stored. In case of error, the callee need not release the new prog
	 * reference, but on success it takes ownership and must bpf_prog_put
	 * when it is no longer used.
	 */
	XDP_SETUP_PROG,
	XDP_SETUP_PROG_HW,
	/* BPF program for offload callbacks, invoked at program load time. */
	BPF_OFFLOAD_MAP_ALLOC,
	BPF_OFFLOAD_MAP_FREE,
	XDP_SETUP_XSK_POOL,
};

struct bpf_prog_offload_ops;
struct netlink_ext_ack;
struct xdp_umem;
struct xdp_dev_bulk_queue;
struct bpf_xdp_link;

enum bpf_xdp_mode {
	XDP_MODE_SKB = 0,
	XDP_MODE_DRV = 1,
	XDP_MODE_HW = 2,
	__MAX_XDP_MODE
};

struct bpf_xdp_entity {
	struct bpf_prog *prog;
	struct bpf_xdp_link *link;
};

struct netdev_bpf {
	enum bpf_netdev_command command;
	union {
		/* XDP_SETUP_PROG */
		struct {
			u32 flags;
			struct bpf_prog *prog;
			struct netlink_ext_ack *extack;
		};
		/* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
		struct {
			struct bpf_offloaded_map *offmap;
		};
		/* XDP_SETUP_XSK_POOL */
		struct {
			struct xsk_buff_pool *pool;
			u16 queue_id;
		} xsk;
	};
};
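
/*
 * Example (sketch): a driver's ndo_bpf() dispatches on the command and
 * picks the relevant union member; unknown commands are rejected.
 * my_xdp_setup() is a hypothetical driver routine.
 *
 *	static int my_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 *	{
 *		switch (bpf->command) {
 *		case XDP_SETUP_PROG:
 *			return my_xdp_setup(dev, bpf->prog, bpf->extack);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */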

/* Flags for ndo_xsk_wakeup. */
#define XDP_WAKEUP_RX (1 << 0)
#define XDP_WAKEUP_TX (1 << 1)

#ifdef CONFIG_XFRM_OFFLOAD
struct xfrmdev_ops {
	int	(*xdo_dev_state_add) (struct xfrm_state *x);
	void	(*xdo_dev_state_delete) (struct xfrm_state *x);
	void	(*xdo_dev_state_free) (struct xfrm_state *x);
	bool	(*xdo_dev_offload_ok) (struct sk_buff *skb,
				       struct xfrm_state *x);
	void	(*xdo_dev_state_advance_esn) (struct xfrm_state *x);
};
#endif

struct dev_ifalias {
	struct rcu_head rcuhead;
	char ifalias[];
};

struct devlink;
struct tlsdev_ops;

struct netdev_name_node {
	struct hlist_node hlist;
	struct list_head list;
	struct net_device *dev;
	const char *name;
};

int netdev_name_node_alt_create(struct net_device *dev, const char *name);
int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);

struct netdev_net_notifier {
	struct list_head list;
	struct notifier_block *nb;
};

/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *	This function is called once when a network device is registered.
 *	The network device can use this for any late stage initialization
 *	or semantic validation. It can fail with an error code which will
 *	be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *	This function is called when device is unregistered or when registration
 *	fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *	This function is called when a network device transitions to the up
 *	state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *	This function is called when a network device transitions to the down
 *	state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop
 *	the queue before that can happen; it's for obsolete devices and weird
 *	corner cases, but the stack really does a non-trivial amount
 *	of useless work if you return NETDEV_TX_BUSY.
 *	Required; cannot be NULL.
 *
 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
 *                                         struct net_device *dev,
 *                                         netdev_features_t features);
 *	Called by core transmit path to determine if device is capable of
 *	performing offload operations on a given packet. This is to give
 *	the device an opportunity to implement any restrictions that cannot
 *	be otherwise expressed by feature flags. The check is called with
 *	the set of features that the stack has calculated and it returns
 *	those the driver believes to be appropriate.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
 *                         struct net_device *sb_dev);
 *	Called to decide which queue to use when device supports multiple
 *	transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow device receiver to make
 *	changes to configuration when multicast or promiscuous is enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	This function is called when the device changes its address list
 *	filtering. If the driver handles unicast address filtering, it should
 *	set IFF_UNICAST_FLT in its priv_flags.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	This function is called when the Media Access Control address
 *	needs to be changed. If this interface is not defined, the
 *	MAC address can not be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *	Test if Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Called when a user requests an ioctl which can't be handled by
 *	the generic interface code. If not defined, ioctls return
 *	a not-supported error code.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *	Used to set network devices bus interface parameters. This interface
 *	is retained for legacy reasons; new devices should use the bus
 *	interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when a user wants to change the Maximum Transfer Unit
 *	of a device.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);
 *	Callback used when the transmitter has not made any progress
 *	for dev->watchdog ticks.
 *
 * void (*ndo_get_stats64)(struct net_device *dev,
 *                         struct rtnl_link_stats64 *storage);
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *	Called when a user wants to get the network device usage
 *	statistics. Drivers must do one of the following:
 *	1. Define @ndo_get_stats64 to fill in a zero-initialised
 *	   rtnl_link_stats64 structure passed by the caller.
 *	2. Define @ndo_get_stats to update a net_device_stats structure
 *	   (which should normally be dev->stats) and return a pointer to
 *	   it. The structure may be changed asynchronously only if each
 *	   field is written atomically.
 *	3. Update dev->stats asynchronously and atomically, and define
 *	   neither operation.
 *
 * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id)
 *	Return true if this device supports offload stats of this attr_id.
 *
 * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
 *                              void *attr_data)
 *	Get statistics for offload operations by attr_id. Write it into the
 *	attr_data pointer.
 *
 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If device supports VLAN filtering this function is called when a
 *	VLAN id is registered.
 *
 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If device supports VLAN filtering this function is called when a
 *	VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 *
 *	SR-IOV management functions.
 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
 *                        u8 qos, __be16 proto);
 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
 *                        int max_tx_rate);
 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_config)(struct net_device *dev,
 *                          int vf, struct ifla_vf_info *ivf);
 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
 *                        struct nlattr *port[]);
 *
 *	Enable or disable the VF ability to query its RSS Redirection Table and
 *	Hash Key. This is needed since on some devices VF share this information
 *	with PF and querying it may introduce a theoretical security risk.
 * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
 * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type,
 *                     void *type_data);
 *	Called to setup any 'tc' scheduler, classifier or action on @dev.
 *	This is always called from the stack with the rtnl lock held and netif
 *	tx queues stopped. This allows the netdevice to perform queue
 *	management safely.
 *
 *	Fiber Channel over Ethernet (FCoE) offload functions.
 * int (*ndo_fcoe_enable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to start using LLD for FCoE
 *	so the underlying device can perform whatever needed configuration or
 *	initialization to support acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_disable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to stop using LLD for FCoE
 *	so the underlying device can perform whatever needed clean-ups to
 *	stop supporting acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
 *                           struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Initiator wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
 *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
 *	indicated by the FC exchange id 'xid', so the underlying device can
 *	clean up and reuse resources for later DDP requests.
 *
 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
 *                            struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Target wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 *                             struct netdev_fcoe_hbainfo *hbainfo);
 *	Called when the FCoE Protocol stack wants information on the underlying
 *	device. This information is utilized by the FCoE protocol stack to
 *	register attributes with Fiber Channel management service as per the
 *	FC-GS Fabric Device Management Information(FDMI) specification.
 *
 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
 *	Called when the underlying device wants to override default World Wide
 *	Name (WWN) generation mechanism in FCoE protocol stack to pass its own
 *	World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
 *	protocol stack to use.
 *
 *	RFS acceleration.
 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
 *                          u16 rxq_index, u32 flow_id);
 *	Set hardware filter for RFS. rxq_index is the target queue index;
 *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
 *	Return the filter ID on success, or a negative error code.
 *
 *	Slave management functions (for bridge, bonding, etc).
 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to make another netdev an underling.
 *
 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to release previously enslaved netdev.
 *
 * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev,
 *                                          struct sk_buff *skb,
 *                                          bool all_slaves);
 *	Get the xmit slave of master device. If all_slaves is true, the
 *	function assumes all the slaves can transmit.
 *
 *	Feature/offload setting functions.
 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 *                                       netdev_features_t features);
 *	Adjusts the requested feature flags according to device-specific
 *	constraints, and returns the resulting flags. Must not modify
 *	the device state.
 *
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 *	Called to update device configuration to new features. Passed
 *	feature set might be less than what was returned by ndo_fix_features().
 *	Must return >0 or -errno if it changed dev->features itself.
 *
 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
 *                    struct net_device *dev,
 *                    const unsigned char *addr, u16 vid, u16 flags,
 *                    struct netlink_ext_ack *extack);
 *	Adds an FDB entry to dev for addr.
 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
 *                    struct net_device *dev,
 *                    const unsigned char *addr, u16 vid)
 *	Deletes the FDB entry from dev corresponding to addr.
 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
 *                     struct net_device *dev, struct net_device *filter_dev,
 *                     int *idx)
 *	Used to add FDB entries to dump requests. Implementers should add
 *	entries to skb and update idx with the number of entries.
 *
 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
 *                           u16 flags, struct netlink_ext_ack *extack)
 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
 *                           struct net_device *dev, u32 filter_mask,
 *                           int nlflags)
 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
 *                           u16 flags);
 *
 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
 *	Called to change device carrier. Soft-devices (like dummy, team, etc)
 *	which do not represent real hardware may define this to allow their
 *	userspace components to manage their virtual carrier state. Devices
 *	that determine carrier state from physical hardware properties (eg
 *	network cables) or protocol-dependent mechanisms (eg
 *	USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
 *
 * int (*ndo_get_phys_port_id)(struct net_device *dev,
 *                             struct netdev_phys_item_id *ppid);
 *	Called to get ID of physical port of this device. If driver does
 *	not implement this, it is assumed that the hw is not able to have
 *	multiple net devices on single physical port.
 *
 * int (*ndo_get_port_parent_id)(struct net_device *dev,
 *                               struct netdev_phys_item_id *ppid)
 *	Called to get the parent ID of the physical port of this device.
 *
 * void (*ndo_udp_tunnel_add)(struct net_device *dev,
 *                            struct udp_tunnel_info *ti);
 *	Called by UDP tunnel to notify a driver about the UDP port and socket
 *	address family that a UDP tunnel is listening to. It is called only
 *	when a new port starts listening. The operation is protected by the
 *	RTNL.
 *
 * void (*ndo_udp_tunnel_del)(struct net_device *dev,
 *                            struct udp_tunnel_info *ti);
 *	Called by UDP tunnel to notify the driver about a UDP port and socket
 *	address family that the UDP tunnel is not listening to anymore. The
 *	operation is protected by the RTNL.
 *
 * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
 *                               struct net_device *dev)
 *	Called by upper layer devices to accelerate switching or other
 *	station functionality into hardware. 'pdev' is the lowerdev
 *	to use for the offload and 'dev' is the net device that will
 *	back the offload. Returns a pointer to the private structure
 *	the upper layer will maintain.
 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
 *	Called by upper layer device to delete the station created
 *	by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
 *	the station and priv is the structure returned by the add
 *	operation.
 * int (*ndo_set_tx_maxrate)(struct net_device *dev,
 *                           int queue_index, u32 maxrate);
 *	Called when a user wants to set a max-rate limitation of specific
 *	TX queue.
 * int (*ndo_get_iflink)(const struct net_device *dev);
 *	Called to get the iflink value of this device.
 * void (*ndo_change_proto_down)(struct net_device *dev,
 *                               bool proto_down);
 *	This function is used to pass protocol port error state information
 *	to the switch driver. The switch driver can react to the proto_down
 *	by doing a phys down on the associated switch port.
 * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
 *	This function is used to get egress tunnel information for given skb.
 *	This is useful for retrieving outer tunnel header parameters while
 *	sampling packet.
 * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
 *	This function is used to specify the headroom that the skb must
 *	consider when allocating an skb during packet reception. Setting
 *	an appropriate rx headroom value allows avoiding skb head copy on
 *	forward. Setting a negative value resets the rx headroom to the
 *	default value.
 * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf);
 *	This function is used to set or query state related to XDP on the
 *	netdevice and manage BPF offload. See definition of
 *	enum bpf_netdev_command for details.
 * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp,
 *                     u32 flags);
 *	This function is used to submit @n XDP packets for transmit on a
 *	netdevice. Returns number of frames successfully transmitted, frames
 *	that got dropped are freed/returned via xdp_return_frame().
 *	A negative return value means a general error invoking the ndo; no
 *	frames were transmitted and the core caller will free all frames.
 * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags);
 *	This function is used to wake up the softirq, ksoftirqd or kthread
 *	responsible for sending and/or receiving packets on a specific
 *	queue id bound to an AF_XDP socket. The flags field specifies if
 *	only RX, only Tx, or both should be woken up using the flags
 *	XDP_WAKEUP_RX and XDP_WAKEUP_TX.
 * struct devlink_port *(*ndo_get_devlink_port)(struct net_device *dev);
 *	Get devlink port instance associated with a given netdev.
 *	Called with a reference on the netdevice and devlink locks only,
 *	rtnl_lock is not held.
 * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p,
 *                       int cmd);
 *	Add, change, delete or get information on an IPv4 tunnel.
 * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev);
 *	If a device is paired with a peer device, return the peer instance.
 *	The caller must be under RCU read context.
 */
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit)(struct sk_buff *skb,
						  struct net_device *dev);
	netdev_features_t	(*ndo_features_check)(struct sk_buff *skb,
						      struct net_device *dev,
						      netdev_features_t features);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb,
						    struct net_device *sb_dev);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout) (struct net_device *dev,
						   unsigned int txqueue);

	void			(*ndo_get_stats64)(struct net_device *dev,
						   struct rtnl_link_stats64 *storage);
	bool			(*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
	int			(*ndo_get_offload_stats)(int attr_id,
							 const struct net_device *dev,
							 void *attr_data);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       __be16 proto, u16 vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan,
						   u8 qos, __be16 proto);
	int			(*ndo_set_vf_rate)(struct net_device *dev,
						   int vf, int min_tx_rate,
						   int max_tx_rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_set_vf_trust)(struct net_device *dev,
						    int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_link_state)(struct net_device *dev,
							 int vf, int link_state);
	int			(*ndo_get_vf_stats)(struct net_device *dev,
						    int vf,
						    struct ifla_vf_stats
						    *vf_stats);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_get_vf_guid)(struct net_device *dev,
						   int vf,
						   struct ifla_vf_guid *node_guid,
						   struct ifla_vf_guid *port_guid);
	int			(*ndo_set_vf_guid)(struct net_device *dev,
						   int vf, u64 guid,
						   int guid_type);
	int			(*ndo_set_vf_rss_query_en)(
						   struct net_device *dev,
						   int vf, bool setting);
	int			(*ndo_setup_tc)(struct net_device *dev,
						enum tc_setup_type type,
						void *type_data);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev,
						 struct netlink_ext_ack *extack);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	struct net_device*	(*ndo_get_xmit_slave)(struct net_device *dev,
						      struct sk_buff *skb,
						      bool all_slaves);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct net_device *dev,
						       struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct net_device *dev,
						     struct neighbour *n);

	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid,
					       u16 flags,
					       struct netlink_ext_ack *extack);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						struct net_device *filter_dev,
						int *idx);
	int			(*ndo_fdb_get)(struct sk_buff *skb,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid, u32 portid, u32 seq,
					       struct netlink_ext_ack *extack);
	int			(*ndo_bridge_setlink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags,
						      struct netlink_ext_ack *extack);
	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
						      u32 pid, u32 seq,
						      struct net_device *dev,
						      u32 filter_mask,
						      int nlflags);
	int			(*ndo_bridge_dellink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags);
	int			(*ndo_change_carrier)(struct net_device *dev,
						      bool new_carrier);
	int			(*ndo_get_phys_port_id)(struct net_device *dev,
							struct netdev_phys_item_id *ppid);
	int			(*ndo_get_port_parent_id)(struct net_device *dev,
							  struct netdev_phys_item_id *ppid);
	int			(*ndo_get_phys_port_name)(struct net_device *dev,
							  char *name, size_t len);
	void			(*ndo_udp_tunnel_add)(struct net_device *dev,
						      struct udp_tunnel_info *ti);
	void			(*ndo_udp_tunnel_del)(struct net_device *dev,
						      struct udp_tunnel_info *ti);
	void*			(*ndo_dfwd_add_station)(struct net_device *pdev,
							struct net_device *dev);
	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
							void *priv);

	int			(*ndo_set_tx_maxrate)(struct net_device *dev,
						      int queue_index,
						      u32 maxrate);
	int			(*ndo_get_iflink)(const struct net_device *dev);
	int			(*ndo_change_proto_down)(struct net_device *dev,
							 bool proto_down);
	int			(*ndo_fill_metadata_dst)(struct net_device *dev,
							 struct sk_buff *skb);
	void			(*ndo_set_rx_headroom)(struct net_device *dev,
						       int needed_headroom);
	int			(*ndo_bpf)(struct net_device *dev,
					   struct netdev_bpf *bpf);
	int			(*ndo_xdp_xmit)(struct net_device *dev, int n,
						struct xdp_frame **xdp,
						u32 flags);
	int			(*ndo_xsk_wakeup)(struct net_device *dev,
						  u32 queue_id, u32 flags);
	struct devlink_port *	(*ndo_get_devlink_port)(struct net_device *dev);
	int			(*ndo_tunnel_ctl)(struct net_device *dev,
						  struct ip_tunnel_parm *p, int cmd);
	struct net_device *	(*ndo_get_peer_dev)(struct net_device *dev);
};
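
/*
 * Example (sketch): drivers fill in only the hooks they support and leave
 * the rest NULL; ndo_start_xmit is the one mandatory hook. For an Ethernet
 * device a minimal table might look like this (my_* functions are
 * hypothetical; eth_mac_addr and eth_validate_addr are stock helpers):
 *
 *	static const struct net_device_ops my_netdev_ops = {
 *		.ndo_open		= my_open,
 *		.ndo_stop		= my_stop,
 *		.ndo_start_xmit		= my_start_xmit,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *	};
 */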

/**
 * enum net_device_priv_flags - &struct net_device priv_flags
 *
 * These are the &struct net_device flags; they are only set internally
 * by drivers and used in the kernel. These flags are invisible to
 * userspace; this means that the order of these flags can change
 * during any kernel release.
 *
 * You should have a pretty good reason to be extending these flags.
 *
 * @IFF_802_1Q_VLAN: 802.1Q VLAN device
 * @IFF_EBRIDGE: Ethernet bridging device
 * @IFF_BONDING: bonding master or slave
 * @IFF_ISATAP: ISATAP interface (RFC4214)
 * @IFF_WAN_HDLC: WAN HDLC device
 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
 *	release skb->dst
 * @IFF_DONT_BRIDGE: disallow bridging this ether dev
 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
 * @IFF_MACVLAN_PORT: device used as macvlan port
 * @IFF_BRIDGE_PORT: device used as bridge port
 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
 * @IFF_UNICAST_FLT: Supports unicast filtering
 * @IFF_TEAM_PORT: device used as team port
 * @IFF_SUPP_NOFCS: device supports sending custom FCS
 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
 *	change when it's running
 * @IFF_MACVLAN: Macvlan device
 * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
 *	underlying stacked devices
 * @IFF_L3MDEV_MASTER: device is an L3 master device
 * @IFF_NO_QUEUE: device can run without qdisc attached
 * @IFF_OPENVSWITCH: device is an Open vSwitch master
 * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
 * @IFF_TEAM: device is a team device
 * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
 * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
 *	entity (i.e. the master device for bridged veth)
 * @IFF_MACSEC: device is a MACsec device
 * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
 * @IFF_FAILOVER: device is a failover master device
 * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
 * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
 * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running
 */
enum netdev_priv_flags {
	IFF_802_1Q_VLAN			= 1<<0,
	IFF_EBRIDGE			= 1<<1,
	IFF_BONDING			= 1<<2,
	IFF_ISATAP			= 1<<3,
	IFF_WAN_HDLC			= 1<<4,
	IFF_XMIT_DST_RELEASE		= 1<<5,
	IFF_DONT_BRIDGE			= 1<<6,
	IFF_DISABLE_NETPOLL		= 1<<7,
	IFF_MACVLAN_PORT		= 1<<8,
	IFF_BRIDGE_PORT			= 1<<9,
	IFF_OVS_DATAPATH		= 1<<10,
	IFF_TX_SKB_SHARING		= 1<<11,
	IFF_UNICAST_FLT			= 1<<12,
	IFF_TEAM_PORT			= 1<<13,
	IFF_SUPP_NOFCS			= 1<<14,
	IFF_LIVE_ADDR_CHANGE		= 1<<15,
	IFF_MACVLAN			= 1<<16,
	IFF_XMIT_DST_RELEASE_PERM	= 1<<17,
	IFF_L3MDEV_MASTER		= 1<<18,
	IFF_NO_QUEUE			= 1<<19,
	IFF_OPENVSWITCH			= 1<<20,
	IFF_L3MDEV_SLAVE		= 1<<21,
	IFF_TEAM			= 1<<22,
	IFF_RXFH_CONFIGURED		= 1<<23,
	IFF_PHONY_HEADROOM		= 1<<24,
	IFF_MACSEC			= 1<<25,
	IFF_NO_RX_HANDLER		= 1<<26,
	IFF_FAILOVER			= 1<<27,
	IFF_FAILOVER_SLAVE		= 1<<28,
	IFF_L3MDEV_RX_HANDLER		= 1<<29,
	IFF_LIVE_RENAME_OK		= 1<<30,
};

#define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
#define IFF_EBRIDGE			IFF_EBRIDGE
#define IFF_BONDING			IFF_BONDING
#define IFF_ISATAP			IFF_ISATAP
#define IFF_WAN_HDLC			IFF_WAN_HDLC
#define IFF_XMIT_DST_RELEASE		IFF_XMIT_DST_RELEASE
#define IFF_DONT_BRIDGE			IFF_DONT_BRIDGE
#define IFF_DISABLE_NETPOLL		IFF_DISABLE_NETPOLL
#define IFF_MACVLAN_PORT		IFF_MACVLAN_PORT
#define IFF_BRIDGE_PORT			IFF_BRIDGE_PORT
#define IFF_OVS_DATAPATH		IFF_OVS_DATAPATH
#define IFF_TX_SKB_SHARING		IFF_TX_SKB_SHARING
#define IFF_UNICAST_FLT			IFF_UNICAST_FLT
#define IFF_TEAM_PORT			IFF_TEAM_PORT
#define IFF_SUPP_NOFCS			IFF_SUPP_NOFCS
#define IFF_LIVE_ADDR_CHANGE		IFF_LIVE_ADDR_CHANGE
#define IFF_MACVLAN			IFF_MACVLAN
#define IFF_XMIT_DST_RELEASE_PERM	IFF_XMIT_DST_RELEASE_PERM
#define IFF_L3MDEV_MASTER		IFF_L3MDEV_MASTER
#define IFF_NO_QUEUE			IFF_NO_QUEUE
#define IFF_OPENVSWITCH			IFF_OPENVSWITCH
#define IFF_L3MDEV_SLAVE		IFF_L3MDEV_SLAVE
#define IFF_TEAM			IFF_TEAM
#define IFF_RXFH_CONFIGURED		IFF_RXFH_CONFIGURED
#define IFF_MACSEC			IFF_MACSEC
#define IFF_NO_RX_HANDLER		IFF_NO_RX_HANDLER
#define IFF_FAILOVER			IFF_FAILOVER
#define IFF_FAILOVER_SLAVE		IFF_FAILOVER_SLAVE
#define IFF_L3MDEV_RX_HANDLER		IFF_L3MDEV_RX_HANDLER
#define IFF_LIVE_RENAME_OK		IFF_LIVE_RENAME_OK
1602
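/* Example (illustrative sketch, not part of the kernel API): drivers and
 * core code test priv_flags with plain bit operations; "dev" is any
 * &struct net_device pointer and my_dev_is_bridge_port() is hypothetical.
 *
 *	static bool my_dev_is_bridge_port(const struct net_device *dev)
 *	{
 *		return dev->priv_flags & IFF_BRIDGE_PORT;
 *	}
 */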
1603/**
1604 * struct net_device - The DEVICE structure.
1605 *
1606 * Actually, this whole structure is a big mistake. It mixes I/O
1607 * data with strictly "high-level" data, and it has to know about
1608 * almost every data structure used in the INET module.
1609 *
1610 * @name: This is the first field of the "visible" part of this structure
1611 * (i.e. as seen by users in the "Space.c" file). It is the name
1612 * of the interface.
1613 *
1614 * @name_node: Name hashlist node
1615 * @ifalias: SNMP alias
1616 * @mem_end: Shared memory end
1617 * @mem_start: Shared memory start
1618 * @base_addr: Device I/O address
1619 * @irq: Device IRQ number
1620 *
1621 * @state: Generic network queuing layer state, see netdev_state_t
1622 * @dev_list: The global list of network devices
1623 * @napi_list: List entry used for polling NAPI devices
1624 * @unreg_list: List entry when we are unregistering the
1625 * device; see the function unregister_netdev
1626 * @close_list: List entry used when we are closing the device
1627 * @ptype_all: Device-specific packet handlers for all protocols
1628 * @ptype_specific: Device-specific, protocol-specific packet handlers
1629 *
1630 * @adj_list: Directly linked devices, like slaves for bonding
1631 * @features: Currently active device features
1632 * @hw_features: User-changeable features
1633 *
1634 * @wanted_features: User-requested features
1635 * @vlan_features: Mask of features inheritable by VLAN devices
1636 *
1637 * @hw_enc_features: Mask of features inherited by encapsulating devices
1638 * This field indicates what encapsulation
1639 * offloads the hardware is capable of doing,
1640 * and drivers will need to set them appropriately.
1641 *
1642 * @mpls_features: Mask of features inheritable by MPLS
1643 * @gso_partial_features: value(s) from NETIF_F_GSO\*
1644 *
1645 * @ifindex: interface index
1646 * @group: The group the device belongs to
1647 *
1648 *	@stats:		Statistics struct, kept only as a legacy; use
1649 *			rtnl_link_stats64 instead
1650 *
1651 * @rx_dropped: Dropped packets by core network,
1652 * do not use this in drivers
1653 * @tx_dropped: Dropped packets by core network,
1654 * do not use this in drivers
1655 * @rx_nohandler: nohandler dropped packets by core network on
1656 * inactive devices, do not use this in drivers
1657 * @carrier_up_count: Number of times the carrier has been up
1658 * @carrier_down_count: Number of times the carrier has been down
1659 *
1660 * @wireless_handlers: List of functions to handle Wireless Extensions,
1661 * instead of ioctl,
1662 * see <net/iw_handler.h> for details.
1663 * @wireless_data: Instance data managed by the core of wireless extensions
1664 *
1665 * @netdev_ops: Includes several pointers to callbacks,
1666 * if one wants to override the ndo_*() functions
1667 * @ethtool_ops: Management operations
1668 * @l3mdev_ops: Layer 3 master device operations
1669 * @ndisc_ops: Includes callbacks for different IPv6 neighbour
1670 * discovery handling. Necessary for e.g. 6LoWPAN.
1671 * @xfrmdev_ops: Transformation offload operations
1672 * @tlsdev_ops: Transport Layer Security offload operations
1673 *	@header_ops:	Includes callbacks for creating, parsing, caching,
1674 *			etc. of Layer 2 headers.
1675 *
1676 * @flags: Interface flags (a la BSD)
1677 * @priv_flags: Like 'flags' but invisible to userspace,
1678 * see if.h for the definitions
1679 *	@gflags:	Global flags (kept as legacy)
1680 * @padded: How much padding added by alloc_netdev()
1681 * @operstate: RFC2863 operstate
1682 * @link_mode: Mapping policy to operstate
1683 * @if_port: Selectable AUI, TP, ...
1684 * @dma: DMA channel
1685 * @mtu: Interface MTU value
1686 * @min_mtu: Interface Minimum MTU value
1687 * @max_mtu: Interface Maximum MTU value
1688 * @type: Interface hardware type
1689 * @hard_header_len: Maximum hardware header length.
1690 * @min_header_len: Minimum hardware header length
1691 *
1692 *	@needed_headroom: Extra headroom the hardware may need, though it
1693 *			  cannot be guaranteed in all cases
1694 *	@needed_tailroom: Extra tailroom the hardware may need, though it
1695 *			  cannot be guaranteed in all cases. Some cases also
1696 *			  use LL_MAX_HEADER instead to allocate the skb
1697 *
1698 * interface address info:
1699 *
1700 * @perm_addr: Permanent hw address
1701 * @addr_assign_type: Hw address assignment type
1702 * @addr_len: Hardware address length
1703 * @upper_level: Maximum depth level of upper devices.
1704 * @lower_level: Maximum depth level of lower devices.
1705 * @neigh_priv_len: Used in neigh_alloc()
1706 * @dev_id: Used to differentiate devices that share
1707 * the same link layer address
1708 * @dev_port: Used to differentiate devices that share
1709 * the same function
1710 * @addr_list_lock: XXX: need comments on this one
1711 * @name_assign_type: network interface name assignment type
1712 *	@uc_promisc:	Flag that indicates promiscuous mode
1713 *			has been enabled due to the need to listen to
1714 *			additional unicast addresses in a device that
1715 *			does not implement ndo_set_rx_mode()
1716 * @uc: unicast mac addresses
1717 * @mc: multicast mac addresses
1718 * @dev_addrs: list of device hw addresses
1719 * @queues_kset: Group of all Kobjects in the Tx and RX queues
1720 * @promiscuity: Number of times the NIC is told to work in
1721 * promiscuous mode; if it becomes 0 the NIC will
1722 * exit promiscuous mode
1723 *	@allmulti:	Counter that enables or disables allmulticast mode
1724 *
1725 * @vlan_info: VLAN info
1726 * @dsa_ptr: dsa specific data
1727 * @tipc_ptr: TIPC specific data
1728 * @atalk_ptr: AppleTalk link
1729 * @ip_ptr: IPv4 specific data
1730 * @dn_ptr: DECnet specific data
1731 * @ip6_ptr: IPv6 specific data
1732 * @ax25_ptr: AX.25 specific data
1733 * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
1734 * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network
1735 * device struct
1736 * @mpls_ptr: mpls_dev struct pointer
1737 *
1738 * @dev_addr: Hw address (before bcast,
1739 * because most packets are unicast)
1740 *
1741 * @_rx: Array of RX queues
1742 * @num_rx_queues: Number of RX queues
1743 * allocated at register_netdev() time
1744 * @real_num_rx_queues: Number of RX queues currently active in device
1745 *	@xdp_prog:	XDP BPF program pointer
1746 * @gro_flush_timeout: timeout for GRO layer in NAPI
1747 *	@napi_defer_hard_irqs: If not zero, provides a counter that
1748 *			allows avoiding NIC hard IRQs on busy queues.
1749 *
1750 * @rx_handler: handler for received packets
1751 * @rx_handler_data: XXX: need comments on this one
1752 * @miniq_ingress: ingress/clsact qdisc specific data for
1753 * ingress processing
1754 * @ingress_queue: XXX: need comments on this one
1755 * @nf_hooks_ingress: netfilter hooks executed for ingress packets
1756 * @broadcast: hw bcast address
1757 *
1758 * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
1759 * indexed by RX queue number. Assigned by driver.
1760 * This must only be set if the ndo_rx_flow_steer
1761 * operation is defined
1762 * @index_hlist: Device index hash chain
1763 *
1764 * @_tx: Array of TX queues
1765 * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time
1766 * @real_num_tx_queues: Number of TX queues currently active in device
1767 * @qdisc: Root qdisc from userspace point of view
1768 * @tx_queue_len: Max frames per queue allowed
1769 * @tx_global_lock: XXX: need comments on this one
1770 * @xdp_bulkq: XDP device bulk queue
1771 * @xps_cpus_map: all CPUs map for XPS device
1772 * @xps_rxqs_map: all RXQs map for XPS device
1773 *
1774 * @xps_maps: XXX: need comments on this one
1775 * @miniq_egress: clsact qdisc specific data for
1776 * egress processing
1777 * @qdisc_hash: qdisc hash table
1778 * @watchdog_timeo: Represents the timeout that is used by
1779 * the watchdog (see dev_watchdog())
1780 *	@watchdog_timer:	Transmit watchdog &struct timer_list
1781 *
1782 * @proto_down_reason: reason a netdev interface is held down
1783 * @pcpu_refcnt: Number of references to this device
1784 * @todo_list: Delayed register/unregister
1785 * @link_watch_list: XXX: need comments on this one
1786 *
1787 * @reg_state: Register/unregister state machine
1788 * @dismantle: Device is going to be freed
1789 * @rtnl_link_state: This enum represents the phases of creating
1790 * a new link
1791 *
1792 * @needs_free_netdev: Should unregister perform free_netdev?
1793 * @priv_destructor: Called from unregister
1794 * @npinfo: XXX: need comments on this one
1795 * @nd_net: Network namespace this network device is inside
1796 *
1797 * @ml_priv: Mid-layer private
1798 * @lstats: Loopback statistics
1799 * @tstats: Tunnel statistics
1800 * @dstats: Dummy statistics
1801 * @vstats: Virtual ethernet statistics
1802 *
1803 * @garp_port: GARP
1804 * @mrp_port: MRP
1805 *
1806 * @dev: Class/net/name entry
1807 * @sysfs_groups: Space for optional device, statistics and wireless
1808 * sysfs groups
1809 *
1810 * @sysfs_rx_queue_group: Space for optional per-rx queue attributes
1811 * @rtnl_link_ops: Rtnl_link_ops
1812 *
1813 * @gso_max_size: Maximum size of generic segmentation offload
1814 * @gso_max_segs: Maximum number of segments that can be passed to the
1815 * NIC for GSO
1816 *
1817 * @dcbnl_ops: Data Center Bridging netlink ops
1818 * @num_tc: Number of traffic classes in the net device
1819 * @tc_to_txq: XXX: need comments on this one
1820 * @prio_tc_map: XXX: need comments on this one
1821 *
1822 * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp
1823 *
1824 * @priomap: XXX: need comments on this one
1825 * @phydev: Physical device may attach itself
1826 * for hardware timestamping
1827 * @sfp_bus: attached &struct sfp_bus structure.
1828 *
1829 * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
1830 * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
1831 *
1832 * @proto_down: protocol port state information can be sent to the
1833 * switch driver and used to set the phys state of the
1834 * switch port.
1835 *
1836 * @wol_enabled: Wake-on-LAN is enabled
1837 *
1838 * @net_notifier_list: List of per-net netdev notifier block
1839 * that follow this device when it is moved
1840 * to another network namespace.
1841 *
1842 * @macsec_ops: MACsec offloading ops
1843 *
1844 * @udp_tunnel_nic_info: static structure describing the UDP tunnel
1845 * offload capabilities of the device
1846 * @udp_tunnel_nic: UDP tunnel offload state
1847 * @xdp_state: stores info on attached XDP BPF programs
1848 *
1849 *	@nested_level:	Used as a parameter of spin_lock_nested() of
1850 *			dev->addr_list_lock.
1851 * @unlink_list: As netif_addr_lock() can be called recursively,
1852 * keep a list of interfaces to be deleted.
1853 *
1854 * FIXME: cleanup struct net_device such that network protocol info
1855 * moves out.
1856 */
1857
1858struct net_device {
1859 char name[IFNAMSIZ];
1860 struct netdev_name_node *name_node;
1861 struct dev_ifalias __rcu *ifalias;
1862 /*
1863 * I/O specific fields
1864 * FIXME: Merge these and struct ifmap into one
1865 */
1866 unsigned long mem_end;
1867 unsigned long mem_start;
1868 unsigned long base_addr;
1869 int irq;
1870
1871 /*
1872 * Some hardware also needs these fields (state,dev_list,
1873 * napi_list,unreg_list,close_list) but they are not
1874 * part of the usual set specified in Space.c.
1875 */
1876
1877 unsigned long state;
1878
1879 struct list_head dev_list;
1880 struct list_head napi_list;
1881 struct list_head unreg_list;
1882 struct list_head close_list;
1883 struct list_head ptype_all;
1884 struct list_head ptype_specific;
1885
1886 struct {
1887 struct list_head upper;
1888 struct list_head lower;
1889 } adj_list;
1890
1891 netdev_features_t features;
1892 netdev_features_t hw_features;
1893 netdev_features_t wanted_features;
1894 netdev_features_t vlan_features;
1895 netdev_features_t hw_enc_features;
1896 netdev_features_t mpls_features;
1897 netdev_features_t gso_partial_features;
1898
1899 int ifindex;
1900 int group;
1901
1902 struct net_device_stats stats;
1903
1904 atomic_long_t rx_dropped;
1905 atomic_long_t tx_dropped;
1906 atomic_long_t rx_nohandler;
1907
1908 /* Stats to monitor link on/off, flapping */
1909 atomic_t carrier_up_count;
1910 atomic_t carrier_down_count;
1911
1912#ifdef CONFIG_WIRELESS_EXT
1913 const struct iw_handler_def *wireless_handlers;
1914 struct iw_public_data *wireless_data;
1915#endif
1916 const struct net_device_ops *netdev_ops;
1917 const struct ethtool_ops *ethtool_ops;
1918#ifdef CONFIG_NET_L3_MASTER_DEV
1919 const struct l3mdev_ops *l3mdev_ops;
1920#endif
1921#if IS_ENABLED(CONFIG_IPV6)
1922 const struct ndisc_ops *ndisc_ops;
1923#endif
1924
1925#ifdef CONFIG_XFRM_OFFLOAD
1926 const struct xfrmdev_ops *xfrmdev_ops;
1927#endif
1928
1929#if IS_ENABLED(CONFIG_TLS_DEVICE)
1930 const struct tlsdev_ops *tlsdev_ops;
1931#endif
1932
1933 const struct header_ops *header_ops;
1934
1935 unsigned int flags;
1936 unsigned int priv_flags;
1937
1938 unsigned short gflags;
1939 unsigned short padded;
1940
1941 unsigned char operstate;
1942 unsigned char link_mode;
1943
1944 unsigned char if_port;
1945 unsigned char dma;
1946
1947 /* Note : dev->mtu is often read without holding a lock.
1948 * Writers usually hold RTNL.
1949 * It is recommended to use READ_ONCE() to annotate the reads,
1950 * and to use WRITE_ONCE() to annotate the writes.
1951 */
1952 unsigned int mtu;
1953 unsigned int min_mtu;
1954 unsigned int max_mtu;
1955 unsigned short type;
1956 unsigned short hard_header_len;
1957 unsigned char min_header_len;
1958 unsigned char name_assign_type;
1959
1960 unsigned short needed_headroom;
1961 unsigned short needed_tailroom;
1962
1963 /* Interface address info. */
1964 unsigned char perm_addr[MAX_ADDR_LEN];
1965 unsigned char addr_assign_type;
1966 unsigned char addr_len;
1967 unsigned char upper_level;
1968 unsigned char lower_level;
1969
1970 unsigned short neigh_priv_len;
1971 unsigned short dev_id;
1972 unsigned short dev_port;
1973 spinlock_t addr_list_lock;
1974
1975 struct netdev_hw_addr_list uc;
1976 struct netdev_hw_addr_list mc;
1977 struct netdev_hw_addr_list dev_addrs;
1978
1979#ifdef CONFIG_SYSFS
1980 struct kset *queues_kset;
1981#endif
1982#ifdef CONFIG_LOCKDEP
1983 struct list_head unlink_list;
1984#endif
1985 unsigned int promiscuity;
1986 unsigned int allmulti;
1987 bool uc_promisc;
1988#ifdef CONFIG_LOCKDEP
1989 unsigned char nested_level;
1990#endif
1991
1992
1993 /* Protocol-specific pointers */
1994
1995#if IS_ENABLED(CONFIG_VLAN_8021Q)
1996 struct vlan_info __rcu *vlan_info;
1997#endif
1998#if IS_ENABLED(CONFIG_NET_DSA)
1999 struct dsa_port *dsa_ptr;
2000#endif
2001#if IS_ENABLED(CONFIG_TIPC)
2002 struct tipc_bearer __rcu *tipc_ptr;
2003#endif
2004#if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK)
2005 void *atalk_ptr;
2006#endif
2007 struct in_device __rcu *ip_ptr;
2008#if IS_ENABLED(CONFIG_DECNET)
2009 struct dn_dev __rcu *dn_ptr;
2010#endif
2011 struct inet6_dev __rcu *ip6_ptr;
2012#if IS_ENABLED(CONFIG_AX25)
2013 void *ax25_ptr;
2014#endif
2015 struct wireless_dev *ieee80211_ptr;
2016 struct wpan_dev *ieee802154_ptr;
2017#if IS_ENABLED(CONFIG_MPLS_ROUTING)
2018 struct mpls_dev __rcu *mpls_ptr;
2019#endif
2020
2021/*
2022 * Cache lines mostly used on receive path (including eth_type_trans())
2023 */
2024 /* Interface address info used in eth_type_trans() */
2025 unsigned char *dev_addr;
2026
2027 struct netdev_rx_queue *_rx;
2028 unsigned int num_rx_queues;
2029 unsigned int real_num_rx_queues;
2030
2031 struct bpf_prog __rcu *xdp_prog;
2032 unsigned long gro_flush_timeout;
2033 int napi_defer_hard_irqs;
2034 rx_handler_func_t __rcu *rx_handler;
2035 void __rcu *rx_handler_data;
2036
2037#ifdef CONFIG_NET_CLS_ACT
2038 struct mini_Qdisc __rcu *miniq_ingress;
2039#endif
2040 struct netdev_queue __rcu *ingress_queue;
2041#ifdef CONFIG_NETFILTER_INGRESS
2042 struct nf_hook_entries __rcu *nf_hooks_ingress;
2043#endif
2044
2045 unsigned char broadcast[MAX_ADDR_LEN];
2046#ifdef CONFIG_RFS_ACCEL
2047 struct cpu_rmap *rx_cpu_rmap;
2048#endif
2049 struct hlist_node index_hlist;
2050
2051/*
2052 * Cache lines mostly used on transmit path
2053 */
2054 struct netdev_queue *_tx ____cacheline_aligned_in_smp;
2055 unsigned int num_tx_queues;
2056 unsigned int real_num_tx_queues;
2057 struct Qdisc *qdisc;
2058 unsigned int tx_queue_len;
2059 spinlock_t tx_global_lock;
2060
2061 struct xdp_dev_bulk_queue __percpu *xdp_bulkq;
2062
2063#ifdef CONFIG_XPS
2064 struct xps_dev_maps __rcu *xps_cpus_map;
2065 struct xps_dev_maps __rcu *xps_rxqs_map;
2066#endif
2067#ifdef CONFIG_NET_CLS_ACT
2068 struct mini_Qdisc __rcu *miniq_egress;
2069#endif
2070
2071#ifdef CONFIG_NET_SCHED
2072	DECLARE_HASHTABLE(qdisc_hash, 4);
2073#endif
2074 /* These may be needed for future network-power-down code. */
2075 struct timer_list watchdog_timer;
2076 int watchdog_timeo;
2077
2078 u32 proto_down_reason;
2079
2080 struct list_head todo_list;
2081 int __percpu *pcpu_refcnt;
2082
2083 struct list_head link_watch_list;
2084
2085 enum { NETREG_UNINITIALIZED=0,
2086 NETREG_REGISTERED, /* completed register_netdevice */
2087 NETREG_UNREGISTERING, /* called unregister_netdevice */
2088 NETREG_UNREGISTERED, /* completed unregister todo */
2089 NETREG_RELEASED, /* called free_netdev */
2090 NETREG_DUMMY, /* dummy device for NAPI poll */
2091 } reg_state:8;
2092
2093 bool dismantle;
2094
2095 enum {
2096 RTNL_LINK_INITIALIZED,
2097 RTNL_LINK_INITIALIZING,
2098 } rtnl_link_state:16;
2099
2100 bool needs_free_netdev;
2101 void (*priv_destructor)(struct net_device *dev);
2102
2103#ifdef CONFIG_NETPOLL
2104 struct netpoll_info __rcu *npinfo;
2105#endif
2106
2107 possible_net_t nd_net;
2108
2109 /* mid-layer private */
2110 union {
2111 void *ml_priv;
2112 struct pcpu_lstats __percpu *lstats;
2113 struct pcpu_sw_netstats __percpu *tstats;
2114 struct pcpu_dstats __percpu *dstats;
2115 };
2116
2117#if IS_ENABLED(CONFIG_GARP)
2118 struct garp_port __rcu *garp_port;
2119#endif
2120#if IS_ENABLED(CONFIG_MRP)
2121 struct mrp_port __rcu *mrp_port;
2122#endif
2123
2124 struct device dev;
2125 const struct attribute_group *sysfs_groups[4];
2126 const struct attribute_group *sysfs_rx_queue_group;
2127
2128 const struct rtnl_link_ops *rtnl_link_ops;
2129
2130 /* for setting kernel sock attribute on TCP connection setup */
2131#define GSO_MAX_SIZE 65536
2132 unsigned int gso_max_size;
2133#define GSO_MAX_SEGS 65535
2134 u16 gso_max_segs;
2135
2136#ifdef CONFIG_DCB
2137 const struct dcbnl_rtnl_ops *dcbnl_ops;
2138#endif
2139 s16 num_tc;
2140 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
2141 u8 prio_tc_map[TC_BITMASK + 1];
2142
2143#if IS_ENABLED(CONFIG_FCOE)
2144 unsigned int fcoe_ddp_xid;
2145#endif
2146#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
2147 struct netprio_map __rcu *priomap;
2148#endif
2149 struct phy_device *phydev;
2150 struct sfp_bus *sfp_bus;
2151 struct lock_class_key *qdisc_tx_busylock;
2152 struct lock_class_key *qdisc_running_key;
2153 bool proto_down;
2154 unsigned wol_enabled:1;
2155
2156 struct list_head net_notifier_list;
2157
2158#if IS_ENABLED(CONFIG_MACSEC)
2159 /* MACsec management functions */
2160 const struct macsec_ops *macsec_ops;
2161#endif
2162 const struct udp_tunnel_nic_info *udp_tunnel_nic_info;
2163 struct udp_tunnel_nic *udp_tunnel_nic;
2164
2165 /* protected by rtnl_lock */
2166 struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE];
2167};
2168#define to_net_dev(d) container_of(d, struct net_device, dev)
2169
2170static inline bool netif_elide_gro(const struct net_device *dev)
2171{
2172 if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog)
2173 return true;
2174 return false;
2175}
2176
2177#define NETDEV_ALIGN 32
2178
2179static inline
2180int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
2181{
2182 return dev->prio_tc_map[prio & TC_BITMASK];
2183}
2184
2185static inline
2186int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
2187{
2188 if (tc >= dev->num_tc)
2189 return -EINVAL;
2190
2191 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
2192 return 0;
2193}
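/* Example (illustrative sketch): map all eight 802.1p priorities of a
 * two-TC device, sending even priorities to TC 0 and odd ones to TC 1;
 * error handling is elided.
 *
 *	int prio;
 *
 *	for (prio = 0; prio < 8; prio++)
 *		netdev_set_prio_tc_map(dev, prio, prio & 1);
 */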
2194
2195int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
2196void netdev_reset_tc(struct net_device *dev);
2197int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
2198int netdev_set_num_tc(struct net_device *dev, u8 num_tc);
2199
2200static inline
2201int netdev_get_num_tc(struct net_device *dev)
2202{
2203 return dev->num_tc;
2204}
2205
2206static inline void net_prefetch(void *p)
2207{
2208 prefetch(p);
2209#if L1_CACHE_BYTES < 128
2210 prefetch((u8 *)p + L1_CACHE_BYTES);
2211#endif
2212}
2213
2214static inline void net_prefetchw(void *p)
2215{
2216 prefetchw(p);
2217#if L1_CACHE_BYTES < 128
2218 prefetchw((u8 *)p + L1_CACHE_BYTES);
2219#endif
2220}
2221
2222void netdev_unbind_sb_channel(struct net_device *dev,
2223 struct net_device *sb_dev);
2224int netdev_bind_sb_channel_queue(struct net_device *dev,
2225 struct net_device *sb_dev,
2226 u8 tc, u16 count, u16 offset);
2227int netdev_set_sb_channel(struct net_device *dev, u16 channel);
2228static inline int netdev_get_sb_channel(struct net_device *dev)
2229{
2230 return max_t(int, -dev->num_tc, 0);
2231}
2232
2233static inline
2234struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
2235 unsigned int index)
2236{
2237 return &dev->_tx[index];
2238}
2239
2240static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
2241 const struct sk_buff *skb)
2242{
2243 return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2244}
2245
2246static inline void netdev_for_each_tx_queue(struct net_device *dev,
2247 void (*f)(struct net_device *,
2248 struct netdev_queue *,
2249 void *),
2250 void *arg)
2251{
2252 unsigned int i;
2253
2254 for (i = 0; i < dev->num_tx_queues; i++)
2255 f(dev, &dev->_tx[i], arg);
2256}
2257
2258#define netdev_lockdep_set_classes(dev) \
2259{ \
2260 static struct lock_class_key qdisc_tx_busylock_key; \
2261 static struct lock_class_key qdisc_running_key; \
2262 static struct lock_class_key qdisc_xmit_lock_key; \
2263 static struct lock_class_key dev_addr_list_lock_key; \
2264 unsigned int i; \
2265 \
2266 (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
2267 (dev)->qdisc_running_key = &qdisc_running_key; \
2268 lockdep_set_class(&(dev)->addr_list_lock, \
2269 &dev_addr_list_lock_key); \
2270 for (i = 0; i < (dev)->num_tx_queues; i++) \
2271 lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
2272 &qdisc_xmit_lock_key); \
2273}
2274
2275u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
2276 struct net_device *sb_dev);
2277struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
2278 struct sk_buff *skb,
2279 struct net_device *sb_dev);
2280
2281/* returns the headroom that the master device needs to take into account
2282 * when forwarding to this dev
2283 */
2284static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
2285{
2286 return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
2287}
2288
2289static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
2290{
2291 if (dev->netdev_ops->ndo_set_rx_headroom)
2292 dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
2293}
2294
2295/* set the device rx headroom to the dev's default */
2296static inline void netdev_reset_rx_headroom(struct net_device *dev)
2297{
2298 netdev_set_rx_headroom(dev, -1);
2299}
2300
2301/*
2302 * Net namespace inlines
2303 */
2304static inline
2305struct net *dev_net(const struct net_device *dev)
2306{
2307 return read_pnet(&dev->nd_net);
2308}
2309
2310static inline
2311void dev_net_set(struct net_device *dev, struct net *net)
2312{
2313 write_pnet(&dev->nd_net, net);
2314}
2315
2316/**
2317 * netdev_priv - access network device private data
2318 * @dev: network device
2319 *
2320 * Get network device private data
2321 */
2322static inline void *netdev_priv(const struct net_device *dev)
2323{
2324 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
2325}
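/* Example (illustrative sketch): the private area lives right after the
 * aligned struct net_device, so a driver recovers its own state from any
 * net_device it allocated with the matching size; struct my_priv and
 * my_handle_link() are hypothetical.
 *
 *	struct my_priv {
 *		int link_state;
 *	};
 *
 *	// dev = alloc_etherdev(sizeof(struct my_priv));
 *
 *	static void my_handle_link(struct net_device *dev)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *
 *		priv->link_state = 1;
 *	}
 */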
2326
2327/* Set the sysfs physical device reference for the network logical device
2328 * if set prior to registration will cause a symlink during initialization.
2329 */
2330#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
2331
2332/* Set the sysfs device type for the network logical device to allow
2333 * fine-grained identification of different network device types. For
2334 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
2335 */
2336#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
2337
2338/* Default NAPI poll() weight
2339 * Device drivers are strongly advised not to use a bigger value
2340 */
2341#define NAPI_POLL_WEIGHT 64
2342
2343/**
2344 * netif_napi_add - initialize a NAPI context
2345 * @dev: network device
2346 * @napi: NAPI context
2347 * @poll: polling function
2348 * @weight: default weight
2349 *
2350 * netif_napi_add() must be used to initialize a NAPI context prior to calling
2351 * *any* of the other NAPI-related functions.
2352 */
2353void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2354 int (*poll)(struct napi_struct *, int), int weight);
2355
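/* Example (illustrative sketch): a typical driver pairs netif_napi_add()
 * with a poll callback registered at probe time; my_poll() and the priv
 * structure are hypothetical.
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = 0;
 *
 *		// process up to "budget" received packets here ...
 *		if (work < budget)
 *			napi_complete_done(napi, work);
 *		return work;
 *	}
 *
 *	// at probe time:
 *	// netif_napi_add(dev, &priv->napi, my_poll, NAPI_POLL_WEIGHT);
 */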
2356/**
2357 * netif_tx_napi_add - initialize a NAPI context
2358 * @dev: network device
2359 * @napi: NAPI context
2360 * @poll: polling function
2361 * @weight: default weight
2362 *
2363 * This variant of netif_napi_add() should be used from drivers using NAPI
2364 * to exclusively poll a TX queue.
2365 * This avoids adding it to napi_hash[], and thus polluting that hash table.
2366 */
2367static inline void netif_tx_napi_add(struct net_device *dev,
2368 struct napi_struct *napi,
2369 int (*poll)(struct napi_struct *, int),
2370 int weight)
2371{
2372 set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
2373 netif_napi_add(dev, napi, poll, weight);
2374}
2375
2376/**
2377 * __netif_napi_del - remove a NAPI context
2378 * @napi: NAPI context
2379 *
2380 * Warning: caller must observe RCU grace period before freeing memory
2381 * containing @napi. Drivers might want to call this helper to combine
2382 * all the needed RCU grace periods into a single one.
2383 */
2384void __netif_napi_del(struct napi_struct *napi);
2385
2386/**
2387 * netif_napi_del - remove a NAPI context
2388 * @napi: NAPI context
2389 *
2390 * netif_napi_del() removes a NAPI context from the network device NAPI list
2391 */
2392static inline void netif_napi_del(struct napi_struct *napi)
2393{
2394 __netif_napi_del(napi);
2395 synchronize_net();
2396}
2397
2398struct napi_gro_cb {
2399 /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
2400 void *frag0;
2401
2402 /* Length of frag0. */
2403 unsigned int frag0_len;
2404
2405 /* This indicates where we are processing relative to skb->data. */
2406 int data_offset;
2407
2408 /* This is non-zero if the packet cannot be merged with the new skb. */
2409 u16 flush;
2410
2411 /* Save the IP ID here and check when we get to the transport layer */
2412 u16 flush_id;
2413
2414 /* Number of segments aggregated. */
2415 u16 count;
2416
2417 /* Start offset for remote checksum offload */
2418 u16 gro_remcsum_start;
2419
2420 /* jiffies when first packet was created/queued */
2421 unsigned long age;
2422
2423 /* Used in ipv6_gro_receive() and foo-over-udp */
2424 u16 proto;
2425
2426 /* This is non-zero if the packet may be of the same flow. */
2427 u8 same_flow:1;
2428
2429 /* Used in tunnel GRO receive */
2430 u8 encap_mark:1;
2431
2432 /* GRO checksum is valid */
2433 u8 csum_valid:1;
2434
2435 /* Number of checksums via CHECKSUM_UNNECESSARY */
2436 u8 csum_cnt:3;
2437
2438 /* Free the skb? */
2439 u8 free:2;
2440#define NAPI_GRO_FREE 1
2441#define NAPI_GRO_FREE_STOLEN_HEAD 2
2442
2443 /* Used in foo-over-udp, set in udp[46]_gro_receive */
2444 u8 is_ipv6:1;
2445
2446 /* Used in GRE, set in fou/gue_gro_receive */
2447 u8 is_fou:1;
2448
2449 /* Used to determine if flush_id can be ignored */
2450 u8 is_atomic:1;
2451
2452 /* Number of gro_receive callbacks this packet already went through */
2453 u8 recursion_counter:4;
2454
2455 /* GRO is done by frag_list pointer chaining. */
2456 u8 is_flist:1;
2457
2458 /* used to support CHECKSUM_COMPLETE for tunneling protocols */
2459 __wsum csum;
2460
2461 /* used in skb_gro_receive() slow path */
2462 struct sk_buff *last;
2463};
2464
2465#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
2466
2467#define GRO_RECURSION_LIMIT 15
2468static inline int gro_recursion_inc_test(struct sk_buff *skb)
2469{
2470 return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
2471}
2472
2473typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
2474static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
2475 struct list_head *head,
2476 struct sk_buff *skb)
2477{
2478 if (unlikely(gro_recursion_inc_test(skb))) {
2479 NAPI_GRO_CB(skb)->flush |= 1;
2480 return NULL;
2481 }
2482
2483 return cb(head, skb);
2484}
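/* Example (illustrative sketch): an encapsulation protocol's gro_receive
 * handler hands the inner packet to the next layer through
 * call_gro_receive() so the recursion counter stays bounded; "inner_cb"
 * stands for whatever lower gro_receive callback was looked up.
 *
 *	static struct sk_buff *my_tunnel_gro_receive(struct list_head *head,
 *						     struct sk_buff *skb)
 *	{
 *		// ... parse and pull the outer header, then:
 *		return call_gro_receive(inner_cb, head, skb);
 *	}
 */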
2485
2486typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
2487 struct sk_buff *);
2488static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
2489 struct sock *sk,
2490 struct list_head *head,
2491 struct sk_buff *skb)
2492{
2493 if (unlikely(gro_recursion_inc_test(skb))) {
2494 NAPI_GRO_CB(skb)->flush |= 1;
2495 return NULL;
2496 }
2497
2498 return cb(sk, head, skb);
2499}
2500
2501struct packet_type {
2502 __be16 type; /* This is really htons(ether_type). */
2503 bool ignore_outgoing;
2504 struct net_device *dev; /* NULL is wildcarded here */
2505 int (*func) (struct sk_buff *,
2506 struct net_device *,
2507 struct packet_type *,
2508 struct net_device *);
2509 void (*list_func) (struct list_head *,
2510 struct packet_type *,
2511 struct net_device *);
2512 bool (*id_match)(struct packet_type *ptype,
2513 struct sock *sk);
2514 void *af_packet_priv;
2515 struct list_head list;
2516};
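/* Example (illustrative sketch): a minimal protocol tap that sees every
 * frame; my_rcv() is hypothetical and dev_add_pack() is declared later
 * in this file.
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_ptype = {
 *		.type = htons(ETH_P_ALL),
 *		.func = my_rcv,
 *	};
 *	// dev_add_pack(&my_ptype);
 */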
2517
2518struct offload_callbacks {
2519 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
2520 netdev_features_t features);
2521 struct sk_buff *(*gro_receive)(struct list_head *head,
2522 struct sk_buff *skb);
2523 int (*gro_complete)(struct sk_buff *skb, int nhoff);
2524};
2525
2526struct packet_offload {
2527 __be16 type; /* This is really htons(ether_type). */
2528 u16 priority;
2529 struct offload_callbacks callbacks;
2530 struct list_head list;
2531};
2532
2533/* often modified stats are per-CPU, others are shared (netdev->stats) */
2534struct pcpu_sw_netstats {
2535 u64 rx_packets;
2536 u64 rx_bytes;
2537 u64 tx_packets;
2538 u64 tx_bytes;
2539 struct u64_stats_sync syncp;
2540} __aligned(4 * sizeof(u64));
2541
2542struct pcpu_lstats {
2543 u64_stats_t packets;
2544 u64_stats_t bytes;
2545 struct u64_stats_sync syncp;
2546} __aligned(2 * sizeof(u64));
2547
2548void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes);
2549
2550static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len)
2551{
2552 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
2553
2554 u64_stats_update_begin(&tstats->syncp);
2555 tstats->rx_bytes += len;
2556 tstats->rx_packets++;
2557 u64_stats_update_end(&tstats->syncp);
2558}
2559
2560static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
2561{
2562 struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);
2563
2564 u64_stats_update_begin(&lstats->syncp);
2565 u64_stats_add(&lstats->bytes, len);
2566 u64_stats_inc(&lstats->packets);
2567 u64_stats_update_end(&lstats->syncp);
2568}
2569
2570#define __netdev_alloc_pcpu_stats(type, gfp) \
2571({ \
2572 typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
2573 if (pcpu_stats) { \
2574 int __cpu; \
2575 for_each_possible_cpu(__cpu) { \
2576 typeof(type) *stat; \
2577 stat = per_cpu_ptr(pcpu_stats, __cpu); \
2578 u64_stats_init(&stat->syncp); \
2579 } \
2580 } \
2581 pcpu_stats; \
2582})
2583
2584#define netdev_alloc_pcpu_stats(type) \
2585 __netdev_alloc_pcpu_stats(type, GFP_KERNEL)
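/* Example (illustrative sketch): a virtual driver allocates its per-CPU
 * counters in ndo_init() and bumps them with dev_sw_netstats_rx_add();
 * my_dev_init() is hypothetical.
 *
 *	static int my_dev_init(struct net_device *dev)
 *	{
 *		dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 *		return dev->tstats ? 0 : -ENOMEM;
 *	}
 */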
2586
2587enum netdev_lag_tx_type {
2588 NETDEV_LAG_TX_TYPE_UNKNOWN,
2589 NETDEV_LAG_TX_TYPE_RANDOM,
2590 NETDEV_LAG_TX_TYPE_BROADCAST,
2591 NETDEV_LAG_TX_TYPE_ROUNDROBIN,
2592 NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
2593 NETDEV_LAG_TX_TYPE_HASH,
2594};
2595
2596enum netdev_lag_hash {
2597 NETDEV_LAG_HASH_NONE,
2598 NETDEV_LAG_HASH_L2,
2599 NETDEV_LAG_HASH_L34,
2600 NETDEV_LAG_HASH_L23,
2601 NETDEV_LAG_HASH_E23,
2602 NETDEV_LAG_HASH_E34,
2603 NETDEV_LAG_HASH_UNKNOWN,
2604};
2605
2606struct netdev_lag_upper_info {
2607 enum netdev_lag_tx_type tx_type;
2608 enum netdev_lag_hash hash_type;
2609};
2610
2611struct netdev_lag_lower_state_info {
2612 u8 link_up : 1,
2613 tx_enabled : 1;
2614};
2615
2616#include <linux/notifier.h>
2617
2618/* netdevice notifier chain. Please remember to update netdev_cmd_to_name()
2619 * and the rtnetlink notification exclusion list in rtnetlink_event() when
2620 * adding new types.
2621 */
2622enum netdev_cmd {
2623 NETDEV_UP = 1, /* For now you can't veto a device up/down */
2624 NETDEV_DOWN,
2625 NETDEV_REBOOT, /* Tell a protocol stack a network interface
2626 detected a hardware crash and restarted
2627				   - we can use this e.g. to kick TCP sessions
2628 once done */
2629 NETDEV_CHANGE, /* Notify device state change */
2630 NETDEV_REGISTER,
2631 NETDEV_UNREGISTER,
2632 NETDEV_CHANGEMTU, /* notify after mtu change happened */
2633 NETDEV_CHANGEADDR, /* notify after the address change */
2634 NETDEV_PRE_CHANGEADDR, /* notify before the address change */
2635 NETDEV_GOING_DOWN,
2636 NETDEV_CHANGENAME,
2637 NETDEV_FEAT_CHANGE,
2638 NETDEV_BONDING_FAILOVER,
2639 NETDEV_PRE_UP,
2640 NETDEV_PRE_TYPE_CHANGE,
2641 NETDEV_POST_TYPE_CHANGE,
2642 NETDEV_POST_INIT,
2643 NETDEV_RELEASE,
2644 NETDEV_NOTIFY_PEERS,
2645 NETDEV_JOIN,
2646 NETDEV_CHANGEUPPER,
2647 NETDEV_RESEND_IGMP,
2648 NETDEV_PRECHANGEMTU, /* notify before mtu change happened */
2649 NETDEV_CHANGEINFODATA,
2650 NETDEV_BONDING_INFO,
2651 NETDEV_PRECHANGEUPPER,
2652 NETDEV_CHANGELOWERSTATE,
2653 NETDEV_UDP_TUNNEL_PUSH_INFO,
2654 NETDEV_UDP_TUNNEL_DROP_INFO,
2655 NETDEV_CHANGE_TX_QUEUE_LEN,
2656 NETDEV_CVLAN_FILTER_PUSH_INFO,
2657 NETDEV_CVLAN_FILTER_DROP_INFO,
2658 NETDEV_SVLAN_FILTER_PUSH_INFO,
2659 NETDEV_SVLAN_FILTER_DROP_INFO,
2660};
2661const char *netdev_cmd_to_name(enum netdev_cmd cmd);
2662
2663int register_netdevice_notifier(struct notifier_block *nb);
2664int unregister_netdevice_notifier(struct notifier_block *nb);
2665int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb);
2666int unregister_netdevice_notifier_net(struct net *net,
2667 struct notifier_block *nb);
2668int register_netdevice_notifier_dev_net(struct net_device *dev,
2669 struct notifier_block *nb,
2670 struct netdev_net_notifier *nn);
2671int unregister_netdevice_notifier_dev_net(struct net_device *dev,
2672 struct notifier_block *nb,
2673 struct netdev_net_notifier *nn);
2674
2675struct netdev_notifier_info {
2676 struct net_device *dev;
2677 struct netlink_ext_ack *extack;
2678};
2679
2680struct netdev_notifier_info_ext {
2681 struct netdev_notifier_info info; /* must be first */
2682 union {
2683 u32 mtu;
2684 } ext;
2685};
2686
2687struct netdev_notifier_change_info {
2688 struct netdev_notifier_info info; /* must be first */
2689 unsigned int flags_changed;
2690};
2691
2692struct netdev_notifier_changeupper_info {
2693 struct netdev_notifier_info info; /* must be first */
2694 struct net_device *upper_dev; /* new upper dev */
2695 bool master; /* is upper dev master */
2696 bool linking; /* is the notification for link or unlink */
2697 void *upper_info; /* upper dev info */
2698};
2699
2700struct netdev_notifier_changelowerstate_info {
2701 struct netdev_notifier_info info; /* must be first */
2702 void *lower_state_info; /* is lower dev state */
2703};
2704
2705struct netdev_notifier_pre_changeaddr_info {
2706 struct netdev_notifier_info info; /* must be first */
2707 const unsigned char *dev_addr;
2708};
2709
2710static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
2711 struct net_device *dev)
2712{
2713 info->dev = dev;
2714 info->extack = NULL;
2715}
2716
2717static inline struct net_device *
2718netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
2719{
2720 return info->dev;
2721}
2722
2723static inline struct netlink_ext_ack *
2724netdev_notifier_info_to_extack(const struct netdev_notifier_info *info)
2725{
2726 return info->extack;
2727}
2728
2729int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
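/* Example (illustrative sketch): a subsystem watching device events
 * registers a notifier_block and recovers the device from the opaque
 * pointer with netdev_notifier_info_to_dev(); my_event() is hypothetical.
 *
 *	static int my_event(struct notifier_block *nb, unsigned long event,
 *			    void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_event };
 *	// register_netdevice_notifier(&my_nb);
 */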
2730
2731
2732extern rwlock_t dev_base_lock; /* Device list lock */
2733
2734#define for_each_netdev(net, d) \
2735 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
2736#define for_each_netdev_reverse(net, d) \
2737 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
2738#define for_each_netdev_rcu(net, d) \
2739 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
2740#define for_each_netdev_safe(net, d, n) \
2741 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
2742#define for_each_netdev_continue(net, d) \
2743 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
2744#define for_each_netdev_continue_reverse(net, d) \
2745 list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \
2746 dev_list)
2747#define for_each_netdev_continue_rcu(net, d) \
2748 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
2749#define for_each_netdev_in_bond_rcu(bond, slave) \
2750 for_each_netdev_rcu(&init_net, slave) \
2751 if (netdev_master_upper_dev_get_rcu(slave) == (bond))
2752#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
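/* Example (illustrative sketch): walking every device in a namespace
 * under RCU; my_dump_devices() is hypothetical.
 *
 *	static void my_dump_devices(struct net *net)
 *	{
 *		struct net_device *d;
 *
 *		rcu_read_lock();
 *		for_each_netdev_rcu(net, d)
 *			pr_info("%d: %s\n", d->ifindex, d->name);
 *		rcu_read_unlock();
 *	}
 */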
2753
2754static inline struct net_device *next_net_device(struct net_device *dev)
2755{
2756 struct list_head *lh;
2757 struct net *net;
2758
2759 net = dev_net(dev);
2760 lh = dev->dev_list.next;
2761 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2762}
2763
2764static inline struct net_device *next_net_device_rcu(struct net_device *dev)
2765{
2766 struct list_head *lh;
2767 struct net *net;
2768
2769 net = dev_net(dev);
2770 lh = rcu_dereference(list_next_rcu(&dev->dev_list));
2771 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2772}
2773
2774static inline struct net_device *first_net_device(struct net *net)
2775{
2776 return list_empty(&net->dev_base_head) ? NULL :
2777 net_device_entry(net->dev_base_head.next);
2778}
2779
2780static inline struct net_device *first_net_device_rcu(struct net *net)
2781{
2782 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
2783
2784 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2785}
2786
2787int netdev_boot_setup_check(struct net_device *dev);
2788unsigned long netdev_boot_base(const char *prefix, int unit);
2789struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
2790 const char *hwaddr);
2791struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
2792struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
2793void dev_add_pack(struct packet_type *pt);
2794void dev_remove_pack(struct packet_type *pt);
2795void __dev_remove_pack(struct packet_type *pt);
2796void dev_add_offload(struct packet_offload *po);
2797void dev_remove_offload(struct packet_offload *po);
2798
2799int dev_get_iflink(const struct net_device *dev);
2800int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
2801struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
2802 unsigned short mask);
2803struct net_device *dev_get_by_name(struct net *net, const char *name);
2804struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
2805struct net_device *__dev_get_by_name(struct net *net, const char *name);
2806int dev_alloc_name(struct net_device *dev, const char *name);
2807int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
2808void dev_close(struct net_device *dev);
2809void dev_close_many(struct list_head *head, bool unlink);
2810void dev_disable_lro(struct net_device *dev);
2811int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
2812u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
2813 struct net_device *sb_dev);
2814u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
2815 struct net_device *sb_dev);
2816
2817int dev_queue_xmit(struct sk_buff *skb);
2818int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
2819int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
2820
2821static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
2822{
2823 int ret;
2824
2825 ret = __dev_direct_xmit(skb, queue_id);
2826 if (!dev_xmit_complete(ret))
2827 kfree_skb(skb);
2828 return ret;
2829}
2830
2831int register_netdevice(struct net_device *dev);
2832void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
2833void unregister_netdevice_many(struct list_head *head);
2834static inline void unregister_netdevice(struct net_device *dev)
2835{
2836 unregister_netdevice_queue(dev, NULL);
2837}
2838
2839int netdev_refcnt_read(const struct net_device *dev);
2840void free_netdev(struct net_device *dev);
2841void netdev_freemem(struct net_device *dev);
2842int init_dummy_netdev(struct net_device *dev);
2843
2844struct net_device *netdev_get_xmit_slave(struct net_device *dev,
2845 struct sk_buff *skb,
2846 bool all_slaves);
2847struct net_device *dev_get_by_index(struct net *net, int ifindex);
2848struct net_device *__dev_get_by_index(struct net *net, int ifindex);
2849struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
2850struct net_device *dev_get_by_napi_id(unsigned int napi_id);
2851int netdev_get_name(struct net *net, char *name, int ifindex);
2852int dev_restart(struct net_device *dev);
2853int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
2854int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);
2855
2856static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
2857{
2858 return NAPI_GRO_CB(skb)->data_offset;
2859}
2860
2861static inline unsigned int skb_gro_len(const struct sk_buff *skb)
2862{
2863 return skb->len - NAPI_GRO_CB(skb)->data_offset;
2864}
2865
2866static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
2867{
2868 NAPI_GRO_CB(skb)->data_offset += len;
2869}
2870
2871static inline void *skb_gro_header_fast(struct sk_buff *skb,
2872 unsigned int offset)
2873{
2874 return NAPI_GRO_CB(skb)->frag0 + offset;
2875}
2876
2877static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
2878{
2879 return NAPI_GRO_CB(skb)->frag0_len < hlen;
2880}
2881
2882static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
2883{
2884 NAPI_GRO_CB(skb)->frag0 = NULL;
2885 NAPI_GRO_CB(skb)->frag0_len = 0;
2886}
2887
2888static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
2889 unsigned int offset)
2890{
2891 if (!pskb_may_pull(skb, hlen))
2892 return NULL;
2893
2894 skb_gro_frag0_invalidate(skb);
2895 return skb->data + offset;
2896}
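/* Example (illustrative sketch): the usual pattern in a gro_receive
 * handler for reaching a header of a hypothetical struct my_hdr at the
 * current GRO offset; fall back to the slow path only when frag0 is too
 * short.
 *
 *	off = skb_gro_offset(skb);
 *	hlen = off + sizeof(struct my_hdr);
 *	hdr = skb_gro_header_fast(skb, off);
 *	if (skb_gro_header_hard(skb, hlen)) {
 *		hdr = skb_gro_header_slow(skb, hlen, off);
 *		if (!hdr)
 *			goto out;
 *	}
 */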
2897
2898static inline void *skb_gro_network_header(struct sk_buff *skb)
2899{
2900 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
2901 skb_network_offset(skb);
2902}
2903
2904static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
2905 const void *start, unsigned int len)
2906{
2907 if (NAPI_GRO_CB(skb)->csum_valid)
2908 NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
2909 csum_partial(start, len, 0));
2910}
2911
2912/* GRO checksum functions. These are logical equivalents of the normal
2913 * checksum functions (in skbuff.h) except that they operate on the GRO
2914 * offsets and fields in sk_buff.
2915 */
2916
2917__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
2918
2919static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
2920{
2921 return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
2922}
2923
2924static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
2925 bool zero_okay,
2926 __sum16 check)
2927{
2928 return ((skb->ip_summed != CHECKSUM_PARTIAL ||
2929 skb_checksum_start_offset(skb) <
2930 skb_gro_offset(skb)) &&
2931 !skb_at_gro_remcsum_start(skb) &&
2932 NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2933 (!zero_okay || check));
2934}
2935
2936static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
2937 __wsum psum)
2938{
2939 if (NAPI_GRO_CB(skb)->csum_valid &&
2940 !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
2941 return 0;
2942
2943 NAPI_GRO_CB(skb)->csum = psum;
2944
2945 return __skb_gro_checksum_complete(skb);
2946}
2947
2948static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
2949{
2950 if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
2951 /* Consume a checksum from CHECKSUM_UNNECESSARY */
2952 NAPI_GRO_CB(skb)->csum_cnt--;
2953 } else {
2954 /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
2955 * verified a new top level checksum or an encapsulated one
2956		 * during GRO. This saves work if we fall back to the normal path.
2957 */
2958 __skb_incr_checksum_unnecessary(skb);
2959 }
2960}
2961
2962#define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
2963 compute_pseudo) \
2964({ \
2965 __sum16 __ret = 0; \
2966 if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
2967 __ret = __skb_gro_checksum_validate_complete(skb, \
2968 compute_pseudo(skb, proto)); \
2969 if (!__ret) \
2970 skb_gro_incr_csum_unnecessary(skb); \
2971 __ret; \
2972})
2973
2974#define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
2975 __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
2976
2977#define skb_gro_checksum_validate_zero_check(skb, proto, check, \
2978 compute_pseudo) \
2979 __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
2980
2981#define skb_gro_checksum_simple_validate(skb) \
2982 __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
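/* Example (illustrative sketch): a transport gro_receive handler
 * validates the checksum before aggregating; inet_gro_compute_pseudo()
 * comes from <net/ip.h>.
 *
 *	if (skb_gro_checksum_validate(skb, IPPROTO_UDP,
 *				      inet_gro_compute_pseudo))
 *		goto flush;
 */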
2983
2984static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
2985{
2986 return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2987 !NAPI_GRO_CB(skb)->csum_valid);
2988}
2989
2990static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
2991 __wsum pseudo)
2992{
2993 NAPI_GRO_CB(skb)->csum = ~pseudo;
2994 NAPI_GRO_CB(skb)->csum_valid = 1;
2995}
2996
2997#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo) \
2998do { \
2999 if (__skb_gro_checksum_convert_check(skb)) \
3000 __skb_gro_checksum_convert(skb, \
3001 compute_pseudo(skb, proto)); \
3002} while (0)
3003
3004struct gro_remcsum {
3005 int offset;
3006 __wsum delta;
3007};
3008
3009static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
3010{
3011 grc->offset = 0;
3012 grc->delta = 0;
3013}
3014
3015static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
3016 unsigned int off, size_t hdrlen,
3017 int start, int offset,
3018 struct gro_remcsum *grc,
3019 bool nopartial)
3020{
3021 __wsum delta;
3022 size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
3023
3024 BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
3025
3026 if (!nopartial) {
3027 NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
3028 return ptr;
3029 }
3030
3031 ptr = skb_gro_header_fast(skb, off);
3032 if (skb_gro_header_hard(skb, off + plen)) {
3033 ptr = skb_gro_header_slow(skb, off + plen, off);
3034 if (!ptr)
3035 return NULL;
3036 }
3037
3038 delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
3039 start, offset);
3040
3041 /* Adjust skb->csum since we changed the packet */
3042 NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
3043
3044 grc->offset = off + hdrlen + offset;
3045 grc->delta = delta;
3046
3047 return ptr;
3048}
3049
3050static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
3051 struct gro_remcsum *grc)
3052{
3053 void *ptr;
3054 size_t plen = grc->offset + sizeof(u16);
3055
3056 if (!grc->delta)
3057 return;
3058
3059 ptr = skb_gro_header_fast(skb, grc->offset);
3060 if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
3061 ptr = skb_gro_header_slow(skb, plen, grc->offset);
3062 if (!ptr)
3063 return;
3064 }
3065
3066 remcsum_unadjust((__sum16 *)ptr, grc->delta);
3067}
3068
3069#ifdef CONFIG_XFRM_OFFLOAD
3070static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
3071{
3072 if (PTR_ERR(pp) != -EINPROGRESS)
3073 NAPI_GRO_CB(skb)->flush |= flush;
3074}
3075static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
3076 struct sk_buff *pp,
3077 int flush,
3078 struct gro_remcsum *grc)
3079{
3080 if (PTR_ERR(pp) != -EINPROGRESS) {
3081 NAPI_GRO_CB(skb)->flush |= flush;
3082 skb_gro_remcsum_cleanup(skb, grc);
3083 skb->remcsum_offload = 0;
3084 }
3085}
3086#else
3087static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
3088{
3089 NAPI_GRO_CB(skb)->flush |= flush;
3090}
3091static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
3092 struct sk_buff *pp,
3093 int flush,
3094 struct gro_remcsum *grc)
3095{
3096 NAPI_GRO_CB(skb)->flush |= flush;
3097 skb_gro_remcsum_cleanup(skb, grc);
3098 skb->remcsum_offload = 0;
3099}
3100#endif
3101
3102static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
3103 unsigned short type,
3104 const void *daddr, const void *saddr,
3105 unsigned int len)
3106{
3107 if (!dev->header_ops || !dev->header_ops->create)
3108 return 0;
3109
3110 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
3111}
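/* Example (illustrative sketch): push a link-layer header onto an skb
 * that already has enough headroom reserved; "daddr" is the destination
 * hardware address and "drop" is a local error label.
 *
 *	if (dev_hard_header(skb, dev, ETH_P_IP, daddr,
 *			    dev->dev_addr, skb->len) < 0)
 *		goto drop;
 */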
3112
3113static inline int dev_parse_header(const struct sk_buff *skb,
3114 unsigned char *haddr)
3115{
3116 const struct net_device *dev = skb->dev;
3117
3118 if (!dev->header_ops || !dev->header_ops->parse)
3119 return 0;
3120 return dev->header_ops->parse(skb, haddr);
3121}
3122
3123static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb)
3124{
3125 const struct net_device *dev = skb->dev;
3126
3127 if (!dev->header_ops || !dev->header_ops->parse_protocol)
3128 return 0;
3129 return dev->header_ops->parse_protocol(skb);
3130}
3131
3132/* ll_header must have at least hard_header_len allocated */
3133static inline bool dev_validate_header(const struct net_device *dev,
3134 char *ll_header, int len)
3135{
3136 if (likely(len >= dev->hard_header_len))
3137 return true;
3138 if (len < dev->min_header_len)
3139 return false;
3140
3141 if (capable(CAP_SYS_RAWIO)) {
3142 memset(ll_header + len, 0, dev->hard_header_len - len);
3143 return true;
3144 }
3145
3146 if (dev->header_ops && dev->header_ops->validate)
3147 return dev->header_ops->validate(ll_header, len);
3148
3149 return false;
3150}
3151
3152static inline bool dev_has_header(const struct net_device *dev)
3153{
3154 return dev->header_ops && dev->header_ops->create;
3155}
3156
3157typedef int gifconf_func_t(struct net_device *dev, char __user *bufptr,
3158 int len, int size);
3159int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
3160static inline int unregister_gifconf(unsigned int family)
3161{
3162 return register_gifconf(family, NULL);
3163}
3164
3165#ifdef CONFIG_NET_FLOW_LIMIT
3166#define FLOW_LIMIT_HISTORY	(1 << 7)  /* must be a power of 2; must not overflow buckets */
3167struct sd_flow_limit {
3168 u64 count;
3169 unsigned int num_buckets;
3170 unsigned int history_head;
3171 u16 history[FLOW_LIMIT_HISTORY];
3172 u8 buckets[];
3173};
3174
3175extern int netdev_flow_limit_table_len;
3176#endif /* CONFIG_NET_FLOW_LIMIT */
3177
3178/*
3179 * Incoming packets are placed on per-CPU queues
3180 */
3181struct softnet_data {
3182 struct list_head poll_list;
3183 struct sk_buff_head process_queue;
3184
3185 /* stats */
3186 unsigned int processed;
3187 unsigned int time_squeeze;
3188 unsigned int received_rps;
3189#ifdef CONFIG_RPS
3190 struct softnet_data *rps_ipi_list;
3191#endif
3192#ifdef CONFIG_NET_FLOW_LIMIT
3193 struct sd_flow_limit __rcu *flow_limit;
3194#endif
3195 struct Qdisc *output_queue;
3196 struct Qdisc **output_queue_tailp;
3197 struct sk_buff *completion_queue;
3198#ifdef CONFIG_XFRM_OFFLOAD
3199 struct sk_buff_head xfrm_backlog;
3200#endif
3201 /* written and read only by owning cpu: */
3202 struct {
3203 u16 recursion;
3204 u8 more;
3205 } xmit;
3206#ifdef CONFIG_RPS
3207 /* input_queue_head should be written by cpu owning this struct,
3208 * and only read by other cpus. Worth using a cache line.
3209 */
3210 unsigned int input_queue_head ____cacheline_aligned_in_smp;
3211
3212 /* Elements below can be accessed between CPUs for RPS/RFS */
3213 call_single_data_t csd ____cacheline_aligned_in_smp;
3214 struct softnet_data *rps_ipi_next;
3215 unsigned int cpu;
3216 unsigned int input_queue_tail;
3217#endif
3218 unsigned int dropped;
3219 struct sk_buff_head input_pkt_queue;
3220 struct napi_struct backlog;
3221
3222};
3223
3224static inline void input_queue_head_incr(struct softnet_data *sd)
3225{
3226#ifdef CONFIG_RPS
3227 sd->input_queue_head++;
3228#endif
3229}
3230
3231static inline void input_queue_tail_incr_save(struct softnet_data *sd,
3232 unsigned int *qtail)
3233{
3234#ifdef CONFIG_RPS
3235 *qtail = ++sd->input_queue_tail;
3236#endif
3237}
3238
3239DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
3240
3241static inline int dev_recursion_level(void)
3242{
3243 return this_cpu_read(softnet_data.xmit.recursion);
3244}
3245
3246#define XMIT_RECURSION_LIMIT 8
3247static inline bool dev_xmit_recursion(void)
3248{
3249 return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
3250 XMIT_RECURSION_LIMIT);
3251}
3252
3253static inline void dev_xmit_recursion_inc(void)
3254{
3255 __this_cpu_inc(softnet_data.xmit.recursion);
3256}
3257
3258static inline void dev_xmit_recursion_dec(void)
3259{
3260 __this_cpu_dec(softnet_data.xmit.recursion);
3261}
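/* Example (illustrative sketch): virtual devices that re-enter the stack
 * bound their nesting with the recursion helpers; my_forward() is
 * hypothetical, and the per-CPU counters assume this runs with bottom
 * halves disabled, as in a transmit path.
 *
 *	static int my_forward(struct sk_buff *skb)
 *	{
 *		int ret = NET_XMIT_DROP;
 *
 *		if (dev_xmit_recursion()) {
 *			kfree_skb(skb);
 *			return ret;
 *		}
 *		dev_xmit_recursion_inc();
 *		ret = dev_queue_xmit(skb);
 *		dev_xmit_recursion_dec();
 *		return ret;
 *	}
 */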
3262
3263void __netif_schedule(struct Qdisc *q);
3264void netif_schedule_queue(struct netdev_queue *txq);
3265
3266static inline void netif_tx_schedule_all(struct net_device *dev)
3267{
3268 unsigned int i;
3269
3270 for (i = 0; i < dev->num_tx_queues; i++)
3271 netif_schedule_queue(netdev_get_tx_queue(dev, i));
3272}
3273
3274static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
3275{
3276 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3277}
3278
3279/**
3280 * netif_start_queue - allow transmit
3281 * @dev: network device
3282 *
3283 * Allow upper layers to call the device hard_start_xmit routine.
3284 */
3285static inline void netif_start_queue(struct net_device *dev)
3286{
3287 netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
3288}
3289
3290static inline void netif_tx_start_all_queues(struct net_device *dev)
3291{
3292 unsigned int i;
3293
3294 for (i = 0; i < dev->num_tx_queues; i++) {
3295 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3296 netif_tx_start_queue(txq);
3297 }
3298}
3299
3300void netif_tx_wake_queue(struct netdev_queue *dev_queue);
3301
3302/**
3303 * netif_wake_queue - restart transmit
3304 * @dev: network device
3305 *
3306 * Allow upper layers to call the device hard_start_xmit routine.
3307 * Used for flow control when transmit resources are available.
3308 */
3309static inline void netif_wake_queue(struct net_device *dev)
3310{
3311 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
3312}
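
/*
 * Example (editor's sketch; the ring accounting helper and threshold
 * are hypothetical): a TX completion handler wakes the queue once it
 * has reclaimed enough descriptors, pairing with the
 * netif_stop_queue() call in the xmit path:
 *
 *	if (netif_queue_stopped(dev) &&
 *	    foo_tx_ring_free(priv) > FOO_WAKE_THRESHOLD)
 *		netif_wake_queue(dev);
 */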
3313
3314static inline void netif_tx_wake_all_queues(struct net_device *dev)
3315{
3316 unsigned int i;
3317
3318 for (i = 0; i < dev->num_tx_queues; i++) {
3319 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3320 netif_tx_wake_queue(txq);
3321 }
3322}
3323
3324static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
3325{
3326 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3327}
3328
3329/**
3330 * netif_stop_queue - stop the transmit queue
3331 * @dev: network device
3332 *
3333 * Stop upper layers calling the device hard_start_xmit routine.
3334 * Used for flow control when transmit resources are unavailable.
3335 */
3336static inline void netif_stop_queue(struct net_device *dev)
3337{
3338 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
3339}
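
/*
 * Example (editor's sketch; foo_post_descriptor() and
 * foo_tx_ring_free() are hypothetical): the canonical use is in
 * ndo_start_xmit(), stopping the queue *before* the ring becomes
 * completely full so the stack stops feeding us packets:
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		foo_post_descriptor(dev, skb);
 *		if (foo_tx_ring_free(dev) < MAX_SKB_FRAGS + 1)
 *			netif_stop_queue(dev);
 *		return NETDEV_TX_OK;
 *	}
 */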
3340
3341void netif_tx_stop_all_queues(struct net_device *dev);
3342
3343static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
3344{
3345 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3346}
3347
3348/**
3349 * netif_queue_stopped - test if transmit queue is flow-blocked
3350 * @dev: network device
3351 *
3352 * Test if transmit queue on device is currently unable to send.
3353 */
3354static inline bool netif_queue_stopped(const struct net_device *dev)
3355{
3356 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
3357}
3358
3359static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
3360{
3361 return dev_queue->state & QUEUE_STATE_ANY_XOFF;
3362}
3363
3364static inline bool
3365netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
3366{
3367 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
3368}
3369
3370static inline bool
3371netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
3372{
3373 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
3374}
3375
3376/**
3377 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
3378 * @dev_queue: pointer to transmit queue
3379 *
3380 * BQL-enabled drivers might use this helper in their ndo_start_xmit(),
3381 * to give the CPU an appropriate prefetch hint.
3382 */
3383static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
3384{
3385#ifdef CONFIG_BQL
3386 prefetchw(&dev_queue->dql.num_queued);
3387#endif
3388}
3389
3390/**
3391 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
3392 * @dev_queue: pointer to transmit queue
3393 *
3394 * BQL-enabled drivers might use this helper in their TX completion path,
3395 * to give the CPU an appropriate prefetch hint.
3396 */
3397static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
3398{
3399#ifdef CONFIG_BQL
3400 prefetchw(&dev_queue->dql.limit);
3401#endif
3402}
3403
3404static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
3405 unsigned int bytes)
3406{
3407#ifdef CONFIG_BQL
3408 dql_queued(&dev_queue->dql, bytes);
3409
3410 if (likely(dql_avail(&dev_queue->dql) >= 0))
3411 return;
3412
3413 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
3414
3415 /*
3416 * The XOFF flag must be set before checking the dql_avail below,
3417 * because in netdev_tx_completed_queue we update the dql_completed
3418 * before checking the XOFF flag.
3419 */
3420 smp_mb();
3421
3422	/* check again in case another CPU has just made room available */
3423 if (unlikely(dql_avail(&dev_queue->dql) >= 0))
3424 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
3425#endif
3426}
3427
3428/* Variant of netdev_tx_sent_queue() for drivers that are aware
3429 * that they should not test BQL status themselves.
3430 * We do want to change __QUEUE_STATE_STACK_XOFF only for the last
3431 * skb of a batch.
3432 * Returns true if the doorbell must be used to kick the NIC.
3433 */
3434static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
3435 unsigned int bytes,
3436 bool xmit_more)
3437{
3438 if (xmit_more) {
3439#ifdef CONFIG_BQL
3440 dql_queued(&dev_queue->dql, bytes);
3441#endif
3442 return netif_tx_queue_stopped(dev_queue);
3443 }
3444 netdev_tx_sent_queue(dev_queue, bytes);
3445 return true;
3446}
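
/*
 * Example (editor's sketch; foo_ring_doorbell() is hypothetical):
 * combining the return value with netdev_xmit_more() lets a driver
 * batch doorbell writes across an skb batch, only kicking the NIC
 * when required:
 *
 *	if (__netdev_tx_sent_queue(txq, skb->len, netdev_xmit_more()))
 *		foo_ring_doorbell(priv);
 */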
3447
3448/**
3449 * netdev_sent_queue - report the number of bytes queued to hardware
3450 * @dev: network device
3451 * @bytes: number of bytes queued to the hardware device queue
3452 *
3453 * Report the number of bytes queued for sending/completion to the network
3454 * device hardware queue. @bytes should be a good approximation and should
3455 * exactly match the @bytes total later reported via netdev_completed_queue().
3456 */
3457static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
3458{
3459 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
3460}
3461
3462static inline bool __netdev_sent_queue(struct net_device *dev,
3463 unsigned int bytes,
3464 bool xmit_more)
3465{
3466 return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes,
3467 xmit_more);
3468}
3469
3470static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
3471 unsigned int pkts, unsigned int bytes)
3472{
3473#ifdef CONFIG_BQL
3474 if (unlikely(!bytes))
3475 return;
3476
3477 dql_completed(&dev_queue->dql, bytes);
3478
3479 /*
3480	 * Without the memory barrier there is a small possibility that
3481 * netdev_tx_sent_queue will miss the update and cause the queue to
3482 * be stopped forever
3483 */
3484 smp_mb();
3485
3486 if (unlikely(dql_avail(&dev_queue->dql) < 0))
3487 return;
3488
3489 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
3490 netif_schedule_queue(dev_queue);
3491#endif
3492}
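
/*
 * Example (editor's sketch, not part of the original header; the
 * reclaim counters are hypothetical): BQL calls come in pairs. The
 * xmit path reports bytes handed to the hardware, and the completion
 * path reports what was actually sent; the two @bytes totals must
 * match:
 *
 *	// in ndo_start_xmit():
 *	netdev_tx_sent_queue(txq, skb->len);
 *
 *	// in the TX completion handler, after reclaiming descriptors:
 *	netdev_tx_completed_queue(txq, pkts_reclaimed, bytes_reclaimed);
 */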
3493
3494/**
3495 * netdev_completed_queue - report bytes and packets completed by device
3496 * @dev: network device
3497 * @pkts: actual number of packets sent over the medium
3498 * @bytes: actual number of bytes sent over the medium
3499 *
3500 * Report the number of bytes and packets transmitted by the network device
3501 * hardware queue over the physical medium, @bytes must exactly match the
3502 * @bytes amount passed to netdev_sent_queue()
3503 */
3504static inline void netdev_completed_queue(struct net_device *dev,
3505 unsigned int pkts, unsigned int bytes)
3506{
3507 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
3508}
3509
3510static inline void netdev_tx_reset_queue(struct netdev_queue *q)
3511{
3512#ifdef CONFIG_BQL
3513 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
3514 dql_reset(&q->dql);
3515#endif
3516}
3517
3518/**
3519 * netdev_reset_queue - reset the packets and bytes count of a network device
3520 * @dev_queue: network device
3521 *
3522 * Reset the bytes and packet count of a network device and clear the
3523 * software flow control OFF bit for this network device
3524 */
3525static inline void netdev_reset_queue(struct net_device *dev_queue)
3526{
3527 netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
3528}
3529
3530/**
3531 * netdev_cap_txqueue - check if selected tx queue exceeds device queues
3532 * @dev: network device
3533 * @queue_index: given tx queue index
3534 *
3535 * Returns 0 if the given tx queue index >= the number of device tx queues,
3536 * otherwise returns the originally passed tx queue index.
3537 */
3538static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
3539{
3540 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
3541 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
3542 dev->name, queue_index,
3543 dev->real_num_tx_queues);
3544 return 0;
3545 }
3546
3547 return queue_index;
3548}
3549
3550/**
3551 * netif_running - test if up
3552 * @dev: network device
3553 *
3554 * Test if the device has been brought up.
3555 */
3556static inline bool netif_running(const struct net_device *dev)
3557{
3558 return test_bit(__LINK_STATE_START, &dev->state);
3559}
3560
3561/*
3562 * Routines to manage the subqueues on a device. We only need start,
3563 * stop, and a check if it's stopped. All other device management is
3564 * done at the overall netdevice level.
3565 * There is also a test for whether the device is multiqueue.
3566 */
3567
3568/**
3569 * netif_start_subqueue - allow sending packets on subqueue
3570 * @dev: network device
3571 * @queue_index: sub queue index
3572 *
3573 * Start individual transmit queue of a device with multiple transmit queues.
3574 */
3575static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
3576{
3577 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3578
3579 netif_tx_start_queue(txq);
3580}
3581
3582/**
3583 * netif_stop_subqueue - stop sending packets on subqueue
3584 * @dev: network device
3585 * @queue_index: sub queue index
3586 *
3587 * Stop individual transmit queue of a device with multiple transmit queues.
3588 */
3589static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
3590{
3591 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3592 netif_tx_stop_queue(txq);
3593}
3594
3595/**
3596 * netif_subqueue_stopped - test status of subqueue
3597 * @dev: network device
3598 * @queue_index: sub queue index
3599 *
3600 * Check individual transmit queue of a device with multiple transmit queues.
3601 */
3602static inline bool __netif_subqueue_stopped(const struct net_device *dev,
3603 u16 queue_index)
3604{
3605 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3606
3607 return netif_tx_queue_stopped(txq);
3608}
3609
3610static inline bool netif_subqueue_stopped(const struct net_device *dev,
3611 struct sk_buff *skb)
3612{
3613 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
3614}
3615
3616/**
3617 * netif_wake_subqueue - allow sending packets on subqueue
3618 * @dev: network device
3619 * @queue_index: sub queue index
3620 *
3621 * Resume individual transmit queue of a device with multiple transmit queues.
3622 */
3623static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
3624{
3625 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3626
3627 netif_tx_wake_queue(txq);
3628}
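
/*
 * Example (editor's sketch; the per-queue ring helper and threshold
 * are hypothetical): a multiqueue driver applies the same stop/wake
 * flow control per subqueue, keyed by the skb's queue mapping:
 *
 *	u16 q = skb_get_queue_mapping(skb);
 *
 *	if (foo_tx_ring_free(priv, q) < MAX_SKB_FRAGS + 1)
 *		netif_stop_subqueue(dev, q);
 *	...
 *	if (__netif_subqueue_stopped(dev, q) &&
 *	    foo_tx_ring_free(priv, q) > FOO_WAKE_THRESHOLD)
 *		netif_wake_subqueue(dev, q);
 */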
3629
3630#ifdef CONFIG_XPS
3631int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
3632 u16 index);
3633int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
3634 u16 index, bool is_rxqs_map);
3635
3636/**
3637 * netif_attr_test_mask - Test a CPU or Rx queue set in a mask
3638 * @j: CPU/Rx queue index
3639 * @mask: bitmask of all cpus/rx queues
3640 * @nr_bits: number of bits in the bitmask
3641 *
3642 * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues.
3643 */
3644static inline bool netif_attr_test_mask(unsigned long j,
3645 const unsigned long *mask,
3646 unsigned int nr_bits)
3647{
3648 cpu_max_bits_warn(j, nr_bits);
3649 return test_bit(j, mask);
3650}
3651
3652/**
3653 * netif_attr_test_online - Test for online CPU/Rx queue
3654 * @j: CPU/Rx queue index
3655 * @online_mask: bitmask for CPUs/Rx queues that are online
3656 * @nr_bits: number of bits in the bitmask
3657 *
3658 * Returns true if a CPU/Rx queue is online.
3659 */
3660static inline bool netif_attr_test_online(unsigned long j,
3661 const unsigned long *online_mask,
3662 unsigned int nr_bits)
3663{
3664 cpu_max_bits_warn(j, nr_bits);
3665
3666 if (online_mask)
3667 return test_bit(j, online_mask);
3668
3669 return (j < nr_bits);
3670}
3671
3672/**
3673 * netif_attrmask_next - get the next CPU/Rx queue in a CPU/Rx queue mask
3674 * @n: CPU/Rx queue index
3675 * @srcp: the cpumask/Rx queue mask pointer
3676 * @nr_bits: number of bits in the bitmask
3677 *
3678 * Returns >= nr_bits if no further CPUs/Rx queues set.
3679 */
3680static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
3681 unsigned int nr_bits)
3682{
3683 /* -1 is a legal arg here. */
3684 if (n != -1)
3685 cpu_max_bits_warn(n, nr_bits);
3686
3687 if (srcp)
3688 return find_next_bit(srcp, nr_bits, n + 1);
3689
3690 return n + 1;
3691}
3692
3693/**
3694 * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p
3695 * @n: CPU/Rx queue index
3696 * @src1p: the first CPUs/Rx queues mask pointer
3697 * @src2p: the second CPUs/Rx queues mask pointer
3698 * @nr_bits: number of bits in the bitmask
3699 *
3700 * Returns >= nr_bits if no further CPUs/Rx queues set in both.
3701 */
3702static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
3703 const unsigned long *src2p,
3704 unsigned int nr_bits)
3705{
3706 /* -1 is a legal arg here. */
3707 if (n != -1)
3708 cpu_max_bits_warn(n, nr_bits);
3709
3710 if (src1p && src2p)
3711 return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
3712 else if (src1p)
3713 return find_next_bit(src1p, nr_bits, n + 1);
3714 else if (src2p)
3715 return find_next_bit(src2p, nr_bits, n + 1);
3716
3717 return n + 1;
3718}
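
/*
 * Example (editor's sketch): walking every bit set in an attribute
 * mask follows the usual find_next_bit() idiom, seeding the walk
 * with -1:
 *
 *	int j = -1;
 *
 *	while ((j = netif_attrmask_next(j, mask, nr_bits)) < nr_bits)
 *		pr_info("CPU/Rx queue %d is set\n", j);
 */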
3719#else
3720static inline int netif_set_xps_queue(struct net_device *dev,
3721 const struct cpumask *mask,
3722 u16 index)
3723{
3724 return 0;
3725}
3726
3727static inline int __netif_set_xps_queue(struct net_device *dev,
3728 const unsigned long *mask,
3729 u16 index, bool is_rxqs_map)
3730{
3731 return 0;
3732}
3733#endif
3734
3735/**
3736 * netif_is_multiqueue - test if device has multiple transmit queues
3737 * @dev: network device
3738 *
3739 * Check if device has multiple transmit queues
3740 */
3741static inline bool netif_is_multiqueue(const struct net_device *dev)
3742{
3743 return dev->num_tx_queues > 1;
3744}
3745
3746int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
3747
3748#ifdef CONFIG_SYSFS
3749int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
3750#else
3751static inline int netif_set_real_num_rx_queues(struct net_device *dev,
3752 unsigned int rxqs)
3753{
3754 dev->real_num_rx_queues = rxqs;
3755 return 0;
3756}
3757#endif
3758
3759static inline struct netdev_rx_queue *
3760__netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
3761{
3762 return dev->_rx + rxq;
3763}
3764
3765#ifdef CONFIG_SYSFS
3766static inline unsigned int get_netdev_rx_queue_index(
3767 struct netdev_rx_queue *queue)
3768{
3769 struct net_device *dev = queue->dev;
3770 int index = queue - dev->_rx;
3771
3772 BUG_ON(index >= dev->num_rx_queues);
3773 return index;
3774}
3775#endif
3776
3777#define DEFAULT_MAX_NUM_RSS_QUEUES (8)
3778int netif_get_num_default_rss_queues(void);
3779
3780enum skb_free_reason {
3781 SKB_REASON_CONSUMED,
3782 SKB_REASON_DROPPED,
3783};
3784
3785void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
3786void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
3787
3788/*
3789 * It is not allowed to call kfree_skb() or consume_skb() from hardware
3790 * interrupt context or with hardware interrupts being disabled.
3791 * (in_irq() || irqs_disabled())
3792 *
3793 * We provide four helpers that can be used in the following contexts:
3794 *
3795 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
3796 * replacing kfree_skb(skb)
3797 *
3798 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
3799 * Typically used in place of consume_skb(skb) in TX completion path
3800 *
3801 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
3802 * replacing kfree_skb(skb)
3803 *
3804 * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
3805 * and has consumed a packet. Used in place of consume_skb(skb)
3806 */
3807static inline void dev_kfree_skb_irq(struct sk_buff *skb)
3808{
3809 __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
3810}
3811
3812static inline void dev_consume_skb_irq(struct sk_buff *skb)
3813{
3814 __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
3815}
3816
3817static inline void dev_kfree_skb_any(struct sk_buff *skb)
3818{
3819 __dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
3820}
3821
3822static inline void dev_consume_skb_any(struct sk_buff *skb)
3823{
3824 __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
3825}
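
/*
 * Example (editor's sketch): choosing a helper by context. A TX
 * completion handler that may run in hard IRQ context uses the _irq
 * variants; code shared between IRQ and process context uses _any:
 *
 *	// successful transmit, reclaimed from the IRQ handler:
 *	dev_consume_skb_irq(skb);
 *
 *	// error/drop path in code that may run in any context:
 *	dev_kfree_skb_any(skb);
 */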
3826
3827void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
3828int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
3829int netif_rx(struct sk_buff *skb);
3830int netif_rx_ni(struct sk_buff *skb);
3831int netif_rx_any_context(struct sk_buff *skb);
3832int netif_receive_skb(struct sk_buff *skb);
3833int netif_receive_skb_core(struct sk_buff *skb);
3834void netif_receive_skb_list(struct list_head *head);
3835gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
3836void napi_gro_flush(struct napi_struct *napi, bool flush_old);
3837struct sk_buff *napi_get_frags(struct napi_struct *napi);
3838gro_result_t napi_gro_frags(struct napi_struct *napi);
3839struct packet_offload *gro_find_receive_by_type(__be16 type);
3840struct packet_offload *gro_find_complete_by_type(__be16 type);
3841
3842static inline void napi_free_frags(struct napi_struct *napi)
3843{
3844 kfree_skb(napi->skb);
3845 napi->skb = NULL;
3846}
3847
3848bool netdev_is_rx_handler_busy(struct net_device *dev);
3849int netdev_rx_handler_register(struct net_device *dev,
3850 rx_handler_func_t *rx_handler,
3851 void *rx_handler_data);
3852void netdev_rx_handler_unregister(struct net_device *dev);
3853
3854bool dev_valid_name(const char *name);
3855int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
3856 bool *need_copyout);
3857int dev_ifconf(struct net *net, struct ifconf *, int);
3858int dev_ethtool(struct net *net, struct ifreq *);
3859unsigned int dev_get_flags(const struct net_device *);
3860int __dev_change_flags(struct net_device *dev, unsigned int flags,
3861 struct netlink_ext_ack *extack);
3862int dev_change_flags(struct net_device *dev, unsigned int flags,
3863 struct netlink_ext_ack *extack);
3864void __dev_notify_flags(struct net_device *, unsigned int old_flags,
3865 unsigned int gchanges);
3866int dev_change_name(struct net_device *, const char *);
3867int dev_set_alias(struct net_device *, const char *, size_t);
3868int dev_get_alias(const struct net_device *, char *, size_t);
3869int dev_change_net_namespace(struct net_device *, struct net *, const char *);
3870int __dev_set_mtu(struct net_device *, int);
3871int dev_validate_mtu(struct net_device *dev, int mtu,
3872 struct netlink_ext_ack *extack);
3873int dev_set_mtu_ext(struct net_device *dev, int mtu,
3874 struct netlink_ext_ack *extack);
3875int dev_set_mtu(struct net_device *, int);
3876int dev_change_tx_queue_len(struct net_device *, unsigned long);
3877void dev_set_group(struct net_device *, int);
3878int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
3879 struct netlink_ext_ack *extack);
3880int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
3881 struct netlink_ext_ack *extack);
3882int dev_change_carrier(struct net_device *, bool new_carrier);
3883int dev_get_phys_port_id(struct net_device *dev,
3884 struct netdev_phys_item_id *ppid);
3885int dev_get_phys_port_name(struct net_device *dev,
3886 char *name, size_t len);
3887int dev_get_port_parent_id(struct net_device *dev,
3888 struct netdev_phys_item_id *ppid, bool recurse);
3889bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
3890int dev_change_proto_down(struct net_device *dev, bool proto_down);
3891int dev_change_proto_down_generic(struct net_device *dev, bool proto_down);
3892void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
3893 u32 value);
3894struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
3895struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
3896 struct netdev_queue *txq, int *ret);
3897
3898typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
3899int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
3900 int fd, int expected_fd, u32 flags);
3901int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
3902u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode);
3903
3904int xdp_umem_query(struct net_device *dev, u16 queue_id);
3905
3906int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3907int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3908bool is_skb_forwardable(const struct net_device *dev,
3909 const struct sk_buff *skb);
3910
3911static __always_inline int ____dev_forward_skb(struct net_device *dev,
3912 struct sk_buff *skb)
3913{
3914 if (skb_orphan_frags(skb, GFP_ATOMIC) ||
3915 unlikely(!is_skb_forwardable(dev, skb))) {
3916 atomic_long_inc(&dev->rx_dropped);
3917 kfree_skb(skb);
3918 return NET_RX_DROP;
3919 }
3920
3921 skb_scrub_packet(skb, true);
3922 skb->priority = 0;
3923 return 0;
3924}
3925
3926bool dev_nit_active(struct net_device *dev);
3927void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
3928
3929extern int netdev_budget;
3930extern unsigned int netdev_budget_usecs;
3931
3932/* Called by rtnetlink.c:rtnl_unlock() */
3933void netdev_run_todo(void);
3934
3935/**
3936 * dev_put - release reference to device
3937 * @dev: network device
3938 *
3939 * Release reference to device to allow it to be freed.
3940 */
3941static inline void dev_put(struct net_device *dev)
3942{
3943 this_cpu_dec(*dev->pcpu_refcnt);
3944}
3945
3946/**
3947 * dev_hold - get reference to device
3948 * @dev: network device
3949 *
3950 * Hold reference to device to keep it from being freed.
3951 */
3952static inline void dev_hold(struct net_device *dev)
3953{
3954 this_cpu_inc(*dev->pcpu_refcnt);
3955}
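
/*
 * Example (editor's sketch): hold the reference for as long as the
 * pointer is used outside RCU/RTNL protection, e.g. across deferred
 * work:
 *
 *	dev_hold(dev);		// before scheduling asynchronous work
 *	...
 *	dev_put(dev);		// in the work handler, once done with dev
 */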
3956
3957/* Carrier loss detection, dial on demand. The functions netif_carrier_on
3958 * and _off may be called from IRQ context, but it is the caller
3959 * who is responsible for serializing these calls.
3960 *
3961 * The name carrier is inappropriate, these functions should really be
3962 * called netif_lowerlayer_*() because they represent the state of any
3963 * kind of lower layer not just hardware media.
3964 */
3965
3966void linkwatch_init_dev(struct net_device *dev);
3967void linkwatch_fire_event(struct net_device *dev);
3968void linkwatch_forget_dev(struct net_device *dev);
3969
3970/**
3971 * netif_carrier_ok - test if carrier present
3972 * @dev: network device
3973 *
3974 * Check if carrier is present on device
3975 */
3976static inline bool netif_carrier_ok(const struct net_device *dev)
3977{
3978 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
3979}
3980
3981unsigned long dev_trans_start(struct net_device *dev);
3982
3983void __netdev_watchdog_up(struct net_device *dev);
3984
3985void netif_carrier_on(struct net_device *dev);
3986
3987void netif_carrier_off(struct net_device *dev);
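
/*
 * Example (editor's sketch; the callback is hypothetical): a link
 * state change handler, e.g. driven by a PHY interrupt, simply
 * forwards the new state; the linkwatch machinery takes care of
 * notifying the rest of the stack and userspace:
 *
 *	static void foo_link_change(struct net_device *dev, bool link_up)
 *	{
 *		if (link_up)
 *			netif_carrier_on(dev);
 *		else
 *			netif_carrier_off(dev);
 *	}
 */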
3988
3989/**
3990 * netif_dormant_on - mark device as dormant.
3991 * @dev: network device
3992 *
3993 * Mark device as dormant (as per RFC2863).
3994 *
3995 * The dormant state indicates that the relevant interface is not
3996 * actually in a condition to pass packets (i.e., it is not 'up') but is
3997 * in a "pending" state, waiting for some external event. For "on-
3998 * demand" interfaces, this new state identifies the situation where the
3999 * interface is waiting for events to place it in the up state.
4000 */
4001static inline void netif_dormant_on(struct net_device *dev)
4002{
4003 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
4004 linkwatch_fire_event(dev);
4005}
4006
4007/**
4008 * netif_dormant_off - set device as not dormant.
4009 * @dev: network device
4010 *
4011 * Device is not in dormant state.
4012 */
4013static inline void netif_dormant_off(struct net_device *dev)
4014{
4015 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
4016 linkwatch_fire_event(dev);
4017}
4018
4019/**
4020 * netif_dormant - test if device is dormant
4021 * @dev: network device
4022 *
4023 * Check if device is dormant.
4024 */
4025static inline bool netif_dormant(const struct net_device *dev)
4026{
4027 return test_bit(__LINK_STATE_DORMANT, &dev->state);
4028}
4029
4030
4031/**
4032 * netif_testing_on - mark device as under test.
4033 * @dev: network device
4034 *
4035 * Mark device as under test (as per RFC2863).
4036 *
4037 * The testing state indicates that some test(s) must be performed on
4038 * the interface. After completion of the test, the interface state
4039 * will change to up, dormant, or down, as appropriate.
4040 */
4041static inline void netif_testing_on(struct net_device *dev)
4042{
4043 if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state))
4044 linkwatch_fire_event(dev);
4045}
4046
4047/**
4048 * netif_testing_off - set device as not under test.
4049 * @dev: network device
4050 *
4051 * Device is not in testing state.
4052 */
4053static inline void netif_testing_off(struct net_device *dev)
4054{
4055 if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state))
4056 linkwatch_fire_event(dev);
4057}
4058
4059/**
4060 * netif_testing - test if device is under test
4061 * @dev: network device
4062 *
4063 * Check if device is under test
4064 */
4065static inline bool netif_testing(const struct net_device *dev)
4066{
4067 return test_bit(__LINK_STATE_TESTING, &dev->state);
4068}
4069
4070
4071/**
4072 * netif_oper_up - test if device is operational
4073 * @dev: network device
4074 *
4075 * Check if carrier is operational
4076 */
4077static inline bool netif_oper_up(const struct net_device *dev)
4078{
4079 return (dev->operstate == IF_OPER_UP ||
4080 dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
4081}
4082
4083/**
4084 * netif_device_present - is device available or removed
4085 * @dev: network device
4086 *
4087 * Check if device has not been removed from system.
4088 */
4089static inline bool netif_device_present(struct net_device *dev)
4090{
4091 return test_bit(__LINK_STATE_PRESENT, &dev->state);
4092}
4093
4094void netif_device_detach(struct net_device *dev);
4095
4096void netif_device_attach(struct net_device *dev);
4097
4098/*
4099 * Network interface message level settings
4100 */
4101
4102enum {
4103 NETIF_MSG_DRV_BIT,
4104 NETIF_MSG_PROBE_BIT,
4105 NETIF_MSG_LINK_BIT,
4106 NETIF_MSG_TIMER_BIT,
4107 NETIF_MSG_IFDOWN_BIT,
4108 NETIF_MSG_IFUP_BIT,
4109 NETIF_MSG_RX_ERR_BIT,
4110 NETIF_MSG_TX_ERR_BIT,
4111 NETIF_MSG_TX_QUEUED_BIT,
4112 NETIF_MSG_INTR_BIT,
4113 NETIF_MSG_TX_DONE_BIT,
4114 NETIF_MSG_RX_STATUS_BIT,
4115 NETIF_MSG_PKTDATA_BIT,
4116 NETIF_MSG_HW_BIT,
4117 NETIF_MSG_WOL_BIT,
4118
4119 /* When you add a new bit above, update netif_msg_class_names array
4120 * in net/ethtool/common.c
4121 */
4122 NETIF_MSG_CLASS_COUNT,
4123};
4124/* Both ethtool_ops interface and internal driver implementation use u32 */
4125static_assert(NETIF_MSG_CLASS_COUNT <= 32);
4126
4127#define __NETIF_MSG_BIT(bit) ((u32)1 << (bit))
4128#define __NETIF_MSG(name) __NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT)
4129
4130#define NETIF_MSG_DRV __NETIF_MSG(DRV)
4131#define NETIF_MSG_PROBE __NETIF_MSG(PROBE)
4132#define NETIF_MSG_LINK __NETIF_MSG(LINK)
4133#define NETIF_MSG_TIMER __NETIF_MSG(TIMER)
4134#define NETIF_MSG_IFDOWN __NETIF_MSG(IFDOWN)
4135#define NETIF_MSG_IFUP __NETIF_MSG(IFUP)
4136#define NETIF_MSG_RX_ERR __NETIF_MSG(RX_ERR)
4137#define NETIF_MSG_TX_ERR __NETIF_MSG(TX_ERR)
4138#define NETIF_MSG_TX_QUEUED __NETIF_MSG(TX_QUEUED)
4139#define NETIF_MSG_INTR __NETIF_MSG(INTR)
4140#define NETIF_MSG_TX_DONE __NETIF_MSG(TX_DONE)
4141#define NETIF_MSG_RX_STATUS __NETIF_MSG(RX_STATUS)
4142#define NETIF_MSG_PKTDATA __NETIF_MSG(PKTDATA)
4143#define NETIF_MSG_HW __NETIF_MSG(HW)
4144#define NETIF_MSG_WOL __NETIF_MSG(WOL)
4145
4146#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
4147#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
4148#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
4149#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
4150#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
4151#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
4152#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
4153#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
4154#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
4155#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
4156#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
4157#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
4158#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
4159#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
4160#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
4161
4162static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
4163{
4164 /* use default */
4165 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
4166 return default_msg_enable_bits;
4167 if (debug_value == 0) /* no output */
4168 return 0;
4169 /* set low N bits */
4170 return (1U << debug_value) - 1;
4171}
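
/*
 * Example (editor's sketch; "debug" and "priv" are hypothetical): the
 * usual driver pattern is a module parameter fed through
 * netif_msg_init() at probe time, then checked via the netif_msg_*()
 * accessors before logging:
 *
 *	static int debug = -1;	// -1 selects the driver default below
 *	module_param(debug, int, 0644);
 *
 *	priv->msg_enable = netif_msg_init(debug,
 *					  NETIF_MSG_DRV | NETIF_MSG_LINK);
 *	...
 *	if (netif_msg_link(priv))
 *		netdev_info(dev, "link is up\n");
 */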
4172
4173static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
4174{
4175 spin_lock(&txq->_xmit_lock);
4176 txq->xmit_lock_owner = cpu;
4177}
4178
4179static inline bool __netif_tx_acquire(struct netdev_queue *txq)
4180{
4181 __acquire(&txq->_xmit_lock);
4182 return true;
4183}
4184
4185static inline void __netif_tx_release(struct netdev_queue *txq)
4186{
4187 __release(&txq->_xmit_lock);
4188}
4189
4190static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
4191{
4192 spin_lock_bh(&txq->_xmit_lock);
4193 txq->xmit_lock_owner = smp_processor_id();
4194}
4195
4196static inline bool __netif_tx_trylock(struct netdev_queue *txq)
4197{
4198 bool ok = spin_trylock(&txq->_xmit_lock);
4199 if (likely(ok))
4200 txq->xmit_lock_owner = smp_processor_id();
4201 return ok;
4202}
4203
4204static inline void __netif_tx_unlock(struct netdev_queue *txq)
4205{
4206 txq->xmit_lock_owner = -1;
4207 spin_unlock(&txq->_xmit_lock);
4208}
4209
4210static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
4211{
4212 txq->xmit_lock_owner = -1;
4213 spin_unlock_bh(&txq->_xmit_lock);
4214}
4215
4216static inline void txq_trans_update(struct netdev_queue *txq)
4217{
4218 if (txq->xmit_lock_owner != -1)
4219 txq->trans_start = jiffies;
4220}
4221
4222/* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
4223static inline void netif_trans_update(struct net_device *dev)
4224{
4225 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
4226
4227 if (txq->trans_start != jiffies)
4228 txq->trans_start = jiffies;
4229}
4230
4231/**
4232 * netif_tx_lock - grab network device transmit lock
4233 * @dev: network device
4234 *
4235 * Get network device transmit lock
4236 */
4237static inline void netif_tx_lock(struct net_device *dev)
4238{
4239 unsigned int i;
4240 int cpu;
4241
4242 spin_lock(&dev->tx_global_lock);
4243 cpu = smp_processor_id();
4244 for (i = 0; i < dev->num_tx_queues; i++) {
4245 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
4246
4247 /* We are the only thread of execution doing a
4248 * freeze, but we have to grab the _xmit_lock in
4249 * order to synchronize with threads which are in
4250 * the ->hard_start_xmit() handler and already
4251 * checked the frozen bit.
4252 */
4253 __netif_tx_lock(txq, cpu);
4254 set_bit(__QUEUE_STATE_FROZEN, &txq->state);
4255 __netif_tx_unlock(txq);
4256 }
4257}
4258
4259static inline void netif_tx_lock_bh(struct net_device *dev)
4260{
4261 local_bh_disable();
4262 netif_tx_lock(dev);
4263}
4264
4265static inline void netif_tx_unlock(struct net_device *dev)
4266{
4267 unsigned int i;
4268
4269 for (i = 0; i < dev->num_tx_queues; i++) {
4270 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
4271
4272 /* No need to grab the _xmit_lock here. If the
4273 * queue is not stopped for another reason, we
4274 * force a schedule.
4275 */
4276 clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
4277 netif_schedule_queue(txq);
4278 }
4279 spin_unlock(&dev->tx_global_lock);
4280}
4281
4282static inline void netif_tx_unlock_bh(struct net_device *dev)
4283{
4284 netif_tx_unlock(dev);
4285 local_bh_enable();
4286}
4287
4288#define HARD_TX_LOCK(dev, txq, cpu) { \
4289 if ((dev->features & NETIF_F_LLTX) == 0) { \
4290 __netif_tx_lock(txq, cpu); \
4291 } else { \
4292 __netif_tx_acquire(txq); \
4293 } \
4294}
4295
4296#define HARD_TX_TRYLOCK(dev, txq) \
4297 (((dev->features & NETIF_F_LLTX) == 0) ? \
4298 __netif_tx_trylock(txq) : \
4299 __netif_tx_acquire(txq))
4300
4301#define HARD_TX_UNLOCK(dev, txq) { \
4302 if ((dev->features & NETIF_F_LLTX) == 0) { \
4303 __netif_tx_unlock(txq); \
4304 } else { \
4305 __netif_tx_release(txq); \
4306 } \
4307}
4308
4309static inline void netif_tx_disable(struct net_device *dev)
4310{
4311 unsigned int i;
4312 int cpu;
4313
4314 local_bh_disable();
4315 cpu = smp_processor_id();
4316 for (i = 0; i < dev->num_tx_queues; i++) {
4317 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
4318
4319 __netif_tx_lock(txq, cpu);
4320 netif_tx_stop_queue(txq);
4321 __netif_tx_unlock(txq);
4322 }
4323 local_bh_enable();
4324}
4325
4326static inline void netif_addr_lock(struct net_device *dev)
4327{
4328 unsigned char nest_level = 0;
4329
4330#ifdef CONFIG_LOCKDEP
4331 nest_level = dev->nested_level;
4332#endif
4333 spin_lock_nested(&dev->addr_list_lock, nest_level);
4334}
4335
4336static inline void netif_addr_lock_bh(struct net_device *dev)
4337{
4338 unsigned char nest_level = 0;
4339
4340#ifdef CONFIG_LOCKDEP
4341 nest_level = dev->nested_level;
4342#endif
4343 local_bh_disable();
4344 spin_lock_nested(&dev->addr_list_lock, nest_level);
4345}
4346
4347static inline void netif_addr_unlock(struct net_device *dev)
4348{
4349 spin_unlock(&dev->addr_list_lock);
4350}
4351
4352static inline void netif_addr_unlock_bh(struct net_device *dev)
4353{
4354 spin_unlock_bh(&dev->addr_list_lock);
4355}
4356
4357/*
4358 * dev_addrs walker. Should be used only for read access. Call with
4359 * rcu_read_lock held.
4360 */
4361#define for_each_dev_addr(dev, ha) \
4362 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
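
/*
 * Example (editor's sketch): a read-side walk over the device address
 * list, under the required RCU read lock:
 *
 *	struct netdev_hw_addr *ha;
 *
 *	rcu_read_lock();
 *	for_each_dev_addr(dev, ha)
 *		pr_info("%s: addr %pM\n", dev->name, ha->addr);
 *	rcu_read_unlock();
 */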
4363
4364/* These functions live elsewhere (drivers/net/net_init.c), but are related */
4365
4366void ether_setup(struct net_device *dev);
4367
4368/* Support for loadable net-drivers */
4369struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
4370 unsigned char name_assign_type,
4371 void (*setup)(struct net_device *),
4372 unsigned int txqs, unsigned int rxqs);
4373#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
4374 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
4375
4376#define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
4377 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
4378 count)
4379
4380int register_netdev(struct net_device *dev);
4381void unregister_netdev(struct net_device *dev);
4382
4383int devm_register_netdev(struct device *dev, struct net_device *ndev);
4384
4385/* General hardware address lists handling functions */
4386int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
4387 struct netdev_hw_addr_list *from_list, int addr_len);
4388void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
4389 struct netdev_hw_addr_list *from_list, int addr_len);
4390int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
4391 struct net_device *dev,
4392 int (*sync)(struct net_device *, const unsigned char *),
4393 int (*unsync)(struct net_device *,
4394 const unsigned char *));
4395int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
4396 struct net_device *dev,
4397 int (*sync)(struct net_device *,
4398 const unsigned char *, int),
4399 int (*unsync)(struct net_device *,
4400 const unsigned char *, int));
4401void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
4402 struct net_device *dev,
4403 int (*unsync)(struct net_device *,
4404 const unsigned char *, int));
4405void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
4406 struct net_device *dev,
4407 int (*unsync)(struct net_device *,
4408 const unsigned char *));
4409void __hw_addr_init(struct netdev_hw_addr_list *list);
4410
4411/* Functions used for device addresses handling */
4412int dev_addr_add(struct net_device *dev, const unsigned char *addr,
4413 unsigned char addr_type);
4414int dev_addr_del(struct net_device *dev, const unsigned char *addr,
4415 unsigned char addr_type);
4416void dev_addr_flush(struct net_device *dev);
4417int dev_addr_init(struct net_device *dev);
4418
4419/* Functions used for unicast addresses handling */
4420int dev_uc_add(struct net_device *dev, const unsigned char *addr);
4421int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
4422int dev_uc_del(struct net_device *dev, const unsigned char *addr);
4423int dev_uc_sync(struct net_device *to, struct net_device *from);
4424int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
4425void dev_uc_unsync(struct net_device *to, struct net_device *from);
4426void dev_uc_flush(struct net_device *dev);
4427void dev_uc_init(struct net_device *dev);
4428
4429/**
4430 * __dev_uc_sync - Synchronize device's unicast list
4431 * @dev: device to sync
4432 * @sync: function to call if address should be added
4433 * @unsync: function to call if address should be removed
4434 *
4435 * Add newly added addresses to the interface, and release
4436 * addresses that have been deleted.
4437 */
4438static inline int __dev_uc_sync(struct net_device *dev,
4439 int (*sync)(struct net_device *,
4440 const unsigned char *),
4441 int (*unsync)(struct net_device *,
4442 const unsigned char *))
4443{
4444 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
4445}
4446
4447/**
4448 * __dev_uc_unsync - Remove synchronized addresses from device
4449 * @dev: device to sync
4450 * @unsync: function to call if address should be removed
4451 *
4452 * Remove all addresses that were added to the device by dev_uc_sync().
4453 */
4454static inline void __dev_uc_unsync(struct net_device *dev,
4455 int (*unsync)(struct net_device *,
4456 const unsigned char *))
4457{
4458 __hw_addr_unsync_dev(&dev->uc, dev, unsync);
4459}
4460
4461/* Functions used for multicast addresses handling */
4462int dev_mc_add(struct net_device *dev, const unsigned char *addr);
4463int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
4464int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
4465int dev_mc_del(struct net_device *dev, const unsigned char *addr);
4466int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
4467int dev_mc_sync(struct net_device *to, struct net_device *from);
4468int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
4469void dev_mc_unsync(struct net_device *to, struct net_device *from);
4470void dev_mc_flush(struct net_device *dev);
4471void dev_mc_init(struct net_device *dev);
4472
4473/**
4474 * __dev_mc_sync - Synchronize device's multicast list
4475 * @dev: device to sync
4476 * @sync: function to call if address should be added
4477 * @unsync: function to call if address should be removed
4478 *
4479 * Add newly added addresses to the interface, and release
4480 * addresses that have been deleted.
4481 */
4482static inline int __dev_mc_sync(struct net_device *dev,
4483 int (*sync)(struct net_device *,
4484 const unsigned char *),
4485 int (*unsync)(struct net_device *,
4486 const unsigned char *))
4487{
4488 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
4489}
4490
4491/**
4492 * __dev_mc_unsync - Remove synchronized addresses from device
4493 * @dev: device to sync
4494 * @unsync: function to call if address should be removed
4495 *
4496 * Remove all addresses that were added to the device by dev_mc_sync().
4497 */
4498static inline void __dev_mc_unsync(struct net_device *dev,
4499 int (*unsync)(struct net_device *,
4500 const unsigned char *))
4501{
4502 __hw_addr_unsync_dev(&dev->mc, dev, unsync);
4503}
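
/*
 * Example (editor's sketch; foo_sync()/foo_unsync(), which would
 * program the hardware filters, are hypothetical): an
 * ndo_set_rx_mode() implementation propagates address list changes
 * through the sync helpers:
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		__dev_uc_sync(dev, foo_sync, foo_unsync);
 *		__dev_mc_sync(dev, foo_sync, foo_unsync);
 *	}
 */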
4504
4505/* Functions used for secondary unicast and multicast support */
4506void dev_set_rx_mode(struct net_device *dev);
4507void __dev_set_rx_mode(struct net_device *dev);
4508int dev_set_promiscuity(struct net_device *dev, int inc);
4509int dev_set_allmulti(struct net_device *dev, int inc);
4510void netdev_state_change(struct net_device *dev);
4511void netdev_notify_peers(struct net_device *dev);
4512void netdev_features_change(struct net_device *dev);
4513/* Load a device via the kmod */
4514void dev_load(struct net *net, const char *name);
4515struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
4516 struct rtnl_link_stats64 *storage);
4517void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
4518 const struct net_device_stats *netdev_stats);
4519void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
4520 const struct pcpu_sw_netstats __percpu *netstats);
4521
4522extern int netdev_max_backlog;
4523extern int netdev_tstamp_prequeue;
4524extern int weight_p;
4525extern int dev_weight_rx_bias;
4526extern int dev_weight_tx_bias;
4527extern int dev_rx_weight;
4528extern int dev_tx_weight;
4529extern int gro_normal_batch;
4530
4531enum {
4532 NESTED_SYNC_IMM_BIT,
4533 NESTED_SYNC_TODO_BIT,
4534};
4535
4536#define __NESTED_SYNC_BIT(bit) ((u32)1 << (bit))
4537#define __NESTED_SYNC(name) __NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT)
4538
4539#define NESTED_SYNC_IMM __NESTED_SYNC(IMM)
4540#define NESTED_SYNC_TODO __NESTED_SYNC(TODO)
4541
4542struct netdev_nested_priv {
4543 unsigned char flags;
4544 void *data;
4545};
4546
4547bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
4548struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
4549 struct list_head **iter);
4550struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4551 struct list_head **iter);
4552
4553#ifdef CONFIG_LOCKDEP
4554static LIST_HEAD(net_unlink_list);
4555
4556static inline void net_unlink_todo(struct net_device *dev)
4557{
4558 if (list_empty(&dev->unlink_list))
4559 list_add_tail(&dev->unlink_list, &net_unlink_list);
4560}
4561#endif
4562
4563/* iterate through upper list, must be called under RCU read lock */
4564#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
4565 for (iter = &(dev)->adj_list.upper, \
4566 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
4567 updev; \
4568 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
4569
4570int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
4571 int (*fn)(struct net_device *upper_dev,
4572 struct netdev_nested_priv *priv),
4573 struct netdev_nested_priv *priv);
4574
4575bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
4576 struct net_device *upper_dev);
4577
4578bool netdev_has_any_upper_dev(struct net_device *dev);
4579
4580void *netdev_lower_get_next_private(struct net_device *dev,
4581 struct list_head **iter);
4582void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4583 struct list_head **iter);
4584
4585#define netdev_for_each_lower_private(dev, priv, iter) \
4586 for (iter = (dev)->adj_list.lower.next, \
4587 priv = netdev_lower_get_next_private(dev, &(iter)); \
4588 priv; \
4589 priv = netdev_lower_get_next_private(dev, &(iter)))
4590
4591#define netdev_for_each_lower_private_rcu(dev, priv, iter) \
4592 for (iter = &(dev)->adj_list.lower, \
4593 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
4594 priv; \
4595 priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
4596
4597void *netdev_lower_get_next(struct net_device *dev,
4598 struct list_head **iter);
4599
4600#define netdev_for_each_lower_dev(dev, ldev, iter) \
4601 for (iter = (dev)->adj_list.lower.next, \
4602 ldev = netdev_lower_get_next(dev, &(iter)); \
4603 ldev; \
4604 ldev = netdev_lower_get_next(dev, &(iter)))
4605
4606struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
4607 struct list_head **iter);
4608int netdev_walk_all_lower_dev(struct net_device *dev,
4609 int (*fn)(struct net_device *lower_dev,
4610 struct netdev_nested_priv *priv),
4611 struct netdev_nested_priv *priv);
4612int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
4613 int (*fn)(struct net_device *lower_dev,
4614 struct netdev_nested_priv *priv),
4615 struct netdev_nested_priv *priv);
4616
4617void *netdev_adjacent_get_private(struct list_head *adj_list);
4618void *netdev_lower_get_first_private_rcu(struct net_device *dev);
4619struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
4620struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
4621int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev,
4622 struct netlink_ext_ack *extack);
4623int netdev_master_upper_dev_link(struct net_device *dev,
4624 struct net_device *upper_dev,
4625 void *upper_priv, void *upper_info,
4626 struct netlink_ext_ack *extack);
4627void netdev_upper_dev_unlink(struct net_device *dev,
4628 struct net_device *upper_dev);
4629int netdev_adjacent_change_prepare(struct net_device *old_dev,
4630 struct net_device *new_dev,
4631 struct net_device *dev,
4632 struct netlink_ext_ack *extack);
4633void netdev_adjacent_change_commit(struct net_device *old_dev,
4634 struct net_device *new_dev,
4635 struct net_device *dev);
4636void netdev_adjacent_change_abort(struct net_device *old_dev,
4637 struct net_device *new_dev,
4638 struct net_device *dev);
4639void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
4640void *netdev_lower_dev_get_private(struct net_device *dev,
4641 struct net_device *lower_dev);
4642void netdev_lower_state_changed(struct net_device *lower_dev,
4643 void *lower_state_info);
4644
4645/* RSS keys are 40 or 52 bytes long */
4646#define NETDEV_RSS_KEY_LEN 52
4647extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
4648void netdev_rss_key_fill(void *buffer, size_t len);
4649
4650int skb_checksum_help(struct sk_buff *skb);
4651int skb_crc32c_csum_help(struct sk_buff *skb);
4652int skb_csum_hwoffload_help(struct sk_buff *skb,
4653 const netdev_features_t features);
4654
4655struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
4656 netdev_features_t features, bool tx_path);
4657struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
4658 netdev_features_t features);
4659
4660struct netdev_bonding_info {
4661 ifslave slave;
4662 ifbond master;
4663};
4664
4665struct netdev_notifier_bonding_info {
4666 struct netdev_notifier_info info; /* must be first */
4667 struct netdev_bonding_info bonding_info;
4668};
4669
4670void netdev_bonding_info_change(struct net_device *dev,
4671 struct netdev_bonding_info *bonding_info);
4672
4673#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK)
4674void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data);
4675#else
4676static inline void ethtool_notify(struct net_device *dev, unsigned int cmd,
4677 const void *data)
4678{
4679}
4680#endif
4681
4682static inline
4683struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
4684{
4685 return __skb_gso_segment(skb, features, true);
4686}
4687__be16 skb_network_protocol(struct sk_buff *skb, int *depth);
4688
4689static inline bool can_checksum_protocol(netdev_features_t features,
4690 __be16 protocol)
4691{
4692 if (protocol == htons(ETH_P_FCOE))
4693 return !!(features & NETIF_F_FCOE_CRC);
4694
4695 /* Assume this is an IP checksum (not SCTP CRC) */
4696
4697 if (features & NETIF_F_HW_CSUM) {
4698 /* Can checksum everything */
4699 return true;
4700 }
4701
4702 switch (protocol) {
4703 case htons(ETH_P_IP):
4704 return !!(features & NETIF_F_IP_CSUM);
4705 case htons(ETH_P_IPV6):
4706 return !!(features & NETIF_F_IPV6_CSUM);
4707 default:
4708 return false;
4709 }
4710}
4711
4712#ifdef CONFIG_BUG
4713void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
4714#else
4715static inline void netdev_rx_csum_fault(struct net_device *dev,
4716 struct sk_buff *skb)
4717{
4718}
4719#endif
4720/* rx skb timestamps */
4721void net_enable_timestamp(void);
4722void net_disable_timestamp(void);
4723
4724#ifdef CONFIG_PROC_FS
4725int __init dev_proc_init(void);
4726#else
4727#define dev_proc_init() 0
4728#endif
4729
4730static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
4731 struct sk_buff *skb, struct net_device *dev,
4732 bool more)
4733{
4734 __this_cpu_write(softnet_data.xmit.more, more);
4735 return ops->ndo_start_xmit(skb, dev);
4736}
4737
4738static inline bool netdev_xmit_more(void)
4739{
4740 return __this_cpu_read(softnet_data.xmit.more);
4741}
4742
4743static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
4744 struct netdev_queue *txq, bool more)
4745{
4746 const struct net_device_ops *ops = dev->netdev_ops;
4747 netdev_tx_t rc;
4748
4749 rc = __netdev_start_xmit(ops, skb, dev, more);
4750 if (rc == NETDEV_TX_OK)
4751 txq_trans_update(txq);
4752
4753 return rc;
4754}
4755
4756int netdev_class_create_file_ns(const struct class_attribute *class_attr,
4757 const void *ns);
4758void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
4759 const void *ns);
4760
4761extern const struct kobj_ns_type_operations net_ns_type_operations;
4762
4763const char *netdev_drivername(const struct net_device *dev);
4764
4765void linkwatch_run_queue(void);
4766
4767static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
4768 netdev_features_t f2)
4769{
4770 if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
4771 if (f1 & NETIF_F_HW_CSUM)
4772 f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4773 else
4774 f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4775 }
4776
4777 return f1 & f2;
4778}
4779
4780static inline netdev_features_t netdev_get_wanted_features(
4781 struct net_device *dev)
4782{
4783 return (dev->features & ~dev->hw_features) | dev->wanted_features;
4784}
4785netdev_features_t netdev_increment_features(netdev_features_t all,
4786 netdev_features_t one, netdev_features_t mask);
4787
4788/* Allow TSO to be used on a stacked device:
4789 * performing the GSO segmentation before the last device
4790 * is a performance improvement.
4791 */
4792static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
4793 netdev_features_t mask)
4794{
4795 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
4796}
4797
4798int __netdev_update_features(struct net_device *dev);
4799void netdev_update_features(struct net_device *dev);
4800void netdev_change_features(struct net_device *dev);
4801
4802void netif_stacked_transfer_operstate(const struct net_device *rootdev,
4803 struct net_device *dev);
4804
4805netdev_features_t passthru_features_check(struct sk_buff *skb,
4806 struct net_device *dev,
4807 netdev_features_t features);
4808netdev_features_t netif_skb_features(struct sk_buff *skb);
4809
4810static inline bool net_gso_ok(netdev_features_t features, int gso_type)
4811{
4812 netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
4813
4814 /* check flags correspondence */
4815 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
4816 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
4817 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
4818 BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
4819 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
4820 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
4821 BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
4822 BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
4823 BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
4824 BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
4825 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
4826 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
4827 BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
4828 BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
4829 BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
4830 BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
4831 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
4832 BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));
4833 BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT));
4834
4835 return (features & feature) == feature;
4836}
4837
4838static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
4839{
4840 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
4841 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
4842}
4843
4844static inline bool netif_needs_gso(struct sk_buff *skb,
4845 netdev_features_t features)
4846{
4847 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
4848 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
4849 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
4850}
4851
4852static inline void netif_set_gso_max_size(struct net_device *dev,
4853 unsigned int size)
4854{
4855 dev->gso_max_size = size;
4856}
4857
4858static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
4859 int pulled_hlen, u16 mac_offset,
4860 int mac_len)
4861{
4862 skb->protocol = protocol;
4863 skb->encapsulation = 1;
4864 skb_push(skb, pulled_hlen);
4865 skb_reset_transport_header(skb);
4866 skb->mac_header = mac_offset;
4867 skb->network_header = skb->mac_header + mac_len;
4868 skb->mac_len = mac_len;
4869}
4870
4871static inline bool netif_is_macsec(const struct net_device *dev)
4872{
4873 return dev->priv_flags & IFF_MACSEC;
4874}
4875
4876static inline bool netif_is_macvlan(const struct net_device *dev)
4877{
4878 return dev->priv_flags & IFF_MACVLAN;
4879}
4880
4881static inline bool netif_is_macvlan_port(const struct net_device *dev)
4882{
4883 return dev->priv_flags & IFF_MACVLAN_PORT;
4884}
4885
4886static inline bool netif_is_bond_master(const struct net_device *dev)
4887{
4888 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
4889}
4890
4891static inline bool netif_is_bond_slave(const struct net_device *dev)
4892{
4893 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
4894}
4895
4896static inline bool netif_supports_nofcs(struct net_device *dev)
4897{
4898 return dev->priv_flags & IFF_SUPP_NOFCS;
4899}
4900
4901static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
4902{
4903 return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
4904}
4905
4906static inline bool netif_is_l3_master(const struct net_device *dev)
4907{
4908 return dev->priv_flags & IFF_L3MDEV_MASTER;
4909}
4910
4911static inline bool netif_is_l3_slave(const struct net_device *dev)
4912{
4913 return dev->priv_flags & IFF_L3MDEV_SLAVE;
4914}
4915
4916static inline bool netif_is_bridge_master(const struct net_device *dev)
4917{
4918 return dev->priv_flags & IFF_EBRIDGE;
4919}
4920
4921static inline bool netif_is_bridge_port(const struct net_device *dev)
4922{
4923 return dev->priv_flags & IFF_BRIDGE_PORT;
4924}
4925
4926static inline bool netif_is_ovs_master(const struct net_device *dev)
4927{
4928 return dev->priv_flags & IFF_OPENVSWITCH;
4929}
4930
4931static inline bool netif_is_ovs_port(const struct net_device *dev)
4932{
4933 return dev->priv_flags & IFF_OVS_DATAPATH;
4934}
4935
4936static inline bool netif_is_any_bridge_port(const struct net_device *dev)
4937{
4938 return netif_is_bridge_port(dev) || netif_is_ovs_port(dev);
4939}
4940
4941static inline bool netif_is_team_master(const struct net_device *dev)
4942{
4943 return dev->priv_flags & IFF_TEAM;
4944}
4945
4946static inline bool netif_is_team_port(const struct net_device *dev)
4947{
4948 return dev->priv_flags & IFF_TEAM_PORT;
4949}
4950
4951static inline bool netif_is_lag_master(const struct net_device *dev)
4952{
4953 return netif_is_bond_master(dev) || netif_is_team_master(dev);
4954}
4955
4956static inline bool netif_is_lag_port(const struct net_device *dev)
4957{
4958 return netif_is_bond_slave(dev) || netif_is_team_port(dev);
4959}
4960
4961static inline bool netif_is_rxfh_configured(const struct net_device *dev)
4962{
4963 return dev->priv_flags & IFF_RXFH_CONFIGURED;
4964}
4965
4966static inline bool netif_is_failover(const struct net_device *dev)
4967{
4968 return dev->priv_flags & IFF_FAILOVER;
4969}
4970
4971static inline bool netif_is_failover_slave(const struct net_device *dev)
4972{
4973 return dev->priv_flags & IFF_FAILOVER_SLAVE;
4974}
4975
4976/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
4977static inline void netif_keep_dst(struct net_device *dev)
4978{
4979 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
4980}
4981
4982/* return true if dev can't cope with MTU-sized frames that need VLAN tag insertion */
4983static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
4984{
4985 /* TODO: reserve and use an additional IFF bit, if we get more users */
4986 return dev->priv_flags & IFF_MACSEC;
4987}

extern struct pernet_operations __net_initdata loopback_net_ops;

/* Logging, debugging and troubleshooting/diagnostic helpers. */

/* netdev_printk helpers, similar to dev_printk */

static inline const char *netdev_name(const struct net_device *dev)
{
	if (!dev->name[0] || strchr(dev->name, '%'))
		return "(unnamed net_device)";
	return dev->name;
}

static inline bool netdev_unregistering(const struct net_device *dev)
{
	return dev->reg_state == NETREG_UNREGISTERING;
}

static inline const char *netdev_reg_state(const struct net_device *dev)
{
	switch (dev->reg_state) {
	case NETREG_UNINITIALIZED: return " (uninitialized)";
	case NETREG_REGISTERED: return "";
	case NETREG_UNREGISTERING: return " (unregistering)";
	case NETREG_UNREGISTERED: return " (unregistered)";
	case NETREG_RELEASED: return " (released)";
	case NETREG_DUMMY: return " (dummy)";
	}

	WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
	return " (unknown)";
}

__printf(3, 4) __cold
void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...);
__printf(2, 3) __cold
void netdev_emerg(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_alert(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_crit(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_err(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_warn(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_notice(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_info(const struct net_device *dev, const char *format, ...);
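
/* Illustrative sketch (not upstream code): these helpers prefix the
 * message with the interface name and registration state, so callers pass
 * only the event-specific text. A hypothetical probe error path:
 *
 *	err = register_netdev(dev);
 *	if (err) {
 *		netdev_err(dev, "failed to register netdev: %d\n", err);
 *		return err;
 *	}
 *	netdev_info(dev, "registered, %u TX queue(s)\n", dev->num_tx_queues);
 */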

#define netdev_level_once(level, dev, fmt, ...)			\
do {								\
	static bool __print_once __read_mostly;			\
								\
	if (!__print_once) {					\
		__print_once = true;				\
		netdev_printk(level, dev, fmt, ##__VA_ARGS__);	\
	}							\
} while (0)

#define netdev_emerg_once(dev, fmt, ...) \
	netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__)
#define netdev_alert_once(dev, fmt, ...) \
	netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__)
#define netdev_crit_once(dev, fmt, ...) \
	netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__)
#define netdev_err_once(dev, fmt, ...) \
	netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__)
#define netdev_warn_once(dev, fmt, ...) \
	netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__)
#define netdev_notice_once(dev, fmt, ...) \
	netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__)
#define netdev_info_once(dev, fmt, ...) \
	netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__)
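
/* Illustrative sketch (not upstream code): the _once variants suit
 * conditions hit on every packet or timer tick, where a plain
 * netdev_warn() would flood the log; the static flag limits output to
 * one line per boot per call site:
 *
 *	if (unlikely(len > dev->mtu))
 *		netdev_warn_once(dev, "oversized frame (len %d > mtu %u)\n",
 *				 len, dev->mtu);
 */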

#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)
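
/* Illustrative sketch (not upstream code): dev_load() resolves an unknown
 * interface name by requesting the module alias "netdev-<name>", so a
 * hypothetical driver providing a device called "foo0" would declare
 *
 *	MODULE_ALIAS_NETDEV("foo0");
 *
 * and get autoloaded the first time userspace refers to that interface.
 */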

#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define netdev_dbg(__dev, format, args...)			\
do {								\
	dynamic_netdev_dbg(__dev, format, ##args);		\
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
})
#endif
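
/* Usage note (illustrative, not upstream code): with dynamic debug built
 * in, netdev_dbg() call sites are compiled in but stay silent until
 * enabled at run time through the dynamic_debug control file
 * (foo_main.c is a hypothetical source file):
 *
 *	echo 'file foo_main.c +p' > /sys/kernel/debug/dynamic_debug/control
 *
 * Without dynamic debug or DEBUG, the if (0) arm keeps format-string
 * type checking while generating no code.
 */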

#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else

#define netdev_vdbg(dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
	0;							\
})
#endif

5097/*
5098 * netdev_WARN() acts like dev_printk(), but with the key difference
5099 * of using a WARN/WARN_ON to get the message out, including the
5100 * file/line information and a backtrace.
5101 */
5102#define netdev_WARN(dev, format, args...) \
5103 WARN(1, "netdevice: %s%s: " format, netdev_name(dev), \
5104 netdev_reg_state(dev), ##args)
5105
5106#define netdev_WARN_ONCE(dev, format, args...) \
5107 WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev), \
5108 netdev_reg_state(dev), ##args)
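
/* Illustrative sketch (not upstream code): reserve netdev_WARN() for
 * invariants whose violation warrants a backtrace, not just a log line.
 * A hypothetical guard in a TX completion path:
 *
 *	if (unlikely(budget < 0))
 *		netdev_WARN(dev, "negative completion budget %d\n", budget);
 */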

/* netif printk helpers, similar to netdev_printk */

#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)

#define netif_level(level, priv, type, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_##level(dev, fmt, ##args);		\
} while (0)

#define netif_emerg(priv, type, dev, fmt, args...)		\
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)		\
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)		\
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)		\
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)		\
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)		\
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)		\
	netif_level(info, priv, type, dev, fmt, ##args)

#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_netdev_dbg(netdev, format, ##args);	\
} while (0)
#elif defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...)			\
({									\
	if (0)								\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;								\
})
#endif
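
/* Illustrative sketch (not upstream code): the netif_* variants gate each
 * message class on the driver's msg_enable bitmap, conventionally seeded
 * from a module parameter via netif_msg_init(). foo_priv and the debug
 * parameter are hypothetical:
 *
 *	struct foo_priv {
 *		u32 msg_enable;
 *	};
 *
 *	priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV |
 *					  NETIF_MSG_PROBE | NETIF_MSG_LINK);
 *
 *	netif_info(priv, link, dev, "link up, %u Mbps\n", speed);
 */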

/* if @cond then downgrade to debug, else print at @level */
#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...)     \
	do {                                                              \
		if (cond)                                                 \
			netif_dbg(priv, type, netdev, fmt, ##args);       \
		else                                                      \
			netif_ ## level(priv, type, netdev, fmt, ##args); \
	} while (0)
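
/* Illustrative sketch (not upstream code): useful when a failure is noisy
 * but expected in some states, e.g. while the interface is admin-down and
 * a warning would be misleading:
 *
 *	netif_cond_dbg(priv, tx_err, dev, !netif_running(dev), warn,
 *		       "TX ring flush failed: %d\n", err);
 */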

#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif

5178/*
5179 * The list of packet types we will receive (as opposed to discard)
5180 * and the routines to invoke.
5181 *
5182 * Why 16. Because with 16 the only overlap we get on a hash of the
5183 * low nibble of the protocol value is RARP/SNAP/X.25.
5184 *
5185 * 0800 IP
5186 * 0001 802.3
5187 * 0002 AX.25
5188 * 0004 802.2
5189 * 8035 RARP
5190 * 0005 SNAP
5191 * 0805 X.25
5192 * 0806 ARP
5193 * 8137 IPX
5194 * 0009 Localtalk
5195 * 86DD IPv6
5196 */
5197#define PTYPE_HASH_SIZE (16)
5198#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
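
/* Worked example (illustrative, not upstream code): the bucket is the low
 * nibble of the EtherType, i.e. ntohs(type) & PTYPE_HASH_MASK. From the
 * table above, 0x8035 (RARP), 0x0005 (SNAP) and 0x0805 (X.25) all end in
 * 5 and so share bucket 5; every other listed protocol lands in a bucket
 * of its own. Lookup code indexes a table of PTYPE_HASH_SIZE list heads
 * along these lines:
 *
 *	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
 */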

extern struct net_device *blackhole_netdev;

#endif /* _LINUX_NETDEVICE_H */