/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
#include <asm/local.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <net/net_namespace.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>
#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <linux/netdevice_xmit.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
#include <uapi/linux/netdev.h>
#include <linux/hashtable.h>
#include <linux/rbtree.h>
#include <net/net_trackers.h>
#include <net/net_debug.h>
#include <net/dropreason-core.h>
#include <net/neighbour_tables.h>

struct netpoll_info;
struct device;
struct ethtool_ops;
struct kernel_hwtstamp_config;
struct phy_device;
struct dsa_port;
struct ip_tunnel_parm_kern;
struct macsec_context;
struct macsec_ops;
struct netdev_name_node;
struct sd_flow_limit;
struct sfp_bus;
/* 802.11 specific */
struct wireless_dev;
/* 802.15.4 specific */
struct wpan_dev;
struct mpls_dev;
/* UDP Tunnel offloads */
struct udp_tunnel_info;
struct udp_tunnel_nic_info;
struct udp_tunnel_nic;
struct bpf_prog;
struct xdp_buff;
struct xdp_frame;
struct xdp_metadata_ops;
struct xdp_md;
struct ethtool_netdev_state;
struct phy_link_topology;

typedef u32 xdp_features_t;

void synchronize_net(void);
void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops);
void netdev_sw_irq_coalesce_default_on(struct net_device *dev);

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

#define MAX_NEST_DEV 8

/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */
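/*
 * Example (illustrative sketch, not part of this header): a virtual device
 * transmitting synchronously can map the mixed return-code namespaces coming
 * back from dev_queue_xmit() to an errno with net_xmit_eval(), defined just
 * below. The foo_* name is hypothetical.
 *
 *	static int foo_tunnel_xmit_one(struct sk_buff *skb)
 *	{
 *		int rc = dev_queue_xmit(skb);
 *
 *		// NET_XMIT_CN is congestion feedback, not a hard failure
 *		return net_xmit_eval(rc);
 *	}
 */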
/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped			*/
#define NET_XMIT_CN		0x02	/* congestion notification	*/
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority, prompting us to send less aggressively.
 */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}

/*
 * Compute the worst-case header length according to the protocols
 * used.
 */

#if defined(CONFIG_HYPERV_NET)
# define LL_MAX_HEADER 128
#elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

/*
 * Old network device statistics. Fields are native words
 * (unsigned long) so they can be read and written atomically.
 */

#define NET_DEV_STAT(FIELD)			\
	union {					\
		unsigned long FIELD;		\
		atomic_long_t __##FIELD;	\
	}

struct net_device_stats {
	NET_DEV_STAT(rx_packets);
	NET_DEV_STAT(tx_packets);
	NET_DEV_STAT(rx_bytes);
	NET_DEV_STAT(tx_bytes);
	NET_DEV_STAT(rx_errors);
	NET_DEV_STAT(tx_errors);
	NET_DEV_STAT(rx_dropped);
	NET_DEV_STAT(tx_dropped);
	NET_DEV_STAT(multicast);
	NET_DEV_STAT(collisions);
	NET_DEV_STAT(rx_length_errors);
	NET_DEV_STAT(rx_over_errors);
	NET_DEV_STAT(rx_crc_errors);
	NET_DEV_STAT(rx_frame_errors);
	NET_DEV_STAT(rx_fifo_errors);
	NET_DEV_STAT(rx_missed_errors);
	NET_DEV_STAT(tx_aborted_errors);
	NET_DEV_STAT(tx_carrier_errors);
	NET_DEV_STAT(tx_fifo_errors);
	NET_DEV_STAT(tx_heartbeat_errors);
	NET_DEV_STAT(tx_window_errors);
	NET_DEV_STAT(rx_compressed);
	NET_DEV_STAT(tx_compressed);
};
#undef NET_DEV_STAT
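/*
 * Example (illustrative): the NET_DEV_STAT() union gives two views of the
 * same counter, so a serialized writer can use a plain word-sized increment
 * while a concurrent updater uses the atomic alias. A sketch, not an API:
 *
 *	dev->stats.tx_packets++;			// serialized fast path
 *	atomic_long_inc(&dev->stats.__rx_dropped);	// concurrent path
 *	dropped = (unsigned long)atomic_long_read(&dev->stats.__rx_dropped);
 */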
/* per-cpu stats, allocated on demand.
 * Try to fit them in a single cache line, for dev_get_stats() sake.
 */
struct net_device_core_stats {
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	rx_nohandler;
	unsigned long	rx_otherhost_dropped;
} __aligned(4 * sizeof(unsigned long));

#include <linux/cache.h>
#include <linux/skbuff.h>

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
	struct list_head	list;
	struct rb_node		node;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_UNICAST	3
#define NETDEV_HW_ADDR_T_MULTICAST	4
	bool			global_use;
	int			sync_cnt;
	int			refcount;
	int			synced;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;

	/* Auxiliary tree for faster lookup on addition and deletion */
	struct rb_root		tree;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)
#define netdev_for_each_synced_uc_addr(_ha, _dev) \
	netdev_for_each_uc_addr((_ha), (_dev)) \
		if ((_ha)->sync_cnt)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
#define netdev_for_each_synced_mc_addr(_ha, _dev) \
	netdev_for_each_mc_addr((_ha), (_dev)) \
		if ((_ha)->sync_cnt)
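/*
 * Example (illustrative sketch): a driver's ndo_set_rx_mode() typically
 * walks the device's multicast list with the iterator above to program
 * hardware filters. The foo_* pieces are hypothetical.
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *		struct netdev_hw_addr *ha;
 *
 *		foo_clear_mc_filters(priv);
 *		netdev_for_each_mc_addr(ha, dev)
 *			foo_add_mc_filter(priv, ha->addr);
 *	}
 */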
struct hh_cache {
	unsigned int	hh_len;
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
 * Alternative is:
 *	dev->hard_header_len ? (dev->hard_header_len +
 *				(HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \
	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \
	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
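/*
 * Example (illustrative sketch): callers building an output skb by hand
 * reserve LL_RESERVED_SPACE() up front so the hard header (and any extra
 * headroom the device asked for) can be pushed later without reallocating:
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
 *	if (!skb)
 *		return NULL;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */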
struct header_ops {
	int	(*create)(struct sk_buff *skb, struct net_device *dev,
			  unsigned short type, const void *daddr,
			  const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
	bool	(*validate)(const char *ll_header, unsigned int len);
	__be16	(*parse_protocol)(const struct sk_buff *skb);
};

/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
	__LINK_STATE_TESTING,
};

struct gro_list {
	struct list_head	list;
	int			count;
};

/*
 * size of gro hash buckets, must be less than the number of bits in
 * napi_struct::gro_bitmask
 */
#define GRO_HASH_BUCKETS	8

/*
 * Structure for per-NAPI config
 */
struct napi_config {
	u64 gro_flush_timeout;
	u64 irq_suspend_timeout;
	u32 defer_hard_irqs;
	unsigned int napi_id;
};

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit. This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-CPU poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	u32			defer_hard_irqs_count;
	unsigned long		gro_bitmask;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	/* CPU actively polling if netpoll is configured */
	int			poll_owner;
#endif
	/* CPU on which NAPI has been scheduled for processing */
	int			list_owner;
	struct net_device	*dev;
	struct gro_list		gro_hash[GRO_HASH_BUCKETS];
	struct sk_buff		*skb;
	struct list_head	rx_list; /* Pending GRO_NORMAL skbs */
	int			rx_count; /* length of rx_list */
	unsigned int		napi_id;
	struct hrtimer		timer;
	struct task_struct	*thread;
	unsigned long		gro_flush_timeout;
	unsigned long		irq_suspend_timeout;
	u32			defer_hard_irqs;
	/* control-path-only fields follow */
	struct list_head	dev_list;
	struct hlist_node	napi_hash_node;
	int			irq;
	int			index;
	struct napi_config	*config;
};

enum {
	NAPI_STATE_SCHED,		/* Poll is scheduled */
	NAPI_STATE_MISSED,		/* reschedule a napi */
	NAPI_STATE_DISABLE,		/* Disable pending */
	NAPI_STATE_NPSVC,		/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_LISTED,		/* NAPI added to system lists */
	NAPI_STATE_NO_BUSY_POLL,	/* Do not add in napi_hash, no busy polling */
	NAPI_STATE_IN_BUSY_POLL,	/* sk_busy_loop() owns this NAPI */
	NAPI_STATE_PREFER_BUSY_POLL,	/* prefer busy-polling over softirq processing */
	NAPI_STATE_THREADED,		/* The poll is performed inside its own thread */
	NAPI_STATE_SCHED_THREADED,	/* Napi is currently scheduled in threaded mode */
};

enum {
	NAPIF_STATE_SCHED		= BIT(NAPI_STATE_SCHED),
	NAPIF_STATE_MISSED		= BIT(NAPI_STATE_MISSED),
	NAPIF_STATE_DISABLE		= BIT(NAPI_STATE_DISABLE),
	NAPIF_STATE_NPSVC		= BIT(NAPI_STATE_NPSVC),
	NAPIF_STATE_LISTED		= BIT(NAPI_STATE_LISTED),
	NAPIF_STATE_NO_BUSY_POLL	= BIT(NAPI_STATE_NO_BUSY_POLL),
	NAPIF_STATE_IN_BUSY_POLL	= BIT(NAPI_STATE_IN_BUSY_POLL),
	NAPIF_STATE_PREFER_BUSY_POLL	= BIT(NAPI_STATE_PREFER_BUSY_POLL),
	NAPIF_STATE_THREADED		= BIT(NAPI_STATE_THREADED),
	NAPIF_STATE_SCHED_THREADED	= BIT(NAPI_STATE_SCHED_THREADED),
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_CONSUMED,
};
typedef enum gro_result gro_result_t;

/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister a rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler decides the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on the exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
 * delivered, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */

enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
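/*
 * Example (illustrative sketch): a minimal rx_handler skeleton for a
 * hypothetical "foo" upper device, as it might be registered with
 * netdev_rx_handler_register(). Not taken from any in-tree driver.
 *
 *	static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *		struct foo_port *port = rcu_dereference(skb->dev->rx_handler_data);
 *
 *		if (foo_frame_is_local(port, skb)) {	// hypothetical test
 *			foo_deliver(port, skb);
 *			return RX_HANDLER_CONSUMED;
 *		}
 *		skb->dev = port->upper_dev;		// divert to the upper device
 *		*pskb = skb;
 *		return RX_HANDLER_ANOTHER;
 *	}
 */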
void __napi_schedule(struct napi_struct *n);
void __napi_schedule_irqoff(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

static inline bool napi_prefer_busy_poll(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
}

/**
 * napi_is_scheduled - test if NAPI is scheduled
 * @n: NAPI context
 *
 * This check is "best-effort". With no locking implemented,
 * a NAPI can be scheduled or terminate right after this check
 * and produce imprecise results.
 *
 * NAPI_STATE_SCHED is an internal state, napi_is_scheduled
 * should not be used normally and napi_schedule should be
 * used instead.
 *
 * Use only if the driver really needs to check if a NAPI
 * is scheduled, for example in the context of a delayed timer
 * that can be skipped if a NAPI is already scheduled.
 *
 * Return: true if NAPI is scheduled, false otherwise.
 */
static inline bool napi_is_scheduled(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_SCHED, &n->state);
}

bool napi_schedule_prep(struct napi_struct *n);

/**
 * napi_schedule - schedule NAPI poll
 * @n: NAPI context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 * Return: true if we schedule a NAPI or false if not.
 * Refer to napi_schedule_prep() for additional reasons why
 * a NAPI might not be scheduled.
 */
static inline bool napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n)) {
		__napi_schedule(n);
		return true;
	}

	return false;
}

/**
 * napi_schedule_irqoff - schedule NAPI poll
 * @n: NAPI context
 *
 * Variant of napi_schedule(), assuming hard irqs are masked.
 */
static inline void napi_schedule_irqoff(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule_irqoff(n);
}
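/*
 * Example (illustrative sketch): the canonical interrupt/poll pairing for
 * the scheduling helpers above. The foo_* driver, its private structures
 * and hardware helpers are hypothetical; napi_complete_done() is declared
 * just below.
 *
 *	static irqreturn_t foo_irq_handler(int irq, void *data)
 *	{
 *		struct foo_ring *ring = data;
 *
 *		foo_mask_ring_irq(ring);		// hypothetical
 *		napi_schedule(&ring->napi);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_ring *ring = container_of(napi, struct foo_ring, napi);
 *		int work_done = foo_clean_rx(ring, budget);	// hypothetical
 *
 *		if (work_done < budget &&
 *		    napi_complete_done(napi, work_done))
 *			foo_unmask_ring_irq(ring);	// rearm only if allowed
 *		return work_done;
 *	}
 */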
/**
 * napi_complete_done - NAPI processing complete
 * @n: NAPI context
 * @work_done: number of packets processed
 *
 * Mark NAPI processing as complete. Should only be called if poll budget
 * has not been completely consumed.
 * Prefer over napi_complete().
 * Return: false if device should avoid rearming interrupts.
 */
bool napi_complete_done(struct napi_struct *n, int work_done);

static inline bool napi_complete(struct napi_struct *n)
{
	return napi_complete_done(n, 0);
}

int dev_set_threaded(struct net_device *dev, bool threaded);

/**
 * napi_disable - prevent NAPI from scheduling
 * @n: NAPI context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
void napi_disable(struct napi_struct *n);

void napi_enable(struct napi_struct *n);

/**
 * napi_synchronize - wait until NAPI is not running
 * @n: NAPI context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	if (IS_ENABLED(CONFIG_SMP))
		while (test_bit(NAPI_STATE_SCHED, &n->state))
			msleep(1);
	else
		barrier();
}

/**
 * napi_if_scheduled_mark_missed - if napi is running, set the
 * NAPIF_STATE_MISSED
 * @n: NAPI context
 *
 * If napi is running, set the NAPIF_STATE_MISSED, and return true if
 * NAPI is scheduled.
 **/
static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
{
	unsigned long val, new;

	val = READ_ONCE(n->state);
	do {
		if (val & NAPIF_STATE_DISABLE)
			return true;

		if (!(val & NAPIF_STATE_SCHED))
			return false;

		new = val | NAPIF_STATE_MISSED;
	} while (!try_cmpxchg(&n->state, &val, new));

	return true;
}

enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
};

#define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
#define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)

#define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
					QUEUE_STATE_FROZEN)
#define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
					QUEUE_STATE_FROZEN)

/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
 * netif_tx_* functions below are used to manipulate this flag. The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently. The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or stack (either
 * of the XOFF bits are set in the state). Drivers should not need to call
 * netif_xmit*stopped functions, they should only be using netif_tx_*.
 */
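/*
 * Example (illustrative sketch): the usual driver-side stop/wake pattern
 * behind __QUEUE_STATE_DRV_XOFF, using the netif_tx_* helpers declared
 * later in this header. The foo_* ring accounting is hypothetical.
 *
 *	// in ndo_start_xmit(), after posting a descriptor:
 *	if (foo_tx_ring_full(ring))
 *		netif_tx_stop_queue(txq);
 *
 *	// in the TX completion path, once space has been reclaimed:
 *	if (netif_tx_queue_stopped(txq) && foo_tx_ring_has_room(ring))
 *		netif_tx_wake_queue(txq);
 */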
struct netdev_queue {
/*
 * read-mostly part
 */
	struct net_device	*dev;
	netdevice_tracker	dev_tracker;

	struct Qdisc __rcu	*qdisc;
	struct Qdisc __rcu	*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
	unsigned long		tx_maxrate;
	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	atomic_long_t		trans_timeout;

	/* Subordinate device that the queue has been assigned to */
	struct net_device	*sb_dev;
#ifdef CONFIG_XDP_SOCKETS
	struct xsk_buff_pool	*pool;
#endif

/*
 * write-mostly part
 */
#ifdef CONFIG_BQL
	struct dql		dql;
#endif
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * Time (in jiffies) of last Tx
	 */
	unsigned long		trans_start;

	unsigned long		state;

/*
 * slow- / control-path part
 */
	/* NAPI instance for the queue
	 * Readers and writers must hold RTNL
	 */
	struct napi_struct	*napi;

#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
} ____cacheline_aligned_in_smp;
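/*
 * Example (illustrative sketch): the dql member above is what the Byte
 * Queue Limits helpers declared later in this header operate on. A driver
 * typically pairs them like this (foo_* names hypothetical):
 *
 *	// ndo_start_xmit(): account bytes as descriptors are posted
 *	netdev_tx_sent_queue(txq, skb->len);
 *
 *	// TX completion: report what actually left the hardware
 *	netdev_tx_completed_queue(txq, pkts_completed, bytes_completed);
 */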
extern int sysctl_fb_tunnels_only_for_init_net;
extern int sysctl_devconf_inherit_init_net;

/*
 * sysctl_fb_tunnels_only_for_init_net == 0 : For all netns
 *                                     == 1 : For initns only
 *                                     == 2 : For none.
 */
static inline bool net_has_fallback_tunnels(const struct net *net)
{
#if IS_ENABLED(CONFIG_SYSCTL)
	int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net);

	return !fb_tunnels_only_for_init_net ||
	       (net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1);
#else
	return true;
#endif
}

static inline int net_inherit_devconf(void)
{
#if IS_ENABLED(CONFIG_SYSCTL)
	return READ_ONCE(sysctl_devconf_inherit_init_net);
#else
	return 0;
#endif
}

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}

#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
			 u16 filter_id);
#endif

/* XPS map type and offset of the xps map within net_device->xps_maps[]. */
enum xps_map_type {
	XPS_CPUS = 0,
	XPS_RXQS,
	XPS_MAPS_MAX,
};

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length. The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
       - sizeof(struct xps_map)) / sizeof(u16))

/*
 * This structure holds all XPS maps for device. Maps are indexed by CPU.
 *
 * We keep track of the number of cpus/rxqs used when the struct is allocated,
 * in nr_ids. This will help not accessing out-of-bound memory.
 *
 * We keep track of the number of traffic classes used when the struct is
 * allocated, in num_tc. This will be used to navigate the maps, to ensure we're
 * not crossing its upper bound, as the original dev->num_tc can be updated in
 * the meantime.
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	unsigned int nr_ids;
	s16 num_tc;
	struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */
};

#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) +	\
	(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))

#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
	(_rxqs * (_tcs) * sizeof(struct xps_map *)))

#endif /* CONFIG_XPS */

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure is to hold information about the device
 * configured to run FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

#define MAX_PHYS_ITEM_ID_LEN 32

/* This structure holds a unique identifier to identify some
 * physical item (port for example) used by a netdevice.
 */
struct netdev_phys_item_id {
	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
	unsigned char id_len;
};

static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
					    struct netdev_phys_item_id *b)
{
	return a->id_len == b->id_len &&
	       memcmp(a->id, b->id, a->id_len) == 0;
}

typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
				       struct sk_buff *skb,
				       struct net_device *sb_dev);

enum net_device_path_type {
	DEV_PATH_ETHERNET = 0,
	DEV_PATH_VLAN,
	DEV_PATH_BRIDGE,
	DEV_PATH_PPPOE,
	DEV_PATH_DSA,
	DEV_PATH_MTK_WDMA,
};

struct net_device_path {
	enum net_device_path_type	type;
	const struct net_device		*dev;
	union {
		struct {
			u16		id;
			__be16		proto;
			u8		h_dest[ETH_ALEN];
		} encap;
		struct {
			enum {
				DEV_PATH_BR_VLAN_KEEP,
				DEV_PATH_BR_VLAN_TAG,
				DEV_PATH_BR_VLAN_UNTAG,
				DEV_PATH_BR_VLAN_UNTAG_HW,
			}		vlan_mode;
			u16		vlan_id;
			__be16		vlan_proto;
		} bridge;
		struct {
			int port;
			u16 proto;
		} dsa;
		struct {
			u8 wdma_idx;
			u8 queue;
			u16 wcid;
			u8 bss;
			u8 amsdu;
		} mtk_wdma;
	};
};

#define NET_DEVICE_PATH_STACK_MAX	5
#define NET_DEVICE_PATH_VLAN_MAX	2

struct net_device_path_stack {
	int			num_paths;
	struct net_device_path	path[NET_DEVICE_PATH_STACK_MAX];
};

struct net_device_path_ctx {
	const struct net_device *dev;
	u8			daddr[ETH_ALEN];

	int			num_vlans;
	struct {
		u16		id;
		__be16		proto;
	} vlan[NET_DEVICE_PATH_VLAN_MAX];
};

enum tc_setup_type {
	TC_QUERY_CAPS,
	TC_SETUP_QDISC_MQPRIO,
	TC_SETUP_CLSU32,
	TC_SETUP_CLSFLOWER,
	TC_SETUP_CLSMATCHALL,
	TC_SETUP_CLSBPF,
	TC_SETUP_BLOCK,
	TC_SETUP_QDISC_CBS,
	TC_SETUP_QDISC_RED,
	TC_SETUP_QDISC_PRIO,
	TC_SETUP_QDISC_MQ,
	TC_SETUP_QDISC_ETF,
	TC_SETUP_ROOT_QDISC,
	TC_SETUP_QDISC_GRED,
	TC_SETUP_QDISC_TAPRIO,
	TC_SETUP_FT,
	TC_SETUP_QDISC_ETS,
	TC_SETUP_QDISC_TBF,
	TC_SETUP_QDISC_FIFO,
	TC_SETUP_QDISC_HTB,
	TC_SETUP_ACT,
};
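/*
 * Example (illustrative sketch): an ndo_setup_tc() implementation usually
 * dispatches on the setup type above and returns -EOPNOTSUPP for anything
 * it does not offload. The foo_* handlers are hypothetical.
 *
 *	static int foo_setup_tc(struct net_device *dev, enum tc_setup_type type,
 *				void *type_data)
 *	{
 *		switch (type) {
 *		case TC_SETUP_QDISC_MQPRIO:
 *			return foo_setup_mqprio(dev, type_data);
 *		case TC_SETUP_BLOCK:
 *			return foo_setup_block(dev, type_data);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */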
/* These structures hold the attributes of bpf state that are being passed
 * to the netdevice through the bpf op.
 */
enum bpf_netdev_command {
	/* Set or clear a bpf program used in the earliest stages of packet
	 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
	 * is responsible for calling bpf_prog_put on any old progs that are
	 * stored. In case of error, the callee need not release the new prog
	 * reference, but on success it takes ownership and must bpf_prog_put
	 * when it is no longer used.
	 */
	XDP_SETUP_PROG,
	XDP_SETUP_PROG_HW,
	/* BPF program for offload callbacks, invoked at program load time. */
	BPF_OFFLOAD_MAP_ALLOC,
	BPF_OFFLOAD_MAP_FREE,
	XDP_SETUP_XSK_POOL,
};

struct bpf_prog_offload_ops;
struct netlink_ext_ack;
struct xdp_umem;
struct xdp_dev_bulk_queue;
struct bpf_xdp_link;

enum bpf_xdp_mode {
	XDP_MODE_SKB = 0,
	XDP_MODE_DRV = 1,
	XDP_MODE_HW = 2,
	__MAX_XDP_MODE
};

struct bpf_xdp_entity {
	struct bpf_prog *prog;
	struct bpf_xdp_link *link;
};

struct netdev_bpf {
	enum bpf_netdev_command command;
	union {
		/* XDP_SETUP_PROG */
		struct {
			u32 flags;
			struct bpf_prog *prog;
			struct netlink_ext_ack *extack;
		};
		/* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
		struct {
			struct bpf_offloaded_map *offmap;
		};
		/* XDP_SETUP_XSK_POOL */
		struct {
			struct xsk_buff_pool *pool;
			u16 queue_id;
		} xsk;
	};
};

/* Flags for ndo_xsk_wakeup. */
#define XDP_WAKEUP_RX (1 << 0)
#define XDP_WAKEUP_TX (1 << 1)

#ifdef CONFIG_XFRM_OFFLOAD
struct xfrmdev_ops {
	int	(*xdo_dev_state_add)(struct xfrm_state *x, struct netlink_ext_ack *extack);
	void	(*xdo_dev_state_delete)(struct xfrm_state *x);
	void	(*xdo_dev_state_free)(struct xfrm_state *x);
	bool	(*xdo_dev_offload_ok)(struct sk_buff *skb,
				      struct xfrm_state *x);
	void	(*xdo_dev_state_advance_esn)(struct xfrm_state *x);
	void	(*xdo_dev_state_update_stats)(struct xfrm_state *x);
	int	(*xdo_dev_policy_add)(struct xfrm_policy *x, struct netlink_ext_ack *extack);
	void	(*xdo_dev_policy_delete)(struct xfrm_policy *x);
	void	(*xdo_dev_policy_free)(struct xfrm_policy *x);
};
#endif

struct dev_ifalias {
	struct rcu_head rcuhead;
	char ifalias[];
};

struct devlink;
struct tlsdev_ops;

struct netdev_net_notifier {
	struct list_head list;
	struct notifier_block *nb;
};
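/*
 * Example (illustrative sketch): an ndo_bpf() implementation dispatches on
 * enum bpf_netdev_command above; per that contract, on XDP_SETUP_PROG the
 * driver takes ownership of the new prog and must bpf_prog_put() any prog
 * it replaces. foo_xdp_setup() is hypothetical.
 *
 *	static int foo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 *	{
 *		switch (bpf->command) {
 *		case XDP_SETUP_PROG:
 *			return foo_xdp_setup(dev, bpf->prog, bpf->extack);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */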
/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *     This function is called once when a network device is registered.
 *     The network device can use this for any late stage initialization
 *     or semantic validation. It can fail with an error code which will
 *     be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *     This function is called when device is unregistered or when registration
 *     fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *     This function is called when a network device transitions to the up
 *     state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *     This function is called when a network device transitions to the down
 *     state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *     Called when a packet needs to be transmitted.
 *     Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop
 *     the queue before that can happen; it's for obsolete devices and weird
 *     corner cases, but the stack really does a non-trivial amount
 *     of useless work if you return NETDEV_TX_BUSY.
 *     Required; cannot be NULL.
 *
 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
 *                                         struct net_device *dev,
 *                                         netdev_features_t features);
 *     Called by core transmit path to determine if device is capable of
 *     performing offload operations on a given packet. This is to give
 *     the device an opportunity to implement any restrictions that cannot
 *     be otherwise expressed by feature flags. The check is called with
 *     the set of features that the stack has calculated and it returns
 *     those the driver believes to be appropriate.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
 *                         struct net_device *sb_dev);
 *     Called to decide which queue to use when device supports multiple
 *     transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *     This function is called to allow device receiver to make
 *     changes to configuration when multicast or promiscuous is enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *     This function is called when the device changes its address list
 *     filtering. If driver handles unicast address filtering, it should set
 *     IFF_UNICAST_FLT in its priv_flags.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *     This function is called when the Media Access Control address
 *     needs to be changed. If this interface is not defined, the
 *     MAC address can not be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *     Test if Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *     Old-style ioctl entry point. This is used internally by the
 *     appletalk and ieee802154 subsystems but is no longer called by
 *     the device ioctl handler.
 *
 * int (*ndo_siocbond)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *     Used by the bonding driver for its device specific ioctls:
 *     SIOCBONDENSLAVE, SIOCBONDRELEASE, SIOCBONDSETHWADDR, SIOCBONDCHANGEACTIVE,
 *     SIOCBONDSLAVEINFOQUERY, and SIOCBONDINFOQUERY
 *
 * int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *     Called for ethernet specific ioctls: SIOCGMIIPHY, SIOCGMIIREG,
 *     SIOCSMIIREG, SIOCSHWTSTAMP and SIOCGHWTSTAMP.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *     Used to set network devices bus interface parameters. This interface
 *     is retained for legacy reasons; new devices should use the bus
 *     interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *     Called when a user wants to change the Maximum Transfer Unit
 *     of a device.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);
 *     Callback used when the transmitter has not made any progress
 *     for dev->watchdog ticks.
 *
 * void (*ndo_get_stats64)(struct net_device *dev,
 *                         struct rtnl_link_stats64 *storage);
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *     Called when a user wants to get the network device usage
 *     statistics. Drivers must do one of the following:
 *     1. Define @ndo_get_stats64 to fill in a zero-initialised
 *        rtnl_link_stats64 structure passed by the caller.
 *     2. Define @ndo_get_stats to update a net_device_stats structure
 *        (which should normally be dev->stats) and return a pointer to
 *        it. The structure may be changed asynchronously only if each
 *        field is written atomically.
 *     3. Update dev->stats asynchronously and atomically, and define
 *        neither operation.
 *
 * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id)
 *     Return true if this device supports offload stats of this attr_id.
 *
 * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
 *                              void *attr_data)
 *     Get statistics for offload operations by attr_id. Write it into the
 *     attr_data pointer.
 *
 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *     If device supports VLAN filtering this function is called when a
 *     VLAN id is registered.
 *
 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *     If device supports VLAN filtering this function is called when a
 *     VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 *
 *     SR-IOV management functions.
 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
 *                        u8 qos, __be16 proto);
 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
 *                        int max_tx_rate);
 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_config)(struct net_device *dev,
 *                          int vf, struct ifla_vf_info *ivf);
 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
 *                        struct nlattr *port[]);
 *
 *     Enable or disable the VF ability to query its RSS Redirection Table and
 *     Hash Key. This is needed since on some devices VF share this information
 *     with PF and querying it may introduce a theoretical security risk.
 * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
 * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type,
 *                     void *type_data);
 *     Called to setup any 'tc' scheduler, classifier or action on @dev.
 *     This is always called from the stack with the rtnl lock held and netif
 *     tx queues stopped. This allows the netdevice to perform queue
 *     management safely.
 *
 *     Fiber Channel over Ethernet (FCoE) offload functions.
 * int (*ndo_fcoe_enable)(struct net_device *dev);
 *     Called when the FCoE protocol stack wants to start using LLD for FCoE
 *     so the underlying device can perform whatever needed configuration or
 *     initialization to support acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_disable)(struct net_device *dev);
 *     Called when the FCoE protocol stack wants to stop using LLD for FCoE
 *     so the underlying device can perform whatever needed clean-ups to
 *     stop supporting acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
 *                           struct scatterlist *sgl, unsigned int sgc);
 *     Called when the FCoE Initiator wants to initialize an I/O that
 *     is a possible candidate for Direct Data Placement (DDP). The LLD can
 *     perform necessary setup and returns 1 to indicate the device is set up
 *     successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
 *     Called when the FCoE Initiator/Target is done with the DDPed I/O as
 *     indicated by the FC exchange id 'xid', so the underlying device can
 *     clean up and reuse resources for later DDP requests.
 *
 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
 *                            struct scatterlist *sgl, unsigned int sgc);
 *     Called when the FCoE Target wants to initialize an I/O that
 *     is a possible candidate for Direct Data Placement (DDP). The LLD can
 *     perform necessary setup and returns 1 to indicate the device is set up
 *     successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 *                             struct netdev_fcoe_hbainfo *hbainfo);
 *     Called when the FCoE Protocol stack wants information on the underlying
 *     device. This information is utilized by the FCoE protocol stack to
 *     register attributes with Fiber Channel management service as per the
 *     FC-GS Fabric Device Management Information (FDMI) specification.
 *
 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
 *     Called when the underlying device wants to override default World Wide
 *     Name (WWN) generation mechanism in FCoE protocol stack to pass its own
 *     World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
 *     protocol stack to use.
 *
 *     RFS acceleration.
 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
 *                          u16 rxq_index, u32 flow_id);
 *     Set hardware filter for RFS. rxq_index is the target queue index;
 *     flow_id is a flow ID to be passed to rps_may_expire_flow() later.
 *     Return the filter ID on success, or a negative error code.
 *
 *     Slave management functions (for bridge, bonding, etc).
 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
 *     Called to make another netdev an underling.
 *
 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 *     Called to release previously enslaved netdev.
 *
 * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev,
 *                                          struct sk_buff *skb,
 *                                          bool all_slaves);
 *     Get the xmit slave of master device. If all_slaves is true, the function
 *     assumes all the slaves can transmit.
 *
 *     Feature/offload setting functions.
 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 *                                       netdev_features_t features);
 *     Adjusts the requested feature flags according to device-specific
 *     constraints, and returns the resulting flags. Must not modify
 *     the device state.
 *
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 *     Called to update device configuration to new features. Passed
 *     feature set might be less than what was returned by ndo_fix_features().
 *     Must return >0 or -errno if it changed dev->features itself.
 *
 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
 *                    struct net_device *dev,
 *                    const unsigned char *addr, u16 vid, u16 flags,
 *                    bool *notified, struct netlink_ext_ack *extack);
 *     Adds an FDB entry to dev for addr.
 *     Callee shall set *notified to true if it sent any appropriate
 *     notification(s). Otherwise core will send a generic one.
 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
 *                    struct net_device *dev,
 *                    const unsigned char *addr, u16 vid,
 *                    bool *notified, struct netlink_ext_ack *extack);
 *     Deletes the FDB entry from dev corresponding to addr.
 *     Callee shall set *notified to true if it sent any appropriate
 *     notification(s). Otherwise core will send a generic one.
 * int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, struct net_device *dev,
 *                         struct netlink_ext_ack *extack);
 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
 *                     struct net_device *dev, struct net_device *filter_dev,
 *                     int *idx)
 *     Used to add FDB entries to dump requests. Implementers should add
 *     entries to skb and update idx with the number of entries.
 *
 * int (*ndo_mdb_add)(struct net_device *dev, struct nlattr *tb[],
 *                    u16 nlmsg_flags, struct netlink_ext_ack *extack);
 *     Adds an MDB entry to dev.
 * int (*ndo_mdb_del)(struct net_device *dev, struct nlattr *tb[],
 *                    struct netlink_ext_ack *extack);
 *     Deletes the MDB entry from dev.
 * int (*ndo_mdb_del_bulk)(struct net_device *dev, struct nlattr *tb[],
 *                         struct netlink_ext_ack *extack);
 *     Bulk deletes MDB entries from dev.
 * int (*ndo_mdb_dump)(struct net_device *dev, struct sk_buff *skb,
 *                     struct netlink_callback *cb);
 *     Dumps MDB entries from dev. The first argument (marker) in the netlink
 *     callback is used by core rtnetlink code.
 *
 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
 *                           u16 flags, struct netlink_ext_ack *extack)
 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
 *                           struct net_device *dev, u32 filter_mask,
 *                           int nlflags)
 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
 *                           u16 flags);
 *
 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
 *     Called to change device carrier. Soft-devices (like dummy, team, etc)
 *     which do not represent real hardware may define this to allow their
 *     userspace components to manage their virtual carrier state. Devices
 *     that determine carrier state from physical hardware properties (eg
 *     network cables) or protocol-dependent mechanisms (eg
 *     USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
 *
 * int (*ndo_get_phys_port_id)(struct net_device *dev,
 *                             struct netdev_phys_item_id *ppid);
 *     Called to get ID of physical port of this device.
 *     If driver does not implement this, it is assumed that the hw is not
 *     able to have multiple net devices on a single physical port.
 *
 * int (*ndo_get_port_parent_id)(struct net_device *dev,
 *                               struct netdev_phys_item_id *ppid)
 *     Called to get the parent ID of the physical port of this device.
 *
 * void *(*ndo_dfwd_add_station)(struct net_device *pdev,
 *                               struct net_device *dev)
 *     Called by upper layer devices to accelerate switching or other
 *     station functionality into hardware. 'pdev' is the lowerdev
 *     to use for the offload and 'dev' is the net device that will
 *     back the offload. Returns a pointer to the private structure
 *     the upper layer will maintain.
 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
 *     Called by upper layer device to delete the station created
 *     by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
 *     the station and priv is the structure returned by the add
 *     operation.
 * int (*ndo_set_tx_maxrate)(struct net_device *dev,
 *                           int queue_index, u32 maxrate);
 *     Called when a user wants to set a max-rate limitation of specific
 *     TX queue.
 * int (*ndo_get_iflink)(const struct net_device *dev);
 *     Called to get the iflink value of this device.
 * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
 *     This function is used to get egress tunnel information for given skb.
 *     This is useful for retrieving outer tunnel header parameters while
 *     sampling packet.
 * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
 *     This function is used to specify the headroom that the skb must
 *     consider when allocating an skb during packet reception. Setting
 *     an appropriate rx headroom value allows avoiding skb head copy on
 *     forward. Setting a negative value resets the rx headroom to the
 *     default value.
 * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf);
 *     This function is used to set or query state related to XDP on the
 *     netdevice and manage BPF offload. See definition of
 *     enum bpf_netdev_command for details.
 * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp,
 *                     u32 flags);
 *     This function is used to submit @n XDP packets for transmit on a
 *     netdevice. Returns number of frames successfully transmitted, frames
 *     that got dropped are freed/returned via xdp_return_frame().
 *     A negative return means a general error invoking the ndo; no frames
 *     were transmitted and the core caller will free all frames.
 * struct net_device *(*ndo_xdp_get_xmit_slave)(struct net_device *dev,
 *                                              struct xdp_buff *xdp);
 *     Get the xmit slave of master device based on the xdp_buff.
 * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags);
 *     This function is used to wake up the softirq, ksoftirqd or kthread
 *     responsible for sending and/or receiving packets on a specific
 *     queue id bound to an AF_XDP socket. The flags field specifies if
 *     only RX, only Tx, or both should be woken up using the flags
 *     XDP_WAKEUP_RX and XDP_WAKEUP_TX.
 * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm_kern *p,
 *                       int cmd);
 *     Add, change, delete or get information on an IPv4 tunnel.
 * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev);
 *     If a device is paired with a peer device, return the peer instance.
 *     The caller must be under RCU read context.
 * int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, struct net_device_path *path);
 *     Get the forwarding path to reach the real device from the HW destination address
 * ktime_t (*ndo_get_tstamp)(struct net_device *dev,
 *                           const struct skb_shared_hwtstamps *hwtstamps,
 *                           bool cycles);
 *     Get hardware timestamp based on normal/adjustable time or free running
 *     cycle counter. This function is required if physical clock supports a
 *     free running cycle counter.
 *
 * int (*ndo_hwtstamp_get)(struct net_device *dev,
 *                         struct kernel_hwtstamp_config *kernel_config);
 *     Get the currently configured hardware timestamping parameters for the
 *     NIC device.
 *
 * int (*ndo_hwtstamp_set)(struct net_device *dev,
 *                         struct kernel_hwtstamp_config *kernel_config,
 *                         struct netlink_ext_ack *extack);
 *     Change the hardware timestamping parameters for NIC device.
 */
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit)(struct sk_buff *skb,
						  struct net_device *dev);
	netdev_features_t	(*ndo_features_check)(struct sk_buff *skb,
						      struct net_device *dev,
						      netdev_features_t features);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb,
						    struct net_device *sb_dev);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_eth_ioctl)(struct net_device *dev,
						 struct ifreq *ifr, int cmd);
	int			(*ndo_siocbond)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_siocwandev)(struct net_device *dev,
						  struct if_settings *ifs);
	int			(*ndo_siocdevprivate)(struct net_device *dev,
						      struct ifreq *ifr,
						      void __user *data, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout)(struct net_device *dev,
						  unsigned int txqueue);

	void			(*ndo_get_stats64)(struct net_device *dev,
						   struct rtnl_link_stats64 *storage);
	bool			(*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
	int			(*ndo_get_offload_stats)(int attr_id,
							 const struct net_device *dev,
							 void *attr_data);
	struct net_device_stats *(*ndo_get_stats)(struct net_device *dev);

	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       __be16 proto, u16 vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan,
						   u8 qos, __be16 proto);
	int			(*ndo_set_vf_rate)(struct net_device *dev,
						   int vf, int min_tx_rate,
						   int max_tx_rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_set_vf_trust)(struct net_device *dev,
						    int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_link_state)(struct net_device *dev,
							 int vf, int link_state);
	int			(*ndo_get_vf_stats)(struct net_device *dev,
						    int vf,
						    struct ifla_vf_stats *vf_stats);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_get_vf_guid)(struct net_device *dev,
						   int vf,
						   struct ifla_vf_guid *node_guid,
						   struct ifla_vf_guid *port_guid);
	int			(*ndo_set_vf_guid)(struct net_device *dev,
						   int vf, u64 guid,
						   int guid_type);
	int			(*ndo_set_vf_rss_query_en)(
						   struct net_device *dev,
						   int vf, bool setting);
	int			(*ndo_setup_tc)(struct net_device *dev,
						enum tc_setup_type type,
						void *type_data);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev,
						 struct netlink_ext_ack *extack);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	struct net_device	*(*ndo_get_xmit_slave)(struct net_device *dev,
						       struct sk_buff *skb,
						       bool all_slaves);
	struct net_device	*(*ndo_sk_get_lower_dev)(struct net_device *dev,
							 struct sock *sk);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct net_device *dev,
						       struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct net_device *dev,
						     struct neighbour *n);

	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid,
					       u16 flags,
					       bool *notified,
					       struct netlink_ext_ack *extack);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid,
					       bool *notified,
					       struct netlink_ext_ack *extack);
	int			(*ndo_fdb_del_bulk)(struct nlmsghdr *nlh,
						    struct net_device *dev,
						    struct netlink_ext_ack *extack);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						struct net_device *filter_dev,
						int *idx);
	int			(*ndo_fdb_get)(struct sk_buff *skb,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid, u32 portid, u32 seq,
					       struct netlink_ext_ack *extack);
	int			(*ndo_mdb_add)(struct net_device *dev,
					       struct nlattr *tb[],
					       u16 nlmsg_flags,
					       struct netlink_ext_ack *extack);
	int			(*ndo_mdb_del)(struct net_device *dev,
					       struct nlattr *tb[],
					       struct netlink_ext_ack *extack);
	int			(*ndo_mdb_del_bulk)(struct net_device *dev,
						    struct nlattr *tb[],
						    struct netlink_ext_ack *extack);
	int			(*ndo_mdb_dump)(struct net_device *dev,
						struct sk_buff *skb,
						struct netlink_callback *cb);
	int			(*ndo_mdb_get)(struct net_device *dev,
					       struct nlattr *tb[], u32 portid,
					       u32 seq,
					       struct netlink_ext_ack *extack);
	int			(*ndo_bridge_setlink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags,
						      struct netlink_ext_ack *extack);
	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
						      u32 pid, u32 seq,
						      struct net_device *dev,
						      u32 filter_mask,
						      int nlflags);
	int			(*ndo_bridge_dellink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags);
	int			(*ndo_change_carrier)(struct net_device *dev,
						      bool new_carrier);
	int			(*ndo_get_phys_port_id)(struct net_device *dev,
							struct netdev_phys_item_id *ppid);
	int			(*ndo_get_port_parent_id)(struct net_device *dev,
							  struct netdev_phys_item_id *ppid);
	int			(*ndo_get_phys_port_name)(struct net_device *dev,
							  char *name, size_t len);
	void			*(*ndo_dfwd_add_station)(struct net_device *pdev,
							 struct net_device *dev);
	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
							void *priv);

	int			(*ndo_set_tx_maxrate)(struct net_device *dev,
						      int queue_index,
						      u32 maxrate);
	int			(*ndo_get_iflink)(const struct net_device *dev);
	int			(*ndo_fill_metadata_dst)(struct net_device *dev,
							 struct sk_buff *skb);
	void			(*ndo_set_rx_headroom)(struct net_device *dev,
						       int needed_headroom);
	int			(*ndo_bpf)(struct net_device *dev,
					   struct netdev_bpf *bpf);
	int			(*ndo_xdp_xmit)(struct net_device *dev, int n,
						struct xdp_frame **xdp,
						u32 flags);
	struct net_device	*(*ndo_xdp_get_xmit_slave)(struct net_device *dev,
							   struct xdp_buff *xdp);
	int			(*ndo_xsk_wakeup)(struct net_device *dev,
						  u32 queue_id, u32 flags);
	int			(*ndo_tunnel_ctl)(struct net_device *dev,
						  struct ip_tunnel_parm_kern *p,
						  int cmd);
	struct net_device	*(*ndo_get_peer_dev)(struct net_device *dev);
	int			(*ndo_fill_forward_path)(struct net_device_path_ctx *ctx,
							 struct net_device_path *path);
	ktime_t			(*ndo_get_tstamp)(struct net_device *dev,
						  const struct skb_shared_hwtstamps *hwtstamps,
						  bool cycles);
	int			(*ndo_hwtstamp_get)(struct net_device *dev,
						    struct kernel_hwtstamp_config *kernel_config);
	int			(*ndo_hwtstamp_set)(struct net_device *dev,
						    struct kernel_hwtstamp_config *kernel_config,
						    struct netlink_ext_ack *extack);

#if IS_ENABLED(CONFIG_NET_SHAPER)
	/**
	 * @net_shaper_ops: Device shaping offload operations
	 * see include/net/net_shapers.h
	 */
	const struct net_shaper_ops *net_shaper_ops;
#endif
};
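/*
 * Example (illustrative sketch): a minimal ops table for a hypothetical
 * Ethernet driver. Only ndo_start_xmit is mandatory; eth_mac_addr() and
 * eth_validate_addr() are the stock helpers from <linux/etherdevice.h>.
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_open		= foo_open,
 *		.ndo_stop		= foo_stop,
 *		.ndo_start_xmit		= foo_start_xmit,
 *		.ndo_set_rx_mode	= foo_set_rx_mode,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *	};
 *
 *	// at probe time: dev->netdev_ops = &foo_netdev_ops;
 */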
These flags are invisible to 1644 * userspace; this means that the order of these flags can change 1645 * during any kernel release. 1646 * 1647 * You should add bitfield booleans after either net_device::priv_flags 1648 * (hotpath) or ::threaded (slowpath) instead of extending these flags. 1649 * 1650 * @IFF_802_1Q_VLAN: 802.1Q VLAN device 1651 * @IFF_EBRIDGE: Ethernet bridging device 1652 * @IFF_BONDING: bonding master or slave 1653 * @IFF_ISATAP: ISATAP interface (RFC4214) 1654 * @IFF_WAN_HDLC: WAN HDLC device 1655 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to 1656 * release skb->dst 1657 * @IFF_DONT_BRIDGE: disallow bridging this ether dev 1658 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time 1659 * @IFF_MACVLAN_PORT: device used as macvlan port 1660 * @IFF_BRIDGE_PORT: device used as bridge port 1661 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port 1662 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit 1663 * @IFF_UNICAST_FLT: Supports unicast filtering 1664 * @IFF_TEAM_PORT: device used as team port 1665 * @IFF_SUPP_NOFCS: device supports sending custom FCS 1666 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address 1667 * change when it's running 1668 * @IFF_MACVLAN: Macvlan device 1669 * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account 1670 * underlying stacked devices 1671 * @IFF_L3MDEV_MASTER: device is an L3 master device 1672 * @IFF_NO_QUEUE: device can run without qdisc attached 1673 * @IFF_OPENVSWITCH: device is an Open vSwitch master 1674 * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device 1675 * @IFF_TEAM: device is a team device 1676 * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured 1677 * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external 1678 * entity (i.e. 
the master device for bridged veth) 1679 * @IFF_MACSEC: device is a MACsec device 1680 * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook 1681 * @IFF_FAILOVER: device is a failover master device 1682 * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device 1683 * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device 1684 * @IFF_NO_ADDRCONF: prevent ipv6 addrconf 1685 * @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with 1686 * skb_headlen(skb) == 0 (data starts from frag0) 1687 */ 1688enum netdev_priv_flags { 1689 IFF_802_1Q_VLAN = 1<<0, 1690 IFF_EBRIDGE = 1<<1, 1691 IFF_BONDING = 1<<2, 1692 IFF_ISATAP = 1<<3, 1693 IFF_WAN_HDLC = 1<<4, 1694 IFF_XMIT_DST_RELEASE = 1<<5, 1695 IFF_DONT_BRIDGE = 1<<6, 1696 IFF_DISABLE_NETPOLL = 1<<7, 1697 IFF_MACVLAN_PORT = 1<<8, 1698 IFF_BRIDGE_PORT = 1<<9, 1699 IFF_OVS_DATAPATH = 1<<10, 1700 IFF_TX_SKB_SHARING = 1<<11, 1701 IFF_UNICAST_FLT = 1<<12, 1702 IFF_TEAM_PORT = 1<<13, 1703 IFF_SUPP_NOFCS = 1<<14, 1704 IFF_LIVE_ADDR_CHANGE = 1<<15, 1705 IFF_MACVLAN = 1<<16, 1706 IFF_XMIT_DST_RELEASE_PERM = 1<<17, 1707 IFF_L3MDEV_MASTER = 1<<18, 1708 IFF_NO_QUEUE = 1<<19, 1709 IFF_OPENVSWITCH = 1<<20, 1710 IFF_L3MDEV_SLAVE = 1<<21, 1711 IFF_TEAM = 1<<22, 1712 IFF_RXFH_CONFIGURED = 1<<23, 1713 IFF_PHONY_HEADROOM = 1<<24, 1714 IFF_MACSEC = 1<<25, 1715 IFF_NO_RX_HANDLER = 1<<26, 1716 IFF_FAILOVER = 1<<27, 1717 IFF_FAILOVER_SLAVE = 1<<28, 1718 IFF_L3MDEV_RX_HANDLER = 1<<29, 1719 IFF_NO_ADDRCONF = BIT_ULL(30), 1720 IFF_TX_SKB_NO_LINEAR = BIT_ULL(31), 1721}; 1722 1723/* Specifies the type of the struct net_device::ml_priv pointer */ 1724enum netdev_ml_priv_type { 1725 ML_PRIV_NONE, 1726 ML_PRIV_CAN, 1727}; 1728 1729enum netdev_stat_type { 1730 NETDEV_PCPU_STAT_NONE, 1731 NETDEV_PCPU_STAT_LSTATS, /* struct pcpu_lstats */ 1732 NETDEV_PCPU_STAT_TSTATS, /* struct pcpu_sw_netstats */ 1733 NETDEV_PCPU_STAT_DSTATS, /* struct pcpu_dstats */ 1734}; 1735 1736enum netdev_reg_state { 1737 NETREG_UNINITIALIZED = 0, 1738 NETREG_REGISTERED, /* completed register_netdevice */ 1739 NETREG_UNREGISTERING, /* called unregister_netdevice */ 1740 NETREG_UNREGISTERED, /* completed unregister todo */ 1741 NETREG_RELEASED, /* called free_netdev */ 1742 NETREG_DUMMY, /* dummy device for NAPI poll */ 1743}; 1744 1745/** 1746 * struct net_device - The DEVICE structure. 1747 * 1748 * Actually, this whole structure is a big mistake. It mixes I/O 1749 * data with strictly "high-level" data, and it has to know about 1750 * almost every data structure used in the INET module. 1751 * 1752 * @priv_flags: flags invisible to userspace defined as bits, see 1753 * enum netdev_priv_flags for the definitions 1754 * @lltx: device supports lockless Tx. Deprecated for real HW 1755 * drivers. Mainly used by logical interfaces, such as 1756 * bonding and tunnels 1757 * 1758 * @name: This is the first field of the "visible" part of this structure 1759 * (i.e. as seen by users in the "Space.c" file). It is the name 1760 * of the interface. 
1761 * 1762 * @name_node: Name hashlist node 1763 * @ifalias: SNMP alias 1764 * @mem_end: Shared memory end 1765 * @mem_start: Shared memory start 1766 * @base_addr: Device I/O address 1767 * @irq: Device IRQ number 1768 * 1769 * @state: Generic network queuing layer state, see netdev_state_t 1770 * @dev_list: The global list of network devices 1771 * @napi_list: List entry used for polling NAPI devices 1772 * @unreg_list: List entry when we are unregistering the 1773 * device; see the function unregister_netdev 1774 * @close_list: List entry used when we are closing the device 1775 * @ptype_all: Device-specific packet handlers for all protocols 1776 * @ptype_specific: Device-specific, protocol-specific packet handlers 1777 * 1778 * @adj_list: Directly linked devices, like slaves for bonding 1779 * @features: Currently active device features 1780 * @hw_features: User-changeable features 1781 * 1782 * @wanted_features: User-requested features 1783 * @vlan_features: Mask of features inheritable by VLAN devices 1784 * 1785 * @hw_enc_features: Mask of features inherited by encapsulating devices. 1786 * This field indicates what encapsulation 1787 * offloads the hardware is capable of doing, 1788 * and drivers will need to set them appropriately. 1789 * 1790 * @mpls_features: Mask of features inheritable by MPLS 1791 * @gso_partial_features: value(s) from NETIF_F_GSO\* 1792 * 1793 * @ifindex: interface index 1794 * @group: The group the device belongs to 1795 * 1796 * @stats: Legacy statistics struct; use 1797 * rtnl_link_stats64 instead 1798 * 1799 * @core_stats: core networking counters, 1800 * do not use this in drivers 1801 * @carrier_up_count: Number of times the carrier has been up 1802 * @carrier_down_count: Number of times the carrier has been down 1803 * 1804 * @wireless_handlers: List of functions to handle Wireless Extensions 1805 * (instead of ioctl); 1806 * see <net/iw_handler.h> for details. 1807 * 1808 * @netdev_ops: Includes several pointers to callbacks, 1809 * if one wants to override the ndo_*() functions 1810 * @xdp_metadata_ops: Includes pointers to XDP metadata callbacks. 1811 * @xsk_tx_metadata_ops: Includes pointers to AF_XDP TX metadata callbacks. 1812 * @ethtool_ops: Management operations 1813 * @l3mdev_ops: Layer 3 master device operations 1814 * @ndisc_ops: Includes callbacks for different IPv6 neighbour 1815 * discovery handling. Necessary for e.g. 6LoWPAN. 1816 * @xfrmdev_ops: Transformation offload operations 1817 * @tlsdev_ops: Transport Layer Security offload operations 1818 * @header_ops: Includes callbacks for creating, parsing, caching, etc. 1819 * of Layer 2 headers. 1820 * 1821 * @flags: Interface flags (a la BSD) 1822 * @xdp_features: XDP capability supported by the device 1823 * @gflags: Global flags (kept as legacy) 1824 * @priv_len: Size of the ->priv flexible array 1825 * @priv: Flexible array containing private data 1826 * @operstate: RFC2863 operstate 1827 * @link_mode: Mapping policy to operstate 1828 * @if_port: Selectable AUI, TP, ... 1829 * @dma: DMA channel 1830 * @mtu: Interface MTU value 1831 * @min_mtu: Interface Minimum MTU value 1832 * @max_mtu: Interface Maximum MTU value 1833 * @type: Interface hardware type 1834 * @hard_header_len: Maximum hardware header length. 
1835 * @min_header_len: Minimum hardware header length 1836 * 1837 * @needed_headroom: Extra headroom the hardware may need, but not in all 1838 * cases can this be guaranteed 1839 * @needed_tailroom: Extra tailroom the hardware may need, but not in all 1840 * cases can this be guaranteed. Some cases also use 1841 * LL_MAX_HEADER instead to allocate the skb 1842 * 1843 * interface address info: 1844 * 1845 * @perm_addr: Permanent hw address 1846 * @addr_assign_type: Hw address assignment type 1847 * @addr_len: Hardware address length 1848 * @upper_level: Maximum depth level of upper devices. 1849 * @lower_level: Maximum depth level of lower devices. 1850 * @neigh_priv_len: Used in neigh_alloc() 1851 * @dev_id: Used to differentiate devices that share 1852 * the same link layer address 1853 * @dev_port: Used to differentiate devices that share 1854 * the same function 1855 * @addr_list_lock: XXX: need comments on this one 1856 * @name_assign_type: network interface name assignment type 1857 * @uc_promisc: Counter that indicates promiscuous mode 1858 * has been enabled due to the need to listen to 1859 * additional unicast addresses in a device that 1860 * does not implement ndo_set_rx_mode() 1861 * @uc: unicast mac addresses 1862 * @mc: multicast mac addresses 1863 * @dev_addrs: list of device hw addresses 1864 * @queues_kset: Group of all Kobjects in the Tx and RX queues 1865 * @promiscuity: Number of times the NIC is told to work in 1866 * promiscuous mode; if it becomes 0 the NIC will 1867 * exit promiscuous mode 1868 * @allmulti: Counter, enables or disables allmulticast mode 1869 * 1870 * @vlan_info: VLAN info 1871 * @dsa_ptr: dsa specific data 1872 * @tipc_ptr: TIPC specific data 1873 * @atalk_ptr: AppleTalk link 1874 * @ip_ptr: IPv4 specific data 1875 * @ip6_ptr: IPv6 specific data 1876 * @ax25_ptr: AX.25 specific data 1877 * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering 1878 * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network 1879 * device struct 1880 * @mpls_ptr: mpls_dev struct pointer 1881 * @mctp_ptr: MCTP specific data 1882 * 1883 * @dev_addr: Hw address (before bcast, 1884 * because most packets are unicast) 1885 * 1886 * @_rx: Array of RX queues 1887 * @num_rx_queues: Number of RX queues 1888 * allocated at register_netdev() time 1889 * @real_num_rx_queues: Number of RX queues currently active in device 1890 * @xdp_prog: XDP sockets filter program pointer 1891 * 1892 * @rx_handler: handler for received packets 1893 * @rx_handler_data: XXX: need comments on this one 1894 * @tcx_ingress: BPF & clsact qdisc specific data for ingress processing 1895 * @ingress_queue: XXX: need comments on this one 1896 * @nf_hooks_ingress: netfilter hooks executed for ingress packets 1897 * @broadcast: hw bcast address 1898 * 1899 * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts, 1900 * indexed by RX queue number. Assigned by driver. 
1901 * This must only be set if the ndo_rx_flow_steer 1902 * operation is defined 1903 * @index_hlist: Device index hash chain 1904 * 1905 * @_tx: Array of TX queues 1906 * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time 1907 * @real_num_tx_queues: Number of TX queues currently active in device 1908 * @qdisc: Root qdisc from userspace point of view 1909 * @tx_queue_len: Max frames per queue allowed 1910 * @tx_global_lock: XXX: need comments on this one 1911 * @xdp_bulkq: XDP device bulk queue 1912 * @xps_maps: all CPUs/RXQs maps for XPS device 1913 * 1914 * 1915 * @tcx_egress: BPF & clsact qdisc specific data for egress processing 1916 * @nf_hooks_egress: netfilter hooks executed for egress packets 1917 * @qdisc_hash: qdisc hash table 1918 * @watchdog_timeo: Represents the timeout that is used by 1919 * the watchdog (see dev_watchdog()) 1920 * @watchdog_timer: Transmit watchdog timer (see dev_watchdog()) 1921 * 1922 * @proto_down_reason: reason a netdev interface is held down 1923 * @pcpu_refcnt: Number of references to this device 1924 * @dev_refcnt: Number of references to this device 1925 * @refcnt_tracker: Tracker directory for tracked references to this device 1926 * @todo_list: Delayed register/unregister 1927 * @link_watch_list: XXX: need comments on this one 1928 * 1929 * @reg_state: Register/unregister state machine 1930 * @dismantle: Device is going to be freed 1931 * @rtnl_link_state: This enum represents the phases of creating 1932 * a new link 1933 * 1934 * @needs_free_netdev: Should unregister perform free_netdev? 1935 * @priv_destructor: Called from unregister 1936 * @npinfo: XXX: need comments on this one 1937 * @nd_net: Network namespace this network device is inside 1938 * 1939 * @ml_priv: Mid-layer private 1940 * @ml_priv_type: Mid-layer private type 1941 * 1942 * @pcpu_stat_type: Type of device statistics which the core should 1943 * allocate/free: none, lstats, tstats, dstats. none 1944 * means the driver is handling statistics allocation/ 1945 * freeing internally. 1946 * @lstats: Loopback statistics: packets, bytes 1947 * @tstats: Tunnel statistics: RX/TX packets, RX/TX bytes 1948 * @dstats: Dummy statistics: RX/TX/drop packets, RX/TX bytes 1949 * 1950 * @garp_port: GARP 1951 * @mrp_port: MRP 1952 * 1953 * @dm_private: Drop monitor private 1954 * 1955 * @dev: Class/net/name entry 1956 * @sysfs_groups: Space for optional device, statistics and wireless 1957 * sysfs groups 1958 * 1959 * @sysfs_rx_queue_group: Space for optional per-rx queue attributes 1960 * @rtnl_link_ops: Rtnl_link_ops 1961 * @stat_ops: Optional ops for queue-aware statistics 1962 * @queue_mgmt_ops: Optional ops for queue management 1963 * 1964 * @gso_max_size: Maximum size of generic segmentation offload 1965 * @tso_max_size: Device (as in HW) limit on the max TSO request size 1966 * @gso_max_segs: Maximum number of segments that can be passed to the 1967 * NIC for GSO 1968 * @tso_max_segs: Device (as in HW) limit on the max TSO segment count 1969 * @gso_ipv4_max_size: Maximum size of generic segmentation offload, 1970 * for IPv4. 
1971 * 1972 * @dcbnl_ops: Data Center Bridging netlink ops 1973 * @num_tc: Number of traffic classes in the net device 1974 * @tc_to_txq: XXX: need comments on this one 1975 * @prio_tc_map: XXX: need comments on this one 1976 * 1977 * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp 1978 * 1979 * @priomap: XXX: need comments on this one 1980 * @link_topo: Physical link topology tracking attached PHYs 1981 * @phydev: Physical device may attach itself 1982 * for hardware timestamping 1983 * @sfp_bus: attached &struct sfp_bus structure. 1984 * 1985 * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock 1986 * 1987 * @proto_down: protocol port state information can be sent to the 1988 * switch driver and used to set the phys state of the 1989 * switch port. 1990 * 1991 * @threaded: napi threaded mode is enabled 1992 * 1993 * @see_all_hwtstamp_requests: device wants to see calls to 1994 * ndo_hwtstamp_set() for all timestamp requests 1995 * regardless of source, even if those aren't 1996 * HWTSTAMP_SOURCE_NETDEV 1997 * @change_proto_down: device supports setting carrier via IFLA_PROTO_DOWN 1998 * @netns_local: interface can't change network namespaces 1999 * @fcoe_mtu: device supports maximum FCoE MTU, 2158 bytes 2000 * 2001 * @net_notifier_list: List of per-net netdev notifier block 2002 * that follow this device when it is moved 2003 * to another network namespace. 2004 * 2005 * @macsec_ops: MACsec offloading ops 2006 * 2007 * @udp_tunnel_nic_info: static structure describing the UDP tunnel 2008 * offload capabilities of the device 2009 * @udp_tunnel_nic: UDP tunnel offload state 2010 * @ethtool: ethtool related state 2011 * @xdp_state: stores info on attached XDP BPF programs 2012 * 2013 * @nested_level: Used as a parameter of spin_lock_nested() of 2014 * dev->addr_list_lock. 2015 * @unlink_list: As netif_addr_lock() can be called recursively, 2016 * keep a list of interfaces to be deleted. 2017 * @gro_max_size: Maximum size of aggregated packet in generic 2018 * receive offload (GRO) 2019 * @gro_ipv4_max_size: Maximum size of aggregated packet in generic 2020 * receive offload (GRO), for IPv4. 2021 * @xdp_zc_max_segs: Maximum number of segments supported by AF_XDP 2022 * zero copy driver 2023 * 2024 * @dev_addr_shadow: Copy of @dev_addr to catch direct writes. 2025 * @linkwatch_dev_tracker: refcount tracker used by linkwatch. 2026 * @watchdog_dev_tracker: refcount tracker used by watchdog. 2027 * @dev_registered_tracker: tracker for reference held while 2028 * registered 2029 * @offload_xstats_l3: L3 HW stats for this netdevice. 2030 * 2031 * @devlink_port: Pointer to related devlink port structure. 2032 * Assigned by a driver before netdev registration using 2033 * SET_NETDEV_DEVLINK_PORT macro. This pointer is static 2034 * during the time the netdevice is registered. 2035 * 2036 * @dpll_pin: Pointer to the SyncE source pin of a DPLL subsystem, 2037 * where the clock is recovered. 2038 * 2039 * @max_pacing_offload_horizon: max EDT offload horizon in nsec. 2040 * @napi_config: An array of napi_config structures containing per-NAPI 2041 * settings. 2042 * @gro_flush_timeout: timeout for GRO layer in NAPI 2043 * @napi_defer_hard_irqs: If not zero, provides a counter that 2044 * allows avoiding NIC hard IRQs on busy queues. 2045 * 2046 * @neighbours: List heads pointing to this device's neighbours' 2047 * dev_list, one per address-family. 2048 * 2049 * FIXME: cleanup struct net_device such that network protocol info 2050 * moves out. 
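 *
 * A minimal driver-side allocation sketch (struct my_priv is a
 * hypothetical private structure, not part of this header): instances
 * of &struct net_device are always allocated dynamically, and the
 * private area is reached through netdev_priv():
 *
 *	struct net_device *dev = alloc_etherdev(sizeof(struct my_priv));
 *	struct my_priv *priv;
 *
 *	if (!dev)
 *		return -ENOMEM;
 *	priv = netdev_priv(dev);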
2051 */ 2052 2053struct net_device { 2054 /* Cacheline organization can be found documented in 2055 * Documentation/networking/net_cachelines/net_device.rst. 2056 * Please update the document when adding new fields. 2057 */ 2058 2059 /* TX read-mostly hotpath */ 2060 __cacheline_group_begin(net_device_read_tx); 2061 struct_group(priv_flags_fast, 2062 unsigned long priv_flags:32; 2063 unsigned long lltx:1; 2064 ); 2065 const struct net_device_ops *netdev_ops; 2066 const struct header_ops *header_ops; 2067 struct netdev_queue *_tx; 2068 netdev_features_t gso_partial_features; 2069 unsigned int real_num_tx_queues; 2070 unsigned int gso_max_size; 2071 unsigned int gso_ipv4_max_size; 2072 u16 gso_max_segs; 2073 s16 num_tc; 2074 /* Note : dev->mtu is often read without holding a lock. 2075 * Writers usually hold RTNL. 2076 * It is recommended to use READ_ONCE() to annotate the reads, 2077 * and to use WRITE_ONCE() to annotate the writes. 2078 */ 2079 unsigned int mtu; 2080 unsigned short needed_headroom; 2081 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; 2082#ifdef CONFIG_XPS 2083 struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX]; 2084#endif 2085#ifdef CONFIG_NETFILTER_EGRESS 2086 struct nf_hook_entries __rcu *nf_hooks_egress; 2087#endif 2088#ifdef CONFIG_NET_XGRESS 2089 struct bpf_mprog_entry __rcu *tcx_egress; 2090#endif 2091 __cacheline_group_end(net_device_read_tx); 2092 2093 /* TXRX read-mostly hotpath */ 2094 __cacheline_group_begin(net_device_read_txrx); 2095 union { 2096 struct pcpu_lstats __percpu *lstats; 2097 struct pcpu_sw_netstats __percpu *tstats; 2098 struct pcpu_dstats __percpu *dstats; 2099 }; 2100 unsigned long state; 2101 unsigned int flags; 2102 unsigned short hard_header_len; 2103 netdev_features_t features; 2104 struct inet6_dev __rcu *ip6_ptr; 2105 __cacheline_group_end(net_device_read_txrx); 2106 2107 /* RX read-mostly hotpath */ 2108 __cacheline_group_begin(net_device_read_rx); 2109 struct bpf_prog __rcu *xdp_prog; 2110 struct list_head ptype_specific; 2111 int ifindex; 2112 unsigned int real_num_rx_queues; 2113 struct netdev_rx_queue *_rx; 2114 unsigned int gro_max_size; 2115 unsigned int gro_ipv4_max_size; 2116 rx_handler_func_t __rcu *rx_handler; 2117 void __rcu *rx_handler_data; 2118 possible_net_t nd_net; 2119#ifdef CONFIG_NETPOLL 2120 struct netpoll_info __rcu *npinfo; 2121#endif 2122#ifdef CONFIG_NET_XGRESS 2123 struct bpf_mprog_entry __rcu *tcx_ingress; 2124#endif 2125 __cacheline_group_end(net_device_read_rx); 2126 2127 char name[IFNAMSIZ]; 2128 struct netdev_name_node *name_node; 2129 struct dev_ifalias __rcu *ifalias; 2130 /* 2131 * I/O specific fields 2132 * FIXME: Merge these and struct ifmap into one 2133 */ 2134 unsigned long mem_end; 2135 unsigned long mem_start; 2136 unsigned long base_addr; 2137 2138 /* 2139 * Some hardware also needs these fields (state,dev_list, 2140 * napi_list,unreg_list,close_list) but they are not 2141 * part of the usual set specified in Space.c. 
2142 */ 2143 2144 2145 struct list_head dev_list; 2146 struct list_head napi_list; 2147 struct list_head unreg_list; 2148 struct list_head close_list; 2149 struct list_head ptype_all; 2150 2151 struct { 2152 struct list_head upper; 2153 struct list_head lower; 2154 } adj_list; 2155 2156 /* Read-mostly cache-line for fast-path access */ 2157 xdp_features_t xdp_features; 2158 const struct xdp_metadata_ops *xdp_metadata_ops; 2159 const struct xsk_tx_metadata_ops *xsk_tx_metadata_ops; 2160 unsigned short gflags; 2161 2162 unsigned short needed_tailroom; 2163 2164 netdev_features_t hw_features; 2165 netdev_features_t wanted_features; 2166 netdev_features_t vlan_features; 2167 netdev_features_t hw_enc_features; 2168 netdev_features_t mpls_features; 2169 2170 unsigned int min_mtu; 2171 unsigned int max_mtu; 2172 unsigned short type; 2173 unsigned char min_header_len; 2174 unsigned char name_assign_type; 2175 2176 int group; 2177 2178 struct net_device_stats stats; /* not used by modern drivers */ 2179 2180 struct net_device_core_stats __percpu *core_stats; 2181 2182 /* Stats to monitor link on/off, flapping */ 2183 atomic_t carrier_up_count; 2184 atomic_t carrier_down_count; 2185 2186#ifdef CONFIG_WIRELESS_EXT 2187 const struct iw_handler_def *wireless_handlers; 2188#endif 2189 const struct ethtool_ops *ethtool_ops; 2190#ifdef CONFIG_NET_L3_MASTER_DEV 2191 const struct l3mdev_ops *l3mdev_ops; 2192#endif 2193#if IS_ENABLED(CONFIG_IPV6) 2194 const struct ndisc_ops *ndisc_ops; 2195#endif 2196 2197#ifdef CONFIG_XFRM_OFFLOAD 2198 const struct xfrmdev_ops *xfrmdev_ops; 2199#endif 2200 2201#if IS_ENABLED(CONFIG_TLS_DEVICE) 2202 const struct tlsdev_ops *tlsdev_ops; 2203#endif 2204 2205 unsigned int operstate; 2206 unsigned char link_mode; 2207 2208 unsigned char if_port; 2209 unsigned char dma; 2210 2211 /* Interface address info. 
*/ 2212 unsigned char perm_addr[MAX_ADDR_LEN]; 2213 unsigned char addr_assign_type; 2214 unsigned char addr_len; 2215 unsigned char upper_level; 2216 unsigned char lower_level; 2217 2218 unsigned short neigh_priv_len; 2219 unsigned short dev_id; 2220 unsigned short dev_port; 2221 int irq; 2222 u32 priv_len; 2223 2224 spinlock_t addr_list_lock; 2225 2226 struct netdev_hw_addr_list uc; 2227 struct netdev_hw_addr_list mc; 2228 struct netdev_hw_addr_list dev_addrs; 2229 2230#ifdef CONFIG_SYSFS 2231 struct kset *queues_kset; 2232#endif 2233#ifdef CONFIG_LOCKDEP 2234 struct list_head unlink_list; 2235#endif 2236 unsigned int promiscuity; 2237 unsigned int allmulti; 2238 bool uc_promisc; 2239#ifdef CONFIG_LOCKDEP 2240 unsigned char nested_level; 2241#endif 2242 2243 2244 /* Protocol-specific pointers */ 2245 struct in_device __rcu *ip_ptr; 2246 /** @fib_nh_head: nexthops associated with this netdev */ 2247 struct hlist_head fib_nh_head; 2248 2249#if IS_ENABLED(CONFIG_VLAN_8021Q) 2250 struct vlan_info __rcu *vlan_info; 2251#endif 2252#if IS_ENABLED(CONFIG_NET_DSA) 2253 struct dsa_port *dsa_ptr; 2254#endif 2255#if IS_ENABLED(CONFIG_TIPC) 2256 struct tipc_bearer __rcu *tipc_ptr; 2257#endif 2258#if IS_ENABLED(CONFIG_ATALK) 2259 void *atalk_ptr; 2260#endif 2261#if IS_ENABLED(CONFIG_AX25) 2262 void *ax25_ptr; 2263#endif 2264#if IS_ENABLED(CONFIG_CFG80211) 2265 struct wireless_dev *ieee80211_ptr; 2266#endif 2267#if IS_ENABLED(CONFIG_IEEE802154) || IS_ENABLED(CONFIG_6LOWPAN) 2268 struct wpan_dev *ieee802154_ptr; 2269#endif 2270#if IS_ENABLED(CONFIG_MPLS_ROUTING) 2271 struct mpls_dev __rcu *mpls_ptr; 2272#endif 2273#if IS_ENABLED(CONFIG_MCTP) 2274 struct mctp_dev __rcu *mctp_ptr; 2275#endif 2276 2277/* 2278 * Cache lines mostly used on receive path (including eth_type_trans()) 2279 */ 2280 /* Interface address info used in eth_type_trans() */ 2281 const unsigned char *dev_addr; 2282 2283 unsigned int num_rx_queues; 2284#define GRO_LEGACY_MAX_SIZE 65536u 2285/* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE), 2286 * and shinfo->gso_segs is a 16bit field. 2287 */ 2288#define GRO_MAX_SIZE (8 * 65535u) 2289 unsigned int xdp_zc_max_segs; 2290 struct netdev_queue __rcu *ingress_queue; 2291#ifdef CONFIG_NETFILTER_INGRESS 2292 struct nf_hook_entries __rcu *nf_hooks_ingress; 2293#endif 2294 2295 unsigned char broadcast[MAX_ADDR_LEN]; 2296#ifdef CONFIG_RFS_ACCEL 2297 struct cpu_rmap *rx_cpu_rmap; 2298#endif 2299 struct hlist_node index_hlist; 2300 2301/* 2302 * Cache lines mostly used on transmit path 2303 */ 2304 unsigned int num_tx_queues; 2305 struct Qdisc __rcu *qdisc; 2306 unsigned int tx_queue_len; 2307 spinlock_t tx_global_lock; 2308 2309 struct xdp_dev_bulk_queue __percpu *xdp_bulkq; 2310 2311#ifdef CONFIG_NET_SCHED 2312 DECLARE_HASHTABLE (qdisc_hash, 4); 2313#endif 2314 /* These may be needed for future network-power-down code. 
*/ 2315 struct timer_list watchdog_timer; 2316 int watchdog_timeo; 2317 2318 u32 proto_down_reason; 2319 2320 struct list_head todo_list; 2321 2322#ifdef CONFIG_PCPU_DEV_REFCNT 2323 int __percpu *pcpu_refcnt; 2324#else 2325 refcount_t dev_refcnt; 2326#endif 2327 struct ref_tracker_dir refcnt_tracker; 2328 2329 struct list_head link_watch_list; 2330 2331 u8 reg_state; 2332 2333 bool dismantle; 2334 2335 enum { 2336 RTNL_LINK_INITIALIZED, 2337 RTNL_LINK_INITIALIZING, 2338 } rtnl_link_state:16; 2339 2340 bool needs_free_netdev; 2341 void (*priv_destructor)(struct net_device *dev); 2342 2343 /* mid-layer private */ 2344 void *ml_priv; 2345 enum netdev_ml_priv_type ml_priv_type; 2346 2347 enum netdev_stat_type pcpu_stat_type:8; 2348 2349#if IS_ENABLED(CONFIG_GARP) 2350 struct garp_port __rcu *garp_port; 2351#endif 2352#if IS_ENABLED(CONFIG_MRP) 2353 struct mrp_port __rcu *mrp_port; 2354#endif 2355#if IS_ENABLED(CONFIG_NET_DROP_MONITOR) 2356 struct dm_hw_stat_delta __rcu *dm_private; 2357#endif 2358 struct device dev; 2359 const struct attribute_group *sysfs_groups[4]; 2360 const struct attribute_group *sysfs_rx_queue_group; 2361 2362 const struct rtnl_link_ops *rtnl_link_ops; 2363 2364 const struct netdev_stat_ops *stat_ops; 2365 2366 const struct netdev_queue_mgmt_ops *queue_mgmt_ops; 2367 2368 /* for setting kernel sock attribute on TCP connection setup */ 2369#define GSO_MAX_SEGS 65535u 2370#define GSO_LEGACY_MAX_SIZE 65536u 2371/* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE), 2372 * and shinfo->gso_segs is a 16bit field. 2373 */ 2374#define GSO_MAX_SIZE (8 * GSO_MAX_SEGS) 2375 2376#define TSO_LEGACY_MAX_SIZE 65536 2377#define TSO_MAX_SIZE UINT_MAX 2378 unsigned int tso_max_size; 2379#define TSO_MAX_SEGS U16_MAX 2380 u16 tso_max_segs; 2381 2382#ifdef CONFIG_DCB 2383 const struct dcbnl_rtnl_ops *dcbnl_ops; 2384#endif 2385 u8 prio_tc_map[TC_BITMASK + 1]; 2386 2387#if IS_ENABLED(CONFIG_FCOE) 2388 unsigned int fcoe_ddp_xid; 2389#endif 2390#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) 2391 struct netprio_map __rcu *priomap; 2392#endif 2393 struct phy_link_topology *link_topo; 2394 struct phy_device *phydev; 2395 struct sfp_bus *sfp_bus; 2396 struct lock_class_key *qdisc_tx_busylock; 2397 bool proto_down; 2398 bool threaded; 2399 2400 /* priv_flags_slow, ungrouped to save space */ 2401 unsigned long see_all_hwtstamp_requests:1; 2402 unsigned long change_proto_down:1; 2403 unsigned long netns_local:1; 2404 unsigned long fcoe_mtu:1; 2405 2406 struct list_head net_notifier_list; 2407 2408#if IS_ENABLED(CONFIG_MACSEC) 2409 /* MACsec management functions */ 2410 const struct macsec_ops *macsec_ops; 2411#endif 2412 const struct udp_tunnel_nic_info *udp_tunnel_nic_info; 2413 struct udp_tunnel_nic *udp_tunnel_nic; 2414 2415 struct ethtool_netdev_state *ethtool; 2416 2417 /* protected by rtnl_lock */ 2418 struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE]; 2419 2420 u8 dev_addr_shadow[MAX_ADDR_LEN]; 2421 netdevice_tracker linkwatch_dev_tracker; 2422 netdevice_tracker watchdog_dev_tracker; 2423 netdevice_tracker dev_registered_tracker; 2424 struct rtnl_hw_stats64 *offload_xstats_l3; 2425 2426 struct devlink_port *devlink_port; 2427 2428#if IS_ENABLED(CONFIG_DPLL) 2429 struct dpll_pin __rcu *dpll_pin; 2430#endif 2431#if IS_ENABLED(CONFIG_PAGE_POOL) 2432 /** @page_pools: page pools created for this netdevice */ 2433 struct hlist_head page_pools; 2434#endif 2435 2436 /** @irq_moder: dim parameters used if IS_ENABLED(CONFIG_DIMLIB). 
*/ 2437 struct dim_irq_moder *irq_moder; 2438 2439 u64 max_pacing_offload_horizon; 2440 struct napi_config *napi_config; 2441 unsigned long gro_flush_timeout; 2442 u32 napi_defer_hard_irqs; 2443 2444 /** 2445 * @lock: protects @net_shaper_hierarchy, feel free to use for other 2446 * netdev-scope protection. Ordering: take after rtnl_lock. 2447 */ 2448 struct mutex lock; 2449 2450#if IS_ENABLED(CONFIG_NET_SHAPER) 2451 /** 2452 * @net_shaper_hierarchy: data tracking the current shaper status 2453 * see include/net/net_shapers.h 2454 */ 2455 struct net_shaper_hierarchy *net_shaper_hierarchy; 2456#endif 2457 2458 struct hlist_head neighbours[NEIGH_NR_TABLES]; 2459 2460 u8 priv[] ____cacheline_aligned 2461 __counted_by(priv_len); 2462} ____cacheline_aligned; 2463#define to_net_dev(d) container_of(d, struct net_device, dev) 2464 2465/* 2466 * Driver should use this to assign devlink port instance to a netdevice 2467 * before it registers the netdevice. Therefore devlink_port is static 2468 * during the netdev lifetime after it is registered. 2469 */ 2470#define SET_NETDEV_DEVLINK_PORT(dev, port) \ 2471({ \ 2472 WARN_ON((dev)->reg_state != NETREG_UNINITIALIZED); \ 2473 ((dev)->devlink_port = (port)); \ 2474}) 2475 2476static inline bool netif_elide_gro(const struct net_device *dev) 2477{ 2478 if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog) 2479 return true; 2480 return false; 2481} 2482 2483#define NETDEV_ALIGN 32 2484 2485static inline 2486int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio) 2487{ 2488 return dev->prio_tc_map[prio & TC_BITMASK]; 2489} 2490 2491static inline 2492int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) 2493{ 2494 if (tc >= dev->num_tc) 2495 return -EINVAL; 2496 2497 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; 2498 return 0; 2499} 2500 2501int netdev_txq_to_tc(struct net_device *dev, unsigned int txq); 2502void netdev_reset_tc(struct net_device *dev); 2503int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset); 2504int netdev_set_num_tc(struct net_device *dev, u8 num_tc); 2505 2506static inline 2507int netdev_get_num_tc(struct net_device *dev) 2508{ 2509 return dev->num_tc; 2510} 2511 2512static inline void net_prefetch(void *p) 2513{ 2514 prefetch(p); 2515#if L1_CACHE_BYTES < 128 2516 prefetch((u8 *)p + L1_CACHE_BYTES); 2517#endif 2518} 2519 2520static inline void net_prefetchw(void *p) 2521{ 2522 prefetchw(p); 2523#if L1_CACHE_BYTES < 128 2524 prefetchw((u8 *)p + L1_CACHE_BYTES); 2525#endif 2526} 2527 2528void netdev_unbind_sb_channel(struct net_device *dev, 2529 struct net_device *sb_dev); 2530int netdev_bind_sb_channel_queue(struct net_device *dev, 2531 struct net_device *sb_dev, 2532 u8 tc, u16 count, u16 offset); 2533int netdev_set_sb_channel(struct net_device *dev, u16 channel); 2534static inline int netdev_get_sb_channel(struct net_device *dev) 2535{ 2536 return max_t(int, -dev->num_tc, 0); 2537} 2538 2539static inline 2540struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, 2541 unsigned int index) 2542{ 2543 DEBUG_NET_WARN_ON_ONCE(index >= dev->num_tx_queues); 2544 return &dev->_tx[index]; 2545} 2546 2547static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev, 2548 const struct sk_buff *skb) 2549{ 2550 return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 2551} 2552 2553static inline void netdev_for_each_tx_queue(struct net_device *dev, 2554 void (*f)(struct net_device *, 2555 struct netdev_queue *, 2556 void *), 2557 void *arg) 2558{ 2559 
unsigned int i; 2560 2561 for (i = 0; i < dev->num_tx_queues; i++) 2562 f(dev, &dev->_tx[i], arg); 2563} 2564 2565#define netdev_lockdep_set_classes(dev) \ 2566{ \ 2567 static struct lock_class_key qdisc_tx_busylock_key; \ 2568 static struct lock_class_key qdisc_xmit_lock_key; \ 2569 static struct lock_class_key dev_addr_list_lock_key; \ 2570 unsigned int i; \ 2571 \ 2572 (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \ 2573 lockdep_set_class(&(dev)->addr_list_lock, \ 2574 &dev_addr_list_lock_key); \ 2575 for (i = 0; i < (dev)->num_tx_queues; i++) \ 2576 lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \ 2577 &qdisc_xmit_lock_key); \ 2578} 2579 2580u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, 2581 struct net_device *sb_dev); 2582struct netdev_queue *netdev_core_pick_tx(struct net_device *dev, 2583 struct sk_buff *skb, 2584 struct net_device *sb_dev); 2585 2586/* returns the headroom that the master device needs to take into account 2587 * when forwarding to this dev 2588 */ 2589static inline unsigned netdev_get_fwd_headroom(struct net_device *dev) 2590{ 2591 return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom; 2592} 2593 2594static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr) 2595{ 2596 if (dev->netdev_ops->ndo_set_rx_headroom) 2597 dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr); 2598} 2599 2600/* set the device rx headroom to the dev's default */ 2601static inline void netdev_reset_rx_headroom(struct net_device *dev) 2602{ 2603 netdev_set_rx_headroom(dev, -1); 2604} 2605 2606static inline void *netdev_get_ml_priv(struct net_device *dev, 2607 enum netdev_ml_priv_type type) 2608{ 2609 if (dev->ml_priv_type != type) 2610 return NULL; 2611 2612 return dev->ml_priv; 2613} 2614 2615static inline void netdev_set_ml_priv(struct net_device *dev, 2616 void *ml_priv, 2617 enum netdev_ml_priv_type type) 2618{ 2619 WARN(dev->ml_priv_type && dev->ml_priv_type != type, 2620 "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n", 2621 dev->ml_priv_type, type); 2622 WARN(!dev->ml_priv_type && dev->ml_priv, 2623 "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n"); 2624 2625 dev->ml_priv = ml_priv; 2626 dev->ml_priv_type = type; 2627} 2628 2629/* 2630 * Net namespace inlines 2631 */ 2632static inline 2633struct net *dev_net(const struct net_device *dev) 2634{ 2635 return read_pnet(&dev->nd_net); 2636} 2637 2638static inline 2639void dev_net_set(struct net_device *dev, struct net *net) 2640{ 2641 write_pnet(&dev->nd_net, net); 2642} 2643 2644/** 2645 * netdev_priv - access network device private data 2646 * @dev: network device 2647 * 2648 * Get network device private data 2649 */ 2650static inline void *netdev_priv(const struct net_device *dev) 2651{ 2652 return (void *)dev->priv; 2653} 2654 2655/* Set the sysfs physical device reference for the network logical device. 2656 * If set prior to registration, a symlink will be created during initialization. 2657 */ 2658#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev)) 2659 2660/* Set the sysfs device type for the network logical device to allow 2661 * fine-grained identification of different network device types. For 2662 * example, Ethernet, Wireless LAN, Bluetooth, WiMAX etc. 
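 *
 * A probe-path sketch (wlan_type and pdev are hypothetical names here;
 * the &struct device_type would normally be static and shared):
 *
 *	static const struct device_type wlan_type = {
 *		.name = "wlan",
 *	};
 *
 *	SET_NETDEV_DEV(dev, &pdev->dev);
 *	SET_NETDEV_DEVTYPE(dev, &wlan_type);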
2663 */ 2664#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype)) 2665 2666void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index, 2667 enum netdev_queue_type type, 2668 struct napi_struct *napi); 2669 2670static inline void netif_napi_set_irq(struct napi_struct *napi, int irq) 2671{ 2672 napi->irq = irq; 2673} 2674 2675/* Default NAPI poll() weight 2676 * Device drivers are strongly advised not to use a bigger value 2677 */ 2678#define NAPI_POLL_WEIGHT 64 2679 2680void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi, 2681 int (*poll)(struct napi_struct *, int), int weight); 2682 2683/** 2684 * netif_napi_add() - initialize a NAPI context 2685 * @dev: network device 2686 * @napi: NAPI context 2687 * @poll: polling function 2688 * 2689 * netif_napi_add() must be used to initialize a NAPI context prior to calling 2690 * *any* of the other NAPI-related functions. 2691 */ 2692static inline void 2693netif_napi_add(struct net_device *dev, struct napi_struct *napi, 2694 int (*poll)(struct napi_struct *, int)) 2695{ 2696 netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT); 2697} 2698 2699static inline void 2700netif_napi_add_tx_weight(struct net_device *dev, 2701 struct napi_struct *napi, 2702 int (*poll)(struct napi_struct *, int), 2703 int weight) 2704{ 2705 set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state); 2706 netif_napi_add_weight(dev, napi, poll, weight); 2707} 2708 2709/** 2710 * netif_napi_add_config - initialize a NAPI context with persistent config 2711 * @dev: network device 2712 * @napi: NAPI context 2713 * @poll: polling function 2714 * @index: the NAPI index 2715 */ 2716static inline void 2717netif_napi_add_config(struct net_device *dev, struct napi_struct *napi, 2718 int (*poll)(struct napi_struct *, int), int index) 2719{ 2720 napi->index = index; 2721 napi->config = &dev->napi_config[index]; 2722 netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT); 2723} 2724 2725/** 2726 * netif_napi_add_tx() - initialize a NAPI context to be used for Tx only 2727 * @dev: network device 2728 * @napi: NAPI context 2729 * @poll: polling function 2730 * 2731 * This variant of netif_napi_add() should be used from drivers using NAPI 2732 * to exclusively poll a TX queue. 2733 * This avoids adding it into napi_hash[], thus polluting that hash table. 2734 */ 2735static inline void netif_napi_add_tx(struct net_device *dev, 2736 struct napi_struct *napi, 2737 int (*poll)(struct napi_struct *, int)) 2738{ 2739 netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT); 2740} 2741 2742/** 2743 * __netif_napi_del - remove a NAPI context 2744 * @napi: NAPI context 2745 * 2746 * Warning: caller must observe RCU grace period before freeing memory 2747 * containing @napi. Drivers might want to call this helper to combine 2748 * all the needed RCU grace periods into a single one. 2749 */ 2750void __netif_napi_del(struct napi_struct *napi); 2751 2752/** 2753 * netif_napi_del - remove a NAPI context 2754 * @napi: NAPI context 2755 * 2756 * netif_napi_del() removes a NAPI context from the network device NAPI list 2757 */ 2758static inline void netif_napi_del(struct napi_struct *napi) 2759{ 2760 __netif_napi_del(napi); 2761 synchronize_net(); 2762} 2763 2764struct packet_type { 2765 __be16 type; /* This is really htons(ether_type). 
*/ 2766 bool ignore_outgoing; 2767 struct net_device *dev; /* NULL is wildcarded here */ 2768 netdevice_tracker dev_tracker; 2769 int (*func) (struct sk_buff *, 2770 struct net_device *, 2771 struct packet_type *, 2772 struct net_device *); 2773 void (*list_func) (struct list_head *, 2774 struct packet_type *, 2775 struct net_device *); 2776 bool (*id_match)(struct packet_type *ptype, 2777 struct sock *sk); 2778 struct net *af_packet_net; 2779 void *af_packet_priv; 2780 struct list_head list; 2781}; 2782 2783struct offload_callbacks { 2784 struct sk_buff *(*gso_segment)(struct sk_buff *skb, 2785 netdev_features_t features); 2786 struct sk_buff *(*gro_receive)(struct list_head *head, 2787 struct sk_buff *skb); 2788 int (*gro_complete)(struct sk_buff *skb, int nhoff); 2789}; 2790 2791struct packet_offload { 2792 __be16 type; /* This is really htons(ether_type). */ 2793 u16 priority; 2794 struct offload_callbacks callbacks; 2795 struct list_head list; 2796}; 2797 2798/* often modified stats are per-CPU, others are shared (netdev->stats) */ 2799struct pcpu_sw_netstats { 2800 u64_stats_t rx_packets; 2801 u64_stats_t rx_bytes; 2802 u64_stats_t tx_packets; 2803 u64_stats_t tx_bytes; 2804 struct u64_stats_sync syncp; 2805} __aligned(4 * sizeof(u64)); 2806 2807struct pcpu_dstats { 2808 u64_stats_t rx_packets; 2809 u64_stats_t rx_bytes; 2810 u64_stats_t rx_drops; 2811 u64_stats_t tx_packets; 2812 u64_stats_t tx_bytes; 2813 u64_stats_t tx_drops; 2814 struct u64_stats_sync syncp; 2815} __aligned(8 * sizeof(u64)); 2816 2817struct pcpu_lstats { 2818 u64_stats_t packets; 2819 u64_stats_t bytes; 2820 struct u64_stats_sync syncp; 2821} __aligned(2 * sizeof(u64)); 2822 2823void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes); 2824 2825static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len) 2826{ 2827 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); 2828 2829 u64_stats_update_begin(&tstats->syncp); 2830 u64_stats_add(&tstats->rx_bytes, len); 2831 u64_stats_inc(&tstats->rx_packets); 2832 u64_stats_update_end(&tstats->syncp); 2833} 2834 2835static inline void dev_sw_netstats_tx_add(struct net_device *dev, 2836 unsigned int packets, 2837 unsigned int len) 2838{ 2839 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); 2840 2841 u64_stats_update_begin(&tstats->syncp); 2842 u64_stats_add(&tstats->tx_bytes, len); 2843 u64_stats_add(&tstats->tx_packets, packets); 2844 u64_stats_update_end(&tstats->syncp); 2845} 2846 2847static inline void dev_lstats_add(struct net_device *dev, unsigned int len) 2848{ 2849 struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats); 2850 2851 u64_stats_update_begin(&lstats->syncp); 2852 u64_stats_add(&lstats->bytes, len); 2853 u64_stats_inc(&lstats->packets); 2854 u64_stats_update_end(&lstats->syncp); 2855} 2856 2857#define __netdev_alloc_pcpu_stats(type, gfp) \ 2858({ \ 2859 typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\ 2860 if (pcpu_stats) { \ 2861 int __cpu; \ 2862 for_each_possible_cpu(__cpu) { \ 2863 typeof(type) *stat; \ 2864 stat = per_cpu_ptr(pcpu_stats, __cpu); \ 2865 u64_stats_init(&stat->syncp); \ 2866 } \ 2867 } \ 2868 pcpu_stats; \ 2869}) 2870 2871#define netdev_alloc_pcpu_stats(type) \ 2872 __netdev_alloc_pcpu_stats(type, GFP_KERNEL) 2873 2874#define devm_netdev_alloc_pcpu_stats(dev, type) \ 2875({ \ 2876 typeof(type) __percpu *pcpu_stats = devm_alloc_percpu(dev, type);\ 2877 if (pcpu_stats) { \ 2878 int __cpu; \ 2879 for_each_possible_cpu(__cpu) { \ 2880 typeof(type) *stat; \ 2881 stat = 
per_cpu_ptr(pcpu_stats, __cpu); \ 2882 u64_stats_init(&stat->syncp); \ 2883 } \ 2884 } \ 2885 pcpu_stats; \ 2886}) 2887 2888enum netdev_lag_tx_type { 2889 NETDEV_LAG_TX_TYPE_UNKNOWN, 2890 NETDEV_LAG_TX_TYPE_RANDOM, 2891 NETDEV_LAG_TX_TYPE_BROADCAST, 2892 NETDEV_LAG_TX_TYPE_ROUNDROBIN, 2893 NETDEV_LAG_TX_TYPE_ACTIVEBACKUP, 2894 NETDEV_LAG_TX_TYPE_HASH, 2895}; 2896 2897enum netdev_lag_hash { 2898 NETDEV_LAG_HASH_NONE, 2899 NETDEV_LAG_HASH_L2, 2900 NETDEV_LAG_HASH_L34, 2901 NETDEV_LAG_HASH_L23, 2902 NETDEV_LAG_HASH_E23, 2903 NETDEV_LAG_HASH_E34, 2904 NETDEV_LAG_HASH_VLAN_SRCMAC, 2905 NETDEV_LAG_HASH_UNKNOWN, 2906}; 2907 2908struct netdev_lag_upper_info { 2909 enum netdev_lag_tx_type tx_type; 2910 enum netdev_lag_hash hash_type; 2911}; 2912 2913struct netdev_lag_lower_state_info { 2914 u8 link_up : 1, 2915 tx_enabled : 1; 2916}; 2917 2918#include <linux/notifier.h> 2919 2920/* netdevice notifier chain. Please remember to update netdev_cmd_to_name() 2921 * and the rtnetlink notification exclusion list in rtnetlink_event() when 2922 * adding new types. 2923 */ 2924enum netdev_cmd { 2925 NETDEV_UP = 1, /* For now you can't veto a device up/down */ 2926 NETDEV_DOWN, 2927 NETDEV_REBOOT, /* Tell a protocol stack a network interface 2928 detected a hardware crash and restarted 2929 - we can use this eg to kick tcp sessions 2930 once done */ 2931 NETDEV_CHANGE, /* Notify device state change */ 2932 NETDEV_REGISTER, 2933 NETDEV_UNREGISTER, 2934 NETDEV_CHANGEMTU, /* notify after mtu change happened */ 2935 NETDEV_CHANGEADDR, /* notify after the address change */ 2936 NETDEV_PRE_CHANGEADDR, /* notify before the address change */ 2937 NETDEV_GOING_DOWN, 2938 NETDEV_CHANGENAME, 2939 NETDEV_FEAT_CHANGE, 2940 NETDEV_BONDING_FAILOVER, 2941 NETDEV_PRE_UP, 2942 NETDEV_PRE_TYPE_CHANGE, 2943 NETDEV_POST_TYPE_CHANGE, 2944 NETDEV_POST_INIT, 2945 NETDEV_PRE_UNINIT, 2946 NETDEV_RELEASE, 2947 NETDEV_NOTIFY_PEERS, 2948 NETDEV_JOIN, 2949 NETDEV_CHANGEUPPER, 2950 NETDEV_RESEND_IGMP, 2951 NETDEV_PRECHANGEMTU, /* notify before mtu change happened */ 2952 NETDEV_CHANGEINFODATA, 2953 NETDEV_BONDING_INFO, 2954 NETDEV_PRECHANGEUPPER, 2955 NETDEV_CHANGELOWERSTATE, 2956 NETDEV_UDP_TUNNEL_PUSH_INFO, 2957 NETDEV_UDP_TUNNEL_DROP_INFO, 2958 NETDEV_CHANGE_TX_QUEUE_LEN, 2959 NETDEV_CVLAN_FILTER_PUSH_INFO, 2960 NETDEV_CVLAN_FILTER_DROP_INFO, 2961 NETDEV_SVLAN_FILTER_PUSH_INFO, 2962 NETDEV_SVLAN_FILTER_DROP_INFO, 2963 NETDEV_OFFLOAD_XSTATS_ENABLE, 2964 NETDEV_OFFLOAD_XSTATS_DISABLE, 2965 NETDEV_OFFLOAD_XSTATS_REPORT_USED, 2966 NETDEV_OFFLOAD_XSTATS_REPORT_DELTA, 2967 NETDEV_XDP_FEAT_CHANGE, 2968}; 2969const char *netdev_cmd_to_name(enum netdev_cmd cmd); 2970 2971int register_netdevice_notifier(struct notifier_block *nb); 2972int unregister_netdevice_notifier(struct notifier_block *nb); 2973int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb); 2974int unregister_netdevice_notifier_net(struct net *net, 2975 struct notifier_block *nb); 2976int register_netdevice_notifier_dev_net(struct net_device *dev, 2977 struct notifier_block *nb, 2978 struct netdev_net_notifier *nn); 2979int unregister_netdevice_notifier_dev_net(struct net_device *dev, 2980 struct notifier_block *nb, 2981 struct netdev_net_notifier *nn); 2982 2983struct netdev_notifier_info { 2984 struct net_device *dev; 2985 struct netlink_ext_ack *extack; 2986}; 2987 2988struct netdev_notifier_info_ext { 2989 struct netdev_notifier_info info; /* must be first */ 2990 union { 2991 u32 mtu; 2992 } ext; 2993}; 2994 2995struct netdev_notifier_change_info { 2996 
struct netdev_notifier_info info; /* must be first */ 2997 unsigned int flags_changed; 2998}; 2999 3000struct netdev_notifier_changeupper_info { 3001 struct netdev_notifier_info info; /* must be first */ 3002 struct net_device *upper_dev; /* new upper dev */ 3003 bool master; /* is upper dev master */ 3004 bool linking; /* is the notification for link or unlink */ 3005 void *upper_info; /* upper dev info */ 3006}; 3007 3008struct netdev_notifier_changelowerstate_info { 3009 struct netdev_notifier_info info; /* must be first */ 3010 void *lower_state_info; /* is lower dev state */ 3011}; 3012 3013struct netdev_notifier_pre_changeaddr_info { 3014 struct netdev_notifier_info info; /* must be first */ 3015 const unsigned char *dev_addr; 3016}; 3017 3018enum netdev_offload_xstats_type { 3019 NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1, 3020}; 3021 3022struct netdev_notifier_offload_xstats_info { 3023 struct netdev_notifier_info info; /* must be first */ 3024 enum netdev_offload_xstats_type type; 3025 3026 union { 3027 /* NETDEV_OFFLOAD_XSTATS_REPORT_DELTA */ 3028 struct netdev_notifier_offload_xstats_rd *report_delta; 3029 /* NETDEV_OFFLOAD_XSTATS_REPORT_USED */ 3030 struct netdev_notifier_offload_xstats_ru *report_used; 3031 }; 3032}; 3033 3034int netdev_offload_xstats_enable(struct net_device *dev, 3035 enum netdev_offload_xstats_type type, 3036 struct netlink_ext_ack *extack); 3037int netdev_offload_xstats_disable(struct net_device *dev, 3038 enum netdev_offload_xstats_type type); 3039bool netdev_offload_xstats_enabled(const struct net_device *dev, 3040 enum netdev_offload_xstats_type type); 3041int netdev_offload_xstats_get(struct net_device *dev, 3042 enum netdev_offload_xstats_type type, 3043 struct rtnl_hw_stats64 *stats, bool *used, 3044 struct netlink_ext_ack *extack); 3045void 3046netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *rd, 3047 const struct rtnl_hw_stats64 *stats); 3048void 3049netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *ru); 3050void netdev_offload_xstats_push_delta(struct net_device *dev, 3051 enum netdev_offload_xstats_type type, 3052 const struct rtnl_hw_stats64 *stats); 3053 3054static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, 3055 struct net_device *dev) 3056{ 3057 info->dev = dev; 3058 info->extack = NULL; 3059} 3060 3061static inline struct net_device * 3062netdev_notifier_info_to_dev(const struct netdev_notifier_info *info) 3063{ 3064 return info->dev; 3065} 3066 3067static inline struct netlink_ext_ack * 3068netdev_notifier_info_to_extack(const struct netdev_notifier_info *info) 3069{ 3070 return info->extack; 3071} 3072 3073int call_netdevice_notifiers(unsigned long val, struct net_device *dev); 3074int call_netdevice_notifiers_info(unsigned long val, 3075 struct netdev_notifier_info *info); 3076 3077#define for_each_netdev(net, d) \ 3078 list_for_each_entry(d, &(net)->dev_base_head, dev_list) 3079#define for_each_netdev_reverse(net, d) \ 3080 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list) 3081#define for_each_netdev_rcu(net, d) \ 3082 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list) 3083#define for_each_netdev_safe(net, d, n) \ 3084 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list) 3085#define for_each_netdev_continue(net, d) \ 3086 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) 3087#define for_each_netdev_continue_reverse(net, d) \ 3088 list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \ 3089 dev_list) 
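/* A minimal iteration sketch: the plain for_each_netdev() walk below
 * requires the RTNL to be held; the _rcu variants are used under
 * rcu_read_lock() instead.
 *
 *	struct net_device *dev;
 *
 *	rtnl_lock();
 *	for_each_netdev(net, dev)
 *		pr_info("%s: ifindex %d\n", dev->name, dev->ifindex);
 *	rtnl_unlock();
 */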
3090#define for_each_netdev_continue_rcu(net, d) \ 3091 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) 3092#define for_each_netdev_in_bond_rcu(bond, slave) \ 3093 for_each_netdev_rcu(&init_net, slave) \ 3094 if (netdev_master_upper_dev_get_rcu(slave) == (bond)) 3095#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) 3096 3097#define for_each_netdev_dump(net, d, ifindex) \ 3098 for (; (d = xa_find(&(net)->dev_by_index, &ifindex, \ 3099 ULONG_MAX, XA_PRESENT)); ifindex++) 3100 3101static inline struct net_device *next_net_device(struct net_device *dev) 3102{ 3103 struct list_head *lh; 3104 struct net *net; 3105 3106 net = dev_net(dev); 3107 lh = dev->dev_list.next; 3108 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 3109} 3110 3111static inline struct net_device *next_net_device_rcu(struct net_device *dev) 3112{ 3113 struct list_head *lh; 3114 struct net *net; 3115 3116 net = dev_net(dev); 3117 lh = rcu_dereference(list_next_rcu(&dev->dev_list)); 3118 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 3119} 3120 3121static inline struct net_device *first_net_device(struct net *net) 3122{ 3123 return list_empty(&net->dev_base_head) ? NULL : 3124 net_device_entry(net->dev_base_head.next); 3125} 3126 3127static inline struct net_device *first_net_device_rcu(struct net *net) 3128{ 3129 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); 3130 3131 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 3132} 3133 3134int netdev_boot_setup_check(struct net_device *dev); 3135struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, 3136 const char *hwaddr); 3137struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); 3138void dev_add_pack(struct packet_type *pt); 3139void dev_remove_pack(struct packet_type *pt); 3140void __dev_remove_pack(struct packet_type *pt); 3141void dev_add_offload(struct packet_offload *po); 3142void dev_remove_offload(struct packet_offload *po); 3143 3144int dev_get_iflink(const struct net_device *dev); 3145int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb); 3146int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr, 3147 struct net_device_path_stack *stack); 3148struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, 3149 unsigned short mask); 3150struct net_device *dev_get_by_name(struct net *net, const char *name); 3151struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); 3152struct net_device *__dev_get_by_name(struct net *net, const char *name); 3153bool netdev_name_in_use(struct net *net, const char *name); 3154int dev_alloc_name(struct net_device *dev, const char *name); 3155int dev_open(struct net_device *dev, struct netlink_ext_ack *extack); 3156void dev_close(struct net_device *dev); 3157void dev_close_many(struct list_head *head, bool unlink); 3158void dev_disable_lro(struct net_device *dev); 3159int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); 3160u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, 3161 struct net_device *sb_dev); 3162 3163int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev); 3164int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id); 3165 3166static inline int dev_queue_xmit(struct sk_buff *skb) 3167{ 3168 return __dev_queue_xmit(skb, NULL); 3169} 3170 3171static inline int dev_queue_xmit_accel(struct sk_buff *skb, 3172 struct net_device *sb_dev) 3173{ 3174 
return __dev_queue_xmit(skb, sb_dev); 3175} 3176 3177static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id) 3178{ 3179 int ret; 3180 3181 ret = __dev_direct_xmit(skb, queue_id); 3182 if (!dev_xmit_complete(ret)) 3183 kfree_skb(skb); 3184 return ret; 3185} 3186 3187int register_netdevice(struct net_device *dev); 3188void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); 3189void unregister_netdevice_many(struct list_head *head); 3190static inline void unregister_netdevice(struct net_device *dev) 3191{ 3192 unregister_netdevice_queue(dev, NULL); 3193} 3194 3195int netdev_refcnt_read(const struct net_device *dev); 3196void free_netdev(struct net_device *dev); 3197void init_dummy_netdev(struct net_device *dev); 3198 3199struct net_device *netdev_get_xmit_slave(struct net_device *dev, 3200 struct sk_buff *skb, 3201 bool all_slaves); 3202struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev, 3203 struct sock *sk); 3204struct net_device *dev_get_by_index(struct net *net, int ifindex); 3205struct net_device *__dev_get_by_index(struct net *net, int ifindex); 3206struct net_device *netdev_get_by_index(struct net *net, int ifindex, 3207 netdevice_tracker *tracker, gfp_t gfp); 3208struct net_device *netdev_get_by_name(struct net *net, const char *name, 3209 netdevice_tracker *tracker, gfp_t gfp); 3210struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); 3211struct net_device *dev_get_by_napi_id(unsigned int napi_id); 3212void netdev_copy_name(struct net_device *dev, char *name); 3213 3214static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, 3215 unsigned short type, 3216 const void *daddr, const void *saddr, 3217 unsigned int len) 3218{ 3219 if (!dev->header_ops || !dev->header_ops->create) 3220 return 0; 3221 3222 return dev->header_ops->create(skb, dev, type, daddr, saddr, len); 3223} 3224 3225static inline int dev_parse_header(const struct sk_buff *skb, 3226 unsigned char *haddr) 3227{ 3228 const struct net_device *dev = skb->dev; 3229 3230 if (!dev->header_ops || !dev->header_ops->parse) 3231 return 0; 3232 return dev->header_ops->parse(skb, haddr); 3233} 3234 3235static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb) 3236{ 3237 const struct net_device *dev = skb->dev; 3238 3239 if (!dev->header_ops || !dev->header_ops->parse_protocol) 3240 return 0; 3241 return dev->header_ops->parse_protocol(skb); 3242} 3243 3244/* ll_header must have at least hard_header_len allocated */ 3245static inline bool dev_validate_header(const struct net_device *dev, 3246 char *ll_header, int len) 3247{ 3248 if (likely(len >= dev->hard_header_len)) 3249 return true; 3250 if (len < dev->min_header_len) 3251 return false; 3252 3253 if (capable(CAP_SYS_RAWIO)) { 3254 memset(ll_header + len, 0, dev->hard_header_len - len); 3255 return true; 3256 } 3257 3258 if (dev->header_ops && dev->header_ops->validate) 3259 return dev->header_ops->validate(ll_header, len); 3260 3261 return false; 3262} 3263 3264static inline bool dev_has_header(const struct net_device *dev) 3265{ 3266 return dev->header_ops && dev->header_ops->create; 3267} 3268 3269/* 3270 * Incoming packets are placed on per-CPU queues 3271 */ 3272struct softnet_data { 3273 struct list_head poll_list; 3274 struct sk_buff_head process_queue; 3275 local_lock_t process_queue_bh_lock; 3276 3277 /* stats */ 3278 unsigned int processed; 3279 unsigned int time_squeeze; 3280#ifdef CONFIG_RPS 3281 struct softnet_data *rps_ipi_list; 3282#endif 3283 3284 unsigned 
int received_rps; 3285 bool in_net_rx_action; 3286 bool in_napi_threaded_poll; 3287 3288#ifdef CONFIG_NET_FLOW_LIMIT 3289 struct sd_flow_limit __rcu *flow_limit; 3290#endif 3291 struct Qdisc *output_queue; 3292 struct Qdisc **output_queue_tailp; 3293 struct sk_buff *completion_queue; 3294#ifdef CONFIG_XFRM_OFFLOAD 3295 struct sk_buff_head xfrm_backlog; 3296#endif 3297 /* written and read only by owning cpu: */ 3298 struct netdev_xmit xmit; 3299#ifdef CONFIG_RPS 3300 /* input_queue_head should be written by cpu owning this struct, 3301 * and only read by other cpus. Worth using a cache line. 3302 */ 3303 unsigned int input_queue_head ____cacheline_aligned_in_smp; 3304 3305 /* Elements below can be accessed between CPUs for RPS/RFS */ 3306 call_single_data_t csd ____cacheline_aligned_in_smp; 3307 struct softnet_data *rps_ipi_next; 3308 unsigned int cpu; 3309 unsigned int input_queue_tail; 3310#endif 3311 struct sk_buff_head input_pkt_queue; 3312 struct napi_struct backlog; 3313 3314 atomic_t dropped ____cacheline_aligned_in_smp; 3315 3316 /* Another possibly contended cache line */ 3317 spinlock_t defer_lock ____cacheline_aligned_in_smp; 3318 int defer_count; 3319 int defer_ipi_scheduled; 3320 struct sk_buff *defer_list; 3321 call_single_data_t defer_csd; 3322}; 3323 3324DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); 3325 3326#ifndef CONFIG_PREEMPT_RT 3327static inline int dev_recursion_level(void) 3328{ 3329 return this_cpu_read(softnet_data.xmit.recursion); 3330} 3331#else 3332static inline int dev_recursion_level(void) 3333{ 3334 return current->net_xmit.recursion; 3335} 3336 3337#endif 3338 3339void __netif_schedule(struct Qdisc *q); 3340void netif_schedule_queue(struct netdev_queue *txq); 3341 3342static inline void netif_tx_schedule_all(struct net_device *dev) 3343{ 3344 unsigned int i; 3345 3346 for (i = 0; i < dev->num_tx_queues; i++) 3347 netif_schedule_queue(netdev_get_tx_queue(dev, i)); 3348} 3349 3350static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue) 3351{ 3352 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); 3353} 3354 3355/** 3356 * netif_start_queue - allow transmit 3357 * @dev: network device 3358 * 3359 * Allow upper layers to call the device hard_start_xmit routine. 3360 */ 3361static inline void netif_start_queue(struct net_device *dev) 3362{ 3363 netif_tx_start_queue(netdev_get_tx_queue(dev, 0)); 3364} 3365 3366static inline void netif_tx_start_all_queues(struct net_device *dev) 3367{ 3368 unsigned int i; 3369 3370 for (i = 0; i < dev->num_tx_queues; i++) { 3371 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 3372 netif_tx_start_queue(txq); 3373 } 3374} 3375 3376void netif_tx_wake_queue(struct netdev_queue *dev_queue); 3377 3378/** 3379 * netif_wake_queue - restart transmit 3380 * @dev: network device 3381 * 3382 * Allow upper layers to call the device hard_start_xmit routine. 3383 * Used for flow control when transmit resources are available. 
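 *
 * A TX completion sketch (my_tx_free_slots() and MY_WAKE_THRESH are
 * hypothetical driver-side names):
 *
 *	if (netif_queue_stopped(dev) &&
 *	    my_tx_free_slots(ring) >= MY_WAKE_THRESH)
 *		netif_wake_queue(dev);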
3384 */ 3385static inline void netif_wake_queue(struct net_device *dev) 3386{ 3387 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0)); 3388} 3389 3390static inline void netif_tx_wake_all_queues(struct net_device *dev) 3391{ 3392 unsigned int i; 3393 3394 for (i = 0; i < dev->num_tx_queues; i++) { 3395 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 3396 netif_tx_wake_queue(txq); 3397 } 3398} 3399 3400static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) 3401{ 3402 /* Paired with READ_ONCE() from dev_watchdog() */ 3403 WRITE_ONCE(dev_queue->trans_start, jiffies); 3404 3405 /* This barrier is paired with smp_mb() from dev_watchdog() */ 3406 smp_mb__before_atomic(); 3407 3408 /* Must be an atomic op, see netif_txq_try_stop() */ 3409 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); 3410} 3411 3412/** 3413 * netif_stop_queue - stop transmit on a queue 3414 * @dev: network device 3415 * 3416 * Stop upper layers calling the device hard_start_xmit routine. 3417 * Used for flow control when transmit resources are unavailable. 3418 */ 3419static inline void netif_stop_queue(struct net_device *dev) 3420{ 3421 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0)); 3422} 3423 3424void netif_tx_stop_all_queues(struct net_device *dev); 3425 3426static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue) 3427{ 3428 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); 3429} 3430 3431/** 3432 * netif_queue_stopped - test if transmit queue is flow-blocked 3433 * @dev: network device 3434 * 3435 * Test if transmit queue on device is currently unable to send. 3436 */ 3437static inline bool netif_queue_stopped(const struct net_device *dev) 3438{ 3439 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); 3440} 3441 3442static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue) 3443{ 3444 return dev_queue->state & QUEUE_STATE_ANY_XOFF; 3445} 3446 3447static inline bool 3448netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue) 3449{ 3450 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN; 3451} 3452 3453static inline bool 3454netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue) 3455{ 3456 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN; 3457} 3458 3459/** 3460 * netdev_queue_set_dql_min_limit - set dql minimum limit 3461 * @dev_queue: pointer to transmit queue 3462 * @min_limit: dql minimum limit 3463 * 3464 * Forces xmit_more() to return true until the minimum threshold 3465 * defined by @min_limit is reached (or until the tx queue is 3466 * empty). Warning: to be used with care; misuse will impact 3467 * latency. 3468 */ 3469static inline void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue, 3470 unsigned int min_limit) 3471{ 3472#ifdef CONFIG_BQL 3473 dev_queue->dql.min_limit = min_limit; 3474#endif 3475} 3476 3477static inline int netdev_queue_dql_avail(const struct netdev_queue *txq) 3478{ 3479#ifdef CONFIG_BQL 3480 /* Non-BQL migrated drivers will return 0, too. */ 3481 return dql_avail(&txq->dql); 3482#else 3483 return 0; 3484#endif 3485} 3486 3487/** 3488 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write 3489 * @dev_queue: pointer to transmit queue 3490 * 3491 * BQL-enabled drivers might use this helper in their ndo_start_xmit(), 3492 * to give an appropriate hint to the CPU.
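 *
 * A minimal sketch (hypothetical driver): the prefetch goes at the
 * top of ndo_start_xmit(), before the descriptor work, so the BQL
 * counters are hot by the time netdev_tx_sent_queue() runs:
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
 *
 *		netdev_txq_bql_enqueue_prefetchw(txq);
 *		// ... map the skb and post it to hardware ...
 *		netdev_tx_sent_queue(txq, skb->len);
 *		return NETDEV_TX_OK;
 *	}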
3493 */ 3494static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue) 3495{ 3496#ifdef CONFIG_BQL 3497 prefetchw(&dev_queue->dql.num_queued); 3498#endif 3499} 3500 3501/** 3502 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write 3503 * @dev_queue: pointer to transmit queue 3504 * 3505 * BQL-enabled drivers might use this helper in their TX completion path, 3506 * to give an appropriate hint to the CPU. 3507 */ 3508static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue) 3509{ 3510#ifdef CONFIG_BQL 3511 prefetchw(&dev_queue->dql.limit); 3512#endif 3513} 3514 3515/** 3516 * netdev_tx_sent_queue - report the number of bytes queued to a given tx queue 3517 * @dev_queue: network device queue 3518 * @bytes: number of bytes queued to the device queue 3519 * 3520 * Report the number of bytes queued for sending/completion to the network 3521 * device hardware queue. @bytes should be a good approximation and should 3522 * exactly match netdev_tx_completed_queue() @bytes. 3523 * This is typically called once per packet, from ndo_start_xmit(). 3524 */ 3525static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, 3526 unsigned int bytes) 3527{ 3528#ifdef CONFIG_BQL 3529 dql_queued(&dev_queue->dql, bytes); 3530 3531 if (likely(dql_avail(&dev_queue->dql) >= 0)) 3532 return; 3533 3534 /* Paired with READ_ONCE() from dev_watchdog() */ 3535 WRITE_ONCE(dev_queue->trans_start, jiffies); 3536 3537 /* This barrier is paired with smp_mb() from dev_watchdog() */ 3538 smp_mb__before_atomic(); 3539 3540 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); 3541 3542 /* 3543 * The XOFF flag must be set before checking the dql_avail below, 3544 * because in netdev_tx_completed_queue we update the dql_completed 3545 * before checking the XOFF flag. 3546 */ 3547 smp_mb__after_atomic(); 3548 3549 /* check again in case another CPU has just made room available */ 3550 if (unlikely(dql_avail(&dev_queue->dql) >= 0)) 3551 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); 3552#endif 3553} 3554 3555/* Variant of netdev_tx_sent_queue() for drivers that are aware 3556 * that they should not test BQL status themselves. 3557 * We do want to change __QUEUE_STATE_STACK_XOFF only for the last 3558 * skb of a batch. 3559 * Returns true if the doorbell must be used to kick the NIC. 3560 */ 3561static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue, 3562 unsigned int bytes, 3563 bool xmit_more) 3564{ 3565 if (xmit_more) { 3566#ifdef CONFIG_BQL 3567 dql_queued(&dev_queue->dql, bytes); 3568#endif 3569 return netif_tx_queue_stopped(dev_queue); 3570 } 3571 netdev_tx_sent_queue(dev_queue, bytes); 3572 return true; 3573} 3574 3575/** 3576 * netdev_sent_queue - report the number of bytes queued to hardware 3577 * @dev: network device 3578 * @bytes: number of bytes queued to the hardware device queue 3579 * 3580 * Report the number of bytes queued for sending/completion to the network 3581 * device hardware queue #0. @bytes should be a good approximation and should 3582 * exactly match netdev_completed_queue() @bytes. 3583 * This is typically called once per packet, from ndo_start_xmit().
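 *
 * Illustrative sketch (hypothetical single-queue driver): the bytes
 * reported here must later be matched, exactly, by the completion
 * path:
 *
 *	// in ndo_start_xmit(), after the skb is posted to the ring:
 *	netdev_sent_queue(dev, skb->len);
 *
 *	// in the TX completion handler, once per completion round:
 *	netdev_completed_queue(dev, done_pkts, done_bytes);
 *
 * done_pkts/done_bytes are hypothetical counters accumulated while
 * reclaiming descriptors.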
3584 */ 3585static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) 3586{ 3587 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes); 3588} 3589 3590static inline bool __netdev_sent_queue(struct net_device *dev, 3591 unsigned int bytes, 3592 bool xmit_more) 3593{ 3594 return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes, 3595 xmit_more); 3596} 3597 3598/** 3599 * netdev_tx_completed_queue - report number of packets/bytes at TX completion. 3600 * @dev_queue: network device queue 3601 * @pkts: number of packets (currently ignored) 3602 * @bytes: number of bytes dequeued from the device queue 3603 * 3604 * Must be called at most once per TX completion round (and not per 3605 * individual packet), so that BQL can adjust its limits appropriately. 3606 */ 3607static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, 3608 unsigned int pkts, unsigned int bytes) 3609{ 3610#ifdef CONFIG_BQL 3611 if (unlikely(!bytes)) 3612 return; 3613 3614 dql_completed(&dev_queue->dql, bytes); 3615 3616 /* 3617 * Without the memory barrier there is a small possibility that 3618 * netdev_tx_sent_queue() will miss the update and cause the queue to 3619 * be stopped forever. 3620 */ 3621 smp_mb(); /* NOTE: netdev_txq_completed_mb() assumes this exists */ 3622 3623 if (unlikely(dql_avail(&dev_queue->dql) < 0)) 3624 return; 3625 3626 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state)) 3627 netif_schedule_queue(dev_queue); 3628#endif 3629} 3630 3631/** 3632 * netdev_completed_queue - report bytes and packets completed by device 3633 * @dev: network device 3634 * @pkts: actual number of packets sent over the medium 3635 * @bytes: actual number of bytes sent over the medium 3636 * 3637 * Report the number of bytes and packets transmitted by the network device 3638 * hardware queue over the physical medium; @bytes must exactly match the 3639 * @bytes amount passed to netdev_sent_queue(). 3640 */ 3641static inline void netdev_completed_queue(struct net_device *dev, 3642 unsigned int pkts, unsigned int bytes) 3643{ 3644 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes); 3645} 3646 3647static inline void netdev_tx_reset_queue(struct netdev_queue *q) 3648{ 3649#ifdef CONFIG_BQL 3650 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state); 3651 dql_reset(&q->dql); 3652#endif 3653} 3654 3655/** 3656 * netdev_tx_reset_subqueue - reset the BQL stats and state of a netdev queue 3657 * @dev: network device 3658 * @qid: stack index of the queue to reset 3659 */ 3660static inline void netdev_tx_reset_subqueue(const struct net_device *dev, 3661 u32 qid) 3662{ 3663 netdev_tx_reset_queue(netdev_get_tx_queue(dev, qid)); 3664} 3665 3666/** 3667 * netdev_reset_queue - reset the packets and bytes count of a network device 3668 * @dev_queue: network device 3669 * 3670 * Reset the bytes and packet count of a network device and clear the 3671 * software flow control OFF bit for this network device. 3672 */ 3673static inline void netdev_reset_queue(struct net_device *dev_queue) 3674{ 3675 netdev_tx_reset_subqueue(dev_queue, 0); 3676} 3677 3678/** 3679 * netdev_cap_txqueue - check if selected tx queue exceeds device queues 3680 * @dev: network device 3681 * @queue_index: given tx queue index 3682 * 3683 * Returns 0 if given tx queue index >= number of device tx queues, 3684 * otherwise returns the originally passed tx queue index.
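 *
 * Illustrative sketch: clamping a possibly stale index (for example,
 * one recorded before real_num_tx_queues shrank) before the lookup;
 * requested_qid is a hypothetical caller-supplied value:
 *
 *	u16 qid = netdev_cap_txqueue(dev, requested_qid);
 *	struct netdev_queue *txq = netdev_get_tx_queue(dev, qid);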
3685 */ 3686static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index) 3687{ 3688 if (unlikely(queue_index >= dev->real_num_tx_queues)) { 3689 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n", 3690 dev->name, queue_index, 3691 dev->real_num_tx_queues); 3692 return 0; 3693 } 3694 3695 return queue_index; 3696} 3697 3698/** 3699 * netif_running - test if up 3700 * @dev: network device 3701 * 3702 * Test if the device has been brought up. 3703 */ 3704static inline bool netif_running(const struct net_device *dev) 3705{ 3706 return test_bit(__LINK_STATE_START, &dev->state); 3707} 3708 3709/* 3710 * Routines to manage the subqueues on a device. We only need start, 3711 * stop, and a check if it's stopped. All other device management is 3712 * done at the overall netdevice level. 3713 * A test for whether the device is multiqueue is also provided. 3714 */ 3715 3716/** 3717 * netif_start_subqueue - allow sending packets on subqueue 3718 * @dev: network device 3719 * @queue_index: sub queue index 3720 * 3721 * Start individual transmit queue of a device with multiple transmit queues. 3722 */ 3723static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) 3724{ 3725 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3726 3727 netif_tx_start_queue(txq); 3728} 3729 3730/** 3731 * netif_stop_subqueue - stop sending packets on subqueue 3732 * @dev: network device 3733 * @queue_index: sub queue index 3734 * 3735 * Stop individual transmit queue of a device with multiple transmit queues. 3736 */ 3737static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) 3738{ 3739 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3740 netif_tx_stop_queue(txq); 3741} 3742 3743/** 3744 * __netif_subqueue_stopped - test status of subqueue 3745 * @dev: network device 3746 * @queue_index: sub queue index 3747 * 3748 * Check individual transmit queue of a device with multiple transmit queues. 3749 */ 3750static inline bool __netif_subqueue_stopped(const struct net_device *dev, 3751 u16 queue_index) 3752{ 3753 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3754 3755 return netif_tx_queue_stopped(txq); 3756} 3757 3758/** 3759 * netif_subqueue_stopped - test status of subqueue 3760 * @dev: network device 3761 * @skb: sub queue buffer pointer 3762 * 3763 * Check individual transmit queue of a device with multiple transmit queues. 3764 */ 3765static inline bool netif_subqueue_stopped(const struct net_device *dev, 3766 struct sk_buff *skb) 3767{ 3768 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); 3769} 3770 3771/** 3772 * netif_wake_subqueue - allow sending packets on subqueue 3773 * @dev: network device 3774 * @queue_index: sub queue index 3775 * 3776 * Resume individual transmit queue of a device with multiple transmit queues.
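 *
 * Illustrative sketch (hypothetical multiqueue driver): each TX
 * ring's completion handler wakes only its own subqueue:
 *
 *	static void foo_ring_tx_complete(struct net_device *dev, u16 ring)
 *	{
 *		// ... reclaim this ring's descriptors ...
 *		if (__netif_subqueue_stopped(dev, ring))
 *			netif_wake_subqueue(dev, ring);
 *	}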
3777 */ 3778static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) 3779{ 3780 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3781 3782 netif_tx_wake_queue(txq); 3783} 3784 3785#ifdef CONFIG_XPS 3786int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, 3787 u16 index); 3788int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, 3789 u16 index, enum xps_map_type type); 3790 3791/** 3792 * netif_attr_test_mask - Test a CPU or Rx queue set in a mask 3793 * @j: CPU/Rx queue index 3794 * @mask: bitmask of all CPUs/Rx queues 3795 * @nr_bits: number of bits in the bitmask 3796 * 3797 * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues. 3798 */ 3799static inline bool netif_attr_test_mask(unsigned long j, 3800 const unsigned long *mask, 3801 unsigned int nr_bits) 3802{ 3803 cpu_max_bits_warn(j, nr_bits); 3804 return test_bit(j, mask); 3805} 3806 3807/** 3808 * netif_attr_test_online - Test for online CPU/Rx queue 3809 * @j: CPU/Rx queue index 3810 * @online_mask: bitmask for CPUs/Rx queues that are online 3811 * @nr_bits: number of bits in the bitmask 3812 * 3813 * Returns true if a CPU/Rx queue is online. 3814 */ 3815static inline bool netif_attr_test_online(unsigned long j, 3816 const unsigned long *online_mask, 3817 unsigned int nr_bits) 3818{ 3819 cpu_max_bits_warn(j, nr_bits); 3820 3821 if (online_mask) 3822 return test_bit(j, online_mask); 3823 3824 return (j < nr_bits); 3825} 3826 3827/** 3828 * netif_attrmask_next - get the next CPU/Rx queue in a CPU/Rx queue mask 3829 * @n: CPU/Rx queue index 3830 * @srcp: the cpumask/Rx queue mask pointer 3831 * @nr_bits: number of bits in the bitmask 3832 * 3833 * Returns >= nr_bits if no further CPUs/Rx queues are set. 3834 */ 3835static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp, 3836 unsigned int nr_bits) 3837{ 3838 /* -1 is a legal arg here. */ 3839 if (n != -1) 3840 cpu_max_bits_warn(n, nr_bits); 3841 3842 if (srcp) 3843 return find_next_bit(srcp, nr_bits, n + 1); 3844 3845 return n + 1; 3846} 3847 3848/** 3849 * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p 3850 * @n: CPU/Rx queue index 3851 * @src1p: the first CPUs/Rx queues mask pointer 3852 * @src2p: the second CPUs/Rx queues mask pointer 3853 * @nr_bits: number of bits in the bitmask 3854 * 3855 * Returns >= nr_bits if no further CPUs/Rx queues are set in both. 3856 */ 3857static inline int netif_attrmask_next_and(int n, const unsigned long *src1p, 3858 const unsigned long *src2p, 3859 unsigned int nr_bits) 3860{ 3861 /* -1 is a legal arg here.
*/ 3862 if (n != -1) 3863 cpu_max_bits_warn(n, nr_bits); 3864 3865 if (src1p && src2p) 3866 return find_next_and_bit(src1p, src2p, nr_bits, n + 1); 3867 else if (src1p) 3868 return find_next_bit(src1p, nr_bits, n + 1); 3869 else if (src2p) 3870 return find_next_bit(src2p, nr_bits, n + 1); 3871 3872 return n + 1; 3873} 3874#else 3875static inline int netif_set_xps_queue(struct net_device *dev, 3876 const struct cpumask *mask, 3877 u16 index) 3878{ 3879 return 0; 3880} 3881 3882static inline int __netif_set_xps_queue(struct net_device *dev, 3883 const unsigned long *mask, 3884 u16 index, enum xps_map_type type) 3885{ 3886 return 0; 3887} 3888#endif 3889 3890/** 3891 * netif_is_multiqueue - test if device has multiple transmit queues 3892 * @dev: network device 3893 * 3894 * Check if device has multiple transmit queues. 3895 */ 3896static inline bool netif_is_multiqueue(const struct net_device *dev) 3897{ 3898 return dev->num_tx_queues > 1; 3899} 3900 3901int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq); 3902 3903#ifdef CONFIG_SYSFS 3904int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq); 3905#else 3906static inline int netif_set_real_num_rx_queues(struct net_device *dev, 3907 unsigned int rxqs) 3908{ 3909 dev->real_num_rx_queues = rxqs; 3910 return 0; 3911} 3912#endif 3913int netif_set_real_num_queues(struct net_device *dev, 3914 unsigned int txq, unsigned int rxq); 3915 3916int netif_get_num_default_rss_queues(void); 3917 3918void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason); 3919void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason); 3920 3921/* 3922 * It is not allowed to call kfree_skb() or consume_skb() from hardware 3923 * interrupt context or with hardware interrupts being disabled. 3924 * (in_hardirq() || irqs_disabled()) 3925 * 3926 * We provide four helpers that can be used in the following contexts: 3927 * 3928 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context, 3929 * replacing kfree_skb(skb) 3930 * 3931 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context. 3932 * Typically used in place of consume_skb(skb) in the TX completion path 3933 * 3934 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context, 3935 * replacing kfree_skb(skb) 3936 * 3937 * dev_consume_skb_any(skb) when caller doesn't know its current irq context, 3938 * and consumes a packet.
Used in place of consume_skb(skb) 3939 */ 3940static inline void dev_kfree_skb_irq(struct sk_buff *skb) 3941{ 3942 dev_kfree_skb_irq_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED); 3943} 3944 3945static inline void dev_consume_skb_irq(struct sk_buff *skb) 3946{ 3947 dev_kfree_skb_irq_reason(skb, SKB_CONSUMED); 3948} 3949 3950static inline void dev_kfree_skb_any(struct sk_buff *skb) 3951{ 3952 dev_kfree_skb_any_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED); 3953} 3954 3955static inline void dev_consume_skb_any(struct sk_buff *skb) 3956{ 3957 dev_kfree_skb_any_reason(skb, SKB_CONSUMED); 3958} 3959 3960u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp, 3961 struct bpf_prog *xdp_prog); 3962void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog); 3963int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb); 3964int netif_rx(struct sk_buff *skb); 3965int __netif_rx(struct sk_buff *skb); 3966 3967int netif_receive_skb(struct sk_buff *skb); 3968int netif_receive_skb_core(struct sk_buff *skb); 3969void netif_receive_skb_list_internal(struct list_head *head); 3970void netif_receive_skb_list(struct list_head *head); 3971gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); 3972void napi_gro_flush(struct napi_struct *napi, bool flush_old); 3973struct sk_buff *napi_get_frags(struct napi_struct *napi); 3974void napi_get_frags_check(struct napi_struct *napi); 3975gro_result_t napi_gro_frags(struct napi_struct *napi); 3976 3977static inline void napi_free_frags(struct napi_struct *napi) 3978{ 3979 kfree_skb(napi->skb); 3980 napi->skb = NULL; 3981} 3982 3983bool netdev_is_rx_handler_busy(struct net_device *dev); 3984int netdev_rx_handler_register(struct net_device *dev, 3985 rx_handler_func_t *rx_handler, 3986 void *rx_handler_data); 3987void netdev_rx_handler_unregister(struct net_device *dev); 3988 3989bool dev_valid_name(const char *name); 3990static inline bool is_socket_ioctl_cmd(unsigned int cmd) 3991{ 3992 return _IOC_TYPE(cmd) == SOCK_IOC_TYPE; 3993} 3994int get_user_ifreq(struct ifreq *ifr, void __user **ifrdata, void __user *arg); 3995int put_user_ifreq(struct ifreq *ifr, void __user *arg); 3996int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, 3997 void __user *data, bool *need_copyout); 3998int dev_ifconf(struct net *net, struct ifconf __user *ifc); 3999int generic_hwtstamp_get_lower(struct net_device *dev, 4000 struct kernel_hwtstamp_config *kernel_cfg); 4001int generic_hwtstamp_set_lower(struct net_device *dev, 4002 struct kernel_hwtstamp_config *kernel_cfg, 4003 struct netlink_ext_ack *extack); 4004int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata); 4005unsigned int dev_get_flags(const struct net_device *); 4006int __dev_change_flags(struct net_device *dev, unsigned int flags, 4007 struct netlink_ext_ack *extack); 4008int dev_change_flags(struct net_device *dev, unsigned int flags, 4009 struct netlink_ext_ack *extack); 4010int dev_set_alias(struct net_device *, const char *, size_t); 4011int dev_get_alias(const struct net_device *, char *, size_t); 4012int __dev_change_net_namespace(struct net_device *dev, struct net *net, 4013 const char *pat, int new_ifindex); 4014static inline 4015int dev_change_net_namespace(struct net_device *dev, struct net *net, 4016 const char *pat) 4017{ 4018 return __dev_change_net_namespace(dev, net, pat, 0); 4019} 4020int __dev_set_mtu(struct net_device *, int); 4021int dev_set_mtu(struct net_device *, int); 4022int dev_pre_changeaddr_notify(struct net_device 
*dev, const char *addr, 4023 struct netlink_ext_ack *extack); 4024int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, 4025 struct netlink_ext_ack *extack); 4026int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, 4027 struct netlink_ext_ack *extack); 4028int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name); 4029int dev_get_port_parent_id(struct net_device *dev, 4030 struct netdev_phys_item_id *ppid, bool recurse); 4031bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b); 4032 4033struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); 4034struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 4035 struct netdev_queue *txq, int *ret); 4036 4037int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); 4038u8 dev_xdp_prog_count(struct net_device *dev); 4039int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf); 4040u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode); 4041 4042u32 dev_get_min_mp_channel_count(const struct net_device *dev); 4043 4044int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 4045int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 4046int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb); 4047bool is_skb_forwardable(const struct net_device *dev, 4048 const struct sk_buff *skb); 4049 4050static __always_inline bool __is_skb_forwardable(const struct net_device *dev, 4051 const struct sk_buff *skb, 4052 const bool check_mtu) 4053{ 4054 const u32 vlan_hdr_len = 4; /* VLAN_HLEN */ 4055 unsigned int len; 4056 4057 if (!(dev->flags & IFF_UP)) 4058 return false; 4059 4060 if (!check_mtu) 4061 return true; 4062 4063 len = dev->mtu + dev->hard_header_len + vlan_hdr_len; 4064 if (skb->len <= len) 4065 return true; 4066 4067 /* if TSO is enabled, we don't care about the length as the packet 4068 * may be forwarded without being segmented first 4069 */ 4070 if (skb_is_gso(skb)) 4071 return true; 4072 4073 return false; 4074} 4075 4076void netdev_core_stats_inc(struct net_device *dev, u32 offset); 4077 4078#define DEV_CORE_STATS_INC(FIELD) \ 4079static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \ 4080{ \ 4081 netdev_core_stats_inc(dev, \ 4082 offsetof(struct net_device_core_stats, FIELD)); \ 4083} 4084DEV_CORE_STATS_INC(rx_dropped) 4085DEV_CORE_STATS_INC(tx_dropped) 4086DEV_CORE_STATS_INC(rx_nohandler) 4087DEV_CORE_STATS_INC(rx_otherhost_dropped) 4088#undef DEV_CORE_STATS_INC 4089 4090static __always_inline int ____dev_forward_skb(struct net_device *dev, 4091 struct sk_buff *skb, 4092 const bool check_mtu) 4093{ 4094 if (skb_orphan_frags(skb, GFP_ATOMIC) || 4095 unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) { 4096 dev_core_stats_rx_dropped_inc(dev); 4097 kfree_skb(skb); 4098 return NET_RX_DROP; 4099 } 4100 4101 skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev))); 4102 skb->priority = 0; 4103 return 0; 4104} 4105 4106bool dev_nit_active(struct net_device *dev); 4107void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); 4108 4109static inline void __dev_put(struct net_device *dev) 4110{ 4111 if (dev) { 4112#ifdef CONFIG_PCPU_DEV_REFCNT 4113 this_cpu_dec(*dev->pcpu_refcnt); 4114#else 4115 refcount_dec(&dev->dev_refcnt); 4116#endif 4117 } 4118} 4119 4120static inline void __dev_hold(struct net_device *dev) 4121{ 4122 if (dev) { 4123#ifdef CONFIG_PCPU_DEV_REFCNT 4124
this_cpu_inc(*dev->pcpu_refcnt); 4125#else 4126 refcount_inc(&dev->dev_refcnt); 4127#endif 4128 } 4129} 4130 4131static inline void __netdev_tracker_alloc(struct net_device *dev, 4132 netdevice_tracker *tracker, 4133 gfp_t gfp) 4134{ 4135#ifdef CONFIG_NET_DEV_REFCNT_TRACKER 4136 ref_tracker_alloc(&dev->refcnt_tracker, tracker, gfp); 4137#endif 4138} 4139 4140/* netdev_tracker_alloc() can upgrade a prior untracked reference 4141 * taken by dev_get_by_name()/dev_get_by_index() to a tracked one. 4142 */ 4143static inline void netdev_tracker_alloc(struct net_device *dev, 4144 netdevice_tracker *tracker, gfp_t gfp) 4145{ 4146#ifdef CONFIG_NET_DEV_REFCNT_TRACKER 4147 refcount_dec(&dev->refcnt_tracker.no_tracker); 4148 __netdev_tracker_alloc(dev, tracker, gfp); 4149#endif 4150} 4151 4152static inline void netdev_tracker_free(struct net_device *dev, 4153 netdevice_tracker *tracker) 4154{ 4155#ifdef CONFIG_NET_DEV_REFCNT_TRACKER 4156 ref_tracker_free(&dev->refcnt_tracker, tracker); 4157#endif 4158} 4159 4160static inline void netdev_hold(struct net_device *dev, 4161 netdevice_tracker *tracker, gfp_t gfp) 4162{ 4163 if (dev) { 4164 __dev_hold(dev); 4165 __netdev_tracker_alloc(dev, tracker, gfp); 4166 } 4167} 4168 4169static inline void netdev_put(struct net_device *dev, 4170 netdevice_tracker *tracker) 4171{ 4172 if (dev) { 4173 netdev_tracker_free(dev, tracker); 4174 __dev_put(dev); 4175 } 4176} 4177 4178/** 4179 * dev_hold - get reference to device 4180 * @dev: network device 4181 * 4182 * Hold reference to device to keep it from being freed. 4183 * Try using netdev_hold() instead. 4184 */ 4185static inline void dev_hold(struct net_device *dev) 4186{ 4187 netdev_hold(dev, NULL, GFP_ATOMIC); 4188} 4189 4190/** 4191 * dev_put - release reference to device 4192 * @dev: network device 4193 * 4194 * Release reference to device to allow it to be freed. 4195 * Try using netdev_put() instead. 4196 */ 4197static inline void dev_put(struct net_device *dev) 4198{ 4199 netdev_put(dev, NULL); 4200} 4201 4202DEFINE_FREE(dev_put, struct net_device *, if (_T) dev_put(_T)) 4203 4204static inline void netdev_ref_replace(struct net_device *odev, 4205 struct net_device *ndev, 4206 netdevice_tracker *tracker, 4207 gfp_t gfp) 4208{ 4209 if (odev) 4210 netdev_tracker_free(odev, tracker); 4211 4212 __dev_hold(ndev); 4213 __dev_put(odev); 4214 4215 if (ndev) 4216 __netdev_tracker_alloc(ndev, tracker, gfp); 4217} 4218 4219/* Carrier loss detection, dial on demand. The functions netif_carrier_on 4220 * and _off may be called from IRQ context, but it is the caller 4221 * who is responsible for serializing these calls. 4222 * 4223 * The name carrier is inappropriate; these functions should really be 4224 * called netif_lowerlayer_*() because they represent the state of any 4225 * kind of lower layer, not just hardware media. 4226 */ 4227void linkwatch_fire_event(struct net_device *dev); 4228 4229/** 4230 * linkwatch_sync_dev - sync linkwatch for the given device 4231 * @dev: network device to sync linkwatch for 4232 * 4233 * Sync linkwatch for the given device, removing it from the 4234 * pending work list (if queued).
4235 */ 4236void linkwatch_sync_dev(struct net_device *dev); 4237 4238/** 4239 * netif_carrier_ok - test if carrier present 4240 * @dev: network device 4241 * 4242 * Check if carrier is present on device. 4243 */ 4244static inline bool netif_carrier_ok(const struct net_device *dev) 4245{ 4246 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); 4247} 4248 4249unsigned long dev_trans_start(struct net_device *dev); 4250 4251void __netdev_watchdog_up(struct net_device *dev); 4252 4253void netif_carrier_on(struct net_device *dev); 4254void netif_carrier_off(struct net_device *dev); 4255void netif_carrier_event(struct net_device *dev); 4256 4257/** 4258 * netif_dormant_on - mark device as dormant. 4259 * @dev: network device 4260 * 4261 * Mark device as dormant (as per RFC2863). 4262 * 4263 * The dormant state indicates that the relevant interface is not 4264 * actually in a condition to pass packets (i.e., it is not 'up') but is 4265 * in a "pending" state, waiting for some external event. For "on- 4266 * demand" interfaces, this new state identifies the situation where the 4267 * interface is waiting for events to place it in the up state. 4268 */ 4269static inline void netif_dormant_on(struct net_device *dev) 4270{ 4271 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) 4272 linkwatch_fire_event(dev); 4273} 4274 4275/** 4276 * netif_dormant_off - set device as not dormant. 4277 * @dev: network device 4278 * 4279 * Device is not in dormant state. 4280 */ 4281static inline void netif_dormant_off(struct net_device *dev) 4282{ 4283 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) 4284 linkwatch_fire_event(dev); 4285} 4286 4287/** 4288 * netif_dormant - test if device is dormant 4289 * @dev: network device 4290 * 4291 * Check if device is dormant. 4292 */ 4293static inline bool netif_dormant(const struct net_device *dev) 4294{ 4295 return test_bit(__LINK_STATE_DORMANT, &dev->state); 4296} 4297 4298 4299/** 4300 * netif_testing_on - mark device as under test. 4301 * @dev: network device 4302 * 4303 * Mark device as under test (as per RFC2863). 4304 * 4305 * The testing state indicates that some test(s) must be performed on 4306 * the interface. After completion of the test, the interface state 4307 * will change to up, dormant, or down, as appropriate. 4308 */ 4309static inline void netif_testing_on(struct net_device *dev) 4310{ 4311 if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state)) 4312 linkwatch_fire_event(dev); 4313} 4314 4315/** 4316 * netif_testing_off - set device as not under test. 4317 * @dev: network device 4318 * 4319 * Device is not in testing state.
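 *
 * Illustrative sketch (hypothetical self-test path): the on/off pair
 * brackets the test so the RFC2863 operstate reflects it:
 *
 *	netif_testing_on(dev);
 *	err = foo_run_selftest(dev);	// hypothetical test routine
 *	netif_testing_off(dev);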
4320 */ 4321static inline void netif_testing_off(struct net_device *dev) 4322{ 4323 if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state)) 4324 linkwatch_fire_event(dev); 4325} 4326 4327/** 4328 * netif_testing - test if device is under test 4329 * @dev: network device 4330 * 4331 * Check if device is under test 4332 */ 4333static inline bool netif_testing(const struct net_device *dev) 4334{ 4335 return test_bit(__LINK_STATE_TESTING, &dev->state); 4336} 4337 4338 4339/** 4340 * netif_oper_up - test if device is operational 4341 * @dev: network device 4342 * 4343 * Check if carrier is operational 4344 */ 4345static inline bool netif_oper_up(const struct net_device *dev) 4346{ 4347 unsigned int operstate = READ_ONCE(dev->operstate); 4348 4349 return operstate == IF_OPER_UP || 4350 operstate == IF_OPER_UNKNOWN /* backward compat */; 4351} 4352 4353/** 4354 * netif_device_present - is device available or removed 4355 * @dev: network device 4356 * 4357 * Check if device has not been removed from system. 4358 */ 4359static inline bool netif_device_present(const struct net_device *dev) 4360{ 4361 return test_bit(__LINK_STATE_PRESENT, &dev->state); 4362} 4363 4364void netif_device_detach(struct net_device *dev); 4365 4366void netif_device_attach(struct net_device *dev); 4367 4368/* 4369 * Network interface message level settings 4370 */ 4371 4372enum { 4373 NETIF_MSG_DRV_BIT, 4374 NETIF_MSG_PROBE_BIT, 4375 NETIF_MSG_LINK_BIT, 4376 NETIF_MSG_TIMER_BIT, 4377 NETIF_MSG_IFDOWN_BIT, 4378 NETIF_MSG_IFUP_BIT, 4379 NETIF_MSG_RX_ERR_BIT, 4380 NETIF_MSG_TX_ERR_BIT, 4381 NETIF_MSG_TX_QUEUED_BIT, 4382 NETIF_MSG_INTR_BIT, 4383 NETIF_MSG_TX_DONE_BIT, 4384 NETIF_MSG_RX_STATUS_BIT, 4385 NETIF_MSG_PKTDATA_BIT, 4386 NETIF_MSG_HW_BIT, 4387 NETIF_MSG_WOL_BIT, 4388 4389 /* When you add a new bit above, update netif_msg_class_names array 4390 * in net/ethtool/common.c 4391 */ 4392 NETIF_MSG_CLASS_COUNT, 4393}; 4394/* Both ethtool_ops interface and internal driver implementation use u32 */ 4395static_assert(NETIF_MSG_CLASS_COUNT <= 32); 4396 4397#define __NETIF_MSG_BIT(bit) ((u32)1 << (bit)) 4398#define __NETIF_MSG(name) __NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT) 4399 4400#define NETIF_MSG_DRV __NETIF_MSG(DRV) 4401#define NETIF_MSG_PROBE __NETIF_MSG(PROBE) 4402#define NETIF_MSG_LINK __NETIF_MSG(LINK) 4403#define NETIF_MSG_TIMER __NETIF_MSG(TIMER) 4404#define NETIF_MSG_IFDOWN __NETIF_MSG(IFDOWN) 4405#define NETIF_MSG_IFUP __NETIF_MSG(IFUP) 4406#define NETIF_MSG_RX_ERR __NETIF_MSG(RX_ERR) 4407#define NETIF_MSG_TX_ERR __NETIF_MSG(TX_ERR) 4408#define NETIF_MSG_TX_QUEUED __NETIF_MSG(TX_QUEUED) 4409#define NETIF_MSG_INTR __NETIF_MSG(INTR) 4410#define NETIF_MSG_TX_DONE __NETIF_MSG(TX_DONE) 4411#define NETIF_MSG_RX_STATUS __NETIF_MSG(RX_STATUS) 4412#define NETIF_MSG_PKTDATA __NETIF_MSG(PKTDATA) 4413#define NETIF_MSG_HW __NETIF_MSG(HW) 4414#define NETIF_MSG_WOL __NETIF_MSG(WOL) 4415 4416#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) 4417#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) 4418#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) 4419#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) 4420#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) 4421#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) 4422#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) 4423#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) 4424#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) 4425#define netif_msg_intr(p) 
((p)->msg_enable & NETIF_MSG_INTR) 4426#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) 4427#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) 4428#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) 4429#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) 4430#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) 4431 4432static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) 4433{ 4434 /* use default */ 4435 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) 4436 return default_msg_enable_bits; 4437 if (debug_value == 0) /* no output */ 4438 return 0; 4439 /* set low N bits */ 4440 return (1U << debug_value) - 1; 4441} 4442 4443static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) 4444{ 4445 spin_lock(&txq->_xmit_lock); 4446 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4447 WRITE_ONCE(txq->xmit_lock_owner, cpu); 4448} 4449 4450static inline bool __netif_tx_acquire(struct netdev_queue *txq) 4451{ 4452 __acquire(&txq->_xmit_lock); 4453 return true; 4454} 4455 4456static inline void __netif_tx_release(struct netdev_queue *txq) 4457{ 4458 __release(&txq->_xmit_lock); 4459} 4460 4461static inline void __netif_tx_lock_bh(struct netdev_queue *txq) 4462{ 4463 spin_lock_bh(&txq->_xmit_lock); 4464 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4465 WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); 4466} 4467 4468static inline bool __netif_tx_trylock(struct netdev_queue *txq) 4469{ 4470 bool ok = spin_trylock(&txq->_xmit_lock); 4471 4472 if (likely(ok)) { 4473 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4474 WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); 4475 } 4476 return ok; 4477} 4478 4479static inline void __netif_tx_unlock(struct netdev_queue *txq) 4480{ 4481 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4482 WRITE_ONCE(txq->xmit_lock_owner, -1); 4483 spin_unlock(&txq->_xmit_lock); 4484} 4485 4486static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) 4487{ 4488 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4489 WRITE_ONCE(txq->xmit_lock_owner, -1); 4490 spin_unlock_bh(&txq->_xmit_lock); 4491} 4492 4493/* 4494 * txq->trans_start can be read locklessly from dev_watchdog() 4495 */ 4496static inline void txq_trans_update(struct netdev_queue *txq) 4497{ 4498 if (txq->xmit_lock_owner != -1) 4499 WRITE_ONCE(txq->trans_start, jiffies); 4500} 4501 4502static inline void txq_trans_cond_update(struct netdev_queue *txq) 4503{ 4504 unsigned long now = jiffies; 4505 4506 if (READ_ONCE(txq->trans_start) != now) 4507 WRITE_ONCE(txq->trans_start, now); 4508} 4509 4510/* legacy drivers only, netdev_start_xmit() sets txq->trans_start */ 4511static inline void netif_trans_update(struct net_device *dev) 4512{ 4513 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); 4514 4515 txq_trans_cond_update(txq); 4516} 4517 4518/** 4519 * netif_tx_lock - grab network device transmit lock 4520 * @dev: network device 4521 * 4522 * Get network device transmit lock 4523 */ 4524void netif_tx_lock(struct net_device *dev); 4525 4526static inline void netif_tx_lock_bh(struct net_device *dev) 4527{ 4528 local_bh_disable(); 4529 netif_tx_lock(dev); 4530} 4531 4532void netif_tx_unlock(struct net_device *dev); 4533 4534static inline void netif_tx_unlock_bh(struct net_device *dev) 4535{ 4536 netif_tx_unlock(dev); 4537 local_bh_enable(); 4538} 4539 4540#define HARD_TX_LOCK(dev, txq, cpu) { \ 4541 if (!(dev)->lltx) { \ 4542 __netif_tx_lock(txq, cpu); \ 4543 } else { \ 4544 
__netif_tx_acquire(txq); \ 4545 } \ 4546} 4547 4548#define HARD_TX_TRYLOCK(dev, txq) \ 4549 (!(dev)->lltx ? \ 4550 __netif_tx_trylock(txq) : \ 4551 __netif_tx_acquire(txq)) 4552 4553#define HARD_TX_UNLOCK(dev, txq) { \ 4554 if (!(dev)->lltx) { \ 4555 __netif_tx_unlock(txq); \ 4556 } else { \ 4557 __netif_tx_release(txq); \ 4558 } \ 4559} 4560 4561static inline void netif_tx_disable(struct net_device *dev) 4562{ 4563 unsigned int i; 4564 int cpu; 4565 4566 local_bh_disable(); 4567 cpu = smp_processor_id(); 4568 spin_lock(&dev->tx_global_lock); 4569 for (i = 0; i < dev->num_tx_queues; i++) { 4570 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 4571 4572 __netif_tx_lock(txq, cpu); 4573 netif_tx_stop_queue(txq); 4574 __netif_tx_unlock(txq); 4575 } 4576 spin_unlock(&dev->tx_global_lock); 4577 local_bh_enable(); 4578} 4579 4580static inline void netif_addr_lock(struct net_device *dev) 4581{ 4582 unsigned char nest_level = 0; 4583 4584#ifdef CONFIG_LOCKDEP 4585 nest_level = dev->nested_level; 4586#endif 4587 spin_lock_nested(&dev->addr_list_lock, nest_level); 4588} 4589 4590static inline void netif_addr_lock_bh(struct net_device *dev) 4591{ 4592 unsigned char nest_level = 0; 4593 4594#ifdef CONFIG_LOCKDEP 4595 nest_level = dev->nested_level; 4596#endif 4597 local_bh_disable(); 4598 spin_lock_nested(&dev->addr_list_lock, nest_level); 4599} 4600 4601static inline void netif_addr_unlock(struct net_device *dev) 4602{ 4603 spin_unlock(&dev->addr_list_lock); 4604} 4605 4606static inline void netif_addr_unlock_bh(struct net_device *dev) 4607{ 4608 spin_unlock_bh(&dev->addr_list_lock); 4609} 4610 4611/* 4612 * dev_addrs walker. Should be used only for read access. Call with 4613 * rcu_read_lock held. 4614 */ 4615#define for_each_dev_addr(dev, ha) \ 4616 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list) 4617 4618/* These functions live elsewhere (drivers/net/net_init.c, but related) */ 4619 4620void ether_setup(struct net_device *dev); 4621 4622/* Allocate dummy net_device */ 4623struct net_device *alloc_netdev_dummy(int sizeof_priv); 4624 4625/* Support for loadable net-drivers */ 4626struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 4627 unsigned char name_assign_type, 4628 void (*setup)(struct net_device *), 4629 unsigned int txqs, unsigned int rxqs); 4630#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \ 4631 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1) 4632 4633#define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \ 4634 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \ 4635 count) 4636 4637int register_netdev(struct net_device *dev); 4638void unregister_netdev(struct net_device *dev); 4639 4640int devm_register_netdev(struct device *dev, struct net_device *ndev); 4641 4642/* General hardware address lists handling functions */ 4643int __hw_addr_sync(struct netdev_hw_addr_list *to_list, 4644 struct netdev_hw_addr_list *from_list, int addr_len); 4645void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, 4646 struct netdev_hw_addr_list *from_list, int addr_len); 4647int __hw_addr_sync_dev(struct netdev_hw_addr_list *list, 4648 struct net_device *dev, 4649 int (*sync)(struct net_device *, const unsigned char *), 4650 int (*unsync)(struct net_device *, 4651 const unsigned char *)); 4652int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list, 4653 struct net_device *dev, 4654 int (*sync)(struct net_device *, 4655 const unsigned char *, int), 4656 int (*unsync)(struct net_device *, 4657 
const unsigned char *, int)); 4658void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list, 4659 struct net_device *dev, 4660 int (*unsync)(struct net_device *, 4661 const unsigned char *, int)); 4662void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list, 4663 struct net_device *dev, 4664 int (*unsync)(struct net_device *, 4665 const unsigned char *)); 4666void __hw_addr_init(struct netdev_hw_addr_list *list); 4667 4668/* Functions used for device addresses handling */ 4669void dev_addr_mod(struct net_device *dev, unsigned int offset, 4670 const void *addr, size_t len); 4671 4672static inline void 4673__dev_addr_set(struct net_device *dev, const void *addr, size_t len) 4674{ 4675 dev_addr_mod(dev, 0, addr, len); 4676} 4677 4678static inline void dev_addr_set(struct net_device *dev, const u8 *addr) 4679{ 4680 __dev_addr_set(dev, addr, dev->addr_len); 4681} 4682 4683int dev_addr_add(struct net_device *dev, const unsigned char *addr, 4684 unsigned char addr_type); 4685int dev_addr_del(struct net_device *dev, const unsigned char *addr, 4686 unsigned char addr_type); 4687 4688/* Functions used for unicast addresses handling */ 4689int dev_uc_add(struct net_device *dev, const unsigned char *addr); 4690int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr); 4691int dev_uc_del(struct net_device *dev, const unsigned char *addr); 4692int dev_uc_sync(struct net_device *to, struct net_device *from); 4693int dev_uc_sync_multiple(struct net_device *to, struct net_device *from); 4694void dev_uc_unsync(struct net_device *to, struct net_device *from); 4695void dev_uc_flush(struct net_device *dev); 4696void dev_uc_init(struct net_device *dev); 4697 4698/** 4699 * __dev_uc_sync - Synchronize device's unicast list 4700 * @dev: device to sync 4701 * @sync: function to call if address should be added 4702 * @unsync: function to call if address should be removed 4703 * 4704 * Add newly added addresses to the interface, and release 4705 * addresses that have been deleted. 4706 */ 4707static inline int __dev_uc_sync(struct net_device *dev, 4708 int (*sync)(struct net_device *, 4709 const unsigned char *), 4710 int (*unsync)(struct net_device *, 4711 const unsigned char *)) 4712{ 4713 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync); 4714} 4715 4716/** 4717 * __dev_uc_unsync - Remove synchronized addresses from device 4718 * @dev: device to sync 4719 * @unsync: function to call if address should be removed 4720 * 4721 * Remove all addresses that were added to the device by dev_uc_sync(). 
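 *
 * Illustrative sketch (hypothetical driver): the sync helpers are
 * normally driven from ndo_set_rx_mode(); foo_write_filter() is a
 * made-up hardware accessor:
 *
 *	static int foo_uc_add(struct net_device *dev, const unsigned char *a)
 *	{
 *		return foo_write_filter(dev, a, true);
 *	}
 *
 *	static int foo_uc_del(struct net_device *dev, const unsigned char *a)
 *	{
 *		return foo_write_filter(dev, a, false);
 *	}
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		__dev_uc_sync(dev, foo_uc_add, foo_uc_del);
 *	}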
4722 */ 4723static inline void __dev_uc_unsync(struct net_device *dev, 4724 int (*unsync)(struct net_device *, 4725 const unsigned char *)) 4726{ 4727 __hw_addr_unsync_dev(&dev->uc, dev, unsync); 4728} 4729 4730/* Functions used for multicast addresses handling */ 4731int dev_mc_add(struct net_device *dev, const unsigned char *addr); 4732int dev_mc_add_global(struct net_device *dev, const unsigned char *addr); 4733int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr); 4734int dev_mc_del(struct net_device *dev, const unsigned char *addr); 4735int dev_mc_del_global(struct net_device *dev, const unsigned char *addr); 4736int dev_mc_sync(struct net_device *to, struct net_device *from); 4737int dev_mc_sync_multiple(struct net_device *to, struct net_device *from); 4738void dev_mc_unsync(struct net_device *to, struct net_device *from); 4739void dev_mc_flush(struct net_device *dev); 4740void dev_mc_init(struct net_device *dev); 4741 4742/** 4743 * __dev_mc_sync - Synchronize device's multicast list 4744 * @dev: device to sync 4745 * @sync: function to call if address should be added 4746 * @unsync: function to call if address should be removed 4747 * 4748 * Add newly added addresses to the interface, and release 4749 * addresses that have been deleted. 4750 */ 4751static inline int __dev_mc_sync(struct net_device *dev, 4752 int (*sync)(struct net_device *, 4753 const unsigned char *), 4754 int (*unsync)(struct net_device *, 4755 const unsigned char *)) 4756{ 4757 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync); 4758} 4759 4760/** 4761 * __dev_mc_unsync - Remove synchronized addresses from device 4762 * @dev: device to sync 4763 * @unsync: function to call if address should be removed 4764 * 4765 * Remove all addresses that were added to the device by dev_mc_sync(). 
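 *
 * Illustrative note: the multicast variants mirror the unicast sketch
 * above; the same hypothetical ndo_set_rx_mode() would also call
 * __dev_mc_sync(dev, foo_mc_add, foo_mc_del).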
4766 */ 4767static inline void __dev_mc_unsync(struct net_device *dev, 4768 int (*unsync)(struct net_device *, 4769 const unsigned char *)) 4770{ 4771 __hw_addr_unsync_dev(&dev->mc, dev, unsync); 4772} 4773 4774/* Functions used for secondary unicast and multicast support */ 4775void dev_set_rx_mode(struct net_device *dev); 4776int dev_set_promiscuity(struct net_device *dev, int inc); 4777int dev_set_allmulti(struct net_device *dev, int inc); 4778void netdev_state_change(struct net_device *dev); 4779void __netdev_notify_peers(struct net_device *dev); 4780void netdev_notify_peers(struct net_device *dev); 4781void netdev_features_change(struct net_device *dev); 4782/* Load a device via the kmod */ 4783void dev_load(struct net *net, const char *name); 4784struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 4785 struct rtnl_link_stats64 *storage); 4786void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 4787 const struct net_device_stats *netdev_stats); 4788void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s, 4789 const struct pcpu_sw_netstats __percpu *netstats); 4790void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s); 4791 4792enum { 4793 NESTED_SYNC_IMM_BIT, 4794 NESTED_SYNC_TODO_BIT, 4795}; 4796 4797#define __NESTED_SYNC_BIT(bit) ((u32)1 << (bit)) 4798#define __NESTED_SYNC(name) __NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT) 4799 4800#define NESTED_SYNC_IMM __NESTED_SYNC(IMM) 4801#define NESTED_SYNC_TODO __NESTED_SYNC(TODO) 4802 4803struct netdev_nested_priv { 4804 unsigned char flags; 4805 void *data; 4806}; 4807 4808bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); 4809struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, 4810 struct list_head **iter); 4811 4812/* iterate through upper list, must be called under RCU read lock */ 4813#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \ 4814 for (iter = &(dev)->adj_list.upper, \ 4815 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \ 4816 updev; \ 4817 updev = netdev_upper_get_next_dev_rcu(dev, &(iter))) 4818 4819int netdev_walk_all_upper_dev_rcu(struct net_device *dev, 4820 int (*fn)(struct net_device *upper_dev, 4821 struct netdev_nested_priv *priv), 4822 struct netdev_nested_priv *priv); 4823 4824bool netdev_has_upper_dev_all_rcu(struct net_device *dev, 4825 struct net_device *upper_dev); 4826 4827bool netdev_has_any_upper_dev(struct net_device *dev); 4828 4829void *netdev_lower_get_next_private(struct net_device *dev, 4830 struct list_head **iter); 4831void *netdev_lower_get_next_private_rcu(struct net_device *dev, 4832 struct list_head **iter); 4833 4834#define netdev_for_each_lower_private(dev, priv, iter) \ 4835 for (iter = (dev)->adj_list.lower.next, \ 4836 priv = netdev_lower_get_next_private(dev, &(iter)); \ 4837 priv; \ 4838 priv = netdev_lower_get_next_private(dev, &(iter))) 4839 4840#define netdev_for_each_lower_private_rcu(dev, priv, iter) \ 4841 for (iter = &(dev)->adj_list.lower, \ 4842 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \ 4843 priv; \ 4844 priv = netdev_lower_get_next_private_rcu(dev, &(iter))) 4845 4846void *netdev_lower_get_next(struct net_device *dev, 4847 struct list_head **iter); 4848 4849#define netdev_for_each_lower_dev(dev, ldev, iter) \ 4850 for (iter = (dev)->adj_list.lower.next, \ 4851 ldev = netdev_lower_get_next(dev, &(iter)); \ 4852 ldev; \ 4853 ldev = netdev_lower_get_next(dev, &(iter))) 4854 4855struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, 4856 struct 
list_head **iter); 4857int netdev_walk_all_lower_dev(struct net_device *dev, 4858 int (*fn)(struct net_device *lower_dev, 4859 struct netdev_nested_priv *priv), 4860 struct netdev_nested_priv *priv); 4861int netdev_walk_all_lower_dev_rcu(struct net_device *dev, 4862 int (*fn)(struct net_device *lower_dev, 4863 struct netdev_nested_priv *priv), 4864 struct netdev_nested_priv *priv); 4865 4866void *netdev_adjacent_get_private(struct list_head *adj_list); 4867void *netdev_lower_get_first_private_rcu(struct net_device *dev); 4868struct net_device *netdev_master_upper_dev_get(struct net_device *dev); 4869struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); 4870int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev, 4871 struct netlink_ext_ack *extack); 4872int netdev_master_upper_dev_link(struct net_device *dev, 4873 struct net_device *upper_dev, 4874 void *upper_priv, void *upper_info, 4875 struct netlink_ext_ack *extack); 4876void netdev_upper_dev_unlink(struct net_device *dev, 4877 struct net_device *upper_dev); 4878int netdev_adjacent_change_prepare(struct net_device *old_dev, 4879 struct net_device *new_dev, 4880 struct net_device *dev, 4881 struct netlink_ext_ack *extack); 4882void netdev_adjacent_change_commit(struct net_device *old_dev, 4883 struct net_device *new_dev, 4884 struct net_device *dev); 4885void netdev_adjacent_change_abort(struct net_device *old_dev, 4886 struct net_device *new_dev, 4887 struct net_device *dev); 4888void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); 4889void *netdev_lower_dev_get_private(struct net_device *dev, 4890 struct net_device *lower_dev); 4891void netdev_lower_state_changed(struct net_device *lower_dev, 4892 void *lower_state_info); 4893 4894/* RSS keys are 40 or 52 bytes long */ 4895#define NETDEV_RSS_KEY_LEN 52 4896extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; 4897void netdev_rss_key_fill(void *buffer, size_t len); 4898 4899int skb_checksum_help(struct sk_buff *skb); 4900int skb_crc32c_csum_help(struct sk_buff *skb); 4901int skb_csum_hwoffload_help(struct sk_buff *skb, 4902 const netdev_features_t features); 4903 4904struct netdev_bonding_info { 4905 ifslave slave; 4906 ifbond master; 4907}; 4908 4909struct netdev_notifier_bonding_info { 4910 struct netdev_notifier_info info; /* must be first */ 4911 struct netdev_bonding_info bonding_info; 4912}; 4913 4914void netdev_bonding_info_change(struct net_device *dev, 4915 struct netdev_bonding_info *bonding_info); 4916 4917#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK) 4918void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data); 4919#else 4920static inline void ethtool_notify(struct net_device *dev, unsigned int cmd, 4921 const void *data) 4922{ 4923} 4924#endif 4925 4926__be16 skb_network_protocol(struct sk_buff *skb, int *depth); 4927 4928static inline bool can_checksum_protocol(netdev_features_t features, 4929 __be16 protocol) 4930{ 4931 if (protocol == htons(ETH_P_FCOE)) 4932 return !!(features & NETIF_F_FCOE_CRC); 4933 4934 /* Assume this is an IP checksum (not SCTP CRC) */ 4935 4936 if (features & NETIF_F_HW_CSUM) { 4937 /* Can checksum everything */ 4938 return true; 4939 } 4940 4941 switch (protocol) { 4942 case htons(ETH_P_IP): 4943 return !!(features & NETIF_F_IP_CSUM); 4944 case htons(ETH_P_IPV6): 4945 return !!(features & NETIF_F_IPV6_CSUM); 4946 default: 4947 return false; 4948 } 4949} 4950 4951#ifdef CONFIG_BUG 4952void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb); 
4953#else 4954static inline void netdev_rx_csum_fault(struct net_device *dev, 4955 struct sk_buff *skb) 4956{ 4957} 4958#endif 4959/* rx skb timestamps */ 4960void net_enable_timestamp(void); 4961void net_disable_timestamp(void); 4962 4963static inline ktime_t netdev_get_tstamp(struct net_device *dev, 4964 const struct skb_shared_hwtstamps *hwtstamps, 4965 bool cycles) 4966{ 4967 const struct net_device_ops *ops = dev->netdev_ops; 4968 4969 if (ops->ndo_get_tstamp) 4970 return ops->ndo_get_tstamp(dev, hwtstamps, cycles); 4971 4972 return hwtstamps->hwtstamp; 4973} 4974 4975#ifndef CONFIG_PREEMPT_RT 4976static inline void netdev_xmit_set_more(bool more) 4977{ 4978 __this_cpu_write(softnet_data.xmit.more, more); 4979} 4980 4981static inline bool netdev_xmit_more(void) 4982{ 4983 return __this_cpu_read(softnet_data.xmit.more); 4984} 4985#else 4986static inline void netdev_xmit_set_more(bool more) 4987{ 4988 current->net_xmit.more = more; 4989} 4990 4991static inline bool netdev_xmit_more(void) 4992{ 4993 return current->net_xmit.more; 4994} 4995#endif 4996 4997static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops, 4998 struct sk_buff *skb, struct net_device *dev, 4999 bool more) 5000{ 5001 netdev_xmit_set_more(more); 5002 return ops->ndo_start_xmit(skb, dev); 5003} 5004 5005static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev, 5006 struct netdev_queue *txq, bool more) 5007{ 5008 const struct net_device_ops *ops = dev->netdev_ops; 5009 netdev_tx_t rc; 5010 5011 rc = __netdev_start_xmit(ops, skb, dev, more); 5012 if (rc == NETDEV_TX_OK) 5013 txq_trans_update(txq); 5014 5015 return rc; 5016} 5017 5018int netdev_class_create_file_ns(const struct class_attribute *class_attr, 5019 const void *ns); 5020void netdev_class_remove_file_ns(const struct class_attribute *class_attr, 5021 const void *ns); 5022 5023extern const struct kobj_ns_type_operations net_ns_type_operations; 5024 5025const char *netdev_drivername(const struct net_device *dev); 5026 5027static inline netdev_features_t netdev_intersect_features(netdev_features_t f1, 5028 netdev_features_t f2) 5029{ 5030 if ((f1 ^ f2) & NETIF_F_HW_CSUM) { 5031 if (f1 & NETIF_F_HW_CSUM) 5032 f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 5033 else 5034 f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 5035 } 5036 5037 return f1 & f2; 5038} 5039 5040static inline netdev_features_t netdev_get_wanted_features( 5041 struct net_device *dev) 5042{ 5043 return (dev->features & ~dev->hw_features) | dev->wanted_features; 5044} 5045netdev_features_t netdev_increment_features(netdev_features_t all, 5046 netdev_features_t one, netdev_features_t mask); 5047 5048/* Allow TSO to be used on a stacked device: 5049 * performing the GSO segmentation before the last device 5050 * is a performance improvement.
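 *
 * Illustrative sketch (hypothetical stacked master recomputing its
 * features from its lower devices, in the spirit of bonding/team):
 *
 *	netdev_features_t features = FOO_MASTER_FEATURES;	// made up
 *	struct net_device *lower;
 *	struct list_head *iter;
 *
 *	netdev_for_each_lower_dev(dev, lower, iter)
 *		features = netdev_add_tso_features(features,
 *						   lower->features);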
5051 */ 5052static inline netdev_features_t netdev_add_tso_features(netdev_features_t features, 5053 netdev_features_t mask) 5054{ 5055 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask); 5056} 5057 5058int __netdev_update_features(struct net_device *dev); 5059void netdev_update_features(struct net_device *dev); 5060void netdev_change_features(struct net_device *dev); 5061 5062void netif_stacked_transfer_operstate(const struct net_device *rootdev, 5063 struct net_device *dev); 5064 5065netdev_features_t passthru_features_check(struct sk_buff *skb, 5066 struct net_device *dev, 5067 netdev_features_t features); 5068netdev_features_t netif_skb_features(struct sk_buff *skb); 5069void skb_warn_bad_offload(const struct sk_buff *skb); 5070 5071static inline bool net_gso_ok(netdev_features_t features, int gso_type) 5072{ 5073 netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT; 5074 5075 /* check flags correspondence */ 5076 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); 5077 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT)); 5078 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); 5079 BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT)); 5080 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT)); 5081 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)); 5082 BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT)); 5083 BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT)); 5084 BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT)); 5085 BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT)); 5086 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); 5087 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); 5088 BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT)); 5089 BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); 5090 BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT)); 5091 BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT)); 5092 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT)); 5093 BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT)); 5094 BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT)); 5095 5096 return (features & feature) == feature; 5097} 5098 5099static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features) 5100{ 5101 return net_gso_ok(features, skb_shinfo(skb)->gso_type) && 5102 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); 5103} 5104 5105static inline bool netif_needs_gso(struct sk_buff *skb, 5106 netdev_features_t features) 5107{ 5108 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || 5109 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) && 5110 (skb->ip_summed != CHECKSUM_UNNECESSARY))); 5111} 5112 5113void netif_set_tso_max_size(struct net_device *dev, unsigned int size); 5114void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs); 5115void netif_inherit_tso_max(struct net_device *to, 5116 const struct net_device *from); 5117 5118static inline unsigned int 5119netif_get_gro_max_size(const struct net_device *dev, const struct sk_buff *skb) 5120{ 5121 /* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */ 5122 return skb->protocol == htons(ETH_P_IPV6) ? 

static inline unsigned int
netif_get_gro_max_size(const struct net_device *dev, const struct sk_buff *skb)
{
	/* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
	return skb->protocol == htons(ETH_P_IPV6) ?
	       READ_ONCE(dev->gro_max_size) :
	       READ_ONCE(dev->gro_ipv4_max_size);
}

static inline unsigned int
netif_get_gso_max_size(const struct net_device *dev, const struct sk_buff *skb)
{
	/* pairs with WRITE_ONCE() in netif_set_gso(_ipv4)_max_size() */
	return skb->protocol == htons(ETH_P_IPV6) ?
	       READ_ONCE(dev->gso_max_size) :
	       READ_ONCE(dev->gso_ipv4_max_size);
}

static inline bool netif_is_macsec(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACSEC;
}

static inline bool netif_is_macvlan(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN;
}

static inline bool netif_is_macvlan_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN_PORT;
}

static inline bool netif_is_bond_master(const struct net_device *dev)
{
	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_is_bond_slave(const struct net_device *dev)
{
	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_supports_nofcs(struct net_device *dev)
{
	return dev->priv_flags & IFF_SUPP_NOFCS;
}

static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
}

static inline bool netif_is_l3_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_MASTER;
}

static inline bool netif_is_l3_slave(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_SLAVE;
}

static inline int dev_sdif(const struct net_device *dev)
{
#ifdef CONFIG_NET_L3_MASTER_DEV
	if (netif_is_l3_slave(dev))
		return dev->ifindex;
#endif
	return 0;
}

static inline bool netif_is_bridge_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_EBRIDGE;
}

static inline bool netif_is_bridge_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_BRIDGE_PORT;
}

static inline bool netif_is_ovs_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_OPENVSWITCH;
}

static inline bool netif_is_ovs_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_OVS_DATAPATH;
}

static inline bool netif_is_any_bridge_master(const struct net_device *dev)
{
	return netif_is_bridge_master(dev) || netif_is_ovs_master(dev);
}

static inline bool netif_is_any_bridge_port(const struct net_device *dev)
{
	return netif_is_bridge_port(dev) || netif_is_ovs_port(dev);
}

static inline bool netif_is_team_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_TEAM;
}

static inline bool netif_is_team_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_TEAM_PORT;
}

static inline bool netif_is_lag_master(const struct net_device *dev)
{
	return netif_is_bond_master(dev) || netif_is_team_master(dev);
}

static inline bool netif_is_lag_port(const struct net_device *dev)
{
	return netif_is_bond_slave(dev) || netif_is_team_port(dev);
}

static inline bool netif_is_rxfh_configured(const struct net_device *dev)
{
	return dev->priv_flags & IFF_RXFH_CONFIGURED;
}
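
/*
 * Illustrative sketch (assumed caller-provided "dev"): the netif_is_*()
 * predicates above let generic code test a device's role without
 * touching driver internals, e.g. to refuse stacking on top of a
 * device that is already enslaved:
 *
 *	if (netif_is_lag_port(dev) || netif_is_bridge_port(dev))
 *		return -EBUSY;	// already a LAG or bridge port
 */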

static inline bool netif_is_failover(const struct net_device *dev)
{
	return dev->priv_flags & IFF_FAILOVER;
}

static inline bool netif_is_failover_slave(const struct net_device *dev)
{
	return dev->priv_flags & IFF_FAILOVER_SLAVE;
}

/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
static inline void netif_keep_dst(struct net_device *dev)
{
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
}

/* Return true if dev can't cope with MTU-sized frames that also need
 * a VLAN tag inserted.
 */
static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
{
	/* TODO: reserve and use an additional IFF bit, if we get more users */
	return netif_is_macsec(dev);
}

extern struct pernet_operations __net_initdata loopback_net_ops;

/* Logging, debugging and troubleshooting/diagnostic helpers. */

/* netdev_printk helpers, similar to dev_printk */

static inline const char *netdev_name(const struct net_device *dev)
{
	if (!dev->name[0] || strchr(dev->name, '%'))
		return "(unnamed net_device)";
	return dev->name;
}

static inline const char *netdev_reg_state(const struct net_device *dev)
{
	u8 reg_state = READ_ONCE(dev->reg_state);

	switch (reg_state) {
	case NETREG_UNINITIALIZED: return " (uninitialized)";
	case NETREG_REGISTERED: return "";
	case NETREG_UNREGISTERING: return " (unregistering)";
	case NETREG_UNREGISTERED: return " (unregistered)";
	case NETREG_RELEASED: return " (released)";
	case NETREG_DUMMY: return " (dummy)";
	}

	WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, reg_state);
	return " (unknown)";
}

#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)

/*
 * netdev_WARN() acts like dev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s%s: " format, netdev_name(dev),	\
	     netdev_reg_state(dev), ##args)

#define netdev_WARN_ONCE(dev, format, args...)				\
	WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev),	\
		  netdev_reg_state(dev), ##args)

/*
 * The list of packet types we will receive (as opposed to discard)
 * and the routines to invoke.
 *
 * Why 16? Because with 16 the only overlap we get on a hash of the
 * low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *		0800	IP
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */
#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;

extern struct net_device *blackhole_netdev;

/* Note: avoid these macros in the fast path; prefer per-cpu or
 * per-queue counters.
 */
#define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
#define DEV_STATS_ADD(DEV, FIELD, VAL) 		\
		atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
#define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD)

#endif /* _LINUX_NETDEVICE_H */