/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <net/net_namespace.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>
#include <net/xdp.h>

#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
#include <linux/hashtable.h>

struct netpoll_info;
struct device;
struct ethtool_ops;
struct phy_device;
struct dsa_port;
struct ip_tunnel_parm;
struct macsec_context;
struct macsec_ops;

struct sfp_bus;
/* 802.11 specific */
struct wireless_dev;
/* 802.15.4 specific */
struct wpan_dev;
struct mpls_dev;
/* UDP Tunnel offloads */
struct udp_tunnel_info;
struct udp_tunnel_nic_info;
struct udp_tunnel_nic;
struct bpf_prog;
struct xdp_buff;

void synchronize_net(void);
void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops);

/* Backlog congestion levels */
#define NET_RX_SUCCESS	0	/* keep 'em coming, baby */
#define NET_RX_DROP	1	/* packet dropped */

#define MAX_NEST_DEV 8

/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped */
#define NET_XMIT_CN		0x02	/* congestion notification */
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively.
 */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
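
/*
 * Example (illustrative, not part of the original header): an L3 sender
 * typically folds the congestion-notification code into "success" when
 * propagating the qdisc verdict upwards:
 *
 *	int err = dev_queue_xmit(skb);
 *
 *	return net_xmit_eval(err);	// NET_XMIT_CN is reported as 0
 */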

/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}
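
/*
 * Example (illustrative sketch; the foo_* names are placeholders, not part
 * of this header): a driver's ndo_start_xmit() should stop its own queue
 * when the ring fills up instead of relying on NETDEV_TX_BUSY:
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		if (foo_tx_ring_full(priv)) {
 *			netif_stop_queue(dev);
 *			return NETDEV_TX_BUSY;	// rare; stop the queue first
 *		}
 *		foo_post_descriptor(priv, skb);
 *		return NETDEV_TX_OK;
 *	}
 */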

/*
 * Compute the worst-case header length according to the protocols
 * used.
 */

#if defined(CONFIG_HYPERV_NET)
# define LL_MAX_HEADER 128
#elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

/*
 * Old network device statistics. Fields are native words
 * (unsigned long) so they can be read and written atomically.
 */

struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};


#include <linux/cache.h>
#include <linux/skbuff.h>

#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key_false rps_needed;
extern struct static_key_false rfs_needed;
#endif

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_UNICAST	3
#define NETDEV_HW_ADDR_T_MULTICAST	4
	bool			global_use;
	int			sync_cnt;
	int			refcount;
	int			synced;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)

struct hh_cache {
	unsigned int	hh_len;
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len) + (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len + (dev)->needed_headroom) \
	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len + (dev)->needed_headroom + (extra)) \
	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
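
/*
 * Example (illustrative, not part of the original header): reserving
 * link-layer headroom when building an skb for transmission on @dev:
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */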

struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh,
			 __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
	bool	(*validate)(const char *ll_header, unsigned int len);
	__be16	(*parse_protocol)(const struct sk_buff *skb);
};

/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
	__LINK_STATE_TESTING,
};


/*
 * This structure holds boot-time configured netdevice settings. They
 * are then used during device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

int __init netdev_boot_setup(char *str);

struct gro_list {
	struct list_head	list;
	int			count;
};

/*
 * size of gro hash buckets, must be less than the number of bits in
 * napi_struct::gro_bitmask
 */
#define GRO_HASH_BUCKETS	8

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit. This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-CPU poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			defer_hard_irqs_count;
	unsigned long		gro_bitmask;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	int			poll_owner;
#endif
	struct net_device	*dev;
	struct gro_list		gro_hash[GRO_HASH_BUCKETS];
	struct sk_buff		*skb;
	struct list_head	rx_list;	/* Pending GRO_NORMAL skbs */
	int			rx_count;	/* length of rx_list */
	struct hrtimer		timer;
	struct list_head	dev_list;
	struct hlist_node	napi_hash_node;
	unsigned int		napi_id;
};

enum {
	NAPI_STATE_SCHED,		/* Poll is scheduled */
	NAPI_STATE_MISSED,		/* reschedule a napi */
	NAPI_STATE_DISABLE,		/* Disable pending */
	NAPI_STATE_NPSVC,		/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_LISTED,		/* NAPI added to system lists */
	NAPI_STATE_NO_BUSY_POLL,	/* Do not add in napi_hash, no busy polling */
	NAPI_STATE_IN_BUSY_POLL,	/* sk_busy_loop() owns this NAPI */
	NAPI_STATE_PREFER_BUSY_POLL,	/* prefer busy-polling over softirq processing */
};

enum {
	NAPIF_STATE_SCHED		= BIT(NAPI_STATE_SCHED),
	NAPIF_STATE_MISSED		= BIT(NAPI_STATE_MISSED),
	NAPIF_STATE_DISABLE		= BIT(NAPI_STATE_DISABLE),
	NAPIF_STATE_NPSVC		= BIT(NAPI_STATE_NPSVC),
	NAPIF_STATE_LISTED		= BIT(NAPI_STATE_LISTED),
	NAPIF_STATE_NO_BUSY_POLL	= BIT(NAPI_STATE_NO_BUSY_POLL),
	NAPIF_STATE_IN_BUSY_POLL	= BIT(NAPI_STATE_IN_BUSY_POLL),
	NAPIF_STATE_PREFER_BUSY_POLL	= BIT(NAPI_STATE_PREFER_BUSY_POLL),
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
	GRO_CONSUMED,
};
typedef enum gro_result gro_result_t;

/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister a rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler decides the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on the exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
 * delivered, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */

enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
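
/*
 * Example (illustrative sketch; the foo_* names are placeholders): an
 * rx_handler that diverts frames to an upper device, roughly what a
 * bridge or bonding driver does:
 *
 *	static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *		struct net_device *upper = foo_get_upper_dev(skb->dev);
 *
 *		if (!upper)
 *			return RX_HANDLER_PASS;
 *		skb->dev = upper;
 *		return RX_HANDLER_ANOTHER;	// reprocess on the new device
 *	}
 *
 * registered, under rtnl_lock, with
 * netdev_rx_handler_register(dev, foo_handle_frame, NULL).
 */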

void __napi_schedule(struct napi_struct *n);
void __napi_schedule_irqoff(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

static inline bool napi_prefer_busy_poll(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
}

bool napi_schedule_prep(struct napi_struct *n);

/**
 * napi_schedule - schedule NAPI poll
 * @n: NAPI context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/**
 * napi_schedule_irqoff - schedule NAPI poll
 * @n: NAPI context
 *
 * Variant of napi_schedule(), assuming hard irqs are masked.
 */
static inline void napi_schedule_irqoff(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule_irqoff(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}

bool napi_complete_done(struct napi_struct *n, int work_done);
/**
 * napi_complete - NAPI processing complete
 * @n: NAPI context
 *
 * Mark NAPI processing as complete.
 * Consider using napi_complete_done() instead.
 * Return false if device should avoid rearming interrupts.
 */
static inline bool napi_complete(struct napi_struct *n)
{
	return napi_complete_done(n, 0);
}

/**
 * napi_disable - prevent NAPI from scheduling
 * @n: NAPI context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
void napi_disable(struct napi_struct *n);

/**
 * napi_enable - enable NAPI scheduling
 * @n: NAPI context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_atomic();
	clear_bit(NAPI_STATE_SCHED, &n->state);
	clear_bit(NAPI_STATE_NPSVC, &n->state);
}

/**
 * napi_synchronize - wait until NAPI is not running
 * @n: NAPI context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	if (IS_ENABLED(CONFIG_SMP))
		while (test_bit(NAPI_STATE_SCHED, &n->state))
			msleep(1);
	else
		barrier();
}
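
/*
 * Example (illustrative sketch; the foo_* names are placeholders, and the
 * napi member is assumed to have been registered with netif_napi_add()):
 * the usual division of labour between a device interrupt and its NAPI
 * poll routine:
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct foo_priv *priv = dev_id;
 *
 *		foo_disable_irqs(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *		int work = foo_clean_rx(priv, budget);
 *
 *		if (work < budget && napi_complete_done(napi, work))
 *			foo_enable_irqs(priv);
 *		return work;
 *	}
 */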

/**
 * napi_if_scheduled_mark_missed - if napi is running, set the
 *   NAPIF_STATE_MISSED
 * @n: NAPI context
 *
 * If napi is running, set the NAPIF_STATE_MISSED, and return true if
 * NAPI is scheduled.
 */
static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
{
	unsigned long val, new;

	do {
		val = READ_ONCE(n->state);
		if (val & NAPIF_STATE_DISABLE)
			return true;

		if (!(val & NAPIF_STATE_SCHED))
			return false;

		new = val | NAPIF_STATE_MISSED;
	} while (cmpxchg(&n->state, val, new) != val);

	return true;
}

enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
};

#define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
#define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)

#define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN	(QUEUE_STATE_ANY_XOFF | \
					 QUEUE_STATE_FROZEN)
#define QUEUE_STATE_DRV_XOFF_OR_FROZEN	(QUEUE_STATE_DRV_XOFF | \
					 QUEUE_STATE_FROZEN)

/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
 * netif_tx_* functions below are used to manipulate this flag. The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently. The netif_xmit_*stopped() functions below are called
 * to check if the queue has been stopped by the driver or stack (either
 * of the XOFF bits are set in the state). Drivers should not need to call
 * the netif_xmit_*stopped() functions; they should only be using netif_tx_*.
 */

struct netdev_queue {
/*
 * read-mostly part
 */
	struct net_device	*dev;
	struct Qdisc __rcu	*qdisc;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
	unsigned long		tx_maxrate;
	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	unsigned long		trans_timeout;

	/* Subordinate device that the queue has been assigned to */
	struct net_device	*sb_dev;
#ifdef CONFIG_XDP_SOCKETS
	struct xsk_buff_pool	*pool;
#endif
/*
 * write-mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * Time (in jiffies) of last Tx
	 */
	unsigned long		trans_start;

	unsigned long		state;

#ifdef CONFIG_BQL
	struct dql		dql;
#endif
} ____cacheline_aligned_in_smp;

extern int sysctl_fb_tunnels_only_for_init_net;
extern int sysctl_devconf_inherit_init_net;

/*
 * sysctl_fb_tunnels_only_for_init_net == 0 : For all netns
 *                                     == 1 : For initns only
 *                                     == 2 : For none.
 */
static inline bool net_has_fallback_tunnels(const struct net *net)
{
	return !IS_ENABLED(CONFIG_SYSCTL) ||
	       !sysctl_fb_tunnels_only_for_init_net ||
	       (net == &init_net && sysctl_fb_tunnels_only_for_init_net == 1);
}

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}
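
/*
 * Example (illustrative; assumes CONFIG_BQL and placeholder variables):
 * byte-queue-limit accounting must be paired between a driver's transmit
 * and completion paths, feeding the struct dql embedded above:
 *
 *	// in ndo_start_xmit(), after posting the descriptor:
 *	netdev_tx_sent_queue(netdev_get_tx_queue(dev, qid), skb->len);
 *
 *	// in the TX completion handler:
 *	netdev_tx_completed_queue(netdev_get_tx_queue(dev, qid),
 *				  pkts_done, bytes_done);
 */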

#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length. The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int	len;
	struct rcu_head	rcu;
	u16		cpus[];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16		cpu;
	u16		filter;
	unsigned int	last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int		mask;
	struct rcu_head		rcu;
	struct rps_dev_flow	flows[];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
				       ((_num) * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 * Each entry is a 32bit value. Upper part is the high-order bits
 * of flow hash, lower part is CPU number.
 * rps_cpu_mask is used to partition the space, depending on number of
 * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
 * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
 * meaning we use 32-6=26 bits for the hash.
 */
struct rps_sock_flow_table {
	u32	mask;

	u32	ents[] ____cacheline_aligned_in_smp;
};
#define	RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))

#define RPS_NO_CPU 0xffff

extern u32 rps_cpu_mask;
extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;

static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int index = hash & table->mask;
		u32 val = hash & ~rps_cpu_mask;

		/* We only give a hint, preemption can change CPU under us */
		val |= raw_smp_processor_id();

		if (table->ents[index] != val)
			table->ents[index] = val;
	}
}

#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
			 u16 filter_id);
#endif
#endif /* CONFIG_RPS */
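
/*
 * Example (illustrative, assumes CONFIG_RPS and a cpumask @mask): the
 * RPS_MAP_SIZE() helper above is what callers use to size a variable-length
 * rps_map allocation:
 *
 *	struct rps_map *map;
 *
 *	map = kzalloc(RPS_MAP_SIZE(cpumask_weight(mask)), GFP_KERNEL);
 *	if (!map)
 *		return -ENOMEM;
 *	map->len = cpumask_weight(mask);
 */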

/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
#ifdef CONFIG_RPS
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
#endif
	struct kobject			kobj;
	struct net_device		*dev;
	struct xdp_rxq_info		xdp_rxq;
#ifdef CONFIG_XDP_SOCKETS
	struct xsk_buff_pool		*pool;
#endif
} ____cacheline_aligned_in_smp;

/*
 * RX queue sysfs structures and functions.
 */
struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
			 const char *buf, size_t len);
};

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length. The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int	len;
	unsigned int	alloc_len;
	struct rcu_head	rcu;
	u16		queues[];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
			    - sizeof(struct xps_map)) / sizeof(u16))

/*
 * This structure holds all XPS maps for device. Maps are indexed by CPU.
 */
struct xps_dev_maps {
	struct rcu_head		rcu;
	struct xps_map __rcu	*attr_map[];	/* Either CPUs map or RXQs map */
};

#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) +	\
	(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))

#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
	(_rxqs * (_tcs) * sizeof(struct xps_map *)))

#endif /* CONFIG_XPS */

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure holds information about the device configured to
 * run the FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

#define MAX_PHYS_ITEM_ID_LEN 32

/* This structure holds a unique identifier to identify some
 * physical item (port for example) used by a netdevice.
 */
struct netdev_phys_item_id {
	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
	unsigned char id_len;
};

static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
					    struct netdev_phys_item_id *b)
{
	return a->id_len == b->id_len &&
	       memcmp(a->id, b->id, a->id_len) == 0;
}

typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
				       struct sk_buff *skb,
				       struct net_device *sb_dev);

enum tc_setup_type {
	TC_SETUP_QDISC_MQPRIO,
	TC_SETUP_CLSU32,
	TC_SETUP_CLSFLOWER,
	TC_SETUP_CLSMATCHALL,
	TC_SETUP_CLSBPF,
	TC_SETUP_BLOCK,
	TC_SETUP_QDISC_CBS,
	TC_SETUP_QDISC_RED,
	TC_SETUP_QDISC_PRIO,
	TC_SETUP_QDISC_MQ,
	TC_SETUP_QDISC_ETF,
	TC_SETUP_ROOT_QDISC,
	TC_SETUP_QDISC_GRED,
	TC_SETUP_QDISC_TAPRIO,
	TC_SETUP_FT,
	TC_SETUP_QDISC_ETS,
	TC_SETUP_QDISC_TBF,
	TC_SETUP_QDISC_FIFO,
};
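
/*
 * Example (illustrative sketch; the foo_* names are placeholders): a
 * driver's ndo_setup_tc() dispatches on the setup type and rejects
 * anything it cannot offload:
 *
 *	static int foo_setup_tc(struct net_device *dev,
 *				enum tc_setup_type type, void *type_data)
 *	{
 *		switch (type) {
 *		case TC_SETUP_QDISC_MQPRIO:
 *			return foo_setup_mqprio(dev, type_data);
 *		case TC_SETUP_BLOCK:
 *			return foo_setup_block(dev, type_data);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */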

/* These structures hold the attributes of bpf state that are being passed
 * to the netdevice through the bpf op.
 */
enum bpf_netdev_command {
	/* Set or clear a bpf program used in the earliest stages of packet
	 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
	 * is responsible for calling bpf_prog_put on any old progs that are
	 * stored. In case of error, the callee need not release the new prog
	 * reference, but on success it takes ownership and must bpf_prog_put
	 * when it is no longer used.
	 */
	XDP_SETUP_PROG,
	XDP_SETUP_PROG_HW,
	/* BPF program for offload callbacks, invoked at program load time. */
	BPF_OFFLOAD_MAP_ALLOC,
	BPF_OFFLOAD_MAP_FREE,
	XDP_SETUP_XSK_POOL,
};

struct bpf_prog_offload_ops;
struct netlink_ext_ack;
struct xdp_umem;
struct xdp_dev_bulk_queue;
struct bpf_xdp_link;

enum bpf_xdp_mode {
	XDP_MODE_SKB = 0,
	XDP_MODE_DRV = 1,
	XDP_MODE_HW = 2,
	__MAX_XDP_MODE
};

struct bpf_xdp_entity {
	struct bpf_prog *prog;
	struct bpf_xdp_link *link;
};

struct netdev_bpf {
	enum bpf_netdev_command command;
	union {
		/* XDP_SETUP_PROG */
		struct {
			u32 flags;
			struct bpf_prog *prog;
			struct netlink_ext_ack *extack;
		};
		/* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
		struct {
			struct bpf_offloaded_map *offmap;
		};
		/* XDP_SETUP_XSK_POOL */
		struct {
			struct xsk_buff_pool *pool;
			u16 queue_id;
		} xsk;
	};
};

/* Flags for ndo_xsk_wakeup. */
#define XDP_WAKEUP_RX (1 << 0)
#define XDP_WAKEUP_TX (1 << 1)
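
/*
 * Example (illustrative sketch; the foo_* names are placeholders): a
 * driver's ndo_bpf() dispatches on the command above and rejects commands
 * it does not implement:
 *
 *	static int foo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 *	{
 *		switch (bpf->command) {
 *		case XDP_SETUP_PROG:
 *			return foo_xdp_setup(dev, bpf->prog, bpf->extack);
 *		case XDP_SETUP_XSK_POOL:
 *			return foo_xsk_setup(dev, bpf->xsk.pool,
 *					     bpf->xsk.queue_id);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */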

#ifdef CONFIG_XFRM_OFFLOAD
struct xfrmdev_ops {
	int	(*xdo_dev_state_add) (struct xfrm_state *x);
	void	(*xdo_dev_state_delete) (struct xfrm_state *x);
	void	(*xdo_dev_state_free) (struct xfrm_state *x);
	bool	(*xdo_dev_offload_ok) (struct sk_buff *skb,
				       struct xfrm_state *x);
	void	(*xdo_dev_state_advance_esn) (struct xfrm_state *x);
};
#endif

struct dev_ifalias {
	struct rcu_head rcuhead;
	char ifalias[];
};

struct devlink;
struct tlsdev_ops;

struct netdev_name_node {
	struct hlist_node hlist;
	struct list_head list;
	struct net_device *dev;
	const char *name;
};

int netdev_name_node_alt_create(struct net_device *dev, const char *name);
int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);

struct netdev_net_notifier {
	struct list_head list;
	struct notifier_block *nb;
};

/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *	This function is called once when a network device is registered.
 *	The network device can use this for any late stage initialization
 *	or semantic validation. It can fail with an error code which will
 *	be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *	This function is called when device is unregistered or when registration
 *	fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *	This function is called when a network device transitions to the up
 *	state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *	This function is called when a network device transitions to the down
 *	state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop
 *	the queue before that can happen; it's for obsolete devices and weird
 *	corner cases, but the stack really does a non-trivial amount
 *	of useless work if you return NETDEV_TX_BUSY.
 *	Required; cannot be NULL.
 *
 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
 *                                         struct net_device *dev,
 *                                         netdev_features_t features);
 *	Called by core transmit path to determine if device is capable of
 *	performing offload operations on a given packet. This is to give
 *	the device an opportunity to implement any restrictions that cannot
 *	be otherwise expressed by feature flags. The check is called with
 *	the set of features that the stack has calculated and it returns
 *	those the driver believes to be appropriate.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
 *                         struct net_device *sb_dev);
 *	Called to decide which queue to use when device supports multiple
 *	transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow device receiver to make
 *	changes to configuration when multicast or promiscuous is enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	This function is called when the device changes its address list
 *	filtering. If the driver handles unicast address filtering, it
 *	should set IFF_UNICAST_FLT in its priv_flags.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	This function is called when the Media Access Control address
 *	needs to be changed. If this interface is not defined, the
 *	MAC address cannot be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *	Test if Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Called when a user requests an ioctl which can't be handled by
 *	the generic interface code. If not defined, ioctls return a
 *	not-supported error code.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *	Used to set a network device's bus interface parameters. This interface
 *	is retained for legacy reasons; new devices should use the bus
 *	interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when a user wants to change the Maximum Transfer Unit
 *	of a device.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);
 *	Callback used when the transmitter has not made any progress
 *	for dev->watchdog ticks.
 *
 * void (*ndo_get_stats64)(struct net_device *dev,
 *                         struct rtnl_link_stats64 *storage);
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *	Called when a user wants to get the network device usage
 *	statistics. Drivers must do one of the following:
 *	1. Define @ndo_get_stats64 to fill in a zero-initialised
 *	   rtnl_link_stats64 structure passed by the caller.
 *	2. Define @ndo_get_stats to update a net_device_stats structure
 *	   (which should normally be dev->stats) and return a pointer to
 *	   it. The structure may be changed asynchronously only if each
 *	   field is written atomically.
 *	3. Update dev->stats asynchronously and atomically, and define
 *	   neither operation.
 *
 * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id)
 *	Return true if this device supports offload stats of this attr_id.
 *
 * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
 *                              void *attr_data)
 *	Get statistics for offload operations by attr_id. Write it into the
 *	attr_data pointer.
 *
 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If device supports VLAN filtering this function is called when a
 *	VLAN id is registered.
 *
 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If device supports VLAN filtering this function is called when a
 *	VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 *
 *	SR-IOV management functions.
 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
 *                        u8 qos, __be16 proto);
 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
 *                        int max_tx_rate);
 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_config)(struct net_device *dev,
 *                          int vf, struct ifla_vf_info *ivf);
 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
 *                        struct nlattr *port[]);
 *
 *	Enable or disable the VF ability to query its RSS Redirection Table and
 *	Hash Key. This is needed since on some devices VFs share this
 *	information with the PF and querying it may introduce a theoretical
 *	security risk.
 * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
 * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type,
 *                     void *type_data);
 *	Called to setup any 'tc' scheduler, classifier or action on @dev.
 *	This is always called from the stack with the rtnl lock held and netif
 *	tx queues stopped. This allows the netdevice to perform queue
 *	management safely.
 *
 *	Fiber Channel over Ethernet (FCoE) offload functions.
 * int (*ndo_fcoe_enable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to start using LLD for FCoE
 *	so the underlying device can perform whatever needed configuration or
 *	initialization to support acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_disable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to stop using LLD for FCoE
 *	so the underlying device can perform whatever needed clean-ups to
 *	stop supporting acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
 *                           struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Initiator wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
 *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
 *	indicated by the FC exchange id 'xid', so the underlying device can
 *	clean up and reuse resources for later DDP requests.
 *
 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
 *                            struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Target wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 *                             struct netdev_fcoe_hbainfo *hbainfo);
 *	Called when the FCoE Protocol stack wants information on the underlying
 *	device. This information is utilized by the FCoE protocol stack to
 *	register attributes with Fiber Channel management service as per the
 *	FC-GS Fabric Device Management Information (FDMI) specification.
 *
 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
 *	Called when the underlying device wants to override default World Wide
 *	Name (WWN) generation mechanism in FCoE protocol stack to pass its own
 *	World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
 *	protocol stack to use.
 *
 *	RFS acceleration.
 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
 *                          u16 rxq_index, u32 flow_id);
 *	Set hardware filter for RFS. rxq_index is the target queue index;
 *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
 *	Return the filter ID on success, or a negative error code.
 *
 *	Slave management functions (for bridge, bonding, etc).
 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to make another netdev an underling.
 *
 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to release previously enslaved netdev.
 *
 * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev,
 *                                          struct sk_buff *skb,
 *                                          bool all_slaves);
 *	Get the xmit slave of master device. If all_slaves is true, the
 *	function assumes all the slaves can transmit.
 *
 *	Feature/offload setting functions.
 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 *                                       netdev_features_t features);
 *	Adjusts the requested feature flags according to device-specific
 *	constraints, and returns the resulting flags. Must not modify
 *	the device state.
 *
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 *	Called to update device configuration to new features. Passed
 *	feature set might be less than what was returned by ndo_fix_features().
 *	Must return >0 or -errno if it changed dev->features itself.
 *
 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
 *                    struct net_device *dev,
 *                    const unsigned char *addr, u16 vid, u16 flags,
 *                    struct netlink_ext_ack *extack);
 *	Adds an FDB entry to dev for addr.
 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
 *                    struct net_device *dev,
 *                    const unsigned char *addr, u16 vid)
 *	Deletes the FDB entry from dev corresponding to addr.
 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
 *                     struct net_device *dev, struct net_device *filter_dev,
 *                     int *idx)
 *	Used to add FDB entries to dump requests. Implementers should add
 *	entries to skb and update idx with the number of entries.
 *
 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
 *                           u16 flags, struct netlink_ext_ack *extack)
 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
 *                           struct net_device *dev, u32 filter_mask,
 *                           int nlflags)
 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
 *                           u16 flags);
 *
 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
 *	Called to change device carrier. Soft-devices (like dummy, team, etc)
 *	which do not represent real hardware may define this to allow their
 *	userspace components to manage their virtual carrier state. Devices
 *	that determine carrier state from physical hardware properties (e.g.
 *	network cables) or protocol-dependent mechanisms (e.g.
 *	USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
 *
 * int (*ndo_get_phys_port_id)(struct net_device *dev,
 *                             struct netdev_phys_item_id *ppid);
 *	Called to get ID of physical port of this device. If driver does
 *	not implement this, it is assumed that the hw is not able to have
 *	multiple net devices on single physical port.
 *
 * int (*ndo_get_port_parent_id)(struct net_device *dev,
 *                               struct netdev_phys_item_id *ppid)
 *	Called to get the parent ID of the physical port of this device.
 *
 * void (*ndo_udp_tunnel_add)(struct net_device *dev,
 *                            struct udp_tunnel_info *ti);
 *	Called by UDP tunnel to notify a driver about the UDP port and socket
 *	address family that a UDP tunnel is listening to. It is called only
 *	when a new port starts listening. The operation is protected by the
 *	RTNL.
 *
 * void (*ndo_udp_tunnel_del)(struct net_device *dev,
 *                            struct udp_tunnel_info *ti);
 *	Called by UDP tunnel to notify the driver about a UDP port and socket
 *	address family that the UDP tunnel is not listening to anymore. The
 *	operation is protected by the RTNL.
 *
 * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
 *                               struct net_device *dev)
 *	Called by upper layer devices to accelerate switching or other
 *	station functionality into hardware. 'pdev' is the lowerdev
 *	to use for the offload and 'dev' is the net device that will
 *	back the offload. Returns a pointer to the private structure
 *	the upper layer will maintain.
 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
 *	Called by upper layer device to delete the station created
 *	by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
 *	the station and priv is the structure returned by the add
 *	operation.
 * int (*ndo_set_tx_maxrate)(struct net_device *dev,
 *                           int queue_index, u32 maxrate);
 *	Called when a user wants to set a max-rate limitation of specific
 *	TX queue.
 * int (*ndo_get_iflink)(const struct net_device *dev);
 *	Called to get the iflink value of this device.
 * int (*ndo_change_proto_down)(struct net_device *dev,
 *                              bool proto_down);
 *	This function is used to pass protocol port error state information
 *	to the switch driver. The switch driver can react to the proto_down
 *	by doing a phys down on the associated switch port.
 * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
 *	This function is used to get egress tunnel information for given skb.
 *	This is useful for retrieving outer tunnel header parameters while
 *	sampling packet.
 * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
 *	This function is used to specify the headroom that the skb must
 *	consider when allocating an skb during packet reception. Setting an
 *	appropriate rx headroom value allows avoiding skb head copy on
 *	forward. Setting a negative value resets the rx headroom to the
 *	default value.
 * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf);
 *	This function is used to set or query state related to XDP on the
 *	netdevice and manage BPF offload. See definition of
 *	enum bpf_netdev_command for details.
 * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp,
 *                     u32 flags);
 *	This function is used to submit @n XDP packets for transmit on a
 *	netdevice. Returns number of frames successfully transmitted, frames
 *	that got dropped are freed/returned via xdp_return_frame().
 *	A negative return value means a general error invoking the ndo; no
 *	frames were transmitted and the core caller will free all frames.
 * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags);
 *	This function is used to wake up the softirq, ksoftirqd or kthread
 *	responsible for sending and/or receiving packets on a specific
 *	queue id bound to an AF_XDP socket. The flags field specifies if
 *	only RX, only Tx, or both should be woken up using the flags
 *	XDP_WAKEUP_RX and XDP_WAKEUP_TX.
 * struct devlink_port *(*ndo_get_devlink_port)(struct net_device *dev);
 *	Get devlink port instance associated with a given netdev.
 *	Called with a reference on the netdevice and devlink locks only,
 *	rtnl_lock is not held.
 * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p,
 *                       int cmd);
 *	Add, change, delete or get information on an IPv4 tunnel.
 * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev);
 *	If a device is paired with a peer device, return the peer instance.
 *	The caller must be under RCU read context.
 */
struct net_device_ops {
	int		(*ndo_init)(struct net_device *dev);
	void		(*ndo_uninit)(struct net_device *dev);
	int		(*ndo_open)(struct net_device *dev);
	int		(*ndo_stop)(struct net_device *dev);
	netdev_tx_t	(*ndo_start_xmit)(struct sk_buff *skb,
					  struct net_device *dev);
	netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features);
	u16		(*ndo_select_queue)(struct net_device *dev,
					    struct sk_buff *skb,
					    struct net_device *sb_dev);
	void		(*ndo_change_rx_flags)(struct net_device *dev,
					       int flags);
	void		(*ndo_set_rx_mode)(struct net_device *dev);
	int		(*ndo_set_mac_address)(struct net_device *dev,
					       void *addr);
	int		(*ndo_validate_addr)(struct net_device *dev);
	int		(*ndo_do_ioctl)(struct net_device *dev,
					struct ifreq *ifr, int cmd);
	int		(*ndo_set_config)(struct net_device *dev,
					  struct ifmap *map);
	int		(*ndo_change_mtu)(struct net_device *dev,
					  int new_mtu);
	int		(*ndo_neigh_setup)(struct net_device *dev,
					   struct neigh_parms *);
	void		(*ndo_tx_timeout) (struct net_device *dev,
					   unsigned int txqueue);

	void		(*ndo_get_stats64)(struct net_device *dev,
					   struct rtnl_link_stats64 *storage);
	bool		(*ndo_has_offload_stats)(const struct net_device *dev,
						 int attr_id);
	int		(*ndo_get_offload_stats)(int attr_id,
						 const struct net_device *dev,
						 void *attr_data);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	int		(*ndo_vlan_rx_add_vid)(struct net_device *dev,
					       __be16 proto, u16 vid);
	int		(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
						__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void		(*ndo_poll_controller)(struct net_device *dev);
	int		(*ndo_netpoll_setup)(struct net_device *dev,
					     struct netpoll_info *info);
	void		(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
	int		(*ndo_set_vf_mac)(struct net_device *dev,
					  int queue, u8 *mac);
	int		(*ndo_set_vf_vlan)(struct net_device *dev,
					   int queue, u16 vlan,
					   u8 qos, __be16 proto);
	int		(*ndo_set_vf_rate)(struct net_device *dev,
					   int vf, int min_tx_rate,
					   int max_tx_rate);
	int		(*ndo_set_vf_spoofchk)(struct net_device *dev,
					       int vf, bool setting);
	int		(*ndo_set_vf_trust)(struct net_device *dev,
					    int vf, bool setting);
	int		(*ndo_get_vf_config)(struct net_device *dev,
					     int vf,
					     struct ifla_vf_info *ivf);
	int		(*ndo_set_vf_link_state)(struct net_device *dev,
						 int vf, int link_state);
	int		(*ndo_get_vf_stats)(struct net_device *dev,
					    int vf,
					    struct ifla_vf_stats *vf_stats);
	int		(*ndo_set_vf_port)(struct net_device *dev,
					   int vf,
					   struct nlattr *port[]);
	int		(*ndo_get_vf_port)(struct net_device *dev,
					   int vf, struct sk_buff *skb);
	int		(*ndo_get_vf_guid)(struct net_device *dev,
					   int vf,
					   struct ifla_vf_guid *node_guid,
					   struct ifla_vf_guid *port_guid);
	int		(*ndo_set_vf_guid)(struct net_device *dev,
					   int vf, u64 guid,
					   int guid_type);
	int		(*ndo_set_vf_rss_query_en)(struct net_device *dev,
						   int vf, bool setting);
	int		(*ndo_setup_tc)(struct net_device *dev,
					enum tc_setup_type type,
					void *type_data);
#if IS_ENABLED(CONFIG_FCOE)
	int		(*ndo_fcoe_enable)(struct net_device *dev);
	int		(*ndo_fcoe_disable)(struct net_device *dev);
	int		(*ndo_fcoe_ddp_setup)(struct net_device *dev,
					      u16 xid,
					      struct scatterlist *sgl,
					      unsigned int sgc);
	int		(*ndo_fcoe_ddp_done)(struct net_device *dev,
					     u16 xid);
	int		(*ndo_fcoe_ddp_target)(struct net_device *dev,
					       u16 xid,
					       struct scatterlist *sgl,
					       unsigned int sgc);
	int		(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
						struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int		(*ndo_fcoe_get_wwn)(struct net_device *dev,
					    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int		(*ndo_rx_flow_steer)(struct net_device *dev,
					     const struct sk_buff *skb,
					     u16 rxq_index,
					     u32 flow_id);
#endif
	int		(*ndo_add_slave)(struct net_device *dev,
					 struct net_device *slave_dev,
					 struct netlink_ext_ack *extack);
	int		(*ndo_del_slave)(struct net_device *dev,
					 struct net_device *slave_dev);
	struct net_device* (*ndo_get_xmit_slave)(struct net_device *dev,
						 struct sk_buff *skb,
						 bool all_slaves);
	netdev_features_t (*ndo_fix_features)(struct net_device *dev,
					      netdev_features_t features);
	int		(*ndo_set_features)(struct net_device *dev,
					    netdev_features_t features);
	int		(*ndo_neigh_construct)(struct net_device *dev,
					       struct neighbour *n);
	void		(*ndo_neigh_destroy)(struct net_device *dev,
					     struct neighbour *n);

	int		(*ndo_fdb_add)(struct ndmsg *ndm,
				       struct nlattr *tb[],
				       struct net_device *dev,
				       const unsigned char *addr,
				       u16 vid,
				       u16 flags,
				       struct netlink_ext_ack *extack);
	int		(*ndo_fdb_del)(struct ndmsg *ndm,
				       struct nlattr *tb[],
				       struct net_device *dev,
				       const unsigned char *addr,
				       u16 vid);
	int		(*ndo_fdb_dump)(struct sk_buff *skb,
					struct netlink_callback *cb,
					struct net_device *dev,
					struct net_device *filter_dev,
					int *idx);
	int		(*ndo_fdb_get)(struct sk_buff *skb,
				       struct nlattr *tb[],
				       struct net_device *dev,
				       const unsigned char *addr,
				       u16 vid, u32 portid, u32 seq,
				       struct netlink_ext_ack *extack);
	int		(*ndo_bridge_setlink)(struct net_device *dev,
					      struct nlmsghdr *nlh,
					      u16 flags,
					      struct netlink_ext_ack *extack);
	int		(*ndo_bridge_getlink)(struct sk_buff *skb,
					      u32 pid, u32 seq,
					      struct net_device *dev,
					      u32 filter_mask,
					      int nlflags);
	int		(*ndo_bridge_dellink)(struct net_device *dev,
					      struct nlmsghdr *nlh,
					      u16 flags);
	int		(*ndo_change_carrier)(struct net_device *dev,
					      bool new_carrier);
	int		(*ndo_get_phys_port_id)(struct net_device *dev,
						struct netdev_phys_item_id *ppid);
	int		(*ndo_get_port_parent_id)(struct net_device *dev,
						  struct netdev_phys_item_id *ppid);
	int		(*ndo_get_phys_port_name)(struct net_device *dev,
						  char *name, size_t len);
	void		(*ndo_udp_tunnel_add)(struct net_device *dev,
					      struct udp_tunnel_info *ti);
	void		(*ndo_udp_tunnel_del)(struct net_device *dev,
					      struct udp_tunnel_info *ti);
	void*		(*ndo_dfwd_add_station)(struct net_device *pdev,
						struct net_device *dev);
	void		(*ndo_dfwd_del_station)(struct net_device *pdev,
						void *priv);

	int		(*ndo_set_tx_maxrate)(struct net_device *dev,
					      int queue_index,
					      u32 maxrate);
	int		(*ndo_get_iflink)(const struct net_device *dev);
	int		(*ndo_change_proto_down)(struct net_device *dev,
						 bool proto_down);
	int		(*ndo_fill_metadata_dst)(struct net_device *dev,
						 struct sk_buff *skb);
	void		(*ndo_set_rx_headroom)(struct net_device *dev,
					       int needed_headroom);
	int		(*ndo_bpf)(struct net_device *dev,
				   struct netdev_bpf *bpf);
	int		(*ndo_xdp_xmit)(struct net_device *dev, int n,
					struct xdp_frame **xdp,
					u32 flags);
	int		(*ndo_xsk_wakeup)(struct net_device *dev,
					  u32 queue_id, u32 flags);
	struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev);
	int		(*ndo_tunnel_ctl)(struct net_device *dev,
					  struct ip_tunnel_parm *p, int cmd);
	struct net_device * (*ndo_get_peer_dev)(struct net_device *dev);
};
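
/*
 * Example (illustrative sketch; the foo_* names are placeholders): a
 * minimal ops table for an Ethernet driver. Only ndo_start_xmit is
 * mandatory; the eth_* helpers come from <linux/etherdevice.h>:
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_open		= foo_open,
 *		.ndo_stop		= foo_stop,
 *		.ndo_start_xmit		= foo_start_xmit,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *	};
 *
 * assigned as dev->netdev_ops = &foo_netdev_ops before register_netdev().
 */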

/**
 * enum netdev_priv_flags - &struct net_device priv_flags
 *
 * These are the &struct net_device priv_flags; they are only set internally
 * by drivers and used in the kernel. These flags are invisible to
 * userspace; this means that the order of these flags can change
 * during any kernel release.
 *
 * You should have a pretty good reason to be extending these flags.
 *
 * @IFF_802_1Q_VLAN: 802.1Q VLAN device
 * @IFF_EBRIDGE: Ethernet bridging device
 * @IFF_BONDING: bonding master or slave
 * @IFF_ISATAP: ISATAP interface (RFC4214)
 * @IFF_WAN_HDLC: WAN HDLC device
 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
 *	release skb->dst
 * @IFF_DONT_BRIDGE: disallow bridging this ether dev
 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
 * @IFF_MACVLAN_PORT: device used as macvlan port
 * @IFF_BRIDGE_PORT: device used as bridge port
 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
 * @IFF_UNICAST_FLT: Supports unicast filtering
 * @IFF_TEAM_PORT: device used as team port
 * @IFF_SUPP_NOFCS: device supports sending custom FCS
 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
 *	change when it's running
 * @IFF_MACVLAN: Macvlan device
 * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
 *	underlying stacked devices
 * @IFF_L3MDEV_MASTER: device is an L3 master device
 * @IFF_NO_QUEUE: device can run without qdisc attached
 * @IFF_OPENVSWITCH: device is an Open vSwitch master
 * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
 * @IFF_TEAM: device is a team device
 * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
 * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
 *	entity (i.e. the master device for bridged veth)
 * @IFF_MACSEC: device is a MACsec device
 * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
 * @IFF_FAILOVER: device is a failover master device
 * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
 * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
 * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running
 */
enum netdev_priv_flags {
	IFF_802_1Q_VLAN			= 1<<0,
	IFF_EBRIDGE			= 1<<1,
	IFF_BONDING			= 1<<2,
	IFF_ISATAP			= 1<<3,
	IFF_WAN_HDLC			= 1<<4,
	IFF_XMIT_DST_RELEASE		= 1<<5,
	IFF_DONT_BRIDGE			= 1<<6,
	IFF_DISABLE_NETPOLL		= 1<<7,
	IFF_MACVLAN_PORT		= 1<<8,
	IFF_BRIDGE_PORT			= 1<<9,
	IFF_OVS_DATAPATH		= 1<<10,
	IFF_TX_SKB_SHARING		= 1<<11,
	IFF_UNICAST_FLT			= 1<<12,
	IFF_TEAM_PORT			= 1<<13,
	IFF_SUPP_NOFCS			= 1<<14,
	IFF_LIVE_ADDR_CHANGE		= 1<<15,
	IFF_MACVLAN			= 1<<16,
	IFF_XMIT_DST_RELEASE_PERM	= 1<<17,
	IFF_L3MDEV_MASTER		= 1<<18,
	IFF_NO_QUEUE			= 1<<19,
	IFF_OPENVSWITCH			= 1<<20,
	IFF_L3MDEV_SLAVE		= 1<<21,
	IFF_TEAM			= 1<<22,
	IFF_RXFH_CONFIGURED		= 1<<23,
	IFF_PHONY_HEADROOM		= 1<<24,
	IFF_MACSEC			= 1<<25,
	IFF_NO_RX_HANDLER		= 1<<26,
	IFF_FAILOVER			= 1<<27,
	IFF_FAILOVER_SLAVE		= 1<<28,
	IFF_L3MDEV_RX_HANDLER		= 1<<29,
	IFF_LIVE_RENAME_OK		= 1<<30,
};

#define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
#define IFF_EBRIDGE			IFF_EBRIDGE
#define IFF_BONDING			IFF_BONDING
#define IFF_ISATAP			IFF_ISATAP
#define IFF_WAN_HDLC			IFF_WAN_HDLC
#define IFF_XMIT_DST_RELEASE		IFF_XMIT_DST_RELEASE
#define IFF_DONT_BRIDGE			IFF_DONT_BRIDGE
#define IFF_DISABLE_NETPOLL		IFF_DISABLE_NETPOLL
#define IFF_MACVLAN_PORT		IFF_MACVLAN_PORT
#define IFF_BRIDGE_PORT			IFF_BRIDGE_PORT
#define IFF_OVS_DATAPATH		IFF_OVS_DATAPATH
#define IFF_TX_SKB_SHARING		IFF_TX_SKB_SHARING
#define IFF_UNICAST_FLT			IFF_UNICAST_FLT
#define IFF_TEAM_PORT			IFF_TEAM_PORT
#define IFF_SUPP_NOFCS			IFF_SUPP_NOFCS
#define IFF_LIVE_ADDR_CHANGE		IFF_LIVE_ADDR_CHANGE
#define IFF_MACVLAN			IFF_MACVLAN
#define IFF_XMIT_DST_RELEASE_PERM	IFF_XMIT_DST_RELEASE_PERM
#define IFF_L3MDEV_MASTER		IFF_L3MDEV_MASTER
#define IFF_NO_QUEUE			IFF_NO_QUEUE
#define IFF_OPENVSWITCH			IFF_OPENVSWITCH
#define IFF_L3MDEV_SLAVE		IFF_L3MDEV_SLAVE
#define IFF_TEAM			IFF_TEAM
#define IFF_RXFH_CONFIGURED		IFF_RXFH_CONFIGURED
#define IFF_MACSEC			IFF_MACSEC
#define IFF_NO_RX_HANDLER		IFF_NO_RX_HANDLER
#define IFF_FAILOVER			IFF_FAILOVER
#define IFF_FAILOVER_SLAVE		IFF_FAILOVER_SLAVE
#define IFF_L3MDEV_RX_HANDLER		IFF_L3MDEV_RX_HANDLER
#define IFF_LIVE_RENAME_OK		IFF_LIVE_RENAME_OK
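
/*
 * Example (illustrative, not part of the original header): a driver that
 * implements its own unicast filtering and supports changing its hardware
 * address while up advertises that in its setup routine:
 *
 *	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
 */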
1620 * 1621 * @name_node: Name hashlist node 1622 * @ifalias: SNMP alias 1623 * @mem_end: Shared memory end 1624 * @mem_start: Shared memory start 1625 * @base_addr: Device I/O address 1626 * @irq: Device IRQ number 1627 * 1628 * @state: Generic network queuing layer state, see netdev_state_t 1629 * @dev_list: The global list of network devices 1630 * @napi_list: List entry used for polling NAPI devices 1631 * @unreg_list: List entry when we are unregistering the 1632 * device; see the function unregister_netdev 1633 * @close_list: List entry used when we are closing the device 1634 * @ptype_all: Device-specific packet handlers for all protocols 1635 * @ptype_specific: Device-specific, protocol-specific packet handlers 1636 * 1637 * @adj_list: Directly linked devices, like slaves for bonding 1638 * @features: Currently active device features 1639 * @hw_features: User-changeable features 1640 * 1641 * @wanted_features: User-requested features 1642 * @vlan_features: Mask of features inheritable by VLAN devices 1643 * 1644 * @hw_enc_features: Mask of features inherited by encapsulating devices 1645 * This field indicates what encapsulation 1646 * offloads the hardware is capable of doing, 1647 * and drivers will need to set them appropriately. 1648 * 1649 * @mpls_features: Mask of features inheritable by MPLS 1650 * @gso_partial_features: value(s) from NETIF_F_GSO\* 1651 * 1652 * @ifindex: interface index 1653 * @group: The group the device belongs to 1654 * 1655 * @stats: Statistics struct, which was left as a legacy, use 1656 * rtnl_link_stats64 instead 1657 * 1658 * @rx_dropped: Dropped packets by core network, 1659 * do not use this in drivers 1660 * @tx_dropped: Dropped packets by core network, 1661 * do not use this in drivers 1662 * @rx_nohandler: nohandler dropped packets by core network on 1663 * inactive devices, do not use this in drivers 1664 * @carrier_up_count: Number of times the carrier has been up 1665 * @carrier_down_count: Number of times the carrier has been down 1666 * 1667 * @wireless_handlers: List of functions to handle Wireless Extensions, 1668 * instead of ioctl, 1669 * see <net/iw_handler.h> for details. 1670 * @wireless_data: Instance data managed by the core of wireless extensions 1671 * 1672 * @netdev_ops: Includes several pointers to callbacks, 1673 * if one wants to override the ndo_*() functions 1674 * @ethtool_ops: Management operations 1675 * @l3mdev_ops: Layer 3 master device operations 1676 * @ndisc_ops: Includes callbacks for different IPv6 neighbour 1677 * discovery handling. Necessary for e.g. 6LoWPAN. 1678 * @xfrmdev_ops: Transformation offload operations 1679 * @tlsdev_ops: Transport Layer Security offload operations 1680 * @header_ops: Includes callbacks for creating,parsing,caching,etc 1681 * of Layer 2 headers. 1682 * 1683 * @flags: Interface flags (a la BSD) 1684 * @priv_flags: Like 'flags' but invisible to userspace, 1685 * see if.h for the definitions 1686 * @gflags: Global flags ( kept as legacy ) 1687 * @padded: How much padding added by alloc_netdev() 1688 * @operstate: RFC2863 operstate 1689 * @link_mode: Mapping policy to operstate 1690 * @if_port: Selectable AUI, TP, ... 1691 * @dma: DMA channel 1692 * @mtu: Interface MTU value 1693 * @min_mtu: Interface Minimum MTU value 1694 * @max_mtu: Interface Maximum MTU value 1695 * @type: Interface hardware type 1696 * @hard_header_len: Maximum hardware header length. 
1697 * @min_header_len: Minimum hardware header length 1698 * 1699 * @needed_headroom: Extra headroom the hardware may need, but not in all 1700 * cases can this be guaranteed 1701 * @needed_tailroom: Extra tailroom the hardware may need, but not in all 1702 * cases can this be guaranteed. Some cases also use 1703 * LL_MAX_HEADER instead to allocate the skb 1704 * 1705 * interface address info: 1706 * 1707 * @perm_addr: Permanent hw address 1708 * @addr_assign_type: Hw address assignment type 1709 * @addr_len: Hardware address length 1710 * @upper_level: Maximum depth level of upper devices. 1711 * @lower_level: Maximum depth level of lower devices. 1712 * @neigh_priv_len: Used in neigh_alloc() 1713 * @dev_id: Used to differentiate devices that share 1714 * the same link layer address 1715 * @dev_port: Used to differentiate devices that share 1716 * the same function 1717 * @addr_list_lock: XXX: need comments on this one 1718 * @name_assign_type: network interface name assignment type 1719 * @uc_promisc: Counter that indicates promiscuous mode 1720 * has been enabled due to the need to listen to 1721 * additional unicast addresses in a device that 1722 * does not implement ndo_set_rx_mode() 1723 * @uc: unicast mac addresses 1724 * @mc: multicast mac addresses 1725 * @dev_addrs: list of device hw addresses 1726 * @queues_kset: Group of all Kobjects in the Tx and RX queues 1727 * @promiscuity: Number of times the NIC is told to work in 1728 * promiscuous mode; if it becomes 0 the NIC will 1729 * exit promiscuous mode 1730 * @allmulti: Counter, enables or disables allmulticast mode 1731 * 1732 * @vlan_info: VLAN info 1733 * @dsa_ptr: dsa specific data 1734 * @tipc_ptr: TIPC specific data 1735 * @atalk_ptr: AppleTalk link 1736 * @ip_ptr: IPv4 specific data 1737 * @dn_ptr: DECnet specific data 1738 * @ip6_ptr: IPv6 specific data 1739 * @ax25_ptr: AX.25 specific data 1740 * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering 1741 * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network 1742 * device struct 1743 * @mpls_ptr: mpls_dev struct pointer 1744 * 1745 * @dev_addr: Hw address (before bcast, 1746 * because most packets are unicast) 1747 * 1748 * @_rx: Array of RX queues 1749 * @num_rx_queues: Number of RX queues 1750 * allocated at register_netdev() time 1751 * @real_num_rx_queues: Number of RX queues currently active in device 1752 * @xdp_prog: XDP sockets filter program pointer 1753 * @gro_flush_timeout: timeout for GRO layer in NAPI 1754 * @napi_defer_hard_irqs: If not zero, provides a counter that would 1755 * allow to avoid NIC hard IRQ, on busy queues. 1756 * 1757 * @rx_handler: handler for received packets 1758 * @rx_handler_data: XXX: need comments on this one 1759 * @miniq_ingress: ingress/clsact qdisc specific data for 1760 * ingress processing 1761 * @ingress_queue: XXX: need comments on this one 1762 * @nf_hooks_ingress: netfilter hooks executed for ingress packets 1763 * @broadcast: hw bcast address 1764 * 1765 * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts, 1766 * indexed by RX queue number. Assigned by driver. 
1767 * This must only be set if the ndo_rx_flow_steer 1768 * operation is defined 1769 * @index_hlist: Device index hash chain 1770 * 1771 * @_tx: Array of TX queues 1772 * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time 1773 * @real_num_tx_queues: Number of TX queues currently active in device 1774 * @qdisc: Root qdisc from userspace point of view 1775 * @tx_queue_len: Max frames per queue allowed 1776 * @tx_global_lock: XXX: need comments on this one 1777 * @xdp_bulkq: XDP device bulk queue 1778 * @xps_cpus_map: all CPUs map for XPS device 1779 * @xps_rxqs_map: all RXQs map for XPS device 1780 * 1781 * @xps_maps: XXX: need comments on this one 1782 * @miniq_egress: clsact qdisc specific data for 1783 * egress processing 1784 * @qdisc_hash: qdisc hash table 1785 * @watchdog_timeo: Represents the timeout that is used by 1786 * the watchdog (see dev_watchdog()) 1787 * @watchdog_timer: List of timers 1788 * 1789 * @proto_down_reason: reason a netdev interface is held down 1790 * @pcpu_refcnt: Number of references to this device 1791 * @todo_list: Delayed register/unregister 1792 * @link_watch_list: XXX: need comments on this one 1793 * 1794 * @reg_state: Register/unregister state machine 1795 * @dismantle: Device is going to be freed 1796 * @rtnl_link_state: This enum represents the phases of creating 1797 * a new link 1798 * 1799 * @needs_free_netdev: Should unregister perform free_netdev? 1800 * @priv_destructor: Called from unregister 1801 * @npinfo: XXX: need comments on this one 1802 * @nd_net: Network namespace this network device is inside 1803 * 1804 * @ml_priv: Mid-layer private 1805 * @lstats: Loopback statistics 1806 * @tstats: Tunnel statistics 1807 * @dstats: Dummy statistics 1808 * @vstats: Virtual ethernet statistics 1809 * 1810 * @garp_port: GARP 1811 * @mrp_port: MRP 1812 * 1813 * @dev: Class/net/name entry 1814 * @sysfs_groups: Space for optional device, statistics and wireless 1815 * sysfs groups 1816 * 1817 * @sysfs_rx_queue_group: Space for optional per-rx queue attributes 1818 * @rtnl_link_ops: Rtnl_link_ops 1819 * 1820 * @gso_max_size: Maximum size of generic segmentation offload 1821 * @gso_max_segs: Maximum number of segments that can be passed to the 1822 * NIC for GSO 1823 * 1824 * @dcbnl_ops: Data Center Bridging netlink ops 1825 * @num_tc: Number of traffic classes in the net device 1826 * @tc_to_txq: XXX: need comments on this one 1827 * @prio_tc_map: XXX: need comments on this one 1828 * 1829 * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp 1830 * 1831 * @priomap: XXX: need comments on this one 1832 * @phydev: Physical device may attach itself 1833 * for hardware timestamping 1834 * @sfp_bus: attached &struct sfp_bus structure. 1835 * 1836 * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock 1837 * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount 1838 * 1839 * @proto_down: protocol port state information can be sent to the 1840 * switch driver and used to set the phys state of the 1841 * switch port. 1842 * 1843 * @wol_enabled: Wake-on-LAN is enabled 1844 * 1845 * @net_notifier_list: List of per-net netdev notifier block 1846 * that follow this device when it is moved 1847 * to another network namespace. 
*
1849 * @macsec_ops: MACsec offloading ops
1850 *
1851 * @udp_tunnel_nic_info: static structure describing the UDP tunnel
1852 * offload capabilities of the device
1853 * @udp_tunnel_nic: UDP tunnel offload state
1854 * @xdp_state: stores info on attached XDP BPF programs
1855 *
1856 * @nested_level: Used as a parameter of spin_lock_nested() of
1857 * dev->addr_list_lock.
1858 * @unlink_list: As netif_addr_lock() can be called recursively,
1859 * keep a list of interfaces to be deleted.
1860 *
1861 * FIXME: cleanup struct net_device such that network protocol info
1862 * moves out.
1863 */
1864
1865struct net_device {
1866 char name[IFNAMSIZ];
1867 struct netdev_name_node *name_node;
1868 struct dev_ifalias __rcu *ifalias;
1869 /*
1870 * I/O specific fields
1871 * FIXME: Merge these and struct ifmap into one
1872 */
1873 unsigned long mem_end;
1874 unsigned long mem_start;
1875 unsigned long base_addr;
1876 int irq;
1877
1878 /*
1879 * Some hardware also needs these fields (state,dev_list,
1880 * napi_list,unreg_list,close_list) but they are not
1881 * part of the usual set specified in Space.c.
1882 */
1883
1884 unsigned long state;
1885
1886 struct list_head dev_list;
1887 struct list_head napi_list;
1888 struct list_head unreg_list;
1889 struct list_head close_list;
1890 struct list_head ptype_all;
1891 struct list_head ptype_specific;
1892
1893 struct {
1894 struct list_head upper;
1895 struct list_head lower;
1896 } adj_list;
1897
1898 netdev_features_t features;
1899 netdev_features_t hw_features;
1900 netdev_features_t wanted_features;
1901 netdev_features_t vlan_features;
1902 netdev_features_t hw_enc_features;
1903 netdev_features_t mpls_features;
1904 netdev_features_t gso_partial_features;
1905
1906 int ifindex;
1907 int group;
1908
1909 struct net_device_stats stats;
1910
1911 atomic_long_t rx_dropped;
1912 atomic_long_t tx_dropped;
1913 atomic_long_t rx_nohandler;
1914
1915 /* Stats to monitor link on/off, flapping */
1916 atomic_t carrier_up_count;
1917 atomic_t carrier_down_count;
1918
1919#ifdef CONFIG_WIRELESS_EXT
1920 const struct iw_handler_def *wireless_handlers;
1921 struct iw_public_data *wireless_data;
1922#endif
1923 const struct net_device_ops *netdev_ops;
1924 const struct ethtool_ops *ethtool_ops;
1925#ifdef CONFIG_NET_L3_MASTER_DEV
1926 const struct l3mdev_ops *l3mdev_ops;
1927#endif
1928#if IS_ENABLED(CONFIG_IPV6)
1929 const struct ndisc_ops *ndisc_ops;
1930#endif
1931
1932#ifdef CONFIG_XFRM_OFFLOAD
1933 const struct xfrmdev_ops *xfrmdev_ops;
1934#endif
1935
1936#if IS_ENABLED(CONFIG_TLS_DEVICE)
1937 const struct tlsdev_ops *tlsdev_ops;
1938#endif
1939
1940 const struct header_ops *header_ops;
1941
1942 unsigned int flags;
1943 unsigned int priv_flags;
1944
1945 unsigned short gflags;
1946 unsigned short padded;
1947
1948 unsigned char operstate;
1949 unsigned char link_mode;
1950
1951 unsigned char if_port;
1952 unsigned char dma;
1953
1954 /* Note: dev->mtu is often read without holding a lock.
1955 * Writers usually hold RTNL.
1956 * It is recommended to use READ_ONCE() to annotate the reads,
1957 * and to use WRITE_ONCE() to annotate the writes.
1958 */
1959 unsigned int mtu;
1960 unsigned int min_mtu;
1961 unsigned int max_mtu;
1962 unsigned short type;
1963 unsigned short hard_header_len;
1964 unsigned char min_header_len;
1965 unsigned char name_assign_type;
1966
1967 unsigned short needed_headroom;
1968 unsigned short needed_tailroom;
1969
1970 /* Interface address info.
*/ 1971 unsigned char perm_addr[MAX_ADDR_LEN]; 1972 unsigned char addr_assign_type; 1973 unsigned char addr_len; 1974 unsigned char upper_level; 1975 unsigned char lower_level; 1976 1977 unsigned short neigh_priv_len; 1978 unsigned short dev_id; 1979 unsigned short dev_port; 1980 spinlock_t addr_list_lock; 1981 1982 struct netdev_hw_addr_list uc; 1983 struct netdev_hw_addr_list mc; 1984 struct netdev_hw_addr_list dev_addrs; 1985 1986#ifdef CONFIG_SYSFS 1987 struct kset *queues_kset; 1988#endif 1989#ifdef CONFIG_LOCKDEP 1990 struct list_head unlink_list; 1991#endif 1992 unsigned int promiscuity; 1993 unsigned int allmulti; 1994 bool uc_promisc; 1995#ifdef CONFIG_LOCKDEP 1996 unsigned char nested_level; 1997#endif 1998 1999 2000 /* Protocol-specific pointers */ 2001 2002#if IS_ENABLED(CONFIG_VLAN_8021Q) 2003 struct vlan_info __rcu *vlan_info; 2004#endif 2005#if IS_ENABLED(CONFIG_NET_DSA) 2006 struct dsa_port *dsa_ptr; 2007#endif 2008#if IS_ENABLED(CONFIG_TIPC) 2009 struct tipc_bearer __rcu *tipc_ptr; 2010#endif 2011#if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK) 2012 void *atalk_ptr; 2013#endif 2014 struct in_device __rcu *ip_ptr; 2015#if IS_ENABLED(CONFIG_DECNET) 2016 struct dn_dev __rcu *dn_ptr; 2017#endif 2018 struct inet6_dev __rcu *ip6_ptr; 2019#if IS_ENABLED(CONFIG_AX25) 2020 void *ax25_ptr; 2021#endif 2022 struct wireless_dev *ieee80211_ptr; 2023 struct wpan_dev *ieee802154_ptr; 2024#if IS_ENABLED(CONFIG_MPLS_ROUTING) 2025 struct mpls_dev __rcu *mpls_ptr; 2026#endif 2027 2028/* 2029 * Cache lines mostly used on receive path (including eth_type_trans()) 2030 */ 2031 /* Interface address info used in eth_type_trans() */ 2032 unsigned char *dev_addr; 2033 2034 struct netdev_rx_queue *_rx; 2035 unsigned int num_rx_queues; 2036 unsigned int real_num_rx_queues; 2037 2038 struct bpf_prog __rcu *xdp_prog; 2039 unsigned long gro_flush_timeout; 2040 int napi_defer_hard_irqs; 2041 rx_handler_func_t __rcu *rx_handler; 2042 void __rcu *rx_handler_data; 2043 2044#ifdef CONFIG_NET_CLS_ACT 2045 struct mini_Qdisc __rcu *miniq_ingress; 2046#endif 2047 struct netdev_queue __rcu *ingress_queue; 2048#ifdef CONFIG_NETFILTER_INGRESS 2049 struct nf_hook_entries __rcu *nf_hooks_ingress; 2050#endif 2051 2052 unsigned char broadcast[MAX_ADDR_LEN]; 2053#ifdef CONFIG_RFS_ACCEL 2054 struct cpu_rmap *rx_cpu_rmap; 2055#endif 2056 struct hlist_node index_hlist; 2057 2058/* 2059 * Cache lines mostly used on transmit path 2060 */ 2061 struct netdev_queue *_tx ____cacheline_aligned_in_smp; 2062 unsigned int num_tx_queues; 2063 unsigned int real_num_tx_queues; 2064 struct Qdisc *qdisc; 2065 unsigned int tx_queue_len; 2066 spinlock_t tx_global_lock; 2067 2068 struct xdp_dev_bulk_queue __percpu *xdp_bulkq; 2069 2070#ifdef CONFIG_XPS 2071 struct xps_dev_maps __rcu *xps_cpus_map; 2072 struct xps_dev_maps __rcu *xps_rxqs_map; 2073#endif 2074#ifdef CONFIG_NET_CLS_ACT 2075 struct mini_Qdisc __rcu *miniq_egress; 2076#endif 2077 2078#ifdef CONFIG_NET_SCHED 2079 DECLARE_HASHTABLE (qdisc_hash, 4); 2080#endif 2081 /* These may be needed for future network-power-down code. 
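 * Drivers normally just set watchdog_timeo (e.g. 5 * HZ) before
 * register_netdev(); the core then drives watchdog_timer itself and invokes
 * ndo_tx_timeout() from dev_watchdog() when a queue stalls.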
*/ 2082 struct timer_list watchdog_timer; 2083 int watchdog_timeo; 2084 2085 u32 proto_down_reason; 2086 2087 struct list_head todo_list; 2088 int __percpu *pcpu_refcnt; 2089 2090 struct list_head link_watch_list; 2091 2092 enum { NETREG_UNINITIALIZED=0, 2093 NETREG_REGISTERED, /* completed register_netdevice */ 2094 NETREG_UNREGISTERING, /* called unregister_netdevice */ 2095 NETREG_UNREGISTERED, /* completed unregister todo */ 2096 NETREG_RELEASED, /* called free_netdev */ 2097 NETREG_DUMMY, /* dummy device for NAPI poll */ 2098 } reg_state:8; 2099 2100 bool dismantle; 2101 2102 enum { 2103 RTNL_LINK_INITIALIZED, 2104 RTNL_LINK_INITIALIZING, 2105 } rtnl_link_state:16; 2106 2107 bool needs_free_netdev; 2108 void (*priv_destructor)(struct net_device *dev); 2109 2110#ifdef CONFIG_NETPOLL 2111 struct netpoll_info __rcu *npinfo; 2112#endif 2113 2114 possible_net_t nd_net; 2115 2116 /* mid-layer private */ 2117 union { 2118 void *ml_priv; 2119 struct pcpu_lstats __percpu *lstats; 2120 struct pcpu_sw_netstats __percpu *tstats; 2121 struct pcpu_dstats __percpu *dstats; 2122 }; 2123 2124#if IS_ENABLED(CONFIG_GARP) 2125 struct garp_port __rcu *garp_port; 2126#endif 2127#if IS_ENABLED(CONFIG_MRP) 2128 struct mrp_port __rcu *mrp_port; 2129#endif 2130 2131 struct device dev; 2132 const struct attribute_group *sysfs_groups[4]; 2133 const struct attribute_group *sysfs_rx_queue_group; 2134 2135 const struct rtnl_link_ops *rtnl_link_ops; 2136 2137 /* for setting kernel sock attribute on TCP connection setup */ 2138#define GSO_MAX_SIZE 65536 2139 unsigned int gso_max_size; 2140#define GSO_MAX_SEGS 65535 2141 u16 gso_max_segs; 2142 2143#ifdef CONFIG_DCB 2144 const struct dcbnl_rtnl_ops *dcbnl_ops; 2145#endif 2146 s16 num_tc; 2147 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; 2148 u8 prio_tc_map[TC_BITMASK + 1]; 2149 2150#if IS_ENABLED(CONFIG_FCOE) 2151 unsigned int fcoe_ddp_xid; 2152#endif 2153#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) 2154 struct netprio_map __rcu *priomap; 2155#endif 2156 struct phy_device *phydev; 2157 struct sfp_bus *sfp_bus; 2158 struct lock_class_key *qdisc_tx_busylock; 2159 struct lock_class_key *qdisc_running_key; 2160 bool proto_down; 2161 unsigned wol_enabled:1; 2162 2163 struct list_head net_notifier_list; 2164 2165#if IS_ENABLED(CONFIG_MACSEC) 2166 /* MACsec management functions */ 2167 const struct macsec_ops *macsec_ops; 2168#endif 2169 const struct udp_tunnel_nic_info *udp_tunnel_nic_info; 2170 struct udp_tunnel_nic *udp_tunnel_nic; 2171 2172 /* protected by rtnl_lock */ 2173 struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE]; 2174}; 2175#define to_net_dev(d) container_of(d, struct net_device, dev) 2176 2177static inline bool netif_elide_gro(const struct net_device *dev) 2178{ 2179 if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog) 2180 return true; 2181 return false; 2182} 2183 2184#define NETDEV_ALIGN 32 2185 2186static inline 2187int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio) 2188{ 2189 return dev->prio_tc_map[prio & TC_BITMASK]; 2190} 2191 2192static inline 2193int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) 2194{ 2195 if (tc >= dev->num_tc) 2196 return -EINVAL; 2197 2198 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; 2199 return 0; 2200} 2201 2202int netdev_txq_to_tc(struct net_device *dev, unsigned int txq); 2203void netdev_reset_tc(struct net_device *dev); 2204int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset); 2205int netdev_set_num_tc(struct net_device *dev, u8 num_tc); 2206 2207static inline 
2208int netdev_get_num_tc(struct net_device *dev)
2209{
2210 return dev->num_tc;
2211}
2212
2213static inline void net_prefetch(void *p)
2214{
2215 prefetch(p);
2216#if L1_CACHE_BYTES < 128
2217 prefetch((u8 *)p + L1_CACHE_BYTES);
2218#endif
2219}
2220
2221static inline void net_prefetchw(void *p)
2222{
2223 prefetchw(p);
2224#if L1_CACHE_BYTES < 128
2225 prefetchw((u8 *)p + L1_CACHE_BYTES);
2226#endif
2227}
2228
2229void netdev_unbind_sb_channel(struct net_device *dev,
2230 struct net_device *sb_dev);
2231int netdev_bind_sb_channel_queue(struct net_device *dev,
2232 struct net_device *sb_dev,
2233 u8 tc, u16 count, u16 offset);
2234int netdev_set_sb_channel(struct net_device *dev, u16 channel);
2235static inline int netdev_get_sb_channel(struct net_device *dev)
2236{
2237 return max_t(int, -dev->num_tc, 0);
2238}
2239
2240static inline
2241struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
2242 unsigned int index)
2243{
2244 return &dev->_tx[index];
2245}
2246
2247static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
2248 const struct sk_buff *skb)
2249{
2250 return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2251}
2252
2253static inline void netdev_for_each_tx_queue(struct net_device *dev,
2254 void (*f)(struct net_device *,
2255 struct netdev_queue *,
2256 void *),
2257 void *arg)
2258{
2259 unsigned int i;
2260
2261 for (i = 0; i < dev->num_tx_queues; i++)
2262 f(dev, &dev->_tx[i], arg);
2263}
2264
2265#define netdev_lockdep_set_classes(dev) \
2266{ \
2267 static struct lock_class_key qdisc_tx_busylock_key; \
2268 static struct lock_class_key qdisc_running_key; \
2269 static struct lock_class_key qdisc_xmit_lock_key; \
2270 static struct lock_class_key dev_addr_list_lock_key; \
2271 unsigned int i; \
2272 \
2273 (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
2274 (dev)->qdisc_running_key = &qdisc_running_key; \
2275 lockdep_set_class(&(dev)->addr_list_lock, \
2276 &dev_addr_list_lock_key); \
2277 for (i = 0; i < (dev)->num_tx_queues; i++) \
2278 lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
2279 &qdisc_xmit_lock_key); \
2280}
2281
2282u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
2283 struct net_device *sb_dev);
2284struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
2285 struct sk_buff *skb,
2286 struct net_device *sb_dev);
2287
2288/* returns the headroom that the master device needs to take into account
2289 * when forwarding to this dev
2290 */
2291static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
2292{
2293 return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
2294}
2295
2296static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
2297{
2298 if (dev->netdev_ops->ndo_set_rx_headroom)
2299 dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
2300}
2301
2302/* set the device rx headroom to the dev's default */
2303static inline void netdev_reset_rx_headroom(struct net_device *dev)
2304{
2305 netdev_set_rx_headroom(dev, -1);
2306}
2307
2308/*
2309 * Net namespace inlines
2310 */
2311static inline
2312struct net *dev_net(const struct net_device *dev)
2313{
2314 return read_pnet(&dev->nd_net);
2315}
2316
2317static inline
2318void dev_net_set(struct net_device *dev, struct net *net)
2319{
2320 write_pnet(&dev->nd_net, net);
2321}
2322
2323/**
2324 * netdev_priv - access network device private data
2325 * @dev: network device
2326 *
2327 * Get network device private data
2328 */
2329static inline void *netdev_priv(const struct net_device *dev)
2330{
2331 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
2332}
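/* Usage sketch: netdev_priv() returns the driver-private area that directly
 * follows struct net_device, sized by the sizeof_priv argument given to
 * alloc_netdev()/alloc_etherdev(). The foo_priv type and names below are
 * hypothetical:
 *
 *	struct foo_priv { spinlock_t lock; };
 *
 *	dev = alloc_etherdev(sizeof(struct foo_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	priv = netdev_priv(dev);
 *	spin_lock_init(&priv->lock);
 */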
2333
2334/* Set the sysfs physical device reference for the network logical device.
2335 * If set prior to registration, it will cause a symlink during initialization.
2336 */
2337#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
2338
2339/* Set the sysfs device type for the network logical device to allow
2340 * fine-grained identification of different network device types. For
2341 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
2342 */
2343#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
2344
2345/* Default NAPI poll() weight
2346 * Device drivers are strongly advised not to use a bigger value
2347 */
2348#define NAPI_POLL_WEIGHT 64
2349
2350/**
2351 * netif_napi_add - initialize a NAPI context
2352 * @dev: network device
2353 * @napi: NAPI context
2354 * @poll: polling function
2355 * @weight: default weight
2356 *
2357 * netif_napi_add() must be used to initialize a NAPI context prior to calling
2358 * *any* of the other NAPI-related functions.
2359 */
2360void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2361 int (*poll)(struct napi_struct *, int), int weight);
2362
2363/**
2364 * netif_tx_napi_add - initialize a NAPI context
2365 * @dev: network device
2366 * @napi: NAPI context
2367 * @poll: polling function
2368 * @weight: default weight
2369 *
2370 * This variant of netif_napi_add() should be used from drivers using NAPI
2371 * to exclusively poll a TX queue.
2372 * This avoids adding it to napi_hash[], and thus polluting that hash table.
2373 */
2374static inline void netif_tx_napi_add(struct net_device *dev,
2375 struct napi_struct *napi,
2376 int (*poll)(struct napi_struct *, int),
2377 int weight)
2378{
2379 set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
2380 netif_napi_add(dev, napi, poll, weight);
2381}
2382
2383/**
2384 * __netif_napi_del - remove a NAPI context
2385 * @napi: NAPI context
2386 *
2387 * Warning: caller must observe RCU grace period before freeing memory
2388 * containing @napi. Drivers might want to call this helper to combine
2389 * all the needed RCU grace periods into a single one.
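 *
 * Teardown sketch for a hypothetical driver with several NAPI instances,
 * paying one grace period instead of one per instance:
 *
 *	for (i = 0; i < n; i++)
 *		__netif_napi_del(&priv->napi[i]);
 *	synchronize_net();
 *	kfree(priv->napi);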
2390 */ 2391void __netif_napi_del(struct napi_struct *napi); 2392 2393/** 2394 * netif_napi_del - remove a NAPI context 2395 * @napi: NAPI context 2396 * 2397 * netif_napi_del() removes a NAPI context from the network device NAPI list 2398 */ 2399static inline void netif_napi_del(struct napi_struct *napi) 2400{ 2401 __netif_napi_del(napi); 2402 synchronize_net(); 2403} 2404 2405struct napi_gro_cb { 2406 /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */ 2407 void *frag0; 2408 2409 /* Length of frag0. */ 2410 unsigned int frag0_len; 2411 2412 /* This indicates where we are processing relative to skb->data. */ 2413 int data_offset; 2414 2415 /* This is non-zero if the packet cannot be merged with the new skb. */ 2416 u16 flush; 2417 2418 /* Save the IP ID here and check when we get to the transport layer */ 2419 u16 flush_id; 2420 2421 /* Number of segments aggregated. */ 2422 u16 count; 2423 2424 /* Start offset for remote checksum offload */ 2425 u16 gro_remcsum_start; 2426 2427 /* jiffies when first packet was created/queued */ 2428 unsigned long age; 2429 2430 /* Used in ipv6_gro_receive() and foo-over-udp */ 2431 u16 proto; 2432 2433 /* This is non-zero if the packet may be of the same flow. */ 2434 u8 same_flow:1; 2435 2436 /* Used in tunnel GRO receive */ 2437 u8 encap_mark:1; 2438 2439 /* GRO checksum is valid */ 2440 u8 csum_valid:1; 2441 2442 /* Number of checksums via CHECKSUM_UNNECESSARY */ 2443 u8 csum_cnt:3; 2444 2445 /* Free the skb? */ 2446 u8 free:2; 2447#define NAPI_GRO_FREE 1 2448#define NAPI_GRO_FREE_STOLEN_HEAD 2 2449 2450 /* Used in foo-over-udp, set in udp[46]_gro_receive */ 2451 u8 is_ipv6:1; 2452 2453 /* Used in GRE, set in fou/gue_gro_receive */ 2454 u8 is_fou:1; 2455 2456 /* Used to determine if flush_id can be ignored */ 2457 u8 is_atomic:1; 2458 2459 /* Number of gro_receive callbacks this packet already went through */ 2460 u8 recursion_counter:4; 2461 2462 /* GRO is done by frag_list pointer chaining. */ 2463 u8 is_flist:1; 2464 2465 /* used to support CHECKSUM_COMPLETE for tunneling protocols */ 2466 __wsum csum; 2467 2468 /* used in skb_gro_receive() slow path */ 2469 struct sk_buff *last; 2470}; 2471 2472#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb) 2473 2474#define GRO_RECURSION_LIMIT 15 2475static inline int gro_recursion_inc_test(struct sk_buff *skb) 2476{ 2477 return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT; 2478} 2479 2480typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *); 2481static inline struct sk_buff *call_gro_receive(gro_receive_t cb, 2482 struct list_head *head, 2483 struct sk_buff *skb) 2484{ 2485 if (unlikely(gro_recursion_inc_test(skb))) { 2486 NAPI_GRO_CB(skb)->flush |= 1; 2487 return NULL; 2488 } 2489 2490 return cb(head, skb); 2491} 2492 2493typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *, 2494 struct sk_buff *); 2495static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb, 2496 struct sock *sk, 2497 struct list_head *head, 2498 struct sk_buff *skb) 2499{ 2500 if (unlikely(gro_recursion_inc_test(skb))) { 2501 NAPI_GRO_CB(skb)->flush |= 1; 2502 return NULL; 2503 } 2504 2505 return cb(sk, head, skb); 2506} 2507 2508struct packet_type { 2509 __be16 type; /* This is really htons(ether_type). 
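 For IPv4, for example, this is cpu_to_be16(ETH_P_IP).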
*/
2510 bool ignore_outgoing;
2511 struct net_device *dev; /* NULL is wildcarded here */
2512 int (*func) (struct sk_buff *,
2513 struct net_device *,
2514 struct packet_type *,
2515 struct net_device *);
2516 void (*list_func) (struct list_head *,
2517 struct packet_type *,
2518 struct net_device *);
2519 bool (*id_match)(struct packet_type *ptype,
2520 struct sock *sk);
2521 void *af_packet_priv;
2522 struct list_head list;
2523};
2524
2525struct offload_callbacks {
2526 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
2527 netdev_features_t features);
2528 struct sk_buff *(*gro_receive)(struct list_head *head,
2529 struct sk_buff *skb);
2530 int (*gro_complete)(struct sk_buff *skb, int nhoff);
2531};
2532
2533struct packet_offload {
2534 __be16 type; /* This is really htons(ether_type). */
2535 u16 priority;
2536 struct offload_callbacks callbacks;
2537 struct list_head list;
2538};
2539
2540/* often modified stats are per-CPU, others are shared (netdev->stats) */
2541struct pcpu_sw_netstats {
2542 u64 rx_packets;
2543 u64 rx_bytes;
2544 u64 tx_packets;
2545 u64 tx_bytes;
2546 struct u64_stats_sync syncp;
2547} __aligned(4 * sizeof(u64));
2548
2549struct pcpu_lstats {
2550 u64_stats_t packets;
2551 u64_stats_t bytes;
2552 struct u64_stats_sync syncp;
2553} __aligned(2 * sizeof(u64));
2554
2555void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes);
2556
2557static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len)
2558{
2559 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
2560
2561 u64_stats_update_begin(&tstats->syncp);
2562 tstats->rx_bytes += len;
2563 tstats->rx_packets++;
2564 u64_stats_update_end(&tstats->syncp);
2565}
2566
2567static inline void dev_sw_netstats_tx_add(struct net_device *dev,
2568 unsigned int packets,
2569 unsigned int len)
2570{
2571 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
2572
2573 u64_stats_update_begin(&tstats->syncp);
2574 tstats->tx_bytes += len;
2575 tstats->tx_packets += packets;
2576 u64_stats_update_end(&tstats->syncp);
2577}
2578
2579static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
2580{
2581 struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);
2582
2583 u64_stats_update_begin(&lstats->syncp);
2584 u64_stats_add(&lstats->bytes, len);
2585 u64_stats_inc(&lstats->packets);
2586 u64_stats_update_end(&lstats->syncp);
2587}
2588
2589#define __netdev_alloc_pcpu_stats(type, gfp) \
2590({ \
2591 typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
2592 if (pcpu_stats) { \
2593 int __cpu; \
2594 for_each_possible_cpu(__cpu) { \
2595 typeof(type) *stat; \
2596 stat = per_cpu_ptr(pcpu_stats, __cpu); \
2597 u64_stats_init(&stat->syncp); \
2598 } \
2599 } \
2600 pcpu_stats; \
2601})
2602
2603#define netdev_alloc_pcpu_stats(type) \
2604 __netdev_alloc_pcpu_stats(type, GFP_KERNEL)
2605
2606#define devm_netdev_alloc_pcpu_stats(dev, type) \
2607({ \
2608 typeof(type) __percpu *pcpu_stats = devm_alloc_percpu(dev, type);\
2609 if (pcpu_stats) { \
2610 int __cpu; \
2611 for_each_possible_cpu(__cpu) { \
2612 typeof(type) *stat; \
2613 stat = per_cpu_ptr(pcpu_stats, __cpu); \
2614 u64_stats_init(&stat->syncp); \
2615 } \
2616 } \
2617 pcpu_stats; \
2618})
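/* Usage sketch (hypothetical tunnel-style driver): allocate the per-CPU
 * counters in the device setup path and bump them from the datapath:
 *
 *	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 *	if (!dev->tstats)
 *		return -ENOMEM;
 *
 * then, for every received packet:
 *
 *	dev_sw_netstats_rx_add(dev, skb->len);
 *
 * and free_percpu(dev->tstats) on teardown.
 */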
2619
2620enum netdev_lag_tx_type {
2621 NETDEV_LAG_TX_TYPE_UNKNOWN,
2622 NETDEV_LAG_TX_TYPE_RANDOM,
2623 NETDEV_LAG_TX_TYPE_BROADCAST,
2624 NETDEV_LAG_TX_TYPE_ROUNDROBIN,
2625 NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
2626 NETDEV_LAG_TX_TYPE_HASH,
2627};
2628
2629enum netdev_lag_hash {
2630 NETDEV_LAG_HASH_NONE,
2631 NETDEV_LAG_HASH_L2,
2632 NETDEV_LAG_HASH_L34,
2633 NETDEV_LAG_HASH_L23,
2634 NETDEV_LAG_HASH_E23,
2635 NETDEV_LAG_HASH_E34,
2636 NETDEV_LAG_HASH_UNKNOWN,
2637};
2638
2639struct netdev_lag_upper_info {
2640 enum netdev_lag_tx_type tx_type;
2641 enum netdev_lag_hash hash_type;
2642};
2643
2644struct netdev_lag_lower_state_info {
2645 u8 link_up : 1,
2646 tx_enabled : 1;
2647};
2648
2649#include <linux/notifier.h>
2650
2651/* netdevice notifier chain. Please remember to update netdev_cmd_to_name()
2652 * and the rtnetlink notification exclusion list in rtnetlink_event() when
2653 * adding new types.
2654 */
2655enum netdev_cmd {
2656 NETDEV_UP = 1, /* For now you can't veto a device up/down */
2657 NETDEV_DOWN,
2658 NETDEV_REBOOT, /* Tell a protocol stack a network interface
2659 detected a hardware crash and restarted
2660 - we can use this, e.g., to kick TCP sessions
2661 once done */
2662 NETDEV_CHANGE, /* Notify device state change */
2663 NETDEV_REGISTER,
2664 NETDEV_UNREGISTER,
2665 NETDEV_CHANGEMTU, /* notify after mtu change happened */
2666 NETDEV_CHANGEADDR, /* notify after the address change */
2667 NETDEV_PRE_CHANGEADDR, /* notify before the address change */
2668 NETDEV_GOING_DOWN,
2669 NETDEV_CHANGENAME,
2670 NETDEV_FEAT_CHANGE,
2671 NETDEV_BONDING_FAILOVER,
2672 NETDEV_PRE_UP,
2673 NETDEV_PRE_TYPE_CHANGE,
2674 NETDEV_POST_TYPE_CHANGE,
2675 NETDEV_POST_INIT,
2676 NETDEV_RELEASE,
2677 NETDEV_NOTIFY_PEERS,
2678 NETDEV_JOIN,
2679 NETDEV_CHANGEUPPER,
2680 NETDEV_RESEND_IGMP,
2681 NETDEV_PRECHANGEMTU, /* notify before mtu change happened */
2682 NETDEV_CHANGEINFODATA,
2683 NETDEV_BONDING_INFO,
2684 NETDEV_PRECHANGEUPPER,
2685 NETDEV_CHANGELOWERSTATE,
2686 NETDEV_UDP_TUNNEL_PUSH_INFO,
2687 NETDEV_UDP_TUNNEL_DROP_INFO,
2688 NETDEV_CHANGE_TX_QUEUE_LEN,
2689 NETDEV_CVLAN_FILTER_PUSH_INFO,
2690 NETDEV_CVLAN_FILTER_DROP_INFO,
2691 NETDEV_SVLAN_FILTER_PUSH_INFO,
2692 NETDEV_SVLAN_FILTER_DROP_INFO,
2693};
2694const char *netdev_cmd_to_name(enum netdev_cmd cmd);
2695
2696int register_netdevice_notifier(struct notifier_block *nb);
2697int unregister_netdevice_notifier(struct notifier_block *nb);
2698int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb);
2699int unregister_netdevice_notifier_net(struct net *net,
2700 struct notifier_block *nb);
2701int register_netdevice_notifier_dev_net(struct net_device *dev,
2702 struct notifier_block *nb,
2703 struct netdev_net_notifier *nn);
2704int unregister_netdevice_notifier_dev_net(struct net_device *dev,
2705 struct notifier_block *nb,
2706 struct netdev_net_notifier *nn);
2707
2708struct netdev_notifier_info {
2709 struct net_device *dev;
2710 struct netlink_ext_ack *extack;
2711};
2712
2713struct netdev_notifier_info_ext {
2714 struct netdev_notifier_info info; /* must be first */
2715 union {
2716 u32 mtu;
2717 } ext;
2718};
2719
2720struct netdev_notifier_change_info {
2721 struct netdev_notifier_info info; /* must be first */
2722 unsigned int flags_changed;
2723};
2724
2725struct netdev_notifier_changeupper_info {
2726 struct netdev_notifier_info info; /* must be first */
2727 struct net_device *upper_dev; /* new upper dev */
2728 bool master; /* is upper dev master */
2729 bool linking; /* is the notification for link or unlink */
2730 void *upper_info; /* upper dev info */
2731};
2732
2733struct netdev_notifier_changelowerstate_info {
2734 struct netdev_notifier_info info; /* must be first */
2735 void *lower_state_info; /* is lower dev state */
2736};
2737
2738struct netdev_notifier_pre_changeaddr_info {
2739
struct netdev_notifier_info info; /* must be first */ 2740 const unsigned char *dev_addr; 2741}; 2742 2743static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, 2744 struct net_device *dev) 2745{ 2746 info->dev = dev; 2747 info->extack = NULL; 2748} 2749 2750static inline struct net_device * 2751netdev_notifier_info_to_dev(const struct netdev_notifier_info *info) 2752{ 2753 return info->dev; 2754} 2755 2756static inline struct netlink_ext_ack * 2757netdev_notifier_info_to_extack(const struct netdev_notifier_info *info) 2758{ 2759 return info->extack; 2760} 2761 2762int call_netdevice_notifiers(unsigned long val, struct net_device *dev); 2763 2764 2765extern rwlock_t dev_base_lock; /* Device list lock */ 2766 2767#define for_each_netdev(net, d) \ 2768 list_for_each_entry(d, &(net)->dev_base_head, dev_list) 2769#define for_each_netdev_reverse(net, d) \ 2770 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list) 2771#define for_each_netdev_rcu(net, d) \ 2772 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list) 2773#define for_each_netdev_safe(net, d, n) \ 2774 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list) 2775#define for_each_netdev_continue(net, d) \ 2776 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) 2777#define for_each_netdev_continue_reverse(net, d) \ 2778 list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \ 2779 dev_list) 2780#define for_each_netdev_continue_rcu(net, d) \ 2781 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) 2782#define for_each_netdev_in_bond_rcu(bond, slave) \ 2783 for_each_netdev_rcu(&init_net, slave) \ 2784 if (netdev_master_upper_dev_get_rcu(slave) == (bond)) 2785#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) 2786 2787static inline struct net_device *next_net_device(struct net_device *dev) 2788{ 2789 struct list_head *lh; 2790 struct net *net; 2791 2792 net = dev_net(dev); 2793 lh = dev->dev_list.next; 2794 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 2795} 2796 2797static inline struct net_device *next_net_device_rcu(struct net_device *dev) 2798{ 2799 struct list_head *lh; 2800 struct net *net; 2801 2802 net = dev_net(dev); 2803 lh = rcu_dereference(list_next_rcu(&dev->dev_list)); 2804 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 2805} 2806 2807static inline struct net_device *first_net_device(struct net *net) 2808{ 2809 return list_empty(&net->dev_base_head) ? NULL : 2810 net_device_entry(net->dev_base_head.next); 2811} 2812 2813static inline struct net_device *first_net_device_rcu(struct net *net) 2814{ 2815 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); 2816 2817 return lh == &net->dev_base_head ? 
NULL : net_device_entry(lh); 2818} 2819 2820int netdev_boot_setup_check(struct net_device *dev); 2821unsigned long netdev_boot_base(const char *prefix, int unit); 2822struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, 2823 const char *hwaddr); 2824struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); 2825void dev_add_pack(struct packet_type *pt); 2826void dev_remove_pack(struct packet_type *pt); 2827void __dev_remove_pack(struct packet_type *pt); 2828void dev_add_offload(struct packet_offload *po); 2829void dev_remove_offload(struct packet_offload *po); 2830 2831int dev_get_iflink(const struct net_device *dev); 2832int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb); 2833struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, 2834 unsigned short mask); 2835struct net_device *dev_get_by_name(struct net *net, const char *name); 2836struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); 2837struct net_device *__dev_get_by_name(struct net *net, const char *name); 2838int dev_alloc_name(struct net_device *dev, const char *name); 2839int dev_open(struct net_device *dev, struct netlink_ext_ack *extack); 2840void dev_close(struct net_device *dev); 2841void dev_close_many(struct list_head *head, bool unlink); 2842void dev_disable_lro(struct net_device *dev); 2843int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); 2844u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, 2845 struct net_device *sb_dev); 2846u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, 2847 struct net_device *sb_dev); 2848 2849int dev_queue_xmit(struct sk_buff *skb); 2850int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev); 2851int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id); 2852 2853static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id) 2854{ 2855 int ret; 2856 2857 ret = __dev_direct_xmit(skb, queue_id); 2858 if (!dev_xmit_complete(ret)) 2859 kfree_skb(skb); 2860 return ret; 2861} 2862 2863int register_netdevice(struct net_device *dev); 2864void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); 2865void unregister_netdevice_many(struct list_head *head); 2866static inline void unregister_netdevice(struct net_device *dev) 2867{ 2868 unregister_netdevice_queue(dev, NULL); 2869} 2870 2871int netdev_refcnt_read(const struct net_device *dev); 2872void free_netdev(struct net_device *dev); 2873void netdev_freemem(struct net_device *dev); 2874int init_dummy_netdev(struct net_device *dev); 2875 2876struct net_device *netdev_get_xmit_slave(struct net_device *dev, 2877 struct sk_buff *skb, 2878 bool all_slaves); 2879struct net_device *dev_get_by_index(struct net *net, int ifindex); 2880struct net_device *__dev_get_by_index(struct net *net, int ifindex); 2881struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); 2882struct net_device *dev_get_by_napi_id(unsigned int napi_id); 2883int netdev_get_name(struct net *net, char *name, int ifindex); 2884int dev_restart(struct net_device *dev); 2885int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb); 2886int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb); 2887 2888static inline unsigned int skb_gro_offset(const struct sk_buff *skb) 2889{ 2890 return NAPI_GRO_CB(skb)->data_offset; 2891} 2892 2893static inline unsigned int skb_gro_len(const struct sk_buff *skb) 2894{ 2895 return skb->len - NAPI_GRO_CB(skb)->data_offset; 2896} 
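/* The helpers below implement the usual header-access idiom inside a
 * gro_receive callback: try the frag0 fast path first and fall back to
 * pulling the header into the linear area. A sketch, for a header of
 * type *hdr at offset off = skb_gro_offset(skb):
 *
 *	hlen = off + sizeof(*hdr);
 *	hdr = skb_gro_header_fast(skb, off);
 *	if (skb_gro_header_hard(skb, hlen)) {
 *		hdr = skb_gro_header_slow(skb, hlen, off);
 *		if (!hdr)
 *			goto out;
 *	}
 */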
2897
2898static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
2899{
2900 NAPI_GRO_CB(skb)->data_offset += len;
2901}
2902
2903static inline void *skb_gro_header_fast(struct sk_buff *skb,
2904 unsigned int offset)
2905{
2906 return NAPI_GRO_CB(skb)->frag0 + offset;
2907}
2908
2909static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
2910{
2911 return NAPI_GRO_CB(skb)->frag0_len < hlen;
2912}
2913
2914static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
2915{
2916 NAPI_GRO_CB(skb)->frag0 = NULL;
2917 NAPI_GRO_CB(skb)->frag0_len = 0;
2918}
2919
2920static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
2921 unsigned int offset)
2922{
2923 if (!pskb_may_pull(skb, hlen))
2924 return NULL;
2925
2926 skb_gro_frag0_invalidate(skb);
2927 return skb->data + offset;
2928}
2929
2930static inline void *skb_gro_network_header(struct sk_buff *skb)
2931{
2932 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
2933 skb_network_offset(skb);
2934}
2935
2936static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
2937 const void *start, unsigned int len)
2938{
2939 if (NAPI_GRO_CB(skb)->csum_valid)
2940 NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
2941 csum_partial(start, len, 0));
2942}
2943
2944/* GRO checksum functions. These are logical equivalents of the normal
2945 * checksum functions (in skbuff.h) except that they operate on the GRO
2946 * offsets and fields in sk_buff.
2947 */
2948
2949__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
2950
2951static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
2952{
2953 return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
2954}
2955
2956static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
2957 bool zero_okay,
2958 __sum16 check)
2959{
2960 return ((skb->ip_summed != CHECKSUM_PARTIAL ||
2961 skb_checksum_start_offset(skb) <
2962 skb_gro_offset(skb)) &&
2963 !skb_at_gro_remcsum_start(skb) &&
2964 NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2965 (!zero_okay || check));
2966}
2967
2968static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
2969 __wsum psum)
2970{
2971 if (NAPI_GRO_CB(skb)->csum_valid &&
2972 !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
2973 return 0;
2974
2975 NAPI_GRO_CB(skb)->csum = psum;
2976
2977 return __skb_gro_checksum_complete(skb);
2978}
2979
2980static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
2981{
2982 if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
2983 /* Consume a checksum from CHECKSUM_UNNECESSARY */
2984 NAPI_GRO_CB(skb)->csum_cnt--;
2985 } else {
2986 /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
2987 * verified a new top level checksum or an encapsulated one
2988 * during GRO. This saves work if we fall back to the normal path.
2989 */ 2990 __skb_incr_checksum_unnecessary(skb); 2991 } 2992} 2993 2994#define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \ 2995 compute_pseudo) \ 2996({ \ 2997 __sum16 __ret = 0; \ 2998 if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \ 2999 __ret = __skb_gro_checksum_validate_complete(skb, \ 3000 compute_pseudo(skb, proto)); \ 3001 if (!__ret) \ 3002 skb_gro_incr_csum_unnecessary(skb); \ 3003 __ret; \ 3004}) 3005 3006#define skb_gro_checksum_validate(skb, proto, compute_pseudo) \ 3007 __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo) 3008 3009#define skb_gro_checksum_validate_zero_check(skb, proto, check, \ 3010 compute_pseudo) \ 3011 __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo) 3012 3013#define skb_gro_checksum_simple_validate(skb) \ 3014 __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo) 3015 3016static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb) 3017{ 3018 return (NAPI_GRO_CB(skb)->csum_cnt == 0 && 3019 !NAPI_GRO_CB(skb)->csum_valid); 3020} 3021 3022static inline void __skb_gro_checksum_convert(struct sk_buff *skb, 3023 __wsum pseudo) 3024{ 3025 NAPI_GRO_CB(skb)->csum = ~pseudo; 3026 NAPI_GRO_CB(skb)->csum_valid = 1; 3027} 3028 3029#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo) \ 3030do { \ 3031 if (__skb_gro_checksum_convert_check(skb)) \ 3032 __skb_gro_checksum_convert(skb, \ 3033 compute_pseudo(skb, proto)); \ 3034} while (0) 3035 3036struct gro_remcsum { 3037 int offset; 3038 __wsum delta; 3039}; 3040 3041static inline void skb_gro_remcsum_init(struct gro_remcsum *grc) 3042{ 3043 grc->offset = 0; 3044 grc->delta = 0; 3045} 3046 3047static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr, 3048 unsigned int off, size_t hdrlen, 3049 int start, int offset, 3050 struct gro_remcsum *grc, 3051 bool nopartial) 3052{ 3053 __wsum delta; 3054 size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start); 3055 3056 BUG_ON(!NAPI_GRO_CB(skb)->csum_valid); 3057 3058 if (!nopartial) { 3059 NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start; 3060 return ptr; 3061 } 3062 3063 ptr = skb_gro_header_fast(skb, off); 3064 if (skb_gro_header_hard(skb, off + plen)) { 3065 ptr = skb_gro_header_slow(skb, off + plen, off); 3066 if (!ptr) 3067 return NULL; 3068 } 3069 3070 delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum, 3071 start, offset); 3072 3073 /* Adjust skb->csum since we changed the packet */ 3074 NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta); 3075 3076 grc->offset = off + hdrlen + offset; 3077 grc->delta = delta; 3078 3079 return ptr; 3080} 3081 3082static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb, 3083 struct gro_remcsum *grc) 3084{ 3085 void *ptr; 3086 size_t plen = grc->offset + sizeof(u16); 3087 3088 if (!grc->delta) 3089 return; 3090 3091 ptr = skb_gro_header_fast(skb, grc->offset); 3092 if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) { 3093 ptr = skb_gro_header_slow(skb, plen, grc->offset); 3094 if (!ptr) 3095 return; 3096 } 3097 3098 remcsum_unadjust((__sum16 *)ptr, grc->delta); 3099} 3100 3101#ifdef CONFIG_XFRM_OFFLOAD 3102static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush) 3103{ 3104 if (PTR_ERR(pp) != -EINPROGRESS) 3105 NAPI_GRO_CB(skb)->flush |= flush; 3106} 3107static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb, 3108 struct sk_buff *pp, 3109 int flush, 3110 struct gro_remcsum *grc) 3111{ 3112 if (PTR_ERR(pp) != -EINPROGRESS) 
{ 3113 NAPI_GRO_CB(skb)->flush |= flush; 3114 skb_gro_remcsum_cleanup(skb, grc); 3115 skb->remcsum_offload = 0; 3116 } 3117} 3118#else 3119static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush) 3120{ 3121 NAPI_GRO_CB(skb)->flush |= flush; 3122} 3123static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb, 3124 struct sk_buff *pp, 3125 int flush, 3126 struct gro_remcsum *grc) 3127{ 3128 NAPI_GRO_CB(skb)->flush |= flush; 3129 skb_gro_remcsum_cleanup(skb, grc); 3130 skb->remcsum_offload = 0; 3131} 3132#endif 3133 3134static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, 3135 unsigned short type, 3136 const void *daddr, const void *saddr, 3137 unsigned int len) 3138{ 3139 if (!dev->header_ops || !dev->header_ops->create) 3140 return 0; 3141 3142 return dev->header_ops->create(skb, dev, type, daddr, saddr, len); 3143} 3144 3145static inline int dev_parse_header(const struct sk_buff *skb, 3146 unsigned char *haddr) 3147{ 3148 const struct net_device *dev = skb->dev; 3149 3150 if (!dev->header_ops || !dev->header_ops->parse) 3151 return 0; 3152 return dev->header_ops->parse(skb, haddr); 3153} 3154 3155static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb) 3156{ 3157 const struct net_device *dev = skb->dev; 3158 3159 if (!dev->header_ops || !dev->header_ops->parse_protocol) 3160 return 0; 3161 return dev->header_ops->parse_protocol(skb); 3162} 3163 3164/* ll_header must have at least hard_header_len allocated */ 3165static inline bool dev_validate_header(const struct net_device *dev, 3166 char *ll_header, int len) 3167{ 3168 if (likely(len >= dev->hard_header_len)) 3169 return true; 3170 if (len < dev->min_header_len) 3171 return false; 3172 3173 if (capable(CAP_SYS_RAWIO)) { 3174 memset(ll_header + len, 0, dev->hard_header_len - len); 3175 return true; 3176 } 3177 3178 if (dev->header_ops && dev->header_ops->validate) 3179 return dev->header_ops->validate(ll_header, len); 3180 3181 return false; 3182} 3183 3184static inline bool dev_has_header(const struct net_device *dev) 3185{ 3186 return dev->header_ops && dev->header_ops->create; 3187} 3188 3189typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, 3190 int len, int size); 3191int register_gifconf(unsigned int family, gifconf_func_t *gifconf); 3192static inline int unregister_gifconf(unsigned int family) 3193{ 3194 return register_gifconf(family, NULL); 3195} 3196 3197#ifdef CONFIG_NET_FLOW_LIMIT 3198#define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */ 3199struct sd_flow_limit { 3200 u64 count; 3201 unsigned int num_buckets; 3202 unsigned int history_head; 3203 u16 history[FLOW_LIMIT_HISTORY]; 3204 u8 buckets[]; 3205}; 3206 3207extern int netdev_flow_limit_table_len; 3208#endif /* CONFIG_NET_FLOW_LIMIT */ 3209 3210/* 3211 * Incoming packets are placed on per-CPU queues 3212 */ 3213struct softnet_data { 3214 struct list_head poll_list; 3215 struct sk_buff_head process_queue; 3216 3217 /* stats */ 3218 unsigned int processed; 3219 unsigned int time_squeeze; 3220 unsigned int received_rps; 3221#ifdef CONFIG_RPS 3222 struct softnet_data *rps_ipi_list; 3223#endif 3224#ifdef CONFIG_NET_FLOW_LIMIT 3225 struct sd_flow_limit __rcu *flow_limit; 3226#endif 3227 struct Qdisc *output_queue; 3228 struct Qdisc **output_queue_tailp; 3229 struct sk_buff *completion_queue; 3230#ifdef CONFIG_XFRM_OFFLOAD 3231 struct sk_buff_head xfrm_backlog; 3232#endif 3233 /* written and read only by owning cpu: */ 3234 struct { 3235 u16 
recursion; 3236 u8 more; 3237 } xmit; 3238#ifdef CONFIG_RPS 3239 /* input_queue_head should be written by cpu owning this struct, 3240 * and only read by other cpus. Worth using a cache line. 3241 */ 3242 unsigned int input_queue_head ____cacheline_aligned_in_smp; 3243 3244 /* Elements below can be accessed between CPUs for RPS/RFS */ 3245 call_single_data_t csd ____cacheline_aligned_in_smp; 3246 struct softnet_data *rps_ipi_next; 3247 unsigned int cpu; 3248 unsigned int input_queue_tail; 3249#endif 3250 unsigned int dropped; 3251 struct sk_buff_head input_pkt_queue; 3252 struct napi_struct backlog; 3253 3254}; 3255 3256static inline void input_queue_head_incr(struct softnet_data *sd) 3257{ 3258#ifdef CONFIG_RPS 3259 sd->input_queue_head++; 3260#endif 3261} 3262 3263static inline void input_queue_tail_incr_save(struct softnet_data *sd, 3264 unsigned int *qtail) 3265{ 3266#ifdef CONFIG_RPS 3267 *qtail = ++sd->input_queue_tail; 3268#endif 3269} 3270 3271DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); 3272 3273static inline int dev_recursion_level(void) 3274{ 3275 return this_cpu_read(softnet_data.xmit.recursion); 3276} 3277 3278#define XMIT_RECURSION_LIMIT 8 3279static inline bool dev_xmit_recursion(void) 3280{ 3281 return unlikely(__this_cpu_read(softnet_data.xmit.recursion) > 3282 XMIT_RECURSION_LIMIT); 3283} 3284 3285static inline void dev_xmit_recursion_inc(void) 3286{ 3287 __this_cpu_inc(softnet_data.xmit.recursion); 3288} 3289 3290static inline void dev_xmit_recursion_dec(void) 3291{ 3292 __this_cpu_dec(softnet_data.xmit.recursion); 3293} 3294 3295void __netif_schedule(struct Qdisc *q); 3296void netif_schedule_queue(struct netdev_queue *txq); 3297 3298static inline void netif_tx_schedule_all(struct net_device *dev) 3299{ 3300 unsigned int i; 3301 3302 for (i = 0; i < dev->num_tx_queues; i++) 3303 netif_schedule_queue(netdev_get_tx_queue(dev, i)); 3304} 3305 3306static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue) 3307{ 3308 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); 3309} 3310 3311/** 3312 * netif_start_queue - allow transmit 3313 * @dev: network device 3314 * 3315 * Allow upper layers to call the device hard_start_xmit routine. 3316 */ 3317static inline void netif_start_queue(struct net_device *dev) 3318{ 3319 netif_tx_start_queue(netdev_get_tx_queue(dev, 0)); 3320} 3321 3322static inline void netif_tx_start_all_queues(struct net_device *dev) 3323{ 3324 unsigned int i; 3325 3326 for (i = 0; i < dev->num_tx_queues; i++) { 3327 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 3328 netif_tx_start_queue(txq); 3329 } 3330} 3331 3332void netif_tx_wake_queue(struct netdev_queue *dev_queue); 3333 3334/** 3335 * netif_wake_queue - restart transmit 3336 * @dev: network device 3337 * 3338 * Allow upper layers to call the device hard_start_xmit routine. 3339 * Used for flow control when transmit resources are available. 
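 *
 * A common pattern (sketch) in the TX completion path of a hypothetical
 * driver, where foo_tx_ring_has_room() stands in for a driver-specific
 * check:
 *
 *	if (netif_queue_stopped(dev) && foo_tx_ring_has_room(priv))
 *		netif_wake_queue(dev);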
3340 */
3341static inline void netif_wake_queue(struct net_device *dev)
3342{
3343 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
3344}
3345
3346static inline void netif_tx_wake_all_queues(struct net_device *dev)
3347{
3348 unsigned int i;
3349
3350 for (i = 0; i < dev->num_tx_queues; i++) {
3351 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3352 netif_tx_wake_queue(txq);
3353 }
3354}
3355
3356static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
3357{
3358 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3359}
3360
3361/**
3362 * netif_stop_queue - stop transmit
3363 * @dev: network device
3364 *
3365 * Stop upper layers calling the device hard_start_xmit routine.
3366 * Used for flow control when transmit resources are unavailable.
3367 */
3368static inline void netif_stop_queue(struct net_device *dev)
3369{
3370 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
3371}
3372
3373void netif_tx_stop_all_queues(struct net_device *dev);
3374
3375static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
3376{
3377 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3378}
3379
3380/**
3381 * netif_queue_stopped - test if transmit queue is flow-blocked
3382 * @dev: network device
3383 *
3384 * Test if transmit queue on device is currently unable to send.
3385 */
3386static inline bool netif_queue_stopped(const struct net_device *dev)
3387{
3388 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
3389}
3390
3391static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
3392{
3393 return dev_queue->state & QUEUE_STATE_ANY_XOFF;
3394}
3395
3396static inline bool
3397netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
3398{
3399 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
3400}
3401
3402static inline bool
3403netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
3404{
3405 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
3406}
3407
3408/**
3409 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
3410 * @dev_queue: pointer to transmit queue
3411 *
3412 * BQL enabled drivers might use this helper in their ndo_start_xmit(),
3413 * to give appropriate hint to the CPU.
3414 */
3415static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
3416{
3417#ifdef CONFIG_BQL
3418 prefetchw(&dev_queue->dql.num_queued);
3419#endif
3420}
3421
3422/**
3423 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
3424 * @dev_queue: pointer to transmit queue
3425 *
3426 * BQL enabled drivers might use this helper in their TX completion path,
3427 * to give appropriate hint to the CPU.
3428 */
3429static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
3430{
3431#ifdef CONFIG_BQL
3432 prefetchw(&dev_queue->dql.limit);
3433#endif
3434}
3435
3436static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
3437 unsigned int bytes)
3438{
3439#ifdef CONFIG_BQL
3440 dql_queued(&dev_queue->dql, bytes);
3441
3442 if (likely(dql_avail(&dev_queue->dql) >= 0))
3443 return;
3444
3445 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
3446
3447 /*
3448 * The XOFF flag must be set before checking the dql_avail below,
3449 * because in netdev_tx_completed_queue we update the dql_completed
3450 * before checking the XOFF flag.
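 * (The smp_mb() below pairs with the barrier in netdev_tx_completed_queue().)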
3451 */ 3452 smp_mb(); 3453 3454 /* check again in case another CPU has just made room avail */ 3455 if (unlikely(dql_avail(&dev_queue->dql) >= 0)) 3456 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); 3457#endif 3458} 3459 3460/* Variant of netdev_tx_sent_queue() for drivers that are aware 3461 * that they should not test BQL status themselves. 3462 * We do want to change __QUEUE_STATE_STACK_XOFF only for the last 3463 * skb of a batch. 3464 * Returns true if the doorbell must be used to kick the NIC. 3465 */ 3466static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue, 3467 unsigned int bytes, 3468 bool xmit_more) 3469{ 3470 if (xmit_more) { 3471#ifdef CONFIG_BQL 3472 dql_queued(&dev_queue->dql, bytes); 3473#endif 3474 return netif_tx_queue_stopped(dev_queue); 3475 } 3476 netdev_tx_sent_queue(dev_queue, bytes); 3477 return true; 3478} 3479 3480/** 3481 * netdev_sent_queue - report the number of bytes queued to hardware 3482 * @dev: network device 3483 * @bytes: number of bytes queued to the hardware device queue 3484 * 3485 * Report the number of bytes queued for sending/completion to the network 3486 * device hardware queue. @bytes should be a good approximation and should 3487 * exactly match netdev_completed_queue() @bytes 3488 */ 3489static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) 3490{ 3491 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes); 3492} 3493 3494static inline bool __netdev_sent_queue(struct net_device *dev, 3495 unsigned int bytes, 3496 bool xmit_more) 3497{ 3498 return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes, 3499 xmit_more); 3500} 3501 3502static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, 3503 unsigned int pkts, unsigned int bytes) 3504{ 3505#ifdef CONFIG_BQL 3506 if (unlikely(!bytes)) 3507 return; 3508 3509 dql_completed(&dev_queue->dql, bytes); 3510 3511 /* 3512 * Without the memory barrier there is a small possiblity that 3513 * netdev_tx_sent_queue will miss the update and cause the queue to 3514 * be stopped forever 3515 */ 3516 smp_mb(); 3517 3518 if (unlikely(dql_avail(&dev_queue->dql) < 0)) 3519 return; 3520 3521 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state)) 3522 netif_schedule_queue(dev_queue); 3523#endif 3524} 3525 3526/** 3527 * netdev_completed_queue - report bytes and packets completed by device 3528 * @dev: network device 3529 * @pkts: actual number of packets sent over the medium 3530 * @bytes: actual number of bytes sent over the medium 3531 * 3532 * Report the number of bytes and packets transmitted by the network device 3533 * hardware queue over the physical medium, @bytes must exactly match the 3534 * @bytes amount passed to netdev_sent_queue() 3535 */ 3536static inline void netdev_completed_queue(struct net_device *dev, 3537 unsigned int pkts, unsigned int bytes) 3538{ 3539 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes); 3540} 3541 3542static inline void netdev_tx_reset_queue(struct netdev_queue *q) 3543{ 3544#ifdef CONFIG_BQL 3545 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state); 3546 dql_reset(&q->dql); 3547#endif 3548} 3549 3550/** 3551 * netdev_reset_queue - reset the packets and bytes count of a network device 3552 * @dev_queue: network device 3553 * 3554 * Reset the bytes and packet count of a network device and clear the 3555 * software flow control OFF bit for this network device 3556 */ 3557static inline void netdev_reset_queue(struct net_device *dev_queue) 3558{ 3559 
netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0)); 3560} 3561 3562/** 3563 * netdev_cap_txqueue - check if selected tx queue exceeds device queues 3564 * @dev: network device 3565 * @queue_index: given tx queue index 3566 * 3567 * Returns 0 if given tx queue index >= number of device tx queues, 3568 * otherwise returns the originally passed tx queue index. 3569 */ 3570static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index) 3571{ 3572 if (unlikely(queue_index >= dev->real_num_tx_queues)) { 3573 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n", 3574 dev->name, queue_index, 3575 dev->real_num_tx_queues); 3576 return 0; 3577 } 3578 3579 return queue_index; 3580} 3581 3582/** 3583 * netif_running - test if up 3584 * @dev: network device 3585 * 3586 * Test if the device has been brought up. 3587 */ 3588static inline bool netif_running(const struct net_device *dev) 3589{ 3590 return test_bit(__LINK_STATE_START, &dev->state); 3591} 3592 3593/* 3594 * Routines to manage the subqueues on a device. We only need start, 3595 * stop, and a check if it's stopped. All other device management is 3596 * done at the overall netdevice level. 3597 * Also test the device if we're multiqueue. 3598 */ 3599 3600/** 3601 * netif_start_subqueue - allow sending packets on subqueue 3602 * @dev: network device 3603 * @queue_index: sub queue index 3604 * 3605 * Start individual transmit queue of a device with multiple transmit queues. 3606 */ 3607static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) 3608{ 3609 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3610 3611 netif_tx_start_queue(txq); 3612} 3613 3614/** 3615 * netif_stop_subqueue - stop sending packets on subqueue 3616 * @dev: network device 3617 * @queue_index: sub queue index 3618 * 3619 * Stop individual transmit queue of a device with multiple transmit queues. 3620 */ 3621static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) 3622{ 3623 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3624 netif_tx_stop_queue(txq); 3625} 3626 3627/** 3628 * __netif_subqueue_stopped - test status of subqueue 3629 * @dev: network device 3630 * @queue_index: sub queue index 3631 * 3632 * Check individual transmit queue of a device with multiple transmit queues. 3633 */ 3634static inline bool __netif_subqueue_stopped(const struct net_device *dev, 3635 u16 queue_index) 3636{ 3637 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3638 3639 return netif_tx_queue_stopped(txq); 3640} 3641 3642/** 3643 * netif_subqueue_stopped - test status of subqueue 3644 * @dev: network device 3645 * @skb: sub queue buffer pointer 3646 * 3647 * Check individual transmit queue of a device with multiple transmit queues. 3648 */ 3649static inline bool netif_subqueue_stopped(const struct net_device *dev, 3650 struct sk_buff *skb) 3651{ 3652 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); 3653} 3654 3655/** 3656 * netif_wake_subqueue - allow sending packets on subqueue 3657 * @dev: network device 3658 * @queue_index: sub queue index 3659 * 3660 * Resume individual transmit queue of a device with multiple transmit queues. 
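 *
 * Illustrative sketch only (ring->index and foo_ring_space() are
 * hypothetical driver names): a multiqueue TX completion handler might do:
 *
 *	if (__netif_subqueue_stopped(dev, ring->index) &&
 *	    foo_ring_space(ring) > MAX_SKB_FRAGS)
 *		netif_wake_subqueue(dev, ring->index);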
3661 */ 3662static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) 3663{ 3664 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3665 3666 netif_tx_wake_queue(txq); 3667} 3668 3669#ifdef CONFIG_XPS 3670int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, 3671 u16 index); 3672int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, 3673 u16 index, bool is_rxqs_map); 3674 3675/** 3676 * netif_attr_test_mask - Test a CPU or Rx queue set in a mask 3677 * @j: CPU/Rx queue index 3678 * @mask: bitmask of all cpus/rx queues 3679 * @nr_bits: number of bits in the bitmask 3680 * 3681 * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues. 3682 */ 3683static inline bool netif_attr_test_mask(unsigned long j, 3684 const unsigned long *mask, 3685 unsigned int nr_bits) 3686{ 3687 cpu_max_bits_warn(j, nr_bits); 3688 return test_bit(j, mask); 3689} 3690 3691/** 3692 * netif_attr_test_online - Test for online CPU/Rx queue 3693 * @j: CPU/Rx queue index 3694 * @online_mask: bitmask for CPUs/Rx queues that are online 3695 * @nr_bits: number of bits in the bitmask 3696 * 3697 * Returns true if a CPU/Rx queue is online. 3698 */ 3699static inline bool netif_attr_test_online(unsigned long j, 3700 const unsigned long *online_mask, 3701 unsigned int nr_bits) 3702{ 3703 cpu_max_bits_warn(j, nr_bits); 3704 3705 if (online_mask) 3706 return test_bit(j, online_mask); 3707 3708 return (j < nr_bits); 3709} 3710 3711/** 3712 * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask 3713 * @n: CPU/Rx queue index 3714 * @srcp: the cpumask/Rx queue mask pointer 3715 * @nr_bits: number of bits in the bitmask 3716 * 3717 * Returns >= nr_bits if no further CPUs/Rx queues set. 3718 */ 3719static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp, 3720 unsigned int nr_bits) 3721{ 3722 /* -1 is a legal arg here. */ 3723 if (n != -1) 3724 cpu_max_bits_warn(n, nr_bits); 3725 3726 if (srcp) 3727 return find_next_bit(srcp, nr_bits, n + 1); 3728 3729 return n + 1; 3730} 3731 3732/** 3733 * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p 3734 * @n: CPU/Rx queue index 3735 * @src1p: the first CPUs/Rx queues mask pointer 3736 * @src2p: the second CPUs/Rx queues mask pointer 3737 * @nr_bits: number of bits in the bitmask 3738 * 3739 * Returns >= nr_bits if no further CPUs/Rx queues set in both. 3740 */ 3741static inline int netif_attrmask_next_and(int n, const unsigned long *src1p, 3742 const unsigned long *src2p, 3743 unsigned int nr_bits) 3744{ 3745 /* -1 is a legal arg here. 
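	 * Passing -1 starts the search at bit 0, mirroring the cpumask
	 * iteration helpers.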
*/ 3746 if (n != -1) 3747 cpu_max_bits_warn(n, nr_bits); 3748 3749 if (src1p && src2p) 3750 return find_next_and_bit(src1p, src2p, nr_bits, n + 1); 3751 else if (src1p) 3752 return find_next_bit(src1p, nr_bits, n + 1); 3753 else if (src2p) 3754 return find_next_bit(src2p, nr_bits, n + 1); 3755 3756 return n + 1; 3757} 3758#else 3759static inline int netif_set_xps_queue(struct net_device *dev, 3760 const struct cpumask *mask, 3761 u16 index) 3762{ 3763 return 0; 3764} 3765 3766static inline int __netif_set_xps_queue(struct net_device *dev, 3767 const unsigned long *mask, 3768 u16 index, bool is_rxqs_map) 3769{ 3770 return 0; 3771} 3772#endif 3773 3774/** 3775 * netif_is_multiqueue - test if device has multiple transmit queues 3776 * @dev: network device 3777 * 3778 * Check if device has multiple transmit queues 3779 */ 3780static inline bool netif_is_multiqueue(const struct net_device *dev) 3781{ 3782 return dev->num_tx_queues > 1; 3783} 3784 3785int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq); 3786 3787#ifdef CONFIG_SYSFS 3788int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq); 3789#else 3790static inline int netif_set_real_num_rx_queues(struct net_device *dev, 3791 unsigned int rxqs) 3792{ 3793 dev->real_num_rx_queues = rxqs; 3794 return 0; 3795} 3796#endif 3797 3798static inline struct netdev_rx_queue * 3799__netif_get_rx_queue(struct net_device *dev, unsigned int rxq) 3800{ 3801 return dev->_rx + rxq; 3802} 3803 3804#ifdef CONFIG_SYSFS 3805static inline unsigned int get_netdev_rx_queue_index( 3806 struct netdev_rx_queue *queue) 3807{ 3808 struct net_device *dev = queue->dev; 3809 int index = queue - dev->_rx; 3810 3811 BUG_ON(index >= dev->num_rx_queues); 3812 return index; 3813} 3814#endif 3815 3816#define DEFAULT_MAX_NUM_RSS_QUEUES (8) 3817int netif_get_num_default_rss_queues(void); 3818 3819enum skb_free_reason { 3820 SKB_REASON_CONSUMED, 3821 SKB_REASON_DROPPED, 3822}; 3823 3824void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason); 3825void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason); 3826 3827/* 3828 * It is not allowed to call kfree_skb() or consume_skb() from hardware 3829 * interrupt context or with hardware interrupts being disabled. 3830 * (in_irq() || irqs_disabled()) 3831 * 3832 * We provide four helpers that can be used in following contexts : 3833 * 3834 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context, 3835 * replacing kfree_skb(skb) 3836 * 3837 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context. 3838 * Typically used in place of consume_skb(skb) in TX completion path 3839 * 3840 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context, 3841 * replacing kfree_skb(skb) 3842 * 3843 * dev_consume_skb_any(skb) when caller doesn't know its current irq context, 3844 * and consumed a packet. 
Used in place of consume_skb(skb) 3845 */ 3846static inline void dev_kfree_skb_irq(struct sk_buff *skb) 3847{ 3848 __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED); 3849} 3850 3851static inline void dev_consume_skb_irq(struct sk_buff *skb) 3852{ 3853 __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED); 3854} 3855 3856static inline void dev_kfree_skb_any(struct sk_buff *skb) 3857{ 3858 __dev_kfree_skb_any(skb, SKB_REASON_DROPPED); 3859} 3860 3861static inline void dev_consume_skb_any(struct sk_buff *skb) 3862{ 3863 __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED); 3864} 3865 3866void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog); 3867int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb); 3868int netif_rx(struct sk_buff *skb); 3869int netif_rx_ni(struct sk_buff *skb); 3870int netif_rx_any_context(struct sk_buff *skb); 3871int netif_receive_skb(struct sk_buff *skb); 3872int netif_receive_skb_core(struct sk_buff *skb); 3873void netif_receive_skb_list(struct list_head *head); 3874gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); 3875void napi_gro_flush(struct napi_struct *napi, bool flush_old); 3876struct sk_buff *napi_get_frags(struct napi_struct *napi); 3877gro_result_t napi_gro_frags(struct napi_struct *napi); 3878struct packet_offload *gro_find_receive_by_type(__be16 type); 3879struct packet_offload *gro_find_complete_by_type(__be16 type); 3880 3881static inline void napi_free_frags(struct napi_struct *napi) 3882{ 3883 kfree_skb(napi->skb); 3884 napi->skb = NULL; 3885} 3886 3887bool netdev_is_rx_handler_busy(struct net_device *dev); 3888int netdev_rx_handler_register(struct net_device *dev, 3889 rx_handler_func_t *rx_handler, 3890 void *rx_handler_data); 3891void netdev_rx_handler_unregister(struct net_device *dev); 3892 3893bool dev_valid_name(const char *name); 3894int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, 3895 bool *need_copyout); 3896int dev_ifconf(struct net *net, struct ifconf *, int); 3897int dev_ethtool(struct net *net, struct ifreq *); 3898unsigned int dev_get_flags(const struct net_device *); 3899int __dev_change_flags(struct net_device *dev, unsigned int flags, 3900 struct netlink_ext_ack *extack); 3901int dev_change_flags(struct net_device *dev, unsigned int flags, 3902 struct netlink_ext_ack *extack); 3903void __dev_notify_flags(struct net_device *, unsigned int old_flags, 3904 unsigned int gchanges); 3905int dev_change_name(struct net_device *, const char *); 3906int dev_set_alias(struct net_device *, const char *, size_t); 3907int dev_get_alias(const struct net_device *, char *, size_t); 3908int dev_change_net_namespace(struct net_device *, struct net *, const char *); 3909int __dev_set_mtu(struct net_device *, int); 3910int dev_validate_mtu(struct net_device *dev, int mtu, 3911 struct netlink_ext_ack *extack); 3912int dev_set_mtu_ext(struct net_device *dev, int mtu, 3913 struct netlink_ext_ack *extack); 3914int dev_set_mtu(struct net_device *, int); 3915int dev_change_tx_queue_len(struct net_device *, unsigned long); 3916void dev_set_group(struct net_device *, int); 3917int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, 3918 struct netlink_ext_ack *extack); 3919int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, 3920 struct netlink_ext_ack *extack); 3921int dev_change_carrier(struct net_device *, bool new_carrier); 3922int dev_get_phys_port_id(struct net_device *dev, 3923 struct netdev_phys_item_id *ppid); 3924int dev_get_phys_port_name(struct net_device *dev, 3925 
			   char *name, size_t len);
int dev_get_port_parent_id(struct net_device *dev,
			   struct netdev_phys_item_id *ppid, bool recurse);
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
int dev_change_proto_down_generic(struct net_device *dev, bool proto_down);
void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
				  u32 value);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
				    struct netdev_queue *txq, int *ret);

typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
		      int fd, int expected_fd, u32 flags);
int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode);

int xdp_umem_query(struct net_device *dev, u16 queue_id);

int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
bool is_skb_forwardable(const struct net_device *dev,
			const struct sk_buff *skb);

static __always_inline int ____dev_forward_skb(struct net_device *dev,
					       struct sk_buff *skb)
{
	if (skb_orphan_frags(skb, GFP_ATOMIC) ||
	    unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	skb_scrub_packet(skb, true);
	skb->priority = 0;
	return 0;
}

bool dev_nit_active(struct net_device *dev);
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);

extern int netdev_budget;
extern unsigned int netdev_budget_usecs;

/* Called by rtnetlink.c:rtnl_unlock() */
void netdev_run_todo(void);

/**
 * dev_put - release reference to device
 * @dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	this_cpu_dec(*dev->pcpu_refcnt);
}

/**
 * dev_hold - get reference to device
 * @dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	this_cpu_inc(*dev->pcpu_refcnt);
}

/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate; these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer, not just hardware media.
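 *
 * Illustrative sketch only (foo_link_irq() and foo_link_up() are
 * hypothetical driver names): a link-change interrupt handler typically
 * reports state with:
 *
 *	if (foo_link_up(priv))
 *		netif_carrier_on(priv->netdev);
 *	else
 *		netif_carrier_off(priv->netdev);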
 */

void linkwatch_init_dev(struct net_device *dev);
void linkwatch_fire_event(struct net_device *dev);
void linkwatch_forget_dev(struct net_device *dev);

/**
 * netif_carrier_ok - test if carrier present
 * @dev: network device
 *
 * Check if carrier is present on device
 */
static inline bool netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

unsigned long dev_trans_start(struct net_device *dev);

void __netdev_watchdog_up(struct net_device *dev);

void netif_carrier_on(struct net_device *dev);

void netif_carrier_off(struct net_device *dev);

/**
 * netif_dormant_on - mark device as dormant.
 * @dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event. For "on-demand"
 * interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 * netif_dormant_off - set device as not dormant.
 * @dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 * netif_dormant - test if device is dormant
 * @dev: network device
 *
 * Check if device is dormant.
 */
static inline bool netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}

/**
 * netif_testing_on - mark device as under test.
 * @dev: network device
 *
 * Mark device as under test (as per RFC2863).
 *
 * The testing state indicates that some test(s) must be performed on
 * the interface. After completion of the test, the interface state
 * will change to up, dormant, or down, as appropriate.
 */
static inline void netif_testing_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 * netif_testing_off - set device as not under test.
 * @dev: network device
 *
 * Device is not in testing state.
4091 */ 4092static inline void netif_testing_off(struct net_device *dev) 4093{ 4094 if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state)) 4095 linkwatch_fire_event(dev); 4096} 4097 4098/** 4099 * netif_testing - test if device is under test 4100 * @dev: network device 4101 * 4102 * Check if device is under test 4103 */ 4104static inline bool netif_testing(const struct net_device *dev) 4105{ 4106 return test_bit(__LINK_STATE_TESTING, &dev->state); 4107} 4108 4109 4110/** 4111 * netif_oper_up - test if device is operational 4112 * @dev: network device 4113 * 4114 * Check if carrier is operational 4115 */ 4116static inline bool netif_oper_up(const struct net_device *dev) 4117{ 4118 return (dev->operstate == IF_OPER_UP || 4119 dev->operstate == IF_OPER_UNKNOWN /* backward compat */); 4120} 4121 4122/** 4123 * netif_device_present - is device available or removed 4124 * @dev: network device 4125 * 4126 * Check if device has not been removed from system. 4127 */ 4128static inline bool netif_device_present(struct net_device *dev) 4129{ 4130 return test_bit(__LINK_STATE_PRESENT, &dev->state); 4131} 4132 4133void netif_device_detach(struct net_device *dev); 4134 4135void netif_device_attach(struct net_device *dev); 4136 4137/* 4138 * Network interface message level settings 4139 */ 4140 4141enum { 4142 NETIF_MSG_DRV_BIT, 4143 NETIF_MSG_PROBE_BIT, 4144 NETIF_MSG_LINK_BIT, 4145 NETIF_MSG_TIMER_BIT, 4146 NETIF_MSG_IFDOWN_BIT, 4147 NETIF_MSG_IFUP_BIT, 4148 NETIF_MSG_RX_ERR_BIT, 4149 NETIF_MSG_TX_ERR_BIT, 4150 NETIF_MSG_TX_QUEUED_BIT, 4151 NETIF_MSG_INTR_BIT, 4152 NETIF_MSG_TX_DONE_BIT, 4153 NETIF_MSG_RX_STATUS_BIT, 4154 NETIF_MSG_PKTDATA_BIT, 4155 NETIF_MSG_HW_BIT, 4156 NETIF_MSG_WOL_BIT, 4157 4158 /* When you add a new bit above, update netif_msg_class_names array 4159 * in net/ethtool/common.c 4160 */ 4161 NETIF_MSG_CLASS_COUNT, 4162}; 4163/* Both ethtool_ops interface and internal driver implementation use u32 */ 4164static_assert(NETIF_MSG_CLASS_COUNT <= 32); 4165 4166#define __NETIF_MSG_BIT(bit) ((u32)1 << (bit)) 4167#define __NETIF_MSG(name) __NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT) 4168 4169#define NETIF_MSG_DRV __NETIF_MSG(DRV) 4170#define NETIF_MSG_PROBE __NETIF_MSG(PROBE) 4171#define NETIF_MSG_LINK __NETIF_MSG(LINK) 4172#define NETIF_MSG_TIMER __NETIF_MSG(TIMER) 4173#define NETIF_MSG_IFDOWN __NETIF_MSG(IFDOWN) 4174#define NETIF_MSG_IFUP __NETIF_MSG(IFUP) 4175#define NETIF_MSG_RX_ERR __NETIF_MSG(RX_ERR) 4176#define NETIF_MSG_TX_ERR __NETIF_MSG(TX_ERR) 4177#define NETIF_MSG_TX_QUEUED __NETIF_MSG(TX_QUEUED) 4178#define NETIF_MSG_INTR __NETIF_MSG(INTR) 4179#define NETIF_MSG_TX_DONE __NETIF_MSG(TX_DONE) 4180#define NETIF_MSG_RX_STATUS __NETIF_MSG(RX_STATUS) 4181#define NETIF_MSG_PKTDATA __NETIF_MSG(PKTDATA) 4182#define NETIF_MSG_HW __NETIF_MSG(HW) 4183#define NETIF_MSG_WOL __NETIF_MSG(WOL) 4184 4185#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) 4186#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) 4187#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) 4188#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) 4189#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) 4190#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) 4191#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) 4192#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) 4193#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) 4194#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) 4195#define netif_msg_tx_done(p) 
((p)->msg_enable & NETIF_MSG_TX_DONE) 4196#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) 4197#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) 4198#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) 4199#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) 4200 4201static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) 4202{ 4203 /* use default */ 4204 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) 4205 return default_msg_enable_bits; 4206 if (debug_value == 0) /* no output */ 4207 return 0; 4208 /* set low N bits */ 4209 return (1U << debug_value) - 1; 4210} 4211 4212static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) 4213{ 4214 spin_lock(&txq->_xmit_lock); 4215 txq->xmit_lock_owner = cpu; 4216} 4217 4218static inline bool __netif_tx_acquire(struct netdev_queue *txq) 4219{ 4220 __acquire(&txq->_xmit_lock); 4221 return true; 4222} 4223 4224static inline void __netif_tx_release(struct netdev_queue *txq) 4225{ 4226 __release(&txq->_xmit_lock); 4227} 4228 4229static inline void __netif_tx_lock_bh(struct netdev_queue *txq) 4230{ 4231 spin_lock_bh(&txq->_xmit_lock); 4232 txq->xmit_lock_owner = smp_processor_id(); 4233} 4234 4235static inline bool __netif_tx_trylock(struct netdev_queue *txq) 4236{ 4237 bool ok = spin_trylock(&txq->_xmit_lock); 4238 if (likely(ok)) 4239 txq->xmit_lock_owner = smp_processor_id(); 4240 return ok; 4241} 4242 4243static inline void __netif_tx_unlock(struct netdev_queue *txq) 4244{ 4245 txq->xmit_lock_owner = -1; 4246 spin_unlock(&txq->_xmit_lock); 4247} 4248 4249static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) 4250{ 4251 txq->xmit_lock_owner = -1; 4252 spin_unlock_bh(&txq->_xmit_lock); 4253} 4254 4255static inline void txq_trans_update(struct netdev_queue *txq) 4256{ 4257 if (txq->xmit_lock_owner != -1) 4258 txq->trans_start = jiffies; 4259} 4260 4261/* legacy drivers only, netdev_start_xmit() sets txq->trans_start */ 4262static inline void netif_trans_update(struct net_device *dev) 4263{ 4264 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); 4265 4266 if (txq->trans_start != jiffies) 4267 txq->trans_start = jiffies; 4268} 4269 4270/** 4271 * netif_tx_lock - grab network device transmit lock 4272 * @dev: network device 4273 * 4274 * Get network device transmit lock 4275 */ 4276static inline void netif_tx_lock(struct net_device *dev) 4277{ 4278 unsigned int i; 4279 int cpu; 4280 4281 spin_lock(&dev->tx_global_lock); 4282 cpu = smp_processor_id(); 4283 for (i = 0; i < dev->num_tx_queues; i++) { 4284 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 4285 4286 /* We are the only thread of execution doing a 4287 * freeze, but we have to grab the _xmit_lock in 4288 * order to synchronize with threads which are in 4289 * the ->hard_start_xmit() handler and already 4290 * checked the frozen bit. 4291 */ 4292 __netif_tx_lock(txq, cpu); 4293 set_bit(__QUEUE_STATE_FROZEN, &txq->state); 4294 __netif_tx_unlock(txq); 4295 } 4296} 4297 4298static inline void netif_tx_lock_bh(struct net_device *dev) 4299{ 4300 local_bh_disable(); 4301 netif_tx_lock(dev); 4302} 4303 4304static inline void netif_tx_unlock(struct net_device *dev) 4305{ 4306 unsigned int i; 4307 4308 for (i = 0; i < dev->num_tx_queues; i++) { 4309 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 4310 4311 /* No need to grab the _xmit_lock here. If the 4312 * queue is not stopped for another reason, we 4313 * force a schedule. 
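		 * Clearing the frozen bit and rescheduling lets senders
		 * that observed the frozen state make progress again.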
4314 */ 4315 clear_bit(__QUEUE_STATE_FROZEN, &txq->state); 4316 netif_schedule_queue(txq); 4317 } 4318 spin_unlock(&dev->tx_global_lock); 4319} 4320 4321static inline void netif_tx_unlock_bh(struct net_device *dev) 4322{ 4323 netif_tx_unlock(dev); 4324 local_bh_enable(); 4325} 4326 4327#define HARD_TX_LOCK(dev, txq, cpu) { \ 4328 if ((dev->features & NETIF_F_LLTX) == 0) { \ 4329 __netif_tx_lock(txq, cpu); \ 4330 } else { \ 4331 __netif_tx_acquire(txq); \ 4332 } \ 4333} 4334 4335#define HARD_TX_TRYLOCK(dev, txq) \ 4336 (((dev->features & NETIF_F_LLTX) == 0) ? \ 4337 __netif_tx_trylock(txq) : \ 4338 __netif_tx_acquire(txq)) 4339 4340#define HARD_TX_UNLOCK(dev, txq) { \ 4341 if ((dev->features & NETIF_F_LLTX) == 0) { \ 4342 __netif_tx_unlock(txq); \ 4343 } else { \ 4344 __netif_tx_release(txq); \ 4345 } \ 4346} 4347 4348static inline void netif_tx_disable(struct net_device *dev) 4349{ 4350 unsigned int i; 4351 int cpu; 4352 4353 local_bh_disable(); 4354 cpu = smp_processor_id(); 4355 spin_lock(&dev->tx_global_lock); 4356 for (i = 0; i < dev->num_tx_queues; i++) { 4357 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 4358 4359 __netif_tx_lock(txq, cpu); 4360 netif_tx_stop_queue(txq); 4361 __netif_tx_unlock(txq); 4362 } 4363 spin_unlock(&dev->tx_global_lock); 4364 local_bh_enable(); 4365} 4366 4367static inline void netif_addr_lock(struct net_device *dev) 4368{ 4369 unsigned char nest_level = 0; 4370 4371#ifdef CONFIG_LOCKDEP 4372 nest_level = dev->nested_level; 4373#endif 4374 spin_lock_nested(&dev->addr_list_lock, nest_level); 4375} 4376 4377static inline void netif_addr_lock_bh(struct net_device *dev) 4378{ 4379 unsigned char nest_level = 0; 4380 4381#ifdef CONFIG_LOCKDEP 4382 nest_level = dev->nested_level; 4383#endif 4384 local_bh_disable(); 4385 spin_lock_nested(&dev->addr_list_lock, nest_level); 4386} 4387 4388static inline void netif_addr_unlock(struct net_device *dev) 4389{ 4390 spin_unlock(&dev->addr_list_lock); 4391} 4392 4393static inline void netif_addr_unlock_bh(struct net_device *dev) 4394{ 4395 spin_unlock_bh(&dev->addr_list_lock); 4396} 4397 4398/* 4399 * dev_addrs walker. Should be used only for read access. Call with 4400 * rcu_read_lock held. 
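 *
 * Illustrative sketch only:
 *
 *	struct netdev_hw_addr *ha;
 *
 *	rcu_read_lock();
 *	for_each_dev_addr(dev, ha)
 *		pr_info("addr: %pM\n", ha->addr);
 *	rcu_read_unlock();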
 */
#define for_each_dev_addr(dev, ha) \
		list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)

/* These functions live elsewhere (drivers/net/net_init.c, but related) */

void ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
				    unsigned char name_assign_type,
				    void (*setup)(struct net_device *),
				    unsigned int txqs, unsigned int rxqs);
#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)

#define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
			 count)

int register_netdev(struct net_device *dev);
void unregister_netdev(struct net_device *dev);

int devm_register_netdev(struct device *dev, struct net_device *ndev);

/* General hardware address lists handling functions */
int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
		   struct netdev_hw_addr_list *from_list, int addr_len);
void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
		      struct netdev_hw_addr_list *from_list, int addr_len);
int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
		       struct net_device *dev,
		       int (*sync)(struct net_device *, const unsigned char *),
		       int (*unsync)(struct net_device *,
				     const unsigned char *));
int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
			   struct net_device *dev,
			   int (*sync)(struct net_device *,
				       const unsigned char *, int),
			   int (*unsync)(struct net_device *,
					 const unsigned char *, int));
void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
			      struct net_device *dev,
			      int (*unsync)(struct net_device *,
					    const unsigned char *, int));
void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
			  struct net_device *dev,
			  int (*unsync)(struct net_device *,
					const unsigned char *));
void __hw_addr_init(struct netdev_hw_addr_list *list);

/* Functions used for device addresses handling */
int dev_addr_add(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type);
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type);
void dev_addr_flush(struct net_device *dev);
int dev_addr_init(struct net_device *dev);

/* Functions used for unicast addresses handling */
int dev_uc_add(struct net_device *dev, const unsigned char *addr);
int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
int dev_uc_del(struct net_device *dev, const unsigned char *addr);
int dev_uc_sync(struct net_device *to, struct net_device *from);
int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
void dev_uc_unsync(struct net_device *to, struct net_device *from);
void dev_uc_flush(struct net_device *dev);
void dev_uc_init(struct net_device *dev);

/**
 * __dev_uc_sync - Synchronize device's unicast list
 * @dev: device to sync
 * @sync: function to call if address should be added
 * @unsync: function to call if address should be removed
 *
 * Add newly added addresses to the interface, and release
 * addresses that have been deleted.
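 *
 * Illustrative sketch only (foo_uc_add() and foo_uc_del() are
 * hypothetical callbacks matching the @sync/@unsync signature):
 * typically called from an ndo_set_rx_mode implementation, e.g.
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		__dev_uc_sync(dev, foo_uc_add, foo_uc_del);
 *	}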
 */
static inline int __dev_uc_sync(struct net_device *dev,
				int (*sync)(struct net_device *,
					    const unsigned char *),
				int (*unsync)(struct net_device *,
					      const unsigned char *))
{
	return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
}

/**
 * __dev_uc_unsync - Remove synchronized addresses from device
 * @dev: device to sync
 * @unsync: function to call if address should be removed
 *
 * Remove all addresses that were added to the device by dev_uc_sync().
 */
static inline void __dev_uc_unsync(struct net_device *dev,
				   int (*unsync)(struct net_device *,
						 const unsigned char *))
{
	__hw_addr_unsync_dev(&dev->uc, dev, unsync);
}

/* Functions used for multicast addresses handling */
int dev_mc_add(struct net_device *dev, const unsigned char *addr);
int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
int dev_mc_del(struct net_device *dev, const unsigned char *addr);
int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
int dev_mc_sync(struct net_device *to, struct net_device *from);
int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
void dev_mc_unsync(struct net_device *to, struct net_device *from);
void dev_mc_flush(struct net_device *dev);
void dev_mc_init(struct net_device *dev);

/**
 * __dev_mc_sync - Synchronize device's multicast list
 * @dev: device to sync
 * @sync: function to call if address should be added
 * @unsync: function to call if address should be removed
 *
 * Add newly added addresses to the interface, and release
 * addresses that have been deleted.
 */
static inline int __dev_mc_sync(struct net_device *dev,
				int (*sync)(struct net_device *,
					    const unsigned char *),
				int (*unsync)(struct net_device *,
					      const unsigned char *))
{
	return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
}

/**
 * __dev_mc_unsync - Remove synchronized addresses from device
 * @dev: device to sync
 * @unsync: function to call if address should be removed
 *
 * Remove all addresses that were added to the device by dev_mc_sync().
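 *
 * Illustrative sketch only (foo_mc_del() is a hypothetical callback):
 * typically called when the device is being taken down, e.g. from a
 * driver's ndo_stop implementation:
 *
 *	__dev_mc_unsync(dev, foo_mc_del);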
4538 */ 4539static inline void __dev_mc_unsync(struct net_device *dev, 4540 int (*unsync)(struct net_device *, 4541 const unsigned char *)) 4542{ 4543 __hw_addr_unsync_dev(&dev->mc, dev, unsync); 4544} 4545 4546/* Functions used for secondary unicast and multicast support */ 4547void dev_set_rx_mode(struct net_device *dev); 4548void __dev_set_rx_mode(struct net_device *dev); 4549int dev_set_promiscuity(struct net_device *dev, int inc); 4550int dev_set_allmulti(struct net_device *dev, int inc); 4551void netdev_state_change(struct net_device *dev); 4552void __netdev_notify_peers(struct net_device *dev); 4553void netdev_notify_peers(struct net_device *dev); 4554void netdev_features_change(struct net_device *dev); 4555/* Load a device via the kmod */ 4556void dev_load(struct net *net, const char *name); 4557struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 4558 struct rtnl_link_stats64 *storage); 4559void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 4560 const struct net_device_stats *netdev_stats); 4561void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s, 4562 const struct pcpu_sw_netstats __percpu *netstats); 4563void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s); 4564 4565extern int netdev_max_backlog; 4566extern int netdev_tstamp_prequeue; 4567extern int weight_p; 4568extern int dev_weight_rx_bias; 4569extern int dev_weight_tx_bias; 4570extern int dev_rx_weight; 4571extern int dev_tx_weight; 4572extern int gro_normal_batch; 4573 4574enum { 4575 NESTED_SYNC_IMM_BIT, 4576 NESTED_SYNC_TODO_BIT, 4577}; 4578 4579#define __NESTED_SYNC_BIT(bit) ((u32)1 << (bit)) 4580#define __NESTED_SYNC(name) __NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT) 4581 4582#define NESTED_SYNC_IMM __NESTED_SYNC(IMM) 4583#define NESTED_SYNC_TODO __NESTED_SYNC(TODO) 4584 4585struct netdev_nested_priv { 4586 unsigned char flags; 4587 void *data; 4588}; 4589 4590bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); 4591struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, 4592 struct list_head **iter); 4593struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, 4594 struct list_head **iter); 4595 4596#ifdef CONFIG_LOCKDEP 4597static LIST_HEAD(net_unlink_list); 4598 4599static inline void net_unlink_todo(struct net_device *dev) 4600{ 4601 if (list_empty(&dev->unlink_list)) 4602 list_add_tail(&dev->unlink_list, &net_unlink_list); 4603} 4604#endif 4605 4606/* iterate through upper list, must be called under RCU read lock */ 4607#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \ 4608 for (iter = &(dev)->adj_list.upper, \ 4609 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \ 4610 updev; \ 4611 updev = netdev_upper_get_next_dev_rcu(dev, &(iter))) 4612 4613int netdev_walk_all_upper_dev_rcu(struct net_device *dev, 4614 int (*fn)(struct net_device *upper_dev, 4615 struct netdev_nested_priv *priv), 4616 struct netdev_nested_priv *priv); 4617 4618bool netdev_has_upper_dev_all_rcu(struct net_device *dev, 4619 struct net_device *upper_dev); 4620 4621bool netdev_has_any_upper_dev(struct net_device *dev); 4622 4623void *netdev_lower_get_next_private(struct net_device *dev, 4624 struct list_head **iter); 4625void *netdev_lower_get_next_private_rcu(struct net_device *dev, 4626 struct list_head **iter); 4627 4628#define netdev_for_each_lower_private(dev, priv, iter) \ 4629 for (iter = (dev)->adj_list.lower.next, \ 4630 priv = netdev_lower_get_next_private(dev, &(iter)); \ 4631 priv; \ 4632 priv = 
netdev_lower_get_next_private(dev, &(iter))) 4633 4634#define netdev_for_each_lower_private_rcu(dev, priv, iter) \ 4635 for (iter = &(dev)->adj_list.lower, \ 4636 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \ 4637 priv; \ 4638 priv = netdev_lower_get_next_private_rcu(dev, &(iter))) 4639 4640void *netdev_lower_get_next(struct net_device *dev, 4641 struct list_head **iter); 4642 4643#define netdev_for_each_lower_dev(dev, ldev, iter) \ 4644 for (iter = (dev)->adj_list.lower.next, \ 4645 ldev = netdev_lower_get_next(dev, &(iter)); \ 4646 ldev; \ 4647 ldev = netdev_lower_get_next(dev, &(iter))) 4648 4649struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, 4650 struct list_head **iter); 4651int netdev_walk_all_lower_dev(struct net_device *dev, 4652 int (*fn)(struct net_device *lower_dev, 4653 struct netdev_nested_priv *priv), 4654 struct netdev_nested_priv *priv); 4655int netdev_walk_all_lower_dev_rcu(struct net_device *dev, 4656 int (*fn)(struct net_device *lower_dev, 4657 struct netdev_nested_priv *priv), 4658 struct netdev_nested_priv *priv); 4659 4660void *netdev_adjacent_get_private(struct list_head *adj_list); 4661void *netdev_lower_get_first_private_rcu(struct net_device *dev); 4662struct net_device *netdev_master_upper_dev_get(struct net_device *dev); 4663struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); 4664int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev, 4665 struct netlink_ext_ack *extack); 4666int netdev_master_upper_dev_link(struct net_device *dev, 4667 struct net_device *upper_dev, 4668 void *upper_priv, void *upper_info, 4669 struct netlink_ext_ack *extack); 4670void netdev_upper_dev_unlink(struct net_device *dev, 4671 struct net_device *upper_dev); 4672int netdev_adjacent_change_prepare(struct net_device *old_dev, 4673 struct net_device *new_dev, 4674 struct net_device *dev, 4675 struct netlink_ext_ack *extack); 4676void netdev_adjacent_change_commit(struct net_device *old_dev, 4677 struct net_device *new_dev, 4678 struct net_device *dev); 4679void netdev_adjacent_change_abort(struct net_device *old_dev, 4680 struct net_device *new_dev, 4681 struct net_device *dev); 4682void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); 4683void *netdev_lower_dev_get_private(struct net_device *dev, 4684 struct net_device *lower_dev); 4685void netdev_lower_state_changed(struct net_device *lower_dev, 4686 void *lower_state_info); 4687 4688/* RSS keys are 40 or 52 bytes long */ 4689#define NETDEV_RSS_KEY_LEN 52 4690extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; 4691void netdev_rss_key_fill(void *buffer, size_t len); 4692 4693int skb_checksum_help(struct sk_buff *skb); 4694int skb_crc32c_csum_help(struct sk_buff *skb); 4695int skb_csum_hwoffload_help(struct sk_buff *skb, 4696 const netdev_features_t features); 4697 4698struct sk_buff *__skb_gso_segment(struct sk_buff *skb, 4699 netdev_features_t features, bool tx_path); 4700struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, 4701 netdev_features_t features); 4702 4703struct netdev_bonding_info { 4704 ifslave slave; 4705 ifbond master; 4706}; 4707 4708struct netdev_notifier_bonding_info { 4709 struct netdev_notifier_info info; /* must be first */ 4710 struct netdev_bonding_info bonding_info; 4711}; 4712 4713void netdev_bonding_info_change(struct net_device *dev, 4714 struct netdev_bonding_info *bonding_info); 4715 4716#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK) 4717void ethtool_notify(struct net_device *dev, unsigned int cmd, const 
void *data); 4718#else 4719static inline void ethtool_notify(struct net_device *dev, unsigned int cmd, 4720 const void *data) 4721{ 4722} 4723#endif 4724 4725static inline 4726struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features) 4727{ 4728 return __skb_gso_segment(skb, features, true); 4729} 4730__be16 skb_network_protocol(struct sk_buff *skb, int *depth); 4731 4732static inline bool can_checksum_protocol(netdev_features_t features, 4733 __be16 protocol) 4734{ 4735 if (protocol == htons(ETH_P_FCOE)) 4736 return !!(features & NETIF_F_FCOE_CRC); 4737 4738 /* Assume this is an IP checksum (not SCTP CRC) */ 4739 4740 if (features & NETIF_F_HW_CSUM) { 4741 /* Can checksum everything */ 4742 return true; 4743 } 4744 4745 switch (protocol) { 4746 case htons(ETH_P_IP): 4747 return !!(features & NETIF_F_IP_CSUM); 4748 case htons(ETH_P_IPV6): 4749 return !!(features & NETIF_F_IPV6_CSUM); 4750 default: 4751 return false; 4752 } 4753} 4754 4755#ifdef CONFIG_BUG 4756void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb); 4757#else 4758static inline void netdev_rx_csum_fault(struct net_device *dev, 4759 struct sk_buff *skb) 4760{ 4761} 4762#endif 4763/* rx skb timestamps */ 4764void net_enable_timestamp(void); 4765void net_disable_timestamp(void); 4766 4767#ifdef CONFIG_PROC_FS 4768int __init dev_proc_init(void); 4769#else 4770#define dev_proc_init() 0 4771#endif 4772 4773static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops, 4774 struct sk_buff *skb, struct net_device *dev, 4775 bool more) 4776{ 4777 __this_cpu_write(softnet_data.xmit.more, more); 4778 return ops->ndo_start_xmit(skb, dev); 4779} 4780 4781static inline bool netdev_xmit_more(void) 4782{ 4783 return __this_cpu_read(softnet_data.xmit.more); 4784} 4785 4786static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev, 4787 struct netdev_queue *txq, bool more) 4788{ 4789 const struct net_device_ops *ops = dev->netdev_ops; 4790 netdev_tx_t rc; 4791 4792 rc = __netdev_start_xmit(ops, skb, dev, more); 4793 if (rc == NETDEV_TX_OK) 4794 txq_trans_update(txq); 4795 4796 return rc; 4797} 4798 4799int netdev_class_create_file_ns(const struct class_attribute *class_attr, 4800 const void *ns); 4801void netdev_class_remove_file_ns(const struct class_attribute *class_attr, 4802 const void *ns); 4803 4804extern const struct kobj_ns_type_operations net_ns_type_operations; 4805 4806const char *netdev_drivername(const struct net_device *dev); 4807 4808void linkwatch_run_queue(void); 4809 4810static inline netdev_features_t netdev_intersect_features(netdev_features_t f1, 4811 netdev_features_t f2) 4812{ 4813 if ((f1 ^ f2) & NETIF_F_HW_CSUM) { 4814 if (f1 & NETIF_F_HW_CSUM) 4815 f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 4816 else 4817 f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 4818 } 4819 4820 return f1 & f2; 4821} 4822 4823static inline netdev_features_t netdev_get_wanted_features( 4824 struct net_device *dev) 4825{ 4826 return (dev->features & ~dev->hw_features) | dev->wanted_features; 4827} 4828netdev_features_t netdev_increment_features(netdev_features_t all, 4829 netdev_features_t one, netdev_features_t mask); 4830 4831/* Allow TSO being used on stacked device : 4832 * Performing the GSO segmentation before last device 4833 * is a performance improvement. 
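 *
 * Illustrative sketch only (variables abbreviated), of the kind of
 * feature recomputation an aggregating upper device might do over its
 * lower devices:
 *
 *	features = netdev_increment_features(features, lower->features, mask);
 *	features = netdev_add_tso_features(features, mask);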
4834 */ 4835static inline netdev_features_t netdev_add_tso_features(netdev_features_t features, 4836 netdev_features_t mask) 4837{ 4838 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask); 4839} 4840 4841int __netdev_update_features(struct net_device *dev); 4842void netdev_update_features(struct net_device *dev); 4843void netdev_change_features(struct net_device *dev); 4844 4845void netif_stacked_transfer_operstate(const struct net_device *rootdev, 4846 struct net_device *dev); 4847 4848netdev_features_t passthru_features_check(struct sk_buff *skb, 4849 struct net_device *dev, 4850 netdev_features_t features); 4851netdev_features_t netif_skb_features(struct sk_buff *skb); 4852 4853static inline bool net_gso_ok(netdev_features_t features, int gso_type) 4854{ 4855 netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT; 4856 4857 /* check flags correspondence */ 4858 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); 4859 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT)); 4860 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); 4861 BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT)); 4862 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT)); 4863 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)); 4864 BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT)); 4865 BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT)); 4866 BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT)); 4867 BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT)); 4868 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); 4869 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); 4870 BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT)); 4871 BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); 4872 BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT)); 4873 BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT)); 4874 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT)); 4875 BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT)); 4876 BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT)); 4877 4878 return (features & feature) == feature; 4879} 4880 4881static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features) 4882{ 4883 return net_gso_ok(features, skb_shinfo(skb)->gso_type) && 4884 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); 4885} 4886 4887static inline bool netif_needs_gso(struct sk_buff *skb, 4888 netdev_features_t features) 4889{ 4890 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || 4891 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) && 4892 (skb->ip_summed != CHECKSUM_UNNECESSARY))); 4893} 4894 4895static inline void netif_set_gso_max_size(struct net_device *dev, 4896 unsigned int size) 4897{ 4898 dev->gso_max_size = size; 4899} 4900 4901static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol, 4902 int pulled_hlen, u16 mac_offset, 4903 int mac_len) 4904{ 4905 skb->protocol = protocol; 4906 skb->encapsulation = 1; 4907 skb_push(skb, pulled_hlen); 4908 skb_reset_transport_header(skb); 4909 skb->mac_header = mac_offset; 4910 skb->network_header = skb->mac_header + mac_len; 4911 skb->mac_len = mac_len; 4912} 4913 
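/* Illustrative sketch only (foo_maybe_segment() is a hypothetical helper,
 * loosely modelled on the core TX path): a GSO skb that the device cannot
 * handle directly is segmented in software first:
 *
 *	static struct sk_buff *foo_maybe_segment(struct sk_buff *skb,
 *						 netdev_features_t features)
 *	{
 *		if (netif_needs_gso(skb, features))
 *			return skb_gso_segment(skb, features);
 *		return skb;
 *	}
 */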
4914static inline bool netif_is_macsec(const struct net_device *dev) 4915{ 4916 return dev->priv_flags & IFF_MACSEC; 4917} 4918 4919static inline bool netif_is_macvlan(const struct net_device *dev) 4920{ 4921 return dev->priv_flags & IFF_MACVLAN; 4922} 4923 4924static inline bool netif_is_macvlan_port(const struct net_device *dev) 4925{ 4926 return dev->priv_flags & IFF_MACVLAN_PORT; 4927} 4928 4929static inline bool netif_is_bond_master(const struct net_device *dev) 4930{ 4931 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; 4932} 4933 4934static inline bool netif_is_bond_slave(const struct net_device *dev) 4935{ 4936 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; 4937} 4938 4939static inline bool netif_supports_nofcs(struct net_device *dev) 4940{ 4941 return dev->priv_flags & IFF_SUPP_NOFCS; 4942} 4943 4944static inline bool netif_has_l3_rx_handler(const struct net_device *dev) 4945{ 4946 return dev->priv_flags & IFF_L3MDEV_RX_HANDLER; 4947} 4948 4949static inline bool netif_is_l3_master(const struct net_device *dev) 4950{ 4951 return dev->priv_flags & IFF_L3MDEV_MASTER; 4952} 4953 4954static inline bool netif_is_l3_slave(const struct net_device *dev) 4955{ 4956 return dev->priv_flags & IFF_L3MDEV_SLAVE; 4957} 4958 4959static inline bool netif_is_bridge_master(const struct net_device *dev) 4960{ 4961 return dev->priv_flags & IFF_EBRIDGE; 4962} 4963 4964static inline bool netif_is_bridge_port(const struct net_device *dev) 4965{ 4966 return dev->priv_flags & IFF_BRIDGE_PORT; 4967} 4968 4969static inline bool netif_is_ovs_master(const struct net_device *dev) 4970{ 4971 return dev->priv_flags & IFF_OPENVSWITCH; 4972} 4973 4974static inline bool netif_is_ovs_port(const struct net_device *dev) 4975{ 4976 return dev->priv_flags & IFF_OVS_DATAPATH; 4977} 4978 4979static inline bool netif_is_any_bridge_port(const struct net_device *dev) 4980{ 4981 return netif_is_bridge_port(dev) || netif_is_ovs_port(dev); 4982} 4983 4984static inline bool netif_is_team_master(const struct net_device *dev) 4985{ 4986 return dev->priv_flags & IFF_TEAM; 4987} 4988 4989static inline bool netif_is_team_port(const struct net_device *dev) 4990{ 4991 return dev->priv_flags & IFF_TEAM_PORT; 4992} 4993 4994static inline bool netif_is_lag_master(const struct net_device *dev) 4995{ 4996 return netif_is_bond_master(dev) || netif_is_team_master(dev); 4997} 4998 4999static inline bool netif_is_lag_port(const struct net_device *dev) 5000{ 5001 return netif_is_bond_slave(dev) || netif_is_team_port(dev); 5002} 5003 5004static inline bool netif_is_rxfh_configured(const struct net_device *dev) 5005{ 5006 return dev->priv_flags & IFF_RXFH_CONFIGURED; 5007} 5008 5009static inline bool netif_is_failover(const struct net_device *dev) 5010{ 5011 return dev->priv_flags & IFF_FAILOVER; 5012} 5013 5014static inline bool netif_is_failover_slave(const struct net_device *dev) 5015{ 5016 return dev->priv_flags & IFF_FAILOVER_SLAVE; 5017} 5018 5019/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ 5020static inline void netif_keep_dst(struct net_device *dev) 5021{ 5022 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM); 5023} 5024 5025/* return true if dev can't cope with mtu frames that need vlan tag insertion */ 5026static inline bool netif_reduces_vlan_mtu(struct net_device *dev) 5027{ 5028 /* TODO: reserve and use an additional IFF bit, if we get more users */ 5029 return dev->priv_flags & IFF_MACSEC; 5030} 5031 5032extern struct pernet_operations __net_initdata 
loopback_net_ops; 5033 5034/* Logging, debugging and troubleshooting/diagnostic helpers. */ 5035 5036/* netdev_printk helpers, similar to dev_printk */ 5037 5038static inline const char *netdev_name(const struct net_device *dev) 5039{ 5040 if (!dev->name[0] || strchr(dev->name, '%')) 5041 return "(unnamed net_device)"; 5042 return dev->name; 5043} 5044 5045static inline bool netdev_unregistering(const struct net_device *dev) 5046{ 5047 return dev->reg_state == NETREG_UNREGISTERING; 5048} 5049 5050static inline const char *netdev_reg_state(const struct net_device *dev) 5051{ 5052 switch (dev->reg_state) { 5053 case NETREG_UNINITIALIZED: return " (uninitialized)"; 5054 case NETREG_REGISTERED: return ""; 5055 case NETREG_UNREGISTERING: return " (unregistering)"; 5056 case NETREG_UNREGISTERED: return " (unregistered)"; 5057 case NETREG_RELEASED: return " (released)"; 5058 case NETREG_DUMMY: return " (dummy)"; 5059 } 5060 5061 WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state); 5062 return " (unknown)"; 5063} 5064 5065__printf(3, 4) __cold 5066void netdev_printk(const char *level, const struct net_device *dev, 5067 const char *format, ...); 5068__printf(2, 3) __cold 5069void netdev_emerg(const struct net_device *dev, const char *format, ...); 5070__printf(2, 3) __cold 5071void netdev_alert(const struct net_device *dev, const char *format, ...); 5072__printf(2, 3) __cold 5073void netdev_crit(const struct net_device *dev, const char *format, ...); 5074__printf(2, 3) __cold 5075void netdev_err(const struct net_device *dev, const char *format, ...); 5076__printf(2, 3) __cold 5077void netdev_warn(const struct net_device *dev, const char *format, ...); 5078__printf(2, 3) __cold 5079void netdev_notice(const struct net_device *dev, const char *format, ...); 5080__printf(2, 3) __cold 5081void netdev_info(const struct net_device *dev, const char *format, ...); 5082 5083#define netdev_level_once(level, dev, fmt, ...) \ 5084do { \ 5085 static bool __print_once __read_mostly; \ 5086 \ 5087 if (!__print_once) { \ 5088 __print_once = true; \ 5089 netdev_printk(level, dev, fmt, ##__VA_ARGS__); \ 5090 } \ 5091} while (0) 5092 5093#define netdev_emerg_once(dev, fmt, ...) \ 5094 netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__) 5095#define netdev_alert_once(dev, fmt, ...) \ 5096 netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__) 5097#define netdev_crit_once(dev, fmt, ...) \ 5098 netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__) 5099#define netdev_err_once(dev, fmt, ...) \ 5100 netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__) 5101#define netdev_warn_once(dev, fmt, ...) \ 5102 netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__) 5103#define netdev_notice_once(dev, fmt, ...) \ 5104 netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__) 5105#define netdev_info_once(dev, fmt, ...) \ 5106 netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__) 5107 5108#define MODULE_ALIAS_NETDEV(device) \ 5109 MODULE_ALIAS("netdev-" device) 5110 5111#if defined(CONFIG_DYNAMIC_DEBUG) || \ 5112 (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) 5113#define netdev_dbg(__dev, format, args...) \ 5114do { \ 5115 dynamic_netdev_dbg(__dev, format, ##args); \ 5116} while (0) 5117#elif defined(DEBUG) 5118#define netdev_dbg(__dev, format, args...) \ 5119 netdev_printk(KERN_DEBUG, __dev, format, ##args) 5120#else 5121#define netdev_dbg(__dev, format, args...) 
#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)

#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define netdev_dbg(__dev, format, args...)			\
do {								\
	dynamic_netdev_dbg(__dev, format, ##args);		\
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
})
#endif

#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else
#define netdev_vdbg(dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
	0;							\
})
#endif

/*
 * netdev_WARN() acts like dev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s%s: " format, netdev_name(dev),	\
	     netdev_reg_state(dev), ##args)

#define netdev_WARN_ONCE(dev, format, args...)				\
	WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev),	\
		  netdev_reg_state(dev), ##args)

/* netif printk helpers, similar to netdev_printk */

#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)

#define netif_level(level, priv, type, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_##level(dev, fmt, ##args);		\
} while (0)

#define netif_emerg(priv, type, dev, fmt, args...)		\
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)		\
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)		\
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)		\
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)		\
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)		\
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)		\
	netif_level(info, priv, type, dev, fmt, ##args)

#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_netdev_dbg(netdev, format, ##args);	\
} while (0)
#elif defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...)			\
({									\
	if (0)								\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;								\
})
#endif

/* if @cond then downgrade to debug, else print at @level */
#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...)	\
	do {								\
		if (cond)						\
			netif_dbg(priv, type, netdev, fmt, ##args);	\
		else							\
			netif_ ## level(priv, type, netdev, fmt, ##args); \
	} while (0)

#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif
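/*
 * Illustrative usage, not part of this header.  The netif_* variants gate
 * each message on the driver's message-level bitmap: they assume a
 * driver-private struct whose msg_enable field is tested by the
 * netif_msg_##type() helpers (see netif_msg_init() earlier in this file
 * for how that bitmap is typically seeded from a module parameter).  The
 * struct and field names below are hypothetical:
 *
 *	struct my_priv {
 *		struct net_device *ndev;
 *		u32 msg_enable;
 *	};
 *
 *	netif_info(priv, link, priv->ndev, "link up, %u Mbps\n", speed);
 *	netif_dbg(priv, rx_err, priv->ndev, "RX checksum error\n");
 */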
/*
 * The list of packet types we will receive (as opposed to discard)
 * and the routines to invoke.
 *
 * Why 16? Because with 16 the only overlap we get on a hash of the
 * low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *		0800	IP
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */
#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

extern struct net_device *blackhole_netdev;

#endif	/* _LINUX_NETDEVICE_H */
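/*
 * Worked example (illustrative) for the PTYPE hash above: the bucket is
 * simply the low nibble of the host-order protocol value, so with a
 * PTYPE_HASH_SIZE of 16 the table maps as
 *
 *	0x0800 & PTYPE_HASH_MASK == 0x0		(IP)
 *	0x86DD & PTYPE_HASH_MASK == 0xd		(IPv6)
 *	0x8035 & PTYPE_HASH_MASK == 0x5		(RARP)
 *	0x0005 & PTYPE_HASH_MASK == 0x5		(SNAP)
 *	0x0805 & PTYPE_HASH_MASK == 0x5		(X.25)
 *
 * i.e. RARP, SNAP and X.25 share bucket 5 while every other listed
 * protocol gets a bucket of its own; net/core/dev.c indexes ptype_base[]
 * with ntohs(type) & PTYPE_HASH_MASK in the same way.
 */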