Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) Fix verifier memory corruption and other bugs in BPF layer, from
Alexei Starovoitov.

2) Add a conservative fix for doing BPF properly in the BPF classifier
of the packet scheduler on ingress. Also from Alexei.

3) The SKB scrubber should not clear out the packet MARK and security
label, from Herbert Xu.

4) Fix oops on rmmod in stmmac driver, from Bryan O'Donoghue.

5) Pause handling is not correct in the stmmac driver because it
doesn't take into consideration the RX and TX fifo sizes. From
Vince Bridgers.

6) Failure path missing unlock in FOU driver, from Wang Cong.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (44 commits)
net: dsa: use DEVICE_ATTR_RW to declare temp1_max
netns: remove BUG_ONs from net_generic()
IB/ipoib: Fix ndo_get_iflink
sfc: Fix memcpy() with const destination compiler warning.
altera tse: Fix network-delays and -retransmissions after high throughput.
net: remove unused 'dev' argument from netif_needs_gso()
act_mirred: Fix bogus header when redirecting from VLAN
inet_diag: fix access to tcp cc information
tcp: tcp_get_info() should fetch socket fields once
net: dsa: mv88e6xxx: Add missing initialization in mv88e6xxx_set_port_state()
skbuff: Do not scrub skb mark within the same name space
Revert "net: Reset secmark when scrubbing packet"
bpf: fix two bugs in verification logic when accessing 'ctx' pointer
bpf: fix bpf helpers to use skb->mac_header relative offsets
stmmac: Configure Flow Control to work correctly based on rxfifo size
stmmac: Enable unicast pause frame detect in GMAC Register 6
stmmac: Read tx-fifo-depth and rx-fifo-depth from the devicetree
stmmac: Add defines and documentation for enabling flow control
stmmac: Add properties for transmit and receive fifo sizes
stmmac: fix oops on rmmod after assigning ip addr
...

+655 -351
+6
Documentation/devicetree/bindings/net/ethernet.txt
··· 19 19 - phy: the same as "phy-handle" property, not recommended for new bindings. 20 20 - phy-device: the same as "phy-handle" property, not recommended for new 21 21 bindings. 22 + - rx-fifo-depth: the size of the controller's receive fifo in bytes. This 23 + is used for components that can have configurable receive fifo sizes, 24 + and is useful for determining certain configuration settings such as 25 + flow control thresholds. 26 + - tx-fifo-depth: the size of the controller's transmit fifo in bytes. This 27 + is used for components that can have configurable fifo sizes. 22 28 23 29 Child nodes of the Ethernet controller are typically the individual PHY devices 24 30 connected via the MDIO bus (sometimes the MDIO bus controller is separate).
+4
Documentation/devicetree/bindings/net/stmmac.txt
··· 45 45 If not passed then the system clock will be used and this is fine on some 46 46 platforms. 47 47 - snps,burst_len: The AXI burst lenth value of the AXI BUS MODE register. 48 + - tx-fifo-depth: See ethernet.txt file in the same directory 49 + - rx-fifo-depth: See ethernet.txt file in the same directory 48 50 49 51 Examples: 50 52 ··· 61 59 phy-mode = "gmii"; 62 60 snps,multicast-filter-bins = <256>; 63 61 snps,perfect-filter-entries = <128>; 62 + rx-fifo-depth = <16384>; 63 + tx-fifo-depth = <16384>; 64 64 clocks = <&clock>; 65 65 clock-names = "stmmaceth"; 66 66 };
+1 -1
drivers/infiniband/hw/cxgb4/mem.c
··· 73 73 c4iw_init_wr_wait(&wr_wait); 74 74 wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16); 75 75 76 - skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL); 76 + skb = alloc_skb(wr_len, GFP_KERNEL); 77 77 if (!skb) 78 78 return -ENOMEM; 79 79 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+5
drivers/infiniband/ulp/ipoib/ipoib_main.c
··· 846 846 { 847 847 struct ipoib_dev_priv *priv = netdev_priv(dev); 848 848 849 + /* parent interface */ 850 + if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) 851 + return dev->ifindex; 852 + 853 + /* child/vlan interface */ 849 854 return priv->parent->ifindex; 850 855 } 851 856
+1 -2
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
··· 58 58 /* MTU will be reset when mcast join happens */ 59 59 priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu); 60 60 priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu; 61 + priv->parent = ppriv->dev; 61 62 set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags); 62 63 63 64 result = ipoib_set_dev_features(priv, ppriv->ca); ··· 84 83 ipoib_warn(priv, "failed to initialize; error %i", result); 85 84 goto register_failed; 86 85 } 87 - 88 - priv->parent = ppriv->dev; 89 86 90 87 ipoib_create_debug_files(priv->dev); 91 88
+4 -4
drivers/net/dsa/mv88e6xxx.c
··· 602 602 u32 high = 0; 603 603 604 604 if (s->reg >= 0x100) { 605 - int ret; 606 - 607 605 ret = mv88e6xxx_reg_read(ds, REG_PORT(port), 608 606 s->reg - 0x100); 609 607 if (ret < 0) ··· 900 902 static int mv88e6xxx_set_port_state(struct dsa_switch *ds, int port, u8 state) 901 903 { 902 904 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 903 - int reg, ret; 905 + int reg, ret = 0; 904 906 u8 oldstate; 905 907 906 908 mutex_lock(&ps->smi_mutex); 907 909 908 910 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL); 909 - if (reg < 0) 911 + if (reg < 0) { 912 + ret = reg; 910 913 goto abort; 914 + } 911 915 912 916 oldstate = reg & PORT_CONTROL_STATE_MASK; 913 917 if (oldstate != state) {
+7 -2
drivers/net/ethernet/altera/altera_tse_main.c
··· 376 376 u16 pktlength; 377 377 u16 pktstatus; 378 378 379 - while (((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) && 380 - (count < limit)) { 379 + /* Check for count < limit first as get_rx_status is changing 380 + * the response-fifo so we must process the next packet 381 + * after calling get_rx_status if a response is pending. 382 + * (reading the last byte of the response pops the value from the fifo.) 383 + */ 384 + while ((count < limit) && 385 + ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0)) { 381 386 pktstatus = rxstatus >> 16; 382 387 pktlength = rxstatus & 0xffff; 383 388
+50 -85
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
··· 531 531 struct napi_struct napi; 532 532 533 533 #ifdef CONFIG_NET_RX_BUSY_POLL 534 - unsigned int state; 535 - #define BNX2X_FP_STATE_IDLE 0 536 - #define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */ 537 - #define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */ 538 - #define BNX2X_FP_STATE_DISABLED (1 << 2) 539 - #define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */ 540 - #define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */ 541 - #define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL) 542 - #define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD) 543 - #define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED) 544 - #define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD) 545 - /* protect state */ 546 - spinlock_t lock; 547 - #endif /* CONFIG_NET_RX_BUSY_POLL */ 534 + unsigned long busy_poll_state; 535 + #endif 548 536 549 537 union host_hc_status_block status_blk; 550 538 /* chip independent shortcuts into sb structure */ ··· 607 619 #define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats)) 608 620 609 621 #ifdef CONFIG_NET_RX_BUSY_POLL 610 - static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) 622 + 623 + enum bnx2x_fp_state { 624 + BNX2X_STATE_FP_NAPI = BIT(0), /* NAPI handler owns the queue */ 625 + 626 + BNX2X_STATE_FP_NAPI_REQ_BIT = 1, /* NAPI would like to own the queue */ 627 + BNX2X_STATE_FP_NAPI_REQ = BIT(1), 628 + 629 + BNX2X_STATE_FP_POLL_BIT = 2, 630 + BNX2X_STATE_FP_POLL = BIT(2), /* busy_poll owns the queue */ 631 + 632 + BNX2X_STATE_FP_DISABLE_BIT = 3, /* queue is dismantled */ 633 + }; 634 + 635 + static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp) 611 636 { 612 - spin_lock_init(&fp->lock); 613 - fp->state = BNX2X_FP_STATE_IDLE; 637 + WRITE_ONCE(fp->busy_poll_state, 0); 614 638 } 615 639 616 640 /* called from the device poll routine to get ownership of a FP */ 617 641 static inline bool 
bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) 618 642 { 619 - bool rc = true; 643 + unsigned long prev, old = READ_ONCE(fp->busy_poll_state); 620 644 621 - spin_lock_bh(&fp->lock); 622 - if (fp->state & BNX2X_FP_LOCKED) { 623 - WARN_ON(fp->state & BNX2X_FP_STATE_NAPI); 624 - fp->state |= BNX2X_FP_STATE_NAPI_YIELD; 625 - rc = false; 626 - } else { 627 - /* we don't care if someone yielded */ 628 - fp->state = BNX2X_FP_STATE_NAPI; 645 + while (1) { 646 + switch (old) { 647 + case BNX2X_STATE_FP_POLL: 648 + /* make sure bnx2x_fp_lock_poll() wont starve us */ 649 + set_bit(BNX2X_STATE_FP_NAPI_REQ_BIT, 650 + &fp->busy_poll_state); 651 + /* fallthrough */ 652 + case BNX2X_STATE_FP_POLL | BNX2X_STATE_FP_NAPI_REQ: 653 + return false; 654 + default: 655 + break; 656 + } 657 + prev = cmpxchg(&fp->busy_poll_state, old, BNX2X_STATE_FP_NAPI); 658 + if (unlikely(prev != old)) { 659 + old = prev; 660 + continue; 661 + } 662 + return true; 629 663 } 630 - spin_unlock_bh(&fp->lock); 631 - return rc; 632 664 } 633 665 634 - /* returns true is someone tried to get the FP while napi had it */ 635 - static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) 666 + static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) 636 667 { 637 - bool rc = false; 638 - 639 - spin_lock_bh(&fp->lock); 640 - WARN_ON(fp->state & 641 - (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD)); 642 - 643 - if (fp->state & BNX2X_FP_STATE_POLL_YIELD) 644 - rc = true; 645 - 646 - /* state ==> idle, unless currently disabled */ 647 - fp->state &= BNX2X_FP_STATE_DISABLED; 648 - spin_unlock_bh(&fp->lock); 649 - return rc; 668 + smp_wmb(); 669 + fp->busy_poll_state = 0; 650 670 } 651 671 652 672 /* called from bnx2x_low_latency_poll() */ 653 673 static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp) 654 674 { 655 - bool rc = true; 656 - 657 - spin_lock_bh(&fp->lock); 658 - if ((fp->state & BNX2X_FP_LOCKED)) { 659 - fp->state |= BNX2X_FP_STATE_POLL_YIELD; 660 - rc = false; 661 - } else { 
662 - /* preserve yield marks */ 663 - fp->state |= BNX2X_FP_STATE_POLL; 664 - } 665 - spin_unlock_bh(&fp->lock); 666 - return rc; 675 + return cmpxchg(&fp->busy_poll_state, 0, BNX2X_STATE_FP_POLL) == 0; 667 676 } 668 677 669 - /* returns true if someone tried to get the FP while it was locked */ 670 - static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) 678 + static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) 671 679 { 672 - bool rc = false; 673 - 674 - spin_lock_bh(&fp->lock); 675 - WARN_ON(fp->state & BNX2X_FP_STATE_NAPI); 676 - 677 - if (fp->state & BNX2X_FP_STATE_POLL_YIELD) 678 - rc = true; 679 - 680 - /* state ==> idle, unless currently disabled */ 681 - fp->state &= BNX2X_FP_STATE_DISABLED; 682 - spin_unlock_bh(&fp->lock); 683 - return rc; 680 + smp_mb__before_atomic(); 681 + clear_bit(BNX2X_STATE_FP_POLL_BIT, &fp->busy_poll_state); 684 682 } 685 683 686 - /* true if a socket is polling, even if it did not get the lock */ 684 + /* true if a socket is polling */ 687 685 static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) 688 686 { 689 - WARN_ON(!(fp->state & BNX2X_FP_OWNED)); 690 - return fp->state & BNX2X_FP_USER_PEND; 687 + return READ_ONCE(fp->busy_poll_state) & BNX2X_STATE_FP_POLL; 691 688 } 692 689 693 690 /* false if fp is currently owned */ 694 691 static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp) 695 692 { 696 - int rc = true; 693 + set_bit(BNX2X_STATE_FP_DISABLE_BIT, &fp->busy_poll_state); 694 + return !bnx2x_fp_ll_polling(fp); 697 695 698 - spin_lock_bh(&fp->lock); 699 - if (fp->state & BNX2X_FP_OWNED) 700 - rc = false; 701 - fp->state |= BNX2X_FP_STATE_DISABLED; 702 - spin_unlock_bh(&fp->lock); 703 - 704 - return rc; 705 696 } 706 697 #else 707 - static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) 698 + static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp) 708 699 { 709 700 } 710 701 ··· 692 725 return true; 693 726 } 694 727 695 - static inline bool 
bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) 728 + static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) 696 729 { 697 - return false; 698 730 } 699 731 700 732 static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp) ··· 701 735 return false; 702 736 } 703 737 704 - static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) 738 + static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) 705 739 { 706 - return false; 707 740 } 708 741 709 742 static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
+5 -4
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
··· 1849 1849 int i; 1850 1850 1851 1851 for_each_rx_queue_cnic(bp, i) { 1852 - bnx2x_fp_init_lock(&bp->fp[i]); 1852 + bnx2x_fp_busy_poll_init(&bp->fp[i]); 1853 1853 napi_enable(&bnx2x_fp(bp, i, napi)); 1854 1854 } 1855 1855 } ··· 1859 1859 int i; 1860 1860 1861 1861 for_each_eth_queue(bp, i) { 1862 - bnx2x_fp_init_lock(&bp->fp[i]); 1862 + bnx2x_fp_busy_poll_init(&bp->fp[i]); 1863 1863 napi_enable(&bnx2x_fp(bp, i, napi)); 1864 1864 } 1865 1865 } ··· 3191 3191 } 3192 3192 } 3193 3193 3194 + bnx2x_fp_unlock_napi(fp); 3195 + 3194 3196 /* Fall out from the NAPI loop if needed */ 3195 - if (!bnx2x_fp_unlock_napi(fp) && 3196 - !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 3197 + if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 3197 3198 3198 3199 /* No need to update SB for FCoE L2 ring as long as 3199 3200 * it's connected to the default SB and the SB
+12 -3
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
··· 1140 1140 struct fw_filter_wr *fwr; 1141 1141 unsigned int ftid; 1142 1142 1143 + skb = alloc_skb(sizeof(*fwr), GFP_KERNEL); 1144 + if (!skb) 1145 + return -ENOMEM; 1146 + 1143 1147 /* If the new filter requires loopback Destination MAC and/or VLAN 1144 1148 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for 1145 1149 * the filter. ··· 1151 1147 if (f->fs.newdmac || f->fs.newvlan) { 1152 1148 /* allocate L2T entry for new filter */ 1153 1149 f->l2t = t4_l2t_alloc_switching(adapter->l2t); 1154 - if (f->l2t == NULL) 1150 + if (f->l2t == NULL) { 1151 + kfree_skb(skb); 1155 1152 return -EAGAIN; 1153 + } 1156 1154 if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan, 1157 1155 f->fs.eport, f->fs.dmac)) { 1158 1156 cxgb4_l2t_release(f->l2t); 1159 1157 f->l2t = NULL; 1158 + kfree_skb(skb); 1160 1159 return -ENOMEM; 1161 1160 } 1162 1161 } 1163 1162 1164 1163 ftid = adapter->tids.ftid_base + fidx; 1165 1164 1166 - skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL); 1167 1165 fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr)); 1168 1166 memset(fwr, 0, sizeof(*fwr)); 1169 1167 ··· 1263 1257 len = sizeof(*fwr); 1264 1258 ftid = adapter->tids.ftid_base + fidx; 1265 1259 1266 - skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL); 1260 + skb = alloc_skb(len, GFP_KERNEL); 1261 + if (!skb) 1262 + return -ENOMEM; 1263 + 1267 1264 fwr = (struct fw_filter_wr *)__skb_put(skb, len); 1268 1265 t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id); 1269 1266
+11 -7
drivers/net/ethernet/hisilicon/hip04_eth.c
··· 413 413 return count; 414 414 } 415 415 416 + static void hip04_start_tx_timer(struct hip04_priv *priv) 417 + { 418 + unsigned long ns = priv->tx_coalesce_usecs * NSEC_PER_USEC / 2; 419 + 420 + /* allow timer to fire after half the time at the earliest */ 421 + hrtimer_start_range_ns(&priv->tx_coalesce_timer, ns_to_ktime(ns), 422 + ns, HRTIMER_MODE_REL); 423 + } 424 + 416 425 static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) 417 426 { 418 427 struct hip04_priv *priv = netdev_priv(ndev); ··· 475 466 } 476 467 } else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) { 477 468 /* cleanup not pending yet, start a new timer */ 478 - hrtimer_start_expires(&priv->tx_coalesce_timer, 479 - HRTIMER_MODE_REL); 469 + hip04_start_tx_timer(priv); 480 470 } 481 471 482 472 return NETDEV_TX_OK; ··· 557 549 /* clean up tx descriptors and start a new timer if necessary */ 558 550 tx_remaining = hip04_tx_reclaim(ndev, false); 559 551 if (rx < budget && tx_remaining) 560 - hrtimer_start_expires(&priv->tx_coalesce_timer, HRTIMER_MODE_REL); 552 + hip04_start_tx_timer(priv); 561 553 562 554 return rx; 563 555 } ··· 817 809 struct hip04_priv *priv; 818 810 struct resource *res; 819 811 unsigned int irq; 820 - ktime_t txtime; 821 812 int ret; 822 813 823 814 ndev = alloc_etherdev(sizeof(struct hip04_priv)); ··· 853 846 */ 854 847 priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4; 855 848 priv->tx_coalesce_usecs = 200; 856 - /* allow timer to fire after half the time at the earliest */ 857 - txtime = ktime_set(0, priv->tx_coalesce_usecs * NSEC_PER_USEC / 2); 858 - hrtimer_set_expires_range(&priv->tx_coalesce_timer, txtime, txtime); 859 849 priv->tx_coalesce_timer.function = tx_done; 860 850 861 851 priv->map = syscon_node_to_regmap(arg.np);
+65
drivers/net/ethernet/intel/i40e/i40e_common.c
··· 2397 2397 #define I40E_DEV_FUNC_CAP_LED 0x61 2398 2398 #define I40E_DEV_FUNC_CAP_SDP 0x62 2399 2399 #define I40E_DEV_FUNC_CAP_MDIO 0x63 2400 + #define I40E_DEV_FUNC_CAP_WR_CSR_PROT 0x64 2400 2401 2401 2402 /** 2402 2403 * i40e_parse_discover_capabilities ··· 2542 2541 p->fd_filters_guaranteed = number; 2543 2542 p->fd_filters_best_effort = logical_id; 2544 2543 break; 2544 + case I40E_DEV_FUNC_CAP_WR_CSR_PROT: 2545 + p->wr_csr_prot = (u64)number; 2546 + p->wr_csr_prot |= (u64)logical_id << 32; 2547 + break; 2545 2548 default: 2546 2549 break; 2547 2550 } 2548 2551 } 2552 + 2553 + if (p->fcoe) 2554 + i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n"); 2549 2555 2550 2556 /* Software override ensuring FCoE is disabled if npar or mfp 2551 2557 * mode because it is not supported in these modes. ··· 3508 3500 hw->bus.speed = i40e_bus_speed_unknown; 3509 3501 break; 3510 3502 } 3503 + } 3504 + 3505 + /** 3506 + * i40e_aq_debug_dump 3507 + * @hw: pointer to the hardware structure 3508 + * @cluster_id: specific cluster to dump 3509 + * @table_id: table id within cluster 3510 + * @start_index: index of line in the block to read 3511 + * @buff_size: dump buffer size 3512 + * @buff: dump buffer 3513 + * @ret_buff_size: actual buffer size returned 3514 + * @ret_next_table: next block to read 3515 + * @ret_next_index: next index to read 3516 + * 3517 + * Dump internal FW/HW data for debug purposes. 
3518 + * 3519 + **/ 3520 + i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, 3521 + u8 table_id, u32 start_index, u16 buff_size, 3522 + void *buff, u16 *ret_buff_size, 3523 + u8 *ret_next_table, u32 *ret_next_index, 3524 + struct i40e_asq_cmd_details *cmd_details) 3525 + { 3526 + struct i40e_aq_desc desc; 3527 + struct i40e_aqc_debug_dump_internals *cmd = 3528 + (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 3529 + struct i40e_aqc_debug_dump_internals *resp = 3530 + (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 3531 + i40e_status status; 3532 + 3533 + if (buff_size == 0 || !buff) 3534 + return I40E_ERR_PARAM; 3535 + 3536 + i40e_fill_default_direct_cmd_desc(&desc, 3537 + i40e_aqc_opc_debug_dump_internals); 3538 + /* Indirect Command */ 3539 + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3540 + if (buff_size > I40E_AQ_LARGE_BUF) 3541 + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3542 + 3543 + cmd->cluster_id = cluster_id; 3544 + cmd->table_id = table_id; 3545 + cmd->idx = cpu_to_le32(start_index); 3546 + 3547 + desc.datalen = cpu_to_le16(buff_size); 3548 + 3549 + status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3550 + if (!status) { 3551 + if (ret_buff_size) 3552 + *ret_buff_size = le16_to_cpu(desc.datalen); 3553 + if (ret_next_table) 3554 + *ret_next_table = resp->table_id; 3555 + if (ret_next_index) 3556 + *ret_next_index = le32_to_cpu(resp->idx); 3557 + } 3558 + 3559 + return status; 3511 3560 } 3512 3561 3513 3562 /**
+2 -4
drivers/net/ethernet/intel/i40e/i40e_dcb.c
··· 419 419 { 420 420 u16 status, tlv_status = le16_to_cpu(cee_cfg->tlv_status); 421 421 u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio); 422 - u8 i, tc, err, sync, oper; 422 + u8 i, tc, err; 423 423 424 424 /* CEE PG data to ETS config */ 425 425 dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc; ··· 456 456 status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >> 457 457 I40E_AQC_CEE_APP_STATUS_SHIFT; 458 458 err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; 459 - sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; 460 - oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0; 461 - /* Add APPs if Error is False and Oper/Sync is True */ 459 + /* Add APPs if Error is False */ 462 460 if (!err) { 463 461 /* CEE operating configuration supports FCoE/iSCSI/FIP only */ 464 462 dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS;
+45
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
··· 1388 1388 r_cfg->app[i].selector, 1389 1389 r_cfg->app[i].protocolid); 1390 1390 } 1391 + } else if (strncmp(&cmd_buf[5], "debug fwdata", 12) == 0) { 1392 + int cluster_id, table_id; 1393 + int index, ret; 1394 + u16 buff_len = 4096; 1395 + u32 next_index; 1396 + u8 next_table; 1397 + u8 *buff; 1398 + u16 rlen; 1399 + 1400 + cnt = sscanf(&cmd_buf[18], "%i %i %i", 1401 + &cluster_id, &table_id, &index); 1402 + if (cnt != 3) { 1403 + dev_info(&pf->pdev->dev, 1404 + "dump debug fwdata <cluster_id> <table_id> <index>\n"); 1405 + goto command_write_done; 1406 + } 1407 + 1408 + dev_info(&pf->pdev->dev, 1409 + "AQ debug dump fwdata params %x %x %x %x\n", 1410 + cluster_id, table_id, index, buff_len); 1411 + buff = kzalloc(buff_len, GFP_KERNEL); 1412 + if (!buff) 1413 + goto command_write_done; 1414 + 1415 + ret = i40e_aq_debug_dump(&pf->hw, cluster_id, table_id, 1416 + index, buff_len, buff, &rlen, 1417 + &next_table, &next_index, 1418 + NULL); 1419 + if (ret) { 1420 + dev_info(&pf->pdev->dev, 1421 + "debug dump fwdata AQ Failed %d 0x%x\n", 1422 + ret, pf->hw.aq.asq_last_status); 1423 + kfree(buff); 1424 + buff = NULL; 1425 + goto command_write_done; 1426 + } 1427 + dev_info(&pf->pdev->dev, 1428 + "AQ debug dump fwdata rlen=0x%x next_table=0x%x next_index=0x%x\n", 1429 + rlen, next_table, next_index); 1430 + print_hex_dump(KERN_INFO, "AQ buffer WB: ", 1431 + DUMP_PREFIX_OFFSET, 16, 1, 1432 + buff, rlen, true); 1433 + kfree(buff); 1434 + buff = NULL; 1391 1435 } else { 1392 1436 dev_info(&pf->pdev->dev, 1393 1437 "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>],\n"); ··· 1947 1903 dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n"); 1948 1904 dev_info(&pf->pdev->dev, " dump desc aq\n"); 1949 1905 dev_info(&pf->pdev->dev, " dump reset stats\n"); 1906 + dev_info(&pf->pdev->dev, " dump debug fwdata <cluster_id> <table_id> <index>\n"); 1950 1907 dev_info(&pf->pdev->dev, " msg_enable [level]\n"); 1951 1908 
dev_info(&pf->pdev->dev, " read <reg>\n"); 1952 1909 dev_info(&pf->pdev->dev, " write <reg> <value>\n");
+27 -2
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
··· 356 356 /* Set speed and duplex */ 357 357 switch (link_speed) { 358 358 case I40E_LINK_SPEED_40GB: 359 - /* need a SPEED_40000 in ethtool.h */ 360 - ethtool_cmd_speed_set(ecmd, 40000); 359 + ethtool_cmd_speed_set(ecmd, SPEED_40000); 361 360 break; 362 361 case I40E_LINK_SPEED_20GB: 363 362 ethtool_cmd_speed_set(ecmd, SPEED_20000); ··· 1913 1914 else 1914 1915 fsp->ring_cookie = rule->q_index; 1915 1916 1917 + if (rule->dest_vsi != pf->vsi[pf->lan_vsi]->id) { 1918 + struct i40e_vsi *vsi; 1919 + 1920 + vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi); 1921 + if (vsi && vsi->type == I40E_VSI_SRIOV) { 1922 + fsp->h_ext.data[1] = htonl(vsi->vf_id); 1923 + fsp->m_ext.data[1] = htonl(0x1); 1924 + } 1925 + } 1926 + 1916 1927 return 0; 1917 1928 } 1918 1929 ··· 2216 2207 struct i40e_fdir_filter *input; 2217 2208 struct i40e_pf *pf; 2218 2209 int ret = -EINVAL; 2210 + u16 vf_id; 2219 2211 2220 2212 if (!vsi) 2221 2213 return -EINVAL; ··· 2277 2267 input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; 2278 2268 input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; 2279 2269 2270 + if (ntohl(fsp->m_ext.data[1])) { 2271 + if (ntohl(fsp->h_ext.data[1]) >= pf->num_alloc_vfs) { 2272 + netif_info(pf, drv, vsi->netdev, "Invalid VF id\n"); 2273 + goto free_input; 2274 + } 2275 + vf_id = ntohl(fsp->h_ext.data[1]); 2276 + /* Find vsi id from vf id and override dest vsi */ 2277 + input->dest_vsi = pf->vf[vf_id].lan_vsi_id; 2278 + if (input->q_index >= pf->vf[vf_id].num_queue_pairs) { 2279 + netif_info(pf, drv, vsi->netdev, "Invalid queue id\n"); 2280 + goto free_input; 2281 + } 2282 + } 2283 + 2280 2284 ret = i40e_add_del_fdir(vsi, input, true); 2285 + free_input: 2281 2286 if (ret) 2282 2287 kfree(input); 2283 2288 else
+12 -4
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 39 39 40 40 #define DRV_VERSION_MAJOR 1 41 41 #define DRV_VERSION_MINOR 3 42 - #define DRV_VERSION_BUILD 1 42 + #define DRV_VERSION_BUILD 2 43 43 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ 44 44 __stringify(DRV_VERSION_MINOR) "." \ 45 45 __stringify(DRV_VERSION_BUILD) DRV_KERN ··· 7301 7301 * i40e_init_interrupt_scheme - Determine proper interrupt scheme 7302 7302 * @pf: board private structure to initialize 7303 7303 **/ 7304 - static void i40e_init_interrupt_scheme(struct i40e_pf *pf) 7304 + static int i40e_init_interrupt_scheme(struct i40e_pf *pf) 7305 7305 { 7306 7306 int vectors = 0; 7307 7307 ssize_t size; ··· 7343 7343 /* set up vector assignment tracking */ 7344 7344 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors); 7345 7345 pf->irq_pile = kzalloc(size, GFP_KERNEL); 7346 + if (!pf->irq_pile) { 7347 + dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n"); 7348 + return -ENOMEM; 7349 + } 7346 7350 pf->irq_pile->num_entries = vectors; 7347 7351 pf->irq_pile->search_hint = 0; 7348 7352 7349 - /* track first vector for misc interrupts */ 7353 + /* track first vector for misc interrupts, ignore return */ 7350 7354 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1); 7355 + 7356 + return 0; 7351 7357 } 7352 7358 7353 7359 /** ··· 9833 9827 9834 9828 /* set up the main switch operations */ 9835 9829 i40e_determine_queue_usage(pf); 9836 - i40e_init_interrupt_scheme(pf); 9830 + err = i40e_init_interrupt_scheme(pf); 9831 + if (err) 9832 + goto err_switch_setup; 9837 9833 9838 9834 /* The number of VSIs reported by the FW is the minimum guaranteed 9839 9835 * to us; HW supports far more and we share the remaining pool with
+1 -2
drivers/net/ethernet/intel/i40e/i40e_nvm.c
··· 821 821 int *errno) 822 822 { 823 823 enum i40e_nvmupd_cmd upd_cmd; 824 - u8 transaction, module; 824 + u8 transaction; 825 825 826 826 /* anything that doesn't match a recognized case is an error */ 827 827 upd_cmd = I40E_NVMUPD_INVALID; 828 828 829 829 transaction = i40e_nvmupd_get_transaction(cmd->config); 830 - module = i40e_nvmupd_get_module(cmd->config); 831 830 832 831 /* limits on data size */ 833 832 if ((cmd->data_size < 1) ||
+5
drivers/net/ethernet/intel/i40e/i40e_prototype.h
··· 303 303 u16 vsi_seid, u16 queue, bool is_add, 304 304 struct i40e_control_filter_stats *stats, 305 305 struct i40e_asq_cmd_details *cmd_details); 306 + i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, 307 + u8 table_id, u32 start_index, u16 buff_size, 308 + void *buff, u16 *ret_buff_size, 309 + u8 *ret_next_table, u32 *ret_next_index, 310 + struct i40e_asq_cmd_details *cmd_details); 306 311 #endif /* _I40E_PROTOTYPE_H_ */
+1
drivers/net/ethernet/intel/i40e/i40e_type.h
··· 242 242 u8 rx_buf_chain_len; 243 243 u32 enabled_tcmap; 244 244 u32 maxtc; 245 + u64 wr_csr_prot; 245 246 }; 246 247 247 248 struct i40e_mac_info {
+132 -112
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
··· 26 26 27 27 #include "i40e.h" 28 28 29 + /*********************notification routines***********************/ 30 + 31 + /** 32 + * i40e_vc_vf_broadcast 33 + * @pf: pointer to the PF structure 34 + * @opcode: operation code 35 + * @retval: return value 36 + * @msg: pointer to the msg buffer 37 + * @msglen: msg length 38 + * 39 + * send a message to all VFs on a given PF 40 + **/ 41 + static void i40e_vc_vf_broadcast(struct i40e_pf *pf, 42 + enum i40e_virtchnl_ops v_opcode, 43 + i40e_status v_retval, u8 *msg, 44 + u16 msglen) 45 + { 46 + struct i40e_hw *hw = &pf->hw; 47 + struct i40e_vf *vf = pf->vf; 48 + int i; 49 + 50 + for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { 51 + int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 52 + /* Not all vfs are enabled so skip the ones that are not */ 53 + if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && 54 + !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) 55 + continue; 56 + 57 + /* Ignore return value on purpose - a given VF may fail, but 58 + * we need to keep going and send to all of them 59 + */ 60 + i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, 61 + msg, msglen, NULL); 62 + } 63 + } 64 + 65 + /** 66 + * i40e_vc_notify_link_state 67 + * @vf: pointer to the VF structure 68 + * 69 + * send a link status message to a single VF 70 + **/ 71 + static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) 72 + { 73 + struct i40e_virtchnl_pf_event pfe; 74 + struct i40e_pf *pf = vf->pf; 75 + struct i40e_hw *hw = &pf->hw; 76 + struct i40e_link_status *ls = &pf->hw.phy.link_info; 77 + int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 78 + 79 + pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; 80 + pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; 81 + if (vf->link_forced) { 82 + pfe.event_data.link_event.link_status = vf->link_up; 83 + pfe.event_data.link_event.link_speed = 84 + (vf->link_up ? 
I40E_LINK_SPEED_40GB : 0); 85 + } else { 86 + pfe.event_data.link_event.link_status = 87 + ls->link_info & I40E_AQ_LINK_UP; 88 + pfe.event_data.link_event.link_speed = ls->link_speed; 89 + } 90 + i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, 91 + 0, (u8 *)&pfe, sizeof(pfe), NULL); 92 + } 93 + 94 + /** 95 + * i40e_vc_notify_link_state 96 + * @pf: pointer to the PF structure 97 + * 98 + * send a link status message to all VFs on a given PF 99 + **/ 100 + void i40e_vc_notify_link_state(struct i40e_pf *pf) 101 + { 102 + int i; 103 + 104 + for (i = 0; i < pf->num_alloc_vfs; i++) 105 + i40e_vc_notify_vf_link_state(&pf->vf[i]); 106 + } 107 + 108 + /** 109 + * i40e_vc_notify_reset 110 + * @pf: pointer to the PF structure 111 + * 112 + * indicate a pending reset to all VFs on a given PF 113 + **/ 114 + void i40e_vc_notify_reset(struct i40e_pf *pf) 115 + { 116 + struct i40e_virtchnl_pf_event pfe; 117 + 118 + pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; 119 + pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; 120 + i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, 0, 121 + (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event)); 122 + } 123 + 124 + /** 125 + * i40e_vc_notify_vf_reset 126 + * @vf: pointer to the VF structure 127 + * 128 + * indicate a pending reset to the given VF 129 + **/ 130 + void i40e_vc_notify_vf_reset(struct i40e_vf *vf) 131 + { 132 + struct i40e_virtchnl_pf_event pfe; 133 + int abs_vf_id; 134 + 135 + /* validate the request */ 136 + if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) 137 + return; 138 + 139 + /* verify if the VF is in either init or active before proceeding */ 140 + if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && 141 + !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) 142 + return; 143 + 144 + abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id; 145 + 146 + pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; 147 + pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; 148 + i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, 
I40E_VIRTCHNL_OP_EVENT, 149 + 0, (u8 *)&pfe, 150 + sizeof(struct i40e_virtchnl_pf_event), NULL); 151 + } 29 152 /***********************misc routines*****************************/ 30 153 31 154 /** ··· 812 689 } 813 690 } 814 691 692 + if (flr) 693 + usleep_range(10000, 20000); 694 + 815 695 if (!rsd) 816 696 dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n", 817 697 vf->vf_id); ··· 858 732 return; 859 733 while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state)) 860 734 usleep_range(1000, 2000); 735 + 736 + for (i = 0; i < pf->num_alloc_vfs; i++) 737 + if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states)) 738 + i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx], 739 + false); 861 740 862 741 /* Disable IOV before freeing resources. This lets any VF drivers 863 742 * running in the host get themselves cleaned up before we yank ··· 1893 1762 break; 1894 1763 case I40E_VIRTCHNL_OP_ENABLE_QUEUES: 1895 1764 ret = i40e_vc_enable_queues_msg(vf, msg, msglen); 1765 + i40e_vc_notify_vf_link_state(vf); 1896 1766 break; 1897 1767 case I40E_VIRTCHNL_OP_DISABLE_QUEUES: 1898 1768 ret = i40e_vc_disable_queues_msg(vf, msg, msglen); ··· 1964 1832 } 1965 1833 1966 1834 return 0; 1967 - } 1968 - 1969 - /** 1970 - * i40e_vc_vf_broadcast 1971 - * @pf: pointer to the PF structure 1972 - * @opcode: operation code 1973 - * @retval: return value 1974 - * @msg: pointer to the msg buffer 1975 - * @msglen: msg length 1976 - * 1977 - * send a message to all VFs on a given PF 1978 - **/ 1979 - static void i40e_vc_vf_broadcast(struct i40e_pf *pf, 1980 - enum i40e_virtchnl_ops v_opcode, 1981 - i40e_status v_retval, u8 *msg, 1982 - u16 msglen) 1983 - { 1984 - struct i40e_hw *hw = &pf->hw; 1985 - struct i40e_vf *vf = pf->vf; 1986 - int i; 1987 - 1988 - for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { 1989 - int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 1990 - /* Not all VFs are enabled so skip the ones that are not */ 1991 - if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && 
1992 - !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) 1993 - continue; 1994 - 1995 - /* Ignore return value on purpose - a given VF may fail, but 1996 - * we need to keep going and send to all of them 1997 - */ 1998 - i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, 1999 - msg, msglen, NULL); 2000 - } 2001 - } 2002 - 2003 - /** 2004 - * i40e_vc_notify_link_state 2005 - * @pf: pointer to the PF structure 2006 - * 2007 - * send a link status message to all VFs on a given PF 2008 - **/ 2009 - void i40e_vc_notify_link_state(struct i40e_pf *pf) 2010 - { 2011 - struct i40e_virtchnl_pf_event pfe; 2012 - struct i40e_hw *hw = &pf->hw; 2013 - struct i40e_vf *vf = pf->vf; 2014 - struct i40e_link_status *ls = &pf->hw.phy.link_info; 2015 - int i; 2016 - 2017 - pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; 2018 - pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; 2019 - for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { 2020 - int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 2021 - if (vf->link_forced) { 2022 - pfe.event_data.link_event.link_status = vf->link_up; 2023 - pfe.event_data.link_event.link_speed = 2024 - (vf->link_up ? 
I40E_LINK_SPEED_40GB : 0); 2025 - } else { 2026 - pfe.event_data.link_event.link_status = 2027 - ls->link_info & I40E_AQ_LINK_UP; 2028 - pfe.event_data.link_event.link_speed = ls->link_speed; 2029 - } 2030 - i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, 2031 - 0, (u8 *)&pfe, sizeof(pfe), 2032 - NULL); 2033 - } 2034 - } 2035 - 2036 - /** 2037 - * i40e_vc_notify_reset 2038 - * @pf: pointer to the PF structure 2039 - * 2040 - * indicate a pending reset to all VFs on a given PF 2041 - **/ 2042 - void i40e_vc_notify_reset(struct i40e_pf *pf) 2043 - { 2044 - struct i40e_virtchnl_pf_event pfe; 2045 - 2046 - pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; 2047 - pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; 2048 - i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS, 2049 - (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event)); 2050 - } 2051 - 2052 - /** 2053 - * i40e_vc_notify_vf_reset 2054 - * @vf: pointer to the VF structure 2055 - * 2056 - * indicate a pending reset to the given VF 2057 - **/ 2058 - void i40e_vc_notify_vf_reset(struct i40e_vf *vf) 2059 - { 2060 - struct i40e_virtchnl_pf_event pfe; 2061 - int abs_vf_id; 2062 - 2063 - /* validate the request */ 2064 - if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) 2065 - return; 2066 - 2067 - /* verify if the VF is in either init or active before proceeding */ 2068 - if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && 2069 - !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) 2070 - return; 2071 - 2072 - abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id; 2073 - 2074 - pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; 2075 - pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; 2076 - i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, 2077 - I40E_SUCCESS, (u8 *)&pfe, 2078 - sizeof(struct i40e_virtchnl_pf_event), NULL); 2079 1835 } 2080 1836 2081 1837 /**
+1
drivers/net/ethernet/intel/i40evf/i40e_type.h
··· 242 242 u8 rx_buf_chain_len; 243 243 u32 enabled_tcmap; 244 244 u32 maxtc; 245 + u64 wr_csr_prot; 245 246 }; 246 247 247 248 struct i40e_mac_info {
-1
drivers/net/ethernet/intel/i40evf/i40evf.h
··· 225 225 #define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED 226 226 /* flags for admin queue service task */ 227 227 u32 aq_required; 228 - u32 aq_pending; 229 228 #define I40EVF_FLAG_AQ_ENABLE_QUEUES (u32)(1) 230 229 #define I40EVF_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1) 231 230 #define I40EVF_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2)
+2 -7
drivers/net/ethernet/intel/i40evf/i40evf_main.c
··· 1008 1008 adapter->state != __I40EVF_RESETTING) { 1009 1009 /* cancel any current operation */ 1010 1010 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; 1011 - adapter->aq_pending = 0; 1012 1011 /* Schedule operations to close down the HW. Don't wait 1013 1012 * here for this to complete. The watchdog is still running 1014 1013 * and it will take care of this. ··· 1334 1335 */ 1335 1336 return; 1336 1337 } 1337 - adapter->aq_pending = 0; 1338 1338 adapter->aq_required = 0; 1339 1339 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; 1340 1340 goto watchdog_done; ··· 1353 1355 adapter->flags |= I40EVF_FLAG_RESET_PENDING; 1354 1356 dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); 1355 1357 schedule_work(&adapter->reset_task); 1356 - adapter->aq_pending = 0; 1357 1358 adapter->aq_required = 0; 1358 1359 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; 1359 1360 goto watchdog_done; ··· 1361 1364 /* Process admin queue tasks. After init, everything gets done 1362 1365 * here so we don't race on the admin queue. 1363 1366 */ 1364 - if (adapter->aq_pending) { 1367 + if (adapter->current_op) { 1365 1368 if (!i40evf_asq_done(hw)) { 1366 1369 dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n"); 1367 1370 i40evf_send_api_ver(adapter); ··· 2026 2029 if (err) { 2027 2030 dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", 2028 2031 err); 2029 - goto err; 2032 + goto err; 2030 2033 } 2031 2034 err = i40evf_check_reset_complete(hw); 2032 2035 if (err) { ··· 2246 2249 /* Prevent the watchdog from running. 
*/ 2247 2250 adapter->state = __I40EVF_REMOVE; 2248 2251 adapter->aq_required = 0; 2249 - adapter->aq_pending = 0; 2250 2252 2251 2253 #ifdef CONFIG_PM 2252 2254 pci_save_state(pdev); ··· 2463 2467 /* Shut down all the garbage mashers on the detention level */ 2464 2468 adapter->state = __I40EVF_REMOVE; 2465 2469 adapter->aq_required = 0; 2466 - adapter->aq_pending = 0; 2467 2470 i40evf_request_reset(adapter); 2468 2471 msleep(20); 2469 2472 /* If the FW isn't responding, kick it once, but only once. */
+11 -31
drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
··· 250 250 vqpi++; 251 251 } 252 252 253 - adapter->aq_pending |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES; 254 253 adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES; 255 254 i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, 256 255 (u8 *)vqci, len); ··· 276 277 vqs.vsi_id = adapter->vsi_res->vsi_id; 277 278 vqs.tx_queues = (1 << adapter->num_active_queues) - 1; 278 279 vqs.rx_queues = vqs.tx_queues; 279 - adapter->aq_pending |= I40EVF_FLAG_AQ_ENABLE_QUEUES; 280 280 adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES; 281 281 i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES, 282 282 (u8 *)&vqs, sizeof(vqs)); ··· 301 303 vqs.vsi_id = adapter->vsi_res->vsi_id; 302 304 vqs.tx_queues = (1 << adapter->num_active_queues) - 1; 303 305 vqs.rx_queues = vqs.tx_queues; 304 - adapter->aq_pending |= I40EVF_FLAG_AQ_DISABLE_QUEUES; 305 306 adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES; 306 307 i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES, 307 308 (u8 *)&vqs, sizeof(vqs)); ··· 351 354 vimi->vecmap[v_idx].txq_map = 0; 352 355 vimi->vecmap[v_idx].rxq_map = 0; 353 356 354 - adapter->aq_pending |= I40EVF_FLAG_AQ_MAP_VECTORS; 355 357 adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS; 356 358 i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, 357 359 (u8 *)vimi, len); ··· 411 415 f->add = false; 412 416 } 413 417 } 414 - adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; 415 418 adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; 416 419 i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, 417 420 (u8 *)veal, len); ··· 471 476 kfree(f); 472 477 } 473 478 } 474 - adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; 475 479 adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; 476 480 i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, 477 481 (u8 *)veal, len); ··· 531 537 f->add = false; 532 538 } 533 539 } 534 - adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; 535 540 adapter->aq_required 
&= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; 536 541 i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); 537 542 kfree(vvfl); ··· 591 598 kfree(f); 592 599 } 593 600 } 594 - adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; 595 601 adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; 596 602 i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); 597 603 kfree(vvfl); ··· 712 720 __func__, v_retval, v_opcode); 713 721 } 714 722 switch (v_opcode) { 715 - case I40E_VIRTCHNL_OP_VERSION: 716 - /* no action, but also not an error */ 717 - break; 718 723 case I40E_VIRTCHNL_OP_GET_STATS: { 719 724 struct i40e_eth_stats *stats = 720 725 (struct i40e_eth_stats *)msg; ··· 729 740 adapter->current_stats = *stats; 730 741 } 731 742 break; 732 - case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: 733 - adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_MAC_FILTER); 734 - break; 735 - case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: 736 - adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_MAC_FILTER); 737 - break; 738 - case I40E_VIRTCHNL_OP_ADD_VLAN: 739 - adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_VLAN_FILTER); 740 - break; 741 - case I40E_VIRTCHNL_OP_DEL_VLAN: 742 - adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_VLAN_FILTER); 743 - break; 744 743 case I40E_VIRTCHNL_OP_ENABLE_QUEUES: 745 - adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ENABLE_QUEUES); 746 744 /* enable transmits */ 747 745 i40evf_irq_enable(adapter, true); 748 746 netif_tx_start_all_queues(adapter->netdev); 749 747 netif_carrier_on(adapter->netdev); 750 748 break; 751 749 case I40E_VIRTCHNL_OP_DISABLE_QUEUES: 752 - adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DISABLE_QUEUES); 753 750 i40evf_free_all_tx_resources(adapter); 754 751 i40evf_free_all_rx_resources(adapter); 755 752 break; 756 - case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: 757 - adapter->aq_pending &= ~(I40EVF_FLAG_AQ_CONFIGURE_QUEUES); 758 - break; 753 + case I40E_VIRTCHNL_OP_VERSION: 754 + case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: 759 755 case 
I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: 760 - adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS); 756 + /* Don't display an error if we get these out of sequence. 757 + * If the firmware needed to get kicked, we'll get these and 758 + * it's no problem. 759 + */ 760 + if (v_opcode != adapter->current_op) 761 + return; 761 762 break; 762 763 default: 763 - dev_info(&adapter->pdev->dev, "Received unexpected message %d from PF\n", 764 - v_opcode); 764 + if (v_opcode != adapter->current_op) 765 + dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", 766 + adapter->current_op, v_opcode); 765 767 break; 766 768 } /* switch v_opcode */ 767 769 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
-1
drivers/net/ethernet/qualcomm/qca_spi.c
··· 41 41 #include <linux/skbuff.h> 42 42 #include <linux/spi/spi.h> 43 43 #include <linux/types.h> 44 - #include <linux/version.h> 45 44 46 45 #include "qca_7k.h" 47 46 #include "qca_debug.h"
+1
drivers/net/ethernet/rocker/rocker.c
··· 4759 4759 4760 4760 if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) { 4761 4761 dev_err(&pdev->dev, "invalid PCI region size\n"); 4762 + err = -EINVAL; 4762 4763 goto err_pci_resource_len_check; 4763 4764 } 4764 4765
+1 -1
drivers/net/ethernet/sfc/selftest.c
··· 46 46 struct iphdr ip; 47 47 struct udphdr udp; 48 48 __be16 iteration; 49 - const char msg[64]; 49 + char msg[64]; 50 50 } __packed; 51 51 52 52 /* Loopback test source MAC address */
+3 -2
drivers/net/ethernet/stmicro/stmmac/common.h
··· 150 150 #define MAC_CSR_H_FRQ_MASK 0x20 151 151 152 152 #define HASH_TABLE_SIZE 64 153 - #define PAUSE_TIME 0x200 153 + #define PAUSE_TIME 0xffff 154 154 155 155 /* Flow Control defines */ 156 156 #define FLOW_OFF 0 ··· 357 357 void (*dump_regs) (void __iomem *ioaddr); 358 358 /* Set tx/rx threshold in the csr6 register 359 359 * An invalid value enables the store-and-forward mode */ 360 - void (*dma_mode) (void __iomem *ioaddr, int txmode, int rxmode); 360 + void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode, 361 + int rxfifosz); 361 362 /* To track extra statistic (if supported) */ 362 363 void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x, 363 364 void __iomem *ioaddr);
+51
drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
··· 172 172 /* GMAC FLOW CTRL defines */ 173 173 #define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ 174 174 #define GMAC_FLOW_CTRL_PT_SHIFT 16 175 + #define GMAC_FLOW_CTRL_UP 0x00000008 /* Unicast pause frame enable */ 175 176 #define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */ 176 177 #define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */ 177 178 #define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */ ··· 246 245 #define DMA_CONTROL_EFC 0x00000100 247 246 #define DMA_CONTROL_FEF 0x00000080 248 247 #define DMA_CONTROL_FUF 0x00000040 248 + 249 + /* Receive flow control activation field 250 + * RFA field in DMA control register, bits 23,10:9 251 + */ 252 + #define DMA_CONTROL_RFA_MASK 0x00800600 253 + 254 + /* Receive flow control deactivation field 255 + * RFD field in DMA control register, bits 22,12:11 256 + */ 257 + #define DMA_CONTROL_RFD_MASK 0x00401800 258 + 259 + /* RFD and RFA fields are encoded as follows 260 + * 261 + * Bit Field 262 + * 0,00 - Full minus 1KB (only valid when rxfifo >= 4KB and EFC enabled) 263 + * 0,01 - Full minus 2KB (only valid when rxfifo >= 4KB and EFC enabled) 264 + * 0,10 - Full minus 3KB (only valid when rxfifo >= 4KB and EFC enabled) 265 + * 0,11 - Full minus 4KB (only valid when rxfifo > 4KB and EFC enabled) 266 + * 1,00 - Full minus 5KB (only valid when rxfifo > 8KB and EFC enabled) 267 + * 1,01 - Full minus 6KB (only valid when rxfifo > 8KB and EFC enabled) 268 + * 1,10 - Full minus 7KB (only valid when rxfifo > 8KB and EFC enabled) 269 + * 1,11 - Reserved 270 + * 271 + * RFD should always be > RFA for a given FIFO size. RFD == RFA may work, 272 + * but packet throughput performance may not be as expected. 273 + * 274 + * Be sure that bit 3 in GMAC Register 6 is set for Unicast Pause frame 275 + * detection (IEEE Specification Requirement, Annex 31B, 31B.1, Pause 276 + * Description). 
277 + * 278 + * Be sure that DZPA (bit 7 in Flow Control Register, GMAC Register 6), 279 + * is set to 0. This allows pause frames with a quanta of 0 to be sent 280 + * as an XOFF message to the link peer. 281 + */ 282 + 283 + #define RFA_FULL_MINUS_1K 0x00000000 284 + #define RFA_FULL_MINUS_2K 0x00000200 285 + #define RFA_FULL_MINUS_3K 0x00000400 286 + #define RFA_FULL_MINUS_4K 0x00000600 287 + #define RFA_FULL_MINUS_5K 0x00800000 288 + #define RFA_FULL_MINUS_6K 0x00800200 289 + #define RFA_FULL_MINUS_7K 0x00800400 290 + 291 + #define RFD_FULL_MINUS_1K 0x00000000 292 + #define RFD_FULL_MINUS_2K 0x00000800 293 + #define RFD_FULL_MINUS_3K 0x00001000 294 + #define RFD_FULL_MINUS_4K 0x00001800 295 + #define RFD_FULL_MINUS_5K 0x00400000 296 + #define RFD_FULL_MINUS_6K 0x00400800 297 + #define RFD_FULL_MINUS_7K 0x00401000 249 298 250 299 enum rtc_control { 251 300 DMA_CONTROL_RTC_64 = 0x00000000,
+4 -1
drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
··· 201 201 unsigned int fc, unsigned int pause_time) 202 202 { 203 203 void __iomem *ioaddr = hw->pcsr; 204 - unsigned int flow = 0; 204 + /* Set flow such that DZPQ in Mac Register 6 is 0, 205 + * and unicast pause detect is enabled. 206 + */ 207 + unsigned int flow = GMAC_FLOW_CTRL_UP; 205 208 206 209 pr_debug("GMAC Flow-Control:\n"); 207 210 if (fc & FLOW_RX) {
+25 -1
drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
··· 106 106 return 0; 107 107 } 108 108 109 + static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz) 110 + { 111 + csr6 &= ~DMA_CONTROL_RFA_MASK; 112 + csr6 &= ~DMA_CONTROL_RFD_MASK; 113 + 114 + /* Leave flow control disabled if receive fifo size is less than 115 + * 4K or 0. Otherwise, send XOFF when fifo is 1K less than full, 116 + * and send XON when 2K less than full. 117 + */ 118 + if (rxfifosz < 4096) { 119 + csr6 &= ~DMA_CONTROL_EFC; 120 + pr_debug("GMAC: disabling flow control, rxfifo too small(%d)\n", 121 + rxfifosz); 122 + } else { 123 + csr6 |= DMA_CONTROL_EFC; 124 + csr6 |= RFA_FULL_MINUS_1K; 125 + csr6 |= RFD_FULL_MINUS_2K; 126 + } 127 + return csr6; 128 + } 129 + 109 130 static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode, 110 - int rxmode) 131 + int rxmode, int rxfifosz) 111 132 { 112 133 u32 csr6 = readl(ioaddr + DMA_CONTROL); 113 134 ··· 173 152 else 174 153 csr6 |= DMA_CONTROL_RTC_128; 175 154 } 155 + 156 + /* Configure flow control based on rx fifo size */ 157 + csr6 = dwmac1000_configure_fc(csr6, rxfifosz); 176 158 177 159 writel(csr6, ioaddr + DMA_CONTROL); 178 160 }
+1 -1
drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
··· 72 72 * control register. 73 73 */ 74 74 static void dwmac100_dma_operation_mode(void __iomem *ioaddr, int txmode, 75 - int rxmode) 75 + int rxmode, int rxfifosz) 76 76 { 77 77 u32 csr6 = readl(ioaddr + DMA_CONTROL); 78 78
+14 -8
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 1277 1277 */ 1278 1278 static void stmmac_dma_operation_mode(struct stmmac_priv *priv) 1279 1279 { 1280 + int rxfifosz = priv->plat->rx_fifo_size; 1281 + 1280 1282 if (priv->plat->force_thresh_dma_mode) 1281 - priv->hw->dma->dma_mode(priv->ioaddr, tc, tc); 1283 + priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz); 1282 1284 else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { 1283 1285 /* 1284 1286 * In case of GMAC, SF mode can be enabled ··· 1289 1287 * 2) There is no bugged Jumbo frame support 1290 1288 * that needs to not insert csum in the TDES. 1291 1289 */ 1292 - priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE); 1290 + priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE, 1291 + rxfifosz); 1293 1292 priv->xstats.threshold = SF_DMA_MODE; 1294 1293 } else 1295 - priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE); 1294 + priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE, 1295 + rxfifosz); 1296 1296 } 1297 1297 1298 1298 /** ··· 1446 1442 static void stmmac_dma_interrupt(struct stmmac_priv *priv) 1447 1443 { 1448 1444 int status; 1445 + int rxfifosz = priv->plat->rx_fifo_size; 1449 1446 1450 1447 status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats); 1451 1448 if (likely((status & handle_rx)) || (status & handle_tx)) { ··· 1461 1456 (tc <= 256)) { 1462 1457 tc += 64; 1463 1458 if (priv->plat->force_thresh_dma_mode) 1464 - priv->hw->dma->dma_mode(priv->ioaddr, tc, tc); 1459 + priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, 1460 + rxfifosz); 1465 1461 else 1466 1462 priv->hw->dma->dma_mode(priv->ioaddr, tc, 1467 - SF_DMA_MODE); 1463 + SF_DMA_MODE, rxfifosz); 1468 1464 priv->xstats.threshold = tc; 1469 1465 } 1470 1466 } else if (unlikely(status == tx_hard_error)) ··· 2976 2970 priv->hw->dma->stop_tx(priv->ioaddr); 2977 2971 2978 2972 stmmac_set_mac(priv->ioaddr, false); 2979 - if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI && 2980 - priv->pcs != STMMAC_PCS_RTBI) 2981 - 
stmmac_mdio_unregister(ndev); 2982 2973 netif_carrier_off(ndev); 2983 2974 unregister_netdev(ndev); 2984 2975 if (priv->stmmac_rst) 2985 2976 reset_control_assert(priv->stmmac_rst); 2986 2977 clk_disable_unprepare(priv->pclk); 2987 2978 clk_disable_unprepare(priv->stmmac_clk); 2979 + if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI && 2980 + priv->pcs != STMMAC_PCS_RTBI) 2981 + stmmac_mdio_unregister(ndev); 2988 2982 free_netdev(ndev); 2989 2983 2990 2984 return 0;
+4
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
··· 181 181 sizeof(struct stmmac_mdio_bus_data), 182 182 GFP_KERNEL); 183 183 184 + of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size); 185 + 186 + of_property_read_u32(np, "rx-fifo-depth", &plat->rx_fifo_size); 187 + 184 188 plat->force_sf_dma_mode = 185 189 of_property_read_bool(np, "snps,force_sf_dma_mode"); 186 190
+1 -1
drivers/net/ifb.c
··· 105 105 if (from & AT_EGRESS) { 106 106 dev_queue_xmit(skb); 107 107 } else if (from & AT_INGRESS) { 108 - skb_pull(skb, skb->dev->hard_header_len); 108 + skb_pull(skb, skb->mac_len); 109 109 netif_receive_skb(skb); 110 110 } else 111 111 BUG();
+1 -1
drivers/net/macvtap.c
··· 313 313 */ 314 314 if (q->flags & IFF_VNET_HDR) 315 315 features |= vlan->tap_features; 316 - if (netif_needs_gso(dev, skb, features)) { 316 + if (netif_needs_gso(skb, features)) { 317 317 struct sk_buff *segs = __skb_gso_segment(skb, features, false); 318 318 319 319 if (IS_ERR(segs))
+1 -1
drivers/net/xen-netfront.c
··· 560 560 561 561 if (unlikely(!netif_carrier_ok(dev) || 562 562 (slots > 1 && !xennet_can_sg(dev)) || 563 - netif_needs_gso(dev, skb, netif_skb_features(skb)))) { 563 + netif_needs_gso(skb, netif_skb_features(skb)))) { 564 564 spin_unlock_irqrestore(&queue->tx_lock, flags); 565 565 goto drop; 566 566 }
+1 -1
include/linux/netdevice.h
··· 3713 3713 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); 3714 3714 } 3715 3715 3716 - static inline bool netif_needs_gso(struct net_device *dev, struct sk_buff *skb, 3716 + static inline bool netif_needs_gso(struct sk_buff *skb, 3717 3717 netdev_features_t features) 3718 3718 { 3719 3719 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
+2
include/linux/stmmac.h
··· 114 114 int maxmtu; 115 115 int multicast_filter_bins; 116 116 int unicast_filter_entries; 117 + int tx_fifo_size; 118 + int rx_fifo_size; 117 119 void (*fix_mac_speed)(void *priv, unsigned int speed); 118 120 void (*bus_setup)(void __iomem *ioaddr); 119 121 void *(*setup)(struct platform_device *pdev);
-2
include/net/netns/generic.h
··· 38 38 39 39 rcu_read_lock(); 40 40 ng = rcu_dereference(net->gen); 41 - BUG_ON(id == 0 || id > ng->len); 42 41 ptr = ng->ptr[id - 1]; 43 42 rcu_read_unlock(); 44 43 45 - BUG_ON(!ptr); 46 44 return ptr; 47 45 } 48 46 #endif
+1 -1
include/net/tcp.h
··· 829 829 /* hook for packet ack accounting (optional) */ 830 830 void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us); 831 831 /* get info for inet_diag (optional) */ 832 - void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb); 832 + int (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb); 833 833 834 834 char name[TCP_CA_NAME_MAX]; 835 835 struct module *owner;
+1 -1
include/uapi/linux/bpf.h
··· 177 177 /** 178 178 * skb_store_bytes(skb, offset, from, len, flags) - store bytes into packet 179 179 * @skb: pointer to skb 180 - * @offset: offset within packet from skb->data 180 + * @offset: offset within packet from skb->mac_header 181 181 * @from: pointer where to copy bytes from 182 182 * @len: number of bytes to store into packet 183 183 * @flags: bit 0 - if true, recompute skb->csum
+5 -2
include/uapi/linux/filter.h
··· 79 79 #define SKF_AD_RANDOM 56 80 80 #define SKF_AD_VLAN_TPID 60 81 81 #define SKF_AD_MAX 64 82 - #define SKF_NET_OFF (-0x100000) 83 - #define SKF_LL_OFF (-0x200000) 84 82 83 + #define SKF_NET_OFF (-0x100000) 84 + #define SKF_LL_OFF (-0x200000) 85 + 86 + #define BPF_NET_OFF SKF_NET_OFF 87 + #define BPF_LL_OFF SKF_LL_OFF 85 88 86 89 #endif /* _UAPI__LINUX_FILTER_H__ */
+9 -3
kernel/bpf/verifier.c
··· 1397 1397 /* tell verifier to check for equivalent states 1398 1398 * after every call and jump 1399 1399 */ 1400 - env->explored_states[t + 1] = STATE_LIST_MARK; 1400 + if (t + 1 < insn_cnt) 1401 + env->explored_states[t + 1] = STATE_LIST_MARK; 1401 1402 } else { 1402 1403 /* conditional jump with two edges */ 1403 1404 ret = push_insn(t, t + 1, FALLTHROUGH, env); ··· 1637 1636 if (err) 1638 1637 return err; 1639 1638 1639 + src_reg_type = regs[insn->src_reg].type; 1640 + 1640 1641 /* check that memory (src_reg + off) is readable, 1641 1642 * the state of dst_reg will be updated by this func 1642 1643 */ ··· 1648 1645 if (err) 1649 1646 return err; 1650 1647 1651 - src_reg_type = regs[insn->src_reg].type; 1648 + if (BPF_SIZE(insn->code) != BPF_W) { 1649 + insn_idx++; 1650 + continue; 1651 + } 1652 1652 1653 - if (insn->imm == 0 && BPF_SIZE(insn->code) == BPF_W) { 1653 + if (insn->imm == 0) { 1654 1654 /* saw a valid insn 1655 1655 * dst_reg = *(u32 *)(src_reg + off) 1656 1656 * use reserved 'imm' field to mark this insn
+1 -1
net/core/dev.c
··· 2713 2713 if (unlikely(!skb)) 2714 2714 goto out_null; 2715 2715 2716 - if (netif_needs_gso(dev, skb, features)) { 2716 + if (netif_needs_gso(skb, features)) { 2717 2717 struct sk_buff *segs; 2718 2718 2719 2719 segs = skb_gso_segment(skb, features);
+32 -9
net/core/filter.c
··· 1175 1175 return 0; 1176 1176 } 1177 1177 1178 + /** 1179 + * bpf_skb_clone_not_writable - is the header of a clone not writable 1180 + * @skb: buffer to check 1181 + * @len: length up to which to write, can be negative 1182 + * 1183 + * Returns true if modifying the header part of the cloned buffer 1184 + * does require the data to be copied. I.e. this version works with 1185 + * negative lengths needed for eBPF case! 1186 + */ 1187 + static bool bpf_skb_clone_unwritable(const struct sk_buff *skb, int len) 1188 + { 1189 + return skb_header_cloned(skb) || 1190 + (int) skb_headroom(skb) + len > skb->hdr_len; 1191 + } 1192 + 1178 1193 #define BPF_RECOMPUTE_CSUM(flags) ((flags) & 1) 1179 1194 1180 1195 static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags) 1181 1196 { 1182 1197 struct sk_buff *skb = (struct sk_buff *) (long) r1; 1183 - unsigned int offset = (unsigned int) r2; 1198 + int offset = (int) r2; 1184 1199 void *from = (void *) (long) r3; 1185 1200 unsigned int len = (unsigned int) r4; 1186 1201 char buf[16]; ··· 1209 1194 * 1210 1195 * so check for invalid 'offset' and too large 'len' 1211 1196 */ 1212 - if (unlikely(offset > 0xffff || len > sizeof(buf))) 1197 + if (unlikely((u32) offset > 0xffff || len > sizeof(buf))) 1213 1198 return -EFAULT; 1214 1199 1215 - if (skb_cloned(skb) && !skb_clone_writable(skb, offset + len)) 1200 + offset -= skb->data - skb_mac_header(skb); 1201 + if (unlikely(skb_cloned(skb) && 1202 + bpf_skb_clone_unwritable(skb, offset + len))) 1216 1203 return -EFAULT; 1217 1204 1218 1205 ptr = skb_header_pointer(skb, offset, len, buf); ··· 1249 1232 #define BPF_HEADER_FIELD_SIZE(flags) ((flags) & 0x0f) 1250 1233 #define BPF_IS_PSEUDO_HEADER(flags) ((flags) & 0x10) 1251 1234 1252 - static u64 bpf_l3_csum_replace(u64 r1, u64 offset, u64 from, u64 to, u64 flags) 1235 + static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags) 1253 1236 { 1254 1237 struct sk_buff *skb = (struct sk_buff *) (long) r1; 
1238 + int offset = (int) r2; 1255 1239 __sum16 sum, *ptr; 1256 1240 1257 - if (unlikely(offset > 0xffff)) 1241 + if (unlikely((u32) offset > 0xffff)) 1258 1242 return -EFAULT; 1259 1243 1260 - if (skb_cloned(skb) && !skb_clone_writable(skb, offset + sizeof(sum))) 1244 + offset -= skb->data - skb_mac_header(skb); 1245 + if (unlikely(skb_cloned(skb) && 1246 + bpf_skb_clone_unwritable(skb, offset + sizeof(sum)))) 1261 1247 return -EFAULT; 1262 1248 1263 1249 ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum); ··· 1296 1276 .arg5_type = ARG_ANYTHING, 1297 1277 }; 1298 1278 1299 - static u64 bpf_l4_csum_replace(u64 r1, u64 offset, u64 from, u64 to, u64 flags) 1279 + static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags) 1300 1280 { 1301 1281 struct sk_buff *skb = (struct sk_buff *) (long) r1; 1302 1282 u32 is_pseudo = BPF_IS_PSEUDO_HEADER(flags); 1283 + int offset = (int) r2; 1303 1284 __sum16 sum, *ptr; 1304 1285 1305 - if (unlikely(offset > 0xffff)) 1286 + if (unlikely((u32) offset > 0xffff)) 1306 1287 return -EFAULT; 1307 1288 1308 - if (skb_cloned(skb) && !skb_clone_writable(skb, offset + sizeof(sum))) 1289 + offset -= skb->data - skb_mac_header(skb); 1290 + if (unlikely(skb_cloned(skb) && 1291 + bpf_skb_clone_unwritable(skb, offset + sizeof(sum)))) 1309 1292 return -EFAULT; 1310 1293 1311 1294 ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
-1
net/core/net_namespace.c
··· 16 16 #include <linux/export.h> 17 17 #include <linux/user_namespace.h> 18 18 #include <linux/net_namespace.h> 19 - #include <linux/rtnetlink.h> 20 19 #include <net/sock.h> 21 20 #include <net/netlink.h> 22 21 #include <net/net_namespace.h>
+6 -4
net/core/skbuff.c
··· 4124 4124 */ 4125 4125 void skb_scrub_packet(struct sk_buff *skb, bool xnet) 4126 4126 { 4127 - if (xnet) 4128 - skb_orphan(skb); 4129 4127 skb->tstamp.tv64 = 0; 4130 4128 skb->pkt_type = PACKET_HOST; 4131 4129 skb->skb_iif = 0; 4132 4130 skb->ignore_df = 0; 4133 4131 skb_dst_drop(skb); 4134 - skb->mark = 0; 4135 4132 skb_sender_cpu_clear(skb); 4136 - skb_init_secmark(skb); 4137 4133 secpath_reset(skb); 4138 4134 nf_reset(skb); 4139 4135 nf_reset_trace(skb); 4136 + 4137 + if (!xnet) 4138 + return; 4139 + 4140 + skb_orphan(skb); 4141 + skb->mark = 0; 4140 4142 } 4141 4143 EXPORT_SYMBOL_GPL(skb_scrub_packet); 4142 4144
+3 -3
net/dsa/dsa.c
··· 124 124 125 125 return count; 126 126 } 127 - static DEVICE_ATTR(temp1_max, S_IRUGO, temp1_max_show, temp1_max_store); 127 + static DEVICE_ATTR_RW(temp1_max); 128 128 129 129 static ssize_t temp1_max_alarm_show(struct device *dev, 130 130 struct device_attribute *attr, char *buf) ··· 159 159 if (index == 1) { 160 160 if (!drv->get_temp_limit) 161 161 mode = 0; 162 - else if (drv->set_temp_limit) 163 - mode |= S_IWUSR; 162 + else if (!drv->set_temp_limit) 163 + mode &= ~S_IWUSR; 164 164 } else if (index == 2 && !drv->get_temp_alarm) { 165 165 mode = 0; 166 166 }
+1 -2
net/ipv4/fou.c
··· 711 711 cb->nlh->nlmsg_seq, NLM_F_MULTI, 712 712 skb, FOU_CMD_GET); 713 713 if (ret) 714 - goto done; 714 + break; 715 715 } 716 716 mutex_unlock(&fn->fou_lock); 717 717 718 - done: 719 718 cb->args[0] = idx; 720 719 return skb->len; 721 720 }
+22 -6
net/ipv4/inet_diag.c
··· 111 111 const struct nlmsghdr *unlh) 112 112 { 113 113 const struct inet_sock *inet = inet_sk(sk); 114 + const struct tcp_congestion_ops *ca_ops; 114 115 const struct inet_diag_handler *handler; 115 116 int ext = req->idiag_ext; 116 117 struct inet_diag_msg *r; ··· 209 208 info = nla_data(attr); 210 209 } 211 210 212 - if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) 213 - if (nla_put_string(skb, INET_DIAG_CONG, 214 - icsk->icsk_ca_ops->name) < 0) 211 + if (ext & (1 << (INET_DIAG_CONG - 1))) { 212 + int err = 0; 213 + 214 + rcu_read_lock(); 215 + ca_ops = READ_ONCE(icsk->icsk_ca_ops); 216 + if (ca_ops) 217 + err = nla_put_string(skb, INET_DIAG_CONG, ca_ops->name); 218 + rcu_read_unlock(); 219 + if (err < 0) 215 220 goto errout; 221 + } 216 222 217 223 handler->idiag_get_info(sk, r, info); 218 224 219 - if (sk->sk_state < TCP_TIME_WAIT && 220 - icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info) 221 - icsk->icsk_ca_ops->get_info(sk, ext, skb); 225 + if (sk->sk_state < TCP_TIME_WAIT) { 226 + int err = 0; 227 + 228 + rcu_read_lock(); 229 + ca_ops = READ_ONCE(icsk->icsk_ca_ops); 230 + if (ca_ops && ca_ops->get_info) 231 + err = ca_ops->get_info(sk, ext, skb); 232 + rcu_read_unlock(); 233 + if (err < 0) 234 + goto errout; 235 + } 222 236 223 237 out: 224 238 nlmsg_end(skb, nlh);
+6 -4
net/ipv4/tcp.c
··· 2595 2595 const struct tcp_sock *tp = tcp_sk(sk); 2596 2596 const struct inet_connection_sock *icsk = inet_csk(sk); 2597 2597 u32 now = tcp_time_stamp; 2598 + u32 rate; 2598 2599 2599 2600 memset(info, 0, sizeof(*info)); 2600 2601 ··· 2656 2655 2657 2656 info->tcpi_total_retrans = tp->total_retrans; 2658 2657 2659 - info->tcpi_pacing_rate = sk->sk_pacing_rate != ~0U ? 2660 - sk->sk_pacing_rate : ~0ULL; 2661 - info->tcpi_max_pacing_rate = sk->sk_max_pacing_rate != ~0U ? 2662 - sk->sk_max_pacing_rate : ~0ULL; 2658 + rate = READ_ONCE(sk->sk_pacing_rate); 2659 + info->tcpi_pacing_rate = rate != ~0U ? rate : ~0ULL; 2660 + 2661 + rate = READ_ONCE(sk->sk_max_pacing_rate); 2662 + info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL; 2663 2663 } 2664 2664 EXPORT_SYMBOL_GPL(tcp_get_info); 2665 2665
+3 -2
net/ipv4/tcp_dctcp.c
··· 277 277 } 278 278 } 279 279 280 - static void dctcp_get_info(struct sock *sk, u32 ext, struct sk_buff *skb) 280 + static int dctcp_get_info(struct sock *sk, u32 ext, struct sk_buff *skb) 281 281 { 282 282 const struct dctcp *ca = inet_csk_ca(sk); 283 283 ··· 297 297 info.dctcp_ab_tot = ca->acked_bytes_total; 298 298 } 299 299 300 - nla_put(skb, INET_DIAG_DCTCPINFO, sizeof(info), &info); 300 + return nla_put(skb, INET_DIAG_DCTCPINFO, sizeof(info), &info); 301 301 } 302 + return 0; 302 303 } 303 304 304 305 static struct tcp_congestion_ops dctcp __read_mostly = {
+3 -3
net/ipv4/tcp_illinois.c
··· 300 300 } 301 301 302 302 /* Extract info for Tcp socket info provided via netlink. */ 303 - static void tcp_illinois_info(struct sock *sk, u32 ext, 304 - struct sk_buff *skb) 303 + static int tcp_illinois_info(struct sock *sk, u32 ext, struct sk_buff *skb) 305 304 { 306 305 const struct illinois *ca = inet_csk_ca(sk); 307 306 ··· 317 318 do_div(t, info.tcpv_rttcnt); 318 319 info.tcpv_rtt = t; 319 320 } 320 - nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info); 321 + return nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info); 321 322 } 323 + return 0; 322 324 } 323 325 324 326 static struct tcp_congestion_ops tcp_illinois __read_mostly = {
+3 -2
net/ipv4/tcp_vegas.c
··· 286 286 } 287 287 288 288 /* Extract info for Tcp socket info provided via netlink. */ 289 - void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb) 289 + int tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb) 290 290 { 291 291 const struct vegas *ca = inet_csk_ca(sk); 292 292 if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) { ··· 297 297 .tcpv_minrtt = ca->minRTT, 298 298 }; 299 299 300 - nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info); 300 + return nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info); 301 301 } 302 + return 0; 302 303 } 303 304 EXPORT_SYMBOL_GPL(tcp_vegas_get_info); 304 305
+1 -1
net/ipv4/tcp_vegas.h
··· 19 19 void tcp_vegas_state(struct sock *sk, u8 ca_state); 20 20 void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us); 21 21 void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event); 22 - void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb); 22 + int tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb); 23 23 24 24 #endif /* __TCP_VEGAS_H */
+3 -3
net/ipv4/tcp_westwood.c
··· 256 256 } 257 257 258 258 /* Extract info for Tcp socket info provided via netlink. */ 259 - static void tcp_westwood_info(struct sock *sk, u32 ext, 260 - struct sk_buff *skb) 259 + static int tcp_westwood_info(struct sock *sk, u32 ext, struct sk_buff *skb) 261 260 { 262 261 const struct westwood *ca = inet_csk_ca(sk); 263 262 ··· 267 268 .tcpv_minrtt = jiffies_to_usecs(ca->rtt_min), 268 269 }; 269 270 270 - nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info); 271 + return nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info); 271 272 } 273 + return 0; 272 274 } 273 275 274 276 static struct tcp_congestion_ops tcp_westwood __read_mostly = {
+3
net/sched/act_bpf.c
··· 38 38 struct tcf_bpf *prog = act->priv; 39 39 int action, filter_res; 40 40 41 + if (unlikely(!skb_mac_header_was_set(skb))) 42 + return TC_ACT_UNSPEC; 43 + 41 44 spin_lock(&prog->tcf_lock); 42 45 43 46 prog->tcf_tm.lastuse = jiffies;
+1 -1
net/sched/act_mirred.c
··· 157 157 158 158 if (!(at & AT_EGRESS)) { 159 159 if (m->tcfm_ok_push) 160 - skb_push(skb2, skb2->dev->hard_header_len); 160 + skb_push(skb2, skb->mac_len); 161 161 } 162 162 163 163 /* mirror is always swallowed */
+3
net/sched/cls_bpf.c
··· 66 66 struct cls_bpf_prog *prog; 67 67 int ret = -1; 68 68 69 + if (unlikely(!skb_mac_header_was_set(skb))) 70 + return -1; 71 + 69 72 /* Needed here for accessing maps. */ 70 73 rcu_read_lock(); 71 74 list_for_each_entry_rcu(prog, &head->plist, link) {
+6 -10
samples/bpf/tcbpf1_kern.c
··· 4 4 #include <uapi/linux/ip.h> 5 5 #include <uapi/linux/in.h> 6 6 #include <uapi/linux/tcp.h> 7 + #include <uapi/linux/filter.h> 8 + 7 9 #include "bpf_helpers.h" 8 10 9 11 /* compiler workaround */ ··· 16 14 bpf_skb_store_bytes(skb, 0, mac, ETH_ALEN, 1); 17 15 } 18 16 19 - /* use 1 below for ingress qdisc and 0 for egress */ 20 - #if 0 21 - #undef ETH_HLEN 22 - #define ETH_HLEN 0 23 - #endif 24 - 25 17 #define IP_CSUM_OFF (ETH_HLEN + offsetof(struct iphdr, check)) 26 18 #define TOS_OFF (ETH_HLEN + offsetof(struct iphdr, tos)) 27 19 28 20 static inline void set_ip_tos(struct __sk_buff *skb, __u8 new_tos) 29 21 { 30 - __u8 old_tos = load_byte(skb, TOS_OFF); 22 + __u8 old_tos = load_byte(skb, BPF_LL_OFF + TOS_OFF); 31 23 32 24 bpf_l3_csum_replace(skb, IP_CSUM_OFF, htons(old_tos), htons(new_tos), 2); 33 25 bpf_skb_store_bytes(skb, TOS_OFF, &new_tos, sizeof(new_tos), 0); ··· 34 38 35 39 static inline void set_tcp_ip_src(struct __sk_buff *skb, __u32 new_ip) 36 40 { 37 - __u32 old_ip = _htonl(load_word(skb, IP_SRC_OFF)); 41 + __u32 old_ip = _htonl(load_word(skb, BPF_LL_OFF + IP_SRC_OFF)); 38 42 39 43 bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip, IS_PSEUDO | sizeof(new_ip)); 40 44 bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, sizeof(new_ip)); ··· 44 48 #define TCP_DPORT_OFF (ETH_HLEN + sizeof(struct iphdr) + offsetof(struct tcphdr, dest)) 45 49 static inline void set_tcp_dest_port(struct __sk_buff *skb, __u16 new_port) 46 50 { 47 - __u16 old_port = htons(load_half(skb, TCP_DPORT_OFF)); 51 + __u16 old_port = htons(load_half(skb, BPF_LL_OFF + TCP_DPORT_OFF)); 48 52 49 53 bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_port, new_port, sizeof(new_port)); 50 54 bpf_skb_store_bytes(skb, TCP_DPORT_OFF, &new_port, sizeof(new_port), 0); ··· 53 57 SEC("classifier") 54 58 int bpf_prog1(struct __sk_buff *skb) 55 59 { 56 - __u8 proto = load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol)); 60 + __u8 proto = load_byte(skb, BPF_LL_OFF + ETH_HLEN + offsetof(struct iphdr, protocol)); 57 61 long *value; 58 62 59 63 if (proto == IPPROTO_TCP) {
+22
samples/bpf/test_verifier.c
··· 721 721 .errstr = "different pointers", 722 722 .result = REJECT, 723 723 }, 724 + { 725 + "access skb fields bad4", 726 + .insns = { 727 + BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3), 728 + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 729 + offsetof(struct __sk_buff, len)), 730 + BPF_MOV64_IMM(BPF_REG_0, 0), 731 + BPF_EXIT_INSN(), 732 + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 733 + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 734 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 735 + BPF_LD_MAP_FD(BPF_REG_1, 0), 736 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 737 + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 738 + BPF_EXIT_INSN(), 739 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 740 + BPF_JMP_IMM(BPF_JA, 0, 0, -13), 741 + }, 742 + .fixup = {7}, 743 + .errstr = "different pointers", 744 + .result = REJECT, 745 + }, 724 746 }; 725 747 726 748 static int probe_filter_length(struct bpf_insn *fp)