Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) Handle stations tied to AP_VLANs properly during mac80211 hw
reconfig. From Manikanta Pubbisetty.

2) Fix jump stack depth validation in nf_tables, from Taehee Yoo.

3) Fix quota handling in aRFS flow expiration of mlx5 driver, from Eran
Ben Elisha.

4) Exit path handling fix in powerpc64 BPF JIT, from Daniel Borkmann.

5) Use ptr_ring_consume_bh() in page pool code, from Tariq Toukan.

6) Fix cached netdev name leak in nf_tables, from Florian Westphal.

7) Fix memory leaks on chain rename, also from Florian Westphal.

8) Several fixes to DCTCP congestion control ACK handling, from Yuchung
Cheng.

9) Missing rcu_read_unlock() in CAIF protocol code, from Yue Haibing.

10) Fix link local address handling with VRF, from David Ahern.

11) Don't clobber 'err' on a successful call to __skb_linearize() in
skb_segment(). From Eric Dumazet.

12) Fix vxlan fdb notification races, from Roopa Prabhu.

13) Hash UDP fragments consistently, from Paolo Abeni.

14) If TCP receives lots of out of order tiny packets, we do really
silly stuff. Make the out-of-order queue ending more robust to this
kind of behavior, from Eric Dumazet.

15) Don't leak netlink dump state in nf_tables, from Florian Westphal.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (76 commits)
net: axienet: Fix double deregister of mdio
qmi_wwan: fix interface number for DW5821e production firmware
ip: in cmsg IP(V6)_ORIGDSTADDR call pskb_may_pull
bnx2x: Fix invalid memory access in rss hash config path.
net/mlx4_core: Save the qpn from the input modifier in RST2INIT wrapper
r8169: restore previous behavior to accept BIOS WoL settings
cfg80211: never ignore user regulatory hint
sock: fix sg page frag coalescing in sk_alloc_sg
netfilter: nf_tables: move dumper state allocation into ->start
tcp: add tcp_ooo_try_coalesce() helper
tcp: call tcp_drop() from tcp_data_queue_ofo()
tcp: detect malicious patterns in tcp_collapse_ofo_queue()
tcp: avoid collapses in tcp_prune_queue() if possible
tcp: free batches of packets in tcp_prune_ofo_queue()
ip: hash fragments consistently
ipv6: use fib6_info_hold_safe() when necessary
can: xilinx_can: fix power management handling
can: xilinx_can: fix incorrect clear of non-processed interrupts
can: xilinx_can: fix RX overflow interrupt not being enabled
can: xilinx_can: keep only 1-2 frames in TX FIFO to fix TX accounting
...

+1030 -563
+5 -24
arch/powerpc/net/bpf_jit_comp64.c
··· 286 286 u64 imm64; 287 287 u8 *func; 288 288 u32 true_cond; 289 + u32 tmp_idx; 289 290 290 291 /* 291 292 * addrs[] maps a BPF bytecode address into a real offset from ··· 638 637 case BPF_STX | BPF_XADD | BPF_W: 639 638 /* Get EA into TMP_REG_1 */ 640 639 PPC_ADDI(b2p[TMP_REG_1], dst_reg, off); 641 - /* error if EA is not word-aligned */ 642 - PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03); 643 - PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12); 644 - PPC_LI(b2p[BPF_REG_0], 0); 645 - PPC_JMP(exit_addr); 640 + tmp_idx = ctx->idx * 4; 646 641 /* load value from memory into TMP_REG_2 */ 647 642 PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0); 648 643 /* add value from src_reg into this */ ··· 646 649 /* store result back */ 647 650 PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]); 648 651 /* we're done if this succeeded */ 649 - PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4)); 650 - /* otherwise, let's try once more */ 651 - PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0); 652 - PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg); 653 - PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]); 654 - /* exit if the store was not successful */ 655 - PPC_LI(b2p[BPF_REG_0], 0); 656 - PPC_BCC(COND_NE, exit_addr); 652 + PPC_BCC_SHORT(COND_NE, tmp_idx); 657 653 break; 658 654 /* *(u64 *)(dst + off) += src */ 659 655 case BPF_STX | BPF_XADD | BPF_DW: 660 656 PPC_ADDI(b2p[TMP_REG_1], dst_reg, off); 661 - /* error if EA is not doubleword-aligned */ 662 - PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07); 663 - PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4)); 664 - PPC_LI(b2p[BPF_REG_0], 0); 665 - PPC_JMP(exit_addr); 657 + tmp_idx = ctx->idx * 4; 666 658 PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0); 667 659 PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg); 668 660 PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]); 669 - PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4)); 670 - PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0); 671 - PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg); 
672 - PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]); 673 - PPC_LI(b2p[BPF_REG_0], 0); 674 - PPC_BCC(COND_NE, exit_addr); 661 + PPC_BCC_SHORT(COND_NE, tmp_idx); 675 662 break; 676 663 677 664 /*
+14 -9
drivers/net/bonding/bond_options.c
··· 743 743 static int bond_option_mode_set(struct bonding *bond, 744 744 const struct bond_opt_value *newval) 745 745 { 746 - if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) { 747 - netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n", 748 - newval->string); 749 - /* disable arp monitoring */ 750 - bond->params.arp_interval = 0; 751 - /* set miimon to default value */ 752 - bond->params.miimon = BOND_DEFAULT_MIIMON; 753 - netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n", 754 - bond->params.miimon); 746 + if (!bond_mode_uses_arp(newval->value)) { 747 + if (bond->params.arp_interval) { 748 + netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n", 749 + newval->string); 750 + /* disable arp monitoring */ 751 + bond->params.arp_interval = 0; 752 + } 753 + 754 + if (!bond->params.miimon) { 755 + /* set miimon to default value */ 756 + bond->params.miimon = BOND_DEFAULT_MIIMON; 757 + netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n", 758 + bond->params.miimon); 759 + } 755 760 } 756 761 757 762 if (newval->value == BOND_MODE_ALB)
+9 -9
drivers/net/can/m_can/m_can.c
··· 634 634 int err; 635 635 636 636 err = pm_runtime_get_sync(priv->device); 637 - if (err) 637 + if (err < 0) { 638 638 pm_runtime_put_noidle(priv->device); 639 + return err; 640 + } 639 641 640 - return err; 642 + return 0; 641 643 } 642 644 643 645 static void m_can_clk_stop(struct m_can_priv *priv) ··· 1111 1109 1112 1110 } else { 1113 1111 /* Version 3.1.x or 3.2.x */ 1114 - cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE); 1112 + cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE | 1113 + CCCR_NISO); 1115 1114 1116 1115 /* Only 3.2.x has NISO Bit implemented */ 1117 1116 if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) ··· 1645 1642 priv->can.clock.freq = clk_get_rate(cclk); 1646 1643 priv->mram_base = mram_addr; 1647 1644 1648 - m_can_of_parse_mram(priv, mram_config_vals); 1649 - 1650 1645 platform_set_drvdata(pdev, dev); 1651 1646 SET_NETDEV_DEV(dev, &pdev->dev); 1652 1647 ··· 1667 1666 goto clk_disable; 1668 1667 } 1669 1668 1669 + m_can_of_parse_mram(priv, mram_config_vals); 1670 + 1670 1671 devm_can_led_init(dev); 1671 1672 1672 1673 of_can_transceiver(dev); ··· 1689 1686 failed_ret: 1690 1687 return ret; 1691 1688 } 1692 - 1693 - /* TODO: runtime PM with power down or sleep mode */ 1694 1689 1695 1690 static __maybe_unused int m_can_suspend(struct device *dev) 1696 1691 { ··· 1716 1715 1717 1716 pinctrl_pm_select_default_state(dev); 1718 1717 1719 - m_can_init_ram(priv); 1720 - 1721 1718 priv->can.state = CAN_STATE_ERROR_ACTIVE; 1722 1719 1723 1720 if (netif_running(ndev)) { ··· 1725 1726 if (ret) 1726 1727 return ret; 1727 1728 1729 + m_can_init_ram(priv); 1728 1730 m_can_start(ndev); 1729 1731 netif_device_attach(ndev); 1730 1732 netif_start_queue(ndev);
+5
drivers/net/can/mscan/mpc5xxx_can.c
··· 86 86 return 0; 87 87 } 88 88 cdm = of_iomap(np_cdm, 0); 89 + if (!cdm) { 90 + of_node_put(np_cdm); 91 + dev_err(&ofdev->dev, "can't map clock node!\n"); 92 + return 0; 93 + } 89 94 90 95 if (in_8(&cdm->ipb_clk_sel) & 0x1) 91 96 freq *= 2;
+19
drivers/net/can/peak_canfd/peak_pciefd_main.c
··· 58 58 #define PCIEFD_REG_SYS_VER1 0x0040 /* version reg #1 */ 59 59 #define PCIEFD_REG_SYS_VER2 0x0044 /* version reg #2 */ 60 60 61 + #define PCIEFD_FW_VERSION(x, y, z) (((u32)(x) << 24) | \ 62 + ((u32)(y) << 16) | \ 63 + ((u32)(z) << 8)) 64 + 61 65 /* System Control Registers Bits */ 62 66 #define PCIEFD_SYS_CTL_TS_RST 0x00000001 /* timestamp clock */ 63 67 #define PCIEFD_SYS_CTL_CLK_EN 0x00000002 /* system clock */ ··· 785 781 dev_info(&pdev->dev, 786 782 "%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count, 787 783 hw_ver_major, hw_ver_minor, hw_ver_sub); 784 + 785 + #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 786 + /* FW < v3.3.0 DMA logic doesn't handle correctly the mix of 32-bit and 787 + * 64-bit logical addresses: this workaround forces usage of 32-bit 788 + * DMA addresses only when such a fw is detected. 789 + */ 790 + if (PCIEFD_FW_VERSION(hw_ver_major, hw_ver_minor, hw_ver_sub) < 791 + PCIEFD_FW_VERSION(3, 3, 0)) { 792 + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 793 + if (err) 794 + dev_warn(&pdev->dev, 795 + "warning: can't set DMA mask %llxh (err %d)\n", 796 + DMA_BIT_MASK(32), err); 797 + } 798 + #endif 788 799 789 800 /* stop system clock */ 790 801 pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN,
+288 -104
drivers/net/can/xilinx_can.c
··· 2 2 * 3 3 * Copyright (C) 2012 - 2014 Xilinx, Inc. 4 4 * Copyright (C) 2009 PetaLogix. All rights reserved. 5 + * Copyright (C) 2017 Sandvik Mining and Construction Oy 5 6 * 6 7 * Description: 7 8 * This driver is developed for Axi CAN IP and for Zynq CANPS Controller. ··· 26 25 #include <linux/module.h> 27 26 #include <linux/netdevice.h> 28 27 #include <linux/of.h> 28 + #include <linux/of_device.h> 29 29 #include <linux/platform_device.h> 30 30 #include <linux/skbuff.h> 31 + #include <linux/spinlock.h> 31 32 #include <linux/string.h> 32 33 #include <linux/types.h> 33 34 #include <linux/can/dev.h> ··· 104 101 #define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\ 105 102 XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \ 106 103 XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \ 107 - XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK) 104 + XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK) 108 105 109 106 /* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */ 110 107 #define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */ ··· 121 118 /** 122 119 * struct xcan_priv - This definition define CAN driver instance 123 120 * @can: CAN private data structure. 
121 + * @tx_lock: Lock for synchronizing TX interrupt handling 124 122 * @tx_head: Tx CAN packets ready to send on the queue 125 123 * @tx_tail: Tx CAN packets successfully sended on the queue 126 124 * @tx_max: Maximum number packets the driver can send ··· 136 132 */ 137 133 struct xcan_priv { 138 134 struct can_priv can; 135 + spinlock_t tx_lock; 139 136 unsigned int tx_head; 140 137 unsigned int tx_tail; 141 138 unsigned int tx_max; ··· 162 157 .brp_min = 1, 163 158 .brp_max = 256, 164 159 .brp_inc = 1, 160 + }; 161 + 162 + #define XCAN_CAP_WATERMARK 0x0001 163 + struct xcan_devtype_data { 164 + unsigned int caps; 165 165 }; 166 166 167 167 /** ··· 247 237 } 248 238 usleep_range(500, 10000); 249 239 } 240 + 241 + /* reset clears FIFOs */ 242 + priv->tx_head = 0; 243 + priv->tx_tail = 0; 250 244 251 245 return 0; 252 246 } ··· 406 392 struct net_device_stats *stats = &ndev->stats; 407 393 struct can_frame *cf = (struct can_frame *)skb->data; 408 394 u32 id, dlc, data[2] = {0, 0}; 395 + unsigned long flags; 409 396 410 397 if (can_dropped_invalid_skb(ndev, skb)) 411 398 return NETDEV_TX_OK; ··· 454 439 data[1] = be32_to_cpup((__be32 *)(cf->data + 4)); 455 440 456 441 can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max); 442 + 443 + spin_lock_irqsave(&priv->tx_lock, flags); 444 + 457 445 priv->tx_head++; 458 446 459 447 /* Write the Frame to Xilinx CAN TX FIFO */ ··· 472 454 stats->tx_bytes += cf->can_dlc; 473 455 } 474 456 457 + /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */ 458 + if (priv->tx_max > 1) 459 + priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK); 460 + 475 461 /* Check if the TX buffer is full */ 476 462 if ((priv->tx_head - priv->tx_tail) == priv->tx_max) 477 463 netif_stop_queue(ndev); 464 + 465 + spin_unlock_irqrestore(&priv->tx_lock, flags); 478 466 479 467 return NETDEV_TX_OK; 480 468 } ··· 554 530 } 555 531 556 532 /** 533 + * xcan_current_error_state - Get current error state from HW 534 + * @ndev: Pointer to 
net_device structure 535 + * 536 + * Checks the current CAN error state from the HW. Note that this 537 + * only checks for ERROR_PASSIVE and ERROR_WARNING. 538 + * 539 + * Return: 540 + * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE 541 + * otherwise. 542 + */ 543 + static enum can_state xcan_current_error_state(struct net_device *ndev) 544 + { 545 + struct xcan_priv *priv = netdev_priv(ndev); 546 + u32 status = priv->read_reg(priv, XCAN_SR_OFFSET); 547 + 548 + if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) 549 + return CAN_STATE_ERROR_PASSIVE; 550 + else if (status & XCAN_SR_ERRWRN_MASK) 551 + return CAN_STATE_ERROR_WARNING; 552 + else 553 + return CAN_STATE_ERROR_ACTIVE; 554 + } 555 + 556 + /** 557 + * xcan_set_error_state - Set new CAN error state 558 + * @ndev: Pointer to net_device structure 559 + * @new_state: The new CAN state to be set 560 + * @cf: Error frame to be populated or NULL 561 + * 562 + * Set new CAN error state for the device, updating statistics and 563 + * populating the error frame if given. 564 + */ 565 + static void xcan_set_error_state(struct net_device *ndev, 566 + enum can_state new_state, 567 + struct can_frame *cf) 568 + { 569 + struct xcan_priv *priv = netdev_priv(ndev); 570 + u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET); 571 + u32 txerr = ecr & XCAN_ECR_TEC_MASK; 572 + u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT; 573 + 574 + priv->can.state = new_state; 575 + 576 + if (cf) { 577 + cf->can_id |= CAN_ERR_CRTL; 578 + cf->data[6] = txerr; 579 + cf->data[7] = rxerr; 580 + } 581 + 582 + switch (new_state) { 583 + case CAN_STATE_ERROR_PASSIVE: 584 + priv->can.can_stats.error_passive++; 585 + if (cf) 586 + cf->data[1] = (rxerr > 127) ? 587 + CAN_ERR_CRTL_RX_PASSIVE : 588 + CAN_ERR_CRTL_TX_PASSIVE; 589 + break; 590 + case CAN_STATE_ERROR_WARNING: 591 + priv->can.can_stats.error_warning++; 592 + if (cf) 593 + cf->data[1] |= (txerr > rxerr) ? 
594 + CAN_ERR_CRTL_TX_WARNING : 595 + CAN_ERR_CRTL_RX_WARNING; 596 + break; 597 + case CAN_STATE_ERROR_ACTIVE: 598 + if (cf) 599 + cf->data[1] |= CAN_ERR_CRTL_ACTIVE; 600 + break; 601 + default: 602 + /* non-ERROR states are handled elsewhere */ 603 + WARN_ON(1); 604 + break; 605 + } 606 + } 607 + 608 + /** 609 + * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX 610 + * @ndev: Pointer to net_device structure 611 + * 612 + * If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if 613 + * the performed RX/TX has caused it to drop to a lesser state and set 614 + * the interface state accordingly. 615 + */ 616 + static void xcan_update_error_state_after_rxtx(struct net_device *ndev) 617 + { 618 + struct xcan_priv *priv = netdev_priv(ndev); 619 + enum can_state old_state = priv->can.state; 620 + enum can_state new_state; 621 + 622 + /* changing error state due to successful frame RX/TX can only 623 + * occur from these states 624 + */ 625 + if (old_state != CAN_STATE_ERROR_WARNING && 626 + old_state != CAN_STATE_ERROR_PASSIVE) 627 + return; 628 + 629 + new_state = xcan_current_error_state(ndev); 630 + 631 + if (new_state != old_state) { 632 + struct sk_buff *skb; 633 + struct can_frame *cf; 634 + 635 + skb = alloc_can_err_skb(ndev, &cf); 636 + 637 + xcan_set_error_state(ndev, new_state, skb ? 
cf : NULL); 638 + 639 + if (skb) { 640 + struct net_device_stats *stats = &ndev->stats; 641 + 642 + stats->rx_packets++; 643 + stats->rx_bytes += cf->can_dlc; 644 + netif_rx(skb); 645 + } 646 + } 647 + } 648 + 649 + /** 557 650 * xcan_err_interrupt - error frame Isr 558 651 * @ndev: net_device pointer 559 652 * @isr: interrupt status register value ··· 685 544 struct net_device_stats *stats = &ndev->stats; 686 545 struct can_frame *cf; 687 546 struct sk_buff *skb; 688 - u32 err_status, status, txerr = 0, rxerr = 0; 547 + u32 err_status; 689 548 690 549 skb = alloc_can_err_skb(ndev, &cf); 691 550 692 551 err_status = priv->read_reg(priv, XCAN_ESR_OFFSET); 693 552 priv->write_reg(priv, XCAN_ESR_OFFSET, err_status); 694 - txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK; 695 - rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) & 696 - XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT); 697 - status = priv->read_reg(priv, XCAN_SR_OFFSET); 698 553 699 554 if (isr & XCAN_IXR_BSOFF_MASK) { 700 555 priv->can.state = CAN_STATE_BUS_OFF; ··· 700 563 can_bus_off(ndev); 701 564 if (skb) 702 565 cf->can_id |= CAN_ERR_BUSOFF; 703 - } else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) { 704 - priv->can.state = CAN_STATE_ERROR_PASSIVE; 705 - priv->can.can_stats.error_passive++; 706 - if (skb) { 707 - cf->can_id |= CAN_ERR_CRTL; 708 - cf->data[1] = (rxerr > 127) ? 709 - CAN_ERR_CRTL_RX_PASSIVE : 710 - CAN_ERR_CRTL_TX_PASSIVE; 711 - cf->data[6] = txerr; 712 - cf->data[7] = rxerr; 713 - } 714 - } else if (status & XCAN_SR_ERRWRN_MASK) { 715 - priv->can.state = CAN_STATE_ERROR_WARNING; 716 - priv->can.can_stats.error_warning++; 717 - if (skb) { 718 - cf->can_id |= CAN_ERR_CRTL; 719 - cf->data[1] |= (txerr > rxerr) ? 
720 - CAN_ERR_CRTL_TX_WARNING : 721 - CAN_ERR_CRTL_RX_WARNING; 722 - cf->data[6] = txerr; 723 - cf->data[7] = rxerr; 724 - } 566 + } else { 567 + enum can_state new_state = xcan_current_error_state(ndev); 568 + 569 + xcan_set_error_state(ndev, new_state, skb ? cf : NULL); 725 570 } 726 571 727 572 /* Check for Arbitration lost interrupt */ ··· 719 600 if (isr & XCAN_IXR_RXOFLW_MASK) { 720 601 stats->rx_over_errors++; 721 602 stats->rx_errors++; 722 - priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK); 723 603 if (skb) { 724 604 cf->can_id |= CAN_ERR_CRTL; 725 605 cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; ··· 827 709 828 710 isr = priv->read_reg(priv, XCAN_ISR_OFFSET); 829 711 while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) { 830 - if (isr & XCAN_IXR_RXOK_MASK) { 831 - priv->write_reg(priv, XCAN_ICR_OFFSET, 832 - XCAN_IXR_RXOK_MASK); 833 - work_done += xcan_rx(ndev); 834 - } else { 835 - priv->write_reg(priv, XCAN_ICR_OFFSET, 836 - XCAN_IXR_RXNEMP_MASK); 837 - break; 838 - } 712 + work_done += xcan_rx(ndev); 839 713 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK); 840 714 isr = priv->read_reg(priv, XCAN_ISR_OFFSET); 841 715 } 842 716 843 - if (work_done) 717 + if (work_done) { 844 718 can_led_event(ndev, CAN_LED_EVENT_RX); 719 + xcan_update_error_state_after_rxtx(ndev); 720 + } 845 721 846 722 if (work_done < quota) { 847 723 napi_complete_done(napi, work_done); 848 724 ier = priv->read_reg(priv, XCAN_IER_OFFSET); 849 - ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK); 725 + ier |= XCAN_IXR_RXNEMP_MASK; 850 726 priv->write_reg(priv, XCAN_IER_OFFSET, ier); 851 727 } 852 728 return work_done; ··· 855 743 { 856 744 struct xcan_priv *priv = netdev_priv(ndev); 857 745 struct net_device_stats *stats = &ndev->stats; 746 + unsigned int frames_in_fifo; 747 + int frames_sent = 1; /* TXOK => at least 1 frame was sent */ 748 + unsigned long flags; 749 + int retries = 0; 858 750 859 - while ((priv->tx_head - priv->tx_tail > 0) && 860 - (isr & 
XCAN_IXR_TXOK_MASK)) { 751 + /* Synchronize with xmit as we need to know the exact number 752 + * of frames in the FIFO to stay in sync due to the TXFEMP 753 + * handling. 754 + * This also prevents a race between netif_wake_queue() and 755 + * netif_stop_queue(). 756 + */ 757 + spin_lock_irqsave(&priv->tx_lock, flags); 758 + 759 + frames_in_fifo = priv->tx_head - priv->tx_tail; 760 + 761 + if (WARN_ON_ONCE(frames_in_fifo == 0)) { 762 + /* clear TXOK anyway to avoid getting back here */ 861 763 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); 764 + spin_unlock_irqrestore(&priv->tx_lock, flags); 765 + return; 766 + } 767 + 768 + /* Check if 2 frames were sent (TXOK only means that at least 1 769 + * frame was sent). 770 + */ 771 + if (frames_in_fifo > 1) { 772 + WARN_ON(frames_in_fifo > priv->tx_max); 773 + 774 + /* Synchronize TXOK and isr so that after the loop: 775 + * (1) isr variable is up-to-date at least up to TXOK clear 776 + * time. This avoids us clearing a TXOK of a second frame 777 + * but not noticing that the FIFO is now empty and thus 778 + * marking only a single frame as sent. 779 + * (2) No TXOK is left. Having one could mean leaving a 780 + * stray TXOK as we might process the associated frame 781 + * via TXFEMP handling as we read TXFEMP *after* TXOK 782 + * clear to satisfy (1). 
783 + */ 784 + while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) { 785 + priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); 786 + isr = priv->read_reg(priv, XCAN_ISR_OFFSET); 787 + } 788 + 789 + if (isr & XCAN_IXR_TXFEMP_MASK) { 790 + /* nothing in FIFO anymore */ 791 + frames_sent = frames_in_fifo; 792 + } 793 + } else { 794 + /* single frame in fifo, just clear TXOK */ 795 + priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); 796 + } 797 + 798 + while (frames_sent--) { 862 799 can_get_echo_skb(ndev, priv->tx_tail % 863 800 priv->tx_max); 864 801 priv->tx_tail++; 865 802 stats->tx_packets++; 866 - isr = priv->read_reg(priv, XCAN_ISR_OFFSET); 867 803 } 868 - can_led_event(ndev, CAN_LED_EVENT_TX); 804 + 869 805 netif_wake_queue(ndev); 806 + 807 + spin_unlock_irqrestore(&priv->tx_lock, flags); 808 + 809 + can_led_event(ndev, CAN_LED_EVENT_TX); 810 + xcan_update_error_state_after_rxtx(ndev); 870 811 } 871 812 872 813 /** ··· 938 773 struct net_device *ndev = (struct net_device *)dev_id; 939 774 struct xcan_priv *priv = netdev_priv(ndev); 940 775 u32 isr, ier; 776 + u32 isr_errors; 941 777 942 778 /* Get the interrupt status from Xilinx CAN */ 943 779 isr = priv->read_reg(priv, XCAN_ISR_OFFSET); ··· 957 791 xcan_tx_interrupt(ndev, isr); 958 792 959 793 /* Check for the type of error interrupt and Processing it */ 960 - if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | 961 - XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) { 962 - priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK | 963 - XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK | 964 - XCAN_IXR_ARBLST_MASK)); 794 + isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | 795 + XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK); 796 + if (isr_errors) { 797 + priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors); 965 798 xcan_err_interrupt(ndev, isr); 966 799 } 967 800 968 801 /* Check for the type of receive interrupt and Processing it */ 969 - if (isr & (XCAN_IXR_RXNEMP_MASK | 
XCAN_IXR_RXOK_MASK)) { 802 + if (isr & XCAN_IXR_RXNEMP_MASK) { 970 803 ier = priv->read_reg(priv, XCAN_IER_OFFSET); 971 - ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK); 804 + ier &= ~XCAN_IXR_RXNEMP_MASK; 972 805 priv->write_reg(priv, XCAN_IER_OFFSET, ier); 973 806 napi_schedule(&priv->napi); 974 807 } ··· 984 819 static void xcan_chip_stop(struct net_device *ndev) 985 820 { 986 821 struct xcan_priv *priv = netdev_priv(ndev); 987 - u32 ier; 988 822 989 823 /* Disable interrupts and leave the can in configuration mode */ 990 - ier = priv->read_reg(priv, XCAN_IER_OFFSET); 991 - ier &= ~XCAN_INTR_ALL; 992 - priv->write_reg(priv, XCAN_IER_OFFSET, ier); 993 - priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK); 824 + set_reset_mode(ndev); 994 825 priv->can.state = CAN_STATE_STOPPED; 995 826 } 996 827 ··· 1119 958 */ 1120 959 static int __maybe_unused xcan_suspend(struct device *dev) 1121 960 { 1122 - if (!device_may_wakeup(dev)) 1123 - return pm_runtime_force_suspend(dev); 961 + struct net_device *ndev = dev_get_drvdata(dev); 1124 962 1125 - return 0; 963 + if (netif_running(ndev)) { 964 + netif_stop_queue(ndev); 965 + netif_device_detach(ndev); 966 + xcan_chip_stop(ndev); 967 + } 968 + 969 + return pm_runtime_force_suspend(dev); 1126 970 } 1127 971 1128 972 /** ··· 1139 973 */ 1140 974 static int __maybe_unused xcan_resume(struct device *dev) 1141 975 { 1142 - if (!device_may_wakeup(dev)) 1143 - return pm_runtime_force_resume(dev); 976 + struct net_device *ndev = dev_get_drvdata(dev); 977 + int ret; 978 + 979 + ret = pm_runtime_force_resume(dev); 980 + if (ret) { 981 + dev_err(dev, "pm_runtime_force_resume failed on resume\n"); 982 + return ret; 983 + } 984 + 985 + if (netif_running(ndev)) { 986 + ret = xcan_chip_start(ndev); 987 + if (ret) { 988 + dev_err(dev, "xcan_chip_start failed on resume\n"); 989 + return ret; 990 + } 991 + 992 + netif_device_attach(ndev); 993 + netif_start_queue(ndev); 994 + } 1144 995 1145 996 return 0; 1146 - 1147 997 } 1148 998 
1149 999 /** ··· 1173 991 { 1174 992 struct net_device *ndev = dev_get_drvdata(dev); 1175 993 struct xcan_priv *priv = netdev_priv(ndev); 1176 - 1177 - if (netif_running(ndev)) { 1178 - netif_stop_queue(ndev); 1179 - netif_device_detach(ndev); 1180 - } 1181 - 1182 - priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK); 1183 - priv->can.state = CAN_STATE_SLEEPING; 1184 994 1185 995 clk_disable_unprepare(priv->bus_clk); 1186 996 clk_disable_unprepare(priv->can_clk); ··· 1192 1018 struct net_device *ndev = dev_get_drvdata(dev); 1193 1019 struct xcan_priv *priv = netdev_priv(ndev); 1194 1020 int ret; 1195 - u32 isr, status; 1196 1021 1197 1022 ret = clk_prepare_enable(priv->bus_clk); 1198 1023 if (ret) { ··· 1205 1032 return ret; 1206 1033 } 1207 1034 1208 - priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK); 1209 - isr = priv->read_reg(priv, XCAN_ISR_OFFSET); 1210 - status = priv->read_reg(priv, XCAN_SR_OFFSET); 1211 - 1212 - if (netif_running(ndev)) { 1213 - if (isr & XCAN_IXR_BSOFF_MASK) { 1214 - priv->can.state = CAN_STATE_BUS_OFF; 1215 - priv->write_reg(priv, XCAN_SRR_OFFSET, 1216 - XCAN_SRR_RESET_MASK); 1217 - } else if ((status & XCAN_SR_ESTAT_MASK) == 1218 - XCAN_SR_ESTAT_MASK) { 1219 - priv->can.state = CAN_STATE_ERROR_PASSIVE; 1220 - } else if (status & XCAN_SR_ERRWRN_MASK) { 1221 - priv->can.state = CAN_STATE_ERROR_WARNING; 1222 - } else { 1223 - priv->can.state = CAN_STATE_ERROR_ACTIVE; 1224 - } 1225 - netif_device_attach(ndev); 1226 - netif_start_queue(ndev); 1227 - } 1228 - 1229 1035 return 0; 1230 1036 } 1231 1037 ··· 1212 1060 SET_SYSTEM_SLEEP_PM_OPS(xcan_suspend, xcan_resume) 1213 1061 SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL) 1214 1062 }; 1063 + 1064 + static const struct xcan_devtype_data xcan_zynq_data = { 1065 + .caps = XCAN_CAP_WATERMARK, 1066 + }; 1067 + 1068 + /* Match table for OF platform binding */ 1069 + static const struct of_device_id xcan_of_match[] = { 1070 + { .compatible = "xlnx,zynq-can-1.0", 
.data = &xcan_zynq_data }, 1071 + { .compatible = "xlnx,axi-can-1.00.a", }, 1072 + { /* end of list */ }, 1073 + }; 1074 + MODULE_DEVICE_TABLE(of, xcan_of_match); 1215 1075 1216 1076 /** 1217 1077 * xcan_probe - Platform registration call ··· 1239 1075 struct resource *res; /* IO mem resources */ 1240 1076 struct net_device *ndev; 1241 1077 struct xcan_priv *priv; 1078 + const struct of_device_id *of_id; 1079 + int caps = 0; 1242 1080 void __iomem *addr; 1243 - int ret, rx_max, tx_max; 1081 + int ret, rx_max, tx_max, tx_fifo_depth; 1244 1082 1245 1083 /* Get the virtual base address for the device */ 1246 1084 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ··· 1252 1086 goto err; 1253 1087 } 1254 1088 1255 - ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max); 1089 + ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", 1090 + &tx_fifo_depth); 1256 1091 if (ret < 0) 1257 1092 goto err; 1258 1093 1259 1094 ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth", &rx_max); 1260 1095 if (ret < 0) 1261 1096 goto err; 1097 + 1098 + of_id = of_match_device(xcan_of_match, &pdev->dev); 1099 + if (of_id) { 1100 + const struct xcan_devtype_data *devtype_data = of_id->data; 1101 + 1102 + if (devtype_data) 1103 + caps = devtype_data->caps; 1104 + } 1105 + 1106 + /* There is no way to directly figure out how many frames have been 1107 + * sent when the TXOK interrupt is processed. If watermark programming 1108 + * is supported, we can have 2 frames in the FIFO and use TXFEMP 1109 + * to determine if 1 or 2 frames have been sent. 
1110 + * Theoretically we should be able to use TXFWMEMP to determine up 1111 + * to 3 frames, but it seems that after putting a second frame in the 1112 + * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less 1113 + * than 2 frames in FIFO) is set anyway with no TXOK (a frame was 1114 + * sent), which is not a sensible state - possibly TXFWMEMP is not 1115 + * completely synchronized with the rest of the bits? 1116 + */ 1117 + if (caps & XCAN_CAP_WATERMARK) 1118 + tx_max = min(tx_fifo_depth, 2); 1119 + else 1120 + tx_max = 1; 1262 1121 1263 1122 /* Create a CAN device instance */ 1264 1123 ndev = alloc_candev(sizeof(struct xcan_priv), tx_max); ··· 1299 1108 CAN_CTRLMODE_BERR_REPORTING; 1300 1109 priv->reg_base = addr; 1301 1110 priv->tx_max = tx_max; 1111 + spin_lock_init(&priv->tx_lock); 1302 1112 1303 1113 /* Get IRQ for the device */ 1304 1114 ndev->irq = platform_get_irq(pdev, 0); ··· 1364 1172 1365 1173 pm_runtime_put(&pdev->dev); 1366 1174 1367 - netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n", 1175 + netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth: actual %d, using %d\n", 1368 1176 priv->reg_base, ndev->irq, priv->can.clock.freq, 1369 - priv->tx_max); 1177 + tx_fifo_depth, priv->tx_max); 1370 1178 1371 1179 return 0; 1372 1180 ··· 1399 1207 1400 1208 return 0; 1401 1209 } 1402 - 1403 - /* Match table for OF platform binding */ 1404 - static const struct of_device_id xcan_of_match[] = { 1405 - { .compatible = "xlnx,zynq-can-1.0", }, 1406 - { .compatible = "xlnx,axi-can-1.00.a", }, 1407 - { /* end of list */ }, 1408 - }; 1409 - MODULE_DEVICE_TABLE(of, xcan_of_match); 1410 1210 1411 1211 static struct platform_driver xcan_driver = { 1412 1212 .probe = xcan_probe,
+13 -8
drivers/net/dsa/mv88e6xxx/chip.c
··· 343 343 .xlate = irq_domain_xlate_twocell, 344 344 }; 345 345 346 + /* To be called with reg_lock held */ 346 347 static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip) 347 348 { 348 349 int irq, virq; ··· 363 362 364 363 static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip) 365 364 { 366 - mv88e6xxx_g1_irq_free_common(chip); 367 - 365 + /* 366 + * free_irq must be called without reg_lock taken because the irq 367 + * handler takes this lock, too. 368 + */ 368 369 free_irq(chip->irq, chip); 370 + 371 + mutex_lock(&chip->reg_lock); 372 + mv88e6xxx_g1_irq_free_common(chip); 373 + mutex_unlock(&chip->reg_lock); 369 374 } 370 375 371 376 static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip) ··· 476 469 477 470 static void mv88e6xxx_irq_poll_free(struct mv88e6xxx_chip *chip) 478 471 { 479 - mv88e6xxx_g1_irq_free_common(chip); 480 - 481 472 kthread_cancel_delayed_work_sync(&chip->irq_poll_work); 482 473 kthread_destroy_worker(chip->kworker); 474 + 475 + mutex_lock(&chip->reg_lock); 476 + mv88e6xxx_g1_irq_free_common(chip); 477 + mutex_unlock(&chip->reg_lock); 483 478 } 484 479 485 480 int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask) ··· 4515 4506 if (chip->info->g2_irqs > 0) 4516 4507 mv88e6xxx_g2_irq_free(chip); 4517 4508 out_g1_irq: 4518 - mutex_lock(&chip->reg_lock); 4519 4509 if (chip->irq > 0) 4520 4510 mv88e6xxx_g1_irq_free(chip); 4521 4511 else 4522 4512 mv88e6xxx_irq_poll_free(chip); 4523 - mutex_unlock(&chip->reg_lock); 4524 4513 out: 4525 4514 if (pdata) 4526 4515 dev_put(pdata->netdev); ··· 4546 4539 if (chip->info->g2_irqs > 0) 4547 4540 mv88e6xxx_g2_irq_free(chip); 4548 4541 4549 - mutex_lock(&chip->reg_lock); 4550 4542 if (chip->irq > 0) 4551 4543 mv88e6xxx_g1_irq_free(chip); 4552 4544 else 4553 4545 mv88e6xxx_irq_poll_free(chip); 4554 - mutex_unlock(&chip->reg_lock); 4555 4546 } 4556 4547 4557 4548 static const struct of_device_id mv88e6xxx_of_match[] = {
+1 -1
drivers/net/ethernet/3com/Kconfig
··· 32 32 33 33 config 3C515 34 34 tristate "3c515 ISA \"Fast EtherLink\"" 35 - depends on ISA && ISA_DMA_API 35 + depends on ISA && ISA_DMA_API && !PPC32 36 36 ---help--- 37 37 If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet 38 38 network card, say Y here.
+2 -2
drivers/net/ethernet/amd/Kconfig
··· 44 44 45 45 config LANCE 46 46 tristate "AMD LANCE and PCnet (AT1500 and NE2100) support" 47 - depends on ISA && ISA_DMA_API && !ARM 47 + depends on ISA && ISA_DMA_API && !ARM && !PPC32 48 48 ---help--- 49 49 If you have a network (Ethernet) card of this type, say Y here. 50 50 Some LinkSys cards are of this type. ··· 138 138 139 139 config NI65 140 140 tristate "NI6510 support" 141 - depends on ISA && ISA_DMA_API && !ARM 141 + depends on ISA && ISA_DMA_API && !ARM && !PPC32 142 142 ---help--- 143 143 If you have a network (Ethernet) card of this type, say Y here. 144 144
+1
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
··· 1686 1686 skb = build_skb(page_address(page) + adapter->rx_page_offset, 1687 1687 adapter->rx_frag_size); 1688 1688 if (likely(skb)) { 1689 + skb_reserve(skb, NET_SKB_PAD); 1689 1690 adapter->rx_page_offset += adapter->rx_frag_size; 1690 1691 if (adapter->rx_page_offset >= PAGE_SIZE) 1691 1692 adapter->rx_page = NULL;
+10 -3
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
··· 3388 3388 DP(BNX2X_MSG_ETHTOOL, 3389 3389 "rss re-configured, UDP 4-tupple %s\n", 3390 3390 udp_rss_requested ? "enabled" : "disabled"); 3391 - return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); 3391 + if (bp->state == BNX2X_STATE_OPEN) 3392 + return bnx2x_rss(bp, &bp->rss_conf_obj, false, 3393 + true); 3392 3394 } else if ((info->flow_type == UDP_V6_FLOW) && 3393 3395 (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) { 3394 3396 bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested; 3395 3397 DP(BNX2X_MSG_ETHTOOL, 3396 3398 "rss re-configured, UDP 4-tupple %s\n", 3397 3399 udp_rss_requested ? "enabled" : "disabled"); 3398 - return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); 3400 + if (bp->state == BNX2X_STATE_OPEN) 3401 + return bnx2x_rss(bp, &bp->rss_conf_obj, false, 3402 + true); 3399 3403 } 3400 3404 return 0; 3401 3405 ··· 3513 3509 bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id; 3514 3510 } 3515 3511 3516 - return bnx2x_config_rss_eth(bp, false); 3512 + if (bp->state == BNX2X_STATE_OPEN) 3513 + return bnx2x_config_rss_eth(bp, false); 3514 + 3515 + return 0; 3517 3516 } 3518 3517 3519 3518 /**
+1
drivers/net/ethernet/cirrus/Kconfig
··· 19 19 config CS89x0 20 20 tristate "CS89x0 support" 21 21 depends on ISA || EISA || ARM 22 + depends on !PPC32 22 23 ---help--- 23 24 Support for CS89x0 chipset based Ethernet cards. If you have a 24 25 network (Ethernet) card of this type, say Y and read the file
+1
drivers/net/ethernet/huawei/hinic/hinic_tx.c
··· 229 229 txq->txq_stats.tx_busy++; 230 230 u64_stats_update_end(&txq->txq_stats.syncp); 231 231 err = NETDEV_TX_BUSY; 232 + wqe_size = 0; 232 233 goto flush_skbs; 233 234 } 234 235
+1 -1
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
··· 2958 2958 u32 srqn = qp_get_srqn(qpc) & 0xffffff; 2959 2959 int use_srq = (qp_get_srqn(qpc) >> 24) & 1; 2960 2960 struct res_srq *srq; 2961 - int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff; 2961 + int local_qpn = vhcr->in_modifier & 0xffffff; 2962 2962 2963 2963 err = adjust_qp_sched_queue(dev, slave, qpc, inbox); 2964 2964 if (err)
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
··· 123 123 int i; 124 124 125 125 buf->size = size; 126 - buf->npages = 1 << get_order(size); 126 + buf->npages = DIV_ROUND_UP(size, PAGE_SIZE); 127 127 buf->page_shift = PAGE_SHIFT; 128 128 buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list), 129 129 GFP_KERNEL);
+5 -2
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
··· 381 381 HLIST_HEAD(del_list); 382 382 spin_lock_bh(&priv->fs.arfs.arfs_lock); 383 383 mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) { 384 - if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA) 385 - break; 386 384 if (!work_pending(&arfs_rule->arfs_work) && 387 385 rps_may_expire_flow(priv->netdev, 388 386 arfs_rule->rxq, arfs_rule->flow_id, 389 387 arfs_rule->filter_id)) { 390 388 hlist_del_init(&arfs_rule->hlist); 391 389 hlist_add_head(&arfs_rule->hlist, &del_list); 390 + if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA) 391 + break; 392 392 } 393 393 } 394 394 spin_unlock_bh(&priv->fs.arfs.arfs_lock); ··· 709 709 710 710 if (skb->protocol != htons(ETH_P_IP) && 711 711 skb->protocol != htons(ETH_P_IPV6)) 712 + return -EPROTONOSUPPORT; 713 + 714 + if (skb->encapsulation) 712 715 return -EPROTONOSUPPORT; 713 716 714 717 arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
+8 -9
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
··· 275 275 } 276 276 277 277 static int mlx5e_dbcnl_validate_ets(struct net_device *netdev, 278 - struct ieee_ets *ets) 278 + struct ieee_ets *ets, 279 + bool zero_sum_allowed) 279 280 { 280 281 bool have_ets_tc = false; 281 282 int bw_sum = 0; ··· 301 300 } 302 301 303 302 if (have_ets_tc && bw_sum != 100) { 304 - netdev_err(netdev, 305 - "Failed to validate ETS: BW sum is illegal\n"); 303 + if (bw_sum || (!bw_sum && !zero_sum_allowed)) 304 + netdev_err(netdev, 305 + "Failed to validate ETS: BW sum is illegal\n"); 306 306 return -EINVAL; 307 307 } 308 308 return 0; ··· 318 316 if (!MLX5_CAP_GEN(priv->mdev, ets)) 319 317 return -EOPNOTSUPP; 320 318 321 - err = mlx5e_dbcnl_validate_ets(netdev, ets); 319 + err = mlx5e_dbcnl_validate_ets(netdev, ets, false); 322 320 if (err) 323 321 return err; 324 322 ··· 644 642 ets.prio_tc[i]); 645 643 } 646 644 647 - err = mlx5e_dbcnl_validate_ets(netdev, &ets); 648 - if (err) { 649 - netdev_err(netdev, 650 - "%s, Failed to validate ETS: %d\n", __func__, err); 645 + err = mlx5e_dbcnl_validate_ets(netdev, &ets, true); 646 + if (err) 651 647 goto out; 652 - } 653 648 654 649 err = mlx5e_dcbnl_ieee_setets_core(priv, &ets); 655 650 if (err) {
+4
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 1957 1957 else 1958 1958 actions = flow->nic_attr->action; 1959 1959 1960 + if (flow->flags & MLX5E_TC_FLOW_EGRESS && 1961 + !(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)) 1962 + return false; 1963 + 1960 1964 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) 1961 1965 return modify_header_match_supported(&parse_attr->spec, exts); 1962 1966
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
··· 2216 2216 2217 2217 u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw) 2218 2218 { 2219 - return esw->mode; 2219 + return ESW_ALLOWED(esw) ? esw->mode : SRIOV_NONE; 2220 2220 } 2221 2221 EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 1887 1887 if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) { 1888 1888 if (!fwd_next_prio_supported(ft)) 1889 1889 return ERR_PTR(-EOPNOTSUPP); 1890 - if (dest) 1890 + if (dest_num) 1891 1891 return ERR_PTR(-EINVAL); 1892 1892 mutex_lock(&root->chain_lock); 1893 1893 next_ft = find_next_chained_ft(prio);
+10 -2
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
··· 488 488 void mlx5_init_clock(struct mlx5_core_dev *mdev) 489 489 { 490 490 struct mlx5_clock *clock = &mdev->clock; 491 + u64 overflow_cycles; 491 492 u64 ns; 492 493 u64 frac = 0; 493 494 u32 dev_freq; ··· 512 511 513 512 /* Calculate period in seconds to call the overflow watchdog - to make 514 513 * sure counter is checked at least once every wrap around. 514 + * The period is calculated as the minimum between max HW cycles count 515 + * (The clock source mask) and max amount of cycles that can be 516 + * multiplied by clock multiplier where the result doesn't exceed 517 + * 64bits. 515 518 */ 516 - ns = cyclecounter_cyc2ns(&clock->cycles, clock->cycles.mask, 519 + overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult); 520 + overflow_cycles = min(overflow_cycles, clock->cycles.mask >> 1); 521 + 522 + ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles, 517 523 frac, &frac); 518 - do_div(ns, NSEC_PER_SEC / 2 / HZ); 524 + do_div(ns, NSEC_PER_SEC / HZ); 519 525 clock->overflow_period = ns; 520 526 521 527 mdev->clock_info_page = alloc_page(GFP_KERNEL);
+22 -12
drivers/net/ethernet/mellanox/mlx5/core/wq.c
··· 113 113 return err; 114 114 } 115 115 116 - static void mlx5e_qp_set_frag_buf(struct mlx5_frag_buf *buf, 117 - struct mlx5_wq_qp *qp) 116 + static void mlx5_qp_set_frag_buf(struct mlx5_frag_buf *buf, 117 + struct mlx5_wq_qp *qp) 118 118 { 119 + struct mlx5_frag_buf_ctrl *sq_fbc; 119 120 struct mlx5_frag_buf *rqb, *sqb; 120 121 121 - rqb = &qp->rq.fbc.frag_buf; 122 + rqb = &qp->rq.fbc.frag_buf; 122 123 *rqb = *buf; 123 124 rqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq); 124 - rqb->npages = 1 << get_order(rqb->size); 125 + rqb->npages = DIV_ROUND_UP(rqb->size, PAGE_SIZE); 125 126 126 - sqb = &qp->sq.fbc.frag_buf; 127 - *sqb = *buf; 128 - sqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq); 129 - sqb->npages = 1 << get_order(sqb->size); 127 + sq_fbc = &qp->sq.fbc; 128 + sqb = &sq_fbc->frag_buf; 129 + *sqb = *buf; 130 + sqb->size = mlx5_wq_cyc_get_byte_size(&qp->sq); 131 + sqb->npages = DIV_ROUND_UP(sqb->size, PAGE_SIZE); 130 132 sqb->frags += rqb->npages; /* first part is for the rq */ 133 + if (sq_fbc->strides_offset) 134 + sqb->frags--; 131 135 } 132 136 133 137 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, 134 138 void *qpc, struct mlx5_wq_qp *wq, 135 139 struct mlx5_wq_ctrl *wq_ctrl) 136 140 { 141 + u32 sq_strides_offset; 137 142 int err; 138 143 139 144 mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4, 140 145 MLX5_GET(qpc, qpc, log_rq_size), 141 146 &wq->rq.fbc); 142 - mlx5_fill_fbc(ilog2(MLX5_SEND_WQE_BB), 143 - MLX5_GET(qpc, qpc, log_sq_size), 144 - &wq->sq.fbc); 147 + 148 + sq_strides_offset = 149 + ((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB; 150 + 151 + mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB), 152 + MLX5_GET(qpc, qpc, log_sq_size), 153 + sq_strides_offset, 154 + &wq->sq.fbc); 145 155 146 156 err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); 147 157 if (err) { ··· 166 156 goto err_db_free; 167 157 } 168 158 169 - mlx5e_qp_set_frag_buf(&wq_ctrl->buf, wq); 159 + 
mlx5_qp_set_frag_buf(&wq_ctrl->buf, wq); 170 160 171 161 wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR]; 172 162 wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR];
+1 -1
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
··· 317 317 payload.dst_ipv4 = flow->daddr; 318 318 319 319 /* If entry has expired send dst IP with all other fields 0. */ 320 - if (!(neigh->nud_state & NUD_VALID)) { 320 + if (!(neigh->nud_state & NUD_VALID) || neigh->dead) { 321 321 nfp_tun_del_route_from_cache(app, payload.dst_ipv4); 322 322 /* Trigger ARP to verify invalid neighbour state. */ 323 323 neigh_event_send(neigh, NULL);
+7 -8
drivers/net/ethernet/qlogic/qed/qed_l2.c
··· 665 665 666 666 p_ramrod->common.update_approx_mcast_flg = 1; 667 667 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { 668 - u32 *p_bins = (u32 *)p_params->bins; 668 + u32 *p_bins = p_params->bins; 669 669 670 670 p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]); 671 671 } ··· 1476 1476 enum spq_mode comp_mode, 1477 1477 struct qed_spq_comp_cb *p_comp_data) 1478 1478 { 1479 - unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; 1480 1479 struct vport_update_ramrod_data *p_ramrod = NULL; 1480 + u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; 1481 1481 struct qed_spq_entry *p_ent = NULL; 1482 1482 struct qed_sp_init_data init_data; 1483 1483 u8 abs_vport_id = 0; ··· 1513 1513 /* explicitly clear out the entire vector */ 1514 1514 memset(&p_ramrod->approx_mcast.bins, 0, 1515 1515 sizeof(p_ramrod->approx_mcast.bins)); 1516 - memset(bins, 0, sizeof(unsigned long) * 1517 - ETH_MULTICAST_MAC_BINS_IN_REGS); 1516 + memset(bins, 0, sizeof(bins)); 1518 1517 /* filter ADD op is explicit set op and it removes 1519 1518 * any existing filters for the vport 1520 1519 */ 1521 1520 if (p_filter_cmd->opcode == QED_FILTER_ADD) { 1522 1521 for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) { 1523 - u32 bit; 1522 + u32 bit, nbits; 1524 1523 1525 1524 bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]); 1526 - __set_bit(bit, bins); 1525 + nbits = sizeof(u32) * BITS_PER_BYTE; 1526 + bins[bit / nbits] |= 1 << (bit % nbits); 1527 1527 } 1528 1528 1529 1529 /* Convert to correct endianity */ 1530 1530 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { 1531 1531 struct vport_update_ramrod_mcast *p_ramrod_bins; 1532 - u32 *p_bins = (u32 *)bins; 1533 1532 1534 1533 p_ramrod_bins = &p_ramrod->approx_mcast; 1535 - p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]); 1534 + p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]); 1536 1535 } 1537 1536 } 1538 1537
+1 -1
drivers/net/ethernet/qlogic/qed/qed_l2.h
··· 215 215 u8 anti_spoofing_en; 216 216 u8 update_accept_any_vlan_flg; 217 217 u8 accept_any_vlan; 218 - unsigned long bins[8]; 218 + u32 bins[8]; 219 219 struct qed_rss_params *rss_params; 220 220 struct qed_filter_accept_flags accept_flags; 221 221 struct qed_sge_tpa_params *sge_tpa_params;
+10 -3
drivers/net/ethernet/qlogic/qed/qed_mcp.c
··· 1211 1211 break; 1212 1212 default: 1213 1213 p_link->speed = 0; 1214 + p_link->link_up = 0; 1214 1215 } 1215 1216 1216 1217 if (p_link->link_up && p_link->speed) ··· 1309 1308 phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0; 1310 1309 phy_cfg.adv_speed = params->speed.advertised_speeds; 1311 1310 phy_cfg.loopback_mode = params->loopback_mode; 1312 - if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) { 1313 - if (params->eee.enable) 1314 - phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED; 1311 + 1312 + /* There are MFWs that share this capability regardless of whether 1313 + * this is feasible or not. And given that at the very least adv_caps 1314 + * would be set internally by qed, we want to make sure LFA would 1315 + * still work. 1316 + */ 1317 + if ((p_hwfn->mcp_info->capabilities & 1318 + FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) { 1319 + phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED; 1315 1320 if (params->eee.tx_lpi_enable) 1316 1321 phy_cfg.eee_cfg |= EEE_CFG_TX_LPI; 1317 1322 if (params->eee.adv_caps & QED_EEE_1G_ADV)
+1 -1
drivers/net/ethernet/qlogic/qed/qed_sriov.c
··· 2831 2831 2832 2832 p_data->update_approx_mcast_flg = 1; 2833 2833 memcpy(p_data->bins, p_mcast_tlv->bins, 2834 - sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS); 2834 + sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); 2835 2835 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST; 2836 2836 } 2837 2837
+2 -2
drivers/net/ethernet/qlogic/qed/qed_vf.c
··· 1126 1126 resp_size += sizeof(struct pfvf_def_resp_tlv); 1127 1127 1128 1128 memcpy(p_mcast_tlv->bins, p_params->bins, 1129 - sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS); 1129 + sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); 1130 1130 } 1131 1131 1132 1132 update_rx = p_params->accept_flags.update_rx_mode_config; ··· 1272 1272 u32 bit; 1273 1273 1274 1274 bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]); 1275 - __set_bit(bit, sp_params.bins); 1275 + sp_params.bins[bit / 32] |= 1 << (bit % 32); 1276 1276 } 1277 1277 } 1278 1278
+6 -1
drivers/net/ethernet/qlogic/qed/qed_vf.h
··· 392 392 struct channel_tlv tl; 393 393 u8 padding[4]; 394 394 395 - u64 bins[8]; 395 + /* There are only 256 approx bins, and in HSI they're divided into 396 + * 32-bit values. As old VFs used to set-bit to the values on its side, 397 + * the upper half of the array is never expected to contain any data. 398 + */ 399 + u64 bins[4]; 400 + u64 obsolete_bins[4]; 396 401 }; 397 402 398 403 struct vfpf_vport_update_accept_param_tlv {
+1 -2
drivers/net/ethernet/realtek/r8169.c
··· 7734 7734 return rc; 7735 7735 } 7736 7736 7737 - /* override BIOS settings, use userspace tools to enable WOL */ 7738 - __rtl8169_set_wol(tp, 0); 7737 + tp->saved_wolopts = __rtl8169_get_wol(tp); 7739 7738 7740 7739 if (rtl_tbi_enabled(tp)) { 7741 7740 tp->set_speed = rtl8169_set_speed_tbi;
+1
drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
··· 218 218 ret = of_mdiobus_register(bus, np1); 219 219 if (ret) { 220 220 mdiobus_free(bus); 221 + lp->mii_bus = NULL; 221 222 return ret; 222 223 } 223 224 return 0;
+1 -1
drivers/net/phy/phy.c
··· 514 514 * negotiation may already be done and aneg interrupt may not be 515 515 * generated. 516 516 */ 517 - if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) { 517 + if (phydev->irq != PHY_POLL && phydev->state == PHY_AN) { 518 518 err = phy_aneg_done(phydev); 519 519 if (err > 0) { 520 520 trigger = true;
+1 -1
drivers/net/usb/qmi_wwan.c
··· 1246 1246 {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ 1247 1247 {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ 1248 1248 {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ 1249 - {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e */ 1249 + {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */ 1250 1250 {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ 1251 1251 {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ 1252 1252 {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
+87 -39
drivers/net/vxlan.c
··· 636 636 return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr)); 637 637 } 638 638 639 - /* Add new entry to forwarding table -- assumes lock held */ 639 + static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan, 640 + const u8 *mac, __u16 state, 641 + __be32 src_vni, __u8 ndm_flags) 642 + { 643 + struct vxlan_fdb *f; 644 + 645 + f = kmalloc(sizeof(*f), GFP_ATOMIC); 646 + if (!f) 647 + return NULL; 648 + f->state = state; 649 + f->flags = ndm_flags; 650 + f->updated = f->used = jiffies; 651 + f->vni = src_vni; 652 + INIT_LIST_HEAD(&f->remotes); 653 + memcpy(f->eth_addr, mac, ETH_ALEN); 654 + 655 + return f; 656 + } 657 + 640 658 static int vxlan_fdb_create(struct vxlan_dev *vxlan, 659 + const u8 *mac, union vxlan_addr *ip, 660 + __u16 state, __be16 port, __be32 src_vni, 661 + __be32 vni, __u32 ifindex, __u8 ndm_flags, 662 + struct vxlan_fdb **fdb) 663 + { 664 + struct vxlan_rdst *rd = NULL; 665 + struct vxlan_fdb *f; 666 + int rc; 667 + 668 + if (vxlan->cfg.addrmax && 669 + vxlan->addrcnt >= vxlan->cfg.addrmax) 670 + return -ENOSPC; 671 + 672 + netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); 673 + f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags); 674 + if (!f) 675 + return -ENOMEM; 676 + 677 + rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); 678 + if (rc < 0) { 679 + kfree(f); 680 + return rc; 681 + } 682 + 683 + ++vxlan->addrcnt; 684 + hlist_add_head_rcu(&f->hlist, 685 + vxlan_fdb_head(vxlan, mac, src_vni)); 686 + 687 + *fdb = f; 688 + 689 + return 0; 690 + } 691 + 692 + /* Add new entry to forwarding table -- assumes lock held */ 693 + static int vxlan_fdb_update(struct vxlan_dev *vxlan, 641 694 const u8 *mac, union vxlan_addr *ip, 642 695 __u16 state, __u16 flags, 643 696 __be16 port, __be32 src_vni, __be32 vni, ··· 740 687 if (!(flags & NLM_F_CREATE)) 741 688 return -ENOENT; 742 689 743 - if (vxlan->cfg.addrmax && 744 - vxlan->addrcnt >= vxlan->cfg.addrmax) 745 - return -ENOSPC; 746 - 747 690 /* Disallow replace to add a 
multicast entry */ 748 691 if ((flags & NLM_F_REPLACE) && 749 692 (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac))) 750 693 return -EOPNOTSUPP; 751 694 752 695 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); 753 - f = kmalloc(sizeof(*f), GFP_ATOMIC); 754 - if (!f) 755 - return -ENOMEM; 756 - 757 - notify = 1; 758 - f->state = state; 759 - f->flags = ndm_flags; 760 - f->updated = f->used = jiffies; 761 - f->vni = src_vni; 762 - INIT_LIST_HEAD(&f->remotes); 763 - memcpy(f->eth_addr, mac, ETH_ALEN); 764 - 765 - rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); 766 - if (rc < 0) { 767 - kfree(f); 696 + rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni, 697 + vni, ifindex, ndm_flags, &f); 698 + if (rc < 0) 768 699 return rc; 769 - } 770 - 771 - ++vxlan->addrcnt; 772 - hlist_add_head_rcu(&f->hlist, 773 - vxlan_fdb_head(vxlan, mac, src_vni)); 700 + notify = 1; 774 701 } 775 702 776 703 if (notify) { ··· 774 741 kfree(f); 775 742 } 776 743 777 - static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f) 744 + static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, 745 + bool do_notify) 778 746 { 779 747 netdev_dbg(vxlan->dev, 780 748 "delete %pM\n", f->eth_addr); 781 749 782 750 --vxlan->addrcnt; 783 - vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH); 751 + if (do_notify) 752 + vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH); 784 753 785 754 hlist_del_rcu(&f->hlist); 786 755 call_rcu(&f->rcu, vxlan_fdb_free); ··· 898 863 return -EAFNOSUPPORT; 899 864 900 865 spin_lock_bh(&vxlan->hash_lock); 901 - err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags, 866 + err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags, 902 867 port, src_vni, vni, ifindex, ndm->ndm_flags); 903 868 spin_unlock_bh(&vxlan->hash_lock); 904 869 ··· 932 897 goto out; 933 898 } 934 899 935 - vxlan_fdb_destroy(vxlan, f); 900 + vxlan_fdb_destroy(vxlan, f, true); 936 901 937 902 out: 938 903 return 
0; ··· 1041 1006 1042 1007 /* close off race between vxlan_flush and incoming packets */ 1043 1008 if (netif_running(dev)) 1044 - vxlan_fdb_create(vxlan, src_mac, src_ip, 1009 + vxlan_fdb_update(vxlan, src_mac, src_ip, 1045 1010 NUD_REACHABLE, 1046 1011 NLM_F_EXCL|NLM_F_CREATE, 1047 1012 vxlan->cfg.dst_port, ··· 2399 2364 "garbage collect %pM\n", 2400 2365 f->eth_addr); 2401 2366 f->state = NUD_STALE; 2402 - vxlan_fdb_destroy(vxlan, f); 2367 + vxlan_fdb_destroy(vxlan, f, true); 2403 2368 } else if (time_before(timeout, next_timer)) 2404 2369 next_timer = timeout; 2405 2370 } ··· 2450 2415 spin_lock_bh(&vxlan->hash_lock); 2451 2416 f = __vxlan_find_mac(vxlan, all_zeros_mac, vni); 2452 2417 if (f) 2453 - vxlan_fdb_destroy(vxlan, f); 2418 + vxlan_fdb_destroy(vxlan, f, true); 2454 2419 spin_unlock_bh(&vxlan->hash_lock); 2455 2420 } 2456 2421 ··· 2504 2469 continue; 2505 2470 /* the all_zeros_mac entry is deleted at vxlan_uninit */ 2506 2471 if (!is_zero_ether_addr(f->eth_addr)) 2507 - vxlan_fdb_destroy(vxlan, f); 2472 + vxlan_fdb_destroy(vxlan, f, true); 2508 2473 } 2509 2474 } 2510 2475 spin_unlock_bh(&vxlan->hash_lock); ··· 3195 3160 { 3196 3161 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 3197 3162 struct vxlan_dev *vxlan = netdev_priv(dev); 3163 + struct vxlan_fdb *f = NULL; 3198 3164 int err; 3199 3165 3200 3166 err = vxlan_dev_configure(net, dev, conf, false, extack); ··· 3209 3173 err = vxlan_fdb_create(vxlan, all_zeros_mac, 3210 3174 &vxlan->default_dst.remote_ip, 3211 3175 NUD_REACHABLE | NUD_PERMANENT, 3212 - NLM_F_EXCL | NLM_F_CREATE, 3213 3176 vxlan->cfg.dst_port, 3214 3177 vxlan->default_dst.remote_vni, 3215 3178 vxlan->default_dst.remote_vni, 3216 3179 vxlan->default_dst.remote_ifindex, 3217 - NTF_SELF); 3180 + NTF_SELF, &f); 3218 3181 if (err) 3219 3182 return err; 3220 3183 } 3221 3184 3222 3185 err = register_netdevice(dev); 3186 + if (err) 3187 + goto errout; 3188 + 3189 + err = rtnl_configure_link(dev, NULL); 3223 3190 if (err) { 3224 - 
vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni); 3225 - return err; 3191 + unregister_netdevice(dev); 3192 + goto errout; 3226 3193 } 3194 + 3195 + /* notify default fdb entry */ 3196 + if (f) 3197 + vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH); 3227 3198 3228 3199 list_add(&vxlan->next, &vn->vxlan_list); 3229 3200 return 0; 3201 + errout: 3202 + if (f) 3203 + vxlan_fdb_destroy(vxlan, f, false); 3204 + return err; 3230 3205 } 3231 3206 3232 3207 static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], ··· 3472 3425 struct vxlan_rdst *dst = &vxlan->default_dst; 3473 3426 struct vxlan_rdst old_dst; 3474 3427 struct vxlan_config conf; 3428 + struct vxlan_fdb *f = NULL; 3475 3429 int err; 3476 3430 3477 3431 err = vxlan_nl2conf(tb, data, ··· 3501 3453 err = vxlan_fdb_create(vxlan, all_zeros_mac, 3502 3454 &dst->remote_ip, 3503 3455 NUD_REACHABLE | NUD_PERMANENT, 3504 - NLM_F_CREATE | NLM_F_APPEND, 3505 3456 vxlan->cfg.dst_port, 3506 3457 dst->remote_vni, 3507 3458 dst->remote_vni, 3508 3459 dst->remote_ifindex, 3509 - NTF_SELF); 3460 + NTF_SELF, &f); 3510 3461 if (err) { 3511 3462 spin_unlock_bh(&vxlan->hash_lock); 3512 3463 return err; 3513 3464 } 3465 + vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH); 3514 3466 } 3515 3467 spin_unlock_bh(&vxlan->hash_lock); 3516 3468 }
+3 -3
include/linux/bpfilter.h
··· 5 5 #include <uapi/linux/bpfilter.h> 6 6 7 7 struct sock; 8 - int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char *optval, 8 + int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, 9 9 unsigned int optlen); 10 - int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char *optval, 11 - int *optlen); 10 + int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, 11 + int __user *optlen); 12 12 extern int (*bpfilter_process_sockopt)(struct sock *sk, int optname, 13 13 char __user *optval, 14 14 unsigned int optlen, bool is_set);
+15 -3
include/linux/mlx5/driver.h
··· 358 358 struct mlx5_frag_buf frag_buf; 359 359 u32 sz_m1; 360 360 u32 frag_sz_m1; 361 + u32 strides_offset; 361 362 u8 log_sz; 362 363 u8 log_stride; 363 364 u8 log_frag_strides; ··· 984 983 return key & 0xffffff00u; 985 984 } 986 985 987 - static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz, 988 - struct mlx5_frag_buf_ctrl *fbc) 986 + static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz, 987 + u32 strides_offset, 988 + struct mlx5_frag_buf_ctrl *fbc) 989 989 { 990 990 fbc->log_stride = log_stride; 991 991 fbc->log_sz = log_sz; 992 992 fbc->sz_m1 = (1 << fbc->log_sz) - 1; 993 993 fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride; 994 994 fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1; 995 + fbc->strides_offset = strides_offset; 996 + } 997 + 998 + static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz, 999 + struct mlx5_frag_buf_ctrl *fbc) 1000 + { 1001 + mlx5_fill_fbc_offset(log_stride, log_sz, 0, fbc); 995 1002 } 996 1003 997 1004 static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc, ··· 1013 1004 static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc, 1014 1005 u32 ix) 1015 1006 { 1016 - unsigned int frag = (ix >> fbc->log_frag_strides); 1007 + unsigned int frag; 1008 + 1009 + ix += fbc->strides_offset; 1010 + frag = ix >> fbc->log_frag_strides; 1017 1011 1018 1012 return fbc->frag_buf.frags[frag].buf + 1019 1013 ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
+6 -6
include/net/cfg80211.h
··· 5835 5835 /** 5836 5836 * cfg80211_rx_control_port - notification about a received control port frame 5837 5837 * @dev: The device the frame matched to 5838 - * @buf: control port frame 5839 - * @len: length of the frame data 5840 - * @addr: The peer from which the frame was received 5841 - * @proto: frame protocol, typically PAE or Pre-authentication 5838 + * @skb: The skbuf with the control port frame. It is assumed that the skbuf 5839 + * is 802.3 formatted (with 802.3 header). The skb can be non-linear. 5840 + * This function does not take ownership of the skb, so the caller is 5841 + * responsible for any cleanup. The caller must also ensure that 5842 + * skb->protocol is set appropriately. 5842 5843 * @unencrypted: Whether the frame was received unencrypted 5843 5844 * 5844 5845 * This function is used to inform userspace about a received control port ··· 5852 5851 * Return: %true if the frame was passed to userspace 5853 5852 */ 5854 5853 bool cfg80211_rx_control_port(struct net_device *dev, 5855 - const u8 *buf, size_t len, 5856 - const u8 *addr, u16 proto, bool unencrypted); 5854 + struct sk_buff *skb, bool unencrypted); 5857 5855 5858 5856 /** 5859 5857 * cfg80211_cqm_rssi_notify - connection quality monitoring rssi event
+5
include/net/ip6_fib.h
··· 281 281 atomic_inc(&f6i->fib6_ref); 282 282 } 283 283 284 + static inline bool fib6_info_hold_safe(struct fib6_info *f6i) 285 + { 286 + return atomic_inc_not_zero(&f6i->fib6_ref); 287 + } 288 + 284 289 static inline void fib6_info_release(struct fib6_info *f6i) 285 290 { 286 291 if (f6i && atomic_dec_and_test(&f6i->fib6_ref))
+2 -3
include/net/netfilter/nf_tables.h
··· 150 150 * @portid: netlink portID of the original message 151 151 * @seq: netlink sequence number 152 152 * @family: protocol family 153 + * @level: depth of the chains 153 154 * @report: notify via unicast netlink message 154 155 */ 155 156 struct nft_ctx { ··· 161 160 u32 portid; 162 161 u32 seq; 163 162 u8 family; 163 + u8 level; 164 164 bool report; 165 165 }; 166 166 ··· 867 865 * @table: table that this chain belongs to 868 866 * @handle: chain handle 869 867 * @use: number of jump references to this chain 870 - * @level: length of longest path to this chain 871 868 * @flags: bitmask of enum nft_chain_flags 872 869 * @name: name of the chain 873 870 */ ··· 879 878 struct nft_table *table; 880 879 u64 handle; 881 880 u32 use; 882 - u16 level; 883 881 u8 flags:6, 884 882 genmask:2; 885 883 char *name; ··· 1124 1124 u32 genmask:2, 1125 1125 use:30; 1126 1126 u64 handle; 1127 - char *dev_name[NFT_FLOWTABLE_DEVICE_MAX]; 1128 1127 /* runtime data below here */ 1129 1128 struct nf_hook_ops *ops ____cacheline_aligned; 1130 1129 struct nf_flowtable data;
+7
include/net/tcp.h
··· 342 342 struct pipe_inode_info *pipe, size_t len, 343 343 unsigned int flags); 344 344 345 + void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks); 345 346 static inline void tcp_dec_quickack_mode(struct sock *sk, 346 347 const unsigned int pkts) 347 348 { ··· 540 539 void tcp_send_active_reset(struct sock *sk, gfp_t priority); 541 540 int tcp_send_synack(struct sock *); 542 541 void tcp_push_one(struct sock *, unsigned int mss_now); 542 + void __tcp_send_ack(struct sock *sk, u32 rcv_nxt); 543 543 void tcp_send_ack(struct sock *sk); 544 544 void tcp_send_delayed_ack(struct sock *sk); 545 545 void tcp_send_loss_probe(struct sock *sk); ··· 840 838 * as TCP moves IP6CB into a different location in skb->cb[] 841 839 */ 842 840 static inline int tcp_v6_iif(const struct sk_buff *skb) 841 + { 842 + return TCP_SKB_CB(skb)->header.h6.iif; 843 + } 844 + 845 + static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb) 843 846 { 844 847 bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags); 845 848
+1 -1
include/uapi/linux/btf.h
··· 76 76 */ 77 77 #define BTF_INT_ENCODING(VAL) (((VAL) & 0x0f000000) >> 24) 78 78 #define BTF_INT_OFFSET(VAL) (((VAL & 0x00ff0000)) >> 16) 79 - #define BTF_INT_BITS(VAL) ((VAL) & 0x0000ffff) 79 + #define BTF_INT_BITS(VAL) ((VAL) & 0x000000ff) 80 80 81 81 /* Attributes stored in the BTF_INT_ENCODING */ 82 82 #define BTF_INT_SIGNED (1 << 0)
+10 -6
kernel/bpf/btf.c
··· 450 450 */ 451 451 static bool btf_type_int_is_regular(const struct btf_type *t) 452 452 { 453 - u16 nr_bits, nr_bytes; 453 + u8 nr_bits, nr_bytes; 454 454 u32 int_data; 455 455 456 456 int_data = btf_type_int(t); ··· 993 993 { 994 994 u16 left_shift_bits, right_shift_bits; 995 995 u32 int_data = btf_type_int(t); 996 - u16 nr_bits = BTF_INT_BITS(int_data); 997 - u16 total_bits_offset; 998 - u16 nr_copy_bytes; 999 - u16 nr_copy_bits; 996 + u8 nr_bits = BTF_INT_BITS(int_data); 997 + u8 total_bits_offset; 998 + u8 nr_copy_bytes; 999 + u8 nr_copy_bits; 1000 1000 u64 print_num; 1001 1001 1002 + /* 1003 + * bits_offset is at most 7. 1004 + * BTF_INT_OFFSET() cannot exceed 64 bits. 1005 + */ 1002 1006 total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data); 1003 1007 data += BITS_ROUNDDOWN_BYTES(total_bits_offset); 1004 1008 bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset); ··· 1032 1028 u32 int_data = btf_type_int(t); 1033 1029 u8 encoding = BTF_INT_ENCODING(int_data); 1034 1030 bool sign = encoding & BTF_INT_SIGNED; 1035 - u32 nr_bits = BTF_INT_BITS(int_data); 1031 + u8 nr_bits = BTF_INT_BITS(int_data); 1036 1032 1037 1033 if (bits_offset || BTF_INT_OFFSET(int_data) || 1038 1034 BITS_PER_BYTE_MASKED(nr_bits)) {
+3 -1
net/caif/caif_dev.c
··· 131 131 caifd = caif_get(skb->dev); 132 132 133 133 WARN_ON(caifd == NULL); 134 - if (caifd == NULL) 134 + if (!caifd) { 135 + rcu_read_unlock(); 135 136 return; 137 + } 136 138 137 139 caifd_hold(caifd); 138 140 rcu_read_unlock();
+1 -1
net/core/page_pool.c
··· 269 269 struct page *page; 270 270 271 271 /* Empty recycle ring */ 272 - while ((page = ptr_ring_consume(&pool->ring))) { 272 + while ((page = ptr_ring_consume_bh(&pool->ring))) { 273 273 /* Verify the refcnt invariant of cached pages */ 274 274 if (!(page_ref_count(page) == 1)) 275 275 pr_crit("%s() page_pool refcnt %d violation\n",
+6 -3
net/core/rtnetlink.c
··· 2759 2759 return err; 2760 2760 } 2761 2761 2762 - dev->rtnl_link_state = RTNL_LINK_INITIALIZED; 2763 - 2764 - __dev_notify_flags(dev, old_flags, ~0U); 2762 + if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) { 2763 + __dev_notify_flags(dev, old_flags, 0U); 2764 + } else { 2765 + dev->rtnl_link_state = RTNL_LINK_INITIALIZED; 2766 + __dev_notify_flags(dev, old_flags, ~0U); 2767 + } 2765 2768 return 0; 2766 2769 } 2767 2770 EXPORT_SYMBOL(rtnl_configure_link);
+5 -5
net/core/skbuff.c
··· 3720 3720 net_warn_ratelimited( 3721 3721 "skb_segment: too many frags: %u %u\n", 3722 3722 pos, mss); 3723 + err = -EINVAL; 3723 3724 goto err; 3724 3725 } 3725 3726 ··· 3754 3753 3755 3754 perform_csum_check: 3756 3755 if (!csum) { 3757 - if (skb_has_shared_frag(nskb)) { 3758 - err = __skb_linearize(nskb); 3759 - if (err) 3760 - goto err; 3761 - } 3756 + if (skb_has_shared_frag(nskb) && 3757 + __skb_linearize(nskb)) 3758 + goto err; 3759 + 3762 3760 if (!nskb->remcsum_offload) 3763 3761 nskb->ip_summed = CHECKSUM_NONE; 3764 3762 SKB_GSO_CB(nskb)->csum =
+3 -3
net/core/sock.c
··· 2277 2277 pfrag->offset += use; 2278 2278 2279 2279 sge = sg + sg_curr - 1; 2280 - if (sg_curr > first_coalesce && sg_page(sg) == pfrag->page && 2281 - sg->offset + sg->length == orig_offset) { 2282 - sg->length += use; 2280 + if (sg_curr > first_coalesce && sg_page(sge) == pfrag->page && 2281 + sge->offset + sge->length == orig_offset) { 2282 + sge->length += use; 2283 2283 } else { 2284 2284 sge = sg + sg_curr; 2285 2285 sg_unmark_end(sge);
+1 -2
net/ipv4/igmp.c
··· 1200 1200 spin_lock_bh(&im->lock); 1201 1201 if (pmc) { 1202 1202 im->interface = pmc->interface; 1203 - im->sfmode = pmc->sfmode; 1204 - if (pmc->sfmode == MCAST_INCLUDE) { 1203 + if (im->sfmode == MCAST_INCLUDE) { 1205 1204 im->tomb = pmc->tomb; 1206 1205 im->sources = pmc->sources; 1207 1206 for (psf = im->sources; psf; psf = psf->sf_next)
+2
net/ipv4/ip_output.c
··· 523 523 to->dev = from->dev; 524 524 to->mark = from->mark; 525 525 526 + skb_copy_hash(to, from); 527 + 526 528 /* Copy the flags to each fragment. */ 527 529 IPCB(to)->flags = IPCB(from)->flags; 528 530
+5 -2
net/ipv4/ip_sockglue.c
··· 150 150 { 151 151 struct sockaddr_in sin; 152 152 const struct iphdr *iph = ip_hdr(skb); 153 - __be16 *ports = (__be16 *)skb_transport_header(skb); 153 + __be16 *ports; 154 + int end; 154 155 155 - if (skb_transport_offset(skb) + 4 > (int)skb->len) 156 + end = skb_transport_offset(skb) + 4; 157 + if (end > 0 && !pskb_may_pull(skb, end)) 156 158 return; 157 159 158 160 /* All current transport protocols have the port numbers in the 159 161 * first four bytes of the transport header and this function is 160 162 * written with this assumption in mind. 161 163 */ 164 + ports = (__be16 *)skb_transport_header(skb); 162 165 163 166 sin.sin_family = AF_INET; 164 167 sin.sin_addr.s_addr = iph->daddr;
+16 -36
net/ipv4/tcp_dctcp.c
··· 129 129 struct dctcp *ca = inet_csk_ca(sk); 130 130 struct tcp_sock *tp = tcp_sk(sk); 131 131 132 - /* State has changed from CE=0 to CE=1 and delayed 133 - * ACK has not sent yet. 134 - */ 135 - if (!ca->ce_state && 136 - inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) { 137 - u32 tmp_rcv_nxt; 138 - 139 - /* Save current rcv_nxt. */ 140 - tmp_rcv_nxt = tp->rcv_nxt; 141 - 142 - /* Generate previous ack with CE=0. */ 143 - tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; 144 - tp->rcv_nxt = ca->prior_rcv_nxt; 145 - 146 - tcp_send_ack(sk); 147 - 148 - /* Recover current rcv_nxt. */ 149 - tp->rcv_nxt = tmp_rcv_nxt; 132 + if (!ca->ce_state) { 133 + /* State has changed from CE=0 to CE=1, force an immediate 134 + * ACK to reflect the new CE state. If an ACK was delayed, 135 + * send that first to reflect the prior CE state. 136 + */ 137 + if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) 138 + __tcp_send_ack(sk, ca->prior_rcv_nxt); 139 + tcp_enter_quickack_mode(sk, 1); 150 140 } 151 141 152 142 ca->prior_rcv_nxt = tp->rcv_nxt; ··· 150 160 struct dctcp *ca = inet_csk_ca(sk); 151 161 struct tcp_sock *tp = tcp_sk(sk); 152 162 153 - /* State has changed from CE=1 to CE=0 and delayed 154 - * ACK has not sent yet. 155 - */ 156 - if (ca->ce_state && 157 - inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) { 158 - u32 tmp_rcv_nxt; 159 - 160 - /* Save current rcv_nxt. */ 161 - tmp_rcv_nxt = tp->rcv_nxt; 162 - 163 - /* Generate previous ack with CE=1. */ 164 - tp->ecn_flags |= TCP_ECN_DEMAND_CWR; 165 - tp->rcv_nxt = ca->prior_rcv_nxt; 166 - 167 - tcp_send_ack(sk); 168 - 169 - /* Recover current rcv_nxt. */ 170 - tp->rcv_nxt = tmp_rcv_nxt; 163 + if (ca->ce_state) { 164 + /* State has changed from CE=1 to CE=0, force an immediate 165 + * ACK to reflect the new CE state. If an ACK was delayed, 166 + * send that first to reflect the prior CE state. 
167 + */ 168 + if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) 169 + __tcp_send_ack(sk, ca->prior_rcv_nxt); 170 + tcp_enter_quickack_mode(sk, 1); 171 171 } 172 172 173 173 ca->prior_rcv_nxt = tp->rcv_nxt;
+52 -13
net/ipv4/tcp_input.c
··· 215 215 icsk->icsk_ack.quick = quickacks; 216 216 } 217 217 218 - static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks) 218 + void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks) 219 219 { 220 220 struct inet_connection_sock *icsk = inet_csk(sk); 221 221 ··· 223 223 icsk->icsk_ack.pingpong = 0; 224 224 icsk->icsk_ack.ato = TCP_ATO_MIN; 225 225 } 226 + EXPORT_SYMBOL(tcp_enter_quickack_mode); 226 227 227 228 /* Send ACKs quickly, if "quick" count is not exhausted 228 229 * and the session is not interactive. ··· 4358 4357 return true; 4359 4358 } 4360 4359 4360 + static bool tcp_ooo_try_coalesce(struct sock *sk, 4361 + struct sk_buff *to, 4362 + struct sk_buff *from, 4363 + bool *fragstolen) 4364 + { 4365 + bool res = tcp_try_coalesce(sk, to, from, fragstolen); 4366 + 4367 + /* In case tcp_drop() is called later, update to->gso_segs */ 4368 + if (res) { 4369 + u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) + 4370 + max_t(u16, 1, skb_shinfo(from)->gso_segs); 4371 + 4372 + skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF); 4373 + } 4374 + return res; 4375 + } 4376 + 4361 4377 static void tcp_drop(struct sock *sk, struct sk_buff *skb) 4362 4378 { 4363 4379 sk_drops_add(sk, skb); ··· 4498 4480 /* In the typical case, we are adding an skb to the end of the list. 4499 4481 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup. 4500 4482 */ 4501 - if (tcp_try_coalesce(sk, tp->ooo_last_skb, 4502 - skb, &fragstolen)) { 4483 + if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb, 4484 + skb, &fragstolen)) { 4503 4485 coalesce_done: 4504 4486 tcp_grow_window(sk, skb); 4505 4487 kfree_skb_partial(skb, fragstolen); ··· 4527 4509 /* All the bits are present. Drop. 
*/ 4528 4510 NET_INC_STATS(sock_net(sk), 4529 4511 LINUX_MIB_TCPOFOMERGE); 4530 - __kfree_skb(skb); 4512 + tcp_drop(sk, skb); 4531 4513 skb = NULL; 4532 4514 tcp_dsack_set(sk, seq, end_seq); 4533 4515 goto add_sack; ··· 4546 4528 TCP_SKB_CB(skb1)->end_seq); 4547 4529 NET_INC_STATS(sock_net(sk), 4548 4530 LINUX_MIB_TCPOFOMERGE); 4549 - __kfree_skb(skb1); 4531 + tcp_drop(sk, skb1); 4550 4532 goto merge_right; 4551 4533 } 4552 - } else if (tcp_try_coalesce(sk, skb1, 4553 - skb, &fragstolen)) { 4534 + } else if (tcp_ooo_try_coalesce(sk, skb1, 4535 + skb, &fragstolen)) { 4554 4536 goto coalesce_done; 4555 4537 } 4556 4538 p = &parent->rb_right; ··· 4919 4901 static void tcp_collapse_ofo_queue(struct sock *sk) 4920 4902 { 4921 4903 struct tcp_sock *tp = tcp_sk(sk); 4904 + u32 range_truesize, sum_tiny = 0; 4922 4905 struct sk_buff *skb, *head; 4923 4906 u32 start, end; 4924 4907 ··· 4931 4912 } 4932 4913 start = TCP_SKB_CB(skb)->seq; 4933 4914 end = TCP_SKB_CB(skb)->end_seq; 4915 + range_truesize = skb->truesize; 4934 4916 4935 4917 for (head = skb;;) { 4936 4918 skb = skb_rb_next(skb); ··· 4942 4922 if (!skb || 4943 4923 after(TCP_SKB_CB(skb)->seq, end) || 4944 4924 before(TCP_SKB_CB(skb)->end_seq, start)) { 4945 - tcp_collapse(sk, NULL, &tp->out_of_order_queue, 4946 - head, skb, start, end); 4925 + /* Do not attempt collapsing tiny skbs */ 4926 + if (range_truesize != head->truesize || 4927 + end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) { 4928 + tcp_collapse(sk, NULL, &tp->out_of_order_queue, 4929 + head, skb, start, end); 4930 + } else { 4931 + sum_tiny += range_truesize; 4932 + if (sum_tiny > sk->sk_rcvbuf >> 3) 4933 + return; 4934 + } 4947 4935 goto new_range; 4948 4936 } 4949 4937 4938 + range_truesize += skb->truesize; 4950 4939 if (unlikely(before(TCP_SKB_CB(skb)->seq, start))) 4951 4940 start = TCP_SKB_CB(skb)->seq; 4952 4941 if (after(TCP_SKB_CB(skb)->end_seq, end)) ··· 4970 4941 * 2) not add too big latencies if thousands of packets sit there. 
4971 4942 * (But if application shrinks SO_RCVBUF, we could still end up 4972 4943 * freeing whole queue here) 4944 + * 3) Drop at least 12.5 % of sk_rcvbuf to avoid malicious attacks. 4973 4945 * 4974 4946 * Return true if queue has shrunk. 4975 4947 */ ··· 4978 4948 { 4979 4949 struct tcp_sock *tp = tcp_sk(sk); 4980 4950 struct rb_node *node, *prev; 4951 + int goal; 4981 4952 4982 4953 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) 4983 4954 return false; 4984 4955 4985 4956 NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED); 4957 + goal = sk->sk_rcvbuf >> 3; 4986 4958 node = &tp->ooo_last_skb->rbnode; 4987 4959 do { 4988 4960 prev = rb_prev(node); 4989 4961 rb_erase(node, &tp->out_of_order_queue); 4962 + goal -= rb_to_skb(node)->truesize; 4990 4963 tcp_drop(sk, rb_to_skb(node)); 4991 - sk_mem_reclaim(sk); 4992 - if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && 4993 - !tcp_under_memory_pressure(sk)) 4994 - break; 4964 + if (!prev || goal <= 0) { 4965 + sk_mem_reclaim(sk); 4966 + if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && 4967 + !tcp_under_memory_pressure(sk)) 4968 + break; 4969 + goal = sk->sk_rcvbuf >> 3; 4970 + } 4995 4971 node = prev; 4996 4972 } while (node); 4997 4973 tp->ooo_last_skb = rb_to_skb(prev); ··· 5031 4995 tcp_clamp_window(sk); 5032 4996 else if (tcp_under_memory_pressure(sk)) 5033 4997 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); 4998 + 4999 + if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) 5000 + return 0; 5034 5001 5035 5002 tcp_collapse_ofo_queue(sk); 5036 5003 if (!skb_queue_empty(&sk->sk_receive_queue))
+24 -8
net/ipv4/tcp_output.c
··· 160 160 } 161 161 162 162 /* Account for an ACK we sent. */ 163 - static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) 163 + static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts, 164 + u32 rcv_nxt) 164 165 { 165 166 struct tcp_sock *tp = tcp_sk(sk); 166 167 ··· 172 171 if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1) 173 172 __sock_put(sk); 174 173 } 174 + 175 + if (unlikely(rcv_nxt != tp->rcv_nxt)) 176 + return; /* Special ACK sent by DCTCP to reflect ECN */ 175 177 tcp_dec_quickack_mode(sk, pkts); 176 178 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); 177 179 } ··· 1027 1023 * We are working here with either a clone of the original 1028 1024 * SKB, or a fresh unique copy made by the retransmit engine. 1029 1025 */ 1030 - static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, 1031 - gfp_t gfp_mask) 1026 + static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, 1027 + int clone_it, gfp_t gfp_mask, u32 rcv_nxt) 1032 1028 { 1033 1029 const struct inet_connection_sock *icsk = inet_csk(sk); 1034 1030 struct inet_sock *inet; ··· 1104 1100 th->source = inet->inet_sport; 1105 1101 th->dest = inet->inet_dport; 1106 1102 th->seq = htonl(tcb->seq); 1107 - th->ack_seq = htonl(tp->rcv_nxt); 1103 + th->ack_seq = htonl(rcv_nxt); 1108 1104 *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | 1109 1105 tcb->tcp_flags); 1110 1106 ··· 1145 1141 icsk->icsk_af_ops->send_check(sk, skb); 1146 1142 1147 1143 if (likely(tcb->tcp_flags & TCPHDR_ACK)) 1148 - tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); 1144 + tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt); 1149 1145 1150 1146 if (skb->len != tcp_header_size) { 1151 1147 tcp_event_data_sent(tp, sk); ··· 1180 1176 tcp_rate_skb_sent(sk, oskb); 1181 1177 } 1182 1178 return err; 1179 + } 1180 + 1181 + static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, 1182 + gfp_t gfp_mask) 1183 + { 1184 + return 
__tcp_transmit_skb(sk, skb, clone_it, gfp_mask, 1185 + tcp_sk(sk)->rcv_nxt); 1183 1186 } 1184 1187 1185 1188 /* This routine just queues the buffer for sending. ··· 3582 3571 } 3583 3572 3584 3573 /* This routine sends an ack and also updates the window. */ 3585 - void tcp_send_ack(struct sock *sk) 3574 + void __tcp_send_ack(struct sock *sk, u32 rcv_nxt) 3586 3575 { 3587 3576 struct sk_buff *buff; 3588 3577 ··· 3615 3604 skb_set_tcp_pure_ack(buff); 3616 3605 3617 3606 /* Send it off, this clears delayed acks for us. */ 3618 - tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0); 3607 + __tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt); 3619 3608 } 3620 - EXPORT_SYMBOL_GPL(tcp_send_ack); 3609 + EXPORT_SYMBOL_GPL(__tcp_send_ack); 3610 + 3611 + void tcp_send_ack(struct sock *sk) 3612 + { 3613 + __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt); 3614 + } 3621 3615 3622 3616 /* This routine sends a packet with an out of date sequence 3623 3617 * number. It assumes the other end will try to ack it.
+2 -1
net/ipv6/addrconf.c
··· 2374 2374 continue; 2375 2375 if ((rt->fib6_flags & noflags) != 0) 2376 2376 continue; 2377 - fib6_info_hold(rt); 2377 + if (!fib6_info_hold_safe(rt)) 2378 + continue; 2378 2379 break; 2379 2380 } 2380 2381 out:
+5 -2
net/ipv6/datagram.c
··· 700 700 } 701 701 if (np->rxopt.bits.rxorigdstaddr) { 702 702 struct sockaddr_in6 sin6; 703 - __be16 *ports = (__be16 *) skb_transport_header(skb); 703 + __be16 *ports; 704 + int end; 704 705 705 - if (skb_transport_offset(skb) + 4 <= (int)skb->len) { 706 + end = skb_transport_offset(skb) + 4; 707 + if (end <= 0 || pskb_may_pull(skb, end)) { 706 708 /* All current transport protocols have the port numbers in the 707 709 * first four bytes of the transport header and this function is 708 710 * written with this assumption in mind. 709 711 */ 712 + ports = (__be16 *)skb_transport_header(skb); 710 713 711 714 sin6.sin6_family = AF_INET6; 712 715 sin6.sin6_addr = ipv6_hdr(skb)->daddr;
+3 -2
net/ipv6/icmp.c
··· 402 402 403 403 /* for local traffic to local address, skb dev is the loopback 404 404 * device. Check if there is a dst attached to the skb and if so 405 - * get the real device index. 405 + * get the real device index. Same is needed for replies to a link 406 + * local address on a device enslaved to an L3 master device 406 407 */ 407 - if (unlikely(iif == LOOPBACK_IFINDEX)) { 408 + if (unlikely(iif == LOOPBACK_IFINDEX || netif_is_l3_master(skb->dev))) { 408 409 const struct rt6_info *rt6 = skb_rt6_info(skb); 409 410 410 411 if (rt6)
+2
net/ipv6/ip6_output.c
··· 570 570 to->dev = from->dev; 571 571 to->mark = from->mark; 572 572 573 + skb_copy_hash(to, from); 574 + 573 575 #ifdef CONFIG_NET_SCHED 574 576 to->tc_index = from->tc_index; 575 577 #endif
+1 -2
net/ipv6/mcast.c
··· 790 790 spin_lock_bh(&im->mca_lock); 791 791 if (pmc) { 792 792 im->idev = pmc->idev; 793 - im->mca_sfmode = pmc->mca_sfmode; 794 - if (pmc->mca_sfmode == MCAST_INCLUDE) { 793 + if (im->mca_sfmode == MCAST_INCLUDE) { 795 794 im->mca_tomb = pmc->mca_tomb; 796 795 im->mca_sources = pmc->mca_sources; 797 796 for (psf = im->mca_sources; psf; psf = psf->sf_next)
+31 -10
net/ipv6/route.c
··· 972 972 rt->dst.lastuse = jiffies; 973 973 } 974 974 975 + /* Caller must already hold reference to @from */ 975 976 static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from) 976 977 { 977 978 rt->rt6i_flags &= ~RTF_EXPIRES; 978 - fib6_info_hold(from); 979 979 rcu_assign_pointer(rt->from, from); 980 980 dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true); 981 981 if (from->fib6_metrics != &dst_default_metrics) { ··· 984 984 } 985 985 } 986 986 987 + /* Caller must already hold reference to @ort */ 987 988 static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort) 988 989 { 989 990 struct net_device *dev = fib6_info_nh_dev(ort); ··· 1045 1044 struct net_device *dev = rt->fib6_nh.nh_dev; 1046 1045 struct rt6_info *nrt; 1047 1046 1047 + if (!fib6_info_hold_safe(rt)) 1048 + return NULL; 1049 + 1048 1050 nrt = ip6_dst_alloc(dev_net(dev), dev, flags); 1049 1051 if (nrt) 1050 1052 ip6_rt_copy_init(nrt, rt); 1053 + else 1054 + fib6_info_release(rt); 1051 1055 1052 1056 return nrt; 1053 1057 } ··· 1184 1178 * Clone the route. 
1185 1179 */ 1186 1180 1181 + if (!fib6_info_hold_safe(ort)) 1182 + return NULL; 1183 + 1187 1184 dev = ip6_rt_get_dev_rcu(ort); 1188 1185 rt = ip6_dst_alloc(dev_net(dev), dev, 0); 1189 - if (!rt) 1186 + if (!rt) { 1187 + fib6_info_release(ort); 1190 1188 return NULL; 1189 + } 1191 1190 1192 1191 ip6_rt_copy_init(rt, ort); 1193 1192 rt->rt6i_flags |= RTF_CACHE; ··· 1221 1210 struct net_device *dev; 1222 1211 struct rt6_info *pcpu_rt; 1223 1212 1213 + if (!fib6_info_hold_safe(rt)) 1214 + return NULL; 1215 + 1224 1216 rcu_read_lock(); 1225 1217 dev = ip6_rt_get_dev_rcu(rt); 1226 1218 pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags); 1227 1219 rcu_read_unlock(); 1228 - if (!pcpu_rt) 1220 + if (!pcpu_rt) { 1221 + fib6_info_release(rt); 1229 1222 return NULL; 1223 + } 1230 1224 ip6_rt_copy_init(pcpu_rt, rt); 1231 1225 pcpu_rt->rt6i_flags |= RTF_PCPU; 1232 1226 return pcpu_rt; ··· 2502 2486 2503 2487 out: 2504 2488 if (ret) 2505 - dst_hold(&ret->dst); 2489 + ip6_hold_safe(net, &ret, true); 2506 2490 else 2507 2491 ret = ip6_create_rt_rcu(rt); 2508 2492 ··· 3319 3303 continue; 3320 3304 if (cfg->fc_protocol && cfg->fc_protocol != rt->fib6_protocol) 3321 3305 continue; 3322 - fib6_info_hold(rt); 3306 + if (!fib6_info_hold_safe(rt)) 3307 + continue; 3323 3308 rcu_read_unlock(); 3324 3309 3325 3310 /* if gateway was specified only delete the one hop */ ··· 3426 3409 3427 3410 rcu_read_lock(); 3428 3411 from = rcu_dereference(rt->from); 3412 + /* This fib6_info_hold() is safe here because we hold reference to rt 3413 + * and rt already holds reference to fib6_info. 
3414 + */ 3429 3415 fib6_info_hold(from); 3430 3416 rcu_read_unlock(); 3431 3417 ··· 3490 3470 continue; 3491 3471 if (!ipv6_addr_equal(&rt->fib6_nh.nh_gw, gwaddr)) 3492 3472 continue; 3493 - fib6_info_hold(rt); 3473 + if (!fib6_info_hold_safe(rt)) 3474 + continue; 3494 3475 break; 3495 3476 } 3496 3477 out: ··· 3551 3530 ipv6_addr_equal(&rt->fib6_nh.nh_gw, addr)) 3552 3531 break; 3553 3532 } 3554 - if (rt) 3555 - fib6_info_hold(rt); 3533 + if (rt && !fib6_info_hold_safe(rt)) 3534 + rt = NULL; 3556 3535 rcu_read_unlock(); 3557 3536 return rt; 3558 3537 } ··· 3600 3579 struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL; 3601 3580 3602 3581 if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) && 3603 - (!idev || idev->cnf.accept_ra != 2)) { 3604 - fib6_info_hold(rt); 3582 + (!idev || idev->cnf.accept_ra != 2) && 3583 + fib6_info_hold_safe(rt)) { 3605 3584 rcu_read_unlock(); 3606 3585 ip6_del_rt(net, rt); 3607 3586 goto restart;
+4 -2
net/ipv6/tcp_ipv6.c
··· 938 938 &tcp_hashinfo, NULL, 0, 939 939 &ipv6h->saddr, 940 940 th->source, &ipv6h->daddr, 941 - ntohs(th->source), tcp_v6_iif(skb), 941 + ntohs(th->source), 942 + tcp_v6_iif_l3_slave(skb), 942 943 tcp_v6_sdif(skb)); 943 944 if (!sk1) 944 945 goto out; ··· 1610 1609 skb, __tcp_hdrlen(th), 1611 1610 &ipv6_hdr(skb)->saddr, th->source, 1612 1611 &ipv6_hdr(skb)->daddr, 1613 - ntohs(th->dest), tcp_v6_iif(skb), 1612 + ntohs(th->dest), 1613 + tcp_v6_iif_l3_slave(skb), 1614 1614 sdif); 1615 1615 if (sk2) { 1616 1616 struct inet_timewait_sock *tw = inet_twsk(sk);
+1 -4
net/mac80211/rx.c
··· 2254 2254 sdata->control_port_over_nl80211)) { 2255 2255 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2256 2256 bool noencrypt = status->flag & RX_FLAG_DECRYPTED; 2257 - struct ethhdr *ehdr = eth_hdr(skb); 2258 2257 2259 - cfg80211_rx_control_port(dev, skb->data, skb->len, 2260 - ehdr->h_source, 2261 - be16_to_cpu(skb->protocol), noencrypt); 2258 + cfg80211_rx_control_port(dev, skb, noencrypt); 2262 2259 dev_kfree_skb(skb); 2263 2260 } else { 2264 2261 /* deliver to local stack */
+2 -1
net/mac80211/util.c
··· 2111 2111 if (!sta->uploaded) 2112 2112 continue; 2113 2113 2114 - if (sta->sdata->vif.type != NL80211_IFTYPE_AP) 2114 + if (sta->sdata->vif.type != NL80211_IFTYPE_AP && 2115 + sta->sdata->vif.type != NL80211_IFTYPE_AP_VLAN) 2115 2116 continue; 2116 2117 2117 2118 for (state = IEEE80211_STA_NOTEXIST;
+4 -4
net/netfilter/nf_conntrack_proto_dccp.c
··· 243 243 * We currently ignore Sync packets 244 244 * 245 245 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ 246 - sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, 246 + sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, 247 247 }, 248 248 [DCCP_PKT_SYNCACK] = { 249 249 /* 250 250 * We currently ignore SyncAck packets 251 251 * 252 252 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ 253 - sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, 253 + sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, 254 254 }, 255 255 }, 256 256 [CT_DCCP_ROLE_SERVER] = { ··· 371 371 * We currently ignore Sync packets 372 372 * 373 373 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ 374 - sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, 374 + sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, 375 375 }, 376 376 [DCCP_PKT_SYNCACK] = { 377 377 /* 378 378 * We currently ignore SyncAck packets 379 379 * 380 380 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ 381 - sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, 381 + sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, 382 382 }, 383 383 }, 384 384 };
+166 -140
net/netfilter/nf_tables_api.c
··· 75 75 { 76 76 ctx->net = net; 77 77 ctx->family = family; 78 + ctx->level = 0; 78 79 ctx->table = table; 79 80 ctx->chain = chain; 80 81 ctx->nla = nla; ··· 1598 1597 struct nft_base_chain *basechain; 1599 1598 struct nft_stats *stats = NULL; 1600 1599 struct nft_chain_hook hook; 1601 - const struct nlattr *name; 1602 1600 struct nf_hook_ops *ops; 1603 1601 struct nft_trans *trans; 1604 1602 int err; ··· 1645 1645 return PTR_ERR(stats); 1646 1646 } 1647 1647 1648 + err = -ENOMEM; 1648 1649 trans = nft_trans_alloc(ctx, NFT_MSG_NEWCHAIN, 1649 1650 sizeof(struct nft_trans_chain)); 1650 - if (trans == NULL) { 1651 - free_percpu(stats); 1652 - return -ENOMEM; 1653 - } 1651 + if (trans == NULL) 1652 + goto err; 1654 1653 1655 1654 nft_trans_chain_stats(trans) = stats; 1656 1655 nft_trans_chain_update(trans) = true; ··· 1659 1660 else 1660 1661 nft_trans_chain_policy(trans) = -1; 1661 1662 1662 - name = nla[NFTA_CHAIN_NAME]; 1663 - if (nla[NFTA_CHAIN_HANDLE] && name) { 1664 - nft_trans_chain_name(trans) = 1665 - nla_strdup(name, GFP_KERNEL); 1666 - if (!nft_trans_chain_name(trans)) { 1667 - kfree(trans); 1668 - free_percpu(stats); 1669 - return -ENOMEM; 1663 + if (nla[NFTA_CHAIN_HANDLE] && 1664 + nla[NFTA_CHAIN_NAME]) { 1665 + struct nft_trans *tmp; 1666 + char *name; 1667 + 1668 + err = -ENOMEM; 1669 + name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL); 1670 + if (!name) 1671 + goto err; 1672 + 1673 + err = -EEXIST; 1674 + list_for_each_entry(tmp, &ctx->net->nft.commit_list, list) { 1675 + if (tmp->msg_type == NFT_MSG_NEWCHAIN && 1676 + tmp->ctx.table == table && 1677 + nft_trans_chain_update(tmp) && 1678 + nft_trans_chain_name(tmp) && 1679 + strcmp(name, nft_trans_chain_name(tmp)) == 0) { 1680 + kfree(name); 1681 + goto err; 1682 + } 1670 1683 } 1684 + 1685 + nft_trans_chain_name(trans) = name; 1671 1686 } 1672 1687 list_add_tail(&trans->list, &ctx->net->nft.commit_list); 1673 1688 1674 1689 return 0; 1690 + err: 1691 + free_percpu(stats); 1692 + kfree(trans); 1693 
+ return err; 1675 1694 } 1676 1695 1677 1696 static int nf_tables_newchain(struct net *net, struct sock *nlsk, ··· 2271 2254 return skb->len; 2272 2255 } 2273 2256 2257 + static int nf_tables_dump_rules_start(struct netlink_callback *cb) 2258 + { 2259 + const struct nlattr * const *nla = cb->data; 2260 + struct nft_rule_dump_ctx *ctx = NULL; 2261 + 2262 + if (nla[NFTA_RULE_TABLE] || nla[NFTA_RULE_CHAIN]) { 2263 + ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC); 2264 + if (!ctx) 2265 + return -ENOMEM; 2266 + 2267 + if (nla[NFTA_RULE_TABLE]) { 2268 + ctx->table = nla_strdup(nla[NFTA_RULE_TABLE], 2269 + GFP_ATOMIC); 2270 + if (!ctx->table) { 2271 + kfree(ctx); 2272 + return -ENOMEM; 2273 + } 2274 + } 2275 + if (nla[NFTA_RULE_CHAIN]) { 2276 + ctx->chain = nla_strdup(nla[NFTA_RULE_CHAIN], 2277 + GFP_ATOMIC); 2278 + if (!ctx->chain) { 2279 + kfree(ctx->table); 2280 + kfree(ctx); 2281 + return -ENOMEM; 2282 + } 2283 + } 2284 + } 2285 + 2286 + cb->data = ctx; 2287 + return 0; 2288 + } 2289 + 2274 2290 static int nf_tables_dump_rules_done(struct netlink_callback *cb) 2275 2291 { 2276 2292 struct nft_rule_dump_ctx *ctx = cb->data; ··· 2333 2283 2334 2284 if (nlh->nlmsg_flags & NLM_F_DUMP) { 2335 2285 struct netlink_dump_control c = { 2286 + .start= nf_tables_dump_rules_start, 2336 2287 .dump = nf_tables_dump_rules, 2337 2288 .done = nf_tables_dump_rules_done, 2338 2289 .module = THIS_MODULE, 2290 + .data = (void *)nla, 2339 2291 }; 2340 - 2341 - if (nla[NFTA_RULE_TABLE] || nla[NFTA_RULE_CHAIN]) { 2342 - struct nft_rule_dump_ctx *ctx; 2343 - 2344 - ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC); 2345 - if (!ctx) 2346 - return -ENOMEM; 2347 - 2348 - if (nla[NFTA_RULE_TABLE]) { 2349 - ctx->table = nla_strdup(nla[NFTA_RULE_TABLE], 2350 - GFP_ATOMIC); 2351 - if (!ctx->table) { 2352 - kfree(ctx); 2353 - return -ENOMEM; 2354 - } 2355 - } 2356 - if (nla[NFTA_RULE_CHAIN]) { 2357 - ctx->chain = nla_strdup(nla[NFTA_RULE_CHAIN], 2358 - GFP_ATOMIC); 2359 - if (!ctx->chain) { 2360 - kfree(ctx->table); 
2361 - kfree(ctx); 2362 - return -ENOMEM; 2363 - } 2364 - } 2365 - c.data = ctx; 2366 - } 2367 2292 2368 2293 return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); 2369 2294 } ··· 2408 2383 const struct nft_data *data; 2409 2384 struct nft_rule *rule; 2410 2385 int err; 2386 + 2387 + if (ctx->level == NFT_JUMP_STACK_SIZE) 2388 + return -EMLINK; 2411 2389 2412 2390 list_for_each_entry(rule, &chain->rules, list) { 2413 2391 if (!nft_is_active_next(ctx->net, rule)) ··· 3189 3161 return skb->len; 3190 3162 } 3191 3163 3164 + static int nf_tables_dump_sets_start(struct netlink_callback *cb) 3165 + { 3166 + struct nft_ctx *ctx_dump = NULL; 3167 + 3168 + ctx_dump = kmemdup(cb->data, sizeof(*ctx_dump), GFP_ATOMIC); 3169 + if (ctx_dump == NULL) 3170 + return -ENOMEM; 3171 + 3172 + cb->data = ctx_dump; 3173 + return 0; 3174 + } 3175 + 3192 3176 static int nf_tables_dump_sets_done(struct netlink_callback *cb) 3193 3177 { 3194 3178 kfree(cb->data); ··· 3228 3188 3229 3189 if (nlh->nlmsg_flags & NLM_F_DUMP) { 3230 3190 struct netlink_dump_control c = { 3191 + .start = nf_tables_dump_sets_start, 3231 3192 .dump = nf_tables_dump_sets, 3232 3193 .done = nf_tables_dump_sets_done, 3194 + .data = &ctx, 3233 3195 .module = THIS_MODULE, 3234 3196 }; 3235 - struct nft_ctx *ctx_dump; 3236 - 3237 - ctx_dump = kmalloc(sizeof(*ctx_dump), GFP_ATOMIC); 3238 - if (ctx_dump == NULL) 3239 - return -ENOMEM; 3240 - 3241 - *ctx_dump = ctx; 3242 - c.data = ctx_dump; 3243 3197 3244 3198 return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); 3245 3199 } ··· 3883 3849 return -ENOSPC; 3884 3850 } 3885 3851 3852 + static int nf_tables_dump_set_start(struct netlink_callback *cb) 3853 + { 3854 + struct nft_set_dump_ctx *dump_ctx = cb->data; 3855 + 3856 + cb->data = kmemdup(dump_ctx, sizeof(*dump_ctx), GFP_ATOMIC); 3857 + 3858 + return cb->data ? 
0 : -ENOMEM; 3859 + } 3860 + 3886 3861 static int nf_tables_dump_set_done(struct netlink_callback *cb) 3887 3862 { 3888 3863 kfree(cb->data); ··· 4045 4002 4046 4003 if (nlh->nlmsg_flags & NLM_F_DUMP) { 4047 4004 struct netlink_dump_control c = { 4005 + .start = nf_tables_dump_set_start, 4048 4006 .dump = nf_tables_dump_set, 4049 4007 .done = nf_tables_dump_set_done, 4050 4008 .module = THIS_MODULE, 4051 4009 }; 4052 - struct nft_set_dump_ctx *dump_ctx; 4010 + struct nft_set_dump_ctx dump_ctx = { 4011 + .set = set, 4012 + .ctx = ctx, 4013 + }; 4053 4014 4054 - dump_ctx = kmalloc(sizeof(*dump_ctx), GFP_ATOMIC); 4055 - if (!dump_ctx) 4056 - return -ENOMEM; 4057 - 4058 - dump_ctx->set = set; 4059 - dump_ctx->ctx = ctx; 4060 - 4061 - c.data = dump_ctx; 4015 + c.data = &dump_ctx; 4062 4016 return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); 4063 4017 } 4064 4018 ··· 5015 4975 return skb->len; 5016 4976 } 5017 4977 4978 + static int nf_tables_dump_obj_start(struct netlink_callback *cb) 4979 + { 4980 + const struct nlattr * const *nla = cb->data; 4981 + struct nft_obj_filter *filter = NULL; 4982 + 4983 + if (nla[NFTA_OBJ_TABLE] || nla[NFTA_OBJ_TYPE]) { 4984 + filter = kzalloc(sizeof(*filter), GFP_ATOMIC); 4985 + if (!filter) 4986 + return -ENOMEM; 4987 + 4988 + if (nla[NFTA_OBJ_TABLE]) { 4989 + filter->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC); 4990 + if (!filter->table) { 4991 + kfree(filter); 4992 + return -ENOMEM; 4993 + } 4994 + } 4995 + 4996 + if (nla[NFTA_OBJ_TYPE]) 4997 + filter->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE])); 4998 + } 4999 + 5000 + cb->data = filter; 5001 + return 0; 5002 + } 5003 + 5018 5004 static int nf_tables_dump_obj_done(struct netlink_callback *cb) 5019 5005 { 5020 5006 struct nft_obj_filter *filter = cb->data; ··· 5051 4985 } 5052 4986 5053 4987 return 0; 5054 - } 5055 - 5056 - static struct nft_obj_filter * 5057 - nft_obj_filter_alloc(const struct nlattr * const nla[]) 5058 - { 5059 - struct nft_obj_filter *filter; 5060 - 
5061 - filter = kzalloc(sizeof(*filter), GFP_ATOMIC); 5062 - if (!filter) 5063 - return ERR_PTR(-ENOMEM); 5064 - 5065 - if (nla[NFTA_OBJ_TABLE]) { 5066 - filter->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC); 5067 - if (!filter->table) { 5068 - kfree(filter); 5069 - return ERR_PTR(-ENOMEM); 5070 - } 5071 - } 5072 - if (nla[NFTA_OBJ_TYPE]) 5073 - filter->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE])); 5074 - 5075 - return filter; 5076 4988 } 5077 4989 5078 4990 /* called with rcu_read_lock held */ ··· 5071 5027 5072 5028 if (nlh->nlmsg_flags & NLM_F_DUMP) { 5073 5029 struct netlink_dump_control c = { 5030 + .start = nf_tables_dump_obj_start, 5074 5031 .dump = nf_tables_dump_obj, 5075 5032 .done = nf_tables_dump_obj_done, 5076 5033 .module = THIS_MODULE, 5034 + .data = (void *)nla, 5077 5035 }; 5078 5036 5079 - if (nla[NFTA_OBJ_TABLE] || 5080 - nla[NFTA_OBJ_TYPE]) { 5081 - struct nft_obj_filter *filter; 5082 - 5083 - filter = nft_obj_filter_alloc(nla); 5084 - if (IS_ERR(filter)) 5085 - return -ENOMEM; 5086 - 5087 - c.data = filter; 5088 - } 5089 5037 return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); 5090 5038 } 5091 5039 ··· 5356 5320 flowtable->ops[i].priv = &flowtable->data; 5357 5321 flowtable->ops[i].hook = flowtable->data.type->hook; 5358 5322 flowtable->ops[i].dev = dev_array[i]; 5359 - flowtable->dev_name[i] = kstrdup(dev_array[i]->name, 5360 - GFP_KERNEL); 5361 5323 } 5362 5324 5363 5325 return err; ··· 5513 5479 err6: 5514 5480 i = flowtable->ops_len; 5515 5481 err5: 5516 - for (k = i - 1; k >= 0; k--) { 5517 - kfree(flowtable->dev_name[k]); 5482 + for (k = i - 1; k >= 0; k--) 5518 5483 nf_unregister_net_hook(net, &flowtable->ops[k]); 5519 - } 5520 5484 5521 5485 kfree(flowtable->ops); 5522 5486 err4: ··· 5613 5581 goto nla_put_failure; 5614 5582 5615 5583 for (i = 0; i < flowtable->ops_len; i++) { 5616 - if (flowtable->dev_name[i][0] && 5617 - nla_put_string(skb, NFTA_DEVICE_NAME, 5618 - flowtable->dev_name[i])) 5584 + const struct net_device 
*dev = READ_ONCE(flowtable->ops[i].dev); 5585 + 5586 + if (dev && 5587 + nla_put_string(skb, NFTA_DEVICE_NAME, dev->name)) 5619 5588 goto nla_put_failure; 5620 5589 } 5621 5590 nla_nest_end(skb, nest_devs); ··· 5683 5650 return skb->len; 5684 5651 } 5685 5652 5653 + static int nf_tables_dump_flowtable_start(struct netlink_callback *cb) 5654 + { 5655 + const struct nlattr * const *nla = cb->data; 5656 + struct nft_flowtable_filter *filter = NULL; 5657 + 5658 + if (nla[NFTA_FLOWTABLE_TABLE]) { 5659 + filter = kzalloc(sizeof(*filter), GFP_ATOMIC); 5660 + if (!filter) 5661 + return -ENOMEM; 5662 + 5663 + filter->table = nla_strdup(nla[NFTA_FLOWTABLE_TABLE], 5664 + GFP_ATOMIC); 5665 + if (!filter->table) { 5666 + kfree(filter); 5667 + return -ENOMEM; 5668 + } 5669 + } 5670 + 5671 + cb->data = filter; 5672 + return 0; 5673 + } 5674 + 5686 5675 static int nf_tables_dump_flowtable_done(struct netlink_callback *cb) 5687 5676 { 5688 5677 struct nft_flowtable_filter *filter = cb->data; ··· 5716 5661 kfree(filter); 5717 5662 5718 5663 return 0; 5719 - } 5720 - 5721 - static struct nft_flowtable_filter * 5722 - nft_flowtable_filter_alloc(const struct nlattr * const nla[]) 5723 - { 5724 - struct nft_flowtable_filter *filter; 5725 - 5726 - filter = kzalloc(sizeof(*filter), GFP_ATOMIC); 5727 - if (!filter) 5728 - return ERR_PTR(-ENOMEM); 5729 - 5730 - if (nla[NFTA_FLOWTABLE_TABLE]) { 5731 - filter->table = nla_strdup(nla[NFTA_FLOWTABLE_TABLE], 5732 - GFP_ATOMIC); 5733 - if (!filter->table) { 5734 - kfree(filter); 5735 - return ERR_PTR(-ENOMEM); 5736 - } 5737 - } 5738 - return filter; 5739 5664 } 5740 5665 5741 5666 /* called with rcu_read_lock held */ ··· 5735 5700 5736 5701 if (nlh->nlmsg_flags & NLM_F_DUMP) { 5737 5702 struct netlink_dump_control c = { 5703 + .start = nf_tables_dump_flowtable_start, 5738 5704 .dump = nf_tables_dump_flowtable, 5739 5705 .done = nf_tables_dump_flowtable_done, 5740 5706 .module = THIS_MODULE, 5707 + .data = (void *)nla, 5741 5708 }; 5742 5709 5743 
- if (nla[NFTA_FLOWTABLE_TABLE]) { 5744 - struct nft_flowtable_filter *filter; 5745 - 5746 - filter = nft_flowtable_filter_alloc(nla); 5747 - if (IS_ERR(filter)) 5748 - return -ENOMEM; 5749 - 5750 - c.data = filter; 5751 - } 5752 5710 return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); 5753 5711 } 5754 5712 ··· 5811 5783 kfree(flowtable->name); 5812 5784 flowtable->data.type->free(&flowtable->data); 5813 5785 module_put(flowtable->data.type->owner); 5786 + kfree(flowtable); 5814 5787 } 5815 5788 5816 5789 static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net, ··· 5854 5825 continue; 5855 5826 5856 5827 nf_unregister_net_hook(dev_net(dev), &flowtable->ops[i]); 5857 - flowtable->dev_name[i][0] = '\0'; 5858 5828 flowtable->ops[i].dev = NULL; 5859 5829 break; 5860 5830 } ··· 6114 6086 case NFT_MSG_DELTABLE: 6115 6087 nf_tables_table_destroy(&trans->ctx); 6116 6088 break; 6089 + case NFT_MSG_NEWCHAIN: 6090 + kfree(nft_trans_chain_name(trans)); 6091 + break; 6117 6092 case NFT_MSG_DELCHAIN: 6118 6093 nf_tables_chain_destroy(&trans->ctx); 6119 6094 break; ··· 6346 6315 nf_tables_table_notify(&trans->ctx, NFT_MSG_DELTABLE); 6347 6316 break; 6348 6317 case NFT_MSG_NEWCHAIN: 6349 - if (nft_trans_chain_update(trans)) 6318 + if (nft_trans_chain_update(trans)) { 6350 6319 nft_chain_commit_update(trans); 6351 - else 6320 + nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN); 6321 + /* trans destroyed after rcu grace period */ 6322 + } else { 6352 6323 nft_clear(net, trans->ctx.chain); 6353 - 6354 - nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN); 6355 - nft_trans_destroy(trans); 6324 + nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN); 6325 + nft_trans_destroy(trans); 6326 + } 6356 6327 break; 6357 6328 case NFT_MSG_DELCHAIN: 6358 6329 nft_chain_del(trans->ctx.chain); ··· 6504 6471 case NFT_MSG_NEWCHAIN: 6505 6472 if (nft_trans_chain_update(trans)) { 6506 6473 free_percpu(nft_trans_chain_stats(trans)); 6507 - 6474 + 
kfree(nft_trans_chain_name(trans)); 6508 6475 nft_trans_destroy(trans); 6509 6476 } else { 6510 6477 trans->ctx.table->use--; ··· 6870 6837 err = nf_tables_check_loops(ctx, data->verdict.chain); 6871 6838 if (err < 0) 6872 6839 return err; 6873 - 6874 - if (ctx->chain->level + 1 > 6875 - data->verdict.chain->level) { 6876 - if (ctx->chain->level + 1 == NFT_JUMP_STACK_SIZE) 6877 - return -EMLINK; 6878 - data->verdict.chain->level = ctx->chain->level + 1; 6879 - } 6880 6840 } 6881 6841 6882 6842 return 0;
+3
net/netfilter/nft_immediate.c
··· 98 98 const struct nft_data **d) 99 99 { 100 100 const struct nft_immediate_expr *priv = nft_expr_priv(expr); 101 + struct nft_ctx *pctx = (struct nft_ctx *)ctx; 101 102 const struct nft_data *data; 102 103 int err; 103 104 ··· 110 109 switch (data->verdict.code) { 111 110 case NFT_JUMP: 112 111 case NFT_GOTO: 112 + pctx->level++; 113 113 err = nft_chain_validate(ctx, data->verdict.chain); 114 114 if (err < 0) 115 115 return err; 116 + pctx->level--; 116 117 break; 117 118 default: 118 119 break;
+11 -2
net/netfilter/nft_lookup.c
··· 155 155 struct nft_set_elem *elem) 156 156 { 157 157 const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); 158 + struct nft_ctx *pctx = (struct nft_ctx *)ctx; 158 159 const struct nft_data *data; 160 + int err; 159 161 160 162 if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) && 161 163 *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END) ··· 167 165 switch (data->verdict.code) { 168 166 case NFT_JUMP: 169 167 case NFT_GOTO: 170 - return nft_chain_validate(ctx, data->verdict.chain); 168 + pctx->level++; 169 + err = nft_chain_validate(ctx, data->verdict.chain); 170 + if (err < 0) 171 + return err; 172 + pctx->level--; 173 + break; 171 174 default: 172 - return 0; 175 + break; 173 176 } 177 + 178 + return 0; 174 179 } 175 180 176 181 static int nft_lookup_validate(const struct nft_ctx *ctx,
+1
net/netfilter/nft_set_hash.c
··· 387 387 struct nft_rhash *priv = nft_set_priv(set); 388 388 389 389 cancel_delayed_work_sync(&priv->gc_work); 390 + rcu_barrier(); 390 391 rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy, 391 392 (void *)set); 392 393 }
+5 -2
net/netfilter/nft_set_rbtree.c
··· 381 381 382 382 gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC); 383 383 if (!gcb) 384 - goto out; 384 + break; 385 385 386 386 atomic_dec(&set->nelems); 387 387 nft_set_gc_batch_add(gcb, rbe); ··· 390 390 rbe = rb_entry(prev, struct nft_rbtree_elem, node); 391 391 atomic_dec(&set->nelems); 392 392 nft_set_gc_batch_add(gcb, rbe); 393 + prev = NULL; 393 394 } 394 395 node = rb_next(node); 396 + if (!node) 397 + break; 395 398 } 396 - out: 397 399 if (gcb) { 398 400 for (i = 0; i < gcb->head.cnt; i++) { 399 401 rbe = gcb->elems[i]; ··· 442 440 struct rb_node *node; 443 441 444 442 cancel_delayed_work_sync(&priv->gc_work); 443 + rcu_barrier(); 445 444 while ((node = priv->root.rb_node) != NULL) { 446 445 rb_erase(node, &priv->root); 447 446 rbe = rb_entry(node, struct nft_rbtree_elem, node);
+3
net/tls/tls_sw.c
··· 646 646 return NULL; 647 647 } 648 648 649 + if (sk->sk_shutdown & RCV_SHUTDOWN) 650 + return NULL; 651 + 649 652 if (sock_flag(sk, SOCK_DONE)) 650 653 return NULL; 651 654
+16 -9
net/wireless/nl80211.c
··· 4409 4409 params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) | 4410 4410 BIT(NL80211_STA_FLAG_MFP) | 4411 4411 BIT(NL80211_STA_FLAG_AUTHORIZED); 4412 + break; 4412 4413 default: 4413 4414 return -EINVAL; 4414 4415 } ··· 14924 14923 EXPORT_SYMBOL(cfg80211_mgmt_tx_status); 14925 14924 14926 14925 static int __nl80211_rx_control_port(struct net_device *dev, 14927 - const u8 *buf, size_t len, 14928 - const u8 *addr, u16 proto, 14926 + struct sk_buff *skb, 14929 14927 bool unencrypted, gfp_t gfp) 14930 14928 { 14931 14929 struct wireless_dev *wdev = dev->ieee80211_ptr; 14932 14930 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); 14931 + struct ethhdr *ehdr = eth_hdr(skb); 14932 + const u8 *addr = ehdr->h_source; 14933 + u16 proto = be16_to_cpu(skb->protocol); 14933 14934 struct sk_buff *msg; 14934 14935 void *hdr; 14936 + struct nlattr *frame; 14937 + 14935 14938 u32 nlportid = READ_ONCE(wdev->conn_owner_nlportid); 14936 14939 14937 14940 if (!nlportid) 14938 14941 return -ENOENT; 14939 14942 14940 - msg = nlmsg_new(100 + len, gfp); 14943 + msg = nlmsg_new(100 + skb->len, gfp); 14941 14944 if (!msg) 14942 14945 return -ENOMEM; 14943 14946 ··· 14955 14950 nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || 14956 14951 nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), 14957 14952 NL80211_ATTR_PAD) || 14958 - nla_put(msg, NL80211_ATTR_FRAME, len, buf) || 14959 14953 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) || 14960 14954 nla_put_u16(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE, proto) || 14961 14955 (unencrypted && nla_put_flag(msg, 14962 14956 NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT))) 14963 14957 goto nla_put_failure; 14964 14958 14959 + frame = nla_reserve(msg, NL80211_ATTR_FRAME, skb->len); 14960 + if (!frame) 14961 + goto nla_put_failure; 14962 + 14963 + skb_copy_bits(skb, 0, nla_data(frame), skb->len); 14965 14964 genlmsg_end(msg, hdr); 14966 14965 14967 14966 return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid); 
··· 14976 14967 } 14977 14968 14978 14969 bool cfg80211_rx_control_port(struct net_device *dev, 14979 - const u8 *buf, size_t len, 14980 - const u8 *addr, u16 proto, bool unencrypted) 14970 + struct sk_buff *skb, bool unencrypted) 14981 14971 { 14982 14972 int ret; 14983 14973 14984 - trace_cfg80211_rx_control_port(dev, buf, len, addr, proto, unencrypted); 14985 - ret = __nl80211_rx_control_port(dev, buf, len, addr, proto, 14986 - unencrypted, GFP_ATOMIC); 14974 + trace_cfg80211_rx_control_port(dev, skb, unencrypted); 14975 + ret = __nl80211_rx_control_port(dev, skb, unencrypted, GFP_ATOMIC); 14987 14976 trace_cfg80211_return_bool(ret == 0); 14988 14977 return ret == 0; 14989 14978 }
+3 -25
net/wireless/reg.c
··· 2240 2240 * as some drivers used this to restore its orig_* reg domain. 2241 2241 */ 2242 2242 if (initiator == NL80211_REGDOM_SET_BY_CORE && 2243 - wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) 2243 + wiphy->regulatory_flags & REGULATORY_CUSTOM_REG && 2244 + !(wiphy->regulatory_flags & 2245 + REGULATORY_WIPHY_SELF_MANAGED)) 2244 2246 reg_call_notifier(wiphy, lr); 2245 2247 return; 2246 2248 } ··· 2789 2787 } 2790 2788 } 2791 2789 2792 - static bool reg_only_self_managed_wiphys(void) 2793 - { 2794 - struct cfg80211_registered_device *rdev; 2795 - struct wiphy *wiphy; 2796 - bool self_managed_found = false; 2797 - 2798 - ASSERT_RTNL(); 2799 - 2800 - list_for_each_entry(rdev, &cfg80211_rdev_list, list) { 2801 - wiphy = &rdev->wiphy; 2802 - if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) 2803 - self_managed_found = true; 2804 - else 2805 - return false; 2806 - } 2807 - 2808 - /* make sure at least one self-managed wiphy exists */ 2809 - return self_managed_found; 2810 - } 2811 - 2812 2790 /* 2813 2791 * Processes regulatory hints, this is all the NL80211_REGDOM_SET_BY_* 2814 2792 * Regulatory hints come on a first come first serve basis and we ··· 2821 2839 spin_unlock(&reg_requests_lock); 2822 2840 2823 2841 notify_self_managed_wiphys(reg_request); 2824 - if (reg_only_self_managed_wiphys()) { 2825 - reg_free_request(reg_request); 2826 - return; 2827 - } 2828 2842 2829 2843 reg_process_hint(reg_request); 2830 2844
+10 -8
net/wireless/trace.h
··· 2627 2627 ); 2628 2628 2629 2629 TRACE_EVENT(cfg80211_rx_control_port, 2630 - TP_PROTO(struct net_device *netdev, const u8 *buf, size_t len, 2631 - const u8 *addr, u16 proto, bool unencrypted), 2632 - TP_ARGS(netdev, buf, len, addr, proto, unencrypted), 2630 + TP_PROTO(struct net_device *netdev, struct sk_buff *skb, 2631 + bool unencrypted), 2632 + TP_ARGS(netdev, skb, unencrypted), 2633 2633 TP_STRUCT__entry( 2634 2634 NETDEV_ENTRY 2635 - MAC_ENTRY(addr) 2635 + __field(int, len) 2636 + MAC_ENTRY(from) 2636 2637 __field(u16, proto) 2637 2638 __field(bool, unencrypted) 2638 2639 ), 2639 2640 TP_fast_assign( 2640 2641 NETDEV_ASSIGN; 2641 - MAC_ASSIGN(addr, addr); 2642 - __entry->proto = proto; 2642 + __entry->len = skb->len; 2643 + MAC_ASSIGN(from, eth_hdr(skb)->h_source); 2644 + __entry->proto = be16_to_cpu(skb->protocol); 2643 2645 __entry->unencrypted = unencrypted; 2644 2646 ), 2645 - TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT " proto: 0x%x, unencrypted: %s", 2646 - NETDEV_PR_ARG, MAC_PR_ARG(addr), 2647 + TP_printk(NETDEV_PR_FMT ", len=%d, " MAC_PR_FMT ", proto: 0x%x, unencrypted: %s", 2648 + NETDEV_PR_ARG, __entry->len, MAC_PR_ARG(from), 2647 2649 __entry->proto, BOOL_TO_STR(__entry->unencrypted)) 2648 2650 ); 2649 2651
+8 -3
tools/bpf/bpftool/common.c
··· 217 217 int err; 218 218 int fd; 219 219 220 + if (argc < 3) { 221 + p_err("too few arguments, id ID and FILE path is required"); 222 + return -1; 223 + } else if (argc > 3) { 224 + p_err("too many arguments"); 225 + return -1; 226 + } 227 + 220 228 if (!is_prefix(*argv, "id")) { 221 229 p_err("expected 'id' got %s", *argv); 222 230 return -1; ··· 237 229 return -1; 238 230 } 239 231 NEXT_ARG(); 240 - 241 - if (argc != 1) 242 - usage(); 243 232 244 233 fd = get_fd_by_id(id); 245 234 if (fd < 0) {
+1 -1
tools/testing/selftests/bpf/Makefile
··· 105 105 106 106 BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris) 107 107 BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF) 108 - BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --version 2>&1 | grep LLVM) 108 + BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm') 109 109 110 110 ifneq ($(BTF_LLC_PROBE),) 111 111 ifneq ($(BTF_PAHOLE_PROBE),)
+40
tools/testing/selftests/bpf/test_verifier.c
··· 12005 12005 .prog_type = BPF_PROG_TYPE_XDP, 12006 12006 }, 12007 12007 { 12008 + "xadd/w check whether src/dst got mangled, 1", 12009 + .insns = { 12010 + BPF_MOV64_IMM(BPF_REG_0, 1), 12011 + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), 12012 + BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), 12013 + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 12014 + BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 12015 + BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 12016 + BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3), 12017 + BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2), 12018 + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), 12019 + BPF_EXIT_INSN(), 12020 + BPF_MOV64_IMM(BPF_REG_0, 42), 12021 + BPF_EXIT_INSN(), 12022 + }, 12023 + .result = ACCEPT, 12024 + .prog_type = BPF_PROG_TYPE_SCHED_CLS, 12025 + .retval = 3, 12026 + }, 12027 + { 12028 + "xadd/w check whether src/dst got mangled, 2", 12029 + .insns = { 12030 + BPF_MOV64_IMM(BPF_REG_0, 1), 12031 + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), 12032 + BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), 12033 + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8), 12034 + BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8), 12035 + BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8), 12036 + BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3), 12037 + BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2), 12038 + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8), 12039 + BPF_EXIT_INSN(), 12040 + BPF_MOV64_IMM(BPF_REG_0, 42), 12041 + BPF_EXIT_INSN(), 12042 + }, 12043 + .result = ACCEPT, 12044 + .prog_type = BPF_PROG_TYPE_SCHED_CLS, 12045 + .retval = 3, 12046 + }, 12047 + { 12008 12048 "bpf_get_stack return R0 within range", 12009 12049 .insns = { 12010 12050 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),