Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) Handle notifier registry failures properly in tun/tap driver, from
Tonghao Zhang.

2) Fix bpf verifier handling of subtraction bounds and add a testcase
for this, from Edward Cree.

3) Increase reset timeout in ftgmac100 driver, from Ben Herrenschmidt.

4) Fix use after free in prb_retire_rx_blk_timer_expired() in AF_PACKET,
from Cong Wang.

5) Fix SELinux regression due to recent UDP optimizations, from Paolo
Abeni.

6) We accidentally increment IPSTATS_MIB_FRAGFAILS in the ipv6 code
paths, fix from Stefano Brivio.

7) Fix some mem leaks in dccp, from Xin Long.

8) Adjust MDIO_BUS kconfig deps to avoid build errors, from Arnd
Bergmann.

9) Mac address length check and buffer size fixes from Cong Wang.

10) Don't leak sockets in ipv6 udp early demux, from Paolo Abeni.

11) Fix return value when copy_from_user() fails in
bpf_prog_get_info_by_fd(), from Daniel Borkmann.

12) Handle PHY_HALTED properly in phy library state machine, from
Florian Fainelli.

13) Fix OOPS in fib_sync_down_dev(), from Ido Schimmel.

14) Fix truesize calculation in virtio_net which led to performance
regressions, from Michael S Tsirkin.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (76 commits)
samples/bpf: fix bpf tunnel cleanup
udp6: fix jumbogram reception
ppp: Fix a scheduling-while-atomic bug in del_chan
Revert "net: bcmgenet: Remove init parameter from bcmgenet_mii_config"
virtio_net: fix truesize for mergeable buffers
mv643xx_eth: fix of_irq_to_resource() error check
MAINTAINERS: Add more files to the PHY LIBRARY section
ipv4: fib: Fix NULL pointer deref during fib_sync_down_dev()
net: phy: Correctly process PHY_HALTED in phy_stop_machine()
sunhme: fix up GREG_STAT and GREG_IMASK register offsets
bpf: fix bpf_prog_get_info_by_fd to dump correct xlated_prog_len
tcp: avoid bogus gcc-7 array-bounds warning
net: tc35815: fix spelling mistake: "Intterrupt" -> "Interrupt"
bpf: don't indicate success when copy_from_user fails
udp6: fix socket leak on early demux
net: thunderx: Fix BGX transmit stall due to underflow
Revert "vhost: cache used event for better performance"
team: use a larger struct for mac address
net: check dev->addr_len for dev_set_mac_address()
phy: bcm-ns-usb3: fix MDIO_BUS dependency
...

+639 -306
+11 -3
MAINTAINERS
··· 5090 5090 M: Florian Fainelli <f.fainelli@gmail.com> 5091 5091 L: netdev@vger.kernel.org 5092 5092 S: Maintained 5093 - F: include/linux/phy.h 5094 - F: include/linux/phy_fixed.h 5095 - F: drivers/net/phy/ 5093 + F: Documentation/ABI/testing/sysfs-bus-mdio 5094 + F: Documentation/devicetree/bindings/net/mdio* 5096 5095 F: Documentation/networking/phy.txt 5096 + F: drivers/net/phy/ 5097 5097 F: drivers/of/of_mdio.c 5098 5098 F: drivers/of/of_net.c 5099 + F: include/linux/*mdio*.h 5100 + F: include/linux/of_net.h 5101 + F: include/linux/phy.h 5102 + F: include/linux/phy_fixed.h 5103 + F: include/linux/platform_data/mdio-gpio.h 5104 + F: include/trace/events/mdio.h 5105 + F: include/uapi/linux/mdio.h 5106 + F: include/uapi/linux/mii.h 5099 5107 5100 5108 EXT2 FILE SYSTEM 5101 5109 M: Jan Kara <jack@suse.com>
+2
drivers/net/bonding/bond_main.c
··· 2050 2050 continue; 2051 2051 2052 2052 bond_propose_link_state(slave, BOND_LINK_FAIL); 2053 + commit++; 2053 2054 slave->delay = bond->params.downdelay; 2054 2055 if (slave->delay) { 2055 2056 netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n", ··· 2089 2088 continue; 2090 2089 2091 2090 bond_propose_link_state(slave, BOND_LINK_BACK); 2091 + commit++; 2092 2092 slave->delay = bond->params.updelay; 2093 2093 2094 2094 if (slave->delay) {
+4 -5
drivers/net/ethernet/aurora/nb8800.c
··· 609 609 mac_mode |= HALF_DUPLEX; 610 610 611 611 if (gigabit) { 612 - if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII) 612 + if (phy_interface_is_rgmii(dev->phydev)) 613 613 mac_mode |= RGMII_MODE; 614 614 615 615 mac_mode |= GMAC_MODE; ··· 1268 1268 break; 1269 1269 1270 1270 case PHY_INTERFACE_MODE_RGMII: 1271 - pad_mode = PAD_MODE_RGMII; 1272 - break; 1273 - 1271 + case PHY_INTERFACE_MODE_RGMII_ID: 1272 + case PHY_INTERFACE_MODE_RGMII_RXID: 1274 1273 case PHY_INTERFACE_MODE_RGMII_TXID: 1275 - pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY; 1274 + pad_mode = PAD_MODE_RGMII; 1276 1275 break; 1277 1276 1278 1277 default:
+1 -1
drivers/net/ethernet/broadcom/genet/bcmgenet.c
··· 3669 3669 3670 3670 phy_init_hw(priv->phydev); 3671 3671 /* Speed settings must be restored */ 3672 - bcmgenet_mii_config(priv->dev); 3672 + bcmgenet_mii_config(priv->dev, false); 3673 3673 3674 3674 /* disable ethernet MAC while updating its registers */ 3675 3675 umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
+1 -1
drivers/net/ethernet/broadcom/genet/bcmgenet.h
··· 698 698 699 699 /* MDIO routines */ 700 700 int bcmgenet_mii_init(struct net_device *dev); 701 - int bcmgenet_mii_config(struct net_device *dev); 701 + int bcmgenet_mii_config(struct net_device *dev, bool init); 702 702 int bcmgenet_mii_probe(struct net_device *dev); 703 703 void bcmgenet_mii_exit(struct net_device *dev); 704 704 void bcmgenet_mii_reset(struct net_device *dev);
+4 -3
drivers/net/ethernet/broadcom/genet/bcmmii.c
··· 238 238 bcmgenet_fixed_phy_link_update); 239 239 } 240 240 241 - int bcmgenet_mii_config(struct net_device *dev) 241 + int bcmgenet_mii_config(struct net_device *dev, bool init) 242 242 { 243 243 struct bcmgenet_priv *priv = netdev_priv(dev); 244 244 struct phy_device *phydev = priv->phydev; ··· 327 327 bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); 328 328 } 329 329 330 - dev_info_once(kdev, "configuring instance for %s\n", phy_name); 330 + if (init) 331 + dev_info(kdev, "configuring instance for %s\n", phy_name); 331 332 332 333 return 0; 333 334 } ··· 376 375 * PHY speed which is needed for bcmgenet_mii_config() to configure 377 376 * things appropriately. 378 377 */ 379 - ret = bcmgenet_mii_config(dev); 378 + ret = bcmgenet_mii_config(dev, true); 380 379 if (ret) { 381 380 phy_disconnect(priv->phydev); 382 381 return ret;
+22 -5
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
··· 292 292 u64 cmr_cfg; 293 293 u64 port_cfg = 0; 294 294 u64 misc_ctl = 0; 295 + bool tx_en, rx_en; 295 296 296 297 cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG); 297 - cmr_cfg &= ~CMR_EN; 298 + tx_en = cmr_cfg & CMR_PKT_TX_EN; 299 + rx_en = cmr_cfg & CMR_PKT_RX_EN; 300 + cmr_cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN); 298 301 bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg); 302 + 303 + /* Wait for BGX RX to be idle */ 304 + if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, 305 + GMI_PORT_CFG_RX_IDLE, false)) { 306 + dev_err(&bgx->pdev->dev, "BGX%d LMAC%d GMI RX not idle\n", 307 + bgx->bgx_id, lmac->lmacid); 308 + return; 309 + } 310 + 311 + /* Wait for BGX TX to be idle */ 312 + if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, 313 + GMI_PORT_CFG_TX_IDLE, false)) { 314 + dev_err(&bgx->pdev->dev, "BGX%d LMAC%d GMI TX not idle\n", 315 + bgx->bgx_id, lmac->lmacid); 316 + return; 317 + } 299 318 300 319 port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG); 301 320 misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL); ··· 366 347 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl); 367 348 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg); 368 349 369 - port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG); 370 - 371 - /* Re-enable lmac */ 372 - cmr_cfg |= CMR_EN; 350 + /* Restore CMR config settings */ 351 + cmr_cfg |= (rx_en ? CMR_PKT_RX_EN : 0) | (tx_en ? CMR_PKT_TX_EN : 0); 373 352 bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg); 374 353 375 354 if (bgx->is_rgx && (cmr_cfg & (CMR_PKT_RX_EN | CMR_PKT_TX_EN)))
+2
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
··· 170 170 #define GMI_PORT_CFG_DUPLEX BIT_ULL(2) 171 171 #define GMI_PORT_CFG_SLOT_TIME BIT_ULL(3) 172 172 #define GMI_PORT_CFG_SPEED_MSB BIT_ULL(8) 173 + #define GMI_PORT_CFG_RX_IDLE BIT_ULL(12) 174 + #define GMI_PORT_CFG_TX_IDLE BIT_ULL(13) 173 175 #define BGX_GMP_GMI_RXX_JABBER 0x38038 174 176 #define BGX_GMP_GMI_TXX_THRESH 0x38210 175 177 #define BGX_GMP_GMI_TXX_APPEND 0x38218
+4 -3
drivers/net/ethernet/faraday/ftgmac100.c
··· 125 125 iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR); 126 126 iowrite32(maccr | FTGMAC100_MACCR_SW_RST, 127 127 priv->base + FTGMAC100_OFFSET_MACCR); 128 - for (i = 0; i < 50; i++) { 128 + for (i = 0; i < 200; i++) { 129 129 unsigned int maccr; 130 130 131 131 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR); ··· 392 392 struct net_device *netdev = priv->netdev; 393 393 struct sk_buff *skb; 394 394 dma_addr_t map; 395 - int err; 395 + int err = 0; 396 396 397 397 skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE); 398 398 if (unlikely(!skb)) { ··· 428 428 else 429 429 rxdes->rxdes0 = 0; 430 430 431 - return 0; 431 + return err; 432 432 } 433 433 434 434 static unsigned int ftgmac100_next_rx_pointer(struct ftgmac100 *priv, ··· 1682 1682 priv->mii_bus->name = "ftgmac100_mdio"; 1683 1683 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d", 1684 1684 pdev->name, pdev->id); 1685 + priv->mii_bus->parent = priv->dev; 1685 1686 priv->mii_bus->priv = priv->netdev; 1686 1687 priv->mii_bus->read = ftgmac100_mdiobus_read; 1687 1688 priv->mii_bus->write = ftgmac100_mdiobus_write;
+1 -1
drivers/net/ethernet/marvell/mv643xx_eth.c
··· 2734 2734 ppd.shared = pdev; 2735 2735 2736 2736 memset(&res, 0, sizeof(res)); 2737 - if (!of_irq_to_resource(pnp, 0, &res)) { 2737 + if (of_irq_to_resource(pnp, 0, &res) <= 0) { 2738 2738 dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name); 2739 2739 return -EINVAL; 2740 2740 }
+5
drivers/net/ethernet/mediatek/mtk_eth_soc.c
··· 22 22 #include <linux/if_vlan.h> 23 23 #include <linux/reset.h> 24 24 #include <linux/tcp.h> 25 + #include <linux/interrupt.h> 25 26 26 27 #include "mtk_eth_soc.h" 27 28 ··· 947 946 mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) & 948 947 RX_DMA_FPORT_MASK; 949 948 mac--; 949 + 950 + if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT || 951 + !eth->netdev[mac])) 952 + goto release_desc; 950 953 951 954 netdev = eth->netdev[mac]; 952 955
+21 -4
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
··· 786 786 mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); 787 787 } 788 788 789 + static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg); 790 + static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, 791 + struct mlx5_cmd_msg *msg); 792 + 789 793 static void cmd_work_handler(struct work_struct *work) 790 794 { 791 795 struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); ··· 800 796 struct semaphore *sem; 801 797 unsigned long flags; 802 798 bool poll_cmd = ent->polling; 799 + int alloc_ret; 803 800 804 801 805 802 sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; 806 803 down(sem); 807 804 if (!ent->page_queue) { 808 - ent->idx = alloc_ent(cmd); 809 - if (ent->idx < 0) { 805 + alloc_ret = alloc_ent(cmd); 806 + if (alloc_ret < 0) { 810 807 mlx5_core_err(dev, "failed to allocate command entry\n"); 808 + if (ent->callback) { 809 + ent->callback(-EAGAIN, ent->context); 810 + mlx5_free_cmd_msg(dev, ent->out); 811 + free_msg(dev, ent->in); 812 + free_cmd(ent); 813 + } else { 814 + ent->ret = -EAGAIN; 815 + complete(&ent->done); 816 + } 811 817 up(sem); 812 818 return; 813 819 } 820 + ent->idx = alloc_ret; 814 821 } else { 815 822 ent->idx = cmd->max_reg_cmds; 816 823 spin_lock_irqsave(&cmd->alloc_lock, flags); ··· 982 967 983 968 err = wait_func(dev, ent); 984 969 if (err == -ETIMEDOUT) 985 - goto out_free; 970 + goto out; 986 971 987 972 ds = ent->ts2 - ent->ts1; 988 973 op = MLX5_GET(mbox_in, in->first.data, opcode); ··· 1445 1430 mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n", 1446 1431 ent->idx); 1447 1432 free_ent(cmd, ent->idx); 1433 + free_cmd(ent); 1448 1434 } 1449 1435 continue; 1450 1436 } ··· 1504 1488 free_msg(dev, ent->in); 1505 1489 1506 1490 err = err ? err : ent->status; 1507 - free_cmd(ent); 1491 + if (!forced) 1492 + free_cmd(ent); 1508 1493 callback(err, context); 1509 1494 } else { 1510 1495 complete(&ent->done);
+9 -1
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 266 266 }; 267 267 #endif 268 268 269 + #define MAX_PIN_NUM 8 270 + struct mlx5e_pps { 271 + u8 pin_caps[MAX_PIN_NUM]; 272 + struct work_struct out_work; 273 + u64 start[MAX_PIN_NUM]; 274 + u8 enabled; 275 + }; 276 + 269 277 struct mlx5e_tstamp { 270 278 rwlock_t lock; 271 279 struct cyclecounter cycles; ··· 285 277 struct mlx5_core_dev *mdev; 286 278 struct ptp_clock *ptp; 287 279 struct ptp_clock_info ptp_info; 288 - u8 *pps_pin_caps; 280 + struct mlx5e_pps pps_info; 289 281 }; 290 282 291 283 enum {
+159 -63
drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
··· 53 53 MLX5E_EVENT_MODE_ONCE_TILL_ARM = 0x2, 54 54 }; 55 55 56 + enum { 57 + MLX5E_MTPPS_FS_ENABLE = BIT(0x0), 58 + MLX5E_MTPPS_FS_PATTERN = BIT(0x2), 59 + MLX5E_MTPPS_FS_PIN_MODE = BIT(0x3), 60 + MLX5E_MTPPS_FS_TIME_STAMP = BIT(0x4), 61 + MLX5E_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5), 62 + MLX5E_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7), 63 + }; 64 + 56 65 void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp, 57 66 struct skb_shared_hwtstamps *hwts) 58 67 { ··· 82 73 return mlx5_read_internal_timer(tstamp->mdev) & cc->mask; 83 74 } 84 75 76 + static void mlx5e_pps_out(struct work_struct *work) 77 + { 78 + struct mlx5e_pps *pps_info = container_of(work, struct mlx5e_pps, 79 + out_work); 80 + struct mlx5e_tstamp *tstamp = container_of(pps_info, struct mlx5e_tstamp, 81 + pps_info); 82 + u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; 83 + unsigned long flags; 84 + int i; 85 + 86 + for (i = 0; i < tstamp->ptp_info.n_pins; i++) { 87 + u64 tstart; 88 + 89 + write_lock_irqsave(&tstamp->lock, flags); 90 + tstart = tstamp->pps_info.start[i]; 91 + tstamp->pps_info.start[i] = 0; 92 + write_unlock_irqrestore(&tstamp->lock, flags); 93 + if (!tstart) 94 + continue; 95 + 96 + MLX5_SET(mtpps_reg, in, pin, i); 97 + MLX5_SET64(mtpps_reg, in, time_stamp, tstart); 98 + MLX5_SET(mtpps_reg, in, field_select, MLX5E_MTPPS_FS_TIME_STAMP); 99 + mlx5_set_mtpps(tstamp->mdev, in, sizeof(in)); 100 + } 101 + } 102 + 85 103 static void mlx5e_timestamp_overflow(struct work_struct *work) 86 104 { 87 105 struct delayed_work *dwork = to_delayed_work(work); 88 106 struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp, 89 107 overflow_work); 108 + struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp); 90 109 unsigned long flags; 91 110 92 111 write_lock_irqsave(&tstamp->lock, flags); 93 112 timecounter_read(&tstamp->clock); 94 113 write_unlock_irqrestore(&tstamp->lock, flags); 95 - schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period); 114 + 
queue_delayed_work(priv->wq, &tstamp->overflow_work, 115 + msecs_to_jiffies(tstamp->overflow_period * 1000)); 96 116 } 97 117 98 118 int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) ··· 251 213 int neg_adj = 0; 252 214 struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, 253 215 ptp_info); 254 - struct mlx5e_priv *priv = 255 - container_of(tstamp, struct mlx5e_priv, tstamp); 256 - 257 - if (MLX5_CAP_GEN(priv->mdev, pps_modify)) { 258 - u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; 259 - 260 - /* For future use need to add a loop for finding all 1PPS out pins */ 261 - MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT); 262 - MLX5_SET(mtpps_reg, in, out_periodic_adjustment, delta & 0xFFFF); 263 - 264 - mlx5_set_mtpps(priv->mdev, in, sizeof(in)); 265 - } 266 216 267 217 if (delta < 0) { 268 218 neg_adj = 1; ··· 279 253 struct mlx5e_priv *priv = 280 254 container_of(tstamp, struct mlx5e_priv, tstamp); 281 255 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; 256 + u32 field_select = 0; 257 + u8 pin_mode = 0; 282 258 u8 pattern = 0; 283 259 int pin = -1; 284 260 int err = 0; 285 261 286 - if (!MLX5_CAP_GEN(priv->mdev, pps) || 287 - !MLX5_CAP_GEN(priv->mdev, pps_modify)) 262 + if (!MLX5_PPS_CAP(priv->mdev)) 288 263 return -EOPNOTSUPP; 289 264 290 265 if (rq->extts.index >= tstamp->ptp_info.n_pins) ··· 295 268 pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index); 296 269 if (pin < 0) 297 270 return -EBUSY; 271 + pin_mode = MLX5E_PIN_MODE_IN; 272 + pattern = !!(rq->extts.flags & PTP_FALLING_EDGE); 273 + field_select = MLX5E_MTPPS_FS_PIN_MODE | 274 + MLX5E_MTPPS_FS_PATTERN | 275 + MLX5E_MTPPS_FS_ENABLE; 276 + } else { 277 + pin = rq->extts.index; 278 + field_select = MLX5E_MTPPS_FS_ENABLE; 298 279 } 299 280 300 - if (rq->extts.flags & PTP_FALLING_EDGE) 301 - pattern = 1; 302 - 303 281 MLX5_SET(mtpps_reg, in, pin, pin); 304 - MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_IN); 282 + MLX5_SET(mtpps_reg, in, pin_mode, pin_mode); 305 283 
MLX5_SET(mtpps_reg, in, pattern, pattern); 306 284 MLX5_SET(mtpps_reg, in, enable, on); 285 + MLX5_SET(mtpps_reg, in, field_select, field_select); 307 286 308 287 err = mlx5_set_mtpps(priv->mdev, in, sizeof(in)); 309 288 if (err) ··· 328 295 struct mlx5e_priv *priv = 329 296 container_of(tstamp, struct mlx5e_priv, tstamp); 330 297 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; 331 - u64 nsec_now, nsec_delta, time_stamp; 298 + u64 nsec_now, nsec_delta, time_stamp = 0; 332 299 u64 cycles_now, cycles_delta; 333 300 struct timespec64 ts; 334 301 unsigned long flags; 302 + u32 field_select = 0; 303 + u8 pin_mode = 0; 304 + u8 pattern = 0; 335 305 int pin = -1; 306 + int err = 0; 336 307 s64 ns; 337 308 338 - if (!MLX5_CAP_GEN(priv->mdev, pps_modify)) 309 + if (!MLX5_PPS_CAP(priv->mdev)) 339 310 return -EOPNOTSUPP; 340 311 341 312 if (rq->perout.index >= tstamp->ptp_info.n_pins) ··· 350 313 rq->perout.index); 351 314 if (pin < 0) 352 315 return -EBUSY; 353 - } 354 316 355 - ts.tv_sec = rq->perout.period.sec; 356 - ts.tv_nsec = rq->perout.period.nsec; 357 - ns = timespec64_to_ns(&ts); 358 - if (on) 317 + pin_mode = MLX5E_PIN_MODE_OUT; 318 + pattern = MLX5E_OUT_PATTERN_PERIODIC; 319 + ts.tv_sec = rq->perout.period.sec; 320 + ts.tv_nsec = rq->perout.period.nsec; 321 + ns = timespec64_to_ns(&ts); 322 + 359 323 if ((ns >> 1) != 500000000LL) 360 324 return -EINVAL; 361 - ts.tv_sec = rq->perout.start.sec; 362 - ts.tv_nsec = rq->perout.start.nsec; 363 - ns = timespec64_to_ns(&ts); 364 - cycles_now = mlx5_read_internal_timer(tstamp->mdev); 365 - write_lock_irqsave(&tstamp->lock, flags); 366 - nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now); 367 - nsec_delta = ns - nsec_now; 368 - cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift, 369 - tstamp->cycles.mult); 370 - write_unlock_irqrestore(&tstamp->lock, flags); 371 - time_stamp = cycles_now + cycles_delta; 325 + 326 + ts.tv_sec = rq->perout.start.sec; 327 + ts.tv_nsec = rq->perout.start.nsec; 328 + ns = 
timespec64_to_ns(&ts); 329 + cycles_now = mlx5_read_internal_timer(tstamp->mdev); 330 + write_lock_irqsave(&tstamp->lock, flags); 331 + nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now); 332 + nsec_delta = ns - nsec_now; 333 + cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift, 334 + tstamp->cycles.mult); 335 + write_unlock_irqrestore(&tstamp->lock, flags); 336 + time_stamp = cycles_now + cycles_delta; 337 + field_select = MLX5E_MTPPS_FS_PIN_MODE | 338 + MLX5E_MTPPS_FS_PATTERN | 339 + MLX5E_MTPPS_FS_ENABLE | 340 + MLX5E_MTPPS_FS_TIME_STAMP; 341 + } else { 342 + pin = rq->perout.index; 343 + field_select = MLX5E_MTPPS_FS_ENABLE; 344 + } 345 + 372 346 MLX5_SET(mtpps_reg, in, pin, pin); 373 - MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT); 374 - MLX5_SET(mtpps_reg, in, pattern, MLX5E_OUT_PATTERN_PERIODIC); 347 + MLX5_SET(mtpps_reg, in, pin_mode, pin_mode); 348 + MLX5_SET(mtpps_reg, in, pattern, pattern); 375 349 MLX5_SET(mtpps_reg, in, enable, on); 376 350 MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp); 351 + MLX5_SET(mtpps_reg, in, field_select, field_select); 377 352 378 - return mlx5_set_mtpps(priv->mdev, in, sizeof(in)); 353 + err = mlx5_set_mtpps(priv->mdev, in, sizeof(in)); 354 + if (err) 355 + return err; 356 + 357 + return mlx5_set_mtppse(priv->mdev, pin, 0, 358 + MLX5E_EVENT_MODE_REPETETIVE & on); 359 + } 360 + 361 + static int mlx5e_pps_configure(struct ptp_clock_info *ptp, 362 + struct ptp_clock_request *rq, 363 + int on) 364 + { 365 + struct mlx5e_tstamp *tstamp = 366 + container_of(ptp, struct mlx5e_tstamp, ptp_info); 367 + 368 + tstamp->pps_info.enabled = !!on; 369 + return 0; 379 370 } 380 371 381 372 static int mlx5e_ptp_enable(struct ptp_clock_info *ptp, ··· 415 350 return mlx5e_extts_configure(ptp, rq, on); 416 351 case PTP_CLK_REQ_PEROUT: 417 352 return mlx5e_perout_configure(ptp, rq, on); 353 + case PTP_CLK_REQ_PPS: 354 + return mlx5e_pps_configure(ptp, rq, on); 418 355 default: 419 356 return -EOPNOTSUPP; 420 357 } ··· 
462 395 return -ENOMEM; 463 396 tstamp->ptp_info.enable = mlx5e_ptp_enable; 464 397 tstamp->ptp_info.verify = mlx5e_ptp_verify; 398 + tstamp->ptp_info.pps = 1; 465 399 466 400 for (i = 0; i < tstamp->ptp_info.n_pins; i++) { 467 401 snprintf(tstamp->ptp_info.pin_config[i].name, ··· 490 422 tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out, 491 423 cap_max_num_of_pps_out_pins); 492 424 493 - tstamp->pps_pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode); 494 - tstamp->pps_pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode); 495 - tstamp->pps_pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode); 496 - tstamp->pps_pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode); 497 - tstamp->pps_pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode); 498 - tstamp->pps_pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode); 499 - tstamp->pps_pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode); 500 - tstamp->pps_pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode); 425 + tstamp->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode); 426 + tstamp->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode); 427 + tstamp->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode); 428 + tstamp->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode); 429 + tstamp->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode); 430 + tstamp->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode); 431 + tstamp->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode); 432 + tstamp->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode); 501 433 } 502 434 503 435 void mlx5e_pps_event_handler(struct mlx5e_priv *priv, 504 436 struct ptp_clock_event *event) 505 437 { 438 + struct net_device *netdev = priv->netdev; 506 439 struct mlx5e_tstamp *tstamp = &priv->tstamp; 440 + struct timespec64 ts; 441 + u64 nsec_now, nsec_delta; 442 + u64 cycles_now, cycles_delta; 443 + int pin = event->index; 444 + s64 ns; 445 + unsigned 
long flags; 507 446 508 - ptp_clock_event(tstamp->ptp, event); 447 + switch (tstamp->ptp_info.pin_config[pin].func) { 448 + case PTP_PF_EXTTS: 449 + if (tstamp->pps_info.enabled) { 450 + event->type = PTP_CLOCK_PPSUSR; 451 + event->pps_times.ts_real = ns_to_timespec64(event->timestamp); 452 + } else { 453 + event->type = PTP_CLOCK_EXTTS; 454 + } 455 + ptp_clock_event(tstamp->ptp, event); 456 + break; 457 + case PTP_PF_PEROUT: 458 + mlx5e_ptp_gettime(&tstamp->ptp_info, &ts); 459 + cycles_now = mlx5_read_internal_timer(tstamp->mdev); 460 + ts.tv_sec += 1; 461 + ts.tv_nsec = 0; 462 + ns = timespec64_to_ns(&ts); 463 + write_lock_irqsave(&tstamp->lock, flags); 464 + nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now); 465 + nsec_delta = ns - nsec_now; 466 + cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift, 467 + tstamp->cycles.mult); 468 + tstamp->pps_info.start[pin] = cycles_now + cycles_delta; 469 + queue_work(priv->wq, &tstamp->pps_info.out_work); 470 + write_unlock_irqrestore(&tstamp->lock, flags); 471 + break; 472 + default: 473 + netdev_err(netdev, "%s: Unhandled event\n", __func__); 474 + } 509 475 } 510 476 511 477 void mlx5e_timestamp_init(struct mlx5e_priv *priv) ··· 575 473 do_div(ns, NSEC_PER_SEC / 2 / HZ); 576 474 tstamp->overflow_period = ns; 577 475 476 + INIT_WORK(&tstamp->pps_info.out_work, mlx5e_pps_out); 578 477 INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow); 579 478 if (tstamp->overflow_period) 580 - schedule_delayed_work(&tstamp->overflow_work, 0); 479 + queue_delayed_work(priv->wq, &tstamp->overflow_work, 0); 581 480 else 582 481 mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n"); 583 482 ··· 587 484 snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp"); 588 485 589 486 /* Initialize 1PPS data structures */ 590 - #define MAX_PIN_NUM 8 591 - tstamp->pps_pin_caps = kzalloc(sizeof(u8) * MAX_PIN_NUM, GFP_KERNEL); 592 - if (tstamp->pps_pin_caps) { 593 - if (MLX5_CAP_GEN(priv->mdev, 
pps)) 594 - mlx5e_get_pps_caps(priv, tstamp); 595 - if (tstamp->ptp_info.n_pins) 596 - mlx5e_init_pin_config(tstamp); 597 - } else { 598 - mlx5_core_warn(priv->mdev, "1PPS initialization failed\n"); 599 - } 487 + if (MLX5_PPS_CAP(priv->mdev)) 488 + mlx5e_get_pps_caps(priv, tstamp); 489 + if (tstamp->ptp_info.n_pins) 490 + mlx5e_init_pin_config(tstamp); 600 491 601 492 tstamp->ptp = ptp_clock_register(&tstamp->ptp_info, 602 493 &priv->mdev->pdev->dev); ··· 613 516 priv->tstamp.ptp = NULL; 614 517 } 615 518 616 - kfree(tstamp->pps_pin_caps); 617 - kfree(tstamp->ptp_info.pin_config); 618 - 519 + cancel_work_sync(&tstamp->pps_info.out_work); 619 520 cancel_delayed_work_sync(&tstamp->overflow_work); 521 + kfree(tstamp->ptp_info.pin_config); 620 522 }
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
··· 276 276 277 277 static bool outer_header_zero(u32 *match_criteria) 278 278 { 279 - int size = MLX5_ST_SZ_BYTES(fte_match_param); 279 + int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers); 280 280 char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria, 281 281 outer_headers); 282 282 ··· 320 320 321 321 spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria)); 322 322 flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; 323 - rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, 1); 323 + rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, dst ? 1 : 0); 324 324 if (IS_ERR(rule)) { 325 325 err = PTR_ERR(rule); 326 326 netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
-1
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 377 377 break; 378 378 case MLX5_DEV_EVENT_PPS: 379 379 eqe = (struct mlx5_eqe *)param; 380 - ptp_event.type = PTP_CLOCK_EXTTS; 381 380 ptp_event.index = eqe->data.pps.pin; 382 381 ptp_event.timestamp = 383 382 timecounter_cyc2time(&priv->tstamp.clock,
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/eq.c
··· 698 698 else 699 699 mlx5_core_dbg(dev, "port_module_event is not set\n"); 700 700 701 - if (MLX5_CAP_GEN(dev, pps)) 701 + if (MLX5_PPS_CAP(dev)) 702 702 async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT); 703 703 704 704 if (MLX5_CAP_GEN(dev, fpga))
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
··· 1668 1668 int i; 1669 1669 1670 1670 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || 1671 - MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) 1671 + MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH || 1672 + esw->mode == SRIOV_NONE) 1672 1673 return; 1673 1674 1674 1675 esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
+11 -5
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
··· 178 178 179 179 static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp) 180 180 { 181 - mlx5_fs_remove_rx_underlay_qpn(mdev, qp->qpn); 182 - 183 181 mlx5_core_destroy_qp(mdev, qp); 184 182 } 185 183 ··· 191 193 mlx5_core_warn(priv->mdev, "create underlay QP failed, %d\n", err); 192 194 return err; 193 195 } 194 - 195 - mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn); 196 196 197 197 err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]); 198 198 if (err) { ··· 249 253 250 254 static int mlx5i_init_rx(struct mlx5e_priv *priv) 251 255 { 256 + struct mlx5i_priv *ipriv = priv->ppriv; 252 257 int err; 253 258 254 259 err = mlx5e_create_indirect_rqt(priv); ··· 268 271 if (err) 269 272 goto err_destroy_indirect_tirs; 270 273 271 - err = mlx5i_create_flow_steering(priv); 274 + err = mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn); 272 275 if (err) 273 276 goto err_destroy_direct_tirs; 274 277 278 + err = mlx5i_create_flow_steering(priv); 279 + if (err) 280 + goto err_remove_rx_underlay_qpn; 281 + 275 282 return 0; 276 283 284 + err_remove_rx_underlay_qpn: 285 + mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn); 277 286 err_destroy_direct_tirs: 278 287 mlx5e_destroy_direct_tirs(priv); 279 288 err_destroy_indirect_tirs: ··· 293 290 294 291 static void mlx5i_cleanup_rx(struct mlx5e_priv *priv) 295 292 { 293 + struct mlx5i_priv *ipriv = priv->ppriv; 294 + 295 + mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn); 296 296 mlx5i_destroy_flow_steering(priv); 297 297 mlx5e_destroy_direct_tirs(priv); 298 298 mlx5e_destroy_indirect_tirs(priv);
+10 -15
drivers/net/ethernet/mellanox/mlx5/core/lag.c
··· 162 162 static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, 163 163 u8 *port1, u8 *port2) 164 164 { 165 - if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) { 166 - if (tracker->netdev_state[0].tx_enabled) { 167 - *port1 = 1; 168 - *port2 = 1; 169 - } else { 170 - *port1 = 2; 171 - *port2 = 2; 172 - } 173 - } else { 174 - *port1 = 1; 175 - *port2 = 2; 176 - if (!tracker->netdev_state[0].link_up) 177 - *port1 = 2; 178 - else if (!tracker->netdev_state[1].link_up) 179 - *port2 = 1; 165 + *port1 = 1; 166 + *port2 = 2; 167 + if (!tracker->netdev_state[0].tx_enabled || 168 + !tracker->netdev_state[0].link_up) { 169 + *port1 = 2; 170 + return; 180 171 } 172 + 173 + if (!tracker->netdev_state[1].tx_enabled || 174 + !tracker->netdev_state[1].link_up) 175 + *port2 = 1; 181 176 } 182 177 183 178 static void mlx5_activate_lag(struct mlx5_lag *ldev,
+5
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
··· 154 154 int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode); 155 155 int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode); 156 156 157 + #define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \ 158 + MLX5_CAP_GEN((mdev), pps_modify) && \ 159 + MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \ 160 + MLX5_CAP_MCAM_FEATURE((mdev), mtpps_enh_out_per_adj)) 161 + 157 162 int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw); 158 163 159 164 void mlx5e_init(void);
+5
drivers/net/ethernet/mellanox/mlx5/core/sriov.c
··· 88 88 int vf; 89 89 90 90 if (!sriov->enabled_vfs) 91 + #ifdef CONFIG_MLX5_CORE_EN 92 + goto disable_sriov_resources; 93 + #else 91 94 return; 95 + #endif 92 96 93 97 for (vf = 0; vf < sriov->num_vfs; vf++) { 94 98 if (!sriov->vfs_ctx[vf].enabled) ··· 107 103 } 108 104 109 105 #ifdef CONFIG_MLX5_CORE_EN 106 + disable_sriov_resources: 110 107 mlx5_eswitch_disable_sriov(dev->priv.eswitch); 111 108 #endif 112 109
+7
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
··· 1512 1512 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp, 1513 1513 struct mlxsw_sp_fib_entry *fib_entry); 1514 1514 1515 + static bool 1516 + mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node, 1517 + const struct mlxsw_sp_fib_entry *fib_entry); 1518 + 1515 1519 static int 1516 1520 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp, 1517 1521 struct mlxsw_sp_nexthop_group *nh_grp) ··· 1524 1520 int err; 1525 1521 1526 1522 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) { 1523 + if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node, 1524 + fib_entry)) 1525 + continue; 1527 1526 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry); 1528 1527 if (err) 1529 1528 return err;
+1 -1
drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
··· 205 205 { 206 206 int i; 207 207 208 - for (i = 0; i < 23; i++) 208 + for (i = 0; i < NUM_DWMAC1000_DMA_REGS; i++) 209 209 if ((i < 12) || (i > 17)) 210 210 reg_space[DMA_BUS_MODE / 4 + i] = 211 211 readl(ioaddr + DMA_BUS_MODE + i * 4);
+1 -1
drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
··· 70 70 { 71 71 int i; 72 72 73 - for (i = 0; i < 9; i++) 73 + for (i = 0; i < NUM_DWMAC100_DMA_REGS; i++) 74 74 reg_space[DMA_BUS_MODE / 4 + i] = 75 75 readl(ioaddr + DMA_BUS_MODE + i * 4); 76 76
+3
drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
··· 136 136 #define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */ 137 137 #define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */ 138 138 139 + #define NUM_DWMAC100_DMA_REGS 9 140 + #define NUM_DWMAC1000_DMA_REGS 23 141 + 139 142 void dwmac_enable_dma_transmission(void __iomem *ioaddr); 140 143 void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan); 141 144 void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan);
+5
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
··· 33 33 #define MAC100_ETHTOOL_NAME "st_mac100" 34 34 #define GMAC_ETHTOOL_NAME "st_gmac" 35 35 36 + #define ETHTOOL_DMA_OFFSET 55 37 + 36 38 struct stmmac_stats { 37 39 char stat_string[ETH_GSTRING_LEN]; 38 40 int sizeof_stat; ··· 444 442 445 443 priv->hw->mac->dump_regs(priv->hw, reg_space); 446 444 priv->hw->dma->dump_regs(priv->ioaddr, reg_space); 445 + /* Copy DMA registers to where ethtool expects them */ 446 + memcpy(&reg_space[ETHTOOL_DMA_OFFSET], &reg_space[DMA_BUS_MODE / 4], 447 + NUM_DWMAC1000_DMA_REGS * 4); 447 448 } 448 449 449 450 static void
+3 -3
drivers/net/ethernet/sun/sunhme.h
··· 13 13 /* Happy Meal global registers. */ 14 14 #define GREG_SWRESET 0x000UL /* Software Reset */ 15 15 #define GREG_CFG 0x004UL /* Config Register */ 16 - #define GREG_STAT 0x108UL /* Status */ 17 - #define GREG_IMASK 0x10cUL /* Interrupt Mask */ 18 - #define GREG_REG_SIZE 0x110UL 16 + #define GREG_STAT 0x100UL /* Status */ 17 + #define GREG_IMASK 0x104UL /* Interrupt Mask */ 18 + #define GREG_REG_SIZE 0x108UL 19 19 20 20 /* Global reset register. */ 21 21 #define GREG_RESET_ETX 0x01
+1 -1
drivers/net/ethernet/toshiba/tc35815.c
··· 1338 1338 static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status) 1339 1339 { 1340 1340 static int count; 1341 - printk(KERN_WARNING "%s: Fatal Error Intterrupt (%#x):", 1341 + printk(KERN_WARNING "%s: Fatal Error Interrupt (%#x):", 1342 1342 dev->name, status); 1343 1343 if (status & Int_IntPCI) 1344 1344 printk(" IntPCI");
+33 -10
drivers/net/hyperv/netvsc_drv.c
··· 315 315 return slots_used; 316 316 } 317 317 318 - /* Estimate number of page buffers neede to transmit 319 - * Need at most 2 for RNDIS header plus skb body and fragments. 320 - */ 321 - static unsigned int netvsc_get_slots(const struct sk_buff *skb) 318 + static int count_skb_frag_slots(struct sk_buff *skb) 322 319 { 323 - return PFN_UP(offset_in_page(skb->data) + skb_headlen(skb)) 324 - + skb_shinfo(skb)->nr_frags 325 - + 2; 320 + int i, frags = skb_shinfo(skb)->nr_frags; 321 + int pages = 0; 322 + 323 + for (i = 0; i < frags; i++) { 324 + skb_frag_t *frag = skb_shinfo(skb)->frags + i; 325 + unsigned long size = skb_frag_size(frag); 326 + unsigned long offset = frag->page_offset; 327 + 328 + /* Skip unused frames from start of page */ 329 + offset &= ~PAGE_MASK; 330 + pages += PFN_UP(offset + size); 331 + } 332 + return pages; 333 + } 334 + 335 + static int netvsc_get_slots(struct sk_buff *skb) 336 + { 337 + char *data = skb->data; 338 + unsigned int offset = offset_in_page(data); 339 + unsigned int len = skb_headlen(skb); 340 + int slots; 341 + int frag_slots; 342 + 343 + slots = DIV_ROUND_UP(offset + len, PAGE_SIZE); 344 + frag_slots = count_skb_frag_slots(skb); 345 + return slots + frag_slots; 326 346 } 327 347 328 348 static u32 net_checksum_info(struct sk_buff *skb) ··· 380 360 struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT]; 381 361 struct hv_page_buffer *pb = page_buf; 382 362 383 - /* We can only transmit MAX_PAGE_BUFFER_COUNT number 363 + /* We will atmost need two pages to describe the rndis 364 + * header. We can only transmit MAX_PAGE_BUFFER_COUNT number 384 365 * of pages in a single packet. If skb is scattered around 385 366 * more pages we try linearizing it. 
386 367 */ 387 - num_data_pgs = netvsc_get_slots(skb); 368 + 369 + num_data_pgs = netvsc_get_slots(skb) + 2; 370 + 388 371 if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) { 389 372 ++net_device_ctx->eth_stats.tx_scattered; 390 373 391 374 if (skb_linearize(skb)) 392 375 goto no_memory; 393 376 394 - num_data_pgs = netvsc_get_slots(skb); 377 + num_data_pgs = netvsc_get_slots(skb) + 2; 395 378 if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) { 396 379 ++net_device_ctx->eth_stats.tx_too_big; 397 380 goto drop;
+13 -3
drivers/net/irda/mcs7780.c
··· 141 141 static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val) 142 142 { 143 143 struct usb_device *dev = mcs->usbdev; 144 - int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ, 145 - MCS_RD_RTYPE, 0, reg, val, 2, 146 - msecs_to_jiffies(MCS_CTRL_TIMEOUT)); 144 + void *dmabuf; 145 + int ret; 146 + 147 + dmabuf = kmalloc(sizeof(__u16), GFP_KERNEL); 148 + if (!dmabuf) 149 + return -ENOMEM; 150 + 151 + ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ, 152 + MCS_RD_RTYPE, 0, reg, dmabuf, 2, 153 + msecs_to_jiffies(MCS_CTRL_TIMEOUT)); 154 + 155 + memcpy(val, dmabuf, sizeof(__u16)); 156 + kfree(dmabuf); 147 157 148 158 return ret; 149 159 }
+10 -3
drivers/net/phy/Kconfig
··· 7 7 help 8 8 MDIO devices and driver infrastructure code. 9 9 10 - if MDIO_DEVICE 10 + config MDIO_BUS 11 + tristate 12 + default m if PHYLIB=m 13 + default MDIO_DEVICE 14 + help 15 + This internal symbol is used for link time dependencies and it 16 + reflects whether the mdio_bus/mdio_device code is built as a 17 + loadable module or built-in. 18 + 19 + if MDIO_BUS 11 20 12 21 config MDIO_BCM_IPROC 13 22 tristate "Broadcom iProc MDIO bus controller" ··· 37 28 38 29 config MDIO_BITBANG 39 30 tristate "Bitbanged MDIO buses" 40 - depends on !(MDIO_DEVICE=y && PHYLIB=m) 41 31 help 42 32 This module implements the MDIO bus protocol in software, 43 33 for use by low level drivers that export the ability to ··· 135 127 tristate "ThunderX SOCs MDIO buses" 136 128 depends on 64BIT 137 129 depends on PCI 138 - depends on !(MDIO_DEVICE=y && PHYLIB=m) 139 130 select MDIO_CAVIUM 140 131 help 141 132 This driver supports the MDIO interfaces found on Cavium
+3
drivers/net/phy/phy.c
··· 749 749 if (phydev->state > PHY_UP && phydev->state != PHY_HALTED) 750 750 phydev->state = PHY_UP; 751 751 mutex_unlock(&phydev->lock); 752 + 753 + /* Now we can run the state machine synchronously */ 754 + phy_state_machine(&phydev->state_queue.work); 752 755 } 753 756 754 757 /**
+1 -1
drivers/net/ppp/pptp.c
··· 131 131 clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap); 132 132 RCU_INIT_POINTER(callid_sock[sock->proto.pptp.src_addr.call_id], NULL); 133 133 spin_unlock(&chan_lock); 134 - synchronize_rcu(); 135 134 } 136 135 137 136 static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) ··· 519 520 520 521 po = pppox_sk(sk); 521 522 del_chan(po); 523 + synchronize_rcu(); 522 524 523 525 pppox_unbind_sock(sk); 524 526 sk->sk_state = PPPOX_DEAD;
+4 -4
drivers/net/team/team.c
··· 60 60 static int __set_port_dev_addr(struct net_device *port_dev, 61 61 const unsigned char *dev_addr) 62 62 { 63 - struct sockaddr addr; 63 + struct sockaddr_storage addr; 64 64 65 - memcpy(addr.sa_data, dev_addr, port_dev->addr_len); 66 - addr.sa_family = port_dev->type; 67 - return dev_set_mac_address(port_dev, &addr); 65 + memcpy(addr.__data, dev_addr, port_dev->addr_len); 66 + addr.ss_family = port_dev->type; 67 + return dev_set_mac_address(port_dev, (struct sockaddr *)&addr); 68 68 } 69 69 70 70 static int team_port_set_orig_dev_addr(struct team_port *port)
+9 -1
drivers/net/tun.c
··· 2598 2598 goto err_misc; 2599 2599 } 2600 2600 2601 - register_netdevice_notifier(&tun_notifier_block); 2601 + ret = register_netdevice_notifier(&tun_notifier_block); 2602 + if (ret) { 2603 + pr_err("Can't register netdevice notifier\n"); 2604 + goto err_notifier; 2605 + } 2606 + 2602 2607 return 0; 2608 + 2609 + err_notifier: 2610 + misc_deregister(&tun_miscdev); 2603 2611 err_misc: 2604 2612 rtnl_link_unregister(&tun_link_ops); 2605 2613 err_linkops:
+2 -3
drivers/net/virtio_net.c
··· 889 889 890 890 buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; 891 891 buf += headroom; /* advance address leaving hole at front of pkt */ 892 - ctx = (void *)(unsigned long)len; 893 892 get_page(alloc_frag->page); 894 893 alloc_frag->offset += len + headroom; 895 894 hole = alloc_frag->size - alloc_frag->offset; 896 895 if (hole < len + headroom) { 897 896 /* To avoid internal fragmentation, if there is very likely not 898 897 * enough space for another buffer, add the remaining space to 899 - * the current buffer. This extra space is not included in 900 - * the truesize stored in ctx. 898 + * the current buffer. 901 899 */ 902 900 len += hole; 903 901 alloc_frag->offset += hole; 904 902 } 905 903 906 904 sg_init_one(rq->sg, buf, len); 905 + ctx = (void *)(unsigned long)len; 907 906 err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); 908 907 if (err < 0) 909 908 put_page(virt_to_head_page(buf));
+1 -1
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
··· 214 214 215 215 /* Make sure there's enough writeable headroom */ 216 216 if (skb_headroom(skb) < drvr->hdrlen || skb_header_cloned(skb)) { 217 - head_delta = drvr->hdrlen - skb_headroom(skb); 217 + head_delta = max_t(int, drvr->hdrlen - skb_headroom(skb), 0); 218 218 219 219 brcmf_dbg(INFO, "%s: insufficient headroom (%d)\n", 220 220 brcmf_ifname(ifp), head_delta);
+2 -6
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
··· 2053 2053 atomic_inc(&stats->pktcow_failed); 2054 2054 return -ENOMEM; 2055 2055 } 2056 + head_pad = 0; 2056 2057 } 2057 2058 skb_push(pkt, head_pad); 2058 2059 dat_buf = (u8 *)(pkt->data); 2059 2060 } 2060 2061 memset(dat_buf, 0, head_pad + bus->tx_hdrlen); 2061 - return 0; 2062 + return head_pad; 2062 2063 } 2063 2064 2064 2065 /** ··· 4174 4173 brcmf_err("brcmf_attach failed\n"); 4175 4174 goto fail; 4176 4175 } 4177 - 4178 - /* allocate scatter-gather table. sg support 4179 - * will be disabled upon allocation failure. 4180 - */ 4181 - brcmf_sdiod_sgtable_alloc(bus->sdiodev); 4182 4176 4183 4177 /* Query the F2 block size, set roundup accordingly */ 4184 4178 bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
+1 -1
drivers/net/wireless/intel/iwlwifi/dvm/tx.c
··· 1189 1189 next_reclaimed; 1190 1190 IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n", 1191 1191 next_reclaimed); 1192 + iwlagn_check_ratid_empty(priv, sta_id, tid); 1192 1193 } 1193 1194 1194 1195 iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs); 1195 1196 1196 - iwlagn_check_ratid_empty(priv, sta_id, tid); 1197 1197 freed = 0; 1198 1198 1199 1199 /* process frames */
+2 -2
drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
··· 55 55 /* also account for the RFC 1042 header, of course */ 56 56 offs += 6; 57 57 58 - return skb->len > offs + 2 && 59 - *(__be16 *)(skb->data + offs) == cpu_to_be16(ETH_P_PAE); 58 + return skb->len <= offs + 2 || 59 + *(__be16 *)(skb->data + offs) != cpu_to_be16(ETH_P_PAE); 60 60 } 61 61 62 62 static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,
+7 -1
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
··· 1084 1084 1085 1085 lockdep_assert_held(&mvm->mutex); 1086 1086 1087 - if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 1087 + if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) { 1088 + /* 1089 + * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART 1090 + * so later code will - from now on - see that we're doing it. 1091 + */ 1092 + set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 1093 + clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); 1088 1094 /* Clean up some internal and mac80211 state on restart */ 1089 1095 iwl_mvm_restart_cleanup(mvm); 1090 1096 } else {
+2
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
··· 1090 1090 * @IWL_MVM_STATUS_HW_RFKILL: HW RF-kill is asserted 1091 1091 * @IWL_MVM_STATUS_HW_CTKILL: CT-kill is active 1092 1092 * @IWL_MVM_STATUS_ROC_RUNNING: remain-on-channel is running 1093 + * @IWL_MVM_STATUS_HW_RESTART_REQUESTED: HW restart was requested 1093 1094 * @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active 1094 1095 * @IWL_MVM_STATUS_IN_D0I3: NIC is in D0i3 1095 1096 * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running ··· 1102 1101 IWL_MVM_STATUS_HW_RFKILL, 1103 1102 IWL_MVM_STATUS_HW_CTKILL, 1104 1103 IWL_MVM_STATUS_ROC_RUNNING, 1104 + IWL_MVM_STATUS_HW_RESTART_REQUESTED, 1105 1105 IWL_MVM_STATUS_IN_HW_RESTART, 1106 1106 IWL_MVM_STATUS_IN_D0I3, 1107 1107 IWL_MVM_STATUS_ROC_AUX_RUNNING,
+3 -3
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
··· 1235 1235 */ 1236 1236 if (!mvm->fw_restart && fw_error) { 1237 1237 iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert, 1238 - NULL); 1239 - } else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART, 1240 - &mvm->status)) { 1238 + NULL); 1239 + } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 1241 1240 struct iwl_mvm_reprobe *reprobe; 1242 1241 1243 1242 IWL_ERR(mvm, ··· 1267 1268 1268 1269 if (fw_error && mvm->fw_restart > 0) 1269 1270 mvm->fw_restart--; 1271 + set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); 1270 1272 ieee80211_restart_hw(mvm->hw); 1271 1273 } 1272 1274 }
+14 -1
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
··· 277 277 278 278 /* Timer expired */ 279 279 sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]); 280 + 281 + /* 282 + * sta should be valid unless the following happens: 283 + * The firmware asserts which triggers a reconfig flow, but 284 + * the reconfig fails before we set the pointer to sta into 285 + * the fw_id_to_mac_id pointer table. Mac80211 can't stop 286 + * A-MDPU and hence the timer continues to run. Then, the 287 + * timer expires and sta is NULL. 288 + */ 289 + if (!sta) 290 + goto unlock; 291 + 280 292 mvm_sta = iwl_mvm_sta_from_mac80211(sta); 281 293 ieee80211_stop_rx_ba_session_offl(mvm_sta->vif, 282 294 sta->addr, ba_data->tid); ··· 2027 2015 IWL_MAX_TID_COUNT, 2028 2016 wdg_timeout); 2029 2017 2030 - if (vif->type == NL80211_IFTYPE_AP) 2018 + if (vif->type == NL80211_IFTYPE_AP || 2019 + vif->type == NL80211_IFTYPE_ADHOC) 2031 2020 mvm->probe_queue = queue; 2032 2021 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) 2033 2022 mvm->p2p_dev_queue = queue;
+2 -1
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
··· 3150 3150 init_waitqueue_head(&trans_pcie->d0i3_waitq); 3151 3151 3152 3152 if (trans_pcie->msix_enabled) { 3153 - if (iwl_pcie_init_msix_handler(pdev, trans_pcie)) 3153 + ret = iwl_pcie_init_msix_handler(pdev, trans_pcie); 3154 + if (ret) 3154 3155 goto out_no_pci; 3155 3156 } else { 3156 3157 ret = iwl_pcie_alloc_ict(trans);
+3
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
··· 298 298 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { 299 299 struct iwl_txq *txq = trans_pcie->txq[i]; 300 300 301 + if (!test_bit(i, trans_pcie->queue_used)) 302 + continue; 303 + 301 304 spin_lock_bh(&txq->lock); 302 305 if (txq->need_update) { 303 306 iwl_pcie_txq_inc_wr_ptr(trans, txq);
-3
drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
··· 846 846 return false; 847 847 } 848 848 849 - if (rtlpriv->cfg->ops->get_btc_status()) 850 - rtlpriv->btcoexist.btc_ops->btc_power_on_setting(rtlpriv); 851 - 852 849 bytetmp = rtl_read_byte(rtlpriv, REG_MULTI_FUNC_CTRL); 853 850 rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL, bytetmp | BIT(3)); 854 851
-1
drivers/net/wireless/realtek/rtlwifi/wifi.h
··· 2547 2547 struct rtl_btc_ops { 2548 2548 void (*btc_init_variables) (struct rtl_priv *rtlpriv); 2549 2549 void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv); 2550 - void (*btc_power_on_setting)(struct rtl_priv *rtlpriv); 2551 2550 void (*btc_init_hw_config) (struct rtl_priv *rtlpriv); 2552 2551 void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type); 2553 2552 void (*btc_lps_notify)(struct rtl_priv *rtlpriv, u8 type);
+1 -1
drivers/phy/broadcom/Kconfig
··· 30 30 tristate "Broadcom Northstar USB 3.0 PHY Driver" 31 31 depends on ARCH_BCM_IPROC || COMPILE_TEST 32 32 depends on HAS_IOMEM && OF 33 + depends on MDIO_BUS 33 34 select GENERIC_PHY 34 - select MDIO_DEVICE 35 35 help 36 36 Enable this to support Broadcom USB 3.0 PHY connected to the USB 37 37 controller on Northstar family.
+6 -22
drivers/vhost/vhost.c
··· 308 308 vq->avail = NULL; 309 309 vq->used = NULL; 310 310 vq->last_avail_idx = 0; 311 - vq->last_used_event = 0; 312 311 vq->avail_idx = 0; 313 312 vq->last_used_idx = 0; 314 313 vq->signalled_used = 0; ··· 1401 1402 r = -EINVAL; 1402 1403 break; 1403 1404 } 1404 - vq->last_avail_idx = vq->last_used_event = s.num; 1405 + vq->last_avail_idx = s.num; 1405 1406 /* Forget the cached index value. */ 1406 1407 vq->avail_idx = vq->last_avail_idx; 1407 1408 break; ··· 2240 2241 __u16 old, new; 2241 2242 __virtio16 event; 2242 2243 bool v; 2244 + /* Flush out used index updates. This is paired 2245 + * with the barrier that the Guest executes when enabling 2246 + * interrupts. */ 2247 + smp_mb(); 2243 2248 2244 2249 if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) && 2245 2250 unlikely(vq->avail_idx == vq->last_avail_idx)) ··· 2251 2248 2252 2249 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { 2253 2250 __virtio16 flags; 2254 - /* Flush out used index updates. This is paired 2255 - * with the barrier that the Guest executes when enabling 2256 - * interrupts. */ 2257 - smp_mb(); 2258 2251 if (vhost_get_avail(vq, flags, &vq->avail->flags)) { 2259 2252 vq_err(vq, "Failed to get flags"); 2260 2253 return true; ··· 2265 2266 if (unlikely(!v)) 2266 2267 return true; 2267 2268 2268 - /* We're sure if the following conditions are met, there's no 2269 - * need to notify guest: 2270 - * 1) cached used event is ahead of new 2271 - * 2) old to new updating does not cross cached used event. */ 2272 - if (vring_need_event(vq->last_used_event, new + vq->num, new) && 2273 - !vring_need_event(vq->last_used_event, new, old)) 2274 - return false; 2275 - 2276 - /* Flush out used index updates. This is paired 2277 - * with the barrier that the Guest executes when enabling 2278 - * interrupts. 
*/ 2279 - smp_mb(); 2280 - 2281 2269 if (vhost_get_avail(vq, event, vhost_used_event(vq))) { 2282 2270 vq_err(vq, "Failed to get used event idx"); 2283 2271 return true; 2284 2272 } 2285 - vq->last_used_event = vhost16_to_cpu(vq, event); 2286 - 2287 - return vring_need_event(vq->last_used_event, new, old); 2273 + return vring_need_event(vhost16_to_cpu(vq, event), new, old); 2288 2274 } 2289 2275 2290 2276 /* This actually signals the guest, using eventfd. */
-3
drivers/vhost/vhost.h
··· 115 115 /* Last index we used. */ 116 116 u16 last_used_idx; 117 117 118 - /* Last used evet we've seen */ 119 - u16 last_used_event; 120 - 121 118 /* Used flags */ 122 119 u16 used_flags; 123 120
+6
include/linux/ipv6.h
··· 128 128 #define IP6SKB_FRAGMENTED 16 129 129 #define IP6SKB_HOPBYHOP 32 130 130 #define IP6SKB_L3SLAVE 64 131 + #define IP6SKB_JUMBOGRAM 128 131 132 }; 132 133 133 134 #if defined(CONFIG_NET_L3_MASTER_DEV) ··· 151 150 bool l3_slave = ipv6_l3mdev_skb(IP6CB(skb)->flags); 152 151 153 152 return l3_slave ? skb->skb_iif : IP6CB(skb)->iif; 153 + } 154 + 155 + static inline bool inet6_is_jumbogram(const struct sk_buff *skb) 156 + { 157 + return !!(IP6CB(skb)->flags & IP6SKB_JUMBOGRAM); 154 158 } 155 159 156 160 /* can not be used in TCP layer after tcp_v6_fill_cb */
+7 -3
include/linux/mlx5/mlx5_ifc.h
··· 7749 7749 }; 7750 7750 7751 7751 struct mlx5_ifc_mcam_enhanced_features_bits { 7752 - u8 reserved_at_0[0x7f]; 7752 + u8 reserved_at_0[0x7d]; 7753 7753 7754 + u8 mtpps_enh_out_per_adj[0x1]; 7755 + u8 mtpps_fs[0x1]; 7754 7756 u8 pcie_performance_group[0x1]; 7755 7757 }; 7756 7758 ··· 8161 8159 u8 reserved_at_78[0x4]; 8162 8160 u8 cap_pin_4_mode[0x4]; 8163 8161 8164 - u8 reserved_at_80[0x80]; 8162 + u8 field_select[0x20]; 8163 + u8 reserved_at_a0[0x60]; 8165 8164 8166 8165 u8 enable[0x1]; 8167 8166 u8 reserved_at_101[0xb]; ··· 8177 8174 8178 8175 u8 out_pulse_duration[0x10]; 8179 8176 u8 out_periodic_adjustment[0x10]; 8177 + u8 enhanced_out_periodic_adjustment[0x20]; 8180 8178 8181 - u8 reserved_at_1a0[0x60]; 8179 + u8 reserved_at_1c0[0x20]; 8182 8180 }; 8183 8181 8184 8182 struct mlx5_ifc_mtppse_reg_bits {
+1 -1
include/linux/phy.h
··· 830 830 dev_err(&_phydev->mdio.dev, format, ##args) 831 831 832 832 #define phydev_dbg(_phydev, format, args...) \ 833 - dev_dbg(&_phydev->mdio.dev, format, ##args); 833 + dev_dbg(&_phydev->mdio.dev, format, ##args) 834 834 835 835 static inline const char *phydev_name(const struct phy_device *phydev) 836 836 {
+2 -2
include/net/sctp/sctp.h
··· 469 469 470 470 #define _sctp_walk_params(pos, chunk, end, member)\ 471 471 for (pos.v = chunk->member;\ 472 - (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <\ 472 + (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\ 473 473 (void *)chunk + end) &&\ 474 474 pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\ 475 475 ntohs(pos.p->length) >= sizeof(struct sctp_paramhdr);\ ··· 481 481 #define _sctp_walk_errors(err, chunk_hdr, end)\ 482 482 for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \ 483 483 sizeof(struct sctp_chunkhdr));\ 484 - ((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <\ 484 + ((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <=\ 485 485 (void *)chunk_hdr + end) &&\ 486 486 (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\ 487 487 ntohs(err->length) >= sizeof(sctp_errhdr_t); \
+23 -11
include/net/udp.h
··· 260 260 } 261 261 262 262 void udp_v4_early_demux(struct sk_buff *skb); 263 + void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst); 263 264 int udp_get_port(struct sock *sk, unsigned short snum, 264 265 int (*saddr_cmp)(const struct sock *, 265 266 const struct sock *)); ··· 306 305 /* UDP uses skb->dev_scratch to cache as much information as possible and avoid 307 306 * possibly multiple cache miss on dequeue() 308 307 */ 309 - #if BITS_PER_LONG == 64 310 - 311 - /* truesize, len and the bit needed to compute skb_csum_unnecessary will be on 312 - * cold cache lines at recvmsg time. 313 - * skb->len can be stored on 16 bits since the udp header has been already 314 - * validated and pulled. 315 - */ 316 308 struct udp_dev_scratch { 317 - u32 truesize; 309 + /* skb->truesize and the stateless bit are embedded in a single field; 310 + * do not use a bitfield since the compiler emits better/smaller code 311 + * this way 312 + */ 313 + u32 _tsize_state; 314 + 315 + #if BITS_PER_LONG == 64 316 + /* len and the bit needed to compute skb_csum_unnecessary 317 + * will be on cold cache lines at recvmsg time. 318 + * skb->len can be stored on 16 bits since the udp header has been 319 + * already validated and pulled. 
320 + */ 318 321 u16 len; 319 322 bool is_linear; 320 323 bool csum_unnecessary; 324 + #endif 321 325 }; 322 326 327 + static inline struct udp_dev_scratch *udp_skb_scratch(struct sk_buff *skb) 328 + { 329 + return (struct udp_dev_scratch *)&skb->dev_scratch; 330 + } 331 + 332 + #if BITS_PER_LONG == 64 323 333 static inline unsigned int udp_skb_len(struct sk_buff *skb) 324 334 { 325 - return ((struct udp_dev_scratch *)&skb->dev_scratch)->len; 335 + return udp_skb_scratch(skb)->len; 326 336 } 327 337 328 338 static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb) 329 339 { 330 - return ((struct udp_dev_scratch *)&skb->dev_scratch)->csum_unnecessary; 340 + return udp_skb_scratch(skb)->csum_unnecessary; 331 341 } 332 342 333 343 static inline bool udp_skb_is_linear(struct sk_buff *skb) 334 344 { 335 - return ((struct udp_dev_scratch *)&skb->dev_scratch)->is_linear; 345 + return udp_skb_scratch(skb)->is_linear; 336 346 } 337 347 338 348 #else
+2 -2
kernel/bpf/syscall.c
··· 1289 1289 info_len = min_t(u32, sizeof(info), info_len); 1290 1290 1291 1291 if (copy_from_user(&info, uinfo, info_len)) 1292 - return err; 1292 + return -EFAULT; 1293 1293 1294 1294 info.type = prog->type; 1295 1295 info.id = prog->aux->id; ··· 1312 1312 } 1313 1313 1314 1314 ulen = info.xlated_prog_len; 1315 - info.xlated_prog_len = bpf_prog_size(prog->len); 1315 + info.xlated_prog_len = bpf_prog_insn_size(prog); 1316 1316 if (info.xlated_prog_len && ulen) { 1317 1317 uinsns = u64_to_user_ptr(info.xlated_prog_insns); 1318 1318 ulen = min_t(u32, info.xlated_prog_len, ulen);
+15 -6
kernel/bpf/verifier.c
··· 1865 1865 * do our normal operations to the register, we need to set the values 1866 1866 * to the min/max since they are undefined. 1867 1867 */ 1868 - if (min_val == BPF_REGISTER_MIN_RANGE) 1869 - dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1870 - if (max_val == BPF_REGISTER_MAX_RANGE) 1871 - dst_reg->max_value = BPF_REGISTER_MAX_RANGE; 1868 + if (opcode != BPF_SUB) { 1869 + if (min_val == BPF_REGISTER_MIN_RANGE) 1870 + dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1871 + if (max_val == BPF_REGISTER_MAX_RANGE) 1872 + dst_reg->max_value = BPF_REGISTER_MAX_RANGE; 1873 + } 1872 1874 1873 1875 switch (opcode) { 1874 1876 case BPF_ADD: ··· 1881 1879 dst_reg->min_align = min(src_align, dst_align); 1882 1880 break; 1883 1881 case BPF_SUB: 1882 + /* If one of our values was at the end of our ranges, then the 1883 + * _opposite_ value in the dst_reg goes to the end of our range. 1884 + */ 1885 + if (min_val == BPF_REGISTER_MIN_RANGE) 1886 + dst_reg->max_value = BPF_REGISTER_MAX_RANGE; 1887 + if (max_val == BPF_REGISTER_MAX_RANGE) 1888 + dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1884 1889 if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1885 - dst_reg->min_value -= min_val; 1890 + dst_reg->min_value -= max_val; 1886 1891 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1887 - dst_reg->max_value -= max_val; 1892 + dst_reg->max_value -= min_val; 1888 1893 dst_reg->min_align = min(src_align, dst_align); 1889 1894 break; 1890 1895 case BPF_MUL:
+35 -22
lib/test_rhashtable.c
··· 56 56 module_param(enomem_retry, bool, 0); 57 57 MODULE_PARM_DESC(enomem_retry, "Retry insert even if -ENOMEM was returned (default: off)"); 58 58 59 + struct test_obj_val { 60 + int id; 61 + int tid; 62 + }; 63 + 59 64 struct test_obj { 60 - int value; 65 + struct test_obj_val value; 61 66 struct rhash_head node; 62 67 }; 63 68 ··· 77 72 static struct rhashtable_params test_rht_params = { 78 73 .head_offset = offsetof(struct test_obj, node), 79 74 .key_offset = offsetof(struct test_obj, value), 80 - .key_len = sizeof(int), 75 + .key_len = sizeof(struct test_obj_val), 81 76 .hashfn = jhash, 82 77 .nulls_base = (3U << RHT_BASE_SHIFT), 83 78 }; ··· 114 109 for (i = 0; i < entries * 2; i++) { 115 110 struct test_obj *obj; 116 111 bool expected = !(i % 2); 117 - u32 key = i; 112 + struct test_obj_val key = { 113 + .id = i, 114 + }; 118 115 119 - if (array[i / 2].value == TEST_INSERT_FAIL) 116 + if (array[i / 2].value.id == TEST_INSERT_FAIL) 120 117 expected = false; 121 118 122 119 obj = rhashtable_lookup_fast(ht, &key, test_rht_params); 123 120 124 121 if (expected && !obj) { 125 - pr_warn("Test failed: Could not find key %u\n", key); 122 + pr_warn("Test failed: Could not find key %u\n", key.id); 126 123 return -ENOENT; 127 124 } else if (!expected && obj) { 128 125 pr_warn("Test failed: Unexpected entry found for key %u\n", 129 - key); 126 + key.id); 130 127 return -EEXIST; 131 128 } else if (expected && obj) { 132 - if (obj->value != i) { 129 + if (obj->value.id != i) { 133 130 pr_warn("Test failed: Lookup value mismatch %u!=%u\n", 134 - obj->value, i); 131 + obj->value.id, i); 135 132 return -EINVAL; 136 133 } 137 134 } ··· 202 195 for (i = 0; i < entries; i++) { 203 196 struct test_obj *obj = &array[i]; 204 197 205 - obj->value = i * 2; 198 + obj->value.id = i * 2; 206 199 err = insert_retry(ht, &obj->node, test_rht_params); 207 200 if (err > 0) 208 201 insert_retries += err; ··· 223 216 224 217 pr_info(" Deleting %d keys\n", entries); 225 218 for (i = 0; i < 
entries; i++) { 226 - u32 key = i * 2; 219 + struct test_obj_val key = { 220 + .id = i * 2, 221 + }; 227 222 228 - if (array[i].value != TEST_INSERT_FAIL) { 223 + if (array[i].value.id != TEST_INSERT_FAIL) { 229 224 obj = rhashtable_lookup_fast(ht, &key, test_rht_params); 230 225 BUG_ON(!obj); 231 226 ··· 251 242 252 243 for (i = 0; i < entries; i++) { 253 244 struct test_obj *obj; 254 - int key = (tdata->id << 16) | i; 245 + struct test_obj_val key = { 246 + .id = i, 247 + .tid = tdata->id, 248 + }; 255 249 256 250 obj = rhashtable_lookup_fast(&ht, &key, test_rht_params); 257 - if (obj && (tdata->objs[i].value == TEST_INSERT_FAIL)) { 258 - pr_err(" found unexpected object %d\n", key); 251 + if (obj && (tdata->objs[i].value.id == TEST_INSERT_FAIL)) { 252 + pr_err(" found unexpected object %d-%d\n", key.tid, key.id); 259 253 err++; 260 - } else if (!obj && (tdata->objs[i].value != TEST_INSERT_FAIL)) { 261 - pr_err(" object %d not found!\n", key); 254 + } else if (!obj && (tdata->objs[i].value.id != TEST_INSERT_FAIL)) { 255 + pr_err(" object %d-%d not found!\n", key.tid, key.id); 262 256 err++; 263 - } else if (obj && (obj->value != key)) { 264 - pr_err(" wrong object returned (got %d, expected %d)\n", 265 - obj->value, key); 257 + } else if (obj && memcmp(&obj->value, &key, sizeof(key))) { 258 + pr_err(" wrong object returned (got %d-%d, expected %d-%d)\n", 259 + obj->value.tid, obj->value.id, key.tid, key.id); 266 260 err++; 267 261 } 268 262 ··· 284 272 pr_err(" thread[%d]: down_interruptible failed\n", tdata->id); 285 273 286 274 for (i = 0; i < entries; i++) { 287 - tdata->objs[i].value = (tdata->id << 16) | i; 275 + tdata->objs[i].value.id = i; 276 + tdata->objs[i].value.tid = tdata->id; 288 277 err = insert_retry(&ht, &tdata->objs[i].node, test_rht_params); 289 278 if (err > 0) { 290 279 insert_retries += err; ··· 308 295 309 296 for (step = 10; step > 0; step--) { 310 297 for (i = 0; i < entries; i += step) { 311 - if (tdata->objs[i].value == 
TEST_INSERT_FAIL) 298 + if (tdata->objs[i].value.id == TEST_INSERT_FAIL) 312 299 continue; 313 300 err = rhashtable_remove_fast(&ht, &tdata->objs[i].node, 314 301 test_rht_params); ··· 317 304 tdata->id); 318 305 goto out; 319 306 } 320 - tdata->objs[i].value = TEST_INSERT_FAIL; 307 + tdata->objs[i].value.id = TEST_INSERT_FAIL; 321 308 322 309 cond_resched(); 323 310 }
+2
net/core/dev_ioctl.c
··· 263 263 return dev_set_mtu(dev, ifr->ifr_mtu); 264 264 265 265 case SIOCSIFHWADDR: 266 + if (dev->addr_len > sizeof(struct sockaddr)) 267 + return -EINVAL; 266 268 return dev_set_mac_address(dev, &ifr->ifr_hwaddr); 267 269 268 270 case SIOCSIFHWBROADCAST:
+1 -1
net/core/netpoll.c
··· 666 666 int err; 667 667 668 668 rtnl_lock(); 669 - if (np->dev_name) { 669 + if (np->dev_name[0]) { 670 670 struct net *net = current->nsproxy->net_ns; 671 671 ndev = __dev_get_by_name(net, np->dev_name); 672 672 }
+5 -2
net/dccp/feat.c
··· 1471 1471 * singleton values (which always leads to failure). 1472 1472 * These settings can still (later) be overridden via sockopts. 1473 1473 */ 1474 - if (ccid_get_builtin_ccids(&tx.val, &tx.len) || 1475 - ccid_get_builtin_ccids(&rx.val, &rx.len)) 1474 + if (ccid_get_builtin_ccids(&tx.val, &tx.len)) 1476 1475 return -ENOBUFS; 1476 + if (ccid_get_builtin_ccids(&rx.val, &rx.len)) { 1477 + kfree(tx.val); 1478 + return -ENOBUFS; 1479 + } 1477 1480 1478 1481 if (!dccp_feat_prefer(sysctl_dccp_tx_ccid, tx.val, tx.len) || 1479 1482 !dccp_feat_prefer(sysctl_dccp_rx_ccid, rx.val, rx.len))
+1
net/dccp/ipv4.c
··· 631 631 goto drop_and_free; 632 632 633 633 inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); 634 + reqsk_put(req); 634 635 return 0; 635 636 636 637 drop_and_free:
+1
net/dccp/ipv6.c
··· 380 380 goto drop_and_free; 381 381 382 382 inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); 383 + reqsk_put(req); 383 384 return 0; 384 385 385 386 drop_and_free:
+10 -9
net/dsa/dsa2.c
··· 509 509 dst->cpu_dp->netdev = ethernet_dev; 510 510 } 511 511 512 - tag_protocol = ds->ops->get_tag_protocol(ds); 513 - dst->tag_ops = dsa_resolve_tag_protocol(tag_protocol); 514 - if (IS_ERR(dst->tag_ops)) { 515 - dev_warn(ds->dev, "No tagger for this switch\n"); 516 - return PTR_ERR(dst->tag_ops); 517 - } 518 - 519 - dst->rcv = dst->tag_ops->rcv; 520 - 521 512 /* Initialize cpu_port_mask now for drv->setup() 522 513 * to have access to a correct value, just like what 523 514 * net/dsa/dsa.c::dsa_switch_setup_one does. 524 515 */ 525 516 ds->cpu_port_mask |= BIT(index); 517 + 518 + tag_protocol = ds->ops->get_tag_protocol(ds); 519 + dst->tag_ops = dsa_resolve_tag_protocol(tag_protocol); 520 + if (IS_ERR(dst->tag_ops)) { 521 + dev_warn(ds->dev, "No tagger for this switch\n"); 522 + ds->cpu_port_mask &= ~BIT(index); 523 + return PTR_ERR(dst->tag_ops); 524 + } 525 + 526 + dst->rcv = dst->tag_ops->rcv; 526 527 527 528 return 0; 528 529 }
+1 -1
net/ipv4/fib_semantics.c
··· 1452 1452 return call_fib_notifiers(dev_net(fib_nh->nh_dev), event_type, 1453 1453 &info.info); 1454 1454 case FIB_EVENT_NH_DEL: 1455 - if ((IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) && 1455 + if ((in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) && 1456 1456 fib_nh->nh_flags & RTNH_F_LINKDOWN) || 1457 1457 (fib_nh->nh_flags & RTNH_F_DEAD)) 1458 1458 return call_fib_notifiers(dev_net(fib_nh->nh_dev),
+3 -2
net/ipv4/tcp_output.c
··· 2202 2202 static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new) 2203 2203 { 2204 2204 const u32 now = tcp_jiffies32; 2205 + enum tcp_chrono old = tp->chrono_type; 2205 2206 2206 - if (tp->chrono_type > TCP_CHRONO_UNSPEC) 2207 - tp->chrono_stat[tp->chrono_type - 1] += now - tp->chrono_start; 2207 + if (old > TCP_CHRONO_UNSPEC) 2208 + tp->chrono_stat[old - 1] += now - tp->chrono_start; 2208 2209 tp->chrono_start = now; 2209 2210 tp->chrono_type = new; 2210 2211 }
+27 -28
net/ipv4/udp.c
··· 1163 1163 return ret; 1164 1164 } 1165 1165 1166 - #if BITS_PER_LONG == 64 1166 + #define UDP_SKB_IS_STATELESS 0x80000000 1167 + 1167 1168 static void udp_set_dev_scratch(struct sk_buff *skb) 1168 1169 { 1169 - struct udp_dev_scratch *scratch; 1170 + struct udp_dev_scratch *scratch = udp_skb_scratch(skb); 1170 1171 1171 1172 BUILD_BUG_ON(sizeof(struct udp_dev_scratch) > sizeof(long)); 1172 - scratch = (struct udp_dev_scratch *)&skb->dev_scratch; 1173 - scratch->truesize = skb->truesize; 1173 + scratch->_tsize_state = skb->truesize; 1174 + #if BITS_PER_LONG == 64 1174 1175 scratch->len = skb->len; 1175 1176 scratch->csum_unnecessary = !!skb_csum_unnecessary(skb); 1176 1177 scratch->is_linear = !skb_is_nonlinear(skb); 1177 - } 1178 - 1179 - static int udp_skb_truesize(struct sk_buff *skb) 1180 - { 1181 - return ((struct udp_dev_scratch *)&skb->dev_scratch)->truesize; 1182 - } 1183 - #else 1184 - static void udp_set_dev_scratch(struct sk_buff *skb) 1185 - { 1186 - skb->dev_scratch = skb->truesize; 1187 - } 1188 - 1189 - static int udp_skb_truesize(struct sk_buff *skb) 1190 - { 1191 - return skb->dev_scratch; 1192 - } 1193 1178 #endif 1179 + if (likely(!skb->_skb_refdst)) 1180 + scratch->_tsize_state |= UDP_SKB_IS_STATELESS; 1181 + } 1182 + 1183 + static int udp_skb_truesize(struct sk_buff *skb) 1184 + { 1185 + return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS; 1186 + } 1187 + 1188 + static bool udp_skb_has_head_state(struct sk_buff *skb) 1189 + { 1190 + return !(udp_skb_scratch(skb)->_tsize_state & UDP_SKB_IS_STATELESS); 1191 + } 1194 1192 1195 1193 /* fully reclaim rmem/fwd memory allocated for skb */ 1196 1194 static void udp_rmem_release(struct sock *sk, int size, int partial, ··· 1386 1388 unlock_sock_fast(sk, slow); 1387 1389 } 1388 1390 1389 - /* we cleared the head states previously only if the skb lacks any IP 1390 - * options, see __udp_queue_rcv_skb(). 
1391 + /* In the more common cases we cleared the head states previously, 1392 + * see __udp_queue_rcv_skb(). 1391 1393 */ 1392 - if (unlikely(IPCB(skb)->opt.optlen > 0)) 1394 + if (unlikely(udp_skb_has_head_state(skb))) 1393 1395 skb_release_head_state(skb); 1394 1396 consume_stateless_skb(skb); 1395 1397 } ··· 1782 1784 sk_mark_napi_id_once(sk, skb); 1783 1785 } 1784 1786 1785 - /* At recvmsg() time we need skb->dst to process IP options-related 1786 - * cmsg, elsewhere can we clear all pending head states while they are 1787 - * hot in the cache 1787 + /* At recvmsg() time we may access skb->dst or skb->sp depending on 1788 + * the IP options and the cmsg flags, elsewhere can we clear all 1789 + * pending head states while they are hot in the cache 1788 1790 */ 1789 - if (likely(IPCB(skb)->opt.optlen == 0)) 1791 + if (likely(IPCB(skb)->opt.optlen == 0 && !skb_sec_path(skb))) 1790 1792 skb_release_head_state(skb); 1791 1793 1792 1794 rc = __udp_enqueue_schedule_skb(sk, skb); ··· 1928 1930 /* For TCP sockets, sk_rx_dst is protected by socket lock 1929 1931 * For UDP, we use xchg() to guard against concurrent changes. 1930 1932 */ 1931 - static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) 1933 + void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) 1932 1934 { 1933 1935 struct dst_entry *old; 1934 1936 ··· 1937 1939 dst_release(old); 1938 1940 } 1939 1941 } 1942 + EXPORT_SYMBOL(udp_sk_rx_dst_set); 1940 1943 1941 1944 /* 1942 1945 * Multicasts and broadcasts go to each listener.
+1
net/ipv6/exthdrs.c
··· 756 756 if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) 757 757 goto drop; 758 758 759 + IP6CB(skb)->flags |= IP6SKB_JUMBOGRAM; 759 760 return true; 760 761 761 762 drop:
-4
net/ipv6/ip6_output.c
··· 673 673 *prevhdr = NEXTHDR_FRAGMENT; 674 674 tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC); 675 675 if (!tmp_hdr) { 676 - IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), 677 - IPSTATS_MIB_FRAGFAILS); 678 676 err = -ENOMEM; 679 677 goto fail; 680 678 } ··· 787 789 frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) + 788 790 hroom + troom, GFP_ATOMIC); 789 791 if (!frag) { 790 - IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), 791 - IPSTATS_MIB_FRAGFAILS); 792 792 err = -ENOMEM; 793 793 goto fail; 794 794 }
+28 -10
net/ipv6/udp.c
··· 291 291 struct udp_table *udptable) 292 292 { 293 293 const struct ipv6hdr *iph = ipv6_hdr(skb); 294 - struct sock *sk; 295 294 296 - sk = skb_steal_sock(skb); 297 - if (unlikely(sk)) 298 - return sk; 299 295 return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport, 300 296 &iph->daddr, dport, inet6_iif(skb), 301 297 udptable, skb); ··· 328 332 EXPORT_SYMBOL_GPL(udp6_lib_lookup); 329 333 #endif 330 334 335 + /* do not use the scratch area len for jumbogram: their length exceeds the 336 + * scratch area space; note that the IP6CB flags field is still in the first 337 + * cacheline, so checking for jumbograms is cheap 338 + */ 339 + static int udp6_skb_len(struct sk_buff *skb) 340 + { 341 + return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb); 342 + } 343 + 331 344 /* 332 345 * This should be easy, if there is something there we 333 346 * return it, otherwise we block. ··· 367 362 if (!skb) 368 363 return err; 369 364 370 - ulen = udp_skb_len(skb); 365 + ulen = udp6_skb_len(skb); 371 366 copied = len; 372 367 if (copied > ulen - off) 373 368 copied = ulen - off; ··· 809 804 if (udp6_csum_init(skb, uh, proto)) 810 805 goto csum_error; 811 806 807 + /* Check if the socket is already available, e.g. due to early demux */ 808 + sk = skb_steal_sock(skb); 809 + if (sk) { 810 + struct dst_entry *dst = skb_dst(skb); 811 + int ret; 812 + 813 + if (unlikely(sk->sk_rx_dst != dst)) 814 + udp_sk_rx_dst_set(sk, dst); 815 + 816 + ret = udpv6_queue_rcv_skb(sk, skb); 817 + sock_put(sk); 818 + 819 + /* a return value > 0 means to resubmit the input */ 820 + if (ret > 0) 821 + return ret; 822 + return 0; 823 + } 824 + 812 825 /* 813 826 * Multicast receive code 814 827 */ ··· 835 812 saddr, daddr, udptable, proto); 836 813 837 814 /* Unicast */ 838 - 839 - /* 840 - * check socket cache ... must talk to Alan about his plans 841 - * for sock caches... i'll skip this for now. 
842 - */ 843 815 sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable); 844 816 if (sk) { 845 817 int ret;
+5 -2
net/openvswitch/conntrack.c
··· 1310 1310 1311 1311 nla_for_each_nested(a, attr, rem) { 1312 1312 int type = nla_type(a); 1313 - int maxlen = ovs_ct_attr_lens[type].maxlen; 1314 - int minlen = ovs_ct_attr_lens[type].minlen; 1313 + int maxlen; 1314 + int minlen; 1315 1315 1316 1316 if (type > OVS_CT_ATTR_MAX) { 1317 1317 OVS_NLERR(log, ··· 1319 1319 type, OVS_CT_ATTR_MAX); 1320 1320 return -EINVAL; 1321 1321 } 1322 + 1323 + maxlen = ovs_ct_attr_lens[type].maxlen; 1324 + minlen = ovs_ct_attr_lens[type].minlen; 1322 1325 if (nla_len(a) < minlen || nla_len(a) > maxlen) { 1323 1326 OVS_NLERR(log, 1324 1327 "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)",
+1 -1
net/packet/af_packet.c
··· 4329 4329 register_prot_hook(sk); 4330 4330 } 4331 4331 spin_unlock(&po->bind_lock); 4332 - if (closing && (po->tp_version > TPACKET_V2)) { 4332 + if (pg_vec && (po->tp_version > TPACKET_V2)) { 4333 4333 /* Because we don't support block-based V3 on tx-ring */ 4334 4334 if (!tx_ring) 4335 4335 prb_shutdown_retire_blk_timer(po, rb_queue);
+3 -2
net/socket.c
··· 1916 1916 if (copy_from_user(&msg, umsg, sizeof(*umsg))) 1917 1917 return -EFAULT; 1918 1918 1919 - kmsg->msg_control = msg.msg_control; 1919 + kmsg->msg_control = (void __force *)msg.msg_control; 1920 1920 kmsg->msg_controllen = msg.msg_controllen; 1921 1921 kmsg->msg_flags = msg.msg_flags; 1922 1922 ··· 1935 1935 1936 1936 if (msg.msg_name && kmsg->msg_namelen) { 1937 1937 if (!save_addr) { 1938 - err = move_addr_to_kernel(msg.msg_name, kmsg->msg_namelen, 1938 + err = move_addr_to_kernel(msg.msg_name, 1939 + kmsg->msg_namelen, 1939 1940 kmsg->msg_name); 1940 1941 if (err < 0) 1941 1942 return err;
+2 -2
samples/bpf/tcbpf2_kern.c
··· 147 147 __builtin_memset(&gopt, 0x0, sizeof(gopt)); 148 148 gopt.opt_class = 0x102; /* Open Virtual Networking (OVN) */ 149 149 gopt.type = 0x08; 150 - gopt.r1 = 1; 150 + gopt.r1 = 0; 151 151 gopt.r2 = 0; 152 - gopt.r3 = 1; 152 + gopt.r3 = 0; 153 153 gopt.length = 2; /* 4-byte multiple */ 154 154 *(int *) &gopt.opt_data = 0xdeadbeef; 155 155
+1
samples/bpf/test_tunnel_bpf.sh
··· 149 149 ip link del veth1 150 150 ip link del ipip11 151 151 ip link del gretap11 152 + ip link del vxlan11 152 153 ip link del geneve11 153 154 pkill tcpdump 154 155 pkill cat
-1
tools/lib/bpf/bpf.c
··· 314 314 int err; 315 315 316 316 bzero(&attr, sizeof(attr)); 317 - bzero(info, *info_len); 318 317 attr.info.bpf_fd = prog_fd; 319 318 attr.info.info_len = *info_len; 320 319 attr.info.info = ptr_to_u64(info);
+6 -2
tools/testing/selftests/bpf/test_progs.c
··· 340 340 341 341 /* Check getting prog info */ 342 342 info_len = sizeof(struct bpf_prog_info) * 2; 343 + bzero(&prog_infos[i], info_len); 343 344 prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns); 344 345 prog_infos[i].jited_prog_len = sizeof(jited_insns); 345 346 prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns); ··· 370 369 371 370 /* Check getting map info */ 372 371 info_len = sizeof(struct bpf_map_info) * 2; 372 + bzero(&map_infos[i], info_len); 373 373 err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i], 374 374 &info_len); 375 375 if (CHECK(err || ··· 396 394 nr_id_found = 0; 397 395 next_id = 0; 398 396 while (!bpf_prog_get_next_id(next_id, &next_id)) { 399 - struct bpf_prog_info prog_info; 397 + struct bpf_prog_info prog_info = {}; 400 398 int prog_fd; 401 399 402 400 info_len = sizeof(prog_info); ··· 420 418 nr_id_found++; 421 419 422 420 err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len); 421 + prog_infos[i].jited_prog_insns = 0; 422 + prog_infos[i].xlated_prog_insns = 0; 423 423 CHECK(err || info_len != sizeof(struct bpf_prog_info) || 424 424 memcmp(&prog_info, &prog_infos[i], info_len), 425 425 "get-prog-info(next_id->fd)", ··· 440 436 nr_id_found = 0; 441 437 next_id = 0; 442 438 while (!bpf_map_get_next_id(next_id, &next_id)) { 443 - struct bpf_map_info map_info; 439 + struct bpf_map_info map_info = {}; 444 440 int map_fd; 445 441 446 442 info_len = sizeof(map_info);
+28
tools/testing/selftests/bpf/test_verifier.c
··· 5980 5980 .result = REJECT, 5981 5981 .result_unpriv = REJECT, 5982 5982 }, 5983 + { 5984 + "subtraction bounds (map value)", 5985 + .insns = { 5986 + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 5987 + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5988 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5989 + BPF_LD_MAP_FD(BPF_REG_1, 0), 5990 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 5991 + BPF_FUNC_map_lookup_elem), 5992 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), 5993 + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 5994 + BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7), 5995 + BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1), 5996 + BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5), 5997 + BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3), 5998 + BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56), 5999 + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 6000 + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 6001 + BPF_EXIT_INSN(), 6002 + BPF_MOV64_IMM(BPF_REG_0, 0), 6003 + BPF_EXIT_INSN(), 6004 + }, 6005 + .fixup_map1 = { 3 }, 6006 + .errstr_unpriv = "R0 pointer arithmetic prohibited", 6007 + .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 6008 + .result = REJECT, 6009 + .result_unpriv = REJECT, 6010 + }, 5983 6011 }; 5984 6012 5985 6013 static int probe_filter_length(const struct bpf_insn *fp)