Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:

1) More jumbo frame fixes in r8169, from Heiner Kallweit.

2) Fix bpf build in minimal configuration, from Alexei Starovoitov.

3) Use after free in slcan driver, from Jouni Hogander.

4) Flower classifier port ranges don't work properly in the HW offload
case, from Yoshiki Komachi.

5) Use after free in hns3_nic_maybe_stop_tx(), from Yunsheng Lin.

6) Out of bounds access in mqprio_dump(), from Vladyslav Tarasiuk.

7) Fix flow dissection in dsa TX path, from Alexander Lobakin.

8) Stale syncookie timestamp fixes from Guillaume Nault.

[ Did an evil merge to silence a warning introduced by this pull - Linus ]

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (84 commits)
r8169: fix rtl_hw_jumbo_disable for RTL8168evl
net_sched: validate TCA_KIND attribute in tc_chain_tmplt_add()
r8169: add missing RX enabling for WoL on RTL8125
vhost/vsock: accept only packets with the right dst_cid
net: phy: dp83867: fix hfs boot in rgmii mode
net: ethernet: ti: cpsw: fix extra rx interrupt
inet: protect against too small mtu values.
gre: refetch erspan header from skb->data after pskb_may_pull()
pppoe: remove redundant BUG_ON() check in pppoe_pernet
tcp: Protect accesses to .ts_recent_stamp with {READ,WRITE}_ONCE()
tcp: tighten acceptance of ACKs not matching a child socket
tcp: fix rejected syncookies due to stale timestamps
lpc_eth: kernel BUG on remove
tcp: md5: fix potential overestimation of TCP option space
net: sched: allow indirect blocks to bind to clsact in TC
net: core: rename indirect block ingress cb function
net-sysfs: Call dev_hold always in netdev_queue_add_kobject
net: dsa: fix flow dissection on Tx path
net/tls: Fix return values to avoid ENOTSUPP
net: avoid an indirect call in ____sys_recvmsg()
...

Changed files: +1026 -629 (drivers, include, kernel, net, samples, scripts, tools)

+17
MAINTAINERS
··· 10107 10107 S: Maintained 10108 10108 F: drivers/media/radio/radio-maxiradio* 10109 10109 10110 + MCAN MMIO DEVICE DRIVER 10111 + M: Sriram Dash <sriram.dash@samsung.com> 10112 + L: linux-can@vger.kernel.org 10113 + S: Maintained 10114 + F: Documentation/devicetree/bindings/net/can/m_can.txt 10115 + F: drivers/net/can/m_can/m_can.c 10116 + F: drivers/net/can/m_can/m_can.h 10117 + F: drivers/net/can/m_can/m_can_platform.c 10118 + 10110 10119 MCP4018 AND MCP4531 MICROCHIP DIGITAL POTENTIOMETER DRIVERS 10111 10120 M: Peter Rosin <peda@axentia.se> 10112 10121 L: linux-iio@vger.kernel.org ··· 18147 18138 M: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com> 18148 18139 S: Maintained 18149 18140 F: drivers/net/ethernet/xilinx/xilinx_axienet* 18141 + 18142 + XILINX CAN DRIVER 18143 + M: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com> 18144 + R: Naga Sureshkumar Relli <naga.sureshkumar.relli@xilinx.com> 18145 + L: linux-can@vger.kernel.org 18146 + S: Maintained 18147 + F: Documentation/devicetree/bindings/net/can/xilinx_can.txt 18148 + F: drivers/net/can/xilinx_can.c 18150 18149 18151 18150 XILINX UARTLITE SERIAL DRIVER 18152 18151 M: Peter Korsgaard <jacmet@sunsite.dk>
+3 -4
drivers/infiniband/core/addr.c
··· 421 421 (const struct sockaddr_in6 *)dst_sock; 422 422 struct flowi6 fl6; 423 423 struct dst_entry *dst; 424 - int ret; 425 424 426 425 memset(&fl6, 0, sizeof fl6); 427 426 fl6.daddr = dst_in->sin6_addr; 428 427 fl6.saddr = src_in->sin6_addr; 429 428 fl6.flowi6_oif = addr->bound_dev_if; 430 429 431 - ret = ipv6_stub->ipv6_dst_lookup(addr->net, NULL, &dst, &fl6); 432 - if (ret < 0) 433 - return ret; 430 + dst = ipv6_stub->ipv6_dst_lookup_flow(addr->net, NULL, &fl6, NULL); 431 + if (IS_ERR(dst)) 432 + return PTR_ERR(dst); 434 433 435 434 if (ipv6_addr_any(&src_in->sin6_addr)) 436 435 src_in->sin6_addr = fl6.saddr;
+5 -3
drivers/infiniband/sw/rxe/rxe_net.c
··· 117 117 memcpy(&fl6.daddr, daddr, sizeof(*daddr)); 118 118 fl6.flowi6_proto = IPPROTO_UDP; 119 119 120 - if (unlikely(ipv6_stub->ipv6_dst_lookup(sock_net(recv_sockets.sk6->sk), 121 - recv_sockets.sk6->sk, &ndst, &fl6))) { 120 + ndst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(recv_sockets.sk6->sk), 121 + recv_sockets.sk6->sk, &fl6, 122 + NULL); 123 + if (unlikely(IS_ERR(ndst))) { 122 124 pr_err_ratelimited("no route to %pI6\n", daddr); 123 - goto put; 125 + return NULL; 124 126 } 125 127 126 128 if (unlikely(ndst->error)) {
+1
drivers/net/can/slcan.c
··· 617 617 sl->tty = NULL; 618 618 tty->disc_data = NULL; 619 619 clear_bit(SLF_INUSE, &sl->flags); 620 + slc_free_netdev(sl->dev); 620 621 free_netdev(sl->dev); 621 622 622 623 err_exit:
+1 -1
drivers/net/can/usb/ucan.c
··· 792 792 up); 793 793 794 794 usb_anchor_urb(urb, &up->rx_urbs); 795 - ret = usb_submit_urb(urb, GFP_KERNEL); 795 + ret = usb_submit_urb(urb, GFP_ATOMIC); 796 796 797 797 if (ret < 0) { 798 798 netdev_err(up->netdev,
+15 -13
drivers/net/can/xilinx_can.c
··· 542 542 543 543 /** 544 544 * xcan_write_frame - Write a frame to HW 545 - * @priv: Driver private data structure 545 + * @ndev: Pointer to net_device structure 546 546 * @skb: sk_buff pointer that contains data to be Txed 547 547 * @frame_offset: Register offset to write the frame to 548 548 */ 549 - static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb, 549 + static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb, 550 550 int frame_offset) 551 551 { 552 552 u32 id, dlc, data[2] = {0, 0}; 553 553 struct canfd_frame *cf = (struct canfd_frame *)skb->data; 554 554 u32 ramoff, dwindex = 0, i; 555 + struct xcan_priv *priv = netdev_priv(ndev); 555 556 556 557 /* Watch carefully on the bit sequence */ 557 558 if (cf->can_id & CAN_EFF_FLAG) { ··· 587 586 dlc |= XCAN_DLCR_BRS_MASK; 588 587 dlc |= XCAN_DLCR_EDL_MASK; 589 588 } 589 + 590 + if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) && 591 + (priv->devtype.flags & XCAN_FLAG_TXFEMP)) 592 + can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max); 593 + else 594 + can_put_echo_skb(skb, ndev, 0); 595 + 596 + priv->tx_head++; 590 597 591 598 priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id); 592 599 /* If the CAN frame is RTR frame this write triggers transmission ··· 647 638 XCAN_SR_TXFLL_MASK)) 648 639 return -ENOSPC; 649 640 650 - can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max); 651 - 652 641 spin_lock_irqsave(&priv->tx_lock, flags); 653 642 654 - priv->tx_head++; 655 - 656 - xcan_write_frame(priv, skb, XCAN_TXFIFO_OFFSET); 643 + xcan_write_frame(ndev, skb, XCAN_TXFIFO_OFFSET); 657 644 658 645 /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */ 659 646 if (priv->tx_max > 1) ··· 680 675 BIT(XCAN_TX_MAILBOX_IDX))) 681 676 return -ENOSPC; 682 677 683 - can_put_echo_skb(skb, ndev, 0); 684 - 685 678 spin_lock_irqsave(&priv->tx_lock, flags); 686 679 687 - priv->tx_head++; 688 - 689 - xcan_write_frame(priv, skb, 680 + xcan_write_frame(ndev, skb, 690 681 XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX)); 691 682 692 683 /* Mark buffer as ready for transmit */ ··· 1773 1772 1774 1773 priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name); 1775 1774 if (IS_ERR(priv->bus_clk)) { 1776 - dev_err(&pdev->dev, "bus clock not found\n"); 1775 + if (PTR_ERR(priv->bus_clk) != -EPROBE_DEFER) 1776 + dev_err(&pdev->dev, "bus clock not found\n"); 1777 1777 ret = PTR_ERR(priv->bus_clk); 1778 1778 goto err_free; 1779 1779 }
+1 -1
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
··· 1115 1115 phy_interface_mode(lmac->lmac_type))) 1116 1116 return -ENODEV; 1117 1117 1118 - phy_start_aneg(lmac->phydev); 1118 + phy_start(lmac->phydev); 1119 1119 return 0; 1120 1120 } 1121 1121
+5
drivers/net/ethernet/freescale/enetc/enetc.c
··· 1332 1332 { 1333 1333 struct enetc_ndev_priv *priv = netdev_priv(ndev); 1334 1334 struct phy_device *phydev; 1335 + struct ethtool_eee edata; 1335 1336 1336 1337 if (!priv->phy_node) 1337 1338 return 0; /* phy-less mode */ ··· 1345 1344 } 1346 1345 1347 1346 phy_attached_info(phydev); 1347 + 1348 + /* disable EEE autoneg, until ENETC driver supports it */ 1349 + memset(&edata, 0, sizeof(struct ethtool_eee)); 1350 + phy_ethtool_set_eee(phydev, &edata); 1348 1351 1349 1352 return 0; 1350 1353 }
+26 -24
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
··· 1287 1287 } 1288 1288 1289 1289 static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, 1290 - struct sk_buff **out_skb) 1290 + struct net_device *netdev, 1291 + struct sk_buff *skb) 1291 1292 { 1293 + struct hns3_nic_priv *priv = netdev_priv(netdev); 1292 1294 unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U]; 1293 - struct sk_buff *skb = *out_skb; 1294 1295 unsigned int bd_num; 1295 1296 1296 1297 bd_num = hns3_tx_bd_num(skb, bd_size); 1297 1298 if (unlikely(bd_num > HNS3_MAX_NON_TSO_BD_NUM)) { 1298 - struct sk_buff *new_skb; 1299 - 1300 1299 if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) && 1301 1300 !hns3_skb_need_linearized(skb, bd_size, bd_num)) 1302 1301 goto out; 1303 1302 1304 - /* manual split the send packet */ 1305 - new_skb = skb_copy(skb, GFP_ATOMIC); 1306 - if (!new_skb) 1303 + if (__skb_linearize(skb)) 1307 1304 return -ENOMEM; 1308 - dev_kfree_skb_any(skb); 1309 - *out_skb = new_skb; 1310 1305 1311 - bd_num = hns3_tx_bd_count(new_skb->len); 1312 - if ((skb_is_gso(new_skb) && bd_num > HNS3_MAX_TSO_BD_NUM) || 1313 - (!skb_is_gso(new_skb) && 1306 + bd_num = hns3_tx_bd_count(skb->len); 1307 + if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) || 1308 + (!skb_is_gso(skb) && 1314 1309 bd_num > HNS3_MAX_NON_TSO_BD_NUM)) 1315 1310 return -ENOMEM; 1316 1311 ··· 1315 1320 } 1316 1321 1317 1322 out: 1318 - if (unlikely(ring_space(ring) < bd_num)) 1319 - return -EBUSY; 1323 + if (likely(ring_space(ring) >= bd_num)) 1324 + return bd_num; 1320 1325 1321 - return bd_num; 1326 + netif_stop_subqueue(netdev, ring->queue_index); 1327 + smp_mb(); /* Memory barrier before checking ring_space */ 1328 + 1329 + /* Start queue in case hns3_clean_tx_ring has just made room 1330 + * available and has not seen the queue stopped state performed 1331 + * by netif_stop_subqueue above. 1332 + */ 1333 + if (ring_space(ring) >= bd_num && netif_carrier_ok(netdev) && 1334 + !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { 1335 + netif_start_subqueue(netdev, ring->queue_index); 1336 + return bd_num; 1337 + } 1338 + 1339 + return -EBUSY; 1322 1340 } 1323 1341 1324 1342 static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) ··· 1408 1400 /* Prefetch the data used later */ 1409 1401 prefetch(skb->data); 1410 1402 1411 - ret = hns3_nic_maybe_stop_tx(ring, &skb); 1403 + ret = hns3_nic_maybe_stop_tx(ring, netdev, skb); 1412 1404 if (unlikely(ret <= 0)) { 1413 1405 if (ret == -EBUSY) { 1414 1406 u64_stats_update_begin(&ring->syncp); 1415 1407 ring->stats.tx_busy++; 1416 1408 u64_stats_update_end(&ring->syncp); 1417 - goto out_net_tx_busy; 1409 + return NETDEV_TX_BUSY; 1418 1410 } else if (ret == -ENOMEM) { 1419 1411 u64_stats_update_begin(&ring->syncp); 1420 1412 ring->stats.sw_err_cnt++; ··· 1465 1457 out_err_tx_ok: 1466 1458 dev_kfree_skb_any(skb); 1467 1459 return NETDEV_TX_OK; 1468 - 1469 - out_net_tx_busy: 1470 - netif_stop_subqueue(netdev, ring->queue_index); 1471 - smp_mb(); /* Commit all data before submit */ 1472 - 1473 - return NETDEV_TX_BUSY; 1474 1460 } 1475 1461 1476 1462 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) ··· 2521 2519 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index); 2522 2520 netdev_tx_completed_queue(dev_queue, pkts, bytes); 2523 2521 2524 - if (unlikely(pkts && netif_carrier_ok(netdev) && 2522 + if (unlikely(netif_carrier_ok(netdev) && 2525 2523 ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) { 2526 2524 /* Make sure that anybody stopping the queue after this 2527 2525 * sees the new next_to_clean.
+6 -12
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
··· 8438 8438 if (hdev->pdev->revision == 0x20) 8439 8439 return -EOPNOTSUPP; 8440 8440 8441 + vport = hclge_get_vf_vport(hdev, vfid); 8442 + if (!vport) 8443 + return -EINVAL; 8444 + 8441 8445 /* qos is a 3 bits value, so can not be bigger than 7 */ 8442 - if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7) 8446 + if (vlan > VLAN_N_VID - 1 || qos > 7) 8443 8447 return -EINVAL; 8444 8448 if (proto != htons(ETH_P_8021Q)) 8445 8449 return -EPROTONOSUPPORT; 8446 8450 8447 - vport = &hdev->vport[vfid]; 8448 8451 state = hclge_get_port_base_vlan_state(vport, 8449 8452 vport->port_base_vlan_cfg.state, 8450 8453 vlan); ··· 8458 8455 vlan_info.qos = qos; 8459 8456 vlan_info.vlan_proto = ntohs(proto); 8460 8457 8461 - /* update port based VLAN for PF */ 8462 - if (!vfid) { 8463 - hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); 8464 - ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info); 8465 - hclge_notify_client(hdev, HNAE3_UP_CLIENT); 8466 - 8467 - return ret; 8468 - } 8469 - 8470 8458 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { 8471 8459 return hclge_update_port_base_vlan_cfg(vport, state, 8472 8460 &vlan_info); 8473 8461 } else { 8474 8462 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0], 8475 - (u8)vfid, state, 8463 + vport->vport_id, state, 8476 8464 vlan, qos, 8477 8465 ntohs(proto)); 8478 8466 return ret;
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 816 816 struct mlx5e_priv { 817 817 /* priv data path fields - start */ 818 818 struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC]; 819 - int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC]; 819 + int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC]; 820 820 #ifdef CONFIG_MLX5_CORE_EN_DCB 821 821 struct mlx5e_dcbx_dp dcbx_dp; 822 822 #endif
+1
drivers/net/ethernet/mellanox/mlx5/core/en/port.c
··· 73 73 [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2] = 50000, 74 74 [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR] = 50000, 75 75 [MLX5E_CAUI_4_100GBASE_CR4_KR4] = 100000, 76 + [MLX5E_100GAUI_2_100GBASE_CR2_KR2] = 100000, 76 77 [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = 200000, 77 78 [MLX5E_400GAUI_8] = 400000, 78 79 };
+25 -2
drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
··· 155 155 } 156 156 157 157 if (port_buffer->buffer[i].size < 158 - (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) 158 + (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) { 159 + pr_err("buffer_size[%d]=%d is not enough for lossless buffer\n", 160 + i, port_buffer->buffer[i].size); 159 161 return -ENOMEM; 162 + } 160 163 161 164 port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff; 162 165 port_buffer->buffer[i].xon = ··· 235 232 return 0; 236 233 } 237 234 235 + static int fill_pfc_en(struct mlx5_core_dev *mdev, u8 *pfc_en) 236 + { 237 + u32 g_rx_pause, g_tx_pause; 238 + int err; 239 + 240 + err = mlx5_query_port_pause(mdev, &g_rx_pause, &g_tx_pause); 241 + if (err) 242 + return err; 243 + 244 + /* If global pause enabled, set all active buffers to lossless. 245 + * Otherwise, check PFC setting. 246 + */ 247 + if (g_rx_pause || g_tx_pause) 248 + *pfc_en = 0xff; 249 + else 250 + err = mlx5_query_port_pfc(mdev, pfc_en, NULL); 251 + 252 + return err; 253 + } 254 + 238 255 #define MINIMUM_MAX_MTU 9216 239 256 int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, 240 257 u32 change, unsigned int mtu, ··· 300 277 301 278 if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) { 302 279 update_prio2buffer = true; 303 - err = mlx5_query_port_pfc(priv->mdev, &curr_pfc_en, NULL); 280 + err = fill_pfc_en(priv->mdev, &curr_pfc_en); 304 281 if (err) 305 282 return err; 306 283
+5 -5
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
··· 297 297 298 298 int ret; 299 299 300 - ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst, 301 - fl6); 302 - if (ret < 0) 303 - return ret; 300 + dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, fl6, 301 + NULL); 302 + if (IS_ERR(dst)) 303 + return PTR_ERR(dst); 304 304 305 305 if (!(*out_ttl)) 306 306 *out_ttl = ip6_dst_hoplimit(dst); ··· 329 329 struct net_device *out_dev, *route_dev; 330 330 struct flowi6 fl6 = {}; 331 331 struct ipv6hdr *ip6h; 332 - struct neighbour *n; 332 + struct neighbour *n = NULL; 333 333 int ipv6_encap_size; 334 334 char *encap_header; 335 335 u8 nud_state, ttl;
+4 -11
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
··· 1027 1027 return bitmap_intersects(modes, adver, __ETHTOOL_LINK_MODE_MASK_NBITS); 1028 1028 } 1029 1029 1030 - static bool ext_speed_requested(u32 speed) 1031 - { 1032 - #define MLX5E_MAX_PTYS_LEGACY_SPEED 100000 1033 - return !!(speed > MLX5E_MAX_PTYS_LEGACY_SPEED); 1034 - } 1035 - 1036 - static bool ext_requested(u8 autoneg, const unsigned long *adver, u32 speed) 1030 + static bool ext_requested(u8 autoneg, const unsigned long *adver, bool ext_supported) 1037 1031 { 1038 1032 bool ext_link_mode = ext_link_mode_requested(adver); 1039 - bool ext_speed = ext_speed_requested(speed); 1040 1033 1041 - return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_speed; 1034 + return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_supported; 1042 1035 } 1043 1036 1044 1037 int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, ··· 1058 1065 autoneg = link_ksettings->base.autoneg; 1059 1066 speed = link_ksettings->base.speed; 1060 1067 1061 - ext = ext_requested(autoneg, adver, speed), 1062 1068 ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); 1069 + ext = ext_requested(autoneg, adver, ext_supported); 1063 1070 if (!ext_supported && ext) 1064 1071 return -EOPNOTSUPP; 1065 1072 ··· 1636 1643 break; 1637 1644 case MLX5_MODULE_ID_SFP: 1638 1645 modinfo->type = ETH_MODULE_SFF_8472; 1639 - modinfo->eeprom_len = MLX5_EEPROM_PAGE_LENGTH; 1646 + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 1640 1647 break; 1641 1648 default: 1642 1649 netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n",
+12 -19
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 1691 1691 struct mlx5e_params *params, 1692 1692 struct mlx5e_channel_param *cparam) 1693 1693 { 1694 - struct mlx5e_priv *priv = c->priv; 1695 1694 int err, tc; 1696 1695 1697 1696 for (tc = 0; tc < params->num_tc; tc++) { 1698 - int txq_ix = c->ix + tc * priv->max_nch; 1697 + int txq_ix = c->ix + tc * params->num_channels; 1699 1698 1700 1699 err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix, 1701 1700 params, &cparam->sq, &c->sq[tc], tc); ··· 2875 2876 netdev_set_tc_queue(netdev, tc, nch, 0); 2876 2877 } 2877 2878 2878 - static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv) 2879 + static void mlx5e_build_txq_maps(struct mlx5e_priv *priv) 2879 2880 { 2880 - int i, tc; 2881 + int i, ch; 2881 2882 2882 - for (i = 0; i < priv->max_nch; i++) 2883 - for (tc = 0; tc < priv->profile->max_tc; tc++) 2884 - priv->channel_tc2txq[i][tc] = i + tc * priv->max_nch; 2885 - } 2883 + ch = priv->channels.num; 2886 2884 2887 - static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv) 2888 - { 2889 - struct mlx5e_channel *c; 2890 - struct mlx5e_txqsq *sq; 2891 - int i, tc; 2885 + for (i = 0; i < ch; i++) { 2886 + int tc; 2892 2887 2893 - for (i = 0; i < priv->channels.num; i++) { 2894 - c = priv->channels.c[i]; 2895 - for (tc = 0; tc < c->num_tc; tc++) { 2896 - sq = &c->sq[tc]; 2888 + for (tc = 0; tc < priv->channels.params.num_tc; tc++) { 2889 + struct mlx5e_channel *c = priv->channels.c[i]; 2890 + struct mlx5e_txqsq *sq = &c->sq[tc]; 2891 + 2897 2892 priv->txq2sq[sq->txq_ix] = sq; 2893 + priv->channel_tc2realtxq[i][tc] = i + tc * ch; 2898 2894 } 2899 2895 } 2900 2896 } ··· 2904 2910 netif_set_real_num_tx_queues(netdev, num_txqs); 2905 2911 netif_set_real_num_rx_queues(netdev, num_rxqs); 2906 2912 2907 - mlx5e_build_tx2sq_maps(priv); 2913 + mlx5e_build_txq_maps(priv); 2908 2914 mlx5e_activate_channels(&priv->channels); 2909 2915 mlx5e_xdp_tx_enable(priv); 2910 2916 netif_tx_start_all_queues(priv->netdev); ··· 5015 5021 if (err) 5016 5022 mlx5_core_err(mdev, "TLS initialization failed, %d\n", err); 5017 5023 mlx5e_build_nic_netdev(netdev); 5018 - mlx5e_build_tc2txq_maps(priv); 5019 5024 mlx5e_health_create_reporters(priv); 5020 5025 5021 5026 return 0;
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
··· 1601 1601 for (j = 0; j < NUM_SQ_STATS; j++) 1602 1602 sprintf(data + (idx++) * ETH_GSTRING_LEN, 1603 1603 sq_stats_desc[j].format, 1604 - priv->channel_tc2txq[i][tc]); 1604 + i + tc * max_nch); 1605 1605 1606 1606 for (i = 0; i < max_nch; i++) { 1607 1607 for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
+5 -2
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 1626 1626 1627 1627 flow_flag_clear(flow, DUP); 1628 1628 1629 - mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow); 1630 - kvfree(flow->peer_flow); 1629 + if (refcount_dec_and_test(&flow->peer_flow->refcnt)) { 1630 + mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow); 1631 + kfree(flow->peer_flow); 1632 + } 1633 + 1631 1634 flow->peer_flow = NULL; 1632 1635 } 1633 1636
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
··· 93 93 if (txq_ix >= num_channels) 94 94 txq_ix = priv->txq2sq[txq_ix]->ch_ix; 95 95 96 - return priv->channel_tc2txq[txq_ix][up]; 96 + return priv->channel_tc2realtxq[txq_ix][up]; 97 97 } 98 98 99 99 static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
+8 -1
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
··· 81 81 struct mlx5_fc *drop_counter; 82 82 } legacy; 83 83 struct { 84 - struct mlx5_flow_group *metadata_grp; 84 + /* Optional group to add an FTE to do internal priority 85 + * tagging on ingress packets. 86 + */ 87 + struct mlx5_flow_group *metadata_prio_tag_grp; 88 + /* Group to add default match-all FTE entry to tag ingress 89 + * packet with metadata. 90 + */ 91 + struct mlx5_flow_group *metadata_allmatch_grp; 85 92 struct mlx5_modify_hdr *modify_metadata; 86 93 struct mlx5_flow_handle *modify_metadata_rule; 87 94 } offloads;
+86 -38
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 88 88 return 1; 89 89 } 90 90 91 + static bool 92 + esw_check_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw, 93 + const struct mlx5_vport *vport) 94 + { 95 + return (MLX5_CAP_GEN(esw->dev, prio_tag_required) && 96 + mlx5_eswitch_is_vf_vport(esw, vport->vport)); 97 + } 98 + 91 99 static void 92 100 mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw, 93 101 struct mlx5_flow_spec *spec, ··· 1768 1760 * required, allow 1769 1761 * Unmatched traffic is allowed by default 1770 1762 */ 1771 - 1772 1763 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 1773 - if (!spec) { 1774 - err = -ENOMEM; 1775 - goto out_no_mem; 1776 - } 1764 + if (!spec) 1765 + return -ENOMEM; 1777 1766 1778 1767 /* Untagged packets - push prio tag VLAN, allow */ 1779 1768 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); ··· 1796 1791 "vport[%d] configure ingress untagged allow rule, err(%d)\n", 1797 1792 vport->vport, err); 1798 1793 vport->ingress.allow_rule = NULL; 1799 - goto out; 1800 1794 } 1801 1795 1802 - out: 1803 1796 kvfree(spec); 1804 - out_no_mem: 1805 - if (err) 1806 - esw_vport_cleanup_ingress_rules(esw, vport); 1807 1797 return err; 1808 1798 } 1809 1799 ··· 1836 1836 esw_warn(esw->dev, 1837 1837 "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n", 1838 1838 vport->vport, err); 1839 - vport->ingress.offloads.modify_metadata_rule = NULL; 1840 - goto out; 1841 - } 1842 - 1843 - out: 1844 - if (err) 1845 1839 mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata); 1840 + vport->ingress.offloads.modify_metadata_rule = NULL; 1841 + } 1846 1842 return err; 1847 1843 } 1848 1844 ··· 1858 1862 { 1859 1863 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1860 1864 struct mlx5_flow_group *g; 1865 + void *match_criteria; 1861 1866 u32 *flow_group_in; 1867 + u32 flow_index = 0; 1862 1868 int ret = 0; 1863 1869 1864 1870 flow_group_in = kvzalloc(inlen, GFP_KERNEL); 1865 1871 if (!flow_group_in) 1866 1872 return -ENOMEM; 1867 1873 1868 - memset(flow_group_in, 0, inlen); 1869 - MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); 1870 - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); 1874 + if (esw_check_ingress_prio_tag_enabled(esw, vport)) { 1875 + /* This group is to hold FTE to match untagged packets when prio_tag 1876 + * is enabled. 
1877 + */ 1878 + memset(flow_group_in, 0, inlen); 1871 1879 1872 - g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); 1873 - if (IS_ERR(g)) { 1874 - ret = PTR_ERR(g); 1875 - esw_warn(esw->dev, 1876 - "Failed to create vport[%d] ingress metadata group, err(%d)\n", 1877 - vport->vport, ret); 1878 - goto grp_err; 1880 + match_criteria = MLX5_ADDR_OF(create_flow_group_in, 1881 + flow_group_in, match_criteria); 1882 + MLX5_SET(create_flow_group_in, flow_group_in, 1883 + match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1884 + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag); 1885 + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index); 1886 + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index); 1887 + 1888 + g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); 1889 + if (IS_ERR(g)) { 1890 + ret = PTR_ERR(g); 1891 + esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n", 1892 + vport->vport, ret); 1893 + goto prio_tag_err; 1894 + } 1895 + vport->ingress.offloads.metadata_prio_tag_grp = g; 1896 + flow_index++; 1879 1897 } 1880 - vport->ingress.offloads.metadata_grp = g; 1881 - grp_err: 1898 + 1899 + if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { 1900 + /* This group holds an FTE with no matches for add metadata for 1901 + * tagged packets, if prio-tag is enabled (as a fallthrough), 1902 + * or all traffic in case prio-tag is disabled. 1903 + */ 1904 + memset(flow_group_in, 0, inlen); 1905 + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index); 1906 + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index); 1907 + 1908 + g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); 1909 + if (IS_ERR(g)) { 1910 + ret = PTR_ERR(g); 1911 + esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n", 1912 + vport->vport, ret); 1913 + goto metadata_err; 1914 + } 1915 + vport->ingress.offloads.metadata_allmatch_grp = g; 1916 + } 1917 + 1918 + kvfree(flow_group_in); 1919 + return 0; 1920 + 1921 + metadata_err: 1922 + if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) { 1923 + mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp); 1924 + vport->ingress.offloads.metadata_prio_tag_grp = NULL; 1925 + } 1926 + prio_tag_err: 1882 1927 kvfree(flow_group_in); 1883 1928 return ret; 1884 1929 } 1885 1930 1886 1931 static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport) 1887 1932 { 1888 - if (vport->ingress.offloads.metadata_grp) { 1889 - mlx5_destroy_flow_group(vport->ingress.offloads.metadata_grp); 1890 - vport->ingress.offloads.metadata_grp = NULL; 1933 + if (vport->ingress.offloads.metadata_allmatch_grp) { 1934 + mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp); 1935 + vport->ingress.offloads.metadata_allmatch_grp = NULL; 1936 + } 1937 + 1938 + if (vport->ingress.offloads.metadata_prio_tag_grp) { 1939 + mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp); 1940 + vport->ingress.offloads.metadata_prio_tag_grp = NULL; 1891 1941 } 1892 1942 } 1893 1943 1894 1944 static int esw_vport_ingress_config(struct mlx5_eswitch *esw, 1895 1945 struct mlx5_vport *vport) 1896 1946 { 1947 + int num_ftes = 0; 1897 1948 int err; 1898 1949 1899 1950 if (!mlx5_eswitch_vport_match_metadata_enabled(esw) && 1900 - !MLX5_CAP_GEN(esw->dev, prio_tag_required)) 1951 + !esw_check_ingress_prio_tag_enabled(esw, vport)) 1901 1952 return 0; 1902 1953 1903 1954 
esw_vport_cleanup_ingress_rules(esw, vport); 1904 - err = esw_vport_create_ingress_acl_table(esw, vport, 1); 1955 + 1956 + if (mlx5_eswitch_vport_match_metadata_enabled(esw)) 1957 + num_ftes++; 1958 + if (esw_check_ingress_prio_tag_enabled(esw, vport)) 1959 + num_ftes++; 1960 + 1961 + err = esw_vport_create_ingress_acl_table(esw, vport, num_ftes); 1905 1962 if (err) { 1906 1963 esw_warn(esw->dev, 1907 1964 "failed to enable ingress acl (%d) on vport[%d]\n", ··· 1975 1926 goto metadata_err; 1976 1927 } 1977 1928 1978 - if (MLX5_CAP_GEN(esw->dev, prio_tag_required) && 1979 - mlx5_eswitch_is_vf_vport(esw, vport->vport)) { 1929 + if (esw_check_ingress_prio_tag_enabled(esw, vport)) { 1980 1930 err = esw_vport_ingress_prio_tag_config(esw, vport); 1981 1931 if (err) 1982 1932 goto prio_tag_err; ··· 1985 1937 prio_tag_err: 1986 1938 esw_vport_del_ingress_acl_modify_metadata(esw, vport); 1987 1939 metadata_err: 1988 - esw_vport_cleanup_ingress_rules(esw, vport); 1989 1940 esw_vport_destroy_ingress_acl_group(vport); 1990 1941 group_err: 1991 1942 esw_vport_destroy_ingress_acl_table(vport); ··· 2055 2008 if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) { 2056 2009 err = esw_vport_egress_config(esw, vport); 2057 2010 if (err) { 2058 - esw_vport_del_ingress_acl_modify_metadata(esw, vport); 2059 2011 esw_vport_cleanup_ingress_rules(esw, vport); 2012 + esw_vport_del_ingress_acl_modify_metadata(esw, vport); 2013 + esw_vport_destroy_ingress_acl_group(vport); 2060 2014 esw_vport_destroy_ingress_acl_table(vport); 2061 2015 } 2062 2016 } ··· 2069 2021 struct mlx5_vport *vport) 2070 2022 { 2071 2023 esw_vport_disable_egress_acl(esw, vport); 2072 - esw_vport_del_ingress_acl_modify_metadata(esw, vport); 2073 2024 esw_vport_cleanup_ingress_rules(esw, vport); 2025 + esw_vport_del_ingress_acl_modify_metadata(esw, vport); 2074 2026 esw_vport_destroy_ingress_acl_group(vport); 2075 2027 esw_vport_destroy_ingress_acl_table(vport); 2076 2028 }
+10 -4
drivers/net/ethernet/mscc/ocelot.c
··· 2149 2149 2150 2150 static int ocelot_init_timestamp(struct ocelot *ocelot) 2151 2151 { 2152 + struct ptp_clock *ptp_clock; 2153 + 2152 2154 ocelot->ptp_info = ocelot_ptp_clock_info; 2153 - ocelot->ptp_clock = ptp_clock_register(&ocelot->ptp_info, ocelot->dev); 2154 - if (IS_ERR(ocelot->ptp_clock)) 2155 - return PTR_ERR(ocelot->ptp_clock); 2155 + ptp_clock = ptp_clock_register(&ocelot->ptp_info, ocelot->dev); 2156 + if (IS_ERR(ptp_clock)) 2157 + return PTR_ERR(ptp_clock); 2156 2158 /* Check if PHC support is missing at the configuration level */ 2157 - if (!ocelot->ptp_clock) 2159 + if (!ptp_clock) 2158 2160 return 0; 2161 + 2162 + ocelot->ptp_clock = ptp_clock; 2159 2163 2160 2164 ocelot_write(ocelot, SYS_PTP_CFG_PTP_STAMP_WID(30), SYS_PTP_CFG); 2161 2165 ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_LOW); ··· 2493 2489 destroy_workqueue(ocelot->stats_queue); 2494 2490 mutex_destroy(&ocelot->stats_lock); 2495 2491 ocelot_ace_deinit(); 2492 + if (ocelot->ptp_clock) 2493 + ptp_clock_unregister(ocelot->ptp_clock); 2496 2494 2497 2495 for (i = 0; i < ocelot->num_phys_ports; i++) { 2498 2496 port = ocelot->ports[i];
-2
drivers/net/ethernet/nxp/lpc_eth.c
··· 817 817 pldat->mii_bus->priv = pldat; 818 818 pldat->mii_bus->parent = &pldat->pdev->dev; 819 819 820 - platform_set_drvdata(pldat->pdev, pldat->mii_bus); 821 - 822 820 node = of_get_child_by_name(pldat->pdev->dev.of_node, "mdio"); 823 821 err = of_mdiobus_register(pldat->mii_bus, node); 824 822 of_node_put(node);
+10 -6
drivers/net/ethernet/pensando/ionic/ionic_lif.c
··· 1381 1381 1382 1382 static int ionic_lif_rss_init(struct ionic_lif *lif) 1383 1383 { 1384 - u8 rss_key[IONIC_RSS_HASH_KEY_SIZE]; 1385 1384 unsigned int tbl_sz; 1386 1385 unsigned int i; 1387 - 1388 - netdev_rss_key_fill(rss_key, IONIC_RSS_HASH_KEY_SIZE); 1389 1386 1390 1387 lif->rss_types = IONIC_RSS_TYPE_IPV4 | 1391 1388 IONIC_RSS_TYPE_IPV4_TCP | ··· 1396 1399 for (i = 0; i < tbl_sz; i++) 1397 1400 lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs); 1398 1401 1399 - return ionic_lif_rss_config(lif, lif->rss_types, rss_key, NULL); 1402 + return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL); 1400 1403 } 1401 1404 1402 - static int ionic_lif_rss_deinit(struct ionic_lif *lif) 1405 + static void ionic_lif_rss_deinit(struct ionic_lif *lif) 1403 1406 { 1404 - return ionic_lif_rss_config(lif, 0x0, NULL, NULL); 1407 + int tbl_sz; 1408 + 1409 + tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); 1410 + memset(lif->rss_ind_tbl, 0, tbl_sz); 1411 + memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE); 1412 + 1413 + ionic_lif_rss_config(lif, 0x0, NULL, NULL); 1405 1414 } 1406 1415 1407 1416 static void ionic_txrx_disable(struct ionic_lif *lif) ··· 1732 1729 dev_err(dev, "Failed to allocate rss indirection table, aborting\n"); 1733 1730 goto err_out_free_qcqs; 1734 1731 } 1732 + netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE); 1735 1733 1736 1734 list_add_tail(&lif->list, &ionic->lifs); 1737 1735
+2 -2
drivers/net/ethernet/realtek/r8169_main.c
··· 3695 3695 case RTL_GIGA_MAC_VER_32: 3696 3696 case RTL_GIGA_MAC_VER_33: 3697 3697 case RTL_GIGA_MAC_VER_34: 3698 - case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_52: 3698 + case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_61: 3699 3699 RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | 3700 3700 AcceptBroadcast | AcceptMulticast | AcceptMyPhys); 3701 3701 break; ··· 3896 3896 case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28: 3897 3897 r8168dp_hw_jumbo_disable(tp); 3898 3898 break; 3899 - case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_34: 3899 + case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33: 3900 3900 r8168e_hw_jumbo_disable(tp); 3901 3901 break; 3902 3902 default:
+2
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 2009 2009 tx_q->cur_tx = 0; 2010 2010 tx_q->mss = 0; 2011 2011 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan)); 2012 + stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2013 + tx_q->dma_tx_phy, chan); 2012 2014 stmmac_start_tx_dma(priv, chan); 2013 2015 2014 2016 priv->dev->stats.tx_errors++;
+1 -1
drivers/net/ethernet/ti/Kconfig
··· 62 62 config TI_CPSW_SWITCHDEV 63 63 tristate "TI CPSW Switch Support with switchdev" 64 64 depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST 65 - select NET_SWITCHDEV 65 + depends on NET_SWITCHDEV 66 66 select TI_DAVINCI_MDIO 67 67 select MFD_SYSCON 68 68 select REGMAP
+1 -1
drivers/net/ethernet/ti/cpsw_priv.c
··· 100 100 { 101 101 struct cpsw_common *cpsw = dev_id; 102 102 103 - cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX); 104 103 writel(0, &cpsw->wr_regs->rx_en); 104 + cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX); 105 105 106 106 if (cpsw->quirk_irq) { 107 107 disable_irq_nosync(cpsw->irqs_table[0]);
+3 -1
drivers/net/geneve.c
··· 853 853 if (dst) 854 854 return dst; 855 855 } 856 - if (ipv6_stub->ipv6_dst_lookup(geneve->net, gs6->sock->sk, &dst, fl6)) { 856 + dst = ipv6_stub->ipv6_dst_lookup_flow(geneve->net, gs6->sock->sk, fl6, 857 + NULL); 858 + if (IS_ERR(dst)) { 857 859 netdev_dbg(dev, "no route to %pI6\n", &fl6->daddr); 858 860 return ERR_PTR(-ENETUNREACH); 859 861 }
+71 -48
drivers/net/phy/dp83867.c
··· 101 101 /* RGMIIDCTL bits */ 102 102 #define DP83867_RGMII_TX_CLK_DELAY_MAX 0xf 103 103 #define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4 104 + #define DP83867_RGMII_TX_CLK_DELAY_INV (DP83867_RGMII_TX_CLK_DELAY_MAX + 1) 104 105 #define DP83867_RGMII_RX_CLK_DELAY_MAX 0xf 105 106 #define DP83867_RGMII_RX_CLK_DELAY_SHIFT 0 107 + #define DP83867_RGMII_RX_CLK_DELAY_INV (DP83867_RGMII_RX_CLK_DELAY_MAX + 1) 108 + 106 109 107 110 /* IO_MUX_CFG bits */ 108 111 #define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MASK 0x1f ··· 297 294 return 0; 298 295 } 299 296 297 + static int dp83867_verify_rgmii_cfg(struct phy_device *phydev) 298 + { 299 + struct dp83867_private *dp83867 = phydev->priv; 300 + 301 + /* Existing behavior was to use default pin strapping delay in rgmii 302 + * mode, but rgmii should have meant no delay. Warn existing users. 303 + */ 304 + if (phydev->interface == PHY_INTERFACE_MODE_RGMII) { 305 + const u16 val = phy_read_mmd(phydev, DP83867_DEVADDR, 306 + DP83867_STRAP_STS2); 307 + const u16 txskew = (val & DP83867_STRAP_STS2_CLK_SKEW_TX_MASK) >> 308 + DP83867_STRAP_STS2_CLK_SKEW_TX_SHIFT; 309 + const u16 rxskew = (val & DP83867_STRAP_STS2_CLK_SKEW_RX_MASK) >> 310 + DP83867_STRAP_STS2_CLK_SKEW_RX_SHIFT; 311 + 312 + if (txskew != DP83867_STRAP_STS2_CLK_SKEW_NONE || 313 + rxskew != DP83867_STRAP_STS2_CLK_SKEW_NONE) 314 + phydev_warn(phydev, 315 + "PHY has delays via pin strapping, but phy-mode = 'rgmii'\n" 316 + "Should be 'rgmii-id' to use internal delays txskew:%x rxskew:%x\n", 317 + txskew, rxskew); 318 + } 319 + 320 + /* RX delay *must* be specified if internal delay of RX is used. */ 321 + if ((phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || 322 + phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) && 323 + dp83867->rx_id_delay == DP83867_RGMII_RX_CLK_DELAY_INV) { 324 + phydev_err(phydev, "ti,rx-internal-delay must be specified\n"); 325 + return -EINVAL; 326 + } 327 + 328 + /* TX delay *must* be specified if internal delay of TX is used. */ 329 + if ((phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || 330 + phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) && 331 + dp83867->tx_id_delay == DP83867_RGMII_TX_CLK_DELAY_INV) { 332 + phydev_err(phydev, "ti,tx-internal-delay must be specified\n"); 333 + return -EINVAL; 334 + } 335 + 336 + return 0; 337 + } 338 + 300 339 #ifdef CONFIG_OF_MDIO 301 340 static int dp83867_of_init(struct phy_device *phydev) 302 341 { ··· 380 335 dp83867->sgmii_ref_clk_en = of_property_read_bool(of_node, 381 336 "ti,sgmii-ref-clock-output-enable"); 382 337 383 - /* Existing behavior was to use default pin strapping delay in rgmii 384 - * mode, but rgmii should have meant no delay. Warn existing users. 
385 - */ 386 - if (phydev->interface == PHY_INTERFACE_MODE_RGMII) { 387 - const u16 val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_STRAP_STS2); 388 - const u16 txskew = (val & DP83867_STRAP_STS2_CLK_SKEW_TX_MASK) >> 389 - DP83867_STRAP_STS2_CLK_SKEW_TX_SHIFT; 390 - const u16 rxskew = (val & DP83867_STRAP_STS2_CLK_SKEW_RX_MASK) >> 391 - DP83867_STRAP_STS2_CLK_SKEW_RX_SHIFT; 392 338 393 - if (txskew != DP83867_STRAP_STS2_CLK_SKEW_NONE || 394 - rxskew != DP83867_STRAP_STS2_CLK_SKEW_NONE) 395 - phydev_warn(phydev, 396 - "PHY has delays via pin strapping, but phy-mode = 'rgmii'\n" 397 - "Should be 'rgmii-id' to use internal delays\n"); 339 + dp83867->rx_id_delay = DP83867_RGMII_RX_CLK_DELAY_INV; 340 + ret = of_property_read_u32(of_node, "ti,rx-internal-delay", 341 + &dp83867->rx_id_delay); 342 + if (!ret && dp83867->rx_id_delay > DP83867_RGMII_RX_CLK_DELAY_MAX) { 343 + phydev_err(phydev, 344 + "ti,rx-internal-delay value of %u out of range\n", 345 + dp83867->rx_id_delay); 346 + return -EINVAL; 398 347 } 399 348 400 - /* RX delay *must* be specified if internal delay of RX is used. */ 401 - if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || 402 - phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) { 403 - ret = of_property_read_u32(of_node, "ti,rx-internal-delay", 404 - &dp83867->rx_id_delay); 405 - if (ret) { 406 - phydev_err(phydev, "ti,rx-internal-delay must be specified\n"); 407 - return ret; 408 - } 409 - if (dp83867->rx_id_delay > DP83867_RGMII_RX_CLK_DELAY_MAX) { 410 - phydev_err(phydev, 411 - "ti,rx-internal-delay value of %u out of range\n", 412 - dp83867->rx_id_delay); 413 - return -EINVAL; 414 - } 415 - } 416 - 417 - /* TX delay *must* be specified if internal delay of RX is used. */ 418 - if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || 419 - phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) { 420 - ret = of_property_read_u32(of_node, "ti,tx-internal-delay", 421 - &dp83867->tx_id_delay); 422 - if (ret) { 423 - phydev_err(phydev, "ti,tx-internal-delay must be specified\n"); 424 - return ret; 425 - } 426 - if (dp83867->tx_id_delay > DP83867_RGMII_TX_CLK_DELAY_MAX) { 427 - phydev_err(phydev, 428 - "ti,tx-internal-delay value of %u out of range\n", 429 - dp83867->tx_id_delay); 430 - return -EINVAL; 431 - } 349 + dp83867->tx_id_delay = DP83867_RGMII_TX_CLK_DELAY_INV; 350 + ret = of_property_read_u32(of_node, "ti,tx-internal-delay", 351 + &dp83867->tx_id_delay); 352 + if (!ret && dp83867->tx_id_delay > DP83867_RGMII_TX_CLK_DELAY_MAX) { 353 + phydev_err(phydev, 354 + "ti,tx-internal-delay value of %u out of range\n", 355 + dp83867->tx_id_delay); 356 + return -EINVAL; 432 357 } 433 358 434 359 if (of_property_read_bool(of_node, "enet-phy-lane-swap")) ··· 448 433 struct dp83867_private *dp83867 = phydev->priv; 449 434 int ret, val, bs; 450 435 u16 delay; 436 + 437 + ret = dp83867_verify_rgmii_cfg(phydev); 438 + if (ret) 439 + return ret; 451 440 452 441 /* RX_DV/RX_CTRL strapped in mode 1 or mode 2 workaround */ 453 442 if (dp83867->rxctrl_strap_quirk) ··· 504 485 505 486 phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL, val); 506 487 507 - delay = (dp83867->rx_id_delay | 508 - (dp83867->tx_id_delay << DP83867_RGMII_TX_CLK_DELAY_SHIFT)); 488 + delay = 0; 489 + if (dp83867->rx_id_delay != DP83867_RGMII_RX_CLK_DELAY_INV) 490 + delay |= dp83867->rx_id_delay; 491 + if (dp83867->tx_id_delay != DP83867_RGMII_TX_CLK_DELAY_INV) 492 + delay |= dp83867->tx_id_delay << 493 + DP83867_RGMII_TX_CLK_DELAY_SHIFT; 509 494 510 495 phy_write_mmd(phydev, DP83867_DEVADDR, 
DP83867_RGMIIDCTL, 511 496 delay);
+1
drivers/net/phy/mdio-thunder.c
··· 129 129 mdiobus_free(bus->mii_bus); 130 130 oct_mdio_writeq(0, bus->register_base + SMI_EN); 131 131 } 132 + pci_release_regions(pdev); 132 133 pci_set_drvdata(pdev, NULL); 133 134 } 134 135
+8 -9
drivers/net/phy/sfp.c
··· 1754 1754 break; 1755 1755 } 1756 1756 1757 + err = sfp_hwmon_insert(sfp); 1758 + if (err) 1759 + dev_warn(sfp->dev, "hwmon probe failed: %d\n", err); 1760 + 1757 1761 sfp_sm_mod_next(sfp, SFP_MOD_WAITDEV, 0); 1758 1762 /* fall through */ 1759 1763 case SFP_MOD_WAITDEV: ··· 1807 1803 case SFP_MOD_ERROR: 1808 1804 break; 1809 1805 } 1810 - 1811 - #if IS_ENABLED(CONFIG_HWMON) 1812 - if (sfp->sm_mod_state >= SFP_MOD_WAITDEV && 1813 - IS_ERR_OR_NULL(sfp->hwmon_dev)) { 1814 - err = sfp_hwmon_insert(sfp); 1815 - if (err) 1816 - dev_warn(sfp->dev, "hwmon probe failed: %d\n", err); 1817 - } 1818 - #endif 1819 1806 } 1820 1807 1821 1808 static void sfp_sm_main(struct sfp *sfp, unsigned int event) ··· 2288 2293 struct sfp *sfp = platform_get_drvdata(pdev); 2289 2294 2290 2295 sfp_unregister_socket(sfp->sfp_bus); 2296 + 2297 + rtnl_lock(); 2298 + sfp_sm_event(sfp, SFP_E_REMOVE); 2299 + rtnl_unlock(); 2291 2300 2292 2301 return 0; 2293 2302 }
+3 -2
drivers/net/ppp/ppp_generic.c
··· 564 564 return NULL; 565 565 566 566 /* uprog->len is unsigned short, so no overflow here */ 567 - fprog.len = uprog->len * sizeof(struct sock_filter); 568 - fprog.filter = memdup_user(uprog->filter, fprog.len); 567 + fprog.len = uprog->len; 568 + fprog.filter = memdup_user(uprog->filter, 569 + uprog->len * sizeof(struct sock_filter)); 569 570 if (IS_ERR(fprog.filter)) 570 571 return ERR_CAST(fprog.filter); 571 572
-2
drivers/net/ppp/pppoe.c
··· 119 119 120 120 static inline struct pppoe_net *pppoe_pernet(struct net *net) 121 121 { 122 - BUG_ON(!net); 123 - 124 122 return net_generic(net, pppoe_net_id); 125 123 } 126 124
+3 -5
drivers/net/vxlan.c
··· 2275 2275 bool use_cache = ip_tunnel_dst_cache_usable(skb, info); 2276 2276 struct dst_entry *ndst; 2277 2277 struct flowi6 fl6; 2278 - int err; 2279 2278 2280 2279 if (!sock6) 2281 2280 return ERR_PTR(-EIO); ··· 2297 2298 fl6.fl6_dport = dport; 2298 2299 fl6.fl6_sport = sport; 2299 2300 2300 - err = ipv6_stub->ipv6_dst_lookup(vxlan->net, 2301 - sock6->sock->sk, 2302 - &ndst, &fl6); 2303 - if (unlikely(err < 0)) { 2301 + ndst = ipv6_stub->ipv6_dst_lookup_flow(vxlan->net, sock6->sock->sk, 2302 + &fl6, NULL); 2303 + if (unlikely(IS_ERR(ndst))) { 2304 2304 netdev_dbg(dev, "no route to %pI6\n", daddr); 2305 2305 return ERR_PTR(-ENETUNREACH); 2306 2306 }
+4
drivers/s390/net/qeth_core.h
··· 480 480 481 481 u64 rx_dropped_nomem; 482 482 u64 rx_dropped_notsupp; 483 + u64 rx_dropped_runt; 483 484 484 485 /* rtnl_link_stats64 */ 485 486 u64 rx_packets; ··· 628 627 629 628 struct qeth_channel { 630 629 struct ccw_device *ccwdev; 630 + struct qeth_cmd_buffer *active_cmd; 631 631 enum qeth_channel_states state; 632 632 atomic_t irq_pending; 633 633 }; ··· 1039 1037 void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long); 1040 1038 void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long); 1041 1039 int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok); 1040 + int qeth_stop_channel(struct qeth_channel *channel); 1041 + 1042 1042 void qeth_print_status_message(struct qeth_card *); 1043 1043 int qeth_init_qdio_queues(struct qeth_card *); 1044 1044 int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
+102 -58
drivers/s390/net/qeth_core_main.c
··· 515 515 516 516 QETH_CARD_TEXT(card, 6, "noirqpnd"); 517 517 rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0); 518 - if (rc) { 518 + if (!rc) { 519 + channel->active_cmd = iob; 520 + } else { 519 521 QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n", 520 522 rc, CARD_DEVID(card)); 521 523 atomic_set(&channel->irq_pending, 0); ··· 988 986 QETH_CARD_TEXT(card, 5, "data"); 989 987 } 990 988 991 - if (qeth_intparm_is_iob(intparm)) 992 - iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm); 989 + if (intparm == 0) { 990 + QETH_CARD_TEXT(card, 5, "irqunsol"); 991 + } else if ((addr_t)intparm != (addr_t)channel->active_cmd) { 992 + QETH_CARD_TEXT(card, 5, "irqunexp"); 993 + 994 + dev_err(&cdev->dev, 995 + "Received IRQ with intparm %lx, expected %px\n", 996 + intparm, channel->active_cmd); 997 + if (channel->active_cmd) 998 + qeth_cancel_cmd(channel->active_cmd, -EIO); 999 + } else { 1000 + iob = (struct qeth_cmd_buffer *) (addr_t)intparm; 1001 + } 1002 + 1003 + channel->active_cmd = NULL; 993 1004 994 1005 rc = qeth_check_irb_error(card, cdev, irb); 995 1006 if (rc) { ··· 1022 1007 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC)) 1023 1008 channel->state = CH_STATE_HALTED; 1024 1009 1025 - if (intparm == QETH_CLEAR_CHANNEL_PARM) { 1026 - QETH_CARD_TEXT(card, 6, "clrchpar"); 1027 - /* we don't have to handle this further */ 1028 - intparm = 0; 1029 - } 1030 - if (intparm == QETH_HALT_CHANNEL_PARM) { 1031 - QETH_CARD_TEXT(card, 6, "hltchpar"); 1032 - /* we don't have to handle this further */ 1033 - intparm = 0; 1010 + if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC | 1011 + SCSW_FCTL_HALT_FUNC))) { 1012 + qeth_cancel_cmd(iob, -ECANCELED); 1013 + iob = NULL; 1034 1014 } 1035 1015 1036 1016 cstat = irb->scsw.cmd.cstat; ··· 1418 1408 1419 1409 QETH_CARD_TEXT(card, 3, "clearch"); 1420 1410 spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); 1421 - rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM); 1411 + rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd); 1422 1412 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); 1423 1413 1424 1414 if (rc) ··· 1440 1430 1441 1431 QETH_CARD_TEXT(card, 3, "haltch"); 1442 1432 spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); 1443 - rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM); 1433 + rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd); 1444 1434 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); 1445 1435 1446 1436 if (rc) ··· 1453 1443 return -ETIME; 1454 1444 return 0; 1455 1445 } 1446 + 1447 + int qeth_stop_channel(struct qeth_channel *channel) 1448 + { 1449 + struct ccw_device *cdev = channel->ccwdev; 1450 + int rc; 1451 + 1452 + rc = ccw_device_set_offline(cdev); 1453 + 1454 + spin_lock_irq(get_ccwdev_lock(cdev)); 1455 + if (channel->active_cmd) { 1456 + dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n", 1457 + channel->active_cmd); 1458 + channel->active_cmd = NULL; 1459 + } 1460 + spin_unlock_irq(get_ccwdev_lock(cdev)); 1461 + 1462 + return rc; 1463 + } 1464 + EXPORT_SYMBOL_GPL(qeth_stop_channel); 1456 1465 1457 1466 static int qeth_halt_channels(struct qeth_card *card) 1458 1467 { ··· 1775 1746 spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); 1776 1747 rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob), 1777 1748 (addr_t) iob, 0, 0, timeout); 1749 + if (!rc) 1750 + channel->active_cmd = iob; 1778 1751 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); 1779 1752 if (rc) { 1780 1753 QETH_DBF_MESSAGE(2, 
"qeth_send_control_data on device %x: ccw_device_start rc = %i\n", ··· 4698 4667 4699 4668 static void qeth_determine_capabilities(struct qeth_card *card) 4700 4669 { 4670 + struct qeth_channel *channel = &card->data; 4671 + struct ccw_device *ddev = channel->ccwdev; 4701 4672 int rc; 4702 - struct ccw_device *ddev; 4703 4673 int ddev_offline = 0; 4704 4674 4705 4675 QETH_CARD_TEXT(card, 2, "detcapab"); 4706 - ddev = CARD_DDEV(card); 4707 4676 if (!ddev->online) { 4708 4677 ddev_offline = 1; 4709 4678 rc = ccw_device_set_online(ddev); ··· 4742 4711 4743 4712 out_offline: 4744 4713 if (ddev_offline == 1) 4745 - ccw_device_set_offline(ddev); 4714 + qeth_stop_channel(channel); 4746 4715 out: 4747 4716 return; 4748 4717 } ··· 4942 4911 QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n", 4943 4912 CARD_DEVID(card)); 4944 4913 rc = qeth_qdio_clear_card(card, !IS_IQD(card)); 4945 - ccw_device_set_offline(CARD_DDEV(card)); 4946 - ccw_device_set_offline(CARD_WDEV(card)); 4947 - ccw_device_set_offline(CARD_RDEV(card)); 4914 + qeth_stop_channel(&card->data); 4915 + qeth_stop_channel(&card->write); 4916 + qeth_stop_channel(&card->read); 4948 4917 qdio_free(CARD_DDEV(card)); 4949 4918 rc = ccw_device_set_online(CARD_RDEV(card)); 4950 4919 if (rc) ··· 5059 5028 } 5060 5029 EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card); 5061 5030 5062 - static void qeth_create_skb_frag(struct qdio_buffer_element *element, 5063 - struct sk_buff *skb, int offset, int data_len) 5031 + static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len) 5064 5032 { 5065 - struct page *page = virt_to_page(element->addr); 5033 + struct page *page = virt_to_page(data); 5066 5034 unsigned int next_frag; 5067 - 5068 - /* first fill the linear space */ 5069 - if (!skb->len) { 5070 - unsigned int linear = min(data_len, skb_tailroom(skb)); 5071 - 5072 - skb_put_data(skb, element->addr + offset, linear); 5073 - data_len -= linear; 5074 - if (!data_len) 5075 - return; 5076 - offset += linear; 5077 - /* fall through to add page frag for remaining data */ 5078 - } 5079 5035 5080 5036 next_frag = skb_shinfo(skb)->nr_frags; 5081 5037 get_page(page); 5082 - skb_add_rx_frag(skb, next_frag, page, offset, data_len, data_len); 5038 + skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len, 5039 + data_len); 5083 5040 } 5084 5041 5085 5042 static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale) ··· 5082 5063 { 5083 5064 struct qdio_buffer_element *element = *__element; 5084 5065 struct qdio_buffer *buffer = qethbuffer->buffer; 5066 + unsigned int linear_len = 0; 5085 5067 int offset = *__offset; 5086 5068 bool use_rx_sg = false; 5087 5069 unsigned int headroom; 5088 5070 struct sk_buff *skb; 5089 5071 int skb_len = 0; 5090 - void *data_ptr; 5091 - int data_len; 5092 5072 5093 5073 next_packet: 5094 5074 /* qeth_hdr must not cross element boundaries */ ··· 5100 5082 *hdr = element->addr + offset; 5101 5083 5102 5084 offset += sizeof(struct qeth_hdr); 5085 + skb = NULL; 5086 + 5103 5087 switch ((*hdr)->hdr.l2.id) { 5104 5088 case QETH_HEADER_TYPE_LAYER2: 5105 5089 skb_len = (*hdr)->hdr.l2.pkt_length; 5090 + linear_len = ETH_HLEN; 5106 5091 headroom = 0; 5107 5092 break; 5108 5093 case QETH_HEADER_TYPE_LAYER3: 5109 5094 skb_len = (*hdr)->hdr.l3.length; 5110 5095 if (!IS_LAYER3(card)) { 5111 5096 QETH_CARD_STAT_INC(card, rx_dropped_notsupp); 5112 - skb = NULL; 5113 5097 goto walk_packet; 5114 5098 } 5115 5099 5100 + if ((*hdr)->hdr.l3.flags & QETH_HDR_PASSTHRU) { 5101 + linear_len = ETH_HLEN; 
5102 + headroom = 0; 5103 + break; 5104 + } 5105 + 5106 + if ((*hdr)->hdr.l3.flags & QETH_HDR_IPV6) 5107 + linear_len = sizeof(struct ipv6hdr); 5108 + else 5109 + linear_len = sizeof(struct iphdr); 5116 5110 headroom = ETH_HLEN; 5117 5111 break; 5118 5112 case QETH_HEADER_TYPE_OSN: 5119 5113 skb_len = (*hdr)->hdr.osn.pdu_length; 5120 5114 if (!IS_OSN(card)) { 5121 5115 QETH_CARD_STAT_INC(card, rx_dropped_notsupp); 5122 - skb = NULL; 5123 5116 goto walk_packet; 5124 5117 } 5125 5118 5119 + linear_len = skb_len; 5126 5120 headroom = sizeof(struct qeth_hdr); 5127 5121 break; 5128 5122 default: ··· 5147 5117 return NULL; 5148 5118 } 5149 5119 5150 - if (!skb_len) 5151 - return NULL; 5120 + if (skb_len < linear_len) { 5121 + QETH_CARD_STAT_INC(card, rx_dropped_runt); 5122 + goto walk_packet; 5123 + } 5152 5124 5153 5125 use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) || 5154 5126 ((skb_len >= card->options.rx_sg_cb) && ··· 5162 5130 skb = qethbuffer->rx_skb; 5163 5131 qethbuffer->rx_skb = NULL; 5164 5132 } else { 5165 - unsigned int linear = (use_rx_sg) ? QETH_RX_PULL_LEN : skb_len; 5166 - 5167 - skb = napi_alloc_skb(&card->napi, linear + headroom); 5133 + if (!use_rx_sg) 5134 + linear_len = skb_len; 5135 + skb = napi_alloc_skb(&card->napi, linear_len + headroom); 5168 5136 } 5169 5137 5170 5138 if (!skb) ··· 5173 5141 skb_reserve(skb, headroom); 5174 5142 5175 5143 walk_packet: 5176 - data_ptr = element->addr + offset; 5177 5144 while (skb_len) { 5178 - data_len = min(skb_len, (int)(element->length - offset)); 5145 + int data_len = min(skb_len, (int)(element->length - offset)); 5146 + char *data = element->addr + offset; 5179 5147 5180 - if (skb && data_len) { 5181 - if (use_rx_sg) 5182 - qeth_create_skb_frag(element, skb, offset, 5183 - data_len); 5184 - else 5185 - skb_put_data(skb, data_ptr, data_len); 5186 - } 5187 5148 skb_len -= data_len; 5149 + offset += data_len; 5150 + 5151 + /* Extract data from current element: */ 5152 + if (skb && data_len) { 5153 + if (linear_len) { 5154 + unsigned int copy_len; 5155 + 5156 + copy_len = min_t(unsigned int, linear_len, 5157 + data_len); 5158 + 5159 + skb_put_data(skb, data, copy_len); 5160 + linear_len -= copy_len; 5161 + data_len -= copy_len; 5162 + data += copy_len; 5163 + } 5164 + 5165 + if (data_len) 5166 + qeth_create_skb_frag(skb, data, data_len); 5167 + } 5168 + 5169 + /* Step forward to next element: */ 5188 5170 if (skb_len) { 5189 5171 if (qeth_is_last_sbale(element)) { 5190 5172 QETH_CARD_TEXT(card, 4, "unexeob"); ··· 5212 5166 } 5213 5167 element++; 5214 5168 offset = 0; 5215 - data_ptr = element->addr; 5216 - } else { 5217 - offset += data_len; 5218 5169 } 5219 5170 } 5220 5171 ··· 6311 6268 card->stats.rx_frame_errors + 6312 6269 card->stats.rx_fifo_errors; 6313 6270 stats->rx_dropped = card->stats.rx_dropped_nomem + 6314 - card->stats.rx_dropped_notsupp; 6271 + card->stats.rx_dropped_notsupp + 6272 + card->stats.rx_dropped_runt; 6315 6273 stats->multicast = card->stats.rx_multicast; 6316 6274 stats->rx_length_errors = card->stats.rx_length_errors; 6317 6275 stats->rx_frame_errors = card->stats.rx_frame_errors;
-14
drivers/s390/net/qeth_core_mpc.h
··· 29 29 #define QETH_TIMEOUT (10 * HZ) 30 30 #define QETH_IPA_TIMEOUT (45 * HZ) 31 31 32 - #define QETH_CLEAR_CHANNEL_PARM -10 33 - #define QETH_HALT_CHANNEL_PARM -11 34 - 35 - static inline bool qeth_intparm_is_iob(unsigned long intparm) 36 - { 37 - switch (intparm) { 38 - case QETH_CLEAR_CHANNEL_PARM: 39 - case QETH_HALT_CHANNEL_PARM: 40 - case 0: 41 - return false; 42 - } 43 - return true; 44 - } 45 - 46 32 /*****************************************************************************/ 47 33 /* IP Assist related definitions */ 48 34 /*****************************************************************************/
+1
drivers/s390/net/qeth_ethtool.c
··· 51 51 QETH_CARD_STAT("rx0 SG page allocs", rx_sg_alloc_page), 52 52 QETH_CARD_STAT("rx0 dropped, no memory", rx_dropped_nomem), 53 53 QETH_CARD_STAT("rx0 dropped, bad format", rx_dropped_notsupp), 54 + QETH_CARD_STAT("rx0 dropped, runt", rx_dropped_runt), 54 55 }; 55 56 56 57 #define TXQ_STATS_LEN ARRAY_SIZE(txq_stats)
+6 -6
drivers/s390/net/qeth_l2_main.c
··· 845 845 846 846 out_remove: 847 847 qeth_l2_stop_card(card); 848 - ccw_device_set_offline(CARD_DDEV(card)); 849 - ccw_device_set_offline(CARD_WDEV(card)); 850 - ccw_device_set_offline(CARD_RDEV(card)); 848 + qeth_stop_channel(&card->data); 849 + qeth_stop_channel(&card->write); 850 + qeth_stop_channel(&card->read); 851 851 qdio_free(CARD_DDEV(card)); 852 852 853 853 mutex_unlock(&card->conf_mutex); ··· 878 878 rtnl_unlock(); 879 879 880 880 qeth_l2_stop_card(card); 881 - rc = ccw_device_set_offline(CARD_DDEV(card)); 882 - rc2 = ccw_device_set_offline(CARD_WDEV(card)); 883 - rc3 = ccw_device_set_offline(CARD_RDEV(card)); 881 + rc = qeth_stop_channel(&card->data); 882 + rc2 = qeth_stop_channel(&card->write); 883 + rc3 = qeth_stop_channel(&card->read); 884 884 if (!rc) 885 885 rc = (rc2) ? rc2 : rc3; 886 886 if (rc)
+7 -6
drivers/s390/net/qeth_l3_main.c
··· 2259 2259 return 0; 2260 2260 out_remove: 2261 2261 qeth_l3_stop_card(card); 2262 - ccw_device_set_offline(CARD_DDEV(card)); 2263 - ccw_device_set_offline(CARD_WDEV(card)); 2264 - ccw_device_set_offline(CARD_RDEV(card)); 2262 + qeth_stop_channel(&card->data); 2263 + qeth_stop_channel(&card->write); 2264 + qeth_stop_channel(&card->read); 2265 2265 qdio_free(CARD_DDEV(card)); 2266 2266 2267 2267 mutex_unlock(&card->conf_mutex); ··· 2297 2297 call_netdevice_notifiers(NETDEV_REBOOT, card->dev); 2298 2298 rtnl_unlock(); 2299 2299 } 2300 - rc = ccw_device_set_offline(CARD_DDEV(card)); 2301 - rc2 = ccw_device_set_offline(CARD_WDEV(card)); 2302 - rc3 = ccw_device_set_offline(CARD_RDEV(card)); 2300 + 2301 + rc = qeth_stop_channel(&card->data); 2302 + rc2 = qeth_stop_channel(&card->write); 2303 + rc3 = qeth_stop_channel(&card->read); 2303 2304 if (!rc) 2304 2305 rc = (rc2) ? rc2 : rc3; 2305 2306 if (rc)
+3 -1
drivers/vhost/vsock.c
··· 480 480 virtio_transport_deliver_tap_pkt(pkt); 481 481 482 482 /* Only accept correctly addressed packets */ 483 - if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid) 483 + if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid && 484 + le64_to_cpu(pkt->hdr.dst_cid) == 485 + vhost_transport_get_local_cid()) 484 486 virtio_transport_recv_pkt(&vhost_transport, pkt); 485 487 else 486 488 virtio_transport_free_pkt(pkt);
+6 -2
include/linux/filter.h
··· 776 776 777 777 static inline void bpf_prog_lock_ro(struct bpf_prog *fp) 778 778 { 779 - set_vm_flush_reset_perms(fp); 780 - set_memory_ro((unsigned long)fp, fp->pages); 779 + #ifndef CONFIG_BPF_JIT_ALWAYS_ON 780 + if (!fp->jited) { 781 + set_vm_flush_reset_perms(fp); 782 + set_memory_ro((unsigned long)fp, fp->pages); 783 + } 784 + #endif 781 785 } 782 786 783 787 static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
+5
include/linux/netdevice.h
··· 1881 1881 unsigned char if_port; 1882 1882 unsigned char dma; 1883 1883 1884 + /* Note : dev->mtu is often read without holding a lock. 1885 + * Writers usually hold RTNL. 1886 + * It is recommended to use READ_ONCE() to annotate the reads, 1887 + * and to use WRITE_ONCE() to annotate the writes. 1888 + */ 1884 1889 unsigned int mtu; 1885 1890 unsigned int min_mtu; 1886 1891 unsigned int max_mtu;
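(Reader's note, not part of the pull: the new comment asks for annotated lockless access to dev->mtu. A minimal sketch of what such a pairing looks like; the two helpers are hypothetical, only READ_ONCE()/WRITE_ONCE() and ASSERT_RTNL() are existing kernel APIs.)

```c
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Hypothetical reader on a lockless fast path: annotate the racy load. */
static unsigned int example_read_mtu(const struct net_device *dev)
{
	return READ_ONCE(dev->mtu);	/* pairs with WRITE_ONCE() in writers */
}

/* Hypothetical writer: MTU updates are expected to happen under RTNL. */
static void example_write_mtu(struct net_device *dev, unsigned int new_mtu)
{
	ASSERT_RTNL();
	WRITE_ONCE(dev->mtu, new_mtu);	/* pairs with READ_ONCE() in readers */
}
```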
+3 -2
include/linux/skbuff.h
··· 3529 3529 int skb_vlan_pop(struct sk_buff *skb); 3530 3530 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); 3531 3531 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, 3532 - int mac_len); 3533 - int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len); 3532 + int mac_len, bool ethernet); 3533 + int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, 3534 + bool ethernet); 3534 3535 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse); 3535 3536 int skb_mpls_dec_ttl(struct sk_buff *skb); 3536 3537 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
+13
include/linux/time.h
··· 97 97 */
98 98 #define time_after32(a, b) ((s32)((u32)(b) - (u32)(a)) < 0)
99 99 #define time_before32(b, a) time_after32(a, b)
100 +
101 + /**
102 + * time_between32 - check if a 32-bit timestamp is within a given time range
103 + * @t: the time which may be within [l,h]
104 + * @l: the lower bound of the range
105 + * @h: the higher bound of the range
106 + *
107 + * time_between32(t, l, h) returns true if @l <= @t <= @h. All operands are
108 + * treated as 32-bit integers.
109 + *
110 + * Equivalent to !(time_before32(@t, @l) || time_after32(@t, @h)).
111 + */
112 + #define time_between32(t, l, h) ((u32)(h) - (u32)(l) >= (u32)(t) - (u32)(l))
100 113 #endif
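(Reader's note, not part of the pull: a small userspace sketch restating time_between32() with fixed-width types, to show that both bounds are inclusive and that the check stays correct when the range straddles the u32 wraparound; the inputs are arbitrary.)

```c
#include <stdint.h>
#include <stdio.h>

/* Userspace restatement of time_between32(), for illustration only. */
#define time_between32(t, l, h) \
	((uint32_t)(h) - (uint32_t)(l) >= (uint32_t)(t) - (uint32_t)(l))

int main(void)
{
	uint32_t lo = 0xfffffff0u;	/* lower bound, close to the u32 wrap */
	uint32_t hi = lo + 250;		/* upper bound wraps past zero */

	printf("%d\n", time_between32(lo, lo, hi));	 /* 1: lower bound inclusive */
	printf("%d\n", time_between32(lo + 10, lo, hi)); /* 1: inside, across the wrap */
	printf("%d\n", time_between32(hi, lo, hi));	 /* 1: upper bound inclusive */
	printf("%d\n", time_between32(lo - 1, lo, hi));	 /* 0: just below the range */
	printf("%d\n", time_between32(hi + 1, lo, hi));	 /* 0: just above the range */
	return 0;
}
```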
+1
include/net/flow_dissector.h
··· 235 235 FLOW_DISSECTOR_KEY_IPV4_ADDRS, /* struct flow_dissector_key_ipv4_addrs */ 236 236 FLOW_DISSECTOR_KEY_IPV6_ADDRS, /* struct flow_dissector_key_ipv6_addrs */ 237 237 FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */ 238 + FLOW_DISSECTOR_KEY_PORTS_RANGE, /* struct flow_dissector_key_ports */ 238 239 FLOW_DISSECTOR_KEY_ICMP, /* struct flow_dissector_key_icmp */ 239 240 FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */ 240 241 FLOW_DISSECTOR_KEY_TIPC, /* struct flow_dissector_key_tipc */
+7 -8
include/net/flow_offload.h
··· 380 380 typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv, 381 381 enum tc_setup_type type, void *type_data); 382 382 383 - typedef void flow_indr_block_ing_cmd_t(struct net_device *dev, 384 - flow_indr_block_bind_cb_t *cb, 385 - void *cb_priv, 386 - enum flow_block_command command); 383 + typedef void flow_indr_block_cmd_t(struct net_device *dev, 384 + flow_indr_block_bind_cb_t *cb, void *cb_priv, 385 + enum flow_block_command command); 387 386 388 - struct flow_indr_block_ing_entry { 389 - flow_indr_block_ing_cmd_t *cb; 387 + struct flow_indr_block_entry { 388 + flow_indr_block_cmd_t *cb; 390 389 struct list_head list; 391 390 }; 392 391 393 - void flow_indr_add_block_ing_cb(struct flow_indr_block_ing_entry *entry); 392 + void flow_indr_add_block_cb(struct flow_indr_block_entry *entry); 394 393 395 - void flow_indr_del_block_ing_cb(struct flow_indr_block_ing_entry *entry); 394 + void flow_indr_del_block_cb(struct flow_indr_block_entry *entry); 396 395 397 396 int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv, 398 397 flow_indr_block_bind_cb_t *cb,
+5
include/net/ip.h
··· 760 760 int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family, 761 761 struct netlink_ext_ack *extack); 762 762 763 + static inline bool inetdev_valid_mtu(unsigned int mtu) 764 + { 765 + return likely(mtu >= IPV4_MIN_MTU); 766 + } 767 + 763 768 #endif /* _IP_H */
+1 -1
include/net/ipv6.h
··· 1022 1022 1023 1023 int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst, 1024 1024 struct flowi6 *fl6); 1025 - struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6, 1025 + struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6, 1026 1026 const struct in6_addr *final_dst); 1027 1027 struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, 1028 1028 const struct in6_addr *final_dst,
+4 -2
include/net/ipv6_stubs.h
··· 24 24 const struct in6_addr *addr); 25 25 int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex, 26 26 const struct in6_addr *addr); 27 - int (*ipv6_dst_lookup)(struct net *net, struct sock *sk, 28 - struct dst_entry **dst, struct flowi6 *fl6); 27 + struct dst_entry *(*ipv6_dst_lookup_flow)(struct net *net, 28 + const struct sock *sk, 29 + struct flowi6 *fl6, 30 + const struct in6_addr *final_dst); 29 31 int (*ipv6_route_input)(struct sk_buff *skb); 30 32 31 33 struct fib6_table *(*fib6_get_table)(struct net *net, u32 id);
+19 -8
include/net/tcp.h
··· 494 494 reuse = rcu_dereference(sk->sk_reuseport_cb); 495 495 if (likely(reuse)) { 496 496 last_overflow = READ_ONCE(reuse->synq_overflow_ts); 497 - if (time_after32(now, last_overflow + HZ)) 497 + if (!time_between32(now, last_overflow, 498 + last_overflow + HZ)) 498 499 WRITE_ONCE(reuse->synq_overflow_ts, now); 499 500 return; 500 501 } 501 502 } 502 503 503 - last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; 504 - if (time_after32(now, last_overflow + HZ)) 505 - tcp_sk(sk)->rx_opt.ts_recent_stamp = now; 504 + last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp); 505 + if (!time_between32(now, last_overflow, last_overflow + HZ)) 506 + WRITE_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp, now); 506 507 } 507 508 508 509 /* syncookies: no recent synqueue overflow on this listening socket? */ ··· 518 517 reuse = rcu_dereference(sk->sk_reuseport_cb); 519 518 if (likely(reuse)) { 520 519 last_overflow = READ_ONCE(reuse->synq_overflow_ts); 521 - return time_after32(now, last_overflow + 522 - TCP_SYNCOOKIE_VALID); 520 + return !time_between32(now, last_overflow - HZ, 521 + last_overflow + 522 + TCP_SYNCOOKIE_VALID); 523 523 } 524 524 } 525 525 526 - last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; 527 - return time_after32(now, last_overflow + TCP_SYNCOOKIE_VALID); 526 + last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp); 527 + 528 + /* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID, 529 + * then we're under synflood. However, we have to use 530 + * 'last_overflow - HZ' as lower bound. That's because a concurrent 531 + * tcp_synq_overflow() could update .ts_recent_stamp after we read 532 + * jiffies but before we store .ts_recent_stamp into last_overflow, 533 + * which could lead to rejecting a valid syncookie. 534 + */ 535 + return !time_between32(now, last_overflow - HZ, 536 + last_overflow + TCP_SYNCOOKIE_VALID); 528 537 } 529 538 530 539 static inline u32 tcp_cookie_time(void)
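(Reader's note, not part of the pull: a self-contained userspace sketch contrasting the old time_after32() test with the new time_between32() window once the recorded stamp is more than 2^31 ticks in the past. The tick rate and validity window below are made-up illustrative constants, not the kernel's.)

```c
#include <stdint.h>
#include <stdio.h>

/* Userspace restatements of the two kernel macros, for illustration only. */
#define time_after32(a, b)	((int32_t)((uint32_t)(b) - (uint32_t)(a)) < 0)
#define time_between32(t, l, h) \
	((uint32_t)(h) - (uint32_t)(l) >= (uint32_t)(t) - (uint32_t)(l))

int main(void)
{
	const uint32_t hz = 1000, valid = 120 * 1000;	/* illustrative constants */
	uint32_t last = 0;				/* last recorded overflow stamp */
	uint32_t now;

	/* Stale stamp: "now" is more than 2^31 ticks past it. The old test
	 * wraps and reports a recent overflow; the window test does not. */
	now = last + 0xc0000000u;
	printf("stale:  old=%d new=%d\n",
	       time_after32(now, last + valid),			/* 0: wrongly "recent" */
	       !time_between32(now, last - hz, last + valid));	/* 1: correctly "not recent" */

	/* Genuinely recent overflow: both tests agree it is recent. */
	now = last + 5 * hz;
	printf("recent: old=%d new=%d\n",
	       time_after32(now, last + valid),			/* 0 */
	       !time_between32(now, last - hz, last + valid));	/* 0 */
	return 0;
}
```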
+4 -1
kernel/bpf/btf.c
··· 3463 3463 __ctx_convert##_id, 3464 3464 #include <linux/bpf_types.h> 3465 3465 #undef BPF_PROG_TYPE 3466 + __ctx_convert_unused, /* to avoid empty enum in extreme .config */ 3466 3467 }; 3467 3468 static u8 bpf_ctx_convert_map[] = { 3468 3469 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ ··· 3977 3976 t = btf_type_by_id(btf, btf_id); 3978 3977 while (t && btf_type_is_modifier(t)) 3979 3978 t = btf_type_by_id(btf, t->type); 3980 - if (!t) 3979 + if (!t) { 3980 + *bad_type = btf->types[0]; 3981 3981 return -EINVAL; 3982 + } 3982 3983 if (btf_type_is_ptr(t)) 3983 3984 /* kernel size of pointer. Not BPF's size of pointer*/ 3984 3985 return sizeof(void *);
+4 -1
kernel/bpf/verifier.c
··· 9636 9636 ret = -EINVAL; 9637 9637 goto out; 9638 9638 } 9639 - addr = (long) tgt_prog->aux->func[subprog]->bpf_func; 9639 + if (subprog == 0) 9640 + addr = (long) tgt_prog->bpf_func; 9641 + else 9642 + addr = (long) tgt_prog->aux->func[subprog]->bpf_func; 9640 9643 } else { 9641 9644 addr = kallsyms_lookup_name(tname); 9642 9645 if (!addr) {
+6
net/bridge/br_device.c
··· 245 245 if (!is_valid_ether_addr(addr->sa_data)) 246 246 return -EADDRNOTAVAIL; 247 247 248 + /* dev_set_mac_addr() can be called by a master device on bridge's 249 + * NETDEV_UNREGISTER, but since it's being destroyed do nothing 250 + */ 251 + if (dev->reg_state != NETREG_REGISTERED) 252 + return -EBUSY; 253 + 248 254 spin_lock_bh(&br->lock); 249 255 if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) { 250 256 /* Mac address will be changed in br_stp_change_bridge_id(). */
+5 -4
net/core/dev.c
··· 8188 8188 if (ops->ndo_change_mtu) 8189 8189 return ops->ndo_change_mtu(dev, new_mtu); 8190 8190 8191 - dev->mtu = new_mtu; 8191 + /* Pairs with all the lockless reads of dev->mtu in the stack */ 8192 + WRITE_ONCE(dev->mtu, new_mtu); 8192 8193 return 0; 8193 8194 } 8194 8195 EXPORT_SYMBOL(__dev_set_mtu); ··· 9247 9246 if (ret) { 9248 9247 if (ret > 0) 9249 9248 ret = -EIO; 9250 - goto out; 9249 + goto err_free_name; 9251 9250 } 9252 9251 } 9253 9252 ··· 9362 9361 return ret; 9363 9362 9364 9363 err_uninit: 9365 - if (dev->name_node) 9366 - netdev_name_node_free(dev->name_node); 9367 9364 if (dev->netdev_ops->ndo_uninit) 9368 9365 dev->netdev_ops->ndo_uninit(dev); 9369 9366 if (dev->priv_destructor) 9370 9367 dev->priv_destructor(dev); 9368 + err_free_name: 9369 + netdev_name_node_free(dev->name_node); 9371 9370 goto out; 9372 9371 } 9373 9372 EXPORT_SYMBOL(register_netdevice);
+31 -11
net/core/flow_dissector.c
··· 760 760 } 761 761 762 762 static void 763 + __skb_flow_dissect_ports(const struct sk_buff *skb, 764 + struct flow_dissector *flow_dissector, 765 + void *target_container, void *data, int nhoff, 766 + u8 ip_proto, int hlen) 767 + { 768 + enum flow_dissector_key_id dissector_ports = FLOW_DISSECTOR_KEY_MAX; 769 + struct flow_dissector_key_ports *key_ports; 770 + 771 + if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS)) 772 + dissector_ports = FLOW_DISSECTOR_KEY_PORTS; 773 + else if (dissector_uses_key(flow_dissector, 774 + FLOW_DISSECTOR_KEY_PORTS_RANGE)) 775 + dissector_ports = FLOW_DISSECTOR_KEY_PORTS_RANGE; 776 + 777 + if (dissector_ports == FLOW_DISSECTOR_KEY_MAX) 778 + return; 779 + 780 + key_ports = skb_flow_dissector_target(flow_dissector, 781 + dissector_ports, 782 + target_container); 783 + key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto, 784 + data, hlen); 785 + } 786 + 787 + static void 763 788 __skb_flow_dissect_ipv4(const struct sk_buff *skb, 764 789 struct flow_dissector *flow_dissector, 765 790 void *target_container, void *data, const struct iphdr *iph) ··· 953 928 struct flow_dissector_key_control *key_control; 954 929 struct flow_dissector_key_basic *key_basic; 955 930 struct flow_dissector_key_addrs *key_addrs; 956 - struct flow_dissector_key_ports *key_ports; 957 931 struct flow_dissector_key_tags *key_tags; 958 932 struct flow_dissector_key_vlan *key_vlan; 959 933 struct bpf_prog *attached = NULL; ··· 969 945 nhoff = skb_network_offset(skb); 970 946 hlen = skb_headlen(skb); 971 947 #if IS_ENABLED(CONFIG_NET_DSA) 972 - if (unlikely(skb->dev && netdev_uses_dsa(skb->dev))) { 948 + if (unlikely(skb->dev && netdev_uses_dsa(skb->dev) && 949 + proto == htons(ETH_P_XDSA))) { 973 950 const struct dsa_device_ops *ops; 974 - int offset; 951 + int offset = 0; 975 952 976 953 ops = skb->dev->dsa_ptr->tag_ops; 977 954 if (ops->flow_dissect && ··· 1408 1383 break; 1409 1384 } 1410 1385 1411 - if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS) && 1412 - !(key_control->flags & FLOW_DIS_IS_FRAGMENT)) { 1413 - key_ports = skb_flow_dissector_target(flow_dissector, 1414 - FLOW_DISSECTOR_KEY_PORTS, 1415 - target_container); 1416 - key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto, 1417 - data, hlen); 1418 - } 1386 + if (!(key_control->flags & FLOW_DIS_IS_FRAGMENT)) 1387 + __skb_flow_dissect_ports(skb, flow_dissector, target_container, 1388 + data, nhoff, ip_proto, hlen); 1419 1389 1420 1390 /* Process result of IP proto processing */ 1421 1391 switch (fdret) {
+22 -23
net/core/flow_offload.c
··· 283 283 } 284 284 EXPORT_SYMBOL(flow_block_cb_setup_simple); 285 285 286 - static LIST_HEAD(block_ing_cb_list); 286 + static LIST_HEAD(block_cb_list); 287 287 288 288 static struct rhashtable indr_setup_block_ht; 289 289 ··· 391 391 kfree(indr_block_cb); 392 392 } 393 393 394 - static DEFINE_MUTEX(flow_indr_block_ing_cb_lock); 394 + static DEFINE_MUTEX(flow_indr_block_cb_lock); 395 395 396 - static void flow_block_ing_cmd(struct net_device *dev, 397 - flow_indr_block_bind_cb_t *cb, 398 - void *cb_priv, 399 - enum flow_block_command command) 396 + static void flow_block_cmd(struct net_device *dev, 397 + flow_indr_block_bind_cb_t *cb, void *cb_priv, 398 + enum flow_block_command command) 400 399 { 401 - struct flow_indr_block_ing_entry *entry; 400 + struct flow_indr_block_entry *entry; 402 401 403 - mutex_lock(&flow_indr_block_ing_cb_lock); 404 - list_for_each_entry(entry, &block_ing_cb_list, list) { 402 + mutex_lock(&flow_indr_block_cb_lock); 403 + list_for_each_entry(entry, &block_cb_list, list) { 405 404 entry->cb(dev, cb, cb_priv, command); 406 405 } 407 - mutex_unlock(&flow_indr_block_ing_cb_lock); 406 + mutex_unlock(&flow_indr_block_cb_lock); 408 407 } 409 408 410 409 int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv, ··· 423 424 if (err) 424 425 goto err_dev_put; 425 426 426 - flow_block_ing_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv, 427 - FLOW_BLOCK_BIND); 427 + flow_block_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv, 428 + FLOW_BLOCK_BIND); 428 429 429 430 return 0; 430 431 ··· 463 464 if (!indr_block_cb) 464 465 return; 465 466 466 - flow_block_ing_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv, 467 - FLOW_BLOCK_UNBIND); 467 + flow_block_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv, 468 + FLOW_BLOCK_UNBIND); 468 469 469 470 flow_indr_block_cb_del(indr_block_cb); 470 471 flow_indr_block_dev_put(indr_dev); ··· 498 499 } 499 500 EXPORT_SYMBOL_GPL(flow_indr_block_call); 500 501 501 - void flow_indr_add_block_ing_cb(struct flow_indr_block_ing_entry *entry) 502 + void flow_indr_add_block_cb(struct flow_indr_block_entry *entry) 502 503 { 503 - mutex_lock(&flow_indr_block_ing_cb_lock); 504 - list_add_tail(&entry->list, &block_ing_cb_list); 505 - mutex_unlock(&flow_indr_block_ing_cb_lock); 504 + mutex_lock(&flow_indr_block_cb_lock); 505 + list_add_tail(&entry->list, &block_cb_list); 506 + mutex_unlock(&flow_indr_block_cb_lock); 506 507 } 507 - EXPORT_SYMBOL_GPL(flow_indr_add_block_ing_cb); 508 + EXPORT_SYMBOL_GPL(flow_indr_add_block_cb); 508 509 509 - void flow_indr_del_block_ing_cb(struct flow_indr_block_ing_entry *entry) 510 + void flow_indr_del_block_cb(struct flow_indr_block_entry *entry) 510 511 { 511 - mutex_lock(&flow_indr_block_ing_cb_lock); 512 + mutex_lock(&flow_indr_block_cb_lock); 512 513 list_del(&entry->list); 513 - mutex_unlock(&flow_indr_block_ing_cb_lock); 514 + mutex_unlock(&flow_indr_block_cb_lock); 514 515 } 515 - EXPORT_SYMBOL_GPL(flow_indr_del_block_ing_cb); 516 + EXPORT_SYMBOL_GPL(flow_indr_del_block_cb); 516 517 517 518 static int __init init_flow_indr_rhashtable(void) 518 519 {
+1 -3
net/core/lwt_bpf.c
··· 230 230 fl6.daddr = iph6->daddr; 231 231 fl6.saddr = iph6->saddr; 232 232 233 - err = ipv6_stub->ipv6_dst_lookup(net, skb->sk, &dst, &fl6); 234 - if (unlikely(err)) 235 - goto err; 233 + dst = ipv6_stub->ipv6_dst_lookup_flow(net, skb->sk, &fl6, NULL); 236 234 if (IS_ERR(dst)) { 237 235 err = PTR_ERR(dst); 238 236 goto err;
+5 -2
net/core/net-sysfs.c
··· 1459 1459 struct kobject *kobj = &queue->kobj; 1460 1460 int error = 0; 1461 1461 1462 + /* Kobject_put later will trigger netdev_queue_release call 1463 + * which decreases dev refcount: Take that reference here 1464 + */ 1465 + dev_hold(queue->dev); 1466 + 1462 1467 kobj->kset = dev->queues_kset; 1463 1468 error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL, 1464 1469 "tx-%u", index); 1465 1470 if (error) 1466 1471 goto err; 1467 - 1468 - dev_hold(queue->dev); 1469 1472 1470 1473 #ifdef CONFIG_BQL 1471 1474 error = sysfs_create_group(kobj, &dql_group);
+3 -1
net/core/rtnetlink.c
··· 1250 1250 vf_spoofchk.vf = 1251 1251 vf_linkstate.vf = 1252 1252 vf_rss_query_en.vf = 1253 - vf_trust.vf = ivi.vf; 1253 + vf_trust.vf = 1254 + node_guid.vf = 1255 + port_guid.vf = ivi.vf; 1254 1256 1255 1257 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac)); 1256 1258 memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
+6 -4
net/core/skbuff.c
··· 5484 5484 * Returns 0 on success, -errno otherwise. 5485 5485 */ 5486 5486 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, 5487 - int mac_len) 5487 + int mac_len, bool ethernet) 5488 5488 { 5489 5489 struct mpls_shim_hdr *lse; 5490 5490 int err; ··· 5515 5515 lse->label_stack_entry = mpls_lse; 5516 5516 skb_postpush_rcsum(skb, lse, MPLS_HLEN); 5517 5517 5518 - if (skb->dev && skb->dev->type == ARPHRD_ETHER) 5518 + if (ethernet) 5519 5519 skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); 5520 5520 skb->protocol = mpls_proto; 5521 5521 ··· 5529 5529 * @skb: buffer 5530 5530 * @next_proto: ethertype of header after popped MPLS header 5531 5531 * @mac_len: length of the MAC header 5532 + * @ethernet: flag to indicate if ethernet header is present in packet 5532 5533 * 5533 5534 * Expects skb->data at mac header. 5534 5535 * 5535 5536 * Returns 0 on success, -errno otherwise. 5536 5537 */ 5537 - int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len) 5538 + int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, 5539 + bool ethernet) 5538 5540 { 5539 5541 int err; 5540 5542 ··· 5555 5553 skb_reset_mac_header(skb); 5556 5554 skb_set_network_header(skb, mac_len); 5557 5555 5558 - if (skb->dev && skb->dev->type == ARPHRD_ETHER) { 5556 + if (ethernet) { 5559 5557 struct ethhdr *hdr; 5560 5558 5561 5559 /* use mpls_hdr() to get ethertype to account for VLANs. */
+4 -4
net/core/xdp.c
··· 80 80 { 81 81 trace_mem_disconnect(xa); 82 82 83 - mutex_lock(&mem_id_lock); 84 - 85 83 if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params)) 86 84 call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free); 87 - 88 - mutex_unlock(&mem_id_lock); 89 85 } 90 86 91 87 static void mem_allocator_disconnect(void *allocator) 92 88 { 93 89 struct xdp_mem_allocator *xa; 94 90 struct rhashtable_iter iter; 91 + 92 + mutex_lock(&mem_id_lock); 95 93 96 94 rhashtable_walk_enter(mem_id_ht, &iter); 97 95 do { ··· 104 106 105 107 } while (xa == ERR_PTR(-EAGAIN)); 106 108 rhashtable_walk_exit(&iter); 109 + 110 + mutex_unlock(&mem_id_lock); 107 111 } 108 112 109 113 static void mem_id_disconnect(int id)
+3 -3
net/dccp/ipv6.c
··· 210 210 final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final); 211 211 rcu_read_unlock(); 212 212 213 - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); 213 + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); 214 214 if (IS_ERR(dst)) { 215 215 err = PTR_ERR(dst); 216 216 dst = NULL; ··· 282 282 security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6)); 283 283 284 284 /* sk = NULL, but it is safe for now. RST socket required. */ 285 - dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL); 285 + dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL); 286 286 if (!IS_ERR(dst)) { 287 287 skb_dst_set(skb, dst); 288 288 ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0, 0); ··· 912 912 opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk)); 913 913 final_p = fl6_update_dst(&fl6, opt, &final); 914 914 915 - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); 915 + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); 916 916 if (IS_ERR(dst)) { 917 917 err = PTR_ERR(dst); 918 918 goto failure;
+7 -2
net/hsr/hsr_device.c
··· 227 227 struct hsr_port *master; 228 228 229 229 master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); 230 - skb->dev = master->dev; 231 - hsr_forward_skb(skb, master); 230 + if (master) { 231 + skb->dev = master->dev; 232 + hsr_forward_skb(skb, master); 233 + } else { 234 + atomic_long_inc(&dev->tx_dropped); 235 + dev_kfree_skb_any(skb); 236 + } 232 237 return NETDEV_TX_OK; 233 238 } 234 239
-5
net/ipv4/devinet.c
··· 1496 1496 } 1497 1497 } 1498 1498 1499 - static bool inetdev_valid_mtu(unsigned int mtu) 1500 - { 1501 - return mtu >= IPV4_MIN_MTU; 1502 - } 1503 - 1504 1499 static void inetdev_send_gratuitous_arp(struct net_device *dev, 1505 1500 struct in_device *in_dev) 1506 1501
+1 -1
net/ipv4/gre_demux.c
··· 127 127 if (!pskb_may_pull(skb, nhs + hdr_len + sizeof(*ershdr))) 128 128 return -EINVAL; 129 129 130 - ershdr = (struct erspan_base_hdr *)options; 130 + ershdr = (struct erspan_base_hdr *)(skb->data + nhs + hdr_len); 131 131 tpi->key = cpu_to_be32(get_session_id(ershdr)); 132 132 } 133 133
+8 -5
net/ipv4/ip_output.c
··· 1258 1258 cork->addr = ipc->addr; 1259 1259 } 1260 1260 1261 - /* 1262 - * We steal reference to this route, caller should not release it 1263 - */ 1264 - *rtp = NULL; 1265 1261 cork->fragsize = ip_sk_use_pmtu(sk) ? 1266 - dst_mtu(&rt->dst) : rt->dst.dev->mtu; 1262 + dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu); 1263 + 1264 + if (!inetdev_valid_mtu(cork->fragsize)) 1265 + return -ENETUNREACH; 1267 1266 1268 1267 cork->gso_size = ipc->gso_size; 1268 + 1269 1269 cork->dst = &rt->dst; 1270 + /* We stole this route, caller should not release it. */ 1271 + *rtp = NULL; 1272 + 1270 1273 cork->length = 0; 1271 1274 cork->ttl = ipc->ttl; 1272 1275 cork->tos = ipc->tos;
+3 -2
net/ipv4/tcp_output.c
··· 755 755 min_t(unsigned int, eff_sacks, 756 756 (remaining - TCPOLEN_SACK_BASE_ALIGNED) / 757 757 TCPOLEN_SACK_PERBLOCK); 758 - size += TCPOLEN_SACK_BASE_ALIGNED + 759 - opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK; 758 + if (likely(opts->num_sack_blocks)) 759 + size += TCPOLEN_SACK_BASE_ALIGNED + 760 + opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK; 760 761 } 761 762 762 763 return size;
+8 -2
net/ipv4/tcp_timer.c
··· 434 434 struct net *net = sock_net(sk); 435 435 struct inet_connection_sock *icsk = inet_csk(sk); 436 436 struct request_sock *req; 437 + struct sk_buff *skb; 437 438 438 439 req = rcu_dereference_protected(tp->fastopen_rsk, 439 440 lockdep_sock_is_held(sk)); ··· 447 446 */ 448 447 return; 449 448 } 450 - if (!tp->packets_out || WARN_ON_ONCE(tcp_rtx_queue_empty(sk))) 449 + 450 + if (!tp->packets_out) 451 + return; 452 + 453 + skb = tcp_rtx_queue_head(sk); 454 + if (WARN_ON_ONCE(!skb)) 451 455 return; 452 456 453 457 tp->tlp_high_seq = 0; ··· 486 480 goto out; 487 481 } 488 482 tcp_enter_loss(sk); 489 - tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1); 483 + tcp_retransmit_skb(sk, skb, 1); 490 484 __sk_dst_reset(sk); 491 485 goto out_reset_timer; 492 486 }
+6 -5
net/ipv6/addrconf_core.c
··· 129 129 } 130 130 EXPORT_SYMBOL(inet6addr_validator_notifier_call_chain); 131 131 132 - static int eafnosupport_ipv6_dst_lookup(struct net *net, struct sock *u1, 133 - struct dst_entry **u2, 134 - struct flowi6 *u3) 132 + static struct dst_entry *eafnosupport_ipv6_dst_lookup_flow(struct net *net, 133 + const struct sock *sk, 134 + struct flowi6 *fl6, 135 + const struct in6_addr *final_dst) 135 136 { 136 - return -EAFNOSUPPORT; 137 + return ERR_PTR(-EAFNOSUPPORT); 137 138 } 138 139 139 140 static int eafnosupport_ipv6_route_input(struct sk_buff *skb) ··· 191 190 } 192 191 193 192 const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) { 194 - .ipv6_dst_lookup = eafnosupport_ipv6_dst_lookup, 193 + .ipv6_dst_lookup_flow = eafnosupport_ipv6_dst_lookup_flow, 195 194 .ipv6_route_input = eafnosupport_ipv6_route_input, 196 195 .fib6_get_table = eafnosupport_fib6_get_table, 197 196 .fib6_table_lookup = eafnosupport_fib6_table_lookup,
+2 -2
net/ipv6/af_inet6.c
··· 765 765 &final); 766 766 rcu_read_unlock(); 767 767 768 - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); 768 + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); 769 769 if (IS_ERR(dst)) { 770 770 sk->sk_route_caps = 0; 771 771 sk->sk_err_soft = -PTR_ERR(dst); ··· 946 946 static const struct ipv6_stub ipv6_stub_impl = { 947 947 .ipv6_sock_mc_join = ipv6_sock_mc_join, 948 948 .ipv6_sock_mc_drop = ipv6_sock_mc_drop, 949 - .ipv6_dst_lookup = ip6_dst_lookup, 949 + .ipv6_dst_lookup_flow = ip6_dst_lookup_flow, 950 950 .ipv6_route_input = ipv6_route_input, 951 951 .fib6_get_table = fib6_get_table, 952 952 .fib6_table_lookup = fib6_table_lookup,
+1 -1
net/ipv6/datagram.c
··· 85 85 final_p = fl6_update_dst(&fl6, opt, &final); 86 86 rcu_read_unlock(); 87 87 88 - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); 88 + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); 89 89 if (IS_ERR(dst)) { 90 90 err = PTR_ERR(dst); 91 91 goto out;
+2 -2
net/ipv6/inet6_connection_sock.c
··· 48 48 fl6->flowi6_uid = sk->sk_uid; 49 49 security_req_classify_flow(req, flowi6_to_flowi(fl6)); 50 50 51 - dst = ip6_dst_lookup_flow(sk, fl6, final_p); 51 + dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); 52 52 if (IS_ERR(dst)) 53 53 return NULL; 54 54 ··· 103 103 104 104 dst = __inet6_csk_dst_check(sk, np->dst_cookie); 105 105 if (!dst) { 106 - dst = ip6_dst_lookup_flow(sk, fl6, final_p); 106 + dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); 107 107 108 108 if (!IS_ERR(dst)) 109 109 ip6_dst_store(sk, dst, NULL, NULL);
+4 -4
net/ipv6/ip6_output.c
··· 1144 1144 * It returns a valid dst pointer on success, or a pointer encoded 1145 1145 * error code. 1146 1146 */ 1147 - struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6, 1147 + struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6, 1148 1148 const struct in6_addr *final_dst) 1149 1149 { 1150 1150 struct dst_entry *dst = NULL; 1151 1151 int err; 1152 1152 1153 - err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6); 1153 + err = ip6_dst_lookup_tail(net, sk, &dst, fl6); 1154 1154 if (err) 1155 1155 return ERR_PTR(err); 1156 1156 if (final_dst) 1157 1157 fl6->daddr = *final_dst; 1158 1158 1159 - return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); 1159 + return xfrm_lookup_route(net, dst, flowi6_to_flowi(fl6), sk, 0); 1160 1160 } 1161 1161 EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow); 1162 1162 ··· 1188 1188 if (dst) 1189 1189 return dst; 1190 1190 1191 - dst = ip6_dst_lookup_flow(sk, fl6, final_dst); 1191 + dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_dst); 1192 1192 if (connected && !IS_ERR(dst)) 1193 1193 ip6_sk_dst_store_flow(sk, dst_clone(dst), fl6); 1194 1194
+1 -1
net/ipv6/raw.c
··· 925 925 926 926 fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); 927 927 928 - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); 928 + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); 929 929 if (IS_ERR(dst)) { 930 930 err = PTR_ERR(dst); 931 931 goto out;
+1 -1
net/ipv6/syncookies.c
··· 235 235 fl6.flowi6_uid = sk->sk_uid; 236 236 security_req_classify_flow(req, flowi6_to_flowi(&fl6)); 237 237 238 - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); 238 + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); 239 239 if (IS_ERR(dst)) 240 240 goto out_free; 241 241 }
+2 -2
net/ipv6/tcp_ipv6.c
··· 275 275 276 276 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 277 277 278 - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); 278 + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); 279 279 if (IS_ERR(dst)) { 280 280 err = PTR_ERR(dst); 281 281 goto failure; ··· 906 906 * Underlying function will use this to retrieve the network 907 907 * namespace 908 908 */ 909 - dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL); 909 + dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL); 910 910 if (!IS_ERR(dst)) { 911 911 skb_dst_set(buff, dst); 912 912 ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass,
+1 -1
net/l2tp/l2tp_ip6.c
··· 615 615 616 616 fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); 617 617 618 - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); 618 + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); 619 619 if (IS_ERR(dst)) { 620 620 err = PTR_ERR(dst); 621 621 goto out;
+3 -4
net/mpls/af_mpls.c
··· 617 617 struct net_device *dev; 618 618 struct dst_entry *dst; 619 619 struct flowi6 fl6; 620 - int err; 621 620 622 621 if (!ipv6_stub) 623 622 return ERR_PTR(-EAFNOSUPPORT); 624 623 625 624 memset(&fl6, 0, sizeof(fl6)); 626 625 memcpy(&fl6.daddr, addr, sizeof(struct in6_addr)); 627 - err = ipv6_stub->ipv6_dst_lookup(net, NULL, &dst, &fl6); 628 - if (err) 629 - return ERR_PTR(err); 626 + dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL); 627 + if (IS_ERR(dst)) 628 + return ERR_CAST(dst); 630 629 631 630 dev = dst->dev; 632 631 dev_hold(dev);
+3 -3
net/netfilter/nf_tables_offload.c
··· 588 588 return NOTIFY_DONE; 589 589 } 590 590 591 - static struct flow_indr_block_ing_entry block_ing_entry = { 591 + static struct flow_indr_block_entry block_ing_entry = { 592 592 .cb = nft_indr_block_cb, 593 593 .list = LIST_HEAD_INIT(block_ing_entry.list), 594 594 }; ··· 605 605 if (err < 0) 606 606 return err; 607 607 608 - flow_indr_add_block_ing_cb(&block_ing_entry); 608 + flow_indr_add_block_cb(&block_ing_entry); 609 609 610 610 return 0; 611 611 } 612 612 613 613 void nft_offload_exit(void) 614 614 { 615 - flow_indr_del_block_ing_cb(&block_ing_entry); 615 + flow_indr_del_block_cb(&block_ing_entry); 616 616 unregister_netdevice_notifier(&nft_offload_netdev_notifier); 617 617 }
+4 -2
net/nfc/nci/spi.c
··· 44 44 t.len = 0; 45 45 } 46 46 t.cs_change = cs_change; 47 - t.delay_usecs = nspi->xfer_udelay; 47 + t.delay.value = nspi->xfer_udelay; 48 + t.delay.unit = SPI_DELAY_UNIT_USECS; 48 49 t.speed_hz = nspi->xfer_speed_hz; 49 50 50 51 spi_message_init(&m); ··· 217 216 rx.rx_buf = skb_put(skb, rx_len); 218 217 rx.len = rx_len; 219 218 rx.cs_change = 0; 220 - rx.delay_usecs = nspi->xfer_udelay; 219 + rx.delay.value = nspi->xfer_udelay; 220 + rx.delay.unit = SPI_DELAY_UNIT_USECS; 221 221 rx.speed_hz = nspi->xfer_speed_hz; 222 222 spi_message_add_tail(&rx, &m); 223 223
+4 -2
net/openvswitch/actions.c
··· 166 166 int err; 167 167 168 168 err = skb_mpls_push(skb, mpls->mpls_lse, mpls->mpls_ethertype, 169 - skb->mac_len); 169 + skb->mac_len, 170 + ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET); 170 171 if (err) 171 172 return err; 172 173 ··· 180 179 { 181 180 int err; 182 181 183 - err = skb_mpls_pop(skb, ethertype, skb->mac_len); 182 + err = skb_mpls_pop(skb, ethertype, skb->mac_len, 183 + ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET); 184 184 if (err) 185 185 return err; 186 186
+11
net/openvswitch/conntrack.c
··· 903 903 } 904 904 err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype); 905 905 906 + if (err == NF_ACCEPT && 907 + ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) { 908 + if (maniptype == NF_NAT_MANIP_SRC) 909 + maniptype = NF_NAT_MANIP_DST; 910 + else 911 + maniptype = NF_NAT_MANIP_SRC; 912 + 913 + err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, 914 + maniptype); 915 + } 916 + 906 917 /* Mark NAT done if successful and update the flow key. */ 907 918 if (err == NF_ACCEPT) 908 919 ovs_nat_update_key(key, skb, maniptype);
+12 -1
net/sched/act_ct.c
··· 329 329 bool commit) 330 330 { 331 331 #if IS_ENABLED(CONFIG_NF_NAT) 332 + int err; 332 333 enum nf_nat_manip_type maniptype; 333 334 334 335 if (!(ct_action & TCA_CT_ACT_NAT)) ··· 360 359 return NF_ACCEPT; 361 360 } 362 361 363 - return ct_nat_execute(skb, ct, ctinfo, range, maniptype); 362 + err = ct_nat_execute(skb, ct, ctinfo, range, maniptype); 363 + if (err == NF_ACCEPT && 364 + ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) { 365 + if (maniptype == NF_NAT_MANIP_SRC) 366 + maniptype = NF_NAT_MANIP_DST; 367 + else 368 + maniptype = NF_NAT_MANIP_SRC; 369 + 370 + err = ct_nat_execute(skb, ct, ctinfo, range, maniptype); 371 + } 372 + return err; 364 373 #else 365 374 return NF_ACCEPT; 366 375 #endif
+5 -2
net/sched/act_mpls.c
··· 1 1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 2 /* Copyright (C) 2019 Netronome Systems, Inc. */ 3 3 4 + #include <linux/if_arp.h> 4 5 #include <linux/init.h> 5 6 #include <linux/kernel.h> 6 7 #include <linux/module.h> ··· 77 76 78 77 switch (p->tcfm_action) { 79 78 case TCA_MPLS_ACT_POP: 80 - if (skb_mpls_pop(skb, p->tcfm_proto, mac_len)) 79 + if (skb_mpls_pop(skb, p->tcfm_proto, mac_len, 80 + skb->dev && skb->dev->type == ARPHRD_ETHER)) 81 81 goto drop; 82 82 break; 83 83 case TCA_MPLS_ACT_PUSH: 84 84 new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb->protocol)); 85 - if (skb_mpls_push(skb, new_lse, p->tcfm_proto, mac_len)) 85 + if (skb_mpls_push(skb, new_lse, p->tcfm_proto, mac_len, 86 + skb->dev && skb->dev->type == ARPHRD_ETHER)) 86 87 goto drop; 87 88 break; 88 89 case TCA_MPLS_ACT_MODIFY:
+40 -20
net/sched/cls_api.c
··· 626 626 static int tcf_block_setup(struct tcf_block *block, 627 627 struct flow_block_offload *bo); 628 628 629 - static void tc_indr_block_ing_cmd(struct net_device *dev, 630 - struct tcf_block *block, 631 - flow_indr_block_bind_cb_t *cb, 632 - void *cb_priv, 633 - enum flow_block_command command) 629 + static void tc_indr_block_cmd(struct net_device *dev, struct tcf_block *block, 630 + flow_indr_block_bind_cb_t *cb, void *cb_priv, 631 + enum flow_block_command command, bool ingress) 634 632 { 635 633 struct flow_block_offload bo = { 636 634 .command = command, 637 - .binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS, 635 + .binder_type = ingress ? 636 + FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS : 637 + FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS, 638 638 .net = dev_net(dev), 639 639 .block_shared = tcf_block_non_null_shared(block), 640 640 }; ··· 652 652 up_write(&block->cb_lock); 653 653 } 654 654 655 - static struct tcf_block *tc_dev_ingress_block(struct net_device *dev) 655 + static struct tcf_block *tc_dev_block(struct net_device *dev, bool ingress) 656 656 { 657 657 const struct Qdisc_class_ops *cops; 658 + const struct Qdisc_ops *ops; 658 659 struct Qdisc *qdisc; 659 660 660 661 if (!dev_ingress_queue(dev)) ··· 665 664 if (!qdisc) 666 665 return NULL; 667 666 668 - cops = qdisc->ops->cl_ops; 667 + ops = qdisc->ops; 668 + if (!ops) 669 + return NULL; 670 + 671 + if (!ingress && !strcmp("ingress", ops->id)) 672 + return NULL; 673 + 674 + cops = ops->cl_ops; 669 675 if (!cops) 670 676 return NULL; 671 677 672 678 if (!cops->tcf_block) 673 679 return NULL; 674 680 675 - return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL); 681 + return cops->tcf_block(qdisc, 682 + ingress ? TC_H_MIN_INGRESS : TC_H_MIN_EGRESS, 683 + NULL); 676 684 } 677 685 678 - static void tc_indr_block_get_and_ing_cmd(struct net_device *dev, 679 - flow_indr_block_bind_cb_t *cb, 680 - void *cb_priv, 681 - enum flow_block_command command) 686 + static void tc_indr_block_get_and_cmd(struct net_device *dev, 687 + flow_indr_block_bind_cb_t *cb, 688 + void *cb_priv, 689 + enum flow_block_command command) 682 690 { 683 - struct tcf_block *block = tc_dev_ingress_block(dev); 691 + struct tcf_block *block; 684 692 685 - tc_indr_block_ing_cmd(dev, block, cb, cb_priv, command); 693 + block = tc_dev_block(dev, true); 694 + tc_indr_block_cmd(dev, block, cb, cb_priv, command, true); 695 + 696 + block = tc_dev_block(dev, false); 697 + tc_indr_block_cmd(dev, block, cb, cb_priv, command, false); 686 698 } 687 699 688 700 static void tc_indr_block_call(struct tcf_block *block, ··· 2735 2721 struct netlink_ext_ack *extack) 2736 2722 { 2737 2723 const struct tcf_proto_ops *ops; 2724 + char name[IFNAMSIZ]; 2738 2725 void *tmplt_priv; 2739 2726 2740 2727 /* If kind is not set, user did not specify template. 
*/ 2741 2728 if (!tca[TCA_KIND]) 2742 2729 return 0; 2743 2730 2744 - ops = tcf_proto_lookup_ops(nla_data(tca[TCA_KIND]), true, extack); 2731 + if (tcf_proto_check_kind(tca[TCA_KIND], name)) { 2732 + NL_SET_ERR_MSG(extack, "Specified TC chain template name too long"); 2733 + return -EINVAL; 2734 + } 2735 + 2736 + ops = tcf_proto_lookup_ops(name, true, extack); 2745 2737 if (IS_ERR(ops)) 2746 2738 return PTR_ERR(ops); 2747 2739 if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) { ··· 3646 3626 .size = sizeof(struct tcf_net), 3647 3627 }; 3648 3628 3649 - static struct flow_indr_block_ing_entry block_ing_entry = { 3650 - .cb = tc_indr_block_get_and_ing_cmd, 3651 - .list = LIST_HEAD_INIT(block_ing_entry.list), 3629 + static struct flow_indr_block_entry block_entry = { 3630 + .cb = tc_indr_block_get_and_cmd, 3631 + .list = LIST_HEAD_INIT(block_entry.list), 3652 3632 }; 3653 3633 3654 3634 static int __init tc_filter_init(void) ··· 3663 3643 if (err) 3664 3644 goto err_register_pernet_subsys; 3665 3645 3666 - flow_indr_add_block_ing_cb(&block_ing_entry); 3646 + flow_indr_add_block_cb(&block_entry); 3667 3647 3668 3648 rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL, 3669 3649 RTNL_FLAG_DOIT_UNLOCKED);
+65 -51
net/sched/cls_flower.c
··· 56 56 struct flow_dissector_key_ip ip; 57 57 struct flow_dissector_key_ip enc_ip; 58 58 struct flow_dissector_key_enc_opts enc_opts; 59 - struct flow_dissector_key_ports tp_min; 60 - struct flow_dissector_key_ports tp_max; 59 + union { 60 + struct flow_dissector_key_ports tp; 61 + struct { 62 + struct flow_dissector_key_ports tp_min; 63 + struct flow_dissector_key_ports tp_max; 64 + }; 65 + } tp_range; 61 66 struct flow_dissector_key_ct ct; 62 67 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */ 63 68 ··· 205 200 { 206 201 __be16 min_mask, max_mask, min_val, max_val; 207 202 208 - min_mask = htons(filter->mask->key.tp_min.dst); 209 - max_mask = htons(filter->mask->key.tp_max.dst); 210 - min_val = htons(filter->key.tp_min.dst); 211 - max_val = htons(filter->key.tp_max.dst); 203 + min_mask = htons(filter->mask->key.tp_range.tp_min.dst); 204 + max_mask = htons(filter->mask->key.tp_range.tp_max.dst); 205 + min_val = htons(filter->key.tp_range.tp_min.dst); 206 + max_val = htons(filter->key.tp_range.tp_max.dst); 212 207 213 208 if (min_mask && max_mask) { 214 - if (htons(key->tp.dst) < min_val || 215 - htons(key->tp.dst) > max_val) 209 + if (htons(key->tp_range.tp.dst) < min_val || 210 + htons(key->tp_range.tp.dst) > max_val) 216 211 return false; 217 212 218 213 /* skb does not have min and max values */ 219 - mkey->tp_min.dst = filter->mkey.tp_min.dst; 220 - mkey->tp_max.dst = filter->mkey.tp_max.dst; 214 + mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst; 215 + mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst; 221 216 } 222 217 return true; 223 218 } ··· 228 223 { 229 224 __be16 min_mask, max_mask, min_val, max_val; 230 225 231 - min_mask = htons(filter->mask->key.tp_min.src); 232 - max_mask = htons(filter->mask->key.tp_max.src); 233 - min_val = htons(filter->key.tp_min.src); 234 - max_val = htons(filter->key.tp_max.src); 226 + min_mask = htons(filter->mask->key.tp_range.tp_min.src); 227 + max_mask = htons(filter->mask->key.tp_range.tp_max.src); 228 + min_val = htons(filter->key.tp_range.tp_min.src); 229 + max_val = htons(filter->key.tp_range.tp_max.src); 235 230 236 231 if (min_mask && max_mask) { 237 - if (htons(key->tp.src) < min_val || 238 - htons(key->tp.src) > max_val) 232 + if (htons(key->tp_range.tp.src) < min_val || 233 + htons(key->tp_range.tp.src) > max_val) 239 234 return false; 240 235 241 236 /* skb does not have min and max values */ 242 - mkey->tp_min.src = filter->mkey.tp_min.src; 243 - mkey->tp_max.src = filter->mkey.tp_max.src; 237 + mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src; 238 + mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src; 244 239 } 245 240 return true; 246 241 } ··· 739 734 static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key, 740 735 struct fl_flow_key *mask) 741 736 { 742 - fl_set_key_val(tb, &key->tp_min.dst, 743 - TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_min.dst, 744 - TCA_FLOWER_UNSPEC, sizeof(key->tp_min.dst)); 745 - fl_set_key_val(tb, &key->tp_max.dst, 746 - TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_max.dst, 747 - TCA_FLOWER_UNSPEC, sizeof(key->tp_max.dst)); 748 - fl_set_key_val(tb, &key->tp_min.src, 749 - TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_min.src, 750 - TCA_FLOWER_UNSPEC, sizeof(key->tp_min.src)); 751 - fl_set_key_val(tb, &key->tp_max.src, 752 - TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_max.src, 753 - TCA_FLOWER_UNSPEC, sizeof(key->tp_max.src)); 737 + fl_set_key_val(tb, &key->tp_range.tp_min.dst, 738 + TCA_FLOWER_KEY_PORT_DST_MIN, 
&mask->tp_range.tp_min.dst, 739 + TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst)); 740 + fl_set_key_val(tb, &key->tp_range.tp_max.dst, 741 + TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst, 742 + TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst)); 743 + fl_set_key_val(tb, &key->tp_range.tp_min.src, 744 + TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src, 745 + TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src)); 746 + fl_set_key_val(tb, &key->tp_range.tp_max.src, 747 + TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src, 748 + TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src)); 754 749 755 - if ((mask->tp_min.dst && mask->tp_max.dst && 756 - htons(key->tp_max.dst) <= htons(key->tp_min.dst)) || 757 - (mask->tp_min.src && mask->tp_max.src && 758 - htons(key->tp_max.src) <= htons(key->tp_min.src))) 750 + if ((mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst && 751 + htons(key->tp_range.tp_max.dst) <= 752 + htons(key->tp_range.tp_min.dst)) || 753 + (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src && 754 + htons(key->tp_range.tp_max.src) <= 755 + htons(key->tp_range.tp_min.src))) 759 756 return -EINVAL; 760 757 761 758 return 0; ··· 1516 1509 FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4); 1517 1510 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1518 1511 FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6); 1519 - if (FL_KEY_IS_MASKED(mask, tp) || 1520 - FL_KEY_IS_MASKED(mask, tp_min) || FL_KEY_IS_MASKED(mask, tp_max)) 1521 - FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_PORTS, tp); 1512 + FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1513 + FLOW_DISSECTOR_KEY_PORTS, tp); 1514 + FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1515 + FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range); 1522 1516 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1523 1517 FLOW_DISSECTOR_KEY_IP, ip); 1524 1518 FL_KEY_SET_IF_MASKED(mask, keys, cnt, ··· 1568 1560 1569 1561 fl_mask_copy(newmask, mask); 1570 1562 1571 - if ((newmask->key.tp_min.dst && newmask->key.tp_max.dst) || 1572 - (newmask->key.tp_min.src && newmask->key.tp_max.src)) 1563 + if ((newmask->key.tp_range.tp_min.dst && 1564 + newmask->key.tp_range.tp_max.dst) || 1565 + (newmask->key.tp_range.tp_min.src && 1566 + newmask->key.tp_range.tp_max.src)) 1573 1567 newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE; 1574 1568 1575 1569 err = fl_init_mask_hashtable(newmask); ··· 2169 2159 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key, 2170 2160 struct fl_flow_key *mask) 2171 2161 { 2172 - if (fl_dump_key_val(skb, &key->tp_min.dst, TCA_FLOWER_KEY_PORT_DST_MIN, 2173 - &mask->tp_min.dst, TCA_FLOWER_UNSPEC, 2174 - sizeof(key->tp_min.dst)) || 2175 - fl_dump_key_val(skb, &key->tp_max.dst, TCA_FLOWER_KEY_PORT_DST_MAX, 2176 - &mask->tp_max.dst, TCA_FLOWER_UNSPEC, 2177 - sizeof(key->tp_max.dst)) || 2178 - fl_dump_key_val(skb, &key->tp_min.src, TCA_FLOWER_KEY_PORT_SRC_MIN, 2179 - &mask->tp_min.src, TCA_FLOWER_UNSPEC, 2180 - sizeof(key->tp_min.src)) || 2181 - fl_dump_key_val(skb, &key->tp_max.src, TCA_FLOWER_KEY_PORT_SRC_MAX, 2182 - &mask->tp_max.src, TCA_FLOWER_UNSPEC, 2183 - sizeof(key->tp_max.src))) 2162 + if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst, 2163 + TCA_FLOWER_KEY_PORT_DST_MIN, 2164 + &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC, 2165 + sizeof(key->tp_range.tp_min.dst)) || 2166 + fl_dump_key_val(skb, &key->tp_range.tp_max.dst, 2167 + TCA_FLOWER_KEY_PORT_DST_MAX, 2168 + &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC, 2169 + sizeof(key->tp_range.tp_max.dst)) || 2170 + fl_dump_key_val(skb, &key->tp_range.tp_min.src, 2171 + TCA_FLOWER_KEY_PORT_SRC_MIN, 2172 + 
&mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC, 2173 + sizeof(key->tp_range.tp_min.src)) || 2174 + fl_dump_key_val(skb, &key->tp_range.tp_max.src, 2175 + TCA_FLOWER_KEY_PORT_SRC_MAX, 2176 + &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC, 2177 + sizeof(key->tp_range.tp_max.src))) 2184 2178 return -1; 2185 2179 2186 2180 return 0;
+1
net/sched/sch_cake.c
··· 2184 2184 [TCA_CAKE_MPU] = { .type = NLA_U32 }, 2185 2185 [TCA_CAKE_INGRESS] = { .type = NLA_U32 }, 2186 2186 [TCA_CAKE_ACK_FILTER] = { .type = NLA_U32 }, 2187 + [TCA_CAKE_SPLIT_GSO] = { .type = NLA_U32 }, 2187 2188 [TCA_CAKE_FWMARK] = { .type = NLA_U32 }, 2188 2189 }; 2189 2190
+1
net/sched/sch_mq.c
··· 153 153 __gnet_stats_copy_queue(&sch->qstats, 154 154 qdisc->cpu_qstats, 155 155 &qdisc->qstats, qlen); 156 + sch->q.qlen += qlen; 156 157 } else { 157 158 sch->q.qlen += qdisc->q.qlen; 158 159 sch->bstats.bytes += qdisc->bstats.bytes;
+2 -1
net/sched/sch_mqprio.c
··· 411 411 __gnet_stats_copy_queue(&sch->qstats, 412 412 qdisc->cpu_qstats, 413 413 &qdisc->qstats, qlen); 414 + sch->q.qlen += qlen; 414 415 } else { 415 416 sch->q.qlen += qdisc->q.qlen; 416 417 sch->bstats.bytes += qdisc->bstats.bytes; ··· 434 433 opt.offset[tc] = dev->tc_to_txq[tc].offset; 435 434 } 436 435 437 - if (nla_put(skb, TCA_OPTIONS, NLA_ALIGN(sizeof(opt)), &opt)) 436 + if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt)) 438 437 goto nla_put_failure; 439 438 440 439 if ((priv->flags & TC_MQPRIO_F_MODE) &&
+2 -2
net/sctp/ipv6.c
··· 275 275 final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); 276 276 rcu_read_unlock(); 277 277 278 - dst = ip6_dst_lookup_flow(sk, fl6, final_p); 278 + dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); 279 279 if (!asoc || saddr) 280 280 goto out; 281 281 ··· 328 328 fl6->saddr = laddr->a.v6.sin6_addr; 329 329 fl6->fl6_sport = laddr->a.v6.sin6_port; 330 330 final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); 331 - bdst = ip6_dst_lookup_flow(sk, fl6, final_p); 331 + bdst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); 332 332 333 333 if (IS_ERR(bdst)) 334 334 continue;
+6 -1
net/socket.c
··· 2546 2546 2547 2547 if (sock->file->f_flags & O_NONBLOCK) 2548 2548 flags |= MSG_DONTWAIT; 2549 - err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys, flags); 2549 + 2550 + if (unlikely(nosec)) 2551 + err = sock_recvmsg_nosec(sock, msg_sys, flags); 2552 + else 2553 + err = sock_recvmsg(sock, msg_sys, flags); 2554 + 2550 2555 if (err < 0) 2551 2556 goto out; 2552 2557 len = err;
+15 -14
net/tipc/core.c
··· 148 148 sysctl_tipc_rmem[1] = RCVBUF_DEF; 149 149 sysctl_tipc_rmem[2] = RCVBUF_MAX; 150 150 151 - err = tipc_netlink_start(); 152 - if (err) 153 - goto out_netlink; 154 - 155 - err = tipc_netlink_compat_start(); 156 - if (err) 157 - goto out_netlink_compat; 158 - 159 151 err = tipc_register_sysctl(); 160 152 if (err) 161 153 goto out_sysctl; ··· 172 180 if (err) 173 181 goto out_bearer; 174 182 183 + err = tipc_netlink_start(); 184 + if (err) 185 + goto out_netlink; 186 + 187 + err = tipc_netlink_compat_start(); 188 + if (err) 189 + goto out_netlink_compat; 190 + 175 191 pr_info("Started in single node mode\n"); 176 192 return 0; 193 + 194 + out_netlink_compat: 195 + tipc_netlink_stop(); 196 + out_netlink: 197 + tipc_bearer_cleanup(); 177 198 out_bearer: 178 199 unregister_pernet_subsys(&tipc_pernet_pre_exit_ops); 179 200 out_register_pernet_subsys: ··· 198 193 out_pernet: 199 194 tipc_unregister_sysctl(); 200 195 out_sysctl: 201 - tipc_netlink_compat_stop(); 202 - out_netlink_compat: 203 - tipc_netlink_stop(); 204 - out_netlink: 205 196 pr_err("Unable to start in single node mode\n"); 206 197 return err; 207 198 } 208 199 209 200 static void __exit tipc_exit(void) 210 201 { 202 + tipc_netlink_compat_stop(); 203 + tipc_netlink_stop(); 211 204 tipc_bearer_cleanup(); 212 205 unregister_pernet_subsys(&tipc_pernet_pre_exit_ops); 213 206 unregister_pernet_device(&tipc_topsrv_net_ops); 214 207 tipc_socket_stop(); 215 208 unregister_pernet_device(&tipc_net_ops); 216 - tipc_netlink_stop(); 217 - tipc_netlink_compat_stop(); 218 209 tipc_unregister_sysctl(); 219 210 220 211 pr_info("Deactivated\n");
+6 -3
net/tipc/udp_media.c
··· 195 195 .saddr = src->ipv6, 196 196 .flowi6_proto = IPPROTO_UDP 197 197 }; 198 - err = ipv6_stub->ipv6_dst_lookup(net, ub->ubsock->sk, 199 - &ndst, &fl6); 200 - if (err) 198 + ndst = ipv6_stub->ipv6_dst_lookup_flow(net, 199 + ub->ubsock->sk, 200 + &fl6, NULL); 201 + if (IS_ERR(ndst)) { 202 + err = PTR_ERR(ndst); 201 203 goto tx_error; 204 + } 202 205 dst_cache_set_ip6(cache, ndst, &fl6.saddr); 203 206 } 204 207 ttl = ip6_dst_hoplimit(ndst);
+4 -4
net/tls/tls_device.c
··· 429 429 430 430 if (flags & 431 431 ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST)) 432 - return -ENOTSUPP; 432 + return -EOPNOTSUPP; 433 433 434 434 if (unlikely(sk->sk_err)) 435 435 return -sk->sk_err; ··· 571 571 lock_sock(sk); 572 572 573 573 if (flags & MSG_OOB) { 574 - rc = -ENOTSUPP; 574 + rc = -EOPNOTSUPP; 575 575 goto out; 576 576 } 577 577 ··· 1023 1023 } 1024 1024 1025 1025 if (!(netdev->features & NETIF_F_HW_TLS_TX)) { 1026 - rc = -ENOTSUPP; 1026 + rc = -EOPNOTSUPP; 1027 1027 goto release_netdev; 1028 1028 } 1029 1029 ··· 1098 1098 } 1099 1099 1100 1100 if (!(netdev->features & NETIF_F_HW_TLS_RX)) { 1101 - rc = -ENOTSUPP; 1101 + rc = -EOPNOTSUPP; 1102 1102 goto release_netdev; 1103 1103 } 1104 1104
+2 -2
net/tls/tls_main.c
··· 487 487 /* check version */ 488 488 if (crypto_info->version != TLS_1_2_VERSION && 489 489 crypto_info->version != TLS_1_3_VERSION) { 490 - rc = -ENOTSUPP; 490 + rc = -EINVAL; 491 491 goto err_crypto_info; 492 492 } 493 493 ··· 714 714 * share the ulp context. 715 715 */ 716 716 if (sk->sk_state != TCP_ESTABLISHED) 717 - return -ENOTSUPP; 717 + return -ENOTCONN; 718 718 719 719 /* allocate tls context */ 720 720 write_lock_bh(&sk->sk_callback_lock);
+4 -4
net/tls/tls_sw.c
··· 905 905 int ret = 0; 906 906 907 907 if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL)) 908 - return -ENOTSUPP; 908 + return -EOPNOTSUPP; 909 909 910 910 mutex_lock(&tls_ctx->tx_lock); 911 911 lock_sock(sk); ··· 1220 1220 if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | 1221 1221 MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY | 1222 1222 MSG_NO_SHARED_FRAGS)) 1223 - return -ENOTSUPP; 1223 + return -EOPNOTSUPP; 1224 1224 1225 1225 return tls_sw_do_sendpage(sk, page, offset, size, flags); 1226 1226 } ··· 1233 1233 1234 1234 if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | 1235 1235 MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY)) 1236 - return -ENOTSUPP; 1236 + return -EOPNOTSUPP; 1237 1237 1238 1238 mutex_lock(&tls_ctx->tx_lock); 1239 1239 lock_sock(sk); ··· 1932 1932 1933 1933 /* splice does not support reading control messages */ 1934 1934 if (ctx->control != TLS_RECORD_TYPE_DATA) { 1935 - err = -ENOTSUPP; 1935 + err = -EINVAL; 1936 1936 goto splice_read_end; 1937 1937 } 1938 1938
+3 -3
samples/bpf/xdp_rxq_info_user.c
··· 489 489 if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd)) 490 490 return EXIT_FAIL; 491 491 492 - map = bpf_map__next(NULL, obj); 493 - stats_global_map = bpf_map__next(map, obj); 494 - rx_queue_index_map = bpf_map__next(stats_global_map, obj); 492 + map = bpf_object__find_map_by_name(obj, "config_map"); 493 + stats_global_map = bpf_object__find_map_by_name(obj, "stats_global_map"); 494 + rx_queue_index_map = bpf_object__find_map_by_name(obj, "rx_queue_index_map"); 495 495 if (!map || !stats_global_map || !rx_queue_index_map) { 496 496 printf("finding a map in obj file failed\n"); 497 497 return EXIT_FAIL;
-1
tools/lib/bpf/.gitignore
··· 1 1 libbpf_version.h 2 2 libbpf.pc 3 3 FEATURE-DUMP.libbpf 4 - test_libbpf 5 4 libbpf.so.* 6 5 TAGS 7 6 tags
+6 -9
tools/lib/bpf/Makefile
··· 147 147 148 148 GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN_SHARED) | \ 149 149 cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \ 150 - awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$8}' | \ 150 + awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}' | \ 151 151 sort -u | wc -l) 152 152 VERSIONED_SYM_COUNT = $(shell readelf -s --wide $(OUTPUT)libbpf.so | \ 153 153 grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l) 154 154 155 - CMD_TARGETS = $(LIB_TARGET) $(PC_FILE) $(OUTPUT)test_libbpf 155 + CMD_TARGETS = $(LIB_TARGET) $(PC_FILE) 156 156 157 157 all: fixdep 158 158 $(Q)$(MAKE) all_cmd ··· 180 180 $(BPF_IN_STATIC): force elfdep bpfdep bpf_helper_defs.h 181 181 $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(STATIC_OBJDIR) 182 182 183 - bpf_helper_defs.h: $(srctree)/include/uapi/linux/bpf.h 183 + bpf_helper_defs.h: $(srctree)/tools/include/uapi/linux/bpf.h 184 184 $(Q)$(srctree)/scripts/bpf_helpers_doc.py --header \ 185 - --file $(srctree)/include/uapi/linux/bpf.h > bpf_helper_defs.h 185 + --file $(srctree)/tools/include/uapi/linux/bpf.h > bpf_helper_defs.h 186 186 187 187 $(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION) 188 188 ··· 195 195 196 196 $(OUTPUT)libbpf.a: $(BPF_IN_STATIC) 197 197 $(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^ 198 - 199 - $(OUTPUT)test_libbpf: test_libbpf.c $(OUTPUT)libbpf.a 200 - $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(INCLUDES) $^ -lelf -o $@ 201 198 202 199 $(OUTPUT)libbpf.pc: 203 200 $(QUIET_GEN)sed -e "s|@PREFIX@|$(prefix)|" \ ··· 211 214 "versioned symbols in $^ ($(VERSIONED_SYM_COUNT))." \ 212 215 "Please make sure all LIBBPF_API symbols are" \ 213 216 "versioned in $(VERSION_SCRIPT)." >&2; \ 214 - readelf -s --wide $(OUTPUT)libbpf-in.o | \ 217 + readelf -s --wide $(BPF_IN_SHARED) | \ 215 218 cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \ 216 - awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$8}'| \ 219 + awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}'| \ 217 220 sort -u > $(OUTPUT)libbpf_global_syms.tmp; \ 218 221 readelf -s --wide $(OUTPUT)libbpf.so | \ 219 222 grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | \
+20 -25
tools/lib/bpf/libbpf.c
··· 171 171 RELO_DATA, 172 172 } type; 173 173 int insn_idx; 174 - union { 175 - int map_idx; 176 - int text_off; 177 - }; 174 + int map_idx; 175 + int sym_off; 178 176 } *reloc_desc; 179 177 int nr_reloc; 180 178 int log_level; ··· 1817 1819 return -LIBBPF_ERRNO__RELOC; 1818 1820 } 1819 1821 if (sym->st_value % 8) { 1820 - pr_warn("bad call relo offset: %lu\n", sym->st_value); 1822 + pr_warn("bad call relo offset: %llu\n", (__u64)sym->st_value); 1821 1823 return -LIBBPF_ERRNO__RELOC; 1822 1824 } 1823 1825 reloc_desc->type = RELO_CALL; 1824 1826 reloc_desc->insn_idx = insn_idx; 1825 - reloc_desc->text_off = sym->st_value / 8; 1827 + reloc_desc->sym_off = sym->st_value; 1826 1828 obj->has_pseudo_calls = true; 1827 1829 return 0; 1828 1830 } ··· 1866 1868 reloc_desc->type = RELO_LD64; 1867 1869 reloc_desc->insn_idx = insn_idx; 1868 1870 reloc_desc->map_idx = map_idx; 1871 + reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */ 1869 1872 return 0; 1870 1873 } 1871 1874 ··· 1898 1899 reloc_desc->type = RELO_DATA; 1899 1900 reloc_desc->insn_idx = insn_idx; 1900 1901 reloc_desc->map_idx = map_idx; 1902 + reloc_desc->sym_off = sym->st_value; 1901 1903 return 0; 1902 1904 } 1903 1905 ··· 3563 3563 return -LIBBPF_ERRNO__RELOC; 3564 3564 3565 3565 if (prog->idx == obj->efile.text_shndx) { 3566 - pr_warn("relo in .text insn %d into off %d\n", 3567 - relo->insn_idx, relo->text_off); 3566 + pr_warn("relo in .text insn %d into off %d (insn #%d)\n", 3567 + relo->insn_idx, relo->sym_off, relo->sym_off / 8); 3568 3568 return -LIBBPF_ERRNO__RELOC; 3569 3569 } 3570 3570 ··· 3599 3599 prog->section_name); 3600 3600 } 3601 3601 insn = &prog->insns[relo->insn_idx]; 3602 - insn->imm += relo->text_off + prog->main_prog_cnt - relo->insn_idx; 3602 + insn->imm += relo->sym_off / 8 + prog->main_prog_cnt - relo->insn_idx; 3603 3603 return 0; 3604 3604 } 3605 3605 ··· 3622 3622 return 0; 3623 3623 3624 3624 for (i = 0; i < prog->nr_reloc; i++) { 3625 - if (prog->reloc_desc[i].type == RELO_LD64 || 3626 - prog->reloc_desc[i].type == RELO_DATA) { 3627 - bool relo_data = prog->reloc_desc[i].type == RELO_DATA; 3628 - struct bpf_insn *insns = prog->insns; 3629 - int insn_idx, map_idx; 3625 + struct reloc_desc *relo = &prog->reloc_desc[i]; 3630 3626 3631 - insn_idx = prog->reloc_desc[i].insn_idx; 3632 - map_idx = prog->reloc_desc[i].map_idx; 3627 + if (relo->type == RELO_LD64 || relo->type == RELO_DATA) { 3628 + struct bpf_insn *insn = &prog->insns[relo->insn_idx]; 3633 3629 3634 - if (insn_idx + 1 >= (int)prog->insns_cnt) { 3630 + if (relo->insn_idx + 1 >= (int)prog->insns_cnt) { 3635 3631 pr_warn("relocation out of range: '%s'\n", 3636 3632 prog->section_name); 3637 3633 return -LIBBPF_ERRNO__RELOC; 3638 3634 } 3639 3635 3640 - if (!relo_data) { 3641 - insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD; 3636 + if (relo->type != RELO_DATA) { 3637 + insn[0].src_reg = BPF_PSEUDO_MAP_FD; 3642 3638 } else { 3643 - insns[insn_idx].src_reg = BPF_PSEUDO_MAP_VALUE; 3644 - insns[insn_idx + 1].imm = insns[insn_idx].imm; 3639 + insn[0].src_reg = BPF_PSEUDO_MAP_VALUE; 3640 + insn[1].imm = insn[0].imm + relo->sym_off; 3645 3641 } 3646 - insns[insn_idx].imm = obj->maps[map_idx].fd; 3647 - } else if (prog->reloc_desc[i].type == RELO_CALL) { 3648 - err = bpf_program__reloc_text(prog, obj, 3649 - &prog->reloc_desc[i]); 3642 + insn[0].imm = obj->maps[relo->map_idx].fd; 3643 + } else if (relo->type == RELO_CALL) { 3644 + err = bpf_program__reloc_text(prog, obj, relo); 3650 3645 if (err) 3651 3646 return err; 3652 3647 }
tools/lib/bpf/test_libbpf.c → tools/testing/selftests/bpf/test_cpp.cpp
+1
tools/perf/MANIFEST
··· 19 19 tools/lib/str_error_r.c 20 20 tools/lib/vsprintf.c 21 21 tools/lib/zalloc.c 22 + scripts/bpf_helpers_doc.py
+1
tools/testing/selftests/bpf/.gitignore
··· 37 37 test_hashmap 38 38 test_btf_dump 39 39 xdping 40 + test_cpp 40 41 /no_alu32 41 42 /bpf_gcc
+5 -1
tools/testing/selftests/bpf/Makefile
··· 71 71 # Compile but not part of 'make run_tests' 72 72 TEST_GEN_PROGS_EXTENDED = test_sock_addr test_skb_cgroup_id_user \ 73 73 flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \ 74 - test_lirc_mode2_user xdping 74 + test_lirc_mode2_user xdping test_cpp 75 75 76 76 TEST_CUSTOM_PROGS = urandom_read 77 77 ··· 316 316 ) > verifier/tests.h) 317 317 $(OUTPUT)/test_verifier: test_verifier.c verifier/tests.h $(BPFOBJ) | $(OUTPUT) 318 318 $(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@ 319 + 320 + # Make sure we are able to include and link libbpf against c++. 321 + $(OUTPUT)/test_cpp: test_cpp.cpp $(BPFOBJ) 322 + $(CXX) $(CFLAGS) $^ $(LDLIBS) -o $@ 319 323 320 324 EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) \ 321 325 prog_tests/tests.h map_tests/tests.h verifier/tests.h \
+53 -17
tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
··· 2 2 /* Copyright (c) 2019 Facebook */ 3 3 #include <test_progs.h> 4 4 5 - #define PROG_CNT 3 6 - 7 - void test_fexit_bpf2bpf(void) 5 + static void test_fexit_bpf2bpf_common(const char *obj_file, 6 + const char *target_obj_file, 7 + int prog_cnt, 8 + const char **prog_name) 8 9 { 9 - const char *prog_name[PROG_CNT] = { 10 - "fexit/test_pkt_access", 11 - "fexit/test_pkt_access_subprog1", 12 - "fexit/test_pkt_access_subprog2", 13 - }; 14 10 struct bpf_object *obj = NULL, *pkt_obj; 15 11 int err, pkt_fd, i; 16 - struct bpf_link *link[PROG_CNT] = {}; 17 - struct bpf_program *prog[PROG_CNT]; 12 + struct bpf_link **link = NULL; 13 + struct bpf_program **prog = NULL; 18 14 __u32 duration, retval; 19 15 struct bpf_map *data_map; 20 16 const int zero = 0; 21 - u64 result[PROG_CNT]; 17 + u64 *result = NULL; 22 18 23 - err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_UNSPEC, 19 + err = bpf_prog_load(target_obj_file, BPF_PROG_TYPE_UNSPEC, 24 20 &pkt_obj, &pkt_fd); 25 21 if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno)) 26 22 return; ··· 24 28 .attach_prog_fd = pkt_fd, 25 29 ); 26 30 27 - obj = bpf_object__open_file("./fexit_bpf2bpf.o", &opts); 31 + link = calloc(sizeof(struct bpf_link *), prog_cnt); 32 + prog = calloc(sizeof(struct bpf_program *), prog_cnt); 33 + result = malloc(prog_cnt * sizeof(u64)); 34 + if (CHECK(!link || !prog || !result, "alloc_memory", 35 + "failed to alloc memory")) 36 + goto close_prog; 37 + 38 + obj = bpf_object__open_file(obj_file, &opts); 28 39 if (CHECK(IS_ERR_OR_NULL(obj), "obj_open", 29 40 "failed to open fexit_bpf2bpf: %ld\n", 30 41 PTR_ERR(obj))) ··· 41 38 if (CHECK(err, "obj_load", "err %d\n", err)) 42 39 goto close_prog; 43 40 44 - for (i = 0; i < PROG_CNT; i++) { 41 + for (i = 0; i < prog_cnt; i++) { 45 42 prog[i] = bpf_object__find_program_by_title(obj, prog_name[i]); 46 43 if (CHECK(!prog[i], "find_prog", "prog %s not found\n", prog_name[i])) 47 44 goto close_prog; ··· 59 56 "err %d errno %d retval %d duration %d\n", 60 57 err, errno, retval, duration); 61 58 62 - err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &result); 59 + err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, result); 63 60 if (CHECK(err, "get_result", 64 61 "failed to get output data: %d\n", err)) 65 62 goto close_prog; 66 63 67 - for (i = 0; i < PROG_CNT; i++) 64 + for (i = 0; i < prog_cnt; i++) 68 65 if (CHECK(result[i] != 1, "result", "fexit_bpf2bpf failed err %ld\n", 69 66 result[i])) 70 67 goto close_prog; 71 68 72 69 close_prog: 73 - for (i = 0; i < PROG_CNT; i++) 70 + for (i = 0; i < prog_cnt; i++) 74 71 if (!IS_ERR_OR_NULL(link[i])) 75 72 bpf_link__destroy(link[i]); 76 73 if (!IS_ERR_OR_NULL(obj)) 77 74 bpf_object__close(obj); 78 75 bpf_object__close(pkt_obj); 76 + free(link); 77 + free(prog); 78 + free(result); 79 + } 80 + 81 + static void test_target_no_callees(void) 82 + { 83 + const char *prog_name[] = { 84 + "fexit/test_pkt_md_access", 85 + }; 86 + test_fexit_bpf2bpf_common("./fexit_bpf2bpf_simple.o", 87 + "./test_pkt_md_access.o", 88 + ARRAY_SIZE(prog_name), 89 + prog_name); 90 + } 91 + 92 + static void test_target_yes_callees(void) 93 + { 94 + const char *prog_name[] = { 95 + "fexit/test_pkt_access", 96 + "fexit/test_pkt_access_subprog1", 97 + "fexit/test_pkt_access_subprog2", 98 + }; 99 + test_fexit_bpf2bpf_common("./fexit_bpf2bpf.o", 100 + "./test_pkt_access.o", 101 + ARRAY_SIZE(prog_name), 102 + prog_name); 103 + } 104 + 105 + void test_fexit_bpf2bpf(void) 106 + { 107 + test_target_no_callees(); 108 + test_target_yes_callees(); 79 
109 }
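The test body is factored into test_fexit_bpf2bpf_common(), parameterized by the target object, the fexit object and the program list, so the same flow covers targets with and without callees. The attach step is elided above; the sketch below is only an assumption about how such an array of fexit programs is typically attached with libbpf, not a copy of the elided code:

/* Hedged sketch, assuming bpf_program__attach_trace() is used for the
 * fexit programs and failures are detected with libbpf_get_error().
 */
#include <bpf/libbpf.h>

static int attach_all(struct bpf_program **prog, struct bpf_link **link,
		      int prog_cnt)
{
	long err;
	int i;

	for (i = 0; i < prog_cnt; i++) {
		link[i] = bpf_program__attach_trace(prog[i]);
		err = libbpf_get_error(link[i]);
		if (err) {
			link[i] = NULL;	/* avoid destroying an ERR_PTR later */
			return err;
		}
	}
	return 0;
}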
+6 -6
tools/testing/selftests/bpf/progs/fentry_test.c
··· 6 6 7 7 char _license[] SEC("license") = "GPL"; 8 8 9 - static volatile __u64 test1_result; 9 + __u64 test1_result = 0; 10 10 BPF_TRACE_1("fentry/bpf_fentry_test1", test1, int, a) 11 11 { 12 12 test1_result = a == 1; 13 13 return 0; 14 14 } 15 15 16 - static volatile __u64 test2_result; 16 + __u64 test2_result = 0; 17 17 BPF_TRACE_2("fentry/bpf_fentry_test2", test2, int, a, __u64, b) 18 18 { 19 19 test2_result = a == 2 && b == 3; 20 20 return 0; 21 21 } 22 22 23 - static volatile __u64 test3_result; 23 + __u64 test3_result = 0; 24 24 BPF_TRACE_3("fentry/bpf_fentry_test3", test3, char, a, int, b, __u64, c) 25 25 { 26 26 test3_result = a == 4 && b == 5 && c == 6; 27 27 return 0; 28 28 } 29 29 30 - static volatile __u64 test4_result; 30 + __u64 test4_result = 0; 31 31 BPF_TRACE_4("fentry/bpf_fentry_test4", test4, 32 32 void *, a, char, b, int, c, __u64, d) 33 33 { ··· 35 35 return 0; 36 36 } 37 37 38 - static volatile __u64 test5_result; 38 + __u64 test5_result = 0; 39 39 BPF_TRACE_5("fentry/bpf_fentry_test5", test5, 40 40 __u64, a, void *, b, short, c, int, d, __u64, e) 41 41 { ··· 44 44 return 0; 45 45 } 46 46 47 - static volatile __u64 test6_result; 47 + __u64 test6_result = 0; 48 48 BPF_TRACE_6("fentry/bpf_fentry_test6", test6, 49 49 __u64, a, void *, b, short, c, int, d, void *, e, __u64, f) 50 50 {
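Dropping static/volatile turns the result variables into ordinary initialized globals, which libbpf places in the object's .bss map, so user space can read all of them with a single element lookup. A rough sketch of that read; the truncated "<obj>.bss" map name and the declaration-order layout are assumptions, not taken from the test:

#include <errno.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Hedged sketch: the .bss map has one element whose value is the whole
 * section, so the six test*_result globals can be read in one lookup.
 * The map name prefix is a guess at libbpf's truncated object name.
 */
static int read_results(struct bpf_object *obj, __u64 results[6])
{
	struct bpf_map *bss;
	__u32 zero = 0;

	bss = bpf_object__find_map_by_name(obj, "fentry_t.bss");
	if (!bss)
		return -ENOENT;
	return bpf_map_lookup_elem(bpf_map__fd(bss), &zero, results);
}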
+3 -3
tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
··· 8 8 unsigned int len; 9 9 }; 10 10 11 - static volatile __u64 test_result; 11 + __u64 test_result = 0; 12 12 BPF_TRACE_2("fexit/test_pkt_access", test_main, 13 13 struct sk_buff *, skb, int, ret) 14 14 { ··· 23 23 return 0; 24 24 } 25 25 26 - static volatile __u64 test_result_subprog1; 26 + __u64 test_result_subprog1 = 0; 27 27 BPF_TRACE_2("fexit/test_pkt_access_subprog1", test_subprog1, 28 28 struct sk_buff *, skb, int, ret) 29 29 { ··· 56 56 __u64 args[5]; 57 57 __u64 ret; 58 58 }; 59 - static volatile __u64 test_result_subprog2; 59 + __u64 test_result_subprog2 = 0; 60 60 SEC("fexit/test_pkt_access_subprog2") 61 61 int test_subprog2(struct args_subprog2 *ctx) 62 62 {
+26
tools/testing/selftests/bpf/progs/fexit_bpf2bpf_simple.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2019 Facebook */ 3 + #include <linux/bpf.h> 4 + #include "bpf_helpers.h" 5 + #include "bpf_trace_helpers.h" 6 + 7 + struct sk_buff { 8 + unsigned int len; 9 + }; 10 + 11 + __u64 test_result = 0; 12 + BPF_TRACE_2("fexit/test_pkt_md_access", test_main2, 13 + struct sk_buff *, skb, int, ret) 14 + { 15 + int len; 16 + 17 + __builtin_preserve_access_index(({ 18 + len = skb->len; 19 + })); 20 + if (len != 74 || ret != 0) 21 + return 0; 22 + 23 + test_result = 1; 24 + return 0; 25 + } 26 + char _license[] SEC("license") = "GPL";
+6 -6
tools/testing/selftests/bpf/progs/fexit_test.c
··· 6 6 7 7 char _license[] SEC("license") = "GPL"; 8 8 9 - static volatile __u64 test1_result; 9 + __u64 test1_result = 0; 10 10 BPF_TRACE_2("fexit/bpf_fentry_test1", test1, int, a, int, ret) 11 11 { 12 12 test1_result = a == 1 && ret == 2; 13 13 return 0; 14 14 } 15 15 16 - static volatile __u64 test2_result; 16 + __u64 test2_result = 0; 17 17 BPF_TRACE_3("fexit/bpf_fentry_test2", test2, int, a, __u64, b, int, ret) 18 18 { 19 19 test2_result = a == 2 && b == 3 && ret == 5; 20 20 return 0; 21 21 } 22 22 23 - static volatile __u64 test3_result; 23 + __u64 test3_result = 0; 24 24 BPF_TRACE_4("fexit/bpf_fentry_test3", test3, char, a, int, b, __u64, c, int, ret) 25 25 { 26 26 test3_result = a == 4 && b == 5 && c == 6 && ret == 15; 27 27 return 0; 28 28 } 29 29 30 - static volatile __u64 test4_result; 30 + __u64 test4_result = 0; 31 31 BPF_TRACE_5("fexit/bpf_fentry_test4", test4, 32 32 void *, a, char, b, int, c, __u64, d, int, ret) 33 33 { ··· 37 37 return 0; 38 38 } 39 39 40 - static volatile __u64 test5_result; 40 + __u64 test5_result = 0; 41 41 BPF_TRACE_6("fexit/bpf_fentry_test5", test5, 42 42 __u64, a, void *, b, short, c, int, d, __u64, e, int, ret) 43 43 { ··· 46 46 return 0; 47 47 } 48 48 49 - static volatile __u64 test6_result; 49 + __u64 test6_result = 0; 50 50 BPF_TRACE_7("fexit/bpf_fentry_test6", test6, 51 51 __u64, a, void *, b, short, c, int, d, void *, e, __u64, f, 52 52 int, ret)
+2 -2
tools/testing/selftests/bpf/progs/test_mmap.c
··· 15 15 __type(value, __u64); 16 16 } data_map SEC(".maps"); 17 17 18 - static volatile __u64 in_val; 19 - static volatile __u64 out_val; 18 + __u64 in_val = 0; 19 + __u64 out_val = 0; 20 20 21 21 SEC("raw_tracepoint/sys_enter") 22 22 int test_mmap(void *ctx)
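Here too the globals become plain initialized variables so they land in a global-data map that can be mapped directly. A sketch of how user space could access them, assuming libbpf created the .bss map with BPF_F_MMAPABLE (as it does for global data) and that the struct below matches the declaration order:

#include <sys/mman.h>
#include <linux/types.h>
#include <bpf/libbpf.h>

/* Hedged sketch: mapping the two globals read/written by test_mmap.
 * Layout is assumed to follow declaration order in the .bss section.
 */
struct mmap_bss {
	__u64 in_val;
	__u64 out_val;
};

static struct mmap_bss *map_globals(const struct bpf_map *bss_map)
{
	void *p = mmap(NULL, sizeof(struct mmap_bss),
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       bpf_map__fd(bss_map), 0);

	return p == MAP_FAILED ? NULL : p;
}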
+2 -2
tools/testing/selftests/bpf/progs/test_pkt_md_access.c
··· 27 27 } 28 28 #endif 29 29 30 - SEC("test1") 31 - int process(struct __sk_buff *skb) 30 + SEC("classifier/test_pkt_md_access") 31 + int test_pkt_md_access(struct __sk_buff *skb) 32 32 { 33 33 TEST_FIELD(__u8, len, 0xFF); 34 34 TEST_FIELD(__u16, len, 0xFFFF);
+1
tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
··· 131 131 g.bytes_received = skops->bytes_received; 132 132 g.bytes_acked = skops->bytes_acked; 133 133 } 134 + g.num_close_events++; 134 135 bpf_map_update_elem(&global_map, &key, &g, 135 136 BPF_ANY); 136 137 }
+1 -1
tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
··· 120 120 int err = 0; 121 121 int map_fd; 122 122 123 - expected_ids[0] = 0x100000001; /* root cgroup */ 123 + expected_ids[0] = get_cgroup_id("/.."); /* root cgroup */ 124 124 expected_ids[1] = get_cgroup_id(""); 125 125 expected_ids[2] = get_cgroup_id(CGROUP_PATH); 126 126 expected_ids[3] = 0; /* non-existent cgroup */
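The hardcoded root-cgroup id 0x100000001 is not stable across kernels, so the test now queries the id of the test cgroup's parent ("/..") at run time. For reference, a cgroup id like this can be derived from cgroupfs with name_to_handle_at(); this mirrors what the selftests' get_cgroup_id() helper does, but the sketch below is illustrative rather than the helper itself:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <string.h>

/* Hedged sketch: the cgroup id equals the cgroupfs file handle, which
 * name_to_handle_at() returns as an 8-byte handle.
 */
static uint64_t cgroup_id_of(const char *cgroupfs_path)
{
	char buf[sizeof(struct file_handle) + sizeof(uint64_t)] = {};
	struct file_handle *fh = (struct file_handle *)buf;
	uint64_t id = 0;
	int mount_id;

	fh->handle_bytes = sizeof(uint64_t);
	if (name_to_handle_at(AT_FDCWD, cgroupfs_path, fh, &mount_id, 0) < 0)
		return 0;
	memcpy(&id, fh->f_handle, sizeof(id));
	return id;
}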
+1
tools/testing/selftests/bpf/test_tcpbpf.h
··· 13 13 __u64 bytes_received; 14 14 __u64 bytes_acked; 15 15 __u32 num_listen; 16 + __u32 num_close_events; 16 17 }; 17 18 #endif
+18 -7
tools/testing/selftests/bpf/test_tcpbpf_user.c
··· 16 16 17 17 #include "test_tcpbpf.h" 18 18 19 + /* 3 comes from one listening socket + both ends of the connection */ 20 + #define EXPECTED_CLOSE_EVENTS 3 21 + 19 22 #define EXPECT_EQ(expected, actual, fmt) \ 20 23 do { \ 21 24 if ((expected) != (actual)) { \ ··· 26 23 " Actual: %" fmt "\n" \ 27 24 " Expected: %" fmt "\n", \ 28 25 (actual), (expected)); \ 29 - goto err; \ 26 + ret--; \ 30 27 } \ 31 28 } while (0) 32 29 33 30 int verify_result(const struct tcpbpf_globals *result) 34 31 { 35 32 __u32 expected_events; 33 + int ret = 0; 36 34 37 35 expected_events = ((1 << BPF_SOCK_OPS_TIMEOUT_INIT) | 38 36 (1 << BPF_SOCK_OPS_RWND_INIT) | ··· 52 48 EXPECT_EQ(0x80, result->bad_cb_test_rv, PRIu32); 53 49 EXPECT_EQ(0, result->good_cb_test_rv, PRIu32); 54 50 EXPECT_EQ(1, result->num_listen, PRIu32); 51 + EXPECT_EQ(EXPECTED_CLOSE_EVENTS, result->num_close_events, PRIu32); 55 52 56 - return 0; 57 - err: 58 - return -1; 53 + return ret; 59 54 } 60 55 61 56 int verify_sockopt_result(int sock_map_fd) 62 57 { 63 58 __u32 key = 0; 59 + int ret = 0; 64 60 int res; 65 61 int rv; 66 62 ··· 73 69 rv = bpf_map_lookup_elem(sock_map_fd, &key, &res); 74 70 EXPECT_EQ(0, rv, "d"); 75 71 EXPECT_EQ(1, res, "d"); 76 - return 0; 77 - err: 78 - return -1; 72 + return ret; 79 73 } 80 74 81 75 static int bpf_find_map(const char *test, struct bpf_object *obj, ··· 98 96 int error = EXIT_FAILURE; 99 97 struct bpf_object *obj; 100 98 int cg_fd = -1; 99 + int retry = 10; 101 100 __u32 key = 0; 102 101 int rv; 103 102 ··· 137 134 if (sock_map_fd < 0) 138 135 goto err; 139 136 137 + retry_lookup: 140 138 rv = bpf_map_lookup_elem(map_fd, &key, &g); 141 139 if (rv != 0) { 142 140 printf("FAILED: bpf_map_lookup_elem returns %d\n", rv); 143 141 goto err; 142 + } 143 + 144 + if (g.num_close_events != EXPECTED_CLOSE_EVENTS && retry--) { 145 + printf("Unexpected number of close events (%d), retrying!\n", 146 + g.num_close_events); 147 + usleep(100); 148 + goto retry_lookup; 144 149 } 145 150 146 151 if (verify_result(&g)) {
+2 -6
tools/testing/selftests/net/tls.c
··· 25 25 #define TLS_PAYLOAD_MAX_LEN 16384 26 26 #define SOL_TLS 282 27 27 28 - #ifndef ENOTSUPP 29 - #define ENOTSUPP 524 30 - #endif 31 - 32 28 FIXTURE(tls_basic) 33 29 { 34 30 int fd, cfd; ··· 1201 1205 /* TLS ULP not supported */ 1202 1206 if (errno == ENOENT) 1203 1207 return; 1204 - EXPECT_EQ(errno, ENOTSUPP); 1208 + EXPECT_EQ(errno, ENOTCONN); 1205 1209 1206 1210 ret = setsockopt(sfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); 1207 1211 EXPECT_EQ(ret, -1); 1208 - EXPECT_EQ(errno, ENOTSUPP); 1212 + EXPECT_EQ(errno, ENOTCONN); 1209 1213 1210 1214 ret = getsockname(sfd, &addr, &len); 1211 1215 ASSERT_EQ(ret, 0);