Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) Fix null deref in xt_TEE netfilter module, from Eric Dumazet.

2) Several spots need to get to the original listener for SYN-ACK
packets, most spots got this ok but some were not. Whilst covering
the remaining cases, create a helper to do this. From Eric Dumazet.

3) Missing check of return value from alloc_netdev() in CAIF SPI code,
from Rasmus Villemoes.

4) Don't sleep while != TASK_RUNNING in macvtap, from Vlad Yasevich.

5) Use after free in mvneta driver, from Justin Maggard.

6) Fix race on dst->flags access in dst_release(), from Eric Dumazet.

7) Add missing ZLIB_INFLATE dependency for new qed driver. From Arnd
Bergmann.

8) Fix multicast getsockopt deadlock, from WANG Cong.

9) Fix deadlock in btusb, from Kuba Pawlak.

10) Some ipv6_add_dev() failure paths were not cleaning up the SNMP6
counter state. From Sabrina Dubroca.

11) Fix packet_bind() race, which can cause lost notifications, from
Francesco Ruggeri.

12) Fix MAC restoration in qlcnic driver during bonding mode changes,
from Jarod Wilson.

13) Revert bridging forward delay change which broke libvirt and other
userspace things, from Vlad Yasevich.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (65 commits)
Revert "bridge: Allow forward delay to be cfgd when STP enabled"
bpf_trace: Make dependent on PERF_EVENTS
qed: select ZLIB_INFLATE
net: fix a race in dst_release()
net: mvneta: Fix memory use after free.
net: Documentation: Fix default value tcp_limit_output_bytes
macvtap: Resolve possible __might_sleep warning in macvtap_do_read()
mvneta: add FIXED_PHY dependency
net: caif: check return value of alloc_netdev
net: hisilicon: NET_VENDOR_HISILICON should depend on HAS_DMA
drivers: net: xgene: fix RGMII 10/100Mb mode
netfilter: nft_meta: use skb_to_full_sk() helper
net_sched: em_meta: use skb_to_full_sk() helper
sched: cls_flow: use skb_to_full_sk() helper
netfilter: xt_owner: use skb_to_full_sk() helper
smack: use skb_to_full_sk() helper
net: add skb_to_full_sk() helper and use it in selinux_netlbl_skbuff_setsid()
bpf: doc: correct arch list for supported eBPF JIT
dwc_eth_qos: Delete an unnecessary check before the function call "of_node_put"
bonding: fix panic on non-ARPHRD_ETHER enslave failure
...

+642 -322
+5
Documentation/devicetree/bindings/net/cpsw.txt
··· 48 48 - mac-address : See ethernet.txt file in the same directory 49 49 - phy-handle : See ethernet.txt file in the same directory 50 50 51 + Slave sub-nodes: 52 + - fixed-link : See fixed-link.txt file in the same directory 53 + Either the properties phy_id and phy-mode, 54 + or the sub-node fixed-link can be specified 55 + 51 56 Note: "ti,hwmods" field is used to fetch the base address and irq 52 57 resources from TI, omap hwmod data base during device registration. 53 58 Future plan is to migrate hwmod data base contents into device tree
+3 -3
Documentation/networking/filter.txt
··· 596 596 before a conversion to the new layout is being done behind the scenes! 597 597 598 598 Currently, the classic BPF format is being used for JITing on most of the 599 - architectures. Only x86-64 performs JIT compilation from eBPF instruction set, 600 - however, future work will migrate other JIT compilers as well, so that they 601 - will profit from the very same benefits. 599 + architectures. x86-64, aarch64 and s390x perform JIT compilation from eBPF 600 + instruction set, however, future work will migrate other JIT compilers as well, 601 + so that they will profit from the very same benefits. 602 602 603 603 Some core changes of the new internal format: 604 604
+1 -1
Documentation/networking/ip-sysctl.txt
··· 709 709 typical pfifo_fast qdiscs. 710 710 tcp_limit_output_bytes limits the number of bytes on qdisc 711 711 or device to reduce artificial RTT/cwnd and reduce bufferbloat. 712 - Default: 131072 712 + Default: 262144 713 713 714 714 tcp_challenge_ack_limit - INTEGER 715 715 Limits number of Challenge ACK sent per second, as recommended
+4 -2
drivers/bluetooth/btusb.c
··· 1372 1372 } 1373 1373 1374 1374 if (data->isoc_altsetting != new_alts) { 1375 + unsigned long flags; 1376 + 1375 1377 clear_bit(BTUSB_ISOC_RUNNING, &data->flags); 1376 1378 usb_kill_anchored_urbs(&data->isoc_anchor); 1377 1379 ··· 1386 1384 * Clear outstanding fragment when selecting a new 1387 1385 * alternate setting. 1388 1386 */ 1389 - spin_lock(&data->rxlock); 1387 + spin_lock_irqsave(&data->rxlock, flags); 1390 1388 kfree_skb(data->sco_skb); 1391 1389 data->sco_skb = NULL; 1392 - spin_unlock(&data->rxlock); 1390 + spin_unlock_irqrestore(&data->rxlock, flags); 1393 1391 1394 1392 if (__set_isoc_interface(hdev, new_alts) < 0) 1395 1393 return;
+1
drivers/net/bonding/bond_main.c
··· 1749 1749 slave_dev->dev_addr)) 1750 1750 eth_hw_addr_random(bond_dev); 1751 1751 if (bond_dev->type != ARPHRD_ETHER) { 1752 + dev_close(bond_dev); 1752 1753 ether_setup(bond_dev); 1753 1754 bond_dev->flags |= IFF_MASTER; 1754 1755 bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+5 -2
drivers/net/caif/caif_spi.c
··· 730 730 int res; 731 731 dev = (struct cfspi_dev *)pdev->dev.platform_data; 732 732 733 - ndev = alloc_netdev(sizeof(struct cfspi), "cfspi%d", 734 - NET_NAME_UNKNOWN, cfspi_setup); 735 733 if (!dev) 736 734 return -ENODEV; 735 + 736 + ndev = alloc_netdev(sizeof(struct cfspi), "cfspi%d", 737 + NET_NAME_UNKNOWN, cfspi_setup); 738 + if (!ndev) 739 + return -ENOMEM; 737 740 738 741 cfspi = netdev_priv(ndev); 739 742 netif_stop_queue(ndev);
+2
drivers/net/dsa/mv88e6171.c
··· 103 103 #endif 104 104 .get_regs_len = mv88e6xxx_get_regs_len, 105 105 .get_regs = mv88e6xxx_get_regs, 106 + .port_join_bridge = mv88e6xxx_port_bridge_join, 107 + .port_leave_bridge = mv88e6xxx_port_bridge_leave, 106 108 .port_stp_update = mv88e6xxx_port_stp_update, 107 109 .port_pvid_get = mv88e6xxx_port_pvid_get, 108 110 .port_vlan_prepare = mv88e6xxx_port_vlan_prepare,
+2
drivers/net/dsa/mv88e6352.c
··· 323 323 .set_eeprom = mv88e6352_set_eeprom, 324 324 .get_regs_len = mv88e6xxx_get_regs_len, 325 325 .get_regs = mv88e6xxx_get_regs, 326 + .port_join_bridge = mv88e6xxx_port_bridge_join, 327 + .port_leave_bridge = mv88e6xxx_port_bridge_leave, 326 328 .port_stp_update = mv88e6xxx_port_stp_update, 327 329 .port_pvid_get = mv88e6xxx_port_pvid_get, 328 330 .port_vlan_prepare = mv88e6xxx_port_vlan_prepare,
+42
drivers/net/dsa/mv88e6xxx.c
··· 1462 1462 const struct switchdev_obj_port_vlan *vlan, 1463 1463 struct switchdev_trans *trans) 1464 1464 { 1465 + /* We reserve a few VLANs to isolate unbridged ports */ 1466 + if (vlan->vid_end >= 4000) 1467 + return -EOPNOTSUPP; 1468 + 1465 1469 /* We don't need any dynamic resource from the kernel (yet), 1466 1470 * so skip the prepare phase. 1467 1471 */ ··· 1874 1870 return err; 1875 1871 } 1876 1872 1873 + int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, u32 members) 1874 + { 1875 + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 1876 + const u16 pvid = 4000 + ds->index * DSA_MAX_PORTS + port; 1877 + int err; 1878 + 1879 + /* The port joined a bridge, so leave its reserved VLAN */ 1880 + mutex_lock(&ps->smi_mutex); 1881 + err = _mv88e6xxx_port_vlan_del(ds, port, pvid); 1882 + if (!err) 1883 + err = _mv88e6xxx_port_pvid_set(ds, port, 0); 1884 + mutex_unlock(&ps->smi_mutex); 1885 + return err; 1886 + } 1887 + 1888 + int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, u32 members) 1889 + { 1890 + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 1891 + const u16 pvid = 4000 + ds->index * DSA_MAX_PORTS + port; 1892 + int err; 1893 + 1894 + /* The port left the bridge, so join its reserved VLAN */ 1895 + mutex_lock(&ps->smi_mutex); 1896 + err = _mv88e6xxx_port_vlan_add(ds, port, pvid, true); 1897 + if (!err) 1898 + err = _mv88e6xxx_port_pvid_set(ds, port, pvid); 1899 + mutex_unlock(&ps->smi_mutex); 1900 + return err; 1901 + } 1902 + 1877 1903 static void mv88e6xxx_bridge_work(struct work_struct *work) 1878 1904 { 1879 1905 struct mv88e6xxx_priv_state *ps; ··· 2172 2138 2173 2139 for (i = 0; i < ps->num_ports; i++) { 2174 2140 ret = mv88e6xxx_setup_port(ds, i); 2141 + if (ret < 0) 2142 + return ret; 2143 + 2144 + if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)) 2145 + continue; 2146 + 2147 + /* setup the unbridged state */ 2148 + ret = mv88e6xxx_port_bridge_leave(ds, i, 0); 2175 2149 if (ret < 0) 2176 2150 return ret; 2177 
2151 }
+2
drivers/net/dsa/mv88e6xxx.h
··· 468 468 int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e); 469 469 int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, 470 470 struct phy_device *phydev, struct ethtool_eee *e); 471 + int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, u32 members); 472 + int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, u32 members); 471 473 int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state); 472 474 int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port, 473 475 const struct switchdev_obj_port_vlan *vlan,
+47 -2
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
··· 459 459 xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, 0); 460 460 } 461 461 462 + static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata) 463 + { 464 + struct device *dev = &pdata->pdev->dev; 465 + 466 + if (dev->of_node) { 467 + struct clk *parent = clk_get_parent(pdata->clk); 468 + 469 + switch (pdata->phy_speed) { 470 + case SPEED_10: 471 + clk_set_rate(parent, 2500000); 472 + break; 473 + case SPEED_100: 474 + clk_set_rate(parent, 25000000); 475 + break; 476 + default: 477 + clk_set_rate(parent, 125000000); 478 + break; 479 + } 480 + } 481 + #ifdef CONFIG_ACPI 482 + else { 483 + switch (pdata->phy_speed) { 484 + case SPEED_10: 485 + acpi_evaluate_object(ACPI_HANDLE(dev), 486 + "S10", NULL, NULL); 487 + break; 488 + case SPEED_100: 489 + acpi_evaluate_object(ACPI_HANDLE(dev), 490 + "S100", NULL, NULL); 491 + break; 492 + default: 493 + acpi_evaluate_object(ACPI_HANDLE(dev), 494 + "S1G", NULL, NULL); 495 + break; 496 + } 497 + } 498 + #endif 499 + } 500 + 462 501 static void xgene_gmac_init(struct xgene_enet_pdata *pdata) 463 502 { 464 503 struct device *dev = &pdata->pdev->dev; ··· 516 477 switch (pdata->phy_speed) { 517 478 case SPEED_10: 518 479 ENET_INTERFACE_MODE2_SET(&mc2, 1); 480 + intf_ctl &= ~(ENET_LHD_MODE | ENET_GHD_MODE); 519 481 CFG_MACMODE_SET(&icm0, 0); 520 482 CFG_WAITASYNCRD_SET(&icm2, 500); 521 483 rgmii &= ~CFG_SPEED_1250; 522 484 break; 523 485 case SPEED_100: 524 486 ENET_INTERFACE_MODE2_SET(&mc2, 1); 487 + intf_ctl &= ~ENET_GHD_MODE; 525 488 intf_ctl |= ENET_LHD_MODE; 526 489 CFG_MACMODE_SET(&icm0, 1); 527 490 CFG_WAITASYNCRD_SET(&icm2, 80); ··· 531 490 break; 532 491 default: 533 492 ENET_INTERFACE_MODE2_SET(&mc2, 2); 493 + intf_ctl &= ~ENET_LHD_MODE; 534 494 intf_ctl |= ENET_GHD_MODE; 535 - 495 + CFG_MACMODE_SET(&icm0, 2); 496 + CFG_WAITASYNCRD_SET(&icm2, 0); 536 497 if (dev->of_node) { 537 498 CFG_TXCLK_MUXSEL0_SET(&rgmii, pdata->tx_delay); 538 499 CFG_RXCLK_MUXSEL0_SET(&rgmii, pdata->rx_delay); 539 500 } 501 + rgmii 
|= CFG_SPEED_1250; 540 502 541 503 xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value); 542 504 value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX; ··· 547 503 break; 548 504 } 549 505 550 - mc2 |= FULL_DUPLEX2; 506 + mc2 |= FULL_DUPLEX2 | PAD_CRC; 551 507 xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2); 552 508 xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl); 553 509 ··· 566 522 /* Rtype should be copied from FP */ 567 523 xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0); 568 524 xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii); 525 + xgene_enet_configure_clock(pdata); 569 526 570 527 /* Rx-Tx traffic resume */ 571 528 xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);
+1
drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
··· 181 181 #define ENET_LHD_MODE BIT(25) 182 182 #define ENET_GHD_MODE BIT(26) 183 183 #define FULL_DUPLEX2 BIT(0) 184 + #define PAD_CRC BIT(2) 184 185 #define SCAN_AUTO_INCR BIT(5) 185 186 #define TBYT_ADDR 0x38 186 187 #define TPKT_ADDR 0x39
-1
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
··· 698 698 else 699 699 schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF); 700 700 701 - netif_carrier_off(ndev); 702 701 netif_start_queue(ndev); 703 702 704 703 return ret;
+1
drivers/net/ethernet/broadcom/Kconfig
··· 173 173 config BNXT 174 174 tristate "Broadcom NetXtreme-C/E support" 175 175 depends on PCI 176 + depends on VXLAN || VXLAN=n 176 177 select FW_LOADER 177 178 select LIBCRC32C 178 179 ---help---
+21 -7
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 1292 1292 return TX_CMP_VALID(txcmp, raw_cons); 1293 1293 } 1294 1294 1295 - #define CAG_LEGACY_INT_STATUS 0x2014 1296 - 1297 1295 static irqreturn_t bnxt_inta(int irq, void *dev_instance) 1298 1296 { 1299 1297 struct bnxt_napi *bnapi = dev_instance; ··· 1303 1305 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 1304 1306 1305 1307 if (!bnxt_has_work(bp, cpr)) { 1306 - int_status = readl(bp->bar0 + CAG_LEGACY_INT_STATUS); 1308 + int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS); 1307 1309 /* return if erroneous interrupt */ 1308 1310 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id))) 1309 1311 return IRQ_NONE; ··· 4525 4527 return rc; 4526 4528 } 4527 4529 4530 + /* Common routine to pre-map certain register block to different GRC window. 4531 + * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows 4532 + * in PF and 3 windows in VF that can be customized to map in different 4533 + * register blocks. 4534 + */ 4535 + static void bnxt_preset_reg_win(struct bnxt *bp) 4536 + { 4537 + if (BNXT_PF(bp)) { 4538 + /* CAG registers map to GRC window #4 */ 4539 + writel(BNXT_CAG_REG_BASE, 4540 + bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12); 4541 + } 4542 + } 4543 + 4528 4544 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 4529 4545 { 4530 4546 int rc = 0; 4531 4547 4548 + bnxt_preset_reg_win(bp); 4532 4549 netif_carrier_off(bp->dev); 4533 4550 if (irq_re_init) { 4534 4551 rc = bnxt_setup_int_mode(bp); ··· 5307 5294 struct bnxt_ntuple_filter *fltr, *new_fltr; 5308 5295 struct flow_keys *fkeys; 5309 5296 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); 5310 - int rc = 0, idx; 5297 + int rc = 0, idx, bit_id; 5311 5298 struct hlist_head *head; 5312 5299 5313 5300 if (skb->encapsulation) ··· 5345 5332 rcu_read_unlock(); 5346 5333 5347 5334 spin_lock_bh(&bp->ntp_fltr_lock); 5348 - new_fltr->sw_id = bitmap_find_free_region(bp->ntp_fltr_bmap, 5349 - BNXT_NTP_FLTR_MAX_FLTR, 
0); 5350 - if (new_fltr->sw_id < 0) { 5335 + bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, 5336 + BNXT_NTP_FLTR_MAX_FLTR, 0); 5337 + if (bit_id < 0) { 5351 5338 spin_unlock_bh(&bp->ntp_fltr_lock); 5352 5339 rc = -ENOMEM; 5353 5340 goto err_free; 5354 5341 } 5355 5342 5343 + new_fltr->sw_id = (u16)bit_id; 5356 5344 new_fltr->flow_id = flow_id; 5357 5345 new_fltr->rxq = rxq_index; 5358 5346 hlist_add_head_rcu(&new_fltr->hash, head);
+16 -10
drivers/net/ethernet/broadcom/bnxt/bnxt.h
··· 166 166 #define RX_CMP_HASH_VALID(rxcmp) \ 167 167 ((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID)) 168 168 169 + #define RSS_PROFILE_ID_MASK 0x1f 170 + 169 171 #define RX_CMP_HASH_TYPE(rxcmp) \ 170 - ((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\ 171 - RX_CMP_RSS_HASH_TYPE_SHIFT) 172 + (((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\ 173 + RX_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK) 172 174 173 175 struct rx_cmp_ext { 174 176 __le32 rx_cmp_flags2; ··· 284 282 cpu_to_le32(RX_TPA_START_CMP_FLAGS_RSS_VALID)) 285 283 286 284 #define TPA_START_HASH_TYPE(rx_tpa_start) \ 287 - ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \ 288 - RX_TPA_START_CMP_RSS_HASH_TYPE) >> \ 289 - RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT) 285 + (((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \ 286 + RX_TPA_START_CMP_RSS_HASH_TYPE) >> \ 287 + RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK) 290 288 291 289 #define TPA_START_AGG_ID(rx_tpa_start) \ 292 290 ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \ ··· 841 839 u8 queue_profile; 842 840 }; 843 841 842 + #define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400 843 + #define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014 844 + #define BNXT_CAG_REG_BASE 0x300000 845 + 844 846 struct bnxt { 845 847 void __iomem *bar0; 846 848 void __iomem *bar1; ··· 965 959 #define BNXT_RX_MASK_SP_EVENT 0 966 960 #define BNXT_RX_NTP_FLTR_SP_EVENT 1 967 961 #define BNXT_LINK_CHNG_SP_EVENT 2 968 - #define BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT 4 969 - #define BNXT_VXLAN_ADD_PORT_SP_EVENT 8 970 - #define BNXT_VXLAN_DEL_PORT_SP_EVENT 16 971 - #define BNXT_RESET_TASK_SP_EVENT 32 972 - #define BNXT_RST_RING_SP_EVENT 64 962 + #define BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT 3 963 + #define BNXT_VXLAN_ADD_PORT_SP_EVENT 4 964 + #define BNXT_VXLAN_DEL_PORT_SP_EVENT 5 965 + #define BNXT_RESET_TASK_SP_EVENT 6 966 + #define BNXT_RST_RING_SP_EVENT 7 973 967 974 968 struct bnxt_pf_info pf; 975 969 #ifdef 
CONFIG_BNXT_SRIOV
+27 -13
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
··· 258 258 return 0; 259 259 } 260 260 261 - static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp) 261 + static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs) 262 262 { 263 263 int i, rc = 0; 264 264 struct bnxt_pf_info *pf = &bp->pf; ··· 267 267 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1); 268 268 269 269 mutex_lock(&bp->hwrm_cmd_lock); 270 - for (i = pf->first_vf_id; i < pf->first_vf_id + pf->active_vfs; i++) { 270 + for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) { 271 271 req.vf_id = cpu_to_le16(i); 272 272 rc = _hwrm_send_message(bp, &req, sizeof(req), 273 273 HWRM_CMD_TIMEOUT); ··· 509 509 510 510 err_out2: 511 511 /* Free the resources reserved for various VF's */ 512 - bnxt_hwrm_func_vf_resource_free(bp); 512 + bnxt_hwrm_func_vf_resource_free(bp, *num_vfs); 513 513 514 514 err_out1: 515 515 bnxt_free_vf_resources(bp); ··· 519 519 520 520 void bnxt_sriov_disable(struct bnxt *bp) 521 521 { 522 - if (!bp->pf.active_vfs) 522 + u16 num_vfs = pci_num_vf(bp->pdev); 523 + 524 + if (!num_vfs) 523 525 return; 524 526 525 - pci_disable_sriov(bp->pdev); 526 - 527 - /* Free the resources reserved for various VF's */ 528 - bnxt_hwrm_func_vf_resource_free(bp); 527 + if (pci_vfs_assigned(bp->pdev)) { 528 + netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n", 529 + num_vfs); 530 + } else { 531 + pci_disable_sriov(bp->pdev); 532 + /* Free the HW resources reserved for various VF's */ 533 + bnxt_hwrm_func_vf_resource_free(bp, num_vfs); 534 + } 529 535 530 536 bnxt_free_vf_resources(bp); 531 537 ··· 558 552 } 559 553 bp->sriov_cfg = true; 560 554 rtnl_unlock(); 561 - if (!num_vfs) { 562 - bnxt_sriov_disable(bp); 563 - return 0; 555 + 556 + if (pci_vfs_assigned(bp->pdev)) { 557 + netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n"); 558 + num_vfs = 0; 559 + goto sriov_cfg_exit; 564 560 } 565 561 566 562 /* Check if enabled VFs is same as requested */ 567 - if 
(num_vfs == bp->pf.active_vfs) 568 - return 0; 563 + if (num_vfs && num_vfs == bp->pf.active_vfs) 564 + goto sriov_cfg_exit; 565 + 566 + /* if there are previous existing VFs, clean them up */ 567 + bnxt_sriov_disable(bp); 568 + if (!num_vfs) 569 + goto sriov_cfg_exit; 569 570 570 571 bnxt_sriov_enable(bp, &num_vfs); 571 572 573 + sriov_cfg_exit: 572 574 bp->sriov_cfg = false; 573 575 wake_up(&bp->sriov_cfg_wait); 574 576
+2 -1
drivers/net/ethernet/hisilicon/Kconfig
··· 5 5 config NET_VENDOR_HISILICON 6 6 bool "Hisilicon devices" 7 7 default y 8 - depends on OF && (ARM || ARM64 || COMPILE_TEST) 8 + depends on OF && HAS_DMA 9 + depends on ARM || ARM64 || COMPILE_TEST 9 10 ---help--- 10 11 If you have a network (Ethernet) card belonging to this class, say Y. 11 12
+1
drivers/net/ethernet/marvell/Kconfig
··· 44 44 tristate "Marvell Armada 370/38x/XP network interface support" 45 45 depends on PLAT_ORION 46 46 select MVMDIO 47 + select FIXED_PHY 47 48 ---help--- 48 49 This driver supports the network interface units in the 49 50 Marvell ARMADA XP, ARMADA 370 and ARMADA 38x SoC family.
+1 -1
drivers/net/ethernet/marvell/mvneta.c
··· 1493 1493 struct mvneta_rx_desc *rx_desc = rxq->descs + i; 1494 1494 void *data = (void *)rx_desc->buf_cookie; 1495 1495 1496 - mvneta_frag_free(pp, data); 1497 1496 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, 1498 1497 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); 1498 + mvneta_frag_free(pp, data); 1499 1499 } 1500 1500 1501 1501 if (rx_done)
+1
drivers/net/ethernet/qlogic/Kconfig
··· 94 94 config QED 95 95 tristate "QLogic QED 25/40/100Gb core driver" 96 96 depends on PCI 97 + select ZLIB_INFLATE 97 98 ---help--- 98 99 This enables the support for ... 99 100
+10 -4
drivers/net/ethernet/qlogic/qed/qed_dev.c
··· 223 223 if (!p_hwfn->p_tx_cids) { 224 224 DP_NOTICE(p_hwfn, 225 225 "Failed to allocate memory for Tx Cids\n"); 226 + rc = -ENOMEM; 226 227 goto alloc_err; 227 228 } 228 229 ··· 231 230 if (!p_hwfn->p_rx_cids) { 232 231 DP_NOTICE(p_hwfn, 233 232 "Failed to allocate memory for Rx Cids\n"); 233 + rc = -ENOMEM; 234 234 goto alloc_err; 235 235 } 236 236 } ··· 283 281 284 282 /* EQ */ 285 283 p_eq = qed_eq_alloc(p_hwfn, 256); 286 - 287 - if (!p_eq) 284 + if (!p_eq) { 285 + rc = -ENOMEM; 288 286 goto alloc_err; 287 + } 289 288 p_hwfn->p_eq = p_eq; 290 289 291 290 p_consq = qed_consq_alloc(p_hwfn); 292 - if (!p_consq) 291 + if (!p_consq) { 292 + rc = -ENOMEM; 293 293 goto alloc_err; 294 + } 294 295 p_hwfn->p_consq = p_consq; 295 296 296 297 /* DMA info initialization */ ··· 308 303 cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL); 309 304 if (!cdev->reset_stats) { 310 305 DP_NOTICE(cdev, "Failed to allocate reset statistics\n"); 306 + rc = -ENOMEM; 311 307 goto alloc_err; 312 308 } 313 309 ··· 568 562 } 569 563 570 564 /* Enable classification by MAC if needed */ 571 - if (hw_mode & MODE_MF_SI) { 565 + if (hw_mode & (1 << MODE_MF_SI)) { 572 566 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, 573 567 "Configuring TAGMAC_CLS_TYPE\n"); 574 568 STORE_RT_REG(p_hwfn,
-5
drivers/net/ethernet/qlogic/qed/qed_int.c
··· 251 251 int arr_size; 252 252 u16 rc = 0; 253 253 254 - if (!p_hwfn) { 255 - DP_ERR(p_hwfn->cdev, "DPC called - no hwfn!\n"); 256 - return; 257 - } 258 - 259 254 if (!p_hwfn->p_sp_sb) { 260 255 DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n"); 261 256 return;
+2 -1
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
··· 353 353 if (!is_valid_ether_addr(addr->sa_data)) 354 354 return -EINVAL; 355 355 356 - if (ether_addr_equal_unaligned(adapter->mac_addr, addr->sa_data)) 356 + if (ether_addr_equal_unaligned(adapter->mac_addr, addr->sa_data) && 357 + ether_addr_equal_unaligned(netdev->dev_addr, addr->sa_data)) 357 358 return 0; 358 359 359 360 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+30 -54
drivers/net/ethernet/renesas/sh_eth.c
··· 1098 1098 static void sh_eth_ring_free(struct net_device *ndev) 1099 1099 { 1100 1100 struct sh_eth_private *mdp = netdev_priv(ndev); 1101 - int i; 1101 + int ringsize, i; 1102 1102 1103 1103 /* Free Rx skb ringbuffer */ 1104 1104 if (mdp->rx_skbuff) { ··· 1115 1115 } 1116 1116 kfree(mdp->tx_skbuff); 1117 1117 mdp->tx_skbuff = NULL; 1118 + 1119 + if (mdp->rx_ring) { 1120 + ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; 1121 + dma_free_coherent(NULL, ringsize, mdp->rx_ring, 1122 + mdp->rx_desc_dma); 1123 + mdp->rx_ring = NULL; 1124 + } 1125 + 1126 + if (mdp->tx_ring) { 1127 + ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; 1128 + dma_free_coherent(NULL, ringsize, mdp->tx_ring, 1129 + mdp->tx_desc_dma); 1130 + mdp->tx_ring = NULL; 1131 + } 1118 1132 } 1119 1133 1120 1134 /* format skb and descriptor buffer */ ··· 1213 1199 static int sh_eth_ring_init(struct net_device *ndev) 1214 1200 { 1215 1201 struct sh_eth_private *mdp = netdev_priv(ndev); 1216 - int rx_ringsize, tx_ringsize, ret = 0; 1202 + int rx_ringsize, tx_ringsize; 1217 1203 1218 1204 /* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the 1219 1205 * card needs room to do 8 byte alignment, +2 so we can reserve ··· 1228 1214 /* Allocate RX and TX skb rings */ 1229 1215 mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff), 1230 1216 GFP_KERNEL); 1231 - if (!mdp->rx_skbuff) { 1232 - ret = -ENOMEM; 1233 - return ret; 1234 - } 1217 + if (!mdp->rx_skbuff) 1218 + return -ENOMEM; 1235 1219 1236 1220 mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff), 1237 1221 GFP_KERNEL); 1238 - if (!mdp->tx_skbuff) { 1239 - ret = -ENOMEM; 1240 - goto skb_ring_free; 1241 - } 1222 + if (!mdp->tx_skbuff) 1223 + goto ring_free; 1242 1224 1243 1225 /* Allocate all Rx descriptors. 
*/ 1244 1226 rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; 1245 1227 mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma, 1246 1228 GFP_KERNEL); 1247 - if (!mdp->rx_ring) { 1248 - ret = -ENOMEM; 1249 - goto skb_ring_free; 1250 - } 1229 + if (!mdp->rx_ring) 1230 + goto ring_free; 1251 1231 1252 1232 mdp->dirty_rx = 0; 1253 1233 ··· 1249 1241 tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; 1250 1242 mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma, 1251 1243 GFP_KERNEL); 1252 - if (!mdp->tx_ring) { 1253 - ret = -ENOMEM; 1254 - goto desc_ring_free; 1255 - } 1256 - return ret; 1244 + if (!mdp->tx_ring) 1245 + goto ring_free; 1246 + return 0; 1257 1247 1258 - desc_ring_free: 1259 - /* free DMA buffer */ 1260 - dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma); 1261 - 1262 - skb_ring_free: 1263 - /* Free Rx and Tx skb ring buffer */ 1248 + ring_free: 1249 + /* Free Rx and Tx skb ring buffer and DMA buffer */ 1264 1250 sh_eth_ring_free(ndev); 1265 - mdp->tx_ring = NULL; 1266 - mdp->rx_ring = NULL; 1267 1251 1268 - return ret; 1269 - } 1270 - 1271 - static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp) 1272 - { 1273 - int ringsize; 1274 - 1275 - if (mdp->rx_ring) { 1276 - ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; 1277 - dma_free_coherent(NULL, ringsize, mdp->rx_ring, 1278 - mdp->rx_desc_dma); 1279 - mdp->rx_ring = NULL; 1280 - } 1281 - 1282 - if (mdp->tx_ring) { 1283 - ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; 1284 - dma_free_coherent(NULL, ringsize, mdp->tx_ring, 1285 - mdp->tx_desc_dma); 1286 - mdp->tx_ring = NULL; 1287 - } 1252 + return -ENOMEM; 1288 1253 } 1289 1254 1290 1255 static int sh_eth_dev_init(struct net_device *ndev, bool start) ··· 2220 2239 2221 2240 sh_eth_dev_exit(ndev); 2222 2241 2223 - /* Free all the skbuffs in the Rx queue. */ 2242 + /* Free all the skbuffs in the Rx queue and the DMA buffers. 
*/ 2224 2243 sh_eth_ring_free(ndev); 2225 - /* Free DMA buffer */ 2226 - sh_eth_free_dma_buffer(mdp); 2227 2244 } 2228 2245 2229 2246 /* Set new parameters */ ··· 2466 2487 2467 2488 free_irq(ndev->irq, ndev); 2468 2489 2469 - /* Free all the skbuffs in the Rx queue. */ 2490 + /* Free all the skbuffs in the Rx queue and the DMA buffer. */ 2470 2491 sh_eth_ring_free(ndev); 2471 - 2472 - /* free DMA buffer */ 2473 - sh_eth_free_dma_buffer(mdp); 2474 2492 2475 2493 pm_runtime_put_sync(&mdp->pdev->dev); 2476 2494
+1 -1
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
··· 354 354 355 355 static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable) 356 356 { 357 - int phy_iface = phy_iface = bsp_priv->phy_iface; 357 + int phy_iface = bsp_priv->phy_iface; 358 358 359 359 if (enable) { 360 360 if (!bsp_priv->clk_enabled) {
+1 -2
drivers/net/ethernet/synopsys/dwc_eth_qos.c
··· 2970 2970 err_out_clk_dis_aper: 2971 2971 clk_disable_unprepare(lp->apb_pclk); 2972 2972 err_out_free_netdev: 2973 - if (lp->phy_node) 2974 - of_node_put(lp->phy_node); 2973 + of_node_put(lp->phy_node); 2975 2974 free_netdev(ndev); 2976 2975 platform_set_drvdata(pdev, NULL); 2977 2976 return ret;
+13
drivers/net/ethernet/ti/cpsw.c
··· 2037 2037 continue; 2038 2038 2039 2039 priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0); 2040 + if (of_phy_is_fixed_link(slave_node)) { 2041 + struct phy_device *pd; 2042 + 2043 + ret = of_phy_register_fixed_link(slave_node); 2044 + if (ret) 2045 + return ret; 2046 + pd = of_phy_find_device(slave_node); 2047 + if (!pd) 2048 + return -ENODEV; 2049 + snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), 2050 + PHY_ID_FMT, pd->bus->id, pd->phy_id); 2051 + goto no_phy_slave; 2052 + } 2040 2053 parp = of_get_property(slave_node, "phy_id", &lenp); 2041 2054 if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) { 2042 2055 dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i);
+1 -3
drivers/net/fjes/fjes_hw.c
··· 143 143 144 144 static void fjes_hw_free_epbuf(struct epbuf_handler *epbh) 145 145 { 146 - if (epbh->buffer) 147 - vfree(epbh->buffer); 148 - 146 + vfree(epbh->buffer); 149 147 epbh->buffer = NULL; 150 148 epbh->size = 0; 151 149
+3 -2
drivers/net/macvtap.c
··· 935 935 /* Nothing to read, let's sleep */ 936 936 schedule(); 937 937 } 938 + if (!noblock) 939 + finish_wait(sk_sleep(&q->sk), &wait); 940 + 938 941 if (skb) { 939 942 ret = macvtap_put_user(q, skb, to); 940 943 if (unlikely(ret < 0)) ··· 945 942 else 946 943 consume_skb(skb); 947 944 } 948 - if (!noblock) 949 - finish_wait(sk_sleep(&q->sk), &wait); 950 945 return ret; 951 946 } 952 947
+21
drivers/net/usb/qmi_wwan.c
··· 771 771 {QMI_GOBI_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */ 772 772 {QMI_GOBI_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */ 773 773 {QMI_GOBI_DEVICE(0x05c6, 0x9215)}, /* Acer Gobi 2000 Modem device (VP413) */ 774 + {QMI_FIXED_INTF(0x05c6, 0x9215, 4)}, /* Quectel EC20 Mini PCIe */ 774 775 {QMI_GOBI_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */ 775 776 {QMI_GOBI_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */ 776 777 {QMI_GOBI_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */ ··· 803 802 }; 804 803 MODULE_DEVICE_TABLE(usb, products); 805 804 805 + static bool quectel_ec20_detected(struct usb_interface *intf) 806 + { 807 + struct usb_device *dev = interface_to_usbdev(intf); 808 + 809 + if (dev->actconfig && 810 + le16_to_cpu(dev->descriptor.idVendor) == 0x05c6 && 811 + le16_to_cpu(dev->descriptor.idProduct) == 0x9215 && 812 + dev->actconfig->desc.bNumInterfaces == 5) 813 + return true; 814 + 815 + return false; 816 + } 817 + 806 818 static int qmi_wwan_probe(struct usb_interface *intf, 807 819 const struct usb_device_id *prod) 808 820 { 809 821 struct usb_device_id *id = (struct usb_device_id *)prod; 822 + struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc; 810 823 811 824 /* Workaround to enable dynamic IDs. This disables usbnet 812 825 * blacklisting functionality. Which, if required, can be ··· 830 815 if (!id->driver_info) { 831 816 dev_dbg(&intf->dev, "setting defaults for dynamic device id\n"); 832 817 id->driver_info = (unsigned long)&qmi_wwan_info; 818 + } 819 + 820 + /* Quectel EC20 quirk where we've QMI on interface 4 instead of 0 */ 821 + if (quectel_ec20_detected(intf) && desc->bInterfaceNumber == 0) { 822 + dev_dbg(&intf->dev, "Quectel EC20 quirk, skipping interface 0\n"); 823 + return -ENODEV; 833 824 } 834 825 835 826 return usbnet_probe(intf, id);
+1 -1
drivers/nfc/nfcmrvl/Kconfig
··· 44 44 45 45 config NFC_MRVL_SPI 46 46 tristate "Marvell NFC-over-SPI driver" 47 - depends on NFC_MRVL && SPI 47 + depends on NFC_MRVL && NFC_NCI_SPI 48 48 help 49 49 Marvell NFC-over-SPI driver. 50 50
+9 -3
drivers/nfc/nfcmrvl/fw_dnld.c
··· 113 113 } 114 114 115 115 atomic_set(&priv->ndev->cmd_cnt, 0); 116 - del_timer_sync(&priv->ndev->cmd_timer); 117 116 118 - del_timer_sync(&priv->fw_dnld.timer); 117 + if (timer_pending(&priv->ndev->cmd_timer)) 118 + del_timer_sync(&priv->ndev->cmd_timer); 119 + 120 + if (timer_pending(&priv->fw_dnld.timer)) 121 + del_timer_sync(&priv->fw_dnld.timer); 119 122 120 123 nfc_info(priv->dev, "FW loading over (%d)]\n", error); 121 124 ··· 475 472 void nfcmrvl_fw_dnld_recv_frame(struct nfcmrvl_private *priv, 476 473 struct sk_buff *skb) 477 474 { 475 + /* Discard command timer */ 476 + if (timer_pending(&priv->ndev->cmd_timer)) 477 + del_timer_sync(&priv->ndev->cmd_timer); 478 + 478 479 /* Allow next command */ 479 480 atomic_set(&priv->ndev->cmd_cnt, 1); 480 - del_timer_sync(&priv->ndev->cmd_timer); 481 481 482 482 /* Queue and trigger rx work */ 483 483 skb_queue_tail(&priv->fw_dnld.rx_q, skb);
+3 -12
drivers/nfc/nfcmrvl/main.c
··· 194 194 195 195 nfcmrvl_fw_dnld_deinit(priv); 196 196 197 + if (priv->config.reset_n_io) 198 + devm_gpio_free(priv->dev, priv->config.reset_n_io); 199 + 197 200 nci_unregister_device(ndev); 198 201 nci_free_device(ndev); 199 202 kfree(priv); ··· 254 251 gpio_set_value(priv->config.reset_n_io, 0); 255 252 } 256 253 257 - #ifdef CONFIG_OF 258 - 259 254 int nfcmrvl_parse_dt(struct device_node *node, 260 255 struct nfcmrvl_platform_data *pdata) 261 256 { ··· 276 275 277 276 return 0; 278 277 } 279 - 280 - #else 281 - 282 - int nfcmrvl_parse_dt(struct device_node *node, 283 - struct nfcmrvl_platform_data *pdata) 284 - { 285 - return -ENODEV; 286 - } 287 - 288 - #endif 289 278 EXPORT_SYMBOL_GPL(nfcmrvl_parse_dt); 290 279 291 280 MODULE_AUTHOR("Marvell International Ltd.");
+9 -17
drivers/nfc/nfcmrvl/uart.c
··· 67 67 .nci_update_config = nfcmrvl_uart_nci_update_config 68 68 }; 69 69 70 - #ifdef CONFIG_OF 71 - 72 70 static int nfcmrvl_uart_parse_dt(struct device_node *node, 73 71 struct nfcmrvl_platform_data *pdata) 74 72 { ··· 99 101 100 102 return 0; 101 103 } 102 - 103 - #else 104 - 105 - static int nfcmrvl_uart_parse_dt(struct device_node *node, 106 - struct nfcmrvl_platform_data *pdata) 107 - { 108 - return -ENODEV; 109 - } 110 - 111 - #endif 112 104 113 105 /* 114 106 ** NCI UART OPS ··· 140 152 nu->drv_data = priv; 141 153 nu->ndev = priv->ndev; 142 154 143 - /* Set BREAK */ 144 - if (priv->config.break_control && nu->tty->ops->break_ctl) 145 - nu->tty->ops->break_ctl(nu->tty, -1); 146 - 147 155 return 0; 148 156 } 149 157 ··· 158 174 { 159 175 struct nfcmrvl_private *priv = (struct nfcmrvl_private *)nu->drv_data; 160 176 177 + if (priv->ndev->nfc_dev->fw_download_in_progress) 178 + return; 179 + 161 180 /* Remove BREAK to wake up the NFCC */ 162 181 if (priv->config.break_control && nu->tty->ops->break_ctl) { 163 182 nu->tty->ops->break_ctl(nu->tty, 0); ··· 172 185 { 173 186 struct nfcmrvl_private *priv = (struct nfcmrvl_private *)nu->drv_data; 174 187 188 + if (priv->ndev->nfc_dev->fw_download_in_progress) 189 + return; 190 + 175 191 /* 176 192 ** To ensure that if the NFCC goes in DEEP SLEEP sate we can wake him 177 193 ** up. we set BREAK. Once we will be ready to send again we will remove 178 194 ** it. 179 195 */ 180 - if (priv->config.break_control && nu->tty->ops->break_ctl) 196 + if (priv->config.break_control && nu->tty->ops->break_ctl) { 181 197 nu->tty->ops->break_ctl(nu->tty, -1); 198 + usleep_range(1000, 3000); 199 + } 182 200 } 183 201 184 202 static struct nci_uart nfcmrvl_nci_uart = {
+1
include/linux/netdevice.h
··· 1322 1322 #define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER 1323 1323 #define IFF_NO_QUEUE IFF_NO_QUEUE 1324 1324 #define IFF_OPENVSWITCH IFF_OPENVSWITCH 1325 + #define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE 1325 1326 1326 1327 /** 1327 1328 * struct net_device - The DEVICE structure.
+7
include/linux/tcp.h
··· 397 397 queue->fastopenq.max_qlen = min_t(unsigned int, backlog, somaxconn); 398 398 } 399 399 400 + static inline void tcp_move_syn(struct tcp_sock *tp, 401 + struct request_sock *req) 402 + { 403 + tp->saved_syn = req->saved_syn; 404 + req->saved_syn = NULL; 405 + } 406 + 400 407 static inline void tcp_saved_syn_free(struct tcp_sock *tp) 401 408 { 402 409 kfree(tp->saved_syn);
+2
include/net/bluetooth/l2cap.h
··· 275 275 #define L2CAP_CR_AUTHORIZATION 0x0006 276 276 #define L2CAP_CR_BAD_KEY_SIZE 0x0007 277 277 #define L2CAP_CR_ENCRYPTION 0x0008 278 + #define L2CAP_CR_INVALID_SCID 0x0009 279 + #define L2CAP_CR_SCID_IN_USE 0x0010 278 280 279 281 /* connect/create channel status */ 280 282 #define L2CAP_CS_NO_INFO 0x0000
+2 -1
include/net/dst_metadata.h
··· 63 63 static inline struct metadata_dst *tun_dst_unclone(struct sk_buff *skb) 64 64 { 65 65 struct metadata_dst *md_dst = skb_metadata_dst(skb); 66 - int md_size = md_dst->u.tun_info.options_len; 66 + int md_size; 67 67 struct metadata_dst *new_md; 68 68 69 69 if (!md_dst) 70 70 return ERR_PTR(-EINVAL); 71 71 72 + md_size = md_dst->u.tun_info.options_len; 72 73 new_md = metadata_dst_alloc(md_size, GFP_ATOMIC); 73 74 if (!new_md) 74 75 return ERR_PTR(-ENOMEM);
+12
include/net/inet_sock.h
··· 210 210 #define IP_CMSG_ORIGDSTADDR BIT(6) 211 211 #define IP_CMSG_CHECKSUM BIT(7) 212 212 213 + /* SYNACK messages might be attached to request sockets. 214 + * Some places want to reach the listener in this case. 215 + */ 216 + static inline struct sock *skb_to_full_sk(const struct sk_buff *skb) 217 + { 218 + struct sock *sk = skb->sk; 219 + 220 + if (sk && sk->sk_state == TCP_NEW_SYN_RECV) 221 + sk = inet_reqsk(sk)->rsk_listener; 222 + return sk; 223 + } 224 + 213 225 static inline struct inet_sock *inet_sk(const struct sock *sk) 214 226 { 215 227 return (struct inet_sock *)sk;
+1 -1
kernel/trace/Kconfig
··· 434 434 435 435 config BPF_EVENTS 436 436 depends on BPF_SYSCALL 437 - depends on KPROBE_EVENT || UPROBE_EVENT 437 + depends on (KPROBE_EVENT || UPROBE_EVENT) && PERF_EVENTS 438 438 bool 439 439 default y 440 440 help
+30
lib/test_bpf.c
··· 5056 5056 { {0x1, 0x0 } }, 5057 5057 }, 5058 5058 { 5059 + "MOD default X", 5060 + .u.insns = { 5061 + /* 5062 + * A = 0x42 5063 + * A = A mod X ; this halt the filter execution if X is 0 5064 + * ret 0x42 5065 + */ 5066 + BPF_STMT(BPF_LD | BPF_IMM, 0x42), 5067 + BPF_STMT(BPF_ALU | BPF_MOD | BPF_X, 0), 5068 + BPF_STMT(BPF_RET | BPF_K, 0x42), 5069 + }, 5070 + CLASSIC | FLAG_NO_DATA, 5071 + {}, 5072 + { {0x1, 0x0 } }, 5073 + }, 5074 + { 5075 + "MOD default A", 5076 + .u.insns = { 5077 + /* 5078 + * A = A mod 1 5079 + * ret A 5080 + */ 5081 + BPF_STMT(BPF_ALU | BPF_MOD | BPF_K, 0x1), 5082 + BPF_STMT(BPF_RET | BPF_A, 0x0), 5083 + }, 5084 + CLASSIC | FLAG_NO_DATA, 5085 + {}, 5086 + { {0x1, 0x0 } }, 5087 + }, 5088 + { 5059 5089 "JMP EQ default A", 5060 5090 .u.insns = { 5061 5091 /*
+11 -6
net/bluetooth/hci_core.c
··· 508 508 /* Read LE Supported States */ 509 509 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL); 510 510 511 - /* Read LE White List Size */ 512 - hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL); 513 - 514 - /* Clear LE White List */ 515 - hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL); 516 - 517 511 /* LE-only controllers have LE implicitly enabled */ 518 512 if (!lmp_bredr_capable(hdev)) 519 513 hci_dev_set_flag(hdev, HCI_LE_ENABLED); ··· 824 830 if (hdev->commands[25] & 0x40) { 825 831 /* Read LE Advertising Channel TX Power */ 826 832 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL); 833 + } 834 + 835 + if (hdev->commands[26] & 0x40) { 836 + /* Read LE White List Size */ 837 + hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 838 + 0, NULL); 839 + } 840 + 841 + if (hdev->commands[26] & 0x80) { 842 + /* Clear LE White List */ 843 + hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL); 827 844 } 828 845 829 846 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
+17 -3
net/bluetooth/l2cap_core.c
··· 239 239 else 240 240 dyn_end = L2CAP_CID_DYN_END; 241 241 242 - for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) { 242 + for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) { 243 243 if (!__l2cap_get_chan_by_scid(conn, cid)) 244 244 return cid; 245 245 } ··· 5250 5250 credits = __le16_to_cpu(rsp->credits); 5251 5251 result = __le16_to_cpu(rsp->result); 5252 5252 5253 - if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23)) 5253 + if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23 || 5254 + dcid < L2CAP_CID_DYN_START || 5255 + dcid > L2CAP_CID_LE_DYN_END)) 5254 5256 return -EPROTO; 5255 5257 5256 5258 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x", ··· 5272 5270 5273 5271 switch (result) { 5274 5272 case L2CAP_CR_SUCCESS: 5273 + if (__l2cap_get_chan_by_dcid(conn, dcid)) { 5274 + err = -EBADSLT; 5275 + break; 5276 + } 5277 + 5275 5278 chan->ident = 0; 5276 5279 chan->dcid = dcid; 5277 5280 chan->omtu = mtu; ··· 5444 5437 goto response_unlock; 5445 5438 } 5446 5439 5440 + /* Check for valid dynamic CID range */ 5441 + if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) { 5442 + result = L2CAP_CR_INVALID_SCID; 5443 + chan = NULL; 5444 + goto response_unlock; 5445 + } 5446 + 5447 5447 /* Check if we already have channel with that dcid */ 5448 5448 if (__l2cap_get_chan_by_dcid(conn, scid)) { 5449 - result = L2CAP_CR_NO_MEM; 5449 + result = L2CAP_CR_SCID_IN_USE; 5450 5450 chan = NULL; 5451 5451 goto response_unlock; 5452 5452 }
+9 -4
net/bridge/br_stp.c
··· 600 600 int br_set_forward_delay(struct net_bridge *br, unsigned long val) 601 601 { 602 602 unsigned long t = clock_t_to_jiffies(val); 603 - 604 - if (t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY) 605 - return -ERANGE; 603 + int err = -ERANGE; 606 604 607 605 spin_lock_bh(&br->lock); 606 + if (br->stp_enabled != BR_NO_STP && 607 + (t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY)) 608 + goto unlock; 609 + 608 610 __br_set_forward_delay(br, t); 611 + err = 0; 612 + 613 + unlock: 609 614 spin_unlock_bh(&br->lock); 610 - return 0; 615 + return err; 611 616 }
+4 -3
net/core/dev.c
··· 6402 6402 struct net_device *upper, *lower; 6403 6403 netdev_features_t features; 6404 6404 struct list_head *iter; 6405 - int err = 0; 6405 + int err = -1; 6406 6406 6407 6407 ASSERT_RTNL(); 6408 6408 ··· 6419 6419 features = netdev_sync_upper_features(dev, upper, features); 6420 6420 6421 6421 if (dev->features == features) 6422 - return 0; 6422 + goto sync_lower; 6423 6423 6424 6424 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n", 6425 6425 &dev->features, &features); ··· 6434 6434 return -1; 6435 6435 } 6436 6436 6437 + sync_lower: 6437 6438 /* some features must be disabled on lower devices when disabled 6438 6439 * on an upper device (think: bonding master or bridge) 6439 6440 */ ··· 6444 6443 if (!err) 6445 6444 dev->features = features; 6446 6445 6447 - return 1; 6446 + return err < 0 ? 0 : 1; 6448 6447 } 6449 6448 6450 6449 /**
+1 -1
net/core/dst.c
··· 306 306 if (unlikely(newrefcnt < 0)) 307 307 net_warn_ratelimited("%s: dst:%p refcnt:%d\n", 308 308 __func__, dst, newrefcnt); 309 - if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) 309 + if (!newrefcnt && unlikely(dst->flags & DST_NOCACHE)) 310 310 call_rcu(&dst->rcu_head, dst_destroy_rcu); 311 311 } 312 312 }
+10 -3
net/ipv4/fib_semantics.c
··· 923 923 if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst || 924 924 fib_prefsrc != cfg->fc_dst) { 925 925 u32 tb_id = cfg->fc_table; 926 + int rc; 926 927 927 928 if (tb_id == RT_TABLE_MAIN) 928 929 tb_id = RT_TABLE_LOCAL; 929 930 930 - if (inet_addr_type_table(cfg->fc_nlinfo.nl_net, 931 - fib_prefsrc, tb_id) != RTN_LOCAL) { 932 - return false; 931 + rc = inet_addr_type_table(cfg->fc_nlinfo.nl_net, 932 + fib_prefsrc, tb_id); 933 + 934 + if (rc != RTN_LOCAL && tb_id != RT_TABLE_LOCAL) { 935 + rc = inet_addr_type_table(cfg->fc_nlinfo.nl_net, 936 + fib_prefsrc, RT_TABLE_LOCAL); 933 937 } 938 + 939 + if (rc != RTN_LOCAL) 940 + return false; 934 941 } 935 942 return true; 936 943 }
+4 -8
net/ipv4/igmp.c
··· 2392 2392 struct ip_sf_socklist *psl; 2393 2393 struct net *net = sock_net(sk); 2394 2394 2395 + ASSERT_RTNL(); 2396 + 2395 2397 if (!ipv4_is_multicast(addr)) 2396 2398 return -EINVAL; 2397 - 2398 - rtnl_lock(); 2399 2399 2400 2400 imr.imr_multiaddr.s_addr = msf->imsf_multiaddr; 2401 2401 imr.imr_address.s_addr = msf->imsf_interface; ··· 2417 2417 goto done; 2418 2418 msf->imsf_fmode = pmc->sfmode; 2419 2419 psl = rtnl_dereference(pmc->sflist); 2420 - rtnl_unlock(); 2421 2420 if (!psl) { 2422 2421 len = 0; 2423 2422 count = 0; ··· 2435 2436 return -EFAULT; 2436 2437 return 0; 2437 2438 done: 2438 - rtnl_unlock(); 2439 2439 return err; 2440 2440 } 2441 2441 ··· 2448 2450 struct inet_sock *inet = inet_sk(sk); 2449 2451 struct ip_sf_socklist *psl; 2450 2452 2453 + ASSERT_RTNL(); 2454 + 2451 2455 psin = (struct sockaddr_in *)&gsf->gf_group; 2452 2456 if (psin->sin_family != AF_INET) 2453 2457 return -EINVAL; 2454 2458 addr = psin->sin_addr.s_addr; 2455 2459 if (!ipv4_is_multicast(addr)) 2456 2460 return -EINVAL; 2457 - 2458 - rtnl_lock(); 2459 2461 2460 2462 err = -EADDRNOTAVAIL; 2461 2463 ··· 2468 2470 goto done; 2469 2471 gsf->gf_fmode = pmc->sfmode; 2470 2472 psl = rtnl_dereference(pmc->sflist); 2471 - rtnl_unlock(); 2472 2473 count = psl ? psl->sl_count : 0; 2473 2474 copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc; 2474 2475 gsf->gf_numsrc = count; ··· 2487 2490 } 2488 2491 return 0; 2489 2492 done: 2490 - rtnl_unlock(); 2491 2493 return err; 2492 2494 } 2493 2495
+30 -15
net/ipv4/ip_sockglue.c
··· 1251 1251 * the _received_ ones. The set sets the _sent_ ones. 1252 1252 */ 1253 1253 1254 + static bool getsockopt_needs_rtnl(int optname) 1255 + { 1256 + switch (optname) { 1257 + case IP_MSFILTER: 1258 + case MCAST_MSFILTER: 1259 + return true; 1260 + } 1261 + return false; 1262 + } 1263 + 1254 1264 static int do_ip_getsockopt(struct sock *sk, int level, int optname, 1255 1265 char __user *optval, int __user *optlen, unsigned int flags) 1256 1266 { 1257 1267 struct inet_sock *inet = inet_sk(sk); 1258 - int val; 1268 + bool needs_rtnl = getsockopt_needs_rtnl(optname); 1269 + int val, err = 0; 1259 1270 int len; 1260 1271 1261 1272 if (level != SOL_IP) ··· 1280 1269 if (len < 0) 1281 1270 return -EINVAL; 1282 1271 1272 + if (needs_rtnl) 1273 + rtnl_lock(); 1283 1274 lock_sock(sk); 1284 1275 1285 1276 switch (optname) { ··· 1399 1386 case IP_MSFILTER: 1400 1387 { 1401 1388 struct ip_msfilter msf; 1402 - int err; 1403 1389 1404 1390 if (len < IP_MSFILTER_SIZE(0)) { 1405 - release_sock(sk); 1406 - return -EINVAL; 1391 + err = -EINVAL; 1392 + goto out; 1407 1393 } 1408 1394 if (copy_from_user(&msf, optval, IP_MSFILTER_SIZE(0))) { 1409 - release_sock(sk); 1410 - return -EFAULT; 1395 + err = -EFAULT; 1396 + goto out; 1411 1397 } 1412 1398 err = ip_mc_msfget(sk, &msf, 1413 1399 (struct ip_msfilter __user *)optval, optlen); 1414 - release_sock(sk); 1415 - return err; 1400 + goto out; 1416 1401 } 1417 1402 case MCAST_MSFILTER: 1418 1403 { 1419 1404 struct group_filter gsf; 1420 - int err; 1421 1405 1422 1406 if (len < GROUP_FILTER_SIZE(0)) { 1423 - release_sock(sk); 1424 - return -EINVAL; 1407 + err = -EINVAL; 1408 + goto out; 1425 1409 } 1426 1410 if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0))) { 1427 - release_sock(sk); 1428 - return -EFAULT; 1411 + err = -EFAULT; 1412 + goto out; 1429 1413 } 1430 1414 err = ip_mc_gsfget(sk, &gsf, 1431 1415 (struct group_filter __user *)optval, 1432 1416 optlen); 1433 - release_sock(sk); 1434 - return err; 1417 + goto out; 
1435 1418 } 1436 1419 case IP_MULTICAST_ALL: 1437 1420 val = inet->mc_all; ··· 1494 1485 return -EFAULT; 1495 1486 } 1496 1487 return 0; 1488 + 1489 + out: 1490 + release_sock(sk); 1491 + if (needs_rtnl) 1492 + rtnl_unlock(); 1493 + return err; 1497 1494 } 1498 1495 1499 1496 int ip_getsockopt(struct sock *sk, int level,
+2 -3
net/ipv4/netfilter/nf_defrag_ipv4.c
··· 67 67 const struct nf_hook_state *state) 68 68 { 69 69 struct sock *sk = skb->sk; 70 - struct inet_sock *inet = inet_sk(skb->sk); 71 70 72 - if (sk && (sk->sk_family == PF_INET) && 73 - inet->nodefrag) 71 + if (sk && sk_fullsock(sk) && (sk->sk_family == PF_INET) && 72 + inet_sk(sk)->nodefrag) 74 73 return NF_ACCEPT; 75 74 76 75 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
+2 -2
net/ipv4/sysctl_net_ipv4.c
··· 48 48 { 49 49 bool same_parity = !((range[0] ^ range[1]) & 1); 50 50 51 - write_seqlock(&net->ipv4.ip_local_ports.lock); 51 + write_seqlock_bh(&net->ipv4.ip_local_ports.lock); 52 52 if (same_parity && !net->ipv4.ip_local_ports.warned) { 53 53 net->ipv4.ip_local_ports.warned = true; 54 54 pr_err_ratelimited("ip_local_port_range: prefer different parity for start/end values.\n"); 55 55 } 56 56 net->ipv4.ip_local_ports.range[0] = range[0]; 57 57 net->ipv4.ip_local_ports.range[1] = range[1]; 58 - write_sequnlock(&net->ipv4.ip_local_ports.lock); 58 + write_sequnlock_bh(&net->ipv4.ip_local_ports.lock); 59 59 } 60 60 61 61 /* Validate changes from /proc interface. */
+2
net/ipv4/tcp_ipv4.c
··· 1326 1326 if (__inet_inherit_port(sk, newsk) < 0) 1327 1327 goto put_and_exit; 1328 1328 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash)); 1329 + if (*own_req) 1330 + tcp_move_syn(newtp, req); 1329 1331 1330 1332 return newsk; 1331 1333
-3
net/ipv4/tcp_minisocks.c
··· 551 551 newtp->rack.mstamp.v64 = 0; 552 552 newtp->rack.advanced = 0; 553 553 554 - newtp->saved_syn = req->saved_syn; 555 - req->saved_syn = NULL; 556 - 557 554 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS); 558 555 } 559 556 return newsk;
+1
net/ipv6/addrconf.c
··· 418 418 if (err) { 419 419 ipv6_mc_destroy_dev(ndev); 420 420 del_timer(&ndev->regen_timer); 421 + snmp6_unregister_dev(ndev); 421 422 goto err_release; 422 423 } 423 424 /* protected by rtnl_lock */
+12 -8
net/ipv6/tcp_ipv6.c
··· 1140 1140 goto out; 1141 1141 } 1142 1142 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash)); 1143 - /* Clone pktoptions received with SYN, if we own the req */ 1144 - if (*own_req && ireq->pktopts) { 1145 - newnp->pktoptions = skb_clone(ireq->pktopts, 1146 - sk_gfp_atomic(sk, GFP_ATOMIC)); 1147 - consume_skb(ireq->pktopts); 1148 - ireq->pktopts = NULL; 1149 - if (newnp->pktoptions) 1150 - skb_set_owner_r(newnp->pktoptions, newsk); 1143 + if (*own_req) { 1144 + tcp_move_syn(newtp, req); 1145 + 1146 + /* Clone pktoptions received with SYN, if we own the req */ 1147 + if (ireq->pktopts) { 1148 + newnp->pktoptions = skb_clone(ireq->pktopts, 1149 + sk_gfp_atomic(sk, GFP_ATOMIC)); 1150 + consume_skb(ireq->pktopts); 1151 + ireq->pktopts = NULL; 1152 + if (newnp->pktoptions) 1153 + skb_set_owner_r(newnp->pktoptions, newsk); 1154 + } 1151 1155 } 1152 1156 1153 1157 return newsk;
+1 -1
net/netfilter/nf_nat_redirect.c
··· 55 55 56 56 rcu_read_lock(); 57 57 indev = __in_dev_get_rcu(skb->dev); 58 - if (indev != NULL) { 58 + if (indev && indev->ifa_list) { 59 59 ifa = indev->ifa_list; 60 60 newdst = ifa->ifa_local; 61 61 }
+1 -1
net/netfilter/nfnetlink.c
··· 492 492 type = nfnl_group2type[group]; 493 493 494 494 rcu_read_lock(); 495 - ss = nfnetlink_get_subsys(type); 495 + ss = nfnetlink_get_subsys(type << 8); 496 496 rcu_read_unlock(); 497 497 if (!ss) 498 498 request_module("nfnetlink-subsys-%d", type);
+20 -16
net/netfilter/nft_meta.c
··· 31 31 const struct nft_meta *priv = nft_expr_priv(expr); 32 32 const struct sk_buff *skb = pkt->skb; 33 33 const struct net_device *in = pkt->in, *out = pkt->out; 34 + struct sock *sk; 34 35 u32 *dest = &regs->data[priv->dreg]; 35 36 36 37 switch (priv->key) { ··· 87 86 *(u16 *)dest = out->type; 88 87 break; 89 88 case NFT_META_SKUID: 90 - if (skb->sk == NULL || !sk_fullsock(skb->sk)) 89 + sk = skb_to_full_sk(skb); 90 + if (!sk || !sk_fullsock(sk)) 91 91 goto err; 92 92 93 - read_lock_bh(&skb->sk->sk_callback_lock); 94 - if (skb->sk->sk_socket == NULL || 95 - skb->sk->sk_socket->file == NULL) { 96 - read_unlock_bh(&skb->sk->sk_callback_lock); 93 + read_lock_bh(&sk->sk_callback_lock); 94 + if (sk->sk_socket == NULL || 95 + sk->sk_socket->file == NULL) { 96 + read_unlock_bh(&sk->sk_callback_lock); 97 97 goto err; 98 98 } 99 99 100 100 *dest = from_kuid_munged(&init_user_ns, 101 - skb->sk->sk_socket->file->f_cred->fsuid); 102 - read_unlock_bh(&skb->sk->sk_callback_lock); 101 + sk->sk_socket->file->f_cred->fsuid); 102 + read_unlock_bh(&sk->sk_callback_lock); 103 103 break; 104 104 case NFT_META_SKGID: 105 - if (skb->sk == NULL || !sk_fullsock(skb->sk)) 105 + sk = skb_to_full_sk(skb); 106 + if (!sk || !sk_fullsock(sk)) 106 107 goto err; 107 108 108 - read_lock_bh(&skb->sk->sk_callback_lock); 109 - if (skb->sk->sk_socket == NULL || 110 - skb->sk->sk_socket->file == NULL) { 111 - read_unlock_bh(&skb->sk->sk_callback_lock); 109 + read_lock_bh(&sk->sk_callback_lock); 110 + if (sk->sk_socket == NULL || 111 + sk->sk_socket->file == NULL) { 112 + read_unlock_bh(&sk->sk_callback_lock); 112 113 goto err; 113 114 } 114 115 *dest = from_kgid_munged(&init_user_ns, 115 - skb->sk->sk_socket->file->f_cred->fsgid); 116 - read_unlock_bh(&skb->sk->sk_callback_lock); 116 + sk->sk_socket->file->f_cred->fsgid); 117 + read_unlock_bh(&sk->sk_callback_lock); 117 118 break; 118 119 #ifdef CONFIG_IP_ROUTE_CLASSID 119 120 case NFT_META_RTCLASSID: { ··· 171 168 break; 172 169 #ifdef 
CONFIG_CGROUP_NET_CLASSID 173 170 case NFT_META_CGROUP: 174 - if (skb->sk == NULL || !sk_fullsock(skb->sk)) 171 + sk = skb_to_full_sk(skb); 172 + if (!sk || !sk_fullsock(sk)) 175 173 goto err; 176 - *dest = skb->sk->sk_classid; 174 + *dest = sk->sk_classid; 177 175 break; 178 176 #endif 179 177 default:
+4 -2
net/netfilter/xt_TEE.c
··· 31 31 tee_tg4(struct sk_buff *skb, const struct xt_action_param *par) 32 32 { 33 33 const struct xt_tee_tginfo *info = par->targinfo; 34 + int oif = info->priv ? info->priv->oif : 0; 34 35 35 - nf_dup_ipv4(par->net, skb, par->hooknum, &info->gw.in, info->priv->oif); 36 + nf_dup_ipv4(par->net, skb, par->hooknum, &info->gw.in, oif); 36 37 37 38 return XT_CONTINUE; 38 39 } ··· 43 42 tee_tg6(struct sk_buff *skb, const struct xt_action_param *par) 44 43 { 45 44 const struct xt_tee_tginfo *info = par->targinfo; 45 + int oif = info->priv ? info->priv->oif : 0; 46 46 47 - nf_dup_ipv6(par->net, skb, par->hooknum, &info->gw.in6, info->priv->oif); 47 + nf_dup_ipv6(par->net, skb, par->hooknum, &info->gw.in6, oif); 48 48 49 49 return XT_CONTINUE; 50 50 }
+4 -2
net/netfilter/xt_owner.c
··· 14 14 #include <linux/skbuff.h> 15 15 #include <linux/file.h> 16 16 #include <net/sock.h> 17 + #include <net/inet_sock.h> 17 18 #include <linux/netfilter/x_tables.h> 18 19 #include <linux/netfilter/xt_owner.h> 19 20 ··· 34 33 { 35 34 const struct xt_owner_match_info *info = par->matchinfo; 36 35 const struct file *filp; 36 + struct sock *sk = skb_to_full_sk(skb); 37 37 38 - if (skb->sk == NULL || skb->sk->sk_socket == NULL) 38 + if (sk == NULL || sk->sk_socket == NULL) 39 39 return (info->match ^ info->invert) == 0; 40 40 else if (info->match & info->invert & XT_OWNER_SOCKET) 41 41 /* ··· 45 43 */ 46 44 return false; 47 45 48 - filp = skb->sk->sk_socket->file; 46 + filp = sk->sk_socket->file; 49 47 if (filp == NULL) 50 48 return ((info->match ^ info->invert) & 51 49 (XT_OWNER_UID | XT_OWNER_GID)) == 0;
+49 -31
net/packet/af_packet.c
··· 2911 2911 * Attach a packet hook. 2912 2912 */ 2913 2913 2914 - static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto) 2914 + static int packet_do_bind(struct sock *sk, const char *name, int ifindex, 2915 + __be16 proto) 2915 2916 { 2916 2917 struct packet_sock *po = pkt_sk(sk); 2917 2918 struct net_device *dev_curr; 2918 2919 __be16 proto_curr; 2919 2920 bool need_rehook; 2921 + struct net_device *dev = NULL; 2922 + int ret = 0; 2923 + bool unlisted = false; 2920 2924 2921 - if (po->fanout) { 2922 - if (dev) 2923 - dev_put(dev); 2924 - 2925 + if (po->fanout) 2925 2926 return -EINVAL; 2926 - } 2927 2927 2928 2928 lock_sock(sk); 2929 2929 spin_lock(&po->bind_lock); 2930 + rcu_read_lock(); 2931 + 2932 + if (name) { 2933 + dev = dev_get_by_name_rcu(sock_net(sk), name); 2934 + if (!dev) { 2935 + ret = -ENODEV; 2936 + goto out_unlock; 2937 + } 2938 + } else if (ifindex) { 2939 + dev = dev_get_by_index_rcu(sock_net(sk), ifindex); 2940 + if (!dev) { 2941 + ret = -ENODEV; 2942 + goto out_unlock; 2943 + } 2944 + } 2945 + 2946 + if (dev) 2947 + dev_hold(dev); 2930 2948 2931 2949 proto_curr = po->prot_hook.type; 2932 2950 dev_curr = po->prot_hook.dev; ··· 2952 2934 need_rehook = proto_curr != proto || dev_curr != dev; 2953 2935 2954 2936 if (need_rehook) { 2955 - unregister_prot_hook(sk, true); 2937 + if (po->running) { 2938 + rcu_read_unlock(); 2939 + __unregister_prot_hook(sk, true); 2940 + rcu_read_lock(); 2941 + dev_curr = po->prot_hook.dev; 2942 + if (dev) 2943 + unlisted = !dev_get_by_index_rcu(sock_net(sk), 2944 + dev->ifindex); 2945 + } 2956 2946 2957 2947 po->num = proto; 2958 2948 po->prot_hook.type = proto; 2959 - po->prot_hook.dev = dev; 2960 2949 2961 - po->ifindex = dev ? 
dev->ifindex : 0; 2962 - packet_cached_dev_assign(po, dev); 2950 + if (unlikely(unlisted)) { 2951 + dev_put(dev); 2952 + po->prot_hook.dev = NULL; 2953 + po->ifindex = -1; 2954 + packet_cached_dev_reset(po); 2955 + } else { 2956 + po->prot_hook.dev = dev; 2957 + po->ifindex = dev ? dev->ifindex : 0; 2958 + packet_cached_dev_assign(po, dev); 2959 + } 2963 2960 } 2964 2961 if (dev_curr) 2965 2962 dev_put(dev_curr); ··· 2982 2949 if (proto == 0 || !need_rehook) 2983 2950 goto out_unlock; 2984 2951 2985 - if (!dev || (dev->flags & IFF_UP)) { 2952 + if (!unlisted && (!dev || (dev->flags & IFF_UP))) { 2986 2953 register_prot_hook(sk); 2987 2954 } else { 2988 2955 sk->sk_err = ENETDOWN; ··· 2991 2958 } 2992 2959 2993 2960 out_unlock: 2961 + rcu_read_unlock(); 2994 2962 spin_unlock(&po->bind_lock); 2995 2963 release_sock(sk); 2996 - return 0; 2964 + return ret; 2997 2965 } 2998 2966 2999 2967 /* ··· 3006 2972 { 3007 2973 struct sock *sk = sock->sk; 3008 2974 char name[15]; 3009 - struct net_device *dev; 3010 - int err = -ENODEV; 3011 2975 3012 2976 /* 3013 2977 * Check legality ··· 3015 2983 return -EINVAL; 3016 2984 strlcpy(name, uaddr->sa_data, sizeof(name)); 3017 2985 3018 - dev = dev_get_by_name(sock_net(sk), name); 3019 - if (dev) 3020 - err = packet_do_bind(sk, dev, pkt_sk(sk)->num); 3021 - return err; 2986 + return packet_do_bind(sk, name, 0, pkt_sk(sk)->num); 3022 2987 } 3023 2988 3024 2989 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) 3025 2990 { 3026 2991 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr; 3027 2992 struct sock *sk = sock->sk; 3028 - struct net_device *dev = NULL; 3029 - int err; 3030 - 3031 2993 3032 2994 /* 3033 2995 * Check legality ··· 3032 3006 if (sll->sll_family != AF_PACKET) 3033 3007 return -EINVAL; 3034 3008 3035 - if (sll->sll_ifindex) { 3036 - err = -ENODEV; 3037 - dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex); 3038 - if (dev == NULL) 3039 - goto out; 3040 - } 3041 - err = 
packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num); 3042 - 3043 - out: 3044 - return err; 3009 + return packet_do_bind(sk, NULL, sll->sll_ifindex, 3010 + sll->sll_protocol ? : pkt_sk(sk)->num); 3045 3011 } 3046 3012 3047 3013 static struct proto packet_proto = {
+11 -4
net/sched/cls_flow.c
··· 22 22 #include <linux/if_vlan.h> 23 23 #include <linux/slab.h> 24 24 #include <linux/module.h> 25 + #include <net/inet_sock.h> 25 26 26 27 #include <net/pkt_cls.h> 27 28 #include <net/ip.h> ··· 198 197 199 198 static u32 flow_get_skuid(const struct sk_buff *skb) 200 199 { 201 - if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) { 202 - kuid_t skuid = skb->sk->sk_socket->file->f_cred->fsuid; 200 + struct sock *sk = skb_to_full_sk(skb); 201 + 202 + if (sk && sk->sk_socket && sk->sk_socket->file) { 203 + kuid_t skuid = sk->sk_socket->file->f_cred->fsuid; 204 + 203 205 return from_kuid(&init_user_ns, skuid); 204 206 } 205 207 return 0; ··· 210 206 211 207 static u32 flow_get_skgid(const struct sk_buff *skb) 212 208 { 213 - if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) { 214 - kgid_t skgid = skb->sk->sk_socket->file->f_cred->fsgid; 209 + struct sock *sk = skb_to_full_sk(skb); 210 + 211 + if (sk && sk->sk_socket && sk->sk_socket->file) { 212 + kgid_t skgid = sk->sk_socket->file->f_cred->fsgid; 213 + 215 214 return from_kgid(&init_user_ns, skgid); 216 215 } 217 216 return 0;
+92 -46
net/sched/em_meta.c
··· 343 343 344 344 META_COLLECTOR(int_sk_rcvbuf) 345 345 { 346 - if (skip_nonlocal(skb)) { 346 + const struct sock *sk = skb_to_full_sk(skb); 347 + 348 + if (!sk) { 347 349 *err = -1; 348 350 return; 349 351 } 350 - dst->value = skb->sk->sk_rcvbuf; 352 + dst->value = sk->sk_rcvbuf; 351 353 } 352 354 353 355 META_COLLECTOR(int_sk_shutdown) 354 356 { 355 - if (skip_nonlocal(skb)) { 357 + const struct sock *sk = skb_to_full_sk(skb); 358 + 359 + if (!sk) { 356 360 *err = -1; 357 361 return; 358 362 } 359 - dst->value = skb->sk->sk_shutdown; 363 + dst->value = sk->sk_shutdown; 360 364 } 361 365 362 366 META_COLLECTOR(int_sk_proto) 363 367 { 364 - if (skip_nonlocal(skb)) { 368 + const struct sock *sk = skb_to_full_sk(skb); 369 + 370 + if (!sk) { 365 371 *err = -1; 366 372 return; 367 373 } 368 - dst->value = skb->sk->sk_protocol; 374 + dst->value = sk->sk_protocol; 369 375 } 370 376 371 377 META_COLLECTOR(int_sk_type) 372 378 { 373 - if (skip_nonlocal(skb)) { 379 + const struct sock *sk = skb_to_full_sk(skb); 380 + 381 + if (!sk) { 374 382 *err = -1; 375 383 return; 376 384 } 377 - dst->value = skb->sk->sk_type; 385 + dst->value = sk->sk_type; 378 386 } 379 387 380 388 META_COLLECTOR(int_sk_rmem_alloc) 381 389 { 382 - if (skip_nonlocal(skb)) { 390 + const struct sock *sk = skb_to_full_sk(skb); 391 + 392 + if (!sk) { 383 393 *err = -1; 384 394 return; 385 395 } 386 - dst->value = sk_rmem_alloc_get(skb->sk); 396 + dst->value = sk_rmem_alloc_get(sk); 387 397 } 388 398 389 399 META_COLLECTOR(int_sk_wmem_alloc) 390 400 { 391 - if (skip_nonlocal(skb)) { 401 + const struct sock *sk = skb_to_full_sk(skb); 402 + 403 + if (!sk) { 392 404 *err = -1; 393 405 return; 394 406 } 395 - dst->value = sk_wmem_alloc_get(skb->sk); 407 + dst->value = sk_wmem_alloc_get(sk); 396 408 } 397 409 398 410 META_COLLECTOR(int_sk_omem_alloc) 399 411 { 400 - if (skip_nonlocal(skb)) { 412 + const struct sock *sk = skb_to_full_sk(skb); 413 + 414 + if (!sk) { 401 415 *err = -1; 402 416 return; 403 417 } 
404 - dst->value = atomic_read(&skb->sk->sk_omem_alloc); 418 + dst->value = atomic_read(&sk->sk_omem_alloc); 405 419 } 406 420 407 421 META_COLLECTOR(int_sk_rcv_qlen) 408 422 { 409 - if (skip_nonlocal(skb)) { 423 + const struct sock *sk = skb_to_full_sk(skb); 424 + 425 + if (!sk) { 410 426 *err = -1; 411 427 return; 412 428 } 413 - dst->value = skb->sk->sk_receive_queue.qlen; 429 + dst->value = sk->sk_receive_queue.qlen; 414 430 } 415 431 416 432 META_COLLECTOR(int_sk_snd_qlen) 417 433 { 418 - if (skip_nonlocal(skb)) { 434 + const struct sock *sk = skb_to_full_sk(skb); 435 + 436 + if (!sk) { 419 437 *err = -1; 420 438 return; 421 439 } 422 - dst->value = skb->sk->sk_write_queue.qlen; 440 + dst->value = sk->sk_write_queue.qlen; 423 441 } 424 442 425 443 META_COLLECTOR(int_sk_wmem_queued) 426 444 { 427 - if (skip_nonlocal(skb)) { 445 + const struct sock *sk = skb_to_full_sk(skb); 446 + 447 + if (!sk) { 428 448 *err = -1; 429 449 return; 430 450 } 431 - dst->value = skb->sk->sk_wmem_queued; 451 + dst->value = sk->sk_wmem_queued; 432 452 } 433 453 434 454 META_COLLECTOR(int_sk_fwd_alloc) 435 455 { 436 - if (skip_nonlocal(skb)) { 456 + const struct sock *sk = skb_to_full_sk(skb); 457 + 458 + if (!sk) { 437 459 *err = -1; 438 460 return; 439 461 } 440 - dst->value = skb->sk->sk_forward_alloc; 462 + dst->value = sk->sk_forward_alloc; 441 463 } 442 464 443 465 META_COLLECTOR(int_sk_sndbuf) 444 466 { 445 - if (skip_nonlocal(skb)) { 467 + const struct sock *sk = skb_to_full_sk(skb); 468 + 469 + if (!sk) { 446 470 *err = -1; 447 471 return; 448 472 } 449 - dst->value = skb->sk->sk_sndbuf; 473 + dst->value = sk->sk_sndbuf; 450 474 } 451 475 452 476 META_COLLECTOR(int_sk_alloc) 453 477 { 454 - if (skip_nonlocal(skb)) { 478 + const struct sock *sk = skb_to_full_sk(skb); 479 + 480 + if (!sk) { 455 481 *err = -1; 456 482 return; 457 483 } 458 - dst->value = (__force int) skb->sk->sk_allocation; 484 + dst->value = (__force int) sk->sk_allocation; 459 485 } 460 486 461 487 
META_COLLECTOR(int_sk_hash) ··· 495 469 496 470 META_COLLECTOR(int_sk_lingertime) 497 471 { 498 - if (skip_nonlocal(skb)) { 472 + const struct sock *sk = skb_to_full_sk(skb); 473 + 474 + if (!sk) { 499 475 *err = -1; 500 476 return; 501 477 } 502 - dst->value = skb->sk->sk_lingertime / HZ; 478 + dst->value = sk->sk_lingertime / HZ; 503 479 } 504 480 505 481 META_COLLECTOR(int_sk_err_qlen) 506 482 { 507 - if (skip_nonlocal(skb)) { 483 + const struct sock *sk = skb_to_full_sk(skb); 484 + 485 + if (!sk) { 508 486 *err = -1; 509 487 return; 510 488 } 511 - dst->value = skb->sk->sk_error_queue.qlen; 489 + dst->value = sk->sk_error_queue.qlen; 512 490 } 513 491 514 492 META_COLLECTOR(int_sk_ack_bl) 515 493 { 516 - if (skip_nonlocal(skb)) { 494 + const struct sock *sk = skb_to_full_sk(skb); 495 + 496 + if (!sk) { 517 497 *err = -1; 518 498 return; 519 499 } 520 - dst->value = skb->sk->sk_ack_backlog; 500 + dst->value = sk->sk_ack_backlog; 521 501 } 522 502 523 503 META_COLLECTOR(int_sk_max_ack_bl) 524 504 { 525 - if (skip_nonlocal(skb)) { 505 + const struct sock *sk = skb_to_full_sk(skb); 506 + 507 + if (!sk) { 526 508 *err = -1; 527 509 return; 528 510 } 529 - dst->value = skb->sk->sk_max_ack_backlog; 511 + dst->value = sk->sk_max_ack_backlog; 530 512 } 531 513 532 514 META_COLLECTOR(int_sk_prio) 533 515 { 534 - if (skip_nonlocal(skb)) { 516 + const struct sock *sk = skb_to_full_sk(skb); 517 + 518 + if (!sk) { 535 519 *err = -1; 536 520 return; 537 521 } 538 - dst->value = skb->sk->sk_priority; 522 + dst->value = sk->sk_priority; 539 523 } 540 524 541 525 META_COLLECTOR(int_sk_rcvlowat) 542 526 { 543 - if (skip_nonlocal(skb)) { 527 + const struct sock *sk = skb_to_full_sk(skb); 528 + 529 + if (!sk) { 544 530 *err = -1; 545 531 return; 546 532 } 547 - dst->value = skb->sk->sk_rcvlowat; 533 + dst->value = sk->sk_rcvlowat; 548 534 } 549 535 550 536 META_COLLECTOR(int_sk_rcvtimeo) 551 537 { 552 - if (skip_nonlocal(skb)) { 538 + const struct sock *sk = skb_to_full_sk(skb); 
539 + 540 + if (!sk) { 553 541 *err = -1; 554 542 return; 555 543 } 556 - dst->value = skb->sk->sk_rcvtimeo / HZ; 544 + dst->value = sk->sk_rcvtimeo / HZ; 557 545 } 558 546 559 547 META_COLLECTOR(int_sk_sndtimeo) 560 548 { 561 - if (skip_nonlocal(skb)) { 549 + const struct sock *sk = skb_to_full_sk(skb); 550 + 551 + if (!sk) { 562 552 *err = -1; 563 553 return; 564 554 } 565 - dst->value = skb->sk->sk_sndtimeo / HZ; 555 + dst->value = sk->sk_sndtimeo / HZ; 566 556 } 567 557 568 558 META_COLLECTOR(int_sk_sendmsg_off) 569 559 { 570 - if (skip_nonlocal(skb)) { 560 + const struct sock *sk = skb_to_full_sk(skb); 561 + 562 + if (!sk) { 571 563 *err = -1; 572 564 return; 573 565 } 574 - dst->value = skb->sk->sk_frag.offset; 566 + dst->value = sk->sk_frag.offset; 575 567 } 576 568 577 569 META_COLLECTOR(int_sk_write_pend) 578 570 { 579 - if (skip_nonlocal(skb)) { 571 + const struct sock *sk = skb_to_full_sk(skb); 572 + 573 + if (!sk) { 580 574 *err = -1; 581 575 return; 582 576 } 583 - dst->value = skb->sk->sk_write_pending; 577 + dst->value = sk->sk_write_pending; 584 578 } 585 579 586 580 /**************************************************************************
+1 -1
net/vmw_vsock/vmci_transport.c
··· 1234 1234 /* Callers of accept() will be be waiting on the listening socket, not 1235 1235 * the pending socket. 1236 1236 */ 1237 - listener->sk_state_change(listener); 1237 + listener->sk_data_ready(listener); 1238 1238 1239 1239 return 0; 1240 1240
+2 -4
security/selinux/hooks.c
··· 4933 4933 int ifindex, 4934 4934 u16 family) 4935 4935 { 4936 - struct sock *sk = skb->sk; 4936 + struct sock *sk = skb_to_full_sk(skb); 4937 4937 struct sk_security_struct *sksec; 4938 4938 struct common_audit_data ad; 4939 4939 struct lsm_network_audit net = {0,}; ··· 4988 4988 if (!secmark_active && !peerlbl_active) 4989 4989 return NF_ACCEPT; 4990 4990 4991 - sk = skb->sk; 4991 + sk = skb_to_full_sk(skb); 4992 4992 4993 4993 #ifdef CONFIG_XFRM 4994 4994 /* If skb->dst->xfrm is non-NULL then the packet is undergoing an IPsec ··· 5033 5033 u32 skb_sid; 5034 5034 struct sk_security_struct *sksec; 5035 5035 5036 - if (sk->sk_state == TCP_NEW_SYN_RECV) 5037 - sk = inet_reqsk(sk)->rsk_listener; 5038 5036 sksec = sk->sk_security; 5039 5037 if (selinux_skb_peerlbl_sid(skb, family, &skb_sid)) 5040 5038 return NF_DROP;
+1 -1
security/selinux/netlabel.c
··· 245 245 246 246 /* if this is a locally generated packet check to see if it is already 247 247 * being labeled by it's parent socket, if it is just exit */ 248 - sk = skb->sk; 248 + sk = skb_to_full_sk(skb); 249 249 if (sk != NULL) { 250 250 struct sk_security_struct *sksec = sk->sk_security; 251 251 if (sksec->nlbl_state != NLBL_REQSKB)
+7 -4
security/smack/smack_netfilter.c
··· 17 17 #include <linux/netfilter_ipv4.h> 18 18 #include <linux/netfilter_ipv6.h> 19 19 #include <linux/netdevice.h> 20 + #include <net/inet_sock.h> 20 21 #include "smack.h" 21 22 22 23 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) ··· 26 25 struct sk_buff *skb, 27 26 const struct nf_hook_state *state) 28 27 { 28 + struct sock *sk = skb_to_full_sk(skb); 29 29 struct socket_smack *ssp; 30 30 struct smack_known *skp; 31 31 32 - if (skb && skb->sk && skb->sk->sk_security) { 33 - ssp = skb->sk->sk_security; 32 + if (sk && sk->sk_security) { 33 + ssp = sk->sk_security; 34 34 skp = ssp->smk_out; 35 35 skb->secmark = skp->smk_secid; 36 36 } ··· 44 42 struct sk_buff *skb, 45 43 const struct nf_hook_state *state) 46 44 { 45 + struct sock *sk = skb_to_full_sk(skb); 47 46 struct socket_smack *ssp; 48 47 struct smack_known *skp; 49 48 50 - if (skb && skb->sk && skb->sk->sk_security) { 51 - ssp = skb->sk->sk_security; 49 + if (sk && sk->sk_security) { 50 + ssp = sk->sk_security; 52 51 skp = ssp->smk_out; 53 52 skb->secmark = skp->smk_secid; 54 53 }