Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) The sockmap code has to free socket memory on close if there is
corked data, from John Fastabend.

2) Tunnel names coming from userspace need to be length-validated. From
Eric Dumazet.

3) arp_filter() has to take VRFs properly into account, from Miguel
Fadon Perlines.

4) Fix oops in error path of tcf_bpf_init(), from Davide Caratti.

5) Missing idr_remove() in u32_delete_key(), from Cong Wang.

6) More syzbot stuff. Several use of uninitialized value fixes all
over, from Eric Dumazet.

7) Do not leak kernel memory to userspace in sctp, also from Eric
Dumazet.

8) Discard frames from unused ports in DSA, from Andrew Lunn.

9) Fix DMA mapping and reset/failover problems in ibmvnic, from Thomas
Falcon.

10) Do not access dp83640 PHY registers prematurely after reset, from
Esben Haabendal.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (46 commits)
vhost-net: set packet weight of tx polling to 2 * vq size
net: thunderx: rework mac addresses list to u64 array
inetpeer: fix uninit-value in inet_getpeer
dp83640: Ensure against premature access to PHY registers after reset
devlink: convert occ_get op to separate registration
ARM: dts: ls1021a: Specify TBIPA register address
net/fsl_pq_mdio: Allow explicit specification of TBIPA address
ibmvnic: Do not reset CRQ for Mobility driver resets
ibmvnic: Fix failover case for non-redundant configuration
ibmvnic: Fix reset scheduler error handling
ibmvnic: Zero used TX descriptor counter on reset
ibmvnic: Fix DMA mapping mistakes
tipc: use the right skb in tipc_sk_fill_sock_diag()
sctp: sctp_sockaddr_af must check minimal addr length for AF_INET6
net: dsa: Discard frames from unused ports
sctp: do not leak kernel memory to user space
soreuseport: initialise timewait reuseport field
ipv4: fix uninit-value in ip_route_output_key_hash_rcu()
dccp: initialize ireq->ir_mark
net: fix uninit-value in __hw_addr_add_ex()
...

+554 -299
+5 -1
Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
··· 6 6 of how to define a PHY. 7 7 8 8 Required properties: 9 - - reg : Offset and length of the register set for the device 9 + - reg : Offset and length of the register set for the device, and optionally 10 + the offset and length of the TBIPA register (TBI PHY address 11 + register). If TBIPA register is not specified, the driver will 12 + attempt to infer it from the register set specified (your mileage may 13 + vary). 10 14 - compatible : Should define the compatible device type for the 11 15 mdio. Currently supported strings/devices are: 12 16 - "fsl,gianfar-tbi"
+2 -1
arch/arm/boot/dts/ls1021a.dtsi
··· 587 587 device_type = "mdio"; 588 588 #address-cells = <1>; 589 589 #size-cells = <0>; 590 - reg = <0x0 0x2d24000 0x0 0x4000>; 590 + reg = <0x0 0x2d24000 0x0 0x4000>, 591 + <0x0 0x2d10030 0x0 0x4>; 591 592 }; 592 593 593 594 ptp_clock@2d10e00 {
+4 -4
crypto/af_alg.c
··· 158 158 void *private; 159 159 int err; 160 160 161 - /* If caller uses non-allowed flag, return error. */ 162 - if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed)) 163 - return -EINVAL; 164 - 165 161 if (sock->state == SS_CONNECTED) 166 162 return -EINVAL; 167 163 168 164 if (addr_len < sizeof(*sa)) 165 + return -EINVAL; 166 + 167 + /* If caller uses non-allowed flag, return error. */ 168 + if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed)) 169 169 return -EINVAL; 170 170 171 171 sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
+1 -6
drivers/net/ethernet/cavium/thunder/nic.h
··· 265 265 266 266 struct cavium_ptp; 267 267 268 - struct xcast_addr { 269 - struct list_head list; 270 - u64 addr; 271 - }; 272 - 273 268 struct xcast_addr_list { 274 - struct list_head list; 275 269 int count; 270 + u64 mc[]; 276 271 }; 277 272 278 273 struct nicvf_work {
+10 -18
drivers/net/ethernet/cavium/thunder/nicvf_main.c
··· 1929 1929 work.work); 1930 1930 struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work); 1931 1931 union nic_mbx mbx = {}; 1932 - struct xcast_addr *xaddr, *next; 1932 + int idx; 1933 1933 1934 1934 if (!vf_work) 1935 1935 return; ··· 1956 1956 /* check if we have any specific MACs to be added to PF DMAC filter */ 1957 1957 if (vf_work->mc) { 1958 1958 /* now go through kernel list of MACs and add them one by one */ 1959 - list_for_each_entry_safe(xaddr, next, 1960 - &vf_work->mc->list, list) { 1959 + for (idx = 0; idx < vf_work->mc->count; idx++) { 1961 1960 mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST; 1962 - mbx.xcast.data.mac = xaddr->addr; 1961 + mbx.xcast.data.mac = vf_work->mc->mc[idx]; 1963 1962 nicvf_send_msg_to_pf(nic, &mbx); 1964 - 1965 - /* after receiving ACK from PF release memory */ 1966 - list_del(&xaddr->list); 1967 - kfree(xaddr); 1968 - vf_work->mc->count--; 1969 1963 } 1970 1964 kfree(vf_work->mc); 1971 1965 } ··· 1990 1996 mode |= BGX_XCAST_MCAST_FILTER; 1991 1997 /* here we need to copy mc addrs */ 1992 1998 if (netdev_mc_count(netdev)) { 1993 - struct xcast_addr *xaddr; 1994 - 1995 - mc_list = kmalloc(sizeof(*mc_list), GFP_ATOMIC); 1996 - INIT_LIST_HEAD(&mc_list->list); 1999 + mc_list = kmalloc(offsetof(typeof(*mc_list), 2000 + mc[netdev_mc_count(netdev)]), 2001 + GFP_ATOMIC); 2002 + if (unlikely(!mc_list)) 2003 + return; 2004 + mc_list->count = 0; 1997 2005 netdev_hw_addr_list_for_each(ha, &netdev->mc) { 1998 - xaddr = kmalloc(sizeof(*xaddr), 1999 - GFP_ATOMIC); 2000 - xaddr->addr = 2006 + mc_list->mc[mc_list->count] = 2001 2007 ether_addr_to_u64(ha->addr); 2002 - list_add_tail(&xaddr->list, 2003 - &mc_list->list); 2004 2008 mc_list->count++; 2005 2009 } 2006 2010 }
+34 -16
drivers/net/ethernet/freescale/fsl_pq_mdio.c
··· 377 377 }; 378 378 MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match); 379 379 380 + static void set_tbipa(const u32 tbipa_val, struct platform_device *pdev, 381 + uint32_t __iomem * (*get_tbipa)(void __iomem *), 382 + void __iomem *reg_map, struct resource *reg_res) 383 + { 384 + struct device_node *np = pdev->dev.of_node; 385 + uint32_t __iomem *tbipa; 386 + bool tbipa_mapped; 387 + 388 + tbipa = of_iomap(np, 1); 389 + if (tbipa) { 390 + tbipa_mapped = true; 391 + } else { 392 + tbipa_mapped = false; 393 + tbipa = (*get_tbipa)(reg_map); 394 + 395 + /* 396 + * Add consistency check to make sure TBI is contained within 397 + * the mapped range (not because we would get a segfault, 398 + * rather to catch bugs in computing TBI address). Print error 399 + * message but continue anyway. 400 + */ 401 + if ((void *)tbipa > reg_map + resource_size(reg_res) - 4) 402 + dev_err(&pdev->dev, "invalid register map (should be at least 0x%04zx to contain TBI address)\n", 403 + ((void *)tbipa - reg_map) + 4); 404 + } 405 + 406 + iowrite32be(be32_to_cpu(tbipa_val), tbipa); 407 + 408 + if (tbipa_mapped) 409 + iounmap(tbipa); 410 + } 411 + 380 412 static int fsl_pq_mdio_probe(struct platform_device *pdev) 381 413 { 382 414 const struct of_device_id *id = ··· 482 450 483 451 if (tbi) { 484 452 const u32 *prop = of_get_property(tbi, "reg", NULL); 485 - uint32_t __iomem *tbipa; 486 - 487 453 if (!prop) { 488 454 dev_err(&pdev->dev, 489 455 "missing 'reg' property in node %pOF\n", ··· 489 459 err = -EBUSY; 490 460 goto error; 491 461 } 492 - 493 - tbipa = data->get_tbipa(priv->map); 494 - 495 - /* 496 - * Add consistency check to make sure TBI is contained 497 - * within the mapped range (not because we would get a 498 - * segfault, rather to catch bugs in computing TBI 499 - * address). Print error message but continue anyway. 
500 - */ 501 - if ((void *)tbipa > priv->map + resource_size(&res) - 4) 502 - dev_err(&pdev->dev, "invalid register map (should be at least 0x%04zx to contain TBI address)\n", 503 - ((void *)tbipa - priv->map) + 4); 504 - 505 - iowrite32be(be32_to_cpup(prop), tbipa); 462 + set_tbipa(*prop, pdev, 463 + data->get_tbipa, priv->map, &res); 506 464 } 507 465 } 508 466
+97 -49
drivers/net/ethernet/ibm/ibmvnic.c
··· 118 118 static int ibmvnic_init(struct ibmvnic_adapter *); 119 119 static void release_crq_queue(struct ibmvnic_adapter *); 120 120 static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p); 121 + static int init_crq_queue(struct ibmvnic_adapter *adapter); 121 122 122 123 struct ibmvnic_stat { 123 124 char name[ETH_GSTRING_LEN]; ··· 321 320 dev_info(dev, "replenish pools failure\n"); 322 321 pool->free_map[pool->next_free] = index; 323 322 pool->rx_buff[index].skb = NULL; 324 - if (!dma_mapping_error(dev, dma_addr)) 325 - dma_unmap_single(dev, dma_addr, pool->buff_size, 326 - DMA_FROM_DEVICE); 327 323 328 324 dev_kfree_skb_any(skb); 329 325 adapter->replenish_add_buff_failure++; 330 326 atomic_add(buffers_added, &pool->available); 331 327 332 - if (lpar_rc == H_CLOSED) { 328 + if (lpar_rc == H_CLOSED || adapter->failover_pending) { 333 329 /* Disable buffer pool replenishment and report carrier off if 334 - * queue is closed. Firmware guarantees that a signal will 335 - * be sent to the driver, triggering a reset. 330 + * queue is closed or pending failover. 331 + * Firmware guarantees that a signal will be sent to the 332 + * driver, triggering a reset. 336 333 */ 337 334 deactivate_rx_pools(adapter); 338 335 netif_carrier_off(adapter->netdev); ··· 1070 1071 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1071 1072 int rc; 1072 1073 1074 + /* If device failover is pending, just set device state and return. 1075 + * Device operation will be handled by reset routine. 
1076 + */ 1077 + if (adapter->failover_pending) { 1078 + adapter->state = VNIC_OPEN; 1079 + return 0; 1080 + } 1081 + 1073 1082 mutex_lock(&adapter->reset_lock); 1074 1083 1075 1084 if (adapter->state != VNIC_CLOSED) { ··· 1225 1218 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); 1226 1219 if (rc) 1227 1220 return rc; 1228 - ibmvnic_cleanup(netdev); 1229 1221 adapter->state = VNIC_CLOSED; 1230 1222 return 0; 1231 1223 } ··· 1234 1228 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1235 1229 int rc; 1236 1230 1231 + /* If device failover is pending, just set device state and return. 1232 + * Device operation will be handled by reset routine. 1233 + */ 1234 + if (adapter->failover_pending) { 1235 + adapter->state = VNIC_CLOSED; 1236 + return 0; 1237 + } 1238 + 1237 1239 mutex_lock(&adapter->reset_lock); 1238 1240 rc = __ibmvnic_close(netdev); 1241 + ibmvnic_cleanup(netdev); 1239 1242 mutex_unlock(&adapter->reset_lock); 1240 1243 1241 1244 return rc; ··· 1577 1562 dev_kfree_skb_any(skb); 1578 1563 tx_buff->skb = NULL; 1579 1564 1580 - if (lpar_rc == H_CLOSED) { 1581 - /* Disable TX and report carrier off if queue is closed. 1565 + if (lpar_rc == H_CLOSED || adapter->failover_pending) { 1566 + /* Disable TX and report carrier off if queue is closed 1567 + * or pending failover. 1582 1568 * Firmware guarantees that a signal will be sent to the 1583 1569 * driver, triggering a reset or some other action. 
1584 1570 */ ··· 1727 1711 old_num_rx_queues = adapter->req_rx_queues; 1728 1712 old_num_tx_queues = adapter->req_tx_queues; 1729 1713 1730 - if (rwi->reset_reason == VNIC_RESET_MOBILITY) { 1731 - rc = ibmvnic_reenable_crq_queue(adapter); 1732 - if (rc) 1733 - return 0; 1734 - ibmvnic_cleanup(netdev); 1735 - } else if (rwi->reset_reason == VNIC_RESET_FAILOVER) { 1736 - ibmvnic_cleanup(netdev); 1737 - } else { 1714 + ibmvnic_cleanup(netdev); 1715 + 1716 + if (adapter->reset_reason != VNIC_RESET_MOBILITY && 1717 + adapter->reset_reason != VNIC_RESET_FAILOVER) { 1738 1718 rc = __ibmvnic_close(netdev); 1739 1719 if (rc) 1740 1720 return rc; ··· 1748 1736 * we are coming from the probed state. 1749 1737 */ 1750 1738 adapter->state = VNIC_PROBED; 1739 + 1740 + if (adapter->wait_for_reset) { 1741 + rc = init_crq_queue(adapter); 1742 + } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) { 1743 + rc = ibmvnic_reenable_crq_queue(adapter); 1744 + release_sub_crqs(adapter, 1); 1745 + } else { 1746 + rc = ibmvnic_reset_crq(adapter); 1747 + if (!rc) 1748 + rc = vio_enable_interrupts(adapter->vdev); 1749 + } 1750 + 1751 + if (rc) { 1752 + netdev_err(adapter->netdev, 1753 + "Couldn't initialize crq. 
rc=%d\n", rc); 1754 + return rc; 1755 + } 1751 1756 1752 1757 rc = ibmvnic_init(adapter); 1753 1758 if (rc) ··· 1907 1878 mutex_unlock(&adapter->reset_lock); 1908 1879 } 1909 1880 1910 - static void ibmvnic_reset(struct ibmvnic_adapter *adapter, 1911 - enum ibmvnic_reset_reason reason) 1881 + static int ibmvnic_reset(struct ibmvnic_adapter *adapter, 1882 + enum ibmvnic_reset_reason reason) 1912 1883 { 1913 1884 struct ibmvnic_rwi *rwi, *tmp; 1914 1885 struct net_device *netdev = adapter->netdev; 1915 1886 struct list_head *entry; 1887 + int ret; 1916 1888 1917 1889 if (adapter->state == VNIC_REMOVING || 1918 - adapter->state == VNIC_REMOVED) { 1919 - netdev_dbg(netdev, "Adapter removing, skipping reset\n"); 1920 - return; 1890 + adapter->state == VNIC_REMOVED || 1891 + adapter->failover_pending) { 1892 + ret = EBUSY; 1893 + netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n"); 1894 + goto err; 1921 1895 } 1922 1896 1923 1897 if (adapter->state == VNIC_PROBING) { 1924 1898 netdev_warn(netdev, "Adapter reset during probe\n"); 1925 - adapter->init_done_rc = EAGAIN; 1926 - return; 1899 + ret = adapter->init_done_rc = EAGAIN; 1900 + goto err; 1927 1901 } 1928 1902 1929 1903 mutex_lock(&adapter->rwi_lock); ··· 1936 1904 if (tmp->reset_reason == reason) { 1937 1905 netdev_dbg(netdev, "Skipping matching reset\n"); 1938 1906 mutex_unlock(&adapter->rwi_lock); 1939 - return; 1907 + ret = EBUSY; 1908 + goto err; 1940 1909 } 1941 1910 } 1942 1911 ··· 1945 1912 if (!rwi) { 1946 1913 mutex_unlock(&adapter->rwi_lock); 1947 1914 ibmvnic_close(netdev); 1948 - return; 1915 + ret = ENOMEM; 1916 + goto err; 1949 1917 } 1950 1918 1951 1919 rwi->reset_reason = reason; ··· 1955 1921 1956 1922 netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason); 1957 1923 schedule_work(&adapter->ibmvnic_reset); 1924 + 1925 + return 0; 1926 + err: 1927 + if (adapter->wait_for_reset) 1928 + adapter->wait_for_reset = false; 1929 + return -ret; 1958 1930 } 1959 1931 
1960 1932 static void ibmvnic_tx_timeout(struct net_device *dev) ··· 2095 2055 2096 2056 static int wait_for_reset(struct ibmvnic_adapter *adapter) 2097 2057 { 2058 + int rc, ret; 2059 + 2098 2060 adapter->fallback.mtu = adapter->req_mtu; 2099 2061 adapter->fallback.rx_queues = adapter->req_rx_queues; 2100 2062 adapter->fallback.tx_queues = adapter->req_tx_queues; ··· 2104 2062 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq; 2105 2063 2106 2064 init_completion(&adapter->reset_done); 2107 - ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); 2108 2065 adapter->wait_for_reset = true; 2066 + rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); 2067 + if (rc) 2068 + return rc; 2109 2069 wait_for_completion(&adapter->reset_done); 2110 2070 2071 + ret = 0; 2111 2072 if (adapter->reset_done_rc) { 2073 + ret = -EIO; 2112 2074 adapter->desired.mtu = adapter->fallback.mtu; 2113 2075 adapter->desired.rx_queues = adapter->fallback.rx_queues; 2114 2076 adapter->desired.tx_queues = adapter->fallback.tx_queues; ··· 2120 2074 adapter->desired.tx_entries = adapter->fallback.tx_entries; 2121 2075 2122 2076 init_completion(&adapter->reset_done); 2123 - ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); 2077 + adapter->wait_for_reset = true; 2078 + rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); 2079 + if (rc) 2080 + return ret; 2124 2081 wait_for_completion(&adapter->reset_done); 2125 2082 } 2126 2083 adapter->wait_for_reset = false; 2127 2084 2128 - return adapter->reset_done_rc; 2085 + return ret; 2129 2086 } 2130 2087 2131 2088 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu) ··· 2413 2364 } 2414 2365 2415 2366 memset(scrq->msgs, 0, 4 * PAGE_SIZE); 2367 + atomic_set(&scrq->used, 0); 2416 2368 scrq->cur = 0; 2417 2369 2418 2370 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, ··· 2624 2574 union sub_crq *next; 2625 2575 int index; 2626 2576 int i, j; 2627 - u8 first; 2577 + u8 *first; 2628 2578 2629 2579 restart_loop: 2630 
2580 while (pending_scrq(adapter, scrq)) { ··· 2655 2605 txbuff->data_dma[j] = 0; 2656 2606 } 2657 2607 /* if sub_crq was sent indirectly */ 2658 - first = txbuff->indir_arr[0].generic.first; 2659 - if (first == IBMVNIC_CRQ_CMD) { 2608 + first = &txbuff->indir_arr[0].generic.first; 2609 + if (*first == IBMVNIC_CRQ_CMD) { 2660 2610 dma_unmap_single(dev, txbuff->indir_dma, 2661 2611 sizeof(txbuff->indir_arr), 2662 2612 DMA_TO_DEVICE); 2613 + *first = 0; 2663 2614 } 2664 2615 2665 2616 if (txbuff->last_frag) { ··· 3933 3882 int i; 3934 3883 3935 3884 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz, 3936 - DMA_BIDIRECTIONAL); 3885 + DMA_TO_DEVICE); 3937 3886 dma_unmap_single(dev, adapter->login_rsp_buf_token, 3938 - adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL); 3887 + adapter->login_rsp_buf_sz, DMA_FROM_DEVICE); 3939 3888 3940 3889 /* If the number of queues requested can't be allocated by the 3941 3890 * server, the login response will return with code 1. We will need ··· 4195 4144 case IBMVNIC_CRQ_INIT: 4196 4145 dev_info(dev, "Partner initialized\n"); 4197 4146 adapter->from_passive_init = true; 4147 + adapter->failover_pending = false; 4198 4148 complete(&adapter->init_done); 4149 + ibmvnic_reset(adapter, VNIC_RESET_FAILOVER); 4199 4150 break; 4200 4151 case IBMVNIC_CRQ_INIT_COMPLETE: 4201 4152 dev_info(dev, "Partner initialization complete\n"); ··· 4214 4161 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY); 4215 4162 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) { 4216 4163 dev_info(dev, "Backing device failover detected\n"); 4217 - ibmvnic_reset(adapter, VNIC_RESET_FAILOVER); 4164 + adapter->failover_pending = true; 4218 4165 } else { 4219 4166 /* The adapter lost the connection */ 4220 4167 dev_err(dev, "Virtual Adapter failed (rc=%d)\n", ··· 4514 4461 u64 old_num_rx_queues, old_num_tx_queues; 4515 4462 int rc; 4516 4463 4517 - if (adapter->resetting && !adapter->wait_for_reset) { 4518 - rc = ibmvnic_reset_crq(adapter); 4519 - if (!rc) 
4520 - rc = vio_enable_interrupts(adapter->vdev); 4521 - } else { 4522 - rc = init_crq_queue(adapter); 4523 - } 4524 - 4525 - if (rc) { 4526 - dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc); 4527 - return rc; 4528 - } 4529 - 4530 4464 adapter->from_passive_init = false; 4531 4465 4532 4466 old_num_rx_queues = adapter->req_rx_queues; ··· 4538 4498 return -1; 4539 4499 } 4540 4500 4541 - if (adapter->resetting && !adapter->wait_for_reset) { 4501 + if (adapter->resetting && !adapter->wait_for_reset && 4502 + adapter->reset_reason != VNIC_RESET_MOBILITY) { 4542 4503 if (adapter->req_rx_queues != old_num_rx_queues || 4543 4504 adapter->req_tx_queues != old_num_tx_queues) { 4544 4505 release_sub_crqs(adapter, 0); ··· 4627 4586 adapter->mac_change_pending = false; 4628 4587 4629 4588 do { 4589 + rc = init_crq_queue(adapter); 4590 + if (rc) { 4591 + dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", 4592 + rc); 4593 + goto ibmvnic_init_fail; 4594 + } 4595 + 4630 4596 rc = ibmvnic_init(adapter); 4631 4597 if (rc && rc != EAGAIN) 4632 4598 goto ibmvnic_init_fail;
+1
drivers/net/ethernet/ibm/ibmvnic.h
··· 1108 1108 bool napi_enabled, from_passive_init; 1109 1109 1110 1110 bool mac_change_pending; 1111 + bool failover_pending; 1111 1112 1112 1113 struct ibmvnic_tunables desired; 1113 1114 struct ibmvnic_tunables fallback;
+3 -1
drivers/net/ethernet/intel/ice/ice_common.c
··· 468 468 mac_buf_len = sizeof(struct ice_aqc_manage_mac_read_resp); 469 469 mac_buf = devm_kzalloc(ice_hw_to_dev(hw), mac_buf_len, GFP_KERNEL); 470 470 471 - if (!mac_buf) 471 + if (!mac_buf) { 472 + status = ICE_ERR_NO_MEMORY; 472 473 goto err_unroll_fltr_mgmt_struct; 474 + } 473 475 474 476 status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL); 475 477 devm_kfree(ice_hw_to_dev(hw), mac_buf);
+2 -2
drivers/net/ethernet/intel/ice/ice_ethtool.c
··· 156 156 157 157 static int ice_get_regs_len(struct net_device __always_unused *netdev) 158 158 { 159 - return ARRAY_SIZE(ice_regs_dump_list); 159 + return sizeof(ice_regs_dump_list); 160 160 } 161 161 162 162 static void ··· 170 170 171 171 regs->version = 1; 172 172 173 - for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list) / sizeof(u32); ++i) 173 + for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i) 174 174 regs_buf[i] = rd32(hw, ice_regs_dump_list[i]); 175 175 } 176 176
+1 -1
drivers/net/ethernet/marvell/mvpp2.c
··· 1604 1604 { 1605 1605 int i; 1606 1606 1607 - if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) 1607 + if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1) 1608 1608 return -EINVAL; 1609 1609 1610 1610 memset(pe, 0, sizeof(*pe));
+4 -20
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
··· 3805 3805 }, 3806 3806 }; 3807 3807 3808 - static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink) 3809 - { 3810 - struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 3811 - struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3812 - 3813 - return mlxsw_sp_kvdl_occ_get(mlxsw_sp); 3814 - } 3815 - 3816 - static const struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = { 3817 - .occ_get = mlxsw_sp_resource_kvd_linear_occ_get, 3818 - }; 3819 - 3820 3808 static void 3821 3809 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, 3822 3810 struct devlink_resource_size_params *kvd_size_params, ··· 3865 3877 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 3866 3878 kvd_size, MLXSW_SP_RESOURCE_KVD, 3867 3879 DEVLINK_RESOURCE_ID_PARENT_TOP, 3868 - &kvd_size_params, 3869 - NULL); 3880 + &kvd_size_params); 3870 3881 if (err) 3871 3882 return err; 3872 3883 ··· 3874 3887 linear_size, 3875 3888 MLXSW_SP_RESOURCE_KVD_LINEAR, 3876 3889 MLXSW_SP_RESOURCE_KVD, 3877 - &linear_size_params, 3878 - &mlxsw_sp_resource_kvd_linear_ops); 3890 + &linear_size_params); 3879 3891 if (err) 3880 3892 return err; 3881 3893 ··· 3891 3905 double_size, 3892 3906 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 3893 3907 MLXSW_SP_RESOURCE_KVD, 3894 - &hash_double_size_params, 3895 - NULL); 3908 + &hash_double_size_params); 3896 3909 if (err) 3897 3910 return err; 3898 3911 ··· 3900 3915 single_size, 3901 3916 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 3902 3917 MLXSW_SP_RESOURCE_KVD, 3903 - &hash_single_size_params, 3904 - NULL); 3918 + &hash_single_size_params); 3905 3919 if (err) 3906 3920 return err; 3907 3921
-1
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
··· 442 442 int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp, 443 443 unsigned int entry_count, 444 444 unsigned int *p_alloc_size); 445 - u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp); 446 445 int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core); 447 446 448 447 struct mlxsw_sp_acl_rule_info {
+39 -28
drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
··· 315 315 return occ; 316 316 } 317 317 318 - u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp) 318 + static u64 mlxsw_sp_kvdl_occ_get(void *priv) 319 319 { 320 + const struct mlxsw_sp *mlxsw_sp = priv; 320 321 u64 occ = 0; 321 322 int i; 322 323 ··· 327 326 return occ; 328 327 } 329 328 330 - static u64 mlxsw_sp_kvdl_single_occ_get(struct devlink *devlink) 329 + static u64 mlxsw_sp_kvdl_single_occ_get(void *priv) 331 330 { 332 - struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 333 - struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 331 + const struct mlxsw_sp *mlxsw_sp = priv; 334 332 struct mlxsw_sp_kvdl_part *part; 335 333 336 334 part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_SINGLE]; 337 335 return mlxsw_sp_kvdl_part_occ(part); 338 336 } 339 337 340 - static u64 mlxsw_sp_kvdl_chunks_occ_get(struct devlink *devlink) 338 + static u64 mlxsw_sp_kvdl_chunks_occ_get(void *priv) 341 339 { 342 - struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 343 - struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 340 + const struct mlxsw_sp *mlxsw_sp = priv; 344 341 struct mlxsw_sp_kvdl_part *part; 345 342 346 343 part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_CHUNKS]; 347 344 return mlxsw_sp_kvdl_part_occ(part); 348 345 } 349 346 350 - static u64 mlxsw_sp_kvdl_large_chunks_occ_get(struct devlink *devlink) 347 + static u64 mlxsw_sp_kvdl_large_chunks_occ_get(void *priv) 351 348 { 352 - struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 353 - struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 349 + const struct mlxsw_sp *mlxsw_sp = priv; 354 350 struct mlxsw_sp_kvdl_part *part; 355 351 356 352 part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS]; 357 353 return mlxsw_sp_kvdl_part_occ(part); 358 354 } 359 - 360 - static const struct devlink_resource_ops mlxsw_sp_kvdl_single_ops = { 361 - .occ_get = mlxsw_sp_kvdl_single_occ_get, 362 - }; 363 - 364 - static const struct devlink_resource_ops 
mlxsw_sp_kvdl_chunks_ops = { 365 - .occ_get = mlxsw_sp_kvdl_chunks_occ_get, 366 - }; 367 - 368 - static const struct devlink_resource_ops mlxsw_sp_kvdl_chunks_large_ops = { 369 - .occ_get = mlxsw_sp_kvdl_large_chunks_occ_get, 370 - }; 371 355 372 356 int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core) 373 357 { ··· 372 386 MLXSW_SP_KVDL_SINGLE_SIZE, 373 387 MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE, 374 388 MLXSW_SP_RESOURCE_KVD_LINEAR, 375 - &size_params, 376 - &mlxsw_sp_kvdl_single_ops); 389 + &size_params); 377 390 if (err) 378 391 return err; 379 392 ··· 383 398 MLXSW_SP_KVDL_CHUNKS_SIZE, 384 399 MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS, 385 400 MLXSW_SP_RESOURCE_KVD_LINEAR, 386 - &size_params, 387 - &mlxsw_sp_kvdl_chunks_ops); 401 + &size_params); 388 402 if (err) 389 403 return err; 390 404 ··· 394 410 MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE, 395 411 MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS, 396 412 MLXSW_SP_RESOURCE_KVD_LINEAR, 397 - &size_params, 398 - &mlxsw_sp_kvdl_chunks_large_ops); 413 + &size_params); 399 414 return err; 400 415 } 401 416 402 417 int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp) 403 418 { 419 + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); 404 420 struct mlxsw_sp_kvdl *kvdl; 405 421 int err; 406 422 ··· 413 429 if (err) 414 430 goto err_kvdl_parts_init; 415 431 432 + devlink_resource_occ_get_register(devlink, 433 + MLXSW_SP_RESOURCE_KVD_LINEAR, 434 + mlxsw_sp_kvdl_occ_get, 435 + mlxsw_sp); 436 + devlink_resource_occ_get_register(devlink, 437 + MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE, 438 + mlxsw_sp_kvdl_single_occ_get, 439 + mlxsw_sp); 440 + devlink_resource_occ_get_register(devlink, 441 + MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS, 442 + mlxsw_sp_kvdl_chunks_occ_get, 443 + mlxsw_sp); 444 + devlink_resource_occ_get_register(devlink, 445 + MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS, 446 + mlxsw_sp_kvdl_large_chunks_occ_get, 447 + mlxsw_sp); 448 + 416 449 return 0; 417 450 418 451 err_kvdl_parts_init: ··· 439 438 440 439 void 
mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp) 441 440 { 441 + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); 442 + 443 + devlink_resource_occ_get_unregister(devlink, 444 + MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS); 445 + devlink_resource_occ_get_unregister(devlink, 446 + MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS); 447 + devlink_resource_occ_get_unregister(devlink, 448 + MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE); 449 + devlink_resource_occ_get_unregister(devlink, 450 + MLXSW_SP_RESOURCE_KVD_LINEAR); 442 451 mlxsw_sp_kvdl_parts_fini(mlxsw_sp); 443 452 kfree(mlxsw_sp->kvdl); 444 453 }
+44 -16
drivers/net/hyperv/netvsc.c
··· 109 109 call_rcu(&nvdev->rcu, free_netvsc_device); 110 110 } 111 111 112 - static void netvsc_revoke_buf(struct hv_device *device, 113 - struct netvsc_device *net_device) 112 + static void netvsc_revoke_recv_buf(struct hv_device *device, 113 + struct netvsc_device *net_device, 114 + struct net_device *ndev) 114 115 { 115 116 struct nvsp_message *revoke_packet; 116 - struct net_device *ndev = hv_get_drvdata(device); 117 117 int ret; 118 118 119 119 /* ··· 157 157 } 158 158 net_device->recv_section_cnt = 0; 159 159 } 160 + } 161 + 162 + static void netvsc_revoke_send_buf(struct hv_device *device, 163 + struct netvsc_device *net_device, 164 + struct net_device *ndev) 165 + { 166 + struct nvsp_message *revoke_packet; 167 + int ret; 160 168 161 169 /* Deal with the send buffer we may have setup. 162 170 * If we got a send section size, it means we received a ··· 210 202 } 211 203 } 212 204 213 - static void netvsc_teardown_gpadl(struct hv_device *device, 214 - struct netvsc_device *net_device) 205 + static void netvsc_teardown_recv_gpadl(struct hv_device *device, 206 + struct netvsc_device *net_device, 207 + struct net_device *ndev) 215 208 { 216 - struct net_device *ndev = hv_get_drvdata(device); 217 209 int ret; 218 210 219 211 if (net_device->recv_buf_gpadl_handle) { ··· 230 222 } 231 223 net_device->recv_buf_gpadl_handle = 0; 232 224 } 225 + } 226 + 227 + static void netvsc_teardown_send_gpadl(struct hv_device *device, 228 + struct netvsc_device *net_device, 229 + struct net_device *ndev) 230 + { 231 + int ret; 233 232 234 233 if (net_device->send_buf_gpadl_handle) { 235 234 ret = vmbus_teardown_gpadl(device->channel, ··· 452 437 goto exit; 453 438 454 439 cleanup: 455 - netvsc_revoke_buf(device, net_device); 456 - netvsc_teardown_gpadl(device, net_device); 440 + netvsc_revoke_recv_buf(device, net_device, ndev); 441 + netvsc_revoke_send_buf(device, net_device, ndev); 442 + netvsc_teardown_recv_gpadl(device, net_device, ndev); 443 + 
netvsc_teardown_send_gpadl(device, net_device, ndev); 457 444 458 445 exit: 459 446 return ret; ··· 474 457 init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT; 475 458 init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver; 476 459 init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver; 477 - 478 460 trace_nvsp_send(ndev, init_packet); 479 461 480 462 /* Send the init request */ ··· 591 575 = rtnl_dereference(net_device_ctx->nvdev); 592 576 int i; 593 577 594 - netvsc_revoke_buf(device, net_device); 578 + /* 579 + * Revoke receive buffer. If host is pre-Win2016 then tear down 580 + * receive buffer GPADL. Do the same for send buffer. 581 + */ 582 + netvsc_revoke_recv_buf(device, net_device, ndev); 583 + if (vmbus_proto_version < VERSION_WIN10) 584 + netvsc_teardown_recv_gpadl(device, net_device, ndev); 585 + 586 + netvsc_revoke_send_buf(device, net_device, ndev); 587 + if (vmbus_proto_version < VERSION_WIN10) 588 + netvsc_teardown_send_gpadl(device, net_device, ndev); 595 589 596 590 RCU_INIT_POINTER(net_device_ctx->nvdev, NULL); 597 591 ··· 615 589 */ 616 590 netdev_dbg(ndev, "net device safe to remove\n"); 617 591 618 - /* older versions require that buffer be revoked before close */ 619 - if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_4) 620 - netvsc_teardown_gpadl(device, net_device); 621 - 622 592 /* Now, we can close the channel safely */ 623 593 vmbus_close(device->channel); 624 594 625 - if (net_device->nvsp_version >= NVSP_PROTOCOL_VERSION_4) 626 - netvsc_teardown_gpadl(device, net_device); 595 + /* 596 + * If host is Win2016 or higher then we do the GPADL tear down 597 + * here after VMBus is closed. 598 + */ 599 + if (vmbus_proto_version >= VERSION_WIN10) { 600 + netvsc_teardown_recv_gpadl(device, net_device, ndev); 601 + netvsc_teardown_send_gpadl(device, net_device, ndev); 602 + } 627 603 628 604 /* Release all resources */ 629 605 free_netvsc_device_rcu(net_device);
+31 -34
drivers/net/netdevsim/devlink.c
··· 30 30 31 31 /* IPv4 32 32 */ 33 - static u64 nsim_ipv4_fib_resource_occ_get(struct devlink *devlink) 33 + static u64 nsim_ipv4_fib_resource_occ_get(void *priv) 34 34 { 35 - struct net *net = nsim_devlink_net(devlink); 35 + struct net *net = priv; 36 36 37 37 return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, false); 38 38 } 39 39 40 - static struct devlink_resource_ops nsim_ipv4_fib_res_ops = { 41 - .occ_get = nsim_ipv4_fib_resource_occ_get, 42 - }; 43 - 44 - static u64 nsim_ipv4_fib_rules_res_occ_get(struct devlink *devlink) 40 + static u64 nsim_ipv4_fib_rules_res_occ_get(void *priv) 45 41 { 46 - struct net *net = nsim_devlink_net(devlink); 42 + struct net *net = priv; 47 43 48 44 return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, false); 49 45 } 50 46 51 - static struct devlink_resource_ops nsim_ipv4_fib_rules_res_ops = { 52 - .occ_get = nsim_ipv4_fib_rules_res_occ_get, 53 - }; 54 - 55 47 /* IPv6 56 48 */ 57 - static u64 nsim_ipv6_fib_resource_occ_get(struct devlink *devlink) 49 + static u64 nsim_ipv6_fib_resource_occ_get(void *priv) 58 50 { 59 - struct net *net = nsim_devlink_net(devlink); 51 + struct net *net = priv; 60 52 61 53 return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, false); 62 54 } 63 55 64 - static struct devlink_resource_ops nsim_ipv6_fib_res_ops = { 65 - .occ_get = nsim_ipv6_fib_resource_occ_get, 66 - }; 67 - 68 - static u64 nsim_ipv6_fib_rules_res_occ_get(struct devlink *devlink) 56 + static u64 nsim_ipv6_fib_rules_res_occ_get(void *priv) 69 57 { 70 - struct net *net = nsim_devlink_net(devlink); 58 + struct net *net = priv; 71 59 72 60 return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, false); 73 61 } 74 - 75 - static struct devlink_resource_ops nsim_ipv6_fib_rules_res_ops = { 76 - .occ_get = nsim_ipv6_fib_rules_res_occ_get, 77 - }; 78 62 79 63 static int devlink_resources_register(struct devlink *devlink) 80 64 { ··· 75 91 err = devlink_resource_register(devlink, "IPv4", (u64)-1, 76 92 NSIM_RESOURCE_IPV4, 77 93 
DEVLINK_RESOURCE_ID_PARENT_TOP, 78 - &params, NULL); 94 + &params); 79 95 if (err) { 80 96 pr_err("Failed to register IPv4 top resource\n"); 81 97 goto out; ··· 84 100 n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, true); 85 101 err = devlink_resource_register(devlink, "fib", n, 86 102 NSIM_RESOURCE_IPV4_FIB, 87 - NSIM_RESOURCE_IPV4, 88 - &params, &nsim_ipv4_fib_res_ops); 103 + NSIM_RESOURCE_IPV4, &params); 89 104 if (err) { 90 105 pr_err("Failed to register IPv4 FIB resource\n"); 91 106 return err; ··· 93 110 n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, true); 94 111 err = devlink_resource_register(devlink, "fib-rules", n, 95 112 NSIM_RESOURCE_IPV4_FIB_RULES, 96 - NSIM_RESOURCE_IPV4, 97 - &params, &nsim_ipv4_fib_rules_res_ops); 113 + NSIM_RESOURCE_IPV4, &params); 98 114 if (err) { 99 115 pr_err("Failed to register IPv4 FIB rules resource\n"); 100 116 return err; ··· 103 121 err = devlink_resource_register(devlink, "IPv6", (u64)-1, 104 122 NSIM_RESOURCE_IPV6, 105 123 DEVLINK_RESOURCE_ID_PARENT_TOP, 106 - &params, NULL); 124 + &params); 107 125 if (err) { 108 126 pr_err("Failed to register IPv6 top resource\n"); 109 127 goto out; ··· 112 130 n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, true); 113 131 err = devlink_resource_register(devlink, "fib", n, 114 132 NSIM_RESOURCE_IPV6_FIB, 115 - NSIM_RESOURCE_IPV6, 116 - &params, &nsim_ipv6_fib_res_ops); 133 + NSIM_RESOURCE_IPV6, &params); 117 134 if (err) { 118 135 pr_err("Failed to register IPv6 FIB resource\n"); 119 136 return err; ··· 121 140 n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, true); 122 141 err = devlink_resource_register(devlink, "fib-rules", n, 123 142 NSIM_RESOURCE_IPV6_FIB_RULES, 124 - NSIM_RESOURCE_IPV6, 125 - &params, &nsim_ipv6_fib_rules_res_ops); 143 + NSIM_RESOURCE_IPV6, &params); 126 144 if (err) { 127 145 pr_err("Failed to register IPv6 FIB rules resource\n"); 128 146 return err; 129 147 } 148 + 149 + devlink_resource_occ_get_register(devlink, 150 + 
NSIM_RESOURCE_IPV4_FIB, 151 + nsim_ipv4_fib_resource_occ_get, 152 + net); 153 + devlink_resource_occ_get_register(devlink, 154 + NSIM_RESOURCE_IPV4_FIB_RULES, 155 + nsim_ipv4_fib_rules_res_occ_get, 156 + net); 157 + devlink_resource_occ_get_register(devlink, 158 + NSIM_RESOURCE_IPV6_FIB, 159 + nsim_ipv6_fib_resource_occ_get, 160 + net); 161 + devlink_resource_occ_get_register(devlink, 162 + NSIM_RESOURCE_IPV6_FIB_RULES, 163 + nsim_ipv6_fib_rules_res_occ_get, 164 + net); 130 165 out: 131 166 return err; 132 167 }
+18
drivers/net/phy/dp83640.c
··· 1207 1207 kfree(dp83640); 1208 1208 } 1209 1209 1210 + static int dp83640_soft_reset(struct phy_device *phydev) 1211 + { 1212 + int ret; 1213 + 1214 + ret = genphy_soft_reset(phydev); 1215 + if (ret < 0) 1216 + return ret; 1217 + 1218 + /* From DP83640 datasheet: "Software driver code must wait 3 us 1219 + * following a software reset before allowing further serial MII 1220 + * operations with the DP83640." 1221 + */ 1222 + udelay(10); /* Taking udelay inaccuracy into account */ 1223 + 1224 + return 0; 1225 + } 1226 + 1210 1227 static int dp83640_config_init(struct phy_device *phydev) 1211 1228 { 1212 1229 struct dp83640_private *dp83640 = phydev->priv; ··· 1518 1501 .flags = PHY_HAS_INTERRUPT, 1519 1502 .probe = dp83640_probe, 1520 1503 .remove = dp83640_remove, 1504 + .soft_reset = dp83640_soft_reset, 1521 1505 .config_init = dp83640_config_init, 1522 1506 .ack_interrupt = dp83640_ack_interrupt, 1523 1507 .config_intr = dp83640_config_intr,
+18 -2
drivers/net/phy/marvell.c
··· 828 828 return marvell_config_init(phydev); 829 829 } 830 830 831 + static int m88e1318_config_init(struct phy_device *phydev) 832 + { 833 + if (phy_interrupt_is_valid(phydev)) { 834 + int err = phy_modify_paged( 835 + phydev, MII_MARVELL_LED_PAGE, 836 + MII_88E1318S_PHY_LED_TCR, 837 + MII_88E1318S_PHY_LED_TCR_FORCE_INT, 838 + MII_88E1318S_PHY_LED_TCR_INTn_ENABLE | 839 + MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW); 840 + if (err < 0) 841 + return err; 842 + } 843 + 844 + return m88e1121_config_init(phydev); 845 + } 846 + 831 847 static int m88e1510_config_init(struct phy_device *phydev) 832 848 { 833 849 int err; ··· 886 870 phydev->advertising &= ~pause; 887 871 } 888 872 889 - return m88e1121_config_init(phydev); 873 + return m88e1318_config_init(phydev); 890 874 } 891 875 892 876 static int m88e1118_config_aneg(struct phy_device *phydev) ··· 2102 2086 .features = PHY_GBIT_FEATURES, 2103 2087 .flags = PHY_HAS_INTERRUPT, 2104 2088 .probe = marvell_probe, 2105 - .config_init = &m88e1121_config_init, 2089 + .config_init = &m88e1318_config_init, 2106 2090 .config_aneg = &m88e1318_config_aneg, 2107 2091 .read_status = &marvell_read_status, 2108 2092 .ack_interrupt = &marvell_ack_interrupt,
+7 -1
drivers/vhost/net.c
··· 44 44 * Using this limit prevents one virtqueue from starving others. */ 45 45 #define VHOST_NET_WEIGHT 0x80000 46 46 47 + /* Max number of packets transferred before requeueing the job. 48 + * Using this limit prevents one virtqueue from starving rx. */ 49 + #define VHOST_NET_PKT_WEIGHT(vq) ((vq)->num * 2) 50 + 47 51 /* MAX number of TX used buffers for outstanding zerocopy */ 48 52 #define VHOST_MAX_PEND 128 49 53 #define VHOST_GOODCOPY_LEN 256 ··· 477 473 struct socket *sock; 478 474 struct vhost_net_ubuf_ref *uninitialized_var(ubufs); 479 475 bool zcopy, zcopy_used; 476 + int sent_pkts = 0; 480 477 481 478 mutex_lock(&vq->mutex); 482 479 sock = vq->private_data; ··· 585 580 else 586 581 vhost_zerocopy_signal_used(net, vq); 587 582 vhost_net_tx_packet(net); 588 - if (unlikely(total_len >= VHOST_NET_WEIGHT)) { 583 + if (unlikely(total_len >= VHOST_NET_WEIGHT) || 584 + unlikely(++sent_pkts >= VHOST_NET_PKT_WEIGHT(vq))) { 589 585 vhost_poll_queue(&vq->poll); 590 586 break; 591 587 }
+1 -1
include/net/bluetooth/hci_core.h
··· 895 895 u16 conn_timeout); 896 896 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, 897 897 u8 dst_type, u8 sec_level, u16 conn_timeout, 898 - u8 role); 898 + u8 role, bdaddr_t *direct_rpa); 899 899 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, 900 900 u8 sec_level, u8 auth_type); 901 901 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
+26 -14
include/net/devlink.h
··· 232 232 }; 233 233 234 234 /** 235 - * struct devlink_resource_ops - resource ops 236 - * @occ_get: get the occupied size 237 - */ 238 - struct devlink_resource_ops { 239 - u64 (*occ_get)(struct devlink *devlink); 240 - }; 241 - 242 - /** 243 235 * struct devlink_resource_size_params - resource's size parameters 244 236 * @size_min: minimum size which can be set 245 237 * @size_max: maximum size which can be set ··· 257 265 size_params->unit = unit; 258 266 } 259 267 268 + typedef u64 devlink_resource_occ_get_t(void *priv); 269 + 260 270 /** 261 271 * struct devlink_resource - devlink resource 262 272 * @name: name of the resource ··· 271 277 * @size_params: size parameters 272 278 * @list: parent list 273 279 * @resource_list: list of child resources 274 - * @resource_ops: resource ops 275 280 */ 276 281 struct devlink_resource { 277 282 const char *name; ··· 282 289 struct devlink_resource_size_params size_params; 283 290 struct list_head list; 284 291 struct list_head resource_list; 285 - const struct devlink_resource_ops *resource_ops; 292 + devlink_resource_occ_get_t *occ_get; 293 + void *occ_get_priv; 286 294 }; 287 295 288 296 #define DEVLINK_RESOURCE_ID_PARENT_TOP 0 ··· 403 409 u64 resource_size, 404 410 u64 resource_id, 405 411 u64 parent_resource_id, 406 - const struct devlink_resource_size_params *size_params, 407 - const struct devlink_resource_ops *resource_ops); 412 + const struct devlink_resource_size_params *size_params); 408 413 void devlink_resources_unregister(struct devlink *devlink, 409 414 struct devlink_resource *resource); 410 415 int devlink_resource_size_get(struct devlink *devlink, ··· 412 419 int devlink_dpipe_table_resource_set(struct devlink *devlink, 413 420 const char *table_name, u64 resource_id, 414 421 u64 resource_units); 422 + void devlink_resource_occ_get_register(struct devlink *devlink, 423 + u64 resource_id, 424 + devlink_resource_occ_get_t *occ_get, 425 + void *occ_get_priv); 426 + void 
devlink_resource_occ_get_unregister(struct devlink *devlink, 427 + u64 resource_id); 415 428 416 429 #else 417 430 ··· 561 562 u64 resource_size, 562 563 u64 resource_id, 563 564 u64 parent_resource_id, 564 - const struct devlink_resource_size_params *size_params, 565 - const struct devlink_resource_ops *resource_ops) 565 + const struct devlink_resource_size_params *size_params) 566 566 { 567 567 return 0; 568 568 } ··· 585 587 u64 resource_units) 586 588 { 587 589 return -EOPNOTSUPP; 590 + } 591 + 592 + static inline void 593 + devlink_resource_occ_get_register(struct devlink *devlink, 594 + u64 resource_id, 595 + devlink_resource_occ_get_t *occ_get, 596 + void *occ_get_priv) 597 + { 598 + } 599 + 600 + static inline void 601 + devlink_resource_occ_get_unregister(struct devlink *devlink, 602 + u64 resource_id) 603 + { 588 604 } 589 605 590 606 #endif
+1
include/net/inet_timewait_sock.h
··· 43 43 #define tw_family __tw_common.skc_family 44 44 #define tw_state __tw_common.skc_state 45 45 #define tw_reuse __tw_common.skc_reuse 46 + #define tw_reuseport __tw_common.skc_reuseport 46 47 #define tw_ipv6only __tw_common.skc_ipv6only 47 48 #define tw_bound_dev_if __tw_common.skc_bound_dev_if 48 49 #define tw_node __tw_common.skc_nulls_node
+1 -1
include/net/nexthop.h
··· 7 7 8 8 static inline int rtnh_ok(const struct rtnexthop *rtnh, int remaining) 9 9 { 10 - return remaining >= sizeof(*rtnh) && 10 + return remaining >= (int)sizeof(*rtnh) && 11 11 rtnh->rtnh_len >= sizeof(*rtnh) && 12 12 rtnh->rtnh_len <= remaining; 13 13 }
+10 -2
kernel/bpf/sockmap.c
··· 182 182 psock->cork = NULL; 183 183 } 184 184 185 - sk->sk_prot = psock->sk_proto; 186 - psock->sk_proto = NULL; 185 + if (psock->sk_proto) { 186 + sk->sk_prot = psock->sk_proto; 187 + psock->sk_proto = NULL; 188 + } 187 189 out: 188 190 rcu_read_unlock(); 189 191 } ··· 213 211 close_fun = psock->save_close; 214 212 215 213 write_lock_bh(&sk->sk_callback_lock); 214 + if (psock->cork) { 215 + free_start_sg(psock->sock, psock->cork); 216 + kfree(psock->cork); 217 + psock->cork = NULL; 218 + } 219 + 216 220 list_for_each_entry_safe(md, mtmp, &psock->ingress, list) { 217 221 list_del(&md->list); 218 222 free_start_sg(psock->sock, md);
+12 -12
kernel/bpf/syscall.c
··· 1226 1226 } 1227 1227 } 1228 1228 1229 - static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, 1230 - enum bpf_attach_type attach_type) 1231 - { 1232 - switch (prog->type) { 1233 - case BPF_PROG_TYPE_CGROUP_SOCK: 1234 - case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 1235 - return attach_type == prog->expected_attach_type ? 0 : -EINVAL; 1236 - default: 1237 - return 0; 1238 - } 1239 - } 1240 - 1241 1229 /* last field in 'union bpf_attr' used by this command */ 1242 1230 #define BPF_PROG_LOAD_LAST_FIELD expected_attach_type 1243 1231 ··· 1452 1464 } 1453 1465 1454 1466 #ifdef CONFIG_CGROUP_BPF 1467 + 1468 + static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, 1469 + enum bpf_attach_type attach_type) 1470 + { 1471 + switch (prog->type) { 1472 + case BPF_PROG_TYPE_CGROUP_SOCK: 1473 + case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 1474 + return attach_type == prog->expected_attach_type ? 0 : -EINVAL; 1475 + default: 1476 + return 0; 1477 + } 1478 + } 1455 1479 1456 1480 #define BPF_PROG_ATTACH_LAST_FIELD attach_flags 1457 1481
+21 -8
net/bluetooth/hci_conn.c
··· 749 749 } 750 750 751 751 static void hci_req_add_le_create_conn(struct hci_request *req, 752 - struct hci_conn *conn) 752 + struct hci_conn *conn, 753 + bdaddr_t *direct_rpa) 753 754 { 754 755 struct hci_cp_le_create_conn cp; 755 756 struct hci_dev *hdev = conn->hdev; 756 757 u8 own_addr_type; 757 758 758 - /* Update random address, but set require_privacy to false so 759 - * that we never connect with an non-resolvable address. 759 + /* If direct address was provided we use it instead of current 760 + * address. 760 761 */ 761 - if (hci_update_random_address(req, false, conn_use_rpa(conn), 762 - &own_addr_type)) 763 - return; 762 + if (direct_rpa) { 763 + if (bacmp(&req->hdev->random_addr, direct_rpa)) 764 + hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, 765 + direct_rpa); 766 + 767 + /* direct address is always RPA */ 768 + own_addr_type = ADDR_LE_DEV_RANDOM; 769 + } else { 770 + /* Update random address, but set require_privacy to false so 771 + * that we never connect with an non-resolvable address. 772 + */ 773 + if (hci_update_random_address(req, false, conn_use_rpa(conn), 774 + &own_addr_type)) 775 + return; 776 + } 764 777 765 778 memset(&cp, 0, sizeof(cp)); 766 779 ··· 838 825 839 826 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, 840 827 u8 dst_type, u8 sec_level, u16 conn_timeout, 841 - u8 role) 828 + u8 role, bdaddr_t *direct_rpa) 842 829 { 843 830 struct hci_conn_params *params; 844 831 struct hci_conn *conn; ··· 953 940 hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED); 954 941 } 955 942 956 - hci_req_add_le_create_conn(&req, conn); 943 + hci_req_add_le_create_conn(&req, conn, direct_rpa); 957 944 958 945 create_conn: 959 946 err = hci_req_run(&req, create_le_conn_complete);
+11 -4
net/bluetooth/hci_event.c
··· 4648 4648 /* This function requires the caller holds hdev->lock */ 4649 4649 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, 4650 4650 bdaddr_t *addr, 4651 - u8 addr_type, u8 adv_type) 4651 + u8 addr_type, u8 adv_type, 4652 + bdaddr_t *direct_rpa) 4652 4653 { 4653 4654 struct hci_conn *conn; 4654 4655 struct hci_conn_params *params; ··· 4700 4699 } 4701 4700 4702 4701 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW, 4703 - HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER); 4702 + HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER, 4703 + direct_rpa); 4704 4704 if (!IS_ERR(conn)) { 4705 4705 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned 4706 4706 * by higher layer that tried to connect, if no then ··· 4810 4808 bdaddr_type = irk->addr_type; 4811 4809 } 4812 4810 4813 - /* Check if we have been requested to connect to this device */ 4814 - conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type); 4811 + /* Check if we have been requested to connect to this device. 4812 + * 4813 + * direct_addr is set only for directed advertising reports (it is NULL 4814 + * for advertising reports) and is already verified to be RPA above. 4815 + */ 4816 + conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type, 4817 + direct_addr); 4815 4818 if (conn && type == LE_ADV_IND) { 4816 4819 /* Store report for later inclusion by 4817 4820 * mgmt_device_connected
+1 -1
net/bluetooth/l2cap_core.c
··· 7156 7156 hcon = hci_connect_le(hdev, dst, dst_type, 7157 7157 chan->sec_level, 7158 7158 HCI_LE_CONN_TIMEOUT, 7159 - HCI_ROLE_SLAVE); 7159 + HCI_ROLE_SLAVE, NULL); 7160 7160 else 7161 7161 hcon = hci_connect_le_scan(hdev, dst, dst_type, 7162 7162 chan->sec_level,
+1 -1
net/core/dev.c
··· 1027 1027 { 1028 1028 if (*name == '\0') 1029 1029 return false; 1030 - if (strlen(name) >= IFNAMSIZ) 1030 + if (strnlen(name, IFNAMSIZ) == IFNAMSIZ) 1031 1031 return false; 1032 1032 if (!strcmp(name, ".") || !strcmp(name, "..")) 1033 1033 return false;
+2 -2
net/core/dev_addr_lists.c
··· 57 57 return -EINVAL; 58 58 59 59 list_for_each_entry(ha, &list->list, list) { 60 - if (!memcmp(ha->addr, addr, addr_len) && 61 - ha->type == addr_type) { 60 + if (ha->type == addr_type && 61 + !memcmp(ha->addr, addr, addr_len)) { 62 62 if (global) { 63 63 /* check if addr is already used as global */ 64 64 if (ha->global_use)
+65 -9
net/core/devlink.c
··· 2405 2405 return 0; 2406 2406 } 2407 2407 2408 + static int devlink_resource_occ_put(struct devlink_resource *resource, 2409 + struct sk_buff *skb) 2410 + { 2411 + if (!resource->occ_get) 2412 + return 0; 2413 + return nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC, 2414 + resource->occ_get(resource->occ_get_priv), 2415 + DEVLINK_ATTR_PAD); 2416 + } 2417 + 2408 2418 static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb, 2409 2419 struct devlink_resource *resource) 2410 2420 { ··· 2435 2425 if (resource->size != resource->size_new) 2436 2426 nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_NEW, 2437 2427 resource->size_new, DEVLINK_ATTR_PAD); 2438 - if (resource->resource_ops && resource->resource_ops->occ_get) 2439 - if (nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC, 2440 - resource->resource_ops->occ_get(devlink), 2441 - DEVLINK_ATTR_PAD)) 2442 - goto nla_put_failure; 2428 + if (devlink_resource_occ_put(resource, skb)) 2429 + goto nla_put_failure; 2443 2430 if (devlink_resource_size_params_put(resource, skb)) 2444 2431 goto nla_put_failure; 2445 2432 if (list_empty(&resource->resource_list)) ··· 3169 3162 * @resource_id: resource's id 3170 3163 * @parent_reosurce_id: resource's parent id 3171 3164 * @size params: size parameters 3172 - * @resource_ops: resource ops 3173 3165 */ 3174 3166 int devlink_resource_register(struct devlink *devlink, 3175 3167 const char *resource_name, 3176 3168 u64 resource_size, 3177 3169 u64 resource_id, 3178 3170 u64 parent_resource_id, 3179 - const struct devlink_resource_size_params *size_params, 3180 - const struct devlink_resource_ops *resource_ops) 3171 + const struct devlink_resource_size_params *size_params) 3181 3172 { 3182 3173 struct devlink_resource *resource; 3183 3174 struct list_head *resource_list; ··· 3218 3213 resource->size = resource_size; 3219 3214 resource->size_new = resource_size; 3220 3215 resource->id = resource_id; 3221 - resource->resource_ops = resource_ops; 3222 3216 
resource->size_valid = true; 3223 3217 memcpy(&resource->size_params, size_params, 3224 3218 sizeof(resource->size_params)); ··· 3318 3314 return err; 3319 3315 } 3320 3316 EXPORT_SYMBOL_GPL(devlink_dpipe_table_resource_set); 3317 + 3318 + /** 3319 + * devlink_resource_occ_get_register - register occupancy getter 3320 + * 3321 + * @devlink: devlink 3322 + * @resource_id: resource id 3323 + * @occ_get: occupancy getter callback 3324 + * @occ_get_priv: occupancy getter callback priv 3325 + */ 3326 + void devlink_resource_occ_get_register(struct devlink *devlink, 3327 + u64 resource_id, 3328 + devlink_resource_occ_get_t *occ_get, 3329 + void *occ_get_priv) 3330 + { 3331 + struct devlink_resource *resource; 3332 + 3333 + mutex_lock(&devlink->lock); 3334 + resource = devlink_resource_find(devlink, NULL, resource_id); 3335 + if (WARN_ON(!resource)) 3336 + goto out; 3337 + WARN_ON(resource->occ_get); 3338 + 3339 + resource->occ_get = occ_get; 3340 + resource->occ_get_priv = occ_get_priv; 3341 + out: 3342 + mutex_unlock(&devlink->lock); 3343 + } 3344 + EXPORT_SYMBOL_GPL(devlink_resource_occ_get_register); 3345 + 3346 + /** 3347 + * devlink_resource_occ_get_unregister - unregister occupancy getter 3348 + * 3349 + * @devlink: devlink 3350 + * @resource_id: resource id 3351 + */ 3352 + void devlink_resource_occ_get_unregister(struct devlink *devlink, 3353 + u64 resource_id) 3354 + { 3355 + struct devlink_resource *resource; 3356 + 3357 + mutex_lock(&devlink->lock); 3358 + resource = devlink_resource_find(devlink, NULL, resource_id); 3359 + if (WARN_ON(!resource)) 3360 + goto out; 3361 + WARN_ON(!resource->occ_get); 3362 + 3363 + resource->occ_get = NULL; 3364 + resource->occ_get_priv = NULL; 3365 + out: 3366 + mutex_unlock(&devlink->lock); 3367 + } 3368 + EXPORT_SYMBOL_GPL(devlink_resource_occ_get_unregister); 3321 3369 3322 3370 static int __init devlink_module_init(void) 3323 3371 {
+1
net/core/skbuff.c
··· 857 857 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; 858 858 n->cloned = 1; 859 859 n->nohdr = 0; 860 + n->peeked = 0; 860 861 n->destructor = NULL; 861 862 C(tail); 862 863 C(end);
+1
net/dccp/ipv4.c
··· 614 614 ireq = inet_rsk(req); 615 615 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr); 616 616 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr); 617 + ireq->ir_mark = inet_request_mark(sk, skb); 617 618 ireq->ireq_family = AF_INET; 618 619 ireq->ir_iif = sk->sk_bound_dev_if; 619 620
+1
net/dccp/ipv6.c
··· 351 351 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr; 352 352 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr; 353 353 ireq->ireq_family = AF_INET6; 354 + ireq->ir_mark = inet_request_mark(sk, skb); 354 355 355 356 if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) || 356 357 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
+7 -1
net/dsa/dsa_priv.h
··· 126 126 struct dsa_port *cpu_dp = dev->dsa_ptr; 127 127 struct dsa_switch_tree *dst = cpu_dp->dst; 128 128 struct dsa_switch *ds; 129 + struct dsa_port *slave_port; 129 130 130 131 if (device < 0 || device >= DSA_MAX_SWITCHES) 131 132 return NULL; ··· 138 137 if (port < 0 || port >= ds->num_ports) 139 138 return NULL; 140 139 141 - return ds->ports[port].slave; 140 + slave_port = &ds->ports[port]; 141 + 142 + if (unlikely(slave_port->type != DSA_PORT_TYPE_USER)) 143 + return NULL; 144 + 145 + return slave_port->slave; 142 146 } 143 147 144 148 /* port.c */
+1 -1
net/ipv4/arp.c
··· 437 437 /*unsigned long now; */ 438 438 struct net *net = dev_net(dev); 439 439 440 - rt = ip_route_output(net, sip, tip, 0, 0); 440 + rt = ip_route_output(net, sip, tip, 0, l3mdev_master_ifindex_rcu(dev)); 441 441 if (IS_ERR(rt)) 442 442 return 1; 443 443 if (rt->dst.dev != dev) {
+1
net/ipv4/inet_timewait_sock.c
··· 178 178 tw->tw_dport = inet->inet_dport; 179 179 tw->tw_family = sk->sk_family; 180 180 tw->tw_reuse = sk->sk_reuse; 181 + tw->tw_reuseport = sk->sk_reuseport; 181 182 tw->tw_hash = sk->sk_hash; 182 183 tw->tw_ipv6only = 0; 183 184 tw->tw_transparent = inet->transparent;
+1
net/ipv4/inetpeer.c
··· 211 211 p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC); 212 212 if (p) { 213 213 p->daddr = *daddr; 214 + p->dtime = (__u32)jiffies; 214 215 refcount_set(&p->refcnt, 2); 215 216 atomic_set(&p->rid, 0); 216 217 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
+7 -6
net/ipv4/ip_tunnel.c
··· 253 253 struct net_device *dev; 254 254 char name[IFNAMSIZ]; 255 255 256 - if (parms->name[0]) 257 - strlcpy(name, parms->name, IFNAMSIZ); 258 - else { 259 - if (strlen(ops->kind) > (IFNAMSIZ - 3)) { 260 - err = -E2BIG; 256 + err = -E2BIG; 257 + if (parms->name[0]) { 258 + if (!dev_valid_name(parms->name)) 261 259 goto failed; 262 - } 260 + strlcpy(name, parms->name, IFNAMSIZ); 261 + } else { 262 + if (strlen(ops->kind) > (IFNAMSIZ - 3)) 263 + goto failed; 263 264 strlcpy(name, ops->kind, IFNAMSIZ); 264 265 strncat(name, "%d", 2); 265 266 }
+6 -5
net/ipv4/route.c
··· 2296 2296 const struct sk_buff *skb) 2297 2297 { 2298 2298 __u8 tos = RT_FL_TOS(fl4); 2299 - struct fib_result res; 2299 + struct fib_result res = { 2300 + .type = RTN_UNSPEC, 2301 + .fi = NULL, 2302 + .table = NULL, 2303 + .tclassid = 0, 2304 + }; 2300 2305 struct rtable *rth; 2301 - 2302 - res.tclassid = 0; 2303 - res.fi = NULL; 2304 - res.table = NULL; 2305 2306 2306 2307 fl4->flowi4_iif = LOOPBACK_IFINDEX; 2307 2308 fl4->flowi4_tos = tos & IPTOS_RT_MASK;
+5 -3
net/ipv6/ip6_gre.c
··· 335 335 if (t || !create) 336 336 return t; 337 337 338 - if (parms->name[0]) 338 + if (parms->name[0]) { 339 + if (!dev_valid_name(parms->name)) 340 + return NULL; 339 341 strlcpy(name, parms->name, IFNAMSIZ); 340 - else 342 + } else { 341 343 strcpy(name, "ip6gre%d"); 342 - 344 + } 343 345 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, 344 346 ip6gre_tunnel_setup); 345 347 if (!dev)
+5 -2
net/ipv6/ip6_output.c
··· 375 375 static inline int ip6_forward_finish(struct net *net, struct sock *sk, 376 376 struct sk_buff *skb) 377 377 { 378 + struct dst_entry *dst = skb_dst(skb); 379 + 380 + __IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); 381 + __IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len); 382 + 378 383 return dst_output(net, sk, skb); 379 384 } 380 385 ··· 574 569 575 570 hdr->hop_limit--; 576 571 577 - __IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); 578 - __IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len); 579 572 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, 580 573 net, NULL, skb, skb->dev, dst->dev, 581 574 ip6_forward_finish);
+7 -4
net/ipv6/ip6_tunnel.c
··· 297 297 struct net_device *dev; 298 298 struct ip6_tnl *t; 299 299 char name[IFNAMSIZ]; 300 - int err = -ENOMEM; 300 + int err = -E2BIG; 301 301 302 - if (p->name[0]) 302 + if (p->name[0]) { 303 + if (!dev_valid_name(p->name)) 304 + goto failed; 303 305 strlcpy(name, p->name, IFNAMSIZ); 304 - else 306 + } else { 305 307 sprintf(name, "ip6tnl%%d"); 306 - 308 + } 309 + err = -ENOMEM; 307 310 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, 308 311 ip6_tnl_dev_setup); 309 312 if (!dev)
+5 -2
net/ipv6/ip6_vti.c
··· 212 212 char name[IFNAMSIZ]; 213 213 int err; 214 214 215 - if (p->name[0]) 215 + if (p->name[0]) { 216 + if (!dev_valid_name(p->name)) 217 + goto failed; 216 218 strlcpy(name, p->name, IFNAMSIZ); 217 - else 219 + } else { 218 220 sprintf(name, "ip6_vti%%d"); 221 + } 219 222 220 223 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, vti6_dev_setup); 221 224 if (!dev)
+5 -3
net/ipv6/sit.c
··· 250 250 if (!create) 251 251 goto failed; 252 252 253 - if (parms->name[0]) 253 + if (parms->name[0]) { 254 + if (!dev_valid_name(parms->name)) 255 + goto failed; 254 256 strlcpy(name, parms->name, IFNAMSIZ); 255 - else 257 + } else { 256 258 strcpy(name, "sit%d"); 257 - 259 + } 258 260 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, 259 261 ipip6_tunnel_setup); 260 262 if (!dev)
+2
net/netlink/af_netlink.c
··· 1844 1844 1845 1845 if (msg->msg_namelen) { 1846 1846 err = -EINVAL; 1847 + if (msg->msg_namelen < sizeof(struct sockaddr_nl)) 1848 + goto out; 1847 1849 if (addr->nl_family != AF_NETLINK) 1848 1850 goto out; 1849 1851 dst_portid = addr->nl_pid;
+8 -4
net/sched/act_bpf.c
··· 248 248 249 249 static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg) 250 250 { 251 - if (cfg->is_ebpf) 252 - bpf_prog_put(cfg->filter); 253 - else 254 - bpf_prog_destroy(cfg->filter); 251 + struct bpf_prog *filter = cfg->filter; 252 + 253 + if (filter) { 254 + if (cfg->is_ebpf) 255 + bpf_prog_put(filter); 256 + else 257 + bpf_prog_destroy(filter); 258 + } 255 259 256 260 kfree(cfg->bpf_ops); 257 261 kfree(cfg->bpf_name);
+1
net/sched/cls_u32.c
··· 489 489 RCU_INIT_POINTER(*kp, key->next); 490 490 491 491 tcf_unbind_filter(tp, &key->res); 492 + idr_remove(&ht->handle_idr, key->handle); 492 493 tcf_exts_get_net(&key->exts); 493 494 call_rcu(&key->rcu, u32_delete_key_freepf_rcu); 494 495 return 0;
+3 -1
net/sctp/ipv6.c
··· 757 757 sctp_v6_map_v4(addr); 758 758 } 759 759 760 - if (addr->sa.sa_family == AF_INET) 760 + if (addr->sa.sa_family == AF_INET) { 761 + memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero)); 761 762 return sizeof(struct sockaddr_in); 763 + } 762 764 return sizeof(struct sockaddr_in6); 763 765 } 764 766
+8 -5
net/sctp/socket.c
··· 357 357 if (!opt->pf->af_supported(addr->sa.sa_family, opt)) 358 358 return NULL; 359 359 360 - /* V4 mapped address are really of AF_INET family */ 361 - if (addr->sa.sa_family == AF_INET6 && 362 - ipv6_addr_v4mapped(&addr->v6.sin6_addr) && 363 - !opt->pf->af_supported(AF_INET, opt)) 364 - return NULL; 360 + if (addr->sa.sa_family == AF_INET6) { 361 + if (len < SIN6_LEN_RFC2133) 362 + return NULL; 363 + /* V4 mapped address are really of AF_INET family */ 364 + if (ipv6_addr_v4mapped(&addr->v6.sin6_addr) && 365 + !opt->pf->af_supported(AF_INET, opt)) 366 + return NULL; 367 + } 365 368 366 369 /* If we get this far, af is valid. */ 367 370 af = sctp_get_af_specific(addr->sa.sa_family);
+1 -1
net/tipc/diag.c
··· 59 59 if (!nlh) 60 60 return -EMSGSIZE; 61 61 62 - err = tipc_sk_fill_sock_diag(skb, tsk, req->tidiag_states, 62 + err = tipc_sk_fill_sock_diag(skb, cb, tsk, req->tidiag_states, 63 63 __tipc_diag_gen_cookie); 64 64 if (err) 65 65 return err;
+3 -3
net/tipc/socket.c
··· 3257 3257 } 3258 3258 EXPORT_SYMBOL(tipc_nl_sk_walk); 3259 3259 3260 - int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct tipc_sock *tsk, 3261 - u32 sk_filter_state, 3260 + int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb, 3261 + struct tipc_sock *tsk, u32 sk_filter_state, 3262 3262 u64 (*tipc_diag_gen_cookie)(struct sock *sk)) 3263 3263 { 3264 3264 struct sock *sk = &tsk->sk; ··· 3280 3280 nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) || 3281 3281 nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) || 3282 3282 nla_put_u32(skb, TIPC_NLA_SOCK_UID, 3283 - from_kuid_munged(sk_user_ns(NETLINK_CB(skb).sk), 3283 + from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk), 3284 3284 sock_i_uid(sk))) || 3285 3285 nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE, 3286 3286 tipc_diag_gen_cookie(sk),
+2 -2
net/tipc/socket.h
··· 61 61 void tipc_sk_rht_destroy(struct net *net); 62 62 int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb); 63 63 int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb); 64 - int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct tipc_sock *tsk, 65 - u32 sk_filter_state, 64 + int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb, 65 + struct tipc_sock *tsk, u32 sk_filter_state, 66 66 u64 (*tipc_diag_gen_cookie)(struct sock *sk)); 67 67 int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb, 68 68 int (*skb_handler)(struct sk_buff *skb,