Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) The forcedeth conversion from pci_*() DMA interfaces to dma_*() ones
missed one spot. From Zhu Yanjun.

2) Missing CRYPTO_SHA256 Kconfig dep in cfg80211, from Johannes Berg.

3) Fix checksum offloading in thunderx driver, from Sunil Goutham.

4) Add SPDX to vm_sockets_diag.h, from Stephen Hemminger.

5) Fix use after free of packet headers in TIPC, from Jon Maloy.

6) "sizeof(ptr)" vs "sizeof(*ptr)" bug in i40e, from Gustavo A. R. Silva.

7) Tunneling fixes in mlxsw driver, from Petr Machata.

8) Fix crash in fanout_demux_rollover() of AF_PACKET, from Mike
Maloney.

9) Fix race in AF_PACKET bind() vs. NETDEV_UP notifier, from Eric
Dumazet.

10) Fix regression in sch_sfq.c due to one of the timer_setup()
conversions. From Paolo Abeni.

11) SCTP does list_for_each_entry() using wrong struct member, fix from
Xin Long.

12) Don't use big endian netlink attribute read for
IFLA_BOND_AD_ACTOR_SYSTEM, it is in cpu endianness. Also from Xin
Long.

13) Fix mis-initialization of q->link.clock in CBQ scheduler, preventing
adding filters there. From Jiri Pirko.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (67 commits)
ethernet: dwmac-stm32: Fix copyright
net: via: via-rhine: use %p to format void * address instead of %x
net: ethernet: xilinx: Mark XILINX_LL_TEMAC broken on 64-bit
myri10ge: Update MAINTAINERS
net: sched: cbq: create block for q->link.block
atm: suni: remove extraneous space to fix indentation
atm: lanai: use %p to format kernel addresses instead of %x
VSOCK: Don't set sk_state to TCP_CLOSE before testing it
atm: fore200e: use %pK to format kernel addresses instead of %x
ambassador: fix incorrect indentation of assignment statement
vxlan: use __be32 type for the param vni in __vxlan_fdb_delete
bonding: use nla_get_u64 to extract the value for IFLA_BOND_AD_ACTOR_SYSTEM
sctp: use right member as the param of list_for_each_entry
sch_sfq: fix null pointer dereference at timer expiration
cls_bpf: don't decrement net's refcount when offload fails
net/packet: fix a race in packet_bind() and packet_notifier()
packet: fix crash in fanout_demux_rollover()
sctp: remove extern from stream sched
sctp: force the params with right types for sctp csum apis
sctp: force SCTP_ERROR_INV_STRM with __u32 when calling sctp_chunk_fail
...

+1179 -579
+2 -2
MAINTAINERS
··· 9331 9331 F: Documentation/devicetree/bindings/display/mxsfb-drm.txt 9332 9332 9333 9333 MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE) 9334 - M: Hyong-Youb Kim <hykim@myri.com> 9334 + M: Chris Lee <christopher.lee@cspi.com> 9335 9335 L: netdev@vger.kernel.org 9336 - W: https://www.myricom.com/support/downloads/myri10ge.html 9336 + W: https://www.cspi.com/ethernet-products/support/downloads/ 9337 9337 S: Supported 9338 9338 F: drivers/net/ethernet/myricom/myri10ge/ 9339 9339
+1 -1
drivers/atm/ambassador.c
··· 2258 2258 2259 2259 PRINTD (DBG_INFO, "registered Madge ATM adapter (no. %d) (%p) at %p", 2260 2260 dev->atm_dev->number, dev, dev->atm_dev); 2261 - dev->atm_dev->dev_data = (void *) dev; 2261 + dev->atm_dev->dev_data = (void *) dev; 2262 2262 2263 2263 // register our address 2264 2264 amb_esi (dev, dev->atm_dev->esi);
+2 -2
drivers/atm/fore200e.c
··· 3083 3083 ASSERT(fore200e_vcc); 3084 3084 3085 3085 len = sprintf(page, 3086 - " %08x %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n", 3087 - (u32)(unsigned long)vcc, 3086 + " %pK %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n", 3087 + vcc, 3088 3088 vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal), 3089 3089 fore200e_vcc->tx_pdu, 3090 3090 fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
+4 -4
drivers/atm/lanai.c
··· 1586 1586 lanai->pci); 1587 1587 if (unlikely(lanai->service.start == NULL)) 1588 1588 return -ENOMEM; 1589 - DPRINTK("allocated service buffer at 0x%08lX, size %zu(%d)\n", 1590 - (unsigned long) lanai->service.start, 1589 + DPRINTK("allocated service buffer at %p, size %zu(%d)\n", 1590 + lanai->service.start, 1591 1591 lanai_buf_size(&lanai->service), 1592 1592 lanai_buf_size_cardorder(&lanai->service)); 1593 1593 /* Clear ServWrite register to be safe */ ··· 2218 2218 #endif 2219 2219 memcpy(atmdev->esi, eeprom_mac(lanai), ESI_LEN); 2220 2220 lanai_timed_poll_start(lanai); 2221 - printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d, base=0x%lx, irq=%u " 2221 + printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d, base=%p, irq=%u " 2222 2222 "(%pMF)\n", lanai->number, (int) lanai->pci->revision, 2223 - (unsigned long) lanai->base, lanai->pci->irq, atmdev->esi); 2223 + lanai->base, lanai->pci->irq, atmdev->esi); 2224 2224 printk(KERN_NOTICE DEV_LABEL "(itf %d): LANAI%s, serialno=%u(0x%X), " 2225 2225 "board_rev=%d\n", lanai->number, 2226 2226 lanai->type==lanai2 ? "2" : "HB", (unsigned int) lanai->serialno,
+1 -1
drivers/atm/suni.c
··· 177 177 default: 178 178 return -EINVAL; 179 179 } 180 - dev->ops->phy_put(dev, control, reg); 180 + dev->ops->phy_put(dev, control, reg); 181 181 PRIV(dev)->loop_mode = mode; 182 182 return 0; 183 183 }
+1 -1
drivers/net/bonding/bond_netlink.c
··· 423 423 return -EINVAL; 424 424 425 425 bond_opt_initval(&newval, 426 - nla_get_be64(data[IFLA_BOND_AD_ACTOR_SYSTEM])); 426 + nla_get_u64(data[IFLA_BOND_AD_ACTOR_SYSTEM])); 427 427 err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYSTEM, &newval); 428 428 if (err) 429 429 return err;
+2 -2
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
··· 2136 2136 /* Read A2 portion of the EEPROM */ 2137 2137 if (length) { 2138 2138 start -= ETH_MODULE_SFF_8436_LEN; 2139 - bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, start, 2140 - length, data); 2139 + rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, 2140 + start, length, data); 2141 2141 } 2142 2142 return rc; 2143 2143 }
-1
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
··· 1355 1355 1356 1356 /* Offload checksum calculation to HW */ 1357 1357 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1358 - hdr->csum_l3 = 1; /* Enable IP csum calculation */ 1359 1358 hdr->l3_offset = skb_network_offset(skb); 1360 1359 hdr->l4_offset = skb_transport_offset(skb); 1361 1360
+4 -2
drivers/net/ethernet/intel/e1000/e1000_hw.c
··· 4307 4307 4308 4308 rar_num = E1000_RAR_ENTRIES; 4309 4309 4310 - /* Zero out the other 15 receive addresses. */ 4311 - e_dbg("Clearing RAR[1-15]\n"); 4310 + /* Zero out the following 14 receive addresses. RAR[15] is for 4311 + * manageability 4312 + */ 4313 + e_dbg("Clearing RAR[1-14]\n"); 4312 4314 for (i = 1; i < rar_num; i++) { 4313 4315 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); 4314 4316 E1000_WRITE_FLUSH();
+2 -1
drivers/net/ethernet/intel/e1000e/ich8lan.h
··· 113 113 #define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field */ 114 114 #define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs */ 115 115 #define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */ 116 - #define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29) 116 + #define E1000_TARC0_CB_MULTIQ_3_REQ 0x30000000 117 + #define E1000_TARC0_CB_MULTIQ_2_REQ 0x20000000 117 118 #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL 118 119 119 120 #define E1000_ICH_RAR_ENTRIES 7
+6 -3
drivers/net/ethernet/intel/e1000e/netdev.c
··· 3034 3034 ew32(IOSFPC, reg_val); 3035 3035 3036 3036 reg_val = er32(TARC(0)); 3037 - /* SPT and KBL Si errata workaround to avoid Tx hang */ 3038 - reg_val &= ~BIT(28); 3039 - reg_val |= BIT(29); 3037 + /* SPT and KBL Si errata workaround to avoid Tx hang. 3038 + * Dropping the number of outstanding requests from 3039 + * 3 to 2 in order to avoid a buffer overrun. 3040 + */ 3041 + reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ; 3042 + reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ; 3040 3043 ew32(TARC(0), reg_val); 3041 3044 } 3042 3045 }
-1
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 7401 7401 dev_err(&pf->pdev->dev, 7402 7402 "Failed to add cloud filter, err %s\n", 7403 7403 i40e_stat_str(&pf->hw, err)); 7404 - err = i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status); 7405 7404 goto err; 7406 7405 } 7407 7406
+1 -1
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
··· 2086 2086 } 2087 2087 2088 2088 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0, 2089 - (u8 *)vfres, sizeof(vfres)); 2089 + (u8 *)vfres, sizeof(*vfres)); 2090 2090 } 2091 2091 2092 2092 /**
+17 -29
drivers/net/ethernet/marvell/mvpp2.c
··· 4629 4629 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; 4630 4630 val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; 4631 4631 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG); 4632 - 4633 - val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); 4634 - val |= MVPP2_GMAC_DISABLE_PADDING; 4635 - val &= ~MVPP2_GMAC_FLOW_CTRL_MASK; 4636 - writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); 4637 4632 } else if (phy_interface_mode_is_rgmii(port->phy_interface)) { 4638 4633 val = readl(port->base + MVPP22_GMAC_CTRL_4_REG); 4639 4634 val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL | ··· 4636 4641 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; 4637 4642 val &= ~MVPP22_CTRL4_DP_CLK_SEL; 4638 4643 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG); 4639 - 4640 - val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); 4641 - val &= ~MVPP2_GMAC_DISABLE_PADDING; 4642 - writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); 4643 4644 } 4644 4645 4645 4646 /* The port is connected to a copper PHY */ ··· 5796 5805 sizeof(*txq_pcpu->buffs), 5797 5806 GFP_KERNEL); 5798 5807 if (!txq_pcpu->buffs) 5799 - goto cleanup; 5808 + return -ENOMEM; 5800 5809 5801 5810 txq_pcpu->count = 0; 5802 5811 txq_pcpu->reserved_num = 0; ··· 5812 5821 &txq_pcpu->tso_headers_dma, 5813 5822 GFP_KERNEL); 5814 5823 if (!txq_pcpu->tso_headers) 5815 - goto cleanup; 5824 + return -ENOMEM; 5816 5825 } 5817 5826 5818 5827 return 0; 5819 - cleanup: 5820 - for_each_present_cpu(cpu) { 5821 - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 5822 - kfree(txq_pcpu->buffs); 5823 - 5824 - dma_free_coherent(port->dev->dev.parent, 5825 - txq_pcpu->size * TSO_HEADER_SIZE, 5826 - txq_pcpu->tso_headers, 5827 - txq_pcpu->tso_headers_dma); 5828 - } 5829 - 5830 - dma_free_coherent(port->dev->dev.parent, 5831 - txq->size * MVPP2_DESC_ALIGNED_SIZE, 5832 - txq->descs, txq->descs_dma); 5833 - 5834 - return -ENOMEM; 5835 5828 } 5836 5829 5837 5830 /* Free allocated TXQ resources */ ··· 6841 6866 new_tx_pending = MVPP2_MAX_TXD; 6842 6867 else if (!IS_ALIGNED(ring->tx_pending, 32)) 6843 6868 new_tx_pending = 
ALIGN(ring->tx_pending, 32); 6869 + 6870 + /* The Tx ring size cannot be smaller than the minimum number of 6871 + * descriptors needed for TSO. 6872 + */ 6873 + if (new_tx_pending < MVPP2_MAX_SKB_DESCS) 6874 + new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32); 6844 6875 6845 6876 if (ring->rx_pending != new_rx_pending) { 6846 6877 netdev_info(dev, "illegal Rx ring size value %d, round to %d\n", ··· 8326 8345 for_each_available_child_of_node(dn, port_node) { 8327 8346 err = mvpp2_port_probe(pdev, port_node, priv, i); 8328 8347 if (err < 0) 8329 - goto err_mg_clk; 8348 + goto err_port_probe; 8330 8349 i++; 8331 8350 } 8332 8351 ··· 8342 8361 priv->stats_queue = create_singlethread_workqueue(priv->queue_name); 8343 8362 if (!priv->stats_queue) { 8344 8363 err = -ENOMEM; 8345 - goto err_mg_clk; 8364 + goto err_port_probe; 8346 8365 } 8347 8366 8348 8367 platform_set_drvdata(pdev, priv); 8349 8368 return 0; 8350 8369 8370 + err_port_probe: 8371 + i = 0; 8372 + for_each_available_child_of_node(dn, port_node) { 8373 + if (priv->port_list[i]) 8374 + mvpp2_port_remove(priv->port_list[i]); 8375 + i++; 8376 + } 8351 8377 err_mg_clk: 8352 8378 clk_disable_unprepare(priv->axi_clk); 8353 8379 if (priv->hw_version == MVPP22)
+69 -40
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
··· 1370 1370 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry); 1371 1371 } 1372 1372 1373 - static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp, 1374 - struct mlxsw_sp_rif *rif); 1373 + static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp, 1374 + struct mlxsw_sp_rif *old_rif, 1375 + struct mlxsw_sp_rif *new_rif); 1375 1376 static int 1376 1377 mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp, 1377 1378 struct mlxsw_sp_ipip_entry *ipip_entry, ··· 1390 1389 return PTR_ERR(new_lb_rif); 1391 1390 ipip_entry->ol_lb = new_lb_rif; 1392 1391 1393 - if (keep_encap) { 1394 - list_splice_init(&old_lb_rif->common.nexthop_list, 1395 - &new_lb_rif->common.nexthop_list); 1396 - mlxsw_sp_nexthop_rif_update(mlxsw_sp, &new_lb_rif->common); 1397 - } 1392 + if (keep_encap) 1393 + mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common, 1394 + &new_lb_rif->common); 1398 1395 1399 1396 mlxsw_sp_rif_destroy(&old_lb_rif->common); 1400 1397 1401 1398 return 0; 1402 1399 } 1400 + 1401 + static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp, 1402 + struct mlxsw_sp_rif *rif); 1403 1403 1404 1404 /** 1405 1405 * Update the offload related to an IPIP entry. This always updates decap, and ··· 1451 1449 { 1452 1450 struct mlxsw_sp_ipip_entry *ipip_entry = 1453 1451 mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev); 1452 + enum mlxsw_sp_l3proto ul_proto; 1453 + union mlxsw_sp_l3addr saddr; 1454 + u32 ul_tb_id; 1454 1455 1455 1456 if (!ipip_entry) 1456 1457 return 0; 1458 + 1459 + /* For flat configuration cases, moving overlay to a different VRF might 1460 + * cause local address conflict, and the conflicting tunnels need to be 1461 + * demoted. 
1462 + */ 1463 + ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev); 1464 + ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto; 1465 + saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev); 1466 + if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto, 1467 + saddr, ul_tb_id, 1468 + ipip_entry)) { 1469 + mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry); 1470 + return 0; 1471 + } 1472 + 1457 1473 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry, 1458 1474 true, false, false, extack); 1459 1475 } ··· 3363 3343 return ul_dev ? (ul_dev->flags & IFF_UP) : true; 3364 3344 } 3365 3345 3366 - static int mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp, 3367 - struct mlxsw_sp_nexthop *nh, 3368 - struct net_device *ol_dev) 3346 + static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp, 3347 + struct mlxsw_sp_nexthop *nh, 3348 + struct mlxsw_sp_ipip_entry *ipip_entry) 3369 3349 { 3370 3350 bool removing; 3371 3351 3372 3352 if (!nh->nh_grp->gateway || nh->ipip_entry) 3373 - return 0; 3353 + return; 3374 3354 3375 - nh->ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev); 3376 - if (!nh->ipip_entry) 3377 - return -ENOENT; 3378 - 3379 - removing = !mlxsw_sp_ipip_netdev_ul_up(ol_dev); 3355 + nh->ipip_entry = ipip_entry; 3356 + removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev); 3380 3357 __mlxsw_sp_nexthop_neigh_update(nh, removing); 3381 - return 0; 3358 + mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common); 3382 3359 } 3383 3360 3384 3361 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp, ··· 3420 3403 struct mlxsw_sp_nexthop *nh, 3421 3404 struct fib_nh *fib_nh) 3422 3405 { 3423 - struct mlxsw_sp_router *router = mlxsw_sp->router; 3406 + const struct mlxsw_sp_ipip_ops *ipip_ops; 3424 3407 struct net_device *dev = fib_nh->nh_dev; 3425 - enum mlxsw_sp_ipip_type ipipt; 3408 + struct mlxsw_sp_ipip_entry *ipip_entry; 3426 3409 struct mlxsw_sp_rif *rif; 3427 3410 int err; 3428 3411 3429 
- if (mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fib_nh, &ipipt) && 3430 - router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev, 3431 - MLXSW_SP_L3_PROTO_IPV4)) { 3432 - nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; 3433 - err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev); 3434 - if (err) 3435 - return err; 3436 - mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common); 3437 - return 0; 3412 + ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev); 3413 + if (ipip_entry) { 3414 + ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]; 3415 + if (ipip_ops->can_offload(mlxsw_sp, dev, 3416 + MLXSW_SP_L3_PROTO_IPV4)) { 3417 + nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; 3418 + mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry); 3419 + return 0; 3420 + } 3438 3421 } 3439 3422 3440 3423 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH; ··· 3560 3543 __mlxsw_sp_nexthop_neigh_update(nh, removing); 3561 3544 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); 3562 3545 } 3546 + } 3547 + 3548 + static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp, 3549 + struct mlxsw_sp_rif *old_rif, 3550 + struct mlxsw_sp_rif *new_rif) 3551 + { 3552 + struct mlxsw_sp_nexthop *nh; 3553 + 3554 + list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list); 3555 + list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node) 3556 + nh->rif = new_rif; 3557 + mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif); 3563 3558 } 3564 3559 3565 3560 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, ··· 4025 3996 case RTN_LOCAL: 4026 3997 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev, 4027 3998 MLXSW_SP_L3_PROTO_IPV4, dip); 4028 - if (ipip_entry) { 3999 + if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) { 4029 4000 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP; 4030 4001 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, 4031 4002 fib_entry, ··· 4723 4694 struct mlxsw_sp_nexthop *nh, 4724 4695 const struct rt6_info *rt) 4725 4696 { 4726 - struct 
mlxsw_sp_router *router = mlxsw_sp->router; 4697 + const struct mlxsw_sp_ipip_ops *ipip_ops; 4698 + struct mlxsw_sp_ipip_entry *ipip_entry; 4727 4699 struct net_device *dev = rt->dst.dev; 4728 - enum mlxsw_sp_ipip_type ipipt; 4729 4700 struct mlxsw_sp_rif *rif; 4730 4701 int err; 4731 4702 4732 - if (mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, &ipipt) && 4733 - router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev, 4734 - MLXSW_SP_L3_PROTO_IPV6)) { 4735 - nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; 4736 - err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev); 4737 - if (err) 4738 - return err; 4739 - mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common); 4740 - return 0; 4703 + ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev); 4704 + if (ipip_entry) { 4705 + ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]; 4706 + if (ipip_ops->can_offload(mlxsw_sp, dev, 4707 + MLXSW_SP_L3_PROTO_IPV6)) { 4708 + nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; 4709 + mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry); 4710 + return 0; 4711 + } 4741 4712 } 4742 4713 4743 4714 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
+2 -2
drivers/net/ethernet/nvidia/forcedeth.c
··· 1986 1986 tx_skb->dma_len, 1987 1987 DMA_TO_DEVICE); 1988 1988 else 1989 - pci_unmap_page(np->pci_dev, tx_skb->dma, 1989 + dma_unmap_page(&np->pci_dev->dev, tx_skb->dma, 1990 1990 tx_skb->dma_len, 1991 - PCI_DMA_TODEVICE); 1991 + DMA_TO_DEVICE); 1992 1992 tx_skb->dma = 0; 1993 1993 } 1994 1994 }
+2 -2
drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
··· 1 1 /* 2 2 * dwmac-stm32.c - DWMAC Specific Glue layer for STM32 MCU 3 3 * 4 - * Copyright (C) Alexandre Torgue 2015 5 - * Author: Alexandre Torgue <alexandre.torgue@gmail.com> 4 + * Copyright (C) STMicroelectronics SA 2017 5 + * Author: Alexandre Torgue <alexandre.torgue@st.com> for STMicroelectronics. 6 6 * License terms: GNU General Public License (GPL), version 2 7 7 * 8 8 */
+2 -2
drivers/net/ethernet/via/via-rhine.c
··· 995 995 else 996 996 name = "Rhine III"; 997 997 998 - netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n", 999 - name, (long)ioaddr, dev->dev_addr, rp->irq); 998 + netdev_info(dev, "VIA %s at %p, %pM, IRQ %d\n", 999 + name, ioaddr, dev->dev_addr, rp->irq); 1000 1000 1001 1001 dev_set_drvdata(hwdev, dev); 1002 1002
+1
drivers/net/ethernet/xilinx/Kconfig
··· 34 34 config XILINX_LL_TEMAC 35 35 tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver" 36 36 depends on (PPC || MICROBLAZE) 37 + depends on !64BIT || BROKEN 37 38 select PHYLIB 38 39 ---help--- 39 40 This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
+3 -2
drivers/net/phy/marvell10g.c
··· 16 16 * link takes priority and the other port is completely locked out. 17 17 */ 18 18 #include <linux/phy.h> 19 + #include <linux/marvell_phy.h> 19 20 20 21 enum { 21 22 MV_PCS_BASE_T = 0x0000, ··· 339 338 static struct phy_driver mv3310_drivers[] = { 340 339 { 341 340 .phy_id = 0x002b09aa, 342 - .phy_id_mask = 0xffffffff, 341 + .phy_id_mask = MARVELL_PHY_ID_MASK, 343 342 .name = "mv88x3310", 344 343 .features = SUPPORTED_10baseT_Full | 345 344 SUPPORTED_100baseT_Full | ··· 361 360 module_phy_driver(mv3310_drivers); 362 361 363 362 static struct mdio_device_id __maybe_unused mv3310_tbl[] = { 364 - { 0x002b09aa, 0xffffffff }, 363 + { 0x002b09aa, MARVELL_PHY_ID_MASK }, 365 364 { }, 366 365 }; 367 366 MODULE_DEVICE_TABLE(mdio, mv3310_tbl);
+24 -33
drivers/net/thunderbolt.c
··· 335 335 if (ring->ring->is_tx) { 336 336 dir = DMA_TO_DEVICE; 337 337 order = 0; 338 - size = tbnet_frame_size(tf); 338 + size = TBNET_FRAME_SIZE; 339 339 } else { 340 340 dir = DMA_FROM_DEVICE; 341 341 order = TBNET_RX_PAGE_ORDER; ··· 512 512 static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net) 513 513 { 514 514 struct tbnet_ring *ring = &net->tx_ring; 515 + struct device *dma_dev = tb_ring_dma_device(ring->ring); 515 516 struct tbnet_frame *tf; 516 517 unsigned int index; 517 518 ··· 523 522 524 523 tf = &ring->frames[index]; 525 524 tf->frame.size = 0; 526 - tf->frame.buffer_phy = 0; 525 + 526 + dma_sync_single_for_cpu(dma_dev, tf->frame.buffer_phy, 527 + tbnet_frame_size(tf), DMA_TO_DEVICE); 527 528 528 529 return tf; 529 530 } ··· 534 531 bool canceled) 535 532 { 536 533 struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame); 537 - struct device *dma_dev = tb_ring_dma_device(ring); 538 534 struct tbnet *net = netdev_priv(tf->dev); 539 - 540 - dma_unmap_page(dma_dev, tf->frame.buffer_phy, tbnet_frame_size(tf), 541 - DMA_TO_DEVICE); 542 - tf->frame.buffer_phy = 0; 543 535 544 536 /* Return buffer to the ring */ 545 537 net->tx_ring.prod++; ··· 546 548 static int tbnet_alloc_tx_buffers(struct tbnet *net) 547 549 { 548 550 struct tbnet_ring *ring = &net->tx_ring; 551 + struct device *dma_dev = tb_ring_dma_device(ring->ring); 549 552 unsigned int i; 550 553 551 554 for (i = 0; i < TBNET_RING_SIZE; i++) { 552 555 struct tbnet_frame *tf = &ring->frames[i]; 556 + dma_addr_t dma_addr; 553 557 554 558 tf->page = alloc_page(GFP_KERNEL); 555 559 if (!tf->page) { ··· 559 559 return -ENOMEM; 560 560 } 561 561 562 + dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE, 563 + DMA_TO_DEVICE); 564 + if (dma_mapping_error(dma_dev, dma_addr)) { 565 + __free_page(tf->page); 566 + tf->page = NULL; 567 + tbnet_free_buffers(ring); 568 + return -ENOMEM; 569 + } 570 + 562 571 tf->dev = net->dev; 572 + tf->frame.buffer_phy = dma_addr; 563 573 
tf->frame.callback = tbnet_tx_callback; 564 574 tf->frame.sof = TBIP_PDF_FRAME_START; 565 575 tf->frame.eof = TBIP_PDF_FRAME_END; ··· 891 881 return 0; 892 882 } 893 883 894 - static bool tbnet_xmit_map(struct device *dma_dev, struct tbnet_frame *tf) 895 - { 896 - dma_addr_t dma_addr; 897 - 898 - dma_addr = dma_map_page(dma_dev, tf->page, 0, tbnet_frame_size(tf), 899 - DMA_TO_DEVICE); 900 - if (dma_mapping_error(dma_dev, dma_addr)) 901 - return false; 902 - 903 - tf->frame.buffer_phy = dma_addr; 904 - return true; 905 - } 906 - 907 884 static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb, 908 885 struct tbnet_frame **frames, u32 frame_count) 909 886 { ··· 905 908 906 909 if (skb->ip_summed != CHECKSUM_PARTIAL) { 907 910 /* No need to calculate checksum so we just update the 908 - * total frame count and map the frames for DMA. 911 + * total frame count and sync the frames for DMA. 909 912 */ 910 913 for (i = 0; i < frame_count; i++) { 911 914 hdr = page_address(frames[i]->page); 912 915 hdr->frame_count = cpu_to_le32(frame_count); 913 - if (!tbnet_xmit_map(dma_dev, frames[i])) 914 - goto err_unmap; 916 + dma_sync_single_for_device(dma_dev, 917 + frames[i]->frame.buffer_phy, 918 + tbnet_frame_size(frames[i]), DMA_TO_DEVICE); 915 919 } 916 920 917 921 return true; ··· 981 983 *tucso = csum_fold(wsum); 982 984 983 985 /* Checksum is finally calculated and we don't touch the memory 984 - * anymore, so DMA map the frames now. 986 + * anymore, so DMA sync the frames now. 
985 987 */ 986 988 for (i = 0; i < frame_count; i++) { 987 - if (!tbnet_xmit_map(dma_dev, frames[i])) 988 - goto err_unmap; 989 + dma_sync_single_for_device(dma_dev, frames[i]->frame.buffer_phy, 990 + tbnet_frame_size(frames[i]), DMA_TO_DEVICE); 989 991 } 990 992 991 993 return true; 992 - 993 - err_unmap: 994 - while (i--) 995 - dma_unmap_page(dma_dev, frames[i]->frame.buffer_phy, 996 - tbnet_frame_size(frames[i]), DMA_TO_DEVICE); 997 - 998 - return false; 999 994 } 1000 995 1001 996 static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
+2 -2
drivers/net/vxlan.c
··· 874 874 875 875 static int __vxlan_fdb_delete(struct vxlan_dev *vxlan, 876 876 const unsigned char *addr, union vxlan_addr ip, 877 - __be16 port, __be32 src_vni, u32 vni, u32 ifindex, 878 - u16 vid) 877 + __be16 port, __be32 src_vni, __be32 vni, 878 + u32 ifindex, u16 vid) 879 879 { 880 880 struct vxlan_fdb *f; 881 881 struct vxlan_rdst *rd = NULL;
+3 -10
drivers/net/wan/lmc/lmc_main.c
··· 494 494 break; 495 495 } 496 496 497 - data = kmalloc(xc.len, GFP_KERNEL); 498 - if (!data) { 499 - ret = -ENOMEM; 497 + data = memdup_user(xc.data, xc.len); 498 + if (IS_ERR(data)) { 499 + ret = PTR_ERR(data); 500 500 break; 501 - } 502 - 503 - if(copy_from_user(data, xc.data, xc.len)) 504 - { 505 - kfree(data); 506 - ret = -ENOMEM; 507 - break; 508 501 } 509 502 510 503 printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);
+1 -1
drivers/net/wireless/ath/ath9k/channel.c
··· 1113 1113 if (!avp->assoc) 1114 1114 return false; 1115 1115 1116 - skb = ieee80211_nullfunc_get(sc->hw, vif); 1116 + skb = ieee80211_nullfunc_get(sc->hw, vif, false); 1117 1117 if (!skb) 1118 1118 return false; 1119 1119
+2 -2
drivers/net/wireless/st/cw1200/sta.c
··· 198 198 199 199 priv->bss_loss_state++; 200 200 201 - skb = ieee80211_nullfunc_get(priv->hw, priv->vif); 201 + skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false); 202 202 WARN_ON(!skb); 203 203 if (skb) 204 204 cw1200_tx(priv->hw, NULL, skb); ··· 2265 2265 .rate = 0xFF, 2266 2266 }; 2267 2267 2268 - frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif); 2268 + frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false); 2269 2269 if (!frame.skb) 2270 2270 return -ENOMEM; 2271 2271
+1 -1
drivers/net/wireless/ti/wl1251/main.c
··· 566 566 size = sizeof(struct wl12xx_null_data_template); 567 567 ptr = NULL; 568 568 } else { 569 - skb = ieee80211_nullfunc_get(wl->hw, wl->vif); 569 + skb = ieee80211_nullfunc_get(wl->hw, wl->vif, false); 570 570 if (!skb) 571 571 goto out; 572 572 size = skb->len;
+3 -2
drivers/net/wireless/ti/wlcore/cmd.c
··· 1069 1069 ptr = NULL; 1070 1070 } else { 1071 1071 skb = ieee80211_nullfunc_get(wl->hw, 1072 - wl12xx_wlvif_to_vif(wlvif)); 1072 + wl12xx_wlvif_to_vif(wlvif), 1073 + false); 1073 1074 if (!skb) 1074 1075 goto out; 1075 1076 size = skb->len; ··· 1097 1096 struct sk_buff *skb = NULL; 1098 1097 int ret = -ENOMEM; 1099 1098 1100 - skb = ieee80211_nullfunc_get(wl->hw, vif); 1099 + skb = ieee80211_nullfunc_get(wl->hw, vif, false); 1101 1100 if (!skb) 1102 1101 goto out; 1103 1102
+18
drivers/net/xen-netfront.c
··· 87 87 /* IRQ name is queue name with "-tx" or "-rx" appended */ 88 88 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) 89 89 90 + static DECLARE_WAIT_QUEUE_HEAD(module_unload_q); 91 + 90 92 struct netfront_stats { 91 93 u64 packets; 92 94 u64 bytes; ··· 2022 2020 break; 2023 2021 2024 2022 case XenbusStateClosed: 2023 + wake_up_all(&module_unload_q); 2025 2024 if (dev->state == XenbusStateClosed) 2026 2025 break; 2027 2026 /* Missed the backend's CLOSING state -- fallthrough */ 2028 2027 case XenbusStateClosing: 2028 + wake_up_all(&module_unload_q); 2029 2029 xenbus_frontend_closed(dev); 2030 2030 break; 2031 2031 } ··· 2132 2128 struct netfront_info *info = dev_get_drvdata(&dev->dev); 2133 2129 2134 2130 dev_dbg(&dev->dev, "%s\n", dev->nodename); 2131 + 2132 + if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { 2133 + xenbus_switch_state(dev, XenbusStateClosing); 2134 + wait_event(module_unload_q, 2135 + xenbus_read_driver_state(dev->otherend) == 2136 + XenbusStateClosing); 2137 + 2138 + xenbus_switch_state(dev, XenbusStateClosed); 2139 + wait_event(module_unload_q, 2140 + xenbus_read_driver_state(dev->otherend) == 2141 + XenbusStateClosed || 2142 + xenbus_read_driver_state(dev->otherend) == 2143 + XenbusStateUnknown); 2144 + } 2135 2145 2136 2146 xennet_disconnect_backend(info); 2137 2147
+7 -1
include/net/mac80211.h
··· 4470 4470 * ieee80211_nullfunc_get - retrieve a nullfunc template 4471 4471 * @hw: pointer obtained from ieee80211_alloc_hw(). 4472 4472 * @vif: &struct ieee80211_vif pointer from the add_interface callback. 4473 + * @qos_ok: QoS NDP is acceptable to the caller, this should be set 4474 + * if at all possible 4473 4475 * 4474 4476 * Creates a Nullfunc template which can, for example, uploaded to 4475 4477 * hardware. The template must be updated after association so that correct 4476 4478 * BSSID and address is used. 4479 + * 4480 + * If @qos_ndp is set and the association is to an AP with QoS/WMM, the 4481 + * returned packet will be QoS NDP. 4477 4482 * 4478 4483 * Note: Caller (or hardware) is responsible for setting the 4479 4484 * &IEEE80211_FCTL_PM bit as well as Duration and Sequence Control fields. ··· 4486 4481 * Return: The nullfunc template. %NULL on error. 4487 4482 */ 4488 4483 struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw, 4489 - struct ieee80211_vif *vif); 4484 + struct ieee80211_vif *vif, 4485 + bool qos_ok); 4490 4486 4491 4487 /** 4492 4488 * ieee80211_probereq_get - retrieve a Probe Request template
+7 -6
include/net/sctp/checksum.h
··· 48 48 /* This uses the crypto implementation of crc32c, which is either 49 49 * implemented w/ hardware support or resolves to __crc32c_le(). 50 50 */ 51 - return crc32c(sum, buff, len); 51 + return (__force __wsum)crc32c((__force __u32)sum, buff, len); 52 52 } 53 53 54 54 static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2, 55 55 int offset, int len) 56 56 { 57 - return __crc32c_le_combine(csum, csum2, len); 57 + return (__force __wsum)__crc32c_le_combine((__force __u32)csum, 58 + (__force __u32)csum2, len); 58 59 } 59 60 60 61 static inline __le32 sctp_compute_cksum(const struct sk_buff *skb, 61 62 unsigned int offset) 62 63 { 63 64 struct sctphdr *sh = sctp_hdr(skb); 64 - __le32 ret, old = sh->checksum; 65 65 const struct skb_checksum_ops ops = { 66 66 .update = sctp_csum_update, 67 67 .combine = sctp_csum_combine, 68 68 }; 69 + __le32 old = sh->checksum; 70 + __wsum new; 69 71 70 72 sh->checksum = 0; 71 - ret = cpu_to_le32(~__skb_checksum(skb, offset, skb->len - offset, 72 - ~(__u32)0, &ops)); 73 + new = ~__skb_checksum(skb, offset, skb->len - offset, ~(__wsum)0, &ops); 73 74 sh->checksum = old; 74 75 75 - return ret; 76 + return cpu_to_le32((__force __u32)new); 76 77 } 77 78 78 79 #endif /* __sctp_checksum_h__ */
+5
include/net/sctp/sctp.h
··· 195 195 int sctp_offload_init(void); 196 196 197 197 /* 198 + * sctp/stream_sched.c 199 + */ 200 + void sctp_sched_ops_init(void); 201 + 202 + /* 198 203 * sctp/stream.c 199 204 */ 200 205 int sctp_send_reset_streams(struct sctp_association *asoc,
+5
include/net/sctp/stream_sched.h
··· 69 69 int sctp_sched_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp); 70 70 struct sctp_sched_ops *sctp_sched_ops_from_stream(struct sctp_stream *stream); 71 71 72 + void sctp_sched_ops_register(enum sctp_sched_type sched, 73 + struct sctp_sched_ops *sched_ops); 74 + void sctp_sched_ops_prio_init(void); 75 + void sctp_sched_ops_rr_init(void); 76 + 72 77 #endif /* __sctp_stream_sched_h__ */
+63 -23
include/trace/events/rxrpc.h
··· 49 49 rxrpc_conn_put_client, 50 50 rxrpc_conn_put_service, 51 51 rxrpc_conn_queued, 52 + rxrpc_conn_reap_service, 52 53 rxrpc_conn_seen, 53 54 }; 54 55 ··· 139 138 140 139 enum rxrpc_timer_trace { 141 140 rxrpc_timer_begin, 141 + rxrpc_timer_exp_ack, 142 + rxrpc_timer_exp_hard, 143 + rxrpc_timer_exp_idle, 144 + rxrpc_timer_exp_keepalive, 145 + rxrpc_timer_exp_lost_ack, 146 + rxrpc_timer_exp_normal, 147 + rxrpc_timer_exp_ping, 148 + rxrpc_timer_exp_resend, 142 149 rxrpc_timer_expired, 143 150 rxrpc_timer_init_for_reply, 144 151 rxrpc_timer_init_for_send_reply, 152 + rxrpc_timer_restart, 145 153 rxrpc_timer_set_for_ack, 154 + rxrpc_timer_set_for_hard, 155 + rxrpc_timer_set_for_idle, 156 + rxrpc_timer_set_for_keepalive, 157 + rxrpc_timer_set_for_lost_ack, 158 + rxrpc_timer_set_for_normal, 146 159 rxrpc_timer_set_for_ping, 147 160 rxrpc_timer_set_for_resend, 148 161 rxrpc_timer_set_for_send, ··· 165 150 enum rxrpc_propose_ack_trace { 166 151 rxrpc_propose_ack_client_tx_end, 167 152 rxrpc_propose_ack_input_data, 153 + rxrpc_propose_ack_ping_for_keepalive, 168 154 rxrpc_propose_ack_ping_for_lost_ack, 169 155 rxrpc_propose_ack_ping_for_lost_reply, 170 156 rxrpc_propose_ack_ping_for_params, ··· 222 206 EM(rxrpc_conn_put_client, "PTc") \ 223 207 EM(rxrpc_conn_put_service, "PTs") \ 224 208 EM(rxrpc_conn_queued, "QUE") \ 209 + EM(rxrpc_conn_reap_service, "RPs") \ 225 210 E_(rxrpc_conn_seen, "SEE") 226 211 227 212 #define rxrpc_client_traces \ ··· 313 296 #define rxrpc_timer_traces \ 314 297 EM(rxrpc_timer_begin, "Begin ") \ 315 298 EM(rxrpc_timer_expired, "*EXPR*") \ 299 + EM(rxrpc_timer_exp_ack, "ExpAck") \ 300 + EM(rxrpc_timer_exp_hard, "ExpHrd") \ 301 + EM(rxrpc_timer_exp_idle, "ExpIdl") \ 302 + EM(rxrpc_timer_exp_keepalive, "ExpKA ") \ 303 + EM(rxrpc_timer_exp_lost_ack, "ExpLoA") \ 304 + EM(rxrpc_timer_exp_normal, "ExpNml") \ 305 + EM(rxrpc_timer_exp_ping, "ExpPng") \ 306 + EM(rxrpc_timer_exp_resend, "ExpRsn") \ 316 307 EM(rxrpc_timer_init_for_reply, "IniRpl") \ 317 
308 EM(rxrpc_timer_init_for_send_reply, "SndRpl") \ 309 + EM(rxrpc_timer_restart, "Restrt") \ 318 310 EM(rxrpc_timer_set_for_ack, "SetAck") \ 311 + EM(rxrpc_timer_set_for_hard, "SetHrd") \ 312 + EM(rxrpc_timer_set_for_idle, "SetIdl") \ 313 + EM(rxrpc_timer_set_for_keepalive, "KeepAl") \ 314 + EM(rxrpc_timer_set_for_lost_ack, "SetLoA") \ 315 + EM(rxrpc_timer_set_for_normal, "SetNml") \ 319 316 EM(rxrpc_timer_set_for_ping, "SetPng") \ 320 317 EM(rxrpc_timer_set_for_resend, "SetRTx") \ 321 - E_(rxrpc_timer_set_for_send, "SetTx ") 318 + E_(rxrpc_timer_set_for_send, "SetSnd") 322 319 323 320 #define rxrpc_propose_ack_traces \ 324 321 EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \ 325 322 EM(rxrpc_propose_ack_input_data, "DataIn ") \ 323 + EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \ 326 324 EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \ 327 325 EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \ 328 326 EM(rxrpc_propose_ack_ping_for_params, "Params ") \ ··· 964 932 965 933 TRACE_EVENT(rxrpc_timer, 966 934 TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why, 967 - ktime_t now, unsigned long now_j), 935 + unsigned long now), 968 936 969 - TP_ARGS(call, why, now, now_j), 937 + TP_ARGS(call, why, now), 970 938 971 939 TP_STRUCT__entry( 972 940 __field(struct rxrpc_call *, call ) 973 941 __field(enum rxrpc_timer_trace, why ) 974 - __field_struct(ktime_t, now ) 975 - __field_struct(ktime_t, expire_at ) 976 - __field_struct(ktime_t, ack_at ) 977 - __field_struct(ktime_t, resend_at ) 978 - __field(unsigned long, now_j ) 979 - __field(unsigned long, timer ) 942 + __field(long, now ) 943 + __field(long, ack_at ) 944 + __field(long, ack_lost_at ) 945 + __field(long, resend_at ) 946 + __field(long, ping_at ) 947 + __field(long, expect_rx_by ) 948 + __field(long, expect_req_by ) 949 + __field(long, expect_term_by ) 950 + __field(long, timer ) 980 951 ), 981 952 982 953 TP_fast_assign( 983 - __entry->call = call; 984 - __entry->why = why; 985 - 
__entry->now = now; 986 - __entry->expire_at = call->expire_at; 987 - __entry->ack_at = call->ack_at; 988 - __entry->resend_at = call->resend_at; 989 - __entry->now_j = now_j; 990 - __entry->timer = call->timer.expires; 954 + __entry->call = call; 955 + __entry->why = why; 956 + __entry->now = now; 957 + __entry->ack_at = call->ack_at; 958 + __entry->ack_lost_at = call->ack_lost_at; 959 + __entry->resend_at = call->resend_at; 960 + __entry->expect_rx_by = call->expect_rx_by; 961 + __entry->expect_req_by = call->expect_req_by; 962 + __entry->expect_term_by = call->expect_term_by; 963 + __entry->timer = call->timer.expires; 991 964 ), 992 965 993 - TP_printk("c=%p %s x=%lld a=%lld r=%lld t=%ld", 966 + TP_printk("c=%p %s a=%ld la=%ld r=%ld xr=%ld xq=%ld xt=%ld t=%ld", 994 967 __entry->call, 995 968 __print_symbolic(__entry->why, rxrpc_timer_traces), 996 - ktime_to_ns(ktime_sub(__entry->expire_at, __entry->now)), 997 - ktime_to_ns(ktime_sub(__entry->ack_at, __entry->now)), 998 - ktime_to_ns(ktime_sub(__entry->resend_at, __entry->now)), 999 - __entry->timer - __entry->now_j) 969 + __entry->ack_at - __entry->now, 970 + __entry->ack_lost_at - __entry->now, 971 + __entry->resend_at - __entry->now, 972 + __entry->expect_rx_by - __entry->now, 973 + __entry->expect_req_by - __entry->now, 974 + __entry->expect_term_by - __entry->now, 975 + __entry->timer - __entry->now) 1000 976 ); 1001 977 1002 978 TRACE_EVENT(rxrpc_rx_lose, ··· 1120 1080 memcpy(&__entry->sum, summary, sizeof(__entry->sum)); 1121 1081 ), 1122 1082 1123 - TP_printk("c=%p %08x %s %08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s", 1083 + TP_printk("c=%p r=%08x %s q=%08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s", 1124 1084 __entry->call, 1125 1085 __entry->ack_serial, 1126 1086 __print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names),
+1
include/uapi/linux/rxrpc.h
··· 59 59 RXRPC_EXCLUSIVE_CALL = 10, /* s-: Call should be on exclusive connection */ 60 60 RXRPC_UPGRADE_SERVICE = 11, /* s-: Request service upgrade for client call */ 61 61 RXRPC_TX_LENGTH = 12, /* s-: Total length of Tx data */ 62 + RXRPC_SET_CALL_TIMEOUT = 13, /* s-: Set one or more call timeouts */ 62 63 RXRPC__SUPPORTED 63 64 }; 64 65
+1
include/uapi/linux/vm_sockets_diag.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 1 2 /* AF_VSOCK sock_diag(7) interface for querying open sockets */ 2 3 3 4 #ifndef _UAPI__VM_SOCKETS_DIAG_H__
+15 -12
net/dsa/dsa2.c
··· 51 51 INIT_LIST_HEAD(&dst->list); 52 52 list_add_tail(&dsa_tree_list, &dst->list); 53 53 54 - /* Initialize the reference counter to the number of switches, not 1 */ 55 54 kref_init(&dst->refcount); 56 - refcount_set(&dst->refcount.refcount, 0); 57 55 58 56 return dst; 59 57 } ··· 62 64 kfree(dst); 63 65 } 64 66 67 + static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst) 68 + { 69 + if (dst) 70 + kref_get(&dst->refcount); 71 + 72 + return dst; 73 + } 74 + 65 75 static struct dsa_switch_tree *dsa_tree_touch(int index) 66 76 { 67 77 struct dsa_switch_tree *dst; 68 78 69 79 dst = dsa_tree_find(index); 70 - if (!dst) 71 - dst = dsa_tree_alloc(index); 72 - 73 - return dst; 74 - } 75 - 76 - static void dsa_tree_get(struct dsa_switch_tree *dst) 77 - { 78 - kref_get(&dst->refcount); 80 + if (dst) 81 + return dsa_tree_get(dst); 82 + else 83 + return dsa_tree_alloc(index); 79 84 } 80 85 81 86 static void dsa_tree_release(struct kref *ref) ··· 92 91 93 92 static void dsa_tree_put(struct dsa_switch_tree *dst) 94 93 { 95 - kref_put(&dst->refcount, dsa_tree_release); 94 + if (dst) 95 + kref_put(&dst->refcount, dsa_tree_release); 96 96 } 97 97 98 98 static bool dsa_port_is_dsa(struct dsa_port *port) ··· 767 765 768 766 mutex_lock(&dsa2_mutex); 769 767 err = dsa_switch_probe(ds); 768 + dsa_tree_put(ds->dst); 770 769 mutex_unlock(&dsa2_mutex); 771 770 772 771 return err;
+3 -1
net/mac80211/ht.c
··· 292 292 293 293 mutex_lock(&sta->ampdu_mlme.mtx); 294 294 for (i = 0; i < IEEE80211_NUM_TIDS; i++) { 295 - ___ieee80211_stop_tx_ba_session(sta, i, reason); 296 295 ___ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT, 297 296 WLAN_REASON_QSTA_LEAVE_QBSS, 298 297 reason != AGG_STOP_DESTROY_STA && 299 298 reason != AGG_STOP_PEER_REQUEST); 300 299 } 301 300 mutex_unlock(&sta->ampdu_mlme.mtx); 301 + 302 + for (i = 0; i < IEEE80211_NUM_TIDS; i++) 303 + ___ieee80211_stop_tx_ba_session(sta, i, reason); 302 304 303 305 /* stopping might queue the work again - so cancel only afterwards */ 304 306 cancel_work_sync(&sta->ampdu_mlme.work);
+9 -6
net/mac80211/mesh_hwmp.c
··· 797 797 struct mesh_path *mpath; 798 798 u8 ttl, flags, hopcount; 799 799 const u8 *orig_addr; 800 - u32 orig_sn, metric, metric_txsta, interval; 800 + u32 orig_sn, new_metric, orig_metric, last_hop_metric, interval; 801 801 bool root_is_gate; 802 802 803 803 ttl = rann->rann_ttl; ··· 808 808 interval = le32_to_cpu(rann->rann_interval); 809 809 hopcount = rann->rann_hopcount; 810 810 hopcount++; 811 - metric = le32_to_cpu(rann->rann_metric); 811 + orig_metric = le32_to_cpu(rann->rann_metric); 812 812 813 813 /* Ignore our own RANNs */ 814 814 if (ether_addr_equal(orig_addr, sdata->vif.addr)) ··· 825 825 return; 826 826 } 827 827 828 - metric_txsta = airtime_link_metric_get(local, sta); 828 + last_hop_metric = airtime_link_metric_get(local, sta); 829 + new_metric = orig_metric + last_hop_metric; 830 + if (new_metric < orig_metric) 831 + new_metric = MAX_METRIC; 829 832 830 833 mpath = mesh_path_lookup(sdata, orig_addr); 831 834 if (!mpath) { ··· 841 838 } 842 839 843 840 if (!(SN_LT(mpath->sn, orig_sn)) && 844 - !(mpath->sn == orig_sn && metric < mpath->rann_metric)) { 841 + !(mpath->sn == orig_sn && new_metric < mpath->rann_metric)) { 845 842 rcu_read_unlock(); 846 843 return; 847 844 } ··· 859 856 } 860 857 861 858 mpath->sn = orig_sn; 862 - mpath->rann_metric = metric + metric_txsta; 859 + mpath->rann_metric = new_metric; 863 860 mpath->is_root = true; 864 861 /* Recording RANNs sender address to send individually 865 862 * addressed PREQs destined for root mesh STA */ ··· 879 876 mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr, 880 877 orig_sn, 0, NULL, 0, broadcast_addr, 881 878 hopcount, ttl, interval, 882 - metric + metric_txsta, 0, sdata); 879 + new_metric, 0, sdata); 883 880 } 884 881 885 882 rcu_read_unlock();
+1 -1
net/mac80211/mlme.c
··· 895 895 struct ieee80211_hdr_3addr *nullfunc; 896 896 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 897 897 898 - skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif); 898 + skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif, true); 899 899 if (!skb) 900 900 return; 901 901
+27 -2
net/mac80211/tx.c
··· 4438 4438 EXPORT_SYMBOL(ieee80211_pspoll_get); 4439 4439 4440 4440 struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw, 4441 - struct ieee80211_vif *vif) 4441 + struct ieee80211_vif *vif, 4442 + bool qos_ok) 4442 4443 { 4443 4444 struct ieee80211_hdr_3addr *nullfunc; 4444 4445 struct ieee80211_sub_if_data *sdata; 4445 4446 struct ieee80211_if_managed *ifmgd; 4446 4447 struct ieee80211_local *local; 4447 4448 struct sk_buff *skb; 4449 + bool qos = false; 4448 4450 4449 4451 if (WARN_ON(vif->type != NL80211_IFTYPE_STATION)) 4450 4452 return NULL; ··· 4455 4453 ifmgd = &sdata->u.mgd; 4456 4454 local = sdata->local; 4457 4455 4458 - skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc)); 4456 + if (qos_ok) { 4457 + struct sta_info *sta; 4458 + 4459 + rcu_read_lock(); 4460 + sta = sta_info_get(sdata, ifmgd->bssid); 4461 + qos = sta && sta->sta.wme; 4462 + rcu_read_unlock(); 4463 + } 4464 + 4465 + skb = dev_alloc_skb(local->hw.extra_tx_headroom + 4466 + sizeof(*nullfunc) + 2); 4459 4467 if (!skb) 4460 4468 return NULL; 4461 4469 ··· 4475 4463 nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | 4476 4464 IEEE80211_STYPE_NULLFUNC | 4477 4465 IEEE80211_FCTL_TODS); 4466 + if (qos) { 4467 + __le16 qos = cpu_to_le16(7); 4468 + 4469 + BUILD_BUG_ON((IEEE80211_STYPE_QOS_NULLFUNC | 4470 + IEEE80211_STYPE_NULLFUNC) != 4471 + IEEE80211_STYPE_QOS_NULLFUNC); 4472 + nullfunc->frame_control |= 4473 + cpu_to_le16(IEEE80211_STYPE_QOS_NULLFUNC); 4474 + skb->priority = 7; 4475 + skb_set_queue_mapping(skb, IEEE80211_AC_VO); 4476 + skb_put_data(skb, &qos, sizeof(qos)); 4477 + } 4478 + 4478 4479 memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN); 4479 4480 memcpy(nullfunc->addr2, vif->addr, ETH_ALEN); 4480 4481 memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN);
+1 -1
net/openvswitch/datapath.c
··· 308 308 const struct dp_upcall_info *upcall_info, 309 309 uint32_t cutlen) 310 310 { 311 - unsigned short gso_type = skb_shinfo(skb)->gso_type; 311 + unsigned int gso_type = skb_shinfo(skb)->gso_type; 312 312 struct sw_flow_key later_key; 313 313 struct sk_buff *segs, *nskb; 314 314 int err;
+8 -8
net/openvswitch/flow_netlink.c
··· 2241 2241 2242 2242 #define MAX_ACTIONS_BUFSIZE (32 * 1024) 2243 2243 2244 - static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log) 2244 + static struct sw_flow_actions *nla_alloc_flow_actions(int size) 2245 2245 { 2246 2246 struct sw_flow_actions *sfa; 2247 2247 2248 - if (size > MAX_ACTIONS_BUFSIZE) { 2249 - OVS_NLERR(log, "Flow action size %u bytes exceeds max", size); 2250 - return ERR_PTR(-EINVAL); 2251 - } 2248 + WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE); 2252 2249 2253 2250 sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL); 2254 2251 if (!sfa) ··· 2318 2321 new_acts_size = ksize(*sfa) * 2; 2319 2322 2320 2323 if (new_acts_size > MAX_ACTIONS_BUFSIZE) { 2321 - if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) 2324 + if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) { 2325 + OVS_NLERR(log, "Flow action size exceeds max %u", 2326 + MAX_ACTIONS_BUFSIZE); 2322 2327 return ERR_PTR(-EMSGSIZE); 2328 + } 2323 2329 new_acts_size = MAX_ACTIONS_BUFSIZE; 2324 2330 } 2325 2331 2326 - acts = nla_alloc_flow_actions(new_acts_size, log); 2332 + acts = nla_alloc_flow_actions(new_acts_size); 2327 2333 if (IS_ERR(acts)) 2328 2334 return (void *)acts; 2329 2335 ··· 3059 3059 { 3060 3060 int err; 3061 3061 3062 - *sfa = nla_alloc_flow_actions(nla_len(attr), log); 3062 + *sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE)); 3063 3063 if (IS_ERR(*sfa)) 3064 3064 return PTR_ERR(*sfa); 3065 3065
+15 -22
net/packet/af_packet.c
··· 1687 1687 atomic_long_set(&rollover->num, 0); 1688 1688 atomic_long_set(&rollover->num_huge, 0); 1689 1689 atomic_long_set(&rollover->num_failed, 0); 1690 - po->rollover = rollover; 1691 1690 } 1692 1691 1693 1692 if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) { ··· 1744 1745 if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) { 1745 1746 __dev_remove_pack(&po->prot_hook); 1746 1747 po->fanout = match; 1748 + po->rollover = rollover; 1749 + rollover = NULL; 1747 1750 refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1); 1748 1751 __fanout_link(sk, po); 1749 1752 err = 0; ··· 1759 1758 } 1760 1759 1761 1760 out: 1762 - if (err && rollover) { 1763 - kfree_rcu(rollover, rcu); 1764 - po->rollover = NULL; 1765 - } 1761 + kfree(rollover); 1766 1762 mutex_unlock(&fanout_mutex); 1767 1763 return err; 1768 1764 } ··· 1783 1785 list_del(&f->list); 1784 1786 else 1785 1787 f = NULL; 1786 - 1787 - if (po->rollover) { 1788 - kfree_rcu(po->rollover, rcu); 1789 - po->rollover = NULL; 1790 - } 1791 1788 } 1792 1789 mutex_unlock(&fanout_mutex); 1793 1790 ··· 3022 3029 synchronize_net(); 3023 3030 3024 3031 if (f) { 3032 + kfree(po->rollover); 3025 3033 fanout_release_data(f); 3026 3034 kfree(f); 3027 3035 } ··· 3091 3097 if (need_rehook) { 3092 3098 if (po->running) { 3093 3099 rcu_read_unlock(); 3100 + /* prevents packet_notifier() from calling 3101 + * register_prot_hook() 3102 + */ 3103 + po->num = 0; 3094 3104 __unregister_prot_hook(sk, true); 3095 3105 rcu_read_lock(); 3096 3106 dev_curr = po->prot_hook.dev; ··· 3103 3105 dev->ifindex); 3104 3106 } 3105 3107 3108 + BUG_ON(po->running); 3106 3109 po->num = proto; 3107 3110 po->prot_hook.type = proto; 3108 3111 ··· 3842 3843 void *data = &val; 3843 3844 union tpacket_stats_u st; 3844 3845 struct tpacket_rollover_stats rstats; 3845 - struct packet_rollover *rollover; 3846 3846 3847 3847 if (level != SOL_PACKET) 3848 3848 return -ENOPROTOOPT; ··· 3920 3922 0); 3921 3923 break; 3922 3924 case PACKET_ROLLOVER_STATS: 
3923 - rcu_read_lock(); 3924 - rollover = rcu_dereference(po->rollover); 3925 - if (rollover) { 3926 - rstats.tp_all = atomic_long_read(&rollover->num); 3927 - rstats.tp_huge = atomic_long_read(&rollover->num_huge); 3928 - rstats.tp_failed = atomic_long_read(&rollover->num_failed); 3929 - data = &rstats; 3930 - lv = sizeof(rstats); 3931 - } 3932 - rcu_read_unlock(); 3933 - if (!rollover) 3925 + if (!po->rollover) 3934 3926 return -EINVAL; 3927 + rstats.tp_all = atomic_long_read(&po->rollover->num); 3928 + rstats.tp_huge = atomic_long_read(&po->rollover->num_huge); 3929 + rstats.tp_failed = atomic_long_read(&po->rollover->num_failed); 3930 + data = &rstats; 3931 + lv = sizeof(rstats); 3935 3932 break; 3936 3933 case PACKET_TX_HAS_OFF: 3937 3934 val = po->tp_tx_has_off;
-1
net/packet/internal.h
··· 95 95 96 96 struct packet_rollover { 97 97 int sock; 98 - struct rcu_head rcu; 99 98 atomic_long_t num; 100 99 atomic_long_t num_huge; 101 100 atomic_long_t num_failed;
+21 -2
net/rxrpc/af_rxrpc.c
··· 285 285 bool upgrade) 286 286 { 287 287 struct rxrpc_conn_parameters cp; 288 + struct rxrpc_call_params p; 288 289 struct rxrpc_call *call; 289 290 struct rxrpc_sock *rx = rxrpc_sk(sock->sk); 290 291 int ret; ··· 303 302 if (key && !key->payload.data[0]) 304 303 key = NULL; /* a no-security key */ 305 304 305 + memset(&p, 0, sizeof(p)); 306 + p.user_call_ID = user_call_ID; 307 + p.tx_total_len = tx_total_len; 308 + 306 309 memset(&cp, 0, sizeof(cp)); 307 310 cp.local = rx->local; 308 311 cp.key = key; ··· 314 309 cp.exclusive = false; 315 310 cp.upgrade = upgrade; 316 311 cp.service_id = srx->srx_service; 317 - call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, tx_total_len, 318 - gfp); 312 + call = rxrpc_new_client_call(rx, &cp, srx, &p, gfp); 319 313 /* The socket has been unlocked. */ 320 314 if (!IS_ERR(call)) { 321 315 call->notify_rx = notify_rx; ··· 867 863 sock_orphan(sk); 868 864 sk->sk_shutdown = SHUTDOWN_MASK; 869 865 866 + /* We want to kill off all connections from a service socket 867 + * as fast as possible because we can't share these; client 868 + * sockets, on the other hand, can share an endpoint. 869 + */ 870 + switch (sk->sk_state) { 871 + case RXRPC_SERVER_BOUND: 872 + case RXRPC_SERVER_BOUND2: 873 + case RXRPC_SERVER_LISTENING: 874 + case RXRPC_SERVER_LISTEN_DISABLED: 875 + rx->local->service_closed = true; 876 + break; 877 + } 878 + 870 879 spin_lock_bh(&sk->sk_receive_queue.lock); 871 880 sk->sk_state = RXRPC_CLOSE; 872 881 spin_unlock_bh(&sk->sk_receive_queue.lock); ··· 895 878 rxrpc_release_calls_on_socket(rx); 896 879 flush_workqueue(rxrpc_workqueue); 897 880 rxrpc_purge_queue(&sk->sk_receive_queue); 881 + rxrpc_queue_work(&rx->local->rxnet->service_conn_reaper); 882 + rxrpc_queue_work(&rx->local->rxnet->client_conn_reaper); 898 883 899 884 rxrpc_put_local(rx->local); 900 885 rx->local = NULL;
+85 -18
net/rxrpc/ar-internal.h
··· 79 79 struct list_head conn_proc_list; /* List of conns in this namespace for proc */ 80 80 struct list_head service_conns; /* Service conns in this namespace */ 81 81 rwlock_t conn_lock; /* Lock for ->conn_proc_list, ->service_conns */ 82 - struct delayed_work service_conn_reaper; 82 + struct work_struct service_conn_reaper; 83 + struct timer_list service_conn_reap_timer; 83 84 84 85 unsigned int nr_client_conns; 85 86 unsigned int nr_active_client_conns; 86 87 bool kill_all_client_conns; 88 + bool live; 87 89 spinlock_t client_conn_cache_lock; /* Lock for ->*_client_conns */ 88 90 spinlock_t client_conn_discard_lock; /* Prevent multiple discarders */ 89 91 struct list_head waiting_client_conns; 90 92 struct list_head active_client_conns; 91 93 struct list_head idle_client_conns; 92 - struct delayed_work client_conn_reaper; 94 + struct work_struct client_conn_reaper; 95 + struct timer_list client_conn_reap_timer; 93 96 94 97 struct list_head local_endpoints; 95 98 struct mutex local_mutex; /* Lock for ->local_endpoints */ ··· 268 265 rwlock_t services_lock; /* lock for services list */ 269 266 int debug_id; /* debug ID for printks */ 270 267 bool dead; 268 + bool service_closed; /* Service socket closed */ 271 269 struct sockaddr_rxrpc srx; /* local address */ 272 270 }; 273 271 ··· 342 338 RXRPC_CONN_DONT_REUSE, /* Don't reuse this connection */ 343 339 RXRPC_CONN_COUNTED, /* Counted by rxrpc_nr_client_conns */ 344 340 RXRPC_CONN_PROBING_FOR_UPGRADE, /* Probing for service upgrade */ 341 + RXRPC_CONN_FINAL_ACK_0, /* Need final ACK for channel 0 */ 342 + RXRPC_CONN_FINAL_ACK_1, /* Need final ACK for channel 1 */ 343 + RXRPC_CONN_FINAL_ACK_2, /* Need final ACK for channel 2 */ 344 + RXRPC_CONN_FINAL_ACK_3, /* Need final ACK for channel 3 */ 345 345 }; 346 + 347 + #define RXRPC_CONN_FINAL_ACK_MASK ((1UL << RXRPC_CONN_FINAL_ACK_0) | \ 348 + (1UL << RXRPC_CONN_FINAL_ACK_1) | \ 349 + (1UL << RXRPC_CONN_FINAL_ACK_2) | \ 350 + (1UL << RXRPC_CONN_FINAL_ACK_3)) 346 351 
347 352 /* 348 353 * Events that can be raised upon a connection. ··· 406 393 #define RXRPC_ACTIVE_CHANS_MASK ((1 << RXRPC_MAXCALLS) - 1) 407 394 struct list_head waiting_calls; /* Calls waiting for channels */ 408 395 struct rxrpc_channel { 396 + unsigned long final_ack_at; /* Time at which to issue final ACK */ 409 397 struct rxrpc_call __rcu *call; /* Active call */ 410 398 u32 call_id; /* ID of current call */ 411 399 u32 call_counter; /* Call ID counter */ ··· 418 404 }; 419 405 } channels[RXRPC_MAXCALLS]; 420 406 407 + struct timer_list timer; /* Conn event timer */ 421 408 struct work_struct processor; /* connection event processor */ 422 409 union { 423 410 struct rb_node client_node; /* Node in local->client_conns */ ··· 472 457 enum rxrpc_call_event { 473 458 RXRPC_CALL_EV_ACK, /* need to generate ACK */ 474 459 RXRPC_CALL_EV_ABORT, /* need to generate abort */ 475 - RXRPC_CALL_EV_TIMER, /* Timer expired */ 476 460 RXRPC_CALL_EV_RESEND, /* Tx resend required */ 477 461 RXRPC_CALL_EV_PING, /* Ping send required */ 462 + RXRPC_CALL_EV_EXPIRED, /* Expiry occurred */ 463 + RXRPC_CALL_EV_ACK_LOST, /* ACK may be lost, send ping */ 478 464 }; 479 465 480 466 /* ··· 519 503 struct rxrpc_peer *peer; /* Peer record for remote address */ 520 504 struct rxrpc_sock __rcu *socket; /* socket responsible */ 521 505 struct mutex user_mutex; /* User access mutex */ 522 - ktime_t ack_at; /* When deferred ACK needs to happen */ 523 - ktime_t resend_at; /* When next resend needs to happen */ 524 - ktime_t ping_at; /* When next to send a ping */ 525 - ktime_t expire_at; /* When the call times out */ 506 + unsigned long ack_at; /* When deferred ACK needs to happen */ 507 + unsigned long ack_lost_at; /* When ACK is figured as lost */ 508 + unsigned long resend_at; /* When next resend needs to happen */ 509 + unsigned long ping_at; /* When next to send a ping */ 510 + unsigned long keepalive_at; /* When next to send a keepalive ping */ 511 + unsigned long expect_rx_by; /* When we 
expect to get a packet by */ 512 + unsigned long expect_req_by; /* When we expect to get a request DATA packet by */ 513 + unsigned long expect_term_by; /* When we expect call termination by */ 514 + u32 next_rx_timo; /* Timeout for next Rx packet (jif) */ 515 + u32 next_req_timo; /* Timeout for next Rx request packet (jif) */ 526 516 struct timer_list timer; /* Combined event timer */ 527 517 struct work_struct processor; /* Event processor */ 528 518 rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */ ··· 631 609 ktime_t acks_latest_ts; /* Timestamp of latest ACK received */ 632 610 rxrpc_serial_t acks_latest; /* serial number of latest ACK received */ 633 611 rxrpc_seq_t acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */ 612 + rxrpc_seq_t acks_lost_top; /* tx_top at the time lost-ack ping sent */ 613 + rxrpc_serial_t acks_lost_ping; /* Serial number of probe ACK */ 634 614 }; 635 615 636 616 /* ··· 654 630 u8 ssthresh; 655 631 u8 dup_acks; 656 632 u8 cumulative_acks; 633 + }; 634 + 635 + /* 636 + * sendmsg() cmsg-specified parameters. 
637 + */ 638 + enum rxrpc_command { 639 + RXRPC_CMD_SEND_DATA, /* send data message */ 640 + RXRPC_CMD_SEND_ABORT, /* request abort generation */ 641 + RXRPC_CMD_ACCEPT, /* [server] accept incoming call */ 642 + RXRPC_CMD_REJECT_BUSY, /* [server] reject a call as busy */ 643 + }; 644 + 645 + struct rxrpc_call_params { 646 + s64 tx_total_len; /* Total Tx data length (if send data) */ 647 + unsigned long user_call_ID; /* User's call ID */ 648 + struct { 649 + u32 hard; /* Maximum lifetime (sec) */ 650 + u32 idle; /* Max time since last data packet (msec) */ 651 + u32 normal; /* Max time since last call packet (msec) */ 652 + } timeouts; 653 + u8 nr_timeouts; /* Number of timeouts specified */ 654 + }; 655 + 656 + struct rxrpc_send_params { 657 + struct rxrpc_call_params call; 658 + u32 abort_code; /* Abort code to Tx (if abort) */ 659 + enum rxrpc_command command : 8; /* The command to implement */ 660 + bool exclusive; /* Shared or exclusive call */ 661 + bool upgrade; /* If the connection is upgradeable */ 657 662 }; 658 663 659 664 #include <trace/events/rxrpc.h> ··· 710 657 /* 711 658 * call_event.c 712 659 */ 713 - void __rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t); 714 - void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t); 715 660 void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool, 716 661 enum rxrpc_propose_ack_trace); 717 662 void rxrpc_process_call(struct work_struct *); 663 + 664 + static inline void rxrpc_reduce_call_timer(struct rxrpc_call *call, 665 + unsigned long expire_at, 666 + unsigned long now, 667 + enum rxrpc_timer_trace why) 668 + { 669 + trace_rxrpc_timer(call, why, now); 670 + timer_reduce(&call->timer, expire_at); 671 + } 718 672 719 673 /* 720 674 * call_object.c ··· 732 672 extern struct kmem_cache *rxrpc_call_jar; 733 673 734 674 struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long); 735 - struct rxrpc_call *rxrpc_alloc_call(gfp_t); 675 + struct 
rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t); 736 676 struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *, 737 677 struct rxrpc_conn_parameters *, 738 678 struct sockaddr_rxrpc *, 739 - unsigned long, s64, gfp_t); 679 + struct rxrpc_call_params *, gfp_t); 740 680 int rxrpc_retry_client_call(struct rxrpc_sock *, 741 681 struct rxrpc_call *, 742 682 struct rxrpc_conn_parameters *, ··· 863 803 */ 864 804 extern unsigned int rxrpc_max_client_connections; 865 805 extern unsigned int rxrpc_reap_client_connections; 866 - extern unsigned int rxrpc_conn_idle_client_expiry; 867 - extern unsigned int rxrpc_conn_idle_client_fast_expiry; 806 + extern unsigned long rxrpc_conn_idle_client_expiry; 807 + extern unsigned long rxrpc_conn_idle_client_fast_expiry; 868 808 extern struct idr rxrpc_client_conn_ids; 869 809 870 810 void rxrpc_destroy_client_conn_ids(void); ··· 885 825 * conn_object.c 886 826 */ 887 827 extern unsigned int rxrpc_connection_expiry; 828 + extern unsigned int rxrpc_closed_conn_expiry; 888 829 889 830 struct rxrpc_connection *rxrpc_alloc_connection(gfp_t); 890 831 struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *, ··· 920 859 rxrpc_put_client_conn(conn); 921 860 else 922 861 rxrpc_put_service_conn(conn); 862 + } 863 + 864 + static inline void rxrpc_reduce_conn_timer(struct rxrpc_connection *conn, 865 + unsigned long expire_at) 866 + { 867 + timer_reduce(&conn->timer, expire_at); 923 868 } 924 869 925 870 /* ··· 997 930 * misc.c 998 931 */ 999 932 extern unsigned int rxrpc_max_backlog __read_mostly; 1000 - extern unsigned int rxrpc_requested_ack_delay; 1001 - extern unsigned int rxrpc_soft_ack_delay; 1002 - extern unsigned int rxrpc_idle_ack_delay; 933 + extern unsigned long rxrpc_requested_ack_delay; 934 + extern unsigned long rxrpc_soft_ack_delay; 935 + extern unsigned long rxrpc_idle_ack_delay; 1003 936 extern unsigned int rxrpc_rx_window_size; 1004 937 extern unsigned int rxrpc_rx_mtu; 1005 938 extern unsigned int 
rxrpc_rx_jumbo_max; 1006 - extern unsigned int rxrpc_resend_timeout; 939 + extern unsigned long rxrpc_resend_timeout; 1007 940 1008 941 extern const s8 rxrpc_ack_priority[]; 1009 942 ··· 1021 954 /* 1022 955 * output.c 1023 956 */ 1024 - int rxrpc_send_ack_packet(struct rxrpc_call *, bool); 957 + int rxrpc_send_ack_packet(struct rxrpc_call *, bool, rxrpc_serial_t *); 1025 958 int rxrpc_send_abort_packet(struct rxrpc_call *); 1026 959 int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool); 1027 960 void rxrpc_reject_packets(struct rxrpc_local *);
+1 -1
net/rxrpc/call_accept.c
··· 94 94 /* Now it gets complicated, because calls get registered with the 95 95 * socket here, particularly if a user ID is preassigned by the user. 96 96 */ 97 - call = rxrpc_alloc_call(gfp); 97 + call = rxrpc_alloc_call(rx, gfp); 98 98 if (!call) 99 99 return -ENOMEM; 100 100 call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
+130 -99
net/rxrpc/call_event.c
··· 22 22 #include "ar-internal.h" 23 23 24 24 /* 25 - * Set the timer 26 - */ 27 - void __rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why, 28 - ktime_t now) 29 - { 30 - unsigned long t_j, now_j = jiffies; 31 - ktime_t t; 32 - bool queue = false; 33 - 34 - if (call->state < RXRPC_CALL_COMPLETE) { 35 - t = call->expire_at; 36 - if (!ktime_after(t, now)) { 37 - trace_rxrpc_timer(call, why, now, now_j); 38 - queue = true; 39 - goto out; 40 - } 41 - 42 - if (!ktime_after(call->resend_at, now)) { 43 - call->resend_at = call->expire_at; 44 - if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events)) 45 - queue = true; 46 - } else if (ktime_before(call->resend_at, t)) { 47 - t = call->resend_at; 48 - } 49 - 50 - if (!ktime_after(call->ack_at, now)) { 51 - call->ack_at = call->expire_at; 52 - if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events)) 53 - queue = true; 54 - } else if (ktime_before(call->ack_at, t)) { 55 - t = call->ack_at; 56 - } 57 - 58 - if (!ktime_after(call->ping_at, now)) { 59 - call->ping_at = call->expire_at; 60 - if (!test_and_set_bit(RXRPC_CALL_EV_PING, &call->events)) 61 - queue = true; 62 - } else if (ktime_before(call->ping_at, t)) { 63 - t = call->ping_at; 64 - } 65 - 66 - t_j = nsecs_to_jiffies(ktime_to_ns(ktime_sub(t, now))); 67 - t_j += jiffies; 68 - 69 - /* We have to make sure that the calculated jiffies value falls 70 - * at or after the nsec value, or we may loop ceaselessly 71 - * because the timer times out, but we haven't reached the nsec 72 - * timeout yet. 
73 - */ 74 - t_j++; 75 - 76 - if (call->timer.expires != t_j || !timer_pending(&call->timer)) { 77 - mod_timer(&call->timer, t_j); 78 - trace_rxrpc_timer(call, why, now, now_j); 79 - } 80 - } 81 - 82 - out: 83 - if (queue) 84 - rxrpc_queue_call(call); 85 - } 86 - 87 - /* 88 - * Set the timer 89 - */ 90 - void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why, 91 - ktime_t now) 92 - { 93 - read_lock_bh(&call->state_lock); 94 - __rxrpc_set_timer(call, why, now); 95 - read_unlock_bh(&call->state_lock); 96 - } 97 - 98 - /* 99 25 * Propose a PING ACK be sent. 100 26 */ 101 27 static void rxrpc_propose_ping(struct rxrpc_call *call, ··· 32 106 !test_and_set_bit(RXRPC_CALL_EV_PING, &call->events)) 33 107 rxrpc_queue_call(call); 34 108 } else { 35 - ktime_t now = ktime_get_real(); 36 - ktime_t ping_at = ktime_add_ms(now, rxrpc_idle_ack_delay); 109 + unsigned long now = jiffies; 110 + unsigned long ping_at = now + rxrpc_idle_ack_delay; 37 111 38 - if (ktime_before(ping_at, call->ping_at)) { 39 - call->ping_at = ping_at; 40 - rxrpc_set_timer(call, rxrpc_timer_set_for_ping, now); 112 + if (time_before(ping_at, call->ping_at)) { 113 + WRITE_ONCE(call->ping_at, ping_at); 114 + rxrpc_reduce_call_timer(call, ping_at, now, 115 + rxrpc_timer_set_for_ping); 41 116 } 42 117 } 43 118 } ··· 52 125 enum rxrpc_propose_ack_trace why) 53 126 { 54 127 enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use; 55 - unsigned int expiry = rxrpc_soft_ack_delay; 56 - ktime_t now, ack_at; 128 + unsigned long expiry = rxrpc_soft_ack_delay; 57 129 s8 prior = rxrpc_ack_priority[ack_reason]; 58 130 59 131 /* Pings are handled specially because we don't want to accidentally ··· 116 190 background) 117 191 rxrpc_queue_call(call); 118 192 } else { 119 - now = ktime_get_real(); 120 - ack_at = ktime_add_ms(now, expiry); 121 - if (ktime_before(ack_at, call->ack_at)) { 122 - call->ack_at = ack_at; 123 - rxrpc_set_timer(call, rxrpc_timer_set_for_ack, now); 193 + unsigned long now = 
jiffies, ack_at; 194 + 195 + if (call->peer->rtt_usage > 0) 196 + ack_at = nsecs_to_jiffies(call->peer->rtt); 197 + else 198 + ack_at = expiry; 199 + 200 + ack_at = jiffies + expiry; 201 + if (time_before(ack_at, call->ack_at)) { 202 + WRITE_ONCE(call->ack_at, ack_at); 203 + rxrpc_reduce_call_timer(call, ack_at, now, 204 + rxrpc_timer_set_for_ack); 124 205 } 125 206 } 126 207 ··· 160 227 /* 161 228 * Perform retransmission of NAK'd and unack'd packets. 162 229 */ 163 - static void rxrpc_resend(struct rxrpc_call *call, ktime_t now) 230 + static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) 164 231 { 165 232 struct rxrpc_skb_priv *sp; 166 233 struct sk_buff *skb; 234 + unsigned long resend_at; 167 235 rxrpc_seq_t cursor, seq, top; 168 - ktime_t max_age, oldest, ack_ts; 236 + ktime_t now, max_age, oldest, ack_ts, timeout, min_timeo; 169 237 int ix; 170 238 u8 annotation, anno_type, retrans = 0, unacked = 0; 171 239 172 240 _enter("{%d,%d}", call->tx_hard_ack, call->tx_top); 173 241 174 - max_age = ktime_sub_ms(now, rxrpc_resend_timeout); 242 + if (call->peer->rtt_usage > 1) 243 + timeout = ns_to_ktime(call->peer->rtt * 3 / 2); 244 + else 245 + timeout = ms_to_ktime(rxrpc_resend_timeout); 246 + min_timeo = ns_to_ktime((1000000000 / HZ) * 4); 247 + if (ktime_before(timeout, min_timeo)) 248 + timeout = min_timeo; 249 + 250 + now = ktime_get_real(); 251 + max_age = ktime_sub(now, timeout); 175 252 176 253 spin_lock_bh(&call->lock); 177 254 ··· 225 282 ktime_to_ns(ktime_sub(skb->tstamp, max_age))); 226 283 } 227 284 228 - call->resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout); 285 + resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(oldest, now))); 286 + resend_at += jiffies + rxrpc_resend_timeout; 287 + WRITE_ONCE(call->resend_at, resend_at); 229 288 230 289 if (unacked) 231 290 rxrpc_congestion_timeout(call); ··· 237 292 * retransmitting data. 
238 293 */ 239 294 if (!retrans) { 240 - rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now); 295 + rxrpc_reduce_call_timer(call, resend_at, now, 296 + rxrpc_timer_set_for_resend); 241 297 spin_unlock_bh(&call->lock); 242 298 ack_ts = ktime_sub(now, call->acks_latest_ts); 243 299 if (ktime_to_ns(ack_ts) < call->peer->rtt) 244 300 goto out; 245 301 rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false, 246 302 rxrpc_propose_ack_ping_for_lost_ack); 247 - rxrpc_send_ack_packet(call, true); 303 + rxrpc_send_ack_packet(call, true, NULL); 248 304 goto out; 249 305 } 250 306 ··· 310 364 { 311 365 struct rxrpc_call *call = 312 366 container_of(work, struct rxrpc_call, processor); 313 - ktime_t now; 367 + rxrpc_serial_t *send_ack; 368 + unsigned long now, next, t; 314 369 315 370 rxrpc_see_call(call); 316 371 ··· 331 384 goto out_put; 332 385 } 333 386 334 - now = ktime_get_real(); 335 - if (ktime_before(call->expire_at, now)) { 387 + /* Work out if any timeouts tripped */ 388 + now = jiffies; 389 + t = READ_ONCE(call->expect_rx_by); 390 + if (time_after_eq(now, t)) { 391 + trace_rxrpc_timer(call, rxrpc_timer_exp_normal, now); 392 + set_bit(RXRPC_CALL_EV_EXPIRED, &call->events); 393 + } 394 + 395 + t = READ_ONCE(call->expect_req_by); 396 + if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST && 397 + time_after_eq(now, t)) { 398 + trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now); 399 + set_bit(RXRPC_CALL_EV_EXPIRED, &call->events); 400 + } 401 + 402 + t = READ_ONCE(call->expect_term_by); 403 + if (time_after_eq(now, t)) { 404 + trace_rxrpc_timer(call, rxrpc_timer_exp_hard, now); 405 + set_bit(RXRPC_CALL_EV_EXPIRED, &call->events); 406 + } 407 + 408 + t = READ_ONCE(call->ack_at); 409 + if (time_after_eq(now, t)) { 410 + trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now); 411 + cmpxchg(&call->ack_at, t, now + MAX_JIFFY_OFFSET); 412 + set_bit(RXRPC_CALL_EV_ACK, &call->events); 413 + } 414 + 415 + t = READ_ONCE(call->ack_lost_at); 416 + if (time_after_eq(now, t)) { 417 + 
trace_rxrpc_timer(call, rxrpc_timer_exp_lost_ack, now); 418 + cmpxchg(&call->ack_lost_at, t, now + MAX_JIFFY_OFFSET); 419 + set_bit(RXRPC_CALL_EV_ACK_LOST, &call->events); 420 + } 421 + 422 + t = READ_ONCE(call->keepalive_at); 423 + if (time_after_eq(now, t)) { 424 + trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now); 425 + cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET); 426 + rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, true, 427 + rxrpc_propose_ack_ping_for_keepalive); 428 + set_bit(RXRPC_CALL_EV_PING, &call->events); 429 + } 430 + 431 + t = READ_ONCE(call->ping_at); 432 + if (time_after_eq(now, t)) { 433 + trace_rxrpc_timer(call, rxrpc_timer_exp_ping, now); 434 + cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET); 435 + set_bit(RXRPC_CALL_EV_PING, &call->events); 436 + } 437 + 438 + t = READ_ONCE(call->resend_at); 439 + if (time_after_eq(now, t)) { 440 + trace_rxrpc_timer(call, rxrpc_timer_exp_resend, now); 441 + cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET); 442 + set_bit(RXRPC_CALL_EV_RESEND, &call->events); 443 + } 444 + 445 + /* Process events */ 446 + if (test_and_clear_bit(RXRPC_CALL_EV_EXPIRED, &call->events)) { 336 447 rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ETIME); 337 448 set_bit(RXRPC_CALL_EV_ABORT, &call->events); 338 449 goto recheck_state; 339 450 } 340 451 341 - if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events)) { 452 + send_ack = NULL; 453 + if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) { 454 + call->acks_lost_top = call->tx_top; 455 + rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false, 456 + rxrpc_propose_ack_ping_for_lost_ack); 457 + send_ack = &call->acks_lost_ping; 458 + } 459 + 460 + if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events) || 461 + send_ack) { 342 462 if (call->ackr_reason) { 343 - rxrpc_send_ack_packet(call, false); 463 + rxrpc_send_ack_packet(call, false, send_ack); 344 464 goto recheck_state; 345 465 } 346 466 } 347 467 348 468 if 
(test_and_clear_bit(RXRPC_CALL_EV_PING, &call->events)) { 349 - rxrpc_send_ack_packet(call, true); 469 + rxrpc_send_ack_packet(call, true, NULL); 350 470 goto recheck_state; 351 471 } 352 472 ··· 422 408 goto recheck_state; 423 409 } 424 410 425 - rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now); 411 + /* Make sure the timer is restarted */ 412 + next = call->expect_rx_by; 413 + 414 + #define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; } 415 + 416 + set(call->expect_req_by); 417 + set(call->expect_term_by); 418 + set(call->ack_at); 419 + set(call->ack_lost_at); 420 + set(call->resend_at); 421 + set(call->keepalive_at); 422 + set(call->ping_at); 423 + 424 + now = jiffies; 425 + if (time_after_eq(now, next)) 426 + goto recheck_state; 427 + 428 + rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart); 426 429 427 430 /* other events may have been raised since we started checking */ 428 431 if (call->events && call->state < RXRPC_CALL_COMPLETE) {
+38 -22
net/rxrpc/call_object.c
··· 51 51 52 52 _enter("%d", call->debug_id); 53 53 54 - if (call->state < RXRPC_CALL_COMPLETE) 55 - rxrpc_set_timer(call, rxrpc_timer_expired, ktime_get_real()); 54 + if (call->state < RXRPC_CALL_COMPLETE) { 55 + trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies); 56 + rxrpc_queue_call(call); 57 + } 56 58 } 59 + 60 + static struct lock_class_key rxrpc_call_user_mutex_lock_class_key; 57 61 58 62 /* 59 63 * find an extant server call ··· 99 95 /* 100 96 * allocate a new call 101 97 */ 102 - struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp) 98 + struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp) 103 99 { 104 100 struct rxrpc_call *call; 105 101 ··· 118 114 goto nomem_2; 119 115 120 116 mutex_init(&call->user_mutex); 117 + 118 + /* Prevent lockdep reporting a deadlock false positive between the afs 119 + * filesystem and sys_sendmsg() via the mmap sem. 120 + */ 121 + if (rx->sk.sk_kern_sock) 122 + lockdep_set_class(&call->user_mutex, 123 + &rxrpc_call_user_mutex_lock_class_key); 124 + 121 125 timer_setup(&call->timer, rxrpc_call_timer_expired, 0); 122 126 INIT_WORK(&call->processor, &rxrpc_process_call); 123 127 INIT_LIST_HEAD(&call->link); ··· 140 128 atomic_set(&call->usage, 1); 141 129 call->debug_id = atomic_inc_return(&rxrpc_debug_id); 142 130 call->tx_total_len = -1; 131 + call->next_rx_timo = 20 * HZ; 132 + call->next_req_timo = 1 * HZ; 143 133 144 134 memset(&call->sock_node, 0xed, sizeof(call->sock_node)); 145 135 ··· 164 150 /* 165 151 * Allocate a new client call. 
166 152 */ 167 - static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx, 153 + static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx, 154 + struct sockaddr_rxrpc *srx, 168 155 gfp_t gfp) 169 156 { 170 157 struct rxrpc_call *call; ··· 173 158 174 159 _enter(""); 175 160 176 - call = rxrpc_alloc_call(gfp); 161 + call = rxrpc_alloc_call(rx, gfp); 177 162 if (!call) 178 163 return ERR_PTR(-ENOMEM); 179 164 call->state = RXRPC_CALL_CLIENT_AWAIT_CONN; ··· 192 177 */ 193 178 static void rxrpc_start_call_timer(struct rxrpc_call *call) 194 179 { 195 - ktime_t now = ktime_get_real(), expire_at; 180 + unsigned long now = jiffies; 181 + unsigned long j = now + MAX_JIFFY_OFFSET; 196 182 197 - expire_at = ktime_add_ms(now, rxrpc_max_call_lifetime); 198 - call->expire_at = expire_at; 199 - call->ack_at = expire_at; 200 - call->ping_at = expire_at; 201 - call->resend_at = expire_at; 202 - call->timer.expires = jiffies + LONG_MAX / 2; 203 - rxrpc_set_timer(call, rxrpc_timer_begin, now); 183 + call->ack_at = j; 184 + call->ack_lost_at = j; 185 + call->resend_at = j; 186 + call->ping_at = j; 187 + call->expect_rx_by = j; 188 + call->expect_req_by = j; 189 + call->expect_term_by = j; 190 + call->timer.expires = now; 204 191 } 205 192 206 193 /* ··· 213 196 struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, 214 197 struct rxrpc_conn_parameters *cp, 215 198 struct sockaddr_rxrpc *srx, 216 - unsigned long user_call_ID, 217 - s64 tx_total_len, 199 + struct rxrpc_call_params *p, 218 200 gfp_t gfp) 219 201 __releases(&rx->sk.sk_lock.slock) 220 202 { ··· 223 207 const void *here = __builtin_return_address(0); 224 208 int ret; 225 209 226 - _enter("%p,%lx", rx, user_call_ID); 210 + _enter("%p,%lx", rx, p->user_call_ID); 227 211 228 - call = rxrpc_alloc_client_call(srx, gfp); 212 + call = rxrpc_alloc_client_call(rx, srx, gfp); 229 213 if (IS_ERR(call)) { 230 214 release_sock(&rx->sk); 231 215 _leave(" = %ld", PTR_ERR(call)); 232 216 return 
call; 233 217 } 234 218 235 - call->tx_total_len = tx_total_len; 219 + call->tx_total_len = p->tx_total_len; 236 220 trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage), 237 - here, (const void *)user_call_ID); 221 + here, (const void *)p->user_call_ID); 238 222 239 223 /* We need to protect a partially set up call against the user as we 240 224 * will be acting outside the socket lock. ··· 250 234 parent = *pp; 251 235 xcall = rb_entry(parent, struct rxrpc_call, sock_node); 252 236 253 - if (user_call_ID < xcall->user_call_ID) 237 + if (p->user_call_ID < xcall->user_call_ID) 254 238 pp = &(*pp)->rb_left; 255 - else if (user_call_ID > xcall->user_call_ID) 239 + else if (p->user_call_ID > xcall->user_call_ID) 256 240 pp = &(*pp)->rb_right; 257 241 else 258 242 goto error_dup_user_ID; 259 243 } 260 244 261 245 rcu_assign_pointer(call->socket, rx); 262 - call->user_call_ID = user_call_ID; 246 + call->user_call_ID = p->user_call_ID; 263 247 __set_bit(RXRPC_CALL_HAS_USERID, &call->flags); 264 248 rxrpc_get_call(call, rxrpc_call_got_userid); 265 249 rb_link_node(&call->sock_node, parent, pp);
+41 -13
net/rxrpc/conn_client.c
··· 85 85 86 86 __read_mostly unsigned int rxrpc_max_client_connections = 1000; 87 87 __read_mostly unsigned int rxrpc_reap_client_connections = 900; 88 - __read_mostly unsigned int rxrpc_conn_idle_client_expiry = 2 * 60 * HZ; 89 - __read_mostly unsigned int rxrpc_conn_idle_client_fast_expiry = 2 * HZ; 88 + __read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ; 89 + __read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ; 90 90 91 91 /* 92 92 * We use machine-unique IDs for our client connections. ··· 554 554 555 555 trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate); 556 556 557 + /* Cancel the final ACK on the previous call if it hasn't been sent yet 558 + * as the DATA packet will implicitly ACK it. 559 + */ 560 + clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags); 561 + 557 562 write_lock_bh(&call->state_lock); 558 563 if (!test_bit(RXRPC_CALL_TX_LASTQ, &call->flags)) 559 564 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST; ··· 691 686 692 687 _enter("{%d,%lx},", call->debug_id, call->user_call_ID); 693 688 694 - rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper.work); 689 + rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper); 695 690 rxrpc_cull_active_client_conns(rxnet); 696 691 697 692 ret = rxrpc_get_client_conn(call, cp, srx, gfp); ··· 757 752 } 758 753 759 754 /* 755 + * Set the reap timer. 756 + */ 757 + static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet) 758 + { 759 + unsigned long now = jiffies; 760 + unsigned long reap_at = now + rxrpc_conn_idle_client_expiry; 761 + 762 + if (rxnet->live) 763 + timer_reduce(&rxnet->client_conn_reap_timer, reap_at); 764 + } 765 + 766 + /* 760 767 * Disconnect a client call. 
761 768 */ 762 769 void rxrpc_disconnect_client_call(struct rxrpc_call *call) ··· 828 811 trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass); 829 812 rxrpc_activate_one_channel(conn, channel); 830 813 goto out_2; 814 + } 815 + 816 + /* Schedule the final ACK to be transmitted in a short while so that it 817 + * can be skipped if we find a follow-on call. The first DATA packet 818 + * of the follow on call will implicitly ACK this call. 819 + */ 820 + if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) { 821 + unsigned long final_ack_at = jiffies + 2; 822 + 823 + WRITE_ONCE(chan->final_ack_at, final_ack_at); 824 + smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */ 825 + set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags); 826 + rxrpc_reduce_conn_timer(conn, final_ack_at); 831 827 } 832 828 833 829 /* Things are more complex and we need the cache lock. We might be ··· 908 878 list_move_tail(&conn->cache_link, &rxnet->idle_client_conns); 909 879 if (rxnet->idle_client_conns.next == &conn->cache_link && 910 880 !rxnet->kill_all_client_conns) 911 - queue_delayed_work(rxrpc_workqueue, 912 - &rxnet->client_conn_reaper, 913 - rxrpc_conn_idle_client_expiry); 881 + rxrpc_set_client_reap_timer(rxnet); 914 882 } else { 915 883 trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive); 916 884 conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE; ··· 1046 1018 { 1047 1019 struct rxrpc_connection *conn; 1048 1020 struct rxrpc_net *rxnet = 1049 - container_of(to_delayed_work(work), 1050 - struct rxrpc_net, client_conn_reaper); 1021 + container_of(work, struct rxrpc_net, client_conn_reaper); 1051 1022 unsigned long expiry, conn_expires_at, now; 1052 1023 unsigned int nr_conns; 1053 1024 bool did_discard = false; ··· 1088 1061 expiry = rxrpc_conn_idle_client_expiry; 1089 1062 if (nr_conns > rxrpc_reap_client_connections) 1090 1063 expiry = rxrpc_conn_idle_client_fast_expiry; 1064 + if (conn->params.local->service_closed) 1065 + expiry = rxrpc_closed_conn_expiry * HZ; 1091 
1066 1092 1067 conn_expires_at = conn->idle_timestamp + expiry; 1093 1068 ··· 1125 1096 */ 1126 1097 _debug("not yet"); 1127 1098 if (!rxnet->kill_all_client_conns) 1128 - queue_delayed_work(rxrpc_workqueue, 1129 - &rxnet->client_conn_reaper, 1130 - conn_expires_at - now); 1099 + timer_reduce(&rxnet->client_conn_reap_timer, 1100 + conn_expires_at); 1131 1101 1132 1102 out: 1133 1103 spin_unlock(&rxnet->client_conn_cache_lock); ··· 1146 1118 rxnet->kill_all_client_conns = true; 1147 1119 spin_unlock(&rxnet->client_conn_cache_lock); 1148 1120 1149 - cancel_delayed_work(&rxnet->client_conn_reaper); 1121 + del_timer_sync(&rxnet->client_conn_reap_timer); 1150 1122 1151 - if (!queue_delayed_work(rxrpc_workqueue, &rxnet->client_conn_reaper, 0)) 1123 + if (!rxrpc_queue_work(&rxnet->client_conn_reaper)) 1152 1124 _debug("destroy: queue failed"); 1153 1125 1154 1126 _leave("");
+61 -13
net/rxrpc/conn_event.c
··· 24 24 * Retransmit terminal ACK or ABORT of the previous call. 25 25 */ 26 26 static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, 27 - struct sk_buff *skb) 27 + struct sk_buff *skb, 28 + unsigned int channel) 28 29 { 29 - struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 30 + struct rxrpc_skb_priv *sp = skb ? rxrpc_skb(skb) : NULL; 30 31 struct rxrpc_channel *chan; 31 32 struct msghdr msg; 32 33 struct kvec iov; ··· 49 48 50 49 _enter("%d", conn->debug_id); 51 50 52 - chan = &conn->channels[sp->hdr.cid & RXRPC_CHANNELMASK]; 51 + chan = &conn->channels[channel]; 53 52 54 53 /* If the last call got moved on whilst we were waiting to run, just 55 54 * ignore this packet. ··· 57 56 call_id = READ_ONCE(chan->last_call); 58 57 /* Sync with __rxrpc_disconnect_call() */ 59 58 smp_rmb(); 60 - if (call_id != sp->hdr.callNumber) 59 + if (skb && call_id != sp->hdr.callNumber) 61 60 return; 62 61 63 62 msg.msg_name = &conn->params.peer->srx.transport; ··· 66 65 msg.msg_controllen = 0; 67 66 msg.msg_flags = 0; 68 67 69 - pkt.whdr.epoch = htonl(sp->hdr.epoch); 70 - pkt.whdr.cid = htonl(sp->hdr.cid); 71 - pkt.whdr.callNumber = htonl(sp->hdr.callNumber); 68 + pkt.whdr.epoch = htonl(conn->proto.epoch); 69 + pkt.whdr.cid = htonl(conn->proto.cid); 70 + pkt.whdr.callNumber = htonl(call_id); 72 71 pkt.whdr.seq = 0; 73 72 pkt.whdr.type = chan->last_type; 74 73 pkt.whdr.flags = conn->out_clientflag; ··· 88 87 mtu = conn->params.peer->if_mtu; 89 88 mtu -= conn->params.peer->hdrsize; 90 89 pkt.ack.bufferSpace = 0; 91 - pkt.ack.maxSkew = htons(skb->priority); 92 - pkt.ack.firstPacket = htonl(chan->last_seq); 93 - pkt.ack.previousPacket = htonl(chan->last_seq - 1); 94 - pkt.ack.serial = htonl(sp->hdr.serial); 95 - pkt.ack.reason = RXRPC_ACK_DUPLICATE; 90 + pkt.ack.maxSkew = htons(skb ? skb->priority : 0); 91 + pkt.ack.firstPacket = htonl(chan->last_seq + 1); 92 + pkt.ack.previousPacket = htonl(chan->last_seq); 93 + pkt.ack.serial = htonl(skb ? 
sp->hdr.serial : 0); 94 + pkt.ack.reason = skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE; 96 95 pkt.ack.nAcks = 0; 97 96 pkt.info.rxMTU = htonl(rxrpc_rx_mtu); 98 97 pkt.info.maxMTU = htonl(mtu); ··· 273 272 switch (sp->hdr.type) { 274 273 case RXRPC_PACKET_TYPE_DATA: 275 274 case RXRPC_PACKET_TYPE_ACK: 276 - rxrpc_conn_retransmit_call(conn, skb); 275 + rxrpc_conn_retransmit_call(conn, skb, 276 + sp->hdr.cid & RXRPC_CHANNELMASK); 277 277 return 0; 278 278 279 279 case RXRPC_PACKET_TYPE_BUSY: ··· 381 379 } 382 380 383 381 /* 382 + * Process delayed final ACKs that we haven't subsumed into a subsequent call. 383 + */ 384 + static void rxrpc_process_delayed_final_acks(struct rxrpc_connection *conn) 385 + { 386 + unsigned long j = jiffies, next_j; 387 + unsigned int channel; 388 + bool set; 389 + 390 + again: 391 + next_j = j + LONG_MAX; 392 + set = false; 393 + for (channel = 0; channel < RXRPC_MAXCALLS; channel++) { 394 + struct rxrpc_channel *chan = &conn->channels[channel]; 395 + unsigned long ack_at; 396 + 397 + if (!test_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags)) 398 + continue; 399 + 400 + smp_rmb(); /* vs rxrpc_disconnect_client_call */ 401 + ack_at = READ_ONCE(chan->final_ack_at); 402 + 403 + if (time_before(j, ack_at)) { 404 + if (time_before(ack_at, next_j)) { 405 + next_j = ack_at; 406 + set = true; 407 + } 408 + continue; 409 + } 410 + 411 + if (test_and_clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, 412 + &conn->flags)) 413 + rxrpc_conn_retransmit_call(conn, NULL, channel); 414 + } 415 + 416 + j = jiffies; 417 + if (time_before_eq(next_j, j)) 418 + goto again; 419 + if (set) 420 + rxrpc_reduce_conn_timer(conn, next_j); 421 + } 422 + 423 + /* 384 424 * connection-level event processor 385 425 */ 386 426 void rxrpc_process_connection(struct work_struct *work) ··· 437 393 438 394 if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events)) 439 395 rxrpc_secure_connection(conn); 396 + 397 + /* Process delayed ACKs whose time has come. 
*/ 398 + if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK) 399 + rxrpc_process_delayed_final_acks(conn); 440 400 441 401 /* go through the conn-level event packets, releasing the ref on this 442 402 * connection that each one has when we've finished with it */
+48 -26
net/rxrpc/conn_object.c
··· 20 20 /* 21 21 * Time till a connection expires after last use (in seconds). 22 22 */ 23 - unsigned int rxrpc_connection_expiry = 10 * 60; 23 + unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60; 24 + unsigned int __read_mostly rxrpc_closed_conn_expiry = 10; 24 25 25 26 static void rxrpc_destroy_connection(struct rcu_head *); 27 + 28 + static void rxrpc_connection_timer(struct timer_list *timer) 29 + { 30 + struct rxrpc_connection *conn = 31 + container_of(timer, struct rxrpc_connection, timer); 32 + 33 + rxrpc_queue_conn(conn); 34 + } 26 35 27 36 /* 28 37 * allocate a new connection ··· 47 38 INIT_LIST_HEAD(&conn->cache_link); 48 39 spin_lock_init(&conn->channel_lock); 49 40 INIT_LIST_HEAD(&conn->waiting_calls); 41 + timer_setup(&conn->timer, &rxrpc_connection_timer, 0); 50 42 INIT_WORK(&conn->processor, &rxrpc_process_connection); 51 43 INIT_LIST_HEAD(&conn->proc_link); 52 44 INIT_LIST_HEAD(&conn->link); ··· 311 301 } 312 302 313 303 /* 304 + * Set the service connection reap timer. 
305 + */ 306 + static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet, 307 + unsigned long reap_at) 308 + { 309 + if (rxnet->live) 310 + timer_reduce(&rxnet->service_conn_reap_timer, reap_at); 311 + } 312 + 313 + /* 314 314 * Release a service connection 315 315 */ 316 316 void rxrpc_put_service_conn(struct rxrpc_connection *conn) 317 317 { 318 - struct rxrpc_net *rxnet; 319 318 const void *here = __builtin_return_address(0); 320 319 int n; 321 320 322 321 n = atomic_dec_return(&conn->usage); 323 322 trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here); 324 323 ASSERTCMP(n, >=, 0); 325 - if (n == 0) { 326 - rxnet = conn->params.local->rxnet; 327 - rxrpc_queue_delayed_work(&rxnet->service_conn_reaper, 0); 328 - } 324 + if (n == 1) 325 + rxrpc_set_service_reap_timer(conn->params.local->rxnet, 326 + jiffies + rxrpc_connection_expiry); 329 327 } 330 328 331 329 /* ··· 350 332 351 333 _net("DESTROY CONN %d", conn->debug_id); 352 334 335 + del_timer_sync(&conn->timer); 353 336 rxrpc_purge_queue(&conn->rx_queue); 354 337 355 338 conn->security->clear(conn); ··· 370 351 { 371 352 struct rxrpc_connection *conn, *_p; 372 353 struct rxrpc_net *rxnet = 373 - container_of(to_delayed_work(work), 374 - struct rxrpc_net, service_conn_reaper); 375 - unsigned long reap_older_than, earliest, idle_timestamp, now; 354 + container_of(work, struct rxrpc_net, service_conn_reaper); 355 + unsigned long expire_at, earliest, idle_timestamp, now; 376 356 377 357 LIST_HEAD(graveyard); 378 358 379 359 _enter(""); 380 360 381 361 now = jiffies; 382 - reap_older_than = now - rxrpc_connection_expiry * HZ; 383 - earliest = ULONG_MAX; 362 + earliest = now + MAX_JIFFY_OFFSET; 384 363 385 364 write_lock(&rxnet->conn_lock); 386 365 list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) { ··· 388 371 if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) 389 372 continue; 390 373 391 - idle_timestamp = READ_ONCE(conn->idle_timestamp); 392 - _debug("reap CONN %d { u=%d,t=%ld }", 393 - 
conn->debug_id, atomic_read(&conn->usage), 394 - (long)reap_older_than - (long)idle_timestamp); 374 + if (rxnet->live) { 375 + idle_timestamp = READ_ONCE(conn->idle_timestamp); 376 + expire_at = idle_timestamp + rxrpc_connection_expiry * HZ; 377 + if (conn->params.local->service_closed) 378 + expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ; 395 379 396 - if (time_after(idle_timestamp, reap_older_than)) { 397 - if (time_before(idle_timestamp, earliest)) 398 - earliest = idle_timestamp; 399 - continue; 380 + _debug("reap CONN %d { u=%d,t=%ld }", 381 + conn->debug_id, atomic_read(&conn->usage), 382 + (long)expire_at - (long)now); 383 + 384 + if (time_before(now, expire_at)) { 385 + if (time_before(expire_at, earliest)) 386 + earliest = expire_at; 387 + continue; 388 + } 400 389 } 401 390 402 391 /* The usage count sits at 1 whilst the object is unused on the ··· 410 387 */ 411 388 if (atomic_cmpxchg(&conn->usage, 1, 0) != 1) 412 389 continue; 390 + trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, 0); 413 391 414 392 if (rxrpc_conn_is_client(conn)) 415 393 BUG(); ··· 421 397 } 422 398 write_unlock(&rxnet->conn_lock); 423 399 424 - if (earliest != ULONG_MAX) { 425 - _debug("reschedule reaper %ld", (long) earliest - now); 400 + if (earliest != now + MAX_JIFFY_OFFSET) { 401 + _debug("reschedule reaper %ld", (long)earliest - (long)now); 426 402 ASSERT(time_after(earliest, now)); 427 - rxrpc_queue_delayed_work(&rxnet->client_conn_reaper, 428 - earliest - now); 403 + rxrpc_set_service_reap_timer(rxnet, earliest); 429 404 } 430 405 431 406 while (!list_empty(&graveyard)) { ··· 452 429 453 430 rxrpc_destroy_all_client_connections(rxnet); 454 431 455 - rxrpc_connection_expiry = 0; 456 - cancel_delayed_work(&rxnet->client_conn_reaper); 457 - rxrpc_queue_delayed_work(&rxnet->client_conn_reaper, 0); 432 + del_timer_sync(&rxnet->service_conn_reap_timer); 433 + rxrpc_queue_work(&rxnet->service_conn_reaper); 458 434 flush_workqueue(rxrpc_workqueue); 459 435 460 436 
write_lock(&rxnet->conn_lock);
+70 -4
net/rxrpc/input.c
··· 318 318 static bool rxrpc_receiving_reply(struct rxrpc_call *call) 319 319 { 320 320 struct rxrpc_ack_summary summary = { 0 }; 321 + unsigned long now, timo; 321 322 rxrpc_seq_t top = READ_ONCE(call->tx_top); 322 323 323 324 if (call->ackr_reason) { 324 325 spin_lock_bh(&call->lock); 325 326 call->ackr_reason = 0; 326 - call->resend_at = call->expire_at; 327 - call->ack_at = call->expire_at; 328 327 spin_unlock_bh(&call->lock); 329 - rxrpc_set_timer(call, rxrpc_timer_init_for_reply, 330 - ktime_get_real()); 328 + now = jiffies; 329 + timo = now + MAX_JIFFY_OFFSET; 330 + WRITE_ONCE(call->resend_at, timo); 331 + WRITE_ONCE(call->ack_at, timo); 332 + trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now); 331 333 } 332 334 333 335 if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) ··· 438 436 state = READ_ONCE(call->state); 439 437 if (state >= RXRPC_CALL_COMPLETE) 440 438 return; 439 + 440 + if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) { 441 + unsigned long timo = READ_ONCE(call->next_req_timo); 442 + unsigned long now, expect_req_by; 443 + 444 + if (timo) { 445 + now = jiffies; 446 + expect_req_by = now + timo; 447 + WRITE_ONCE(call->expect_req_by, expect_req_by); 448 + rxrpc_reduce_call_timer(call, expect_req_by, now, 449 + rxrpc_timer_set_for_idle); 450 + } 451 + } 441 452 442 453 /* Received data implicitly ACKs all of the request packets we sent 443 454 * when we're acting as a client. ··· 631 616 } 632 617 633 618 /* 619 + * Process the response to a ping that we sent to find out if we lost an ACK. 620 + * 621 + * If we got back a ping response that indicates a lower tx_top than what we 622 + * had at the time of the ping transmission, we adjudge all the DATA packets 623 + * sent between the response tx_top and the ping-time tx_top to have been lost. 
624 + */ 625 + static void rxrpc_input_check_for_lost_ack(struct rxrpc_call *call) 626 + { 627 + rxrpc_seq_t top, bottom, seq; 628 + bool resend = false; 629 + 630 + spin_lock_bh(&call->lock); 631 + 632 + bottom = call->tx_hard_ack + 1; 633 + top = call->acks_lost_top; 634 + if (before(bottom, top)) { 635 + for (seq = bottom; before_eq(seq, top); seq++) { 636 + int ix = seq & RXRPC_RXTX_BUFF_MASK; 637 + u8 annotation = call->rxtx_annotations[ix]; 638 + u8 anno_type = annotation & RXRPC_TX_ANNO_MASK; 639 + 640 + if (anno_type != RXRPC_TX_ANNO_UNACK) 641 + continue; 642 + annotation &= ~RXRPC_TX_ANNO_MASK; 643 + annotation |= RXRPC_TX_ANNO_RETRANS; 644 + call->rxtx_annotations[ix] = annotation; 645 + resend = true; 646 + } 647 + } 648 + 649 + spin_unlock_bh(&call->lock); 650 + 651 + if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events)) 652 + rxrpc_queue_call(call); 653 + } 654 + 655 + /* 634 656 * Process a ping response. 635 657 */ 636 658 static void rxrpc_input_ping_response(struct rxrpc_call *call, ··· 681 629 ping_time = call->ping_time; 682 630 smp_rmb(); 683 631 ping_serial = call->ping_serial; 632 + 633 + if (orig_serial == call->acks_lost_ping) 634 + rxrpc_input_check_for_lost_ack(call); 684 635 685 636 if (!test_bit(RXRPC_CALL_PINGING, &call->flags) || 686 637 before(orig_serial, ping_serial)) ··· 963 908 struct sk_buff *skb, u16 skew) 964 909 { 965 910 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 911 + unsigned long timo; 966 912 967 913 _enter("%p,%p", call, skb); 968 914 915 + timo = READ_ONCE(call->next_rx_timo); 916 + if (timo) { 917 + unsigned long now = jiffies, expect_rx_by; 918 + 919 + expect_rx_by = jiffies + timo; 920 + WRITE_ONCE(call->expect_rx_by, expect_rx_by); 921 + rxrpc_reduce_call_timer(call, expect_rx_by, now, 922 + rxrpc_timer_set_for_normal); 923 + } 924 + 969 925 switch (sp->hdr.type) { 970 926 case RXRPC_PACKET_TYPE_DATA: 971 927 rxrpc_input_data(call, skb, skew);
+7 -12
net/rxrpc/misc.c
··· 21 21 unsigned int rxrpc_max_backlog __read_mostly = 10; 22 22 23 23 /* 24 - * Maximum lifetime of a call (in mx). 25 - */ 26 - unsigned int rxrpc_max_call_lifetime = 60 * 1000; 27 - 28 - /* 29 24 * How long to wait before scheduling ACK generation after seeing a 30 - * packet with RXRPC_REQUEST_ACK set (in ms). 25 + * packet with RXRPC_REQUEST_ACK set (in jiffies). 31 26 */ 32 - unsigned int rxrpc_requested_ack_delay = 1; 27 + unsigned long rxrpc_requested_ack_delay = 1; 33 28 34 29 /* 35 - * How long to wait before scheduling an ACK with subtype DELAY (in ms). 30 + * How long to wait before scheduling an ACK with subtype DELAY (in jiffies). 36 31 * 37 32 * We use this when we've received new data packets. If those packets aren't 38 33 * all consumed within this time we will send a DELAY ACK if an ACK was not 39 34 * requested to let the sender know it doesn't need to resend. 40 35 */ 41 - unsigned int rxrpc_soft_ack_delay = 1 * 1000; 36 + unsigned long rxrpc_soft_ack_delay = HZ; 42 37 43 38 /* 44 - * How long to wait before scheduling an ACK with subtype IDLE (in ms). 39 + * How long to wait before scheduling an ACK with subtype IDLE (in jiffies). 45 40 * 46 41 * We use this when we've consumed some previously soft-ACK'd packets when 47 42 * further packets aren't immediately received to decide when to send an IDLE 48 43 * ACK let the other end know that it can free up its Tx buffer space. 49 44 */ 50 - unsigned int rxrpc_idle_ack_delay = 0.5 * 1000; 45 + unsigned long rxrpc_idle_ack_delay = HZ / 2; 51 46 52 47 /* 53 48 * Receive window size in packets. This indicates the maximum number of ··· 70 75 /* 71 76 * Time till packet resend (in milliseconds). 72 77 */ 73 - unsigned int rxrpc_resend_timeout = 4 * 1000; 78 + unsigned long rxrpc_resend_timeout = 4 * HZ; 74 79 75 80 const s8 rxrpc_ack_priority[] = { 76 81 [0] = 0,
+29 -4
net/rxrpc/net_ns.c
··· 14 14 15 15 unsigned int rxrpc_net_id; 16 16 17 + static void rxrpc_client_conn_reap_timeout(struct timer_list *timer) 18 + { 19 + struct rxrpc_net *rxnet = 20 + container_of(timer, struct rxrpc_net, client_conn_reap_timer); 21 + 22 + if (rxnet->live) 23 + rxrpc_queue_work(&rxnet->client_conn_reaper); 24 + } 25 + 26 + static void rxrpc_service_conn_reap_timeout(struct timer_list *timer) 27 + { 28 + struct rxrpc_net *rxnet = 29 + container_of(timer, struct rxrpc_net, service_conn_reap_timer); 30 + 31 + if (rxnet->live) 32 + rxrpc_queue_work(&rxnet->service_conn_reaper); 33 + } 34 + 17 35 /* 18 36 * Initialise a per-network namespace record. 19 37 */ ··· 40 22 struct rxrpc_net *rxnet = rxrpc_net(net); 41 23 int ret; 42 24 25 + rxnet->live = true; 43 26 get_random_bytes(&rxnet->epoch, sizeof(rxnet->epoch)); 44 27 rxnet->epoch |= RXRPC_RANDOM_EPOCH; 45 28 ··· 50 31 INIT_LIST_HEAD(&rxnet->conn_proc_list); 51 32 INIT_LIST_HEAD(&rxnet->service_conns); 52 33 rwlock_init(&rxnet->conn_lock); 53 - INIT_DELAYED_WORK(&rxnet->service_conn_reaper, 54 - rxrpc_service_connection_reaper); 34 + INIT_WORK(&rxnet->service_conn_reaper, 35 + rxrpc_service_connection_reaper); 36 + timer_setup(&rxnet->service_conn_reap_timer, 37 + rxrpc_service_conn_reap_timeout, 0); 55 38 56 39 rxnet->nr_client_conns = 0; 57 40 rxnet->nr_active_client_conns = 0; ··· 63 42 INIT_LIST_HEAD(&rxnet->waiting_client_conns); 64 43 INIT_LIST_HEAD(&rxnet->active_client_conns); 65 44 INIT_LIST_HEAD(&rxnet->idle_client_conns); 66 - INIT_DELAYED_WORK(&rxnet->client_conn_reaper, 67 - rxrpc_discard_expired_client_conns); 45 + INIT_WORK(&rxnet->client_conn_reaper, 46 + rxrpc_discard_expired_client_conns); 47 + timer_setup(&rxnet->client_conn_reap_timer, 48 + rxrpc_client_conn_reap_timeout, 0); 68 49 69 50 INIT_LIST_HEAD(&rxnet->local_endpoints); 70 51 mutex_init(&rxnet->local_mutex); ··· 83 60 return 0; 84 61 85 62 err_proc: 63 + rxnet->live = false; 86 64 return ret; 87 65 } 88 66 ··· 94 70 { 95 71 struct rxrpc_net 
*rxnet = rxrpc_net(net); 96 72 73 + rxnet->live = false; 97 74 rxrpc_destroy_all_calls(rxnet); 98 75 rxrpc_destroy_all_connections(rxnet); 99 76 rxrpc_destroy_all_locals(rxnet);
+41 -2
net/rxrpc/output.c
··· 33 33 }; 34 34 35 35 /* 36 + * Arrange for a keepalive ping a certain time after we last transmitted. This 37 + * lets the far side know we're still interested in this call and helps keep 38 + * the route through any intervening firewall open. 39 + * 40 + * Receiving a response to the ping will prevent the ->expect_rx_by timer from 41 + * expiring. 42 + */ 43 + static void rxrpc_set_keepalive(struct rxrpc_call *call) 44 + { 45 + unsigned long now = jiffies, keepalive_at = call->next_rx_timo / 6; 46 + 47 + keepalive_at += now; 48 + WRITE_ONCE(call->keepalive_at, keepalive_at); 49 + rxrpc_reduce_call_timer(call, keepalive_at, now, 50 + rxrpc_timer_set_for_keepalive); 51 + } 52 + 53 + /* 36 54 * Fill out an ACK packet. 37 55 */ 38 56 static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn, ··· 113 95 /* 114 96 * Send an ACK call packet. 115 97 */ 116 - int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping) 98 + int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping, 99 + rxrpc_serial_t *_serial) 117 100 { 118 101 struct rxrpc_connection *conn = NULL; 119 102 struct rxrpc_ack_buffer *pkt; ··· 184 165 ntohl(pkt->ack.firstPacket), 185 166 ntohl(pkt->ack.serial), 186 167 pkt->ack.reason, pkt->ack.nAcks); 168 + if (_serial) 169 + *_serial = serial; 187 170 188 171 if (ping) { 189 172 call->ping_serial = serial; ··· 223 202 call->ackr_seen = top; 224 203 spin_unlock_bh(&call->lock); 225 204 } 205 + 206 + rxrpc_set_keepalive(call); 226 207 } 227 208 228 209 out: ··· 346 323 * ACKs if a DATA packet appears to have been lost. 
347 324 */ 348 325 if (!(sp->hdr.flags & RXRPC_LAST_PACKET) && 349 - (retrans || 326 + (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) || 327 + retrans || 350 328 call->cong_mode == RXRPC_CALL_SLOW_START || 351 329 (call->peer->rtt_usage < 3 && sp->hdr.seq & 1) || 352 330 ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), ··· 394 370 if (whdr.flags & RXRPC_REQUEST_ACK) { 395 371 call->peer->rtt_last_req = now; 396 372 trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial); 373 + if (call->peer->rtt_usage > 1) { 374 + unsigned long nowj = jiffies, ack_lost_at; 375 + 376 + ack_lost_at = nsecs_to_jiffies(2 * call->peer->rtt); 377 + if (ack_lost_at < 1) 378 + ack_lost_at = 1; 379 + 380 + ack_lost_at += nowj; 381 + WRITE_ONCE(call->ack_lost_at, ack_lost_at); 382 + rxrpc_reduce_call_timer(call, ack_lost_at, nowj, 383 + rxrpc_timer_set_for_lost_ack); 384 + } 397 385 } 398 386 } 387 + 388 + rxrpc_set_keepalive(call); 389 + 399 390 _leave(" = %d [%u]", ret, call->peer->maxdata); 400 391 return ret; 401 392
+7 -5
net/rxrpc/recvmsg.c
··· 144 144 trace_rxrpc_receive(call, rxrpc_receive_end, 0, call->rx_top); 145 145 ASSERTCMP(call->rx_hard_ack, ==, call->rx_top); 146 146 147 + #if 0 // TODO: May want to transmit final ACK under some circumstances anyway 147 148 if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) { 148 149 rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, true, false, 149 150 rxrpc_propose_ack_terminal_ack); 150 - rxrpc_send_ack_packet(call, false); 151 + rxrpc_send_ack_packet(call, false, NULL); 151 152 } 153 + #endif 152 154 153 155 write_lock_bh(&call->state_lock); 154 156 ··· 163 161 case RXRPC_CALL_SERVER_RECV_REQUEST: 164 162 call->tx_phase = true; 165 163 call->state = RXRPC_CALL_SERVER_ACK_REQUEST; 166 - call->ack_at = call->expire_at; 164 + call->expect_req_by = jiffies + MAX_JIFFY_OFFSET; 167 165 write_unlock_bh(&call->state_lock); 168 166 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true, 169 167 rxrpc_propose_ack_processing_op); ··· 219 217 after_eq(top, call->ackr_seen + 2) || 220 218 (hard_ack == top && after(hard_ack, call->ackr_consumed))) 221 219 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, 222 - true, false, 220 + true, true, 223 221 rxrpc_propose_ack_rotate_rx); 224 - if (call->ackr_reason) 225 - rxrpc_send_ack_packet(call, false); 222 + if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY) 223 + rxrpc_send_ack_packet(call, false, NULL); 226 224 } 227 225 } 228 226
+76 -46
net/rxrpc/sendmsg.c
··· 21 21 #include <net/af_rxrpc.h> 22 22 #include "ar-internal.h" 23 23 24 - enum rxrpc_command { 25 - RXRPC_CMD_SEND_DATA, /* send data message */ 26 - RXRPC_CMD_SEND_ABORT, /* request abort generation */ 27 - RXRPC_CMD_ACCEPT, /* [server] accept incoming call */ 28 - RXRPC_CMD_REJECT_BUSY, /* [server] reject a call as busy */ 29 - }; 30 - 31 - struct rxrpc_send_params { 32 - s64 tx_total_len; /* Total Tx data length (if send data) */ 33 - unsigned long user_call_ID; /* User's call ID */ 34 - u32 abort_code; /* Abort code to Tx (if abort) */ 35 - enum rxrpc_command command : 8; /* The command to implement */ 36 - bool exclusive; /* Shared or exclusive call */ 37 - bool upgrade; /* If the connection is upgradeable */ 38 - }; 39 - 40 24 /* 41 25 * Wait for space to appear in the Tx queue or a signal to occur. 42 26 */ ··· 158 174 rxrpc_notify_end_tx_t notify_end_tx) 159 175 { 160 176 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 177 + unsigned long now; 161 178 rxrpc_seq_t seq = sp->hdr.seq; 162 179 int ret, ix; 163 180 u8 annotation = RXRPC_TX_ANNO_UNACK; ··· 198 213 break; 199 214 case RXRPC_CALL_SERVER_ACK_REQUEST: 200 215 call->state = RXRPC_CALL_SERVER_SEND_REPLY; 201 - call->ack_at = call->expire_at; 216 + now = jiffies; 217 + WRITE_ONCE(call->ack_at, now + MAX_JIFFY_OFFSET); 202 218 if (call->ackr_reason == RXRPC_ACK_DELAY) 203 219 call->ackr_reason = 0; 204 - __rxrpc_set_timer(call, rxrpc_timer_init_for_send_reply, 205 - ktime_get_real()); 220 + trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now); 206 221 if (!last) 207 222 break; 208 223 /* Fall through */ ··· 224 239 _debug("need instant resend %d", ret); 225 240 rxrpc_instant_resend(call, ix); 226 241 } else { 227 - ktime_t now = ktime_get_real(), resend_at; 242 + unsigned long now = jiffies, resend_at; 228 243 229 - resend_at = ktime_add_ms(now, rxrpc_resend_timeout); 244 + if (call->peer->rtt_usage > 1) 245 + resend_at = nsecs_to_jiffies(call->peer->rtt * 3 / 2); 246 + else 247 + resend_at = 
rxrpc_resend_timeout; 248 + if (resend_at < 1) 249 + resend_at = 1; 230 250 231 - if (ktime_before(resend_at, call->resend_at)) { 232 - call->resend_at = resend_at; 233 - rxrpc_set_timer(call, rxrpc_timer_set_for_send, now); 234 - } 251 + resend_at = now + rxrpc_resend_timeout; 252 + WRITE_ONCE(call->resend_at, resend_at); 253 + rxrpc_reduce_call_timer(call, resend_at, now, 254 + rxrpc_timer_set_for_send); 235 255 } 236 256 237 257 rxrpc_free_skb(skb, rxrpc_skb_tx_freed); ··· 285 295 do { 286 296 /* Check to see if there's a ping ACK to reply to. */ 287 297 if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE) 288 - rxrpc_send_ack_packet(call, false); 298 + rxrpc_send_ack_packet(call, false, NULL); 289 299 290 300 if (!skb) { 291 301 size_t size, chunk, max, space; ··· 470 480 if (msg->msg_flags & MSG_CMSG_COMPAT) { 471 481 if (len != sizeof(u32)) 472 482 return -EINVAL; 473 - p->user_call_ID = *(u32 *)CMSG_DATA(cmsg); 483 + p->call.user_call_ID = *(u32 *)CMSG_DATA(cmsg); 474 484 } else { 475 485 if (len != sizeof(unsigned long)) 476 486 return -EINVAL; 477 - p->user_call_ID = *(unsigned long *) 487 + p->call.user_call_ID = *(unsigned long *) 478 488 CMSG_DATA(cmsg); 479 489 } 480 490 got_user_ID = true; ··· 512 522 break; 513 523 514 524 case RXRPC_TX_LENGTH: 515 - if (p->tx_total_len != -1 || len != sizeof(__s64)) 525 + if (p->call.tx_total_len != -1 || len != sizeof(__s64)) 516 526 return -EINVAL; 517 - p->tx_total_len = *(__s64 *)CMSG_DATA(cmsg); 518 - if (p->tx_total_len < 0) 527 + p->call.tx_total_len = *(__s64 *)CMSG_DATA(cmsg); 528 + if (p->call.tx_total_len < 0) 519 529 return -EINVAL; 530 + break; 531 + 532 + case RXRPC_SET_CALL_TIMEOUT: 533 + if (len & 3 || len < 4 || len > 12) 534 + return -EINVAL; 535 + memcpy(&p->call.timeouts, CMSG_DATA(cmsg), len); 536 + p->call.nr_timeouts = len / 4; 537 + if (p->call.timeouts.hard > INT_MAX / HZ) 538 + return -ERANGE; 539 + if (p->call.nr_timeouts >= 2 && p->call.timeouts.idle > 60 * 60 * 1000) 540 + return -ERANGE; 
541 + if (p->call.nr_timeouts >= 3 && p->call.timeouts.normal > 60 * 60 * 1000) 542 + return -ERANGE; 520 543 break; 521 544 522 545 default: ··· 539 536 540 537 if (!got_user_ID) 541 538 return -EINVAL; 542 - if (p->tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA) 539 + if (p->call.tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA) 543 540 return -EINVAL; 544 541 _leave(" = 0"); 545 542 return 0; ··· 579 576 cp.exclusive = rx->exclusive | p->exclusive; 580 577 cp.upgrade = p->upgrade; 581 578 cp.service_id = srx->srx_service; 582 - call = rxrpc_new_client_call(rx, &cp, srx, p->user_call_ID, 583 - p->tx_total_len, GFP_KERNEL); 579 + call = rxrpc_new_client_call(rx, &cp, srx, &p->call, GFP_KERNEL); 584 580 /* The socket is now unlocked */ 585 581 586 582 _leave(" = %p\n", call); ··· 596 594 { 597 595 enum rxrpc_call_state state; 598 596 struct rxrpc_call *call; 597 + unsigned long now, j; 599 598 int ret; 600 599 601 600 struct rxrpc_send_params p = { 602 - .tx_total_len = -1, 603 - .user_call_ID = 0, 604 - .abort_code = 0, 605 - .command = RXRPC_CMD_SEND_DATA, 606 - .exclusive = false, 607 - .upgrade = true, 601 + .call.tx_total_len = -1, 602 + .call.user_call_ID = 0, 603 + .call.nr_timeouts = 0, 604 + .abort_code = 0, 605 + .command = RXRPC_CMD_SEND_DATA, 606 + .exclusive = false, 607 + .upgrade = false, 608 608 }; 609 609 610 610 _enter(""); ··· 619 615 ret = -EINVAL; 620 616 if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) 621 617 goto error_release_sock; 622 - call = rxrpc_accept_call(rx, p.user_call_ID, NULL); 618 + call = rxrpc_accept_call(rx, p.call.user_call_ID, NULL); 623 619 /* The socket is now unlocked. 
*/ 624 620 if (IS_ERR(call)) 625 621 return PTR_ERR(call); 626 - rxrpc_put_call(call, rxrpc_call_put); 627 - return 0; 622 + ret = 0; 623 + goto out_put_unlock; 628 624 } 629 625 630 - call = rxrpc_find_call_by_user_ID(rx, p.user_call_ID); 626 + call = rxrpc_find_call_by_user_ID(rx, p.call.user_call_ID); 631 627 if (!call) { 632 628 ret = -EBADSLT; 633 629 if (p.command != RXRPC_CMD_SEND_DATA) ··· 657 653 goto error_put; 658 654 } 659 655 660 - if (p.tx_total_len != -1) { 656 + if (p.call.tx_total_len != -1) { 661 657 ret = -EINVAL; 662 658 if (call->tx_total_len != -1 || 663 659 call->tx_pending || 664 660 call->tx_top != 0) 665 661 goto error_put; 666 - call->tx_total_len = p.tx_total_len; 662 + call->tx_total_len = p.call.tx_total_len; 667 663 } 664 + } 665 + 666 + switch (p.call.nr_timeouts) { 667 + case 3: 668 + j = msecs_to_jiffies(p.call.timeouts.normal); 669 + if (p.call.timeouts.normal > 0 && j == 0) 670 + j = 1; 671 + WRITE_ONCE(call->next_rx_timo, j); 672 + /* Fall through */ 673 + case 2: 674 + j = msecs_to_jiffies(p.call.timeouts.idle); 675 + if (p.call.timeouts.idle > 0 && j == 0) 676 + j = 1; 677 + WRITE_ONCE(call->next_req_timo, j); 678 + /* Fall through */ 679 + case 1: 680 + if (p.call.timeouts.hard > 0) { 681 + j = msecs_to_jiffies(p.call.timeouts.hard); 682 + now = jiffies; 683 + j += now; 684 + WRITE_ONCE(call->expect_term_by, j); 685 + rxrpc_reduce_call_timer(call, j, now, 686 + rxrpc_timer_set_for_hard); 687 + } 688 + break; 668 689 } 669 690 670 691 state = READ_ONCE(call->state); ··· 718 689 ret = rxrpc_send_data(rx, call, msg, len, NULL); 719 690 } 720 691 692 + out_put_unlock: 721 693 mutex_unlock(&call->user_mutex); 722 694 error_put: 723 695 rxrpc_put_call(call, rxrpc_call_put);
+29 -31
net/rxrpc/sysctl.c
··· 21 21 static const unsigned int thirtytwo = 32; 22 22 static const unsigned int n_65535 = 65535; 23 23 static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1; 24 + static const unsigned long one_jiffy = 1; 25 + static const unsigned long max_jiffies = MAX_JIFFY_OFFSET; 24 26 25 27 /* 26 28 * RxRPC operating parameters. ··· 31 29 * information on the individual parameters. 32 30 */ 33 31 static struct ctl_table rxrpc_sysctl_table[] = { 34 - /* Values measured in milliseconds */ 32 + /* Values measured in milliseconds but used in jiffies */ 35 33 { 36 34 .procname = "req_ack_delay", 37 35 .data = &rxrpc_requested_ack_delay, 38 - .maxlen = sizeof(unsigned int), 36 + .maxlen = sizeof(unsigned long), 39 37 .mode = 0644, 40 - .proc_handler = proc_dointvec, 41 - .extra1 = (void *)&zero, 38 + .proc_handler = proc_doulongvec_ms_jiffies_minmax, 39 + .extra1 = (void *)&one_jiffy, 40 + .extra2 = (void *)&max_jiffies, 42 41 }, 43 42 { 44 43 .procname = "soft_ack_delay", 45 44 .data = &rxrpc_soft_ack_delay, 46 - .maxlen = sizeof(unsigned int), 45 + .maxlen = sizeof(unsigned long), 47 46 .mode = 0644, 48 - .proc_handler = proc_dointvec, 49 - .extra1 = (void *)&one, 47 + .proc_handler = proc_doulongvec_ms_jiffies_minmax, 48 + .extra1 = (void *)&one_jiffy, 49 + .extra2 = (void *)&max_jiffies, 50 50 }, 51 51 { 52 52 .procname = "idle_ack_delay", 53 53 .data = &rxrpc_idle_ack_delay, 54 - .maxlen = sizeof(unsigned int), 54 + .maxlen = sizeof(unsigned long), 55 55 .mode = 0644, 56 - .proc_handler = proc_dointvec, 57 - .extra1 = (void *)&one, 58 - }, 59 - { 60 - .procname = "resend_timeout", 61 - .data = &rxrpc_resend_timeout, 62 - .maxlen = sizeof(unsigned int), 63 - .mode = 0644, 64 - .proc_handler = proc_dointvec, 65 - .extra1 = (void *)&one, 56 + .proc_handler = proc_doulongvec_ms_jiffies_minmax, 57 + .extra1 = (void *)&one_jiffy, 58 + .extra2 = (void *)&max_jiffies, 66 59 }, 67 60 { 68 61 .procname = "idle_conn_expiry", 69 62 .data = &rxrpc_conn_idle_client_expiry, 70 
- .maxlen = sizeof(unsigned int), 63 + .maxlen = sizeof(unsigned long), 71 64 .mode = 0644, 72 - .proc_handler = proc_dointvec_ms_jiffies, 73 - .extra1 = (void *)&one, 65 + .proc_handler = proc_doulongvec_ms_jiffies_minmax, 66 + .extra1 = (void *)&one_jiffy, 67 + .extra2 = (void *)&max_jiffies, 74 68 }, 75 69 { 76 70 .procname = "idle_conn_fast_expiry", 77 71 .data = &rxrpc_conn_idle_client_fast_expiry, 78 - .maxlen = sizeof(unsigned int), 72 + .maxlen = sizeof(unsigned long), 79 73 .mode = 0644, 80 - .proc_handler = proc_dointvec_ms_jiffies, 81 - .extra1 = (void *)&one, 74 + .proc_handler = proc_doulongvec_ms_jiffies_minmax, 75 + .extra1 = (void *)&one_jiffy, 76 + .extra2 = (void *)&max_jiffies, 82 77 }, 83 - 84 - /* Values measured in seconds but used in jiffies */ 85 78 { 86 - .procname = "max_call_lifetime", 87 - .data = &rxrpc_max_call_lifetime, 88 - .maxlen = sizeof(unsigned int), 79 + .procname = "resend_timeout", 80 + .data = &rxrpc_resend_timeout, 81 + .maxlen = sizeof(unsigned long), 89 82 .mode = 0644, 90 - .proc_handler = proc_dointvec, 91 - .extra1 = (void *)&one, 83 + .proc_handler = proc_doulongvec_ms_jiffies_minmax, 84 + .extra1 = (void *)&one_jiffy, 85 + .extra2 = (void *)&max_jiffies, 92 86 }, 93 87 94 88 /* Non-time values */
+12 -5
net/sched/cls_api.c
··· 336 336 struct tcf_chain *chain, *tmp; 337 337 338 338 rtnl_lock(); 339 - /* Only chain 0 should be still here. */ 339 + 340 + /* At this point, all the chains should have refcnt == 1. */ 340 341 list_for_each_entry_safe(chain, tmp, &block->chain_list, list) 341 342 tcf_chain_put(chain); 342 343 rtnl_unlock(); ··· 345 344 } 346 345 347 346 /* XXX: Standalone actions are not allowed to jump to any chain, and bound 348 - * actions should be all removed after flushing. However, filters are now 349 - * destroyed in tc filter workqueue with RTNL lock, they can not race here. 347 + * actions should be all removed after flushing. 350 348 */ 351 349 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, 352 350 struct tcf_block_ext_info *ei) 353 351 { 354 - struct tcf_chain *chain, *tmp; 352 + struct tcf_chain *chain; 355 353 356 - list_for_each_entry_safe(chain, tmp, &block->chain_list, list) 354 + /* Hold a refcnt for all chains, except 0, so that they don't disappear 355 + * while we are iterating. 356 + */ 357 + list_for_each_entry(chain, &block->chain_list, list) 358 + if (chain->index) 359 + tcf_chain_hold(chain); 360 + 361 + list_for_each_entry(chain, &block->chain_list, list) 357 362 tcf_chain_flush(chain); 358 363 359 364 tcf_block_offload_unbind(block, q, ei);
+13 -10
net/sched/cls_bpf.c
··· 258 258 return 0; 259 259 } 260 260 261 - static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog) 261 + static void cls_bpf_free_parms(struct cls_bpf_prog *prog) 262 262 { 263 - tcf_exts_destroy(&prog->exts); 264 - tcf_exts_put_net(&prog->exts); 265 - 266 263 if (cls_bpf_is_ebpf(prog)) 267 264 bpf_prog_put(prog->filter); 268 265 else ··· 267 270 268 271 kfree(prog->bpf_name); 269 272 kfree(prog->bpf_ops); 273 + } 274 + 275 + static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog) 276 + { 277 + tcf_exts_destroy(&prog->exts); 278 + tcf_exts_put_net(&prog->exts); 279 + 280 + cls_bpf_free_parms(prog); 270 281 kfree(prog); 271 282 } 272 283 ··· 519 514 goto errout_idr; 520 515 521 516 ret = cls_bpf_offload(tp, prog, oldprog); 522 - if (ret) { 523 - if (!oldprog) 524 - idr_remove_ext(&head->handle_idr, prog->handle); 525 - __cls_bpf_delete_prog(prog); 526 - return ret; 527 - } 517 + if (ret) 518 + goto errout_parms; 528 519 529 520 if (!tc_in_hw(prog->gen_flags)) 530 521 prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW; ··· 538 537 *arg = prog; 539 538 return 0; 540 539 540 + errout_parms: 541 + cls_bpf_free_parms(prog); 541 542 errout_idr: 542 543 if (!oldprog) 543 544 idr_remove_ext(&head->handle_idr, prog->handle);
+8 -1
net/sched/sch_cbq.c
··· 1158 1158 if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL) 1159 1159 return -EINVAL; 1160 1160 1161 + err = tcf_block_get(&q->link.block, &q->link.filter_list, sch); 1162 + if (err) 1163 + goto put_rtab; 1164 + 1161 1165 err = qdisc_class_hash_init(&q->clhash); 1162 1166 if (err < 0) 1163 - goto put_rtab; 1167 + goto put_block; 1164 1168 1165 1169 q->link.sibling = &q->link; 1166 1170 q->link.common.classid = sch->handle; ··· 1197 1193 1198 1194 cbq_addprio(q, &q->link); 1199 1195 return 0; 1196 + 1197 + put_block: 1198 + tcf_block_put(q->link.block); 1200 1199 1201 1200 put_rtab: 1202 1201 qdisc_put_rtab(q->link.R_tab);
+1
net/sched/sch_sfq.c
··· 724 724 int i; 725 725 int err; 726 726 727 + q->sch = sch; 727 728 timer_setup(&q->perturb_timer, sfq_perturbation, TIMER_DEFERRABLE); 728 729 729 730 err = tcf_block_get(&q->block, &q->filter_list, sch);
+1
net/sctp/protocol.c
··· 1499 1499 INIT_LIST_HEAD(&sctp_address_families); 1500 1500 sctp_v4_pf_init(); 1501 1501 sctp_v6_pf_init(); 1502 + sctp_sched_ops_init(); 1502 1503 1503 1504 status = register_pernet_subsys(&sctp_defaults_ops); 1504 1505 if (status)
+3 -3
net/sctp/socket.c
··· 188 188 list_for_each_entry(chunk, &t->transmitted, transmitted_list) 189 189 cb(chunk); 190 190 191 - list_for_each_entry(chunk, &q->retransmit, list) 191 + list_for_each_entry(chunk, &q->retransmit, transmitted_list) 192 192 cb(chunk); 193 193 194 - list_for_each_entry(chunk, &q->sacked, list) 194 + list_for_each_entry(chunk, &q->sacked, transmitted_list) 195 195 cb(chunk); 196 196 197 - list_for_each_entry(chunk, &q->abandoned, list) 197 + list_for_each_entry(chunk, &q->abandoned, transmitted_list) 198 198 cb(chunk); 199 199 200 200 list_for_each_entry(chunk, &q->out_chunk_list, list)
+66 -13
net/sctp/stream.c
··· 64 64 */ 65 65 66 66 /* Mark as failed send. */ 67 - sctp_chunk_fail(ch, SCTP_ERROR_INV_STRM); 67 + sctp_chunk_fail(ch, (__force __u32)SCTP_ERROR_INV_STRM); 68 68 if (asoc->peer.prsctp_capable && 69 69 SCTP_PR_PRIO_ENABLED(ch->sinfo.sinfo_flags)) 70 70 asoc->sent_cnt_removable--; ··· 254 254 return retval; 255 255 } 256 256 257 + static bool sctp_stream_outq_is_empty(struct sctp_stream *stream, 258 + __u16 str_nums, __be16 *str_list) 259 + { 260 + struct sctp_association *asoc; 261 + __u16 i; 262 + 263 + asoc = container_of(stream, struct sctp_association, stream); 264 + if (!asoc->outqueue.out_qlen) 265 + return true; 266 + 267 + if (!str_nums) 268 + return false; 269 + 270 + for (i = 0; i < str_nums; i++) { 271 + __u16 sid = ntohs(str_list[i]); 272 + 273 + if (stream->out[sid].ext && 274 + !list_empty(&stream->out[sid].ext->outq)) 275 + return false; 276 + } 277 + 278 + return true; 279 + } 280 + 257 281 int sctp_send_reset_streams(struct sctp_association *asoc, 258 282 struct sctp_reset_streams *params) 259 283 { ··· 341 317 for (i = 0; i < str_nums; i++) 342 318 nstr_list[i] = htons(str_list[i]); 343 319 320 + if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) { 321 + retval = -EAGAIN; 322 + goto out; 323 + } 324 + 344 325 chunk = sctp_make_strreset_req(asoc, str_nums, nstr_list, out, in); 345 326 346 327 kfree(nstr_list); ··· 405 376 406 377 if (asoc->strreset_outstanding) 407 378 return -EINPROGRESS; 379 + 380 + if (!sctp_outq_is_empty(&asoc->outqueue)) 381 + return -EAGAIN; 408 382 409 383 chunk = sctp_make_strreset_tsnreq(asoc); 410 384 if (!chunk) ··· 595 563 flags = SCTP_STREAM_RESET_INCOMING_SSN; 596 564 } 597 565 598 - nums = (ntohs(param.p->length) - sizeof(*outreq)) / 2; 566 + nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16); 599 567 if (nums) { 600 568 str_p = outreq->list_of_streams; 601 569 for (i = 0; i < nums; i++) { ··· 659 627 goto out; 660 628 } 661 629 662 - nums = (ntohs(param.p->length) - 
sizeof(*inreq)) / 2; 630 + nums = (ntohs(param.p->length) - sizeof(*inreq)) / sizeof(__u16); 663 631 str_p = inreq->list_of_streams; 664 632 for (i = 0; i < nums; i++) { 665 633 if (ntohs(str_p[i]) >= stream->outcnt) { 666 634 result = SCTP_STRRESET_ERR_WRONG_SSN; 667 635 goto out; 668 636 } 637 + } 638 + 639 + if (!sctp_stream_outq_is_empty(stream, nums, str_p)) { 640 + result = SCTP_STRRESET_IN_PROGRESS; 641 + asoc->strreset_inseq--; 642 + goto err; 669 643 } 670 644 671 645 chunk = sctp_make_strreset_req(asoc, nums, str_p, 1, 0); ··· 725 687 i = asoc->strreset_inseq - request_seq - 1; 726 688 result = asoc->strreset_result[i]; 727 689 if (result == SCTP_STRRESET_PERFORMED) { 728 - next_tsn = asoc->next_tsn; 690 + next_tsn = asoc->ctsn_ack_point + 1; 729 691 init_tsn = 730 692 sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1; 731 693 } 732 694 goto err; 733 695 } 696 + 697 + if (!sctp_outq_is_empty(&asoc->outqueue)) { 698 + result = SCTP_STRRESET_IN_PROGRESS; 699 + goto err; 700 + } 701 + 734 702 asoc->strreset_inseq++; 735 703 736 704 if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_ASSOC_REQ)) ··· 747 703 goto out; 748 704 } 749 705 750 - /* G3: The same processing as though a SACK chunk with no gap report 751 - * and a cumulative TSN ACK of the Sender's Next TSN minus 1 were 752 - * received MUST be performed. 706 + /* G4: The same processing as though a FWD-TSN chunk (as defined in 707 + * [RFC3758]) with all streams affected and a new cumulative TSN 708 + * ACK of the Receiver's Next TSN minus 1 were received MUST be 709 + * performed. 
753 710 */ 754 711 max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map); 755 712 sctp_ulpq_reasm_flushtsn(&asoc->ulpq, max_tsn_seen); ··· 765 720 sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL, 766 721 init_tsn, GFP_ATOMIC); 767 722 768 - /* G4: The same processing as though a FWD-TSN chunk (as defined in 769 - * [RFC3758]) with all streams affected and a new cumulative TSN 770 - * ACK of the Receiver's Next TSN minus 1 were received MUST be 771 - * performed. 723 + /* G3: The same processing as though a SACK chunk with no gap report 724 + * and a cumulative TSN ACK of the Sender's Next TSN minus 1 were 725 + * received MUST be performed. 772 726 */ 773 727 sctp_outq_free(&asoc->outqueue); 774 728 ··· 971 927 972 928 outreq = (struct sctp_strreset_outreq *)req; 973 929 str_p = outreq->list_of_streams; 974 - nums = (ntohs(outreq->param_hdr.length) - sizeof(*outreq)) / 2; 930 + nums = (ntohs(outreq->param_hdr.length) - sizeof(*outreq)) / 931 + sizeof(__u16); 975 932 976 933 if (result == SCTP_STRRESET_PERFORMED) { 977 934 if (nums) { ··· 1001 956 1002 957 inreq = (struct sctp_strreset_inreq *)req; 1003 958 str_p = inreq->list_of_streams; 1004 - nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) / 2; 959 + nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) / 960 + sizeof(__u16); 1005 961 1006 962 *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags, 1007 963 nums, str_p, GFP_ATOMIC); ··· 1021 975 if (result == SCTP_STRRESET_PERFORMED) { 1022 976 __u32 mtsn = sctp_tsnmap_get_max_tsn_seen( 1023 977 &asoc->peer.tsn_map); 978 + LIST_HEAD(temp); 1024 979 1025 980 sctp_ulpq_reasm_flushtsn(&asoc->ulpq, mtsn); 1026 981 sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); ··· 1030 983 SCTP_TSN_MAP_INITIAL, 1031 984 stsn, GFP_ATOMIC); 1032 985 986 + /* Clean up sacked and abandoned queues only. As the 987 + * out_chunk_list may not be empty, splice it to temp, 988 + * then get it back after sctp_outq_free is done. 
989 + */ 990 + list_splice_init(&asoc->outqueue.out_chunk_list, &temp); 1033 991 sctp_outq_free(&asoc->outqueue); 992 + list_splice_init(&temp, &asoc->outqueue.out_chunk_list); 1034 993 1035 994 asoc->next_tsn = rtsn; 1036 995 asoc->ctsn_ack_point = asoc->next_tsn - 1;
+18 -7
net/sctp/stream_sched.c
··· 119 119 .unsched_all = sctp_sched_fcfs_unsched_all, 120 120 }; 121 121 122 + static void sctp_sched_ops_fcfs_init(void) 123 + { 124 + sctp_sched_ops_register(SCTP_SS_FCFS, &sctp_sched_fcfs); 125 + } 126 + 122 127 /* API to other parts of the stack */ 123 128 124 - extern struct sctp_sched_ops sctp_sched_prio; 125 - extern struct sctp_sched_ops sctp_sched_rr; 129 + static struct sctp_sched_ops *sctp_sched_ops[SCTP_SS_MAX + 1]; 126 130 127 - static struct sctp_sched_ops *sctp_sched_ops[] = { 128 - &sctp_sched_fcfs, 129 - &sctp_sched_prio, 130 - &sctp_sched_rr, 131 - }; 131 + void sctp_sched_ops_register(enum sctp_sched_type sched, 132 + struct sctp_sched_ops *sched_ops) 133 + { 134 + sctp_sched_ops[sched] = sched_ops; 135 + } 136 + 137 + void sctp_sched_ops_init(void) 138 + { 139 + sctp_sched_ops_fcfs_init(); 140 + sctp_sched_ops_prio_init(); 141 + sctp_sched_ops_rr_init(); 142 + } 132 143 133 144 int sctp_sched_set_sched(struct sctp_association *asoc, 134 145 enum sctp_sched_type sched)
+6 -1
net/sctp/stream_sched_prio.c
··· 333 333 sctp_sched_prio_unsched(soute); 334 334 } 335 335 336 - struct sctp_sched_ops sctp_sched_prio = { 336 + static struct sctp_sched_ops sctp_sched_prio = { 337 337 .set = sctp_sched_prio_set, 338 338 .get = sctp_sched_prio_get, 339 339 .init = sctp_sched_prio_init, ··· 345 345 .sched_all = sctp_sched_prio_sched_all, 346 346 .unsched_all = sctp_sched_prio_unsched_all, 347 347 }; 348 + 349 + void sctp_sched_ops_prio_init(void) 350 + { 351 + sctp_sched_ops_register(SCTP_SS_PRIO, &sctp_sched_prio); 352 + }
+6 -1
net/sctp/stream_sched_rr.c
··· 187 187 sctp_sched_rr_unsched(stream, soute); 188 188 } 189 189 190 - struct sctp_sched_ops sctp_sched_rr = { 190 + static struct sctp_sched_ops sctp_sched_rr = { 191 191 .set = sctp_sched_rr_set, 192 192 .get = sctp_sched_rr_get, 193 193 .init = sctp_sched_rr_init, ··· 199 199 .sched_all = sctp_sched_rr_sched_all, 200 200 .unsched_all = sctp_sched_rr_unsched_all, 201 201 }; 202 + 203 + void sctp_sched_ops_rr_init(void) 204 + { 205 + sctp_sched_ops_register(SCTP_SS_RR, &sctp_sched_rr); 206 + }
+1 -1
net/tipc/group.c
··· 497 497 while ((skb = skb_peek(defq))) { 498 498 hdr = buf_msg(skb); 499 499 mtyp = msg_type(hdr); 500 + blks = msg_blocks(hdr); 500 501 deliver = true; 501 502 ack = false; 502 503 update = false; ··· 547 546 if (!update) 548 547 continue; 549 548 550 - blks = msg_blocks(hdr); 551 549 tipc_group_update_rcv_win(grp, blks, node, port, xmitq); 552 550 } 553 551 return;
+9 -5
net/vmw_vsock/vmci_transport.c
··· 797 797 798 798 /* We should not be sending anymore since the peer won't be 799 799 * there to receive, but we can still receive if there is data 800 - * left in our consume queue. 800 + * left in our consume queue. If the local endpoint is a host, 801 + * we can't call vsock_stream_has_data, since that may block, 802 + * but a host endpoint can't read data once the VM has 803 + * detached, so there is no available data in that case. 801 804 */ 802 - if (vsock_stream_has_data(vsk) <= 0) { 803 - sk->sk_state = TCP_CLOSE; 804 - 805 + if (vsk->local_addr.svm_cid == VMADDR_CID_HOST || 806 + vsock_stream_has_data(vsk) <= 0) { 805 807 if (sk->sk_state == TCP_SYN_SENT) { 806 808 /* The peer may detach from a queue pair while 807 809 * we are still in the connecting state, i.e., ··· 813 811 * event like a reset. 814 812 */ 815 813 814 + sk->sk_state = TCP_CLOSE; 816 815 sk->sk_err = ECONNRESET; 817 816 sk->sk_error_report(sk); 818 817 return; 819 818 } 819 + sk->sk_state = TCP_CLOSE; 820 820 } 821 821 sk->sk_state_change(sk); 822 822 } ··· 2148 2144 2149 2145 MODULE_AUTHOR("VMware, Inc."); 2150 2146 MODULE_DESCRIPTION("VMCI transport for Virtual Sockets"); 2151 - MODULE_VERSION("1.0.4.0-k"); 2147 + MODULE_VERSION("1.0.5.0-k"); 2152 2148 MODULE_LICENSE("GPL v2"); 2153 2149 MODULE_ALIAS("vmware_vsock"); 2154 2150 MODULE_ALIAS_NETPROTO(PF_VSOCK);
+7
net/wireless/Kconfig
··· 20 20 tristate "cfg80211 - wireless configuration API" 21 21 depends on RFKILL || !RFKILL 22 22 select FW_LOADER 23 + # may need to update this when certificates are changed and are 24 + # using a different algorithm, though right now they shouldn't 25 + # (this is here rather than below to allow it to be a module) 26 + select CRYPTO_SHA256 if CFG80211_USE_KERNEL_REGDB_KEYS 23 27 ---help--- 24 28 cfg80211 is the Linux wireless LAN (802.11) configuration API. 25 29 Enable this if you have a wireless device. ··· 116 112 If selected, point to a directory with DER-encoded X.509 117 113 certificates like in the kernel sources (net/wireless/certs/) 118 114 that shall be accepted for a signed regulatory database. 115 + 116 + Note that you need to also select the correct CRYPTO_<hash> modules 117 + for your certificates, and if cfg80211 is built-in they also must be. 119 118 120 119 config CFG80211_REG_CELLULAR_HINTS 121 120 bool "cfg80211 regulatory support for cellular base station hints"