Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

drivers/net/ethernet/cadence/macb_main.c
5cebb40bc955 ("net: macb: Fix PTP one step sync support")
138badbc21a0 ("net: macb: use NAPI for TX completion path")
https://lore.kernel.org/all/20220523111021.31489367@canb.auug.org.au/

net/smc/af_smc.c
75c1edf23b95 ("net/smc: postpone sk_refcnt increment in connect()")
3aba103006bc ("net/smc: align the connect behaviour with TCP")
https://lore.kernel.org/all/20220524114408.4bf1af38@canb.auug.org.au/

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+192 -91
+9
Documentation/admin-guide/sysctl/net.rst
··· 382 382 If set to 1 (default), hash rethink is performed on listening socket. 383 383 If set to 0, hash rethink is not performed. 384 384 385 + gro_normal_batch 386 + ---------------- 387 + 388 + Maximum number of the segments to batch up on output of GRO. When a packet 389 + exits GRO, either as a coalesced superframe or as an original packet which 390 + GRO has decided not to coalesce, it is placed on a per-NAPI list. This 391 + list is then passed to the stack when the number of segments reaches the 392 + gro_normal_batch limit. 393 + 385 394 2. /proc/sys/net/unix - Parameters for Unix domain sockets 386 395 ---------------------------------------------------------- 387 396
+6 -5
drivers/net/amt.c
··· 943 943 if (amt->status < AMT_STATUS_RECEIVED_ADVERTISEMENT) 944 944 goto out; 945 945 946 - if (amt->req_cnt++ > AMT_MAX_REQ_COUNT) { 946 + if (amt->req_cnt > AMT_MAX_REQ_COUNT) { 947 947 netdev_dbg(amt->dev, "Gateway is not ready"); 948 948 amt->qi = AMT_INIT_REQ_TIMEOUT; 949 949 amt->ready4 = false; ··· 951 951 amt->remote_ip = 0; 952 952 __amt_update_gw_status(amt, AMT_STATUS_INIT, false); 953 953 amt->req_cnt = 0; 954 + goto out; 954 955 } 955 956 spin_unlock_bh(&amt->lock); 956 957 957 958 amt_send_request(amt, false); 958 959 amt_send_request(amt, true); 959 - amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true); 960 960 spin_lock_bh(&amt->lock); 961 + __amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true); 962 + amt->req_cnt++; 961 963 out: 962 964 exp = min_t(u32, (1 * (1 << amt->req_cnt)), AMT_MAX_REQ_TIMEOUT); 963 965 mod_delayed_work(amt_wq, &amt->req_wq, msecs_to_jiffies(exp * 1000)); ··· 2698 2696 err = true; 2699 2697 goto drop; 2700 2698 } 2701 - if (amt_advertisement_handler(amt, skb)) 2702 - amt->dev->stats.rx_dropped++; 2703 - goto out; 2699 + err = amt_advertisement_handler(amt, skb); 2700 + break; 2704 2701 case AMT_MSG_MULTICAST_DATA: 2705 2702 if (iph->saddr != amt->remote_ip) { 2706 2703 netdev_dbg(amt->dev, "Invalid Relay IP\n");
+12 -3
drivers/net/bonding/bond_main.c
··· 5591 5591 const struct ethtool_ops *ops; 5592 5592 struct net_device *real_dev; 5593 5593 struct phy_device *phydev; 5594 + int ret = 0; 5594 5595 5596 + rcu_read_lock(); 5595 5597 real_dev = bond_option_active_slave_get_rcu(bond); 5598 + dev_hold(real_dev); 5599 + rcu_read_unlock(); 5600 + 5596 5601 if (real_dev) { 5597 5602 ops = real_dev->ethtool_ops; 5598 5603 phydev = real_dev->phydev; 5599 5604 5600 5605 if (phy_has_tsinfo(phydev)) { 5601 - return phy_ts_info(phydev, info); 5606 + ret = phy_ts_info(phydev, info); 5607 + goto out; 5602 5608 } else if (ops->get_ts_info) { 5603 - return ops->get_ts_info(real_dev, info); 5609 + ret = ops->get_ts_info(real_dev, info); 5610 + goto out; 5604 5611 } 5605 5612 } 5606 5613 ··· 5615 5608 SOF_TIMESTAMPING_SOFTWARE; 5616 5609 info->phc_index = -1; 5617 5610 5618 - return 0; 5611 + out: 5612 + dev_put(real_dev); 5613 + return ret; 5619 5614 } 5620 5615 5621 5616 static const struct ethtool_ops bond_ethtool_ops = {
+2 -1
drivers/net/dsa/Kconfig
··· 72 72 73 73 config NET_DSA_SMSC_LAN9303 74 74 tristate 75 - depends on VLAN_8021Q || VLAN_8021Q=n 76 75 select NET_DSA_TAG_LAN9303 77 76 select REGMAP 78 77 help ··· 81 82 config NET_DSA_SMSC_LAN9303_I2C 82 83 tristate "SMSC/Microchip LAN9303 3-ports 10/100 ethernet switch in I2C managed mode" 83 84 depends on I2C 85 + depends on VLAN_8021Q || VLAN_8021Q=n 84 86 select NET_DSA_SMSC_LAN9303 85 87 select REGMAP_I2C 86 88 help ··· 91 91 config NET_DSA_SMSC_LAN9303_MDIO 92 92 tristate "SMSC/Microchip LAN9303 3-ports 10/100 ethernet switch in MDIO managed mode" 93 93 select NET_DSA_SMSC_LAN9303 94 + depends on VLAN_8021Q || VLAN_8021Q=n 94 95 help 95 96 Enable access functions if the SMSC/Microchip LAN9303 is configured 96 97 for MDIO managed mode.
+36 -4
drivers/net/ethernet/cadence/macb_main.c
··· 36 36 #include <linux/iopoll.h> 37 37 #include <linux/phy/phy.h> 38 38 #include <linux/pm_runtime.h> 39 + #include <linux/ptp_classify.h> 39 40 #include <linux/reset.h> 40 41 #include "macb.h" 41 42 ··· 1123 1122 napi_enable(&queue->napi_tx); 1124 1123 } 1125 1124 1125 + static bool ptp_one_step_sync(struct sk_buff *skb) 1126 + { 1127 + struct ptp_header *hdr; 1128 + unsigned int ptp_class; 1129 + u8 msgtype; 1130 + 1131 + /* No need to parse packet if PTP TS is not involved */ 1132 + if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) 1133 + goto not_oss; 1134 + 1135 + /* Identify and return whether PTP one step sync is being processed */ 1136 + ptp_class = ptp_classify_raw(skb); 1137 + if (ptp_class == PTP_CLASS_NONE) 1138 + goto not_oss; 1139 + 1140 + hdr = ptp_parse_header(skb, ptp_class); 1141 + if (!hdr) 1142 + goto not_oss; 1143 + 1144 + if (hdr->flag_field[0] & PTP_FLAG_TWOSTEP) 1145 + goto not_oss; 1146 + 1147 + msgtype = ptp_get_msgtype(hdr, ptp_class); 1148 + if (msgtype == PTP_MSGTYPE_SYNC) 1149 + return true; 1150 + 1151 + not_oss: 1152 + return false; 1153 + } 1154 + 1126 1155 static int macb_tx_complete(struct macb_queue *queue, int budget) 1127 1156 { 1128 1157 struct macb *bp = queue->bp; ··· 1189 1158 1190 1159 /* First, update TX stats if needed */ 1191 1160 if (skb) { 1192 - if (unlikely(skb_shinfo(skb)->tx_flags & 1193 - SKBTX_HW_TSTAMP) && 1161 + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 1162 + !ptp_one_step_sync(skb) && 1194 1163 gem_ptp_do_txstamp(queue, skb, desc) == 0) { 1195 1164 /* skb now belongs to timestamp buffer 1196 1165 * and will be removed later ··· 2094 2063 ctrl |= MACB_BF(TX_LSO, lso_ctrl); 2095 2064 ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl); 2096 2065 if ((bp->dev->features & NETIF_F_HW_CSUM) && 2097 - skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl) 2066 + skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl && 2067 + !ptp_one_step_sync(skb)) 2098 2068 ctrl |= MACB_BIT(TX_NOCRC); 2099 2069 } else 
2100 2070 /* Only set MSS/MFS on payload descriptors ··· 2193 2161 2194 2162 if (!(ndev->features & NETIF_F_HW_CSUM) || 2195 2163 !((*skb)->ip_summed != CHECKSUM_PARTIAL) || 2196 - skb_shinfo(*skb)->gso_size) /* Not available for GSO */ 2164 + skb_shinfo(*skb)->gso_size || ptp_one_step_sync(*skb)) 2197 2165 return 0; 2198 2166 2199 2167 if (padlen <= 0) {
+3 -1
drivers/net/ethernet/cadence/macb_ptp.c
··· 470 470 case HWTSTAMP_TX_ONESTEP_SYNC: 471 471 if (gem_ptp_set_one_step_sync(bp, 1) != 0) 472 472 return -ERANGE; 473 - fallthrough; 473 + tx_bd_control = TSTAMP_ALL_FRAMES; 474 + break; 474 475 case HWTSTAMP_TX_ON: 476 + gem_ptp_set_one_step_sync(bp, 0); 475 477 tx_bd_control = TSTAMP_ALL_FRAMES; 476 478 break; 477 479 default:
+7 -5
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
··· 1097 1097 u32 fd_len = dpaa2_fd_get_len(fd); 1098 1098 struct dpaa2_sg_entry *sgt; 1099 1099 int should_free_skb = 1; 1100 + void *tso_hdr; 1100 1101 int i; 1101 1102 1102 1103 fd_addr = dpaa2_fd_get_addr(fd); ··· 1136 1135 sgt = (struct dpaa2_sg_entry *)(buffer_start + 1137 1136 priv->tx_data_offset); 1138 1137 1138 + /* Unmap the SGT buffer */ 1139 + dma_unmap_single(dev, fd_addr, swa->tso.sgt_size, 1140 + DMA_BIDIRECTIONAL); 1141 + 1139 1142 /* Unmap and free the header */ 1143 + tso_hdr = dpaa2_iova_to_virt(priv->iommu_domain, dpaa2_sg_get_addr(sgt)); 1140 1144 dma_unmap_single(dev, dpaa2_sg_get_addr(sgt), TSO_HEADER_SIZE, 1141 1145 DMA_TO_DEVICE); 1142 - kfree(dpaa2_iova_to_virt(priv->iommu_domain, dpaa2_sg_get_addr(sgt))); 1146 + kfree(tso_hdr); 1143 1147 1144 1148 /* Unmap the other SG entries for the data */ 1145 1149 for (i = 1; i < swa->tso.num_sg; i++) 1146 1150 dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]), 1147 1151 dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE); 1148 - 1149 - /* Unmap the SGT buffer */ 1150 - dma_unmap_single(dev, fd_addr, swa->sg.sgt_size, 1151 - DMA_BIDIRECTIONAL); 1152 1152 1153 1153 if (!swa->tso.is_last_fd) 1154 1154 should_free_skb = 0;
+5 -3
drivers/net/ethernet/freescale/fec_main.c
··· 3876 3876 mutex_init(&fep->ptp_clk_mutex); 3877 3877 3878 3878 /* clk_ref is optional, depends on board */ 3879 - fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref"); 3880 - if (IS_ERR(fep->clk_ref)) 3881 - fep->clk_ref = NULL; 3879 + fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref"); 3880 + if (IS_ERR(fep->clk_ref)) { 3881 + ret = PTR_ERR(fep->clk_ref); 3882 + goto failed_clk; 3883 + } 3882 3884 fep->clk_ref_rate = clk_get_rate(fep->clk_ref); 3883 3885 3884 3886 /* clk_2x_txclk is optional, depends on board */
+1 -1
drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
··· 386 386 return -ENOMEM; 387 387 388 388 wq->shadow_idx = devm_kcalloc(&pdev->dev, wq->num_q_pages, 389 - sizeof(wq->prod_idx), GFP_KERNEL); 389 + sizeof(*wq->shadow_idx), GFP_KERNEL); 390 390 if (!wq->shadow_idx) 391 391 goto err_shadow_idx; 392 392
+6 -7
drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
··· 1084 1084 unsigned char addr[ETH_ALEN] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x00}; 1085 1085 struct tc_cls_u32_offload cls_u32 = { }; 1086 1086 struct stmmac_packet_attrs attr = { }; 1087 - struct tc_action **actions, *act; 1087 + struct tc_action **actions; 1088 1088 struct tc_u32_sel *sel; 1089 + struct tcf_gact *gact; 1089 1090 struct tcf_exts *exts; 1090 1091 int ret, i, nk = 1; 1091 1092 ··· 1111 1110 goto cleanup_exts; 1112 1111 } 1113 1112 1114 - act = kcalloc(nk, sizeof(*act), GFP_KERNEL); 1115 - if (!act) { 1113 + gact = kcalloc(nk, sizeof(*gact), GFP_KERNEL); 1114 + if (!gact) { 1116 1115 ret = -ENOMEM; 1117 1116 goto cleanup_actions; 1118 1117 } ··· 1127 1126 exts->nr_actions = nk; 1128 1127 exts->actions = actions; 1129 1128 for (i = 0; i < nk; i++) { 1130 - struct tcf_gact *gact = to_gact(&act[i]); 1131 - 1132 - actions[i] = &act[i]; 1129 + actions[i] = (struct tc_action *)&gact[i]; 1133 1130 gact->tcf_action = TC_ACT_SHOT; 1134 1131 } 1135 1132 ··· 1151 1152 stmmac_tc_setup_cls_u32(priv, priv, &cls_u32); 1152 1153 1153 1154 cleanup_act: 1154 - kfree(act); 1155 + kfree(gact); 1155 1156 cleanup_actions: 1156 1157 kfree(actions); 1157 1158 cleanup_exts:
+4 -1
drivers/net/hyperv/netvsc_drv.c
··· 2643 2643 2644 2644 /* Save the current config info */ 2645 2645 ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev); 2646 - 2646 + if (!ndev_ctx->saved_netvsc_dev_info) { 2647 + ret = -ENOMEM; 2648 + goto out; 2649 + } 2647 2650 ret = netvsc_detach(net, nvdev); 2648 2651 out: 2649 2652 rtnl_unlock();
+14 -3
drivers/nfc/st21nfca/se.c
··· 241 241 } 242 242 EXPORT_SYMBOL(st21nfca_hci_se_io); 243 243 244 - static void st21nfca_se_wt_timeout(struct timer_list *t) 244 + static void st21nfca_se_wt_work(struct work_struct *work) 245 245 { 246 246 /* 247 247 * No answer from the secure element ··· 254 254 */ 255 255 /* hardware reset managed through VCC_UICC_OUT power supply */ 256 256 u8 param = 0x01; 257 - struct st21nfca_hci_info *info = from_timer(info, t, 258 - se_info.bwi_timer); 257 + struct st21nfca_hci_info *info = container_of(work, 258 + struct st21nfca_hci_info, 259 + se_info.timeout_work); 259 260 260 261 info->se_info.bwi_active = false; 261 262 ··· 270 269 ST21NFCA_EVT_SE_HARD_RESET, &param, 1); 271 270 } 272 271 info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME); 272 + } 273 + 274 + static void st21nfca_se_wt_timeout(struct timer_list *t) 275 + { 276 + struct st21nfca_hci_info *info = from_timer(info, t, se_info.bwi_timer); 277 + 278 + schedule_work(&info->se_info.timeout_work); 273 279 } 274 280 275 281 static void st21nfca_se_activation_timeout(struct timer_list *t) ··· 368 360 switch (event) { 369 361 case ST21NFCA_EVT_TRANSMIT_DATA: 370 362 del_timer_sync(&info->se_info.bwi_timer); 363 + cancel_work_sync(&info->se_info.timeout_work); 371 364 info->se_info.bwi_active = false; 372 365 r = nfc_hci_send_event(hdev, ST21NFCA_DEVICE_MGNT_GATE, 373 366 ST21NFCA_EVT_SE_END_OF_APDU_TRANSFER, NULL, 0); ··· 398 389 struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); 399 390 400 391 init_completion(&info->se_info.req_completion); 392 + INIT_WORK(&info->se_info.timeout_work, st21nfca_se_wt_work); 401 393 /* initialize timers */ 402 394 timer_setup(&info->se_info.bwi_timer, st21nfca_se_wt_timeout, 0); 403 395 info->se_info.bwi_active = false; ··· 426 416 if (info->se_info.se_active) 427 417 del_timer_sync(&info->se_info.se_active_timer); 428 418 419 + cancel_work_sync(&info->se_info.timeout_work); 429 420 info->se_info.bwi_active = false; 430 421 info->se_info.se_active = 
false; 431 422 }
+1
drivers/nfc/st21nfca/st21nfca.h
··· 141 141 142 142 se_io_cb_t cb; 143 143 void *cb_context; 144 + struct work_struct timeout_work; 144 145 }; 145 146 146 147 struct st21nfca_hci_info {
+3
include/linux/ptp_classify.h
··· 43 43 #define OFF_PTP_SOURCE_UUID 22 /* PTPv1 only */ 44 44 #define OFF_PTP_SEQUENCE_ID 30 45 45 46 + /* PTP header flag fields */ 47 + #define PTP_FLAG_TWOSTEP BIT(1) 48 + 46 49 /* Below defines should actually be removed at some point in time. */ 47 50 #define IP6_HLEN 40 48 51 #define UDP_HLEN 8
+1 -1
include/trace/events/rxrpc.h
··· 1330 1330 __entry->call_serial = call->rx_serial; 1331 1331 __entry->conn_serial = call->conn->hi_serial; 1332 1332 __entry->tx_seq = call->tx_hard_ack; 1333 - __entry->rx_seq = call->ackr_seen; 1333 + __entry->rx_seq = call->rx_hard_ack; 1334 1334 ), 1335 1335 1336 1336 TP_printk("c=%08x %08x:%08x r=%08x/%08x tx=%08x rx=%08x",
+3 -2
net/bluetooth/hci_conn.c
··· 943 943 944 944 bt_dev_err(hdev, "request failed to create LE connection: err %d", err); 945 945 946 - if (!conn) 946 + /* Check if connection is still pending */ 947 + if (conn != hci_lookup_le_connect(hdev)) 947 948 goto done; 948 949 949 - hci_le_conn_failed(conn, err); 950 + hci_conn_failed(conn, err); 950 951 951 952 done: 952 953 hci_dev_unlock(hdev);
+5 -3
net/bluetooth/hci_event.c
··· 5632 5632 status = HCI_ERROR_INVALID_PARAMETERS; 5633 5633 } 5634 5634 5635 - if (status) { 5636 - hci_conn_failed(conn, status); 5635 + /* All connection failure handling is taken care of by the 5636 + * hci_conn_failed function which is triggered by the HCI 5637 + * request completion callbacks used for connecting. 5638 + */ 5639 + if (status) 5637 5640 goto unlock; 5638 - } 5639 5641 5640 5642 if (conn->dst_type == ADDR_LE_DEV_PUBLIC) 5641 5643 addr_type = BDADDR_LE_PUBLIC;
+7 -6
net/rxrpc/ar-internal.h
··· 668 668 669 669 spinlock_t input_lock; /* Lock for packet input to this call */ 670 670 671 - /* receive-phase ACK management */ 671 + /* Receive-phase ACK management (ACKs we send). */ 672 672 u8 ackr_reason; /* reason to ACK */ 673 673 rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */ 674 - rxrpc_serial_t ackr_first_seq; /* first sequence number received */ 675 - rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */ 676 - rxrpc_seq_t ackr_consumed; /* Highest packet shown consumed */ 677 - rxrpc_seq_t ackr_seen; /* Highest packet shown seen */ 674 + rxrpc_seq_t ackr_highest_seq; /* Higest sequence number received */ 675 + atomic_t ackr_nr_unacked; /* Number of unacked packets */ 676 + atomic_t ackr_nr_consumed; /* Number of packets needing hard ACK */ 678 677 679 678 /* RTT management */ 680 679 rxrpc_serial_t rtt_serial[4]; /* Serial number of DATA or PING sent */ ··· 683 684 #define RXRPC_CALL_RTT_AVAIL_MASK 0xf 684 685 #define RXRPC_CALL_RTT_PEND_SHIFT 8 685 686 686 - /* transmission-phase ACK management */ 687 + /* Transmission-phase ACK management (ACKs we've received). */ 687 688 ktime_t acks_latest_ts; /* Timestamp of latest ACK received */ 689 + rxrpc_seq_t acks_first_seq; /* first sequence number received */ 690 + rxrpc_seq_t acks_prev_seq; /* Highest previousPacket received */ 688 691 rxrpc_seq_t acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */ 689 692 rxrpc_seq_t acks_lost_top; /* tx_top at the time lost-ack ping sent */ 690 693 rxrpc_serial_t acks_lost_ping; /* Serial number of probe ACK */
+2 -1
net/rxrpc/call_event.c
··· 406 406 goto recheck_state; 407 407 } 408 408 409 - if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events)) { 409 + if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events) && 410 + call->state != RXRPC_CALL_CLIENT_RECV_REPLY) { 410 411 rxrpc_resend(call, now); 411 412 goto recheck_state; 412 413 }
+20 -11
net/rxrpc/input.c
··· 412 412 { 413 413 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 414 414 enum rxrpc_call_state state; 415 - unsigned int j, nr_subpackets; 416 - rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0; 415 + unsigned int j, nr_subpackets, nr_unacked = 0; 416 + rxrpc_serial_t serial = sp->hdr.serial, ack_serial = serial; 417 417 rxrpc_seq_t seq0 = sp->hdr.seq, hard_ack; 418 418 bool immediate_ack = false, jumbo_bad = false; 419 419 u8 ack = 0; ··· 453 453 !rxrpc_receiving_reply(call)) 454 454 goto unlock; 455 455 456 - call->ackr_prev_seq = seq0; 457 456 hard_ack = READ_ONCE(call->rx_hard_ack); 458 457 459 458 nr_subpackets = sp->nr_subpackets; ··· 533 534 ack_serial = serial; 534 535 } 535 536 537 + if (after(seq0, call->ackr_highest_seq)) 538 + call->ackr_highest_seq = seq0; 539 + 536 540 /* Queue the packet. We use a couple of memory barriers here as need 537 541 * to make sure that rx_top is perceived to be set after the buffer 538 542 * pointer and that the buffer pointer is set after the annotation and ··· 569 567 sp = NULL; 570 568 } 571 569 570 + nr_unacked++; 571 + 572 572 if (last) { 573 573 set_bit(RXRPC_CALL_RX_LAST, &call->flags); 574 574 if (!ack) { ··· 590 586 } 591 587 call->rx_expect_next = seq + 1; 592 588 } 589 + if (!ack) 590 + ack_serial = serial; 593 591 } 594 592 595 593 ack: 594 + if (atomic_add_return(nr_unacked, &call->ackr_nr_unacked) > 2 && !ack) 595 + ack = RXRPC_ACK_IDLE; 596 + 596 597 if (ack) 597 598 rxrpc_propose_ACK(call, ack, ack_serial, 598 599 immediate_ack, true, ··· 821 812 static bool rxrpc_is_ack_valid(struct rxrpc_call *call, 822 813 rxrpc_seq_t first_pkt, rxrpc_seq_t prev_pkt) 823 814 { 824 - rxrpc_seq_t base = READ_ONCE(call->ackr_first_seq); 815 + rxrpc_seq_t base = READ_ONCE(call->acks_first_seq); 825 816 826 817 if (after(first_pkt, base)) 827 818 return true; /* The window advanced */ ··· 829 820 if (before(first_pkt, base)) 830 821 return false; /* firstPacket regressed */ 831 822 832 - if (after_eq(prev_pkt, 
call->ackr_prev_seq)) 823 + if (after_eq(prev_pkt, call->acks_prev_seq)) 833 824 return true; /* previousPacket hasn't regressed. */ 834 825 835 826 /* Some rx implementations put a serial number in previousPacket. */ ··· 942 933 /* Discard any out-of-order or duplicate ACKs (outside lock). */ 943 934 if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) { 944 935 trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial, 945 - first_soft_ack, call->ackr_first_seq, 946 - prev_pkt, call->ackr_prev_seq); 936 + first_soft_ack, call->acks_first_seq, 937 + prev_pkt, call->acks_prev_seq); 947 938 return; 948 939 } 949 940 ··· 958 949 /* Discard any out-of-order or duplicate ACKs (inside lock). */ 959 950 if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) { 960 951 trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial, 961 - first_soft_ack, call->ackr_first_seq, 962 - prev_pkt, call->ackr_prev_seq); 952 + first_soft_ack, call->acks_first_seq, 953 + prev_pkt, call->acks_prev_seq); 963 954 goto out; 964 955 } 965 956 call->acks_latest_ts = skb->tstamp; 966 957 967 - call->ackr_first_seq = first_soft_ack; 968 - call->ackr_prev_seq = prev_pkt; 958 + call->acks_first_seq = first_soft_ack; 959 + call->acks_prev_seq = prev_pkt; 969 960 970 961 /* Parse rwind and mtu sizes if provided. */ 971 962 if (buf.info.rxMTU)
+12 -8
net/rxrpc/output.c
··· 74 74 u8 reason) 75 75 { 76 76 rxrpc_serial_t serial; 77 + unsigned int tmp; 77 78 rxrpc_seq_t hard_ack, top, seq; 78 79 int ix; 79 80 u32 mtu, jmax; 80 81 u8 *ackp = pkt->acks; 82 + 83 + tmp = atomic_xchg(&call->ackr_nr_unacked, 0); 84 + tmp |= atomic_xchg(&call->ackr_nr_consumed, 0); 85 + if (!tmp && (reason == RXRPC_ACK_DELAY || 86 + reason == RXRPC_ACK_IDLE)) 87 + return 0; 81 88 82 89 /* Barrier against rxrpc_input_data(). */ 83 90 serial = call->ackr_serial; ··· 96 89 pkt->ack.bufferSpace = htons(8); 97 90 pkt->ack.maxSkew = htons(0); 98 91 pkt->ack.firstPacket = htonl(hard_ack + 1); 99 - pkt->ack.previousPacket = htonl(call->ackr_prev_seq); 92 + pkt->ack.previousPacket = htonl(call->ackr_highest_seq); 100 93 pkt->ack.serial = htonl(serial); 101 94 pkt->ack.reason = reason; 102 95 pkt->ack.nAcks = top - hard_ack; ··· 230 223 n = rxrpc_fill_out_ack(conn, call, pkt, &hard_ack, &top, reason); 231 224 232 225 spin_unlock_bh(&call->lock); 226 + if (n == 0) { 227 + kfree(pkt); 228 + return 0; 229 + } 233 230 234 231 iov[0].iov_base = pkt; 235 232 iov[0].iov_len = sizeof(pkt->whdr) + sizeof(pkt->ack) + n; ··· 270 259 ntohl(pkt->ack.serial), 271 260 false, true, 272 261 rxrpc_propose_ack_retry_tx); 273 - } else { 274 - spin_lock_bh(&call->lock); 275 - if (after(hard_ack, call->ackr_consumed)) 276 - call->ackr_consumed = hard_ack; 277 - if (after(top, call->ackr_seen)) 278 - call->ackr_seen = top; 279 - spin_unlock_bh(&call->lock); 280 262 } 281 263 282 264 rxrpc_set_keepalive(call);
+3 -5
net/rxrpc/recvmsg.c
··· 260 260 rxrpc_end_rx_phase(call, serial); 261 261 } else { 262 262 /* Check to see if there's an ACK that needs sending. */ 263 - if (after_eq(hard_ack, call->ackr_consumed + 2) || 264 - after_eq(top, call->ackr_seen + 2) || 265 - (hard_ack == top && after(hard_ack, call->ackr_consumed))) 266 - rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, 267 - true, true, 263 + if (atomic_inc_return(&call->ackr_nr_consumed) > 2) 264 + rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, serial, 265 + true, false, 268 266 rxrpc_propose_ack_rotate_rx); 269 267 if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY) 270 268 rxrpc_send_ack_packet(call, false, NULL);
+2 -2
net/rxrpc/sysctl.c
··· 12 12 13 13 static struct ctl_table_header *rxrpc_sysctl_reg_table; 14 14 static const unsigned int four = 4; 15 - static const unsigned int thirtytwo = 32; 15 + static const unsigned int max_backlog = RXRPC_BACKLOG_MAX - 1; 16 16 static const unsigned int n_65535 = 65535; 17 17 static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1; 18 18 static const unsigned long one_jiffy = 1; ··· 89 89 .mode = 0644, 90 90 .proc_handler = proc_dointvec_minmax, 91 91 .extra1 = (void *)&four, 92 - .extra2 = (void *)&thirtytwo, 92 + .extra2 = (void *)&max_backlog, 93 93 }, 94 94 { 95 95 .procname = "rx_window_size",
+28 -18
net/smc/af_smc.c
··· 1584 1584 if (rc && rc != -EINPROGRESS) 1585 1585 goto out; 1586 1586 1587 - sock_hold(&smc->sk); /* sock put in passive closing */ 1588 1587 if (smc->use_fallback) { 1589 1588 sock->state = rc ? SS_CONNECTING : SS_CONNECTED; 1590 1589 goto out; 1591 1590 } 1591 + sock_hold(&smc->sk); /* sock put in passive closing */ 1592 1592 if (flags & O_NONBLOCK) { 1593 1593 if (queue_work(smc_hs_wq, &smc->connect_work)) 1594 1594 smc->connect_nonblock = 1; ··· 2118 2118 return 0; 2119 2119 } 2120 2120 2121 - static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc, 2122 - struct smc_clc_msg_proposal *pclc, 2123 - struct smc_init_info *ini) 2121 + static int smc_find_rdma_v2_device_serv(struct smc_sock *new_smc, 2122 + struct smc_clc_msg_proposal *pclc, 2123 + struct smc_init_info *ini) 2124 2124 { 2125 2125 struct smc_clc_v2_extension *smc_v2_ext; 2126 2126 u8 smcr_version; 2127 - int rc; 2127 + int rc = 0; 2128 2128 2129 2129 if (!(ini->smcr_version & SMC_V2) || !smcr_indicated(ini->smc_type_v2)) 2130 2130 goto not_found; ··· 2142 2142 ini->smcrv2.saddr = new_smc->clcsock->sk->sk_rcv_saddr; 2143 2143 ini->smcrv2.daddr = smc_ib_gid_to_ipv4(smc_v2_ext->roce); 2144 2144 rc = smc_find_rdma_device(new_smc, ini); 2145 - if (rc) { 2146 - smc_find_ism_store_rc(rc, ini); 2145 + if (rc) 2147 2146 goto not_found; 2148 - } 2147 + 2149 2148 if (!ini->smcrv2.uses_gateway) 2150 2149 memcpy(ini->smcrv2.nexthop_mac, pclc->lcl.mac, ETH_ALEN); 2151 2150 2152 2151 smcr_version = ini->smcr_version; 2153 2152 ini->smcr_version = SMC_V2; 2154 2153 rc = smc_listen_rdma_init(new_smc, ini); 2155 - if (!rc) 2156 - rc = smc_listen_rdma_reg(new_smc, ini->first_contact_local); 2157 - if (!rc) 2158 - return; 2159 - ini->smcr_version = smcr_version; 2160 - smc_find_ism_store_rc(rc, ini); 2154 + if (rc) { 2155 + ini->smcr_version = smcr_version; 2156 + goto not_found; 2157 + } 2158 + rc = smc_listen_rdma_reg(new_smc, ini->first_contact_local); 2159 + if (rc) { 2160 + ini->smcr_version = 
smcr_version; 2161 + goto not_found; 2162 + } 2163 + return 0; 2161 2164 2162 2165 not_found: 2166 + rc = rc ?: SMC_CLC_DECL_NOSMCDEV; 2163 2167 ini->smcr_version &= ~SMC_V2; 2164 2168 ini->check_smcrv2 = false; 2169 + return rc; 2165 2170 } 2166 2171 2167 2172 static int smc_find_rdma_v1_device_serv(struct smc_sock *new_smc, ··· 2199 2194 struct smc_init_info *ini) 2200 2195 { 2201 2196 int prfx_rc; 2197 + int rc; 2202 2198 2203 2199 /* check for ISM device matching V2 proposed device */ 2204 2200 smc_find_ism_v2_device_serv(new_smc, pclc, ini); ··· 2227 2221 return ini->rc ?: SMC_CLC_DECL_NOSMCDDEV; 2228 2222 2229 2223 /* check if RDMA V2 is available */ 2230 - smc_find_rdma_v2_device_serv(new_smc, pclc, ini); 2231 - if (ini->smcrv2.ib_dev_v2) 2224 + rc = smc_find_rdma_v2_device_serv(new_smc, pclc, ini); 2225 + if (!rc) 2232 2226 return 0; 2227 + 2228 + /* skip V1 check if V2 is unavailable for non-Device reason */ 2229 + if (rc != SMC_CLC_DECL_NOSMCDEV && 2230 + rc != SMC_CLC_DECL_NOSMCRDEV && 2231 + rc != SMC_CLC_DECL_NOSMCDDEV) 2232 + return rc; 2233 2233 2234 2234 /* check if RDMA V1 is available */ 2235 2235 if (!prfx_rc) { 2236 - int rc; 2237 - 2238 2236 rc = smc_find_rdma_v1_device_serv(new_smc, pclc, ini); 2239 2237 smc_find_ism_store_rc(rc, ini); 2240 2238 return (!rc) ? 0 : ini->rc;