Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

scsi: target: cxgbit: Enable Delayed ACK

Enable Delayed ACK to reduce the number of TCP ACKs.

Link: https://lore.kernel.org/r/1634135109-5044-1-git-send-email-varun@chelsio.com
Signed-off-by: Varun Prakash <varun@chelsio.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

Authored by Varun Prakash and committed by Martin K. Petersen.
Commit d1e51ea6 (parent 7f96c7a6) — NOTE: which hash is the commit and which the parent is not stated on the scraped page; verify against the repository.

+26 -10
+3 -5
drivers/target/iscsi/cxgbit/cxgbit_cm.c
··· 836 836 csk->rcv_win = CXGBIT_10G_RCV_WIN; 837 837 if (scale) 838 838 csk->rcv_win *= scale; 839 + csk->rcv_win = min(csk->rcv_win, RCV_BUFSIZ_M << 10); 839 840 840 841 #define CXGBIT_10G_SND_WIN (256 * 1024) 841 842 csk->snd_win = CXGBIT_10G_SND_WIN; 842 843 if (scale) 843 844 csk->snd_win *= scale; 845 + csk->snd_win = min(csk->snd_win, 512U * 1024); 844 846 845 847 pr_debug("%s snd_win %d rcv_win %d\n", 846 848 __func__, csk->snd_win, csk->rcv_win); ··· 1067 1065 if (!skb) 1068 1066 return -1; 1069 1067 1070 - credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) | 1068 + credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(3) | 1071 1069 RX_CREDITS_V(csk->rx_credits); 1072 1070 1073 1071 cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx, ··· 1199 1197 if (tcph->ece && tcph->cwr) 1200 1198 opt2 |= CCTRL_ECN_V(1); 1201 1199 1202 - opt2 |= RX_COALESCE_V(3); 1203 1200 opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO); 1204 1201 1205 1202 opt2 |= T5_ISS_F; ··· 1646 1645 csk->snd_nxt = snd_isn; 1647 1646 1648 1647 csk->rcv_nxt = rcv_isn; 1649 - 1650 - if (csk->rcv_win > (RCV_BUFSIZ_M << 10)) 1651 - csk->rx_credits = (csk->rcv_win - (RCV_BUFSIZ_M << 10)); 1652 1648 1653 1649 csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt); 1654 1650 cxgbit_set_emss(csk, tcp_opt);
+23 -5
drivers/target/iscsi/cxgbit/cxgbit_target.c
··· 189 189 wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP_MODE_ISCSI) | 190 190 FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode); 191 191 192 - req->tunnel_to_proxy = htonl((wr_ulp_mode) | force | 193 - FW_OFLD_TX_DATA_WR_SHOVE_V(skb_peek(&csk->txq) ? 0 : 1)); 192 + req->tunnel_to_proxy = htonl(wr_ulp_mode | force | 193 + FW_OFLD_TX_DATA_WR_SHOVE_F); 194 194 } 195 195 196 196 static void cxgbit_arp_failure_skb_discard(void *handle, struct sk_buff *skb) ··· 1531 1531 return ret; 1532 1532 } 1533 1533 1534 - static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb) 1534 + static int cxgbit_t5_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb) 1535 1535 { 1536 1536 struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb); 1537 1537 struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0); ··· 1557 1557 return ret; 1558 1558 } 1559 1559 1560 + static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb) 1561 + { 1562 + struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb); 1563 + int ret; 1564 + 1565 + ret = cxgbit_process_lro_skb(csk, skb); 1566 + if (ret) 1567 + return ret; 1568 + 1569 + csk->rx_credits += lro_cb->pdu_totallen; 1570 + if (csk->rx_credits >= csk->rcv_win) { 1571 + csk->rx_credits = 0; 1572 + cxgbit_rx_data_ack(csk); 1573 + } 1574 + 1575 + return 0; 1576 + } 1577 + 1560 1578 static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb) 1561 1579 { 1562 1580 struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi; ··· 1582 1564 1583 1565 if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO)) { 1584 1566 if (is_t5(lldi->adapter_type)) 1585 - ret = cxgbit_rx_lro_skb(csk, skb); 1567 + ret = cxgbit_t5_rx_lro_skb(csk, skb); 1586 1568 else 1587 - ret = cxgbit_process_lro_skb(csk, skb); 1569 + ret = cxgbit_rx_lro_skb(csk, skb); 1588 1570 } 1589 1571 1590 1572 __kfree_skb(skb);