Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'smc-fixes'

Guvenc Gulce says:

====================
net/smc: fixes 2021-08-09

Please apply the following patch series for smc to netdev's net tree.
One patch fixes invalid connection counting for links and the other
one fixes an access to an already cleared link.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+38 -10
+1 -1
net/smc/af_smc.c
··· 795 795 reason_code = SMC_CLC_DECL_NOSRVLINK; 796 796 goto connect_abort; 797 797 } 798 - smc->conn.lnk = link; 798 + smc_switch_link_and_count(&smc->conn, link); 799 799 } 800 800 801 801 /* create send buffer and rmb */
+2 -2
net/smc/smc_core.c
··· 917 917 return rc; 918 918 } 919 919 920 - static void smc_switch_link_and_count(struct smc_connection *conn, 921 - struct smc_link *to_lnk) 920 + void smc_switch_link_and_count(struct smc_connection *conn, 921 + struct smc_link *to_lnk) 922 922 { 923 923 atomic_dec(&conn->lnk->conn_cnt); 924 924 conn->lnk = to_lnk;
+4
net/smc/smc_core.h
··· 97 97 unsigned long *wr_tx_mask; /* bit mask of used indexes */ 98 98 u32 wr_tx_cnt; /* number of WR send buffers */ 99 99 wait_queue_head_t wr_tx_wait; /* wait for free WR send buf */ 100 + atomic_t wr_tx_refcnt; /* tx refs to link */ 100 101 101 102 struct smc_wr_buf *wr_rx_bufs; /* WR recv payload buffers */ 102 103 struct ib_recv_wr *wr_rx_ibs; /* WR recv meta data */ ··· 110 109 111 110 struct ib_reg_wr wr_reg; /* WR register memory region */ 112 111 wait_queue_head_t wr_reg_wait; /* wait for wr_reg result */ 112 + atomic_t wr_reg_refcnt; /* reg refs to link */ 113 113 enum smc_wr_reg_state wr_reg_state; /* state of wr_reg request */ 114 114 115 115 u8 gid[SMC_GID_SIZE];/* gid matching used vlan id*/ ··· 446 444 int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk, 447 445 u8 link_idx, struct smc_init_info *ini); 448 446 void smcr_link_clear(struct smc_link *lnk, bool log); 447 + void smc_switch_link_and_count(struct smc_connection *conn, 448 + struct smc_link *to_lnk); 449 449 int smcr_buf_map_lgr(struct smc_link *lnk); 450 450 int smcr_buf_reg_lgr(struct smc_link *lnk); 451 451 void smcr_lgr_set_type(struct smc_link_group *lgr, enum smc_lgr_type new_type);
+4 -6
net/smc/smc_llc.c
··· 888 888 if (!rc) 889 889 goto out; 890 890 out_clear_lnk: 891 + lnk_new->state = SMC_LNK_INACTIVE; 891 892 smcr_link_clear(lnk_new, false); 892 893 out_reject: 893 894 smc_llc_cli_add_link_reject(qentry); ··· 1185 1184 goto out_err; 1186 1185 return 0; 1187 1186 out_err: 1187 + link_new->state = SMC_LNK_INACTIVE; 1188 1188 smcr_link_clear(link_new, false); 1189 1189 return rc; 1190 1190 } ··· 1288 1286 del_llc->reason = 0; 1289 1287 smc_llc_send_message(lnk, &qentry->msg); /* response */ 1290 1288 1291 - if (smc_link_downing(&lnk_del->state)) { 1292 - if (smc_switch_conns(lgr, lnk_del, false)) 1293 - smc_wr_tx_wait_no_pending_sends(lnk_del); 1294 - } 1289 + if (smc_link_downing(&lnk_del->state)) 1290 + smc_switch_conns(lgr, lnk_del, false); 1295 1291 smcr_link_clear(lnk_del, true); 1296 1292 1297 1293 active_links = smc_llc_active_link_count(lgr); ··· 1805 1805 link->smcibdev->ibdev->name, link->ibport); 1806 1806 complete(&link->llc_testlink_resp); 1807 1807 cancel_delayed_work_sync(&link->llc_testlink_wrk); 1808 - smc_wr_wakeup_reg_wait(link); 1809 - smc_wr_wakeup_tx_wait(link); 1810 1808 } 1811 1809 1812 1810 /* register a new rtoken at the remote peer (for all links) */
+17 -1
net/smc/smc_tx.c
··· 496 496 /* Wakeup sndbuf consumers from any context (IRQ or process) 497 497 * since there is more data to transmit; usable snd_wnd as max transmit 498 498 */ 499 - static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn) 499 + static int _smcr_tx_sndbuf_nonempty(struct smc_connection *conn) 500 500 { 501 501 struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags; 502 502 struct smc_link *link = conn->lnk; ··· 547 547 548 548 out_unlock: 549 549 spin_unlock_bh(&conn->send_lock); 550 + return rc; 551 + } 552 + 553 + static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn) 554 + { 555 + struct smc_link *link = conn->lnk; 556 + int rc = -ENOLINK; 557 + 558 + if (!link) 559 + return rc; 560 + 561 + atomic_inc(&link->wr_tx_refcnt); 562 + if (smc_link_usable(link)) 563 + rc = _smcr_tx_sndbuf_nonempty(conn); 564 + if (atomic_dec_and_test(&link->wr_tx_refcnt)) 565 + wake_up_all(&link->wr_tx_wait); 550 566 return rc; 551 567 } 552 568
+10
net/smc/smc_wr.c
··· 322 322 if (rc) 323 323 return rc; 324 324 325 + atomic_inc(&link->wr_reg_refcnt); 325 326 rc = wait_event_interruptible_timeout(link->wr_reg_wait, 326 327 (link->wr_reg_state != POSTED), 327 328 SMC_WR_REG_MR_WAIT_TIME); 329 + if (atomic_dec_and_test(&link->wr_reg_refcnt)) 330 + wake_up_all(&link->wr_reg_wait); 328 331 if (!rc) { 329 332 /* timeout - terminate link */ 330 333 smcr_link_down_cond_sched(link); ··· 569 566 return; 570 567 ibdev = lnk->smcibdev->ibdev; 571 568 569 + smc_wr_wakeup_reg_wait(lnk); 570 + smc_wr_wakeup_tx_wait(lnk); 571 + 572 572 if (smc_wr_tx_wait_no_pending_sends(lnk)) 573 573 memset(lnk->wr_tx_mask, 0, 574 574 BITS_TO_LONGS(SMC_WR_BUF_CNT) * 575 575 sizeof(*lnk->wr_tx_mask)); 576 + wait_event(lnk->wr_reg_wait, (!atomic_read(&lnk->wr_reg_refcnt))); 577 + wait_event(lnk->wr_tx_wait, (!atomic_read(&lnk->wr_tx_refcnt))); 576 578 577 579 if (lnk->wr_rx_dma_addr) { 578 580 ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr, ··· 736 728 memset(lnk->wr_tx_mask, 0, 737 729 BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask)); 738 730 init_waitqueue_head(&lnk->wr_tx_wait); 731 + atomic_set(&lnk->wr_tx_refcnt, 0); 739 732 init_waitqueue_head(&lnk->wr_reg_wait); 733 + atomic_set(&lnk->wr_reg_refcnt, 0); 740 734 return rc; 741 735 742 736 dma_unmap: