Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net/smc: immediate termination for SMCD link groups

SMCD link group termination is called when the peer signals shutdown
of its corresponding link group. For regular shutdowns no connections
exist anymore. For abnormal shutdowns connections must be killed and
their DMBs must be unregistered immediately. That means the SMCR method
of delaying the link group freeing by several seconds does not fit.

This patch adds immediate termination of a link group and its SMCD
connections and makes sure all SMCD link group related cleanup steps
are finished.

Signed-off-by: Ursula Braun <ubraun@linux.ibm.com>
Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Ursula Braun and committed by
David S. Miller
42bfba9e 50c6b20e

+72 -17
-2
drivers/s390/net/ism.h
··· 32 32 #define ISM_UNREG_SBA 0x11 33 33 #define ISM_UNREG_IEQ 0x12 34 34 35 - #define ISM_ERROR 0xFFFF 36 - 37 35 struct ism_req_hdr { 38 36 u32 cmd; 39 37 u16 : 16;
+2
include/net/smc.h
··· 37 37 #define ISM_EVENT_GID 1 38 38 #define ISM_EVENT_SWR 2 39 39 40 + #define ISM_ERROR 0xFFFF 41 + 40 42 struct smcd_event { 41 43 u32 type; 42 44 u32 code;
+19 -6
net/smc/smc_close.c
··· 110 110 return smc_cdc_get_slot_and_msg_send(conn); 111 111 } 112 112 113 + static void smc_close_cancel_work(struct smc_sock *smc) 114 + { 115 + struct sock *sk = &smc->sk; 116 + 117 + release_sock(sk); 118 + cancel_work_sync(&smc->conn.close_work); 119 + cancel_delayed_work_sync(&smc->conn.tx_work); 120 + lock_sock(sk); 121 + sk->sk_state = SMC_CLOSED; 122 + } 123 + 113 124 /* terminate smc socket abnormally - active abort 114 125 * link group is terminated, i.e. RDMA communication no longer possible 115 126 */ ··· 137 126 switch (sk->sk_state) { 138 127 case SMC_ACTIVE: 139 128 sk->sk_state = SMC_PEERABORTWAIT; 140 - release_sock(sk); 141 - cancel_delayed_work_sync(&smc->conn.tx_work); 142 - lock_sock(sk); 129 + smc_close_cancel_work(smc); 143 130 sk->sk_state = SMC_CLOSED; 144 131 sock_put(sk); /* passive closing */ 145 132 break; 146 133 case SMC_APPCLOSEWAIT1: 147 134 case SMC_APPCLOSEWAIT2: 148 - release_sock(sk); 149 - cancel_delayed_work_sync(&smc->conn.tx_work); 150 - lock_sock(sk); 135 + smc_close_cancel_work(smc); 151 136 sk->sk_state = SMC_CLOSED; 152 137 sock_put(sk); /* postponed passive closing */ 153 138 break; 154 139 case SMC_PEERCLOSEWAIT1: 155 140 case SMC_PEERCLOSEWAIT2: 156 141 case SMC_PEERFINCLOSEWAIT: 142 + sk->sk_state = SMC_PEERABORTWAIT; 143 + smc_close_cancel_work(smc); 157 144 sk->sk_state = SMC_CLOSED; 158 145 smc_conn_free(&smc->conn); 159 146 release_clcsock = true; ··· 159 150 break; 160 151 case SMC_PROCESSABORT: 161 152 case SMC_APPFINCLOSEWAIT: 153 + sk->sk_state = SMC_PEERABORTWAIT; 154 + smc_close_cancel_work(smc); 162 155 sk->sk_state = SMC_CLOSED; 156 + smc_conn_free(&smc->conn); 157 + release_clcsock = true; 163 158 break; 164 159 case SMC_INIT: 165 160 case SMC_PEERABORTWAIT:
+39 -7
net/smc/smc_core.c
··· 214 214 215 215 if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE) 216 216 smc_llc_link_inactive(lnk); 217 - if (lgr->is_smcd) 217 + if (lgr->is_smcd && !lgr->terminating) 218 218 smc_ism_signal_shutdown(lgr); 219 219 smc_lgr_free(lgr); 220 220 } ··· 381 381 if (!lgr) 382 382 return; 383 383 if (lgr->is_smcd) { 384 - smc_ism_unset_conn(conn); 384 + if (!list_empty(&lgr->list)) 385 + smc_ism_unset_conn(conn); 385 386 tasklet_kill(&conn->rx_tsklet); 386 387 } else { 387 388 smc_cdc_tx_dismiss_slots(conn); ··· 482 481 { 483 482 smc_lgr_free_bufs(lgr); 484 483 if (lgr->is_smcd) { 485 - smc_ism_put_vlan(lgr->smcd, lgr->vlan_id); 486 - put_device(&lgr->smcd->dev); 484 + if (!lgr->terminating) { 485 + smc_ism_put_vlan(lgr->smcd, lgr->vlan_id); 486 + put_device(&lgr->smcd->dev); 487 + } 487 488 } else { 488 489 smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]); 489 490 put_device(&lgr->lnk[SMC_SINGLE_LINK].smcibdev->ibdev->dev); ··· 504 501 if (!list_empty(lgr_list)) 505 502 list_del_init(lgr_list); 506 503 spin_unlock_bh(lgr_lock); 504 + } 505 + 506 + static void smcd_unregister_all_dmbs(struct smc_link_group *lgr) 507 + { 508 + int i; 509 + 510 + for (i = 0; i < SMC_RMBE_SIZES; i++) { 511 + struct smc_buf_desc *buf_desc; 512 + 513 + list_for_each_entry(buf_desc, &lgr->rmbs[i], list) { 514 + buf_desc->len += sizeof(struct smcd_cdc_msg); 515 + smc_ism_unregister_dmb(lgr->smcd, buf_desc); 516 + } 517 + } 507 518 } 508 519 509 520 static void smc_sk_wake_ups(struct smc_sock *smc) ··· 539 522 conn->killed = 1; 540 523 smc->sk.sk_err = ECONNABORTED; 541 524 smc_sk_wake_ups(smc); 542 - if (conn->lgr->is_smcd) 525 + if (conn->lgr->is_smcd) { 526 + smc_ism_unset_conn(conn); 543 527 tasklet_kill(&conn->rx_tsklet); 528 + } 544 529 smc_lgr_unregister_conn(conn); 545 530 smc_close_active_abort(smc); 531 + } 532 + 533 + static void smc_lgr_cleanup(struct smc_link_group *lgr) 534 + { 535 + if (lgr->is_smcd) { 536 + smc_ism_signal_shutdown(lgr); 537 + smcd_unregister_all_dmbs(lgr); 538 + 
smc_ism_put_vlan(lgr->smcd, lgr->vlan_id); 539 + put_device(&lgr->smcd->dev); 540 + } else { 541 + struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK]; 542 + 543 + wake_up(&lnk->wr_reg_wait); 544 + } 546 545 } 547 546 548 547 /* terminate link group */ ··· 590 557 node = rb_first(&lgr->conns_all); 591 558 } 592 559 read_unlock_bh(&lgr->conns_lock); 593 - if (!lgr->is_smcd) 594 - wake_up(&lgr->lnk[SMC_SINGLE_LINK].wr_reg_wait); 560 + smc_lgr_cleanup(lgr); 595 561 smc_lgr_schedule_free_work_fast(lgr); 596 562 } 597 563
+12 -2
net/smc/smc_ism.c
··· 146 146 int smc_ism_unregister_dmb(struct smcd_dev *smcd, struct smc_buf_desc *dmb_desc) 147 147 { 148 148 struct smcd_dmb dmb; 149 + int rc = 0; 150 + 151 + if (!dmb_desc->dma_addr) 152 + return rc; 149 153 150 154 memset(&dmb, 0, sizeof(dmb)); 151 155 dmb.dmb_tok = dmb_desc->token; ··· 157 153 dmb.cpu_addr = dmb_desc->cpu_addr; 158 154 dmb.dma_addr = dmb_desc->dma_addr; 159 155 dmb.dmb_len = dmb_desc->len; 160 - return smcd->ops->unregister_dmb(smcd, &dmb); 156 + rc = smcd->ops->unregister_dmb(smcd, &dmb); 157 + if (!rc || rc == ISM_ERROR) { 158 + dmb_desc->cpu_addr = NULL; 159 + dmb_desc->dma_addr = 0; 160 + } 161 + 162 + return rc; 161 163 } 162 164 163 165 int smc_ism_register_dmb(struct smc_link_group *lgr, int dmb_len, ··· 385 375 386 376 spin_lock_irqsave(&smcd->lock, flags); 387 377 conn = smcd->conn[dmbno]; 388 - if (conn) 378 + if (conn && !conn->killed) 389 379 tasklet_schedule(&conn->rx_tsklet); 390 380 spin_unlock_irqrestore(&smcd->lock, flags); 391 381 }