Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net/smc: implement DMB-merged operations of loopback-ism

This implements operations related to merging sndbuf with peer DMB in
loopback-ism. The DMB won't be freed until no sndbuf is attached to it.

Signed-off-by: Wen Gu <guwen@linux.alibaba.com>
Reviewed-by: Wenjia Zhang <wenjia@linux.ibm.com>
Reviewed-and-tested-by: Jan Karcher <jaka@linux.ibm.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

Authored by Wen Gu and committed by Paolo Abeni
c3a910f2 cc0ab806

+108 -15
+105 -15
net/smc/smc_loopback.c
··· 20 20 #include "smc_loopback.h" 21 21 22 22 #define SMC_LO_V2_CAPABLE 0x1 /* loopback-ism acts as ISMv2 */ 23 + #define SMC_LO_SUPPORT_NOCOPY 0x1 23 24 #define SMC_DMA_ADDR_INVALID (~(dma_addr_t)0) 24 25 25 26 static const char smc_lo_dev_name[] = "loopback-ism"; ··· 82 81 goto err_node; 83 82 } 84 83 dmb_node->dma_addr = SMC_DMA_ADDR_INVALID; 84 + refcount_set(&dmb_node->refcnt, 1); 85 85 86 86 again: 87 87 /* add new dmb into hash table */ ··· 96 94 } 97 95 hash_add(ldev->dmb_ht, &dmb_node->list, dmb_node->token); 98 96 write_unlock_bh(&ldev->dmb_ht_lock); 97 + atomic_inc(&ldev->dmb_cnt); 99 98 100 99 dmb->sba_idx = dmb_node->sba_idx; 101 100 dmb->dmb_tok = dmb_node->token; ··· 113 110 return rc; 114 111 } 115 112 113 + static void __smc_lo_unregister_dmb(struct smc_lo_dev *ldev, 114 + struct smc_lo_dmb_node *dmb_node) 115 + { 116 + /* remove dmb from hash table */ 117 + write_lock_bh(&ldev->dmb_ht_lock); 118 + hash_del(&dmb_node->list); 119 + write_unlock_bh(&ldev->dmb_ht_lock); 120 + 121 + clear_bit(dmb_node->sba_idx, ldev->sba_idx_mask); 122 + kvfree(dmb_node->cpu_addr); 123 + kfree(dmb_node); 124 + 125 + if (atomic_dec_and_test(&ldev->dmb_cnt)) 126 + wake_up(&ldev->ldev_release); 127 + } 128 + 116 129 static int smc_lo_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb) 117 130 { 118 131 struct smc_lo_dmb_node *dmb_node = NULL, *tmp_node; 119 132 struct smc_lo_dev *ldev = smcd->priv; 120 133 121 - /* remove dmb from hash table */ 122 - write_lock_bh(&ldev->dmb_ht_lock); 134 + /* find dmb from hash table */ 135 + read_lock_bh(&ldev->dmb_ht_lock); 123 136 hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb->dmb_tok) { 124 137 if (tmp_node->token == dmb->dmb_tok) { 125 138 dmb_node = tmp_node; ··· 143 124 } 144 125 } 145 126 if (!dmb_node) { 146 - write_unlock_bh(&ldev->dmb_ht_lock); 127 + read_unlock_bh(&ldev->dmb_ht_lock); 147 128 return -EINVAL; 148 129 } 149 - hash_del(&dmb_node->list); 150 - write_unlock_bh(&ldev->dmb_ht_lock); 130 + 
read_unlock_bh(&ldev->dmb_ht_lock); 151 131 152 - clear_bit(dmb_node->sba_idx, ldev->sba_idx_mask); 153 - kfree(dmb_node->cpu_addr); 154 - kfree(dmb_node); 132 + if (refcount_dec_and_test(&dmb_node->refcnt)) 133 + __smc_lo_unregister_dmb(ldev, dmb_node); 134 + return 0; 135 + } 155 136 137 + static int smc_lo_support_dmb_nocopy(struct smcd_dev *smcd) 138 + { 139 + return SMC_LO_SUPPORT_NOCOPY; 140 + } 141 + 142 + static int smc_lo_attach_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb) 143 + { 144 + struct smc_lo_dmb_node *dmb_node = NULL, *tmp_node; 145 + struct smc_lo_dev *ldev = smcd->priv; 146 + 147 + /* find dmb_node according to dmb->dmb_tok */ 148 + read_lock_bh(&ldev->dmb_ht_lock); 149 + hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb->dmb_tok) { 150 + if (tmp_node->token == dmb->dmb_tok) { 151 + dmb_node = tmp_node; 152 + break; 153 + } 154 + } 155 + if (!dmb_node) { 156 + read_unlock_bh(&ldev->dmb_ht_lock); 157 + return -EINVAL; 158 + } 159 + read_unlock_bh(&ldev->dmb_ht_lock); 160 + 161 + if (!refcount_inc_not_zero(&dmb_node->refcnt)) 162 + /* the dmb is being unregistered, but has 163 + * not been removed from the hash table. 
164 + */ 165 + return -EINVAL; 166 + 167 + /* provide dmb information */ 168 + dmb->sba_idx = dmb_node->sba_idx; 169 + dmb->dmb_tok = dmb_node->token; 170 + dmb->cpu_addr = dmb_node->cpu_addr; 171 + dmb->dma_addr = dmb_node->dma_addr; 172 + dmb->dmb_len = dmb_node->len; 173 + return 0; 174 + } 175 + 176 + static int smc_lo_detach_dmb(struct smcd_dev *smcd, u64 token) 177 + { 178 + struct smc_lo_dmb_node *dmb_node = NULL, *tmp_node; 179 + struct smc_lo_dev *ldev = smcd->priv; 180 + 181 + /* find dmb_node according to dmb->dmb_tok */ 182 + read_lock_bh(&ldev->dmb_ht_lock); 183 + hash_for_each_possible(ldev->dmb_ht, tmp_node, list, token) { 184 + if (tmp_node->token == token) { 185 + dmb_node = tmp_node; 186 + break; 187 + } 188 + } 189 + if (!dmb_node) { 190 + read_unlock_bh(&ldev->dmb_ht_lock); 191 + return -EINVAL; 192 + } 193 + read_unlock_bh(&ldev->dmb_ht_lock); 194 + 195 + if (refcount_dec_and_test(&dmb_node->refcnt)) 196 + __smc_lo_unregister_dmb(ldev, dmb_node); 156 197 return 0; 157 198 } 158 199 ··· 223 144 struct smc_lo_dmb_node *rmb_node = NULL, *tmp_node; 224 145 struct smc_lo_dev *ldev = smcd->priv; 225 146 struct smc_connection *conn; 147 + 148 + if (!sf) 149 + /* since sndbuf is merged with peer DMB, there is 150 + * no need to copy data from sndbuf to peer DMB. 
151 + */ 152 + return 0; 226 153 227 154 read_lock_bh(&ldev->dmb_ht_lock); 228 155 hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb_tok) { ··· 244 159 memcpy((char *)rmb_node->cpu_addr + offset, data, size); 245 160 read_unlock_bh(&ldev->dmb_ht_lock); 246 161 247 - if (sf) { 248 - conn = smcd->conn[rmb_node->sba_idx]; 249 - if (conn && !conn->killed) 250 - tasklet_schedule(&conn->rx_tsklet); 251 - else 252 - return -EPIPE; 253 - } 162 + conn = smcd->conn[rmb_node->sba_idx]; 163 + if (!conn || conn->killed) 164 + return -EPIPE; 165 + tasklet_schedule(&conn->rx_tsklet); 254 166 return 0; 255 167 } 256 168 ··· 279 197 .query_remote_gid = smc_lo_query_rgid, 280 198 .register_dmb = smc_lo_register_dmb, 281 199 .unregister_dmb = smc_lo_unregister_dmb, 200 + .support_dmb_nocopy = smc_lo_support_dmb_nocopy, 201 + .attach_dmb = smc_lo_attach_dmb, 202 + .detach_dmb = smc_lo_detach_dmb, 282 203 .add_vlan_id = NULL, 283 204 .del_vlan_id = NULL, 284 205 .set_vlan_required = NULL, ··· 360 275 smc_lo_generate_ids(ldev); 361 276 rwlock_init(&ldev->dmb_ht_lock); 362 277 hash_init(ldev->dmb_ht); 278 + atomic_set(&ldev->dmb_cnt, 0); 279 + init_waitqueue_head(&ldev->ldev_release); 280 + 363 281 return smcd_lo_register_dev(ldev); 364 282 } 365 283 366 284 static void smc_lo_dev_exit(struct smc_lo_dev *ldev) 367 285 { 368 286 smcd_lo_unregister_dev(ldev); 287 + if (atomic_read(&ldev->dmb_cnt)) 288 + wait_event(ldev->ldev_release, !atomic_read(&ldev->dmb_cnt)); 369 289 } 370 290 371 291 static void smc_lo_dev_release(struct device *dev)
+3
net/smc/smc_loopback.h
··· 30 30 u32 sba_idx; 31 31 void *cpu_addr; 32 32 dma_addr_t dma_addr; 33 + refcount_t refcnt; 33 34 }; 34 35 35 36 struct smc_lo_dev { ··· 38 37 struct device dev; 39 38 u16 chid; 40 39 struct smcd_gid local_gid; 40 + atomic_t dmb_cnt; 41 41 rwlock_t dmb_ht_lock; 42 42 DECLARE_BITMAP(sba_idx_mask, SMC_LO_MAX_DMBS); 43 43 DECLARE_HASHTABLE(dmb_ht, SMC_LO_DMBS_HASH_BITS); 44 + wait_queue_head_t ldev_release; 44 45 }; 45 46 46 47 int smc_loopback_init(void);