// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Basic Transport Functions exploiting Infiniband API
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/socket.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/reboot.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/smc.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

#include "smc.h"
#include "smc_clc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_wr.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_close.h"
#include "smc_ism.h"
#include "smc_netlink.h"
#include "smc_stats.h"
#include "smc_tracepoint.h"

#define SMC_LGR_NUM_INCR	256
#define SMC_LGR_FREE_DELAY_SERV	(600 * HZ)
#define SMC_LGR_FREE_DELAY_CLNT	(SMC_LGR_FREE_DELAY_SERV + 10 * HZ)
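/* clients keep their link group 10 seconds longer than servers, so a
 * server-side free cannot run ahead of the client side; see
 * smc_lgr_schedule_free_work() below
 */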

struct smc_lgr_list smc_lgr_list = {	/* established link groups */
	.lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
	.list = LIST_HEAD_INIT(smc_lgr_list.list),
	.num = 0,
};

static atomic_t lgr_cnt = ATOMIC_INIT(0); /* number of existing link groups */
static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted);

static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc);
static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft);

static void smc_link_down_work(struct work_struct *work);

/* return head of link group list and its lock for a given link group */
static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,
						  spinlock_t **lgr_lock)
{
	if (lgr->is_smcd) {
		*lgr_lock = &lgr->smcd->lgr_lock;
		return &lgr->smcd->lgr_list;
	}

	*lgr_lock = &smc_lgr_list.lock;
	return &smc_lgr_list.list;
}

static void smc_ibdev_cnt_inc(struct smc_link *lnk)
{
	atomic_inc(&lnk->smcibdev->lnk_cnt_by_port[lnk->ibport - 1]);
}

static void smc_ibdev_cnt_dec(struct smc_link *lnk)
{
	atomic_dec(&lnk->smcibdev->lnk_cnt_by_port[lnk->ibport - 1]);
}

static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
{
	/* client link group creation always follows the server link group
	 * creation. For client use a somewhat higher removal delay time,
	 * otherwise there is a risk of out-of-sync link groups.
	 */
	if (!lgr->freeing) {
		mod_delayed_work(system_wq, &lgr->free_work,
				 (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
				 SMC_LGR_FREE_DELAY_CLNT :
				 SMC_LGR_FREE_DELAY_SERV);
	}
}

/* Register connection's alert token in our lookup structure.
 * To use rbtrees we have to implement our own insert core.
 * Requires @conns_lock
 * @smc	connection to register
 */
static void smc_lgr_add_alert_token(struct smc_connection *conn)
{
	struct rb_node **link, *parent = NULL;
	u32 token = conn->alert_token_local;

	link = &conn->lgr->conns_all.rb_node;
	while (*link) {
		struct smc_connection *cur = rb_entry(*link,
					struct smc_connection, alert_node);

		parent = *link;
		if (cur->alert_token_local > token)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	/* Put the new node there */
	rb_link_node(&conn->alert_node, parent, link);
	rb_insert_color(&conn->alert_node, &conn->lgr->conns_all);
}

/* assign an SMC-R link to the connection */
static int smcr_lgr_conn_assign_link(struct smc_connection *conn, bool first)
{
	enum smc_link_state expected = first ? SMC_LNK_ACTIVATING :
					       SMC_LNK_ACTIVE;
	int i, j;

	/* do link balancing */
	conn->lnk = NULL;	/* reset conn->lnk first */
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		struct smc_link *lnk = &conn->lgr->lnk[i];

		if (lnk->state != expected || lnk->link_is_asym)
			continue;
		if (conn->lgr->role == SMC_CLNT) {
			conn->lnk = lnk; /* temporary, SMC server assigns link */
			break;
		}
		if (conn->lgr->conns_num % 2) {
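			/* spread the load: every second connection starts
			 * its search at the following usable link instead
			 * of the first one
			 */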
			for (j = i + 1; j < SMC_LINKS_PER_LGR_MAX; j++) {
				struct smc_link *lnk2;

				lnk2 = &conn->lgr->lnk[j];
				if (lnk2->state == expected &&
				    !lnk2->link_is_asym) {
					conn->lnk = lnk2;
					break;
				}
			}
		}
		if (!conn->lnk)
			conn->lnk = lnk;
		break;
	}
	if (!conn->lnk)
		return SMC_CLC_DECL_NOACTLINK;
	atomic_inc(&conn->lnk->conn_cnt);
	return 0;
}

/* Register connection in link group by assigning an alert token
 * registered in a search tree.
 * Requires @conns_lock
 * Note that '0' is a reserved value and not assigned.
 */
static int smc_lgr_register_conn(struct smc_connection *conn, bool first)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	static atomic_t nexttoken = ATOMIC_INIT(0);
	int rc;

	if (!conn->lgr->is_smcd) {
		rc = smcr_lgr_conn_assign_link(conn, first);
		if (rc) {
			conn->lgr = NULL;
			return rc;
		}
	}
	/* find a new alert_token_local value not yet used by some connection
	 * in this link group
	 */
	sock_hold(&smc->sk); /* sock_put in smc_lgr_unregister_conn() */
	while (!conn->alert_token_local) {
		conn->alert_token_local = atomic_inc_return(&nexttoken);
		if (smc_lgr_find_conn(conn->alert_token_local, conn->lgr))
			conn->alert_token_local = 0;
	}
	smc_lgr_add_alert_token(conn);
	conn->lgr->conns_num++;
	return 0;
}

/* Unregister connection and reset the alert token of the given connection
 */
static void __smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	struct smc_link_group *lgr = conn->lgr;

	rb_erase(&conn->alert_node, &lgr->conns_all);
	if (conn->lnk)
		atomic_dec(&conn->lnk->conn_cnt);
	lgr->conns_num--;
	conn->alert_token_local = 0;
	sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
}

/* Unregister connection from lgr
 */
static void smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!smc_conn_lgr_valid(conn))
		return;
	write_lock_bh(&lgr->conns_lock);
	if (conn->alert_token_local) {
		__smc_lgr_unregister_conn(conn);
	}
	write_unlock_bh(&lgr->conns_lock);
}

int smc_nl_get_sys_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	char hostname[SMC_MAX_HOSTNAME_LEN + 1];
	char smc_seid[SMC_MAX_EID_LEN + 1];
	struct nlattr *attrs;
	u8 *seid = NULL;
	u8 *host = NULL;
	void *nlh;

	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_SYS_INFO);
	if (!nlh)
		goto errmsg;
	if (cb_ctx->pos[0])
		goto errout;
	attrs = nla_nest_start(skb, SMC_GEN_SYS_INFO);
	if (!attrs)
		goto errout;
	if (nla_put_u8(skb, SMC_NLA_SYS_VER, SMC_V2))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_SYS_REL, SMC_RELEASE))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_SYS_IS_ISM_V2, smc_ism_is_v2_capable()))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_SYS_IS_SMCR_V2, true))
		goto errattr;
	smc_clc_get_hostname(&host);
	if (host) {
		memcpy(hostname, host, SMC_MAX_HOSTNAME_LEN);
		hostname[SMC_MAX_HOSTNAME_LEN] = 0;
		if (nla_put_string(skb, SMC_NLA_SYS_LOCAL_HOST, hostname))
			goto errattr;
	}
	if (smc_ism_is_v2_capable()) {
		smc_ism_get_system_eid(&seid);
		memcpy(smc_seid, seid, SMC_MAX_EID_LEN);
		smc_seid[SMC_MAX_EID_LEN] = 0;
		if (nla_put_string(skb, SMC_NLA_SYS_SEID, smc_seid))
			goto errattr;
	}
	nla_nest_end(skb, attrs);
	genlmsg_end(skb, nlh);
	cb_ctx->pos[0] = 1;
	return skb->len;

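/* error unwinding: cancel the innermost netlink construct first (the
 * attribute nest), then the enclosing message
 */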
errattr:
	nla_nest_cancel(skb, attrs);
errout:
	genlmsg_cancel(skb, nlh);
errmsg:
	return skb->len;
}

/* Fill SMC_NLA_LGR_D_V2_COMMON/SMC_NLA_LGR_R_V2_COMMON nested attributes */
static int smc_nl_fill_lgr_v2_common(struct smc_link_group *lgr,
				     struct sk_buff *skb,
				     struct netlink_callback *cb,
				     struct nlattr *v2_attrs)
{
	char smc_host[SMC_MAX_HOSTNAME_LEN + 1];
	char smc_eid[SMC_MAX_EID_LEN + 1];

	if (nla_put_u8(skb, SMC_NLA_LGR_V2_VER, lgr->smc_version))
		goto errv2attr;
	if (nla_put_u8(skb, SMC_NLA_LGR_V2_REL, lgr->peer_smc_release))
		goto errv2attr;
	if (nla_put_u8(skb, SMC_NLA_LGR_V2_OS, lgr->peer_os))
		goto errv2attr;
	memcpy(smc_host, lgr->peer_hostname, SMC_MAX_HOSTNAME_LEN);
	smc_host[SMC_MAX_HOSTNAME_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_LGR_V2_PEER_HOST, smc_host))
		goto errv2attr;
	memcpy(smc_eid, lgr->negotiated_eid, SMC_MAX_EID_LEN);
	smc_eid[SMC_MAX_EID_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_LGR_V2_NEG_EID, smc_eid))
		goto errv2attr;

	nla_nest_end(skb, v2_attrs);
	return 0;

errv2attr:
	nla_nest_cancel(skb, v2_attrs);
	return -EMSGSIZE;
}

static int smc_nl_fill_smcr_lgr_v2(struct smc_link_group *lgr,
				   struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	struct nlattr *v2_attrs;

	v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_R_V2);
	if (!v2_attrs)
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_V2_DIRECT, !lgr->uses_gateway))
		goto errv2attr;

	nla_nest_end(skb, v2_attrs);
	return 0;

errv2attr:
	nla_nest_cancel(skb, v2_attrs);
errattr:
	return -EMSGSIZE;
}

static int smc_nl_fill_lgr(struct smc_link_group *lgr,
			   struct sk_buff *skb,
			   struct netlink_callback *cb)
{
	char smc_target[SMC_MAX_PNETID_LEN + 1];
	struct nlattr *attrs, *v2_attrs;

	attrs = nla_nest_start(skb, SMC_GEN_LGR_SMCR);
	if (!attrs)
		goto errout;

	if (nla_put_u32(skb, SMC_NLA_LGR_R_ID, *((u32 *)&lgr->id)))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LGR_R_CONNS_NUM, lgr->conns_num))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_ROLE, lgr->role))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_TYPE, lgr->type))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_BUF_TYPE, lgr->buf_type))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_VLAN_ID, lgr->vlan_id))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_LGR_R_NET_COOKIE,
			      lgr->net->net_cookie, SMC_NLA_LGR_R_PAD))
		goto errattr;
	memcpy(smc_target, lgr->pnet_id, SMC_MAX_PNETID_LEN);
	smc_target[SMC_MAX_PNETID_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_LGR_R_PNETID, smc_target))
		goto errattr;
	if (lgr->smc_version > SMC_V1) {
		v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_R_V2_COMMON);
		if (!v2_attrs)
			goto errattr;
		if (smc_nl_fill_lgr_v2_common(lgr, skb, cb, v2_attrs))
			goto errattr;
		if (smc_nl_fill_smcr_lgr_v2(lgr, skb, cb))
			goto errattr;
	}

	nla_nest_end(skb, attrs);
	return 0;
errattr:
	nla_nest_cancel(skb, attrs);
errout:
	return -EMSGSIZE;
}

static int smc_nl_fill_lgr_link(struct smc_link_group *lgr,
				struct smc_link *link,
				struct sk_buff *skb,
				struct netlink_callback *cb)
{
	char smc_ibname[IB_DEVICE_NAME_MAX];
	u8 smc_gid_target[41];
	struct nlattr *attrs;
	u32 link_uid = 0;
	void *nlh;

	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_LINK_SMCR);
	if (!nlh)
		goto errmsg;

	attrs = nla_nest_start(skb, SMC_GEN_LINK_SMCR);
	if (!attrs)
		goto errout;

	if (nla_put_u8(skb, SMC_NLA_LINK_ID, link->link_id))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LINK_STATE, link->state))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LINK_CONN_CNT,
			atomic_read(&link->conn_cnt)))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LINK_IB_PORT, link->ibport))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LINK_NET_DEV, link->ndev_ifidx))
		goto errattr;
	snprintf(smc_ibname, sizeof(smc_ibname), "%s", link->ibname);
	if (nla_put_string(skb, SMC_NLA_LINK_IB_DEV, smc_ibname))
		goto errattr;
	memcpy(&link_uid, link->link_uid, sizeof(link_uid));
	if (nla_put_u32(skb, SMC_NLA_LINK_UID, link_uid))
		goto errattr;
	memcpy(&link_uid, link->peer_link_uid, sizeof(link_uid));
	if (nla_put_u32(skb, SMC_NLA_LINK_PEER_UID, link_uid))
		goto errattr;
	memset(smc_gid_target, 0, sizeof(smc_gid_target));
	smc_gid_be16_convert(smc_gid_target, link->gid);
	if (nla_put_string(skb, SMC_NLA_LINK_GID, smc_gid_target))
		goto errattr;
	memset(smc_gid_target, 0, sizeof(smc_gid_target));
	smc_gid_be16_convert(smc_gid_target, link->peer_gid);
	if (nla_put_string(skb, SMC_NLA_LINK_PEER_GID, smc_gid_target))
		goto errattr;

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, nlh);
	return 0;
errattr:
	nla_nest_cancel(skb, attrs);
errout:
	genlmsg_cancel(skb, nlh);
errmsg:
	return -EMSGSIZE;
}

static int smc_nl_handle_lgr(struct smc_link_group *lgr,
			     struct sk_buff *skb,
			     struct netlink_callback *cb,
			     bool list_links)
{
	void *nlh;
	int i;

	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_LGR_SMCR);
	if (!nlh)
		goto errmsg;
	if (smc_nl_fill_lgr(lgr, skb, cb))
		goto errout;

	genlmsg_end(skb, nlh);
	if (!list_links)
		goto out;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_usable(&lgr->lnk[i]))
			continue;
		if (smc_nl_fill_lgr_link(lgr, &lgr->lnk[i], skb, cb))
			goto errout;
	}
out:
	return 0;

errout:
	genlmsg_cancel(skb, nlh);
errmsg:
	return -EMSGSIZE;
}

static void smc_nl_fill_lgr_list(struct smc_lgr_list *smc_lgr,
				 struct sk_buff *skb,
				 struct netlink_callback *cb,
				 bool list_links)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	struct smc_link_group *lgr;
	int snum = cb_ctx->pos[0];
	int num = 0;

	spin_lock_bh(&smc_lgr->lock);
	list_for_each_entry(lgr, &smc_lgr->list, list) {
		if (num < snum)
			goto next;
		if (smc_nl_handle_lgr(lgr, skb, cb, list_links))
			goto errout;
next:
		num++;
	}
errout:
	spin_unlock_bh(&smc_lgr->lock);
	cb_ctx->pos[0] = num;
}

static int smc_nl_fill_smcd_lgr(struct smc_link_group *lgr,
				struct sk_buff *skb,
				struct netlink_callback *cb)
{
	char smc_pnet[SMC_MAX_PNETID_LEN + 1];
	struct smcd_dev *smcd = lgr->smcd;
	struct nlattr *attrs;
	void *nlh;

	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_LGR_SMCD);
	if (!nlh)
		goto errmsg;

	attrs = nla_nest_start(skb, SMC_GEN_LGR_SMCD);
	if (!attrs)
		goto errout;

	if (nla_put_u32(skb, SMC_NLA_LGR_D_ID, *((u32 *)&lgr->id)))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_LGR_D_GID,
			      smcd->ops->get_local_gid(smcd),
			      SMC_NLA_LGR_D_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_LGR_D_PEER_GID, lgr->peer_gid,
			      SMC_NLA_LGR_D_PAD))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_D_VLAN_ID, lgr->vlan_id))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LGR_D_CONNS_NUM, lgr->conns_num))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LGR_D_CHID, smc_ism_get_chid(lgr->smcd)))
		goto errattr;
	memcpy(smc_pnet, lgr->smcd->pnetid, SMC_MAX_PNETID_LEN);
	smc_pnet[SMC_MAX_PNETID_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_LGR_D_PNETID, smc_pnet))
		goto errattr;
	if (lgr->smc_version > SMC_V1) {
		struct nlattr *v2_attrs;

		v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_D_V2_COMMON);
		if (!v2_attrs)
			goto errattr;
		if (smc_nl_fill_lgr_v2_common(lgr, skb, cb, v2_attrs))
			goto errattr;
	}
	nla_nest_end(skb, attrs);
	genlmsg_end(skb, nlh);
	return 0;

errattr:
	nla_nest_cancel(skb, attrs);
errout:
	genlmsg_cancel(skb, nlh);
errmsg:
	return -EMSGSIZE;
}

static int smc_nl_handle_smcd_lgr(struct smcd_dev *dev,
				  struct sk_buff *skb,
				  struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	struct smc_link_group *lgr;
	int snum = cb_ctx->pos[1];
	int rc = 0, num = 0;

	spin_lock_bh(&dev->lgr_lock);
	list_for_each_entry(lgr, &dev->lgr_list, list) {
		if (!lgr->is_smcd)
			continue;
		if (num < snum)
			goto next;
		rc = smc_nl_fill_smcd_lgr(lgr, skb, cb);
		if (rc)
			goto errout;
next:
		num++;
	}
errout:
	spin_unlock_bh(&dev->lgr_lock);
	cb_ctx->pos[1] = num;
	return rc;
}

static int smc_nl_fill_smcd_dev(struct smcd_dev_list *dev_list,
				struct sk_buff *skb,
				struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	struct smcd_dev *smcd_dev;
	int snum = cb_ctx->pos[0];
	int rc = 0, num = 0;

	mutex_lock(&dev_list->mutex);
	list_for_each_entry(smcd_dev, &dev_list->list, list) {
		if (list_empty(&smcd_dev->lgr_list))
			continue;
		if (num < snum)
			goto next;
		rc = smc_nl_handle_smcd_lgr(smcd_dev, skb, cb);
		if (rc)
			goto errout;
next:
		num++;
	}
errout:
	mutex_unlock(&dev_list->mutex);
	cb_ctx->pos[0] = num;
	return rc;
}

int smcr_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb)
{
	bool list_links = false;

	smc_nl_fill_lgr_list(&smc_lgr_list, skb, cb, list_links);
	return skb->len;
}

int smcr_nl_get_link(struct sk_buff *skb, struct netlink_callback *cb)
{
	bool list_links = true;

	smc_nl_fill_lgr_list(&smc_lgr_list, skb, cb, list_links);
	return skb->len;
}

int smcd_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb)
{
	smc_nl_fill_smcd_dev(&smcd_dev_list, skb, cb);
	return skb->len;
}

void smc_lgr_cleanup_early(struct smc_link_group *lgr)
{
	spinlock_t *lgr_lock;

	if (!lgr)
		return;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	/* do not use this link group for new connections */
	if (!list_empty(&lgr->list))
		list_del_init(&lgr->list);
	spin_unlock_bh(lgr_lock);
	__smc_lgr_terminate(lgr, true);
}

static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr)
{
	int i;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		struct smc_link *lnk = &lgr->lnk[i];

		if (smc_link_sendable(lnk))
			lnk->state = SMC_LNK_INACTIVE;
	}
	wake_up_all(&lgr->llc_msg_waiter);
	wake_up_all(&lgr->llc_flow_waiter);
}

static void smc_lgr_free(struct smc_link_group *lgr);

static void smc_lgr_free_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(to_delayed_work(work),
						  struct smc_link_group,
						  free_work);
	spinlock_t *lgr_lock;
	bool conns;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	if (lgr->freeing) {
		spin_unlock_bh(lgr_lock);
		return;
	}
	read_lock_bh(&lgr->conns_lock);
	conns = RB_EMPTY_ROOT(&lgr->conns_all);
	read_unlock_bh(&lgr->conns_lock);
	if (!conns) { /* number of lgr connections is no longer zero */
		spin_unlock_bh(lgr_lock);
		return;
	}
	list_del_init(&lgr->list); /* remove from smc_lgr_list */
	lgr->freeing = 1; /* this instance does the freeing, no new schedule */
	spin_unlock_bh(lgr_lock);
	cancel_delayed_work(&lgr->free_work);

	if (!lgr->is_smcd && !lgr->terminating)
		smc_llc_send_link_delete_all(lgr, true,
					     SMC_LLC_DEL_PROG_INIT_TERM);
	if (lgr->is_smcd && !lgr->terminating)
		smc_ism_signal_shutdown(lgr);
	if (!lgr->is_smcd)
		smcr_lgr_link_deactivate_all(lgr);
	smc_lgr_free(lgr);
}

static void smc_lgr_terminate_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
						  terminate_work);

	__smc_lgr_terminate(lgr, true);
}

/* return next unique link id for the lgr */
static u8 smcr_next_link_id(struct smc_link_group *lgr)
{
	u8 link_id;
	int i;

	while (1) {
again:
		link_id = ++lgr->next_link_id;
		if (!link_id)	/* skip zero as link_id */
			link_id = ++lgr->next_link_id;
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			if (smc_link_usable(&lgr->lnk[i]) &&
			    lgr->lnk[i].link_id == link_id)
				goto again;
		}
		break;
	}
	return link_id;
}

static void smcr_copy_dev_info_to_link(struct smc_link *link)
{
	struct smc_ib_device *smcibdev = link->smcibdev;

	snprintf(link->ibname, sizeof(link->ibname), "%s",
		 smcibdev->ibdev->name);
	link->ndev_ifidx = smcibdev->ndev_ifidx[link->ibport - 1];
}

int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
		   u8 link_idx, struct smc_init_info *ini)
{
	struct smc_ib_device *smcibdev;
	u8 rndvec[3];
	int rc;

	if (lgr->smc_version == SMC_V2) {
		lnk->smcibdev = ini->smcrv2.ib_dev_v2;
		lnk->ibport = ini->smcrv2.ib_port_v2;
	} else {
		lnk->smcibdev = ini->ib_dev;
		lnk->ibport = ini->ib_port;
	}
	get_device(&lnk->smcibdev->ibdev->dev);
	atomic_inc(&lnk->smcibdev->lnk_cnt);
	refcount_set(&lnk->refcnt, 1); /* link refcnt is set to 1 */
	lnk->clearing = 0;
	lnk->path_mtu = lnk->smcibdev->pattr[lnk->ibport - 1].active_mtu;
	lnk->link_id = smcr_next_link_id(lgr);
	lnk->lgr = lgr;
	smc_lgr_hold(lgr); /* lgr_put in smcr_link_clear() */
	lnk->link_idx = link_idx;
	lnk->wr_rx_id_compl = 0;
	smc_ibdev_cnt_inc(lnk);
	smcr_copy_dev_info_to_link(lnk);
	atomic_set(&lnk->conn_cnt, 0);
	smc_llc_link_set_uid(lnk);
	INIT_WORK(&lnk->link_down_wrk, smc_link_down_work);
	if (!lnk->smcibdev->initialized) {
		rc = (int)smc_ib_setup_per_ibdev(lnk->smcibdev);
		if (rc)
			goto out;
	}
	get_random_bytes(rndvec, sizeof(rndvec));
	lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
		(rndvec[2] << 16);
	rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport,
				  ini->vlan_id, lnk->gid, &lnk->sgid_index,
				  lgr->smc_version == SMC_V2 ?
						&ini->smcrv2 : NULL);
	if (rc)
		goto out;
	rc = smc_llc_link_init(lnk);
	if (rc)
		goto out;
	rc = smc_wr_alloc_link_mem(lnk);
	if (rc)
		goto clear_llc_lnk;
	rc = smc_ib_create_protection_domain(lnk);
	if (rc)
		goto free_link_mem;
	rc = smc_ib_create_queue_pair(lnk);
	if (rc)
		goto dealloc_pd;
	rc = smc_wr_create_link(lnk);
	if (rc)
		goto destroy_qp;
	lnk->state = SMC_LNK_ACTIVATING;
	return 0;

destroy_qp:
	smc_ib_destroy_queue_pair(lnk);
dealloc_pd:
	smc_ib_dealloc_protection_domain(lnk);
free_link_mem:
	smc_wr_free_link_mem(lnk);
clear_llc_lnk:
	smc_llc_link_clear(lnk, false);
out:
	smc_ibdev_cnt_dec(lnk);
	put_device(&lnk->smcibdev->ibdev->dev);
	smcibdev = lnk->smcibdev;
	memset(lnk, 0, sizeof(struct smc_link));
	lnk->state = SMC_LNK_UNUSED;
	if (!atomic_dec_return(&smcibdev->lnk_cnt))
		wake_up(&smcibdev->lnks_deleted);
	smc_lgr_put(lgr); /* lgr_hold above */
	return rc;
}

/* create a new SMC link group */
static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
{
	struct smc_link_group *lgr;
	struct list_head *lgr_list;
	struct smcd_dev *smcd;
	struct smc_link *lnk;
	spinlock_t *lgr_lock;
	u8 link_idx;
	int rc = 0;
	int i;

	if (ini->is_smcd && ini->vlan_id) {
		if (smc_ism_get_vlan(ini->ism_dev[ini->ism_selected],
				     ini->vlan_id)) {
			rc = SMC_CLC_DECL_ISMVLANERR;
			goto out;
		}
	}

	lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
	if (!lgr) {
		rc = SMC_CLC_DECL_MEM;
		goto ism_put_vlan;
	}
	lgr->tx_wq = alloc_workqueue("smc_tx_wq-%*phN", 0, 0,
				     SMC_LGR_ID_SIZE, &lgr->id);
	if (!lgr->tx_wq) {
		rc = -ENOMEM;
		goto free_lgr;
	}
	lgr->is_smcd = ini->is_smcd;
	lgr->sync_err = 0;
	lgr->terminating = 0;
	lgr->freeing = 0;
	lgr->vlan_id = ini->vlan_id;
	refcount_set(&lgr->refcnt, 1); /* set lgr refcnt to 1 */
	init_rwsem(&lgr->sndbufs_lock);
	init_rwsem(&lgr->rmbs_lock);
	rwlock_init(&lgr->conns_lock);
	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		INIT_LIST_HEAD(&lgr->sndbufs[i]);
		INIT_LIST_HEAD(&lgr->rmbs[i]);
	}
	lgr->next_link_id = 0;
	smc_lgr_list.num += SMC_LGR_NUM_INCR;
	memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
	INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
	INIT_WORK(&lgr->terminate_work, smc_lgr_terminate_work);
	lgr->conns_all = RB_ROOT;
	if (ini->is_smcd) {
		/* SMC-D specific settings */
		smcd = ini->ism_dev[ini->ism_selected];
		get_device(smcd->ops->get_dev(smcd));
		lgr->peer_gid = ini->ism_peer_gid[ini->ism_selected];
		lgr->smcd = ini->ism_dev[ini->ism_selected];
		lgr_list = &ini->ism_dev[ini->ism_selected]->lgr_list;
		lgr_lock = &lgr->smcd->lgr_lock;
		lgr->smc_version = ini->smcd_version;
		lgr->peer_shutdown = 0;
		atomic_inc(&ini->ism_dev[ini->ism_selected]->lgr_cnt);
	} else {
		/* SMC-R specific settings */
		struct smc_ib_device *ibdev;
		int ibport;

		lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
		lgr->smc_version = ini->smcr_version;
		memcpy(lgr->peer_systemid, ini->peer_systemid,
		       SMC_SYSTEMID_LEN);
		if (lgr->smc_version == SMC_V2) {
			ibdev = ini->smcrv2.ib_dev_v2;
			ibport = ini->smcrv2.ib_port_v2;
			lgr->saddr = ini->smcrv2.saddr;
			lgr->uses_gateway = ini->smcrv2.uses_gateway;
			memcpy(lgr->nexthop_mac, ini->smcrv2.nexthop_mac,
			       ETH_ALEN);
		} else {
			ibdev = ini->ib_dev;
			ibport = ini->ib_port;
		}
		memcpy(lgr->pnet_id, ibdev->pnetid[ibport - 1],
		       SMC_MAX_PNETID_LEN);
		rc = smc_wr_alloc_lgr_mem(lgr);
		if (rc)
			goto free_wq;
		smc_llc_lgr_init(lgr, smc);

		link_idx = SMC_SINGLE_LINK;
		lnk = &lgr->lnk[link_idx];
		rc = smcr_link_init(lgr, lnk, link_idx, ini);
		if (rc) {
			smc_wr_free_lgr_mem(lgr);
			goto free_wq;
		}
		lgr->net = smc_ib_net(lnk->smcibdev);
		lgr_list = &smc_lgr_list.list;
		lgr_lock = &smc_lgr_list.lock;
		lgr->buf_type = lgr->net->smc.sysctl_smcr_buf_type;
		atomic_inc(&lgr_cnt);
	}
	smc->conn.lgr = lgr;
	spin_lock_bh(lgr_lock);
	list_add_tail(&lgr->list, lgr_list);
	spin_unlock_bh(lgr_lock);
	return 0;

free_wq:
	destroy_workqueue(lgr->tx_wq);
free_lgr:
	kfree(lgr);
ism_put_vlan:
	if (ini->is_smcd && ini->vlan_id)
		smc_ism_put_vlan(ini->ism_dev[ini->ism_selected], ini->vlan_id);
out:
	if (rc < 0) {
		if (rc == -ENOMEM)
			rc = SMC_CLC_DECL_MEM;
		else
			rc = SMC_CLC_DECL_INTERR;
	}
	return rc;
}

static int smc_write_space(struct smc_connection *conn)
{
	int buffer_len = conn->peer_rmbe_size;
	union smc_host_cursor prod;
	union smc_host_cursor cons;
	int space;

	smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn);
	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);
	/* determine rx_buf space */
	space = buffer_len - smc_curs_diff(buffer_len, &cons, &prod);
	return space;
}

static int smc_switch_cursor(struct smc_sock *smc, struct smc_cdc_tx_pend *pend,
			     struct smc_wr_buf *wr_buf)
{
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor cons, fin;
	int rc = 0;
	int diff;

	smc_curs_copy(&conn->tx_curs_sent, &conn->tx_curs_fin, conn);
	smc_curs_copy(&fin, &conn->local_tx_ctrl_fin, conn);
	/* set prod cursor to old state, enforce tx_rdma_writes() */
	smc_curs_copy(&conn->local_tx_ctrl.prod, &fin, conn);
	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);

	if (smc_curs_comp(conn->peer_rmbe_size, &cons, &fin) < 0) {
		/* cons cursor advanced more than fin, and prod was set
		 * fin above, so now prod is smaller than cons. Fix that.
		 */
		diff = smc_curs_diff(conn->peer_rmbe_size, &fin, &cons);
		smc_curs_add(conn->sndbuf_desc->len,
			     &conn->tx_curs_sent, diff);
		smc_curs_add(conn->sndbuf_desc->len,
			     &conn->tx_curs_fin, diff);

		smp_mb__before_atomic();
		atomic_add(diff, &conn->sndbuf_space);
		smp_mb__after_atomic();

		smc_curs_add(conn->peer_rmbe_size,
			     &conn->local_tx_ctrl.prod, diff);
		smc_curs_add(conn->peer_rmbe_size,
			     &conn->local_tx_ctrl_fin, diff);
	}
	/* recalculate, value is used by tx_rdma_writes() */
	atomic_set(&smc->conn.peer_rmbe_space, smc_write_space(conn));

	if (smc->sk.sk_state != SMC_INIT &&
	    smc->sk.sk_state != SMC_CLOSED) {
		rc = smcr_cdc_msg_send_validation(conn, pend, wr_buf);
		if (!rc) {
			queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work, 0);
			smc->sk.sk_data_ready(&smc->sk);
		}
	} else {
		smc_wr_tx_put_slot(conn->lnk,
				   (struct smc_wr_tx_pend_priv *)pend);
	}
	return rc;
}

void smc_switch_link_and_count(struct smc_connection *conn,
			       struct smc_link *to_lnk)
{
	atomic_dec(&conn->lnk->conn_cnt);
	/* link_hold in smc_conn_create() */
	smcr_link_put(conn->lnk);
	conn->lnk = to_lnk;
	atomic_inc(&conn->lnk->conn_cnt);
	/* link_put in smc_conn_free() */
	smcr_link_hold(conn->lnk);
}

struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
				  struct smc_link *from_lnk, bool is_dev_err)
{
	struct smc_link *to_lnk = NULL;
	struct smc_cdc_tx_pend *pend;
	struct smc_connection *conn;
	struct smc_wr_buf *wr_buf;
	struct smc_sock *smc;
	struct rb_node *node;
	int i, rc = 0;

	/* link is inactive, wake up tx waiters */
	smc_wr_wakeup_tx_wait(from_lnk);

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&lgr->lnk[i]) || i == from_lnk->link_idx)
			continue;
		if (is_dev_err && from_lnk->smcibdev == lgr->lnk[i].smcibdev &&
		    from_lnk->ibport == lgr->lnk[i].ibport) {
			continue;
		}
		to_lnk = &lgr->lnk[i];
		break;
	}
	if (!to_lnk || !smc_wr_tx_link_hold(to_lnk)) {
		smc_lgr_terminate_sched(lgr);
		return NULL;
	}
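/* restart the tree walk from the top whenever conns_lock was dropped;
 * the rbtree may have changed in the meantime
 */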
again:
	read_lock_bh(&lgr->conns_lock);
	for (node = rb_first(&lgr->conns_all); node; node = rb_next(node)) {
		conn = rb_entry(node, struct smc_connection, alert_node);
		if (conn->lnk != from_lnk)
			continue;
		smc = container_of(conn, struct smc_sock, conn);
		/* conn->lnk not yet set in SMC_INIT state */
		if (smc->sk.sk_state == SMC_INIT)
			continue;
		if (smc->sk.sk_state == SMC_CLOSED ||
		    smc->sk.sk_state == SMC_PEERCLOSEWAIT1 ||
		    smc->sk.sk_state == SMC_PEERCLOSEWAIT2 ||
		    smc->sk.sk_state == SMC_APPFINCLOSEWAIT ||
		    smc->sk.sk_state == SMC_APPCLOSEWAIT1 ||
		    smc->sk.sk_state == SMC_APPCLOSEWAIT2 ||
		    smc->sk.sk_state == SMC_PEERFINCLOSEWAIT ||
		    smc->sk.sk_state == SMC_PEERABORTWAIT ||
		    smc->sk.sk_state == SMC_PROCESSABORT) {
			spin_lock_bh(&conn->send_lock);
			smc_switch_link_and_count(conn, to_lnk);
			spin_unlock_bh(&conn->send_lock);
			continue;
		}
		sock_hold(&smc->sk);
		read_unlock_bh(&lgr->conns_lock);
		/* pre-fetch buffer outside of send_lock, might sleep */
		rc = smc_cdc_get_free_slot(conn, to_lnk, &wr_buf, NULL, &pend);
		if (rc)
			goto err_out;
		/* avoid race with smcr_tx_sndbuf_nonempty() */
		spin_lock_bh(&conn->send_lock);
		smc_switch_link_and_count(conn, to_lnk);
		rc = smc_switch_cursor(smc, pend, wr_buf);
		spin_unlock_bh(&conn->send_lock);
		sock_put(&smc->sk);
		if (rc)
			goto err_out;
		goto again;
	}
	read_unlock_bh(&lgr->conns_lock);
	smc_wr_tx_link_put(to_lnk);
	return to_lnk;

err_out:
	smcr_link_down_cond_sched(to_lnk);
	smc_wr_tx_link_put(to_lnk);
	return NULL;
}

static void smcr_buf_unuse(struct smc_buf_desc *buf_desc, bool is_rmb,
			   struct smc_link_group *lgr)
{
	struct rw_semaphore *lock;	/* lock buffer list */
	int rc;

	if (is_rmb && buf_desc->is_conf_rkey && !list_empty(&lgr->list)) {
		/* unregister rmb with peer */
		rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
		if (!rc) {
			/* protect against smc_llc_cli_rkey_exchange() */
			down_read(&lgr->llc_conf_mutex);
			smc_llc_do_delete_rkey(lgr, buf_desc);
			buf_desc->is_conf_rkey = false;
			up_read(&lgr->llc_conf_mutex);
			smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
		}
	}

	if (buf_desc->is_reg_err) {
		/* buf registration failed, reuse not possible */
		lock = is_rmb ? &lgr->rmbs_lock :
				&lgr->sndbufs_lock;
		down_write(lock);
		list_del(&buf_desc->list);
		up_write(lock);

		smc_buf_free(lgr, is_rmb, buf_desc);
	} else {
		/* memzero_explicit provides potential memory barrier semantics */
		memzero_explicit(buf_desc->cpu_addr, buf_desc->len);
		WRITE_ONCE(buf_desc->used, 0);
	}
}

static void smc_buf_unuse(struct smc_connection *conn,
			  struct smc_link_group *lgr)
{
	if (conn->sndbuf_desc) {
		if (!lgr->is_smcd && conn->sndbuf_desc->is_vm) {
			smcr_buf_unuse(conn->sndbuf_desc, false, lgr);
		} else {
			memzero_explicit(conn->sndbuf_desc->cpu_addr, conn->sndbuf_desc->len);
			WRITE_ONCE(conn->sndbuf_desc->used, 0);
		}
	}
	if (conn->rmb_desc) {
		if (!lgr->is_smcd) {
			smcr_buf_unuse(conn->rmb_desc, true, lgr);
		} else {
			memzero_explicit(conn->rmb_desc->cpu_addr,
					 conn->rmb_desc->len + sizeof(struct smcd_cdc_msg));
			WRITE_ONCE(conn->rmb_desc->used, 0);
		}
	}
}

/* remove a finished connection from its link group */
void smc_conn_free(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!lgr || conn->freed)
		/* Connection has never been registered in a
		 * link group, or has already been freed.
		 */
		return;

	conn->freed = 1;
	if (!smc_conn_lgr_valid(conn))
		/* Connection has already unregistered from
		 * link group.
		 */
		goto lgr_put;

	if (lgr->is_smcd) {
		if (!list_empty(&lgr->list))
			smc_ism_unset_conn(conn);
		tasklet_kill(&conn->rx_tsklet);
	} else {
		smc_cdc_wait_pend_tx_wr(conn);
		if (current_work() != &conn->abort_work)
			cancel_work_sync(&conn->abort_work);
	}
	if (!list_empty(&lgr->list)) {
		smc_buf_unuse(conn, lgr); /* allow buffer reuse */
		smc_lgr_unregister_conn(conn);
	}

	if (!lgr->conns_num)
		smc_lgr_schedule_free_work(lgr);
lgr_put:
	if (!lgr->is_smcd)
		smcr_link_put(conn->lnk); /* link_hold in smc_conn_create() */
	smc_lgr_put(lgr); /* lgr_hold in smc_conn_create() */
}

/* unregister a link from a buf_desc */
static void smcr_buf_unmap_link(struct smc_buf_desc *buf_desc, bool is_rmb,
				struct smc_link *lnk)
{
	if (is_rmb || buf_desc->is_vm)
		buf_desc->is_reg_mr[lnk->link_idx] = false;
	if (!buf_desc->is_map_ib[lnk->link_idx])
		return;

	if ((is_rmb || buf_desc->is_vm) &&
	    buf_desc->mr[lnk->link_idx]) {
		smc_ib_put_memory_region(buf_desc->mr[lnk->link_idx]);
		buf_desc->mr[lnk->link_idx] = NULL;
	}
	if (is_rmb)
		smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_FROM_DEVICE);
	else
		smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_TO_DEVICE);

	sg_free_table(&buf_desc->sgt[lnk->link_idx]);
	buf_desc->is_map_ib[lnk->link_idx] = false;
}

/* unmap all buffers of lgr for a deleted link */
static void smcr_buf_unmap_lgr(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	struct smc_buf_desc *buf_desc, *bf;
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		down_write(&lgr->rmbs_lock);
		list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list)
			smcr_buf_unmap_link(buf_desc, true, lnk);
		up_write(&lgr->rmbs_lock);

		down_write(&lgr->sndbufs_lock);
		list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i],
					 list)
			smcr_buf_unmap_link(buf_desc, false, lnk);
		up_write(&lgr->sndbufs_lock);
	}
}

static void smcr_rtoken_clear_link(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		lgr->rtokens[i][lnk->link_idx].rkey = 0;
		lgr->rtokens[i][lnk->link_idx].dma_addr = 0;
	}
}

static void __smcr_link_clear(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	struct smc_ib_device *smcibdev;

	smc_wr_free_link_mem(lnk);
	smc_ibdev_cnt_dec(lnk);
	put_device(&lnk->smcibdev->ibdev->dev);
	smcibdev = lnk->smcibdev;
	memset(lnk, 0, sizeof(struct smc_link));
	lnk->state = SMC_LNK_UNUSED;
	if (!atomic_dec_return(&smcibdev->lnk_cnt))
		wake_up(&smcibdev->lnks_deleted);
	smc_lgr_put(lgr); /* lgr_hold in smcr_link_init() */
}

/* must be called under lgr->llc_conf_mutex lock */
void smcr_link_clear(struct smc_link *lnk, bool log)
{
	if (!lnk->lgr || lnk->clearing ||
	    lnk->state == SMC_LNK_UNUSED)
		return;
	lnk->clearing = 1;
	lnk->peer_qpn = 0;
	smc_llc_link_clear(lnk, log);
	smcr_buf_unmap_lgr(lnk);
	smcr_rtoken_clear_link(lnk);
	smc_ib_modify_qp_error(lnk);
	smc_wr_free_link(lnk);
	smc_ib_destroy_queue_pair(lnk);
	smc_ib_dealloc_protection_domain(lnk);
	smcr_link_put(lnk); /* theoretically last link_put */
}

void smcr_link_hold(struct smc_link *lnk)
{
	refcount_inc(&lnk->refcnt);
}

void smcr_link_put(struct smc_link *lnk)
{
	if (refcount_dec_and_test(&lnk->refcnt))
		__smcr_link_clear(lnk);
}

static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb,
			  struct smc_buf_desc *buf_desc)
{
	int i;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
		smcr_buf_unmap_link(buf_desc, is_rmb, &lgr->lnk[i]);

	if (!buf_desc->is_vm && buf_desc->pages)
		__free_pages(buf_desc->pages, buf_desc->order);
	else if (buf_desc->is_vm && buf_desc->cpu_addr)
		vfree(buf_desc->cpu_addr);
	kfree(buf_desc);
}

static void smcd_buf_free(struct smc_link_group *lgr, bool is_dmb,
			  struct smc_buf_desc *buf_desc)
{
	if (is_dmb) {
		/* restore original buf len */
		buf_desc->len += sizeof(struct smcd_cdc_msg);
		smc_ism_unregister_dmb(lgr->smcd, buf_desc);
	} else {
		kfree(buf_desc->cpu_addr);
	}
	kfree(buf_desc);
}

static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc)
{
	if (lgr->is_smcd)
		smcd_buf_free(lgr, is_rmb, buf_desc);
	else
		smcr_buf_free(lgr, is_rmb, buf_desc);
}

static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb)
{
	struct smc_buf_desc *buf_desc, *bf_desc;
	struct list_head *buf_list;
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		if (is_rmb)
			buf_list = &lgr->rmbs[i];
		else
			buf_list = &lgr->sndbufs[i];
		list_for_each_entry_safe(buf_desc, bf_desc, buf_list,
					 list) {
			list_del(&buf_desc->list);
			smc_buf_free(lgr, is_rmb, buf_desc);
		}
	}
}

static void smc_lgr_free_bufs(struct smc_link_group *lgr)
{
	/* free send buffers */
	__smc_lgr_free_bufs(lgr, false);
	/* free rmbs */
	__smc_lgr_free_bufs(lgr, true);
}

/* won't be freed until no one accesses the lgr anymore */
1361static void __smc_lgr_free(struct smc_link_group *lgr)
1362{
1363 smc_lgr_free_bufs(lgr);
1364 if (lgr->is_smcd) {
1365 if (!atomic_dec_return(&lgr->smcd->lgr_cnt))
1366 wake_up(&lgr->smcd->lgrs_deleted);
1367 } else {
1368 smc_wr_free_lgr_mem(lgr);
1369 if (!atomic_dec_return(&lgr_cnt))
1370 wake_up(&lgrs_deleted);
1371 }
1372 kfree(lgr);
1373}
1374
1375/* remove a link group */
1376static void smc_lgr_free(struct smc_link_group *lgr)
1377{
1378 int i;
1379
1380 if (!lgr->is_smcd) {
1381 down_write(&lgr->llc_conf_mutex);
1382 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1383 if (lgr->lnk[i].state != SMC_LNK_UNUSED)
1384 smcr_link_clear(&lgr->lnk[i], false);
1385 }
1386 up_write(&lgr->llc_conf_mutex);
1387 smc_llc_lgr_clear(lgr);
1388 }
1389
1390 destroy_workqueue(lgr->tx_wq);
1391 if (lgr->is_smcd) {
1392 smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
1393 put_device(lgr->smcd->ops->get_dev(lgr->smcd));
1394 }
1395 smc_lgr_put(lgr); /* theoretically last lgr_put */
1396}
1397
1398void smc_lgr_hold(struct smc_link_group *lgr)
1399{
1400 refcount_inc(&lgr->refcnt);
1401}
1402
1403void smc_lgr_put(struct smc_link_group *lgr)
1404{
1405 if (refcount_dec_and_test(&lgr->refcnt))
1406 __smc_lgr_free(lgr);
1407}
1408
1409static void smc_sk_wake_ups(struct smc_sock *smc)
1410{
1411 smc->sk.sk_write_space(&smc->sk);
1412 smc->sk.sk_data_ready(&smc->sk);
1413 smc->sk.sk_state_change(&smc->sk);
1414}
1415
1416/* kill a connection */
1417static void smc_conn_kill(struct smc_connection *conn, bool soft)
1418{
1419 struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
1420
1421 if (conn->lgr->is_smcd && conn->lgr->peer_shutdown)
1422 conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
1423 else
1424 smc_close_abort(conn);
1425 conn->killed = 1;
1426 smc->sk.sk_err = ECONNABORTED;
1427 smc_sk_wake_ups(smc);
1428 if (conn->lgr->is_smcd) {
1429 smc_ism_unset_conn(conn);
1430 if (soft)
1431 tasklet_kill(&conn->rx_tsklet);
1432 else
1433 tasklet_unlock_wait(&conn->rx_tsklet);
1434 } else {
1435 smc_cdc_wait_pend_tx_wr(conn);
1436 }
1437 smc_lgr_unregister_conn(conn);
1438 smc_close_active_abort(smc);
1439}
1440
1441static void smc_lgr_cleanup(struct smc_link_group *lgr)
1442{
1443 if (lgr->is_smcd) {
1444 smc_ism_signal_shutdown(lgr);
1445 } else {
1446 u32 rsn = lgr->llc_termination_rsn;
1447
1448 if (!rsn)
1449 rsn = SMC_LLC_DEL_PROG_INIT_TERM;
1450 smc_llc_send_link_delete_all(lgr, false, rsn);
1451 smcr_lgr_link_deactivate_all(lgr);
1452 }
1453}
1454
1455/* terminate link group
1456 * @soft: true if link group shutdown can take its time
1457 * false if immediate link group shutdown is required
1458 */
1459static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
1460{
1461 struct smc_connection *conn;
1462 struct smc_sock *smc;
1463 struct rb_node *node;
1464
1465 if (lgr->terminating)
1466 return; /* lgr already terminating */
1467 /* cancel free_work sync, will terminate when lgr->freeing is set */
1468 cancel_delayed_work(&lgr->free_work);
1469 lgr->terminating = 1;
1470
1471 /* kill remaining link group connections */
1472 read_lock_bh(&lgr->conns_lock);
1473 node = rb_first(&lgr->conns_all);
1474 while (node) {
1475 read_unlock_bh(&lgr->conns_lock);
1476 conn = rb_entry(node, struct smc_connection, alert_node);
1477 smc = container_of(conn, struct smc_sock, conn);
1478 sock_hold(&smc->sk); /* sock_put below */
1479 lock_sock(&smc->sk);
1480 smc_conn_kill(conn, soft);
1481 release_sock(&smc->sk);
1482 sock_put(&smc->sk); /* sock_hold above */
1483 read_lock_bh(&lgr->conns_lock);
1484 node = rb_first(&lgr->conns_all);
1485 }
1486 read_unlock_bh(&lgr->conns_lock);
1487 smc_lgr_cleanup(lgr);
1488 smc_lgr_free(lgr);
1489}
1490
1491/* unlink link group and schedule termination */
1492void smc_lgr_terminate_sched(struct smc_link_group *lgr)
1493{
1494 spinlock_t *lgr_lock;
1495
1496 smc_lgr_list_head(lgr, &lgr_lock);
1497 spin_lock_bh(lgr_lock);
1498 if (list_empty(&lgr->list) || lgr->terminating || lgr->freeing) {
1499 spin_unlock_bh(lgr_lock);
1500 return; /* lgr already terminating */
1501 }
1502 list_del_init(&lgr->list);
1503 lgr->freeing = 1;
1504 spin_unlock_bh(lgr_lock);
1505 schedule_work(&lgr->terminate_work);
1506}
1507
1508/* Called when peer lgr shutdown (regularly or abnormally) is received */
1509void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
1510{
1511 struct smc_link_group *lgr, *l;
1512 LIST_HEAD(lgr_free_list);
1513
1514 /* run common cleanup function and build free list */
1515 spin_lock_bh(&dev->lgr_lock);
1516 list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) {
1517 if ((!peer_gid || lgr->peer_gid == peer_gid) &&
1518 (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
1519 if (peer_gid) /* peer triggered termination */
1520 lgr->peer_shutdown = 1;
1521 list_move(&lgr->list, &lgr_free_list);
1522 lgr->freeing = 1;
1523 }
1524 }
1525 spin_unlock_bh(&dev->lgr_lock);
1526
1527 /* cancel the regular free workers and actually free lgrs */
1528 list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
1529 list_del_init(&lgr->list);
1530 schedule_work(&lgr->terminate_work);
1531 }
1532}
1533
1534/* Called when an SMCD device is removed or the smc module is unloaded */
1535void smc_smcd_terminate_all(struct smcd_dev *smcd)
1536{
1537 struct smc_link_group *lgr, *lg;
1538 LIST_HEAD(lgr_free_list);
1539
1540 spin_lock_bh(&smcd->lgr_lock);
1541 list_splice_init(&smcd->lgr_list, &lgr_free_list);
1542 list_for_each_entry(lgr, &lgr_free_list, list)
1543 lgr->freeing = 1;
1544 spin_unlock_bh(&smcd->lgr_lock);
1545
1546 list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
1547 list_del_init(&lgr->list);
1548 __smc_lgr_terminate(lgr, false);
1549 }
1550
1551 if (atomic_read(&smcd->lgr_cnt))
1552 wait_event(smcd->lgrs_deleted, !atomic_read(&smcd->lgr_cnt));
1553}
1554
1555/* Called when an SMCR device is removed or the smc module is unloaded.
1556 * If smcibdev is given, all SMCR link groups using this device are terminated.
1557 * If smcibdev is NULL, all SMCR link groups are terminated.
1558 */
1559void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
1560{
1561 struct smc_link_group *lgr, *lg;
1562 LIST_HEAD(lgr_free_list);
1563 int i;
1564
1565 spin_lock_bh(&smc_lgr_list.lock);
1566 if (!smcibdev) {
1567 list_splice_init(&smc_lgr_list.list, &lgr_free_list);
1568 list_for_each_entry(lgr, &lgr_free_list, list)
1569 lgr->freeing = 1;
1570 } else {
1571 list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
1572 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1573 if (lgr->lnk[i].smcibdev == smcibdev)
1574 smcr_link_down_cond_sched(&lgr->lnk[i]);
1575 }
1576 }
1577 }
1578 spin_unlock_bh(&smc_lgr_list.lock);
1579
1580 list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
1581 list_del_init(&lgr->list);
1582 smc_llc_set_termination_rsn(lgr, SMC_LLC_DEL_OP_INIT_TERM);
1583 __smc_lgr_terminate(lgr, false);
1584 }
1585
1586 if (smcibdev) {
1587 if (atomic_read(&smcibdev->lnk_cnt))
1588 wait_event(smcibdev->lnks_deleted,
1589 !atomic_read(&smcibdev->lnk_cnt));
1590 } else {
1591 if (atomic_read(&lgr_cnt))
1592 wait_event(lgrs_deleted, !atomic_read(&lgr_cnt));
1593 }
1594}
1595
1596/* set new lgr type and clear all asymmetric link tagging */
1597void smcr_lgr_set_type(struct smc_link_group *lgr, enum smc_lgr_type new_type)
1598{
1599 char *lgr_type = "";
1600 int i;
1601
1602 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
1603 if (smc_link_usable(&lgr->lnk[i]))
1604 lgr->lnk[i].link_is_asym = false;
1605 if (lgr->type == new_type)
1606 return;
1607 lgr->type = new_type;
1608
1609 switch (lgr->type) {
1610 case SMC_LGR_NONE:
1611 lgr_type = "NONE";
1612 break;
1613 case SMC_LGR_SINGLE:
1614 lgr_type = "SINGLE";
1615 break;
1616 case SMC_LGR_SYMMETRIC:
1617 lgr_type = "SYMMETRIC";
1618 break;
1619 case SMC_LGR_ASYMMETRIC_PEER:
1620 lgr_type = "ASYMMETRIC_PEER";
1621 break;
1622 case SMC_LGR_ASYMMETRIC_LOCAL:
1623 lgr_type = "ASYMMETRIC_LOCAL";
1624 break;
1625 }
1626 pr_warn_ratelimited("smc: SMC-R lg %*phN net %llu state changed: "
1627 "%s, pnetid %.16s\n", SMC_LGR_ID_SIZE, &lgr->id,
1628 lgr->net->net_cookie, lgr_type, lgr->pnet_id);
1629}
1630
1631/* set new lgr type and tag a link as asymmetric */
1632void smcr_lgr_set_type_asym(struct smc_link_group *lgr,
1633 enum smc_lgr_type new_type, int asym_lnk_idx)
1634{
1635 smcr_lgr_set_type(lgr, new_type);
1636 lgr->lnk[asym_lnk_idx].link_is_asym = true;
1637}
1638
1639/* abort connection, abort_work scheduled from tasklet context */
1640static void smc_conn_abort_work(struct work_struct *work)
1641{
1642 struct smc_connection *conn = container_of(work,
1643 struct smc_connection,
1644 abort_work);
1645 struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
1646
1647 lock_sock(&smc->sk);
1648 smc_conn_kill(conn, true);
1649 release_sock(&smc->sk);
1650 sock_put(&smc->sk); /* sock_hold done by schedulers of abort_work */
1651}
1652
1653void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
1654{
1655 struct smc_link_group *lgr, *n;
1656
1657 list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
1658 struct smc_link *link;
1659
1660 if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
1661 SMC_MAX_PNETID_LEN) ||
1662 lgr->type == SMC_LGR_SYMMETRIC ||
1663 lgr->type == SMC_LGR_ASYMMETRIC_PEER ||
1664 !rdma_dev_access_netns(smcibdev->ibdev, lgr->net))
1665 continue;
1666
1667 /* trigger local add link processing */
1668 link = smc_llc_usable_link(lgr);
1669 if (link)
1670 smc_llc_add_link_local(link);
1671 }
1672}
1673
1674/* link is down - switch connections to alternate link,
1675 * must be called under lgr->llc_conf_mutex lock
1676 */
1677static void smcr_link_down(struct smc_link *lnk)
1678{
1679 struct smc_link_group *lgr = lnk->lgr;
1680 struct smc_link *to_lnk;
1681 int del_link_id;
1682
1683 if (!lgr || lnk->state == SMC_LNK_UNUSED || list_empty(&lgr->list))
1684 return;
1685
1686 to_lnk = smc_switch_conns(lgr, lnk, true);
1687 if (!to_lnk) { /* no backup link available */
1688 smcr_link_clear(lnk, true);
1689 return;
1690 }
1691 smcr_lgr_set_type(lgr, SMC_LGR_SINGLE);
1692 del_link_id = lnk->link_id;
1693
1694 if (lgr->role == SMC_SERV) {
1695 /* trigger local delete link processing */
1696 smc_llc_srv_delete_link_local(to_lnk, del_link_id);
1697 } else {
1698 if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
1699 /* another llc task is ongoing */
1700 up_write(&lgr->llc_conf_mutex);
1701 wait_event_timeout(lgr->llc_flow_waiter,
1702 (list_empty(&lgr->list) ||
1703 lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
1704 SMC_LLC_WAIT_TIME);
1705 down_write(&lgr->llc_conf_mutex);
1706 }
1707 if (!list_empty(&lgr->list)) {
1708 smc_llc_send_delete_link(to_lnk, del_link_id,
1709 SMC_LLC_REQ, true,
1710 SMC_LLC_DEL_LOST_PATH);
1711 smcr_link_clear(lnk, true);
1712 }
1713 wake_up(&lgr->llc_flow_waiter); /* wake up next waiter */
1714 }
1715}
1716
1717/* must be called under lgr->llc_conf_mutex lock */
1718void smcr_link_down_cond(struct smc_link *lnk)
1719{
1720 if (smc_link_downing(&lnk->state)) {
1721 trace_smcr_link_down(lnk, __builtin_return_address(0));
1722 smcr_link_down(lnk);
1723 }
1724}
1725
1726/* will get the lgr->llc_conf_mutex lock */
1727void smcr_link_down_cond_sched(struct smc_link *lnk)
1728{
1729 if (smc_link_downing(&lnk->state)) {
1730 trace_smcr_link_down(lnk, __builtin_return_address(0));
1731 schedule_work(&lnk->link_down_wrk);
1732 }
1733}
1734
1735void smcr_port_err(struct smc_ib_device *smcibdev, u8 ibport)
1736{
1737 struct smc_link_group *lgr, *n;
1738 int i;
1739
1740 list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
1741 if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
1742 SMC_MAX_PNETID_LEN))
1743 continue; /* lgr is not affected */
1744 if (list_empty(&lgr->list))
1745 continue;
1746 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1747 struct smc_link *lnk = &lgr->lnk[i];
1748
1749 if (smc_link_usable(lnk) &&
1750 lnk->smcibdev == smcibdev && lnk->ibport == ibport)
1751 smcr_link_down_cond_sched(lnk);
1752 }
1753 }
1754}
1755
1756static void smc_link_down_work(struct work_struct *work)
1757{
1758 struct smc_link *link = container_of(work, struct smc_link,
1759 link_down_wrk);
1760 struct smc_link_group *lgr = link->lgr;
1761
1762 if (list_empty(&lgr->list))
1763 return;
1764 wake_up_all(&lgr->llc_msg_waiter);
1765 down_write(&lgr->llc_conf_mutex);
1766 smcr_link_down(link);
1767 up_write(&lgr->llc_conf_mutex);
1768}
1769
1770static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev,
1771 struct netdev_nested_priv *priv)
1772{
1773 unsigned short *vlan_id = (unsigned short *)priv->data;
1774
1775 if (is_vlan_dev(lower_dev)) {
1776 *vlan_id = vlan_dev_vlan_id(lower_dev);
1777 return 1;
1778 }
1779
1780 return 0;
1781}
1782
1783/* Determine vlan of internal TCP socket. */
1784int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
1785{
1786 struct dst_entry *dst = sk_dst_get(clcsock->sk);
1787 struct netdev_nested_priv priv;
1788 struct net_device *ndev;
1789 int rc = 0;
1790
1791 ini->vlan_id = 0;
1792 if (!dst) {
1793 rc = -ENOTCONN;
1794 goto out;
1795 }
1796 if (!dst->dev) {
1797 rc = -ENODEV;
1798 goto out_rel;
1799 }
1800
1801 ndev = dst->dev;
1802 if (is_vlan_dev(ndev)) {
1803 ini->vlan_id = vlan_dev_vlan_id(ndev);
1804 goto out_rel;
1805 }
1806
1807 priv.data = (void *)&ini->vlan_id;
1808 rtnl_lock();
1809 netdev_walk_all_lower_dev(ndev, smc_vlan_by_tcpsk_walk, &priv);
1810 rtnl_unlock();
1811
1812out_rel:
1813 dst_release(dst);
1814out:
1815 return rc;
1816}
1817
1818static bool smcr_lgr_match(struct smc_link_group *lgr, u8 smcr_version,
1819 u8 peer_systemid[],
1820 u8 peer_gid[],
1821 u8 peer_mac_v1[],
1822 enum smc_lgr_role role, u32 clcqpn,
1823 struct net *net)
1824{
1825 struct smc_link *lnk;
1826 int i;
1827
1828 if (memcmp(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN) ||
1829 lgr->role != role)
1830 return false;
1831
1832 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1833 lnk = &lgr->lnk[i];
1834
1835 if (!smc_link_active(lnk))
1836 continue;
1837 /* use verbs API to check netns, instead of lgr->net */
1838 if (!rdma_dev_access_netns(lnk->smcibdev->ibdev, net))
1839 return false;
1840 if ((lgr->role == SMC_SERV || lnk->peer_qpn == clcqpn) &&
1841 !memcmp(lnk->peer_gid, peer_gid, SMC_GID_SIZE) &&
1842 (smcr_version == SMC_V2 ||
1843 !memcmp(lnk->peer_mac, peer_mac_v1, ETH_ALEN)))
1844 return true;
1845 }
1846 return false;
1847}
1848
1849static bool smcd_lgr_match(struct smc_link_group *lgr,
1850 struct smcd_dev *smcismdev, u64 peer_gid)
1851{
1852 return lgr->peer_gid == peer_gid && lgr->smcd == smcismdev;
1853}
1854
1855/* create a new SMC connection (and a new link group if necessary) */
1856int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
1857{
1858 struct smc_connection *conn = &smc->conn;
1859 struct net *net = sock_net(&smc->sk);
1860 struct list_head *lgr_list;
1861 struct smc_link_group *lgr;
1862 enum smc_lgr_role role;
1863 spinlock_t *lgr_lock;
1864 int rc = 0;
1865
1866 lgr_list = ini->is_smcd ? &ini->ism_dev[ini->ism_selected]->lgr_list :
1867 &smc_lgr_list.list;
1868 lgr_lock = ini->is_smcd ? &ini->ism_dev[ini->ism_selected]->lgr_lock :
1869 &smc_lgr_list.lock;
1870 ini->first_contact_local = 1;
1871 role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
1872 if (role == SMC_CLNT && ini->first_contact_peer)
1873 /* create new link group as well */
1874 goto create;
1875
1876 /* determine if an existing link group can be reused */
1877 spin_lock_bh(lgr_lock);
1878 list_for_each_entry(lgr, lgr_list, list) {
1879 write_lock_bh(&lgr->conns_lock);
1880 if ((ini->is_smcd ?
1881 smcd_lgr_match(lgr, ini->ism_dev[ini->ism_selected],
1882 ini->ism_peer_gid[ini->ism_selected]) :
1883 smcr_lgr_match(lgr, ini->smcr_version,
1884 ini->peer_systemid,
1885 ini->peer_gid, ini->peer_mac, role,
1886 ini->ib_clcqpn, net)) &&
1887 !lgr->sync_err &&
1888 (ini->smcd_version == SMC_V2 ||
1889 lgr->vlan_id == ini->vlan_id) &&
1890 (role == SMC_CLNT || ini->is_smcd ||
1891 (lgr->conns_num < SMC_RMBS_PER_LGR_MAX &&
1892 !bitmap_full(lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX)))) {
1893 /* link group found */
1894 ini->first_contact_local = 0;
1895 conn->lgr = lgr;
1896 rc = smc_lgr_register_conn(conn, false);
1897 write_unlock_bh(&lgr->conns_lock);
1898 if (!rc && delayed_work_pending(&lgr->free_work))
1899 cancel_delayed_work(&lgr->free_work);
1900 break;
1901 }
1902 write_unlock_bh(&lgr->conns_lock);
1903 }
1904 spin_unlock_bh(lgr_lock);
1905 if (rc)
1906 return rc;
1907
1908 if (role == SMC_CLNT && !ini->first_contact_peer &&
1909 ini->first_contact_local) {
1910 /* Server reuses a link group, but Client wants to start
1911 * a new one
1912 * send out_of_sync decline, reason synchr. error
1913 */
1914 return SMC_CLC_DECL_SYNCERR;
1915 }
1916
1917create:
1918 if (ini->first_contact_local) {
1919 rc = smc_lgr_create(smc, ini);
1920 if (rc)
1921 goto out;
1922 lgr = conn->lgr;
1923 write_lock_bh(&lgr->conns_lock);
1924 rc = smc_lgr_register_conn(conn, true);
1925 write_unlock_bh(&lgr->conns_lock);
1926 if (rc) {
1927 smc_lgr_cleanup_early(lgr);
1928 goto out;
1929 }
1930 }
1931 smc_lgr_hold(conn->lgr); /* lgr_put in smc_conn_free() */
1932 if (!conn->lgr->is_smcd)
1933 smcr_link_hold(conn->lnk); /* link_put in smc_conn_free() */
1934 conn->freed = 0;
1935 conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
1936 conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
1937 conn->urg_state = SMC_URG_READ;
1938 init_waitqueue_head(&conn->cdc_pend_tx_wq);
1939 INIT_WORK(&smc->conn.abort_work, smc_conn_abort_work);
1940 if (ini->is_smcd) {
1941 conn->rx_off = sizeof(struct smcd_cdc_msg);
1942 smcd_cdc_rx_init(conn); /* init tasklet for this conn */
1943 } else {
1944 conn->rx_off = 0;
1945 }
1946#ifndef KERNEL_HAS_ATOMIC64
1947 spin_lock_init(&conn->acurs_lock);
1948#endif
1949
1950out:
1951 return rc;
1952}
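/* In short, smc_conn_create() has three outcomes: an existing matching
 * link group is reused (first_contact_local stays 0), the client is
 * declined with SMC_CLC_DECL_SYNCERR when it expects reuse but no match
 * exists, or a fresh link group is created via smc_lgr_create().
 */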
1953
1954#define SMCD_DMBE_SIZES 6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
1955#define SMCR_RMBE_SIZES 5 /* 0 -> 16KB, 1 -> 32KB, .. 5 -> 512KB */
1956
1957/* convert the RMB size into the compressed notation (minimum 16K, see
1958 * SMCD/R_DMBE_SIZES).
1959 * In contrast to plain ilog2, this rounds towards the next power of 2,
1960 * so the socket application gets at least its desired sndbuf / rcvbuf size.
1961 */
1962static u8 smc_compress_bufsize(int size, bool is_smcd, bool is_rmb)
1963{
1964 const unsigned int max_scat = SG_MAX_SINGLE_ALLOC * PAGE_SIZE;
1965 u8 compressed;
1966
1967 if (size <= SMC_BUF_MIN_SIZE)
1968 return 0;
1969
1970 size = (size - 1) >> 14; /* convert to 16K multiple */
1971 compressed = min_t(u8, ilog2(size) + 1,
1972 is_smcd ? SMCD_DMBE_SIZES : SMCR_RMBE_SIZES);
1973
1974 if (!is_smcd && is_rmb)
1975 /* RMBs are backed by & limited to max size of scatterlists */
1976 compressed = min_t(u8, compressed, ilog2(max_scat >> 14));
1977
1978 return compressed;
1979}
1980
1981/* convert the RMB size from compressed notation into integer */
1982int smc_uncompress_bufsize(u8 compressed)
1983{
1984 u32 size;
1985
1986 size = 0x00000001 << (((int)compressed) + 14);
1987 return (int)size;
1988}
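/* Worked example of the round trip, e.g. for a requested size of
 * 80000 bytes: smc_compress_bufsize() computes (80000 - 1) >> 14 = 4
 * and ilog2(4) + 1 = 3; smc_uncompress_bufsize(3) then yields
 * 1 << (3 + 14) = 131072 (128KB), the next power of 2 >= 80000.
 */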
1989
1990/* try to reuse a sndbuf or rmb description slot for a certain
1991 * buffer size; if not available, return NULL
1992 */
1993static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize,
1994 struct rw_semaphore *lock,
1995 struct list_head *buf_list)
1996{
1997 struct smc_buf_desc *buf_slot;
1998
1999 down_read(lock);
2000 list_for_each_entry(buf_slot, buf_list, list) {
2001 if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
2002 up_read(lock);
2003 return buf_slot;
2004 }
2005 }
2006 up_read(lock);
2007 return NULL;
2008}
2009
2010/* one of the conditions for announcing a receiver's current window size is
2011 * that it "results in a minimum increase in the window size of 10% of the
2012 * receive buffer space" [RFC7609]
2013 */
2014static inline int smc_rmb_wnd_update_limit(int rmbe_size)
2015{
2016 return max_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
2017}
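/* e.g. a 131072-byte RMB yields an update limit of 13107 bytes (10%);
 * very small RMBs are floored at SOCK_MIN_SNDBUF / 2.
 */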
2018
2019/* map a buf to a link */
2020static int smcr_buf_map_link(struct smc_buf_desc *buf_desc, bool is_rmb,
2021 struct smc_link *lnk)
2022{
2023 int rc, i, nents, offset, buf_size, size, access_flags;
2024 struct scatterlist *sg;
2025 void *buf;
2026
2027 if (buf_desc->is_map_ib[lnk->link_idx])
2028 return 0;
2029
2030 if (buf_desc->is_vm) {
2031 buf = buf_desc->cpu_addr;
2032 buf_size = buf_desc->len;
2033 offset = offset_in_page(buf_desc->cpu_addr);
2034 nents = PAGE_ALIGN(buf_size + offset) / PAGE_SIZE;
2035 } else {
2036 nents = 1;
2037 }
2038
2039 rc = sg_alloc_table(&buf_desc->sgt[lnk->link_idx], nents, GFP_KERNEL);
2040 if (rc)
2041 return rc;
2042
2043 if (buf_desc->is_vm) {
2044 /* virtually contiguous buffer */
2045 for_each_sg(buf_desc->sgt[lnk->link_idx].sgl, sg, nents, i) {
2046 size = min_t(int, PAGE_SIZE - offset, buf_size);
2047 sg_set_page(sg, vmalloc_to_page(buf), size, offset);
2048 buf += size / sizeof(*buf);
2049 buf_size -= size;
2050 offset = 0;
2051 }
2052 } else {
2053 /* physically contiguous buffer */
2054 sg_set_buf(buf_desc->sgt[lnk->link_idx].sgl,
2055 buf_desc->cpu_addr, buf_desc->len);
2056 }
2057
2058 /* map sg table to DMA address */
2059 rc = smc_ib_buf_map_sg(lnk, buf_desc,
2060 is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
2061 /* SMC protocol depends on mapping to one DMA address only */
2062 if (rc != nents) {
2063 rc = -EAGAIN;
2064 goto free_table;
2065 }
2066
2067 buf_desc->is_dma_need_sync |=
2068 smc_ib_is_sg_need_sync(lnk, buf_desc) << lnk->link_idx;
2069
2070 if (is_rmb || buf_desc->is_vm) {
2071 /* create a new memory region for the RMB or vzalloced sndbuf */
2072 access_flags = is_rmb ?
2073 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
2074 IB_ACCESS_LOCAL_WRITE;
2075
2076 rc = smc_ib_get_memory_region(lnk->roce_pd, access_flags,
2077 buf_desc, lnk->link_idx);
2078 if (rc)
2079 goto buf_unmap;
2080 smc_ib_sync_sg_for_device(lnk, buf_desc,
2081 is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
2082 }
2083 buf_desc->is_map_ib[lnk->link_idx] = true;
2084 return 0;
2085
2086buf_unmap:
2087 smc_ib_buf_unmap_sg(lnk, buf_desc,
2088 is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
2089free_table:
2090 sg_free_table(&buf_desc->sgt[lnk->link_idx]);
2091 return rc;
2092}
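/* The mapping above proceeds in three steps: build a scatterlist (one
 * entry per page for vmalloc'ed buffers, a single entry for physically
 * contiguous ones), DMA-map it - rejecting with -EAGAIN unless all
 * nents entries mapped - and, for RMBs and vzalloced sndbufs, create
 * and pre-sync an IB memory region.
 */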
2093
2094/* register a new buf (rmb or vzalloced sndbuf) on the IB device;
2095 * must be called under lgr->llc_conf_mutex lock
2096 */
2097int smcr_link_reg_buf(struct smc_link *link, struct smc_buf_desc *buf_desc)
2098{
2099 if (list_empty(&link->lgr->list))
2100 return -ENOLINK;
2101 if (!buf_desc->is_reg_mr[link->link_idx]) {
2102 /* register memory region for new buf */
2103 if (buf_desc->is_vm)
2104 buf_desc->mr[link->link_idx]->iova =
2105 (uintptr_t)buf_desc->cpu_addr;
2106 if (smc_wr_reg_send(link, buf_desc->mr[link->link_idx])) {
2107 buf_desc->is_reg_err = true;
2108 return -EFAULT;
2109 }
2110 buf_desc->is_reg_mr[link->link_idx] = true;
2111 }
2112 return 0;
2113}
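/* Registration is idempotent per link: is_reg_mr[] prevents re-sending
 * the registration work request, and a failed smc_wr_reg_send() flags
 * the descriptor via is_reg_err.
 */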
2114
2115static int _smcr_buf_map_lgr(struct smc_link *lnk, struct rw_semaphore *lock,
2116 struct list_head *lst, bool is_rmb)
2117{
2118 struct smc_buf_desc *buf_desc, *bf;
2119 int rc = 0;
2120
2121 down_write(lock);
2122 list_for_each_entry_safe(buf_desc, bf, lst, list) {
2123 if (!buf_desc->used)
2124 continue;
2125 rc = smcr_buf_map_link(buf_desc, is_rmb, lnk);
2126 if (rc)
2127 goto out;
2128 }
2129out:
2130 up_write(lock);
2131 return rc;
2132}
2133
2134/* map all used buffers of lgr for a new link */
2135int smcr_buf_map_lgr(struct smc_link *lnk)
2136{
2137 struct smc_link_group *lgr = lnk->lgr;
2138 int i, rc = 0;
2139
2140 for (i = 0; i < SMC_RMBE_SIZES; i++) {
2141 rc = _smcr_buf_map_lgr(lnk, &lgr->rmbs_lock,
2142 &lgr->rmbs[i], true);
2143 if (rc)
2144 return rc;
2145 rc = _smcr_buf_map_lgr(lnk, &lgr->sndbufs_lock,
2146 &lgr->sndbufs[i], false);
2147 if (rc)
2148 return rc;
2149 }
2150 return 0;
2151}
2152
2153/* register all used buffers of lgr for a new link,
2154 * must be called under lgr->llc_conf_mutex lock
2155 */
2156int smcr_buf_reg_lgr(struct smc_link *lnk)
2157{
2158 struct smc_link_group *lgr = lnk->lgr;
2159 struct smc_buf_desc *buf_desc, *bf;
2160 int i, rc = 0;
2161
2162 /* reg all RMBs for a new link */
2163 down_write(&lgr->rmbs_lock);
2164 for (i = 0; i < SMC_RMBE_SIZES; i++) {
2165 list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list) {
2166 if (!buf_desc->used)
2167 continue;
2168 rc = smcr_link_reg_buf(lnk, buf_desc);
2169 if (rc) {
2170 up_write(&lgr->rmbs_lock);
2171 return rc;
2172 }
2173 }
2174 }
2175 up_write(&lgr->rmbs_lock);
2176
2177 if (lgr->buf_type == SMCR_PHYS_CONT_BUFS)
2178 return rc;
2179
2180 /* reg all vzalloced sndbufs for a new link */
2181 down_write(&lgr->sndbufs_lock);
2182 for (i = 0; i < SMC_RMBE_SIZES; i++) {
2183 list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i], list) {
2184 if (!buf_desc->used || !buf_desc->is_vm)
2185 continue;
2186 rc = smcr_link_reg_buf(lnk, buf_desc);
2187 if (rc) {
2188 up_write(&lgr->sndbufs_lock);
2189 return rc;
2190 }
2191 }
2192 }
2193 up_write(&lgr->sndbufs_lock);
2194 return rc;
2195}
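/* Only vzalloced sndbufs need this second pass: physically contiguous
 * sndbufs get no memory region at all (see smcr_buf_map_link()), so
 * with SMCR_PHYS_CONT_BUFS there is nothing left to register.
 */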
2196
2197static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
2198 bool is_rmb, int bufsize)
2199{
2200 struct smc_buf_desc *buf_desc;
2201
2202 /* try to alloc a new buffer */
2203 buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
2204 if (!buf_desc)
2205 return ERR_PTR(-ENOMEM);
2206
2207 switch (lgr->buf_type) {
2208 case SMCR_PHYS_CONT_BUFS:
2209 case SMCR_MIXED_BUFS:
2210 buf_desc->order = get_order(bufsize);
2211 buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN |
2212 __GFP_NOMEMALLOC | __GFP_COMP |
2213 __GFP_NORETRY | __GFP_ZERO,
2214 buf_desc->order);
2215 if (buf_desc->pages) {
2216 buf_desc->cpu_addr =
2217 (void *)page_address(buf_desc->pages);
2218 buf_desc->len = bufsize;
2219 buf_desc->is_vm = false;
2220 break;
2221 }
2222 if (lgr->buf_type == SMCR_PHYS_CONT_BUFS)
2223 goto out;
2224		fallthrough;	// try virtually contiguous buf
2225 case SMCR_VIRT_CONT_BUFS:
2226 buf_desc->order = get_order(bufsize);
2227 buf_desc->cpu_addr = vzalloc(PAGE_SIZE << buf_desc->order);
2228 if (!buf_desc->cpu_addr)
2229 goto out;
2230 buf_desc->pages = NULL;
2231 buf_desc->len = bufsize;
2232 buf_desc->is_vm = true;
2233 break;
2234 }
2235 return buf_desc;
2236
2237out:
2238 kfree(buf_desc);
2239 return ERR_PTR(-EAGAIN);
2240}
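/* Allocation strategy, roughly: SMCR_PHYS_CONT_BUFS insists on
 * physically contiguous higher-order pages, SMCR_VIRT_CONT_BUFS goes
 * straight to vzalloc(), and SMCR_MIXED_BUFS tries contiguous pages
 * first and falls through to vzalloc() when that fails.
 */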
2241
2242/* map buf_desc on all usable links,
2243 * unused buffers stay mapped as long as the link is up
2244 */
2245static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
2246 struct smc_buf_desc *buf_desc, bool is_rmb)
2247{
2248 int i, rc = 0, cnt = 0;
2249
2250 /* protect against parallel link reconfiguration */
2251 down_read(&lgr->llc_conf_mutex);
2252 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
2253 struct smc_link *lnk = &lgr->lnk[i];
2254
2255 if (!smc_link_usable(lnk))
2256 continue;
2257 if (smcr_buf_map_link(buf_desc, is_rmb, lnk)) {
2258 rc = -ENOMEM;
2259 goto out;
2260 }
2261 cnt++;
2262 }
2263out:
2264 up_read(&lgr->llc_conf_mutex);
2265 if (!rc && !cnt)
2266 rc = -EINVAL;
2267 return rc;
2268}
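/* rc == 0 with cnt == 0 means no link was usable at all; that case is
 * reported as -EINVAL rather than treated as success.
 */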
2269
2270static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
2271 bool is_dmb, int bufsize)
2272{
2273 struct smc_buf_desc *buf_desc;
2274 int rc;
2275
2276 /* try to alloc a new DMB */
2277 buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
2278 if (!buf_desc)
2279 return ERR_PTR(-ENOMEM);
2280 if (is_dmb) {
2281 rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
2282 if (rc) {
2283 kfree(buf_desc);
2284 if (rc == -ENOMEM)
2285 return ERR_PTR(-EAGAIN);
2286 if (rc == -ENOSPC)
2287 return ERR_PTR(-ENOSPC);
2288 return ERR_PTR(-EIO);
2289 }
2290 buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
2291		/* CDC header is stored in the buffer, so pretend it is smaller */
2292 buf_desc->len = bufsize - sizeof(struct smcd_cdc_msg);
2293 } else {
2294 buf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL |
2295 __GFP_NOWARN | __GFP_NORETRY |
2296 __GFP_NOMEMALLOC);
2297 if (!buf_desc->cpu_addr) {
2298 kfree(buf_desc);
2299 return ERR_PTR(-EAGAIN);
2300 }
2301 buf_desc->len = bufsize;
2302 }
2303 return buf_desc;
2304}
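/* Note the error mapping for DMB registration: -ENOMEM from the ISM
 * device becomes -EAGAIN, which __smc_buf_create() treats as "retry
 * with a smaller size"; -ENOSPC (no free DMB slot) is passed through,
 * and anything else is reported as -EIO.
 */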
2305
2306static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
2307{
2308 struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM);
2309 struct smc_connection *conn = &smc->conn;
2310 struct smc_link_group *lgr = conn->lgr;
2311 struct list_head *buf_list;
2312 int bufsize, bufsize_short;
2313 struct rw_semaphore *lock; /* lock buffer list */
2314 bool is_dgraded = false;
2315 int sk_buf_size;
2316
2317 if (is_rmb)
2318 /* use socket recv buffer size (w/o overhead) as start value */
2319 sk_buf_size = smc->sk.sk_rcvbuf;
2320 else
2321 /* use socket send buffer size (w/o overhead) as start value */
2322 sk_buf_size = smc->sk.sk_sndbuf;
2323
2324 for (bufsize_short = smc_compress_bufsize(sk_buf_size, is_smcd, is_rmb);
2325 bufsize_short >= 0; bufsize_short--) {
2326 if (is_rmb) {
2327 lock = &lgr->rmbs_lock;
2328 buf_list = &lgr->rmbs[bufsize_short];
2329 } else {
2330 lock = &lgr->sndbufs_lock;
2331 buf_list = &lgr->sndbufs[bufsize_short];
2332 }
2333 bufsize = smc_uncompress_bufsize(bufsize_short);
2334
2335 /* check for reusable slot in the link group */
2336 buf_desc = smc_buf_get_slot(bufsize_short, lock, buf_list);
2337 if (buf_desc) {
2338 buf_desc->is_dma_need_sync = 0;
2339 SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);
2340 SMC_STAT_BUF_REUSE(smc, is_smcd, is_rmb);
2341 break; /* found reusable slot */
2342 }
2343
2344 if (is_smcd)
2345 buf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize);
2346 else
2347 buf_desc = smcr_new_buf_create(lgr, is_rmb, bufsize);
2348
2349 if (PTR_ERR(buf_desc) == -ENOMEM)
2350 break;
2351 if (IS_ERR(buf_desc)) {
2352 if (!is_dgraded) {
2353 is_dgraded = true;
2354 SMC_STAT_RMB_DOWNGRADED(smc, is_smcd, is_rmb);
2355 }
2356 continue;
2357 }
2358
2359 SMC_STAT_RMB_ALLOC(smc, is_smcd, is_rmb);
2360 SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);
2361 buf_desc->used = 1;
2362 down_write(lock);
2363 list_add(&buf_desc->list, buf_list);
2364 up_write(lock);
2365 break; /* found */
2366 }
2367
2368 if (IS_ERR(buf_desc))
2369 return PTR_ERR(buf_desc);
2370
2371 if (!is_smcd) {
2372 if (smcr_buf_map_usable_links(lgr, buf_desc, is_rmb)) {
2373 smcr_buf_unuse(buf_desc, is_rmb, lgr);
2374 return -ENOMEM;
2375 }
2376 }
2377
2378 if (is_rmb) {
2379 conn->rmb_desc = buf_desc;
2380 conn->rmbe_size_short = bufsize_short;
2381 smc->sk.sk_rcvbuf = bufsize;
2382 atomic_set(&conn->bytes_to_rcv, 0);
2383 conn->rmbe_update_limit =
2384 smc_rmb_wnd_update_limit(buf_desc->len);
2385 if (is_smcd)
2386 smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */
2387 } else {
2388 conn->sndbuf_desc = buf_desc;
2389 smc->sk.sk_sndbuf = bufsize;
2390 atomic_set(&conn->sndbuf_space, bufsize);
2391 }
2392 return 0;
2393}
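/* The loop above starts at the compressed size matching the socket's
 * sk_sndbuf/sk_rcvbuf and walks downwards, so under memory pressure a
 * connection ends up with the largest buffer that can still be
 * allocated (counted once as downgraded in the stats).
 */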
2394
2395void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
2396{
2397 if (!conn->sndbuf_desc->is_dma_need_sync)
2398 return;
2399 if (!smc_conn_lgr_valid(conn) || conn->lgr->is_smcd ||
2400 !smc_link_active(conn->lnk))
2401 return;
2402 smc_ib_sync_sg_for_device(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
2403}
2404
2405void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
2406{
2407 int i;
2408
2409 if (!conn->rmb_desc->is_dma_need_sync)
2410 return;
2411 if (!smc_conn_lgr_valid(conn) || conn->lgr->is_smcd)
2412 return;
2413 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
2414 if (!smc_link_active(&conn->lgr->lnk[i]))
2415 continue;
2416 smc_ib_sync_sg_for_cpu(&conn->lgr->lnk[i], conn->rmb_desc,
2417 DMA_FROM_DEVICE);
2418 }
2419}
2420
2421/* create the send and receive buffer for an SMC socket;
2422 * receive buffers are called RMBs;
2423 * (even though the SMC protocol allows more than one RMB-element per RMB,
2424 * the Linux implementation uses just one RMB-element per RMB, i.e. uses an
2425 * extra RMB for every connection in a link group)
2426 */
2427int smc_buf_create(struct smc_sock *smc, bool is_smcd)
2428{
2429 int rc;
2430
2431 /* create send buffer */
2432 rc = __smc_buf_create(smc, is_smcd, false);
2433 if (rc)
2434 return rc;
2435 /* create rmb */
2436 rc = __smc_buf_create(smc, is_smcd, true);
2437 if (rc) {
2438 down_write(&smc->conn.lgr->sndbufs_lock);
2439 list_del(&smc->conn.sndbuf_desc->list);
2440 up_write(&smc->conn.lgr->sndbufs_lock);
2441 smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
2442 smc->conn.sndbuf_desc = NULL;
2443 }
2444 return rc;
2445}
2446
2447static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr)
2448{
2449 int i;
2450
2451 for_each_clear_bit(i, lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX) {
2452 if (!test_and_set_bit(i, lgr->rtokens_used_mask))
2453 return i;
2454 }
2455 return -ENOSPC;
2456}
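/* Scanning with for_each_clear_bit() and claiming the slot via the
 * atomic test_and_set_bit() keeps the reservation safe against
 * concurrent callers; a lost race simply moves on to the next free
 * bit.
 */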
2457
2458static int smc_rtoken_find_by_link(struct smc_link_group *lgr, int lnk_idx,
2459 u32 rkey)
2460{
2461 int i;
2462
2463 for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
2464 if (test_bit(i, lgr->rtokens_used_mask) &&
2465 lgr->rtokens[i][lnk_idx].rkey == rkey)
2466 return i;
2467 }
2468 return -ENOENT;
2469}
2470
2471/* set rtoken for a new link to an existing rmb */
2472void smc_rtoken_set(struct smc_link_group *lgr, int link_idx, int link_idx_new,
2473 __be32 nw_rkey_known, __be64 nw_vaddr, __be32 nw_rkey)
2474{
2475 int rtok_idx;
2476
2477 rtok_idx = smc_rtoken_find_by_link(lgr, link_idx, ntohl(nw_rkey_known));
2478 if (rtok_idx == -ENOENT)
2479 return;
2480 lgr->rtokens[rtok_idx][link_idx_new].rkey = ntohl(nw_rkey);
2481 lgr->rtokens[rtok_idx][link_idx_new].dma_addr = be64_to_cpu(nw_vaddr);
2482}
2483
2484/* set rtoken for a new link whose link_id is given */
2485void smc_rtoken_set2(struct smc_link_group *lgr, int rtok_idx, int link_id,
2486 __be64 nw_vaddr, __be32 nw_rkey)
2487{
2488 u64 dma_addr = be64_to_cpu(nw_vaddr);
2489 u32 rkey = ntohl(nw_rkey);
2490 bool found = false;
2491 int link_idx;
2492
2493 for (link_idx = 0; link_idx < SMC_LINKS_PER_LGR_MAX; link_idx++) {
2494 if (lgr->lnk[link_idx].link_id == link_id) {
2495 found = true;
2496 break;
2497 }
2498 }
2499 if (!found)
2500 return;
2501 lgr->rtokens[rtok_idx][link_idx].rkey = rkey;
2502 lgr->rtokens[rtok_idx][link_idx].dma_addr = dma_addr;
2503}
2504
2505/* add a new rtoken from peer */
2506int smc_rtoken_add(struct smc_link *lnk, __be64 nw_vaddr, __be32 nw_rkey)
2507{
2508 struct smc_link_group *lgr = smc_get_lgr(lnk);
2509 u64 dma_addr = be64_to_cpu(nw_vaddr);
2510 u32 rkey = ntohl(nw_rkey);
2511 int i;
2512
2513 for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
2514 if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
2515 lgr->rtokens[i][lnk->link_idx].dma_addr == dma_addr &&
2516 test_bit(i, lgr->rtokens_used_mask)) {
2517 /* already in list */
2518 return i;
2519 }
2520 }
2521 i = smc_rmb_reserve_rtoken_idx(lgr);
2522 if (i < 0)
2523 return i;
2524 lgr->rtokens[i][lnk->link_idx].rkey = rkey;
2525 lgr->rtokens[i][lnk->link_idx].dma_addr = dma_addr;
2526 return i;
2527}
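/* Usage sketch: during the CLC handshake the rkey/vaddr pair from the
 * peer's accept/confirm message is added via, roughly,
 *
 *	conn->rtoken_idx = smc_rtoken_add(lnk, clc->r0.rmb_dma_addr,
 *					  clc->r0.rmb_rkey);
 *
 * which is what smc_rmb_rtoken_handling() below wraps; duplicates from
 * a repeated handshake just return the existing slot.
 */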
2528
2529/* delete an rtoken from all links */
2530int smc_rtoken_delete(struct smc_link *lnk, __be32 nw_rkey)
2531{
2532 struct smc_link_group *lgr = smc_get_lgr(lnk);
2533 u32 rkey = ntohl(nw_rkey);
2534 int i, j;
2535
2536 for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
2537 if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
2538 test_bit(i, lgr->rtokens_used_mask)) {
2539 for (j = 0; j < SMC_LINKS_PER_LGR_MAX; j++) {
2540 lgr->rtokens[i][j].rkey = 0;
2541 lgr->rtokens[i][j].dma_addr = 0;
2542 }
2543 clear_bit(i, lgr->rtokens_used_mask);
2544 return 0;
2545 }
2546 }
2547 return -ENOENT;
2548}
2549
2550/* save rkey and dma_addr received from peer during clc handshake */
2551int smc_rmb_rtoken_handling(struct smc_connection *conn,
2552 struct smc_link *lnk,
2553 struct smc_clc_msg_accept_confirm *clc)
2554{
2555 conn->rtoken_idx = smc_rtoken_add(lnk, clc->r0.rmb_dma_addr,
2556 clc->r0.rmb_rkey);
2557 if (conn->rtoken_idx < 0)
2558 return conn->rtoken_idx;
2559 return 0;
2560}
2561
2562static void smc_core_going_away(void)
2563{
2564 struct smc_ib_device *smcibdev;
2565 struct smcd_dev *smcd;
2566
2567 mutex_lock(&smc_ib_devices.mutex);
2568 list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
2569 int i;
2570
2571 for (i = 0; i < SMC_MAX_PORTS; i++)
2572 set_bit(i, smcibdev->ports_going_away);
2573 }
2574 mutex_unlock(&smc_ib_devices.mutex);
2575
2576 mutex_lock(&smcd_dev_list.mutex);
2577 list_for_each_entry(smcd, &smcd_dev_list.list, list) {
2578 smcd->going_away = 1;
2579 }
2580 mutex_unlock(&smcd_dev_list.mutex);
2581}
2582
2583/* Clean up all SMC link groups */
2584static void smc_lgrs_shutdown(void)
2585{
2586 struct smcd_dev *smcd;
2587
2588 smc_core_going_away();
2589
2590 smc_smcr_terminate_all(NULL);
2591
2592 mutex_lock(&smcd_dev_list.mutex);
2593 list_for_each_entry(smcd, &smcd_dev_list.list, list)
2594 smc_smcd_terminate_all(smcd);
2595 mutex_unlock(&smcd_dev_list.mutex);
2596}
2597
2598static int smc_core_reboot_event(struct notifier_block *this,
2599 unsigned long event, void *ptr)
2600{
2601 smc_lgrs_shutdown();
2602 smc_ib_unregister_client();
2603 smc_ism_exit();
2604 return 0;
2605}
2606
2607static struct notifier_block smc_reboot_notifier = {
2608 .notifier_call = smc_core_reboot_event,
2609};
2610
2611int __init smc_core_init(void)
2612{
2613 return register_reboot_notifier(&smc_reboot_notifier);
2614}
2615
2616/* Called (from smc_exit) when module is removed */
2617void smc_core_exit(void)
2618{
2619 unregister_reboot_notifier(&smc_reboot_notifier);
2620 smc_lgrs_shutdown();
2621}