Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/infiniband/core/cma.c at v4.10-rc6 (4482 lines, 116 kB)
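Before the file body, a minimal, hypothetical consumer sketch of the rdma_cm API this file exports. It relies only on the rdma_create_id(), rdma_destroy_id() and rdma_event_msg() signatures visible in the source below and on the handler convention documented in its comments (returning non-zero from the event handler destroys the CM ID). The names example_cm_handler and example_create_and_destroy are illustrative and are not part of cma.c.

/*
 * Hypothetical consumer sketch, not part of cma.c: create an RDMA CM ID,
 * log its events through rdma_event_msg(), then tear it down again.
 */
#include <linux/err.h>
#include <linux/printk.h>
#include <net/net_namespace.h>
#include <rdma/rdma_cm.h>

static int example_cm_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	pr_info("rdma_cm event: %s (status %d)\n",
		rdma_event_msg(event->event), event->status);
	return 0;	/* non-zero would tell the CMA core to destroy the ID */
}

static int example_create_and_destroy(void)
{
	struct rdma_cm_id *id;

	/* TCP port space, reliable-connected QP type, no private context */
	id = rdma_create_id(&init_net, example_cm_handler, NULL,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return PTR_ERR(id);

	rdma_destroy_id(id);
	return 0;
}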
1/* 2 * Copyright (c) 2005 Voltaire Inc. All rights reserved. 3 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved. 4 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved. 5 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 34 */ 35 36#include <linux/completion.h> 37#include <linux/in.h> 38#include <linux/in6.h> 39#include <linux/mutex.h> 40#include <linux/random.h> 41#include <linux/igmp.h> 42#include <linux/idr.h> 43#include <linux/inetdevice.h> 44#include <linux/slab.h> 45#include <linux/module.h> 46#include <net/route.h> 47 48#include <net/net_namespace.h> 49#include <net/netns/generic.h> 50#include <net/tcp.h> 51#include <net/ipv6.h> 52#include <net/ip_fib.h> 53#include <net/ip6_route.h> 54 55#include <rdma/rdma_cm.h> 56#include <rdma/rdma_cm_ib.h> 57#include <rdma/rdma_netlink.h> 58#include <rdma/ib.h> 59#include <rdma/ib_cache.h> 60#include <rdma/ib_cm.h> 61#include <rdma/ib_sa.h> 62#include <rdma/iw_cm.h> 63 64#include "core_priv.h" 65 66MODULE_AUTHOR("Sean Hefty"); 67MODULE_DESCRIPTION("Generic RDMA CM Agent"); 68MODULE_LICENSE("Dual BSD/GPL"); 69 70#define CMA_CM_RESPONSE_TIMEOUT 20 71#define CMA_QUERY_CLASSPORT_INFO_TIMEOUT 3000 72#define CMA_MAX_CM_RETRIES 15 73#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24) 74#define CMA_IBOE_PACKET_LIFETIME 18 75 76static const char * const cma_events[] = { 77 [RDMA_CM_EVENT_ADDR_RESOLVED] = "address resolved", 78 [RDMA_CM_EVENT_ADDR_ERROR] = "address error", 79 [RDMA_CM_EVENT_ROUTE_RESOLVED] = "route resolved ", 80 [RDMA_CM_EVENT_ROUTE_ERROR] = "route error", 81 [RDMA_CM_EVENT_CONNECT_REQUEST] = "connect request", 82 [RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response", 83 [RDMA_CM_EVENT_CONNECT_ERROR] = "connect error", 84 [RDMA_CM_EVENT_UNREACHABLE] = "unreachable", 85 [RDMA_CM_EVENT_REJECTED] = "rejected", 86 [RDMA_CM_EVENT_ESTABLISHED] = "established", 87 [RDMA_CM_EVENT_DISCONNECTED] = "disconnected", 88 [RDMA_CM_EVENT_DEVICE_REMOVAL] = "device removal", 89 [RDMA_CM_EVENT_MULTICAST_JOIN] = "multicast join", 90 [RDMA_CM_EVENT_MULTICAST_ERROR] = "multicast error", 91 [RDMA_CM_EVENT_ADDR_CHANGE] = "address change", 92 [RDMA_CM_EVENT_TIMEWAIT_EXIT] 
= "timewait exit", 93}; 94 95const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event) 96{ 97 size_t index = event; 98 99 return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ? 100 cma_events[index] : "unrecognized event"; 101} 102EXPORT_SYMBOL(rdma_event_msg); 103 104const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id, 105 int reason) 106{ 107 if (rdma_ib_or_roce(id->device, id->port_num)) 108 return ibcm_reject_msg(reason); 109 110 if (rdma_protocol_iwarp(id->device, id->port_num)) 111 return iwcm_reject_msg(reason); 112 113 WARN_ON_ONCE(1); 114 return "unrecognized transport"; 115} 116EXPORT_SYMBOL(rdma_reject_msg); 117 118bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason) 119{ 120 if (rdma_ib_or_roce(id->device, id->port_num)) 121 return reason == IB_CM_REJ_CONSUMER_DEFINED; 122 123 if (rdma_protocol_iwarp(id->device, id->port_num)) 124 return reason == -ECONNREFUSED; 125 126 WARN_ON_ONCE(1); 127 return false; 128} 129EXPORT_SYMBOL(rdma_is_consumer_reject); 130 131const void *rdma_consumer_reject_data(struct rdma_cm_id *id, 132 struct rdma_cm_event *ev, u8 *data_len) 133{ 134 const void *p; 135 136 if (rdma_is_consumer_reject(id, ev->status)) { 137 *data_len = ev->param.conn.private_data_len; 138 p = ev->param.conn.private_data; 139 } else { 140 *data_len = 0; 141 p = NULL; 142 } 143 return p; 144} 145EXPORT_SYMBOL(rdma_consumer_reject_data); 146 147static void cma_add_one(struct ib_device *device); 148static void cma_remove_one(struct ib_device *device, void *client_data); 149 150static struct ib_client cma_client = { 151 .name = "cma", 152 .add = cma_add_one, 153 .remove = cma_remove_one 154}; 155 156static struct ib_sa_client sa_client; 157static struct rdma_addr_client addr_client; 158static LIST_HEAD(dev_list); 159static LIST_HEAD(listen_any_list); 160static DEFINE_MUTEX(lock); 161static struct workqueue_struct *cma_wq; 162static unsigned int cma_pernet_id; 163 164struct cma_pernet { 165 struct idr tcp_ps; 166 struct idr udp_ps; 167 struct idr ipoib_ps; 168 struct idr ib_ps; 169}; 170 171static struct cma_pernet *cma_pernet(struct net *net) 172{ 173 return net_generic(net, cma_pernet_id); 174} 175 176static struct idr *cma_pernet_idr(struct net *net, enum rdma_port_space ps) 177{ 178 struct cma_pernet *pernet = cma_pernet(net); 179 180 switch (ps) { 181 case RDMA_PS_TCP: 182 return &pernet->tcp_ps; 183 case RDMA_PS_UDP: 184 return &pernet->udp_ps; 185 case RDMA_PS_IPOIB: 186 return &pernet->ipoib_ps; 187 case RDMA_PS_IB: 188 return &pernet->ib_ps; 189 default: 190 return NULL; 191 } 192} 193 194struct cma_device { 195 struct list_head list; 196 struct ib_device *device; 197 struct completion comp; 198 atomic_t refcount; 199 struct list_head id_list; 200 enum ib_gid_type *default_gid_type; 201}; 202 203struct rdma_bind_list { 204 enum rdma_port_space ps; 205 struct hlist_head owners; 206 unsigned short port; 207}; 208 209struct class_port_info_context { 210 struct ib_class_port_info *class_port_info; 211 struct ib_device *device; 212 struct completion done; 213 struct ib_sa_query *sa_query; 214 u8 port_num; 215}; 216 217static int cma_ps_alloc(struct net *net, enum rdma_port_space ps, 218 struct rdma_bind_list *bind_list, int snum) 219{ 220 struct idr *idr = cma_pernet_idr(net, ps); 221 222 return idr_alloc(idr, bind_list, snum, snum + 1, GFP_KERNEL); 223} 224 225static struct rdma_bind_list *cma_ps_find(struct net *net, 226 enum rdma_port_space ps, int snum) 227{ 228 struct idr *idr = cma_pernet_idr(net, ps); 229 230 
return idr_find(idr, snum); 231} 232 233static void cma_ps_remove(struct net *net, enum rdma_port_space ps, int snum) 234{ 235 struct idr *idr = cma_pernet_idr(net, ps); 236 237 idr_remove(idr, snum); 238} 239 240enum { 241 CMA_OPTION_AFONLY, 242}; 243 244void cma_ref_dev(struct cma_device *cma_dev) 245{ 246 atomic_inc(&cma_dev->refcount); 247} 248 249struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter, 250 void *cookie) 251{ 252 struct cma_device *cma_dev; 253 struct cma_device *found_cma_dev = NULL; 254 255 mutex_lock(&lock); 256 257 list_for_each_entry(cma_dev, &dev_list, list) 258 if (filter(cma_dev->device, cookie)) { 259 found_cma_dev = cma_dev; 260 break; 261 } 262 263 if (found_cma_dev) 264 cma_ref_dev(found_cma_dev); 265 mutex_unlock(&lock); 266 return found_cma_dev; 267} 268 269int cma_get_default_gid_type(struct cma_device *cma_dev, 270 unsigned int port) 271{ 272 if (port < rdma_start_port(cma_dev->device) || 273 port > rdma_end_port(cma_dev->device)) 274 return -EINVAL; 275 276 return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)]; 277} 278 279int cma_set_default_gid_type(struct cma_device *cma_dev, 280 unsigned int port, 281 enum ib_gid_type default_gid_type) 282{ 283 unsigned long supported_gids; 284 285 if (port < rdma_start_port(cma_dev->device) || 286 port > rdma_end_port(cma_dev->device)) 287 return -EINVAL; 288 289 supported_gids = roce_gid_type_mask_support(cma_dev->device, port); 290 291 if (!(supported_gids & 1 << default_gid_type)) 292 return -EINVAL; 293 294 cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] = 295 default_gid_type; 296 297 return 0; 298} 299 300struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev) 301{ 302 return cma_dev->device; 303} 304 305/* 306 * Device removal can occur at anytime, so we need extra handling to 307 * serialize notifying the user of device removal with other callbacks. 308 * We do this by disabling removal notification while a callback is in process, 309 * and reporting it after the callback completes. 
310 */ 311struct rdma_id_private { 312 struct rdma_cm_id id; 313 314 struct rdma_bind_list *bind_list; 315 struct hlist_node node; 316 struct list_head list; /* listen_any_list or cma_device.list */ 317 struct list_head listen_list; /* per device listens */ 318 struct cma_device *cma_dev; 319 struct list_head mc_list; 320 321 int internal_id; 322 enum rdma_cm_state state; 323 spinlock_t lock; 324 struct mutex qp_mutex; 325 326 struct completion comp; 327 atomic_t refcount; 328 struct mutex handler_mutex; 329 330 int backlog; 331 int timeout_ms; 332 struct ib_sa_query *query; 333 int query_id; 334 union { 335 struct ib_cm_id *ib; 336 struct iw_cm_id *iw; 337 } cm_id; 338 339 u32 seq_num; 340 u32 qkey; 341 u32 qp_num; 342 pid_t owner; 343 u32 options; 344 u8 srq; 345 u8 tos; 346 u8 reuseaddr; 347 u8 afonly; 348 enum ib_gid_type gid_type; 349}; 350 351struct cma_multicast { 352 struct rdma_id_private *id_priv; 353 union { 354 struct ib_sa_multicast *ib; 355 } multicast; 356 struct list_head list; 357 void *context; 358 struct sockaddr_storage addr; 359 struct kref mcref; 360 bool igmp_joined; 361 u8 join_state; 362}; 363 364struct cma_work { 365 struct work_struct work; 366 struct rdma_id_private *id; 367 enum rdma_cm_state old_state; 368 enum rdma_cm_state new_state; 369 struct rdma_cm_event event; 370}; 371 372struct cma_ndev_work { 373 struct work_struct work; 374 struct rdma_id_private *id; 375 struct rdma_cm_event event; 376}; 377 378struct iboe_mcast_work { 379 struct work_struct work; 380 struct rdma_id_private *id; 381 struct cma_multicast *mc; 382}; 383 384union cma_ip_addr { 385 struct in6_addr ip6; 386 struct { 387 __be32 pad[3]; 388 __be32 addr; 389 } ip4; 390}; 391 392struct cma_hdr { 393 u8 cma_version; 394 u8 ip_version; /* IP version: 7:4 */ 395 __be16 port; 396 union cma_ip_addr src_addr; 397 union cma_ip_addr dst_addr; 398}; 399 400#define CMA_VERSION 0x00 401 402struct cma_req_info { 403 struct ib_device *device; 404 int port; 405 union ib_gid local_gid; 406 __be64 service_id; 407 u16 pkey; 408 bool has_gid:1; 409}; 410 411static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp) 412{ 413 unsigned long flags; 414 int ret; 415 416 spin_lock_irqsave(&id_priv->lock, flags); 417 ret = (id_priv->state == comp); 418 spin_unlock_irqrestore(&id_priv->lock, flags); 419 return ret; 420} 421 422static int cma_comp_exch(struct rdma_id_private *id_priv, 423 enum rdma_cm_state comp, enum rdma_cm_state exch) 424{ 425 unsigned long flags; 426 int ret; 427 428 spin_lock_irqsave(&id_priv->lock, flags); 429 if ((ret = (id_priv->state == comp))) 430 id_priv->state = exch; 431 spin_unlock_irqrestore(&id_priv->lock, flags); 432 return ret; 433} 434 435static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv, 436 enum rdma_cm_state exch) 437{ 438 unsigned long flags; 439 enum rdma_cm_state old; 440 441 spin_lock_irqsave(&id_priv->lock, flags); 442 old = id_priv->state; 443 id_priv->state = exch; 444 spin_unlock_irqrestore(&id_priv->lock, flags); 445 return old; 446} 447 448static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr) 449{ 450 return hdr->ip_version >> 4; 451} 452 453static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver) 454{ 455 hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF); 456} 457 458static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join) 459{ 460 struct in_device *in_dev = NULL; 461 462 if (ndev) { 463 rtnl_lock(); 464 in_dev = __in_dev_get_rtnl(ndev); 465 if (in_dev) { 466 if (join) 467 
ip_mc_inc_group(in_dev, 468 *(__be32 *)(mgid->raw + 12)); 469 else 470 ip_mc_dec_group(in_dev, 471 *(__be32 *)(mgid->raw + 12)); 472 } 473 rtnl_unlock(); 474 } 475 return (in_dev) ? 0 : -ENODEV; 476} 477 478static void _cma_attach_to_dev(struct rdma_id_private *id_priv, 479 struct cma_device *cma_dev) 480{ 481 cma_ref_dev(cma_dev); 482 id_priv->cma_dev = cma_dev; 483 id_priv->gid_type = 0; 484 id_priv->id.device = cma_dev->device; 485 id_priv->id.route.addr.dev_addr.transport = 486 rdma_node_get_transport(cma_dev->device->node_type); 487 list_add_tail(&id_priv->list, &cma_dev->id_list); 488} 489 490static void cma_attach_to_dev(struct rdma_id_private *id_priv, 491 struct cma_device *cma_dev) 492{ 493 _cma_attach_to_dev(id_priv, cma_dev); 494 id_priv->gid_type = 495 cma_dev->default_gid_type[id_priv->id.port_num - 496 rdma_start_port(cma_dev->device)]; 497} 498 499void cma_deref_dev(struct cma_device *cma_dev) 500{ 501 if (atomic_dec_and_test(&cma_dev->refcount)) 502 complete(&cma_dev->comp); 503} 504 505static inline void release_mc(struct kref *kref) 506{ 507 struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref); 508 509 kfree(mc->multicast.ib); 510 kfree(mc); 511} 512 513static void cma_release_dev(struct rdma_id_private *id_priv) 514{ 515 mutex_lock(&lock); 516 list_del(&id_priv->list); 517 cma_deref_dev(id_priv->cma_dev); 518 id_priv->cma_dev = NULL; 519 mutex_unlock(&lock); 520} 521 522static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv) 523{ 524 return (struct sockaddr *) &id_priv->id.route.addr.src_addr; 525} 526 527static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv) 528{ 529 return (struct sockaddr *) &id_priv->id.route.addr.dst_addr; 530} 531 532static inline unsigned short cma_family(struct rdma_id_private *id_priv) 533{ 534 return id_priv->id.route.addr.src_addr.ss_family; 535} 536 537static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey) 538{ 539 struct ib_sa_mcmember_rec rec; 540 int ret = 0; 541 542 if (id_priv->qkey) { 543 if (qkey && id_priv->qkey != qkey) 544 return -EINVAL; 545 return 0; 546 } 547 548 if (qkey) { 549 id_priv->qkey = qkey; 550 return 0; 551 } 552 553 switch (id_priv->id.ps) { 554 case RDMA_PS_UDP: 555 case RDMA_PS_IB: 556 id_priv->qkey = RDMA_UDP_QKEY; 557 break; 558 case RDMA_PS_IPOIB: 559 ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid); 560 ret = ib_sa_get_mcmember_rec(id_priv->id.device, 561 id_priv->id.port_num, &rec.mgid, 562 &rec); 563 if (!ret) 564 id_priv->qkey = be32_to_cpu(rec.qkey); 565 break; 566 default: 567 break; 568 } 569 return ret; 570} 571 572static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr) 573{ 574 dev_addr->dev_type = ARPHRD_INFINIBAND; 575 rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr); 576 ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey)); 577} 578 579static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr) 580{ 581 int ret; 582 583 if (addr->sa_family != AF_IB) { 584 ret = rdma_translate_ip(addr, dev_addr, NULL); 585 } else { 586 cma_translate_ib((struct sockaddr_ib *) addr, dev_addr); 587 ret = 0; 588 } 589 590 return ret; 591} 592 593static inline int cma_validate_port(struct ib_device *device, u8 port, 594 enum ib_gid_type gid_type, 595 union ib_gid *gid, int dev_type, 596 int bound_if_index) 597{ 598 int ret = -ENODEV; 599 struct net_device *ndev = NULL; 600 601 if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port)) 602 return ret; 603 604 
if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port)) 605 return ret; 606 607 if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) { 608 ndev = dev_get_by_index(&init_net, bound_if_index); 609 if (ndev && ndev->flags & IFF_LOOPBACK) { 610 pr_info("detected loopback device\n"); 611 dev_put(ndev); 612 613 if (!device->get_netdev) 614 return -EOPNOTSUPP; 615 616 ndev = device->get_netdev(device, port); 617 if (!ndev) 618 return -ENODEV; 619 } 620 } else { 621 gid_type = IB_GID_TYPE_IB; 622 } 623 624 ret = ib_find_cached_gid_by_port(device, gid, gid_type, port, 625 ndev, NULL); 626 627 if (ndev) 628 dev_put(ndev); 629 630 return ret; 631} 632 633static int cma_acquire_dev(struct rdma_id_private *id_priv, 634 struct rdma_id_private *listen_id_priv) 635{ 636 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 637 struct cma_device *cma_dev; 638 union ib_gid gid, iboe_gid, *gidp; 639 int ret = -ENODEV; 640 u8 port; 641 642 if (dev_addr->dev_type != ARPHRD_INFINIBAND && 643 id_priv->id.ps == RDMA_PS_IPOIB) 644 return -EINVAL; 645 646 mutex_lock(&lock); 647 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, 648 &iboe_gid); 649 650 memcpy(&gid, dev_addr->src_dev_addr + 651 rdma_addr_gid_offset(dev_addr), sizeof gid); 652 653 if (listen_id_priv) { 654 cma_dev = listen_id_priv->cma_dev; 655 port = listen_id_priv->id.port_num; 656 gidp = rdma_protocol_roce(cma_dev->device, port) ? 657 &iboe_gid : &gid; 658 659 ret = cma_validate_port(cma_dev->device, port, 660 rdma_protocol_ib(cma_dev->device, port) ? 661 IB_GID_TYPE_IB : 662 listen_id_priv->gid_type, gidp, 663 dev_addr->dev_type, 664 dev_addr->bound_dev_if); 665 if (!ret) { 666 id_priv->id.port_num = port; 667 goto out; 668 } 669 } 670 671 list_for_each_entry(cma_dev, &dev_list, list) { 672 for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) { 673 if (listen_id_priv && 674 listen_id_priv->cma_dev == cma_dev && 675 listen_id_priv->id.port_num == port) 676 continue; 677 678 gidp = rdma_protocol_roce(cma_dev->device, port) ? 679 &iboe_gid : &gid; 680 681 ret = cma_validate_port(cma_dev->device, port, 682 rdma_protocol_ib(cma_dev->device, port) ? 683 IB_GID_TYPE_IB : 684 cma_dev->default_gid_type[port - 1], 685 gidp, dev_addr->dev_type, 686 dev_addr->bound_dev_if); 687 if (!ret) { 688 id_priv->id.port_num = port; 689 goto out; 690 } 691 } 692 } 693 694out: 695 if (!ret) 696 cma_attach_to_dev(id_priv, cma_dev); 697 698 mutex_unlock(&lock); 699 return ret; 700} 701 702/* 703 * Select the source IB device and address to reach the destination IB address. 
704 */ 705static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) 706{ 707 struct cma_device *cma_dev, *cur_dev; 708 struct sockaddr_ib *addr; 709 union ib_gid gid, sgid, *dgid; 710 u16 pkey, index; 711 u8 p; 712 int i; 713 714 cma_dev = NULL; 715 addr = (struct sockaddr_ib *) cma_dst_addr(id_priv); 716 dgid = (union ib_gid *) &addr->sib_addr; 717 pkey = ntohs(addr->sib_pkey); 718 719 list_for_each_entry(cur_dev, &dev_list, list) { 720 for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) { 721 if (!rdma_cap_af_ib(cur_dev->device, p)) 722 continue; 723 724 if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index)) 725 continue; 726 727 for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i, 728 &gid, NULL); 729 i++) { 730 if (!memcmp(&gid, dgid, sizeof(gid))) { 731 cma_dev = cur_dev; 732 sgid = gid; 733 id_priv->id.port_num = p; 734 goto found; 735 } 736 737 if (!cma_dev && (gid.global.subnet_prefix == 738 dgid->global.subnet_prefix)) { 739 cma_dev = cur_dev; 740 sgid = gid; 741 id_priv->id.port_num = p; 742 } 743 } 744 } 745 } 746 747 if (!cma_dev) 748 return -ENODEV; 749 750found: 751 cma_attach_to_dev(id_priv, cma_dev); 752 addr = (struct sockaddr_ib *) cma_src_addr(id_priv); 753 memcpy(&addr->sib_addr, &sgid, sizeof sgid); 754 cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr); 755 return 0; 756} 757 758static void cma_deref_id(struct rdma_id_private *id_priv) 759{ 760 if (atomic_dec_and_test(&id_priv->refcount)) 761 complete(&id_priv->comp); 762} 763 764struct rdma_cm_id *rdma_create_id(struct net *net, 765 rdma_cm_event_handler event_handler, 766 void *context, enum rdma_port_space ps, 767 enum ib_qp_type qp_type) 768{ 769 struct rdma_id_private *id_priv; 770 771 id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL); 772 if (!id_priv) 773 return ERR_PTR(-ENOMEM); 774 775 id_priv->owner = task_pid_nr(current); 776 id_priv->state = RDMA_CM_IDLE; 777 id_priv->id.context = context; 778 id_priv->id.event_handler = event_handler; 779 id_priv->id.ps = ps; 780 id_priv->id.qp_type = qp_type; 781 spin_lock_init(&id_priv->lock); 782 mutex_init(&id_priv->qp_mutex); 783 init_completion(&id_priv->comp); 784 atomic_set(&id_priv->refcount, 1); 785 mutex_init(&id_priv->handler_mutex); 786 INIT_LIST_HEAD(&id_priv->listen_list); 787 INIT_LIST_HEAD(&id_priv->mc_list); 788 get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num); 789 id_priv->id.route.addr.dev_addr.net = get_net(net); 790 791 return &id_priv->id; 792} 793EXPORT_SYMBOL(rdma_create_id); 794 795static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) 796{ 797 struct ib_qp_attr qp_attr; 798 int qp_attr_mask, ret; 799 800 qp_attr.qp_state = IB_QPS_INIT; 801 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 802 if (ret) 803 return ret; 804 805 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); 806 if (ret) 807 return ret; 808 809 qp_attr.qp_state = IB_QPS_RTR; 810 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE); 811 if (ret) 812 return ret; 813 814 qp_attr.qp_state = IB_QPS_RTS; 815 qp_attr.sq_psn = 0; 816 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN); 817 818 return ret; 819} 820 821static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) 822{ 823 struct ib_qp_attr qp_attr; 824 int qp_attr_mask, ret; 825 826 qp_attr.qp_state = IB_QPS_INIT; 827 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 828 if (ret) 829 return ret; 830 831 return ib_modify_qp(qp, &qp_attr, qp_attr_mask); 832} 833 834int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd, 835 
struct ib_qp_init_attr *qp_init_attr) 836{ 837 struct rdma_id_private *id_priv; 838 struct ib_qp *qp; 839 int ret; 840 841 id_priv = container_of(id, struct rdma_id_private, id); 842 if (id->device != pd->device) 843 return -EINVAL; 844 845 qp_init_attr->port_num = id->port_num; 846 qp = ib_create_qp(pd, qp_init_attr); 847 if (IS_ERR(qp)) 848 return PTR_ERR(qp); 849 850 if (id->qp_type == IB_QPT_UD) 851 ret = cma_init_ud_qp(id_priv, qp); 852 else 853 ret = cma_init_conn_qp(id_priv, qp); 854 if (ret) 855 goto err; 856 857 id->qp = qp; 858 id_priv->qp_num = qp->qp_num; 859 id_priv->srq = (qp->srq != NULL); 860 return 0; 861err: 862 ib_destroy_qp(qp); 863 return ret; 864} 865EXPORT_SYMBOL(rdma_create_qp); 866 867void rdma_destroy_qp(struct rdma_cm_id *id) 868{ 869 struct rdma_id_private *id_priv; 870 871 id_priv = container_of(id, struct rdma_id_private, id); 872 mutex_lock(&id_priv->qp_mutex); 873 ib_destroy_qp(id_priv->id.qp); 874 id_priv->id.qp = NULL; 875 mutex_unlock(&id_priv->qp_mutex); 876} 877EXPORT_SYMBOL(rdma_destroy_qp); 878 879static int cma_modify_qp_rtr(struct rdma_id_private *id_priv, 880 struct rdma_conn_param *conn_param) 881{ 882 struct ib_qp_attr qp_attr; 883 int qp_attr_mask, ret; 884 union ib_gid sgid; 885 886 mutex_lock(&id_priv->qp_mutex); 887 if (!id_priv->id.qp) { 888 ret = 0; 889 goto out; 890 } 891 892 /* Need to update QP attributes from default values. */ 893 qp_attr.qp_state = IB_QPS_INIT; 894 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 895 if (ret) 896 goto out; 897 898 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); 899 if (ret) 900 goto out; 901 902 qp_attr.qp_state = IB_QPS_RTR; 903 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 904 if (ret) 905 goto out; 906 907 ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num, 908 qp_attr.ah_attr.grh.sgid_index, &sgid, NULL); 909 if (ret) 910 goto out; 911 912 BUG_ON(id_priv->cma_dev->device != id_priv->id.device); 913 914 if (conn_param) 915 qp_attr.max_dest_rd_atomic = conn_param->responder_resources; 916 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); 917out: 918 mutex_unlock(&id_priv->qp_mutex); 919 return ret; 920} 921 922static int cma_modify_qp_rts(struct rdma_id_private *id_priv, 923 struct rdma_conn_param *conn_param) 924{ 925 struct ib_qp_attr qp_attr; 926 int qp_attr_mask, ret; 927 928 mutex_lock(&id_priv->qp_mutex); 929 if (!id_priv->id.qp) { 930 ret = 0; 931 goto out; 932 } 933 934 qp_attr.qp_state = IB_QPS_RTS; 935 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 936 if (ret) 937 goto out; 938 939 if (conn_param) 940 qp_attr.max_rd_atomic = conn_param->initiator_depth; 941 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); 942out: 943 mutex_unlock(&id_priv->qp_mutex); 944 return ret; 945} 946 947static int cma_modify_qp_err(struct rdma_id_private *id_priv) 948{ 949 struct ib_qp_attr qp_attr; 950 int ret; 951 952 mutex_lock(&id_priv->qp_mutex); 953 if (!id_priv->id.qp) { 954 ret = 0; 955 goto out; 956 } 957 958 qp_attr.qp_state = IB_QPS_ERR; 959 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE); 960out: 961 mutex_unlock(&id_priv->qp_mutex); 962 return ret; 963} 964 965static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv, 966 struct ib_qp_attr *qp_attr, int *qp_attr_mask) 967{ 968 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 969 int ret; 970 u16 pkey; 971 972 if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num)) 973 pkey = 0xffff; 974 else 975 pkey = 
ib_addr_get_pkey(dev_addr); 976 977 ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num, 978 pkey, &qp_attr->pkey_index); 979 if (ret) 980 return ret; 981 982 qp_attr->port_num = id_priv->id.port_num; 983 *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT; 984 985 if (id_priv->id.qp_type == IB_QPT_UD) { 986 ret = cma_set_qkey(id_priv, 0); 987 if (ret) 988 return ret; 989 990 qp_attr->qkey = id_priv->qkey; 991 *qp_attr_mask |= IB_QP_QKEY; 992 } else { 993 qp_attr->qp_access_flags = 0; 994 *qp_attr_mask |= IB_QP_ACCESS_FLAGS; 995 } 996 return 0; 997} 998 999int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, 1000 int *qp_attr_mask) 1001{ 1002 struct rdma_id_private *id_priv; 1003 int ret = 0; 1004 1005 id_priv = container_of(id, struct rdma_id_private, id); 1006 if (rdma_cap_ib_cm(id->device, id->port_num)) { 1007 if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD)) 1008 ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask); 1009 else 1010 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, 1011 qp_attr_mask); 1012 1013 if (qp_attr->qp_state == IB_QPS_RTR) 1014 qp_attr->rq_psn = id_priv->seq_num; 1015 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { 1016 if (!id_priv->cm_id.iw) { 1017 qp_attr->qp_access_flags = 0; 1018 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS; 1019 } else 1020 ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, 1021 qp_attr_mask); 1022 } else 1023 ret = -ENOSYS; 1024 1025 return ret; 1026} 1027EXPORT_SYMBOL(rdma_init_qp_attr); 1028 1029static inline int cma_zero_addr(struct sockaddr *addr) 1030{ 1031 switch (addr->sa_family) { 1032 case AF_INET: 1033 return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr); 1034 case AF_INET6: 1035 return ipv6_addr_any(&((struct sockaddr_in6 *) addr)->sin6_addr); 1036 case AF_IB: 1037 return ib_addr_any(&((struct sockaddr_ib *) addr)->sib_addr); 1038 default: 1039 return 0; 1040 } 1041} 1042 1043static inline int cma_loopback_addr(struct sockaddr *addr) 1044{ 1045 switch (addr->sa_family) { 1046 case AF_INET: 1047 return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr); 1048 case AF_INET6: 1049 return ipv6_addr_loopback(&((struct sockaddr_in6 *) addr)->sin6_addr); 1050 case AF_IB: 1051 return ib_addr_loopback(&((struct sockaddr_ib *) addr)->sib_addr); 1052 default: 1053 return 0; 1054 } 1055} 1056 1057static inline int cma_any_addr(struct sockaddr *addr) 1058{ 1059 return cma_zero_addr(addr) || cma_loopback_addr(addr); 1060} 1061 1062static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst) 1063{ 1064 if (src->sa_family != dst->sa_family) 1065 return -1; 1066 1067 switch (src->sa_family) { 1068 case AF_INET: 1069 return ((struct sockaddr_in *) src)->sin_addr.s_addr != 1070 ((struct sockaddr_in *) dst)->sin_addr.s_addr; 1071 case AF_INET6: 1072 return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr, 1073 &((struct sockaddr_in6 *) dst)->sin6_addr); 1074 default: 1075 return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr, 1076 &((struct sockaddr_ib *) dst)->sib_addr); 1077 } 1078} 1079 1080static __be16 cma_port(struct sockaddr *addr) 1081{ 1082 struct sockaddr_ib *sib; 1083 1084 switch (addr->sa_family) { 1085 case AF_INET: 1086 return ((struct sockaddr_in *) addr)->sin_port; 1087 case AF_INET6: 1088 return ((struct sockaddr_in6 *) addr)->sin6_port; 1089 case AF_IB: 1090 sib = (struct sockaddr_ib *) addr; 1091 return htons((u16) (be64_to_cpu(sib->sib_sid) & 1092 be64_to_cpu(sib->sib_sid_mask))); 1093 default: 1094 
return 0; 1095 } 1096} 1097 1098static inline int cma_any_port(struct sockaddr *addr) 1099{ 1100 return !cma_port(addr); 1101} 1102 1103static void cma_save_ib_info(struct sockaddr *src_addr, 1104 struct sockaddr *dst_addr, 1105 struct rdma_cm_id *listen_id, 1106 struct ib_sa_path_rec *path) 1107{ 1108 struct sockaddr_ib *listen_ib, *ib; 1109 1110 listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr; 1111 if (src_addr) { 1112 ib = (struct sockaddr_ib *)src_addr; 1113 ib->sib_family = AF_IB; 1114 if (path) { 1115 ib->sib_pkey = path->pkey; 1116 ib->sib_flowinfo = path->flow_label; 1117 memcpy(&ib->sib_addr, &path->sgid, 16); 1118 ib->sib_sid = path->service_id; 1119 ib->sib_scope_id = 0; 1120 } else { 1121 ib->sib_pkey = listen_ib->sib_pkey; 1122 ib->sib_flowinfo = listen_ib->sib_flowinfo; 1123 ib->sib_addr = listen_ib->sib_addr; 1124 ib->sib_sid = listen_ib->sib_sid; 1125 ib->sib_scope_id = listen_ib->sib_scope_id; 1126 } 1127 ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL); 1128 } 1129 if (dst_addr) { 1130 ib = (struct sockaddr_ib *)dst_addr; 1131 ib->sib_family = AF_IB; 1132 if (path) { 1133 ib->sib_pkey = path->pkey; 1134 ib->sib_flowinfo = path->flow_label; 1135 memcpy(&ib->sib_addr, &path->dgid, 16); 1136 } 1137 } 1138} 1139 1140static void cma_save_ip4_info(struct sockaddr_in *src_addr, 1141 struct sockaddr_in *dst_addr, 1142 struct cma_hdr *hdr, 1143 __be16 local_port) 1144{ 1145 if (src_addr) { 1146 *src_addr = (struct sockaddr_in) { 1147 .sin_family = AF_INET, 1148 .sin_addr.s_addr = hdr->dst_addr.ip4.addr, 1149 .sin_port = local_port, 1150 }; 1151 } 1152 1153 if (dst_addr) { 1154 *dst_addr = (struct sockaddr_in) { 1155 .sin_family = AF_INET, 1156 .sin_addr.s_addr = hdr->src_addr.ip4.addr, 1157 .sin_port = hdr->port, 1158 }; 1159 } 1160} 1161 1162static void cma_save_ip6_info(struct sockaddr_in6 *src_addr, 1163 struct sockaddr_in6 *dst_addr, 1164 struct cma_hdr *hdr, 1165 __be16 local_port) 1166{ 1167 if (src_addr) { 1168 *src_addr = (struct sockaddr_in6) { 1169 .sin6_family = AF_INET6, 1170 .sin6_addr = hdr->dst_addr.ip6, 1171 .sin6_port = local_port, 1172 }; 1173 } 1174 1175 if (dst_addr) { 1176 *dst_addr = (struct sockaddr_in6) { 1177 .sin6_family = AF_INET6, 1178 .sin6_addr = hdr->src_addr.ip6, 1179 .sin6_port = hdr->port, 1180 }; 1181 } 1182} 1183 1184static u16 cma_port_from_service_id(__be64 service_id) 1185{ 1186 return (u16)be64_to_cpu(service_id); 1187} 1188 1189static int cma_save_ip_info(struct sockaddr *src_addr, 1190 struct sockaddr *dst_addr, 1191 struct ib_cm_event *ib_event, 1192 __be64 service_id) 1193{ 1194 struct cma_hdr *hdr; 1195 __be16 port; 1196 1197 hdr = ib_event->private_data; 1198 if (hdr->cma_version != CMA_VERSION) 1199 return -EINVAL; 1200 1201 port = htons(cma_port_from_service_id(service_id)); 1202 1203 switch (cma_get_ip_ver(hdr)) { 1204 case 4: 1205 cma_save_ip4_info((struct sockaddr_in *)src_addr, 1206 (struct sockaddr_in *)dst_addr, hdr, port); 1207 break; 1208 case 6: 1209 cma_save_ip6_info((struct sockaddr_in6 *)src_addr, 1210 (struct sockaddr_in6 *)dst_addr, hdr, port); 1211 break; 1212 default: 1213 return -EAFNOSUPPORT; 1214 } 1215 1216 return 0; 1217} 1218 1219static int cma_save_net_info(struct sockaddr *src_addr, 1220 struct sockaddr *dst_addr, 1221 struct rdma_cm_id *listen_id, 1222 struct ib_cm_event *ib_event, 1223 sa_family_t sa_family, __be64 service_id) 1224{ 1225 if (sa_family == AF_IB) { 1226 if (ib_event->event == IB_CM_REQ_RECEIVED) 1227 cma_save_ib_info(src_addr, dst_addr, listen_id, 1228 
ib_event->param.req_rcvd.primary_path); 1229 else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) 1230 cma_save_ib_info(src_addr, dst_addr, listen_id, NULL); 1231 return 0; 1232 } 1233 1234 return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id); 1235} 1236 1237static int cma_save_req_info(const struct ib_cm_event *ib_event, 1238 struct cma_req_info *req) 1239{ 1240 const struct ib_cm_req_event_param *req_param = 1241 &ib_event->param.req_rcvd; 1242 const struct ib_cm_sidr_req_event_param *sidr_param = 1243 &ib_event->param.sidr_req_rcvd; 1244 1245 switch (ib_event->event) { 1246 case IB_CM_REQ_RECEIVED: 1247 req->device = req_param->listen_id->device; 1248 req->port = req_param->port; 1249 memcpy(&req->local_gid, &req_param->primary_path->sgid, 1250 sizeof(req->local_gid)); 1251 req->has_gid = true; 1252 req->service_id = req_param->primary_path->service_id; 1253 req->pkey = be16_to_cpu(req_param->primary_path->pkey); 1254 if (req->pkey != req_param->bth_pkey) 1255 pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n" 1256 "RDMA CMA: in the future this may cause the request to be dropped\n", 1257 req_param->bth_pkey, req->pkey); 1258 break; 1259 case IB_CM_SIDR_REQ_RECEIVED: 1260 req->device = sidr_param->listen_id->device; 1261 req->port = sidr_param->port; 1262 req->has_gid = false; 1263 req->service_id = sidr_param->service_id; 1264 req->pkey = sidr_param->pkey; 1265 if (req->pkey != sidr_param->bth_pkey) 1266 pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n" 1267 "RDMA CMA: in the future this may cause the request to be dropped\n", 1268 sidr_param->bth_pkey, req->pkey); 1269 break; 1270 default: 1271 return -EINVAL; 1272 } 1273 1274 return 0; 1275} 1276 1277static bool validate_ipv4_net_dev(struct net_device *net_dev, 1278 const struct sockaddr_in *dst_addr, 1279 const struct sockaddr_in *src_addr) 1280{ 1281 __be32 daddr = dst_addr->sin_addr.s_addr, 1282 saddr = src_addr->sin_addr.s_addr; 1283 struct fib_result res; 1284 struct flowi4 fl4; 1285 int err; 1286 bool ret; 1287 1288 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) || 1289 ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) || 1290 ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) || 1291 ipv4_is_loopback(saddr)) 1292 return false; 1293 1294 memset(&fl4, 0, sizeof(fl4)); 1295 fl4.flowi4_iif = net_dev->ifindex; 1296 fl4.daddr = daddr; 1297 fl4.saddr = saddr; 1298 1299 rcu_read_lock(); 1300 err = fib_lookup(dev_net(net_dev), &fl4, &res, 0); 1301 ret = err == 0 && FIB_RES_DEV(res) == net_dev; 1302 rcu_read_unlock(); 1303 1304 return ret; 1305} 1306 1307static bool validate_ipv6_net_dev(struct net_device *net_dev, 1308 const struct sockaddr_in6 *dst_addr, 1309 const struct sockaddr_in6 *src_addr) 1310{ 1311#if IS_ENABLED(CONFIG_IPV6) 1312 const int strict = ipv6_addr_type(&dst_addr->sin6_addr) & 1313 IPV6_ADDR_LINKLOCAL; 1314 struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr, 1315 &src_addr->sin6_addr, net_dev->ifindex, 1316 strict); 1317 bool ret; 1318 1319 if (!rt) 1320 return false; 1321 1322 ret = rt->rt6i_idev->dev == net_dev; 1323 ip6_rt_put(rt); 1324 1325 return ret; 1326#else 1327 return false; 1328#endif 1329} 1330 1331static bool validate_net_dev(struct net_device *net_dev, 1332 const struct sockaddr *daddr, 1333 const struct sockaddr *saddr) 1334{ 1335 const struct sockaddr_in *daddr4 = (const struct sockaddr_in *)daddr; 1336 const struct sockaddr_in *saddr4 = (const struct sockaddr_in 
*)saddr; 1337 const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr; 1338 const struct sockaddr_in6 *saddr6 = (const struct sockaddr_in6 *)saddr; 1339 1340 switch (daddr->sa_family) { 1341 case AF_INET: 1342 return saddr->sa_family == AF_INET && 1343 validate_ipv4_net_dev(net_dev, daddr4, saddr4); 1344 1345 case AF_INET6: 1346 return saddr->sa_family == AF_INET6 && 1347 validate_ipv6_net_dev(net_dev, daddr6, saddr6); 1348 1349 default: 1350 return false; 1351 } 1352} 1353 1354static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event, 1355 const struct cma_req_info *req) 1356{ 1357 struct sockaddr_storage listen_addr_storage, src_addr_storage; 1358 struct sockaddr *listen_addr = (struct sockaddr *)&listen_addr_storage, 1359 *src_addr = (struct sockaddr *)&src_addr_storage; 1360 struct net_device *net_dev; 1361 const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL; 1362 int err; 1363 1364 err = cma_save_ip_info(listen_addr, src_addr, ib_event, 1365 req->service_id); 1366 if (err) 1367 return ERR_PTR(err); 1368 1369 net_dev = ib_get_net_dev_by_params(req->device, req->port, req->pkey, 1370 gid, listen_addr); 1371 if (!net_dev) 1372 return ERR_PTR(-ENODEV); 1373 1374 if (!validate_net_dev(net_dev, listen_addr, src_addr)) { 1375 dev_put(net_dev); 1376 return ERR_PTR(-EHOSTUNREACH); 1377 } 1378 1379 return net_dev; 1380} 1381 1382static enum rdma_port_space rdma_ps_from_service_id(__be64 service_id) 1383{ 1384 return (be64_to_cpu(service_id) >> 16) & 0xffff; 1385} 1386 1387static bool cma_match_private_data(struct rdma_id_private *id_priv, 1388 const struct cma_hdr *hdr) 1389{ 1390 struct sockaddr *addr = cma_src_addr(id_priv); 1391 __be32 ip4_addr; 1392 struct in6_addr ip6_addr; 1393 1394 if (cma_any_addr(addr) && !id_priv->afonly) 1395 return true; 1396 1397 switch (addr->sa_family) { 1398 case AF_INET: 1399 ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr; 1400 if (cma_get_ip_ver(hdr) != 4) 1401 return false; 1402 if (!cma_any_addr(addr) && 1403 hdr->dst_addr.ip4.addr != ip4_addr) 1404 return false; 1405 break; 1406 case AF_INET6: 1407 ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr; 1408 if (cma_get_ip_ver(hdr) != 6) 1409 return false; 1410 if (!cma_any_addr(addr) && 1411 memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr))) 1412 return false; 1413 break; 1414 case AF_IB: 1415 return true; 1416 default: 1417 return false; 1418 } 1419 1420 return true; 1421} 1422 1423static bool cma_protocol_roce_dev_port(struct ib_device *device, int port_num) 1424{ 1425 enum rdma_link_layer ll = rdma_port_get_link_layer(device, port_num); 1426 enum rdma_transport_type transport = 1427 rdma_node_get_transport(device->node_type); 1428 1429 return ll == IB_LINK_LAYER_ETHERNET && transport == RDMA_TRANSPORT_IB; 1430} 1431 1432static bool cma_protocol_roce(const struct rdma_cm_id *id) 1433{ 1434 struct ib_device *device = id->device; 1435 const int port_num = id->port_num ?: rdma_start_port(device); 1436 1437 return cma_protocol_roce_dev_port(device, port_num); 1438} 1439 1440static bool cma_match_net_dev(const struct rdma_cm_id *id, 1441 const struct net_device *net_dev, 1442 u8 port_num) 1443{ 1444 const struct rdma_addr *addr = &id->route.addr; 1445 1446 if (!net_dev) 1447 /* This request is an AF_IB request or a RoCE request */ 1448 return (!id->port_num || id->port_num == port_num) && 1449 (addr->src_addr.ss_family == AF_IB || 1450 cma_protocol_roce_dev_port(id->device, port_num)); 1451 1452 return !addr->dev_addr.bound_dev_if || 1453 
(net_eq(dev_net(net_dev), addr->dev_addr.net) && 1454 addr->dev_addr.bound_dev_if == net_dev->ifindex); 1455} 1456 1457static struct rdma_id_private *cma_find_listener( 1458 const struct rdma_bind_list *bind_list, 1459 const struct ib_cm_id *cm_id, 1460 const struct ib_cm_event *ib_event, 1461 const struct cma_req_info *req, 1462 const struct net_device *net_dev) 1463{ 1464 struct rdma_id_private *id_priv, *id_priv_dev; 1465 1466 if (!bind_list) 1467 return ERR_PTR(-EINVAL); 1468 1469 hlist_for_each_entry(id_priv, &bind_list->owners, node) { 1470 if (cma_match_private_data(id_priv, ib_event->private_data)) { 1471 if (id_priv->id.device == cm_id->device && 1472 cma_match_net_dev(&id_priv->id, net_dev, req->port)) 1473 return id_priv; 1474 list_for_each_entry(id_priv_dev, 1475 &id_priv->listen_list, 1476 listen_list) { 1477 if (id_priv_dev->id.device == cm_id->device && 1478 cma_match_net_dev(&id_priv_dev->id, net_dev, req->port)) 1479 return id_priv_dev; 1480 } 1481 } 1482 } 1483 1484 return ERR_PTR(-EINVAL); 1485} 1486 1487static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id, 1488 struct ib_cm_event *ib_event, 1489 struct net_device **net_dev) 1490{ 1491 struct cma_req_info req; 1492 struct rdma_bind_list *bind_list; 1493 struct rdma_id_private *id_priv; 1494 int err; 1495 1496 err = cma_save_req_info(ib_event, &req); 1497 if (err) 1498 return ERR_PTR(err); 1499 1500 *net_dev = cma_get_net_dev(ib_event, &req); 1501 if (IS_ERR(*net_dev)) { 1502 if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) { 1503 /* Assuming the protocol is AF_IB */ 1504 *net_dev = NULL; 1505 } else if (cma_protocol_roce_dev_port(req.device, req.port)) { 1506 /* TODO find the net dev matching the request parameters 1507 * through the RoCE GID table */ 1508 *net_dev = NULL; 1509 } else { 1510 return ERR_CAST(*net_dev); 1511 } 1512 } 1513 1514 bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net, 1515 rdma_ps_from_service_id(req.service_id), 1516 cma_port_from_service_id(req.service_id)); 1517 id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev); 1518 if (IS_ERR(id_priv) && *net_dev) { 1519 dev_put(*net_dev); 1520 *net_dev = NULL; 1521 } 1522 1523 return id_priv; 1524} 1525 1526static inline int cma_user_data_offset(struct rdma_id_private *id_priv) 1527{ 1528 return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr); 1529} 1530 1531static void cma_cancel_route(struct rdma_id_private *id_priv) 1532{ 1533 if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) { 1534 if (id_priv->query) 1535 ib_sa_cancel_query(id_priv->query_id, id_priv->query); 1536 } 1537} 1538 1539static void cma_cancel_listens(struct rdma_id_private *id_priv) 1540{ 1541 struct rdma_id_private *dev_id_priv; 1542 1543 /* 1544 * Remove from listen_any_list to prevent added devices from spawning 1545 * additional listen requests. 
1546 */ 1547 mutex_lock(&lock); 1548 list_del(&id_priv->list); 1549 1550 while (!list_empty(&id_priv->listen_list)) { 1551 dev_id_priv = list_entry(id_priv->listen_list.next, 1552 struct rdma_id_private, listen_list); 1553 /* sync with device removal to avoid duplicate destruction */ 1554 list_del_init(&dev_id_priv->list); 1555 list_del(&dev_id_priv->listen_list); 1556 mutex_unlock(&lock); 1557 1558 rdma_destroy_id(&dev_id_priv->id); 1559 mutex_lock(&lock); 1560 } 1561 mutex_unlock(&lock); 1562} 1563 1564static void cma_cancel_operation(struct rdma_id_private *id_priv, 1565 enum rdma_cm_state state) 1566{ 1567 switch (state) { 1568 case RDMA_CM_ADDR_QUERY: 1569 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr); 1570 break; 1571 case RDMA_CM_ROUTE_QUERY: 1572 cma_cancel_route(id_priv); 1573 break; 1574 case RDMA_CM_LISTEN: 1575 if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev) 1576 cma_cancel_listens(id_priv); 1577 break; 1578 default: 1579 break; 1580 } 1581} 1582 1583static void cma_release_port(struct rdma_id_private *id_priv) 1584{ 1585 struct rdma_bind_list *bind_list = id_priv->bind_list; 1586 struct net *net = id_priv->id.route.addr.dev_addr.net; 1587 1588 if (!bind_list) 1589 return; 1590 1591 mutex_lock(&lock); 1592 hlist_del(&id_priv->node); 1593 if (hlist_empty(&bind_list->owners)) { 1594 cma_ps_remove(net, bind_list->ps, bind_list->port); 1595 kfree(bind_list); 1596 } 1597 mutex_unlock(&lock); 1598} 1599 1600static void cma_leave_mc_groups(struct rdma_id_private *id_priv) 1601{ 1602 struct cma_multicast *mc; 1603 1604 while (!list_empty(&id_priv->mc_list)) { 1605 mc = container_of(id_priv->mc_list.next, 1606 struct cma_multicast, list); 1607 list_del(&mc->list); 1608 if (rdma_cap_ib_mcast(id_priv->cma_dev->device, 1609 id_priv->id.port_num)) { 1610 ib_sa_free_multicast(mc->multicast.ib); 1611 kfree(mc); 1612 } else { 1613 if (mc->igmp_joined) { 1614 struct rdma_dev_addr *dev_addr = 1615 &id_priv->id.route.addr.dev_addr; 1616 struct net_device *ndev = NULL; 1617 1618 if (dev_addr->bound_dev_if) 1619 ndev = dev_get_by_index(&init_net, 1620 dev_addr->bound_dev_if); 1621 if (ndev) { 1622 cma_igmp_send(ndev, 1623 &mc->multicast.ib->rec.mgid, 1624 false); 1625 dev_put(ndev); 1626 } 1627 } 1628 kref_put(&mc->mcref, release_mc); 1629 } 1630 } 1631} 1632 1633void rdma_destroy_id(struct rdma_cm_id *id) 1634{ 1635 struct rdma_id_private *id_priv; 1636 enum rdma_cm_state state; 1637 1638 id_priv = container_of(id, struct rdma_id_private, id); 1639 state = cma_exch(id_priv, RDMA_CM_DESTROYING); 1640 cma_cancel_operation(id_priv, state); 1641 1642 /* 1643 * Wait for any active callback to finish. New callbacks will find 1644 * the id_priv state set to destroying and abort. 
1645 */ 1646 mutex_lock(&id_priv->handler_mutex); 1647 mutex_unlock(&id_priv->handler_mutex); 1648 1649 if (id_priv->cma_dev) { 1650 if (rdma_cap_ib_cm(id_priv->id.device, 1)) { 1651 if (id_priv->cm_id.ib) 1652 ib_destroy_cm_id(id_priv->cm_id.ib); 1653 } else if (rdma_cap_iw_cm(id_priv->id.device, 1)) { 1654 if (id_priv->cm_id.iw) 1655 iw_destroy_cm_id(id_priv->cm_id.iw); 1656 } 1657 cma_leave_mc_groups(id_priv); 1658 cma_release_dev(id_priv); 1659 } 1660 1661 cma_release_port(id_priv); 1662 cma_deref_id(id_priv); 1663 wait_for_completion(&id_priv->comp); 1664 1665 if (id_priv->internal_id) 1666 cma_deref_id(id_priv->id.context); 1667 1668 kfree(id_priv->id.route.path_rec); 1669 put_net(id_priv->id.route.addr.dev_addr.net); 1670 kfree(id_priv); 1671} 1672EXPORT_SYMBOL(rdma_destroy_id); 1673 1674static int cma_rep_recv(struct rdma_id_private *id_priv) 1675{ 1676 int ret; 1677 1678 ret = cma_modify_qp_rtr(id_priv, NULL); 1679 if (ret) 1680 goto reject; 1681 1682 ret = cma_modify_qp_rts(id_priv, NULL); 1683 if (ret) 1684 goto reject; 1685 1686 ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0); 1687 if (ret) 1688 goto reject; 1689 1690 return 0; 1691reject: 1692 cma_modify_qp_err(id_priv); 1693 ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED, 1694 NULL, 0, NULL, 0); 1695 return ret; 1696} 1697 1698static void cma_set_rep_event_data(struct rdma_cm_event *event, 1699 struct ib_cm_rep_event_param *rep_data, 1700 void *private_data) 1701{ 1702 event->param.conn.private_data = private_data; 1703 event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE; 1704 event->param.conn.responder_resources = rep_data->responder_resources; 1705 event->param.conn.initiator_depth = rep_data->initiator_depth; 1706 event->param.conn.flow_control = rep_data->flow_control; 1707 event->param.conn.rnr_retry_count = rep_data->rnr_retry_count; 1708 event->param.conn.srq = rep_data->srq; 1709 event->param.conn.qp_num = rep_data->remote_qpn; 1710} 1711 1712static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) 1713{ 1714 struct rdma_id_private *id_priv = cm_id->context; 1715 struct rdma_cm_event event; 1716 int ret = 0; 1717 1718 mutex_lock(&id_priv->handler_mutex); 1719 if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && 1720 id_priv->state != RDMA_CM_CONNECT) || 1721 (ib_event->event == IB_CM_TIMEWAIT_EXIT && 1722 id_priv->state != RDMA_CM_DISCONNECT)) 1723 goto out; 1724 1725 memset(&event, 0, sizeof event); 1726 switch (ib_event->event) { 1727 case IB_CM_REQ_ERROR: 1728 case IB_CM_REP_ERROR: 1729 event.event = RDMA_CM_EVENT_UNREACHABLE; 1730 event.status = -ETIMEDOUT; 1731 break; 1732 case IB_CM_REP_RECEIVED: 1733 if (id_priv->id.qp) { 1734 event.status = cma_rep_recv(id_priv); 1735 event.event = event.status ? 
RDMA_CM_EVENT_CONNECT_ERROR : 1736 RDMA_CM_EVENT_ESTABLISHED; 1737 } else { 1738 event.event = RDMA_CM_EVENT_CONNECT_RESPONSE; 1739 } 1740 cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd, 1741 ib_event->private_data); 1742 break; 1743 case IB_CM_RTU_RECEIVED: 1744 case IB_CM_USER_ESTABLISHED: 1745 event.event = RDMA_CM_EVENT_ESTABLISHED; 1746 break; 1747 case IB_CM_DREQ_ERROR: 1748 event.status = -ETIMEDOUT; /* fall through */ 1749 case IB_CM_DREQ_RECEIVED: 1750 case IB_CM_DREP_RECEIVED: 1751 if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT, 1752 RDMA_CM_DISCONNECT)) 1753 goto out; 1754 event.event = RDMA_CM_EVENT_DISCONNECTED; 1755 break; 1756 case IB_CM_TIMEWAIT_EXIT: 1757 event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT; 1758 break; 1759 case IB_CM_MRA_RECEIVED: 1760 /* ignore event */ 1761 goto out; 1762 case IB_CM_REJ_RECEIVED: 1763 cma_modify_qp_err(id_priv); 1764 event.status = ib_event->param.rej_rcvd.reason; 1765 event.event = RDMA_CM_EVENT_REJECTED; 1766 event.param.conn.private_data = ib_event->private_data; 1767 event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE; 1768 break; 1769 default: 1770 pr_err("RDMA CMA: unexpected IB CM event: %d\n", 1771 ib_event->event); 1772 goto out; 1773 } 1774 1775 ret = id_priv->id.event_handler(&id_priv->id, &event); 1776 if (ret) { 1777 /* Destroy the CM ID by returning a non-zero value. */ 1778 id_priv->cm_id.ib = NULL; 1779 cma_exch(id_priv, RDMA_CM_DESTROYING); 1780 mutex_unlock(&id_priv->handler_mutex); 1781 rdma_destroy_id(&id_priv->id); 1782 return ret; 1783 } 1784out: 1785 mutex_unlock(&id_priv->handler_mutex); 1786 return ret; 1787} 1788 1789static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id, 1790 struct ib_cm_event *ib_event, 1791 struct net_device *net_dev) 1792{ 1793 struct rdma_id_private *id_priv; 1794 struct rdma_cm_id *id; 1795 struct rdma_route *rt; 1796 const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; 1797 const __be64 service_id = 1798 ib_event->param.req_rcvd.primary_path->service_id; 1799 int ret; 1800 1801 id = rdma_create_id(listen_id->route.addr.dev_addr.net, 1802 listen_id->event_handler, listen_id->context, 1803 listen_id->ps, ib_event->param.req_rcvd.qp_type); 1804 if (IS_ERR(id)) 1805 return NULL; 1806 1807 id_priv = container_of(id, struct rdma_id_private, id); 1808 if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr, 1809 (struct sockaddr *)&id->route.addr.dst_addr, 1810 listen_id, ib_event, ss_family, service_id)) 1811 goto err; 1812 1813 rt = &id->route; 1814 rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 
2 : 1; 1815 rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths, 1816 GFP_KERNEL); 1817 if (!rt->path_rec) 1818 goto err; 1819 1820 rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path; 1821 if (rt->num_paths == 2) 1822 rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path; 1823 1824 if (net_dev) { 1825 ret = rdma_copy_addr(&rt->addr.dev_addr, net_dev, NULL); 1826 if (ret) 1827 goto err; 1828 } else { 1829 if (!cma_protocol_roce(listen_id) && 1830 cma_any_addr(cma_src_addr(id_priv))) { 1831 rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND; 1832 rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid); 1833 ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey)); 1834 } else if (!cma_any_addr(cma_src_addr(id_priv))) { 1835 ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr); 1836 if (ret) 1837 goto err; 1838 } 1839 } 1840 rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); 1841 1842 id_priv->state = RDMA_CM_CONNECT; 1843 return id_priv; 1844 1845err: 1846 rdma_destroy_id(id); 1847 return NULL; 1848} 1849 1850static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id, 1851 struct ib_cm_event *ib_event, 1852 struct net_device *net_dev) 1853{ 1854 struct rdma_id_private *id_priv; 1855 struct rdma_cm_id *id; 1856 const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; 1857 struct net *net = listen_id->route.addr.dev_addr.net; 1858 int ret; 1859 1860 id = rdma_create_id(net, listen_id->event_handler, listen_id->context, 1861 listen_id->ps, IB_QPT_UD); 1862 if (IS_ERR(id)) 1863 return NULL; 1864 1865 id_priv = container_of(id, struct rdma_id_private, id); 1866 if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr, 1867 (struct sockaddr *)&id->route.addr.dst_addr, 1868 listen_id, ib_event, ss_family, 1869 ib_event->param.sidr_req_rcvd.service_id)) 1870 goto err; 1871 1872 if (net_dev) { 1873 ret = rdma_copy_addr(&id->route.addr.dev_addr, net_dev, NULL); 1874 if (ret) 1875 goto err; 1876 } else { 1877 if (!cma_any_addr(cma_src_addr(id_priv))) { 1878 ret = cma_translate_addr(cma_src_addr(id_priv), 1879 &id->route.addr.dev_addr); 1880 if (ret) 1881 goto err; 1882 } 1883 } 1884 1885 id_priv->state = RDMA_CM_CONNECT; 1886 return id_priv; 1887err: 1888 rdma_destroy_id(id); 1889 return NULL; 1890} 1891 1892static void cma_set_req_event_data(struct rdma_cm_event *event, 1893 struct ib_cm_req_event_param *req_data, 1894 void *private_data, int offset) 1895{ 1896 event->param.conn.private_data = private_data + offset; 1897 event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset; 1898 event->param.conn.responder_resources = req_data->responder_resources; 1899 event->param.conn.initiator_depth = req_data->initiator_depth; 1900 event->param.conn.flow_control = req_data->flow_control; 1901 event->param.conn.retry_count = req_data->retry_count; 1902 event->param.conn.rnr_retry_count = req_data->rnr_retry_count; 1903 event->param.conn.srq = req_data->srq; 1904 event->param.conn.qp_num = req_data->remote_qpn; 1905} 1906 1907static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event) 1908{ 1909 return (((ib_event->event == IB_CM_REQ_RECEIVED) && 1910 (ib_event->param.req_rcvd.qp_type == id->qp_type)) || 1911 ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) && 1912 (id->qp_type == IB_QPT_UD)) || 1913 (!id->qp_type)); 1914} 1915 1916static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) 1917{ 1918 struct rdma_id_private *listen_id, *conn_id = 
NULL; 1919 struct rdma_cm_event event; 1920 struct net_device *net_dev; 1921 int offset, ret; 1922 1923 listen_id = cma_id_from_event(cm_id, ib_event, &net_dev); 1924 if (IS_ERR(listen_id)) 1925 return PTR_ERR(listen_id); 1926 1927 if (!cma_check_req_qp_type(&listen_id->id, ib_event)) { 1928 ret = -EINVAL; 1929 goto net_dev_put; 1930 } 1931 1932 mutex_lock(&listen_id->handler_mutex); 1933 if (listen_id->state != RDMA_CM_LISTEN) { 1934 ret = -ECONNABORTED; 1935 goto err1; 1936 } 1937 1938 memset(&event, 0, sizeof event); 1939 offset = cma_user_data_offset(listen_id); 1940 event.event = RDMA_CM_EVENT_CONNECT_REQUEST; 1941 if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) { 1942 conn_id = cma_new_udp_id(&listen_id->id, ib_event, net_dev); 1943 event.param.ud.private_data = ib_event->private_data + offset; 1944 event.param.ud.private_data_len = 1945 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset; 1946 } else { 1947 conn_id = cma_new_conn_id(&listen_id->id, ib_event, net_dev); 1948 cma_set_req_event_data(&event, &ib_event->param.req_rcvd, 1949 ib_event->private_data, offset); 1950 } 1951 if (!conn_id) { 1952 ret = -ENOMEM; 1953 goto err1; 1954 } 1955 1956 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); 1957 ret = cma_acquire_dev(conn_id, listen_id); 1958 if (ret) 1959 goto err2; 1960 1961 conn_id->cm_id.ib = cm_id; 1962 cm_id->context = conn_id; 1963 cm_id->cm_handler = cma_ib_handler; 1964 1965 /* 1966 * Protect against the user destroying conn_id from another thread 1967 * until we're done accessing it. 1968 */ 1969 atomic_inc(&conn_id->refcount); 1970 ret = conn_id->id.event_handler(&conn_id->id, &event); 1971 if (ret) 1972 goto err3; 1973 /* 1974 * Acquire mutex to prevent user executing rdma_destroy_id() 1975 * while we're accessing the cm_id. 1976 */ 1977 mutex_lock(&lock); 1978 if (cma_comp(conn_id, RDMA_CM_CONNECT) && 1979 (conn_id->id.qp_type != IB_QPT_UD)) 1980 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); 1981 mutex_unlock(&lock); 1982 mutex_unlock(&conn_id->handler_mutex); 1983 mutex_unlock(&listen_id->handler_mutex); 1984 cma_deref_id(conn_id); 1985 if (net_dev) 1986 dev_put(net_dev); 1987 return 0; 1988 1989err3: 1990 cma_deref_id(conn_id); 1991 /* Destroy the CM ID by returning a non-zero value. 
*/ 1992 conn_id->cm_id.ib = NULL; 1993err2: 1994 cma_exch(conn_id, RDMA_CM_DESTROYING); 1995 mutex_unlock(&conn_id->handler_mutex); 1996err1: 1997 mutex_unlock(&listen_id->handler_mutex); 1998 if (conn_id) 1999 rdma_destroy_id(&conn_id->id); 2000 2001net_dev_put: 2002 if (net_dev) 2003 dev_put(net_dev); 2004 2005 return ret; 2006} 2007 2008__be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr) 2009{ 2010 if (addr->sa_family == AF_IB) 2011 return ((struct sockaddr_ib *) addr)->sib_sid; 2012 2013 return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr))); 2014} 2015EXPORT_SYMBOL(rdma_get_service_id); 2016 2017static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) 2018{ 2019 struct rdma_id_private *id_priv = iw_id->context; 2020 struct rdma_cm_event event; 2021 int ret = 0; 2022 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; 2023 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; 2024 2025 mutex_lock(&id_priv->handler_mutex); 2026 if (id_priv->state != RDMA_CM_CONNECT) 2027 goto out; 2028 2029 memset(&event, 0, sizeof event); 2030 switch (iw_event->event) { 2031 case IW_CM_EVENT_CLOSE: 2032 event.event = RDMA_CM_EVENT_DISCONNECTED; 2033 break; 2034 case IW_CM_EVENT_CONNECT_REPLY: 2035 memcpy(cma_src_addr(id_priv), laddr, 2036 rdma_addr_size(laddr)); 2037 memcpy(cma_dst_addr(id_priv), raddr, 2038 rdma_addr_size(raddr)); 2039 switch (iw_event->status) { 2040 case 0: 2041 event.event = RDMA_CM_EVENT_ESTABLISHED; 2042 event.param.conn.initiator_depth = iw_event->ird; 2043 event.param.conn.responder_resources = iw_event->ord; 2044 break; 2045 case -ECONNRESET: 2046 case -ECONNREFUSED: 2047 event.event = RDMA_CM_EVENT_REJECTED; 2048 break; 2049 case -ETIMEDOUT: 2050 event.event = RDMA_CM_EVENT_UNREACHABLE; 2051 break; 2052 default: 2053 event.event = RDMA_CM_EVENT_CONNECT_ERROR; 2054 break; 2055 } 2056 break; 2057 case IW_CM_EVENT_ESTABLISHED: 2058 event.event = RDMA_CM_EVENT_ESTABLISHED; 2059 event.param.conn.initiator_depth = iw_event->ird; 2060 event.param.conn.responder_resources = iw_event->ord; 2061 break; 2062 default: 2063 BUG_ON(1); 2064 } 2065 2066 event.status = iw_event->status; 2067 event.param.conn.private_data = iw_event->private_data; 2068 event.param.conn.private_data_len = iw_event->private_data_len; 2069 ret = id_priv->id.event_handler(&id_priv->id, &event); 2070 if (ret) { 2071 /* Destroy the CM ID by returning a non-zero value. 
*/ 2072 id_priv->cm_id.iw = NULL; 2073 cma_exch(id_priv, RDMA_CM_DESTROYING); 2074 mutex_unlock(&id_priv->handler_mutex); 2075 rdma_destroy_id(&id_priv->id); 2076 return ret; 2077 } 2078 2079out: 2080 mutex_unlock(&id_priv->handler_mutex); 2081 return ret; 2082} 2083 2084static int iw_conn_req_handler(struct iw_cm_id *cm_id, 2085 struct iw_cm_event *iw_event) 2086{ 2087 struct rdma_cm_id *new_cm_id; 2088 struct rdma_id_private *listen_id, *conn_id; 2089 struct rdma_cm_event event; 2090 int ret = -ECONNABORTED; 2091 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; 2092 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; 2093 2094 listen_id = cm_id->context; 2095 2096 mutex_lock(&listen_id->handler_mutex); 2097 if (listen_id->state != RDMA_CM_LISTEN) 2098 goto out; 2099 2100 /* Create a new RDMA id for the new IW CM ID */ 2101 new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net, 2102 listen_id->id.event_handler, 2103 listen_id->id.context, 2104 RDMA_PS_TCP, IB_QPT_RC); 2105 if (IS_ERR(new_cm_id)) { 2106 ret = -ENOMEM; 2107 goto out; 2108 } 2109 conn_id = container_of(new_cm_id, struct rdma_id_private, id); 2110 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); 2111 conn_id->state = RDMA_CM_CONNECT; 2112 2113 ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL); 2114 if (ret) { 2115 mutex_unlock(&conn_id->handler_mutex); 2116 rdma_destroy_id(new_cm_id); 2117 goto out; 2118 } 2119 2120 ret = cma_acquire_dev(conn_id, listen_id); 2121 if (ret) { 2122 mutex_unlock(&conn_id->handler_mutex); 2123 rdma_destroy_id(new_cm_id); 2124 goto out; 2125 } 2126 2127 conn_id->cm_id.iw = cm_id; 2128 cm_id->context = conn_id; 2129 cm_id->cm_handler = cma_iw_handler; 2130 2131 memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr)); 2132 memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr)); 2133 2134 memset(&event, 0, sizeof event); 2135 event.event = RDMA_CM_EVENT_CONNECT_REQUEST; 2136 event.param.conn.private_data = iw_event->private_data; 2137 event.param.conn.private_data_len = iw_event->private_data_len; 2138 event.param.conn.initiator_depth = iw_event->ird; 2139 event.param.conn.responder_resources = iw_event->ord; 2140 2141 /* 2142 * Protect against the user destroying conn_id from another thread 2143 * until we're done accessing it. 
2144 */ 2145 atomic_inc(&conn_id->refcount); 2146 ret = conn_id->id.event_handler(&conn_id->id, &event); 2147 if (ret) { 2148 /* User wants to destroy the CM ID */ 2149 conn_id->cm_id.iw = NULL; 2150 cma_exch(conn_id, RDMA_CM_DESTROYING); 2151 mutex_unlock(&conn_id->handler_mutex); 2152 cma_deref_id(conn_id); 2153 rdma_destroy_id(&conn_id->id); 2154 goto out; 2155 } 2156 2157 mutex_unlock(&conn_id->handler_mutex); 2158 cma_deref_id(conn_id); 2159 2160out: 2161 mutex_unlock(&listen_id->handler_mutex); 2162 return ret; 2163} 2164 2165static int cma_ib_listen(struct rdma_id_private *id_priv) 2166{ 2167 struct sockaddr *addr; 2168 struct ib_cm_id *id; 2169 __be64 svc_id; 2170 2171 addr = cma_src_addr(id_priv); 2172 svc_id = rdma_get_service_id(&id_priv->id, addr); 2173 id = ib_cm_insert_listen(id_priv->id.device, cma_req_handler, svc_id); 2174 if (IS_ERR(id)) 2175 return PTR_ERR(id); 2176 id_priv->cm_id.ib = id; 2177 2178 return 0; 2179} 2180 2181static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog) 2182{ 2183 int ret; 2184 struct iw_cm_id *id; 2185 2186 id = iw_create_cm_id(id_priv->id.device, 2187 iw_conn_req_handler, 2188 id_priv); 2189 if (IS_ERR(id)) 2190 return PTR_ERR(id); 2191 2192 id->tos = id_priv->tos; 2193 id_priv->cm_id.iw = id; 2194 2195 memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv), 2196 rdma_addr_size(cma_src_addr(id_priv))); 2197 2198 ret = iw_cm_listen(id_priv->cm_id.iw, backlog); 2199 2200 if (ret) { 2201 iw_destroy_cm_id(id_priv->cm_id.iw); 2202 id_priv->cm_id.iw = NULL; 2203 } 2204 2205 return ret; 2206} 2207 2208static int cma_listen_handler(struct rdma_cm_id *id, 2209 struct rdma_cm_event *event) 2210{ 2211 struct rdma_id_private *id_priv = id->context; 2212 2213 id->context = id_priv->id.context; 2214 id->event_handler = id_priv->id.event_handler; 2215 return id_priv->id.event_handler(id, event); 2216} 2217 2218static void cma_listen_on_dev(struct rdma_id_private *id_priv, 2219 struct cma_device *cma_dev) 2220{ 2221 struct rdma_id_private *dev_id_priv; 2222 struct rdma_cm_id *id; 2223 struct net *net = id_priv->id.route.addr.dev_addr.net; 2224 int ret; 2225 2226 if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) 2227 return; 2228 2229 id = rdma_create_id(net, cma_listen_handler, id_priv, id_priv->id.ps, 2230 id_priv->id.qp_type); 2231 if (IS_ERR(id)) 2232 return; 2233 2234 dev_id_priv = container_of(id, struct rdma_id_private, id); 2235 2236 dev_id_priv->state = RDMA_CM_ADDR_BOUND; 2237 memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv), 2238 rdma_addr_size(cma_src_addr(id_priv))); 2239 2240 _cma_attach_to_dev(dev_id_priv, cma_dev); 2241 list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list); 2242 atomic_inc(&id_priv->refcount); 2243 dev_id_priv->internal_id = 1; 2244 dev_id_priv->afonly = id_priv->afonly; 2245 2246 ret = rdma_listen(id, id_priv->backlog); 2247 if (ret) 2248 pr_warn("RDMA CMA: cma_listen_on_dev, error %d, listening on device %s\n", 2249 ret, cma_dev->device->name); 2250} 2251 2252static void cma_listen_on_all(struct rdma_id_private *id_priv) 2253{ 2254 struct cma_device *cma_dev; 2255 2256 mutex_lock(&lock); 2257 list_add_tail(&id_priv->list, &listen_any_list); 2258 list_for_each_entry(cma_dev, &dev_list, list) 2259 cma_listen_on_dev(id_priv, cma_dev); 2260 mutex_unlock(&lock); 2261} 2262 2263void rdma_set_service_type(struct rdma_cm_id *id, int tos) 2264{ 2265 struct rdma_id_private *id_priv; 2266 2267 id_priv = container_of(id, struct rdma_id_private, id); 2268 id_priv->tos = (u8) 
tos; 2269} 2270EXPORT_SYMBOL(rdma_set_service_type); 2271 2272static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec, 2273 void *context) 2274{ 2275 struct cma_work *work = context; 2276 struct rdma_route *route; 2277 2278 route = &work->id->id.route; 2279 2280 if (!status) { 2281 route->num_paths = 1; 2282 *route->path_rec = *path_rec; 2283 } else { 2284 work->old_state = RDMA_CM_ROUTE_QUERY; 2285 work->new_state = RDMA_CM_ADDR_RESOLVED; 2286 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR; 2287 work->event.status = status; 2288 } 2289 2290 queue_work(cma_wq, &work->work); 2291} 2292 2293static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms, 2294 struct cma_work *work) 2295{ 2296 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 2297 struct ib_sa_path_rec path_rec; 2298 ib_sa_comp_mask comp_mask; 2299 struct sockaddr_in6 *sin6; 2300 struct sockaddr_ib *sib; 2301 2302 memset(&path_rec, 0, sizeof path_rec); 2303 rdma_addr_get_sgid(dev_addr, &path_rec.sgid); 2304 rdma_addr_get_dgid(dev_addr, &path_rec.dgid); 2305 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); 2306 path_rec.numb_path = 1; 2307 path_rec.reversible = 1; 2308 path_rec.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); 2309 2310 comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | 2311 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH | 2312 IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID; 2313 2314 switch (cma_family(id_priv)) { 2315 case AF_INET: 2316 path_rec.qos_class = cpu_to_be16((u16) id_priv->tos); 2317 comp_mask |= IB_SA_PATH_REC_QOS_CLASS; 2318 break; 2319 case AF_INET6: 2320 sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); 2321 path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20); 2322 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS; 2323 break; 2324 case AF_IB: 2325 sib = (struct sockaddr_ib *) cma_src_addr(id_priv); 2326 path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20); 2327 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS; 2328 break; 2329 } 2330 2331 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device, 2332 id_priv->id.port_num, &path_rec, 2333 comp_mask, timeout_ms, 2334 GFP_KERNEL, cma_query_handler, 2335 work, &id_priv->query); 2336 2337 return (id_priv->query_id < 0) ? 
id_priv->query_id : 0; 2338} 2339 2340static void cma_work_handler(struct work_struct *_work) 2341{ 2342 struct cma_work *work = container_of(_work, struct cma_work, work); 2343 struct rdma_id_private *id_priv = work->id; 2344 int destroy = 0; 2345 2346 mutex_lock(&id_priv->handler_mutex); 2347 if (!cma_comp_exch(id_priv, work->old_state, work->new_state)) 2348 goto out; 2349 2350 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { 2351 cma_exch(id_priv, RDMA_CM_DESTROYING); 2352 destroy = 1; 2353 } 2354out: 2355 mutex_unlock(&id_priv->handler_mutex); 2356 cma_deref_id(id_priv); 2357 if (destroy) 2358 rdma_destroy_id(&id_priv->id); 2359 kfree(work); 2360} 2361 2362static void cma_ndev_work_handler(struct work_struct *_work) 2363{ 2364 struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work); 2365 struct rdma_id_private *id_priv = work->id; 2366 int destroy = 0; 2367 2368 mutex_lock(&id_priv->handler_mutex); 2369 if (id_priv->state == RDMA_CM_DESTROYING || 2370 id_priv->state == RDMA_CM_DEVICE_REMOVAL) 2371 goto out; 2372 2373 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { 2374 cma_exch(id_priv, RDMA_CM_DESTROYING); 2375 destroy = 1; 2376 } 2377 2378out: 2379 mutex_unlock(&id_priv->handler_mutex); 2380 cma_deref_id(id_priv); 2381 if (destroy) 2382 rdma_destroy_id(&id_priv->id); 2383 kfree(work); 2384} 2385 2386static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms) 2387{ 2388 struct rdma_route *route = &id_priv->id.route; 2389 struct cma_work *work; 2390 int ret; 2391 2392 work = kzalloc(sizeof *work, GFP_KERNEL); 2393 if (!work) 2394 return -ENOMEM; 2395 2396 work->id = id_priv; 2397 INIT_WORK(&work->work, cma_work_handler); 2398 work->old_state = RDMA_CM_ROUTE_QUERY; 2399 work->new_state = RDMA_CM_ROUTE_RESOLVED; 2400 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 2401 2402 route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL); 2403 if (!route->path_rec) { 2404 ret = -ENOMEM; 2405 goto err1; 2406 } 2407 2408 ret = cma_query_ib_route(id_priv, timeout_ms, work); 2409 if (ret) 2410 goto err2; 2411 2412 return 0; 2413err2: 2414 kfree(route->path_rec); 2415 route->path_rec = NULL; 2416err1: 2417 kfree(work); 2418 return ret; 2419} 2420 2421int rdma_set_ib_paths(struct rdma_cm_id *id, 2422 struct ib_sa_path_rec *path_rec, int num_paths) 2423{ 2424 struct rdma_id_private *id_priv; 2425 int ret; 2426 2427 id_priv = container_of(id, struct rdma_id_private, id); 2428 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, 2429 RDMA_CM_ROUTE_RESOLVED)) 2430 return -EINVAL; 2431 2432 id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths, 2433 GFP_KERNEL); 2434 if (!id->route.path_rec) { 2435 ret = -ENOMEM; 2436 goto err; 2437 } 2438 2439 id->route.num_paths = num_paths; 2440 return 0; 2441err: 2442 cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED); 2443 return ret; 2444} 2445EXPORT_SYMBOL(rdma_set_ib_paths); 2446 2447static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms) 2448{ 2449 struct cma_work *work; 2450 2451 work = kzalloc(sizeof *work, GFP_KERNEL); 2452 if (!work) 2453 return -ENOMEM; 2454 2455 work->id = id_priv; 2456 INIT_WORK(&work->work, cma_work_handler); 2457 work->old_state = RDMA_CM_ROUTE_QUERY; 2458 work->new_state = RDMA_CM_ROUTE_RESOLVED; 2459 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 2460 queue_work(cma_wq, &work->work); 2461 return 0; 2462} 2463 2464static int iboe_tos_to_sl(struct net_device *ndev, int tos) 2465{ 2466 int prio; 2467 struct 
net_device *dev; 2468 2469 prio = rt_tos2priority(tos); 2470 dev = ndev->priv_flags & IFF_802_1Q_VLAN ? 2471 vlan_dev_real_dev(ndev) : ndev; 2472 2473 if (dev->num_tc) 2474 return netdev_get_prio_tc_map(dev, prio); 2475 2476#if IS_ENABLED(CONFIG_VLAN_8021Q) 2477 if (ndev->priv_flags & IFF_802_1Q_VLAN) 2478 return (vlan_dev_get_egress_qos_mask(ndev, prio) & 2479 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; 2480#endif 2481 return 0; 2482} 2483 2484static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type, 2485 unsigned long supported_gids, 2486 enum ib_gid_type default_gid) 2487{ 2488 if ((network_type == RDMA_NETWORK_IPV4 || 2489 network_type == RDMA_NETWORK_IPV6) && 2490 test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids)) 2491 return IB_GID_TYPE_ROCE_UDP_ENCAP; 2492 2493 return default_gid; 2494} 2495 2496static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) 2497{ 2498 struct rdma_route *route = &id_priv->id.route; 2499 struct rdma_addr *addr = &route->addr; 2500 struct cma_work *work; 2501 int ret; 2502 struct net_device *ndev = NULL; 2503 2504 2505 work = kzalloc(sizeof *work, GFP_KERNEL); 2506 if (!work) 2507 return -ENOMEM; 2508 2509 work->id = id_priv; 2510 INIT_WORK(&work->work, cma_work_handler); 2511 2512 route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL); 2513 if (!route->path_rec) { 2514 ret = -ENOMEM; 2515 goto err1; 2516 } 2517 2518 route->num_paths = 1; 2519 2520 if (addr->dev_addr.bound_dev_if) { 2521 unsigned long supported_gids; 2522 2523 ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if); 2524 if (!ndev) { 2525 ret = -ENODEV; 2526 goto err2; 2527 } 2528 2529 if (ndev->flags & IFF_LOOPBACK) { 2530 dev_put(ndev); 2531 if (!id_priv->id.device->get_netdev) { 2532 ret = -EOPNOTSUPP; 2533 goto err2; 2534 } 2535 2536 ndev = id_priv->id.device->get_netdev(id_priv->id.device, 2537 id_priv->id.port_num); 2538 if (!ndev) { 2539 ret = -ENODEV; 2540 goto err2; 2541 } 2542 } 2543 2544 route->path_rec->net = &init_net; 2545 route->path_rec->ifindex = ndev->ifindex; 2546 supported_gids = roce_gid_type_mask_support(id_priv->id.device, 2547 id_priv->id.port_num); 2548 route->path_rec->gid_type = 2549 cma_route_gid_type(addr->dev_addr.network, 2550 supported_gids, 2551 id_priv->gid_type); 2552 } 2553 if (!ndev) { 2554 ret = -ENODEV; 2555 goto err2; 2556 } 2557 2558 memcpy(route->path_rec->dmac, addr->dev_addr.dst_dev_addr, ETH_ALEN); 2559 2560 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, 2561 &route->path_rec->sgid); 2562 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr, 2563 &route->path_rec->dgid); 2564 2565 /* Use the hint from IP Stack to select GID Type */ 2566 if (route->path_rec->gid_type < ib_network_to_gid_type(addr->dev_addr.network)) 2567 route->path_rec->gid_type = ib_network_to_gid_type(addr->dev_addr.network); 2568 if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB) 2569 /* TODO: get the hoplimit from the inet/inet6 device */ 2570 route->path_rec->hop_limit = addr->dev_addr.hoplimit; 2571 else 2572 route->path_rec->hop_limit = 1; 2573 route->path_rec->reversible = 1; 2574 route->path_rec->pkey = cpu_to_be16(0xffff); 2575 route->path_rec->mtu_selector = IB_SA_EQ; 2576 route->path_rec->sl = iboe_tos_to_sl(ndev, id_priv->tos); 2577 route->path_rec->mtu = iboe_get_mtu(ndev->mtu); 2578 route->path_rec->rate_selector = IB_SA_EQ; 2579 route->path_rec->rate = iboe_get_rate(ndev); 2580 dev_put(ndev); 2581 route->path_rec->packet_life_time_selector = IB_SA_EQ; 2582 
route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME; 2583 if (!route->path_rec->mtu) { 2584 ret = -EINVAL; 2585 goto err2; 2586 } 2587 2588 work->old_state = RDMA_CM_ROUTE_QUERY; 2589 work->new_state = RDMA_CM_ROUTE_RESOLVED; 2590 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 2591 work->event.status = 0; 2592 2593 queue_work(cma_wq, &work->work); 2594 2595 return 0; 2596 2597err2: 2598 kfree(route->path_rec); 2599 route->path_rec = NULL; 2600err1: 2601 kfree(work); 2602 return ret; 2603} 2604 2605int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms) 2606{ 2607 struct rdma_id_private *id_priv; 2608 int ret; 2609 2610 id_priv = container_of(id, struct rdma_id_private, id); 2611 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY)) 2612 return -EINVAL; 2613 2614 atomic_inc(&id_priv->refcount); 2615 if (rdma_cap_ib_sa(id->device, id->port_num)) 2616 ret = cma_resolve_ib_route(id_priv, timeout_ms); 2617 else if (rdma_protocol_roce(id->device, id->port_num)) 2618 ret = cma_resolve_iboe_route(id_priv); 2619 else if (rdma_protocol_iwarp(id->device, id->port_num)) 2620 ret = cma_resolve_iw_route(id_priv, timeout_ms); 2621 else 2622 ret = -ENOSYS; 2623 2624 if (ret) 2625 goto err; 2626 2627 return 0; 2628err: 2629 cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED); 2630 cma_deref_id(id_priv); 2631 return ret; 2632} 2633EXPORT_SYMBOL(rdma_resolve_route); 2634 2635static void cma_set_loopback(struct sockaddr *addr) 2636{ 2637 switch (addr->sa_family) { 2638 case AF_INET: 2639 ((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK); 2640 break; 2641 case AF_INET6: 2642 ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr, 2643 0, 0, 0, htonl(1)); 2644 break; 2645 default: 2646 ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr, 2647 0, 0, 0, htonl(1)); 2648 break; 2649 } 2650} 2651 2652static int cma_bind_loopback(struct rdma_id_private *id_priv) 2653{ 2654 struct cma_device *cma_dev, *cur_dev; 2655 struct ib_port_attr port_attr; 2656 union ib_gid gid; 2657 u16 pkey; 2658 int ret; 2659 u8 p; 2660 2661 cma_dev = NULL; 2662 mutex_lock(&lock); 2663 list_for_each_entry(cur_dev, &dev_list, list) { 2664 if (cma_family(id_priv) == AF_IB && 2665 !rdma_cap_ib_cm(cur_dev->device, 1)) 2666 continue; 2667 2668 if (!cma_dev) 2669 cma_dev = cur_dev; 2670 2671 for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) { 2672 if (!ib_query_port(cur_dev->device, p, &port_attr) && 2673 port_attr.state == IB_PORT_ACTIVE) { 2674 cma_dev = cur_dev; 2675 goto port_found; 2676 } 2677 } 2678 } 2679 2680 if (!cma_dev) { 2681 ret = -ENODEV; 2682 goto out; 2683 } 2684 2685 p = 1; 2686 2687port_found: 2688 ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid, NULL); 2689 if (ret) 2690 goto out; 2691 2692 ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey); 2693 if (ret) 2694 goto out; 2695 2696 id_priv->id.route.addr.dev_addr.dev_type = 2697 (rdma_protocol_ib(cma_dev->device, p)) ? 
2698 ARPHRD_INFINIBAND : ARPHRD_ETHER; 2699 2700 rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid); 2701 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey); 2702 id_priv->id.port_num = p; 2703 cma_attach_to_dev(id_priv, cma_dev); 2704 cma_set_loopback(cma_src_addr(id_priv)); 2705out: 2706 mutex_unlock(&lock); 2707 return ret; 2708} 2709 2710static void addr_handler(int status, struct sockaddr *src_addr, 2711 struct rdma_dev_addr *dev_addr, void *context) 2712{ 2713 struct rdma_id_private *id_priv = context; 2714 struct rdma_cm_event event; 2715 2716 memset(&event, 0, sizeof event); 2717 mutex_lock(&id_priv->handler_mutex); 2718 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, 2719 RDMA_CM_ADDR_RESOLVED)) 2720 goto out; 2721 2722 memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr)); 2723 if (!status && !id_priv->cma_dev) 2724 status = cma_acquire_dev(id_priv, NULL); 2725 2726 if (status) { 2727 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, 2728 RDMA_CM_ADDR_BOUND)) 2729 goto out; 2730 event.event = RDMA_CM_EVENT_ADDR_ERROR; 2731 event.status = status; 2732 } else 2733 event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 2734 2735 if (id_priv->id.event_handler(&id_priv->id, &event)) { 2736 cma_exch(id_priv, RDMA_CM_DESTROYING); 2737 mutex_unlock(&id_priv->handler_mutex); 2738 cma_deref_id(id_priv); 2739 rdma_destroy_id(&id_priv->id); 2740 return; 2741 } 2742out: 2743 mutex_unlock(&id_priv->handler_mutex); 2744 cma_deref_id(id_priv); 2745} 2746 2747static int cma_resolve_loopback(struct rdma_id_private *id_priv) 2748{ 2749 struct cma_work *work; 2750 union ib_gid gid; 2751 int ret; 2752 2753 work = kzalloc(sizeof *work, GFP_KERNEL); 2754 if (!work) 2755 return -ENOMEM; 2756 2757 if (!id_priv->cma_dev) { 2758 ret = cma_bind_loopback(id_priv); 2759 if (ret) 2760 goto err; 2761 } 2762 2763 rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); 2764 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); 2765 2766 work->id = id_priv; 2767 INIT_WORK(&work->work, cma_work_handler); 2768 work->old_state = RDMA_CM_ADDR_QUERY; 2769 work->new_state = RDMA_CM_ADDR_RESOLVED; 2770 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 2771 queue_work(cma_wq, &work->work); 2772 return 0; 2773err: 2774 kfree(work); 2775 return ret; 2776} 2777 2778static int cma_resolve_ib_addr(struct rdma_id_private *id_priv) 2779{ 2780 struct cma_work *work; 2781 int ret; 2782 2783 work = kzalloc(sizeof *work, GFP_KERNEL); 2784 if (!work) 2785 return -ENOMEM; 2786 2787 if (!id_priv->cma_dev) { 2788 ret = cma_resolve_ib_dev(id_priv); 2789 if (ret) 2790 goto err; 2791 } 2792 2793 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *) 2794 &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr)); 2795 2796 work->id = id_priv; 2797 INIT_WORK(&work->work, cma_work_handler); 2798 work->old_state = RDMA_CM_ADDR_QUERY; 2799 work->new_state = RDMA_CM_ADDR_RESOLVED; 2800 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 2801 queue_work(cma_wq, &work->work); 2802 return 0; 2803err: 2804 kfree(work); 2805 return ret; 2806} 2807 2808static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, 2809 struct sockaddr *dst_addr) 2810{ 2811 if (!src_addr || !src_addr->sa_family) { 2812 src_addr = (struct sockaddr *) &id->route.addr.src_addr; 2813 src_addr->sa_family = dst_addr->sa_family; 2814 if (IS_ENABLED(CONFIG_IPV6) && 2815 dst_addr->sa_family == AF_INET6) { 2816 struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr; 2817 struct sockaddr_in6 *dst_addr6 = 
(struct sockaddr_in6 *) dst_addr; 2818 src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; 2819 if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) 2820 id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id; 2821 } else if (dst_addr->sa_family == AF_IB) { 2822 ((struct sockaddr_ib *) src_addr)->sib_pkey = 2823 ((struct sockaddr_ib *) dst_addr)->sib_pkey; 2824 } 2825 } 2826 return rdma_bind_addr(id, src_addr); 2827} 2828 2829int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, 2830 struct sockaddr *dst_addr, int timeout_ms) 2831{ 2832 struct rdma_id_private *id_priv; 2833 int ret; 2834 2835 id_priv = container_of(id, struct rdma_id_private, id); 2836 if (id_priv->state == RDMA_CM_IDLE) { 2837 ret = cma_bind_addr(id, src_addr, dst_addr); 2838 if (ret) 2839 return ret; 2840 } 2841 2842 if (cma_family(id_priv) != dst_addr->sa_family) 2843 return -EINVAL; 2844 2845 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) 2846 return -EINVAL; 2847 2848 atomic_inc(&id_priv->refcount); 2849 memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr)); 2850 if (cma_any_addr(dst_addr)) { 2851 ret = cma_resolve_loopback(id_priv); 2852 } else { 2853 if (dst_addr->sa_family == AF_IB) { 2854 ret = cma_resolve_ib_addr(id_priv); 2855 } else { 2856 ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv), 2857 dst_addr, &id->route.addr.dev_addr, 2858 timeout_ms, addr_handler, id_priv); 2859 } 2860 } 2861 if (ret) 2862 goto err; 2863 2864 return 0; 2865err: 2866 cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); 2867 cma_deref_id(id_priv); 2868 return ret; 2869} 2870EXPORT_SYMBOL(rdma_resolve_addr); 2871 2872int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse) 2873{ 2874 struct rdma_id_private *id_priv; 2875 unsigned long flags; 2876 int ret; 2877 2878 id_priv = container_of(id, struct rdma_id_private, id); 2879 spin_lock_irqsave(&id_priv->lock, flags); 2880 if (reuse || id_priv->state == RDMA_CM_IDLE) { 2881 id_priv->reuseaddr = reuse; 2882 ret = 0; 2883 } else { 2884 ret = -EINVAL; 2885 } 2886 spin_unlock_irqrestore(&id_priv->lock, flags); 2887 return ret; 2888} 2889EXPORT_SYMBOL(rdma_set_reuseaddr); 2890 2891int rdma_set_afonly(struct rdma_cm_id *id, int afonly) 2892{ 2893 struct rdma_id_private *id_priv; 2894 unsigned long flags; 2895 int ret; 2896 2897 id_priv = container_of(id, struct rdma_id_private, id); 2898 spin_lock_irqsave(&id_priv->lock, flags); 2899 if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) { 2900 id_priv->options |= (1 << CMA_OPTION_AFONLY); 2901 id_priv->afonly = afonly; 2902 ret = 0; 2903 } else { 2904 ret = -EINVAL; 2905 } 2906 spin_unlock_irqrestore(&id_priv->lock, flags); 2907 return ret; 2908} 2909EXPORT_SYMBOL(rdma_set_afonly); 2910 2911static void cma_bind_port(struct rdma_bind_list *bind_list, 2912 struct rdma_id_private *id_priv) 2913{ 2914 struct sockaddr *addr; 2915 struct sockaddr_ib *sib; 2916 u64 sid, mask; 2917 __be16 port; 2918 2919 addr = cma_src_addr(id_priv); 2920 port = htons(bind_list->port); 2921 2922 switch (addr->sa_family) { 2923 case AF_INET: 2924 ((struct sockaddr_in *) addr)->sin_port = port; 2925 break; 2926 case AF_INET6: 2927 ((struct sockaddr_in6 *) addr)->sin6_port = port; 2928 break; 2929 case AF_IB: 2930 sib = (struct sockaddr_ib *) addr; 2931 sid = be64_to_cpu(sib->sib_sid); 2932 mask = be64_to_cpu(sib->sib_sid_mask); 2933 sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port)); 2934 sib->sib_sid_mask = cpu_to_be64(~0ULL); 2935 break; 2936 } 
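 /* Record the binding: id_priv joins bind_list->owners, which cma_check_port() later walks to detect port conflicts and reuse. */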
2937 id_priv->bind_list = bind_list; 2938 hlist_add_head(&id_priv->node, &bind_list->owners); 2939} 2940 2941static int cma_alloc_port(enum rdma_port_space ps, 2942 struct rdma_id_private *id_priv, unsigned short snum) 2943{ 2944 struct rdma_bind_list *bind_list; 2945 int ret; 2946 2947 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); 2948 if (!bind_list) 2949 return -ENOMEM; 2950 2951 ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list, 2952 snum); 2953 if (ret < 0) 2954 goto err; 2955 2956 bind_list->ps = ps; 2957 bind_list->port = (unsigned short)ret; 2958 cma_bind_port(bind_list, id_priv); 2959 return 0; 2960err: 2961 kfree(bind_list); 2962 return ret == -ENOSPC ? -EADDRNOTAVAIL : ret; 2963} 2964 2965static int cma_alloc_any_port(enum rdma_port_space ps, 2966 struct rdma_id_private *id_priv) 2967{ 2968 static unsigned int last_used_port; 2969 int low, high, remaining; 2970 unsigned int rover; 2971 struct net *net = id_priv->id.route.addr.dev_addr.net; 2972 2973 inet_get_local_port_range(net, &low, &high); 2974 remaining = (high - low) + 1; 2975 rover = prandom_u32() % remaining + low; 2976retry: 2977 if (last_used_port != rover && 2978 !cma_ps_find(net, ps, (unsigned short)rover)) { 2979 int ret = cma_alloc_port(ps, id_priv, rover); 2980 /* 2981 * Remember previously used port number in order to avoid 2982 * re-using same port immediately after it is closed. 2983 */ 2984 if (!ret) 2985 last_used_port = rover; 2986 if (ret != -EADDRNOTAVAIL) 2987 return ret; 2988 } 2989 if (--remaining) { 2990 rover++; 2991 if ((rover < low) || (rover > high)) 2992 rover = low; 2993 goto retry; 2994 } 2995 return -EADDRNOTAVAIL; 2996} 2997 2998/* 2999 * Check that the requested port is available. This is called when trying to 3000 * bind to a specific port, or when trying to listen on a bound port. In 3001 * the latter case, the provided id_priv may already be on the bind_list, but 3002 * we still need to check that it's okay to start listening. 
3003 */ 3004static int cma_check_port(struct rdma_bind_list *bind_list, 3005 struct rdma_id_private *id_priv, uint8_t reuseaddr) 3006{ 3007 struct rdma_id_private *cur_id; 3008 struct sockaddr *addr, *cur_addr; 3009 3010 addr = cma_src_addr(id_priv); 3011 hlist_for_each_entry(cur_id, &bind_list->owners, node) { 3012 if (id_priv == cur_id) 3013 continue; 3014 3015 if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr && 3016 cur_id->reuseaddr) 3017 continue; 3018 3019 cur_addr = cma_src_addr(cur_id); 3020 if (id_priv->afonly && cur_id->afonly && 3021 (addr->sa_family != cur_addr->sa_family)) 3022 continue; 3023 3024 if (cma_any_addr(addr) || cma_any_addr(cur_addr)) 3025 return -EADDRNOTAVAIL; 3026 3027 if (!cma_addr_cmp(addr, cur_addr)) 3028 return -EADDRINUSE; 3029 } 3030 return 0; 3031} 3032 3033static int cma_use_port(enum rdma_port_space ps, 3034 struct rdma_id_private *id_priv) 3035{ 3036 struct rdma_bind_list *bind_list; 3037 unsigned short snum; 3038 int ret; 3039 3040 snum = ntohs(cma_port(cma_src_addr(id_priv))); 3041 if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) 3042 return -EACCES; 3043 3044 bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum); 3045 if (!bind_list) { 3046 ret = cma_alloc_port(ps, id_priv, snum); 3047 } else { 3048 ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr); 3049 if (!ret) 3050 cma_bind_port(bind_list, id_priv); 3051 } 3052 return ret; 3053} 3054 3055static int cma_bind_listen(struct rdma_id_private *id_priv) 3056{ 3057 struct rdma_bind_list *bind_list = id_priv->bind_list; 3058 int ret = 0; 3059 3060 mutex_lock(&lock); 3061 if (bind_list->owners.first->next) 3062 ret = cma_check_port(bind_list, id_priv, 0); 3063 mutex_unlock(&lock); 3064 return ret; 3065} 3066 3067static enum rdma_port_space cma_select_inet_ps( 3068 struct rdma_id_private *id_priv) 3069{ 3070 switch (id_priv->id.ps) { 3071 case RDMA_PS_TCP: 3072 case RDMA_PS_UDP: 3073 case RDMA_PS_IPOIB: 3074 case RDMA_PS_IB: 3075 return id_priv->id.ps; 3076 default: 3077 3078 return 0; 3079 } 3080} 3081 3082static enum rdma_port_space cma_select_ib_ps(struct rdma_id_private *id_priv) 3083{ 3084 enum rdma_port_space ps = 0; 3085 struct sockaddr_ib *sib; 3086 u64 sid_ps, mask, sid; 3087 3088 sib = (struct sockaddr_ib *) cma_src_addr(id_priv); 3089 mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK; 3090 sid = be64_to_cpu(sib->sib_sid) & mask; 3091 3092 if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) { 3093 sid_ps = RDMA_IB_IP_PS_IB; 3094 ps = RDMA_PS_IB; 3095 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) && 3096 (sid == (RDMA_IB_IP_PS_TCP & mask))) { 3097 sid_ps = RDMA_IB_IP_PS_TCP; 3098 ps = RDMA_PS_TCP; 3099 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) && 3100 (sid == (RDMA_IB_IP_PS_UDP & mask))) { 3101 sid_ps = RDMA_IB_IP_PS_UDP; 3102 ps = RDMA_PS_UDP; 3103 } 3104 3105 if (ps) { 3106 sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib))); 3107 sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK | 3108 be64_to_cpu(sib->sib_sid_mask)); 3109 } 3110 return ps; 3111} 3112 3113static int cma_get_port(struct rdma_id_private *id_priv) 3114{ 3115 enum rdma_port_space ps; 3116 int ret; 3117 3118 if (cma_family(id_priv) != AF_IB) 3119 ps = cma_select_inet_ps(id_priv); 3120 else 3121 ps = cma_select_ib_ps(id_priv); 3122 if (!ps) 3123 return -EPROTONOSUPPORT; 3124 3125 mutex_lock(&lock); 3126 if (cma_any_port(cma_src_addr(id_priv))) 3127 ret = 
cma_alloc_any_port(ps, id_priv); 3128 else 3129 ret = cma_use_port(ps, id_priv); 3130 mutex_unlock(&lock); 3131 3132 return ret; 3133} 3134 3135static int cma_check_linklocal(struct rdma_dev_addr *dev_addr, 3136 struct sockaddr *addr) 3137{ 3138#if IS_ENABLED(CONFIG_IPV6) 3139 struct sockaddr_in6 *sin6; 3140 3141 if (addr->sa_family != AF_INET6) 3142 return 0; 3143 3144 sin6 = (struct sockaddr_in6 *) addr; 3145 3146 if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)) 3147 return 0; 3148 3149 if (!sin6->sin6_scope_id) 3150 return -EINVAL; 3151 3152 dev_addr->bound_dev_if = sin6->sin6_scope_id; 3153#endif 3154 return 0; 3155} 3156 3157int rdma_listen(struct rdma_cm_id *id, int backlog) 3158{ 3159 struct rdma_id_private *id_priv; 3160 int ret; 3161 3162 id_priv = container_of(id, struct rdma_id_private, id); 3163 if (id_priv->state == RDMA_CM_IDLE) { 3164 id->route.addr.src_addr.ss_family = AF_INET; 3165 ret = rdma_bind_addr(id, cma_src_addr(id_priv)); 3166 if (ret) 3167 return ret; 3168 } 3169 3170 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) 3171 return -EINVAL; 3172 3173 if (id_priv->reuseaddr) { 3174 ret = cma_bind_listen(id_priv); 3175 if (ret) 3176 goto err; 3177 } 3178 3179 id_priv->backlog = backlog; 3180 if (id->device) { 3181 if (rdma_cap_ib_cm(id->device, 1)) { 3182 ret = cma_ib_listen(id_priv); 3183 if (ret) 3184 goto err; 3185 } else if (rdma_cap_iw_cm(id->device, 1)) { 3186 ret = cma_iw_listen(id_priv, backlog); 3187 if (ret) 3188 goto err; 3189 } else { 3190 ret = -ENOSYS; 3191 goto err; 3192 } 3193 } else 3194 cma_listen_on_all(id_priv); 3195 3196 return 0; 3197err: 3198 id_priv->backlog = 0; 3199 cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND); 3200 return ret; 3201} 3202EXPORT_SYMBOL(rdma_listen); 3203 3204int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) 3205{ 3206 struct rdma_id_private *id_priv; 3207 int ret; 3208 3209 if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 && 3210 addr->sa_family != AF_IB) 3211 return -EAFNOSUPPORT; 3212 3213 id_priv = container_of(id, struct rdma_id_private, id); 3214 if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND)) 3215 return -EINVAL; 3216 3217 ret = cma_check_linklocal(&id->route.addr.dev_addr, addr); 3218 if (ret) 3219 goto err1; 3220 3221 memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr)); 3222 if (!cma_any_addr(addr)) { 3223 ret = cma_translate_addr(addr, &id->route.addr.dev_addr); 3224 if (ret) 3225 goto err1; 3226 3227 ret = cma_acquire_dev(id_priv, NULL); 3228 if (ret) 3229 goto err1; 3230 } 3231 3232 if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) { 3233 if (addr->sa_family == AF_INET) 3234 id_priv->afonly = 1; 3235#if IS_ENABLED(CONFIG_IPV6) 3236 else if (addr->sa_family == AF_INET6) { 3237 struct net *net = id_priv->id.route.addr.dev_addr.net; 3238 3239 id_priv->afonly = net->ipv6.sysctl.bindv6only; 3240 } 3241#endif 3242 } 3243 ret = cma_get_port(id_priv); 3244 if (ret) 3245 goto err2; 3246 3247 return 0; 3248err2: 3249 if (id_priv->cma_dev) 3250 cma_release_dev(id_priv); 3251err1: 3252 cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE); 3253 return ret; 3254} 3255EXPORT_SYMBOL(rdma_bind_addr); 3256 3257static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv) 3258{ 3259 struct cma_hdr *cma_hdr; 3260 3261 cma_hdr = hdr; 3262 cma_hdr->cma_version = CMA_VERSION; 3263 if (cma_family(id_priv) == AF_INET) { 3264 struct sockaddr_in *src4, *dst4; 3265 3266 src4 = (struct sockaddr_in *) cma_src_addr(id_priv); 3267 dst4 = 
(struct sockaddr_in *) cma_dst_addr(id_priv); 3268 3269 cma_set_ip_ver(cma_hdr, 4); 3270 cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; 3271 cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; 3272 cma_hdr->port = src4->sin_port; 3273 } else if (cma_family(id_priv) == AF_INET6) { 3274 struct sockaddr_in6 *src6, *dst6; 3275 3276 src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); 3277 dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv); 3278 3279 cma_set_ip_ver(cma_hdr, 6); 3280 cma_hdr->src_addr.ip6 = src6->sin6_addr; 3281 cma_hdr->dst_addr.ip6 = dst6->sin6_addr; 3282 cma_hdr->port = src6->sin6_port; 3283 } 3284 return 0; 3285} 3286 3287static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, 3288 struct ib_cm_event *ib_event) 3289{ 3290 struct rdma_id_private *id_priv = cm_id->context; 3291 struct rdma_cm_event event; 3292 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; 3293 int ret = 0; 3294 3295 mutex_lock(&id_priv->handler_mutex); 3296 if (id_priv->state != RDMA_CM_CONNECT) 3297 goto out; 3298 3299 memset(&event, 0, sizeof event); 3300 switch (ib_event->event) { 3301 case IB_CM_SIDR_REQ_ERROR: 3302 event.event = RDMA_CM_EVENT_UNREACHABLE; 3303 event.status = -ETIMEDOUT; 3304 break; 3305 case IB_CM_SIDR_REP_RECEIVED: 3306 event.param.ud.private_data = ib_event->private_data; 3307 event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE; 3308 if (rep->status != IB_SIDR_SUCCESS) { 3309 event.event = RDMA_CM_EVENT_UNREACHABLE; 3310 event.status = ib_event->param.sidr_rep_rcvd.status; 3311 break; 3312 } 3313 ret = cma_set_qkey(id_priv, rep->qkey); 3314 if (ret) { 3315 event.event = RDMA_CM_EVENT_ADDR_ERROR; 3316 event.status = ret; 3317 break; 3318 } 3319 ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num, 3320 id_priv->id.route.path_rec, 3321 &event.param.ud.ah_attr); 3322 event.param.ud.qp_num = rep->qpn; 3323 event.param.ud.qkey = rep->qkey; 3324 event.event = RDMA_CM_EVENT_ESTABLISHED; 3325 event.status = 0; 3326 break; 3327 default: 3328 pr_err("RDMA CMA: unexpected IB CM event: %d\n", 3329 ib_event->event); 3330 goto out; 3331 } 3332 3333 ret = id_priv->id.event_handler(&id_priv->id, &event); 3334 if (ret) { 3335 /* Destroy the CM ID by returning a non-zero value. 
*/ 3336 id_priv->cm_id.ib = NULL; 3337 cma_exch(id_priv, RDMA_CM_DESTROYING); 3338 mutex_unlock(&id_priv->handler_mutex); 3339 rdma_destroy_id(&id_priv->id); 3340 return ret; 3341 } 3342out: 3343 mutex_unlock(&id_priv->handler_mutex); 3344 return ret; 3345} 3346 3347static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, 3348 struct rdma_conn_param *conn_param) 3349{ 3350 struct ib_cm_sidr_req_param req; 3351 struct ib_cm_id *id; 3352 void *private_data; 3353 int offset, ret; 3354 3355 memset(&req, 0, sizeof req); 3356 offset = cma_user_data_offset(id_priv); 3357 req.private_data_len = offset + conn_param->private_data_len; 3358 if (req.private_data_len < conn_param->private_data_len) 3359 return -EINVAL; 3360 3361 if (req.private_data_len) { 3362 private_data = kzalloc(req.private_data_len, GFP_ATOMIC); 3363 if (!private_data) 3364 return -ENOMEM; 3365 } else { 3366 private_data = NULL; 3367 } 3368 3369 if (conn_param->private_data && conn_param->private_data_len) 3370 memcpy(private_data + offset, conn_param->private_data, 3371 conn_param->private_data_len); 3372 3373 if (private_data) { 3374 ret = cma_format_hdr(private_data, id_priv); 3375 if (ret) 3376 goto out; 3377 req.private_data = private_data; 3378 } 3379 3380 id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler, 3381 id_priv); 3382 if (IS_ERR(id)) { 3383 ret = PTR_ERR(id); 3384 goto out; 3385 } 3386 id_priv->cm_id.ib = id; 3387 3388 req.path = id_priv->id.route.path_rec; 3389 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); 3390 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8); 3391 req.max_cm_retries = CMA_MAX_CM_RETRIES; 3392 3393 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req); 3394 if (ret) { 3395 ib_destroy_cm_id(id_priv->cm_id.ib); 3396 id_priv->cm_id.ib = NULL; 3397 } 3398out: 3399 kfree(private_data); 3400 return ret; 3401} 3402 3403static int cma_connect_ib(struct rdma_id_private *id_priv, 3404 struct rdma_conn_param *conn_param) 3405{ 3406 struct ib_cm_req_param req; 3407 struct rdma_route *route; 3408 void *private_data; 3409 struct ib_cm_id *id; 3410 int offset, ret; 3411 3412 memset(&req, 0, sizeof req); 3413 offset = cma_user_data_offset(id_priv); 3414 req.private_data_len = offset + conn_param->private_data_len; 3415 if (req.private_data_len < conn_param->private_data_len) 3416 return -EINVAL; 3417 3418 if (req.private_data_len) { 3419 private_data = kzalloc(req.private_data_len, GFP_ATOMIC); 3420 if (!private_data) 3421 return -ENOMEM; 3422 } else { 3423 private_data = NULL; 3424 } 3425 3426 if (conn_param->private_data && conn_param->private_data_len) 3427 memcpy(private_data + offset, conn_param->private_data, 3428 conn_param->private_data_len); 3429 3430 id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv); 3431 if (IS_ERR(id)) { 3432 ret = PTR_ERR(id); 3433 goto out; 3434 } 3435 id_priv->cm_id.ib = id; 3436 3437 route = &id_priv->id.route; 3438 if (private_data) { 3439 ret = cma_format_hdr(private_data, id_priv); 3440 if (ret) 3441 goto out; 3442 req.private_data = private_data; 3443 } 3444 3445 req.primary_path = &route->path_rec[0]; 3446 if (route->num_paths == 2) 3447 req.alternate_path = &route->path_rec[1]; 3448 3449 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); 3450 req.qp_num = id_priv->qp_num; 3451 req.qp_type = id_priv->id.qp_type; 3452 req.starting_psn = id_priv->seq_num; 3453 req.responder_resources = conn_param->responder_resources; 3454 req.initiator_depth = conn_param->initiator_depth; 3455 
req.flow_control = conn_param->flow_control; 3456 req.retry_count = min_t(u8, 7, conn_param->retry_count); 3457 req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); 3458 req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; 3459 req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; 3460 req.max_cm_retries = CMA_MAX_CM_RETRIES; 3461 req.srq = id_priv->srq ? 1 : 0; 3462 3463 ret = ib_send_cm_req(id_priv->cm_id.ib, &req); 3464out: 3465 if (ret && !IS_ERR(id)) { 3466 ib_destroy_cm_id(id); 3467 id_priv->cm_id.ib = NULL; 3468 } 3469 3470 kfree(private_data); 3471 return ret; 3472} 3473 3474static int cma_connect_iw(struct rdma_id_private *id_priv, 3475 struct rdma_conn_param *conn_param) 3476{ 3477 struct iw_cm_id *cm_id; 3478 int ret; 3479 struct iw_cm_conn_param iw_param; 3480 3481 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv); 3482 if (IS_ERR(cm_id)) 3483 return PTR_ERR(cm_id); 3484 3485 cm_id->tos = id_priv->tos; 3486 id_priv->cm_id.iw = cm_id; 3487 3488 memcpy(&cm_id->local_addr, cma_src_addr(id_priv), 3489 rdma_addr_size(cma_src_addr(id_priv))); 3490 memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv), 3491 rdma_addr_size(cma_dst_addr(id_priv))); 3492 3493 ret = cma_modify_qp_rtr(id_priv, conn_param); 3494 if (ret) 3495 goto out; 3496 3497 if (conn_param) { 3498 iw_param.ord = conn_param->initiator_depth; 3499 iw_param.ird = conn_param->responder_resources; 3500 iw_param.private_data = conn_param->private_data; 3501 iw_param.private_data_len = conn_param->private_data_len; 3502 iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num; 3503 } else { 3504 memset(&iw_param, 0, sizeof iw_param); 3505 iw_param.qpn = id_priv->qp_num; 3506 } 3507 ret = iw_cm_connect(cm_id, &iw_param); 3508out: 3509 if (ret) { 3510 iw_destroy_cm_id(cm_id); 3511 id_priv->cm_id.iw = NULL; 3512 } 3513 return ret; 3514} 3515 3516int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 3517{ 3518 struct rdma_id_private *id_priv; 3519 int ret; 3520 3521 id_priv = container_of(id, struct rdma_id_private, id); 3522 if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) 3523 return -EINVAL; 3524 3525 if (!id->qp) { 3526 id_priv->qp_num = conn_param->qp_num; 3527 id_priv->srq = conn_param->srq; 3528 } 3529 3530 if (rdma_cap_ib_cm(id->device, id->port_num)) { 3531 if (id->qp_type == IB_QPT_UD) 3532 ret = cma_resolve_ib_udp(id_priv, conn_param); 3533 else 3534 ret = cma_connect_ib(id_priv, conn_param); 3535 } else if (rdma_cap_iw_cm(id->device, id->port_num)) 3536 ret = cma_connect_iw(id_priv, conn_param); 3537 else 3538 ret = -ENOSYS; 3539 if (ret) 3540 goto err; 3541 3542 return 0; 3543err: 3544 cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED); 3545 return ret; 3546} 3547EXPORT_SYMBOL(rdma_connect); 3548 3549static int cma_accept_ib(struct rdma_id_private *id_priv, 3550 struct rdma_conn_param *conn_param) 3551{ 3552 struct ib_cm_rep_param rep; 3553 int ret; 3554 3555 ret = cma_modify_qp_rtr(id_priv, conn_param); 3556 if (ret) 3557 goto out; 3558 3559 ret = cma_modify_qp_rts(id_priv, conn_param); 3560 if (ret) 3561 goto out; 3562 3563 memset(&rep, 0, sizeof rep); 3564 rep.qp_num = id_priv->qp_num; 3565 rep.starting_psn = id_priv->seq_num; 3566 rep.private_data = conn_param->private_data; 3567 rep.private_data_len = conn_param->private_data_len; 3568 rep.responder_resources = conn_param->responder_resources; 3569 rep.initiator_depth = conn_param->initiator_depth; 3570 rep.failover_accepted = 0; 3571 rep.flow_control = 
conn_param->flow_control; 3572 rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); 3573 rep.srq = id_priv->srq ? 1 : 0; 3574 3575 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep); 3576out: 3577 return ret; 3578} 3579 3580static int cma_accept_iw(struct rdma_id_private *id_priv, 3581 struct rdma_conn_param *conn_param) 3582{ 3583 struct iw_cm_conn_param iw_param; 3584 int ret; 3585 3586 ret = cma_modify_qp_rtr(id_priv, conn_param); 3587 if (ret) 3588 return ret; 3589 3590 iw_param.ord = conn_param->initiator_depth; 3591 iw_param.ird = conn_param->responder_resources; 3592 iw_param.private_data = conn_param->private_data; 3593 iw_param.private_data_len = conn_param->private_data_len; 3594 if (id_priv->id.qp) { 3595 iw_param.qpn = id_priv->qp_num; 3596 } else 3597 iw_param.qpn = conn_param->qp_num; 3598 3599 return iw_cm_accept(id_priv->cm_id.iw, &iw_param); 3600} 3601 3602static int cma_send_sidr_rep(struct rdma_id_private *id_priv, 3603 enum ib_cm_sidr_status status, u32 qkey, 3604 const void *private_data, int private_data_len) 3605{ 3606 struct ib_cm_sidr_rep_param rep; 3607 int ret; 3608 3609 memset(&rep, 0, sizeof rep); 3610 rep.status = status; 3611 if (status == IB_SIDR_SUCCESS) { 3612 ret = cma_set_qkey(id_priv, qkey); 3613 if (ret) 3614 return ret; 3615 rep.qp_num = id_priv->qp_num; 3616 rep.qkey = id_priv->qkey; 3617 } 3618 rep.private_data = private_data; 3619 rep.private_data_len = private_data_len; 3620 3621 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep); 3622} 3623 3624int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 3625{ 3626 struct rdma_id_private *id_priv; 3627 int ret; 3628 3629 id_priv = container_of(id, struct rdma_id_private, id); 3630 3631 id_priv->owner = task_pid_nr(current); 3632 3633 if (!cma_comp(id_priv, RDMA_CM_CONNECT)) 3634 return -EINVAL; 3635 3636 if (!id->qp && conn_param) { 3637 id_priv->qp_num = conn_param->qp_num; 3638 id_priv->srq = conn_param->srq; 3639 } 3640 3641 if (rdma_cap_ib_cm(id->device, id->port_num)) { 3642 if (id->qp_type == IB_QPT_UD) { 3643 if (conn_param) 3644 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, 3645 conn_param->qkey, 3646 conn_param->private_data, 3647 conn_param->private_data_len); 3648 else 3649 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, 3650 0, NULL, 0); 3651 } else { 3652 if (conn_param) 3653 ret = cma_accept_ib(id_priv, conn_param); 3654 else 3655 ret = cma_rep_recv(id_priv); 3656 } 3657 } else if (rdma_cap_iw_cm(id->device, id->port_num)) 3658 ret = cma_accept_iw(id_priv, conn_param); 3659 else 3660 ret = -ENOSYS; 3661 3662 if (ret) 3663 goto reject; 3664 3665 return 0; 3666reject: 3667 cma_modify_qp_err(id_priv); 3668 rdma_reject(id, NULL, 0); 3669 return ret; 3670} 3671EXPORT_SYMBOL(rdma_accept); 3672 3673int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event) 3674{ 3675 struct rdma_id_private *id_priv; 3676 int ret; 3677 3678 id_priv = container_of(id, struct rdma_id_private, id); 3679 if (!id_priv->cm_id.ib) 3680 return -EINVAL; 3681 3682 switch (id->device->node_type) { 3683 case RDMA_NODE_IB_CA: 3684 ret = ib_cm_notify(id_priv->cm_id.ib, event); 3685 break; 3686 default: 3687 ret = 0; 3688 break; 3689 } 3690 return ret; 3691} 3692EXPORT_SYMBOL(rdma_notify); 3693 3694int rdma_reject(struct rdma_cm_id *id, const void *private_data, 3695 u8 private_data_len) 3696{ 3697 struct rdma_id_private *id_priv; 3698 int ret; 3699 3700 id_priv = container_of(id, struct rdma_id_private, id); 3701 if (!id_priv->cm_id.ib) 3702 return -EINVAL; 3703 3704 if 
(rdma_cap_ib_cm(id->device, id->port_num)) { 3705 if (id->qp_type == IB_QPT_UD) 3706 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0, 3707 private_data, private_data_len); 3708 else 3709 ret = ib_send_cm_rej(id_priv->cm_id.ib, 3710 IB_CM_REJ_CONSUMER_DEFINED, NULL, 3711 0, private_data, private_data_len); 3712 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { 3713 ret = iw_cm_reject(id_priv->cm_id.iw, 3714 private_data, private_data_len); 3715 } else 3716 ret = -ENOSYS; 3717 3718 return ret; 3719} 3720EXPORT_SYMBOL(rdma_reject); 3721 3722int rdma_disconnect(struct rdma_cm_id *id) 3723{ 3724 struct rdma_id_private *id_priv; 3725 int ret; 3726 3727 id_priv = container_of(id, struct rdma_id_private, id); 3728 if (!id_priv->cm_id.ib) 3729 return -EINVAL; 3730 3731 if (rdma_cap_ib_cm(id->device, id->port_num)) { 3732 ret = cma_modify_qp_err(id_priv); 3733 if (ret) 3734 goto out; 3735 /* Initiate or respond to a disconnect. */ 3736 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) 3737 ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0); 3738 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { 3739 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); 3740 } else 3741 ret = -EINVAL; 3742 3743out: 3744 return ret; 3745} 3746EXPORT_SYMBOL(rdma_disconnect); 3747 3748static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) 3749{ 3750 struct rdma_id_private *id_priv; 3751 struct cma_multicast *mc = multicast->context; 3752 struct rdma_cm_event event; 3753 int ret = 0; 3754 3755 id_priv = mc->id_priv; 3756 mutex_lock(&id_priv->handler_mutex); 3757 if (id_priv->state != RDMA_CM_ADDR_BOUND && 3758 id_priv->state != RDMA_CM_ADDR_RESOLVED) 3759 goto out; 3760 3761 if (!status) 3762 status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); 3763 mutex_lock(&id_priv->qp_mutex); 3764 if (!status && id_priv->id.qp) 3765 status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid, 3766 be16_to_cpu(multicast->rec.mlid)); 3767 mutex_unlock(&id_priv->qp_mutex); 3768 3769 memset(&event, 0, sizeof event); 3770 event.status = status; 3771 event.param.ud.private_data = mc->context; 3772 if (!status) { 3773 struct rdma_dev_addr *dev_addr = 3774 &id_priv->id.route.addr.dev_addr; 3775 struct net_device *ndev = 3776 dev_get_by_index(&init_net, dev_addr->bound_dev_if); 3777 enum ib_gid_type gid_type = 3778 id_priv->cma_dev->default_gid_type[id_priv->id.port_num - 3779 rdma_start_port(id_priv->cma_dev->device)]; 3780 3781 event.event = RDMA_CM_EVENT_MULTICAST_JOIN; 3782 ib_init_ah_from_mcmember(id_priv->id.device, 3783 id_priv->id.port_num, &multicast->rec, 3784 ndev, gid_type, 3785 &event.param.ud.ah_attr); 3786 event.param.ud.qp_num = 0xFFFFFF; 3787 event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey); 3788 if (ndev) 3789 dev_put(ndev); 3790 } else 3791 event.event = RDMA_CM_EVENT_MULTICAST_ERROR; 3792 3793 ret = id_priv->id.event_handler(&id_priv->id, &event); 3794 if (ret) { 3795 cma_exch(id_priv, RDMA_CM_DESTROYING); 3796 mutex_unlock(&id_priv->handler_mutex); 3797 rdma_destroy_id(&id_priv->id); 3798 return 0; 3799 } 3800 3801out: 3802 mutex_unlock(&id_priv->handler_mutex); 3803 return 0; 3804} 3805 3806static void cma_set_mgid(struct rdma_id_private *id_priv, 3807 struct sockaddr *addr, union ib_gid *mgid) 3808{ 3809 unsigned char mc_map[MAX_ADDR_LEN]; 3810 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 3811 struct sockaddr_in *sin = (struct sockaddr_in *) addr; 3812 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr; 3813 3814 if (cma_any_addr(addr)) { 3815 
memset(mgid, 0, sizeof *mgid); 3816 } else if ((addr->sa_family == AF_INET6) && 3817 ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) == 3818 0xFF10A01B)) { 3819 /* IPv6 address is an SA assigned MGID. */ 3820 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); 3821 } else if (addr->sa_family == AF_IB) { 3822 memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid); 3823 } else if ((addr->sa_family == AF_INET6)) { 3824 ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map); 3825 if (id_priv->id.ps == RDMA_PS_UDP) 3826 mc_map[7] = 0x01; /* Use RDMA CM signature */ 3827 *mgid = *(union ib_gid *) (mc_map + 4); 3828 } else { 3829 ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map); 3830 if (id_priv->id.ps == RDMA_PS_UDP) 3831 mc_map[7] = 0x01; /* Use RDMA CM signature */ 3832 *mgid = *(union ib_gid *) (mc_map + 4); 3833 } 3834} 3835 3836static void cma_query_sa_classport_info_cb(int status, 3837 struct ib_class_port_info *rec, 3838 void *context) 3839{ 3840 struct class_port_info_context *cb_ctx = context; 3841 3842 WARN_ON(!context); 3843 3844 if (status || !rec) { 3845 pr_debug("RDMA CM: %s port %u failed query ClassPortInfo status: %d\n", 3846 cb_ctx->device->name, cb_ctx->port_num, status); 3847 goto out; 3848 } 3849 3850 memcpy(cb_ctx->class_port_info, rec, sizeof(struct ib_class_port_info)); 3851 3852out: 3853 complete(&cb_ctx->done); 3854} 3855 3856static int cma_query_sa_classport_info(struct ib_device *device, u8 port_num, 3857 struct ib_class_port_info *class_port_info) 3858{ 3859 struct class_port_info_context *cb_ctx; 3860 int ret; 3861 3862 cb_ctx = kmalloc(sizeof(*cb_ctx), GFP_KERNEL); 3863 if (!cb_ctx) 3864 return -ENOMEM; 3865 3866 cb_ctx->device = device; 3867 cb_ctx->class_port_info = class_port_info; 3868 cb_ctx->port_num = port_num; 3869 init_completion(&cb_ctx->done); 3870 3871 ret = ib_sa_classport_info_rec_query(&sa_client, device, port_num, 3872 CMA_QUERY_CLASSPORT_INFO_TIMEOUT, 3873 GFP_KERNEL, cma_query_sa_classport_info_cb, 3874 cb_ctx, &cb_ctx->sa_query); 3875 if (ret < 0) { 3876 pr_err("RDMA CM: %s port %u failed to send ClassPortInfo query, ret: %d\n", 3877 device->name, port_num, ret); 3878 goto out; 3879 } 3880 3881 wait_for_completion(&cb_ctx->done); 3882 3883out: 3884 kfree(cb_ctx); 3885 return ret; 3886} 3887 3888static int cma_join_ib_multicast(struct rdma_id_private *id_priv, 3889 struct cma_multicast *mc) 3890{ 3891 struct ib_sa_mcmember_rec rec; 3892 struct ib_class_port_info class_port_info; 3893 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 3894 ib_sa_comp_mask comp_mask; 3895 int ret; 3896 3897 ib_addr_get_mgid(dev_addr, &rec.mgid); 3898 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num, 3899 &rec.mgid, &rec); 3900 if (ret) 3901 return ret; 3902 3903 ret = cma_set_qkey(id_priv, 0); 3904 if (ret) 3905 return ret; 3906 3907 cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid); 3908 rec.qkey = cpu_to_be32(id_priv->qkey); 3909 rdma_addr_get_sgid(dev_addr, &rec.port_gid); 3910 rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); 3911 rec.join_state = mc->join_state; 3912 3913 if (rec.join_state == BIT(SENDONLY_FULLMEMBER_JOIN)) { 3914 ret = cma_query_sa_classport_info(id_priv->id.device, 3915 id_priv->id.port_num, 3916 &class_port_info); 3917 3918 if (ret) 3919 return ret; 3920 3921 if (!(ib_get_cpi_capmask2(&class_port_info) & 3922 IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT)) { 3923 pr_warn("RDMA CM: %s port %u Unable to multicast join\n" 3924 "RDMA CM: SM doesn't support 
				id_priv->id.device->name, id_priv->id.port_num);
			return -EOPNOTSUPP;
		}
	}

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	if (id_priv->id.ps == RDMA_PS_IPOIB)
		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
			     IB_SA_MCMEMBER_REC_RATE_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU |
			     IB_SA_MCMEMBER_REC_HOP_LIMIT;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	return PTR_ERR_OR_ZERO(mc->multicast.ib);
}

static void iboe_mcast_work_handler(struct work_struct *work)
{
	struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
	struct cma_multicast *mc = mw->mc;
	struct ib_sa_multicast *m = mc->multicast.ib;

	mc->multicast.ib->context = mc;
	cma_ib_mc_handler(0, m);
	kref_put(&mc->mcref, release_mc);
	kfree(mw);
}

static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else {
		mgid->raw[0] = 0xff;
		mgid->raw[1] = 0x0e;
		mgid->raw[2] = 0;
		mgid->raw[3] = 0;
		mgid->raw[4] = 0;
		mgid->raw[5] = 0;
		mgid->raw[6] = 0;
		mgid->raw[7] = 0;
		mgid->raw[8] = 0;
		mgid->raw[9] = 0;
		mgid->raw[10] = 0xff;
		mgid->raw[11] = 0xff;
		*(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
	}
}

static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
				   struct cma_multicast *mc)
{
	struct iboe_mcast_work *work;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int err = 0;
	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
	struct net_device *ndev = NULL;
	enum ib_gid_type gid_type;
	bool send_only;

	send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);

	if (cma_zero_addr((struct sockaddr *)&mc->addr))
		return -EINVAL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
	if (!mc->multicast.ib) {
		err = -ENOMEM;
		goto out1;
	}

	cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);

	mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
	if (id_priv->id.ps == RDMA_PS_UDP)
		mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);

	if (dev_addr->bound_dev_if)
		ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
	if (!ndev) {
		err = -ENODEV;
		goto out2;
	}
	mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
	mc->multicast.ib->rec.hop_limit = 1;
	mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);

	gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
		   rdma_start_port(id_priv->cma_dev->device)];
	if (addr->sa_family == AF_INET) {
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
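			/*
			 * RoCEv2 multicast is IP-routable: use the IPv6
			 * default hop limit and, unless this is a send-only
			 * join, also join the corresponding IGMP group.
			 */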
			mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
			if (!send_only) {
				err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
						    true);
				if (!err)
					mc->igmp_joined = true;
			}
		}
	} else {
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
			err = -ENOTSUPP;
	}
	dev_put(ndev);
	if (err || !mc->multicast.ib->rec.mtu) {
		if (!err)
			err = -EINVAL;
		goto out2;
	}
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &mc->multicast.ib->rec.port_gid);
	work->id = id_priv;
	work->mc = mc;
	INIT_WORK(&work->work, iboe_mcast_work_handler);
	kref_get(&mc->mcref);
	queue_work(cma_wq, &work->work);

	return 0;

out2:
	kfree(mc->multicast.ib);
out1:
	kfree(work);
	return err;
}

int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			u8 join_state, void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
	    !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, rdma_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;
	mc->igmp_joined = false;
	mc->join_state = join_state;
	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	if (rdma_protocol_roce(id->device, id->port_num)) {
		kref_init(&mc->mcref);
		ret = cma_iboe_join_multicast(id_priv, mc);
	} else if (rdma_cap_ib_mcast(id->device, id->port_num))
		ret = cma_join_ib_multicast(id_priv, mc);
	else
		ret = -ENOSYS;

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);

void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						be16_to_cpu(mc->multicast.ib->rec.mlid));

			BUG_ON(id_priv->cma_dev->device != id->device);

			if (rdma_cap_ib_mcast(id->device, id->port_num)) {
				ib_sa_free_multicast(mc->multicast.ib);
				kfree(mc);
			} else if (rdma_protocol_roce(id->device, id->port_num)) {
				if (mc->igmp_joined) {
					struct rdma_dev_addr *dev_addr =
						&id->route.addr.dev_addr;
					struct net_device *ndev = NULL;

					if (dev_addr->bound_dev_if)
						ndev = dev_get_by_index(&init_net,
									dev_addr->bound_dev_if);
					if (ndev) {
						cma_igmp_send(ndev,
							      &mc->multicast.ib->rec.mgid,
							      false);
						dev_put(ndev);
					}
					mc->igmp_joined = false;
				}
				kref_put(&mc->mcref, release_mc);
			}
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);

static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr;
	struct cma_ndev_work *work;

	dev_addr = &id_priv->id.route.addr.dev_addr;

	if ((dev_addr->bound_dev_if == ndev->ifindex) &&
	    (net_eq(dev_net(ndev), dev_addr->net)) &&
	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
		pr_info("RDMA CM addr change for ndev %s used by id %p\n",
			ndev->name, &id_priv->id);
		work = kzalloc(sizeof *work, GFP_KERNEL);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, cma_ndev_work_handler);
		work->id = id_priv;
		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
		atomic_inc(&id_priv->refcount);
		queue_work(cma_wq, &work->work);
	}

	return 0;
}

static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	int ret = NOTIFY_DONE;

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			ret = cma_netdev_change(ndev, id_priv);
			if (ret)
				goto out;
		}

out:
	mutex_unlock(&lock);
	return ret;
}

static struct notifier_block cma_nb = {
	.notifier_call = cma_netdev_callback
};

static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	unsigned int i;
	unsigned long supported_gids = 0;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;
	cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
					    sizeof(*cma_dev->default_gid_type),
					    GFP_KERNEL);
	if (!cma_dev->default_gid_type) {
		kfree(cma_dev);
		return;
	}
	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
		supported_gids = roce_gid_type_mask_support(device, i);
		WARN_ON(!supported_gids);
		cma_dev->default_gid_type[i - rdma_start_port(device)] =
			find_first_bit(&supported_gids, BITS_PER_LONG);
	}

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}

static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum rdma_cm_state state;
	int ret = 0;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
	if (state == RDMA_CM_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	mutex_lock(&id_priv->handler_mutex);

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
		goto out;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}

static void cma_remove_one(struct ib_device *device, void *client_data)
{
	struct cma_device *cma_dev = client_data;

	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev->default_gid_type);
	kfree(cma_dev);
}

static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	struct rdma_cm_id_stats *id_stats;
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id = NULL;
	struct cma_device *cma_dev;
	int i_dev = 0, i_id = 0;

	/*
	 * We export all of the IDs as a sequence of messages.  Each
	 * ID gets its own netlink message.
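	 * cb->args[0] and cb->args[1] record the device and ID indices
	 * already dumped, so a subsequent dump call resumes where the
	 * previous one left off.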
	 */
	mutex_lock(&lock);

	list_for_each_entry(cma_dev, &dev_list, list) {
		if (i_dev < cb->args[0]) {
			i_dev++;
			continue;
		}

		i_id = 0;
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			if (i_id < cb->args[1]) {
				i_id++;
				continue;
			}

			id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
						sizeof *id_stats, RDMA_NL_RDMA_CM,
						RDMA_NL_RDMA_CM_ID_STATS,
						NLM_F_MULTI);
			if (!id_stats)
				goto out;

			memset(id_stats, 0, sizeof *id_stats);
			id = &id_priv->id;
			id_stats->node_type = id->route.addr.dev_addr.dev_type;
			id_stats->port_num = id->port_num;
			id_stats->bound_dev_if =
				id->route.addr.dev_addr.bound_dev_if;

			if (ibnl_put_attr(skb, nlh,
					  rdma_addr_size(cma_src_addr(id_priv)),
					  cma_src_addr(id_priv),
					  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
				goto out;
			if (ibnl_put_attr(skb, nlh,
					  rdma_addr_size(cma_src_addr(id_priv)),
					  cma_dst_addr(id_priv),
					  RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
				goto out;

			id_stats->pid = id_priv->owner;
			id_stats->port_space = id->ps;
			id_stats->cm_state = id_priv->state;
			id_stats->qp_num = id_priv->qp_num;
			id_stats->qp_type = id->qp_type;

			i_id++;
		}

		cb->args[1] = 0;
		i_dev++;
	}

out:
	mutex_unlock(&lock);
	cb->args[0] = i_dev;
	cb->args[1] = i_id;

	return skb->len;
}

static const struct ibnl_client_cbs cma_cb_table[] = {
	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats,
				       .module = THIS_MODULE },
};

static int cma_init_net(struct net *net)
{
	struct cma_pernet *pernet = cma_pernet(net);

	idr_init(&pernet->tcp_ps);
	idr_init(&pernet->udp_ps);
	idr_init(&pernet->ipoib_ps);
	idr_init(&pernet->ib_ps);

	return 0;
}

static void cma_exit_net(struct net *net)
{
	struct cma_pernet *pernet = cma_pernet(net);

	idr_destroy(&pernet->tcp_ps);
	idr_destroy(&pernet->udp_ps);
	idr_destroy(&pernet->ipoib_ps);
	idr_destroy(&pernet->ib_ps);
}

static struct pernet_operations cma_pernet_operations = {
	.init = cma_init_net,
	.exit = cma_exit_net,
	.id = &cma_pernet_id,
	.size = sizeof(struct cma_pernet),
};

static int __init cma_init(void)
{
	int ret;

	cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
	if (!cma_wq)
		return -ENOMEM;

	ret = register_pernet_subsys(&cma_pernet_operations);
	if (ret)
		goto err_wq;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);
	register_netdevice_notifier(&cma_nb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;

	if (ibnl_add_client(RDMA_NL_RDMA_CM, ARRAY_SIZE(cma_cb_table),
			    cma_cb_table))
		pr_warn("RDMA CMA: failed to add netlink callback\n");
	cma_configfs_init();

	return 0;

err:
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
err_wq:
	destroy_workqueue(cma_wq);
	return ret;
}

static void __exit cma_cleanup(void)
{
	cma_configfs_exit();
	ibnl_remove_client(RDMA_NL_RDMA_CM);
	ib_unregister_client(&cma_client);
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	unregister_pernet_subsys(&cma_pernet_operations);
	destroy_workqueue(cma_wq);
}

module_init(cma_init);
module_exit(cma_cleanup);