Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

NVMe over Fabrics RDMA host code, at tag v4.13 (2010 lines, 52 kB)
/*
 * NVMe over Fabrics RDMA host code.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/nvme.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/nvme-rdma.h>

#include "nvme.h"
#include "fabrics.h"


#define NVME_RDMA_CONNECT_TIMEOUT_MS	3000		/* 3 seconds */

#define NVME_RDMA_MAX_SEGMENT_SIZE	0xffffff	/* 24-bit SGL field */

#define NVME_RDMA_MAX_SEGMENTS		256

#define NVME_RDMA_MAX_INLINE_SEGMENTS	1

/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_RDMA_NR_AEN_COMMANDS	1
#define NVME_RDMA_AQ_BLKMQ_DEPTH	\
	(NVME_AQ_DEPTH - NVME_RDMA_NR_AEN_COMMANDS)

struct nvme_rdma_device {
	struct ib_device	*dev;
	struct ib_pd		*pd;
	struct kref		ref;
	struct list_head	entry;
};

struct nvme_rdma_qe {
	struct ib_cqe		cqe;
	void			*data;
	u64			dma;
};

struct nvme_rdma_queue;
struct nvme_rdma_request {
	struct nvme_request	req;
	struct ib_mr		*mr;
	struct nvme_rdma_qe	sqe;
	struct ib_sge		sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
	u32			num_sge;
	int			nents;
	bool			inline_data;
	struct ib_reg_wr	reg_wr;
	struct ib_cqe		reg_cqe;
	struct nvme_rdma_queue	*queue;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

enum nvme_rdma_queue_flags {
	NVME_RDMA_Q_LIVE	= 0,
	NVME_RDMA_Q_DELETING	= 1,
};

struct nvme_rdma_queue {
	struct nvme_rdma_qe	*rsp_ring;
	atomic_t		sig_count;
	int			queue_size;
	size_t			cmnd_capsule_len;
	struct nvme_rdma_ctrl	*ctrl;
	struct nvme_rdma_device	*device;
	struct ib_cq		*ib_cq;
	struct ib_qp		*qp;

	unsigned long		flags;
	struct rdma_cm_id	*cm_id;
	int			cm_error;
	struct completion	cm_done;
};

struct nvme_rdma_ctrl {
	/* read only in the hot path */
	struct nvme_rdma_queue	*queues;

	/* other member variables */
	struct blk_mq_tag_set	tag_set;
	struct work_struct	delete_work;
	struct work_struct	err_work;

	struct nvme_rdma_qe	async_event_sqe;

	struct delayed_work	reconnect_work;

	struct list_head	list;

	struct blk_mq_tag_set	admin_tag_set;
	struct nvme_rdma_device	*device;

	u32			max_fr_pages;

	struct sockaddr_storage	addr;
	struct sockaddr_storage	src_addr;

	struct nvme_ctrl	ctrl;
};

static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_rdma_ctrl, ctrl);
}

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static LIST_HEAD(nvme_rdma_ctrl_list);
static DEFINE_MUTEX(nvme_rdma_ctrl_mutex);
/*
 * Disabling this option makes small I/O go faster, but is fundamentally
 * unsafe. With it turned off we will have to register a global rkey that
 * allows read and write access to all physical memory.
 */
static bool register_always = true;
module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
	 "Use memory registration even for contiguous memory regions");

static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event);
static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);

/* XXX: really should move to a generic header sooner or later.. */
static inline void put_unaligned_le24(u32 val, u8 *p)
{
	*p++ = val;
	*p++ = val >> 8;
	*p++ = val >> 16;
}

static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
{
	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
}

static void nvme_rdma_free_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
		size_t capsule_size, enum dma_data_direction dir)
{
	ib_dma_unmap_single(ibdev, qe->dma, capsule_size, dir);
	kfree(qe->data);
}

static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
		size_t capsule_size, enum dma_data_direction dir)
{
	qe->data = kzalloc(capsule_size, GFP_KERNEL);
	if (!qe->data)
		return -ENOMEM;

	qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
	if (ib_dma_mapping_error(ibdev, qe->dma)) {
		kfree(qe->data);
		return -ENOMEM;
	}

	return 0;
}

static void nvme_rdma_free_ring(struct ib_device *ibdev,
		struct nvme_rdma_qe *ring, size_t ib_queue_size,
		size_t capsule_size, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < ib_queue_size; i++)
		nvme_rdma_free_qe(ibdev, &ring[i], capsule_size, dir);
	kfree(ring);
}

static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev,
		size_t ib_queue_size, size_t capsule_size,
		enum dma_data_direction dir)
{
	struct nvme_rdma_qe *ring;
	int i;

	ring = kcalloc(ib_queue_size, sizeof(struct nvme_rdma_qe), GFP_KERNEL);
	if (!ring)
		return NULL;

	for (i = 0; i < ib_queue_size; i++) {
		if (nvme_rdma_alloc_qe(ibdev, &ring[i], capsule_size, dir))
			goto out_free_ring;
	}

	return ring;

out_free_ring:
	nvme_rdma_free_ring(ibdev, ring, i, capsule_size, dir);
	return NULL;
}

static void nvme_rdma_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);

}

static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
{
	wait_for_completion_interruptible_timeout(&queue->cm_done,
			msecs_to_jiffies(NVME_RDMA_CONNECT_TIMEOUT_MS) + 1);
	return queue->cm_error;
}

static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
{
	struct nvme_rdma_device *dev = queue->device;
	struct ib_qp_init_attr init_attr;
	int ret;

	memset(&init_attr, 0, sizeof(init_attr));
	init_attr.event_handler = nvme_rdma_qp_event;
	/* +1 for drain */
	init_attr.cap.max_send_wr = factor * queue->queue_size + 1;
	/* +1 for drain */
	init_attr.cap.max_recv_wr = queue->queue_size + 1;
	init_attr.cap.max_recv_sge = 1;
	init_attr.cap.max_send_sge = 1 + NVME_RDMA_MAX_INLINE_SEGMENTS;
	init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	init_attr.qp_type = IB_QPT_RC;
	init_attr.send_cq = queue->ib_cq;
	init_attr.recv_cq = queue->ib_cq;

	ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr);

	queue->qp = queue->cm_id->qp;
	return ret;
}
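/*
 * Worked example (illustrative numbers, not from the source): with the
 * send_wr_factor of 3 used by nvme_rdma_create_queue_ib() below
 * (MR + SEND + INV) and queue_size = 128, the QP is sized as
 * max_send_wr = 3 * 128 + 1 = 385 and max_recv_wr = 128 + 1 = 129,
 * where each "+1" leaves room for the drain work request.
 */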
static int nvme_rdma_reinit_request(void *data, struct request *rq)
{
	struct nvme_rdma_ctrl *ctrl = data;
	struct nvme_rdma_device *dev = ctrl->device;
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	int ret = 0;

	ib_dereg_mr(req->mr);

	req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
			ctrl->max_fr_pages);
	if (IS_ERR(req->mr)) {
		ret = PTR_ERR(req->mr);
		req->mr = NULL;
		goto out;
	}

	req->mr->need_inval = false;

out:
	return ret;
}

static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx)
{
	struct nvme_rdma_ctrl *ctrl = set->driver_data;
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
	struct nvme_rdma_device *dev = queue->device;

	if (req->mr)
		ib_dereg_mr(req->mr);

	nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
			DMA_TO_DEVICE);
}

static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_rdma_ctrl *ctrl = set->driver_data;
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
	struct nvme_rdma_device *dev = queue->device;
	struct ib_device *ibdev = dev->dev;
	int ret;

	ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command),
			DMA_TO_DEVICE);
	if (ret)
		return ret;

	req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
			ctrl->max_fr_pages);
	if (IS_ERR(req->mr)) {
		ret = PTR_ERR(req->mr);
		goto out_free_qe;
	}

	req->queue = queue;

	return 0;

out_free_qe:
	nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
			DMA_TO_DEVICE);
	return -ENOMEM;
}

static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_rdma_ctrl *ctrl = data;
	struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_rdma_ctrl *ctrl = data;
	struct nvme_rdma_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static void nvme_rdma_free_dev(struct kref *ref)
{
	struct nvme_rdma_device *ndev =
		container_of(ref, struct nvme_rdma_device, ref);

	mutex_lock(&device_list_mutex);
	list_del(&ndev->entry);
	mutex_unlock(&device_list_mutex);

	ib_dealloc_pd(ndev->pd);
	kfree(ndev);
}

static void nvme_rdma_dev_put(struct nvme_rdma_device *dev)
{
	kref_put(&dev->ref, nvme_rdma_free_dev);
}

static int nvme_rdma_dev_get(struct nvme_rdma_device *dev)
{
	return kref_get_unless_zero(&dev->ref);
}
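/*
 * Note: nvme_rdma_find_get_device() below is a lookup-or-create cache
 * keyed by node_guid, so e.g. two controllers reached through the same
 * HCA share one nvme_rdma_device (and its PD); kref_get_unless_zero()
 * makes the lookup safe against a concurrent final kref_put().
 */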
static struct nvme_rdma_device *
nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
	struct nvme_rdma_device *ndev;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->dev->node_guid == cm_id->device->node_guid &&
		    nvme_rdma_dev_get(ndev))
			goto out_unlock;
	}

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto out_err;

	ndev->dev = cm_id->device;
	kref_init(&ndev->ref);

	ndev->pd = ib_alloc_pd(ndev->dev,
		register_always ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
	if (IS_ERR(ndev->pd))
		goto out_free_dev;

	if (!(ndev->dev->attrs.device_cap_flags &
	      IB_DEVICE_MEM_MGT_EXTENSIONS)) {
		dev_err(&ndev->dev->dev,
			"Memory registrations not supported.\n");
		goto out_free_pd;
	}

	list_add(&ndev->entry, &device_list);
out_unlock:
	mutex_unlock(&device_list_mutex);
	return ndev;

out_free_pd:
	ib_dealloc_pd(ndev->pd);
out_free_dev:
	kfree(ndev);
out_err:
	mutex_unlock(&device_list_mutex);
	return NULL;
}

static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
{
	struct nvme_rdma_device *dev;
	struct ib_device *ibdev;

	dev = queue->device;
	ibdev = dev->dev;
	rdma_destroy_qp(queue->cm_id);
	ib_free_cq(queue->ib_cq);

	nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
			sizeof(struct nvme_completion), DMA_FROM_DEVICE);

	nvme_rdma_dev_put(dev);
}
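/*
 * Worked example (illustrative): in nvme_rdma_create_queue_ib() below,
 * I/O queue completions are spread over the device's completion vectors
 * via comp_vector = idx % num_comp_vectors. With 4 vectors, I/O queues
 * 1..8 land on vectors 1, 2, 3, 0, 1, 2, 3, 0, while the admin queue
 * (idx 0) always uses vector 0.
 */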
static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
{
	struct ib_device *ibdev;
	const int send_wr_factor = 3;			/* MR, SEND, INV */
	const int cq_factor = send_wr_factor + 1;	/* + RECV */
	int comp_vector, idx = nvme_rdma_queue_idx(queue);
	int ret;

	queue->device = nvme_rdma_find_get_device(queue->cm_id);
	if (!queue->device) {
		dev_err(queue->cm_id->device->dev.parent,
			"no client data found!\n");
		return -ECONNREFUSED;
	}
	ibdev = queue->device->dev;

	/*
	 * The admin queue is barely used once the controller is live, so don't
	 * bother to spread it out.
	 */
	if (idx == 0)
		comp_vector = 0;
	else
		comp_vector = idx % ibdev->num_comp_vectors;


	/* +1 for ib_stop_cq */
	queue->ib_cq = ib_alloc_cq(ibdev, queue,
				cq_factor * queue->queue_size + 1,
				comp_vector, IB_POLL_SOFTIRQ);
	if (IS_ERR(queue->ib_cq)) {
		ret = PTR_ERR(queue->ib_cq);
		goto out_put_dev;
	}

	ret = nvme_rdma_create_qp(queue, send_wr_factor);
	if (ret)
		goto out_destroy_ib_cq;

	queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size,
			sizeof(struct nvme_completion), DMA_FROM_DEVICE);
	if (!queue->rsp_ring) {
		ret = -ENOMEM;
		goto out_destroy_qp;
	}

	return 0;

out_destroy_qp:
	ib_destroy_qp(queue->qp);
out_destroy_ib_cq:
	ib_free_cq(queue->ib_cq);
out_put_dev:
	nvme_rdma_dev_put(queue->device);
	return ret;
}

static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
		int idx, size_t queue_size)
{
	struct nvme_rdma_queue *queue;
	struct sockaddr *src_addr = NULL;
	int ret;

	queue = &ctrl->queues[idx];
	queue->ctrl = ctrl;
	init_completion(&queue->cm_done);

	if (idx > 0)
		queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command);

	queue->queue_size = queue_size;
	atomic_set(&queue->sig_count, 0);

	queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue,
			RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(queue->cm_id)) {
		dev_info(ctrl->ctrl.device,
			"failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id));
		return PTR_ERR(queue->cm_id);
	}

	if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
		src_addr = (struct sockaddr *)&ctrl->src_addr;

	queue->cm_error = -ETIMEDOUT;
	ret = rdma_resolve_addr(queue->cm_id, src_addr,
			(struct sockaddr *)&ctrl->addr,
			NVME_RDMA_CONNECT_TIMEOUT_MS);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"rdma_resolve_addr failed (%d).\n", ret);
		goto out_destroy_cm_id;
	}

	ret = nvme_rdma_wait_for_cm(queue);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"rdma_resolve_addr wait failed (%d).\n", ret);
		goto out_destroy_cm_id;
	}

	clear_bit(NVME_RDMA_Q_DELETING, &queue->flags);

	return 0;

out_destroy_cm_id:
	rdma_destroy_id(queue->cm_id);
	return ret;
}

static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
{
	rdma_disconnect(queue->cm_id);
	ib_drain_qp(queue->qp);
}

static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
{
	nvme_rdma_destroy_queue_ib(queue);
	rdma_destroy_id(queue->cm_id);
}

static void nvme_rdma_stop_and_free_queue(struct nvme_rdma_queue *queue)
{
	if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags))
		return;
	nvme_rdma_stop_queue(queue);
	nvme_rdma_free_queue(queue);
}

static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_rdma_stop_and_free_queue(&ctrl->queues[i]);
}
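/*
 * Note: queues[0] is always the admin queue and queues[1..queue_count - 1]
 * are the I/O queues, which is why the connect/free loops here start at
 * i = 1. For example, with nr_io_queues = 4, queue_count is 5 and the
 * I/O queues occupy indices 1 through 4.
 */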
static int nvme_rdma_connect_io_queues(struct nvme_rdma_ctrl *ctrl)
{
	int i, ret = 0;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret) {
			dev_info(ctrl->ctrl.device,
				"failed to connect i/o queue: %d\n", ret);
			goto out_free_queues;
		}
		set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);
	}

	return 0;

out_free_queues:
	nvme_rdma_free_io_queues(ctrl);
	return ret;
}

static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int i, ret;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret)
		return ret;

	ctrl->ctrl.queue_count = nr_io_queues + 1;
	if (ctrl->ctrl.queue_count < 2)
		return 0;

	dev_info(ctrl->ctrl.device,
		"creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvme_rdma_init_queue(ctrl, i,
				ctrl->ctrl.opts->queue_size);
		if (ret) {
			dev_info(ctrl->ctrl.device,
				"failed to initialize i/o queue: %d\n", ret);
			goto out_free_queues;
		}
	}

	return 0;

out_free_queues:
	for (i--; i >= 1; i--)
		nvme_rdma_stop_and_free_queue(&ctrl->queues[i]);

	return ret;
}

static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl)
{
	nvme_rdma_free_qe(ctrl->queues[0].device->dev, &ctrl->async_event_sqe,
			sizeof(struct nvme_command), DMA_TO_DEVICE);
	nvme_rdma_stop_and_free_queue(&ctrl->queues[0]);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
	nvme_rdma_dev_put(ctrl->device);
}

static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_rdma_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_rdma_ctrl_mutex);

	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
{
	/* If we are resetting/deleting then do nothing */
	if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) {
		WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
			ctrl->ctrl.state == NVME_CTRL_LIVE);
		return;
	}

	if (nvmf_should_reconnect(&ctrl->ctrl)) {
		dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n",
			ctrl->ctrl.opts->reconnect_delay);
		queue_delayed_work(nvme_wq, &ctrl->reconnect_work,
				ctrl->ctrl.opts->reconnect_delay * HZ);
	} else {
		dev_info(ctrl->ctrl.device, "Removing controller...\n");
		queue_work(nvme_wq, &ctrl->delete_work);
	}
}
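/*
 * Worked example (illustrative, assuming the fabrics defaults of
 * reconnect_delay = 10s and ctrl_loss_tmo = 600s): nvmf_should_reconnect()
 * keeps returning true for up to 600 / 10 = 60 attempts before the
 * controller is removed instead of rescheduled.
 */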
static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
{
	struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_rdma_ctrl, reconnect_work);
	bool changed;
	int ret;

	++ctrl->ctrl.nr_reconnects;

	if (ctrl->ctrl.queue_count > 1) {
		nvme_rdma_free_io_queues(ctrl);

		ret = blk_mq_reinit_tagset(&ctrl->tag_set);
		if (ret)
			goto requeue;
	}

	nvme_rdma_stop_and_free_queue(&ctrl->queues[0]);

	ret = blk_mq_reinit_tagset(&ctrl->admin_tag_set);
	if (ret)
		goto requeue;

	ret = nvme_rdma_init_queue(ctrl, 0, NVME_AQ_DEPTH);
	if (ret)
		goto requeue;

	ret = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (ret)
		goto requeue;

	set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);

	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
	if (ret)
		goto requeue;

	if (ctrl->ctrl.queue_count > 1) {
		ret = nvme_rdma_init_io_queues(ctrl);
		if (ret)
			goto requeue;

		ret = nvme_rdma_connect_io_queues(ctrl);
		if (ret)
			goto requeue;

		blk_mq_update_nr_hw_queues(&ctrl->tag_set,
				ctrl->ctrl.queue_count - 1);
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);
	ctrl->ctrl.nr_reconnects = 0;

	nvme_start_ctrl(&ctrl->ctrl);

	dev_info(ctrl->ctrl.device, "Successfully reconnected\n");

	return;

requeue:
	dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
			ctrl->ctrl.nr_reconnects);
	nvme_rdma_reconnect_or_remove(ctrl);
}

static void nvme_rdma_error_recovery_work(struct work_struct *work)
{
	struct nvme_rdma_ctrl *ctrl = container_of(work,
			struct nvme_rdma_ctrl, err_work);
	int i;

	nvme_stop_ctrl(&ctrl->ctrl);

	for (i = 0; i < ctrl->ctrl.queue_count; i++)
		clear_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);

	if (ctrl->ctrl.queue_count > 1)
		nvme_stop_queues(&ctrl->ctrl);
	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);

	/* We must fast-fail or requeue all our inflight requests */
	if (ctrl->ctrl.queue_count > 1)
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);

	/*
	 * queues are not alive anymore, so restart the queues to fail fast
	 * new IO
	 */
	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	nvme_start_queues(&ctrl->ctrl);

	nvme_rdma_reconnect_or_remove(ctrl);
}

static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING))
		return;

	queue_work(nvme_wq, &ctrl->err_work);
}

static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
		const char *op)
{
	struct nvme_rdma_queue *queue = cq->cq_context;
	struct nvme_rdma_ctrl *ctrl = queue->ctrl;

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		dev_info(ctrl->ctrl.device,
			"%s for CQE 0x%p failed with status %s (%d)\n",
			op, wc->wr_cqe,
			ib_wc_status_msg(wc->status), wc->status);
	nvme_rdma_error_recovery(ctrl);
}

static void nvme_rdma_memreg_done(struct ib_cq *cq, struct ib_wc *wc)
{
	if (unlikely(wc->status != IB_WC_SUCCESS))
		nvme_rdma_wr_error(cq, wc, "MEMREG");
}

static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
{
	if (unlikely(wc->status != IB_WC_SUCCESS))
		nvme_rdma_wr_error(cq, wc, "LOCAL_INV");
}

static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
		struct nvme_rdma_request *req)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = req->mr->rkey,
	};

	req->reg_cqe.done = nvme_rdma_inv_rkey_done;
	wr.wr_cqe = &req->reg_cqe;

	return ib_post_send(queue->qp, &wr, &bad_wr);
}
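/*
 * Note: the MR lifecycle here is REG_MR (nvme_rdma_map_sg_fr) -> data
 * transfer -> invalidate. If the target did not send the rkey back with a
 * Send-with-Invalidate (checked in nvme_rdma_process_nvme_rsp), need_inval
 * is still true and nvme_rdma_unmap_data() below posts the IB_WR_LOCAL_INV
 * itself before the rkey can be reused.
 */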
static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
		struct request *rq)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
	struct nvme_rdma_device *dev = queue->device;
	struct ib_device *ibdev = dev->dev;
	int res;

	if (!blk_rq_bytes(rq))
		return;

	if (req->mr->need_inval) {
		res = nvme_rdma_inv_rkey(queue, req);
		if (res < 0) {
			dev_err(ctrl->ctrl.device,
				"Queueing INV WR for rkey %#x failed (%d)\n",
				req->mr->rkey, res);
			nvme_rdma_error_recovery(queue->ctrl);
		}
	}

	ib_dma_unmap_sg(ibdev, req->sg_table.sgl,
			req->nents, rq_data_dir(rq) ==
				    WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

	nvme_cleanup_cmd(rq);
	sg_free_table_chained(&req->sg_table, true);
}

static int nvme_rdma_set_sg_null(struct nvme_command *c)
{
	struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;

	sg->addr = 0;
	put_unaligned_le24(0, sg->length);
	put_unaligned_le32(0, sg->key);
	sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
	return 0;
}

static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
		struct nvme_rdma_request *req, struct nvme_command *c)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	req->sge[1].addr = sg_dma_address(req->sg_table.sgl);
	req->sge[1].length = sg_dma_len(req->sg_table.sgl);
	req->sge[1].lkey = queue->device->pd->local_dma_lkey;

	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
	sg->length = cpu_to_le32(sg_dma_len(req->sg_table.sgl));
	sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;

	req->inline_data = true;
	req->num_sge++;
	return 0;
}

static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue,
		struct nvme_rdma_request *req, struct nvme_command *c)
{
	struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;

	sg->addr = cpu_to_le64(sg_dma_address(req->sg_table.sgl));
	put_unaligned_le24(sg_dma_len(req->sg_table.sgl), sg->length);
	put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key);
	sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
	return 0;
}
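/*
 * Worked example (illustrative): the keyed SGL descriptor carries a
 * 24-bit little-endian length, hence the open-coded put_unaligned_le24()
 * above. For a 0x123456-byte mapping, sg->length holds the bytes
 * 0x56 0x34 0x12, and the 32-bit key field holds the rkey (or the unsafe
 * global rkey in the unregistered case).
 */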
static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
		struct nvme_rdma_request *req, struct nvme_command *c,
		int count)
{
	struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
	int nr;

	/*
	 * Align the MR to a 4K page size to match the ctrl page size and
	 * the block virtual boundary.
	 */
	nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, SZ_4K);
	if (nr < count) {
		if (nr < 0)
			return nr;
		return -EINVAL;
	}

	ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));

	req->reg_cqe.done = nvme_rdma_memreg_done;
	memset(&req->reg_wr, 0, sizeof(req->reg_wr));
	req->reg_wr.wr.opcode = IB_WR_REG_MR;
	req->reg_wr.wr.wr_cqe = &req->reg_cqe;
	req->reg_wr.wr.num_sge = 0;
	req->reg_wr.mr = req->mr;
	req->reg_wr.key = req->mr->rkey;
	req->reg_wr.access = IB_ACCESS_LOCAL_WRITE |
			     IB_ACCESS_REMOTE_READ |
			     IB_ACCESS_REMOTE_WRITE;

	req->mr->need_inval = true;

	sg->addr = cpu_to_le64(req->mr->iova);
	put_unaligned_le24(req->mr->length, sg->length);
	put_unaligned_le32(req->mr->rkey, sg->key);
	sg->type = (NVME_KEY_SGL_FMT_DATA_DESC << 4) |
			NVME_SGL_FMT_INVALIDATE;

	return 0;
}

static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
		struct request *rq, struct nvme_command *c)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_device *dev = queue->device;
	struct ib_device *ibdev = dev->dev;
	int count, ret;

	req->num_sge = 1;
	req->inline_data = false;
	req->mr->need_inval = false;

	c->common.flags |= NVME_CMD_SGL_METABUF;

	if (!blk_rq_bytes(rq))
		return nvme_rdma_set_sg_null(c);

	req->sg_table.sgl = req->first_sgl;
	ret = sg_alloc_table_chained(&req->sg_table,
			blk_rq_nr_phys_segments(rq), req->sg_table.sgl);
	if (ret)
		return -ENOMEM;

	req->nents = blk_rq_map_sg(rq->q, rq, req->sg_table.sgl);

	count = ib_dma_map_sg(ibdev, req->sg_table.sgl, req->nents,
		    rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (unlikely(count <= 0)) {
		sg_free_table_chained(&req->sg_table, true);
		return -EIO;
	}

	if (count == 1) {
		if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
		    blk_rq_payload_bytes(rq) <=
				nvme_rdma_inline_data_size(queue))
			return nvme_rdma_map_sg_inline(queue, req, c);

		if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
			return nvme_rdma_map_sg_single(queue, req, c);
	}

	return nvme_rdma_map_sg_fr(queue, req, c, count);
}

static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	if (unlikely(wc->status != IB_WC_SUCCESS))
		nvme_rdma_wr_error(cq, wc, "SEND");
}
/*
 * We want to signal completion at least every queue depth/2. This returns the
 * largest power of two that is not above half of (queue size + 1) to optimize
 * (avoid divisions).
 */
static inline bool nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue)
{
	int limit = 1 << ilog2((queue->queue_size + 1) / 2);

	return (atomic_inc_return(&queue->sig_count) & (limit - 1)) == 0;
}
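/*
 * Worked example (illustrative): for queue_size = 128, (128 + 1) / 2 = 64
 * and 1 << ilog2(64) = 64, so every 64th work request posted on the queue
 * is marked IB_SEND_SIGNALED. For queue_size = 100, (100 + 1) / 2 = 50 and
 * the limit rounds down to the power of two 32.
 */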
static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
		struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
		struct ib_send_wr *first, bool flush)
{
	struct ib_send_wr wr, *bad_wr;
	int ret;

	sge->addr   = qe->dma;
	sge->length = sizeof(struct nvme_command);
	sge->lkey   = queue->device->pd->local_dma_lkey;

	qe->cqe.done = nvme_rdma_send_done;

	wr.next       = NULL;
	wr.wr_cqe     = &qe->cqe;
	wr.sg_list    = sge;
	wr.num_sge    = num_sge;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = 0;

	/*
	 * Unsignalled send completions are another giant disaster in the
	 * IB Verbs spec: if we don't regularly post signalled sends
	 * the send queue will fill up and only a QP reset will rescue us.
	 * Would have been way too obvious to handle this in hardware or
	 * at least the RDMA stack..
	 *
	 * Always signal the flushes. The magic request used for the flush
	 * sequencer is not allocated in our driver's tagset and it's
	 * triggered to be freed by blk_cleanup_queue(). So we need to
	 * always mark it as signaled to ensure that the "wr_cqe", which is
	 * embedded in request's payload, is not freed when __ib_process_cq()
	 * calls wr_cqe->done().
	 */
	if (nvme_rdma_queue_sig_limit(queue) || flush)
		wr.send_flags |= IB_SEND_SIGNALED;

	if (first)
		first->next = &wr;
	else
		first = &wr;

	ret = ib_post_send(queue->qp, first, &bad_wr);
	if (ret) {
		dev_err(queue->ctrl->ctrl.device,
			"%s failed with error code %d\n", __func__, ret);
	}
	return ret;
}

static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue,
		struct nvme_rdma_qe *qe)
{
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;
	int ret;

	list.addr   = qe->dma;
	list.length = sizeof(struct nvme_completion);
	list.lkey   = queue->device->pd->local_dma_lkey;

	qe->cqe.done = nvme_rdma_recv_done;

	wr.next     = NULL;
	wr.wr_cqe   = &qe->cqe;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	ret = ib_post_recv(queue->qp, &wr, &bad_wr);
	if (ret) {
		dev_err(queue->ctrl->ctrl.device,
			"%s failed with error code %d\n", __func__, ret);
	}
	return ret;
}

static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
{
	u32 queue_idx = nvme_rdma_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
{
	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
	struct nvme_rdma_queue *queue = &ctrl->queues[0];
	struct ib_device *dev = queue->device->dev;
	struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe;
	struct nvme_command *cmd = sqe->data;
	struct ib_sge sge;
	int ret;

	if (WARN_ON_ONCE(aer_idx != 0))
		return;

	ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);

	memset(cmd, 0, sizeof(*cmd));
	cmd->common.opcode = nvme_admin_async_event;
	cmd->common.command_id = NVME_RDMA_AQ_BLKMQ_DEPTH;
	cmd->common.flags |= NVME_CMD_SGL_METABUF;
	nvme_rdma_set_sg_null(cmd);

	ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd),
			DMA_TO_DEVICE);

	ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL, false);
	WARN_ON_ONCE(ret);
}
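/*
 * Note (assuming NVME_AQ_DEPTH is 32, as in this kernel): the AEN command
 * above is sent with command_id = NVME_RDMA_AQ_BLKMQ_DEPTH = 31, one past
 * the last tag the admin blk-mq tagset can hand out (0..30). That is how
 * __nvme_rdma_recv_done() below tells an AEN completion apart from a
 * normal request completion.
 */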
static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
		struct nvme_completion *cqe, struct ib_wc *wc, int tag)
{
	struct request *rq;
	struct nvme_rdma_request *req;
	int ret = 0;

	rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"tag 0x%x on QP %#x not found\n",
			cqe->command_id, queue->qp->qp_num);
		nvme_rdma_error_recovery(queue->ctrl);
		return ret;
	}
	req = blk_mq_rq_to_pdu(rq);

	if (rq->tag == tag)
		ret = 1;

	if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
	    wc->ex.invalidate_rkey == req->mr->rkey)
		req->mr->need_inval = false;

	nvme_end_request(rq, cqe->status, cqe->result);
	return ret;
}

static int __nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc, int tag)
{
	struct nvme_rdma_qe *qe =
		container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
	struct nvme_rdma_queue *queue = cq->cq_context;
	struct ib_device *ibdev = queue->device->dev;
	struct nvme_completion *cqe = qe->data;
	const size_t len = sizeof(struct nvme_completion);
	int ret = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvme_rdma_wr_error(cq, wc, "RECV");
		return 0;
	}

	ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE);
	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts. We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
	    cqe->command_id >= NVME_RDMA_AQ_BLKMQ_DEPTH))
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	else
		ret = nvme_rdma_process_nvme_rsp(queue, cqe, wc, tag);
	ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE);

	nvme_rdma_post_recv(queue, qe);
	return ret;
}

static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	__nvme_rdma_recv_done(cq, wc, -1);
}

static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
{
	int ret, i;

	for (i = 0; i < queue->queue_size; i++) {
		ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]);
		if (ret)
			goto out_destroy_queue_ib;
	}

	return 0;

out_destroy_queue_ib:
	nvme_rdma_destroy_queue_ib(queue);
	return ret;
}

static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
		struct rdma_cm_event *ev)
{
	struct rdma_cm_id *cm_id = queue->cm_id;
	int status = ev->status;
	const char *rej_msg;
	const struct nvme_rdma_cm_rej *rej_data;
	u8 rej_data_len;

	rej_msg = rdma_reject_msg(cm_id, status);
	rej_data = rdma_consumer_reject_data(cm_id, ev, &rej_data_len);

	if (rej_data && rej_data_len >= sizeof(u16)) {
		u16 sts = le16_to_cpu(rej_data->sts);

		dev_err(queue->ctrl->ctrl.device,
		      "Connect rejected: status %d (%s) nvme status %d (%s).\n",
		      status, rej_msg, sts, nvme_rdma_cm_msg(sts));
	} else {
		dev_err(queue->ctrl->ctrl.device,
			"Connect rejected: status %d (%s).\n", status, rej_msg);
	}

	return -ECONNRESET;
}

static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
{
	int ret;

	ret = nvme_rdma_create_queue_ib(queue);
	if (ret)
		return ret;

	ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CONNECT_TIMEOUT_MS);
	if (ret) {
		dev_err(queue->ctrl->ctrl.device,
			"rdma_resolve_route failed (%d).\n",
			queue->cm_error);
		goto out_destroy_queue;
	}

	return 0;

out_destroy_queue:
	nvme_rdma_destroy_queue_ib(queue);
	return ret;
}
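/*
 * Worked example (illustrative): nvme_rdma_route_resolved() below encodes
 * the queue sizes into the CM private data. For an I/O queue created with
 * queue_size = 128 (so ctrl->ctrl.sqsize = 127), it sends hrqsize = 128
 * and hsqsize = 127, matching the "hrqsize is sqsize + 1" reading of the
 * fabrics spec quoted in the comment there.
 */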
static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
{
	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
	struct rdma_conn_param param = { };
	struct nvme_rdma_cm_req priv = { };
	int ret;

	param.qp_num = queue->qp->qp_num;
	param.flow_control = 1;

	param.responder_resources = queue->device->dev->attrs.max_qp_rd_atom;
	/* maximum retry count */
	param.retry_count = 7;
	param.rnr_retry_count = 7;
	param.private_data = &priv;
	param.private_data_len = sizeof(priv);

	priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue));
	/*
	 * set the admin queue depth to the minimum size
	 * specified by the Fabrics standard.
	 */
	if (priv.qid == 0) {
		priv.hrqsize = cpu_to_le16(NVME_AQ_DEPTH);
		priv.hsqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);
	} else {
		/*
		 * current interpretation of the fabrics spec
		 * is at minimum you make hrqsize sqsize+1, or a
		 * 1's based representation of sqsize.
		 */
		priv.hrqsize = cpu_to_le16(queue->queue_size);
		priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
	}

	ret = rdma_connect(queue->cm_id, &param);
	if (ret) {
		dev_err(ctrl->ctrl.device,
			"rdma_connect failed (%d).\n", ret);
		goto out_destroy_queue_ib;
	}

	return 0;

out_destroy_queue_ib:
	nvme_rdma_destroy_queue_ib(queue);
	return ret;
}

static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *ev)
{
	struct nvme_rdma_queue *queue = cm_id->context;
	int cm_error = 0;

	dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n",
		rdma_event_msg(ev->event), ev->event,
		ev->status, cm_id);

	switch (ev->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		cm_error = nvme_rdma_addr_resolved(queue);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		cm_error = nvme_rdma_route_resolved(queue);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		queue->cm_error = nvme_rdma_conn_established(queue);
		/* complete cm_done regardless of success/failure */
		complete(&queue->cm_done);
		return 0;
	case RDMA_CM_EVENT_REJECTED:
		nvme_rdma_destroy_queue_ib(queue);
		cm_error = nvme_rdma_conn_rejected(queue, ev);
		break;
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
		nvme_rdma_destroy_queue_ib(queue);
	case RDMA_CM_EVENT_ADDR_ERROR:
		dev_dbg(queue->ctrl->ctrl.device,
			"CM error event %d\n", ev->event);
		cm_error = -ECONNRESET;
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		dev_dbg(queue->ctrl->ctrl.device,
			"disconnect received - connection closed\n");
		nvme_rdma_error_recovery(queue->ctrl);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		/* device removal is handled via the ib_client API */
		break;
	default:
		dev_err(queue->ctrl->ctrl.device,
			"Unexpected RDMA CM event (%d)\n", ev->event);
		nvme_rdma_error_recovery(queue->ctrl);
		break;
	}

	if (cm_error) {
		queue->cm_error = cm_error;
		complete(&queue->cm_done);
	}

	return 0;
}

static enum blk_eh_timer_return
nvme_rdma_timeout(struct request *rq, bool reserved)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);

	/* queue error recovery */
	nvme_rdma_error_recovery(req->queue->ctrl);

	/* fail with DNR on cmd timeout */
	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	return BLK_EH_HANDLED;
}
/*
 * We cannot accept any other command until the Connect command has completed.
 */
static inline blk_status_t
nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
{
	if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
		struct nvme_command *cmd = nvme_req(rq)->cmd;

		if (!blk_rq_is_passthrough(rq) ||
		    cmd->common.opcode != nvme_fabrics_command ||
		    cmd->fabrics.fctype != nvme_fabrics_type_connect) {
			/*
			 * reconnecting state means transport disruption, which
			 * can take a long time and even might fail permanently,
			 * so we can't let incoming I/O be requeued forever.
			 * fail it fast to allow upper layers a chance to
			 * failover.
			 */
			if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING)
				return BLK_STS_IOERR;
			return BLK_STS_RESOURCE; /* try again later */
		}
	}

	return 0;
}

static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_rdma_queue *queue = hctx->driver_data;
	struct request *rq = bd->rq;
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_qe *sqe = &req->sqe;
	struct nvme_command *c = sqe->data;
	bool flush = false;
	struct ib_device *dev;
	blk_status_t ret;
	int err;

	WARN_ON_ONCE(rq->tag < 0);

	ret = nvme_rdma_queue_is_ready(queue, rq);
	if (unlikely(ret))
		return ret;

	dev = queue->device->dev;
	ib_dma_sync_single_for_cpu(dev, sqe->dma,
			sizeof(struct nvme_command), DMA_TO_DEVICE);

	ret = nvme_setup_cmd(ns, rq, c);
	if (ret)
		return ret;

	blk_mq_start_request(rq);

	err = nvme_rdma_map_data(queue, rq, c);
	if (err < 0) {
		dev_err(queue->ctrl->ctrl.device,
			"Failed to map data (%d)\n", err);
		nvme_cleanup_cmd(rq);
		goto err;
	}

	ib_dma_sync_single_for_device(dev, sqe->dma,
			sizeof(struct nvme_command), DMA_TO_DEVICE);

	if (req_op(rq) == REQ_OP_FLUSH)
		flush = true;
	err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
			req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
	if (err) {
		nvme_rdma_unmap_data(queue, rq);
		goto err;
	}

	return BLK_STS_OK;
err:
	if (err == -ENOMEM || err == -EAGAIN)
		return BLK_STS_RESOURCE;
	return BLK_STS_IOERR;
}

static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
	struct nvme_rdma_queue *queue = hctx->driver_data;
	struct ib_cq *cq = queue->ib_cq;
	struct ib_wc wc;
	int found = 0;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		struct ib_cqe *cqe = wc.wr_cqe;

		if (cqe) {
			if (cqe->done == nvme_rdma_recv_done)
				found |= __nvme_rdma_recv_done(cq, &wc, tag);
			else
				cqe->done(cq, &wc);
		}
	}

	return found;
}

static void nvme_rdma_complete_rq(struct request *rq)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);

	nvme_rdma_unmap_data(req->queue, rq);
	nvme_complete_rq(rq);
}

static const struct blk_mq_ops nvme_rdma_mq_ops = {
	.queue_rq	= nvme_rdma_queue_rq,
	.complete	= nvme_rdma_complete_rq,
	.init_request	= nvme_rdma_init_request,
	.exit_request	= nvme_rdma_exit_request,
	.reinit_request	= nvme_rdma_reinit_request,
	.init_hctx	= nvme_rdma_init_hctx,
	.poll		= nvme_rdma_poll,
	.timeout	= nvme_rdma_timeout,
};

static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
	.queue_rq	= nvme_rdma_queue_rq,
	.complete	= nvme_rdma_complete_rq,
	.init_request	= nvme_rdma_init_request,
	.exit_request	= nvme_rdma_exit_request,
	.reinit_request	= nvme_rdma_reinit_request,
	.init_hctx	= nvme_rdma_init_admin_hctx,
	.timeout	= nvme_rdma_timeout,
};
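/*
 * Note: both tag sets below size each request PDU as
 * sizeof(struct nvme_rdma_request) + SG_CHUNK_SIZE * sizeof(struct
 * scatterlist), which backs the zero-length first_sgl[] array at the end
 * of struct nvme_rdma_request so small requests never need a separate
 * scatterlist allocation.
 */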
static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
{
	int error;

	error = nvme_rdma_init_queue(ctrl, 0, NVME_AQ_DEPTH);
	if (error)
		return error;

	ctrl->device = ctrl->queues[0].device;

	/*
	 * We need a reference on the device as long as the tag_set is alive,
	 * as the MRs in the request structures need a valid ib_device.
	 */
	error = -EINVAL;
	if (!nvme_rdma_dev_get(ctrl->device))
		goto out_free_queue;

	ctrl->max_fr_pages = min_t(u32, NVME_RDMA_MAX_SEGMENTS,
		ctrl->device->dev->attrs.max_fast_reg_page_list_len);

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_rdma_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_RDMA_AQ_BLKMQ_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_rdma_request) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_put_dev;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);

	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP,
			&ctrl->ctrl.cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev,
			&ctrl->async_event_sqe, sizeof(struct nvme_command),
			DMA_TO_DEVICE);
	if (error)
		goto out_cleanup_queue;

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	/* disconnect and drain the queue before freeing the tagset */
	nvme_rdma_stop_queue(&ctrl->queues[0]);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_put_dev:
	nvme_rdma_dev_put(ctrl->device);
out_free_queue:
	nvme_rdma_free_queue(&ctrl->queues[0]);
	return error;
}
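/*
 * Worked example (illustrative) for the max_hw_sectors computation above:
 * with max_fr_pages = 256 (the NVME_RDMA_MAX_SEGMENTS cap),
 * (256 - 1) << (ilog2(SZ_4K) - 9) = 255 << 3 = 2040 512-byte sectors,
 * i.e. just under 1 MiB per command, with one page of slack left for an
 * unaligned first page.
 */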
static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
{
	cancel_work_sync(&ctrl->err_work);
	cancel_delayed_work_sync(&ctrl->reconnect_work);

	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		nvme_rdma_free_io_queues(ctrl);
	}

	if (test_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags))
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	nvme_rdma_destroy_admin_queue(ctrl);
}

static void __nvme_rdma_remove_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
{
	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_remove_namespaces(&ctrl->ctrl);
	if (shutdown)
		nvme_rdma_shutdown_ctrl(ctrl);

	nvme_uninit_ctrl(&ctrl->ctrl);
	if (ctrl->ctrl.tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
		nvme_rdma_dev_put(ctrl->device);
	}

	nvme_put_ctrl(&ctrl->ctrl);
}

static void nvme_rdma_del_ctrl_work(struct work_struct *work)
{
	struct nvme_rdma_ctrl *ctrl = container_of(work,
				struct nvme_rdma_ctrl, delete_work);

	__nvme_rdma_remove_ctrl(ctrl, true);
}

static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
		return -EBUSY;

	if (!queue_work(nvme_wq, &ctrl->delete_work))
		return -EBUSY;

	return 0;
}

static int nvme_rdma_del_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
	int ret = 0;

	/*
	 * Keep a reference until all work is flushed since
	 * __nvme_rdma_del_ctrl can free the ctrl mem
	 */
	if (!kref_get_unless_zero(&ctrl->ctrl.kref))
		return -EBUSY;
	ret = __nvme_rdma_del_ctrl(ctrl);
	if (!ret)
		flush_work(&ctrl->delete_work);
	nvme_put_ctrl(&ctrl->ctrl);
	return ret;
}

static void nvme_rdma_remove_ctrl_work(struct work_struct *work)
{
	struct nvme_rdma_ctrl *ctrl = container_of(work,
				struct nvme_rdma_ctrl, delete_work);

	__nvme_rdma_remove_ctrl(ctrl, false);
}

static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_rdma_ctrl *ctrl =
		container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work);
	int ret;
	bool changed;

	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_rdma_shutdown_ctrl(ctrl);

	ret = nvme_rdma_configure_admin_queue(ctrl);
	if (ret) {
		/* ctrl is already shutdown, just remove the ctrl */
		INIT_WORK(&ctrl->delete_work, nvme_rdma_remove_ctrl_work);
		goto del_dead_ctrl;
	}

	if (ctrl->ctrl.queue_count > 1) {
		ret = blk_mq_reinit_tagset(&ctrl->tag_set);
		if (ret)
			goto del_dead_ctrl;

		ret = nvme_rdma_init_io_queues(ctrl);
		if (ret)
			goto del_dead_ctrl;

		ret = nvme_rdma_connect_io_queues(ctrl);
		if (ret)
			goto del_dead_ctrl;

		blk_mq_update_nr_hw_queues(&ctrl->tag_set,
				ctrl->ctrl.queue_count - 1);
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	nvme_start_ctrl(&ctrl->ctrl);

	return;

del_dead_ctrl:
	/* Deleting this dead controller... */
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	WARN_ON(!queue_work(nvme_wq, &ctrl->delete_work));
}

static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
	.name			= "rdma",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_rdma_free_ctrl,
	.submit_async_event	= nvme_rdma_submit_async_event,
	.delete_ctrl		= nvme_rdma_del_ctrl,
	.get_address		= nvmf_get_address,
};
static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
{
	int ret;

	ret = nvme_rdma_init_io_queues(ctrl);
	if (ret)
		return ret;

	/*
	 * We need a reference on the device as long as the tag_set is alive,
	 * as the MRs in the request structures need a valid ib_device.
	 */
	ret = -EINVAL;
	if (!nvme_rdma_dev_get(ctrl->device))
		goto out_free_io_queues;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_rdma_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_rdma_request) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_put_dev;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tag_set;
	}

	ret = nvme_rdma_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_put_dev:
	nvme_rdma_dev_put(ctrl->device);
out_free_io_queues:
	nvme_rdma_free_io_queues(ctrl);
	return ret;
}
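/*
 * Usage sketch (illustrative, with placeholder address and NQN):
 * nvme_rdma_create_ctrl() below is reached through the fabrics layer,
 * e.g. via nvme-cli:
 *
 *	nvme connect -t rdma -a 192.168.1.10 -s 4420 -n <subsystem NQN>
 *
 * where -a/-s map to the traddr/trsvcid options parsed here and the
 * service id defaults to NVME_RDMA_IP_PORT when -s is omitted.
 */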
static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_rdma_ctrl *ctrl;
	int ret;
	bool changed;
	char *port;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	if (opts->mask & NVMF_OPT_TRSVCID)
		port = opts->trsvcid;
	else
		port = __stringify(NVME_RDMA_IP_PORT);

	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->traddr, port, &ctrl->addr);
	if (ret) {
		pr_err("malformed address passed: %s:%s\n", opts->traddr, port);
		goto out_free_ctrl;
	}

	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->host_traddr, NULL, &ctrl->src_addr);
		if (ret) {
			pr_err("malformed src address passed: %s\n",
			       opts->host_traddr);
			goto out_free_ctrl;
		}
	}

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_free_ctrl;

	INIT_DELAYED_WORK(&ctrl->reconnect_work,
			nvme_rdma_reconnect_ctrl_work);
	INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
	INIT_WORK(&ctrl->delete_work, nvme_rdma_del_ctrl_work);
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);

	ctrl->ctrl.queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ret = -ENOMEM;
	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
				GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_rdma_configure_admin_queue(ctrl);
	if (ret)
		goto out_kfree_queues;

	/* sanity check icdoff */
	if (ctrl->ctrl.icdoff) {
		dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
		ret = -EINVAL;
		goto out_remove_admin_queue;
	}

	/* sanity check keyed sgls */
	if (!(ctrl->ctrl.sgls & (1 << 20))) {
		dev_err(ctrl->ctrl.device,
			"Mandatory keyed sgls are not supported!\n");
		ret = -EINVAL;
		goto out_remove_admin_queue;
	}

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
		/* warn if sqsize is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl sqsize %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.sqsize + 1);
		opts->queue_size = ctrl->ctrl.sqsize + 1;
	}

	if (opts->nr_io_queues) {
		ret = nvme_rdma_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
		ctrl->ctrl.opts->subsysnqn, &ctrl->addr);

	kref_get(&ctrl->ctrl.kref);

	mutex_lock(&nvme_rdma_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
	mutex_unlock(&nvme_rdma_ctrl_mutex);

	nvme_start_ctrl(&ctrl->ctrl);

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_rdma_destroy_admin_queue(ctrl);
out_kfree_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
out_free_ctrl:
	kfree(ctrl);
	return ERR_PTR(ret);
}

static struct nvmf_transport_ops nvme_rdma_transport = {
	.name		= "rdma",
	.required_opts	= NVMF_OPT_TRADDR,
	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO,
	.create_ctrl	= nvme_rdma_create_ctrl,
};
static void nvme_rdma_add_one(struct ib_device *ib_device)
{
}

static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct nvme_rdma_ctrl *ctrl;

	/* Delete all controllers using this device */
	mutex_lock(&nvme_rdma_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
		if (ctrl->device->dev != ib_device)
			continue;
		dev_info(ctrl->ctrl.device,
			"Removing ctrl: NQN \"%s\", addr %pISp\n",
			ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
		__nvme_rdma_del_ctrl(ctrl);
	}
	mutex_unlock(&nvme_rdma_ctrl_mutex);

	flush_workqueue(nvme_wq);
}

static struct ib_client nvme_rdma_ib_client = {
	.name	= "nvme_rdma",
	.add	= nvme_rdma_add_one,
	.remove	= nvme_rdma_remove_one
};

static int __init nvme_rdma_init_module(void)
{
	int ret;

	ret = ib_register_client(&nvme_rdma_ib_client);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_rdma_transport);
	if (ret)
		goto err_unreg_client;

	return 0;

err_unreg_client:
	ib_unregister_client(&nvme_rdma_ib_client);
	return ret;
}

static void __exit nvme_rdma_cleanup_module(void)
{
	nvmf_unregister_transport(&nvme_rdma_transport);
	ib_unregister_client(&nvme_rdma_ib_client);
}

module_init(nvme_rdma_init_module);
module_exit(nvme_rdma_cleanup_module);

MODULE_LICENSE("GPL v2");