Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

nvme-loop: handle cpu unplug when re-establishing the controller

If a cpu unplug event has occurred, we need to take the minimum
of the provided nr_io_queues and the number of online cpus,
otherwise we won't be able to connect them as blk-mq mapping
won't dispatch to those queues.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>

+50 -38
drivers/nvme/target/loop.c
··· 223 223 static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl, 224 224 struct nvme_loop_iod *iod, unsigned int queue_idx) 225 225 { 226 - BUG_ON(queue_idx >= ctrl->queue_count); 227 - 228 226 iod->req.cmd = &iod->cmd; 229 227 iod->req.rsp = &iod->rsp; 230 228 iod->queue = &ctrl->queues[queue_idx]; ··· 312 314 kfree(ctrl); 313 315 } 314 316 317 + static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl) 318 + { 319 + int i; 320 + 321 + for (i = 1; i < ctrl->queue_count; i++) 322 + nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); 323 + } 324 + 325 + static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl) 326 + { 327 + struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; 328 + unsigned int nr_io_queues; 329 + int ret, i; 330 + 331 + nr_io_queues = min(opts->nr_io_queues, num_online_cpus()); 332 + ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); 333 + if (ret || !nr_io_queues) 334 + return ret; 335 + 336 + dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues); 337 + 338 + for (i = 1; i <= nr_io_queues; i++) { 339 + ctrl->queues[i].ctrl = ctrl; 340 + ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq); 341 + if (ret) 342 + goto out_destroy_queues; 343 + 344 + ctrl->queue_count++; 345 + } 346 + 347 + return 0; 348 + 349 + out_destroy_queues: 350 + nvme_loop_destroy_io_queues(ctrl); 351 + return ret; 352 + } 353 + 315 354 static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) 316 355 { 317 356 int error; ··· 420 385 421 386 static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl) 422 387 { 423 - int i; 424 - 425 388 nvme_stop_keep_alive(&ctrl->ctrl); 426 389 427 390 if (ctrl->queue_count > 1) { 428 391 nvme_stop_queues(&ctrl->ctrl); 429 392 blk_mq_tagset_busy_iter(&ctrl->tag_set, 430 393 nvme_cancel_request, &ctrl->ctrl); 431 - 432 - for (i = 1; i < ctrl->queue_count; i++) 433 - nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); 394 + nvme_loop_destroy_io_queues(ctrl); 434 395 } 435 396 436 397 if (ctrl->ctrl.state 
== NVME_CTRL_LIVE) ··· 498 467 if (ret) 499 468 goto out_disable; 500 469 501 - for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) { 502 - ctrl->queues[i].ctrl = ctrl; 503 - ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq); 504 - if (ret) 505 - goto out_free_queues; 470 + ret = nvme_loop_init_io_queues(ctrl); 471 + if (ret) 472 + goto out_destroy_admin; 506 473 507 - ctrl->queue_count++; 508 - } 509 - 510 - for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) { 474 + for (i = 1; i < ctrl->queue_count; i++) { 511 475 ret = nvmf_connect_io_queue(&ctrl->ctrl, i); 512 476 if (ret) 513 - goto out_free_queues; 477 + goto out_destroy_io; 514 478 } 515 479 516 480 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); ··· 518 492 519 493 return; 520 494 521 - out_free_queues: 522 - for (i = 1; i < ctrl->queue_count; i++) 523 - nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); 495 + out_destroy_io: 496 + nvme_loop_destroy_io_queues(ctrl); 497 + out_destroy_admin: 524 498 nvme_loop_destroy_admin_queue(ctrl); 525 499 out_disable: 526 500 dev_warn(ctrl->ctrl.device, "Removing after reset failure\n"); ··· 559 533 560 534 static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) 561 535 { 562 - struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; 563 536 int ret, i; 564 537 565 - ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues); 566 - if (ret || !opts->nr_io_queues) 538 + ret = nvme_loop_init_io_queues(ctrl); 539 + if (ret) 567 540 return ret; 568 - 569 - dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", 570 - opts->nr_io_queues); 571 - 572 - for (i = 1; i <= opts->nr_io_queues; i++) { 573 - ctrl->queues[i].ctrl = ctrl; 574 - ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq); 575 - if (ret) 576 - goto out_destroy_queues; 577 - 578 - ctrl->queue_count++; 579 - } 580 541 581 542 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); 582 543 ctrl->tag_set.ops = &nvme_loop_mq_ops; ··· 588 575 goto out_free_tagset; 589 576 } 590 577 591 - for (i = 1; i <= 
opts->nr_io_queues; i++) { 578 + for (i = 1; i < ctrl->queue_count; i++) { 592 579 ret = nvmf_connect_io_queue(&ctrl->ctrl, i); 593 580 if (ret) 594 581 goto out_cleanup_connect_q; ··· 601 588 out_free_tagset: 602 589 blk_mq_free_tag_set(&ctrl->tag_set); 603 590 out_destroy_queues: 604 - for (i = 1; i < ctrl->queue_count; i++) 605 - nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); 591 + nvme_loop_destroy_io_queues(ctrl); 606 592 return ret; 607 593 } 608 594