Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS	256

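/*
 * Per-request context shared by the host and target halves of the loop
 * transport: the host-side command/completion pair, the nvmet request the
 * target core executes, and inline scatterlist space for small transfers.
 */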
struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	cqe;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	struct blk_mq_tag_set	tag_set;
	struct nvme_ctrl	ctrl;

	struct nvmet_port	*port;

	/* Must be last -- ends in a flexible-array member. */
	struct nvme_loop_iod	async_event_iod;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

enum nvme_loop_queue_flags {
	NVME_LOOP_Q_LIVE	= 0,
};

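/*
 * Each loop queue couples a host-side queue with the target-side submission
 * and completion queues it feeds; index 0 into ctrl->queues is the admin
 * queue.
 */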
struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
	unsigned long		flags;
};

static LIST_HEAD(nvme_loop_ports);
static DEFINE_MUTEX(nvme_loop_ports_mutex);

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static const struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	sg_free_table_chained(&iod->sg_table, NVME_INLINE_SG_CNT);
	nvme_complete_rq(req);
}

static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

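/*
 * Target-side completion path, invoked by the nvmet core once a looped-back
 * command has finished.  AEN completions are handled directly; everything
 * else is matched back to its blk-mq request by command_id.
 */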
static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->cqe;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvme_loop_queue_idx(queue),
				     cqe->command_id))) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;

		rq = nvme_find_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"got bad command_id %#x on queue %d\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
			nvme_loop_complete_rq(rq);
	}
}

static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	iod->req.execute(&iod->req);
}

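/*
 * Host-side ->queue_rq(): instead of posting the command to real hardware,
 * wrap it in an nvmet request, map its data into a scatterlist and defer
 * execution to nvmet_wq so the target core runs it in process context.
 */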
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvme_check_ready(&queue->ctrl->ctrl, req, queue_ready))
		return nvme_fail_nonready_command(&queue->ctrl->ctrl, req);

	ret = nvme_setup_cmd(ns, req);
	if (ret)
		return ret;

	nvme_start_request(req);
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = queue->ctrl->port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_sq, &nvme_loop_ops))
		return BLK_STS_OK;

	if (blk_rq_nr_phys_segments(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		if (sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl, NVME_INLINE_SG_CNT)) {
			nvme_cleanup_cmd(req);
			return BLK_STS_RESOURCE;
		}

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req, iod->sg_table.sgl);
		iod->req.transfer_len = blk_rq_payload_bytes(req);
	}

	queue_work(nvmet_wq, &iod->work);
	return BLK_STS_OK;
}

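/*
 * Async Event Requests never go through blk-mq; a dedicated iod embedded in
 * the controller carries them straight into the target's admin queue.
 */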
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_sq, &nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	queue_work(nvmet_wq, &iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.cqe = &iod->cqe;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(set->driver_data);
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_req(req)->ctrl = &ctrl->ctrl;
	nvme_req(req)->cmd = &iod->cmd;
	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static struct lock_class_key loop_hctx_fq_lock_key;

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(data);
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	/*
	 * flush_end_io() can be called recursively for us, so use our own
	 * lock class key to avoid a possible false-positive lockdep report
	 * about recursive locking.  This also removes the need for a
	 * dynamically allocated lock class per flush queue, which could
	 * cause horrible boot delays.
	 */
	blk_mq_hctx_set_fq_lock_class(hctx, &loop_hctx_fq_lock_key);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(data);
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
};

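/*
 * Teardown helpers.  The blk-mq queues are unquiesced during teardown so
 * that requests queued while they were frozen get flushed to completion.
 */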
static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
		return;
	/*
	 * It's possible that some requests might have been added
	 * after admin queue is stopped/quiesced.  So now start the
	 * queue to flush these requests to the completion.
	 */
	nvme_unquiesce_admin_queue(&ctrl->ctrl);

	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	nvmet_cq_put(&ctrl->queues[0].nvme_cq);
	nvme_remove_admin_tag_set(&ctrl->ctrl);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset)
		nvme_remove_io_tag_set(nctrl);
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
		nvmet_cq_put(&ctrl->queues[i].nvme_cq);
	}
	ctrl->ctrl.queue_count = 1;
	/*
	 * It's possible that some requests might have been added
	 * after io queue is stopped/quiesced.  So now start the
	 * queue to flush these requests to the completion.
	 */
	nvme_unquiesce_io_queues(&ctrl->ctrl);
}

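/*
 * I/O queue setup mirrors a regular fabrics host: negotiate the queue count
 * with the (local) target, create one target SQ/CQ pair per queue, and
 * later issue a Fabrics Connect on each via nvme_loop_connect_io_queues().
 */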
static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		nvmet_cq_init(&ctrl->queues[i].nvme_cq);
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq,
				&ctrl->queues[i].nvme_cq);
		if (ret) {
			nvmet_cq_put(&ctrl->queues[i].nvme_cq);
			goto out_destroy_queues;
		}

		ctrl->ctrl.queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			return ret;
		set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
	}

	return 0;
}

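/*
 * Admin queue bring-up: create the target-side SQ/CQ, allocate the admin
 * tag set, connect and enable the controller, then finish host-side
 * initialization via nvme_init_ctrl_finish().
 */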
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	ctrl->queues[0].ctrl = ctrl;
	nvmet_cq_init(&ctrl->queues[0].nvme_cq);
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq,
			&ctrl->queues[0].nvme_cq);
	if (error) {
		nvmet_cq_put(&ctrl->queues[0].nvme_cq);
		return error;
	}
	ctrl->ctrl.queue_count = 1;

	error = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
			&nvme_loop_admin_mq_ops,
			sizeof(struct nvme_loop_iod) +
			NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
	if (error)
		goto out_free_sq;

	/* reset stopped state for the fresh admin queue */
	clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->ctrl.flags);

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_tagset;

	set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);

	error = nvme_enable_ctrl(&ctrl->ctrl);
	if (error)
		goto out_cleanup_tagset;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << PAGE_SECTORS_SHIFT;

	nvme_unquiesce_admin_queue(&ctrl->ctrl);

	error = nvme_init_ctrl_finish(&ctrl->ctrl, false);
	if (error)
		goto out_cleanup_tagset;

	return 0;

out_cleanup_tagset:
	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
	nvme_remove_admin_tag_set(&ctrl->ctrl);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	nvmet_cq_put(&ctrl->queues[0].nvme_cq);
	return error;
}

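/*
 * Full shutdown: quiesce and cancel outstanding I/O and admin commands,
 * disable the controller if it is still live, then release the queues.
 */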
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (ctrl->ctrl.queue_count > 1) {
		nvme_quiesce_io_queues(&ctrl->ctrl);
		nvme_cancel_tagset(&ctrl->ctrl);
		nvme_loop_destroy_io_queues(ctrl);
	}

	nvme_quiesce_admin_queue(&ctrl->ctrl);
	if (nvme_ctrl_state(&ctrl->ctrl) == NVME_CTRL_LIVE)
		nvme_disable_ctrl(&ctrl->ctrl, true);

	nvme_cancel_admin_tagset(&ctrl->ctrl);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{
	nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

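/*
 * Reset handler: tear the controller down, reconnect the admin and I/O
 * queues, and move it back to LIVE.  If any step fails the controller is
 * removed rather than left half-initialized.
 */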
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl =
		container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
	int ret;

	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);

		if (state != NVME_CTRL_DELETING &&
		    state != NVME_CTRL_DELETING_NOIO)
			/* state change failure for non-deleted ctrl? */
			WARN_ON_ONCE(1);
		return;
	}

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	blk_mq_update_nr_hw_queues(&ctrl->tag_set,
			ctrl->ctrl.queue_count - 1);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
		WARN_ON_ONCE(1);

	nvme_start_ctrl(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_quiesce_admin_queue(&ctrl->ctrl);
	nvme_cancel_admin_tagset(&ctrl->ctrl);
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_delete_ctrl_host,
	.get_address		= nvmf_get_address,
	.get_virt_boundary	= nvme_get_virt_boundary,
};

static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
			&nvme_loop_mq_ops, 1,
			sizeof(struct nvme_loop_iod) +
			NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
	if (ret)
		goto out_destroy_queues;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_tagset;

	return 0;

out_cleanup_tagset:
	nvme_remove_io_tag_set(&ctrl->ctrl);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

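/*
 * Pick the nvmet port to loop into.  If the user supplied a traddr it must
 * match the port's discovery address; otherwise the first registered loop
 * port is used.
 */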
static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl)
{
	struct nvmet_port *p, *found = NULL;

	mutex_lock(&nvme_loop_ports_mutex);
	list_for_each_entry(p, &nvme_loop_ports, entry) {
		/* if no transport address is specified use the first port */
		if ((ctrl->opts->mask & NVMF_OPT_TRADDR) &&
		    strcmp(ctrl->opts->traddr, p->disc_addr.traddr))
			continue;
		found = p;
		break;
	}
	mutex_unlock(&nvme_loop_ports_mutex);
	return found;
}

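/*
 * Entry point for "transport=loop" connect requests from the fabrics layer:
 * allocate and register the controller, bring up the admin and I/O queues
 * against the local target port, and hand back the live controller.
 */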
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret) {
		kfree(ctrl);
		goto out;
	}

	ret = nvme_add_ctrl(&ctrl->ctrl);
	if (ret)
		goto out_put_ctrl;

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
		WARN_ON_ONCE(1);

	ret = -ENOMEM;

	ctrl->ctrl.kato = opts->kato;
	ctrl->port = nvme_loop_find_port(&ctrl->ctrl);

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}
	ctrl->ctrl.sqsize = opts->queue_size - 1;

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
		WARN_ON_ONCE(1);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	nvme_start_ctrl(&ctrl->ctrl);

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_quiesce_admin_queue(&ctrl->ctrl);
	nvme_cancel_admin_tagset(&ctrl->ctrl);
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
out:
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_add_tail(&port->entry, &nvme_loop_ports);
	mutex_unlock(&nvme_loop_ports_mutex);
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_del_init(&port->entry);
	mutex_unlock(&nvme_loop_ports_mutex);

	/*
	 * Ensure any ctrls that are in the process of being
	 * deleted are in fact deleted before we return
	 * and free the port.  This is to prevent active
	 * ctrls from using a port after it's freed.
	 */
	flush_workqueue(nvme_delete_wq);
}

static const struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response	= nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.module		= THIS_MODULE,
	.create_ctrl	= nvme_loop_create_ctrl,
	.allowed_opts	= NVMF_OPT_TRADDR,
};

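/*
 * The loop driver registers on both sides of the stack: as an nvmet
 * transport for the target core and as a "loop" transport for the nvme
 * host fabrics layer.
 */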
static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_workqueue(nvme_delete_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_DESCRIPTION("NVMe target loop transport driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */