// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics common host code.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-keyring.h>

static LIST_HEAD(nvmf_transports);
static DECLARE_RWSEM(nvmf_transports_rwsem);

static LIST_HEAD(nvmf_hosts);
static DEFINE_MUTEX(nvmf_hosts_mutex);

static struct nvmf_host *nvmf_default_host;

static struct nvmf_host *nvmf_host_alloc(const char *hostnqn, uuid_t *id)
{
	struct nvmf_host *host;

	host = kmalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return NULL;

	kref_init(&host->ref);
	uuid_copy(&host->id, id);
	strscpy(host->nqn, hostnqn, NVMF_NQN_SIZE);

	return host;
}

static struct nvmf_host *nvmf_host_add(const char *hostnqn, uuid_t *id)
{
	struct nvmf_host *host;

	mutex_lock(&nvmf_hosts_mutex);

	/*
	 * We have defined a host as how it is perceived by the target.
	 * Therefore, we don't allow different Host NQNs with the same Host ID.
	 * Similarly, we do not allow the usage of the same Host NQN with
	 * different Host IDs. This keeps host identification unambiguous.
	 */
	list_for_each_entry(host, &nvmf_hosts, list) {
		bool same_hostnqn = !strcmp(host->nqn, hostnqn);
		bool same_hostid = uuid_equal(&host->id, id);

		if (same_hostnqn && same_hostid) {
			kref_get(&host->ref);
			goto out_unlock;
		}
		if (same_hostnqn) {
			pr_err("found same hostnqn %s but different hostid %pUb\n",
			       hostnqn, id);
			host = ERR_PTR(-EINVAL);
			goto out_unlock;
		}
		if (same_hostid) {
			pr_err("found same hostid %pUb but different hostnqn %s\n",
			       id, hostnqn);
			host = ERR_PTR(-EINVAL);
			goto out_unlock;
		}
	}

	host = nvmf_host_alloc(hostnqn, id);
	if (!host) {
		host = ERR_PTR(-ENOMEM);
		goto out_unlock;
	}

	list_add_tail(&host->list, &nvmf_hosts);
out_unlock:
	mutex_unlock(&nvmf_hosts_mutex);
	return host;
}

static struct nvmf_host *nvmf_host_default(void)
{
	struct nvmf_host *host;
	char nqn[NVMF_NQN_SIZE];
	uuid_t id;

	uuid_gen(&id);
	snprintf(nqn, NVMF_NQN_SIZE,
		 "nqn.2014-08.org.nvmexpress:uuid:%pUb", &id);

	host = nvmf_host_alloc(nqn, &id);
	if (!host)
		return NULL;

	mutex_lock(&nvmf_hosts_mutex);
	list_add_tail(&host->list, &nvmf_hosts);
	mutex_unlock(&nvmf_hosts_mutex);

	return host;
}

static void nvmf_host_destroy(struct kref *ref)
{
	struct nvmf_host *host = container_of(ref, struct nvmf_host, ref);

	mutex_lock(&nvmf_hosts_mutex);
	list_del(&host->list);
	mutex_unlock(&nvmf_hosts_mutex);

	kfree(host);
}

static void nvmf_host_put(struct nvmf_host *host)
{
	if (host)
		kref_put(&host->ref, nvmf_host_destroy);
}

/**
 * nvmf_get_address() - Get address/port
 * @ctrl:	Host NVMe controller instance from which to get the address
 * @buf:	OUTPUT parameter that will contain the address/port
 * @size:	buffer size
 */
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{
	int len = 0;

	if (ctrl->opts->mask & NVMF_OPT_TRADDR)
		len += scnprintf(buf, size, "traddr=%s", ctrl->opts->traddr);
	if (ctrl->opts->mask & NVMF_OPT_TRSVCID)
		len += scnprintf(buf + len, size - len, "%strsvcid=%s",
				(len) ? "," : "", ctrl->opts->trsvcid);
	if (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)
		len += scnprintf(buf + len, size - len, "%shost_traddr=%s",
				(len) ? "," : "", ctrl->opts->host_traddr);
	if (ctrl->opts->mask & NVMF_OPT_HOST_IFACE)
		len += scnprintf(buf + len, size - len, "%shost_iface=%s",
				(len) ? "," : "", ctrl->opts->host_iface);
	len += scnprintf(buf + len, size - len, "\n");

	return len;
}
EXPORT_SYMBOL_GPL(nvmf_get_address);
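
/*
 * Example (illustrative sketch, not part of this file): a transport or the
 * core sysfs code would typically expose this through a controller "address"
 * attribute, whose show callback receives a PAGE_SIZE buffer:
 *
 *	static ssize_t address_show(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
 *
 *		return nvmf_get_address(ctrl, buf, PAGE_SIZE);
 *	}
 *
 * A typical result reads "traddr=192.168.1.1,trsvcid=4420\n".
 */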

/**
 * nvmf_reg_read32() - NVMe Fabrics "Property Get" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin
 *		queue used to submit the property read command to
 *		the allocated NVMe controller resource on the target system.
 * @off:	Starting offset value of the targeted property
 *		register (see the fabrics section of the NVMe standard).
 * @val:	OUTPUT parameter that will contain the value of
 *		the property after a successful read.
 *
 * Used by the host system to retrieve a 32-bit capsule property value
 * from an NVMe controller on the target system.
 *
 * ("Capsule property" is a "PCIe register concept" applied to the
 * NVMe fabrics space.)
 *
 * Return:
 *	0: successful read
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
	struct nvme_command cmd = { };
	union nvme_result res;
	int ret;

	cmd.prop_get.opcode = nvme_fabrics_command;
	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
	cmd.prop_get.offset = cpu_to_le32(off);

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
			NVME_QID_ANY, NVME_SUBMIT_RESERVED);

	if (ret >= 0)
		*val = le64_to_cpu(res.u64);
	if (unlikely(ret != 0))
		dev_err(ctrl->device,
			"Property Get error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_read32);
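
/*
 * Illustrative use (a sketch; the register and helper names are real kernel
 * symbols, the surrounding code is not from this file): transports poll
 * 32-bit properties much like a PCIe driver reads registers, e.g. waiting
 * on CSTS.RDY:
 *
 *	u32 csts;
 *	int ret = nvmf_reg_read32(ctrl, NVME_REG_CSTS, &csts);
 *
 *	if (ret)
 *		return ret;
 *	if (csts & NVME_CSTS_RDY)
 *		return 0;	- controller reports ready
 */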

/**
 * nvmf_reg_read64() - NVMe Fabrics "Property Get" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin
 *		queue used to submit the property read command to
 *		the allocated controller resource on the target system.
 * @off:	Starting offset value of the targeted property
 *		register (see the fabrics section of the NVMe standard).
 * @val:	OUTPUT parameter that will contain the value of
 *		the property after a successful read.
 *
 * Used by the host system to retrieve a 64-bit capsule property value
 * from an NVMe controller on the target system.
 *
 * ("Capsule property" is a "PCIe register concept" applied to the
 * NVMe fabrics space.)
 *
 * Return:
 *	0: successful read
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
	struct nvme_command cmd = { };
	union nvme_result res;
	int ret;

	cmd.prop_get.opcode = nvme_fabrics_command;
	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
	cmd.prop_get.attrib = 1;
	cmd.prop_get.offset = cpu_to_le32(off);

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
			NVME_QID_ANY, NVME_SUBMIT_RESERVED);

	if (ret >= 0)
		*val = le64_to_cpu(res.u64);
	if (unlikely(ret != 0))
		dev_err(ctrl->device,
			"Property Get error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_read64);
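
/*
 * Illustrative use (sketch): the 64-bit Controller Capabilities property is
 * read this way during controller setup, for instance to honor CAP.MQES
 * when sizing queues:
 *
 *	u64 cap;
 *	int ret = nvmf_reg_read64(ctrl, NVME_REG_CAP, &cap);
 *
 *	if (ret)
 *		return ret;
 *	ctrl->sqsize = min_t(u16, ctrl->sqsize, NVME_CAP_MQES(cap));
 */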

/**
 * nvmf_reg_write32() - NVMe Fabrics "Property Write" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin
 *		queue used to submit the property write command to
 *		the allocated NVMe controller resource on the target system.
 * @off:	Starting offset value of the targeted property
 *		register (see the fabrics section of the NVMe standard).
 * @val:	Input parameter that contains the value to be
 *		written to the property.
 *
 * Used by the NVMe host system to write a 32-bit capsule property value
 * to an NVMe controller on the target system.
 *
 * ("Capsule property" is a "PCIe register concept" applied to the
 * NVMe fabrics space.)
 *
 * Return:
 *	0: successful write
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{
	struct nvme_command cmd = { };
	int ret;

	cmd.prop_set.opcode = nvme_fabrics_command;
	cmd.prop_set.fctype = nvme_fabrics_type_property_set;
	cmd.prop_set.attrib = 0;
	cmd.prop_set.offset = cpu_to_le32(off);
	cmd.prop_set.value = cpu_to_le64(val);

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0,
			NVME_QID_ANY, NVME_SUBMIT_RESERVED);
	if (unlikely(ret))
		dev_err(ctrl->device,
			"Property Set error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_write32);
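
/*
 * Illustrative use (sketch): enabling the controller is a Property Set of
 * the Controller Configuration property, the fabrics analogue of a PCIe
 * write to the CC register:
 *
 *	ctrl->ctrl_config |= NVME_CC_ENABLE;
 *	ret = nvmf_reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
 */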

/**
 * nvmf_log_connect_error() - Error-parsing-diagnostic print out function for
 *				connect() errors.
 * @ctrl:	The specific /dev/nvmeX device that had the error.
 * @errval:	Error code to be decoded in a more human-friendly
 *		printout.
 * @offset:	For use with the NVMe error code
 *		NVME_SC_CONNECT_INVALID_PARAM.
 * @cmd:	This is the SQE portion of a submission capsule.
 * @data:	This is the "Data" portion of a submission capsule.
 */
static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
		int errval, int offset, struct nvme_command *cmd,
		struct nvmf_connect_data *data)
{
	int err_sctype = errval & ~NVME_SC_DNR;

	if (errval < 0) {
		dev_err(ctrl->device,
			"Connect command failed, errno: %d\n", errval);
		return;
	}

	switch (err_sctype) {
	case NVME_SC_CONNECT_INVALID_PARAM:
		if (offset >> 16) {
			char *inv_data = "Connect Invalid Data Parameter";

			switch (offset & 0xffff) {
			case (offsetof(struct nvmf_connect_data, cntlid)):
				dev_err(ctrl->device,
					"%s, cntlid: %d\n",
					inv_data, data->cntlid);
				break;
			case (offsetof(struct nvmf_connect_data, hostnqn)):
				dev_err(ctrl->device,
					"%s, hostnqn \"%s\"\n",
					inv_data, data->hostnqn);
				break;
			case (offsetof(struct nvmf_connect_data, subsysnqn)):
				dev_err(ctrl->device,
					"%s, subsysnqn \"%s\"\n",
					inv_data, data->subsysnqn);
				break;
			default:
				dev_err(ctrl->device,
					"%s, starting byte offset: %d\n",
					inv_data, offset & 0xffff);
				break;
			}
		} else {
			char *inv_sqe = "Connect Invalid SQE Parameter";

			switch (offset) {
			case (offsetof(struct nvmf_connect_command, qid)):
				dev_err(ctrl->device,
					"%s, qid %d\n",
					inv_sqe, cmd->connect.qid);
				break;
			default:
				dev_err(ctrl->device,
					"%s, starting byte offset: %d\n",
					inv_sqe, offset);
			}
		}
		break;
	case NVME_SC_CONNECT_INVALID_HOST:
		dev_err(ctrl->device,
			"Connect for subsystem %s is not allowed, hostnqn: %s\n",
			data->subsysnqn, data->hostnqn);
		break;
	case NVME_SC_CONNECT_CTRL_BUSY:
		dev_err(ctrl->device,
			"Connect command failed: controller is busy or not available\n");
		break;
	case NVME_SC_CONNECT_FORMAT:
		dev_err(ctrl->device,
			"Connect incompatible format: %d\n",
			cmd->connect.recfmt);
		break;
	case NVME_SC_HOST_PATH_ERROR:
		dev_err(ctrl->device,
			"Connect command failed: host path error\n");
		break;
	case NVME_SC_AUTH_REQUIRED:
		dev_err(ctrl->device,
			"Connect command failed: authentication required\n");
		break;
	default:
		dev_err(ctrl->device,
			"Connect command failed, error wo/DNR bit: %d\n",
			err_sctype);
		break;
	}
}

static struct nvmf_connect_data *nvmf_connect_data_prep(struct nvme_ctrl *ctrl,
		u16 cntlid)
{
	struct nvmf_connect_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	uuid_copy(&data->hostid, &ctrl->opts->host->id);
	data->cntlid = cpu_to_le16(cntlid);
	strscpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
	strscpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);

	return data;
}

static void nvmf_connect_cmd_prep(struct nvme_ctrl *ctrl, u16 qid,
		struct nvme_command *cmd)
{
	cmd->connect.opcode = nvme_fabrics_command;
	cmd->connect.fctype = nvme_fabrics_type_connect;
	cmd->connect.qid = cpu_to_le16(qid);

	if (qid) {
		cmd->connect.sqsize = cpu_to_le16(ctrl->sqsize);
	} else {
		cmd->connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);

		/*
		 * ctrl->kato is kept in seconds; the Connect command
		 * expects the keep-alive timeout in milliseconds.
		 */
		cmd->connect.kato = cpu_to_le32(ctrl->kato * 1000);
	}

	if (ctrl->opts->disable_sqflow)
		cmd->connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
}
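
/*
 * For reference, an admin-queue Connect SQE prepared above (qid == 0,
 * ctrl->kato == 5 seconds, SQ flow control left enabled) carries:
 *
 *	connect.opcode = nvme_fabrics_command	(0x7f)
 *	connect.fctype = nvme_fabrics_type_connect
 *	connect.qid    = 0
 *	connect.sqsize = NVME_AQ_DEPTH - 1	(zero-based queue size)
 *	connect.kato   = 5000			(milliseconds)
 */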

/**
 * nvmf_connect_admin_queue() - NVMe Fabrics Admin Queue "Connect"
 *				API function.
 * @ctrl:	Host nvme controller instance used to request
 *		a new NVMe controller allocation on the target
 *		system and establish an NVMe Admin connection to
 *		that controller.
 *
 * This function enables an NVMe host device to request a new allocation of
 * an NVMe controller resource on a target system as well as establish a
 * fabrics-protocol connection of the NVMe Admin queue between the
 * host system device and the allocated NVMe controller on the
 * target system via an NVMe Fabrics "Connect" command.
 *
 * Return:
 *	0: success
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
{
	struct nvme_command cmd = { };
	union nvme_result res;
	struct nvmf_connect_data *data;
	int ret;
	u32 result;

	nvmf_connect_cmd_prep(ctrl, 0, &cmd);

	data = nvmf_connect_data_prep(ctrl, 0xffff);
	if (!data)
		return -ENOMEM;

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
			data, sizeof(*data), NVME_QID_ANY,
			NVME_SUBMIT_AT_HEAD |
			NVME_SUBMIT_NOWAIT |
			NVME_SUBMIT_RESERVED);
	if (ret) {
		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
				       &cmd, data);
		goto out_free_data;
	}

	result = le32_to_cpu(res.u32);
	ctrl->cntlid = result & 0xFFFF;
	if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) {
		/* Secure concatenation is not implemented */
		if (result & NVME_CONNECT_AUTHREQ_ASCR) {
			dev_warn(ctrl->device,
				 "qid 0: secure concatenation is not supported\n");
			ret = -EOPNOTSUPP;
			goto out_free_data;
		}
		/* Authentication required */
		ret = nvme_auth_negotiate(ctrl, 0);
		if (ret) {
			dev_warn(ctrl->device,
				 "qid 0: authentication setup failed\n");
			goto out_free_data;
		}
		ret = nvme_auth_wait(ctrl, 0);
		if (ret) {
			dev_warn(ctrl->device,
				 "qid 0: authentication failed, error %d\n",
				 ret);
		} else {
			dev_info(ctrl->device,
				 "qid 0: authenticated\n");
		}
	}
out_free_data:
	kfree(data);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);
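
/*
 * Illustrative call sequence (a sketch loosely following what transports
 * such as nvme-tcp do; the error label is hypothetical): once the admin
 * queue's transport resources exist, connect it and enable the controller:
 *
 *	ret = nvmf_connect_admin_queue(ctrl);
 *	if (ret)
 *		goto out_stop_queue;
 *	ret = nvme_enable_ctrl(ctrl);
 *	if (ret)
 *		goto out_stop_queue;
 */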

/**
 * nvmf_connect_io_queue() - NVMe Fabrics I/O Queue "Connect"
 *			     API function.
 * @ctrl:	Host nvme controller instance used to establish an
 *		NVMe I/O queue connection to the already allocated NVMe
 *		controller on the target system.
 * @qid:	NVMe I/O queue number for the new I/O connection between
 *		host and target (note qid == 0 is illegal as this is
 *		the Admin queue, per NVMe standard).
 *
 * This function issues a fabrics-protocol connection
 * of an NVMe I/O queue (via NVMe Fabrics "Connect" command)
 * between the host system device and the allocated NVMe controller
 * on the target system.
 *
 * Return:
 *	0: success
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
{
	struct nvme_command cmd = { };
	struct nvmf_connect_data *data;
	union nvme_result res;
	int ret;
	u32 result;

	nvmf_connect_cmd_prep(ctrl, qid, &cmd);

	data = nvmf_connect_data_prep(ctrl, ctrl->cntlid);
	if (!data)
		return -ENOMEM;

	ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
			data, sizeof(*data), qid,
			NVME_SUBMIT_AT_HEAD |
			NVME_SUBMIT_RESERVED |
			NVME_SUBMIT_NOWAIT);
	if (ret) {
		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
				       &cmd, data);
		goto out_free_data;
	}
	result = le32_to_cpu(res.u32);
	if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) {
		/* Secure concatenation is not implemented */
		if (result & NVME_CONNECT_AUTHREQ_ASCR) {
			dev_warn(ctrl->device,
				 "qid %d: secure concatenation is not supported\n",
				 qid);
			ret = -EOPNOTSUPP;
			goto out_free_data;
		}
		/* Authentication required */
		ret = nvme_auth_negotiate(ctrl, qid);
		if (ret) {
			dev_warn(ctrl->device,
				 "qid %d: authentication setup failed\n", qid);
			goto out_free_data;
		}
		ret = nvme_auth_wait(ctrl, qid);
		if (ret) {
			dev_warn(ctrl->device,
				 "qid %d: authentication failed, error %d\n",
				 qid, ret);
		}
	}
out_free_data:
	kfree(data);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);
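
/*
 * Illustrative use (sketch): transports connect each I/O queue in turn
 * after creating it, from qid 1 upwards (qid 0 is the admin queue):
 *
 *	for (i = 1; i < ctrl->queue_count; i++) {
 *		ret = nvmf_connect_io_queue(ctrl, i);
 *		if (ret)
 *			goto out_stop_queues;
 *	}
 */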

/*
 * Evaluate the status information returned by the transport in order to
 * decide if a reconnect attempt should be scheduled.
 *
 * Do not retry when:
 *
 * - the DNR bit is set and the specification states that no further connect
 *   attempts with the same set of parameters should be attempted.
 *
 * - the authentication attempt failed because the key was invalid.
 *   This error code is set on the host side.
 */
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl, int status)
{
	if (status > 0 && (status & NVME_SC_DNR))
		return false;

	if (status == -EKEYREJECTED)
		return false;

	if (ctrl->opts->max_reconnects == -1 ||
	    ctrl->nr_reconnects < ctrl->opts->max_reconnects)
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(nvmf_should_reconnect);
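
/*
 * Illustrative use (a sketch modeled on transport error-recovery paths;
 * connect_work here stands in for the transport's own delayed work item):
 *
 *	if (nvmf_should_reconnect(ctrl, status))
 *		queue_delayed_work(nvme_wq, &tcp_ctrl->connect_work,
 *				   ctrl->opts->reconnect_delay * HZ);
 *	else
 *		nvme_delete_ctrl(ctrl);
 */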

/**
 * nvmf_register_transport() - NVMe Fabrics Library registration function.
 * @ops:	Transport ops instance to be registered to the
 *		common fabrics library.
 *
 * API function that registers the type of specific transport fabric
 * being implemented to the common NVMe fabrics library. Part of
 * the overall init sequence of starting up a fabrics driver.
 */
int nvmf_register_transport(struct nvmf_transport_ops *ops)
{
	if (!ops->create_ctrl)
		return -EINVAL;

	down_write(&nvmf_transports_rwsem);
	list_add_tail(&ops->entry, &nvmf_transports);
	up_write(&nvmf_transports_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmf_register_transport);
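
/*
 * Illustrative registration (sketch; all "foo" names are hypothetical):
 * a transport fills in its ops and registers them at module init:
 *
 *	static struct nvmf_transport_ops nvme_foo_transport = {
 *		.name		= "foo",
 *		.module		= THIS_MODULE,
 *		.required_opts	= NVMF_OPT_TRADDR,
 *		.allowed_opts	= NVMF_OPT_TRSVCID,
 *		.create_ctrl	= nvme_foo_create_ctrl,
 *	};
 *
 *	static int __init nvme_foo_init_module(void)
 *	{
 *		return nvmf_register_transport(&nvme_foo_transport);
 *	}
 */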

/**
 * nvmf_unregister_transport() - NVMe Fabrics Library unregistration function.
 * @ops:	Transport ops instance to be unregistered from the
 *		common fabrics library.
 *
 * Fabrics API function that unregisters the type of specific transport
 * fabric being implemented from the common NVMe fabrics library.
 * Part of the overall exit sequence of unloading the implemented driver.
 */
void nvmf_unregister_transport(struct nvmf_transport_ops *ops)
{
	down_write(&nvmf_transports_rwsem);
	list_del(&ops->entry);
	up_write(&nvmf_transports_rwsem);
}
EXPORT_SYMBOL_GPL(nvmf_unregister_transport);

static struct nvmf_transport_ops *nvmf_lookup_transport(
		struct nvmf_ctrl_options *opts)
{
	struct nvmf_transport_ops *ops;

	lockdep_assert_held(&nvmf_transports_rwsem);

	list_for_each_entry(ops, &nvmf_transports, entry) {
		if (strcmp(ops->name, opts->transport) == 0)
			return ops;
	}

	return NULL;
}

static struct key *nvmf_parse_key(int key_id)
{
	struct key *key;

	if (!IS_ENABLED(CONFIG_NVME_TCP_TLS)) {
		pr_err("TLS is not supported\n");
		return ERR_PTR(-EINVAL);
	}

	key = key_lookup(key_id);
	if (IS_ERR(key))
		pr_err("key id %08x not found\n", key_id);
	else
		pr_debug("Using key id %08x\n", key_id);
	return key;
}

static const match_table_t opt_tokens = {
	{ NVMF_OPT_TRANSPORT,		"transport=%s"		},
	{ NVMF_OPT_TRADDR,		"traddr=%s"		},
	{ NVMF_OPT_TRSVCID,		"trsvcid=%s"		},
	{ NVMF_OPT_NQN,			"nqn=%s"		},
	{ NVMF_OPT_QUEUE_SIZE,		"queue_size=%d"		},
	{ NVMF_OPT_NR_IO_QUEUES,	"nr_io_queues=%d"	},
	{ NVMF_OPT_RECONNECT_DELAY,	"reconnect_delay=%d"	},
	{ NVMF_OPT_CTRL_LOSS_TMO,	"ctrl_loss_tmo=%d"	},
	{ NVMF_OPT_KATO,		"keep_alive_tmo=%d"	},
	{ NVMF_OPT_HOSTNQN,		"hostnqn=%s"		},
	{ NVMF_OPT_HOST_TRADDR,		"host_traddr=%s"	},
	{ NVMF_OPT_HOST_IFACE,		"host_iface=%s"		},
	{ NVMF_OPT_HOST_ID,		"hostid=%s"		},
	{ NVMF_OPT_DUP_CONNECT,		"duplicate_connect"	},
	{ NVMF_OPT_DISABLE_SQFLOW,	"disable_sqflow"	},
	{ NVMF_OPT_HDR_DIGEST,		"hdr_digest"		},
	{ NVMF_OPT_DATA_DIGEST,		"data_digest"		},
	{ NVMF_OPT_NR_WRITE_QUEUES,	"nr_write_queues=%d"	},
	{ NVMF_OPT_NR_POLL_QUEUES,	"nr_poll_queues=%d"	},
	{ NVMF_OPT_TOS,			"tos=%d"		},
#ifdef CONFIG_NVME_TCP_TLS
	{ NVMF_OPT_KEYRING,		"keyring=%d"		},
	{ NVMF_OPT_TLS_KEY,		"tls_key=%d"		},
#endif
	{ NVMF_OPT_FAIL_FAST_TMO,	"fast_io_fail_tmo=%d"	},
	{ NVMF_OPT_DISCOVERY,		"discovery"		},
#ifdef CONFIG_NVME_HOST_AUTH
	{ NVMF_OPT_DHCHAP_SECRET,	"dhchap_secret=%s"	},
	{ NVMF_OPT_DHCHAP_CTRL_SECRET,	"dhchap_ctrl_secret=%s"	},
#endif
#ifdef CONFIG_NVME_TCP_TLS
	{ NVMF_OPT_TLS,			"tls"			},
#endif
	{ NVMF_OPT_ERR,			NULL			}
};
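
/*
 * For reference, the tokens above are matched against a single
 * comma-separated string written to /dev/nvme-fabrics. A typical TCP
 * connect request (addresses illustrative) looks like:
 *
 *	transport=tcp,traddr=192.168.1.1,trsvcid=4420,
 *	nqn=nqn.2014-08.org.example:subsys1,nr_io_queues=8,keep_alive_tmo=5
 *
 * (written as one line, without the wrap shown here).
 */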

static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	size_t nqnlen = 0;
	int ctrl_loss_tmo = NVMF_DEF_CTRL_LOSS_TMO, key_id;
	uuid_t hostid;
	char hostnqn[NVMF_NQN_SIZE];
	struct key *key;

	/* Set defaults */
	opts->queue_size = NVMF_DEF_QUEUE_SIZE;
	opts->nr_io_queues = num_online_cpus();
	opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
	opts->kato = 0;
	opts->duplicate_connect = false;
	opts->fast_io_fail_tmo = NVMF_DEF_FAIL_FAST_TMO;
	opts->hdr_digest = false;
	opts->data_digest = false;
	opts->tos = -1; /* < 0 == use transport default */
	opts->tls = false;
	opts->tls_key = NULL;
	opts->keyring = NULL;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	/* use default host if not given by user space */
	uuid_copy(&hostid, &nvmf_default_host->id);
	strscpy(hostnqn, nvmf_default_host->nqn, NVMF_NQN_SIZE);

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_TRANSPORT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->transport);
			opts->transport = p;
			break;
		case NVMF_OPT_NQN:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->subsysnqn);
			opts->subsysnqn = p;
			nqnlen = strlen(opts->subsysnqn);
			if (nqnlen >= NVMF_NQN_SIZE) {
				pr_err("%s needs to be < %d bytes\n",
					opts->subsysnqn, NVMF_NQN_SIZE);
				ret = -EINVAL;
				goto out;
			}
			opts->discovery_nqn =
				!(strcmp(opts->subsysnqn,
					 NVME_DISC_SUBSYS_NAME));
			break;
		case NVMF_OPT_TRADDR:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->traddr);
			opts->traddr = p;
			break;
		case NVMF_OPT_TRSVCID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->trsvcid);
			opts->trsvcid = p;
			break;
		case NVMF_OPT_QUEUE_SIZE:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token < NVMF_MIN_QUEUE_SIZE ||
			    token > NVMF_MAX_QUEUE_SIZE) {
				pr_err("Invalid queue_size %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->queue_size = token;
			break;
		case NVMF_OPT_NR_IO_QUEUES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid number of IOQs %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			if (opts->discovery_nqn) {
				pr_debug("Ignoring nr_io_queues value for discovery controller\n");
				break;
			}

			opts->nr_io_queues = min_t(unsigned int,
					num_online_cpus(), token);
			break;
		case NVMF_OPT_KATO:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}

			if (token < 0) {
				pr_err("Invalid keep_alive_tmo %d\n", token);
				ret = -EINVAL;
				goto out;
			} else if (token == 0 && !opts->discovery_nqn) {
				/* Allowed for debug */
				pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n");
			}
			opts->kato = token;
			break;
		case NVMF_OPT_CTRL_LOSS_TMO:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}

			if (token < 0)
				pr_warn("ctrl_loss_tmo < 0 will reconnect forever\n");
			ctrl_loss_tmo = token;
			break;
		case NVMF_OPT_FAIL_FAST_TMO:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}

			if (token >= 0)
				pr_warn("I/O fail on reconnect controller after %d sec\n",
					token);
			else
				token = -1;

			opts->fast_io_fail_tmo = token;
			break;
		case NVMF_OPT_HOSTNQN:
			if (opts->host) {
				pr_err("hostnqn already user-assigned: %s\n",
				       opts->host->nqn);
				ret = -EADDRINUSE;
				goto out;
			}
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			nqnlen = strlen(p);
			if (nqnlen >= NVMF_NQN_SIZE) {
				pr_err("%s needs to be < %d bytes\n",
					p, NVMF_NQN_SIZE);
				kfree(p);
				ret = -EINVAL;
				goto out;
			}
			strscpy(hostnqn, p, NVMF_NQN_SIZE);
			kfree(p);
			break;
		case NVMF_OPT_RECONNECT_DELAY:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid reconnect_delay %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->reconnect_delay = token;
			break;
		case NVMF_OPT_HOST_TRADDR:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->host_traddr);
			opts->host_traddr = p;
			break;
		case NVMF_OPT_HOST_IFACE:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->host_iface);
			opts->host_iface = p;
			break;
		case NVMF_OPT_HOST_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			ret = uuid_parse(p, &hostid);
			if (ret) {
				pr_err("Invalid hostid %s\n", p);
				ret = -EINVAL;
				kfree(p);
				goto out;
			}
			kfree(p);
			break;
		case NVMF_OPT_DUP_CONNECT:
			opts->duplicate_connect = true;
			break;
		case NVMF_OPT_DISABLE_SQFLOW:
			opts->disable_sqflow = true;
			break;
		case NVMF_OPT_HDR_DIGEST:
			opts->hdr_digest = true;
			break;
		case NVMF_OPT_DATA_DIGEST:
			opts->data_digest = true;
			break;
		case NVMF_OPT_NR_WRITE_QUEUES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid nr_write_queues %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->nr_write_queues = token;
			break;
		case NVMF_OPT_NR_POLL_QUEUES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid nr_poll_queues %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->nr_poll_queues = token;
			break;
		case NVMF_OPT_TOS:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token < 0) {
				pr_err("Invalid type of service %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			if (token > 255) {
				pr_warn("Clamping type of service to 255\n");
				token = 255;
			}
			opts->tos = token;
			break;
		case NVMF_OPT_KEYRING:
			if (match_int(args, &key_id) || key_id <= 0) {
				ret = -EINVAL;
				goto out;
			}
			key = nvmf_parse_key(key_id);
			if (IS_ERR(key)) {
				ret = PTR_ERR(key);
				goto out;
			}
			key_put(opts->keyring);
			opts->keyring = key;
			break;
		case NVMF_OPT_TLS_KEY:
			if (match_int(args, &key_id) || key_id <= 0) {
				ret = -EINVAL;
				goto out;
			}
			key = nvmf_parse_key(key_id);
			if (IS_ERR(key)) {
				ret = PTR_ERR(key);
				goto out;
			}
			key_put(opts->tls_key);
			opts->tls_key = key;
			break;
		case NVMF_OPT_DISCOVERY:
			opts->discovery_nqn = true;
			break;
		case NVMF_OPT_DHCHAP_SECRET:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) < 11 || strncmp(p, "DHHC-1:", 7)) {
				pr_err("Invalid DH-CHAP secret %s\n", p);
				kfree(p);
				ret = -EINVAL;
				goto out;
			}
			kfree(opts->dhchap_secret);
			opts->dhchap_secret = p;
			break;
		case NVMF_OPT_DHCHAP_CTRL_SECRET:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) < 11 || strncmp(p, "DHHC-1:", 7)) {
				pr_err("Invalid DH-CHAP secret %s\n", p);
				kfree(p);
				ret = -EINVAL;
				goto out;
			}
			kfree(opts->dhchap_ctrl_secret);
			opts->dhchap_ctrl_secret = p;
			break;
		case NVMF_OPT_TLS:
			if (!IS_ENABLED(CONFIG_NVME_TCP_TLS)) {
				pr_err("TLS is not supported\n");
				ret = -EINVAL;
				goto out;
			}
			opts->tls = true;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
				p);
			ret = -EINVAL;
			goto out;
		}
	}

	if (opts->discovery_nqn) {
		opts->nr_io_queues = 0;
		opts->nr_write_queues = 0;
		opts->nr_poll_queues = 0;
		opts->duplicate_connect = true;
	} else {
		if (!opts->kato)
			opts->kato = NVME_DEFAULT_KATO;
	}
	if (ctrl_loss_tmo < 0) {
		opts->max_reconnects = -1;
	} else {
		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
						opts->reconnect_delay);
		if (ctrl_loss_tmo < opts->fast_io_fail_tmo)
			pr_warn("failfast tmo (%d) larger than controller loss tmo (%d)\n",
				opts->fast_io_fail_tmo, ctrl_loss_tmo);
	}

	opts->host = nvmf_host_add(hostnqn, &hostid);
	if (IS_ERR(opts->host)) {
		ret = PTR_ERR(opts->host);
		opts->host = NULL;
		goto out;
	}

out:
	kfree(options);
	return ret;
}

void nvmf_set_io_queues(struct nvmf_ctrl_options *opts, u32 nr_io_queues,
			u32 io_queues[HCTX_MAX_TYPES])
{
	if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
		/*
		 * separate read/write queues
		 * hand out dedicated default queues only after we have
		 * sufficient read queues.
		 */
		io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
		nr_io_queues -= io_queues[HCTX_TYPE_READ];
		io_queues[HCTX_TYPE_DEFAULT] =
			min(opts->nr_write_queues, nr_io_queues);
		nr_io_queues -= io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/*
		 * shared read/write queues
		 * either no write queues were requested, or we don't have
		 * sufficient queue count to have dedicated default queues.
		 */
		io_queues[HCTX_TYPE_DEFAULT] =
			min(opts->nr_io_queues, nr_io_queues);
		nr_io_queues -= io_queues[HCTX_TYPE_DEFAULT];
	}

	if (opts->nr_poll_queues && nr_io_queues) {
		/* map dedicated poll queues only if we have queues left */
		io_queues[HCTX_TYPE_POLL] =
			min(opts->nr_poll_queues, nr_io_queues);
	}
}
EXPORT_SYMBOL_GPL(nvmf_set_io_queues);
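
/*
 * Worked example (illustrative): with nr_io_queues == 8 queues available
 * and opts->nr_io_queues == 4, opts->nr_write_queues == 2 and
 * opts->nr_poll_queues == 2, the first branch above applies and yields
 *
 *	io_queues[HCTX_TYPE_READ]    = 4
 *	io_queues[HCTX_TYPE_DEFAULT] = 2
 *	io_queues[HCTX_TYPE_POLL]    = 2
 *
 * i.e. reads get their dedicated queues first, writes next, polling last.
 */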

void nvmf_map_queues(struct blk_mq_tag_set *set, struct nvme_ctrl *ctrl,
		     u32 io_queues[HCTX_MAX_TYPES])
{
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (opts->nr_write_queues && io_queues[HCTX_TYPE_READ]) {
		/* separate read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			io_queues[HCTX_TYPE_READ];
		set->map[HCTX_TYPE_READ].queue_offset =
			io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/* shared read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_READ].queue_offset = 0;
	}

	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
	if (opts->nr_poll_queues && io_queues[HCTX_TYPE_POLL]) {
		/* map dedicated poll queues only if we have queues left */
		set->map[HCTX_TYPE_POLL].nr_queues = io_queues[HCTX_TYPE_POLL];
		set->map[HCTX_TYPE_POLL].queue_offset =
			io_queues[HCTX_TYPE_DEFAULT] +
			io_queues[HCTX_TYPE_READ];
		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
	}

	dev_info(ctrl->device,
		"mapped %d/%d/%d default/read/poll queues.\n",
		io_queues[HCTX_TYPE_DEFAULT],
		io_queues[HCTX_TYPE_READ],
		io_queues[HCTX_TYPE_POLL]);
}
EXPORT_SYMBOL_GPL(nvmf_map_queues);
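
/*
 * Continuing the worked example above (sketch): with default/read/poll
 * counts of 2/4/2, the resulting blk-mq maps are
 *
 *	HCTX_TYPE_DEFAULT: nr_queues = 2, queue_offset = 0
 *	HCTX_TYPE_READ:    nr_queues = 4, queue_offset = 2
 *	HCTX_TYPE_POLL:    nr_queues = 2, queue_offset = 6
 *
 * so hardware contexts 0-1 carry default (write) traffic, 2-5 carry reads
 * and 6-7 are polled.
 */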

static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts,
		unsigned int required_opts)
{
	if ((opts->mask & required_opts) != required_opts) {
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
			if ((opt_tokens[i].token & required_opts) &&
			    !(opt_tokens[i].token & opts->mask)) {
				pr_warn("missing parameter '%s'\n",
					opt_tokens[i].pattern);
			}
		}

		return -EINVAL;
	}

	return 0;
}

bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
		struct nvmf_ctrl_options *opts)
{
	if (!nvmf_ctlr_matches_baseopts(ctrl, opts) ||
	    strcmp(opts->traddr, ctrl->opts->traddr) ||
	    strcmp(opts->trsvcid, ctrl->opts->trsvcid))
		return false;

	/*
	 * Checking the local address or host interfaces is rough.
	 *
	 * In most cases, none is specified and the host port or
	 * host interface is selected by the stack.
	 *
	 * Assume no match if:
	 * - local address or host interface is specified and address
	 *   or host interface is not the same
	 * - local address or host interface is not specified but
	 *   remote is, or vice versa (admin using specific
	 *   host_traddr/host_iface when it matters).
	 */
	if ((opts->mask & NVMF_OPT_HOST_TRADDR) &&
	    (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
		if (strcmp(opts->host_traddr, ctrl->opts->host_traddr))
			return false;
	} else if ((opts->mask & NVMF_OPT_HOST_TRADDR) ||
		   (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
		return false;
	}

	if ((opts->mask & NVMF_OPT_HOST_IFACE) &&
	    (ctrl->opts->mask & NVMF_OPT_HOST_IFACE)) {
		if (strcmp(opts->host_iface, ctrl->opts->host_iface))
			return false;
	} else if ((opts->mask & NVMF_OPT_HOST_IFACE) ||
		   (ctrl->opts->mask & NVMF_OPT_HOST_IFACE)) {
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(nvmf_ip_options_match);

static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
		unsigned int allowed_opts)
{
	if (opts->mask & ~allowed_opts) {
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
			if ((opt_tokens[i].token & opts->mask) &&
			    (opt_tokens[i].token & ~allowed_opts)) {
				pr_warn("invalid parameter '%s'\n",
					opt_tokens[i].pattern);
			}
		}

		return -EINVAL;
	}

	return 0;
}

void nvmf_free_options(struct nvmf_ctrl_options *opts)
{
	nvmf_host_put(opts->host);
	key_put(opts->keyring);
	key_put(opts->tls_key);
	kfree(opts->transport);
	kfree(opts->traddr);
	kfree(opts->trsvcid);
	kfree(opts->subsysnqn);
	kfree(opts->host_traddr);
	kfree(opts->host_iface);
	kfree(opts->dhchap_secret);
	kfree(opts->dhchap_ctrl_secret);
	kfree(opts);
}
EXPORT_SYMBOL_GPL(nvmf_free_options);

#define NVMF_REQUIRED_OPTS	(NVMF_OPT_TRANSPORT | NVMF_OPT_NQN)
#define NVMF_ALLOWED_OPTS	(NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
				 NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
				 NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\
				 NVMF_OPT_DISABLE_SQFLOW | NVMF_OPT_DISCOVERY |\
				 NVMF_OPT_FAIL_FAST_TMO | NVMF_OPT_DHCHAP_SECRET |\
				 NVMF_OPT_DHCHAP_CTRL_SECRET)

static struct nvme_ctrl *
nvmf_create_ctrl(struct device *dev, const char *buf)
{
	struct nvmf_ctrl_options *opts;
	struct nvmf_transport_ops *ops;
	struct nvme_ctrl *ctrl;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return ERR_PTR(-ENOMEM);

	ret = nvmf_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	request_module("nvme-%s", opts->transport);

	/*
	 * Check the generic options first as we need a valid transport for
	 * the lookup below.  Then clear the generic flags so that transport
	 * drivers don't have to care about them.
	 */
	ret = nvmf_check_required_opts(opts, NVMF_REQUIRED_OPTS);
	if (ret)
		goto out_free_opts;
	opts->mask &= ~NVMF_REQUIRED_OPTS;

	down_read(&nvmf_transports_rwsem);
	ops = nvmf_lookup_transport(opts);
	if (!ops) {
		pr_info("no handler found for transport %s.\n",
			opts->transport);
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!try_module_get(ops->module)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	up_read(&nvmf_transports_rwsem);

	ret = nvmf_check_required_opts(opts, ops->required_opts);
	if (ret)
		goto out_module_put;
	ret = nvmf_check_allowed_opts(opts, NVMF_ALLOWED_OPTS |
				ops->allowed_opts | ops->required_opts);
	if (ret)
		goto out_module_put;

	ctrl = ops->create_ctrl(dev, opts);
	if (IS_ERR(ctrl)) {
		ret = PTR_ERR(ctrl);
		goto out_module_put;
	}

	module_put(ops->module);
	return ctrl;

out_module_put:
	module_put(ops->module);
	goto out_free_opts;
out_unlock:
	up_read(&nvmf_transports_rwsem);
out_free_opts:
	nvmf_free_options(opts);
	return ERR_PTR(ret);
}

static const struct class nvmf_class = {
	.name = "nvme-fabrics",
};

static struct device *nvmf_device;
static DEFINE_MUTEX(nvmf_dev_mutex);

static ssize_t nvmf_dev_write(struct file *file, const char __user *ubuf,
		size_t count, loff_t *pos)
{
	struct seq_file *seq_file = file->private_data;
	struct nvme_ctrl *ctrl;
	const char *buf;
	int ret = 0;

	if (count > PAGE_SIZE)
		return -ENOMEM;

	buf = memdup_user_nul(ubuf, count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	mutex_lock(&nvmf_dev_mutex);
	if (seq_file->private) {
		ret = -EINVAL;
		goto out_unlock;
	}

	ctrl = nvmf_create_ctrl(nvmf_device, buf);
	if (IS_ERR(ctrl)) {
		ret = PTR_ERR(ctrl);
		goto out_unlock;
	}

	seq_file->private = ctrl;

out_unlock:
	mutex_unlock(&nvmf_dev_mutex);
	kfree(buf);
	return ret ? ret : count;
}

static void __nvmf_concat_opt_tokens(struct seq_file *seq_file)
{
	const struct match_token *tok;
	int idx;

	/*
	 * Add dummy entries for instance and cntlid to
	 * signal an invalid/non-existing controller
	 */
	seq_puts(seq_file, "instance=-1,cntlid=-1");
	for (idx = 0; idx < ARRAY_SIZE(opt_tokens); idx++) {
		tok = &opt_tokens[idx];
		if (tok->token == NVMF_OPT_ERR)
			continue;
		seq_puts(seq_file, ",");
		seq_puts(seq_file, tok->pattern);
	}
	seq_puts(seq_file, "\n");
}

static int nvmf_dev_show(struct seq_file *seq_file, void *private)
{
	struct nvme_ctrl *ctrl;

	mutex_lock(&nvmf_dev_mutex);
	ctrl = seq_file->private;
	if (!ctrl) {
		__nvmf_concat_opt_tokens(seq_file);
		goto out_unlock;
	}

	seq_printf(seq_file, "instance=%d,cntlid=%d\n",
			ctrl->instance, ctrl->cntlid);

out_unlock:
	mutex_unlock(&nvmf_dev_mutex);
	return 0;
}

static int nvmf_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The miscdevice code initializes file->private_data, but doesn't
	 * make use of it later.
	 */
	file->private_data = NULL;
	return single_open(file, nvmf_dev_show, NULL);
}

static int nvmf_dev_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq_file = file->private_data;
	struct nvme_ctrl *ctrl = seq_file->private;

	if (ctrl)
		nvme_put_ctrl(ctrl);
	return single_release(inode, file);
}

static const struct file_operations nvmf_dev_fops = {
	.owner		= THIS_MODULE,
	.write		= nvmf_dev_write,
	.read		= seq_read,
	.open		= nvmf_dev_open,
	.release	= nvmf_dev_release,
};

static struct miscdevice nvmf_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "nvme-fabrics",
	.fops		= &nvmf_dev_fops,
};
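
/*
 * Illustrative userspace interaction (sketch; nvme-cli does the same thing
 * under the hood): a controller is created by writing an option string to
 * an open /dev/nvme-fabrics fd, and reading back the same fd reports the
 * result. Reading without a prior write lists the supported options.
 *
 *	fd = open("/dev/nvme-fabrics", O_RDWR);
 *	write(fd, "transport=tcp,traddr=192.168.1.1,trsvcid=4420,"
 *		  "nqn=nqn.2014-08.org.example:subsys1", len);
 *	read(fd, buf, sizeof(buf));	-> "instance=0,cntlid=1\n"
 */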

static int __init nvmf_init(void)
{
	int ret;

	nvmf_default_host = nvmf_host_default();
	if (!nvmf_default_host)
		return -ENOMEM;

	ret = class_register(&nvmf_class);
	if (ret) {
		pr_err("couldn't register class nvme-fabrics\n");
		goto out_free_host;
	}

	nvmf_device =
		device_create(&nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
	if (IS_ERR(nvmf_device)) {
		pr_err("couldn't create nvme-fabrics device!\n");
		ret = PTR_ERR(nvmf_device);
		goto out_destroy_class;
	}

	ret = misc_register(&nvmf_misc);
	if (ret) {
		pr_err("couldn't register misc device: %d\n", ret);
		goto out_destroy_device;
	}

	return 0;

out_destroy_device:
	device_destroy(&nvmf_class, MKDEV(0, 0));
out_destroy_class:
	class_unregister(&nvmf_class);
out_free_host:
	nvmf_host_put(nvmf_default_host);
	return ret;
}

static void __exit nvmf_exit(void)
{
	misc_deregister(&nvmf_misc);
	device_destroy(&nvmf_class, MKDEV(0, 0));
	class_unregister(&nvmf_class);
	nvmf_host_put(nvmf_default_host);

	BUILD_BUG_ON(sizeof(struct nvmf_common_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_connect_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_property_get_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_property_set_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_auth_send_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_auth_receive_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_connect_data) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_negotiate_data) != 8);
	BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_challenge_data) != 16);
	BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_reply_data) != 16);
	BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_success1_data) != 16);
	BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_success2_data) != 16);
}

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("NVMe host fabrics library");

module_init(nvmf_init);
module_exit(nvmf_exit);