// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 * Copyright (c) 2017-2021 Christoph Hellwig.
 */
#include <linux/ptrace.h>	/* for force_successful_syscall_return */
#include <linux/nvme_ioctl.h>
#include <linux/io_uring.h>
#include "nvme.h"

/*
 * Convert integer values from ioctl structures to user pointers, silently
 * ignoring the upper bits in the compat case to match behaviour of 32-bit
 * kernels.
 */
static void __user *nvme_to_user_ptr(uintptr_t ptrval)
{
	if (in_compat_syscall())
		ptrval = (compat_uptr_t)ptrval;
	return (void __user *)ptrval;
}

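/*
 * Bounce user metadata through a kernel buffer: allocate it, copy it in for
 * host-to-device transfers, and attach it to @bio as a single-vector
 * integrity payload.  @seed becomes the initial reference tag.  Returns the
 * kernel buffer on success or an ERR_PTR() on failure.
 */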
static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
		unsigned len, u32 seed, bool write)
{
	struct bio_integrity_payload *bip;
	int ret = -ENOMEM;
	void *buf;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		goto out;

	ret = -EFAULT;
	if (write && copy_from_user(buf, ubuf, len))
		goto out_free_meta;

	bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
	if (IS_ERR(bip)) {
		ret = PTR_ERR(bip);
		goto out_free_meta;
	}

	bip->bip_iter.bi_size = len;
	bip->bip_iter.bi_sector = seed;
	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
			offset_in_page(buf));
	if (ret == len)
		return buf;
	ret = -ENOMEM;
out_free_meta:
	kfree(buf);
out:
	return ERR_PTR(ret);
}

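/*
 * Finish the metadata side of a user command: for a device-to-host transfer
 * (REQ_OP_DRV_IN) that succeeded, copy the bounce buffer back to userspace.
 * The buffer is freed in all cases.
 */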
static int nvme_finish_user_metadata(struct request *req, void __user *ubuf,
		void *meta, unsigned len, int ret)
{
	if (!ret && req_op(req) == REQ_OP_DRV_IN &&
	    copy_to_user(ubuf, meta, len))
		ret = -EFAULT;
	kfree(meta);
	return ret;
}

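/*
 * Build a passthrough request: allocate it, map the user data buffer (either
 * directly or through an iovec array when @vec is set), and attach any user
 * metadata.  On success the caller owns the request and, via *@metap, the
 * metadata bounce buffer.
 */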
static struct request *nvme_alloc_user_request(struct request_queue *q,
		struct nvme_command *cmd, void __user *ubuffer,
		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
		u32 meta_seed, void **metap, unsigned timeout, bool vec,
		blk_opf_t rq_flags, blk_mq_req_flags_t blk_flags)
{
	bool write = nvme_is_write(cmd);
	struct nvme_ns *ns = q->queuedata;
	struct block_device *bdev = ns ? ns->disk->part0 : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);
	if (IS_ERR(req))
		return req;
	nvme_init_request(req, cmd);

	if (timeout)
		req->timeout = timeout;
	nvme_req(req)->flags |= NVME_REQ_USERCMD;

	if (ubuffer && bufflen) {
		if (!vec)
			ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		else {
			struct iovec fast_iov[UIO_FASTIOV];
			struct iovec *iov = fast_iov;
			struct iov_iter iter;

			ret = import_iovec(rq_data_dir(req), ubuffer, bufflen,
					UIO_FASTIOV, &iov, &iter);
			if (ret < 0)
				goto out;
			ret = blk_rq_map_user_iov(q, req, NULL, &iter,
					GFP_KERNEL);
			kfree(iov);
		}
		if (ret)
			goto out;
		bio = req->bio;
		if (bdev)
			bio_set_dev(bio, bdev);
		if (bdev && meta_buffer && meta_len) {
			meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
					meta_seed, write);
			if (IS_ERR(meta)) {
				ret = PTR_ERR(meta);
				goto out_unmap;
			}
			req->cmd_flags |= REQ_INTEGRITY;
			*metap = meta;
		}
	}

	return req;

out_unmap:
	if (bio)
		blk_rq_unmap_user(bio);
out:
	blk_mq_free_request(req);
	return ERR_PTR(ret);
}

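/*
 * Synchronous passthrough: allocate and map the request, execute it, then
 * copy the completion result and any read metadata back to userspace before
 * tearing everything down.
 */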
static int nvme_submit_user_cmd(struct request_queue *q,
		struct nvme_command *cmd, void __user *ubuffer,
		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
		u32 meta_seed, u64 *result, unsigned timeout, bool vec)
{
	struct request *req;
	void *meta = NULL;
	struct bio *bio;
	int ret;

	req = nvme_alloc_user_request(q, cmd, ubuffer, bufflen, meta_buffer,
			meta_len, meta_seed, &meta, timeout, vec, 0, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	bio = req->bio;

	ret = nvme_execute_passthru_rq(req);

	if (result)
		*result = le64_to_cpu(nvme_req(req)->result.u64);
	if (meta)
		ret = nvme_finish_user_metadata(req, meta_buffer, meta,
						meta_len, ret);
	if (bio)
		blk_rq_unmap_user(bio);
	blk_mq_free_request(req);
	return ret;
}

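/*
 * Handler for NVME_IOCTL_SUBMIT_IO: validate a struct nvme_user_io from
 * userspace (only read, write and compare opcodes are accepted), derive the
 * data and metadata lengths from the namespace format, and translate it into
 * an NVMe read/write command.
 *
 * A minimal userspace sketch for reference (error handling omitted; the
 * device path, buffer and LBA values are placeholders):
 *
 *	struct nvme_user_io io = {
 *		.opcode  = 0x02,		// nvme_cmd_read
 *		.slba    = 0,
 *		.nblocks = 0,			// zeroes-based: one block
 *		.addr    = (__u64)(uintptr_t)buf,
 *	};
 *	int fd = open("/dev/nvme0n1", O_RDONLY);
 *	ioctl(fd, NVME_IOCTL_SUBMIT_IO, &io);
 */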
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	if (io.flags)
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;

	if ((io.control & NVME_RW_PRINFO_PRACT) &&
	    ns->ms == sizeof(struct t10_pi_tuple)) {
		/*
		 * Protection information is stripped/inserted by the
		 * controller.
		 */
		if (nvme_to_user_ptr(io.metadata))
			return -EINVAL;
		meta_len = 0;
		metadata = NULL;
	} else {
		meta_len = (io.nblocks + 1) * ns->ms;
		metadata = nvme_to_user_ptr(io.metadata);
	}

	if (ns->features & NVME_NS_EXT_LBAS) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->head->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return nvme_submit_user_cmd(ns->queue, &c,
			nvme_to_user_ptr(io.addr), length,
			metadata, meta_len, lower_32_bits(io.slba), NULL, 0,
			false);
}

static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
		struct nvme_ns *ns, __u32 nsid)
{
	if (ns && nsid != ns->head->ns_id) {
		dev_err(ctrl->device,
			"%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
			current->comm, nsid, ns->head->ns_id);
		return false;
	}

	return true;
}

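/*
 * Handler for NVME_IOCTL_ADMIN_CMD and NVME_IOCTL_IO_CMD: copy in a
 * struct nvme_passthru_cmd, translate it into a struct nvme_command, submit
 * it synchronously and write the 32-bit result back to userspace.
 * Requires CAP_SYS_ADMIN.
 */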
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	u64 result;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;
	if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			nvme_to_user_ptr(cmd.addr), cmd.data_len,
			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
			0, &result, timeout, false);

	if (status >= 0) {
		if (put_user(result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

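/*
 * 64-bit variant of nvme_user_cmd(): same flow, but takes a
 * struct nvme_passthru_cmd64 with a 64-bit result field and optionally maps
 * the data buffer as an iovec array when @vec is set.
 */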
static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd64 __user *ucmd, bool vec)
{
	struct nvme_passthru_cmd64 cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;
	if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			nvme_to_user_ptr(cmd.addr), cmd.data_len,
			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
			0, &cmd.result, timeout, vec);

	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

struct nvme_uring_data {
	__u64	metadata;
	__u64	addr;
	__u32	data_len;
	__u32	metadata_len;
	__u32	timeout_ms;
};

/*
 * This overlays struct io_uring_cmd pdu.
 * Expect build errors if this grows larger than that.
 */
struct nvme_uring_cmd_pdu {
	union {
		struct bio *bio;
		struct request *req;
	};
	void *meta; /* kernel-resident buffer */
	void __user *meta_buffer;
	u32 meta_len;
};

static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
		struct io_uring_cmd *ioucmd)
{
	return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
}

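/*
 * Deferred completion, run in the submitting task's context: pick up status
 * and result from the finished request, copy back and free any metadata,
 * unmap the data buffer, and post the CQE via io_uring_cmd_done().
 */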
static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
{
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
	struct request *req = pdu->req;
	struct bio *bio = req->bio;
	int status;
	u64 result;

	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		status = -EINTR;
	else
		status = nvme_req(req)->status;

	result = le64_to_cpu(nvme_req(req)->result.u64);

	if (pdu->meta)
		status = nvme_finish_user_metadata(req, pdu->meta_buffer,
				pdu->meta, pdu->meta_len, status);
	if (bio)
		blk_rq_unmap_user(bio);
	blk_mq_free_request(req);

	io_uring_cmd_done(ioucmd, status, result);
}

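/*
 * Request end_io handler, potentially called in interrupt context: stash the
 * completed request in the pdu and bounce the rest of the work to task
 * context, where the user-memory copies in nvme_uring_task_cb() are safe.
 */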
static void nvme_uring_cmd_end_io(struct request *req, blk_status_t err)
{
	struct io_uring_cmd *ioucmd = req->end_io_data;
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
	/* extract bio before reusing the same field for request */
	struct bio *bio = pdu->bio;

	pdu->req = req;
	req->bio = bio;
	/* this takes care of moving rest of completion-work to task context */
	io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
}

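/*
 * Async passthrough via io_uring: read the big-SQE payload with READ_ONCE()
 * (userspace may modify the SQE concurrently), build the command, allocate
 * and map the request, honour IO_URING_F_NONBLOCK via REQ_NOWAIT, and fire
 * it off with blk_execute_rq_nowait().  Completion runs through the end_io
 * handler above; -EIOCBQUEUED tells io_uring the command is in flight.
 */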
static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
		struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec)
{
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
	const struct nvme_uring_cmd *cmd = ioucmd->cmd;
	struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
	struct nvme_uring_data d;
	struct nvme_command c;
	struct request *req;
	blk_opf_t rq_flags = 0;
	blk_mq_req_flags_t blk_flags = 0;
	void *meta = NULL;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	c.common.opcode = READ_ONCE(cmd->opcode);
	c.common.flags = READ_ONCE(cmd->flags);
	if (c.common.flags)
		return -EINVAL;

	c.common.command_id = 0;
	c.common.nsid = cpu_to_le32(cmd->nsid);
	if (!nvme_validate_passthru_nsid(ctrl, ns, le32_to_cpu(c.common.nsid)))
		return -EINVAL;

	c.common.cdw2[0] = cpu_to_le32(READ_ONCE(cmd->cdw2));
	c.common.cdw2[1] = cpu_to_le32(READ_ONCE(cmd->cdw3));
	c.common.metadata = 0;
	c.common.dptr.prp1 = c.common.dptr.prp2 = 0;
	c.common.cdw10 = cpu_to_le32(READ_ONCE(cmd->cdw10));
	c.common.cdw11 = cpu_to_le32(READ_ONCE(cmd->cdw11));
	c.common.cdw12 = cpu_to_le32(READ_ONCE(cmd->cdw12));
	c.common.cdw13 = cpu_to_le32(READ_ONCE(cmd->cdw13));
	c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14));
	c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15));

	d.metadata = READ_ONCE(cmd->metadata);
	d.addr = READ_ONCE(cmd->addr);
	d.data_len = READ_ONCE(cmd->data_len);
	d.metadata_len = READ_ONCE(cmd->metadata_len);
	d.timeout_ms = READ_ONCE(cmd->timeout_ms);

	if (issue_flags & IO_URING_F_NONBLOCK) {
		rq_flags = REQ_NOWAIT;
		blk_flags = BLK_MQ_REQ_NOWAIT;
	}

	req = nvme_alloc_user_request(q, &c, nvme_to_user_ptr(d.addr),
			d.data_len, nvme_to_user_ptr(d.metadata),
			d.metadata_len, 0, &meta, d.timeout_ms ?
			msecs_to_jiffies(d.timeout_ms) : 0, vec, rq_flags,
			blk_flags);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->end_io = nvme_uring_cmd_end_io;
	req->end_io_data = ioucmd;

	/* to free bio on completion, as req->bio will be null at that time */
	pdu->bio = req->bio;
	pdu->meta = meta;
	pdu->meta_buffer = nvme_to_user_ptr(d.metadata);
	pdu->meta_len = d.metadata_len;

	blk_execute_rq_nowait(req, false);
	return -EIOCBQUEUED;
}

static bool is_ctrl_ioctl(unsigned int cmd)
{
	if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
		return true;
	if (is_sed_ioctl(cmd))
		return true;
	return false;
}

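/*
 * Dispatch controller-level ioctls: admin passthrough commands are handled
 * here, anything else is assumed to be a SED-Opal ioctl and forwarded.
 */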
static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
		void __user *argp)
{
	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp);
	case NVME_IOCTL_ADMIN64_CMD:
		return nvme_user_cmd64(ctrl, NULL, argp, false);
	default:
		return sed_ioctl(ctrl->opal_dev, cmd, argp);
	}
}

#ifdef COMPAT_FOR_U64_ALIGNMENT
struct nvme_user_io32 {
	__u8	opcode;
	__u8	flags;
	__u16	control;
	__u16	nblocks;
	__u16	rsvd;
	__u64	metadata;
	__u64	addr;
	__u64	slba;
	__u32	dsmgmt;
	__u32	reftag;
	__u16	apptag;
	__u16	appmask;
} __attribute__((__packed__));
#define NVME_IOCTL_SUBMIT_IO32	_IOW('N', 0x42, struct nvme_user_io32)
#endif /* COMPAT_FOR_U64_ALIGNMENT */

static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
		void __user *argp)
{
	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->head->ns_id;
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, argp);
	/*
	 * struct nvme_user_io can have different padding on some 32-bit ABIs.
	 * Just accept the compat version as all fields that are used are the
	 * same size and at the same offset.
	 */
#ifdef COMPAT_FOR_U64_ALIGNMENT
	case NVME_IOCTL_SUBMIT_IO32:
#endif
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, argp);
	case NVME_IOCTL_IO64_CMD:
		return nvme_user_cmd64(ns->ctrl, ns, argp, false);
	case NVME_IOCTL_IO64_CMD_VEC:
		return nvme_user_cmd64(ns->ctrl, ns, argp, true);
	default:
		return -ENOTTY;
	}
}

static int __nvme_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *arg)
{
	if (is_ctrl_ioctl(cmd))
		return nvme_ctrl_ioctl(ns->ctrl, cmd, arg);
	return nvme_ns_ioctl(ns, cmd, arg);
}

int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	return __nvme_ioctl(ns, cmd, (void __user *)arg);
}

long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns =
		container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev);

	return __nvme_ioctl(ns, cmd, (void __user *)arg);
}

static int nvme_uring_cmd_checks(unsigned int issue_flags)
{
	/* IOPOLL not supported yet */
	if (issue_flags & IO_URING_F_IOPOLL)
		return -EOPNOTSUPP;

	/* NVMe passthrough requires big SQE/CQE support */
	if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) !=
	    (IO_URING_F_SQE128|IO_URING_F_CQE32))
		return -EOPNOTSUPP;
	return 0;
}

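/*
 * Namespace-level io_uring passthrough entry: verify ring capabilities and
 * dispatch NVME_URING_CMD_IO / NVME_URING_CMD_IO_VEC.  The BUILD_BUG_ON
 * enforces the pdu-overlay assumption documented at nvme_uring_cmd_pdu.
 *
 * A rough liburing-based submission sketch (assumes a ring created with
 * IORING_SETUP_SQE128 | IORING_SETUP_CQE32 and an open namespace char
 * device; variable names are illustrative and error handling is omitted):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	struct nvme_uring_cmd *cmd;
 *
 *	memset(sqe, 0, 2 * sizeof(*sqe));	// one 128-byte SQE slot
 *	sqe->opcode = IORING_OP_URING_CMD;
 *	sqe->fd = ns_fd;			// e.g. an open /dev/ng0n1
 *	sqe->cmd_op = NVME_URING_CMD_IO;
 *	cmd = (struct nvme_uring_cmd *)sqe->cmd;
 *	cmd->opcode = 0x02;			// nvme_cmd_read
 *	cmd->nsid = nsid;
 *	cmd->addr = (__u64)(uintptr_t)buf;
 *	cmd->data_len = len;
 *	io_uring_submit(&ring);
 */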
static int nvme_ns_uring_cmd(struct nvme_ns *ns, struct io_uring_cmd *ioucmd,
		unsigned int issue_flags)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	int ret;

	BUILD_BUG_ON(sizeof(struct nvme_uring_cmd_pdu) > sizeof(ioucmd->pdu));

	ret = nvme_uring_cmd_checks(issue_flags);
	if (ret)
		return ret;

	switch (ioucmd->cmd_op) {
	case NVME_URING_CMD_IO:
		ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, false);
		break;
	case NVME_URING_CMD_IO_VEC:
		ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, true);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	struct nvme_ns *ns = container_of(file_inode(ioucmd->file)->i_cdev,
			struct nvme_ns, cdev);

	return nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
}

#ifdef CONFIG_NVME_MULTIPATH
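/*
 * Controller ioctls issued through a multipath head node: take a controller
 * reference and drop the head's SRCU lock *before* executing the command, so
 * that a passthrough command which deletes namespaces cannot deadlock
 * against the SRCU read side (hence the __releases annotation).
 */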
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
		void __user *argp, struct nvme_ns_head *head, int srcu_idx)
	__releases(&head->srcu)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	int ret;

	nvme_get_ctrl(ns->ctrl);
	srcu_read_unlock(&head->srcu, srcu_idx);
	ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp);

	nvme_put_ctrl(ctrl);
	return ret;
}

int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;
	void __user *argp = (void __user *)arg;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (!ns)
		goto out_unlock;

	/*
	 * Handle ioctls that apply to the controller instead of the namespace
	 * separately and drop the ns SRCU reference early.  This avoids a
	 * deadlock when deleting namespaces using the passthrough interface.
	 */
	if (is_ctrl_ioctl(cmd))
		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);

	ret = nvme_ns_ioctl(ns, cmd, argp);
out_unlock:
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct cdev *cdev = file_inode(file)->i_cdev;
	struct nvme_ns_head *head =
		container_of(cdev, struct nvme_ns_head, cdev);
	void __user *argp = (void __user *)arg;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (!ns)
		goto out_unlock;

	if (is_ctrl_ioctl(cmd))
		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);

	ret = nvme_ns_ioctl(ns, cmd, argp);
out_unlock:
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
		unsigned int issue_flags)
{
	struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
	struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
	int srcu_idx = srcu_read_lock(&head->srcu);
	struct nvme_ns *ns = nvme_find_path(head);
	int ret = -EINVAL;

	if (ns)
		ret = nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}
#endif /* CONFIG_NVME_MULTIPATH */

int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	struct nvme_ctrl *ctrl = ioucmd->file->private_data;
	int ret;

	ret = nvme_uring_cmd_checks(issue_flags);
	if (ret)
		return ret;

	switch (ioucmd->cmd_op) {
	case NVME_URING_CMD_ADMIN:
		ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, false);
		break;
	case NVME_URING_CMD_ADMIN_VEC:
		ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, true);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

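/*
 * Legacy path for NVME_IOCTL_IO_CMD on the controller character device: it
 * only makes sense when the controller exposes exactly one namespace, since
 * the ioctl does not say which namespace it targets.  Grab a reference to
 * that namespace, drop the list lock, and submit.
 */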
static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
	struct nvme_ns *ns;
	int ret;

	down_read(&ctrl->namespaces_rwsem);
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->device,
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_warn(ctrl->device,
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	kref_get(&ns->kref);
	up_read(&ctrl->namespaces_rwsem);

	ret = nvme_user_cmd(ctrl, ns, argp);
	nvme_put_ns(ns);
	return ret;

out_unlock:
	up_read(&ctrl->namespaces_rwsem);
	return ret;
}

long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct nvme_ctrl *ctrl = file->private_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp);
	case NVME_IOCTL_ADMIN64_CMD:
		return nvme_user_cmd64(ctrl, NULL, argp, false);
	case NVME_IOCTL_IO_CMD:
		return nvme_dev_user_cmd(ctrl, argp);
	case NVME_IOCTL_RESET:
		dev_warn(ctrl->device, "resetting controller\n");
		return nvme_reset_ctrl_sync(ctrl);
	case NVME_IOCTL_SUBSYS_RESET:
		return nvme_reset_subsystem(ctrl);
	case NVME_IOCTL_RESCAN:
		nvme_queue_scan(ctrl);
		return 0;
	default:
		return -ENOTTY;
	}
}