// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/part_stat.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

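/*
 * Extract the data length from a Get Log Page command.  NUMD is a 0's
 * based dword count split across two fields: NUMDL in the lower 16 bits
 * and NUMDU in the upper 16 bits.  Illustrative example: numdu == 0 and
 * numdl == 0x3ff encode NUMD = 0x3ff, i.e. (0x3ff + 1) * 4 = 4096 bytes.
 */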
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}

static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
	switch (cdw10 & 0xff) {
	case NVME_FEAT_HOST_ID:
		return sizeof(req->sq->ctrl->hostid);
	default:
		return 0;
	}
}

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
	return le64_to_cpu(cmd->get_log_page.lpo);
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}

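/*
 * The error log is kept as a ring of NVMET_ERROR_LOG_SLOTS entries;
 * err_counter only ever increments, so (err_counter % slots) is the most
 * recently written slot.  Walking the slot index backwards from there
 * returns entries newest-first, which is the order the spec expects.
 */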
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	unsigned long flags;
	off_t offset = 0;
	u64 slot;
	u64 i;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
				sizeof(struct nvme_error_slot)))
			break;

		if (slot == 0)
			slot = NVMET_ERROR_LOG_SLOTS - 1;
		else
			slot--;
		offset += sizeof(struct nvme_error_slot);
	}
	spin_unlock_irqrestore(&ctrl->error_lock, flags);
	nvmet_req_complete(req, 0);
}

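/*
 * SMART "data units" are reported in units of 1000 512-byte sectors,
 * rounded up, per the NVMe spec; part_stat sector counts are already in
 * 512-byte units, hence the DIV_ROUND_UP(..., 1000) below.
 */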
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	struct nvmet_ns *ns;
	u64 host_reads, host_writes, data_units_read, data_units_written;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
	if (!ns) {
		pr_err("Could not find namespace id: %u\n",
		       le32_to_cpu(req->cmd->get_log_page.nsid));
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		return NVME_SC_INVALID_NS;
	}

	/* we don't have the right data for file backed ns */
	if (!ns->bdev)
		goto out;

	host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
	data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
		sectors[READ]), 1000);
	host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
	data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
		sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
out:
	nvmet_put_namespace(ns);

	return NVME_SC_SUCCESS;
}

static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;
	unsigned long idx;

	ctrl = req->sq->ctrl;
	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		/* we don't have the right data for file backed ns */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
		data_units_read += DIV_ROUND_UP(
			part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
		host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
		data_units_written += DIV_ROUND_UP(
			part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
	}

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;
	unsigned long flags;

	if (req->transfer_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
	put_unaligned_le64(req->sq->ctrl->err_counter,
			&log->num_err_log_entries);
	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	u16 status = NVME_SC_INTERNAL;
	struct nvme_effects_log *log;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0);

	log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));

	kfree(log);
out:
	nvmet_req_complete(req, status);
}

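/*
 * Per the spec the Changed Namespace List log holds up to 1024 NSIDs.
 * The code that maintains the list (nvmet_ns_changed()) marks an
 * overflow by setting nr_changed_ns to U32_MAX, in which case only the
 * single 0xffffffff sentinel entry is valid; the remainder of the log
 * is zero-filled either way.  Reading the log clears it and rearms the
 * namespace-attribute AEN.
 */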
static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}

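/*
 * Format a single ANA group descriptor.  If the host set the RGO
 * ("Return Groups Only") bit in the LSP field we only report the group
 * itself and skip the per-namespace ID list.
 */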
static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 count = 0;

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		xa_for_each(&ctrl->subsys->namespaces, idx, ns)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
}

static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
			NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}
	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (nvmet_ana_group_enabled[grpid])
			ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
		return;

	switch (req->cmd->get_log_page.lid) {
	case NVME_LOG_ERROR:
		return nvmet_execute_get_log_page_error(req);
	case NVME_LOG_SMART:
		return nvmet_execute_get_log_page_smart(req);
	case NVME_LOG_FW_SLOT:
		/*
		 * We only support a single firmware slot which always is
		 * active, so we can zero out the whole firmware slot log and
		 * still claim to fully implement this mandatory log page.
		 */
		return nvmet_execute_get_log_page_noop(req);
	case NVME_LOG_CHANGED_NS:
		return nvmet_execute_get_log_changed_ns(req);
	case NVME_LOG_CMD_EFFECTS:
		return nvmet_execute_get_log_cmd_effects_ns(req);
	case NVME_LOG_ANA:
		return nvmet_execute_get_log_page_ana(req);
	}
	pr_err("unhandled lid %d on qid %d\n",
	       req->cmd->get_log_page.lid, req->sq->qid);
	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

static void nvmet_id_set_model_number(struct nvme_id_ctrl *id,
				      struct nvmet_subsys *subsys)
{
	const char *model = NVMET_DEFAULT_CTRL_MODEL;
	struct nvmet_subsys_model *subsys_model;

	rcu_read_lock();
	subsys_model = rcu_dereference(subsys->model);
	if (subsys_model)
		model = subsys_model->number;
	memcpy_and_pad(id->mn, sizeof(id->mn), model, strlen(model), ' ');
	rcu_read_unlock();
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u32 cmd_capsule_size;
	u16 status = 0;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memset(id->sn, ' ', sizeof(id->sn));
	bin2hex(id->sn, &ctrl->subsys->serial,
		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
	nvmet_id_set_model_number(id, ctrl->subsys);
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->rab = 6;

	/*
	 * XXX: figure out how we can assign an IEEE OUI, but until then
	 * the safest is to leave it as zeroes.
	 */

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = (1 << 0) | (1 << 1) | (1 << 3);

	/* Limit MDTS according to transport capability */
	if (ctrl->ops->get_mdts)
		id->mdts = ctrl->ops->get_mdts(ctrl);
	else
		id->mdts = 0;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
		NVME_CTRL_ATTR_TBKAS);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands.  But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than an LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	/*
	 * Max command capsule size is sqe + in-capsule data size.
	 * Disable in-capsule data for Metadata capable controllers.
	 */
	cmd_capsule_size = sizeof(struct nvme_command);
	if (!ctrl->pi_support)
		cmd_capsule_size += req->port->inline_data_size;
	id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);

	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state.  Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	struct nvme_id_ns *id;
	u16 status = 0;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
	if (!ns)
		goto done;

	nvmet_ns_revalidate(ns);

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
	switch (req->port->ana_state[ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	if (ns->bdev)
		nvmet_bdev_set_limits(ns->bdev, id);

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = (1 << 0);
	id->anagrpid = cpu_to_le32(ns->anagrpid);

	memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = ns->blksize_shift;

	if (ctrl->pi_support && nvmet_ns_has_pi(ns)) {
		id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
			  NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
			  NVME_NS_DPC_PI_TYPE3;
		id->mc = NVME_MC_EXTENDED_LBA;
		id->dps = ns->pi_type;
		id->flbas = NVME_NS_FLBAS_META_EXT;
		id->lbaf[0].ms = cpu_to_le16(ns->metadata_size);
	}

	if (ns->readonly)
		id->nsattr |= (1 << 0);
	nvmet_put_namespace(ns);
done:
	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
	kfree(id);
out:
	nvmet_req_complete(req, status);
}

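/*
 * CNS 0x02: Active Namespace ID list.  Per the spec this returns up to
 * 1024 NSIDs, each greater than the NSID in the command, in ascending
 * order; iterating the namespaces xarray by index gives us that order
 * for free.  Unused tail entries stay zero from kzalloc().
 */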
static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}

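/*
 * CNS 0x03: Namespace Identification Descriptor list.  The buffer is a
 * sequence of (NIDT, NIDL, payload) descriptors; we emit a UUID and/or
 * NGUID descriptor when the namespace has one, then zero-fill the rest
 * of the 4KB buffer, which also serves as the list terminator.
 */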
static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	u16 status = 0;
	off_t off = 0;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &ns->uuid, &off);
		if (status)
			goto out_put_ns;
	}
	if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &ns->nguid, &off);
		if (status)
			goto out_put_ns;
	}

	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
out_put_ns:
	nvmet_put_namespace(ns);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	switch (req->cmd->identify.cns) {
	case NVME_ID_CNS_NS:
		return nvmet_execute_identify_ns(req);
	case NVME_ID_CNS_CTRL:
		return nvmet_execute_identify_ctrl(req);
	case NVME_ID_CNS_NS_ACTIVE_LIST:
		return nvmet_execute_identify_nslist(req);
	case NVME_ID_CNS_NS_DESC_LIST:
		return nvmet_execute_identify_desclist(req);
	}

	pr_err("unhandled identify cns %d on qid %d\n",
	       req->cmd->identify.cns, req->sq->qid);
	req->error_loc = offsetof(struct nvme_identify, cns);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so we don't even bother to wait for the command to be
 * executed and return immediately, reporting that the command to abort
 * wasn't found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}

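/*
 * Namespace Write Protect (introduced in NVMe 1.4).  We only implement
 * the two states that need no persistent storage of the setting:
 * "write protect" and "no write protect".  Pending writes are flushed
 * before the namespace goes read-only so no dirty data slips in after
 * the state change.
 */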
static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
	if (unlikely(!req->ns)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return status;
	}

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}

u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);

	nvmet_set_result(req, req->sq->ctrl->kato);

	return 0;
}

u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	if (val32 & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
	nvmet_set_result(req, val32);

	return 0;
}

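/*
 * Number of Queues is a 0's based value in both the request (NSQR/NCQR
 * in cdw11) and the result (NSQA/NCQA).  Illustrative example: with
 * max_qid == 128 we return 127 in both halves of the result, i.e. 128
 * I/O submission and 128 I/O completion queues.  0xffff is not a valid
 * 0's based request per the spec, hence the explicit rejection below.
 */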
void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	u16 status = 0;
	u16 nsqr;
	u16 ncqr;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		ncqr = (cdw11 >> 16) & 0xffff;
		nsqr = cdw11 & 0xffff;
		if (ncqr == 0xffff || nsqr == 0xffff) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 result;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
	if (!req->ns) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return NVME_SC_INVALID_NS | NVME_SC_DNR;
	}
	mutex_lock(&subsys->lock);
	if (req->ns->readonly)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);

	return 0;
}

void nvmet_get_feat_kato(struct nvmet_req *req)
{
	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
		return;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
			req->error_loc =
				offsetof(struct nvme_common_command, cdw11);
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
					   sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

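/*
 * Keep Alive simply restarts the controller's keep-alive timer: kato is
 * stored in seconds (Set Features rounds the host's millisecond value
 * up), so each command pushes the timeout out by another kato seconds.
 */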
void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		 ctrl->cntlid, ctrl->kato);

	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
	nvmet_req_complete(req, 0);
}

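/*
 * Admin command dispatch: fabrics commands and discovery subsystems get
 * their own parsers, then the controller state is checked, passthru
 * controllers get a chance to forward the command, and only then do we
 * look up the opcode in the table below.
 */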
u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_cmd(req);
	if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
		return nvmet_parse_discovery_cmd(req);

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	if (nvmet_req_passthru_ctrl(req))
		return nvmet_parse_passthru_admin_cmd(req);

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_identify;
		return 0;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	}

	pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
	       req->sq->qid);
	req->error_loc = offsetof(struct nvme_common_command, opcode);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}