// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/part_stat.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
        u32 len = le16_to_cpu(cmd->get_log_page.numdu);

        len <<= 16;
        len += le16_to_cpu(cmd->get_log_page.numdl);
        /* NUMD is a 0's based value */
        len += 1;
        len *= sizeof(u32);

        return len;
}
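
/*
 * Worked example for nvmet_get_log_page_len(): numdu = 0x0001 and
 * numdl = 0x0000 ask for (0x1 << 16) + 0x0 + 1 == 65537 dwords,
 * i.e. 65537 * 4 == 262148 bytes.
 */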

static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
        switch (cdw10 & 0xff) {
        case NVME_FEAT_HOST_ID:
                return sizeof(req->sq->ctrl->hostid);
        default:
                return 0;
        }
}

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
        return le64_to_cpu(cmd->get_log_page.lpo);
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
        nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}

static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        unsigned long flags;
        off_t offset = 0;
        u64 slot;
        u64 i;

        spin_lock_irqsave(&ctrl->error_lock, flags);
        slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

        for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
                if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
                                sizeof(struct nvme_error_slot)))
                        break;

                if (slot == 0)
                        slot = NVMET_ERROR_LOG_SLOTS - 1;
                else
                        slot--;
                offset += sizeof(struct nvme_error_slot);
        }
        spin_unlock_irqrestore(&ctrl->error_lock, flags);
        nvmet_req_complete(req, 0);
}
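
/*
 * Illustration of the walk above: with NVMET_ERROR_LOG_SLOTS == 128 and
 * err_counter == 130, the copy starts at slot 130 % 128 == 2 and then
 * visits 2, 1, 0, 127, 126, ..., so the newest error entry is always
 * reported first.
 */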

static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
                struct nvme_smart_log *slog)
{
        struct nvmet_ns *ns;
        u64 host_reads, host_writes, data_units_read, data_units_written;

        ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
        if (!ns) {
                pr_err("Could not find namespace id : %d\n",
                                le32_to_cpu(req->cmd->get_log_page.nsid));
                req->error_loc = offsetof(struct nvme_rw_command, nsid);
                return NVME_SC_INVALID_NS;
        }

        /* we don't have the right data for file backed ns */
        if (!ns->bdev)
                goto out;

        host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
        data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
                        sectors[READ]), 1000);
        host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
        data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
                        sectors[WRITE]), 1000);

        put_unaligned_le64(host_reads, &slog->host_reads[0]);
        put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
        put_unaligned_le64(host_writes, &slog->host_writes[0]);
        put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
out:
        nvmet_put_namespace(ns);

        return NVME_SC_SUCCESS;
}
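
/*
 * Note on units: the SMART log counts "data units" of 1000 512-byte
 * sectors, which is why the raw sector counters above are divided
 * (rounding up) by 1000.
 */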

static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
                struct nvme_smart_log *slog)
{
        u64 host_reads = 0, host_writes = 0;
        u64 data_units_read = 0, data_units_written = 0;
        struct nvmet_ns *ns;
        struct nvmet_ctrl *ctrl;

        ctrl = req->sq->ctrl;

        rcu_read_lock();
        list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
                /* we don't have the right data for file backed ns */
                if (!ns->bdev)
                        continue;
                host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
                data_units_read += DIV_ROUND_UP(
                        part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
                host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
                data_units_written += DIV_ROUND_UP(
                        part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
        }
        rcu_read_unlock();

        put_unaligned_le64(host_reads, &slog->host_reads[0]);
        put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
        put_unaligned_le64(host_writes, &slog->host_writes[0]);
        put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

        return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
        struct nvme_smart_log *log;
        u16 status = NVME_SC_INTERNAL;
        unsigned long flags;

        if (req->transfer_len != sizeof(*log))
                goto out;

        log = kzalloc(sizeof(*log), GFP_KERNEL);
        if (!log)
                goto out;

        if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
                status = nvmet_get_smart_log_all(req, log);
        else
                status = nvmet_get_smart_log_nsid(req, log);
        if (status)
                goto out_free_log;

        spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
        put_unaligned_le64(req->sq->ctrl->err_counter,
                           &log->num_err_log_entries);
        spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

        status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
        kfree(log);
out:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
        u16 status = NVME_SC_INTERNAL;
        struct nvme_effects_log *log;

        log = kzalloc(sizeof(*log), GFP_KERNEL);
        if (!log)
                goto out;

        log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0);

        log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);

        status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));

        kfree(log);
out:
        nvmet_req_complete(req, status);
}
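
/*
 * Each effects entry above only sets bit 0 (CSUPP, "command supported");
 * no other command effects are declared for any of these opcodes.
 */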

static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        u16 status = NVME_SC_INTERNAL;
        size_t len;

        if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
                goto out;

        mutex_lock(&ctrl->lock);
        if (ctrl->nr_changed_ns == U32_MAX)
                len = sizeof(__le32);
        else
                len = ctrl->nr_changed_ns * sizeof(__le32);
        status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
        if (!status)
                status = nvmet_zero_sgl(req, len, req->transfer_len - len);
        ctrl->nr_changed_ns = 0;
        nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
        mutex_unlock(&ctrl->lock);
out:
        nvmet_req_complete(req, status);
}

static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
                                  struct nvme_ana_group_desc *desc)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_ns *ns;
        u32 count = 0;

        if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
                rcu_read_lock();
                list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
                        if (ns->anagrpid == grpid)
                                desc->nsids[count++] = cpu_to_le32(ns->nsid);
                rcu_read_unlock();
        }

        desc->grpid = cpu_to_le32(grpid);
        desc->nnsids = cpu_to_le32(count);
        desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
        desc->state = req->port->ana_state[grpid];
        memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
        return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
}
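
/*
 * Example of the resulting layout: for grpid 1 holding namespaces 1
 * and 3 (with RGO clear), the descriptor is written as grpid = 1,
 * nnsids = 2, nsids = { 1, 3 }, and the returned length is
 * sizeof(struct nvme_ana_group_desc) + 2 * sizeof(__le32).
 */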

static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
        struct nvme_ana_rsp_hdr hdr = { 0, };
        struct nvme_ana_group_desc *desc;
        size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
        size_t len;
        u32 grpid;
        u16 ngrps = 0;
        u16 status;

        status = NVME_SC_INTERNAL;
        desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
                       NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
        if (!desc)
                goto out;

        down_read(&nvmet_ana_sem);
        for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
                if (!nvmet_ana_group_enabled[grpid])
                        continue;
                len = nvmet_format_ana_group(req, grpid, desc);
                status = nvmet_copy_to_sgl(req, offset, desc, len);
                if (status)
                        break;
                offset += len;
                ngrps++;
        }
        for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
                if (nvmet_ana_group_enabled[grpid])
                        ngrps++;
        }

        hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
        hdr.ngrps = cpu_to_le16(ngrps);
        nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
        up_read(&nvmet_ana_sem);

        kfree(desc);

        /* copy the header last once we know the number of groups */
        status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
        if (!nvmet_check_data_len(req, nvmet_get_log_page_len(req->cmd)))
                return;

        switch (req->cmd->get_log_page.lid) {
        case NVME_LOG_ERROR:
                return nvmet_execute_get_log_page_error(req);
        case NVME_LOG_SMART:
                return nvmet_execute_get_log_page_smart(req);
        case NVME_LOG_FW_SLOT:
                /*
                 * We only support a single firmware slot which is always
                 * active, so we can zero out the whole firmware slot log
                 * and still claim to fully implement this mandatory log
                 * page.
                 */
                return nvmet_execute_get_log_page_noop(req);
        case NVME_LOG_CHANGED_NS:
                return nvmet_execute_get_log_changed_ns(req);
        case NVME_LOG_CMD_EFFECTS:
                return nvmet_execute_get_log_cmd_effects_ns(req);
        case NVME_LOG_ANA:
                return nvmet_execute_get_log_page_ana(req);
        }
        pr_err("unhandled lid %d on qid %d\n",
               req->cmd->get_log_page.lid, req->sq->qid);
        req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
        nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

static void nvmet_id_set_model_number(struct nvme_id_ctrl *id,
                                      struct nvmet_subsys *subsys)
{
        const char *model = NVMET_DEFAULT_CTRL_MODEL;
        struct nvmet_subsys_model *subsys_model;

        rcu_read_lock();
        subsys_model = rcu_dereference(subsys->model);
        if (subsys_model)
                model = subsys_model->number;
        memcpy_and_pad(id->mn, sizeof(id->mn), model, strlen(model), ' ');
        rcu_read_unlock();
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvme_id_ctrl *id;
        u16 status = 0;

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        /* XXX: figure out how to assign real vendor IDs. */
        id->vid = 0;
        id->ssvid = 0;

        memset(id->sn, ' ', sizeof(id->sn));
        bin2hex(id->sn, &ctrl->subsys->serial,
                min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
        nvmet_id_set_model_number(id, ctrl->subsys);
        memcpy_and_pad(id->fr, sizeof(id->fr),
                       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

        id->rab = 6;

        /*
         * XXX: figure out how we can assign an IEEE OUI, but until then
         * the safest is to leave it as zeroes.
         */

        /* we support multiple ports, multiple hosts and ANA: */
        id->cmic = (1 << 0) | (1 << 1) | (1 << 3);

        /* Limit MDTS according to transport capability */
        if (ctrl->ops->get_mdts)
                id->mdts = ctrl->ops->get_mdts(ctrl);
        else
                id->mdts = 0;

        id->cntlid = cpu_to_le16(ctrl->cntlid);
        id->ver = cpu_to_le32(ctrl->subsys->ver);

        /* XXX: figure out what to do about RTD3R/RTD3 */
        id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
        id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
                                 NVME_CTRL_ATTR_TBKAS);

        id->oacs = 0;

        /*
         * We don't really have a practical limit on the number of abort
         * commands. But we don't do anything useful for abort either, so
         * no point in allowing more abort commands than the spec requires.
         */
        id->acl = 3;

        id->aerl = NVMET_ASYNC_EVENTS - 1;

        /* first slot is read-only, only one slot supported */
        id->frmw = (1 << 0) | (1 << 1);
        id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
        id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
        id->npss = 0;

        /* We support keep-alive timeout with a granularity of seconds */
        id->kas = cpu_to_le16(NVMET_KAS);

        id->sqes = (0x6 << 4) | 0x6;
        id->cqes = (0x4 << 4) | 0x4;
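
        /*
         * SQES/CQES above encode the maximum (upper nibble) and required
         * (lower nibble) queue entry sizes as powers of two: 0x66 fixes
         * submission queue entries at 2^6 == 64 bytes, 0x44 fixes
         * completion queue entries at 2^4 == 16 bytes.
         */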

        /* no enforcement soft-limit for maxcmd - pick an arbitrary high value */
        id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

        id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
        id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
        id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
                               NVME_CTRL_ONCS_WRITE_ZEROES);

        /* XXX: don't report vwc if the underlying device is write through */
        id->vwc = NVME_CTRL_VWC_PRESENT;

        /*
         * We can't support atomic writes bigger than an LBA without support
         * from the backend device.
         */
        id->awun = 0;
        id->awupf = 0;

        id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
        if (ctrl->ops->has_keyed_sgls)
                id->sgls |= cpu_to_le32(1 << 2);
        if (req->port->inline_data_size)
                id->sgls |= cpu_to_le32(1 << 20);

        strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

        /* Max command capsule size is sqe + single page of in-capsule data */
        id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
                                  req->port->inline_data_size) / 16);
        /* Max response capsule size is cqe */
        id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

        id->msdbd = ctrl->ops->msdbd;

        id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
        id->anatt = 10; /* random value */
        id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
        id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

        /*
         * Meh, we don't really support any power state. Fake up the same
         * values that qemu does.
         */
        id->psd[0].max_power = cpu_to_le16(0x9c4);
        id->psd[0].entry_lat = cpu_to_le32(0x10);
        id->psd[0].exit_lat = cpu_to_le32(0x4);

        id->nwpc = 1 << 0; /* write protect and no write protect */

        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

        kfree(id);
out:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
        struct nvmet_ns *ns;
        struct nvme_id_ns *id;
        u16 status = 0;

        if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
                req->error_loc = offsetof(struct nvme_identify, nsid);
                status = NVME_SC_INVALID_NS | NVME_SC_DNR;
                goto out;
        }

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        /* return an all zeroed buffer if we can't find an active namespace */
        ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
        if (!ns)
                goto done;

        /*
         * nuse = ncap = nsze isn't always true, but we have no way to find
         * that out from the underlying device.
         */
        id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
        switch (req->port->ana_state[ns->anagrpid]) {
        case NVME_ANA_INACCESSIBLE:
        case NVME_ANA_PERSISTENT_LOSS:
                break;
        default:
                id->nuse = id->nsze;
                break;
        }

        if (ns->bdev)
                nvmet_bdev_set_limits(ns->bdev, id);

        /*
         * We just provide a single LBA format that matches what the
         * underlying device reports.
         */
        id->nlbaf = 0;
        id->flbas = 0;

        /*
         * Our namespace might always be shared. Not just with other
         * controllers, but also with any other user of the block device.
         */
        id->nmic = (1 << 0);
        id->anagrpid = cpu_to_le32(ns->anagrpid);

        memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));

        id->lbaf[0].ds = ns->blksize_shift;

        if (ns->readonly)
                id->nsattr |= (1 << 0);
        nvmet_put_namespace(ns);
done:
        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
        kfree(id);
out:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
        static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_ns *ns;
        u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
        __le32 *list;
        u16 status = 0;
        int i = 0;

        list = kzalloc(buf_size, GFP_KERNEL);
        if (!list) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        rcu_read_lock();
        list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
                if (ns->nsid <= min_nsid)
                        continue;
                list[i++] = cpu_to_le32(ns->nsid);
                if (i == buf_size / sizeof(__le32))
                        break;
        }
        rcu_read_unlock();

        status = nvmet_copy_to_sgl(req, 0, list, buf_size);

        kfree(list);
out:
        nvmet_req_complete(req, status);
}
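
/*
 * Example of the CNS 02h semantics implemented above: with active
 * namespaces 1, 2 and 5, an Identify with nsid == 1 returns the list
 * { 2, 5, 0, 0, ... } -- only NSIDs greater than the one in the
 * command, zero padded to NVME_IDENTIFY_DATA_SIZE.
 */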

static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
                                    void *id, off_t *off)
{
        struct nvme_ns_id_desc desc = {
                .nidt = type,
                .nidl = len,
        };
        u16 status;

        status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
        if (status)
                return status;
        *off += sizeof(desc);

        status = nvmet_copy_to_sgl(req, *off, id, len);
        if (status)
                return status;
        *off += len;

        return 0;
}

static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
        struct nvmet_ns *ns;
        u16 status = 0;
        off_t off = 0;

        ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
        if (!ns) {
                req->error_loc = offsetof(struct nvme_identify, nsid);
                status = NVME_SC_INVALID_NS | NVME_SC_DNR;
                goto out;
        }

        if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
                status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
                                                  NVME_NIDT_UUID_LEN,
                                                  &ns->uuid, &off);
                if (status)
                        goto out_put_ns;
        }
        if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
                status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
                                                  NVME_NIDT_NGUID_LEN,
                                                  &ns->nguid, &off);
                if (status)
                        goto out_put_ns;
        }

        if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
                        off) != NVME_IDENTIFY_DATA_SIZE - off)
                status = NVME_SC_INTERNAL | NVME_SC_DNR;
out_put_ns:
        nvmet_put_namespace(ns);
out:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_identify(struct nvmet_req *req)
{
        if (!nvmet_check_data_len(req, NVME_IDENTIFY_DATA_SIZE))
                return;

        switch (req->cmd->identify.cns) {
        case NVME_ID_CNS_NS:
                return nvmet_execute_identify_ns(req);
        case NVME_ID_CNS_CTRL:
                return nvmet_execute_identify_ctrl(req);
        case NVME_ID_CNS_NS_ACTIVE_LIST:
                return nvmet_execute_identify_nslist(req);
        case NVME_ID_CNS_NS_DESC_LIST:
                return nvmet_execute_identify_desclist(req);
        }

        pr_err("unhandled identify cns %d on qid %d\n",
               req->cmd->identify.cns, req->sq->qid);
        req->error_loc = offsetof(struct nvme_identify, cns);
        nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work. We couldn't really
 * do a useful abort, so don't even bother waiting for the command to be
 * executed; return immediately, reporting that the command to abort wasn't
 * found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
        if (!nvmet_check_data_len(req, 0))
                return;
        nvmet_set_result(req, 1);
        nvmet_req_complete(req, 0);
}
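
/*
 * Note: result 1 above sets bit 0 of completion queue entry dword 0,
 * which the spec defines as "command not aborted" -- the honest answer
 * from a controller that never aborts anything.
 */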

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
        u16 status;

        if (req->ns->file)
                status = nvmet_file_flush(req);
        else
                status = nvmet_bdev_flush(req);

        if (status)
                pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
        return status;
}

static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
        u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
        u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;

        req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
        if (unlikely(!req->ns)) {
                req->error_loc = offsetof(struct nvme_common_command, nsid);
                return status;
        }

        mutex_lock(&subsys->lock);
        switch (write_protect) {
        case NVME_NS_WRITE_PROTECT:
                req->ns->readonly = true;
                status = nvmet_write_protect_flush_sync(req);
                if (status)
                        req->ns->readonly = false;
                break;
        case NVME_NS_NO_WRITE_PROTECT:
                req->ns->readonly = false;
                status = 0;
                break;
        default:
                break;
        }

        if (!status)
                nvmet_ns_changed(subsys, req->ns->nsid);
        mutex_unlock(&subsys->lock);
        return status;
}

u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
        u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

        req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);

        nvmet_set_result(req, req->sq->ctrl->kato);

        return 0;
}
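
/*
 * Example: KATO arrives in milliseconds, so a host request of 2500 ms
 * is rounded up to DIV_ROUND_UP(2500, 1000) == 3 seconds.
 */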

u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
        u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

        if (val32 & ~mask) {
                req->error_loc = offsetof(struct nvme_common_command, cdw11);
                return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
        }

        WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
        nvmet_set_result(req, val32);

        return 0;
}

static void nvmet_execute_set_features(struct nvmet_req *req)
{
        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
        u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
        u16 status = 0;
        u16 nsqr;
        u16 ncqr;

        if (!nvmet_check_data_len(req, 0))
                return;

        switch (cdw10 & 0xff) {
        case NVME_FEAT_NUM_QUEUES:
                ncqr = (cdw11 >> 16) & 0xffff;
                nsqr = cdw11 & 0xffff;
                if (ncqr == 0xffff || nsqr == 0xffff) {
                        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                        break;
                }
                nvmet_set_result(req,
                        (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
                break;
        case NVME_FEAT_KATO:
                status = nvmet_set_feat_kato(req);
                break;
        case NVME_FEAT_ASYNC_EVENT:
                status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
                break;
        case NVME_FEAT_HOST_ID:
                status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
                break;
        case NVME_FEAT_WRITE_PROTECT:
                status = nvmet_set_feat_write_protect(req);
                break;
        default:
                req->error_loc = offsetof(struct nvme_common_command, cdw10);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                break;
        }

        nvmet_req_complete(req, status);
}
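
/*
 * Note on Number of Queues: both the requested and returned counts are
 * 0's based, so 0xffff (which would mean 65536 queues) is invalid, and
 * a subsystem with max_qid == 128 reports 127 in each 16-bit half,
 * i.e. 128 usable I/O submission and completion queues.
 */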

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
        u32 result;

        req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
        if (!req->ns) {
                req->error_loc = offsetof(struct nvme_common_command, nsid);
                return NVME_SC_INVALID_NS | NVME_SC_DNR;
        }
        mutex_lock(&subsys->lock);
        if (req->ns->readonly)
                result = NVME_NS_WRITE_PROTECT;
        else
                result = NVME_NS_NO_WRITE_PROTECT;
        nvmet_set_result(req, result);
        mutex_unlock(&subsys->lock);

        return 0;
}

void nvmet_get_feat_kato(struct nvmet_req *req)
{
        nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
        nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

static void nvmet_execute_get_features(struct nvmet_req *req)
{
        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
        u16 status = 0;

        if (!nvmet_check_data_len(req, nvmet_feat_data_len(req, cdw10)))
                return;

        switch (cdw10 & 0xff) {
        /*
         * These features are mandatory in the spec, but we don't
         * have a useful way to implement them. We'll eventually
         * need to come up with some fake values for these.
         */
#if 0
        case NVME_FEAT_ARBITRATION:
                break;
        case NVME_FEAT_POWER_MGMT:
                break;
        case NVME_FEAT_TEMP_THRESH:
                break;
        case NVME_FEAT_ERR_RECOVERY:
                break;
        case NVME_FEAT_IRQ_COALESCE:
                break;
        case NVME_FEAT_IRQ_CONFIG:
                break;
        case NVME_FEAT_WRITE_ATOMIC:
                break;
#endif
        case NVME_FEAT_ASYNC_EVENT:
                nvmet_get_feat_async_event(req);
                break;
        case NVME_FEAT_VOLATILE_WC:
                nvmet_set_result(req, 1);
                break;
        case NVME_FEAT_NUM_QUEUES:
                nvmet_set_result(req,
                        (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
                break;
        case NVME_FEAT_KATO:
                nvmet_get_feat_kato(req);
                break;
        case NVME_FEAT_HOST_ID:
                /* need 128-bit host identifier flag */
                if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
                        req->error_loc =
                                offsetof(struct nvme_common_command, cdw11);
                        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                        break;
                }

                status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
                                sizeof(req->sq->ctrl->hostid));
                break;
        case NVME_FEAT_WRITE_PROTECT:
                status = nvmet_get_feat_write_protect(req);
                break;
        default:
                req->error_loc =
                        offsetof(struct nvme_common_command, cdw10);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                break;
        }

        nvmet_req_complete(req, status);
}

void nvmet_execute_async_event(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;

        if (!nvmet_check_data_len(req, 0))
                return;

        mutex_lock(&ctrl->lock);
        if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
                mutex_unlock(&ctrl->lock);
                nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
                return;
        }
        ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
        mutex_unlock(&ctrl->lock);

        schedule_work(&ctrl->async_event_work);
}
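
/*
 * The NVMET_ASYNC_EVENTS limit enforced above matches the 0's based
 * AERL value advertised in Identify Controller, so a conforming host
 * should never see NVME_SC_ASYNC_LIMIT.
 */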

void nvmet_execute_keep_alive(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;

        if (!nvmet_check_data_len(req, 0))
                return;

        pr_debug("ctrl %d update keep-alive timer for %d secs\n",
                 ctrl->cntlid, ctrl->kato);

        mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
        nvmet_req_complete(req, 0);
}

u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;
        u16 ret;

        if (nvme_is_fabrics(cmd))
                return nvmet_parse_fabrics_cmd(req);
        if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
                return nvmet_parse_discovery_cmd(req);

        ret = nvmet_check_ctrl_status(req, cmd);
        if (unlikely(ret))
                return ret;

        switch (cmd->common.opcode) {
        case nvme_admin_get_log_page:
                req->execute = nvmet_execute_get_log_page;
                return 0;
        case nvme_admin_identify:
                req->execute = nvmet_execute_identify;
                return 0;
        case nvme_admin_abort_cmd:
                req->execute = nvmet_execute_abort;
                return 0;
        case nvme_admin_set_features:
                req->execute = nvmet_execute_set_features;
                return 0;
        case nvme_admin_get_features:
                req->execute = nvmet_execute_get_features;
                return 0;
        case nvme_admin_async_event:
                req->execute = nvmet_execute_async_event;
                return 0;
        case nvme_admin_keep_alive:
                req->execute = nvmet_execute_keep_alive;
                return 0;
        }

        pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
               req->sq->qid);
        req->error_loc = offsetof(struct nvme_common_command, opcode);
        return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}