// SPDX-License-Identifier: GPL-2.0
/*
 * Sysfs interface for the NVMe core driver.
 *
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/nvme-auth.h>

#include "nvme.h"
#include "fabrics.h"

static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

	ret = nvme_reset_ctrl_sync(ctrl);
	if (ret < 0)
		return ret;
	return count;
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);

static ssize_t nvme_sysfs_rescan(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	nvme_queue_scan(ctrl);
	return count;
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);

static ssize_t nvme_adm_passthru_err_log_enabled_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf,
			  ctrl->passthru_err_log_enabled ? "on\n" : "off\n");
}

static ssize_t nvme_adm_passthru_err_log_enabled_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	bool passthru_err_log_enabled;
	int err;

	err = kstrtobool(buf, &passthru_err_log_enabled);
	if (err)
		return -EINVAL;

	ctrl->passthru_err_log_enabled = passthru_err_log_enabled;

	return count;
}

static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (nvme_disk_is_ns_head(disk))
		return disk->private_data;
	return nvme_get_ns_from_dev(dev)->head;
}

static ssize_t nvme_io_passthru_err_log_enabled_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);

	return sysfs_emit(buf, head->passthru_err_log_enabled ? "on\n" : "off\n");
}

static ssize_t nvme_io_passthru_err_log_enabled_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	bool passthru_err_log_enabled;
	int err;

	err = kstrtobool(buf, &passthru_err_log_enabled);
	if (err)
		return -EINVAL;
	head->passthru_err_log_enabled = passthru_err_log_enabled;

	return count;
}

static struct device_attribute dev_attr_adm_passthru_err_log_enabled = \
	__ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \
	nvme_adm_passthru_err_log_enabled_show, nvme_adm_passthru_err_log_enabled_store);

static struct device_attribute dev_attr_io_passthru_err_log_enabled = \
	__ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \
	nvme_io_passthru_err_log_enabled_show, nvme_io_passthru_err_log_enabled_store);

static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	struct nvme_ns_ids *ids = &head->ids;
	struct nvme_subsystem *subsys = head->subsys;
	int serial_len = sizeof(subsys->serial);
	int model_len = sizeof(subsys->model);

	if (!uuid_is_null(&ids->uuid))
		return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid);

	if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
		return sysfs_emit(buf, "eui.%16phN\n", ids->nguid);

	if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
		return sysfs_emit(buf, "eui.%8phN\n", ids->eui64);

	while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
				  subsys->serial[serial_len - 1] == '\0'))
		serial_len--;
	while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
				 subsys->model[model_len - 1] == '\0'))
		model_len--;

	return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
		serial_len, subsys->serial, model_len, subsys->model,
		head->ns_id);
}
static DEVICE_ATTR_RO(wwid);

static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
}
static DEVICE_ATTR_RO(nguid);

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	/* For backward compatibility expose the NGUID to userspace if
	 * we have no UUID set
	 */
	if (uuid_is_null(&ids->uuid)) {
		dev_warn_once(dev,
			"No UUID available providing old NGUID\n");
		return sysfs_emit(buf, "%pU\n", ids->nguid);
	}
	return sysfs_emit(buf, "%pU\n", &ids->uuid);
}
static DEVICE_ATTR_RO(uuid);

static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
}
static DEVICE_ATTR_RO(eui);

static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
}
static DEVICE_ATTR_RO(nsid);

static ssize_t csi_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%u\n", dev_to_ns_head(dev)->ids.csi);
}
static DEVICE_ATTR_RO(csi);

static ssize_t metadata_bytes_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", dev_to_ns_head(dev)->ms);
}
static DEVICE_ATTR_RO(metadata_bytes);

static int ns_head_update_nuse(struct nvme_ns_head *head)
{
	struct nvme_id_ns *id;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;

	/* Avoid issuing commands too often by rate limiting the update */
	if (!__ratelimit(&head->rs_nuse))
		return 0;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (!ns)
		goto out_unlock;

	ret = nvme_identify_ns(ns->ctrl, head->ns_id, &id);
	if (ret)
		goto out_unlock;

	head->nuse = le64_to_cpu(id->nuse);
	kfree(id);

out_unlock:
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

static int ns_update_nuse(struct nvme_ns *ns)
{
	struct nvme_id_ns *id;
	int ret;

	/* Avoid issuing commands too often by rate limiting the update. */
	if (!__ratelimit(&ns->head->rs_nuse))
		return 0;

	ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, &id);
	if (ret)
		return ret;

	ns->head->nuse = le64_to_cpu(id->nuse);
	kfree(id);
	return 0;
}

static ssize_t nuse_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	struct gendisk *disk = dev_to_disk(dev);
	int ret;

	if (nvme_disk_is_ns_head(disk))
		ret = ns_head_update_nuse(head);
	else
		ret = ns_update_nuse(disk->private_data);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%llu\n", head->nuse);
}
static DEVICE_ATTR_RO(nuse);

static struct attribute *nvme_ns_attrs[] = {
	&dev_attr_wwid.attr,
	&dev_attr_uuid.attr,
	&dev_attr_nguid.attr,
	&dev_attr_eui.attr,
	&dev_attr_csi.attr,
	&dev_attr_nsid.attr,
	&dev_attr_metadata_bytes.attr,
	&dev_attr_nuse.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&dev_attr_ana_grpid.attr,
	&dev_attr_ana_state.attr,
#endif
	&dev_attr_io_passthru_err_log_enabled.attr,
	NULL,
};

static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	if (a == &dev_attr_uuid.attr) {
		if (uuid_is_null(&ids->uuid) &&
		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_nguid.attr) {
		if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
		if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
			return 0;
	}
#ifdef CONFIG_NVME_MULTIPATH
	if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
		/* per-path attr */
		if (nvme_disk_is_ns_head(dev_to_disk(dev)))
			return 0;
		if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
			return 0;
	}
#endif
	return a->mode;
}

static const struct attribute_group nvme_ns_attr_group = {
	.attrs		= nvme_ns_attrs,
	.is_visible	= nvme_ns_attrs_are_visible,
};

const struct attribute_group *nvme_ns_attr_groups[] = {
	&nvme_ns_attr_group,
	NULL,
};

#define nvme_show_str_function(field)						\
static ssize_t field##_show(struct device *dev,					\
			    struct device_attribute *attr, char *buf)		\
{										\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
	return sysfs_emit(buf, "%.*s\n",					\
		(int)sizeof(ctrl->subsys->field), ctrl->subsys->field);	\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);

#define nvme_show_int_function(field)						\
static ssize_t field##_show(struct device *dev,					\
			    struct device_attribute *attr, char *buf)		\
{										\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
	return sysfs_emit(buf, "%d\n", ctrl->field);				\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_int_function(cntlid);
nvme_show_int_function(numa_node);
nvme_show_int_function(queue_count);
nvme_show_int_function(sqsize);
nvme_show_int_function(kato);

static ssize_t nvme_sysfs_delete(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (!test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags))
		return -EBUSY;

	if (device_remove_file_self(dev, attr))
		nvme_delete_ctrl_sync(ctrl);
	return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);

static ssize_t nvme_sysfs_show_transport(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->ops->name);
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);

static ssize_t nvme_sysfs_show_state(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	unsigned state = (unsigned)nvme_ctrl_state(ctrl);
	static const char *const state_name[] = {
		[NVME_CTRL_NEW]		= "new",
		[NVME_CTRL_LIVE]	= "live",
		[NVME_CTRL_RESETTING]	= "resetting",
		[NVME_CTRL_CONNECTING]	= "connecting",
		[NVME_CTRL_DELETING]	= "deleting",
		[NVME_CTRL_DELETING_NOIO]= "deleting (no IO)",
		[NVME_CTRL_DEAD]	= "dead",
	};

	if (state < ARRAY_SIZE(state_name) && state_name[state])
		return sysfs_emit(buf, "%s\n", state_name[state]);

	return sysfs_emit(buf, "unknown state\n");
}

static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);

static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
}
static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);

static ssize_t nvme_sysfs_show_hostid(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
}
static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);

static ssize_t nvme_sysfs_show_address(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);

static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (ctrl->opts->max_reconnects == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n",
			  opts->max_reconnects * opts->reconnect_delay);
}

static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ctrl_loss_tmo, err;

	err = kstrtoint(buf, 10, &ctrl_loss_tmo);
	if (err)
		return -EINVAL;

	if (ctrl_loss_tmo < 0)
		opts->max_reconnects = -1;
	else
		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
					opts->reconnect_delay);
	return count;
}
static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR,
	nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store);

static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->opts->reconnect_delay == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay);
}

static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	unsigned int v;
	int err;

	err = kstrtou32(buf, 10, &v);
	if (err)
		return err;

	ctrl->opts->reconnect_delay = v;
	return count;
}
static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR,
	nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store);

static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->opts->fast_io_fail_tmo == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo);
}

static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int fast_io_fail_tmo, err;

	err = kstrtoint(buf, 10, &fast_io_fail_tmo);
	if (err)
		return -EINVAL;

	if (fast_io_fail_tmo < 0)
		opts->fast_io_fail_tmo = -1;
	else
		opts->fast_io_fail_tmo = fast_io_fail_tmo;
	return count;
}
static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
	nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store);

static ssize_t cntrltype_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	static const char * const type[] = {
		[NVME_CTRL_IO] = "io\n",
		[NVME_CTRL_DISC] = "discovery\n",
		[NVME_CTRL_ADMIN] = "admin\n",
	};
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype])
		return sysfs_emit(buf, "reserved\n");

	return sysfs_emit(buf, type[ctrl->cntrltype]);
}
static DEVICE_ATTR_RO(cntrltype);

static ssize_t dctype_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	static const char * const type[] = {
		[NVME_DCTYPE_NOT_REPORTED] = "none\n",
		[NVME_DCTYPE_DDC] = "ddc\n",
		[NVME_DCTYPE_CDC] = "cdc\n",
	};
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype])
		return sysfs_emit(buf, "reserved\n");

	return sysfs_emit(buf, type[ctrl->dctype]);
}
static DEVICE_ATTR_RO(dctype);

#ifdef CONFIG_NVME_HOST_AUTH
static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (!opts->dhchap_secret)
		return sysfs_emit(buf, "none\n");
	return sysfs_emit(buf, "%s\n", opts->dhchap_secret);
}

static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	char *dhchap_secret;

	if (!ctrl->opts->dhchap_secret)
		return -EINVAL;
	if (count < 7)
		return -EINVAL;
	if (memcmp(buf, "DHHC-1:", 7))
		return -EINVAL;

	dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
	if (!dhchap_secret)
		return -ENOMEM;
	memcpy(dhchap_secret, buf, count);
	nvme_auth_stop(ctrl);
	if (strcmp(dhchap_secret, opts->dhchap_secret)) {
		struct nvme_dhchap_key *key, *host_key;
		int ret;

		ret = nvme_auth_generate_key(dhchap_secret, &key);
		if (ret) {
			kfree(dhchap_secret);
			return ret;
		}
		kfree(opts->dhchap_secret);
		opts->dhchap_secret = dhchap_secret;
		host_key = ctrl->host_key;
		mutex_lock(&ctrl->dhchap_auth_mutex);
		ctrl->host_key = key;
		mutex_unlock(&ctrl->dhchap_auth_mutex);
		nvme_auth_free_key(host_key);
	} else
		kfree(dhchap_secret);
	/* Start re-authentication */
	dev_info(ctrl->device, "re-authenticating controller\n");
	queue_work(nvme_wq, &ctrl->dhchap_auth_work);

	return count;
}

static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR,
	nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store);

static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (!opts->dhchap_ctrl_secret)
		return sysfs_emit(buf, "none\n");
	return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret);
}

static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	char *dhchap_secret;

	if (!ctrl->opts->dhchap_ctrl_secret)
		return -EINVAL;
	if (count < 7)
		return -EINVAL;
	if (memcmp(buf, "DHHC-1:", 7))
		return -EINVAL;

	dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
	if (!dhchap_secret)
		return -ENOMEM;
	memcpy(dhchap_secret, buf, count);
	nvme_auth_stop(ctrl);
	if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) {
		struct nvme_dhchap_key *key, *ctrl_key;
		int ret;

		ret = nvme_auth_generate_key(dhchap_secret, &key);
		if (ret) {
			kfree(dhchap_secret);
			return ret;
		}
		kfree(opts->dhchap_ctrl_secret);
		opts->dhchap_ctrl_secret = dhchap_secret;
		ctrl_key = ctrl->ctrl_key;
		mutex_lock(&ctrl->dhchap_auth_mutex);
		ctrl->ctrl_key = key;
		mutex_unlock(&ctrl->dhchap_auth_mutex);
		nvme_auth_free_key(ctrl_key);
	} else
		kfree(dhchap_secret);
	/* Start re-authentication */
	dev_info(ctrl->device, "re-authenticating controller\n");
	queue_work(nvme_wq, &ctrl->dhchap_auth_work);

	return count;
}

static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR,
	nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store);
#endif

static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
	&dev_attr_rescan_controller.attr,
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
	&dev_attr_cntlid.attr,
	&dev_attr_delete_controller.attr,
	&dev_attr_transport.attr,
	&dev_attr_subsysnqn.attr,
	&dev_attr_address.attr,
	&dev_attr_state.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_queue_count.attr,
	&dev_attr_sqsize.attr,
	&dev_attr_hostnqn.attr,
	&dev_attr_hostid.attr,
	&dev_attr_ctrl_loss_tmo.attr,
	&dev_attr_reconnect_delay.attr,
	&dev_attr_fast_io_fail_tmo.attr,
	&dev_attr_kato.attr,
	&dev_attr_cntrltype.attr,
	&dev_attr_dctype.attr,
#ifdef CONFIG_NVME_HOST_AUTH
	&dev_attr_dhchap_secret.attr,
	&dev_attr_dhchap_ctrl_secret.attr,
#endif
	&dev_attr_adm_passthru_err_log_enabled.attr,
	NULL
};

static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
		return 0;
	if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
		return 0;
	if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_hostid.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
		return 0;
#ifdef CONFIG_NVME_HOST_AUTH
	if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
		return 0;
#endif

	return a->mode;
}

const struct attribute_group nvme_dev_attrs_group = {
	.attrs		= nvme_dev_attrs,
	.is_visible	= nvme_dev_attrs_are_visible,
};
EXPORT_SYMBOL_GPL(nvme_dev_attrs_group);

#ifdef CONFIG_NVME_TCP_TLS
static ssize_t tls_key_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (!ctrl->tls_pskid)
		return 0;
	return sysfs_emit(buf, "%08x\n", ctrl->tls_pskid);
}
static DEVICE_ATTR_RO(tls_key);

static ssize_t tls_configured_key_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct key *key = ctrl->opts->tls_key;

	return sysfs_emit(buf, "%08x\n", key_serial(key));
}
static DEVICE_ATTR_RO(tls_configured_key);

static ssize_t tls_keyring_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct key *keyring = ctrl->opts->keyring;

	return sysfs_emit(buf, "%s\n", keyring->description);
}
static DEVICE_ATTR_RO(tls_keyring);

static struct attribute *nvme_tls_attrs[] = {
	&dev_attr_tls_key.attr,
	&dev_attr_tls_configured_key.attr,
	&dev_attr_tls_keyring.attr,
	NULL,
};

static umode_t nvme_tls_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (!ctrl->opts || strcmp(ctrl->opts->transport, "tcp"))
		return 0;

	if (a == &dev_attr_tls_key.attr &&
	    !ctrl->opts->tls)
		return 0;
	if (a == &dev_attr_tls_configured_key.attr &&
	    !ctrl->opts->tls_key)
		return 0;
	if (a == &dev_attr_tls_keyring.attr &&
	    !ctrl->opts->keyring)
		return 0;

	return a->mode;
}

static const struct attribute_group nvme_tls_attrs_group = {
	.attrs		= nvme_tls_attrs,
	.is_visible	= nvme_tls_attrs_are_visible,
};
#endif

const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
#ifdef CONFIG_NVME_TCP_TLS
	&nvme_tls_attrs_group,
#endif
	NULL,
};

#define SUBSYS_ATTR_RO(_name, _mode, _show)			\
	struct device_attribute subsys_attr_##_name = \
		__ATTR(_name, _mode, _show, NULL)

static ssize_t nvme_subsys_show_nqn(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	return sysfs_emit(buf, "%s\n", subsys->subnqn);
}
static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);

static ssize_t nvme_subsys_show_type(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	switch (subsys->subtype) {
	case NVME_NQN_DISC:
		return sysfs_emit(buf, "discovery\n");
	case NVME_NQN_NVME:
		return sysfs_emit(buf, "nvm\n");
	default:
		return sysfs_emit(buf, "reserved\n");
	}
}
static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type);

#define nvme_subsys_show_str_function(field)				\
static ssize_t subsys_##field##_show(struct device *dev,		\
			    struct device_attribute *attr, char *buf)	\
{									\
	struct nvme_subsystem *subsys =					\
		container_of(dev, struct nvme_subsystem, dev);		\
	return sysfs_emit(buf, "%.*s\n",				\
			   (int)sizeof(subsys->field), subsys->field);	\
}									\
static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);

nvme_subsys_show_str_function(model);
nvme_subsys_show_str_function(serial);
nvme_subsys_show_str_function(firmware_rev);

static struct attribute *nvme_subsys_attrs[] = {
	&subsys_attr_model.attr,
	&subsys_attr_serial.attr,
	&subsys_attr_firmware_rev.attr,
	&subsys_attr_subsysnqn.attr,
	&subsys_attr_subsystype.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&subsys_attr_iopolicy.attr,
#endif
	NULL,
};

static const struct attribute_group nvme_subsys_attrs_group = {
	.attrs = nvme_subsys_attrs,
};

const struct attribute_group *nvme_subsys_attrs_groups[] = {
	&nvme_subsys_attrs_group,
	NULL,
};