Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

nvmet: implement basic In-Band Authentication

Implement NVMe-oF In-Band authentication according to NVMe TPAR 8006.
This patch adds three additional configfs entries 'dhchap_key',
'dhchap_ctrl_key', and 'dhchap_hash' to the 'host' configfs directory.
The 'dhchap_key' and 'dhchap_ctrl_key' entries need to be in the ASCII
format as specified in NVMe Base Specification v2.0 section 8.13.5.8
'Secret representation'.
'dhchap_hash' defaults to 'hmac(sha256)', and can be written to in order
to switch to a different HMAC algorithm.

Signed-off-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Hannes Reinecke and committed by Jens Axboe.
db1312dd 6490c9ed

+1100 -3
+13
drivers/nvme/target/Kconfig
··· 83 83 devices over TCP. 84 84 85 85 If unsure, say N. 86 + 87 + config NVME_TARGET_AUTH 88 + bool "NVMe over Fabrics In-band Authentication support" 89 + depends on NVME_TARGET 90 + select NVME_COMMON 91 + select CRYPTO 92 + select CRYPTO_HMAC 93 + select CRYPTO_SHA256 94 + select CRYPTO_SHA512 95 + help 96 + This enables support for NVMe over Fabrics In-band Authentication 97 + 98 + If unsure, say N.
+1
drivers/nvme/target/Makefile
··· 13 13 discovery.o io-cmd-file.o io-cmd-bdev.o 14 14 nvmet-$(CONFIG_NVME_TARGET_PASSTHRU) += passthru.o 15 15 nvmet-$(CONFIG_BLK_DEV_ZONED) += zns.o 16 + nvmet-$(CONFIG_NVME_TARGET_AUTH) += fabrics-cmd-auth.o auth.o 16 17 nvme-loop-y += loop.o 17 18 nvmet-rdma-y += rdma.o 18 19 nvmet-fc-y += fc.o
+2
drivers/nvme/target/admin-cmd.c
··· 1018 1018 1019 1019 if (nvme_is_fabrics(cmd)) 1020 1020 return nvmet_parse_fabrics_admin_cmd(req); 1021 + if (unlikely(!nvmet_check_auth_status(req))) 1022 + return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR; 1021 1023 if (nvmet_is_disc_subsys(nvmet_req_subsys(req))) 1022 1024 return nvmet_parse_discovery_cmd(req); 1023 1025
+367
drivers/nvme/target/auth.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * NVMe over Fabrics DH-HMAC-CHAP authentication. 4 + * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions. 5 + * All rights reserved. 6 + */ 7 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 8 + #include <linux/module.h> 9 + #include <linux/init.h> 10 + #include <linux/slab.h> 11 + #include <linux/err.h> 12 + #include <crypto/hash.h> 13 + #include <linux/crc32.h> 14 + #include <linux/base64.h> 15 + #include <linux/ctype.h> 16 + #include <linux/random.h> 17 + #include <linux/nvme-auth.h> 18 + #include <asm/unaligned.h> 19 + 20 + #include "nvmet.h" 21 + 22 + int nvmet_auth_set_key(struct nvmet_host *host, const char *secret, 23 + bool set_ctrl) 24 + { 25 + unsigned char key_hash; 26 + char *dhchap_secret; 27 + 28 + if (sscanf(secret, "DHHC-1:%hhd:%*s", &key_hash) != 1) 29 + return -EINVAL; 30 + if (key_hash > 3) { 31 + pr_warn("Invalid DH-HMAC-CHAP hash id %d\n", 32 + key_hash); 33 + return -EINVAL; 34 + } 35 + if (key_hash > 0) { 36 + /* Validate selected hash algorithm */ 37 + const char *hmac = nvme_auth_hmac_name(key_hash); 38 + 39 + if (!crypto_has_shash(hmac, 0, 0)) { 40 + pr_err("DH-HMAC-CHAP hash %s unsupported\n", hmac); 41 + return -ENOTSUPP; 42 + } 43 + } 44 + dhchap_secret = kstrdup(secret, GFP_KERNEL); 45 + if (!dhchap_secret) 46 + return -ENOMEM; 47 + if (set_ctrl) { 48 + host->dhchap_ctrl_secret = strim(dhchap_secret); 49 + host->dhchap_ctrl_key_hash = key_hash; 50 + } else { 51 + host->dhchap_secret = strim(dhchap_secret); 52 + host->dhchap_key_hash = key_hash; 53 + } 54 + return 0; 55 + } 56 + 57 + int nvmet_setup_auth(struct nvmet_ctrl *ctrl) 58 + { 59 + int ret = 0; 60 + struct nvmet_host_link *p; 61 + struct nvmet_host *host = NULL; 62 + const char *hash_name; 63 + 64 + down_read(&nvmet_config_sem); 65 + if (nvmet_is_disc_subsys(ctrl->subsys)) 66 + goto out_unlock; 67 + 68 + if (ctrl->subsys->allow_any_host) 69 + goto out_unlock; 70 + 71 + list_for_each_entry(p, &ctrl->subsys->hosts, 
entry) { 72 + pr_debug("check %s\n", nvmet_host_name(p->host)); 73 + if (strcmp(nvmet_host_name(p->host), ctrl->hostnqn)) 74 + continue; 75 + host = p->host; 76 + break; 77 + } 78 + if (!host) { 79 + pr_debug("host %s not found\n", ctrl->hostnqn); 80 + ret = -EPERM; 81 + goto out_unlock; 82 + } 83 + 84 + if (!host->dhchap_secret) { 85 + pr_debug("No authentication provided\n"); 86 + goto out_unlock; 87 + } 88 + 89 + if (host->dhchap_hash_id == ctrl->shash_id) { 90 + pr_debug("Re-use existing hash ID %d\n", 91 + ctrl->shash_id); 92 + } else { 93 + hash_name = nvme_auth_hmac_name(host->dhchap_hash_id); 94 + if (!hash_name) { 95 + pr_warn("Hash ID %d invalid\n", host->dhchap_hash_id); 96 + ret = -EINVAL; 97 + goto out_unlock; 98 + } 99 + ctrl->shash_id = host->dhchap_hash_id; 100 + } 101 + 102 + /* Skip the 'DHHC-1:XX:' prefix */ 103 + nvme_auth_free_key(ctrl->host_key); 104 + ctrl->host_key = nvme_auth_extract_key(host->dhchap_secret + 10, 105 + host->dhchap_key_hash); 106 + if (IS_ERR(ctrl->host_key)) { 107 + ret = PTR_ERR(ctrl->host_key); 108 + ctrl->host_key = NULL; 109 + goto out_free_hash; 110 + } 111 + pr_debug("%s: using hash %s key %*ph\n", __func__, 112 + ctrl->host_key->hash > 0 ? 113 + nvme_auth_hmac_name(ctrl->host_key->hash) : "none", 114 + (int)ctrl->host_key->len, ctrl->host_key->key); 115 + 116 + nvme_auth_free_key(ctrl->ctrl_key); 117 + if (!host->dhchap_ctrl_secret) { 118 + ctrl->ctrl_key = NULL; 119 + goto out_unlock; 120 + } 121 + 122 + ctrl->ctrl_key = nvme_auth_extract_key(host->dhchap_ctrl_secret + 10, 123 + host->dhchap_ctrl_key_hash); 124 + if (IS_ERR(ctrl->ctrl_key)) { 125 + ret = PTR_ERR(ctrl->ctrl_key); 126 + ctrl->ctrl_key = NULL; 127 + } 128 + pr_debug("%s: using ctrl hash %s key %*ph\n", __func__, 129 + ctrl->ctrl_key->hash > 0 ? 
130 + nvme_auth_hmac_name(ctrl->ctrl_key->hash) : "none", 131 + (int)ctrl->ctrl_key->len, ctrl->ctrl_key->key); 132 + 133 + out_free_hash: 134 + if (ret) { 135 + if (ctrl->host_key) { 136 + nvme_auth_free_key(ctrl->host_key); 137 + ctrl->host_key = NULL; 138 + } 139 + ctrl->shash_id = 0; 140 + } 141 + out_unlock: 142 + up_read(&nvmet_config_sem); 143 + 144 + return ret; 145 + } 146 + 147 + void nvmet_auth_sq_free(struct nvmet_sq *sq) 148 + { 149 + kfree(sq->dhchap_c1); 150 + sq->dhchap_c1 = NULL; 151 + kfree(sq->dhchap_c2); 152 + sq->dhchap_c2 = NULL; 153 + kfree(sq->dhchap_skey); 154 + sq->dhchap_skey = NULL; 155 + } 156 + 157 + void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) 158 + { 159 + ctrl->shash_id = 0; 160 + 161 + if (ctrl->host_key) { 162 + nvme_auth_free_key(ctrl->host_key); 163 + ctrl->host_key = NULL; 164 + } 165 + if (ctrl->ctrl_key) { 166 + nvme_auth_free_key(ctrl->ctrl_key); 167 + ctrl->ctrl_key = NULL; 168 + } 169 + } 170 + 171 + bool nvmet_check_auth_status(struct nvmet_req *req) 172 + { 173 + if (req->sq->ctrl->host_key && 174 + !req->sq->authenticated) 175 + return false; 176 + return true; 177 + } 178 + 179 + int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response, 180 + unsigned int shash_len) 181 + { 182 + struct crypto_shash *shash_tfm; 183 + struct shash_desc *shash; 184 + struct nvmet_ctrl *ctrl = req->sq->ctrl; 185 + const char *hash_name; 186 + u8 *challenge = req->sq->dhchap_c1, *host_response; 187 + u8 buf[4]; 188 + int ret; 189 + 190 + hash_name = nvme_auth_hmac_name(ctrl->shash_id); 191 + if (!hash_name) { 192 + pr_warn("Hash ID %d invalid\n", ctrl->shash_id); 193 + return -EINVAL; 194 + } 195 + 196 + shash_tfm = crypto_alloc_shash(hash_name, 0, 0); 197 + if (IS_ERR(shash_tfm)) { 198 + pr_err("failed to allocate shash %s\n", hash_name); 199 + return PTR_ERR(shash_tfm); 200 + } 201 + 202 + if (shash_len != crypto_shash_digestsize(shash_tfm)) { 203 + pr_debug("%s: hash len mismatch (len %d digest %d)\n", 204 + __func__, shash_len, 
205 + crypto_shash_digestsize(shash_tfm)); 206 + ret = -EINVAL; 207 + goto out_free_tfm; 208 + } 209 + 210 + host_response = nvme_auth_transform_key(ctrl->host_key, ctrl->hostnqn); 211 + if (IS_ERR(host_response)) { 212 + ret = PTR_ERR(host_response); 213 + goto out_free_tfm; 214 + } 215 + 216 + ret = crypto_shash_setkey(shash_tfm, host_response, 217 + ctrl->host_key->len); 218 + if (ret) 219 + goto out_free_response; 220 + 221 + pr_debug("ctrl %d qid %d host response seq %u transaction %d\n", 222 + ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1, 223 + req->sq->dhchap_tid); 224 + 225 + shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm), 226 + GFP_KERNEL); 227 + if (!shash) { 228 + ret = -ENOMEM; 229 + goto out_free_response; 230 + } 231 + shash->tfm = shash_tfm; 232 + ret = crypto_shash_init(shash); 233 + if (ret) 234 + goto out; 235 + ret = crypto_shash_update(shash, challenge, shash_len); 236 + if (ret) 237 + goto out; 238 + put_unaligned_le32(req->sq->dhchap_s1, buf); 239 + ret = crypto_shash_update(shash, buf, 4); 240 + if (ret) 241 + goto out; 242 + put_unaligned_le16(req->sq->dhchap_tid, buf); 243 + ret = crypto_shash_update(shash, buf, 2); 244 + if (ret) 245 + goto out; 246 + memset(buf, 0, 4); 247 + ret = crypto_shash_update(shash, buf, 1); 248 + if (ret) 249 + goto out; 250 + ret = crypto_shash_update(shash, "HostHost", 8); 251 + if (ret) 252 + goto out; 253 + ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn)); 254 + if (ret) 255 + goto out; 256 + ret = crypto_shash_update(shash, buf, 1); 257 + if (ret) 258 + goto out; 259 + ret = crypto_shash_update(shash, ctrl->subsysnqn, 260 + strlen(ctrl->subsysnqn)); 261 + if (ret) 262 + goto out; 263 + ret = crypto_shash_final(shash, response); 264 + out: 265 + if (challenge != req->sq->dhchap_c1) 266 + kfree(challenge); 267 + kfree(shash); 268 + out_free_response: 269 + kfree_sensitive(host_response); 270 + out_free_tfm: 271 + crypto_free_shash(shash_tfm); 272 + return 0; 273 + } 
274 + 275 + int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response, 276 + unsigned int shash_len) 277 + { 278 + struct crypto_shash *shash_tfm; 279 + struct shash_desc *shash; 280 + struct nvmet_ctrl *ctrl = req->sq->ctrl; 281 + const char *hash_name; 282 + u8 *challenge = req->sq->dhchap_c2, *ctrl_response; 283 + u8 buf[4]; 284 + int ret; 285 + 286 + hash_name = nvme_auth_hmac_name(ctrl->shash_id); 287 + if (!hash_name) { 288 + pr_warn("Hash ID %d invalid\n", ctrl->shash_id); 289 + return -EINVAL; 290 + } 291 + 292 + shash_tfm = crypto_alloc_shash(hash_name, 0, 0); 293 + if (IS_ERR(shash_tfm)) { 294 + pr_err("failed to allocate shash %s\n", hash_name); 295 + return PTR_ERR(shash_tfm); 296 + } 297 + 298 + if (shash_len != crypto_shash_digestsize(shash_tfm)) { 299 + pr_debug("%s: hash len mismatch (len %d digest %d)\n", 300 + __func__, shash_len, 301 + crypto_shash_digestsize(shash_tfm)); 302 + ret = -EINVAL; 303 + goto out_free_tfm; 304 + } 305 + 306 + ctrl_response = nvme_auth_transform_key(ctrl->ctrl_key, 307 + ctrl->subsysnqn); 308 + if (IS_ERR(ctrl_response)) { 309 + ret = PTR_ERR(ctrl_response); 310 + goto out_free_tfm; 311 + } 312 + 313 + ret = crypto_shash_setkey(shash_tfm, ctrl_response, 314 + ctrl->ctrl_key->len); 315 + if (ret) 316 + goto out_free_response; 317 + 318 + shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm), 319 + GFP_KERNEL); 320 + if (!shash) { 321 + ret = -ENOMEM; 322 + goto out_free_response; 323 + } 324 + shash->tfm = shash_tfm; 325 + 326 + ret = crypto_shash_init(shash); 327 + if (ret) 328 + goto out; 329 + ret = crypto_shash_update(shash, challenge, shash_len); 330 + if (ret) 331 + goto out; 332 + put_unaligned_le32(req->sq->dhchap_s2, buf); 333 + ret = crypto_shash_update(shash, buf, 4); 334 + if (ret) 335 + goto out; 336 + put_unaligned_le16(req->sq->dhchap_tid, buf); 337 + ret = crypto_shash_update(shash, buf, 2); 338 + if (ret) 339 + goto out; 340 + memset(buf, 0, 4); 341 + ret = crypto_shash_update(shash, buf, 
1); 342 + if (ret) 343 + goto out; 344 + ret = crypto_shash_update(shash, "Controller", 10); 345 + if (ret) 346 + goto out; 347 + ret = crypto_shash_update(shash, ctrl->subsysnqn, 348 + strlen(ctrl->subsysnqn)); 349 + if (ret) 350 + goto out; 351 + ret = crypto_shash_update(shash, buf, 1); 352 + if (ret) 353 + goto out; 354 + ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn)); 355 + if (ret) 356 + goto out; 357 + ret = crypto_shash_final(shash, response); 358 + out: 359 + if (challenge != req->sq->dhchap_c2) 360 + kfree(challenge); 361 + kfree(shash); 362 + out_free_response: 363 + kfree_sensitive(ctrl_response); 364 + out_free_tfm: 365 + crypto_free_shash(shash_tfm); 366 + return 0; 367 + }
+106 -1
drivers/nvme/target/configfs.c
··· 11 11 #include <linux/ctype.h> 12 12 #include <linux/pci.h> 13 13 #include <linux/pci-p2pdma.h> 14 + #ifdef CONFIG_NVME_TARGET_AUTH 15 + #include <linux/nvme-auth.h> 16 + #endif 17 + #include <crypto/hash.h> 18 + #include <crypto/kpp.h> 14 19 15 20 #include "nvmet.h" 16 21 ··· 1685 1680 static struct config_group nvmet_subsystems_group; 1686 1681 static struct config_group nvmet_ports_group; 1687 1682 1683 + #ifdef CONFIG_NVME_TARGET_AUTH 1684 + static ssize_t nvmet_host_dhchap_key_show(struct config_item *item, 1685 + char *page) 1686 + { 1687 + u8 *dhchap_secret = to_host(item)->dhchap_secret; 1688 + 1689 + if (!dhchap_secret) 1690 + return sprintf(page, "\n"); 1691 + return sprintf(page, "%s\n", dhchap_secret); 1692 + } 1693 + 1694 + static ssize_t nvmet_host_dhchap_key_store(struct config_item *item, 1695 + const char *page, size_t count) 1696 + { 1697 + struct nvmet_host *host = to_host(item); 1698 + int ret; 1699 + 1700 + ret = nvmet_auth_set_key(host, page, false); 1701 + /* 1702 + * Re-authentication is a soft state, so keep the 1703 + * current authentication valid until the host 1704 + * requests re-authentication. 1705 + */ 1706 + return ret < 0 ? ret : count; 1707 + } 1708 + 1709 + CONFIGFS_ATTR(nvmet_host_, dhchap_key); 1710 + 1711 + static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item, 1712 + char *page) 1713 + { 1714 + u8 *dhchap_secret = to_host(item)->dhchap_ctrl_secret; 1715 + 1716 + if (!dhchap_secret) 1717 + return sprintf(page, "\n"); 1718 + return sprintf(page, "%s\n", dhchap_secret); 1719 + } 1720 + 1721 + static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item, 1722 + const char *page, size_t count) 1723 + { 1724 + struct nvmet_host *host = to_host(item); 1725 + int ret; 1726 + 1727 + ret = nvmet_auth_set_key(host, page, true); 1728 + /* 1729 + * Re-authentication is a soft state, so keep the 1730 + * current authentication valid until the host 1731 + * requests re-authentication. 
1732 + */ 1733 + return ret < 0 ? ret : count; 1734 + } 1735 + 1736 + CONFIGFS_ATTR(nvmet_host_, dhchap_ctrl_key); 1737 + 1738 + static ssize_t nvmet_host_dhchap_hash_show(struct config_item *item, 1739 + char *page) 1740 + { 1741 + struct nvmet_host *host = to_host(item); 1742 + const char *hash_name = nvme_auth_hmac_name(host->dhchap_hash_id); 1743 + 1744 + return sprintf(page, "%s\n", hash_name ? hash_name : "none"); 1745 + } 1746 + 1747 + static ssize_t nvmet_host_dhchap_hash_store(struct config_item *item, 1748 + const char *page, size_t count) 1749 + { 1750 + struct nvmet_host *host = to_host(item); 1751 + u8 hmac_id; 1752 + 1753 + hmac_id = nvme_auth_hmac_id(page); 1754 + if (hmac_id == NVME_AUTH_HASH_INVALID) 1755 + return -EINVAL; 1756 + if (!crypto_has_shash(nvme_auth_hmac_name(hmac_id), 0, 0)) 1757 + return -ENOTSUPP; 1758 + host->dhchap_hash_id = hmac_id; 1759 + return count; 1760 + } 1761 + 1762 + CONFIGFS_ATTR(nvmet_host_, dhchap_hash); 1763 + 1764 + static struct configfs_attribute *nvmet_host_attrs[] = { 1765 + &nvmet_host_attr_dhchap_key, 1766 + &nvmet_host_attr_dhchap_ctrl_key, 1767 + &nvmet_host_attr_dhchap_hash, 1768 + NULL, 1769 + }; 1770 + #endif /* CONFIG_NVME_TARGET_AUTH */ 1771 + 1688 1772 static void nvmet_host_release(struct config_item *item) 1689 1773 { 1690 1774 struct nvmet_host *host = to_host(item); 1691 - 1775 + #ifdef CONFIG_NVME_TARGET_AUTH 1776 + if (host->dhchap_secret) 1777 + kfree(host->dhchap_secret); 1778 + #endif 1692 1779 kfree(host); 1693 1780 } 1694 1781 ··· 1790 1693 1791 1694 static const struct config_item_type nvmet_host_type = { 1792 1695 .ct_item_ops = &nvmet_host_item_ops, 1696 + #ifdef CONFIG_NVME_TARGET_AUTH 1697 + .ct_attrs = nvmet_host_attrs, 1698 + #endif 1793 1699 .ct_owner = THIS_MODULE, 1794 1700 }; 1795 1701 ··· 1804 1704 host = kzalloc(sizeof(*host), GFP_KERNEL); 1805 1705 if (!host) 1806 1706 return ERR_PTR(-ENOMEM); 1707 + 1708 + #ifdef CONFIG_NVME_TARGET_AUTH 1709 + /* Default to SHA256 */ 1710 + 
host->dhchap_hash_id = NVME_AUTH_HASH_SHA256; 1711 + #endif 1807 1712 1808 1713 config_group_init_type_name(&host->group, name, &nvmet_host_type); 1809 1714
+11
drivers/nvme/target/core.c
··· 795 795 wait_for_completion(&sq->confirm_done); 796 796 wait_for_completion(&sq->free_done); 797 797 percpu_ref_exit(&sq->ref); 798 + nvmet_auth_sq_free(sq); 798 799 799 800 if (ctrl) { 800 801 /* ··· 871 870 872 871 if (nvme_is_fabrics(cmd)) 873 872 return nvmet_parse_fabrics_io_cmd(req); 873 + 874 + if (unlikely(!nvmet_check_auth_status(req))) 875 + return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR; 874 876 875 877 ret = nvmet_check_ctrl_status(req); 876 878 if (unlikely(ret)) ··· 1279 1275 req->cmd->common.opcode, req->sq->qid); 1280 1276 return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR; 1281 1277 } 1278 + 1279 + if (unlikely(!nvmet_check_auth_status(req))) { 1280 + pr_warn("qid %d not authenticated\n", req->sq->qid); 1281 + return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR; 1282 + } 1282 1283 return 0; 1283 1284 } 1284 1285 ··· 1479 1470 1480 1471 flush_work(&ctrl->async_event_work); 1481 1472 cancel_work_sync(&ctrl->fatal_err_work); 1473 + 1474 + nvmet_destroy_auth(ctrl); 1482 1475 1483 1476 ida_free(&cntlid_ida, ctrl->cntlid); 1484 1477
+502
drivers/nvme/target/fabrics-cmd-auth.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * NVMe over Fabrics DH-HMAC-CHAP authentication command handling. 4 + * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions. 5 + * All rights reserved. 6 + */ 7 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 8 + #include <linux/blkdev.h> 9 + #include <linux/random.h> 10 + #include <linux/nvme-auth.h> 11 + #include <crypto/hash.h> 12 + #include <crypto/kpp.h> 13 + #include "nvmet.h" 14 + 15 + void nvmet_init_auth(struct nvmet_ctrl *ctrl, struct nvmet_req *req) 16 + { 17 + u32 result = le32_to_cpu(req->cqe->result.u32); 18 + 19 + /* Initialize in-band authentication */ 20 + req->sq->authenticated = false; 21 + req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE; 22 + result |= (u32)NVME_CONNECT_AUTHREQ_ATR << 16; 23 + req->cqe->result.u32 = cpu_to_le32(result); 24 + } 25 + 26 + static u16 nvmet_auth_negotiate(struct nvmet_req *req, void *d) 27 + { 28 + struct nvmet_ctrl *ctrl = req->sq->ctrl; 29 + struct nvmf_auth_dhchap_negotiate_data *data = d; 30 + int i, hash_id = 0, fallback_hash_id = 0, dhgid; 31 + 32 + pr_debug("%s: ctrl %d qid %d: data sc_d %d napd %d authid %d halen %d dhlen %d\n", 33 + __func__, ctrl->cntlid, req->sq->qid, 34 + data->sc_c, data->napd, data->auth_protocol[0].dhchap.authid, 35 + data->auth_protocol[0].dhchap.halen, 36 + data->auth_protocol[0].dhchap.dhlen); 37 + req->sq->dhchap_tid = le16_to_cpu(data->t_id); 38 + if (data->sc_c) 39 + return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH; 40 + 41 + if (data->napd != 1) 42 + return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE; 43 + 44 + if (data->auth_protocol[0].dhchap.authid != 45 + NVME_AUTH_DHCHAP_AUTH_ID) 46 + return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; 47 + 48 + for (i = 0; i < data->auth_protocol[0].dhchap.halen; i++) { 49 + u8 host_hmac_id = data->auth_protocol[0].dhchap.idlist[i]; 50 + 51 + if (!fallback_hash_id && 52 + crypto_has_shash(nvme_auth_hmac_name(host_hmac_id), 0, 0)) 53 + fallback_hash_id = host_hmac_id; 54 + if 
(ctrl->shash_id != host_hmac_id) 55 + continue; 56 + hash_id = ctrl->shash_id; 57 + break; 58 + } 59 + if (hash_id == 0) { 60 + if (fallback_hash_id == 0) { 61 + pr_debug("%s: ctrl %d qid %d: no usable hash found\n", 62 + __func__, ctrl->cntlid, req->sq->qid); 63 + return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE; 64 + } 65 + pr_debug("%s: ctrl %d qid %d: no usable hash found, falling back to %s\n", 66 + __func__, ctrl->cntlid, req->sq->qid, 67 + nvme_auth_hmac_name(fallback_hash_id)); 68 + ctrl->shash_id = fallback_hash_id; 69 + } 70 + 71 + dhgid = -1; 72 + for (i = 0; i < data->auth_protocol[0].dhchap.dhlen; i++) { 73 + int tmp_dhgid = data->auth_protocol[0].dhchap.idlist[i + 30]; 74 + 75 + if (tmp_dhgid == NVME_AUTH_DHGROUP_NULL) { 76 + dhgid = tmp_dhgid; 77 + break; 78 + } 79 + } 80 + if (dhgid < 0) { 81 + pr_debug("%s: ctrl %d qid %d: no usable DH group found\n", 82 + __func__, ctrl->cntlid, req->sq->qid); 83 + return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE; 84 + } 85 + pr_debug("%s: ctrl %d qid %d: selected DH group %s (%d)\n", 86 + __func__, ctrl->cntlid, req->sq->qid, 87 + nvme_auth_dhgroup_name(dhgid), dhgid); 88 + return 0; 89 + } 90 + 91 + static u16 nvmet_auth_reply(struct nvmet_req *req, void *d) 92 + { 93 + struct nvmet_ctrl *ctrl = req->sq->ctrl; 94 + struct nvmf_auth_dhchap_reply_data *data = d; 95 + u16 dhvlen = le16_to_cpu(data->dhvlen); 96 + u8 *response; 97 + 98 + pr_debug("%s: ctrl %d qid %d: data hl %d cvalid %d dhvlen %u\n", 99 + __func__, ctrl->cntlid, req->sq->qid, 100 + data->hl, data->cvalid, dhvlen); 101 + 102 + if (dhvlen) { 103 + return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; 104 + } 105 + 106 + response = kmalloc(data->hl, GFP_KERNEL); 107 + if (!response) 108 + return NVME_AUTH_DHCHAP_FAILURE_FAILED; 109 + 110 + if (!ctrl->host_key) { 111 + pr_warn("ctrl %d qid %d no host key\n", 112 + ctrl->cntlid, req->sq->qid); 113 + kfree(response); 114 + return NVME_AUTH_DHCHAP_FAILURE_FAILED; 115 + } 116 + if (nvmet_auth_host_hash(req, 
response, data->hl) < 0) { 117 + pr_debug("ctrl %d qid %d host hash failed\n", 118 + ctrl->cntlid, req->sq->qid); 119 + kfree(response); 120 + return NVME_AUTH_DHCHAP_FAILURE_FAILED; 121 + } 122 + 123 + if (memcmp(data->rval, response, data->hl)) { 124 + pr_info("ctrl %d qid %d host response mismatch\n", 125 + ctrl->cntlid, req->sq->qid); 126 + kfree(response); 127 + return NVME_AUTH_DHCHAP_FAILURE_FAILED; 128 + } 129 + kfree(response); 130 + pr_debug("%s: ctrl %d qid %d host authenticated\n", 131 + __func__, ctrl->cntlid, req->sq->qid); 132 + if (data->cvalid) { 133 + req->sq->dhchap_c2 = kmalloc(data->hl, GFP_KERNEL); 134 + if (!req->sq->dhchap_c2) 135 + return NVME_AUTH_DHCHAP_FAILURE_FAILED; 136 + memcpy(req->sq->dhchap_c2, data->rval + data->hl, data->hl); 137 + 138 + pr_debug("%s: ctrl %d qid %d challenge %*ph\n", 139 + __func__, ctrl->cntlid, req->sq->qid, data->hl, 140 + req->sq->dhchap_c2); 141 + req->sq->dhchap_s2 = le32_to_cpu(data->seqnum); 142 + } else { 143 + req->sq->authenticated = true; 144 + req->sq->dhchap_c2 = NULL; 145 + } 146 + 147 + return 0; 148 + } 149 + 150 + static u16 nvmet_auth_failure2(struct nvmet_req *req, void *d) 151 + { 152 + struct nvmf_auth_dhchap_failure_data *data = d; 153 + 154 + return data->rescode_exp; 155 + } 156 + 157 + void nvmet_execute_auth_send(struct nvmet_req *req) 158 + { 159 + struct nvmet_ctrl *ctrl = req->sq->ctrl; 160 + struct nvmf_auth_dhchap_success2_data *data; 161 + void *d; 162 + u32 tl; 163 + u16 status = 0; 164 + 165 + if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) { 166 + status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; 167 + req->error_loc = 168 + offsetof(struct nvmf_auth_send_command, secp); 169 + goto done; 170 + } 171 + if (req->cmd->auth_send.spsp0 != 0x01) { 172 + status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; 173 + req->error_loc = 174 + offsetof(struct nvmf_auth_send_command, spsp0); 175 + goto done; 176 + } 177 + if (req->cmd->auth_send.spsp1 != 0x01) { 178 + status = 
NVME_SC_INVALID_FIELD | NVME_SC_DNR; 179 + req->error_loc = 180 + offsetof(struct nvmf_auth_send_command, spsp1); 181 + goto done; 182 + } 183 + tl = le32_to_cpu(req->cmd->auth_send.tl); 184 + if (!tl) { 185 + status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; 186 + req->error_loc = 187 + offsetof(struct nvmf_auth_send_command, tl); 188 + goto done; 189 + } 190 + if (!nvmet_check_transfer_len(req, tl)) { 191 + pr_debug("%s: transfer length mismatch (%u)\n", __func__, tl); 192 + return; 193 + } 194 + 195 + d = kmalloc(tl, GFP_KERNEL); 196 + if (!d) { 197 + status = NVME_SC_INTERNAL; 198 + goto done; 199 + } 200 + 201 + status = nvmet_copy_from_sgl(req, 0, d, tl); 202 + if (status) { 203 + kfree(d); 204 + goto done; 205 + } 206 + 207 + data = d; 208 + pr_debug("%s: ctrl %d qid %d type %d id %d step %x\n", __func__, 209 + ctrl->cntlid, req->sq->qid, data->auth_type, data->auth_id, 210 + req->sq->dhchap_step); 211 + if (data->auth_type != NVME_AUTH_COMMON_MESSAGES && 212 + data->auth_type != NVME_AUTH_DHCHAP_MESSAGES) 213 + goto done_failure1; 214 + if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) { 215 + if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) { 216 + /* Restart negotiation */ 217 + pr_debug("%s: ctrl %d qid %d reset negotiation\n", __func__, 218 + ctrl->cntlid, req->sq->qid); 219 + if (!req->sq->qid) { 220 + status = nvmet_setup_auth(ctrl); 221 + if (status < 0) { 222 + pr_err("ctrl %d qid 0 failed to setup" 223 + "re-authentication", 224 + ctrl->cntlid); 225 + goto done_failure1; 226 + } 227 + } 228 + req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE; 229 + } else if (data->auth_id != req->sq->dhchap_step) 230 + goto done_failure1; 231 + /* Validate negotiation parameters */ 232 + status = nvmet_auth_negotiate(req, d); 233 + if (status == 0) 234 + req->sq->dhchap_step = 235 + NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE; 236 + else { 237 + req->sq->dhchap_step = 238 + NVME_AUTH_DHCHAP_MESSAGE_FAILURE1; 239 + req->sq->dhchap_status = status; 240 + status = 
0; 241 + } 242 + goto done_kfree; 243 + } 244 + if (data->auth_id != req->sq->dhchap_step) { 245 + pr_debug("%s: ctrl %d qid %d step mismatch (%d != %d)\n", 246 + __func__, ctrl->cntlid, req->sq->qid, 247 + data->auth_id, req->sq->dhchap_step); 248 + goto done_failure1; 249 + } 250 + if (le16_to_cpu(data->t_id) != req->sq->dhchap_tid) { 251 + pr_debug("%s: ctrl %d qid %d invalid transaction %d (expected %d)\n", 252 + __func__, ctrl->cntlid, req->sq->qid, 253 + le16_to_cpu(data->t_id), 254 + req->sq->dhchap_tid); 255 + req->sq->dhchap_step = 256 + NVME_AUTH_DHCHAP_MESSAGE_FAILURE1; 257 + req->sq->dhchap_status = 258 + NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; 259 + goto done_kfree; 260 + } 261 + 262 + switch (data->auth_id) { 263 + case NVME_AUTH_DHCHAP_MESSAGE_REPLY: 264 + status = nvmet_auth_reply(req, d); 265 + if (status == 0) 266 + req->sq->dhchap_step = 267 + NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1; 268 + else { 269 + req->sq->dhchap_step = 270 + NVME_AUTH_DHCHAP_MESSAGE_FAILURE1; 271 + req->sq->dhchap_status = status; 272 + status = 0; 273 + } 274 + goto done_kfree; 275 + break; 276 + case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2: 277 + req->sq->authenticated = true; 278 + pr_debug("%s: ctrl %d qid %d ctrl authenticated\n", 279 + __func__, ctrl->cntlid, req->sq->qid); 280 + goto done_kfree; 281 + break; 282 + case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2: 283 + status = nvmet_auth_failure2(req, d); 284 + if (status) { 285 + pr_warn("ctrl %d qid %d: authentication failed (%d)\n", 286 + ctrl->cntlid, req->sq->qid, status); 287 + req->sq->dhchap_status = status; 288 + req->sq->authenticated = false; 289 + status = 0; 290 + } 291 + goto done_kfree; 292 + break; 293 + default: 294 + req->sq->dhchap_status = 295 + NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE; 296 + req->sq->dhchap_step = 297 + NVME_AUTH_DHCHAP_MESSAGE_FAILURE2; 298 + req->sq->authenticated = false; 299 + goto done_kfree; 300 + break; 301 + } 302 + done_failure1: 303 + req->sq->dhchap_status = 
NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE; 304 + req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2; 305 + 306 + done_kfree: 307 + kfree(d); 308 + done: 309 + pr_debug("%s: ctrl %d qid %d dhchap status %x step %x\n", __func__, 310 + ctrl->cntlid, req->sq->qid, 311 + req->sq->dhchap_status, req->sq->dhchap_step); 312 + if (status) 313 + pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n", 314 + __func__, ctrl->cntlid, req->sq->qid, 315 + status, req->error_loc); 316 + req->cqe->result.u64 = 0; 317 + nvmet_req_complete(req, status); 318 + if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 && 319 + req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) 320 + return; 321 + /* Final states, clear up variables */ 322 + nvmet_auth_sq_free(req->sq); 323 + if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) 324 + nvmet_ctrl_fatal_error(ctrl); 325 + } 326 + 327 + static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al) 328 + { 329 + struct nvmf_auth_dhchap_challenge_data *data = d; 330 + struct nvmet_ctrl *ctrl = req->sq->ctrl; 331 + int ret = 0; 332 + int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id); 333 + int data_size = sizeof(*d) + hash_len; 334 + 335 + if (al < data_size) { 336 + pr_debug("%s: buffer too small (al %d need %d)\n", __func__, 337 + al, data_size); 338 + return -EINVAL; 339 + } 340 + memset(data, 0, data_size); 341 + req->sq->dhchap_s1 = nvme_auth_get_seqnum(); 342 + data->auth_type = NVME_AUTH_DHCHAP_MESSAGES; 343 + data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE; 344 + data->t_id = cpu_to_le16(req->sq->dhchap_tid); 345 + data->hashid = ctrl->shash_id; 346 + data->hl = hash_len; 347 + data->seqnum = cpu_to_le32(req->sq->dhchap_s1); 348 + req->sq->dhchap_c1 = kmalloc(data->hl, GFP_KERNEL); 349 + if (!req->sq->dhchap_c1) 350 + return -ENOMEM; 351 + get_random_bytes(req->sq->dhchap_c1, data->hl); 352 + memcpy(data->cval, req->sq->dhchap_c1, data->hl); 353 + pr_debug("%s: ctrl %d qid %d seq %u 
transaction %d hl %d dhvlen %u\n", 354 + __func__, ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1, 355 + req->sq->dhchap_tid, data->hl, 0); 356 + return ret; 357 + } 358 + 359 + static int nvmet_auth_success1(struct nvmet_req *req, void *d, int al) 360 + { 361 + struct nvmf_auth_dhchap_success1_data *data = d; 362 + struct nvmet_ctrl *ctrl = req->sq->ctrl; 363 + int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id); 364 + 365 + WARN_ON(al < sizeof(*data)); 366 + memset(data, 0, sizeof(*data)); 367 + data->auth_type = NVME_AUTH_DHCHAP_MESSAGES; 368 + data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1; 369 + data->t_id = cpu_to_le16(req->sq->dhchap_tid); 370 + data->hl = hash_len; 371 + if (req->sq->dhchap_c2) { 372 + if (!ctrl->ctrl_key) { 373 + pr_warn("ctrl %d qid %d no ctrl key\n", 374 + ctrl->cntlid, req->sq->qid); 375 + return NVME_AUTH_DHCHAP_FAILURE_FAILED; 376 + } 377 + if (nvmet_auth_ctrl_hash(req, data->rval, data->hl)) 378 + return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE; 379 + data->rvalid = 1; 380 + pr_debug("ctrl %d qid %d response %*ph\n", 381 + ctrl->cntlid, req->sq->qid, data->hl, data->rval); 382 + } 383 + return 0; 384 + } 385 + 386 + static void nvmet_auth_failure1(struct nvmet_req *req, void *d, int al) 387 + { 388 + struct nvmf_auth_dhchap_failure_data *data = d; 389 + 390 + WARN_ON(al < sizeof(*data)); 391 + data->auth_type = NVME_AUTH_COMMON_MESSAGES; 392 + data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1; 393 + data->t_id = cpu_to_le16(req->sq->dhchap_tid); 394 + data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED; 395 + data->rescode_exp = req->sq->dhchap_status; 396 + } 397 + 398 + void nvmet_execute_auth_receive(struct nvmet_req *req) 399 + { 400 + struct nvmet_ctrl *ctrl = req->sq->ctrl; 401 + void *d; 402 + u32 al; 403 + u16 status = 0; 404 + 405 + if (req->cmd->auth_receive.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) { 406 + status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; 407 + req->error_loc = 408 + offsetof(struct 
nvmf_auth_receive_command, secp); 409 + goto done; 410 + } 411 + if (req->cmd->auth_receive.spsp0 != 0x01) { 412 + status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; 413 + req->error_loc = 414 + offsetof(struct nvmf_auth_receive_command, spsp0); 415 + goto done; 416 + } 417 + if (req->cmd->auth_receive.spsp1 != 0x01) { 418 + status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; 419 + req->error_loc = 420 + offsetof(struct nvmf_auth_receive_command, spsp1); 421 + goto done; 422 + } 423 + al = le32_to_cpu(req->cmd->auth_receive.al); 424 + if (!al) { 425 + status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; 426 + req->error_loc = 427 + offsetof(struct nvmf_auth_receive_command, al); 428 + goto done; 429 + } 430 + if (!nvmet_check_transfer_len(req, al)) { 431 + pr_debug("%s: transfer length mismatch (%u)\n", __func__, al); 432 + return; 433 + } 434 + 435 + d = kmalloc(al, GFP_KERNEL); 436 + if (!d) { 437 + status = NVME_SC_INTERNAL; 438 + goto done; 439 + } 440 + pr_debug("%s: ctrl %d qid %d step %x\n", __func__, 441 + ctrl->cntlid, req->sq->qid, req->sq->dhchap_step); 442 + switch (req->sq->dhchap_step) { 443 + case NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE: 444 + status = nvmet_auth_challenge(req, d, al); 445 + if (status < 0) { 446 + pr_warn("ctrl %d qid %d: challenge error (%d)\n", 447 + ctrl->cntlid, req->sq->qid, status); 448 + status = NVME_SC_INTERNAL; 449 + break; 450 + } 451 + if (status) { 452 + req->sq->dhchap_status = status; 453 + nvmet_auth_failure1(req, d, al); 454 + pr_warn("ctrl %d qid %d: challenge status (%x)\n", 455 + ctrl->cntlid, req->sq->qid, 456 + req->sq->dhchap_status); 457 + status = 0; 458 + break; 459 + } 460 + req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_REPLY; 461 + break; 462 + case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1: 463 + status = nvmet_auth_success1(req, d, al); 464 + if (status) { 465 + req->sq->dhchap_status = status; 466 + req->sq->authenticated = false; 467 + nvmet_auth_failure1(req, d, al); 468 + pr_warn("ctrl %d qid %d: success1 status (%x)\n", 469 + 
ctrl->cntlid, req->sq->qid, 470 + req->sq->dhchap_status); 471 + break; 472 + } 473 + req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2; 474 + break; 475 + case NVME_AUTH_DHCHAP_MESSAGE_FAILURE1: 476 + req->sq->authenticated = false; 477 + nvmet_auth_failure1(req, d, al); 478 + pr_warn("ctrl %d qid %d failure1 (%x)\n", 479 + ctrl->cntlid, req->sq->qid, req->sq->dhchap_status); 480 + break; 481 + default: 482 + pr_warn("ctrl %d qid %d unhandled step (%d)\n", 483 + ctrl->cntlid, req->sq->qid, req->sq->dhchap_step); 484 + req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1; 485 + req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_FAILED; 486 + nvmet_auth_failure1(req, d, al); 487 + status = 0; 488 + break; 489 + } 490 + 491 + status = nvmet_copy_to_sgl(req, 0, d, al); 492 + kfree(d); 493 + done: 494 + req->cqe->result.u64 = 0; 495 + nvmet_req_complete(req, status); 496 + if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2) 497 + nvmet_auth_sq_free(req->sq); 498 + else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) { 499 + nvmet_auth_sq_free(req->sq); 500 + nvmet_ctrl_fatal_error(ctrl); 501 + } 502 + }
+36 -2
drivers/nvme/target/fabrics-cmd.c
··· 93 93 case nvme_fabrics_type_property_get: 94 94 req->execute = nvmet_execute_prop_get; 95 95 break; 96 + #ifdef CONFIG_NVME_TARGET_AUTH 97 + case nvme_fabrics_type_auth_send: 98 + req->execute = nvmet_execute_auth_send; 99 + break; 100 + case nvme_fabrics_type_auth_receive: 101 + req->execute = nvmet_execute_auth_receive; 102 + break; 103 + #endif 96 104 default: 97 105 pr_debug("received unknown capsule type 0x%x\n", 98 106 cmd->fabrics.fctype); ··· 116 108 struct nvme_command *cmd = req->cmd; 117 109 118 110 switch (cmd->fabrics.fctype) { 111 + #ifdef CONFIG_NVME_TARGET_AUTH 112 + case nvme_fabrics_type_auth_send: 113 + req->execute = nvmet_execute_auth_send; 114 + break; 115 + case nvme_fabrics_type_auth_receive: 116 + req->execute = nvmet_execute_auth_receive; 117 + break; 118 + #endif 119 119 default: 120 120 pr_debug("received unknown capsule type 0x%x\n", 121 121 cmd->fabrics.fctype); ··· 204 188 struct nvmf_connect_data *d; 205 189 struct nvmet_ctrl *ctrl = NULL; 206 190 u16 status = 0; 191 + int ret; 207 192 208 193 if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data))) 209 194 return; ··· 247 230 248 231 uuid_copy(&ctrl->hostid, &d->hostid); 249 232 233 + ret = nvmet_setup_auth(ctrl); 234 + if (ret < 0) { 235 + pr_err("Failed to setup authentication, error %d\n", ret); 236 + nvmet_ctrl_put(ctrl); 237 + if (ret == -EPERM) 238 + status = (NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR); 239 + else 240 + status = NVME_SC_INTERNAL; 241 + goto out; 242 + } 243 + 250 244 status = nvmet_install_queue(ctrl, req); 251 245 if (status) { 252 246 nvmet_ctrl_put(ctrl); 253 247 goto out; 254 248 } 255 249 256 - pr_info("creating %s controller %d for subsystem %s for NQN %s%s.\n", 250 + pr_info("creating %s controller %d for subsystem %s for NQN %s%s%s.\n", 257 251 nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm", 258 252 ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn, 259 - ctrl->pi_support ? 
" T10-PI is enabled" : ""); 253 + ctrl->pi_support ? " T10-PI is enabled" : "", 254 + nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : ""); 260 255 req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid); 261 256 257 + if (nvmet_has_auth(ctrl)) 258 + nvmet_init_auth(ctrl, req); 262 259 out: 263 260 kfree(d); 264 261 complete: ··· 332 301 req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid); 333 302 334 303 pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid); 304 + req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid); 305 + if (nvmet_has_auth(ctrl)) 306 + nvmet_init_auth(ctrl, req); 335 307 336 308 out: 337 309 kfree(d);
+62
drivers/nvme/target/nvmet.h
··· 108 108 u16 size; 109 109 u32 sqhd; 110 110 bool sqhd_disabled; 111 + #ifdef CONFIG_NVME_TARGET_AUTH 112 + bool authenticated; 113 + u16 dhchap_tid; 114 + u16 dhchap_status; 115 + int dhchap_step; 116 + u8 *dhchap_c1; 117 + u8 *dhchap_c2; 118 + u32 dhchap_s1; 119 + u32 dhchap_s2; 120 + u8 *dhchap_skey; 121 + int dhchap_skey_len; 122 + #endif 111 123 struct completion free_done; 112 124 struct completion confirm_done; 113 125 }; ··· 221 209 u64 err_counter; 222 210 struct nvme_error_slot slots[NVMET_ERROR_LOG_SLOTS]; 223 211 bool pi_support; 212 + #ifdef CONFIG_NVME_TARGET_AUTH 213 + struct nvme_dhchap_key *host_key; 214 + struct nvme_dhchap_key *ctrl_key; 215 + u8 shash_id; 216 + #endif 224 217 }; 225 218 226 219 struct nvmet_subsys { ··· 288 271 289 272 struct nvmet_host { 290 273 struct config_group group; 274 + u8 *dhchap_secret; 275 + u8 *dhchap_ctrl_secret; 276 + u8 dhchap_key_hash; 277 + u8 dhchap_ctrl_key_hash; 278 + u8 dhchap_hash_id; 279 + u8 dhchap_dhgroup_id; 291 280 }; 292 281 293 282 static inline struct nvmet_host *to_host(struct config_item *item) ··· 691 668 if (bio != &req->b.inline_bio) 692 669 bio_put(bio); 693 670 } 671 + 672 + #ifdef CONFIG_NVME_TARGET_AUTH 673 + void nvmet_execute_auth_send(struct nvmet_req *req); 674 + void nvmet_execute_auth_receive(struct nvmet_req *req); 675 + int nvmet_auth_set_key(struct nvmet_host *host, const char *secret, 676 + bool set_ctrl); 677 + int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash); 678 + int nvmet_setup_auth(struct nvmet_ctrl *ctrl); 679 + void nvmet_init_auth(struct nvmet_ctrl *ctrl, struct nvmet_req *req); 680 + void nvmet_destroy_auth(struct nvmet_ctrl *ctrl); 681 + void nvmet_auth_sq_free(struct nvmet_sq *sq); 682 + bool nvmet_check_auth_status(struct nvmet_req *req); 683 + int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response, 684 + unsigned int hash_len); 685 + int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response, 686 + unsigned int hash_len); 687 + 
/*
 * A controller requires/offers DH-HMAC-CHAP authentication iff a host
 * key has been configured for it (set up by nvmet_setup_auth() from the
 * host's 'dhchap_key' configfs attribute).
 */
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
{
	return ctrl->host_key != NULL;
}
#else
/*
 * CONFIG_NVME_TARGET_AUTH disabled: no-op stubs so callers in the
 * fabrics/connect paths need no #ifdefs of their own.
 */
/* Nothing to negotiate; report success so connect proceeds normally. */
static inline int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
{
	return 0;
}
static inline void nvmet_init_auth(struct nvmet_ctrl *ctrl,
				   struct nvmet_req *req) {};
static inline void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) {};
static inline void nvmet_auth_sq_free(struct nvmet_sq *sq) {};
/* Without auth support every command is implicitly authenticated. */
static inline bool nvmet_check_auth_status(struct nvmet_req *req)
{
	return true;
}
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
{
	return false;
}
static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; }
#endif

#endif /* _NVMET_H */