Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

nvme: implement In-Band authentication

Implement NVMe-oF In-Band authentication according to NVMe TPAR 8006.
This patch adds two new fabric options 'dhchap_secret' to specify the
pre-shared key (in ASCII representation according to NVMe 2.0 section
8.13.5.8 'Secret representation') and 'dhchap_ctrl_secret' to specify
the pre-shared controller key for bi-directional authentication of both
the host and the controller.
Re-authentication can be triggered by writing the PSK into the new
controller sysfs attribute 'dhchap_secret' or 'dhchap_ctrl_secret'.

Signed-off-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
[axboe: fold in clang build fix]
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Hannes Reinecke and committed by
Jens Axboe
f50fff73 3bf2fde6

+1498 -7
+1
drivers/nvme/Kconfig
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 menu "NVME Support" 3 3 4 + source "drivers/nvme/common/Kconfig" 4 5 source "drivers/nvme/host/Kconfig" 5 6 source "drivers/nvme/target/Kconfig" 6 7
+1
drivers/nvme/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 3 + obj-$(CONFIG_NVME_COMMON) += common/ 3 4 obj-y += host/ 4 5 obj-y += target/
+4
drivers/nvme/common/Kconfig
··· 1 + # SPDX-License-Identifier: GPL-2.0-only 2 + 3 + config NVME_COMMON 4 + tristate
+7
drivers/nvme/common/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + 3 + ccflags-y += -I$(src) 4 + 5 + obj-$(CONFIG_NVME_COMMON) += nvme-common.o 6 + 7 + nvme-common-y += auth.o
+323
drivers/nvme/common/auth.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (c) 2020 Hannes Reinecke, SUSE Linux 4 + */ 5 + 6 + #include <linux/module.h> 7 + #include <linux/crc32.h> 8 + #include <linux/base64.h> 9 + #include <linux/prandom.h> 10 + #include <linux/scatterlist.h> 11 + #include <asm/unaligned.h> 12 + #include <crypto/hash.h> 13 + #include <crypto/dh.h> 14 + #include <linux/nvme.h> 15 + #include <linux/nvme-auth.h> 16 + 17 + static u32 nvme_dhchap_seqnum; 18 + static DEFINE_MUTEX(nvme_dhchap_mutex); 19 + 20 + u32 nvme_auth_get_seqnum(void) 21 + { 22 + u32 seqnum; 23 + 24 + mutex_lock(&nvme_dhchap_mutex); 25 + if (!nvme_dhchap_seqnum) 26 + nvme_dhchap_seqnum = prandom_u32(); 27 + else { 28 + nvme_dhchap_seqnum++; 29 + if (!nvme_dhchap_seqnum) 30 + nvme_dhchap_seqnum++; 31 + } 32 + seqnum = nvme_dhchap_seqnum; 33 + mutex_unlock(&nvme_dhchap_mutex); 34 + return seqnum; 35 + } 36 + EXPORT_SYMBOL_GPL(nvme_auth_get_seqnum); 37 + 38 + static struct nvme_auth_dhgroup_map { 39 + const char name[16]; 40 + const char kpp[16]; 41 + } dhgroup_map[] = { 42 + [NVME_AUTH_DHGROUP_NULL] = { 43 + .name = "null", .kpp = "null" }, 44 + [NVME_AUTH_DHGROUP_2048] = { 45 + .name = "ffdhe2048", .kpp = "ffdhe2048(dh)" }, 46 + [NVME_AUTH_DHGROUP_3072] = { 47 + .name = "ffdhe3072", .kpp = "ffdhe3072(dh)" }, 48 + [NVME_AUTH_DHGROUP_4096] = { 49 + .name = "ffdhe4096", .kpp = "ffdhe4096(dh)" }, 50 + [NVME_AUTH_DHGROUP_6144] = { 51 + .name = "ffdhe6144", .kpp = "ffdhe6144(dh)" }, 52 + [NVME_AUTH_DHGROUP_8192] = { 53 + .name = "ffdhe8192", .kpp = "ffdhe8192(dh)" }, 54 + }; 55 + 56 + const char *nvme_auth_dhgroup_name(u8 dhgroup_id) 57 + { 58 + if (dhgroup_id > ARRAY_SIZE(dhgroup_map)) 59 + return NULL; 60 + return dhgroup_map[dhgroup_id].name; 61 + } 62 + EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_name); 63 + 64 + const char *nvme_auth_dhgroup_kpp(u8 dhgroup_id) 65 + { 66 + if (dhgroup_id > ARRAY_SIZE(dhgroup_map)) 67 + return NULL; 68 + return dhgroup_map[dhgroup_id].kpp; 69 + } 70 + 
EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_kpp); 71 + 72 + u8 nvme_auth_dhgroup_id(const char *dhgroup_name) 73 + { 74 + int i; 75 + 76 + if (!dhgroup_name || !strlen(dhgroup_name)) 77 + return NVME_AUTH_DHGROUP_INVALID; 78 + for (i = 0; i < ARRAY_SIZE(dhgroup_map); i++) { 79 + if (!strlen(dhgroup_map[i].name)) 80 + continue; 81 + if (!strncmp(dhgroup_map[i].name, dhgroup_name, 82 + strlen(dhgroup_map[i].name))) 83 + return i; 84 + } 85 + return NVME_AUTH_DHGROUP_INVALID; 86 + } 87 + EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_id); 88 + 89 + static struct nvme_dhchap_hash_map { 90 + int len; 91 + const char hmac[15]; 92 + const char digest[8]; 93 + } hash_map[] = { 94 + [NVME_AUTH_HASH_SHA256] = { 95 + .len = 32, 96 + .hmac = "hmac(sha256)", 97 + .digest = "sha256", 98 + }, 99 + [NVME_AUTH_HASH_SHA384] = { 100 + .len = 48, 101 + .hmac = "hmac(sha384)", 102 + .digest = "sha384", 103 + }, 104 + [NVME_AUTH_HASH_SHA512] = { 105 + .len = 64, 106 + .hmac = "hmac(sha512)", 107 + .digest = "sha512", 108 + }, 109 + }; 110 + 111 + const char *nvme_auth_hmac_name(u8 hmac_id) 112 + { 113 + if (hmac_id > ARRAY_SIZE(hash_map)) 114 + return NULL; 115 + return hash_map[hmac_id].hmac; 116 + } 117 + EXPORT_SYMBOL_GPL(nvme_auth_hmac_name); 118 + 119 + const char *nvme_auth_digest_name(u8 hmac_id) 120 + { 121 + if (hmac_id > ARRAY_SIZE(hash_map)) 122 + return NULL; 123 + return hash_map[hmac_id].digest; 124 + } 125 + EXPORT_SYMBOL_GPL(nvme_auth_digest_name); 126 + 127 + u8 nvme_auth_hmac_id(const char *hmac_name) 128 + { 129 + int i; 130 + 131 + if (!hmac_name || !strlen(hmac_name)) 132 + return NVME_AUTH_HASH_INVALID; 133 + 134 + for (i = 0; i < ARRAY_SIZE(hash_map); i++) { 135 + if (!strlen(hash_map[i].hmac)) 136 + continue; 137 + if (!strncmp(hash_map[i].hmac, hmac_name, 138 + strlen(hash_map[i].hmac))) 139 + return i; 140 + } 141 + return NVME_AUTH_HASH_INVALID; 142 + } 143 + EXPORT_SYMBOL_GPL(nvme_auth_hmac_id); 144 + 145 + size_t nvme_auth_hmac_hash_len(u8 hmac_id) 146 + { 147 + if (hmac_id > 
ARRAY_SIZE(hash_map)) 148 + return 0; 149 + return hash_map[hmac_id].len; 150 + } 151 + EXPORT_SYMBOL_GPL(nvme_auth_hmac_hash_len); 152 + 153 + struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret, 154 + u8 key_hash) 155 + { 156 + struct nvme_dhchap_key *key; 157 + unsigned char *p; 158 + u32 crc; 159 + int ret, key_len; 160 + size_t allocated_len = strlen(secret); 161 + 162 + /* Secret might be affixed with a ':' */ 163 + p = strrchr(secret, ':'); 164 + if (p) 165 + allocated_len = p - secret; 166 + key = kzalloc(sizeof(*key), GFP_KERNEL); 167 + if (!key) 168 + return ERR_PTR(-ENOMEM); 169 + key->key = kzalloc(allocated_len, GFP_KERNEL); 170 + if (!key->key) { 171 + ret = -ENOMEM; 172 + goto out_free_key; 173 + } 174 + 175 + key_len = base64_decode(secret, allocated_len, key->key); 176 + if (key_len < 0) { 177 + pr_debug("base64 key decoding error %d\n", 178 + key_len); 179 + ret = key_len; 180 + goto out_free_secret; 181 + } 182 + 183 + if (key_len != 36 && key_len != 52 && 184 + key_len != 68) { 185 + pr_err("Invalid key len %d\n", key_len); 186 + ret = -EINVAL; 187 + goto out_free_secret; 188 + } 189 + 190 + if (key_hash > 0 && 191 + (key_len - 4) != nvme_auth_hmac_hash_len(key_hash)) { 192 + pr_err("Mismatched key len %d for %s\n", key_len, 193 + nvme_auth_hmac_name(key_hash)); 194 + ret = -EINVAL; 195 + goto out_free_secret; 196 + } 197 + 198 + /* The last four bytes is the CRC in little-endian format */ 199 + key_len -= 4; 200 + /* 201 + * The linux implementation doesn't do pre- and post-increments, 202 + * so we have to do it manually. 
203 + */ 204 + crc = ~crc32(~0, key->key, key_len); 205 + 206 + if (get_unaligned_le32(key->key + key_len) != crc) { 207 + pr_err("key crc mismatch (key %08x, crc %08x)\n", 208 + get_unaligned_le32(key->key + key_len), crc); 209 + ret = -EKEYREJECTED; 210 + goto out_free_secret; 211 + } 212 + key->len = key_len; 213 + key->hash = key_hash; 214 + return key; 215 + out_free_secret: 216 + kfree_sensitive(key->key); 217 + out_free_key: 218 + kfree(key); 219 + return ERR_PTR(ret); 220 + } 221 + EXPORT_SYMBOL_GPL(nvme_auth_extract_key); 222 + 223 + void nvme_auth_free_key(struct nvme_dhchap_key *key) 224 + { 225 + if (!key) 226 + return; 227 + kfree_sensitive(key->key); 228 + kfree(key); 229 + } 230 + EXPORT_SYMBOL_GPL(nvme_auth_free_key); 231 + 232 + u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn) 233 + { 234 + const char *hmac_name; 235 + struct crypto_shash *key_tfm; 236 + struct shash_desc *shash; 237 + u8 *transformed_key; 238 + int ret; 239 + 240 + if (!key || !key->key) { 241 + pr_warn("No key specified\n"); 242 + return ERR_PTR(-ENOKEY); 243 + } 244 + if (key->hash == 0) { 245 + transformed_key = kmemdup(key->key, key->len, GFP_KERNEL); 246 + return transformed_key ? 
transformed_key : ERR_PTR(-ENOMEM); 247 + } 248 + hmac_name = nvme_auth_hmac_name(key->hash); 249 + if (!hmac_name) { 250 + pr_warn("Invalid key hash id %d\n", key->hash); 251 + return ERR_PTR(-EINVAL); 252 + } 253 + 254 + key_tfm = crypto_alloc_shash(hmac_name, 0, 0); 255 + if (IS_ERR(key_tfm)) 256 + return (u8 *)key_tfm; 257 + 258 + shash = kmalloc(sizeof(struct shash_desc) + 259 + crypto_shash_descsize(key_tfm), 260 + GFP_KERNEL); 261 + if (!shash) { 262 + ret = -ENOMEM; 263 + goto out_free_key; 264 + } 265 + 266 + transformed_key = kzalloc(crypto_shash_digestsize(key_tfm), GFP_KERNEL); 267 + if (!transformed_key) { 268 + ret = -ENOMEM; 269 + goto out_free_shash; 270 + } 271 + 272 + shash->tfm = key_tfm; 273 + ret = crypto_shash_setkey(key_tfm, key->key, key->len); 274 + if (ret < 0) 275 + goto out_free_shash; 276 + ret = crypto_shash_init(shash); 277 + if (ret < 0) 278 + goto out_free_shash; 279 + ret = crypto_shash_update(shash, nqn, strlen(nqn)); 280 + if (ret < 0) 281 + goto out_free_shash; 282 + ret = crypto_shash_update(shash, "NVMe-over-Fabrics", 17); 283 + if (ret < 0) 284 + goto out_free_shash; 285 + ret = crypto_shash_final(shash, transformed_key); 286 + out_free_shash: 287 + kfree(shash); 288 + out_free_key: 289 + crypto_free_shash(key_tfm); 290 + if (ret < 0) { 291 + kfree_sensitive(transformed_key); 292 + return ERR_PTR(ret); 293 + } 294 + return transformed_key; 295 + } 296 + EXPORT_SYMBOL_GPL(nvme_auth_transform_key); 297 + 298 + int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key) 299 + { 300 + struct nvme_dhchap_key *key; 301 + u8 key_hash; 302 + 303 + if (!secret) { 304 + *ret_key = NULL; 305 + return 0; 306 + } 307 + 308 + if (sscanf(secret, "DHHC-1:%hhd:%*s:", &key_hash) != 1) 309 + return -EINVAL; 310 + 311 + /* Pass in the secret without the 'DHHC-1:XX:' prefix */ 312 + key = nvme_auth_extract_key(secret + 10, key_hash); 313 + if (IS_ERR(key)) { 314 + *ret_key = NULL; 315 + return PTR_ERR(key); 316 + } 317 + 318 + 
*ret_key = key; 319 + return 0; 320 + } 321 + EXPORT_SYMBOL_GPL(nvme_auth_generate_key); 322 + 323 + MODULE_LICENSE("GPL v2");
+13
drivers/nvme/host/Kconfig
··· 92 92 93 93 If unsure, say N. 94 94 95 + config NVME_AUTH 96 + bool "NVM Express over Fabrics In-Band Authentication" 97 + depends on NVME_CORE 98 + select NVME_COMMON 99 + select CRYPTO 100 + select CRYPTO_HMAC 101 + select CRYPTO_SHA256 102 + select CRYPTO_SHA512 103 + help 104 + This provides support for NVMe over Fabrics In-Band Authentication. 105 + 106 + If unsure, say N. 107 + 95 108 config NVME_APPLE 96 109 tristate "Apple ANS2 NVM Express host driver" 97 110 depends on OF && BLOCK
+1
drivers/nvme/host/Makefile
··· 16 16 nvme-core-$(CONFIG_BLK_DEV_ZONED) += zns.o 17 17 nvme-core-$(CONFIG_FAULT_INJECTION_DEBUG_FS) += fault_inject.o 18 18 nvme-core-$(CONFIG_NVME_HWMON) += hwmon.o 19 + nvme-core-$(CONFIG_NVME_AUTH) += auth.o 19 20 20 21 nvme-y += pci.o 21 22
+828
drivers/nvme/host/auth.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (c) 2020 Hannes Reinecke, SUSE Linux 4 + */ 5 + 6 + #include <linux/crc32.h> 7 + #include <linux/base64.h> 8 + #include <linux/prandom.h> 9 + #include <asm/unaligned.h> 10 + #include <crypto/hash.h> 11 + #include <crypto/dh.h> 12 + #include "nvme.h" 13 + #include "fabrics.h" 14 + #include <linux/nvme-auth.h> 15 + 16 + struct nvme_dhchap_queue_context { 17 + struct list_head entry; 18 + struct work_struct auth_work; 19 + struct nvme_ctrl *ctrl; 20 + struct crypto_shash *shash_tfm; 21 + void *buf; 22 + size_t buf_size; 23 + int qid; 24 + int error; 25 + u32 s1; 26 + u32 s2; 27 + u16 transaction; 28 + u8 status; 29 + u8 hash_id; 30 + size_t hash_len; 31 + u8 dhgroup_id; 32 + u8 c1[64]; 33 + u8 c2[64]; 34 + u8 response[64]; 35 + u8 *host_response; 36 + }; 37 + 38 + #define nvme_auth_flags_from_qid(qid) \ 39 + (qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED 40 + #define nvme_auth_queue_from_qid(ctrl, qid) \ 41 + (qid == 0) ? (ctrl)->fabrics_q : (ctrl)->connect_q 42 + 43 + static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid, 44 + void *data, size_t data_len, bool auth_send) 45 + { 46 + struct nvme_command cmd = {}; 47 + blk_mq_req_flags_t flags = nvme_auth_flags_from_qid(qid); 48 + struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid); 49 + int ret; 50 + 51 + cmd.auth_common.opcode = nvme_fabrics_command; 52 + cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER; 53 + cmd.auth_common.spsp0 = 0x01; 54 + cmd.auth_common.spsp1 = 0x01; 55 + if (auth_send) { 56 + cmd.auth_send.fctype = nvme_fabrics_type_auth_send; 57 + cmd.auth_send.tl = cpu_to_le32(data_len); 58 + } else { 59 + cmd.auth_receive.fctype = nvme_fabrics_type_auth_receive; 60 + cmd.auth_receive.al = cpu_to_le32(data_len); 61 + } 62 + 63 + ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len, 64 + qid == 0 ? 
NVME_QID_ANY : qid, 65 + 0, flags); 66 + if (ret > 0) 67 + dev_warn(ctrl->device, 68 + "qid %d auth_send failed with status %d\n", qid, ret); 69 + else if (ret < 0) 70 + dev_err(ctrl->device, 71 + "qid %d auth_send failed with error %d\n", qid, ret); 72 + return ret; 73 + } 74 + 75 + static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid, 76 + struct nvmf_auth_dhchap_failure_data *data, 77 + u16 transaction, u8 expected_msg) 78 + { 79 + dev_dbg(ctrl->device, "%s: qid %d auth_type %d auth_id %x\n", 80 + __func__, qid, data->auth_type, data->auth_id); 81 + 82 + if (data->auth_type == NVME_AUTH_COMMON_MESSAGES && 83 + data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) { 84 + return data->rescode_exp; 85 + } 86 + if (data->auth_type != NVME_AUTH_DHCHAP_MESSAGES || 87 + data->auth_id != expected_msg) { 88 + dev_warn(ctrl->device, 89 + "qid %d invalid message %02x/%02x\n", 90 + qid, data->auth_type, data->auth_id); 91 + return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE; 92 + } 93 + if (le16_to_cpu(data->t_id) != transaction) { 94 + dev_warn(ctrl->device, 95 + "qid %d invalid transaction ID %d\n", 96 + qid, le16_to_cpu(data->t_id)); 97 + return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE; 98 + } 99 + return 0; 100 + } 101 + 102 + static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl, 103 + struct nvme_dhchap_queue_context *chap) 104 + { 105 + struct nvmf_auth_dhchap_negotiate_data *data = chap->buf; 106 + size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol); 107 + 108 + if (chap->buf_size < size) { 109 + chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; 110 + return -EINVAL; 111 + } 112 + memset((u8 *)chap->buf, 0, size); 113 + data->auth_type = NVME_AUTH_COMMON_MESSAGES; 114 + data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE; 115 + data->t_id = cpu_to_le16(chap->transaction); 116 + data->sc_c = 0; /* No secure channel concatenation */ 117 + data->napd = 1; 118 + data->auth_protocol[0].dhchap.authid = 
NVME_AUTH_DHCHAP_AUTH_ID; 119 + data->auth_protocol[0].dhchap.halen = 3; 120 + data->auth_protocol[0].dhchap.dhlen = 6; 121 + data->auth_protocol[0].dhchap.idlist[0] = NVME_AUTH_HASH_SHA256; 122 + data->auth_protocol[0].dhchap.idlist[1] = NVME_AUTH_HASH_SHA384; 123 + data->auth_protocol[0].dhchap.idlist[2] = NVME_AUTH_HASH_SHA512; 124 + data->auth_protocol[0].dhchap.idlist[30] = NVME_AUTH_DHGROUP_NULL; 125 + data->auth_protocol[0].dhchap.idlist[31] = NVME_AUTH_DHGROUP_2048; 126 + data->auth_protocol[0].dhchap.idlist[32] = NVME_AUTH_DHGROUP_3072; 127 + data->auth_protocol[0].dhchap.idlist[33] = NVME_AUTH_DHGROUP_4096; 128 + data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144; 129 + data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192; 130 + 131 + return size; 132 + } 133 + 134 + static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl, 135 + struct nvme_dhchap_queue_context *chap) 136 + { 137 + struct nvmf_auth_dhchap_challenge_data *data = chap->buf; 138 + u16 dhvlen = le16_to_cpu(data->dhvlen); 139 + size_t size = sizeof(*data) + data->hl + dhvlen; 140 + const char *hmac_name, *kpp_name; 141 + 142 + if (chap->buf_size < size) { 143 + chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; 144 + return NVME_SC_INVALID_FIELD; 145 + } 146 + 147 + hmac_name = nvme_auth_hmac_name(data->hashid); 148 + if (!hmac_name) { 149 + dev_warn(ctrl->device, 150 + "qid %d: invalid HASH ID %d\n", 151 + chap->qid, data->hashid); 152 + chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE; 153 + return NVME_SC_INVALID_FIELD; 154 + } 155 + 156 + if (chap->hash_id == data->hashid && chap->shash_tfm && 157 + !strcmp(crypto_shash_alg_name(chap->shash_tfm), hmac_name) && 158 + crypto_shash_digestsize(chap->shash_tfm) == data->hl) { 159 + dev_dbg(ctrl->device, 160 + "qid %d: reuse existing hash %s\n", 161 + chap->qid, hmac_name); 162 + goto select_kpp; 163 + } 164 + 165 + /* Reset if hash cannot be reused */ 166 + if (chap->shash_tfm) { 167 + 
crypto_free_shash(chap->shash_tfm); 168 + chap->hash_id = 0; 169 + chap->hash_len = 0; 170 + } 171 + chap->shash_tfm = crypto_alloc_shash(hmac_name, 0, 172 + CRYPTO_ALG_ALLOCATES_MEMORY); 173 + if (IS_ERR(chap->shash_tfm)) { 174 + dev_warn(ctrl->device, 175 + "qid %d: failed to allocate hash %s, error %ld\n", 176 + chap->qid, hmac_name, PTR_ERR(chap->shash_tfm)); 177 + chap->shash_tfm = NULL; 178 + chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED; 179 + return NVME_SC_AUTH_REQUIRED; 180 + } 181 + 182 + if (crypto_shash_digestsize(chap->shash_tfm) != data->hl) { 183 + dev_warn(ctrl->device, 184 + "qid %d: invalid hash length %d\n", 185 + chap->qid, data->hl); 186 + crypto_free_shash(chap->shash_tfm); 187 + chap->shash_tfm = NULL; 188 + chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE; 189 + return NVME_SC_AUTH_REQUIRED; 190 + } 191 + 192 + /* Reset host response if the hash had been changed */ 193 + if (chap->hash_id != data->hashid) { 194 + kfree(chap->host_response); 195 + chap->host_response = NULL; 196 + } 197 + 198 + chap->hash_id = data->hashid; 199 + chap->hash_len = data->hl; 200 + dev_dbg(ctrl->device, "qid %d: selected hash %s\n", 201 + chap->qid, hmac_name); 202 + 203 + select_kpp: 204 + kpp_name = nvme_auth_dhgroup_kpp(data->dhgid); 205 + if (!kpp_name) { 206 + dev_warn(ctrl->device, 207 + "qid %d: invalid DH group id %d\n", 208 + chap->qid, data->dhgid); 209 + chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE; 210 + return NVME_SC_AUTH_REQUIRED; 211 + } 212 + 213 + if (data->dhgid != NVME_AUTH_DHGROUP_NULL) { 214 + dev_warn(ctrl->device, 215 + "qid %d: unsupported DH group %s\n", 216 + chap->qid, kpp_name); 217 + chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE; 218 + return NVME_SC_AUTH_REQUIRED; 219 + } else if (dhvlen != 0) { 220 + dev_warn(ctrl->device, 221 + "qid %d: invalid DH value for NULL DH\n", 222 + chap->qid); 223 + chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; 224 + return NVME_SC_INVALID_FIELD; 225 + } 226 + 
chap->dhgroup_id = data->dhgid; 227 + 228 + chap->s1 = le32_to_cpu(data->seqnum); 229 + memcpy(chap->c1, data->cval, chap->hash_len); 230 + 231 + return 0; 232 + } 233 + 234 + static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl, 235 + struct nvme_dhchap_queue_context *chap) 236 + { 237 + struct nvmf_auth_dhchap_reply_data *data = chap->buf; 238 + size_t size = sizeof(*data); 239 + 240 + size += 2 * chap->hash_len; 241 + 242 + if (chap->buf_size < size) { 243 + chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; 244 + return -EINVAL; 245 + } 246 + 247 + memset(chap->buf, 0, size); 248 + data->auth_type = NVME_AUTH_DHCHAP_MESSAGES; 249 + data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_REPLY; 250 + data->t_id = cpu_to_le16(chap->transaction); 251 + data->hl = chap->hash_len; 252 + data->dhvlen = 0; 253 + memcpy(data->rval, chap->response, chap->hash_len); 254 + if (ctrl->ctrl_key) { 255 + get_random_bytes(chap->c2, chap->hash_len); 256 + data->cvalid = 1; 257 + chap->s2 = nvme_auth_get_seqnum(); 258 + memcpy(data->rval + chap->hash_len, chap->c2, 259 + chap->hash_len); 260 + dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n", 261 + __func__, chap->qid, (int)chap->hash_len, chap->c2); 262 + } else { 263 + memset(chap->c2, 0, chap->hash_len); 264 + chap->s2 = 0; 265 + } 266 + data->seqnum = cpu_to_le32(chap->s2); 267 + return size; 268 + } 269 + 270 + static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl, 271 + struct nvme_dhchap_queue_context *chap) 272 + { 273 + struct nvmf_auth_dhchap_success1_data *data = chap->buf; 274 + size_t size = sizeof(*data); 275 + 276 + if (ctrl->ctrl_key) 277 + size += chap->hash_len; 278 + 279 + if (chap->buf_size < size) { 280 + chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; 281 + return NVME_SC_INVALID_FIELD; 282 + } 283 + 284 + if (data->hl != chap->hash_len) { 285 + dev_warn(ctrl->device, 286 + "qid %d: invalid hash length %u\n", 287 + chap->qid, data->hl); 288 + chap->status = 
NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE; 289 + return NVME_SC_INVALID_FIELD; 290 + } 291 + 292 + /* Just print out information for the admin queue */ 293 + if (chap->qid == 0) 294 + dev_info(ctrl->device, 295 + "qid 0: authenticated with hash %s dhgroup %s\n", 296 + nvme_auth_hmac_name(chap->hash_id), 297 + nvme_auth_dhgroup_name(chap->dhgroup_id)); 298 + 299 + if (!data->rvalid) 300 + return 0; 301 + 302 + /* Validate controller response */ 303 + if (memcmp(chap->response, data->rval, data->hl)) { 304 + dev_dbg(ctrl->device, "%s: qid %d ctrl response %*ph\n", 305 + __func__, chap->qid, (int)chap->hash_len, data->rval); 306 + dev_dbg(ctrl->device, "%s: qid %d host response %*ph\n", 307 + __func__, chap->qid, (int)chap->hash_len, 308 + chap->response); 309 + dev_warn(ctrl->device, 310 + "qid %d: controller authentication failed\n", 311 + chap->qid); 312 + chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED; 313 + return NVME_SC_AUTH_REQUIRED; 314 + } 315 + 316 + /* Just print out information for the admin queue */ 317 + if (chap->qid == 0) 318 + dev_info(ctrl->device, 319 + "qid 0: controller authenticated\n"); 320 + return 0; 321 + } 322 + 323 + static int nvme_auth_set_dhchap_success2_data(struct nvme_ctrl *ctrl, 324 + struct nvme_dhchap_queue_context *chap) 325 + { 326 + struct nvmf_auth_dhchap_success2_data *data = chap->buf; 327 + size_t size = sizeof(*data); 328 + 329 + memset(chap->buf, 0, size); 330 + data->auth_type = NVME_AUTH_DHCHAP_MESSAGES; 331 + data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2; 332 + data->t_id = cpu_to_le16(chap->transaction); 333 + 334 + return size; 335 + } 336 + 337 + static int nvme_auth_set_dhchap_failure2_data(struct nvme_ctrl *ctrl, 338 + struct nvme_dhchap_queue_context *chap) 339 + { 340 + struct nvmf_auth_dhchap_failure_data *data = chap->buf; 341 + size_t size = sizeof(*data); 342 + 343 + memset(chap->buf, 0, size); 344 + data->auth_type = NVME_AUTH_COMMON_MESSAGES; 345 + data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2; 346 + 
data->t_id = cpu_to_le16(chap->transaction); 347 + data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED; 348 + data->rescode_exp = chap->status; 349 + 350 + return size; 351 + } 352 + 353 + static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl, 354 + struct nvme_dhchap_queue_context *chap) 355 + { 356 + SHASH_DESC_ON_STACK(shash, chap->shash_tfm); 357 + u8 buf[4], *challenge = chap->c1; 358 + int ret; 359 + 360 + dev_dbg(ctrl->device, "%s: qid %d host response seq %u transaction %d\n", 361 + __func__, chap->qid, chap->s1, chap->transaction); 362 + 363 + if (!chap->host_response) { 364 + chap->host_response = nvme_auth_transform_key(ctrl->host_key, 365 + ctrl->opts->host->nqn); 366 + if (IS_ERR(chap->host_response)) { 367 + ret = PTR_ERR(chap->host_response); 368 + chap->host_response = NULL; 369 + return ret; 370 + } 371 + } else { 372 + dev_dbg(ctrl->device, "%s: qid %d re-using host response\n", 373 + __func__, chap->qid); 374 + } 375 + 376 + ret = crypto_shash_setkey(chap->shash_tfm, 377 + chap->host_response, ctrl->host_key->len); 378 + if (ret) { 379 + dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n", 380 + chap->qid, ret); 381 + goto out; 382 + } 383 + 384 + shash->tfm = chap->shash_tfm; 385 + ret = crypto_shash_init(shash); 386 + if (ret) 387 + goto out; 388 + ret = crypto_shash_update(shash, challenge, chap->hash_len); 389 + if (ret) 390 + goto out; 391 + put_unaligned_le32(chap->s1, buf); 392 + ret = crypto_shash_update(shash, buf, 4); 393 + if (ret) 394 + goto out; 395 + put_unaligned_le16(chap->transaction, buf); 396 + ret = crypto_shash_update(shash, buf, 2); 397 + if (ret) 398 + goto out; 399 + memset(buf, 0, sizeof(buf)); 400 + ret = crypto_shash_update(shash, buf, 1); 401 + if (ret) 402 + goto out; 403 + ret = crypto_shash_update(shash, "HostHost", 8); 404 + if (ret) 405 + goto out; 406 + ret = crypto_shash_update(shash, ctrl->opts->host->nqn, 407 + strlen(ctrl->opts->host->nqn)); 408 + if (ret) 409 + goto out; 410 + 
ret = crypto_shash_update(shash, buf, 1); 411 + if (ret) 412 + goto out; 413 + ret = crypto_shash_update(shash, ctrl->opts->subsysnqn, 414 + strlen(ctrl->opts->subsysnqn)); 415 + if (ret) 416 + goto out; 417 + ret = crypto_shash_final(shash, chap->response); 418 + out: 419 + if (challenge != chap->c1) 420 + kfree(challenge); 421 + return ret; 422 + } 423 + 424 + static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl, 425 + struct nvme_dhchap_queue_context *chap) 426 + { 427 + SHASH_DESC_ON_STACK(shash, chap->shash_tfm); 428 + u8 *ctrl_response; 429 + u8 buf[4], *challenge = chap->c2; 430 + int ret; 431 + 432 + ctrl_response = nvme_auth_transform_key(ctrl->ctrl_key, 433 + ctrl->opts->subsysnqn); 434 + if (IS_ERR(ctrl_response)) { 435 + ret = PTR_ERR(ctrl_response); 436 + return ret; 437 + } 438 + ret = crypto_shash_setkey(chap->shash_tfm, 439 + ctrl_response, ctrl->ctrl_key->len); 440 + if (ret) { 441 + dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n", 442 + chap->qid, ret); 443 + goto out; 444 + } 445 + 446 + dev_dbg(ctrl->device, "%s: qid %d ctrl response seq %u transaction %d\n", 447 + __func__, chap->qid, chap->s2, chap->transaction); 448 + dev_dbg(ctrl->device, "%s: qid %d challenge %*ph\n", 449 + __func__, chap->qid, (int)chap->hash_len, challenge); 450 + dev_dbg(ctrl->device, "%s: qid %d subsysnqn %s\n", 451 + __func__, chap->qid, ctrl->opts->subsysnqn); 452 + dev_dbg(ctrl->device, "%s: qid %d hostnqn %s\n", 453 + __func__, chap->qid, ctrl->opts->host->nqn); 454 + shash->tfm = chap->shash_tfm; 455 + ret = crypto_shash_init(shash); 456 + if (ret) 457 + goto out; 458 + ret = crypto_shash_update(shash, challenge, chap->hash_len); 459 + if (ret) 460 + goto out; 461 + put_unaligned_le32(chap->s2, buf); 462 + ret = crypto_shash_update(shash, buf, 4); 463 + if (ret) 464 + goto out; 465 + put_unaligned_le16(chap->transaction, buf); 466 + ret = crypto_shash_update(shash, buf, 2); 467 + if (ret) 468 + goto out; 469 + memset(buf, 0, 4); 470 
+ ret = crypto_shash_update(shash, buf, 1); 471 + if (ret) 472 + goto out; 473 + ret = crypto_shash_update(shash, "Controller", 10); 474 + if (ret) 475 + goto out; 476 + ret = crypto_shash_update(shash, ctrl->opts->subsysnqn, 477 + strlen(ctrl->opts->subsysnqn)); 478 + if (ret) 479 + goto out; 480 + ret = crypto_shash_update(shash, buf, 1); 481 + if (ret) 482 + goto out; 483 + ret = crypto_shash_update(shash, ctrl->opts->host->nqn, 484 + strlen(ctrl->opts->host->nqn)); 485 + if (ret) 486 + goto out; 487 + ret = crypto_shash_final(shash, chap->response); 488 + out: 489 + if (challenge != chap->c2) 490 + kfree(challenge); 491 + kfree(ctrl_response); 492 + return ret; 493 + } 494 + 495 + static void __nvme_auth_reset(struct nvme_dhchap_queue_context *chap) 496 + { 497 + chap->status = 0; 498 + chap->error = 0; 499 + chap->s1 = 0; 500 + chap->s2 = 0; 501 + chap->transaction = 0; 502 + memset(chap->c1, 0, sizeof(chap->c1)); 503 + memset(chap->c2, 0, sizeof(chap->c2)); 504 + } 505 + 506 + static void __nvme_auth_free(struct nvme_dhchap_queue_context *chap) 507 + { 508 + __nvme_auth_reset(chap); 509 + if (chap->shash_tfm) 510 + crypto_free_shash(chap->shash_tfm); 511 + kfree_sensitive(chap->host_response); 512 + kfree(chap->buf); 513 + kfree(chap); 514 + } 515 + 516 + static void __nvme_auth_work(struct work_struct *work) 517 + { 518 + struct nvme_dhchap_queue_context *chap = 519 + container_of(work, struct nvme_dhchap_queue_context, auth_work); 520 + struct nvme_ctrl *ctrl = chap->ctrl; 521 + size_t tl; 522 + int ret = 0; 523 + 524 + chap->transaction = ctrl->transaction++; 525 + 526 + /* DH-HMAC-CHAP Step 1: send negotiate */ 527 + dev_dbg(ctrl->device, "%s: qid %d send negotiate\n", 528 + __func__, chap->qid); 529 + ret = nvme_auth_set_dhchap_negotiate_data(ctrl, chap); 530 + if (ret < 0) { 531 + chap->error = ret; 532 + return; 533 + } 534 + tl = ret; 535 + ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true); 536 + if (ret) { 537 + chap->error = ret; 538 + 
return; 539 + } 540 + 541 + /* DH-HMAC-CHAP Step 2: receive challenge */ 542 + dev_dbg(ctrl->device, "%s: qid %d receive challenge\n", 543 + __func__, chap->qid); 544 + 545 + memset(chap->buf, 0, chap->buf_size); 546 + ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, chap->buf_size, false); 547 + if (ret) { 548 + dev_warn(ctrl->device, 549 + "qid %d failed to receive challenge, %s %d\n", 550 + chap->qid, ret < 0 ? "error" : "nvme status", ret); 551 + chap->error = ret; 552 + return; 553 + } 554 + ret = nvme_auth_receive_validate(ctrl, chap->qid, chap->buf, chap->transaction, 555 + NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE); 556 + if (ret) { 557 + chap->status = ret; 558 + chap->error = NVME_SC_AUTH_REQUIRED; 559 + return; 560 + } 561 + 562 + ret = nvme_auth_process_dhchap_challenge(ctrl, chap); 563 + if (ret) { 564 + /* Invalid challenge parameters */ 565 + chap->error = ret; 566 + goto fail2; 567 + } 568 + 569 + dev_dbg(ctrl->device, "%s: qid %d host response\n", 570 + __func__, chap->qid); 571 + ret = nvme_auth_dhchap_setup_host_response(ctrl, chap); 572 + if (ret) { 573 + chap->error = ret; 574 + goto fail2; 575 + } 576 + 577 + /* DH-HMAC-CHAP Step 3: send reply */ 578 + dev_dbg(ctrl->device, "%s: qid %d send reply\n", 579 + __func__, chap->qid); 580 + ret = nvme_auth_set_dhchap_reply_data(ctrl, chap); 581 + if (ret < 0) { 582 + chap->error = ret; 583 + goto fail2; 584 + } 585 + 586 + tl = ret; 587 + ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true); 588 + if (ret) { 589 + chap->error = ret; 590 + goto fail2; 591 + } 592 + 593 + /* DH-HMAC-CHAP Step 4: receive success1 */ 594 + dev_dbg(ctrl->device, "%s: qid %d receive success1\n", 595 + __func__, chap->qid); 596 + 597 + memset(chap->buf, 0, chap->buf_size); 598 + ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, chap->buf_size, false); 599 + if (ret) { 600 + dev_warn(ctrl->device, 601 + "qid %d failed to receive success1, %s %d\n", 602 + chap->qid, ret < 0 ? 
"error" : "nvme status", ret); 603 + chap->error = ret; 604 + return; 605 + } 606 + ret = nvme_auth_receive_validate(ctrl, chap->qid, 607 + chap->buf, chap->transaction, 608 + NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1); 609 + if (ret) { 610 + chap->status = ret; 611 + chap->error = NVME_SC_AUTH_REQUIRED; 612 + return; 613 + } 614 + 615 + if (ctrl->ctrl_key) { 616 + dev_dbg(ctrl->device, 617 + "%s: qid %d controller response\n", 618 + __func__, chap->qid); 619 + ret = nvme_auth_dhchap_setup_ctrl_response(ctrl, chap); 620 + if (ret) { 621 + chap->error = ret; 622 + goto fail2; 623 + } 624 + } 625 + 626 + ret = nvme_auth_process_dhchap_success1(ctrl, chap); 627 + if (ret) { 628 + /* Controller authentication failed */ 629 + chap->error = NVME_SC_AUTH_REQUIRED; 630 + goto fail2; 631 + } 632 + 633 + if (ctrl->ctrl_key) { 634 + /* DH-HMAC-CHAP Step 5: send success2 */ 635 + dev_dbg(ctrl->device, "%s: qid %d send success2\n", 636 + __func__, chap->qid); 637 + tl = nvme_auth_set_dhchap_success2_data(ctrl, chap); 638 + ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true); 639 + if (ret) 640 + chap->error = ret; 641 + } 642 + if (!ret) { 643 + chap->error = 0; 644 + return; 645 + } 646 + 647 + fail2: 648 + dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n", 649 + __func__, chap->qid, chap->status); 650 + tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap); 651 + ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true); 652 + /* 653 + * only update error if send failure2 failed and no other 654 + * error had been set during authentication. 
 655 + */ 656 + if (ret && !chap->error) 657 + chap->error = ret; 658 + } 659 + 660 + int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid) 661 + { 662 + struct nvme_dhchap_queue_context *chap; 663 + 664 + if (!ctrl->host_key) { 665 + dev_warn(ctrl->device, "qid %d: no key\n", qid); 666 + return -ENOKEY; 667 + } 668 + 669 + if (ctrl->opts->dhchap_ctrl_secret && !ctrl->ctrl_key) { 670 + dev_warn(ctrl->device, "qid %d: invalid ctrl key\n", qid); 671 + return -ENOKEY; 672 + } 673 + 674 + mutex_lock(&ctrl->dhchap_auth_mutex); 675 + /* Check if the context is already queued */ 676 + list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) { 677 + WARN_ON(!chap->buf); 678 + if (chap->qid == qid) { 679 + dev_dbg(ctrl->device, "qid %d: re-using context\n", qid); 680 + mutex_unlock(&ctrl->dhchap_auth_mutex); 681 + flush_work(&chap->auth_work); 682 + __nvme_auth_reset(chap); 683 + queue_work(nvme_wq, &chap->auth_work); 684 + return 0; 685 + } 686 + } 687 + chap = kzalloc(sizeof(*chap), GFP_KERNEL); 688 + if (!chap) { 689 + mutex_unlock(&ctrl->dhchap_auth_mutex); 690 + return -ENOMEM; 691 + } 692 + chap->qid = (qid == NVME_QID_ANY) ? 0 : qid; 693 + chap->ctrl = ctrl; 694 + 695 + /* 696 + * Allocate a large enough buffer for the entire negotiation: 697 + * 4k should be enough for ffdhe8192. 
698 + */ 699 + chap->buf_size = 4096; 700 + chap->buf = kzalloc(chap->buf_size, GFP_KERNEL); 701 + if (!chap->buf) { 702 + mutex_unlock(&ctrl->dhchap_auth_mutex); 703 + kfree(chap); 704 + return -ENOMEM; 705 + } 706 + 707 + INIT_WORK(&chap->auth_work, __nvme_auth_work); 708 + list_add(&chap->entry, &ctrl->dhchap_auth_list); 709 + mutex_unlock(&ctrl->dhchap_auth_mutex); 710 + queue_work(nvme_wq, &chap->auth_work); 711 + return 0; 712 + } 713 + EXPORT_SYMBOL_GPL(nvme_auth_negotiate); 714 + 715 + int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid) 716 + { 717 + struct nvme_dhchap_queue_context *chap; 718 + int ret; 719 + 720 + mutex_lock(&ctrl->dhchap_auth_mutex); 721 + list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) { 722 + if (chap->qid != qid) 723 + continue; 724 + mutex_unlock(&ctrl->dhchap_auth_mutex); 725 + flush_work(&chap->auth_work); 726 + ret = chap->error; 727 + return ret; 728 + } 729 + mutex_unlock(&ctrl->dhchap_auth_mutex); 730 + return -ENXIO; 731 + } 732 + EXPORT_SYMBOL_GPL(nvme_auth_wait); 733 + 734 + void nvme_auth_reset(struct nvme_ctrl *ctrl) 735 + { 736 + struct nvme_dhchap_queue_context *chap; 737 + 738 + mutex_lock(&ctrl->dhchap_auth_mutex); 739 + list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) { 740 + mutex_unlock(&ctrl->dhchap_auth_mutex); 741 + flush_work(&chap->auth_work); 742 + __nvme_auth_reset(chap); 743 + } 744 + mutex_unlock(&ctrl->dhchap_auth_mutex); 745 + } 746 + EXPORT_SYMBOL_GPL(nvme_auth_reset); 747 + 748 + static void nvme_dhchap_auth_work(struct work_struct *work) 749 + { 750 + struct nvme_ctrl *ctrl = 751 + container_of(work, struct nvme_ctrl, dhchap_auth_work); 752 + int ret, q; 753 + 754 + /* Authenticate admin queue first */ 755 + ret = nvme_auth_negotiate(ctrl, 0); 756 + if (ret) { 757 + dev_warn(ctrl->device, 758 + "qid 0: error %d setting up authentication\n", ret); 759 + return; 760 + } 761 + ret = nvme_auth_wait(ctrl, 0); 762 + if (ret) { 763 + dev_warn(ctrl->device, 764 + "qid 0: authentication 
failed\n"); 765 + return; 766 + } 767 + 768 + for (q = 1; q < ctrl->queue_count; q++) { 769 + ret = nvme_auth_negotiate(ctrl, q); 770 + if (ret) { 771 + dev_warn(ctrl->device, 772 + "qid %d: error %d setting up authentication\n", 773 + q, ret); 774 + break; 775 + } 776 + } 777 + 778 + /* 779 + * Failure is a soft-state; credentials remain valid until 780 + * the controller terminates the connection. 781 + */ 782 + } 783 + 784 + void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl) 785 + { 786 + INIT_LIST_HEAD(&ctrl->dhchap_auth_list); 787 + INIT_WORK(&ctrl->dhchap_auth_work, nvme_dhchap_auth_work); 788 + mutex_init(&ctrl->dhchap_auth_mutex); 789 + if (!ctrl->opts) 790 + return; 791 + nvme_auth_generate_key(ctrl->opts->dhchap_secret, &ctrl->host_key); 792 + nvme_auth_generate_key(ctrl->opts->dhchap_ctrl_secret, &ctrl->ctrl_key); 793 + } 794 + EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl); 795 + 796 + void nvme_auth_stop(struct nvme_ctrl *ctrl) 797 + { 798 + struct nvme_dhchap_queue_context *chap = NULL, *tmp; 799 + 800 + cancel_work_sync(&ctrl->dhchap_auth_work); 801 + mutex_lock(&ctrl->dhchap_auth_mutex); 802 + list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry) 803 + cancel_work_sync(&chap->auth_work); 804 + mutex_unlock(&ctrl->dhchap_auth_mutex); 805 + } 806 + EXPORT_SYMBOL_GPL(nvme_auth_stop); 807 + 808 + void nvme_auth_free(struct nvme_ctrl *ctrl) 809 + { 810 + struct nvme_dhchap_queue_context *chap = NULL, *tmp; 811 + 812 + mutex_lock(&ctrl->dhchap_auth_mutex); 813 + list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry) { 814 + list_del_init(&chap->entry); 815 + flush_work(&chap->auth_work); 816 + __nvme_auth_free(chap); 817 + } 818 + mutex_unlock(&ctrl->dhchap_auth_mutex); 819 + if (ctrl->host_key) { 820 + nvme_auth_free_key(ctrl->host_key); 821 + ctrl->host_key = NULL; 822 + } 823 + if (ctrl->ctrl_key) { 824 + nvme_auth_free_key(ctrl->ctrl_key); 825 + ctrl->ctrl_key = NULL; 826 + } 827 + } 828 + EXPORT_SYMBOL_GPL(nvme_auth_free);
+139 -4
drivers/nvme/host/core.c
··· 24 24 25 25 #include "nvme.h" 26 26 #include "fabrics.h" 27 + #include <linux/nvme-auth.h> 27 28 28 29 #define CREATE_TRACE_POINTS 29 30 #include "trace.h" ··· 331 330 COMPLETE, 332 331 RETRY, 333 332 FAILOVER, 333 + AUTHENTICATE, 334 334 }; 335 335 336 336 static inline enum nvme_disposition nvme_decide_disposition(struct request *req) 337 337 { 338 338 if (likely(nvme_req(req)->status == 0)) 339 339 return COMPLETE; 340 + 341 + if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED) 342 + return AUTHENTICATE; 340 343 341 344 if (blk_noretry_request(req) || 342 345 (nvme_req(req)->status & NVME_SC_DNR) || ··· 380 375 381 376 void nvme_complete_rq(struct request *req) 382 377 { 378 + struct nvme_ctrl *ctrl = nvme_req(req)->ctrl; 379 + 383 380 trace_nvme_complete_rq(req); 384 381 nvme_cleanup_cmd(req); 385 382 386 - if (nvme_req(req)->ctrl->kas) 387 - nvme_req(req)->ctrl->comp_seen = true; 383 + if (ctrl->kas) 384 + ctrl->comp_seen = true; 388 385 389 386 switch (nvme_decide_disposition(req)) { 390 387 case COMPLETE: ··· 397 390 return; 398 391 case FAILOVER: 399 392 nvme_failover_req(req); 393 + return; 394 + case AUTHENTICATE: 395 + #ifdef CONFIG_NVME_AUTH 396 + queue_work(nvme_wq, &ctrl->dhchap_auth_work); 397 + nvme_retry_req(req); 398 + #else 399 + nvme_end_req(req); 400 + #endif 400 401 return; 401 402 } 402 403 } ··· 717 702 switch (ctrl->state) { 718 703 case NVME_CTRL_CONNECTING: 719 704 if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) && 720 - req->cmd->fabrics.fctype == nvme_fabrics_type_connect) 705 + (req->cmd->fabrics.fctype == nvme_fabrics_type_connect || 706 + req->cmd->fabrics.fctype == nvme_fabrics_type_auth_send || 707 + req->cmd->fabrics.fctype == nvme_fabrics_type_auth_receive)) 721 708 return true; 722 709 break; 723 710 default: ··· 3626 3609 } 3627 3610 static DEVICE_ATTR_RO(dctype); 3628 3611 3612 + #ifdef CONFIG_NVME_AUTH 3613 + static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev, 3614 + struct 
 device_attribute *attr, char *buf) 3615 + { 3616 + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3617 + struct nvmf_ctrl_options *opts = ctrl->opts; 3618 + 3619 + if (!opts->dhchap_secret) 3620 + return sysfs_emit(buf, "none\n"); 3621 + return sysfs_emit(buf, "%s\n", opts->dhchap_secret); 3622 + } 3623 + 3624 + static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev, 3625 + struct device_attribute *attr, const char *buf, size_t count) 3626 + { 3627 + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3628 + struct nvmf_ctrl_options *opts = ctrl->opts; 3629 + char *dhchap_secret; 3630 + 3631 + if (!ctrl->opts->dhchap_secret) 3632 + return -EINVAL; 3633 + if (count < 7) 3634 + return -EINVAL; 3635 + if (memcmp(buf, "DHHC-1:", 7)) 3636 + return -EINVAL; 3637 + 3638 + dhchap_secret = kzalloc(count + 1, GFP_KERNEL); 3639 + if (!dhchap_secret) 3640 + return -ENOMEM; 3641 + memcpy(dhchap_secret, buf, count); 3642 + nvme_auth_stop(ctrl); 3643 + if (strcmp(dhchap_secret, opts->dhchap_secret)) { 3644 + int ret; 3645 + 3646 + ret = nvme_auth_generate_key(dhchap_secret, &ctrl->host_key); 3647 + if (ret) 3648 + return ret; 3649 + kfree(opts->dhchap_secret); 3650 + opts->dhchap_secret = dhchap_secret; 3651 + /* Key has changed; re-authenticate with the new key */ 3652 + nvme_auth_reset(ctrl); 3653 + } 3654 + /* Start re-authentication */ 3655 + dev_info(ctrl->device, "re-authenticating controller\n"); 3656 + queue_work(nvme_wq, &ctrl->dhchap_auth_work); 3657 + 3658 + return count; 3659 + } 3660 + static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR, 3661 + nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store); 3662 + 3663 + static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev, 3664 + struct device_attribute *attr, char *buf) 3665 + { 3666 + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3667 + struct nvmf_ctrl_options *opts = ctrl->opts; 3668 + 3669 + if (!opts->dhchap_ctrl_secret) 3670 + return sysfs_emit(buf, "none\n"); 3671 + return sysfs_emit(buf, 
 "%s\n", opts->dhchap_ctrl_secret); 3672 + } 3673 + 3674 + static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev, 3675 + struct device_attribute *attr, const char *buf, size_t count) 3676 + { 3677 + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3678 + struct nvmf_ctrl_options *opts = ctrl->opts; 3679 + char *dhchap_secret; 3680 + 3681 + if (!ctrl->opts->dhchap_ctrl_secret) 3682 + return -EINVAL; 3683 + if (count < 7) 3684 + return -EINVAL; 3685 + if (memcmp(buf, "DHHC-1:", 7)) 3686 + return -EINVAL; 3687 + 3688 + dhchap_secret = kzalloc(count + 1, GFP_KERNEL); 3689 + if (!dhchap_secret) 3690 + return -ENOMEM; 3691 + memcpy(dhchap_secret, buf, count); 3692 + nvme_auth_stop(ctrl); 3693 + if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) { 3694 + int ret; 3695 + 3696 + ret = nvme_auth_generate_key(dhchap_secret, &ctrl->ctrl_key); 3697 + if (ret) 3698 + return ret; 3699 + kfree(opts->dhchap_ctrl_secret); 3700 + opts->dhchap_ctrl_secret = dhchap_secret; 3701 + /* Key has changed; re-authenticate with the new key */ 3702 + nvme_auth_reset(ctrl); 3703 + } 3704 + /* Start re-authentication */ 3705 + dev_info(ctrl->device, "re-authenticating controller\n"); 3706 + queue_work(nvme_wq, &ctrl->dhchap_auth_work); 3707 + 3708 + return count; 3709 + } 3710 + static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR, 3711 + nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store); 3712 + #endif 3713 + 3629 3714 static struct attribute *nvme_dev_attrs[] = { 3630 3715 &dev_attr_reset_controller.attr, 3631 3716 &dev_attr_rescan_controller.attr, ··· 3751 3632 &dev_attr_kato.attr, 3752 3633 &dev_attr_cntrltype.attr, 3753 3634 &dev_attr_dctype.attr, 3635 + #ifdef CONFIG_NVME_AUTH 3636 + &dev_attr_dhchap_secret.attr, 3637 + &dev_attr_dhchap_ctrl_secret.attr, 3638 + #endif 3754 3639 NULL 3755 3640 }; 3756 3641 ··· 3778 3655 return 0; 3779 3656 if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts) 3780 3657 return 0; 3658 + #ifdef CONFIG_NVME_AUTH 3659 + if 
(a == &dev_attr_dhchap_secret.attr && !ctrl->opts) 3660 + return 0; 3661 + if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts) 3662 + return 0; 3663 + #endif 3781 3664 3782 3665 return a->mode; 3783 3666 } ··· 4677 4548 * recovery actions from interfering with the controller's 4678 4549 * firmware activation. 4679 4550 */ 4680 - if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) 4551 + if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) { 4552 + nvme_auth_stop(ctrl); 4681 4553 queue_work(nvme_wq, &ctrl->fw_act_work); 4554 + } 4682 4555 break; 4683 4556 #ifdef CONFIG_NVME_MULTIPATH 4684 4557 case NVME_AER_NOTICE_ANA: ··· 4744 4613 void nvme_stop_ctrl(struct nvme_ctrl *ctrl) 4745 4614 { 4746 4615 nvme_mpath_stop(ctrl); 4616 + nvme_auth_stop(ctrl); 4747 4617 nvme_stop_keep_alive(ctrl); 4748 4618 nvme_stop_failfast_work(ctrl); 4749 4619 flush_work(&ctrl->async_event_work); ··· 4804 4672 4805 4673 nvme_free_cels(ctrl); 4806 4674 nvme_mpath_uninit(ctrl); 4675 + nvme_auth_stop(ctrl); 4676 + nvme_auth_free(ctrl); 4807 4677 __free_page(ctrl->discard_page); 4808 4678 4809 4679 if (subsys) { ··· 4896 4762 4897 4763 nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device)); 4898 4764 nvme_mpath_init_ctrl(ctrl); 4765 + nvme_auth_init_ctrl(ctrl); 4899 4766 4900 4767 return 0; 4901 4768 out_free_name:
+77 -3
drivers/nvme/host/fabrics.c
··· 369 369 union nvme_result res; 370 370 struct nvmf_connect_data *data; 371 371 int ret; 372 + u32 result; 372 373 373 374 cmd.connect.opcode = nvme_fabrics_command; 374 375 cmd.connect.fctype = nvme_fabrics_type_connect; ··· 402 401 goto out_free_data; 403 402 } 404 403 405 - ctrl->cntlid = le16_to_cpu(res.u16); 406 - 404 + result = le32_to_cpu(res.u32); 405 + ctrl->cntlid = result & 0xFFFF; 406 + if ((result >> 16) & 0x3) { 407 + /* Authentication required */ 408 + ret = nvme_auth_negotiate(ctrl, 0); 409 + if (ret) { 410 + dev_warn(ctrl->device, 411 + "qid 0: authentication setup failed\n"); 412 + ret = NVME_SC_AUTH_REQUIRED; 413 + goto out_free_data; 414 + } 415 + ret = nvme_auth_wait(ctrl, 0); 416 + if (ret) 417 + dev_warn(ctrl->device, 418 + "qid 0: authentication failed\n"); 419 + else 420 + dev_info(ctrl->device, 421 + "qid 0: authenticated\n"); 422 + } 407 423 out_free_data: 408 424 kfree(data); 409 425 return ret; ··· 453 435 struct nvmf_connect_data *data; 454 436 union nvme_result res; 455 437 int ret; 438 + u32 result; 456 439 457 440 cmd.connect.opcode = nvme_fabrics_command; 458 441 cmd.connect.fctype = nvme_fabrics_type_connect; ··· 478 459 if (ret) { 479 460 nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32), 480 461 &cmd, data); 462 + } 463 + result = le32_to_cpu(res.u32); 464 + if ((result >> 16) & 2) { 465 + /* Authentication required */ 466 + ret = nvme_auth_negotiate(ctrl, qid); 467 + if (ret) { 468 + dev_warn(ctrl->device, 469 + "qid %d: authentication setup failed\n", qid); 470 + ret = NVME_SC_AUTH_REQUIRED; 471 + } else { 472 + ret = nvme_auth_wait(ctrl, qid); 473 + if (ret) 474 + dev_warn(ctrl->device, 475 + "qid %u: authentication failed\n", qid); 476 + } 481 477 } 482 478 kfree(data); 483 479 return ret; ··· 586 552 { NVMF_OPT_TOS, "tos=%d" }, 587 553 { NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" }, 588 554 { NVMF_OPT_DISCOVERY, "discovery" }, 555 + { NVMF_OPT_DHCHAP_SECRET, "dhchap_secret=%s" }, 556 + { 
NVMF_OPT_DHCHAP_CTRL_SECRET, "dhchap_ctrl_secret=%s" }, 589 557 { NVMF_OPT_ERR, NULL } 590 558 }; 591 559 ··· 869 833 case NVMF_OPT_DISCOVERY: 870 834 opts->discovery_nqn = true; 871 835 break; 836 + case NVMF_OPT_DHCHAP_SECRET: 837 + p = match_strdup(args); 838 + if (!p) { 839 + ret = -ENOMEM; 840 + goto out; 841 + } 842 + if (strlen(p) < 11 || strncmp(p, "DHHC-1:", 7)) { 843 + pr_err("Invalid DH-CHAP secret %s\n", p); 844 + ret = -EINVAL; 845 + goto out; 846 + } 847 + kfree(opts->dhchap_secret); 848 + opts->dhchap_secret = p; 849 + break; 850 + case NVMF_OPT_DHCHAP_CTRL_SECRET: 851 + p = match_strdup(args); 852 + if (!p) { 853 + ret = -ENOMEM; 854 + goto out; 855 + } 856 + if (strlen(p) < 11 || strncmp(p, "DHHC-1:", 7)) { 857 + pr_err("Invalid DH-CHAP secret %s\n", p); 858 + ret = -EINVAL; 859 + goto out; 860 + } 861 + kfree(opts->dhchap_ctrl_secret); 862 + opts->dhchap_ctrl_secret = p; 863 + break; 872 864 default: 873 865 pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n", 874 866 p); ··· 1015 951 kfree(opts->subsysnqn); 1016 952 kfree(opts->host_traddr); 1017 953 kfree(opts->host_iface); 954 + kfree(opts->dhchap_secret); 955 + kfree(opts->dhchap_ctrl_secret); 1018 956 kfree(opts); 1019 957 } 1020 958 EXPORT_SYMBOL_GPL(nvmf_free_options); ··· 1026 960 NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \ 1027 961 NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\ 1028 962 NVMF_OPT_DISABLE_SQFLOW | NVMF_OPT_DISCOVERY |\ 1029 - NVMF_OPT_FAIL_FAST_TMO) 963 + NVMF_OPT_FAIL_FAST_TMO | NVMF_OPT_DHCHAP_SECRET |\ 964 + NVMF_OPT_DHCHAP_CTRL_SECRET) 1030 965 1031 966 static struct nvme_ctrl * 1032 967 nvmf_create_ctrl(struct device *dev, const char *buf) ··· 1263 1196 BUILD_BUG_ON(sizeof(struct nvmf_connect_command) != 64); 1264 1197 BUILD_BUG_ON(sizeof(struct nvmf_property_get_command) != 64); 1265 1198 BUILD_BUG_ON(sizeof(struct nvmf_property_set_command) != 64); 1199 + BUILD_BUG_ON(sizeof(struct nvmf_auth_send_command) != 64); 1200 + BUILD_BUG_ON(sizeof(struct 
nvmf_auth_receive_command) != 64); 1266 1201 BUILD_BUG_ON(sizeof(struct nvmf_connect_data) != 1024); 1202 + BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_negotiate_data) != 8); 1203 + BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_challenge_data) != 16); 1204 + BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_reply_data) != 16); 1205 + BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_success1_data) != 16); 1206 + BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_success2_data) != 16); 1267 1207 } 1268 1208 1269 1209 MODULE_LICENSE("GPL v2");
+7
drivers/nvme/host/fabrics.h
··· 68 68 NVMF_OPT_FAIL_FAST_TMO = 1 << 20, 69 69 NVMF_OPT_HOST_IFACE = 1 << 21, 70 70 NVMF_OPT_DISCOVERY = 1 << 22, 71 + NVMF_OPT_DHCHAP_SECRET = 1 << 23, 72 + NVMF_OPT_DHCHAP_CTRL_SECRET = 1 << 24, 71 73 }; 72 74 73 75 /** ··· 99 97 * @max_reconnects: maximum number of allowed reconnect attempts before removing 100 98 * the controller, (-1) means reconnect forever, zero means remove 101 99 * immediately; 100 + * @dhchap_secret: DH-HMAC-CHAP secret 101 + * @dhchap_ctrl_secret: DH-HMAC-CHAP controller secret for bi-directional 102 + * authentication 102 103 * @disable_sqflow: disable controller sq flow control 103 104 * @hdr_digest: generate/verify header digest (TCP) 104 105 * @data_digest: generate/verify data digest (TCP) ··· 126 121 unsigned int kato; 127 122 struct nvmf_host *host; 128 123 int max_reconnects; 124 + char *dhchap_secret; 125 + char *dhchap_ctrl_secret; 129 126 bool disable_sqflow; 130 127 bool hdr_digest; 131 128 bool data_digest;
+30
drivers/nvme/host/nvme.h
··· 328 328 struct work_struct ana_work; 329 329 #endif 330 330 331 + #ifdef CONFIG_NVME_AUTH 332 + struct work_struct dhchap_auth_work; 333 + struct list_head dhchap_auth_list; 334 + struct mutex dhchap_auth_mutex; 335 + struct nvme_dhchap_key *host_key; 336 + struct nvme_dhchap_key *ctrl_key; 337 + u16 transaction; 338 + #endif 339 + 331 340 /* Power saving configuration */ 332 341 u64 ps_max_latency_us; 333 342 bool apst_enabled; ··· 1000 991 { 1001 992 return ctrl->sgls & ((1 << 0) | (1 << 1)); 1002 993 } 994 + 995 + #ifdef CONFIG_NVME_AUTH 996 + void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl); 997 + void nvme_auth_stop(struct nvme_ctrl *ctrl); 998 + int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid); 999 + int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid); 1000 + void nvme_auth_reset(struct nvme_ctrl *ctrl); 1001 + void nvme_auth_free(struct nvme_ctrl *ctrl); 1002 + #else 1003 + static inline void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl) {}; 1004 + static inline void nvme_auth_stop(struct nvme_ctrl *ctrl) {}; 1005 + static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid) 1006 + { 1007 + return -EPROTONOSUPPORT; 1008 + } 1009 + static inline int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid) 1010 + { 1011 + return NVME_SC_AUTH_REQUIRED; 1012 + } 1013 + static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {}; 1014 + #endif 1003 1015 1004 1016 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, 1005 1017 u8 opcode);
+1
drivers/nvme/host/rdma.c
··· 1205 1205 struct nvme_rdma_ctrl *ctrl = container_of(work, 1206 1206 struct nvme_rdma_ctrl, err_work); 1207 1207 1208 + nvme_auth_stop(&ctrl->ctrl); 1208 1209 nvme_stop_keep_alive(&ctrl->ctrl); 1209 1210 flush_work(&ctrl->ctrl.async_event_work); 1210 1211 nvme_rdma_teardown_io_queues(ctrl, false);
+1
drivers/nvme/host/tcp.c
··· 2173 2173 struct nvme_tcp_ctrl, err_work); 2174 2174 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; 2175 2175 2176 + nvme_auth_stop(ctrl); 2176 2177 nvme_stop_keep_alive(ctrl); 2177 2178 flush_work(&ctrl->async_event_work); 2178 2179 nvme_tcp_teardown_io_queues(ctrl, false);
+32
drivers/nvme/host/trace.c
··· 287 287 return ret; 288 288 } 289 289 290 + static const char *nvme_trace_fabrics_auth_send(struct trace_seq *p, u8 *spc) 291 + { 292 + const char *ret = trace_seq_buffer_ptr(p); 293 + u8 spsp0 = spc[1]; 294 + u8 spsp1 = spc[2]; 295 + u8 secp = spc[3]; 296 + u32 tl = get_unaligned_le32(spc + 4); 297 + 298 + trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, tl=%u", 299 + spsp0, spsp1, secp, tl); 300 + trace_seq_putc(p, 0); 301 + return ret; 302 + } 303 + 304 + static const char *nvme_trace_fabrics_auth_receive(struct trace_seq *p, u8 *spc) 305 + { 306 + const char *ret = trace_seq_buffer_ptr(p); 307 + u8 spsp0 = spc[1]; 308 + u8 spsp1 = spc[2]; 309 + u8 secp = spc[3]; 310 + u32 al = get_unaligned_le32(spc + 4); 311 + 312 + trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, al=%u", 313 + spsp0, spsp1, secp, al); 314 + trace_seq_putc(p, 0); 315 + return ret; 316 + } 317 + 290 318 static const char *nvme_trace_fabrics_common(struct trace_seq *p, u8 *spc) 291 319 { 292 320 const char *ret = trace_seq_buffer_ptr(p); ··· 334 306 return nvme_trace_fabrics_connect(p, spc); 335 307 case nvme_fabrics_type_property_get: 336 308 return nvme_trace_fabrics_property_get(p, spc); 309 + case nvme_fabrics_type_auth_send: 310 + return nvme_trace_fabrics_auth_send(p, spc); 311 + case nvme_fabrics_type_auth_receive: 312 + return nvme_trace_fabrics_auth_receive(p, spc); 337 313 default: 338 314 return nvme_trace_fabrics_common(p, spc); 339 315 }
+33
include/linux/nvme-auth.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2021 Hannes Reinecke, SUSE Software Solutions 4 + */ 5 + 6 + #ifndef _NVME_AUTH_H 7 + #define _NVME_AUTH_H 8 + 9 + #include <crypto/kpp.h> 10 + 11 + struct nvme_dhchap_key { 12 + u8 *key; 13 + size_t len; 14 + u8 hash; 15 + }; 16 + 17 + u32 nvme_auth_get_seqnum(void); 18 + const char *nvme_auth_dhgroup_name(u8 dhgroup_id); 19 + const char *nvme_auth_dhgroup_kpp(u8 dhgroup_id); 20 + u8 nvme_auth_dhgroup_id(const char *dhgroup_name); 21 + 22 + const char *nvme_auth_hmac_name(u8 hmac_id); 23 + const char *nvme_auth_digest_name(u8 hmac_id); 24 + size_t nvme_auth_hmac_hash_len(u8 hmac_id); 25 + u8 nvme_auth_hmac_id(const char *hmac_name); 26 + 27 + struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret, 28 + u8 key_hash); 29 + void nvme_auth_free_key(struct nvme_dhchap_key *key); 30 + u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn); 31 + int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key); 32 + 33 + #endif /* _NVME_AUTH_H */