Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * NVMe over Fabrics TCP host.
4 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
5 */
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7#include <linux/module.h>
8#include <linux/init.h>
9#include <linux/slab.h>
10#include <linux/err.h>
11#include <linux/nvme-tcp.h>
12#include <net/sock.h>
13#include <net/tcp.h>
14#include <linux/blk-mq.h>
15#include <crypto/hash.h>
16
17#include "nvme.h"
18#include "fabrics.h"
19
20struct nvme_tcp_queue;
21
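/*
 * Per-request send-side state machine: the command capsule PDU goes out
 * first; a write then sends either inline data or an H2C data PDU (in
 * response to an R2T), followed by the payload and, when enabled, the
 * trailing data digest.
 */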
22enum nvme_tcp_send_state {
23 NVME_TCP_SEND_CMD_PDU = 0,
24 NVME_TCP_SEND_H2C_PDU,
25 NVME_TCP_SEND_DATA,
26 NVME_TCP_SEND_DDGST,
27};
28
29struct nvme_tcp_request {
30 struct nvme_request req;
31 void *pdu;
32 struct nvme_tcp_queue *queue;
33 u32 data_len;
34 u32 pdu_len;
35 u32 pdu_sent;
36 u16 ttag;
37 struct list_head entry;
38 __le32 ddgst;
39
40 struct bio *curr_bio;
41 struct iov_iter iter;
42
43 /* send state */
44 size_t offset;
45 size_t data_sent;
46 enum nvme_tcp_send_state state;
47};
48
49enum nvme_tcp_queue_flags {
50 NVME_TCP_Q_ALLOCATED = 0,
51 NVME_TCP_Q_LIVE = 1,
52};
53
54enum nvme_tcp_recv_state {
55 NVME_TCP_RECV_PDU = 0,
56 NVME_TCP_RECV_DATA,
57 NVME_TCP_RECV_DDGST,
58};
59
60struct nvme_tcp_ctrl;
61struct nvme_tcp_queue {
62 struct socket *sock;
63 struct work_struct io_work;
64 int io_cpu;
65
66 spinlock_t lock;
67 struct list_head send_list;
68
69 /* recv state */
70 void *pdu;
71 int pdu_remaining;
72 int pdu_offset;
73 size_t data_remaining;
74 size_t ddgst_remaining;
75
76 /* send state */
77 struct nvme_tcp_request *request;
78
79 int queue_size;
80 size_t cmnd_capsule_len;
81 struct nvme_tcp_ctrl *ctrl;
82 unsigned long flags;
83 bool rd_enabled;
84
85 bool hdr_digest;
86 bool data_digest;
87 struct ahash_request *rcv_hash;
88 struct ahash_request *snd_hash;
89 __le32 exp_ddgst;
90 __le32 recv_ddgst;
91
92 struct page_frag_cache pf_cache;
93
94 void (*state_change)(struct sock *);
95 void (*data_ready)(struct sock *);
96 void (*write_space)(struct sock *);
97};
98
99struct nvme_tcp_ctrl {
100 /* read only in the hot path */
101 struct nvme_tcp_queue *queues;
102 struct blk_mq_tag_set tag_set;
103
104 /* other member variables */
105 struct list_head list;
106 struct blk_mq_tag_set admin_tag_set;
107 struct sockaddr_storage addr;
108 struct sockaddr_storage src_addr;
109 struct nvme_ctrl ctrl;
110
111 struct work_struct err_work;
112 struct delayed_work connect_work;
113 struct nvme_tcp_request async_req;
114};
115
116static LIST_HEAD(nvme_tcp_ctrl_list);
117static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
118static struct workqueue_struct *nvme_tcp_wq;
119static struct blk_mq_ops nvme_tcp_mq_ops;
120static struct blk_mq_ops nvme_tcp_admin_mq_ops;
121
122static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
123{
124 return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
125}
126
127static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
128{
129 return queue - queue->ctrl->queues;
130}
131
132static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
133{
134 u32 queue_idx = nvme_tcp_queue_id(queue);
135
136 if (queue_idx == 0)
137 return queue->ctrl->admin_tag_set.tags[queue_idx];
138 return queue->ctrl->tag_set.tags[queue_idx - 1];
139}
140
141static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
142{
143 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
144}
145
146static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
147{
148 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
149}
150
151static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
152{
153 return queue->cmnd_capsule_len - sizeof(struct nvme_command);
154}
155
156static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
157{
158 return req == &req->queue->ctrl->async_req;
159}
160
161static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
162{
163 struct request *rq;
164 unsigned int bytes;
165
166 if (unlikely(nvme_tcp_async_req(req)))
167 return false; /* async events don't have a request */
168
169 rq = blk_mq_rq_from_pdu(req);
170 bytes = blk_rq_payload_bytes(rq);
171
172 return rq_data_dir(rq) == WRITE && bytes &&
173 bytes <= nvme_tcp_inline_data_size(req->queue);
174}
175
176static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
177{
178 return req->iter.bvec->bv_page;
179}
180
181static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
182{
183 return req->iter.bvec->bv_offset + req->iter.iov_offset;
184}
185
186static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
187{
188 return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
189 req->pdu_len - req->pdu_sent);
190}
191
192static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
193{
194 return req->iter.iov_offset;
195}
196
197static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
198{
199 return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
200 req->pdu_len - req->pdu_sent : 0;
201}
202
203static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
204 int len)
205{
206 return nvme_tcp_pdu_data_left(req) <= len;
207}
208
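/*
 * Initialize req->iter as a bvec iterator over the payload: the request's
 * special payload vector if set, otherwise the bio vectors of the current bio.
 */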
209static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
210 unsigned int dir)
211{
212 struct request *rq = blk_mq_rq_from_pdu(req);
213 struct bio_vec *vec;
214 unsigned int size;
215 int nsegs;
216 size_t offset;
217
218 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
219 vec = &rq->special_vec;
220 nsegs = 1;
221 size = blk_rq_payload_bytes(rq);
222 offset = 0;
223 } else {
224 struct bio *bio = req->curr_bio;
225
226 vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
227 nsegs = bio_segments(bio);
228 size = bio->bi_iter.bi_size;
229 offset = bio->bi_iter.bi_bvec_done;
230 }
231
232 iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
233 req->iter.iov_offset = offset;
234}
235
236static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
237 int len)
238{
239 req->data_sent += len;
240 req->pdu_sent += len;
241 iov_iter_advance(&req->iter, len);
242 if (!iov_iter_count(&req->iter) &&
243 req->data_sent < req->data_len) {
244 req->curr_bio = req->curr_bio->bi_next;
245 nvme_tcp_init_iter(req, WRITE);
246 }
247}
248
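/*
 * Queue a request for transmission: add it to the per-queue send list and
 * kick io_work on the CPU this queue is bound to.
 */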
249static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req)
250{
251 struct nvme_tcp_queue *queue = req->queue;
252
253 spin_lock(&queue->lock);
254 list_add_tail(&req->entry, &queue->send_list);
255 spin_unlock(&queue->lock);
256
257 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
258}
259
260static inline struct nvme_tcp_request *
261nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
262{
263 struct nvme_tcp_request *req;
264
265 spin_lock(&queue->lock);
266 req = list_first_entry_or_null(&queue->send_list,
267 struct nvme_tcp_request, entry);
268 if (req)
269 list_del(&req->entry);
270 spin_unlock(&queue->lock);
271
272 return req;
273}
274
275static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
276 __le32 *dgst)
277{
278 ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
279 crypto_ahash_final(hash);
280}
281
282static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
283 struct page *page, off_t off, size_t len)
284{
285 struct scatterlist sg;
286
287 sg_init_marker(&sg, 1);
288 sg_set_page(&sg, page, len, off);
289 ahash_request_set_crypt(hash, &sg, NULL, len);
290 crypto_ahash_update(hash);
291}
292
293static inline void nvme_tcp_hdgst(struct ahash_request *hash,
294 void *pdu, size_t len)
295{
296 struct scatterlist sg;
297
298 sg_init_one(&sg, pdu, len);
299 ahash_request_set_crypt(hash, &sg, pdu + len, len);
300 crypto_ahash_digest(hash);
301}
302
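/*
 * Verify the header digest: recompute the digest over the received PDU
 * header and compare it with the value the controller sent.
 */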
303static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
304 void *pdu, size_t pdu_len)
305{
306 struct nvme_tcp_hdr *hdr = pdu;
307 __le32 recv_digest;
308 __le32 exp_digest;
309
310 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
311 dev_err(queue->ctrl->ctrl.device,
312 "queue %d: header digest flag is cleared\n",
313 nvme_tcp_queue_id(queue));
314 return -EPROTO;
315 }
316
317 recv_digest = *(__le32 *)(pdu + hdr->hlen);
318 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
319 exp_digest = *(__le32 *)(pdu + hdr->hlen);
320 if (recv_digest != exp_digest) {
321 dev_err(queue->ctrl->ctrl.device,
322 "header digest error: recv %#x expected %#x\n",
323 le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
324 return -EIO;
325 }
326
327 return 0;
328}
329
330static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
331{
332 struct nvme_tcp_hdr *hdr = pdu;
333 u8 digest_len = nvme_tcp_hdgst_len(queue);
334 u32 len;
335
336 len = le32_to_cpu(hdr->plen) - hdr->hlen -
337 ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);
338
339 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
340 dev_err(queue->ctrl->ctrl.device,
341 "queue %d: data digest flag is cleared\n",
342 nvme_tcp_queue_id(queue));
343 return -EPROTO;
344 }
345 crypto_ahash_init(queue->rcv_hash);
346
347 return 0;
348}
349
350static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
351 struct request *rq, unsigned int hctx_idx)
352{
353 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
354
355 page_frag_free(req->pdu);
356}
357
358static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
359 struct request *rq, unsigned int hctx_idx,
360 unsigned int numa_node)
361{
362 struct nvme_tcp_ctrl *ctrl = set->driver_data;
363 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
364 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
365 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
366 u8 hdgst = nvme_tcp_hdgst_len(queue);
367
368 req->pdu = page_frag_alloc(&queue->pf_cache,
369 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
370 GFP_KERNEL | __GFP_ZERO);
371 if (!req->pdu)
372 return -ENOMEM;
373
374 req->queue = queue;
375 nvme_req(rq)->ctrl = &ctrl->ctrl;
376
377 return 0;
378}
379
380static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
381 unsigned int hctx_idx)
382{
383 struct nvme_tcp_ctrl *ctrl = data;
384 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
385
386 hctx->driver_data = queue;
387 return 0;
388}
389
390static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
391 unsigned int hctx_idx)
392{
393 struct nvme_tcp_ctrl *ctrl = data;
394 struct nvme_tcp_queue *queue = &ctrl->queues[0];
395
396 hctx->driver_data = queue;
397 return 0;
398}
399
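/*
 * The receive state is not tracked explicitly; it is derived from which
 * remaining-byte counter (PDU header, data digest, data) is still non-zero.
 */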
400static enum nvme_tcp_recv_state
401nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
402{
403 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
404 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
405 NVME_TCP_RECV_DATA;
406}
407
408static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
409{
410 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
411 nvme_tcp_hdgst_len(queue);
412 queue->pdu_offset = 0;
413 queue->data_remaining = -1;
414 queue->ddgst_remaining = 0;
415}
416
417static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
418{
419 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
420 return;
421
422 queue_work(nvme_wq, &to_tcp_ctrl(ctrl)->err_work);
423}
424
425static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
426 struct nvme_completion *cqe)
427{
428 struct request *rq;
429
430 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
431 if (!rq) {
432 dev_err(queue->ctrl->ctrl.device,
433 "queue %d tag 0x%x not found\n",
434 nvme_tcp_queue_id(queue), cqe->command_id);
435 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
436 return -EINVAL;
437 }
438
439 nvme_end_request(rq, cqe->status, cqe->result);
440
441 return 0;
442}
443
444static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
445 struct nvme_tcp_data_pdu *pdu)
446{
447 struct request *rq;
448
449 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
450 if (!rq) {
451 dev_err(queue->ctrl->ctrl.device,
452 "queue %d tag %#x not found\n",
453 nvme_tcp_queue_id(queue), pdu->command_id);
454 return -ENOENT;
455 }
456
457 if (!blk_rq_payload_bytes(rq)) {
458 dev_err(queue->ctrl->ctrl.device,
459 "queue %d tag %#x unexpected data\n",
460 nvme_tcp_queue_id(queue), rq->tag);
461 return -EIO;
462 }
463
464 queue->data_remaining = le32_to_cpu(pdu->data_length);
465
466 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
467 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
468 dev_err(queue->ctrl->ctrl.device,
469 "queue %d tag %#x SUCCESS set but not last PDU\n",
470 nvme_tcp_queue_id(queue), rq->tag);
471 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
472 return -EPROTO;
473 }
474
475 return 0;
476
477}
478
479static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
480 struct nvme_tcp_rsp_pdu *pdu)
481{
482 struct nvme_completion *cqe = &pdu->cqe;
483 int ret = 0;
484
485 /*
486 * AEN requests are special as they don't time out and can
487 * survive any kind of queue freeze and often don't respond to
488 * aborts. We don't even bother to allocate a struct request
489 * for them but rather special case them here.
490 */
491 if (unlikely(nvme_tcp_queue_id(queue) == 0 &&
492 cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
493 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
494 &cqe->result);
495 else
496 ret = nvme_tcp_process_nvme_cqe(queue, cqe);
497
498 return ret;
499}
500
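/*
 * Prepare an H2C data PDU for the byte range granted by the controller's
 * R2T, after validating that its offset and length fit within the request.
 */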
501static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
502 struct nvme_tcp_r2t_pdu *pdu)
503{
504 struct nvme_tcp_data_pdu *data = req->pdu;
505 struct nvme_tcp_queue *queue = req->queue;
506 struct request *rq = blk_mq_rq_from_pdu(req);
507 u8 hdgst = nvme_tcp_hdgst_len(queue);
508 u8 ddgst = nvme_tcp_ddgst_len(queue);
509
510 req->pdu_len = le32_to_cpu(pdu->r2t_length);
511 req->pdu_sent = 0;
512
513 if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
514 dev_err(queue->ctrl->ctrl.device,
515 "req %d r2t len %u exceeded data len %u (%zu sent)\n",
516 rq->tag, req->pdu_len, req->data_len,
517 req->data_sent);
518 return -EPROTO;
519 }
520
521 if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
522 dev_err(queue->ctrl->ctrl.device,
523 "req %d unexpected r2t offset %u (expected %zu)\n",
524 rq->tag, le32_to_cpu(pdu->r2t_offset),
525 req->data_sent);
526 return -EPROTO;
527 }
528
529 memset(data, 0, sizeof(*data));
530 data->hdr.type = nvme_tcp_h2c_data;
531 data->hdr.flags = NVME_TCP_F_DATA_LAST;
532 if (queue->hdr_digest)
533 data->hdr.flags |= NVME_TCP_F_HDGST;
534 if (queue->data_digest)
535 data->hdr.flags |= NVME_TCP_F_DDGST;
536 data->hdr.hlen = sizeof(*data);
537 data->hdr.pdo = data->hdr.hlen + hdgst;
538 data->hdr.plen =
539 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
540 data->ttag = pdu->ttag;
541 data->command_id = rq->tag;
542 data->data_offset = cpu_to_le32(req->data_sent);
543 data->data_length = cpu_to_le32(req->pdu_len);
544 return 0;
545}
546
547static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
548 struct nvme_tcp_r2t_pdu *pdu)
549{
550 struct nvme_tcp_request *req;
551 struct request *rq;
552 int ret;
553
554 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
555 if (!rq) {
556 dev_err(queue->ctrl->ctrl.device,
557 "queue %d tag %#x not found\n",
558 nvme_tcp_queue_id(queue), pdu->command_id);
559 return -ENOENT;
560 }
561 req = blk_mq_rq_to_pdu(rq);
562
563 ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
564 if (unlikely(ret))
565 return ret;
566
567 req->state = NVME_TCP_SEND_H2C_PDU;
568 req->offset = 0;
569
570 nvme_tcp_queue_request(req);
571
572 return 0;
573}
574
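/*
 * Accumulate the PDU header from the skb; once it is complete, verify the
 * header digest, prime the data digest if needed, and dispatch on PDU type.
 */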
575static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
576 unsigned int *offset, size_t *len)
577{
578 struct nvme_tcp_hdr *hdr;
579 char *pdu = queue->pdu;
580 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
581 int ret;
582
583 ret = skb_copy_bits(skb, *offset,
584 &pdu[queue->pdu_offset], rcv_len);
585 if (unlikely(ret))
586 return ret;
587
588 queue->pdu_remaining -= rcv_len;
589 queue->pdu_offset += rcv_len;
590 *offset += rcv_len;
591 *len -= rcv_len;
592 if (queue->pdu_remaining)
593 return 0;
594
595 hdr = queue->pdu;
596 if (queue->hdr_digest) {
597 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
598 if (unlikely(ret))
599 return ret;
600 }
601
602
603 if (queue->data_digest) {
604 ret = nvme_tcp_check_ddgst(queue, queue->pdu);
605 if (unlikely(ret))
606 return ret;
607 }
608
609 switch (hdr->type) {
610 case nvme_tcp_c2h_data:
611 ret = nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
612 break;
613 case nvme_tcp_rsp:
614 nvme_tcp_init_recv_ctx(queue);
615 ret = nvme_tcp_handle_comp(queue, (void *)queue->pdu);
616 break;
617 case nvme_tcp_r2t:
618 nvme_tcp_init_recv_ctx(queue);
619 ret = nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
620 break;
621 default:
622 dev_err(queue->ctrl->ctrl.device,
623 "unsupported pdu type (%d)\n", hdr->type);
624 return -EINVAL;
625 }
626
627 return ret;
628}
629
630static inline void nvme_tcp_end_request(struct request *rq, u16 status)
631{
632 union nvme_result res = {};
633
634 nvme_end_request(rq, cpu_to_le16(status << 1), res);
635}
636
637
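/*
 * Copy C2H data from the skb into the request's bio iterator, advancing to
 * the next bio as needed and feeding the receive hash when data digest is
 * enabled.
 */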
638static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
639 unsigned int *offset, size_t *len)
640{
641 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
642 struct nvme_tcp_request *req;
643 struct request *rq;
644
645 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
646 if (!rq) {
647 dev_err(queue->ctrl->ctrl.device,
648 "queue %d tag %#x not found\n",
649 nvme_tcp_queue_id(queue), pdu->command_id);
650 return -ENOENT;
651 }
652 req = blk_mq_rq_to_pdu(rq);
653
654 while (true) {
655 int recv_len, ret;
656
657 recv_len = min_t(size_t, *len, queue->data_remaining);
658 if (!recv_len)
659 break;
660
661 if (!iov_iter_count(&req->iter)) {
662 req->curr_bio = req->curr_bio->bi_next;
663
664 /*
665 * If we don't have any bios it means that the controller
666 * sent more data than we requested, hence error
667 */
668 if (!req->curr_bio) {
669 dev_err(queue->ctrl->ctrl.device,
670 "queue %d no space in request %#x",
671 nvme_tcp_queue_id(queue), rq->tag);
672 nvme_tcp_init_recv_ctx(queue);
673 return -EIO;
674 }
675 nvme_tcp_init_iter(req, READ);
676 }
677
678 /* we can read only from what is left in this bio */
679 recv_len = min_t(size_t, recv_len,
680 iov_iter_count(&req->iter));
681
682 if (queue->data_digest)
683 ret = skb_copy_and_hash_datagram_iter(skb, *offset,
684 &req->iter, recv_len, queue->rcv_hash);
685 else
686 ret = skb_copy_datagram_iter(skb, *offset,
687 &req->iter, recv_len);
688 if (ret) {
689 dev_err(queue->ctrl->ctrl.device,
690 "queue %d failed to copy request %#x data",
691 nvme_tcp_queue_id(queue), rq->tag);
692 return ret;
693 }
694
695 *len -= recv_len;
696 *offset += recv_len;
697 queue->data_remaining -= recv_len;
698 }
699
700 if (!queue->data_remaining) {
701 if (queue->data_digest) {
702 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
703 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
704 } else {
705 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS)
706 nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
707 nvme_tcp_init_recv_ctx(queue);
708 }
709 }
710
711 return 0;
712}
713
714static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
715 struct sk_buff *skb, unsigned int *offset, size_t *len)
716{
717 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
718 char *ddgst = (char *)&queue->recv_ddgst;
719 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
720 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
721 int ret;
722
723 ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
724 if (unlikely(ret))
725 return ret;
726
727 queue->ddgst_remaining -= recv_len;
728 *offset += recv_len;
729 *len -= recv_len;
730 if (queue->ddgst_remaining)
731 return 0;
732
733 if (queue->recv_ddgst != queue->exp_ddgst) {
734 dev_err(queue->ctrl->ctrl.device,
735 "data digest error: recv %#x expected %#x\n",
736 le32_to_cpu(queue->recv_ddgst),
737 le32_to_cpu(queue->exp_ddgst));
738 return -EIO;
739 }
740
741 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
742 struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
743 pdu->command_id);
744
745 nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
746 }
747
748 nvme_tcp_init_recv_ctx(queue);
749 return 0;
750}
751
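/*
 * tcp_read_sock() callback: consume the skb according to the current
 * receive state (PDU header, data, or data digest) until the segment is
 * drained or an error occurs.
 */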
752static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
753 unsigned int offset, size_t len)
754{
755 struct nvme_tcp_queue *queue = desc->arg.data;
756 size_t consumed = len;
757 int result;
758
759 while (len) {
760 switch (nvme_tcp_recv_state(queue)) {
761 case NVME_TCP_RECV_PDU:
762 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
763 break;
764 case NVME_TCP_RECV_DATA:
765 result = nvme_tcp_recv_data(queue, skb, &offset, &len);
766 break;
767 case NVME_TCP_RECV_DDGST:
768 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
769 break;
770 default:
771 result = -EFAULT;
772 }
773 if (result) {
774 dev_err(queue->ctrl->ctrl.device,
775 "receive failed: %d\n", result);
776 queue->rd_enabled = false;
777 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
778 return result;
779 }
780 }
781
782 return consumed;
783}
784
785static void nvme_tcp_data_ready(struct sock *sk)
786{
787 struct nvme_tcp_queue *queue;
788
789 read_lock(&sk->sk_callback_lock);
790 queue = sk->sk_user_data;
791 if (likely(queue && queue->rd_enabled))
792 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
793 read_unlock(&sk->sk_callback_lock);
794}
795
796static void nvme_tcp_write_space(struct sock *sk)
797{
798 struct nvme_tcp_queue *queue;
799
800 read_lock_bh(&sk->sk_callback_lock);
801 queue = sk->sk_user_data;
802 if (likely(queue && sk_stream_is_writeable(sk))) {
803 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
804 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
805 }
806 read_unlock_bh(&sk->sk_callback_lock);
807}
808
809static void nvme_tcp_state_change(struct sock *sk)
810{
811 struct nvme_tcp_queue *queue;
812
813 read_lock(&sk->sk_callback_lock);
814 queue = sk->sk_user_data;
815 if (!queue)
816 goto done;
817
818 switch (sk->sk_state) {
819 case TCP_CLOSE:
820 case TCP_CLOSE_WAIT:
821 case TCP_LAST_ACK:
822 case TCP_FIN_WAIT1:
823 case TCP_FIN_WAIT2:
824 /* fallthrough */
825 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
826 break;
827 default:
828 dev_info(queue->ctrl->ctrl.device,
829 "queue %d socket state %d\n",
830 nvme_tcp_queue_id(queue), sk->sk_state);
831 }
832
833 queue->state_change(sk);
834done:
835 read_unlock(&sk->sk_callback_lock);
836}
837
838static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
839{
840 queue->request = NULL;
841}
842
843static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
844{
845 nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_DATA_XFER_ERROR);
846}
847
848static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
849{
850 struct nvme_tcp_queue *queue = req->queue;
851
852 while (true) {
853 struct page *page = nvme_tcp_req_cur_page(req);
854 size_t offset = nvme_tcp_req_cur_offset(req);
855 size_t len = nvme_tcp_req_cur_length(req);
856 bool last = nvme_tcp_pdu_last_send(req, len);
857 int ret, flags = MSG_DONTWAIT;
858
859 if (last && !queue->data_digest)
860 flags |= MSG_EOR;
861 else
862 flags |= MSG_MORE;
863
864 ret = kernel_sendpage(queue->sock, page, offset, len, flags);
865 if (ret <= 0)
866 return ret;
867
868 nvme_tcp_advance_req(req, ret);
869 if (queue->data_digest)
870 nvme_tcp_ddgst_update(queue->snd_hash, page,
871 offset, ret);
872
873 /* fully successful last write */
874 if (last && ret == len) {
875 if (queue->data_digest) {
876 nvme_tcp_ddgst_final(queue->snd_hash,
877 &req->ddgst);
878 req->state = NVME_TCP_SEND_DDGST;
879 req->offset = 0;
880 } else {
881 nvme_tcp_done_send_req(queue);
882 }
883 return 1;
884 }
885 }
886 return -EAGAIN;
887}
888
889static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
890{
891 struct nvme_tcp_queue *queue = req->queue;
892 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
893 bool inline_data = nvme_tcp_has_inline_data(req);
894 int flags = MSG_DONTWAIT | (inline_data ? MSG_MORE : MSG_EOR);
895 u8 hdgst = nvme_tcp_hdgst_len(queue);
896 int len = sizeof(*pdu) + hdgst - req->offset;
897 int ret;
898
899 if (queue->hdr_digest && !req->offset)
900 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
901
902 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
903 offset_in_page(pdu) + req->offset, len, flags);
904 if (unlikely(ret <= 0))
905 return ret;
906
907 len -= ret;
908 if (!len) {
909 if (inline_data) {
910 req->state = NVME_TCP_SEND_DATA;
911 if (queue->data_digest)
912 crypto_ahash_init(queue->snd_hash);
913 nvme_tcp_init_iter(req, WRITE);
914 } else {
915 nvme_tcp_done_send_req(queue);
916 }
917 return 1;
918 }
919 req->offset += ret;
920
921 return -EAGAIN;
922}
923
924static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
925{
926 struct nvme_tcp_queue *queue = req->queue;
927 struct nvme_tcp_data_pdu *pdu = req->pdu;
928 u8 hdgst = nvme_tcp_hdgst_len(queue);
929 int len = sizeof(*pdu) - req->offset + hdgst;
930 int ret;
931
932 if (queue->hdr_digest && !req->offset)
933 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
934
935 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
936 offset_in_page(pdu) + req->offset, len,
937 MSG_DONTWAIT | MSG_MORE);
938 if (unlikely(ret <= 0))
939 return ret;
940
941 len -= ret;
942 if (!len) {
943 req->state = NVME_TCP_SEND_DATA;
944 if (queue->data_digest)
945 crypto_ahash_init(queue->snd_hash);
946 if (!req->data_sent)
947 nvme_tcp_init_iter(req, WRITE);
948 return 1;
949 }
950 req->offset += ret;
951
952 return -EAGAIN;
953}
954
955static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
956{
957 struct nvme_tcp_queue *queue = req->queue;
958 int ret;
959 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };
960 struct kvec iov = {
961 .iov_base = (u8 *)&req->ddgst + req->offset,
962 .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
963 };
964
965 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
966 if (unlikely(ret <= 0))
967 return ret;
968
969 if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
970 nvme_tcp_done_send_req(queue);
971 return 1;
972 }
973
974 req->offset += ret;
975 return -EAGAIN;
976}
977
978static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
979{
980 struct nvme_tcp_request *req;
981 int ret = 1;
982
983 if (!queue->request) {
984 queue->request = nvme_tcp_fetch_request(queue);
985 if (!queue->request)
986 return 0;
987 }
988 req = queue->request;
989
990 if (req->state == NVME_TCP_SEND_CMD_PDU) {
991 ret = nvme_tcp_try_send_cmd_pdu(req);
992 if (ret <= 0)
993 goto done;
994 if (!nvme_tcp_has_inline_data(req))
995 return ret;
996 }
997
998 if (req->state == NVME_TCP_SEND_H2C_PDU) {
999 ret = nvme_tcp_try_send_data_pdu(req);
1000 if (ret <= 0)
1001 goto done;
1002 }
1003
1004 if (req->state == NVME_TCP_SEND_DATA) {
1005 ret = nvme_tcp_try_send_data(req);
1006 if (ret <= 0)
1007 goto done;
1008 }
1009
1010 if (req->state == NVME_TCP_SEND_DDGST)
1011 ret = nvme_tcp_try_send_ddgst(req);
1012done:
1013 if (ret == -EAGAIN)
1014 ret = 0;
1015 return ret;
1016}
1017
1018static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
1019{
1020 struct sock *sk = queue->sock->sk;
1021 read_descriptor_t rd_desc;
1022 int consumed;
1023
1024 rd_desc.arg.data = queue;
1025 rd_desc.count = 1;
1026 lock_sock(sk);
1027 consumed = tcp_read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
1028 release_sock(sk);
1029 return consumed;
1030}
1031
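/*
 * Per-queue I/O worker: alternate between sending and receiving for as long
 * as either makes progress; reschedule once the time quota expires.
 */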
1032static void nvme_tcp_io_work(struct work_struct *w)
1033{
1034 struct nvme_tcp_queue *queue =
1035 container_of(w, struct nvme_tcp_queue, io_work);
1036 unsigned long deadline = jiffies + msecs_to_jiffies(1);
1037
1038 do {
1039 bool pending = false;
1040 int result;
1041
1042 result = nvme_tcp_try_send(queue);
1043 if (result > 0) {
1044 pending = true;
1045 } else if (unlikely(result < 0)) {
1046 dev_err(queue->ctrl->ctrl.device,
1047 "failed to send request %d\n", result);
1048 if (result != -EPIPE)
1049 nvme_tcp_fail_request(queue->request);
1050 nvme_tcp_done_send_req(queue);
1051 return;
1052 }
1053
1054 result = nvme_tcp_try_recv(queue);
1055 if (result > 0)
1056 pending = true;
1057
1058 if (!pending)
1059 return;
1060
1061 } while (!time_after(jiffies, deadline)); /* quota is exhausted */
1062
1063 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
1064}
1065
1066static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
1067{
1068 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
1069
1070 ahash_request_free(queue->rcv_hash);
1071 ahash_request_free(queue->snd_hash);
1072 crypto_free_ahash(tfm);
1073}
1074
1075static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
1076{
1077 struct crypto_ahash *tfm;
1078
1079 tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
1080 if (IS_ERR(tfm))
1081 return PTR_ERR(tfm);
1082
1083 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1084 if (!queue->snd_hash)
1085 goto free_tfm;
1086 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
1087
1088 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1089 if (!queue->rcv_hash)
1090 goto free_snd_hash;
1091 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
1092
1093 return 0;
1094free_snd_hash:
1095 ahash_request_free(queue->snd_hash);
1096free_tfm:
1097 crypto_free_ahash(tfm);
1098 return -ENOMEM;
1099}
1100
1101static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
1102{
1103 struct nvme_tcp_request *async = &ctrl->async_req;
1104
1105 page_frag_free(async->pdu);
1106}
1107
1108static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
1109{
1110 struct nvme_tcp_queue *queue = &ctrl->queues[0];
1111 struct nvme_tcp_request *async = &ctrl->async_req;
1112 u8 hdgst = nvme_tcp_hdgst_len(queue);
1113
1114 async->pdu = page_frag_alloc(&queue->pf_cache,
1115 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
1116 GFP_KERNEL | __GFP_ZERO);
1117 if (!async->pdu)
1118 return -ENOMEM;
1119
1120 async->queue = &ctrl->queues[0];
1121 return 0;
1122}
1123
1124static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
1125{
1126 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1127 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1128
1129 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1130 return;
1131
1132 if (queue->hdr_digest || queue->data_digest)
1133 nvme_tcp_free_crypto(queue);
1134
1135 sock_release(queue->sock);
1136 kfree(queue->pdu);
1137}
1138
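/*
 * NVMe/TCP connection initialization: send an ICReq PDU and validate the
 * controller's ICResp (PDU format version, digest negotiation, CPDA).
 */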
1139static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
1140{
1141 struct nvme_tcp_icreq_pdu *icreq;
1142 struct nvme_tcp_icresp_pdu *icresp;
1143 struct msghdr msg = {};
1144 struct kvec iov;
1145 bool ctrl_hdgst, ctrl_ddgst;
1146 int ret;
1147
1148 icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
1149 if (!icreq)
1150 return -ENOMEM;
1151
1152 icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
1153 if (!icresp) {
1154 ret = -ENOMEM;
1155 goto free_icreq;
1156 }
1157
1158 icreq->hdr.type = nvme_tcp_icreq;
1159 icreq->hdr.hlen = sizeof(*icreq);
1160 icreq->hdr.pdo = 0;
1161 icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
1162 icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
1163 icreq->maxr2t = 0; /* single inflight r2t supported */
1164 icreq->hpda = 0; /* no alignment constraint */
1165 if (queue->hdr_digest)
1166 icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
1167 if (queue->data_digest)
1168 icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
1169
1170 iov.iov_base = icreq;
1171 iov.iov_len = sizeof(*icreq);
1172 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1173 if (ret < 0)
1174 goto free_icresp;
1175
1176 memset(&msg, 0, sizeof(msg));
1177 iov.iov_base = icresp;
1178 iov.iov_len = sizeof(*icresp);
1179 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1180 iov.iov_len, msg.msg_flags);
1181 if (ret < 0)
1182 goto free_icresp;
1183
1184 ret = -EINVAL;
1185 if (icresp->hdr.type != nvme_tcp_icresp) {
1186 pr_err("queue %d: bad type returned %d\n",
1187 nvme_tcp_queue_id(queue), icresp->hdr.type);
1188 goto free_icresp;
1189 }
1190
1191 if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
1192 pr_err("queue %d: bad pdu length returned %d\n",
1193 nvme_tcp_queue_id(queue), icresp->hdr.plen);
1194 goto free_icresp;
1195 }
1196
1197 if (icresp->pfv != NVME_TCP_PFV_1_0) {
1198 pr_err("queue %d: bad pfv returned %d\n",
1199 nvme_tcp_queue_id(queue), icresp->pfv);
1200 goto free_icresp;
1201 }
1202
1203 ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
1204 if ((queue->data_digest && !ctrl_ddgst) ||
1205 (!queue->data_digest && ctrl_ddgst)) {
1206 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1207 nvme_tcp_queue_id(queue),
1208 queue->data_digest ? "enabled" : "disabled",
1209 ctrl_ddgst ? "enabled" : "disabled");
1210 goto free_icresp;
1211 }
1212
1213 ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
1214 if ((queue->hdr_digest && !ctrl_hdgst) ||
1215 (!queue->hdr_digest && ctrl_hdgst)) {
1216 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1217 nvme_tcp_queue_id(queue),
1218 queue->hdr_digest ? "enabled" : "disabled",
1219 ctrl_hdgst ? "enabled" : "disabled");
1220 goto free_icresp;
1221 }
1222
1223 if (icresp->cpda != 0) {
1224 pr_err("queue %d: unsupported cpda returned %d\n",
1225 nvme_tcp_queue_id(queue), icresp->cpda);
1226 goto free_icresp;
1227 }
1228
1229 ret = 0;
1230free_icresp:
1231 kfree(icresp);
1232free_icreq:
1233 kfree(icreq);
1234 return ret;
1235}
1236
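/*
 * Allocate a queue: create and tune the socket, optionally bind to the host
 * traddr, connect to the target, run the ICReq/ICResp handshake, and install
 * the socket callbacks.
 */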
1237static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
1238 int qid, size_t queue_size)
1239{
1240 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1241 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1242 struct linger sol = { .l_onoff = 1, .l_linger = 0 };
1243 int ret, opt, rcv_pdu_size, n;
1244
1245 queue->ctrl = ctrl;
1246 INIT_LIST_HEAD(&queue->send_list);
1247 spin_lock_init(&queue->lock);
1248 INIT_WORK(&queue->io_work, nvme_tcp_io_work);
1249 queue->queue_size = queue_size;
1250
1251 if (qid > 0)
1252 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
1253 else
1254 queue->cmnd_capsule_len = sizeof(struct nvme_command) +
1255 NVME_TCP_ADMIN_CCSZ;
1256
1257 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
1258 IPPROTO_TCP, &queue->sock);
1259 if (ret) {
1260 dev_err(ctrl->ctrl.device,
1261 "failed to create socket: %d\n", ret);
1262 return ret;
1263 }
1264
1265 /* Single syn retry */
1266 opt = 1;
1267 ret = kernel_setsockopt(queue->sock, IPPROTO_TCP, TCP_SYNCNT,
1268 (char *)&opt, sizeof(opt));
1269 if (ret) {
1270 dev_err(ctrl->ctrl.device,
1271 "failed to set TCP_SYNCNT sock opt %d\n", ret);
1272 goto err_sock;
1273 }
1274
1275 /* Set TCP no delay */
1276 opt = 1;
1277 ret = kernel_setsockopt(queue->sock, IPPROTO_TCP,
1278 TCP_NODELAY, (char *)&opt, sizeof(opt));
1279 if (ret) {
1280 dev_err(ctrl->ctrl.device,
1281 "failed to set TCP_NODELAY sock opt %d\n", ret);
1282 goto err_sock;
1283 }
1284
1285 /*
1286 * Cleanup whatever is sitting in the TCP transmit queue on socket
1287 * close. This is done to prevent stale data from being sent should
1288 * the network connection be restored before TCP times out.
1289 */
1290 ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_LINGER,
1291 (char *)&sol, sizeof(sol));
1292 if (ret) {
1293 dev_err(ctrl->ctrl.device,
1294 "failed to set SO_LINGER sock opt %d\n", ret);
1295 goto err_sock;
1296 }
1297
1298 queue->sock->sk->sk_allocation = GFP_ATOMIC;
1299 if (!qid)
1300 n = 0;
1301 else
1302 n = (qid - 1) % num_online_cpus();
1303 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
1304 queue->request = NULL;
1305 queue->data_remaining = 0;
1306 queue->ddgst_remaining = 0;
1307 queue->pdu_remaining = 0;
1308 queue->pdu_offset = 0;
1309 sk_set_memalloc(queue->sock->sk);
1310
1311 if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) {
1312 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
1313 sizeof(ctrl->src_addr));
1314 if (ret) {
1315 dev_err(ctrl->ctrl.device,
1316 "failed to bind queue %d socket %d\n",
1317 qid, ret);
1318 goto err_sock;
1319 }
1320 }
1321
1322 queue->hdr_digest = nctrl->opts->hdr_digest;
1323 queue->data_digest = nctrl->opts->data_digest;
1324 if (queue->hdr_digest || queue->data_digest) {
1325 ret = nvme_tcp_alloc_crypto(queue);
1326 if (ret) {
1327 dev_err(ctrl->ctrl.device,
1328 "failed to allocate queue %d crypto\n", qid);
1329 goto err_sock;
1330 }
1331 }
1332
1333 rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
1334 nvme_tcp_hdgst_len(queue);
1335 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1336 if (!queue->pdu) {
1337 ret = -ENOMEM;
1338 goto err_crypto;
1339 }
1340
1341 dev_dbg(ctrl->ctrl.device, "connecting queue %d\n",
1342 nvme_tcp_queue_id(queue));
1343
1344 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
1345 sizeof(ctrl->addr), 0);
1346 if (ret) {
1347 dev_err(ctrl->ctrl.device,
1348 "failed to connect socket: %d\n", ret);
1349 goto err_rcv_pdu;
1350 }
1351
1352 ret = nvme_tcp_init_connection(queue);
1353 if (ret)
1354 goto err_init_connect;
1355
1356 queue->rd_enabled = true;
1357 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
1358 nvme_tcp_init_recv_ctx(queue);
1359
1360 write_lock_bh(&queue->sock->sk->sk_callback_lock);
1361 queue->sock->sk->sk_user_data = queue;
1362 queue->state_change = queue->sock->sk->sk_state_change;
1363 queue->data_ready = queue->sock->sk->sk_data_ready;
1364 queue->write_space = queue->sock->sk->sk_write_space;
1365 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
1366 queue->sock->sk->sk_state_change = nvme_tcp_state_change;
1367 queue->sock->sk->sk_write_space = nvme_tcp_write_space;
1368 write_unlock_bh(&queue->sock->sk->sk_callback_lock);
1369
1370 return 0;
1371
1372err_init_connect:
1373 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1374err_rcv_pdu:
1375 kfree(queue->pdu);
1376err_crypto:
1377 if (queue->hdr_digest || queue->data_digest)
1378 nvme_tcp_free_crypto(queue);
1379err_sock:
1380 sock_release(queue->sock);
1381 queue->sock = NULL;
1382 return ret;
1383}
1384
1385static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
1386{
1387 struct socket *sock = queue->sock;
1388
1389 write_lock_bh(&sock->sk->sk_callback_lock);
1390 sock->sk->sk_user_data = NULL;
1391 sock->sk->sk_data_ready = queue->data_ready;
1392 sock->sk->sk_state_change = queue->state_change;
1393 sock->sk->sk_write_space = queue->write_space;
1394 write_unlock_bh(&sock->sk->sk_callback_lock);
1395}
1396
1397static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
1398{
1399 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1400 nvme_tcp_restore_sock_calls(queue);
1401 cancel_work_sync(&queue->io_work);
1402}
1403
1404static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
1405{
1406 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1407 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1408
1409 if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1410 return;
1411
1412 __nvme_tcp_stop_queue(queue);
1413}
1414
1415static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
1416{
1417 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1418 int ret;
1419
1420 if (idx)
1421 ret = nvmf_connect_io_queue(nctrl, idx, false);
1422 else
1423 ret = nvmf_connect_admin_queue(nctrl);
1424
1425 if (!ret) {
1426 set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
1427 } else {
1428 __nvme_tcp_stop_queue(&ctrl->queues[idx]);
1429 dev_err(nctrl->device,
1430 "failed to connect queue: %d ret=%d\n", idx, ret);
1431 }
1432 return ret;
1433}
1434
1435static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
1436 bool admin)
1437{
1438 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1439 struct blk_mq_tag_set *set;
1440 int ret;
1441
1442 if (admin) {
1443 set = &ctrl->admin_tag_set;
1444 memset(set, 0, sizeof(*set));
1445 set->ops = &nvme_tcp_admin_mq_ops;
1446 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
1447 set->reserved_tags = 2; /* connect + keep-alive */
1448 set->numa_node = NUMA_NO_NODE;
1449 set->cmd_size = sizeof(struct nvme_tcp_request);
1450 set->driver_data = ctrl;
1451 set->nr_hw_queues = 1;
1452 set->timeout = ADMIN_TIMEOUT;
1453 } else {
1454 set = &ctrl->tag_set;
1455 memset(set, 0, sizeof(*set));
1456 set->ops = &nvme_tcp_mq_ops;
1457 set->queue_depth = nctrl->sqsize + 1;
1458 set->reserved_tags = 1; /* fabric connect */
1459 set->numa_node = NUMA_NO_NODE;
1460 set->flags = BLK_MQ_F_SHOULD_MERGE;
1461 set->cmd_size = sizeof(struct nvme_tcp_request);
1462 set->driver_data = ctrl;
1463 set->nr_hw_queues = nctrl->queue_count - 1;
1464 set->timeout = NVME_IO_TIMEOUT;
1465 set->nr_maps = 2 /* default + read */;
1466 }
1467
1468 ret = blk_mq_alloc_tag_set(set);
1469 if (ret)
1470 return ERR_PTR(ret);
1471
1472 return set;
1473}
1474
1475static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
1476{
1477 if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1478 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
1479 to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
1480 }
1481
1482 nvme_tcp_free_queue(ctrl, 0);
1483}
1484
1485static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
1486{
1487 int i;
1488
1489 for (i = 1; i < ctrl->queue_count; i++)
1490 nvme_tcp_free_queue(ctrl, i);
1491}
1492
1493static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
1494{
1495 int i;
1496
1497 for (i = 1; i < ctrl->queue_count; i++)
1498 nvme_tcp_stop_queue(ctrl, i);
1499}
1500
1501static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
1502{
1503 int i, ret = 0;
1504
1505 for (i = 1; i < ctrl->queue_count; i++) {
1506 ret = nvme_tcp_start_queue(ctrl, i);
1507 if (ret)
1508 goto out_stop_queues;
1509 }
1510
1511 return 0;
1512
1513out_stop_queues:
1514 for (i--; i >= 1; i--)
1515 nvme_tcp_stop_queue(ctrl, i);
1516 return ret;
1517}
1518
1519static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
1520{
1521 int ret;
1522
1523 ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
1524 if (ret)
1525 return ret;
1526
1527 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
1528 if (ret)
1529 goto out_free_queue;
1530
1531 return 0;
1532
1533out_free_queue:
1534 nvme_tcp_free_queue(ctrl, 0);
1535 return ret;
1536}
1537
1538static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1539{
1540 int i, ret;
1541
1542 for (i = 1; i < ctrl->queue_count; i++) {
1543 ret = nvme_tcp_alloc_queue(ctrl, i,
1544 ctrl->sqsize + 1);
1545 if (ret)
1546 goto out_free_queues;
1547 }
1548
1549 return 0;
1550
1551out_free_queues:
1552 for (i--; i >= 1; i--)
1553 nvme_tcp_free_queue(ctrl, i);
1554
1555 return ret;
1556}
1557
1558static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
1559{
1560 unsigned int nr_io_queues;
1561
1562 nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
1563 nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
1564
1565 return nr_io_queues;
1566}
1567
1568static int nvme_alloc_io_queues(struct nvme_ctrl *ctrl)
1569{
1570 unsigned int nr_io_queues;
1571 int ret;
1572
1573 nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
1574 ret = nvme_set_queue_count(ctrl, &nr_io_queues);
1575 if (ret)
1576 return ret;
1577
1578 ctrl->queue_count = nr_io_queues + 1;
1579 if (ctrl->queue_count < 2)
1580 return 0;
1581
1582 dev_info(ctrl->device,
1583 "creating %d I/O queues.\n", nr_io_queues);
1584
1585 return nvme_tcp_alloc_io_queues(ctrl);
1586}
1587
1588static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1589{
1590 nvme_tcp_stop_io_queues(ctrl);
1591 if (remove) {
1592 blk_cleanup_queue(ctrl->connect_q);
1593 blk_mq_free_tag_set(ctrl->tagset);
1594 }
1595 nvme_tcp_free_io_queues(ctrl);
1596}
1597
1598static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1599{
1600 int ret;
1601
1602 ret = nvme_alloc_io_queues(ctrl);
1603 if (ret)
1604 return ret;
1605
1606 if (new) {
1607 ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
1608 if (IS_ERR(ctrl->tagset)) {
1609 ret = PTR_ERR(ctrl->tagset);
1610 goto out_free_io_queues;
1611 }
1612
1613 ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
1614 if (IS_ERR(ctrl->connect_q)) {
1615 ret = PTR_ERR(ctrl->connect_q);
1616 goto out_free_tag_set;
1617 }
1618 } else {
1619 blk_mq_update_nr_hw_queues(ctrl->tagset,
1620 ctrl->queue_count - 1);
1621 }
1622
1623 ret = nvme_tcp_start_io_queues(ctrl);
1624 if (ret)
1625 goto out_cleanup_connect_q;
1626
1627 return 0;
1628
1629out_cleanup_connect_q:
1630 if (new)
1631 blk_cleanup_queue(ctrl->connect_q);
1632out_free_tag_set:
1633 if (new)
1634 blk_mq_free_tag_set(ctrl->tagset);
1635out_free_io_queues:
1636 nvme_tcp_free_io_queues(ctrl);
1637 return ret;
1638}
1639
1640static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
1641{
1642 nvme_tcp_stop_queue(ctrl, 0);
1643 if (remove) {
1644 blk_cleanup_queue(ctrl->admin_q);
1645 blk_mq_free_tag_set(ctrl->admin_tagset);
1646 }
1647 nvme_tcp_free_admin_queue(ctrl);
1648}
1649
1650static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
1651{
1652 int error;
1653
1654 error = nvme_tcp_alloc_admin_queue(ctrl);
1655 if (error)
1656 return error;
1657
1658 if (new) {
1659 ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
1660 if (IS_ERR(ctrl->admin_tagset)) {
1661 error = PTR_ERR(ctrl->admin_tagset);
1662 goto out_free_queue;
1663 }
1664
1665 ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
1666 if (IS_ERR(ctrl->admin_q)) {
1667 error = PTR_ERR(ctrl->admin_q);
1668 goto out_free_tagset;
1669 }
1670 }
1671
1672 error = nvme_tcp_start_queue(ctrl, 0);
1673 if (error)
1674 goto out_cleanup_queue;
1675
1676 error = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
1677 if (error) {
1678 dev_err(ctrl->device,
1679 "prop_get NVME_REG_CAP failed\n");
1680 goto out_stop_queue;
1681 }
1682
1683 ctrl->sqsize = min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
1684
1685 error = nvme_enable_ctrl(ctrl, ctrl->cap);
1686 if (error)
1687 goto out_stop_queue;
1688
1689 error = nvme_init_identify(ctrl);
1690 if (error)
1691 goto out_stop_queue;
1692
1693 return 0;
1694
1695out_stop_queue:
1696 nvme_tcp_stop_queue(ctrl, 0);
1697out_cleanup_queue:
1698 if (new)
1699 blk_cleanup_queue(ctrl->admin_q);
1700out_free_tagset:
1701 if (new)
1702 blk_mq_free_tag_set(ctrl->admin_tagset);
1703out_free_queue:
1704 nvme_tcp_free_admin_queue(ctrl);
1705 return error;
1706}
1707
1708static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
1709 bool remove)
1710{
1711 blk_mq_quiesce_queue(ctrl->admin_q);
1712 nvme_tcp_stop_queue(ctrl, 0);
1713 blk_mq_tagset_busy_iter(ctrl->admin_tagset, nvme_cancel_request, ctrl);
1714 blk_mq_unquiesce_queue(ctrl->admin_q);
1715 nvme_tcp_destroy_admin_queue(ctrl, remove);
1716}
1717
1718static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
1719 bool remove)
1720{
1721 if (ctrl->queue_count <= 1)
1722 return;
1723 nvme_stop_queues(ctrl);
1724 nvme_tcp_stop_io_queues(ctrl);
1725 blk_mq_tagset_busy_iter(ctrl->tagset, nvme_cancel_request, ctrl);
1726 if (remove)
1727 nvme_start_queues(ctrl);
1728 nvme_tcp_destroy_io_queues(ctrl, remove);
1729}
1730
1731static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
1732{
1733 /* If we are resetting/deleting then do nothing */
1734 if (ctrl->state != NVME_CTRL_CONNECTING) {
1735 WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
1736 ctrl->state == NVME_CTRL_LIVE);
1737 return;
1738 }
1739
1740 if (nvmf_should_reconnect(ctrl)) {
1741 dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
1742 ctrl->opts->reconnect_delay);
1743 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
1744 ctrl->opts->reconnect_delay * HZ);
1745 } else {
1746 dev_info(ctrl->device, "Removing controller...\n");
1747 nvme_delete_ctrl(ctrl);
1748 }
1749}
1750
1751static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
1752{
1753 struct nvmf_ctrl_options *opts = ctrl->opts;
1754 int ret = -EINVAL;
1755
1756 ret = nvme_tcp_configure_admin_queue(ctrl, new);
1757 if (ret)
1758 return ret;
1759
1760 if (ctrl->icdoff) {
1761 dev_err(ctrl->device, "icdoff is not supported!\n");
1762 goto destroy_admin;
1763 }
1764
1765 if (opts->queue_size > ctrl->sqsize + 1)
1766 dev_warn(ctrl->device,
1767 "queue_size %zu > ctrl sqsize %u, clamping down\n",
1768 opts->queue_size, ctrl->sqsize + 1);
1769
1770 if (ctrl->sqsize + 1 > ctrl->maxcmd) {
1771 dev_warn(ctrl->device,
1772 "sqsize %u > ctrl maxcmd %u, clamping down\n",
1773 ctrl->sqsize + 1, ctrl->maxcmd);
1774 ctrl->sqsize = ctrl->maxcmd - 1;
1775 }
1776
1777 if (ctrl->queue_count > 1) {
1778 ret = nvme_tcp_configure_io_queues(ctrl, new);
1779 if (ret)
1780 goto destroy_admin;
1781 }
1782
1783 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
1784 /* state change failure is ok if we're in DELETING state */
1785 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
1786 ret = -EINVAL;
1787 goto destroy_io;
1788 }
1789
1790 nvme_start_ctrl(ctrl);
1791 return 0;
1792
1793destroy_io:
1794 if (ctrl->queue_count > 1)
1795 nvme_tcp_destroy_io_queues(ctrl, new);
1796destroy_admin:
1797 nvme_tcp_stop_queue(ctrl, 0);
1798 nvme_tcp_destroy_admin_queue(ctrl, new);
1799 return ret;
1800}
1801
1802static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
1803{
1804 struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
1805 struct nvme_tcp_ctrl, connect_work);
1806 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
1807
1808 ++ctrl->nr_reconnects;
1809
1810 if (nvme_tcp_setup_ctrl(ctrl, false))
1811 goto requeue;
1812
1813 dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
1814 ctrl->nr_reconnects);
1815
1816 ctrl->nr_reconnects = 0;
1817
1818 return;
1819
1820requeue:
1821 dev_info(ctrl->device, "Failed reconnect attempt %d\n",
1822 ctrl->nr_reconnects);
1823 nvme_tcp_reconnect_or_remove(ctrl);
1824}
1825
1826static void nvme_tcp_error_recovery_work(struct work_struct *work)
1827{
1828 struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
1829 struct nvme_tcp_ctrl, err_work);
1830 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
1831
1832 nvme_stop_keep_alive(ctrl);
1833 nvme_tcp_teardown_io_queues(ctrl, false);
1834 /* unquiesce queues to fast-fail pending requests */
1835 nvme_start_queues(ctrl);
1836 nvme_tcp_teardown_admin_queue(ctrl, false);
1837
1838 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
1839 /* state change failure is ok if we're in DELETING state */
1840 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
1841 return;
1842 }
1843
1844 nvme_tcp_reconnect_or_remove(ctrl);
1845}
1846
1847static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
1848{
1849 cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
1850 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
1851
1852 nvme_tcp_teardown_io_queues(ctrl, shutdown);
1853 if (shutdown)
1854 nvme_shutdown_ctrl(ctrl);
1855 else
1856 nvme_disable_ctrl(ctrl, ctrl->cap);
1857 nvme_tcp_teardown_admin_queue(ctrl, shutdown);
1858}
1859
1860static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
1861{
1862 nvme_tcp_teardown_ctrl(ctrl, true);
1863}
1864
1865static void nvme_reset_ctrl_work(struct work_struct *work)
1866{
1867 struct nvme_ctrl *ctrl =
1868 container_of(work, struct nvme_ctrl, reset_work);
1869
1870 nvme_stop_ctrl(ctrl);
1871 nvme_tcp_teardown_ctrl(ctrl, false);
1872
1873 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
1874 /* state change failure is ok if we're in DELETING state */
1875 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
1876 return;
1877 }
1878
1879 if (nvme_tcp_setup_ctrl(ctrl, false))
1880 goto out_fail;
1881
1882 return;
1883
1884out_fail:
1885 ++ctrl->nr_reconnects;
1886 nvme_tcp_reconnect_or_remove(ctrl);
1887}
1888
1889static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
1890{
1891 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1892
1893 if (list_empty(&ctrl->list))
1894 goto free_ctrl;
1895
1896 mutex_lock(&nvme_tcp_ctrl_mutex);
1897 list_del(&ctrl->list);
1898 mutex_unlock(&nvme_tcp_ctrl_mutex);
1899
1900 nvmf_free_options(nctrl->opts);
1901free_ctrl:
1902 kfree(ctrl->queues);
1903 kfree(ctrl);
1904}
1905
1906static void nvme_tcp_set_sg_null(struct nvme_command *c)
1907{
1908 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
1909
1910 sg->addr = 0;
1911 sg->length = 0;
1912 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
1913 NVME_SGL_FMT_TRANSPORT_A;
1914}
1915
1916static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
1917 struct nvme_command *c, u32 data_len)
1918{
1919 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
1920
1921 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
1922 sg->length = cpu_to_le32(data_len);
1923 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
1924}
1925
1926static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
1927 u32 data_len)
1928{
1929 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
1930
1931 sg->addr = 0;
1932 sg->length = cpu_to_le32(data_len);
1933 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
1934 NVME_SGL_FMT_TRANSPORT_A;
1935}
1936
1937static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
1938{
1939 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
1940 struct nvme_tcp_queue *queue = &ctrl->queues[0];
1941 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
1942 struct nvme_command *cmd = &pdu->cmd;
1943 u8 hdgst = nvme_tcp_hdgst_len(queue);
1944
1945 memset(pdu, 0, sizeof(*pdu));
1946 pdu->hdr.type = nvme_tcp_cmd;
1947 if (queue->hdr_digest)
1948 pdu->hdr.flags |= NVME_TCP_F_HDGST;
1949 pdu->hdr.hlen = sizeof(*pdu);
1950 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
1951
1952 cmd->common.opcode = nvme_admin_async_event;
1953 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
1954 cmd->common.flags |= NVME_CMD_SGL_METABUF;
1955 nvme_tcp_set_sg_null(cmd);
1956
1957 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
1958 ctrl->async_req.offset = 0;
1959 ctrl->async_req.curr_bio = NULL;
1960 ctrl->async_req.data_len = 0;
1961
1962 nvme_tcp_queue_request(&ctrl->async_req);
1963}
1964
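/*
 * blk-mq timeout handler: if the controller is not live, tear down the
 * queues (which completes outstanding requests) and return BLK_EH_DONE;
 * otherwise start error recovery and rearm the timer.
 */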
1965static enum blk_eh_timer_return
1966nvme_tcp_timeout(struct request *rq, bool reserved)
1967{
1968 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
1969 struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
1970 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
1971
1972 dev_warn(ctrl->ctrl.device,
1973 "queue %d: timeout request %#x type %d\n",
1974 nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
1975
1976 if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
1977 /*
1978 * Teardown immediately if the controller times out while starting
1979 * or if error recovery has already started. All outstanding
1980 * requests are completed on shutdown, so we return BLK_EH_DONE.
1981 */
1982 flush_work(&ctrl->err_work);
1983 nvme_tcp_teardown_io_queues(&ctrl->ctrl, false);
1984 nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false);
1985 return BLK_EH_DONE;
1986 }
1987
1988 dev_warn(ctrl->ctrl.device, "starting error recovery\n");
1989 nvme_tcp_error_recovery(&ctrl->ctrl);
1990
1991 return BLK_EH_RESET_TIMER;
1992}
1993
1994static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
1995 struct request *rq)
1996{
1997 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
1998 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
1999 struct nvme_command *c = &pdu->cmd;
2000
2001 c->common.flags |= NVME_CMD_SGL_METABUF;
2002
2003 if (rq_data_dir(rq) == WRITE && req->data_len &&
2004 req->data_len <= nvme_tcp_inline_data_size(queue))
2005 nvme_tcp_set_sg_inline(queue, c, req->data_len);
2006 else
2007 nvme_tcp_set_sg_host_data(c, req->data_len);
2008
2009 return 0;
2010}
2011
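/*
 * Build the command capsule PDU for a block request and reset the
 * per-request send bookkeeping; inline write data is accounted in pdu_len.
 */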
static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
		struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	struct nvme_tcp_queue *queue = req->queue;
	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
	blk_status_t ret;

	ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
	if (ret)
		return ret;

	req->state = NVME_TCP_SEND_CMD_PDU;
	req->offset = 0;
	req->data_sent = 0;
	req->pdu_len = 0;
	req->pdu_sent = 0;
	req->data_len = blk_rq_payload_bytes(rq);
	req->curr_bio = rq->bio;

	if (rq_data_dir(rq) == WRITE &&
	    req->data_len <= nvme_tcp_inline_data_size(queue))
		req->pdu_len = req->data_len;
	else if (req->curr_bio)
		nvme_tcp_init_iter(req, READ);

	pdu->hdr.type = nvme_tcp_cmd;
	pdu->hdr.flags = 0;
	if (queue->hdr_digest)
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest && req->pdu_len) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		ddgst = nvme_tcp_ddgst_len(queue);
	}
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);

	ret = nvme_tcp_map_data(queue, rq);
	if (unlikely(ret)) {
		dev_err(queue->ctrl->ctrl.device,
			"Failed to map data (%d)\n", ret);
		return ret;
	}

	return 0;
}

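/*
 * blk-mq .queue_rq handler: reject commands while the queue is not live,
 * build the command PDU and hand the request over to the send path.
 */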
static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct request *rq = bd->rq;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);

	ret = nvme_tcp_setup_cmd_pdu(ns, rq);
	if (unlikely(ret))
		return ret;

	blk_mq_start_request(rq);

	nvme_tcp_queue_request(req);

	return BLK_STS_OK;
}

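/*
 * Map hardware contexts to I/O queues. When nr_write_queues is set, reads
 * and writes get separate queue sets; otherwise both share the same set of
 * I/O queues.
 */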
static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;

	set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
	set->map[HCTX_TYPE_READ].nr_queues = ctrl->ctrl.opts->nr_io_queues;
	if (ctrl->ctrl.opts->nr_write_queues) {
		/* separate read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->ctrl.opts->nr_write_queues;
		set->map[HCTX_TYPE_READ].queue_offset =
			ctrl->ctrl.opts->nr_write_queues;
	} else {
		/* mixed read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->ctrl.opts->nr_io_queues;
		set->map[HCTX_TYPE_READ].queue_offset = 0;
	}
	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
	return 0;
}

static struct blk_mq_ops nvme_tcp_mq_ops = {
	.queue_rq = nvme_tcp_queue_rq,
	.complete = nvme_complete_rq,
	.init_request = nvme_tcp_init_request,
	.exit_request = nvme_tcp_exit_request,
	.init_hctx = nvme_tcp_init_hctx,
	.timeout = nvme_tcp_timeout,
	.map_queues = nvme_tcp_map_queues,
};

static struct blk_mq_ops nvme_tcp_admin_mq_ops = {
	.queue_rq = nvme_tcp_queue_rq,
	.complete = nvme_complete_rq,
	.init_request = nvme_tcp_init_request,
	.exit_request = nvme_tcp_exit_request,
	.init_hctx = nvme_tcp_init_admin_hctx,
	.timeout = nvme_tcp_timeout,
};

static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
	.name = "tcp",
	.module = THIS_MODULE,
	.flags = NVME_F_FABRICS,
	.reg_read32 = nvmf_reg_read32,
	.reg_read64 = nvmf_reg_read64,
	.reg_write32 = nvmf_reg_write32,
	.free_ctrl = nvme_tcp_free_ctrl,
	.submit_async_event = nvme_tcp_submit_async_event,
	.delete_ctrl = nvme_tcp_delete_ctrl,
	.get_address = nvmf_get_address,
};

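/*
 * Check whether a controller with matching address options already exists,
 * so duplicate connections can be rejected unless explicitly allowed.
 */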
static bool
nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	bool found = false;

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
		found = nvmf_ip_options_match(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return found;
}

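/*
 * Allocate and set up a new TCP controller: parse the target (and optional
 * host) addresses, allocate the queue array, register with the core NVMe
 * layer and establish the initial connection.
 */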
static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctrl->list);
	ctrl->ctrl.opts = opts;
	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues + 1;
	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	INIT_DELAYED_WORK(&ctrl->connect_work,
			nvme_tcp_reconnect_ctrl_work);
	INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);

	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
		opts->trsvcid =
			kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
		if (!opts->trsvcid) {
			ret = -ENOMEM;
			goto out_free_ctrl;
		}
		opts->mask |= NVMF_OPT_TRSVCID;
	}

	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->traddr, opts->trsvcid, &ctrl->addr);
	if (ret) {
		pr_err("malformed address passed: %s:%s\n",
			opts->traddr, opts->trsvcid);
		goto out_free_ctrl;
	}

	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->host_traddr, NULL, &ctrl->src_addr);
		if (ret) {
			pr_err("malformed src address passed: %s\n",
				opts->host_traddr);
			goto out_free_ctrl;
		}
	}

	if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
		ret = -EALREADY;
		goto out_free_ctrl;
	}

	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
				GFP_KERNEL);
	if (!ctrl->queues) {
		ret = -ENOMEM;
		goto out_free_ctrl;
	}

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
	if (ret)
		goto out_kfree_queues;

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		WARN_ON_ONCE(1);
		ret = -EINTR;
		goto out_uninit_ctrl;
	}

	ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
	if (ret)
		goto out_uninit_ctrl;

	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
		ctrl->ctrl.opts->subsysnqn, &ctrl->addr);

	nvme_get_ctrl(&ctrl->ctrl);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return &ctrl->ctrl;

out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
out_kfree_queues:
	kfree(ctrl->queues);
out_free_ctrl:
	kfree(ctrl);
	return ERR_PTR(ret);
}

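/*
 * Transport registration for the fabrics layer: "tcp" requires a target
 * address and optionally accepts a service id, digests, reconnect tuning
 * and a separate write queue count.
 */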
static struct nvmf_transport_ops nvme_tcp_transport = {
	.name = "tcp",
	.module = THIS_MODULE,
	.required_opts = NVMF_OPT_TRADDR,
	.allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
			NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
			NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
			NVMF_OPT_NR_WRITE_QUEUES,
	.create_ctrl = nvme_tcp_create_ctrl,
};

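/*
 * Module init: allocate the high-priority, memory-reclaim-safe workqueue
 * used for queue I/O work and register the transport with the fabrics core.
 */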
static int __init nvme_tcp_init_module(void)
{
	int ret;

	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!nvme_tcp_wq)
		return -ENOMEM;

	/* tear the workqueue back down if transport registration fails */
	ret = nvmf_register_transport(&nvme_tcp_transport);
	if (ret) {
		destroy_workqueue(nvme_tcp_wq);
		return ret;
	}

	return 0;
}

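/*
 * Module exit: unregister the transport, request deletion of any remaining
 * controllers, wait for the deletions to finish and destroy the workqueue.
 */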
static void __exit nvme_tcp_cleanup_module(void)
{
	struct nvme_tcp_ctrl *ctrl;

	nvmf_unregister_transport(&nvme_tcp_transport);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_tcp_ctrl_mutex);
	flush_workqueue(nvme_delete_wq);

	destroy_workqueue(nvme_tcp_wq);
}

module_init(nvme_tcp_init_module);
module_exit(nvme_tcp_cleanup_module);

MODULE_LICENSE("GPL v2");