Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * NVMe over Fabrics TCP host.
4 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
5 */
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7#include <linux/module.h>
8#include <linux/init.h>
9#include <linux/slab.h>
10#include <linux/err.h>
11#include <linux/nvme-tcp.h>
12#include <net/sock.h>
13#include <net/tcp.h>
14#include <linux/blk-mq.h>
15#include <crypto/hash.h>
16#include <net/busy_poll.h>
17
18#include "nvme.h"
19#include "fabrics.h"
20
21struct nvme_tcp_queue;
22
23/* Define the socket priority to use for connections where it is desirable
24 * that the NIC consider performing optimized packet processing or filtering.
25 * A non-zero value is sufficient to indicate general consideration of any
26 * possible optimization. Making it a module param allows for alternative
27 * values that may be unique for some NIC implementations.
28 */
29static int so_priority;
30module_param(so_priority, int, 0644);
31MODULE_PARM_DESC(so_priority, "nvme tcp socket optimization priority");
32
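/*
 * Illustrative usage (values below are only examples): the priority can be
 * set at load time with "modprobe nvme_tcp so_priority=6" or, because the
 * parameter is 0644, at runtime via
 * /sys/module/nvme_tcp/parameters/so_priority. Any value > 0 is applied to
 * each queue's socket with sock_set_priority() when the queue is allocated.
 */
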
33#ifdef CONFIG_DEBUG_LOCK_ALLOC
34/* lockdep can detect a circular dependency of the form
35 * sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
36 * because dependencies are tracked for both nvme-tcp and user contexts. Using
37 * a separate class prevents lockdep from conflating nvme-tcp socket use with
38 * user-space socket API use.
39 */
40static struct lock_class_key nvme_tcp_sk_key[2];
41static struct lock_class_key nvme_tcp_slock_key[2];
42
43static void nvme_tcp_reclassify_socket(struct socket *sock)
44{
45 struct sock *sk = sock->sk;
46
47 if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
48 return;
49
50 switch (sk->sk_family) {
51 case AF_INET:
52 sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME",
53 &nvme_tcp_slock_key[0],
54 "sk_lock-AF_INET-NVME",
55 &nvme_tcp_sk_key[0]);
56 break;
57 case AF_INET6:
58 sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME",
59 &nvme_tcp_slock_key[1],
60 "sk_lock-AF_INET6-NVME",
61 &nvme_tcp_sk_key[1]);
62 break;
63 default:
64 WARN_ON_ONCE(1);
65 }
66}
67#else
68static void nvme_tcp_reclassify_socket(struct socket *sock) { }
69#endif
70
71enum nvme_tcp_send_state {
72 NVME_TCP_SEND_CMD_PDU = 0,
73 NVME_TCP_SEND_H2C_PDU,
74 NVME_TCP_SEND_DATA,
75 NVME_TCP_SEND_DDGST,
76};
77
78struct nvme_tcp_request {
79 struct nvme_request req;
80 void *pdu;
81 struct nvme_tcp_queue *queue;
82 u32 data_len;
83 u32 pdu_len;
84 u32 pdu_sent;
85 u32 h2cdata_left;
86 u32 h2cdata_offset;
87 u16 ttag;
88 __le16 status;
89 struct list_head entry;
90 struct llist_node lentry;
91 __le32 ddgst;
92
93 struct bio *curr_bio;
94 struct iov_iter iter;
95
96 /* send state */
97 size_t offset;
98 size_t data_sent;
99 enum nvme_tcp_send_state state;
100};
101
102enum nvme_tcp_queue_flags {
103 NVME_TCP_Q_ALLOCATED = 0,
104 NVME_TCP_Q_LIVE = 1,
105 NVME_TCP_Q_POLLING = 2,
106};
107
108enum nvme_tcp_recv_state {
109 NVME_TCP_RECV_PDU = 0,
110 NVME_TCP_RECV_DATA,
111 NVME_TCP_RECV_DDGST,
112};
113
114struct nvme_tcp_ctrl;
115struct nvme_tcp_queue {
116 struct socket *sock;
117 struct work_struct io_work;
118 int io_cpu;
119
120 struct mutex queue_lock;
121 struct mutex send_mutex;
122 struct llist_head req_list;
123 struct list_head send_list;
124 bool more_requests;
125
126 /* recv state */
127 void *pdu;
128 int pdu_remaining;
129 int pdu_offset;
130 size_t data_remaining;
131 size_t ddgst_remaining;
132 unsigned int nr_cqe;
133
134 /* send state */
135 struct nvme_tcp_request *request;
136
137 int queue_size;
138 u32 maxh2cdata;
139 size_t cmnd_capsule_len;
140 struct nvme_tcp_ctrl *ctrl;
141 unsigned long flags;
142 bool rd_enabled;
143
144 bool hdr_digest;
145 bool data_digest;
146 struct ahash_request *rcv_hash;
147 struct ahash_request *snd_hash;
148 __le32 exp_ddgst;
149 __le32 recv_ddgst;
150
151 struct page_frag_cache pf_cache;
152
153 void (*state_change)(struct sock *);
154 void (*data_ready)(struct sock *);
155 void (*write_space)(struct sock *);
156};
157
158struct nvme_tcp_ctrl {
159 /* read only in the hot path */
160 struct nvme_tcp_queue *queues;
161 struct blk_mq_tag_set tag_set;
162
163 /* other member variables */
164 struct list_head list;
165 struct blk_mq_tag_set admin_tag_set;
166 struct sockaddr_storage addr;
167 struct sockaddr_storage src_addr;
168 struct nvme_ctrl ctrl;
169
170 struct work_struct err_work;
171 struct delayed_work connect_work;
172 struct nvme_tcp_request async_req;
173 u32 io_queues[HCTX_MAX_TYPES];
174};
175
176static LIST_HEAD(nvme_tcp_ctrl_list);
177static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
178static struct workqueue_struct *nvme_tcp_wq;
179static const struct blk_mq_ops nvme_tcp_mq_ops;
180static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
181static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
182
183static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
184{
185 return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
186}
187
188static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
189{
190 return queue - queue->ctrl->queues;
191}
192
193static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
194{
195 u32 queue_idx = nvme_tcp_queue_id(queue);
196
197 if (queue_idx == 0)
198 return queue->ctrl->admin_tag_set.tags[queue_idx];
199 return queue->ctrl->tag_set.tags[queue_idx - 1];
200}
201
202static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
203{
204 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
205}
206
207static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
208{
209 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
210}
211
212static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
213{
214 return queue->cmnd_capsule_len - sizeof(struct nvme_command);
215}
216
217static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
218{
219 return req == &req->queue->ctrl->async_req;
220}
221
222static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
223{
224 struct request *rq;
225
226 if (unlikely(nvme_tcp_async_req(req)))
227 return false; /* async events don't have a request */
228
229 rq = blk_mq_rq_from_pdu(req);
230
231 return rq_data_dir(rq) == WRITE && req->data_len &&
232 req->data_len <= nvme_tcp_inline_data_size(req->queue);
233}
234
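/*
 * Descriptive note: a write is sent as in-capsule (inline) data only when it
 * fits in the command capsule after the 64-byte nvme_command itself. The
 * capsule size is set per queue in nvme_tcp_alloc_queue(): ioccsz * 16 for
 * I/O queues, and sizeof(struct nvme_command) + NVME_TCP_ADMIN_CCSZ for the
 * admin queue.
 */
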
235static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
236{
237 return req->iter.bvec->bv_page;
238}
239
240static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
241{
242 return req->iter.bvec->bv_offset + req->iter.iov_offset;
243}
244
245static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
246{
247 return min_t(size_t, iov_iter_single_seg_count(&req->iter),
248 req->pdu_len - req->pdu_sent);
249}
250
251static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
252{
253 return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
254 req->pdu_len - req->pdu_sent : 0;
255}
256
257static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
258 int len)
259{
260 return nvme_tcp_pdu_data_left(req) <= len;
261}
262
263static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
264 unsigned int dir)
265{
266 struct request *rq = blk_mq_rq_from_pdu(req);
267 struct bio_vec *vec;
268 unsigned int size;
269 int nr_bvec;
270 size_t offset;
271
272 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
273 vec = &rq->special_vec;
274 nr_bvec = 1;
275 size = blk_rq_payload_bytes(rq);
276 offset = 0;
277 } else {
278 struct bio *bio = req->curr_bio;
279 struct bvec_iter bi;
280 struct bio_vec bv;
281
282 vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
283 nr_bvec = 0;
284 bio_for_each_bvec(bv, bio, bi) {
285 nr_bvec++;
286 }
287 size = bio->bi_iter.bi_size;
288 offset = bio->bi_iter.bi_bvec_done;
289 }
290
291 iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
292 req->iter.iov_offset = offset;
293}
294
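/*
 * Descriptive note: the iterator above is built over the current bio only
 * (not the whole request), starting at bi_bvec_done so a partially processed
 * bio is resumed correctly; requests with RQF_SPECIAL_PAYLOAD (e.g. discard
 * or write-zeroes payloads) use the single special_vec instead. When one bio
 * is exhausted mid-request, nvme_tcp_advance_req() and nvme_tcp_recv_data()
 * step curr_bio to bi_next and rebuild the iterator.
 */
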
295static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
296 int len)
297{
298 req->data_sent += len;
299 req->pdu_sent += len;
300 iov_iter_advance(&req->iter, len);
301 if (!iov_iter_count(&req->iter) &&
302 req->data_sent < req->data_len) {
303 req->curr_bio = req->curr_bio->bi_next;
304 nvme_tcp_init_iter(req, WRITE);
305 }
306}
307
308static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
309{
310 int ret;
311
312 /* drain the send queue as much as we can... */
313 do {
314 ret = nvme_tcp_try_send(queue);
315 } while (ret > 0);
316}
317
318static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
319{
320 return !list_empty(&queue->send_list) ||
321 !llist_empty(&queue->req_list) || queue->more_requests;
322}
323
324static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
325 bool sync, bool last)
326{
327 struct nvme_tcp_queue *queue = req->queue;
328 bool empty;
329
330 empty = llist_add(&req->lentry, &queue->req_list) &&
331 list_empty(&queue->send_list) && !queue->request;
332
333	/*
334	 * If we're the first on the send_list, try to send directly;
335	 * otherwise queue io_work. Also, only do that if we are on
336	 * the same cpu, so we don't introduce contention.
337	 */
338 if (queue->io_cpu == raw_smp_processor_id() &&
339 sync && empty && mutex_trylock(&queue->send_mutex)) {
340 queue->more_requests = !last;
341 nvme_tcp_send_all(queue);
342 queue->more_requests = false;
343 mutex_unlock(&queue->send_mutex);
344 }
345
346 if (last && nvme_tcp_queue_more(queue))
347 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
348}
349
350static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
351{
352 struct nvme_tcp_request *req;
353 struct llist_node *node;
354
355 for (node = llist_del_all(&queue->req_list); node; node = node->next) {
356 req = llist_entry(node, struct nvme_tcp_request, lentry);
357 list_add(&req->entry, &queue->send_list);
358 }
359}
360
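/*
 * Note on the queuing scheme (descriptive only): submitters push requests
 * onto the lock-less req_list with llist_add(), which is safe from any
 * context without holding send_mutex. The single sender (under send_mutex)
 * splices them here: llist_del_all() hands the nodes back newest-first, and
 * list_add() to the head of send_list reverses that again, so
 * nvme_tcp_fetch_request() ends up consuming requests in FIFO order.
 */
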
361static inline struct nvme_tcp_request *
362nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
363{
364 struct nvme_tcp_request *req;
365
366 req = list_first_entry_or_null(&queue->send_list,
367 struct nvme_tcp_request, entry);
368 if (!req) {
369 nvme_tcp_process_req_list(queue);
370 req = list_first_entry_or_null(&queue->send_list,
371 struct nvme_tcp_request, entry);
372 if (unlikely(!req))
373 return NULL;
374 }
375
376 list_del(&req->entry);
377 return req;
378}
379
380static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
381 __le32 *dgst)
382{
383 ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
384 crypto_ahash_final(hash);
385}
386
387static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
388 struct page *page, off_t off, size_t len)
389{
390 struct scatterlist sg;
391
392 sg_init_marker(&sg, 1);
393 sg_set_page(&sg, page, len, off);
394 ahash_request_set_crypt(hash, &sg, NULL, len);
395 crypto_ahash_update(hash);
396}
397
398static inline void nvme_tcp_hdgst(struct ahash_request *hash,
399 void *pdu, size_t len)
400{
401 struct scatterlist sg;
402
403 sg_init_one(&sg, pdu, len);
404 ahash_request_set_crypt(hash, &sg, pdu + len, len);
405 crypto_ahash_digest(hash);
406}
407
408static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
409 void *pdu, size_t pdu_len)
410{
411 struct nvme_tcp_hdr *hdr = pdu;
412 __le32 recv_digest;
413 __le32 exp_digest;
414
415 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
416 dev_err(queue->ctrl->ctrl.device,
417 "queue %d: header digest flag is cleared\n",
418 nvme_tcp_queue_id(queue));
419 return -EPROTO;
420 }
421
422 recv_digest = *(__le32 *)(pdu + hdr->hlen);
423 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
424 exp_digest = *(__le32 *)(pdu + hdr->hlen);
425 if (recv_digest != exp_digest) {
426 dev_err(queue->ctrl->ctrl.device,
427 "header digest error: recv %#x expected %#x\n",
428 le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
429 return -EIO;
430 }
431
432 return 0;
433}
434
435static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
436{
437 struct nvme_tcp_hdr *hdr = pdu;
438 u8 digest_len = nvme_tcp_hdgst_len(queue);
439 u32 len;
440
441 len = le32_to_cpu(hdr->plen) - hdr->hlen -
442 ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);
443
444 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
445 dev_err(queue->ctrl->ctrl.device,
446 "queue %d: data digest flag is cleared\n",
447 nvme_tcp_queue_id(queue));
448 return -EPROTO;
449 }
450 crypto_ahash_init(queue->rcv_hash);
451
452 return 0;
453}
454
455static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
456 struct request *rq, unsigned int hctx_idx)
457{
458 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
459
460 page_frag_free(req->pdu);
461}
462
463static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
464 struct request *rq, unsigned int hctx_idx,
465 unsigned int numa_node)
466{
467 struct nvme_tcp_ctrl *ctrl = set->driver_data;
468 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
469 struct nvme_tcp_cmd_pdu *pdu;
470 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
471 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
472 u8 hdgst = nvme_tcp_hdgst_len(queue);
473
474 req->pdu = page_frag_alloc(&queue->pf_cache,
475 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
476 GFP_KERNEL | __GFP_ZERO);
477 if (!req->pdu)
478 return -ENOMEM;
479
480 pdu = req->pdu;
481 req->queue = queue;
482 nvme_req(rq)->ctrl = &ctrl->ctrl;
483 nvme_req(rq)->cmd = &pdu->cmd;
484
485 return 0;
486}
487
488static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
489 unsigned int hctx_idx)
490{
491 struct nvme_tcp_ctrl *ctrl = data;
492 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
493
494 hctx->driver_data = queue;
495 return 0;
496}
497
498static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
499 unsigned int hctx_idx)
500{
501 struct nvme_tcp_ctrl *ctrl = data;
502 struct nvme_tcp_queue *queue = &ctrl->queues[0];
503
504 hctx->driver_data = queue;
505 return 0;
506}
507
508static enum nvme_tcp_recv_state
509nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
510{
511 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
512 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
513 NVME_TCP_RECV_DATA;
514}
515
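/*
 * The receive state is derived from the remaining-byte counters rather than
 * stored explicitly. For example (illustrative): once a c2h_data PDU header
 * has been consumed, pdu_remaining is 0 and data_remaining holds the PDU
 * data length, so the state machine reports NVME_TCP_RECV_DATA; when the
 * data is drained and data digest is enabled, ddgst_remaining is set to
 * NVME_TCP_DIGEST_LENGTH and the state becomes NVME_TCP_RECV_DDGST.
 */
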
516static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
517{
518 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
519 nvme_tcp_hdgst_len(queue);
520 queue->pdu_offset = 0;
521 queue->data_remaining = -1;
522 queue->ddgst_remaining = 0;
523}
524
525static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
526{
527 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
528 return;
529
530 dev_warn(ctrl->device, "starting error recovery\n");
531 queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
532}
533
534static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
535 struct nvme_completion *cqe)
536{
537 struct nvme_tcp_request *req;
538 struct request *rq;
539
540 rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
541 if (!rq) {
542 dev_err(queue->ctrl->ctrl.device,
543 "got bad cqe.command_id %#x on queue %d\n",
544 cqe->command_id, nvme_tcp_queue_id(queue));
545 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
546 return -EINVAL;
547 }
548
549 req = blk_mq_rq_to_pdu(rq);
550 if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
551 req->status = cqe->status;
552
553 if (!nvme_try_complete_req(rq, req->status, cqe->result))
554 nvme_complete_rq(rq);
555 queue->nr_cqe++;
556
557 return 0;
558}
559
560static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
561 struct nvme_tcp_data_pdu *pdu)
562{
563 struct request *rq;
564
565 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
566 if (!rq) {
567 dev_err(queue->ctrl->ctrl.device,
568 "got bad c2hdata.command_id %#x on queue %d\n",
569 pdu->command_id, nvme_tcp_queue_id(queue));
570 return -ENOENT;
571 }
572
573 if (!blk_rq_payload_bytes(rq)) {
574 dev_err(queue->ctrl->ctrl.device,
575 "queue %d tag %#x unexpected data\n",
576 nvme_tcp_queue_id(queue), rq->tag);
577 return -EIO;
578 }
579
580 queue->data_remaining = le32_to_cpu(pdu->data_length);
581
582 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
583 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
584 dev_err(queue->ctrl->ctrl.device,
585 "queue %d tag %#x SUCCESS set but not last PDU\n",
586 nvme_tcp_queue_id(queue), rq->tag);
587 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
588 return -EPROTO;
589 }
590
591 return 0;
592}
593
594static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
595 struct nvme_tcp_rsp_pdu *pdu)
596{
597 struct nvme_completion *cqe = &pdu->cqe;
598 int ret = 0;
599
600 /*
601 * AEN requests are special as they don't time out and can
602 * survive any kind of queue freeze and often don't respond to
603 * aborts. We don't even bother to allocate a struct request
604 * for them but rather special case them here.
605 */
606 if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
607 cqe->command_id)))
608 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
609 &cqe->result);
610 else
611 ret = nvme_tcp_process_nvme_cqe(queue, cqe);
612
613 return ret;
614}
615
616static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req)
617{
618 struct nvme_tcp_data_pdu *data = req->pdu;
619 struct nvme_tcp_queue *queue = req->queue;
620 struct request *rq = blk_mq_rq_from_pdu(req);
621 u32 h2cdata_sent = req->pdu_len;
622 u8 hdgst = nvme_tcp_hdgst_len(queue);
623 u8 ddgst = nvme_tcp_ddgst_len(queue);
624
625 req->state = NVME_TCP_SEND_H2C_PDU;
626 req->offset = 0;
627 req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata);
628 req->pdu_sent = 0;
629 req->h2cdata_left -= req->pdu_len;
630 req->h2cdata_offset += h2cdata_sent;
631
632 memset(data, 0, sizeof(*data));
633 data->hdr.type = nvme_tcp_h2c_data;
634 if (!req->h2cdata_left)
635 data->hdr.flags = NVME_TCP_F_DATA_LAST;
636 if (queue->hdr_digest)
637 data->hdr.flags |= NVME_TCP_F_HDGST;
638 if (queue->data_digest)
639 data->hdr.flags |= NVME_TCP_F_DDGST;
640 data->hdr.hlen = sizeof(*data);
641 data->hdr.pdo = data->hdr.hlen + hdgst;
642 data->hdr.plen =
643 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
644 data->ttag = req->ttag;
645 data->command_id = nvme_cid(rq);
646 data->data_offset = cpu_to_le32(req->h2cdata_offset);
647 data->data_length = cpu_to_le32(req->pdu_len);
648}
649
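/*
 * Worked example (numbers are illustrative only): if the controller sends an
 * R2T for 128k of data but advertised a maxh2cdata of 64k in its icresp, the
 * transfer is split into two H2C data PDUs. The first carries 64k at the R2T
 * offset without NVME_TCP_F_DATA_LAST; once it has been sent in full, this
 * function is called again to build the second 64k PDU at offset + 64k, now
 * with NVME_TCP_F_DATA_LAST set because h2cdata_left has reached zero.
 */
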
650static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
651 struct nvme_tcp_r2t_pdu *pdu)
652{
653 struct nvme_tcp_request *req;
654 struct request *rq;
655 u32 r2t_length = le32_to_cpu(pdu->r2t_length);
656 u32 r2t_offset = le32_to_cpu(pdu->r2t_offset);
657
658 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
659 if (!rq) {
660 dev_err(queue->ctrl->ctrl.device,
661 "got bad r2t.command_id %#x on queue %d\n",
662 pdu->command_id, nvme_tcp_queue_id(queue));
663 return -ENOENT;
664 }
665 req = blk_mq_rq_to_pdu(rq);
666
667 if (unlikely(!r2t_length)) {
668 dev_err(queue->ctrl->ctrl.device,
669 "req %d r2t len is %u, probably a bug...\n",
670 rq->tag, r2t_length);
671 return -EPROTO;
672 }
673
674 if (unlikely(req->data_sent + r2t_length > req->data_len)) {
675 dev_err(queue->ctrl->ctrl.device,
676 "req %d r2t len %u exceeded data len %u (%zu sent)\n",
677 rq->tag, r2t_length, req->data_len, req->data_sent);
678 return -EPROTO;
679 }
680
681 if (unlikely(r2t_offset < req->data_sent)) {
682 dev_err(queue->ctrl->ctrl.device,
683 "req %d unexpected r2t offset %u (expected %zu)\n",
684 rq->tag, r2t_offset, req->data_sent);
685 return -EPROTO;
686 }
687
688 req->pdu_len = 0;
689 req->h2cdata_left = r2t_length;
690 req->h2cdata_offset = r2t_offset;
691 req->ttag = pdu->ttag;
692
693 nvme_tcp_setup_h2c_data_pdu(req);
694 nvme_tcp_queue_request(req, false, true);
695
696 return 0;
697}
698
699static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
700 unsigned int *offset, size_t *len)
701{
702 struct nvme_tcp_hdr *hdr;
703 char *pdu = queue->pdu;
704 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
705 int ret;
706
707 ret = skb_copy_bits(skb, *offset,
708 &pdu[queue->pdu_offset], rcv_len);
709 if (unlikely(ret))
710 return ret;
711
712 queue->pdu_remaining -= rcv_len;
713 queue->pdu_offset += rcv_len;
714 *offset += rcv_len;
715 *len -= rcv_len;
716 if (queue->pdu_remaining)
717 return 0;
718
719 hdr = queue->pdu;
720 if (queue->hdr_digest) {
721 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
722 if (unlikely(ret))
723 return ret;
724 }
725
726
727 if (queue->data_digest) {
728 ret = nvme_tcp_check_ddgst(queue, queue->pdu);
729 if (unlikely(ret))
730 return ret;
731 }
732
733 switch (hdr->type) {
734 case nvme_tcp_c2h_data:
735 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
736 case nvme_tcp_rsp:
737 nvme_tcp_init_recv_ctx(queue);
738 return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
739 case nvme_tcp_r2t:
740 nvme_tcp_init_recv_ctx(queue);
741 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
742 default:
743 dev_err(queue->ctrl->ctrl.device,
744 "unsupported pdu type (%d)\n", hdr->type);
745 return -EINVAL;
746 }
747}
748
749static inline void nvme_tcp_end_request(struct request *rq, u16 status)
750{
751 union nvme_result res = {};
752
753 if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
754 nvme_complete_rq(rq);
755}
756
757static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
758 unsigned int *offset, size_t *len)
759{
760 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
761 struct request *rq =
762 nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
763 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
764
765 while (true) {
766 int recv_len, ret;
767
768 recv_len = min_t(size_t, *len, queue->data_remaining);
769 if (!recv_len)
770 break;
771
772 if (!iov_iter_count(&req->iter)) {
773 req->curr_bio = req->curr_bio->bi_next;
774
775			/*
776			 * If we don't have any bios it means that the controller
777			 * sent more data than we requested, hence error
778			 */
779 if (!req->curr_bio) {
780 dev_err(queue->ctrl->ctrl.device,
781 "queue %d no space in request %#x",
782 nvme_tcp_queue_id(queue), rq->tag);
783 nvme_tcp_init_recv_ctx(queue);
784 return -EIO;
785 }
786 nvme_tcp_init_iter(req, READ);
787 }
788
789 /* we can read only from what is left in this bio */
790 recv_len = min_t(size_t, recv_len,
791 iov_iter_count(&req->iter));
792
793 if (queue->data_digest)
794 ret = skb_copy_and_hash_datagram_iter(skb, *offset,
795 &req->iter, recv_len, queue->rcv_hash);
796 else
797 ret = skb_copy_datagram_iter(skb, *offset,
798 &req->iter, recv_len);
799 if (ret) {
800 dev_err(queue->ctrl->ctrl.device,
801 "queue %d failed to copy request %#x data",
802 nvme_tcp_queue_id(queue), rq->tag);
803 return ret;
804 }
805
806 *len -= recv_len;
807 *offset += recv_len;
808 queue->data_remaining -= recv_len;
809 }
810
811 if (!queue->data_remaining) {
812 if (queue->data_digest) {
813 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
814 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
815 } else {
816 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
817 nvme_tcp_end_request(rq,
818 le16_to_cpu(req->status));
819 queue->nr_cqe++;
820 }
821 nvme_tcp_init_recv_ctx(queue);
822 }
823 }
824
825 return 0;
826}
827
828static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
829 struct sk_buff *skb, unsigned int *offset, size_t *len)
830{
831 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
832 char *ddgst = (char *)&queue->recv_ddgst;
833 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
834 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
835 int ret;
836
837 ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
838 if (unlikely(ret))
839 return ret;
840
841 queue->ddgst_remaining -= recv_len;
842 *offset += recv_len;
843 *len -= recv_len;
844 if (queue->ddgst_remaining)
845 return 0;
846
847 if (queue->recv_ddgst != queue->exp_ddgst) {
848 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
849 pdu->command_id);
850 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
851
852 req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);
853
854 dev_err(queue->ctrl->ctrl.device,
855 "data digest error: recv %#x expected %#x\n",
856 le32_to_cpu(queue->recv_ddgst),
857 le32_to_cpu(queue->exp_ddgst));
858 }
859
860 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
861 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
862 pdu->command_id);
863 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
864
865 nvme_tcp_end_request(rq, le16_to_cpu(req->status));
866 queue->nr_cqe++;
867 }
868
869 nvme_tcp_init_recv_ctx(queue);
870 return 0;
871}
872
873static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
874 unsigned int offset, size_t len)
875{
876 struct nvme_tcp_queue *queue = desc->arg.data;
877 size_t consumed = len;
878 int result;
879
880 while (len) {
881 switch (nvme_tcp_recv_state(queue)) {
882 case NVME_TCP_RECV_PDU:
883 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
884 break;
885 case NVME_TCP_RECV_DATA:
886 result = nvme_tcp_recv_data(queue, skb, &offset, &len);
887 break;
888 case NVME_TCP_RECV_DDGST:
889 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
890 break;
891 default:
892 result = -EFAULT;
893 }
894 if (result) {
895 dev_err(queue->ctrl->ctrl.device,
896 "receive failed: %d\n", result);
897 queue->rd_enabled = false;
898 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
899 return result;
900 }
901 }
902
903 return consumed;
904}
905
906static void nvme_tcp_data_ready(struct sock *sk)
907{
908 struct nvme_tcp_queue *queue;
909
910 read_lock_bh(&sk->sk_callback_lock);
911 queue = sk->sk_user_data;
912 if (likely(queue && queue->rd_enabled) &&
913 !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
914 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
915 read_unlock_bh(&sk->sk_callback_lock);
916}
917
918static void nvme_tcp_write_space(struct sock *sk)
919{
920 struct nvme_tcp_queue *queue;
921
922 read_lock_bh(&sk->sk_callback_lock);
923 queue = sk->sk_user_data;
924 if (likely(queue && sk_stream_is_writeable(sk))) {
925 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
926 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
927 }
928 read_unlock_bh(&sk->sk_callback_lock);
929}
930
931static void nvme_tcp_state_change(struct sock *sk)
932{
933 struct nvme_tcp_queue *queue;
934
935 read_lock_bh(&sk->sk_callback_lock);
936 queue = sk->sk_user_data;
937 if (!queue)
938 goto done;
939
940 switch (sk->sk_state) {
941 case TCP_CLOSE:
942 case TCP_CLOSE_WAIT:
943 case TCP_LAST_ACK:
944 case TCP_FIN_WAIT1:
945 case TCP_FIN_WAIT2:
946 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
947 break;
948 default:
949 dev_info(queue->ctrl->ctrl.device,
950 "queue %d socket state %d\n",
951 nvme_tcp_queue_id(queue), sk->sk_state);
952 }
953
954 queue->state_change(sk);
955done:
956 read_unlock_bh(&sk->sk_callback_lock);
957}
958
959static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
960{
961 queue->request = NULL;
962}
963
964static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
965{
966 if (nvme_tcp_async_req(req)) {
967 union nvme_result res = {};
968
969 nvme_complete_async_event(&req->queue->ctrl->ctrl,
970 cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
971 } else {
972 nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
973 NVME_SC_HOST_PATH_ERROR);
974 }
975}
976
977static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
978{
979 struct nvme_tcp_queue *queue = req->queue;
980 int req_data_len = req->data_len;
981 u32 h2cdata_left = req->h2cdata_left;
982
983 while (true) {
984 struct page *page = nvme_tcp_req_cur_page(req);
985 size_t offset = nvme_tcp_req_cur_offset(req);
986 size_t len = nvme_tcp_req_cur_length(req);
987 bool last = nvme_tcp_pdu_last_send(req, len);
988 int req_data_sent = req->data_sent;
989 int ret, flags = MSG_DONTWAIT;
990
991 if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
992 flags |= MSG_EOR;
993 else
994 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
995
996 if (sendpage_ok(page)) {
997 ret = kernel_sendpage(queue->sock, page, offset, len,
998 flags);
999 } else {
1000 ret = sock_no_sendpage(queue->sock, page, offset, len,
1001 flags);
1002 }
1003 if (ret <= 0)
1004 return ret;
1005
1006 if (queue->data_digest)
1007 nvme_tcp_ddgst_update(queue->snd_hash, page,
1008 offset, ret);
1009
1010		/*
1011		 * Update the request iterator, except for the last payload send
1012		 * in the request, where we don't want to modify it as we may
1013		 * compete with the RX path completing the request.
1014		 */
1015 if (req_data_sent + ret < req_data_len)
1016 nvme_tcp_advance_req(req, ret);
1017
1018 /* fully successful last send in current PDU */
1019 if (last && ret == len) {
1020 if (queue->data_digest) {
1021 nvme_tcp_ddgst_final(queue->snd_hash,
1022 &req->ddgst);
1023 req->state = NVME_TCP_SEND_DDGST;
1024 req->offset = 0;
1025 } else {
1026 if (h2cdata_left)
1027 nvme_tcp_setup_h2c_data_pdu(req);
1028 else
1029 nvme_tcp_done_send_req(queue);
1030 }
1031 return 1;
1032 }
1033 }
1034 return -EAGAIN;
1035}
1036
1037static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
1038{
1039 struct nvme_tcp_queue *queue = req->queue;
1040 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
1041 bool inline_data = nvme_tcp_has_inline_data(req);
1042 u8 hdgst = nvme_tcp_hdgst_len(queue);
1043 int len = sizeof(*pdu) + hdgst - req->offset;
1044 int flags = MSG_DONTWAIT;
1045 int ret;
1046
1047 if (inline_data || nvme_tcp_queue_more(queue))
1048 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
1049 else
1050 flags |= MSG_EOR;
1051
1052 if (queue->hdr_digest && !req->offset)
1053 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
1054
1055 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
1056 offset_in_page(pdu) + req->offset, len, flags);
1057 if (unlikely(ret <= 0))
1058 return ret;
1059
1060 len -= ret;
1061 if (!len) {
1062 if (inline_data) {
1063 req->state = NVME_TCP_SEND_DATA;
1064 if (queue->data_digest)
1065 crypto_ahash_init(queue->snd_hash);
1066 } else {
1067 nvme_tcp_done_send_req(queue);
1068 }
1069 return 1;
1070 }
1071 req->offset += ret;
1072
1073 return -EAGAIN;
1074}
1075
1076static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
1077{
1078 struct nvme_tcp_queue *queue = req->queue;
1079 struct nvme_tcp_data_pdu *pdu = req->pdu;
1080 u8 hdgst = nvme_tcp_hdgst_len(queue);
1081 int len = sizeof(*pdu) - req->offset + hdgst;
1082 int ret;
1083
1084 if (queue->hdr_digest && !req->offset)
1085 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
1086
1087 if (!req->h2cdata_left)
1088 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
1089 offset_in_page(pdu) + req->offset, len,
1090 MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
1091 else
1092 ret = sock_no_sendpage(queue->sock, virt_to_page(pdu),
1093 offset_in_page(pdu) + req->offset, len,
1094 MSG_DONTWAIT | MSG_MORE);
1095 if (unlikely(ret <= 0))
1096 return ret;
1097
1098 len -= ret;
1099 if (!len) {
1100 req->state = NVME_TCP_SEND_DATA;
1101 if (queue->data_digest)
1102 crypto_ahash_init(queue->snd_hash);
1103 return 1;
1104 }
1105 req->offset += ret;
1106
1107 return -EAGAIN;
1108}
1109
1110static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
1111{
1112 struct nvme_tcp_queue *queue = req->queue;
1113 size_t offset = req->offset;
1114 u32 h2cdata_left = req->h2cdata_left;
1115 int ret;
1116 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1117 struct kvec iov = {
1118 .iov_base = (u8 *)&req->ddgst + req->offset,
1119 .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
1120 };
1121
1122 if (nvme_tcp_queue_more(queue))
1123 msg.msg_flags |= MSG_MORE;
1124 else
1125 msg.msg_flags |= MSG_EOR;
1126
1127 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1128 if (unlikely(ret <= 0))
1129 return ret;
1130
1131 if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
1132 if (h2cdata_left)
1133 nvme_tcp_setup_h2c_data_pdu(req);
1134 else
1135 nvme_tcp_done_send_req(queue);
1136 return 1;
1137 }
1138
1139 req->offset += ret;
1140 return -EAGAIN;
1141}
1142
1143static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
1144{
1145 struct nvme_tcp_request *req;
1146 int ret = 1;
1147
1148 if (!queue->request) {
1149 queue->request = nvme_tcp_fetch_request(queue);
1150 if (!queue->request)
1151 return 0;
1152 }
1153 req = queue->request;
1154
1155 if (req->state == NVME_TCP_SEND_CMD_PDU) {
1156 ret = nvme_tcp_try_send_cmd_pdu(req);
1157 if (ret <= 0)
1158 goto done;
1159 if (!nvme_tcp_has_inline_data(req))
1160 return ret;
1161 }
1162
1163 if (req->state == NVME_TCP_SEND_H2C_PDU) {
1164 ret = nvme_tcp_try_send_data_pdu(req);
1165 if (ret <= 0)
1166 goto done;
1167 }
1168
1169 if (req->state == NVME_TCP_SEND_DATA) {
1170 ret = nvme_tcp_try_send_data(req);
1171 if (ret <= 0)
1172 goto done;
1173 }
1174
1175 if (req->state == NVME_TCP_SEND_DDGST)
1176 ret = nvme_tcp_try_send_ddgst(req);
1177done:
1178 if (ret == -EAGAIN) {
1179 ret = 0;
1180 } else if (ret < 0) {
1181 dev_err(queue->ctrl->ctrl.device,
1182 "failed to send request %d\n", ret);
1183 if (ret != -EPIPE && ret != -ECONNRESET)
1184 nvme_tcp_fail_request(queue->request);
1185 nvme_tcp_done_send_req(queue);
1186 }
1187 return ret;
1188}
1189
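/*
 * Send-side state machine summary (descriptive only): a request starts in
 * NVME_TCP_SEND_CMD_PDU; a write with inline data moves straight to
 * NVME_TCP_SEND_DATA, while an R2T-driven write passes through
 * NVME_TCP_SEND_H2C_PDU first. NVME_TCP_SEND_DDGST is only entered when data
 * digest is enabled. Each helper returns 1 when its stage completed and
 * -EAGAIN when the socket cannot accept more data; nvme_tcp_try_send() maps
 * -EAGAIN to 0 so it is not treated as a hard failure.
 */
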
1190static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
1191{
1192 struct socket *sock = queue->sock;
1193 struct sock *sk = sock->sk;
1194 read_descriptor_t rd_desc;
1195 int consumed;
1196
1197 rd_desc.arg.data = queue;
1198 rd_desc.count = 1;
1199 lock_sock(sk);
1200 queue->nr_cqe = 0;
1201 consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
1202 release_sock(sk);
1203 return consumed;
1204}
1205
1206static void nvme_tcp_io_work(struct work_struct *w)
1207{
1208 struct nvme_tcp_queue *queue =
1209 container_of(w, struct nvme_tcp_queue, io_work);
1210 unsigned long deadline = jiffies + msecs_to_jiffies(1);
1211
1212 do {
1213 bool pending = false;
1214 int result;
1215
1216 if (mutex_trylock(&queue->send_mutex)) {
1217 result = nvme_tcp_try_send(queue);
1218 mutex_unlock(&queue->send_mutex);
1219 if (result > 0)
1220 pending = true;
1221 else if (unlikely(result < 0))
1222 break;
1223 }
1224
1225 result = nvme_tcp_try_recv(queue);
1226 if (result > 0)
1227 pending = true;
1228 else if (unlikely(result < 0))
1229 return;
1230
1231 if (!pending)
1232 return;
1233
1234 } while (!time_after(jiffies, deadline)); /* quota is exhausted */
1235
1236 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
1237}
1238
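/*
 * Scheduling note (descriptive only): io_work gives itself roughly a 1ms
 * budget of combined send/receive processing. It returns early as soon as
 * neither direction makes progress; if work is still pending when the budget
 * expires, it requeues itself on the same io_cpu rather than looping
 * indefinitely inside one work item.
 */
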
1239static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
1240{
1241 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
1242
1243 ahash_request_free(queue->rcv_hash);
1244 ahash_request_free(queue->snd_hash);
1245 crypto_free_ahash(tfm);
1246}
1247
1248static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
1249{
1250 struct crypto_ahash *tfm;
1251
1252 tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
1253 if (IS_ERR(tfm))
1254 return PTR_ERR(tfm);
1255
1256 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1257 if (!queue->snd_hash)
1258 goto free_tfm;
1259 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
1260
1261 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1262 if (!queue->rcv_hash)
1263 goto free_snd_hash;
1264 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
1265
1266 return 0;
1267free_snd_hash:
1268 ahash_request_free(queue->snd_hash);
1269free_tfm:
1270 crypto_free_ahash(tfm);
1271 return -ENOMEM;
1272}
1273
1274static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
1275{
1276 struct nvme_tcp_request *async = &ctrl->async_req;
1277
1278 page_frag_free(async->pdu);
1279}
1280
1281static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
1282{
1283 struct nvme_tcp_queue *queue = &ctrl->queues[0];
1284 struct nvme_tcp_request *async = &ctrl->async_req;
1285 u8 hdgst = nvme_tcp_hdgst_len(queue);
1286
1287 async->pdu = page_frag_alloc(&queue->pf_cache,
1288 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
1289 GFP_KERNEL | __GFP_ZERO);
1290 if (!async->pdu)
1291 return -ENOMEM;
1292
1293 async->queue = &ctrl->queues[0];
1294 return 0;
1295}
1296
1297static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
1298{
1299 struct page *page;
1300 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1301 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1302
1303 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1304 return;
1305
1306 if (queue->hdr_digest || queue->data_digest)
1307 nvme_tcp_free_crypto(queue);
1308
1309 if (queue->pf_cache.va) {
1310 page = virt_to_head_page(queue->pf_cache.va);
1311 __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
1312 queue->pf_cache.va = NULL;
1313 }
1314 sock_release(queue->sock);
1315 kfree(queue->pdu);
1316 mutex_destroy(&queue->send_mutex);
1317 mutex_destroy(&queue->queue_lock);
1318}
1319
1320static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
1321{
1322 struct nvme_tcp_icreq_pdu *icreq;
1323 struct nvme_tcp_icresp_pdu *icresp;
1324 struct msghdr msg = {};
1325 struct kvec iov;
1326 bool ctrl_hdgst, ctrl_ddgst;
1327 u32 maxh2cdata;
1328 int ret;
1329
1330 icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
1331 if (!icreq)
1332 return -ENOMEM;
1333
1334 icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
1335 if (!icresp) {
1336 ret = -ENOMEM;
1337 goto free_icreq;
1338 }
1339
1340 icreq->hdr.type = nvme_tcp_icreq;
1341 icreq->hdr.hlen = sizeof(*icreq);
1342 icreq->hdr.pdo = 0;
1343 icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
1344 icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
1345 icreq->maxr2t = 0; /* single inflight r2t supported */
1346 icreq->hpda = 0; /* no alignment constraint */
1347 if (queue->hdr_digest)
1348 icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
1349 if (queue->data_digest)
1350 icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
1351
1352 iov.iov_base = icreq;
1353 iov.iov_len = sizeof(*icreq);
1354 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1355 if (ret < 0)
1356 goto free_icresp;
1357
1358 memset(&msg, 0, sizeof(msg));
1359 iov.iov_base = icresp;
1360 iov.iov_len = sizeof(*icresp);
1361 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1362 iov.iov_len, msg.msg_flags);
1363 if (ret < 0)
1364 goto free_icresp;
1365
1366 ret = -EINVAL;
1367 if (icresp->hdr.type != nvme_tcp_icresp) {
1368 pr_err("queue %d: bad type returned %d\n",
1369 nvme_tcp_queue_id(queue), icresp->hdr.type);
1370 goto free_icresp;
1371 }
1372
1373 if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
1374 pr_err("queue %d: bad pdu length returned %d\n",
1375 nvme_tcp_queue_id(queue), icresp->hdr.plen);
1376 goto free_icresp;
1377 }
1378
1379 if (icresp->pfv != NVME_TCP_PFV_1_0) {
1380 pr_err("queue %d: bad pfv returned %d\n",
1381 nvme_tcp_queue_id(queue), icresp->pfv);
1382 goto free_icresp;
1383 }
1384
1385 ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
1386 if ((queue->data_digest && !ctrl_ddgst) ||
1387 (!queue->data_digest && ctrl_ddgst)) {
1388 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1389 nvme_tcp_queue_id(queue),
1390 queue->data_digest ? "enabled" : "disabled",
1391 ctrl_ddgst ? "enabled" : "disabled");
1392 goto free_icresp;
1393 }
1394
1395 ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
1396 if ((queue->hdr_digest && !ctrl_hdgst) ||
1397 (!queue->hdr_digest && ctrl_hdgst)) {
1398 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1399 nvme_tcp_queue_id(queue),
1400 queue->hdr_digest ? "enabled" : "disabled",
1401 ctrl_hdgst ? "enabled" : "disabled");
1402 goto free_icresp;
1403 }
1404
1405 if (icresp->cpda != 0) {
1406 pr_err("queue %d: unsupported cpda returned %d\n",
1407 nvme_tcp_queue_id(queue), icresp->cpda);
1408 goto free_icresp;
1409 }
1410
1411 maxh2cdata = le32_to_cpu(icresp->maxdata);
1412 if ((maxh2cdata % 4) || (maxh2cdata < NVME_TCP_MIN_MAXH2CDATA)) {
1413 pr_err("queue %d: invalid maxh2cdata returned %u\n",
1414 nvme_tcp_queue_id(queue), maxh2cdata);
1415 goto free_icresp;
1416 }
1417 queue->maxh2cdata = maxh2cdata;
1418
1419 ret = 0;
1420free_icresp:
1421 kfree(icresp);
1422free_icreq:
1423 kfree(icreq);
1424 return ret;
1425}
1426
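/*
 * Connection establishment sketch (descriptive summary of the function
 * above): the host sends an icreq PDU advertising PFV 1.0, no data alignment
 * (hpda = 0), a single in-flight R2T, and the locally requested digests. It
 * then blocks in kernel_recvmsg() for the icresp (bounded by the 10 second
 * sk_rcvtimeo configured at queue allocation) and validates the PDU type,
 * length, PFV, digest negotiation, cpda and maxh2cdata before the queue is
 * considered usable.
 */
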
1427static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
1428{
1429 return nvme_tcp_queue_id(queue) == 0;
1430}
1431
1432static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
1433{
1434 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1435 int qid = nvme_tcp_queue_id(queue);
1436
1437 return !nvme_tcp_admin_queue(queue) &&
1438 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
1439}
1440
1441static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
1442{
1443 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1444 int qid = nvme_tcp_queue_id(queue);
1445
1446 return !nvme_tcp_admin_queue(queue) &&
1447 !nvme_tcp_default_queue(queue) &&
1448 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1449 ctrl->io_queues[HCTX_TYPE_READ];
1450}
1451
1452static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
1453{
1454 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1455 int qid = nvme_tcp_queue_id(queue);
1456
1457 return !nvme_tcp_admin_queue(queue) &&
1458 !nvme_tcp_default_queue(queue) &&
1459 !nvme_tcp_read_queue(queue) &&
1460 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1461 ctrl->io_queues[HCTX_TYPE_READ] +
1462 ctrl->io_queues[HCTX_TYPE_POLL];
1463}
1464
1465static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
1466{
1467 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1468 int qid = nvme_tcp_queue_id(queue);
1469 int n = 0;
1470
1471 if (nvme_tcp_default_queue(queue))
1472 n = qid - 1;
1473 else if (nvme_tcp_read_queue(queue))
1474 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
1475 else if (nvme_tcp_poll_queue(queue))
1476 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
1477 ctrl->io_queues[HCTX_TYPE_READ] - 1;
1478 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
1479}
1480
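/*
 * Illustrative mapping (example numbers only): with io_queues[DEFAULT] = 4
 * and io_queues[READ] = 2, qids 1..4 are default queues with n = 0..3 and
 * qids 5..6 are read queues with n = 0..1. cpumask_next_wrap(n - 1, ...)
 * then selects the first online CPU after CPU n - 1 (wrapping), which with
 * all CPUs online is simply CPU n, so each queue type restarts its CPU
 * assignment from CPU 0.
 */
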
1481static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
1482 int qid, size_t queue_size)
1483{
1484 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1485 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1486 int ret, rcv_pdu_size;
1487
1488 mutex_init(&queue->queue_lock);
1489 queue->ctrl = ctrl;
1490 init_llist_head(&queue->req_list);
1491 INIT_LIST_HEAD(&queue->send_list);
1492 mutex_init(&queue->send_mutex);
1493 INIT_WORK(&queue->io_work, nvme_tcp_io_work);
1494 queue->queue_size = queue_size;
1495
1496 if (qid > 0)
1497 queue->cmnd_capsule_len = nctrl->ioccsz * 16;
1498 else
1499 queue->cmnd_capsule_len = sizeof(struct nvme_command) +
1500 NVME_TCP_ADMIN_CCSZ;
1501
1502 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
1503 IPPROTO_TCP, &queue->sock);
1504 if (ret) {
1505 dev_err(nctrl->device,
1506 "failed to create socket: %d\n", ret);
1507 goto err_destroy_mutex;
1508 }
1509
1510 nvme_tcp_reclassify_socket(queue->sock);
1511
1512 /* Single syn retry */
1513 tcp_sock_set_syncnt(queue->sock->sk, 1);
1514
1515 /* Set TCP no delay */
1516 tcp_sock_set_nodelay(queue->sock->sk);
1517
1518 /*
1519 * Cleanup whatever is sitting in the TCP transmit queue on socket
1520 * close. This is done to prevent stale data from being sent should
1521 * the network connection be restored before TCP times out.
1522 */
1523 sock_no_linger(queue->sock->sk);
1524
1525 if (so_priority > 0)
1526 sock_set_priority(queue->sock->sk, so_priority);
1527
1528 /* Set socket type of service */
1529 if (nctrl->opts->tos >= 0)
1530 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
1531
1532	/* Set a 10 second timeout for icresp recvmsg */
1533 queue->sock->sk->sk_rcvtimeo = 10 * HZ;
1534
1535 queue->sock->sk->sk_allocation = GFP_ATOMIC;
1536 nvme_tcp_set_queue_io_cpu(queue);
1537 queue->request = NULL;
1538 queue->data_remaining = 0;
1539 queue->ddgst_remaining = 0;
1540 queue->pdu_remaining = 0;
1541 queue->pdu_offset = 0;
1542 sk_set_memalloc(queue->sock->sk);
1543
1544 if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
1545 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
1546 sizeof(ctrl->src_addr));
1547 if (ret) {
1548 dev_err(nctrl->device,
1549 "failed to bind queue %d socket %d\n",
1550 qid, ret);
1551 goto err_sock;
1552 }
1553 }
1554
1555 if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) {
1556 char *iface = nctrl->opts->host_iface;
1557 sockptr_t optval = KERNEL_SOCKPTR(iface);
1558
1559 ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE,
1560 optval, strlen(iface));
1561 if (ret) {
1562 dev_err(nctrl->device,
1563 "failed to bind to interface %s queue %d err %d\n",
1564 iface, qid, ret);
1565 goto err_sock;
1566 }
1567 }
1568
1569 queue->hdr_digest = nctrl->opts->hdr_digest;
1570 queue->data_digest = nctrl->opts->data_digest;
1571 if (queue->hdr_digest || queue->data_digest) {
1572 ret = nvme_tcp_alloc_crypto(queue);
1573 if (ret) {
1574 dev_err(nctrl->device,
1575 "failed to allocate queue %d crypto\n", qid);
1576 goto err_sock;
1577 }
1578 }
1579
1580 rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
1581 nvme_tcp_hdgst_len(queue);
1582 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1583 if (!queue->pdu) {
1584 ret = -ENOMEM;
1585 goto err_crypto;
1586 }
1587
1588 dev_dbg(nctrl->device, "connecting queue %d\n",
1589 nvme_tcp_queue_id(queue));
1590
1591 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
1592 sizeof(ctrl->addr), 0);
1593 if (ret) {
1594 dev_err(nctrl->device,
1595 "failed to connect socket: %d\n", ret);
1596 goto err_rcv_pdu;
1597 }
1598
1599 ret = nvme_tcp_init_connection(queue);
1600 if (ret)
1601 goto err_init_connect;
1602
1603 queue->rd_enabled = true;
1604 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
1605 nvme_tcp_init_recv_ctx(queue);
1606
1607 write_lock_bh(&queue->sock->sk->sk_callback_lock);
1608 queue->sock->sk->sk_user_data = queue;
1609 queue->state_change = queue->sock->sk->sk_state_change;
1610 queue->data_ready = queue->sock->sk->sk_data_ready;
1611 queue->write_space = queue->sock->sk->sk_write_space;
1612 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
1613 queue->sock->sk->sk_state_change = nvme_tcp_state_change;
1614 queue->sock->sk->sk_write_space = nvme_tcp_write_space;
1615#ifdef CONFIG_NET_RX_BUSY_POLL
1616 queue->sock->sk->sk_ll_usec = 1;
1617#endif
1618 write_unlock_bh(&queue->sock->sk->sk_callback_lock);
1619
1620 return 0;
1621
1622err_init_connect:
1623 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1624err_rcv_pdu:
1625 kfree(queue->pdu);
1626err_crypto:
1627 if (queue->hdr_digest || queue->data_digest)
1628 nvme_tcp_free_crypto(queue);
1629err_sock:
1630 sock_release(queue->sock);
1631 queue->sock = NULL;
1632err_destroy_mutex:
1633 mutex_destroy(&queue->send_mutex);
1634 mutex_destroy(&queue->queue_lock);
1635 return ret;
1636}
1637
1638static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
1639{
1640 struct socket *sock = queue->sock;
1641
1642 write_lock_bh(&sock->sk->sk_callback_lock);
1643 sock->sk->sk_user_data = NULL;
1644 sock->sk->sk_data_ready = queue->data_ready;
1645 sock->sk->sk_state_change = queue->state_change;
1646 sock->sk->sk_write_space = queue->write_space;
1647 write_unlock_bh(&sock->sk->sk_callback_lock);
1648}
1649
1650static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
1651{
1652 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1653 nvme_tcp_restore_sock_calls(queue);
1654 cancel_work_sync(&queue->io_work);
1655}
1656
1657static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
1658{
1659 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1660 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1661
1662 mutex_lock(&queue->queue_lock);
1663 if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1664 __nvme_tcp_stop_queue(queue);
1665 mutex_unlock(&queue->queue_lock);
1666}
1667
1668static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
1669{
1670 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1671 int ret;
1672
1673 if (idx)
1674 ret = nvmf_connect_io_queue(nctrl, idx);
1675 else
1676 ret = nvmf_connect_admin_queue(nctrl);
1677
1678 if (!ret) {
1679 set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
1680 } else {
1681 if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
1682 __nvme_tcp_stop_queue(&ctrl->queues[idx]);
1683 dev_err(nctrl->device,
1684 "failed to connect queue: %d ret=%d\n", idx, ret);
1685 }
1686 return ret;
1687}
1688
1689static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
1690 bool admin)
1691{
1692 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1693 struct blk_mq_tag_set *set;
1694 int ret;
1695
1696 if (admin) {
1697 set = &ctrl->admin_tag_set;
1698 memset(set, 0, sizeof(*set));
1699 set->ops = &nvme_tcp_admin_mq_ops;
1700 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
1701 set->reserved_tags = NVMF_RESERVED_TAGS;
1702 set->numa_node = nctrl->numa_node;
1703 set->flags = BLK_MQ_F_BLOCKING;
1704 set->cmd_size = sizeof(struct nvme_tcp_request);
1705 set->driver_data = ctrl;
1706 set->nr_hw_queues = 1;
1707 set->timeout = NVME_ADMIN_TIMEOUT;
1708 } else {
1709 set = &ctrl->tag_set;
1710 memset(set, 0, sizeof(*set));
1711 set->ops = &nvme_tcp_mq_ops;
1712 set->queue_depth = nctrl->sqsize + 1;
1713 set->reserved_tags = NVMF_RESERVED_TAGS;
1714 set->numa_node = nctrl->numa_node;
1715 set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
1716 set->cmd_size = sizeof(struct nvme_tcp_request);
1717 set->driver_data = ctrl;
1718 set->nr_hw_queues = nctrl->queue_count - 1;
1719 set->timeout = NVME_IO_TIMEOUT;
1720 set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
1721 }
1722
1723 ret = blk_mq_alloc_tag_set(set);
1724 if (ret)
1725 return ERR_PTR(ret);
1726
1727 return set;
1728}
1729
1730static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
1731{
1732 if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1733 cancel_work_sync(&ctrl->async_event_work);
1734 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
1735 to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
1736 }
1737
1738 nvme_tcp_free_queue(ctrl, 0);
1739}
1740
1741static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
1742{
1743 int i;
1744
1745 for (i = 1; i < ctrl->queue_count; i++)
1746 nvme_tcp_free_queue(ctrl, i);
1747}
1748
1749static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
1750{
1751 int i;
1752
1753 for (i = 1; i < ctrl->queue_count; i++)
1754 nvme_tcp_stop_queue(ctrl, i);
1755}
1756
1757static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
1758{
1759 int i, ret;
1760
1761 for (i = 1; i < ctrl->queue_count; i++) {
1762 ret = nvme_tcp_start_queue(ctrl, i);
1763 if (ret)
1764 goto out_stop_queues;
1765 }
1766
1767 return 0;
1768
1769out_stop_queues:
1770 for (i--; i >= 1; i--)
1771 nvme_tcp_stop_queue(ctrl, i);
1772 return ret;
1773}
1774
1775static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
1776{
1777 int ret;
1778
1779 ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
1780 if (ret)
1781 return ret;
1782
1783 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
1784 if (ret)
1785 goto out_free_queue;
1786
1787 return 0;
1788
1789out_free_queue:
1790 nvme_tcp_free_queue(ctrl, 0);
1791 return ret;
1792}
1793
1794static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1795{
1796 int i, ret;
1797
1798 for (i = 1; i < ctrl->queue_count; i++) {
1799 ret = nvme_tcp_alloc_queue(ctrl, i, ctrl->sqsize + 1);
1800 if (ret)
1801 goto out_free_queues;
1802 }
1803
1804 return 0;
1805
1806out_free_queues:
1807 for (i--; i >= 1; i--)
1808 nvme_tcp_free_queue(ctrl, i);
1809
1810 return ret;
1811}
1812
1813static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
1814{
1815 unsigned int nr_io_queues;
1816
1817 nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
1818 nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
1819 nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
1820
1821 return nr_io_queues;
1822}
1823
1824static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
1825 unsigned int nr_io_queues)
1826{
1827 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1828 struct nvmf_ctrl_options *opts = nctrl->opts;
1829
1830 if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
1831 /*
1832 * separate read/write queues
1833 * hand out dedicated default queues only after we have
1834 * sufficient read queues.
1835 */
1836 ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
1837 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
1838 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1839 min(opts->nr_write_queues, nr_io_queues);
1840 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1841 } else {
1842 /*
1843 * shared read/write queues
1844 * either no write queues were requested, or we don't have
1845 * sufficient queue count to have dedicated default queues.
1846 */
1847 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1848 min(opts->nr_io_queues, nr_io_queues);
1849 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1850 }
1851
1852 if (opts->nr_poll_queues && nr_io_queues) {
1853 /* map dedicated poll queues only if we have queues left */
1854 ctrl->io_queues[HCTX_TYPE_POLL] =
1855 min(opts->nr_poll_queues, nr_io_queues);
1856 }
1857}
1858
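/*
 * Worked example (option values are illustrative only): with nr_io_queues=4,
 * nr_write_queues=2 and nr_poll_queues=1, and 7 queues granted by the
 * controller, the first branch applies: HCTX_TYPE_READ gets 4,
 * HCTX_TYPE_DEFAULT gets min(2, 3) = 2, and HCTX_TYPE_POLL gets the
 * remaining min(1, 1) = 1. Had the controller granted only 4 queues in
 * total, the shared branch would be taken instead and all 4 would become
 * HCTX_TYPE_DEFAULT queues.
 */
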
1859static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1860{
1861 unsigned int nr_io_queues;
1862 int ret;
1863
1864 nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
1865 ret = nvme_set_queue_count(ctrl, &nr_io_queues);
1866 if (ret)
1867 return ret;
1868
1869 if (nr_io_queues == 0) {
1870 dev_err(ctrl->device,
1871 "unable to set any I/O queues\n");
1872 return -ENOMEM;
1873 }
1874
1875 ctrl->queue_count = nr_io_queues + 1;
1876 dev_info(ctrl->device,
1877 "creating %d I/O queues.\n", nr_io_queues);
1878
1879 nvme_tcp_set_io_queues(ctrl, nr_io_queues);
1880
1881 return __nvme_tcp_alloc_io_queues(ctrl);
1882}
1883
1884static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1885{
1886 nvme_tcp_stop_io_queues(ctrl);
1887 if (remove) {
1888 blk_cleanup_queue(ctrl->connect_q);
1889 blk_mq_free_tag_set(ctrl->tagset);
1890 }
1891 nvme_tcp_free_io_queues(ctrl);
1892}
1893
1894static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1895{
1896 int ret;
1897
1898 ret = nvme_tcp_alloc_io_queues(ctrl);
1899 if (ret)
1900 return ret;
1901
1902 if (new) {
1903 ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
1904 if (IS_ERR(ctrl->tagset)) {
1905 ret = PTR_ERR(ctrl->tagset);
1906 goto out_free_io_queues;
1907 }
1908
1909 ret = nvme_ctrl_init_connect_q(ctrl);
1910 if (ret)
1911 goto out_free_tag_set;
1912 }
1913
1914 ret = nvme_tcp_start_io_queues(ctrl);
1915 if (ret)
1916 goto out_cleanup_connect_q;
1917
1918 if (!new) {
1919 nvme_start_queues(ctrl);
1920 if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
1921 /*
1922 * If we timed out waiting for freeze we are likely to
1923 * be stuck. Fail the controller initialization just
1924 * to be safe.
1925 */
1926 ret = -ENODEV;
1927 goto out_wait_freeze_timed_out;
1928 }
1929 blk_mq_update_nr_hw_queues(ctrl->tagset,
1930 ctrl->queue_count - 1);
1931 nvme_unfreeze(ctrl);
1932 }
1933
1934 return 0;
1935
1936out_wait_freeze_timed_out:
1937 nvme_stop_queues(ctrl);
1938 nvme_sync_io_queues(ctrl);
1939 nvme_tcp_stop_io_queues(ctrl);
1940out_cleanup_connect_q:
1941 nvme_cancel_tagset(ctrl);
1942 if (new)
1943 blk_cleanup_queue(ctrl->connect_q);
1944out_free_tag_set:
1945 if (new)
1946 blk_mq_free_tag_set(ctrl->tagset);
1947out_free_io_queues:
1948 nvme_tcp_free_io_queues(ctrl);
1949 return ret;
1950}
1951
1952static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
1953{
1954 nvme_tcp_stop_queue(ctrl, 0);
1955 if (remove) {
1956 blk_cleanup_queue(ctrl->admin_q);
1957 blk_cleanup_queue(ctrl->fabrics_q);
1958 blk_mq_free_tag_set(ctrl->admin_tagset);
1959 }
1960 nvme_tcp_free_admin_queue(ctrl);
1961}
1962
1963static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
1964{
1965 int error;
1966
1967 error = nvme_tcp_alloc_admin_queue(ctrl);
1968 if (error)
1969 return error;
1970
1971 if (new) {
1972 ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
1973 if (IS_ERR(ctrl->admin_tagset)) {
1974 error = PTR_ERR(ctrl->admin_tagset);
1975 goto out_free_queue;
1976 }
1977
1978 ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
1979 if (IS_ERR(ctrl->fabrics_q)) {
1980 error = PTR_ERR(ctrl->fabrics_q);
1981 goto out_free_tagset;
1982 }
1983
1984 ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
1985 if (IS_ERR(ctrl->admin_q)) {
1986 error = PTR_ERR(ctrl->admin_q);
1987 goto out_cleanup_fabrics_q;
1988 }
1989 }
1990
1991 error = nvme_tcp_start_queue(ctrl, 0);
1992 if (error)
1993 goto out_cleanup_queue;
1994
1995 error = nvme_enable_ctrl(ctrl);
1996 if (error)
1997 goto out_stop_queue;
1998
1999 nvme_start_admin_queue(ctrl);
2000
2001 error = nvme_init_ctrl_finish(ctrl);
2002 if (error)
2003 goto out_quiesce_queue;
2004
2005 return 0;
2006
2007out_quiesce_queue:
2008 nvme_stop_admin_queue(ctrl);
2009 blk_sync_queue(ctrl->admin_q);
2010out_stop_queue:
2011 nvme_tcp_stop_queue(ctrl, 0);
2012 nvme_cancel_admin_tagset(ctrl);
2013out_cleanup_queue:
2014 if (new)
2015 blk_cleanup_queue(ctrl->admin_q);
2016out_cleanup_fabrics_q:
2017 if (new)
2018 blk_cleanup_queue(ctrl->fabrics_q);
2019out_free_tagset:
2020 if (new)
2021 blk_mq_free_tag_set(ctrl->admin_tagset);
2022out_free_queue:
2023 nvme_tcp_free_admin_queue(ctrl);
2024 return error;
2025}
2026
2027static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
2028 bool remove)
2029{
2030 nvme_stop_admin_queue(ctrl);
2031 blk_sync_queue(ctrl->admin_q);
2032 nvme_tcp_stop_queue(ctrl, 0);
2033 nvme_cancel_admin_tagset(ctrl);
2034 if (remove)
2035 nvme_start_admin_queue(ctrl);
2036 nvme_tcp_destroy_admin_queue(ctrl, remove);
2037}
2038
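/*
 * Quiesce the admin and I/O request queues, start a freeze, stop the
 * TCP I/O queues and cancel whatever is still in the I/O tag set. When
 * the controller is being removed, the request queues are unquiesced
 * again so outstanding requests can be flushed before the queues are
 * destroyed.
 */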
2039static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
2040 bool remove)
2041{
2042 if (ctrl->queue_count <= 1)
2043 return;
2044 nvme_stop_admin_queue(ctrl);
2045 nvme_start_freeze(ctrl);
2046 nvme_stop_queues(ctrl);
2047 nvme_sync_io_queues(ctrl);
2048 nvme_tcp_stop_io_queues(ctrl);
2049 nvme_cancel_tagset(ctrl);
2050 if (remove)
2051 nvme_start_queues(ctrl);
2052 nvme_tcp_destroy_io_queues(ctrl, remove);
2053}
2054
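/*
 * Decide what to do after a connection loss or a failed (re)connect:
 * if the controller is still in CONNECTING and the reconnect policy
 * allows another attempt, schedule connect_work after reconnect_delay;
 * otherwise delete the controller.
 */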
2055static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
2056{
2057 /* If we are resetting/deleting then do nothing */
2058 if (ctrl->state != NVME_CTRL_CONNECTING) {
2059 WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
2060 ctrl->state == NVME_CTRL_LIVE);
2061 return;
2062 }
2063
2064 if (nvmf_should_reconnect(ctrl)) {
2065 dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
2066 ctrl->opts->reconnect_delay);
2067 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
2068 ctrl->opts->reconnect_delay * HZ);
2069 } else {
2070 dev_info(ctrl->device, "Removing controller...\n");
2071 nvme_delete_ctrl(ctrl);
2072 }
2073}
2074
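/*
 * Full controller bring-up, shared by initial creation (new == true),
 * reset and reconnect. Configures the admin queue, validates transport
 * constraints (no ICDOFF, SGL support, queue sizing), configures the
 * I/O queues if any were granted, and finally transitions the
 * controller to LIVE and starts it.
 */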
2075static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
2076{
2077 struct nvmf_ctrl_options *opts = ctrl->opts;
2078 int ret;
2079
2080 ret = nvme_tcp_configure_admin_queue(ctrl, new);
2081 if (ret)
2082 return ret;
2083
2084 if (ctrl->icdoff) {
2085 ret = -EOPNOTSUPP;
2086 dev_err(ctrl->device, "icdoff is not supported!\n");
2087 goto destroy_admin;
2088 }
2089
2090 if (!nvme_ctrl_sgl_supported(ctrl)) {
2091 ret = -EOPNOTSUPP;
2092 dev_err(ctrl->device, "Mandatory SGLs are not supported!\n");
2093 goto destroy_admin;
2094 }
2095
2096 if (opts->queue_size > ctrl->sqsize + 1)
2097 dev_warn(ctrl->device,
2098 "queue_size %zu > ctrl sqsize %u, clamping down\n",
2099 opts->queue_size, ctrl->sqsize + 1);
2100
2101 if (ctrl->sqsize + 1 > ctrl->maxcmd) {
2102 dev_warn(ctrl->device,
2103 "sqsize %u > ctrl maxcmd %u, clamping down\n",
2104 ctrl->sqsize + 1, ctrl->maxcmd);
2105 ctrl->sqsize = ctrl->maxcmd - 1;
2106 }
2107
2108 if (ctrl->queue_count > 1) {
2109 ret = nvme_tcp_configure_io_queues(ctrl, new);
2110 if (ret)
2111 goto destroy_admin;
2112 }
2113
2114 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
2115 /*
2116 * A state change failure is ok if we started controller delete,
2117 * but it is not expected while creating a new controller,
2118 * where it would race with the teardown flow.
2119 */
2120 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2121 ctrl->state != NVME_CTRL_DELETING_NOIO);
2122 WARN_ON_ONCE(new);
2123 ret = -EINVAL;
2124 goto destroy_io;
2125 }
2126
2127 nvme_start_ctrl(ctrl);
2128 return 0;
2129
2130destroy_io:
2131 if (ctrl->queue_count > 1) {
2132 nvme_stop_queues(ctrl);
2133 nvme_sync_io_queues(ctrl);
2134 nvme_tcp_stop_io_queues(ctrl);
2135 nvme_cancel_tagset(ctrl);
2136 nvme_tcp_destroy_io_queues(ctrl, new);
2137 }
2138destroy_admin:
2139 nvme_stop_admin_queue(ctrl);
2140 blk_sync_queue(ctrl->admin_q);
2141 nvme_tcp_stop_queue(ctrl, 0);
2142 nvme_cancel_admin_tagset(ctrl);
2143 nvme_tcp_destroy_admin_queue(ctrl, new);
2144 return ret;
2145}
2146
2147static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
2148{
2149 struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
2150 struct nvme_tcp_ctrl, connect_work);
2151 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2152
2153 ++ctrl->nr_reconnects;
2154
2155 if (nvme_tcp_setup_ctrl(ctrl, false))
2156 goto requeue;
2157
2158 dev_info(ctrl->device, "Successfully reconnected (attempt %d)\n",
2159 ctrl->nr_reconnects);
2160
2161 ctrl->nr_reconnects = 0;
2162
2163 return;
2164
2165requeue:
2166 dev_info(ctrl->device, "Failed reconnect attempt %d\n",
2167 ctrl->nr_reconnects);
2168 nvme_tcp_reconnect_or_remove(ctrl);
2169}
2170
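/*
 * Error recovery: stop the keep-alive timer, tear down all queues
 * without releasing the tag sets, unquiesce the request queues so
 * pending requests fail fast, then move the controller to CONNECTING
 * and attempt to reconnect.
 */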
2171static void nvme_tcp_error_recovery_work(struct work_struct *work)
2172{
2173 struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
2174 struct nvme_tcp_ctrl, err_work);
2175 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2176
2177 nvme_stop_keep_alive(ctrl);
2178 flush_work(&ctrl->async_event_work);
2179 nvme_tcp_teardown_io_queues(ctrl, false);
2180 /* unquiesce so that pending requests fail fast */
2181 nvme_start_queues(ctrl);
2182 nvme_tcp_teardown_admin_queue(ctrl, false);
2183 nvme_start_admin_queue(ctrl);
2184
2185 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2186 /* state change failure is ok if we started ctrl delete */
2187 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2188 ctrl->state != NVME_CTRL_DELETING_NOIO);
2189 return;
2190 }
2191
2192 nvme_tcp_reconnect_or_remove(ctrl);
2193}
2194
2195static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
2196{
2197 cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
2198 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
2199
2200 nvme_tcp_teardown_io_queues(ctrl, shutdown);
2201 nvme_stop_admin_queue(ctrl);
2202 if (shutdown)
2203 nvme_shutdown_ctrl(ctrl);
2204 else
2205 nvme_disable_ctrl(ctrl);
2206 nvme_tcp_teardown_admin_queue(ctrl, shutdown);
2207}
2208
2209static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
2210{
2211 nvme_tcp_teardown_ctrl(ctrl, true);
2212}
2213
2214static void nvme_reset_ctrl_work(struct work_struct *work)
2215{
2216 struct nvme_ctrl *ctrl =
2217 container_of(work, struct nvme_ctrl, reset_work);
2218
2219 nvme_stop_ctrl(ctrl);
2220 nvme_tcp_teardown_ctrl(ctrl, false);
2221
2222 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2223 /* state change failure is ok if we started ctrl delete */
2224 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2225 ctrl->state != NVME_CTRL_DELETING_NOIO);
2226 return;
2227 }
2228
2229 if (nvme_tcp_setup_ctrl(ctrl, false))
2230 goto out_fail;
2231
2232 return;
2233
2234out_fail:
2235 ++ctrl->nr_reconnects;
2236 nvme_tcp_reconnect_or_remove(ctrl);
2237}
2238
2239static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
2240{
2241 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
2242
2243 if (list_empty(&ctrl->list))
2244 goto free_ctrl;
2245
2246 mutex_lock(&nvme_tcp_ctrl_mutex);
2247 list_del(&ctrl->list);
2248 mutex_unlock(&nvme_tcp_ctrl_mutex);
2249
2250 nvmf_free_options(nctrl->opts);
2251free_ctrl:
2252 kfree(ctrl->queues);
2253 kfree(ctrl);
2254}
2255
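/*
 * The next three helpers fill in the SGL descriptor of the command
 * capsule: a NULL descriptor for commands that carry no data, an
 * offset-based descriptor for data sent inline in the command capsule,
 * and a transport descriptor for data carried in separate data PDUs.
 */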
2256static void nvme_tcp_set_sg_null(struct nvme_command *c)
2257{
2258 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2259
2260 sg->addr = 0;
2261 sg->length = 0;
2262 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2263 NVME_SGL_FMT_TRANSPORT_A;
2264}
2265
2266static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
2267 struct nvme_command *c, u32 data_len)
2268{
2269 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2270
2271 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
2272 sg->length = cpu_to_le32(data_len);
2273 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
2274}
2275
2276static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
2277 u32 data_len)
2278{
2279 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2280
2281 sg->addr = 0;
2282 sg->length = cpu_to_le32(data_len);
2283 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2284 NVME_SGL_FMT_TRANSPORT_A;
2285}
2286
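/*
 * Build and queue the Asynchronous Event Request on the admin queue.
 * AERs do not come through the block layer, so the command PDU is
 * constructed by hand in the pre-allocated async_req, with a NULL data
 * SGL and the reserved AER command id.
 */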
2287static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
2288{
2289 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
2290 struct nvme_tcp_queue *queue = &ctrl->queues[0];
2291 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
2292 struct nvme_command *cmd = &pdu->cmd;
2293 u8 hdgst = nvme_tcp_hdgst_len(queue);
2294
2295 memset(pdu, 0, sizeof(*pdu));
2296 pdu->hdr.type = nvme_tcp_cmd;
2297 if (queue->hdr_digest)
2298 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2299 pdu->hdr.hlen = sizeof(*pdu);
2300 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
2301
2302 cmd->common.opcode = nvme_admin_async_event;
2303 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
2304 cmd->common.flags |= NVME_CMD_SGL_METABUF;
2305 nvme_tcp_set_sg_null(cmd);
2306
2307 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
2308 ctrl->async_req.offset = 0;
2309 ctrl->async_req.curr_bio = NULL;
2310 ctrl->async_req.data_len = 0;
2311
2312 nvme_tcp_queue_request(&ctrl->async_req, true, true);
2313}
2314
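/*
 * Complete a timed-out request directly: stop the queue it was issued
 * on so the normal receive path cannot race with the completion, then
 * finish the request with a host-aborted status if it has not been
 * completed already.
 */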
2315static void nvme_tcp_complete_timed_out(struct request *rq)
2316{
2317 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2318 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2319
2320 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
2321 if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
2322 nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
2323 blk_mq_complete_request(rq);
2324 }
2325}
2326
2327static enum blk_eh_timer_return
2328nvme_tcp_timeout(struct request *rq, bool reserved)
2329{
2330 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2331 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2332 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2333
2334 dev_warn(ctrl->device,
2335 "queue %d: timeout request %#x type %d\n",
2336 nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
2337
2338 if (ctrl->state != NVME_CTRL_LIVE) {
2339 /*
2340 * If we are resetting, connecting or deleting, complete the
2341 * request immediately, because it may block the controller
2342 * teardown or setup sequence:
2343 * - ctrl disable/shutdown fabrics requests
2344 * - connect requests
2345 * - initialization admin requests
2346 * - I/O requests that entered after unquiescing and
2347 * the controller stopped responding
2348 *
2349 * All other requests should be cancelled by the error
2350 * recovery work, so it's fine to fail this one here.
2351 */
2352 nvme_tcp_complete_timed_out(rq);
2353 return BLK_EH_DONE;
2354 }
2355
2356 /*
2357 * LIVE state should trigger the normal error recovery which will
2358 * handle completing this request.
2359 */
2360 nvme_tcp_error_recovery(ctrl);
2361 return BLK_EH_RESET_TIMER;
2362}
2363
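/*
 * Pick the SGL type for a request: NULL when there is no data, inline
 * for writes small enough to fit in the capsule's in-capsule data area,
 * and a host-data transport SGL for everything else.
 */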
2364static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
2365 struct request *rq)
2366{
2367 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2368 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2369 struct nvme_command *c = &pdu->cmd;
2370
2371 c->common.flags |= NVME_CMD_SGL_METABUF;
2372
2373 if (!blk_rq_nr_phys_segments(rq))
2374 nvme_tcp_set_sg_null(c);
2375 else if (rq_data_dir(rq) == WRITE &&
2376 req->data_len <= nvme_tcp_inline_data_size(queue))
2377 nvme_tcp_set_sg_inline(queue, c, req->data_len);
2378 else
2379 nvme_tcp_set_sg_host_data(c, req->data_len);
2380
2381 return 0;
2382}
2383
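/*
 * Turn a block layer request into an NVMe/TCP command PDU: reset the
 * per-request send state, set up the data iterator, fill in the PDU
 * header (header/data digest flags, pdo and plen) and map the data SGL.
 */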
2384static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
2385 struct request *rq)
2386{
2387 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2388 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2389 struct nvme_tcp_queue *queue = req->queue;
2390 u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
2391 blk_status_t ret;
2392
2393 ret = nvme_setup_cmd(ns, rq);
2394 if (ret)
2395 return ret;
2396
2397 req->state = NVME_TCP_SEND_CMD_PDU;
2398 req->status = cpu_to_le16(NVME_SC_SUCCESS);
2399 req->offset = 0;
2400 req->data_sent = 0;
2401 req->pdu_len = 0;
2402 req->pdu_sent = 0;
2403 req->h2cdata_left = 0;
2404 req->data_len = blk_rq_nr_phys_segments(rq) ?
2405 blk_rq_payload_bytes(rq) : 0;
2406 req->curr_bio = rq->bio;
2407 if (req->curr_bio && req->data_len)
2408 nvme_tcp_init_iter(req, rq_data_dir(rq));
2409
2410 if (rq_data_dir(rq) == WRITE &&
2411 req->data_len <= nvme_tcp_inline_data_size(queue))
2412 req->pdu_len = req->data_len;
2413
2414 pdu->hdr.type = nvme_tcp_cmd;
2415 pdu->hdr.flags = 0;
2416 if (queue->hdr_digest)
2417 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2418 if (queue->data_digest && req->pdu_len) {
2419 pdu->hdr.flags |= NVME_TCP_F_DDGST;
2420 ddgst = nvme_tcp_ddgst_len(queue);
2421 }
2422 pdu->hdr.hlen = sizeof(*pdu);
2423 pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
2424 pdu->hdr.plen =
2425 cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
2426
2427 ret = nvme_tcp_map_data(queue, rq);
2428 if (unlikely(ret)) {
2429 nvme_cleanup_cmd(rq);
2430 dev_err(queue->ctrl->ctrl.device,
2431 "Failed to map data (%d)\n", ret);
2432 return ret;
2433 }
2434
2435 return 0;
2436}
2437
2438static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
2439{
2440 struct nvme_tcp_queue *queue = hctx->driver_data;
2441
2442 if (!llist_empty(&queue->req_list))
2443 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
2444}
2445
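/*
 * blk-mq .queue_rq handler: fail fast if the queue is not live and the
 * request must not be accepted in the current controller state,
 * otherwise build the command PDU, start the request and hand it to the
 * queue's send path.
 */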
2446static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
2447 const struct blk_mq_queue_data *bd)
2448{
2449 struct nvme_ns *ns = hctx->queue->queuedata;
2450 struct nvme_tcp_queue *queue = hctx->driver_data;
2451 struct request *rq = bd->rq;
2452 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2453 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
2454 blk_status_t ret;
2455
2456 if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2457 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
2458
2459 ret = nvme_tcp_setup_cmd_pdu(ns, rq);
2460 if (unlikely(ret))
2461 return ret;
2462
2463 blk_mq_start_request(rq);
2464
2465 nvme_tcp_queue_request(req, true, bd->last);
2466
2467 return BLK_STS_OK;
2468}
2469
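/*
 * Map blk-mq hardware contexts onto the TCP queues. With dedicated
 * write queues, reads are mapped after the default (write) queue range;
 * otherwise reads and writes share the same range. Poll queues, if any
 * were allocated, are mapped after the default and read queues.
 */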
2470static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
2471{
2472 struct nvme_tcp_ctrl *ctrl = set->driver_data;
2473 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2474
2475 if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
2476 /* separate read/write queues */
2477 set->map[HCTX_TYPE_DEFAULT].nr_queues =
2478 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2479 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2480 set->map[HCTX_TYPE_READ].nr_queues =
2481 ctrl->io_queues[HCTX_TYPE_READ];
2482 set->map[HCTX_TYPE_READ].queue_offset =
2483 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2484 } else {
2485 /* shared read/write queues */
2486 set->map[HCTX_TYPE_DEFAULT].nr_queues =
2487 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2488 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2489 set->map[HCTX_TYPE_READ].nr_queues =
2490 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2491 set->map[HCTX_TYPE_READ].queue_offset = 0;
2492 }
2493 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
2494 blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
2495
2496 if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
2497 /* map dedicated poll queues only if we have queues left */
2498 set->map[HCTX_TYPE_POLL].nr_queues =
2499 ctrl->io_queues[HCTX_TYPE_POLL];
2500 set->map[HCTX_TYPE_POLL].queue_offset =
2501 ctrl->io_queues[HCTX_TYPE_DEFAULT] +
2502 ctrl->io_queues[HCTX_TYPE_READ];
2503 blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
2504 }
2505
2506 dev_info(ctrl->ctrl.device,
2507 "mapped %d/%d/%d default/read/poll queues.\n",
2508 ctrl->io_queues[HCTX_TYPE_DEFAULT],
2509 ctrl->io_queues[HCTX_TYPE_READ],
2510 ctrl->io_queues[HCTX_TYPE_POLL]);
2511
2512 return 0;
2513}
2514
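/*
 * blk-mq poll handler for polled I/O queues: busy-poll the socket while
 * the receive queue is empty, then reap completions directly from the
 * receive path and report how many CQEs were consumed.
 */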
2515static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
2516{
2517 struct nvme_tcp_queue *queue = hctx->driver_data;
2518 struct sock *sk = queue->sock->sk;
2519
2520 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
2521 return 0;
2522
2523 set_bit(NVME_TCP_Q_POLLING, &queue->flags);
2524 if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
2525 sk_busy_loop(sk, true);
2526 nvme_tcp_try_recv(queue);
2527 clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
2528 return queue->nr_cqe;
2529}
2530
2531static const struct blk_mq_ops nvme_tcp_mq_ops = {
2532 .queue_rq = nvme_tcp_queue_rq,
2533 .commit_rqs = nvme_tcp_commit_rqs,
2534 .complete = nvme_complete_rq,
2535 .init_request = nvme_tcp_init_request,
2536 .exit_request = nvme_tcp_exit_request,
2537 .init_hctx = nvme_tcp_init_hctx,
2538 .timeout = nvme_tcp_timeout,
2539 .map_queues = nvme_tcp_map_queues,
2540 .poll = nvme_tcp_poll,
2541};
2542
2543static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
2544 .queue_rq = nvme_tcp_queue_rq,
2545 .complete = nvme_complete_rq,
2546 .init_request = nvme_tcp_init_request,
2547 .exit_request = nvme_tcp_exit_request,
2548 .init_hctx = nvme_tcp_init_admin_hctx,
2549 .timeout = nvme_tcp_timeout,
2550};
2551
2552static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
2553 .name = "tcp",
2554 .module = THIS_MODULE,
2555 .flags = NVME_F_FABRICS,
2556 .reg_read32 = nvmf_reg_read32,
2557 .reg_read64 = nvmf_reg_read64,
2558 .reg_write32 = nvmf_reg_write32,
2559 .free_ctrl = nvme_tcp_free_ctrl,
2560 .submit_async_event = nvme_tcp_submit_async_event,
2561 .delete_ctrl = nvme_tcp_delete_ctrl,
2562 .get_address = nvmf_get_address,
2563};
2564
2565static bool
2566nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
2567{
2568 struct nvme_tcp_ctrl *ctrl;
2569 bool found = false;
2570
2571 mutex_lock(&nvme_tcp_ctrl_mutex);
2572 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
2573 found = nvmf_ip_options_match(&ctrl->ctrl, opts);
2574 if (found)
2575 break;
2576 }
2577 mutex_unlock(&nvme_tcp_ctrl_mutex);
2578
2579 return found;
2580}
2581
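/*
 * Create a controller instance for a fabrics connect request: resolve
 * the target (and optional source) address, reject duplicate
 * connections unless explicitly allowed, allocate the per-queue array,
 * register the controller with the core and perform the initial
 * nvme_tcp_setup_ctrl() bring-up.
 */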
2582static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
2583 struct nvmf_ctrl_options *opts)
2584{
2585 struct nvme_tcp_ctrl *ctrl;
2586 int ret;
2587
2588 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2589 if (!ctrl)
2590 return ERR_PTR(-ENOMEM);
2591
2592 INIT_LIST_HEAD(&ctrl->list);
2593 ctrl->ctrl.opts = opts;
2594 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
2595 opts->nr_poll_queues + 1;
2596 ctrl->ctrl.sqsize = opts->queue_size - 1;
2597 ctrl->ctrl.kato = opts->kato;
2598
2599 INIT_DELAYED_WORK(&ctrl->connect_work,
2600 nvme_tcp_reconnect_ctrl_work);
2601 INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
2602 INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
2603
2604 if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2605 opts->trsvcid =
2606 kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
2607 if (!opts->trsvcid) {
2608 ret = -ENOMEM;
2609 goto out_free_ctrl;
2610 }
2611 opts->mask |= NVMF_OPT_TRSVCID;
2612 }
2613
2614 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2615 opts->traddr, opts->trsvcid, &ctrl->addr);
2616 if (ret) {
2617 pr_err("malformed address passed: %s:%s\n",
2618 opts->traddr, opts->trsvcid);
2619 goto out_free_ctrl;
2620 }
2621
2622 if (opts->mask & NVMF_OPT_HOST_TRADDR) {
2623 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2624 opts->host_traddr, NULL, &ctrl->src_addr);
2625 if (ret) {
2626 pr_err("malformed src address passed: %s\n",
2627 opts->host_traddr);
2628 goto out_free_ctrl;
2629 }
2630 }
2631
2632 if (opts->mask & NVMF_OPT_HOST_IFACE) {
2633 if (!__dev_get_by_name(&init_net, opts->host_iface)) {
2634 pr_err("invalid interface passed: %s\n",
2635 opts->host_iface);
2636 ret = -ENODEV;
2637 goto out_free_ctrl;
2638 }
2639 }
2640
2641 if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
2642 ret = -EALREADY;
2643 goto out_free_ctrl;
2644 }
2645
2646 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
2647 GFP_KERNEL);
2648 if (!ctrl->queues) {
2649 ret = -ENOMEM;
2650 goto out_free_ctrl;
2651 }
2652
2653 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
2654 if (ret)
2655 goto out_kfree_queues;
2656
2657 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
2658 WARN_ON_ONCE(1);
2659 ret = -EINTR;
2660 goto out_uninit_ctrl;
2661 }
2662
2663 ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
2664 if (ret)
2665 goto out_uninit_ctrl;
2666
2667 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
2668 nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
2669
2670 mutex_lock(&nvme_tcp_ctrl_mutex);
2671 list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
2672 mutex_unlock(&nvme_tcp_ctrl_mutex);
2673
2674 return &ctrl->ctrl;
2675
2676out_uninit_ctrl:
2677 nvme_uninit_ctrl(&ctrl->ctrl);
2678 nvme_put_ctrl(&ctrl->ctrl);
2679 if (ret > 0)
2680 ret = -EIO;
2681 return ERR_PTR(ret);
2682out_kfree_queues:
2683 kfree(ctrl->queues);
2684out_free_ctrl:
2685 kfree(ctrl);
2686 return ERR_PTR(ret);
2687}
2688
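/*
 * Transport definition registered with the fabrics layer; the target
 * address is the only required option. As an illustration only (the
 * address, port and NQN below are placeholders), a host typically
 * connects with nvme-cli:
 *
 *   nvme connect -t tcp -a 192.168.0.10 -s 4420 -n nqn.2018-01.example:subsys1
 *
 * If -s (trsvcid) is omitted, nvme_tcp_create_ctrl() above falls back
 * to NVME_TCP_DISC_PORT.
 */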
2689static struct nvmf_transport_ops nvme_tcp_transport = {
2690 .name = "tcp",
2691 .module = THIS_MODULE,
2692 .required_opts = NVMF_OPT_TRADDR,
2693 .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
2694 NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
2695 NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
2696 NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
2697 NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE,
2698 .create_ctrl = nvme_tcp_create_ctrl,
2699};
2700
2701static int __init nvme_tcp_init_module(void)
2702{
2703 nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
2704 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2705 if (!nvme_tcp_wq)
2706 return -ENOMEM;
2707
2708 nvmf_register_transport(&nvme_tcp_transport);
2709 return 0;
2710}
2711
2712static void __exit nvme_tcp_cleanup_module(void)
2713{
2714 struct nvme_tcp_ctrl *ctrl;
2715
2716 nvmf_unregister_transport(&nvme_tcp_transport);
2717
2718 mutex_lock(&nvme_tcp_ctrl_mutex);
2719 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
2720 nvme_delete_ctrl(&ctrl->ctrl);
2721 mutex_unlock(&nvme_tcp_ctrl_mutex);
2722 flush_workqueue(nvme_delete_wq);
2723
2724 destroy_workqueue(nvme_tcp_wq);
2725}
2726
2727module_init(nvme_tcp_init_module);
2728module_exit(nvme_tcp_cleanup_module);
2729
2730MODULE_LICENSE("GPL v2");