Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

nvme-tcp: split nvme_tcp_alloc_tagset

Split nvme_tcp_alloc_tagset into one helper for the admin tag_set and
one for the I/O tag set.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Christoph Hellwig; committed by Jens Axboe.
2f7a7e5d a7f7b711

+41 -41
drivers/nvme/host/tcp.c
@@ -1687,45 +1687,49 @@
 	return ret;
 }
 
-static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
-		bool admin)
+static int nvme_tcp_alloc_admin_tag_set(struct nvme_ctrl *nctrl)
 {
 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
-	struct blk_mq_tag_set *set;
+	struct blk_mq_tag_set *set = &ctrl->admin_tag_set;
 	int ret;
 
-	if (admin) {
-		set = &ctrl->admin_tag_set;
-		memset(set, 0, sizeof(*set));
-		set->ops = &nvme_tcp_admin_mq_ops;
-		set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
-		set->reserved_tags = NVMF_RESERVED_TAGS;
-		set->numa_node = nctrl->numa_node;
-		set->flags = BLK_MQ_F_BLOCKING;
-		set->cmd_size = sizeof(struct nvme_tcp_request);
-		set->driver_data = ctrl;
-		set->nr_hw_queues = 1;
-		set->timeout = NVME_ADMIN_TIMEOUT;
-	} else {
-		set = &ctrl->tag_set;
-		memset(set, 0, sizeof(*set));
-		set->ops = &nvme_tcp_mq_ops;
-		set->queue_depth = nctrl->sqsize + 1;
-		set->reserved_tags = NVMF_RESERVED_TAGS;
-		set->numa_node = nctrl->numa_node;
-		set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
-		set->cmd_size = sizeof(struct nvme_tcp_request);
-		set->driver_data = ctrl;
-		set->nr_hw_queues = nctrl->queue_count - 1;
-		set->timeout = NVME_IO_TIMEOUT;
-		set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
-	}
-
+	memset(set, 0, sizeof(*set));
+	set->ops = &nvme_tcp_admin_mq_ops;
+	set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
+	set->reserved_tags = NVMF_RESERVED_TAGS;
+	set->numa_node = nctrl->numa_node;
+	set->flags = BLK_MQ_F_BLOCKING;
+	set->cmd_size = sizeof(struct nvme_tcp_request);
+	set->driver_data = ctrl;
+	set->nr_hw_queues = 1;
+	set->timeout = NVME_ADMIN_TIMEOUT;
 	ret = blk_mq_alloc_tag_set(set);
-	if (ret)
-		return ERR_PTR(ret);
+	if (!ret)
+		nctrl->admin_tagset = set;
+	return ret;
+}
 
-	return set;
+static int nvme_tcp_alloc_tag_set(struct nvme_ctrl *nctrl)
+{
+	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+	struct blk_mq_tag_set *set = &ctrl->tag_set;
+	int ret;
+
+	memset(set, 0, sizeof(*set));
+	set->ops = &nvme_tcp_mq_ops;
+	set->queue_depth = nctrl->sqsize + 1;
+	set->reserved_tags = NVMF_RESERVED_TAGS;
+	set->numa_node = nctrl->numa_node;
+	set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
+	set->cmd_size = sizeof(struct nvme_tcp_request);
+	set->driver_data = ctrl;
+	set->nr_hw_queues = nctrl->queue_count - 1;
+	set->timeout = NVME_IO_TIMEOUT;
+	set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
+	ret = blk_mq_alloc_tag_set(set);
+	if (!ret)
+		nctrl->tagset = set;
+	return ret;
 }
 
 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
@@ -1905,11 +1901,9 @@
 		return ret;
 
 	if (new) {
-		ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
-		if (IS_ERR(ctrl->tagset)) {
-			ret = PTR_ERR(ctrl->tagset);
+		ret = nvme_tcp_alloc_tag_set(ctrl);
+		if (ret)
 			goto out_free_io_queues;
-		}
 
 		ret = nvme_ctrl_init_connect_q(ctrl);
 		if (ret)
@@ -1972,12 +1970,9 @@
 		return error;
 
 	if (new) {
-		ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
-		if (IS_ERR(ctrl->admin_tagset)) {
-			error = PTR_ERR(ctrl->admin_tagset);
+		error = nvme_tcp_alloc_admin_tag_set(ctrl);
+		if (error)
 			goto out_free_queue;
-		}
 
 		ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
 		if (IS_ERR(ctrl->fabrics_q)) {