Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Snapshot at tag v4.15-rc1 — 227 lines, 5.5 kB (raw view)
1/* 2 * NVMe I/O command implementation. 3 * Copyright (c) 2015-2016 HGST, a Western Digital Company. 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms and conditions of the GNU General Public License, 7 * version 2, as published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * more details. 13 */ 14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 15#include <linux/blkdev.h> 16#include <linux/module.h> 17#include "nvmet.h" 18 19static void nvmet_bio_done(struct bio *bio) 20{ 21 struct nvmet_req *req = bio->bi_private; 22 23 nvmet_req_complete(req, 24 bio->bi_status ? NVME_SC_INTERNAL | NVME_SC_DNR : 0); 25 26 if (bio != &req->inline_bio) 27 bio_put(bio); 28} 29 30static inline u32 nvmet_rw_len(struct nvmet_req *req) 31{ 32 return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) << 33 req->ns->blksize_shift; 34} 35 36static void nvmet_execute_rw(struct nvmet_req *req) 37{ 38 int sg_cnt = req->sg_cnt; 39 struct bio *bio = &req->inline_bio; 40 struct scatterlist *sg; 41 sector_t sector; 42 blk_qc_t cookie; 43 int op, op_flags = 0, i; 44 45 if (!req->sg_cnt) { 46 nvmet_req_complete(req, 0); 47 return; 48 } 49 50 if (req->cmd->rw.opcode == nvme_cmd_write) { 51 op = REQ_OP_WRITE; 52 op_flags = REQ_SYNC | REQ_IDLE; 53 if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA)) 54 op_flags |= REQ_FUA; 55 } else { 56 op = REQ_OP_READ; 57 } 58 59 sector = le64_to_cpu(req->cmd->rw.slba); 60 sector <<= (req->ns->blksize_shift - 9); 61 62 bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); 63 bio_set_dev(bio, req->ns->bdev); 64 bio->bi_iter.bi_sector = sector; 65 bio->bi_private = req; 66 bio->bi_end_io = nvmet_bio_done; 67 bio_set_op_attrs(bio, op, op_flags); 68 69 for_each_sg(req->sg, sg, req->sg_cnt, i) { 70 while 
(bio_add_page(bio, sg_page(sg), sg->length, sg->offset) 71 != sg->length) { 72 struct bio *prev = bio; 73 74 bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES)); 75 bio_set_dev(bio, req->ns->bdev); 76 bio->bi_iter.bi_sector = sector; 77 bio_set_op_attrs(bio, op, op_flags); 78 79 bio_chain(bio, prev); 80 submit_bio(prev); 81 } 82 83 sector += sg->length >> 9; 84 sg_cnt--; 85 } 86 87 cookie = submit_bio(bio); 88 89 blk_poll(bdev_get_queue(req->ns->bdev), cookie); 90} 91 92static void nvmet_execute_flush(struct nvmet_req *req) 93{ 94 struct bio *bio = &req->inline_bio; 95 96 bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); 97 bio_set_dev(bio, req->ns->bdev); 98 bio->bi_private = req; 99 bio->bi_end_io = nvmet_bio_done; 100 bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 101 102 submit_bio(bio); 103} 104 105static u16 nvmet_discard_range(struct nvmet_ns *ns, 106 struct nvme_dsm_range *range, struct bio **bio) 107{ 108 if (__blkdev_issue_discard(ns->bdev, 109 le64_to_cpu(range->slba) << (ns->blksize_shift - 9), 110 le32_to_cpu(range->nlb) << (ns->blksize_shift - 9), 111 GFP_KERNEL, 0, bio)) 112 return NVME_SC_INTERNAL | NVME_SC_DNR; 113 return 0; 114} 115 116static void nvmet_execute_discard(struct nvmet_req *req) 117{ 118 struct nvme_dsm_range range; 119 struct bio *bio = NULL; 120 int i; 121 u16 status; 122 123 for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) { 124 status = nvmet_copy_from_sgl(req, i * sizeof(range), &range, 125 sizeof(range)); 126 if (status) 127 break; 128 129 status = nvmet_discard_range(req->ns, &range, &bio); 130 if (status) 131 break; 132 } 133 134 if (bio) { 135 bio->bi_private = req; 136 bio->bi_end_io = nvmet_bio_done; 137 if (status) { 138 bio->bi_status = BLK_STS_IOERR; 139 bio_endio(bio); 140 } else { 141 submit_bio(bio); 142 } 143 } else { 144 nvmet_req_complete(req, status); 145 } 146} 147 148static void nvmet_execute_dsm(struct nvmet_req *req) 149{ 150 switch (le32_to_cpu(req->cmd->dsm.attributes)) { 151 case 
NVME_DSMGMT_AD: 152 nvmet_execute_discard(req); 153 return; 154 case NVME_DSMGMT_IDR: 155 case NVME_DSMGMT_IDW: 156 default: 157 /* Not supported yet */ 158 nvmet_req_complete(req, 0); 159 return; 160 } 161} 162 163static void nvmet_execute_write_zeroes(struct nvmet_req *req) 164{ 165 struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes; 166 struct bio *bio = NULL; 167 u16 status = NVME_SC_SUCCESS; 168 sector_t sector; 169 sector_t nr_sector; 170 171 sector = le64_to_cpu(write_zeroes->slba) << 172 (req->ns->blksize_shift - 9); 173 nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length)) << 174 (req->ns->blksize_shift - 9)) + 1; 175 176 if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector, 177 GFP_KERNEL, &bio, 0)) 178 status = NVME_SC_INTERNAL | NVME_SC_DNR; 179 180 if (bio) { 181 bio->bi_private = req; 182 bio->bi_end_io = nvmet_bio_done; 183 submit_bio(bio); 184 } else { 185 nvmet_req_complete(req, status); 186 } 187} 188 189u16 nvmet_parse_io_cmd(struct nvmet_req *req) 190{ 191 struct nvme_command *cmd = req->cmd; 192 u16 ret; 193 194 ret = nvmet_check_ctrl_status(req, cmd); 195 if (unlikely(ret)) { 196 req->ns = NULL; 197 return ret; 198 } 199 200 req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid); 201 if (unlikely(!req->ns)) 202 return NVME_SC_INVALID_NS | NVME_SC_DNR; 203 204 switch (cmd->common.opcode) { 205 case nvme_cmd_read: 206 case nvme_cmd_write: 207 req->execute = nvmet_execute_rw; 208 req->data_len = nvmet_rw_len(req); 209 return 0; 210 case nvme_cmd_flush: 211 req->execute = nvmet_execute_flush; 212 req->data_len = 0; 213 return 0; 214 case nvme_cmd_dsm: 215 req->execute = nvmet_execute_dsm; 216 req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) * 217 sizeof(struct nvme_dsm_range); 218 return 0; 219 case nvme_cmd_write_zeroes: 220 req->execute = nvmet_execute_write_zeroes; 221 return 0; 222 default: 223 pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode, 224 req->sq->qid); 225 return NVME_SC_INVALID_OPCODE | 
NVME_SC_DNR; 226 } 227}