Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.10-rc3 · 2472 lines · 67 kB
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

/*
 * Refer to the SCSI-NVMe Translation spec for details on how
 * each command is translated.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/unaligned.h>
#include <scsi/sg.h>
#include <scsi/scsi.h>

#include "nvme.h"

static int sg_version_num = 30534;	/* 2 digits for each component */

/* VPD Page Codes */
#define VPD_SUPPORTED_PAGES			0x00
#define VPD_SERIAL_NUMBER			0x80
#define VPD_DEVICE_IDENTIFIERS			0x83
#define VPD_EXTENDED_INQUIRY			0x86
#define VPD_BLOCK_LIMITS			0xB0
#define VPD_BLOCK_DEV_CHARACTERISTICS		0xB1

/* format unit parameter list offsets */
#define FORMAT_UNIT_SHORT_PARM_LIST_LEN		4
#define FORMAT_UNIT_LONG_PARM_LIST_LEN		8
#define FORMAT_UNIT_PROT_INT_OFFSET		3
#define FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET	0
#define FORMAT_UNIT_PROT_FIELD_USAGE_MASK	0x07

/* Misc. defines */
#define FIXED_SENSE_DATA			0x70
#define DESC_FORMAT_SENSE_DATA			0x72
#define FIXED_SENSE_DATA_ADD_LENGTH		10
#define LUN_ENTRY_SIZE				8
#define LUN_DATA_HEADER_SIZE			8
#define ALL_LUNS_RETURNED			0x02
#define ALL_WELL_KNOWN_LUNS_RETURNED		0x01
#define RESTRICTED_LUNS_RETURNED		0x00
#define DOWNLOAD_SAVE_ACTIVATE			0x05
#define DOWNLOAD_SAVE_DEFER_ACTIVATE		0x0E
#define ACTIVATE_DEFERRED_MICROCODE		0x0F
#define FORMAT_UNIT_IMMED_MASK			0x2
#define FORMAT_UNIT_IMMED_OFFSET		1
#define KELVIN_TEMP_FACTOR			273
#define FIXED_FMT_SENSE_DATA_SIZE		18
#define DESC_FMT_SENSE_DATA_SIZE		8

/* SCSI/NVMe defines and bit masks */
#define INQ_STANDARD_INQUIRY_PAGE		0x00
#define INQ_SUPPORTED_VPD_PAGES_PAGE		0x00
#define INQ_UNIT_SERIAL_NUMBER_PAGE		0x80
#define INQ_DEVICE_IDENTIFICATION_PAGE		0x83
#define INQ_EXTENDED_INQUIRY_DATA_PAGE		0x86
#define INQ_BDEV_LIMITS_PAGE			0xB0
#define INQ_BDEV_CHARACTERISTICS_PAGE		0xB1
#define INQ_SERIAL_NUMBER_LENGTH		0x14
#define INQ_NUM_SUPPORTED_VPD_PAGES		6
#define VERSION_SPC_4				0x06
#define ACA_UNSUPPORTED				0
#define STANDARD_INQUIRY_LENGTH			36
#define ADDITIONAL_STD_INQ_LENGTH		31
#define EXTENDED_INQUIRY_DATA_PAGE_LENGTH	0x3C
#define RESERVED_FIELD				0

/* Mode Sense/Select defines */
#define MODE_PAGE_INFO_EXCEP			0x1C
#define MODE_PAGE_CACHING			0x08
#define MODE_PAGE_CONTROL			0x0A
#define MODE_PAGE_POWER_CONDITION		0x1A
#define MODE_PAGE_RETURN_ALL			0x3F
#define MODE_PAGE_BLK_DES_LEN			0x08
#define MODE_PAGE_LLBAA_BLK_DES_LEN		0x10
#define MODE_PAGE_CACHING_LEN			0x14
#define MODE_PAGE_CONTROL_LEN			0x0C
#define MODE_PAGE_POW_CND_LEN			0x28
#define MODE_PAGE_INF_EXC_LEN			0x0C
#define MODE_PAGE_ALL_LEN			0x54
#define MODE_SENSE6_MPH_SIZE			4
#define MODE_SENSE_PAGE_CONTROL_MASK		0xC0
#define MODE_SENSE_PAGE_CODE_OFFSET		2
#define MODE_SENSE_PAGE_CODE_MASK		0x3F
#define MODE_SENSE_LLBAA_MASK			0x10
#define MODE_SENSE_LLBAA_SHIFT			4
#define MODE_SENSE_DBD_MASK			8
#define MODE_SENSE_DBD_SHIFT			3
#define MODE_SENSE10_MPH_SIZE			8
#define MODE_SELECT_CDB_PAGE_FORMAT_MASK	0x10
#define MODE_SELECT_CDB_SAVE_PAGES_MASK		0x1
#define MODE_SELECT_6_BD_OFFSET			3
#define MODE_SELECT_10_BD_OFFSET		6
#define MODE_SELECT_10_LLBAA_OFFSET		4
#define MODE_SELECT_10_LLBAA_MASK		1
#define MODE_SELECT_6_MPH_SIZE			4
#define MODE_SELECT_10_MPH_SIZE			8
#define CACHING_MODE_PAGE_WCE_MASK		0x04
#define MODE_SENSE_BLK_DESC_ENABLED		0
#define MODE_SENSE_BLK_DESC_COUNT		1
#define MODE_SELECT_PAGE_CODE_MASK		0x3F
#define SHORT_DESC_BLOCK			8
#define LONG_DESC_BLOCK				16
#define MODE_PAGE_POW_CND_LEN_FIELD		0x26
#define MODE_PAGE_INF_EXC_LEN_FIELD		0x0A
#define MODE_PAGE_CACHING_LEN_FIELD		0x12
#define MODE_PAGE_CONTROL_LEN_FIELD		0x0A
#define MODE_SENSE_PC_CURRENT_VALUES		0

/* Log Sense defines */
#define LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE	0x00
#define LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH	0x07
#define LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE	0x2F
#define LOG_PAGE_TEMPERATURE_PAGE		0x0D
#define LOG_SENSE_CDB_SP_NOT_ENABLED		0
#define LOG_SENSE_CDB_PC_MASK			0xC0
#define LOG_SENSE_CDB_PC_SHIFT			6
#define LOG_SENSE_CDB_PC_CUMULATIVE_VALUES	1
#define LOG_SENSE_CDB_PAGE_CODE_MASK		0x3F
#define REMAINING_INFO_EXCP_PAGE_LENGTH		0x8
#define LOG_INFO_EXCP_PAGE_LENGTH		0xC
#define REMAINING_TEMP_PAGE_LENGTH		0xC
#define LOG_TEMP_PAGE_LENGTH			0x10
#define LOG_TEMP_UNKNOWN			0xFF
#define SUPPORTED_LOG_PAGES_PAGE_LENGTH		0x3

/* Read Capacity defines */
#define READ_CAP_10_RESP_SIZE			8
#define READ_CAP_16_RESP_SIZE			32

/* NVMe Namespace and Command Defines */
#define BYTES_TO_DWORDS				4
#define NVME_MAX_FIRMWARE_SLOT			7

/* Report LUNs defines */
#define REPORT_LUNS_FIRST_LUN_OFFSET		8

/* SCSI ADDITIONAL SENSE Codes */

#define SCSI_ASC_NO_SENSE			0x00
#define SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT	0x03
#define SCSI_ASC_LUN_NOT_READY			0x04
#define SCSI_ASC_WARNING			0x0B
#define SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED	0x10
#define SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED	0x10
#define SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED	0x10
#define SCSI_ASC_UNRECOVERED_READ_ERROR		0x11
#define SCSI_ASC_MISCOMPARE_DURING_VERIFY	0x1D
#define SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID	0x20
#define SCSI_ASC_ILLEGAL_COMMAND		0x20
#define SCSI_ASC_ILLEGAL_BLOCK			0x21
#define SCSI_ASC_INVALID_CDB			0x24
#define SCSI_ASC_INVALID_LUN			0x25
#define SCSI_ASC_INVALID_PARAMETER		0x26
#define SCSI_ASC_FORMAT_COMMAND_FAILED		0x31
#define SCSI_ASC_INTERNAL_TARGET_FAILURE	0x44

/* SCSI ADDITIONAL SENSE Code Qualifiers */

#define SCSI_ASCQ_CAUSE_NOT_REPORTABLE		0x00
#define SCSI_ASCQ_FORMAT_COMMAND_FAILED		0x01
#define SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED	0x01
#define SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED	0x02
#define SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED	0x03
#define SCSI_ASCQ_FORMAT_IN_PROGRESS		0x04
#define SCSI_ASCQ_POWER_LOSS_EXPECTED		0x08
#define SCSI_ASCQ_INVALID_LUN_ID		0x09

/* copied from drivers/usb/gadget/function/storage_common.h */
static inline u32 get_unaligned_be24(u8 *buf)
{
	return 0xffffff & (u32) get_unaligned_be32(buf - 1);
}

/* Struct to gather data that needs to be extracted from a SCSI CDB.
   Not conforming to any particular CDB variant, but compatible with all. */

struct nvme_trans_io_cdb {
	u8 fua;
	u8 prot_info;
	u64 lba;
	u32 xfer_len;
};


/* Internal Helper Functions */


/* Copy data to userspace memory */

static int nvme_trans_copy_to_user(struct sg_io_hdr *hdr, void *from,
					unsigned long n)
{
	int i;
	void *index = from;
	size_t remaining = n;
	size_t xfer_len;

	if (hdr->iovec_count > 0) {
		struct sg_iovec sgl;

		for (i = 0; i < hdr->iovec_count; i++) {
			if (copy_from_user(&sgl, hdr->dxferp +
					i * sizeof(struct sg_iovec),
					sizeof(struct sg_iovec)))
				return -EFAULT;
			xfer_len = min(remaining, sgl.iov_len);
			if (copy_to_user(sgl.iov_base, index, xfer_len))
				return -EFAULT;

			index += xfer_len;
			remaining -= xfer_len;
			if (remaining == 0)
				break;
		}
		return 0;
	}

	if (copy_to_user(hdr->dxferp, from, n))
		return -EFAULT;
	return 0;
}

/* Copy data from userspace memory */

static int nvme_trans_copy_from_user(struct sg_io_hdr *hdr, void *to,
					unsigned long n)
{
	int i;
	void *index = to;
	size_t remaining = n;
	size_t xfer_len;

	if (hdr->iovec_count > 0) {
		struct sg_iovec sgl;

		for (i = 0; i < hdr->iovec_count; i++) {
			if (copy_from_user(&sgl, hdr->dxferp +
					i * sizeof(struct sg_iovec),
					sizeof(struct sg_iovec)))
				return -EFAULT;
			xfer_len = min(remaining, sgl.iov_len);
			if (copy_from_user(index, sgl.iov_base, xfer_len))
				return -EFAULT;
			index += xfer_len;
			remaining -= xfer_len;
			if (remaining == 0)
				break;
		}
		return 0;
	}

	if (copy_from_user(to, hdr->dxferp, n))
		return -EFAULT;
	return 0;
}

/* Status/Sense Buffer Writeback */

static int nvme_trans_completion(struct sg_io_hdr *hdr, u8 status, u8 sense_key,
				u8 asc, u8 ascq)
{
	u8 xfer_len;
	u8 resp[DESC_FMT_SENSE_DATA_SIZE];

	if (scsi_status_is_good(status)) {
		hdr->status = SAM_STAT_GOOD;
		hdr->masked_status = GOOD;
		hdr->host_status = DID_OK;
		hdr->driver_status = DRIVER_OK;
		hdr->sb_len_wr = 0;
	} else {
		hdr->status = status;
		hdr->masked_status = status >> 1;
		hdr->host_status = DID_OK;
		hdr->driver_status = DRIVER_OK;

		memset(resp, 0, DESC_FMT_SENSE_DATA_SIZE);
		resp[0] = DESC_FORMAT_SENSE_DATA;
		resp[1] = sense_key;
		resp[2] = asc;
		resp[3] = ascq;

		xfer_len = min_t(u8, hdr->mx_sb_len, DESC_FMT_SENSE_DATA_SIZE);
		hdr->sb_len_wr = xfer_len;
		if (copy_to_user(hdr->sbp, resp, xfer_len) > 0)
			return -EFAULT;
	}

	return 0;
}

/*
 * Take a status code from a lowlevel routine, and if it was a positive NVMe
 * error code update the sense data based on it. In either case the passed
 * in value is returned again, unless an -EFAULT from copy_to_user overrides
 * it.
 */
static int nvme_trans_status_code(struct sg_io_hdr *hdr, int nvme_sc)
{
	u8 status, sense_key, asc, ascq;
	int res;

	/* For non-nvme (Linux) errors, simply return the error code */
	if (nvme_sc < 0)
		return nvme_sc;

	/* Mask DNR, More, and reserved fields */
	switch (nvme_sc & 0x7FF) {
	/* Generic Command Status */
	case NVME_SC_SUCCESS:
		status = SAM_STAT_GOOD;
		sense_key = NO_SENSE;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_INVALID_OPCODE:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_ILLEGAL_COMMAND;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_INVALID_FIELD:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_INVALID_CDB;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_DATA_XFER_ERROR:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_POWER_LOSS:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_WARNING;
		ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
		break;
	case NVME_SC_INTERNAL:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = HARDWARE_ERROR;
		asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_ABORT_REQ:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_ABORT_QUEUE:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_FUSED_FAIL:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_FUSED_MISSING:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_INVALID_NS:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
		ascq = SCSI_ASCQ_INVALID_LUN_ID;
		break;
	case NVME_SC_LBA_RANGE:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_ILLEGAL_BLOCK;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_CAP_EXCEEDED:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_NS_NOT_READY:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = NOT_READY;
		asc = SCSI_ASC_LUN_NOT_READY;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;

	/* Command Specific Status */
	case NVME_SC_INVALID_FORMAT:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
		ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
		break;
	case NVME_SC_BAD_ATTRIBUTES:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_INVALID_CDB;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;

	/* Media Errors */
	case NVME_SC_WRITE_FAULT:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_READ_ERROR:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_GUARD_CHECK:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
		ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
		break;
	case NVME_SC_APPTAG_CHECK:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
		ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
		break;
	case NVME_SC_REFTAG_CHECK:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
		ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
		break;
	case NVME_SC_COMPARE_FAILED:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MISCOMPARE;
		asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_ACCESS_DENIED:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
		ascq = SCSI_ASCQ_INVALID_LUN_ID;
		break;

	/* Unspecified/Default */
	case NVME_SC_CMDID_CONFLICT:
	case NVME_SC_CMD_SEQ_ERROR:
	case NVME_SC_CQ_INVALID:
	case NVME_SC_QID_INVALID:
	case NVME_SC_QUEUE_SIZE:
	case NVME_SC_ABORT_LIMIT:
	case NVME_SC_ABORT_MISSING:
	case NVME_SC_ASYNC_LIMIT:
	case NVME_SC_FIRMWARE_SLOT:
	case NVME_SC_FIRMWARE_IMAGE:
	case NVME_SC_INVALID_VECTOR:
	case NVME_SC_INVALID_LOG_PAGE:
	default:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	}

	res = nvme_trans_completion(hdr, status, sense_key, asc, ascq);
	return res ?
res : nvme_sc; 510} 511 512/* INQUIRY Helper Functions */ 513 514static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns, 515 struct sg_io_hdr *hdr, u8 *inq_response, 516 int alloc_len) 517{ 518 struct nvme_ctrl *ctrl = ns->ctrl; 519 struct nvme_id_ns *id_ns; 520 int res; 521 int nvme_sc; 522 int xfer_len; 523 u8 resp_data_format = 0x02; 524 u8 protect; 525 u8 cmdque = 0x01 << 1; 526 u8 fw_offset = sizeof(ctrl->firmware_rev); 527 528 /* nvme ns identify - use DPS value for PROTECT field */ 529 nvme_sc = nvme_identify_ns(ctrl, ns->ns_id, &id_ns); 530 res = nvme_trans_status_code(hdr, nvme_sc); 531 if (res) 532 return res; 533 534 if (id_ns->dps) 535 protect = 0x01; 536 else 537 protect = 0; 538 kfree(id_ns); 539 540 memset(inq_response, 0, STANDARD_INQUIRY_LENGTH); 541 inq_response[2] = VERSION_SPC_4; 542 inq_response[3] = resp_data_format; /*normaca=0 | hisup=0 */ 543 inq_response[4] = ADDITIONAL_STD_INQ_LENGTH; 544 inq_response[5] = protect; /* sccs=0 | acc=0 | tpgs=0 | pc3=0 */ 545 inq_response[7] = cmdque; /* wbus16=0 | sync=0 | vs=0 */ 546 strncpy(&inq_response[8], "NVMe ", 8); 547 strncpy(&inq_response[16], ctrl->model, 16); 548 549 while (ctrl->firmware_rev[fw_offset - 1] == ' ' && fw_offset > 4) 550 fw_offset--; 551 fw_offset -= 4; 552 strncpy(&inq_response[32], ctrl->firmware_rev + fw_offset, 4); 553 554 xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH); 555 return nvme_trans_copy_to_user(hdr, inq_response, xfer_len); 556} 557 558static int nvme_trans_supported_vpd_pages(struct nvme_ns *ns, 559 struct sg_io_hdr *hdr, u8 *inq_response, 560 int alloc_len) 561{ 562 int xfer_len; 563 564 memset(inq_response, 0, STANDARD_INQUIRY_LENGTH); 565 inq_response[1] = INQ_SUPPORTED_VPD_PAGES_PAGE; /* Page Code */ 566 inq_response[3] = INQ_NUM_SUPPORTED_VPD_PAGES; /* Page Length */ 567 inq_response[4] = INQ_SUPPORTED_VPD_PAGES_PAGE; 568 inq_response[5] = INQ_UNIT_SERIAL_NUMBER_PAGE; 569 inq_response[6] = INQ_DEVICE_IDENTIFICATION_PAGE; 570 inq_response[7] = INQ_EXTENDED_INQUIRY_DATA_PAGE; 571 inq_response[8] = INQ_BDEV_CHARACTERISTICS_PAGE; 572 inq_response[9] = INQ_BDEV_LIMITS_PAGE; 573 574 xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH); 575 return nvme_trans_copy_to_user(hdr, inq_response, xfer_len); 576} 577 578static int nvme_trans_unit_serial_page(struct nvme_ns *ns, 579 struct sg_io_hdr *hdr, u8 *inq_response, 580 int alloc_len) 581{ 582 int xfer_len; 583 584 memset(inq_response, 0, STANDARD_INQUIRY_LENGTH); 585 inq_response[1] = INQ_UNIT_SERIAL_NUMBER_PAGE; /* Page Code */ 586 inq_response[3] = INQ_SERIAL_NUMBER_LENGTH; /* Page Length */ 587 strncpy(&inq_response[4], ns->ctrl->serial, INQ_SERIAL_NUMBER_LENGTH); 588 589 xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH); 590 return nvme_trans_copy_to_user(hdr, inq_response, xfer_len); 591} 592 593static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr, 594 u8 *inq_response, int alloc_len) 595{ 596 struct nvme_id_ns *id_ns; 597 int nvme_sc, res; 598 size_t len; 599 void *eui; 600 601 nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns); 602 res = nvme_trans_status_code(hdr, nvme_sc); 603 if (res) 604 return res; 605 606 eui = id_ns->eui64; 607 len = sizeof(id_ns->eui64); 608 609 if (ns->ctrl->vs >= NVME_VS(1, 2, 0)) { 610 if (bitmap_empty(eui, len * 8)) { 611 eui = id_ns->nguid; 612 len = sizeof(id_ns->nguid); 613 } 614 } 615 616 if (bitmap_empty(eui, len * 8)) { 617 res = -EOPNOTSUPP; 618 goto out_free_id; 619 } 620 621 memset(inq_response, 0, alloc_len); 622 inq_response[1] = 
INQ_DEVICE_IDENTIFICATION_PAGE; 623 inq_response[3] = 4 + len; /* Page Length */ 624 625 /* Designation Descriptor start */ 626 inq_response[4] = 0x01; /* Proto ID=0h | Code set=1h */ 627 inq_response[5] = 0x02; /* PIV=0b | Asso=00b | Designator Type=2h */ 628 inq_response[6] = 0x00; /* Rsvd */ 629 inq_response[7] = len; /* Designator Length */ 630 memcpy(&inq_response[8], eui, len); 631 632 res = nvme_trans_copy_to_user(hdr, inq_response, alloc_len); 633out_free_id: 634 kfree(id_ns); 635 return res; 636} 637 638static int nvme_fill_device_id_scsi_string(struct nvme_ns *ns, 639 struct sg_io_hdr *hdr, u8 *inq_response, int alloc_len) 640{ 641 struct nvme_ctrl *ctrl = ns->ctrl; 642 struct nvme_id_ctrl *id_ctrl; 643 int nvme_sc, res; 644 645 if (alloc_len < 72) { 646 return nvme_trans_completion(hdr, 647 SAM_STAT_CHECK_CONDITION, 648 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, 649 SCSI_ASCQ_CAUSE_NOT_REPORTABLE); 650 } 651 652 nvme_sc = nvme_identify_ctrl(ctrl, &id_ctrl); 653 res = nvme_trans_status_code(hdr, nvme_sc); 654 if (res) 655 return res; 656 657 memset(inq_response, 0, alloc_len); 658 inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE; 659 inq_response[3] = 0x48; /* Page Length */ 660 661 /* Designation Descriptor start */ 662 inq_response[4] = 0x03; /* Proto ID=0h | Code set=3h */ 663 inq_response[5] = 0x08; /* PIV=0b | Asso=00b | Designator Type=8h */ 664 inq_response[6] = 0x00; /* Rsvd */ 665 inq_response[7] = 0x44; /* Designator Length */ 666 667 sprintf(&inq_response[8], "%04x", le16_to_cpu(id_ctrl->vid)); 668 memcpy(&inq_response[12], ctrl->model, sizeof(ctrl->model)); 669 sprintf(&inq_response[52], "%04x", cpu_to_be32(ns->ns_id)); 670 memcpy(&inq_response[56], ctrl->serial, sizeof(ctrl->serial)); 671 672 res = nvme_trans_copy_to_user(hdr, inq_response, alloc_len); 673 kfree(id_ctrl); 674 return res; 675} 676 677static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, 678 u8 *resp, int alloc_len) 679{ 680 int res; 681 682 if (ns->ctrl->vs >= NVME_VS(1, 1, 0)) { 683 res = nvme_fill_device_id_eui64(ns, hdr, resp, alloc_len); 684 if (res != -EOPNOTSUPP) 685 return res; 686 } 687 688 return nvme_fill_device_id_scsi_string(ns, hdr, resp, alloc_len); 689} 690 691static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, 692 int alloc_len) 693{ 694 u8 *inq_response; 695 int res; 696 int nvme_sc; 697 struct nvme_ctrl *ctrl = ns->ctrl; 698 struct nvme_id_ctrl *id_ctrl; 699 struct nvme_id_ns *id_ns; 700 int xfer_len; 701 u8 microcode = 0x80; 702 u8 spt; 703 u8 spt_lut[8] = {0, 0, 2, 1, 4, 6, 5, 7}; 704 u8 grd_chk, app_chk, ref_chk, protect; 705 u8 uask_sup = 0x20; 706 u8 v_sup; 707 u8 luiclr = 0x01; 708 709 inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL); 710 if (inq_response == NULL) 711 return -ENOMEM; 712 713 nvme_sc = nvme_identify_ns(ctrl, ns->ns_id, &id_ns); 714 res = nvme_trans_status_code(hdr, nvme_sc); 715 if (res) 716 goto out_free_inq; 717 718 spt = spt_lut[id_ns->dpc & 0x07] << 3; 719 if (id_ns->dps) 720 protect = 0x01; 721 else 722 protect = 0; 723 kfree(id_ns); 724 725 grd_chk = protect << 2; 726 app_chk = protect << 1; 727 ref_chk = protect; 728 729 nvme_sc = nvme_identify_ctrl(ctrl, &id_ctrl); 730 res = nvme_trans_status_code(hdr, nvme_sc); 731 if (res) 732 goto out_free_inq; 733 734 v_sup = id_ctrl->vwc; 735 kfree(id_ctrl); 736 737 memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH); 738 inq_response[1] = INQ_EXTENDED_INQUIRY_DATA_PAGE; /* Page Code */ 739 inq_response[2] = 0x00; /* Page Length MSB */ 740 
inq_response[3] = 0x3C; /* Page Length LSB */ 741 inq_response[4] = microcode | spt | grd_chk | app_chk | ref_chk; 742 inq_response[5] = uask_sup; 743 inq_response[6] = v_sup; 744 inq_response[7] = luiclr; 745 inq_response[8] = 0; 746 inq_response[9] = 0; 747 748 xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH); 749 res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len); 750 751 out_free_inq: 752 kfree(inq_response); 753 return res; 754} 755 756static int nvme_trans_bdev_limits_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, 757 u8 *inq_response, int alloc_len) 758{ 759 __be32 max_sectors = cpu_to_be32( 760 nvme_block_nr(ns, queue_max_hw_sectors(ns->queue))); 761 __be32 max_discard = cpu_to_be32(ns->queue->limits.max_discard_sectors); 762 __be32 discard_desc_count = cpu_to_be32(0x100); 763 764 memset(inq_response, 0, STANDARD_INQUIRY_LENGTH); 765 inq_response[1] = VPD_BLOCK_LIMITS; 766 inq_response[3] = 0x3c; /* Page Length */ 767 memcpy(&inq_response[8], &max_sectors, sizeof(u32)); 768 memcpy(&inq_response[20], &max_discard, sizeof(u32)); 769 770 if (max_discard) 771 memcpy(&inq_response[24], &discard_desc_count, sizeof(u32)); 772 773 return nvme_trans_copy_to_user(hdr, inq_response, 0x3c); 774} 775 776static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, 777 int alloc_len) 778{ 779 u8 *inq_response; 780 int res; 781 int xfer_len; 782 783 inq_response = kzalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL); 784 if (inq_response == NULL) { 785 res = -ENOMEM; 786 goto out_mem; 787 } 788 789 inq_response[1] = INQ_BDEV_CHARACTERISTICS_PAGE; /* Page Code */ 790 inq_response[2] = 0x00; /* Page Length MSB */ 791 inq_response[3] = 0x3C; /* Page Length LSB */ 792 inq_response[4] = 0x00; /* Medium Rotation Rate MSB */ 793 inq_response[5] = 0x01; /* Medium Rotation Rate LSB */ 794 inq_response[6] = 0x00; /* Form Factor */ 795 796 xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH); 797 res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len); 798 799 kfree(inq_response); 800 out_mem: 801 return res; 802} 803 804/* LOG SENSE Helper Functions */ 805 806static int nvme_trans_log_supp_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr, 807 int alloc_len) 808{ 809 int res; 810 int xfer_len; 811 u8 *log_response; 812 813 log_response = kzalloc(LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH, GFP_KERNEL); 814 if (log_response == NULL) { 815 res = -ENOMEM; 816 goto out_mem; 817 } 818 819 log_response[0] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE; 820 /* Subpage=0x00, Page Length MSB=0 */ 821 log_response[3] = SUPPORTED_LOG_PAGES_PAGE_LENGTH; 822 log_response[4] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE; 823 log_response[5] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE; 824 log_response[6] = LOG_PAGE_TEMPERATURE_PAGE; 825 826 xfer_len = min(alloc_len, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH); 827 res = nvme_trans_copy_to_user(hdr, log_response, xfer_len); 828 829 kfree(log_response); 830 out_mem: 831 return res; 832} 833 834static int nvme_trans_log_info_exceptions(struct nvme_ns *ns, 835 struct sg_io_hdr *hdr, int alloc_len) 836{ 837 int res; 838 int xfer_len; 839 u8 *log_response; 840 struct nvme_smart_log *smart_log; 841 u8 temp_c; 842 u16 temp_k; 843 844 log_response = kzalloc(LOG_INFO_EXCP_PAGE_LENGTH, GFP_KERNEL); 845 if (log_response == NULL) 846 return -ENOMEM; 847 848 res = nvme_get_log_page(ns->ctrl, &smart_log); 849 if (res < 0) 850 goto out_free_response; 851 852 if (res != NVME_SC_SUCCESS) { 853 temp_c = LOG_TEMP_UNKNOWN; 854 } else { 855 temp_k = (smart_log->temperature[1] << 
8) + 856 (smart_log->temperature[0]); 857 temp_c = temp_k - KELVIN_TEMP_FACTOR; 858 } 859 kfree(smart_log); 860 861 log_response[0] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE; 862 /* Subpage=0x00, Page Length MSB=0 */ 863 log_response[3] = REMAINING_INFO_EXCP_PAGE_LENGTH; 864 /* Informational Exceptions Log Parameter 1 Start */ 865 /* Parameter Code=0x0000 bytes 4,5 */ 866 log_response[6] = 0x23; /* DU=0, TSD=1, ETC=0, TMC=0, FMT_AND_LNK=11b */ 867 log_response[7] = 0x04; /* PARAMETER LENGTH */ 868 /* Add sense Code and qualifier = 0x00 each */ 869 /* Use Temperature from NVMe Get Log Page, convert to C from K */ 870 log_response[10] = temp_c; 871 872 xfer_len = min(alloc_len, LOG_INFO_EXCP_PAGE_LENGTH); 873 res = nvme_trans_copy_to_user(hdr, log_response, xfer_len); 874 875 out_free_response: 876 kfree(log_response); 877 return res; 878} 879 880static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr, 881 int alloc_len) 882{ 883 int res; 884 int xfer_len; 885 u8 *log_response; 886 struct nvme_smart_log *smart_log; 887 u32 feature_resp; 888 u8 temp_c_cur, temp_c_thresh; 889 u16 temp_k; 890 891 log_response = kzalloc(LOG_TEMP_PAGE_LENGTH, GFP_KERNEL); 892 if (log_response == NULL) 893 return -ENOMEM; 894 895 res = nvme_get_log_page(ns->ctrl, &smart_log); 896 if (res < 0) 897 goto out_free_response; 898 899 if (res != NVME_SC_SUCCESS) { 900 temp_c_cur = LOG_TEMP_UNKNOWN; 901 } else { 902 temp_k = (smart_log->temperature[1] << 8) + 903 (smart_log->temperature[0]); 904 temp_c_cur = temp_k - KELVIN_TEMP_FACTOR; 905 } 906 kfree(smart_log); 907 908 /* Get Features for Temp Threshold */ 909 res = nvme_get_features(ns->ctrl, NVME_FEAT_TEMP_THRESH, 0, NULL, 0, 910 &feature_resp); 911 if (res != NVME_SC_SUCCESS) 912 temp_c_thresh = LOG_TEMP_UNKNOWN; 913 else 914 temp_c_thresh = (feature_resp & 0xFFFF) - KELVIN_TEMP_FACTOR; 915 916 log_response[0] = LOG_PAGE_TEMPERATURE_PAGE; 917 /* Subpage=0x00, Page Length MSB=0 */ 918 log_response[3] = REMAINING_TEMP_PAGE_LENGTH; 919 /* Temperature Log Parameter 1 (Temperature) Start */ 920 /* Parameter Code = 0x0000 */ 921 log_response[6] = 0x01; /* Format and Linking = 01b */ 922 log_response[7] = 0x02; /* Parameter Length */ 923 /* Use Temperature from NVMe Get Log Page, convert to C from K */ 924 log_response[9] = temp_c_cur; 925 /* Temperature Log Parameter 2 (Reference Temperature) Start */ 926 log_response[11] = 0x01; /* Parameter Code = 0x0001 */ 927 log_response[12] = 0x01; /* Format and Linking = 01b */ 928 log_response[13] = 0x02; /* Parameter Length */ 929 /* Use Temperature Thresh from NVMe Get Log Page, convert to C from K */ 930 log_response[15] = temp_c_thresh; 931 932 xfer_len = min(alloc_len, LOG_TEMP_PAGE_LENGTH); 933 res = nvme_trans_copy_to_user(hdr, log_response, xfer_len); 934 935 out_free_response: 936 kfree(log_response); 937 return res; 938} 939 940/* MODE SENSE Helper Functions */ 941 942static int nvme_trans_fill_mode_parm_hdr(u8 *resp, int len, u8 cdb10, u8 llbaa, 943 u16 mode_data_length, u16 blk_desc_len) 944{ 945 /* Quick check to make sure I don't stomp on my own memory... 
*/ 946 if ((cdb10 && len < 8) || (!cdb10 && len < 4)) 947 return -EINVAL; 948 949 if (cdb10) { 950 resp[0] = (mode_data_length & 0xFF00) >> 8; 951 resp[1] = (mode_data_length & 0x00FF); 952 resp[3] = 0x10 /* DPOFUA */; 953 resp[4] = llbaa; 954 resp[5] = RESERVED_FIELD; 955 resp[6] = (blk_desc_len & 0xFF00) >> 8; 956 resp[7] = (blk_desc_len & 0x00FF); 957 } else { 958 resp[0] = (mode_data_length & 0x00FF); 959 resp[2] = 0x10 /* DPOFUA */; 960 resp[3] = (blk_desc_len & 0x00FF); 961 } 962 963 return 0; 964} 965 966static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr, 967 u8 *resp, int len, u8 llbaa) 968{ 969 int res; 970 int nvme_sc; 971 struct nvme_id_ns *id_ns; 972 u8 flbas; 973 u32 lba_length; 974 975 if (llbaa == 0 && len < MODE_PAGE_BLK_DES_LEN) 976 return -EINVAL; 977 else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN) 978 return -EINVAL; 979 980 nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns); 981 res = nvme_trans_status_code(hdr, nvme_sc); 982 if (res) 983 return res; 984 985 flbas = (id_ns->flbas) & 0x0F; 986 lba_length = (1 << (id_ns->lbaf[flbas].ds)); 987 988 if (llbaa == 0) { 989 __be32 tmp_cap = cpu_to_be32(le64_to_cpu(id_ns->ncap)); 990 /* Byte 4 is reserved */ 991 __be32 tmp_len = cpu_to_be32(lba_length & 0x00FFFFFF); 992 993 memcpy(resp, &tmp_cap, sizeof(u32)); 994 memcpy(&resp[4], &tmp_len, sizeof(u32)); 995 } else { 996 __be64 tmp_cap = cpu_to_be64(le64_to_cpu(id_ns->ncap)); 997 __be32 tmp_len = cpu_to_be32(lba_length); 998 999 memcpy(resp, &tmp_cap, sizeof(u64)); 1000 /* Bytes 8, 9, 10, 11 are reserved */ 1001 memcpy(&resp[12], &tmp_len, sizeof(u32)); 1002 } 1003 1004 kfree(id_ns); 1005 return res; 1006} 1007 1008static int nvme_trans_fill_control_page(struct nvme_ns *ns, 1009 struct sg_io_hdr *hdr, u8 *resp, 1010 int len) 1011{ 1012 if (len < MODE_PAGE_CONTROL_LEN) 1013 return -EINVAL; 1014 1015 resp[0] = MODE_PAGE_CONTROL; 1016 resp[1] = MODE_PAGE_CONTROL_LEN_FIELD; 1017 resp[2] = 0x0E; /* TST=000b, TMF_ONLY=0, DPICZ=1, 1018 * D_SENSE=1, GLTSD=1, RLEC=0 */ 1019 resp[3] = 0x12; /* Q_ALGO_MODIFIER=1h, NUAR=0, QERR=01b */ 1020 /* Byte 4: VS=0, RAC=0, UA_INT=0, SWP=0 */ 1021 resp[5] = 0x40; /* ATO=0, TAS=1, ATMPE=0, RWWP=0, AUTOLOAD=0 */ 1022 /* resp[6] and [7] are obsolete, thus zero */ 1023 resp[8] = 0xFF; /* Busy timeout period = 0xffff */ 1024 resp[9] = 0xFF; 1025 /* Bytes 10,11: Extended selftest completion time = 0x0000 */ 1026 1027 return 0; 1028} 1029 1030static int nvme_trans_fill_caching_page(struct nvme_ns *ns, 1031 struct sg_io_hdr *hdr, 1032 u8 *resp, int len) 1033{ 1034 int res = 0; 1035 int nvme_sc; 1036 u32 feature_resp; 1037 u8 vwc; 1038 1039 if (len < MODE_PAGE_CACHING_LEN) 1040 return -EINVAL; 1041 1042 nvme_sc = nvme_get_features(ns->ctrl, NVME_FEAT_VOLATILE_WC, 0, NULL, 0, 1043 &feature_resp); 1044 res = nvme_trans_status_code(hdr, nvme_sc); 1045 if (res) 1046 return res; 1047 1048 vwc = feature_resp & 0x00000001; 1049 1050 resp[0] = MODE_PAGE_CACHING; 1051 resp[1] = MODE_PAGE_CACHING_LEN_FIELD; 1052 resp[2] = vwc << 2; 1053 return 0; 1054} 1055 1056static int nvme_trans_fill_pow_cnd_page(struct nvme_ns *ns, 1057 struct sg_io_hdr *hdr, u8 *resp, 1058 int len) 1059{ 1060 if (len < MODE_PAGE_POW_CND_LEN) 1061 return -EINVAL; 1062 1063 resp[0] = MODE_PAGE_POWER_CONDITION; 1064 resp[1] = MODE_PAGE_POW_CND_LEN_FIELD; 1065 /* All other bytes are zero */ 1066 1067 return 0; 1068} 1069 1070static int nvme_trans_fill_inf_exc_page(struct nvme_ns *ns, 1071 struct sg_io_hdr *hdr, u8 *resp, 1072 int len) 1073{ 1074 if (len < 
MODE_PAGE_INF_EXC_LEN) 1075 return -EINVAL; 1076 1077 resp[0] = MODE_PAGE_INFO_EXCEP; 1078 resp[1] = MODE_PAGE_INF_EXC_LEN_FIELD; 1079 resp[2] = 0x88; 1080 /* All other bytes are zero */ 1081 1082 return 0; 1083} 1084 1085static int nvme_trans_fill_all_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr, 1086 u8 *resp, int len) 1087{ 1088 int res; 1089 u16 mode_pages_offset_1 = 0; 1090 u16 mode_pages_offset_2, mode_pages_offset_3, mode_pages_offset_4; 1091 1092 mode_pages_offset_2 = mode_pages_offset_1 + MODE_PAGE_CACHING_LEN; 1093 mode_pages_offset_3 = mode_pages_offset_2 + MODE_PAGE_CONTROL_LEN; 1094 mode_pages_offset_4 = mode_pages_offset_3 + MODE_PAGE_POW_CND_LEN; 1095 1096 res = nvme_trans_fill_caching_page(ns, hdr, &resp[mode_pages_offset_1], 1097 MODE_PAGE_CACHING_LEN); 1098 if (res) 1099 return res; 1100 res = nvme_trans_fill_control_page(ns, hdr, &resp[mode_pages_offset_2], 1101 MODE_PAGE_CONTROL_LEN); 1102 if (res) 1103 return res; 1104 res = nvme_trans_fill_pow_cnd_page(ns, hdr, &resp[mode_pages_offset_3], 1105 MODE_PAGE_POW_CND_LEN); 1106 if (res) 1107 return res; 1108 return nvme_trans_fill_inf_exc_page(ns, hdr, &resp[mode_pages_offset_4], 1109 MODE_PAGE_INF_EXC_LEN); 1110} 1111 1112static inline int nvme_trans_get_blk_desc_len(u8 dbd, u8 llbaa) 1113{ 1114 if (dbd == MODE_SENSE_BLK_DESC_ENABLED) { 1115 /* SPC-4: len = 8 x Num_of_descriptors if llbaa = 0, 16x if 1 */ 1116 return 8 * (llbaa + 1) * MODE_SENSE_BLK_DESC_COUNT; 1117 } else { 1118 return 0; 1119 } 1120} 1121 1122static int nvme_trans_mode_page_create(struct nvme_ns *ns, 1123 struct sg_io_hdr *hdr, u8 *cmd, 1124 u16 alloc_len, u8 cdb10, 1125 int (*mode_page_fill_func) 1126 (struct nvme_ns *, 1127 struct sg_io_hdr *hdr, u8 *, int), 1128 u16 mode_pages_tot_len) 1129{ 1130 int res; 1131 int xfer_len; 1132 u8 *response; 1133 u8 dbd, llbaa; 1134 u16 resp_size; 1135 int mph_size; 1136 u16 mode_pages_offset_1; 1137 u16 blk_desc_len, blk_desc_offset, mode_data_length; 1138 1139 dbd = (cmd[1] & MODE_SENSE_DBD_MASK) >> MODE_SENSE_DBD_SHIFT; 1140 llbaa = (cmd[1] & MODE_SENSE_LLBAA_MASK) >> MODE_SENSE_LLBAA_SHIFT; 1141 mph_size = cdb10 ? 
MODE_SENSE10_MPH_SIZE : MODE_SENSE6_MPH_SIZE; 1142 1143 blk_desc_len = nvme_trans_get_blk_desc_len(dbd, llbaa); 1144 1145 resp_size = mph_size + blk_desc_len + mode_pages_tot_len; 1146 /* Refer spc4r34 Table 440 for calculation of Mode data Length field */ 1147 mode_data_length = 3 + (3 * cdb10) + blk_desc_len + mode_pages_tot_len; 1148 1149 blk_desc_offset = mph_size; 1150 mode_pages_offset_1 = blk_desc_offset + blk_desc_len; 1151 1152 response = kzalloc(resp_size, GFP_KERNEL); 1153 if (response == NULL) { 1154 res = -ENOMEM; 1155 goto out_mem; 1156 } 1157 1158 res = nvme_trans_fill_mode_parm_hdr(&response[0], mph_size, cdb10, 1159 llbaa, mode_data_length, blk_desc_len); 1160 if (res) 1161 goto out_free; 1162 if (blk_desc_len > 0) { 1163 res = nvme_trans_fill_blk_desc(ns, hdr, 1164 &response[blk_desc_offset], 1165 blk_desc_len, llbaa); 1166 if (res) 1167 goto out_free; 1168 } 1169 res = mode_page_fill_func(ns, hdr, &response[mode_pages_offset_1], 1170 mode_pages_tot_len); 1171 if (res) 1172 goto out_free; 1173 1174 xfer_len = min(alloc_len, resp_size); 1175 res = nvme_trans_copy_to_user(hdr, response, xfer_len); 1176 1177 out_free: 1178 kfree(response); 1179 out_mem: 1180 return res; 1181} 1182 1183/* Read Capacity Helper Functions */ 1184 1185static void nvme_trans_fill_read_cap(u8 *response, struct nvme_id_ns *id_ns, 1186 u8 cdb16) 1187{ 1188 u8 flbas; 1189 u32 lba_length; 1190 u64 rlba; 1191 u8 prot_en; 1192 u8 p_type_lut[4] = {0, 0, 1, 2}; 1193 __be64 tmp_rlba; 1194 __be32 tmp_rlba_32; 1195 __be32 tmp_len; 1196 1197 flbas = (id_ns->flbas) & 0x0F; 1198 lba_length = (1 << (id_ns->lbaf[flbas].ds)); 1199 rlba = le64_to_cpup(&id_ns->nsze) - 1; 1200 (id_ns->dps) ? (prot_en = 0x01) : (prot_en = 0); 1201 1202 if (!cdb16) { 1203 if (rlba > 0xFFFFFFFF) 1204 rlba = 0xFFFFFFFF; 1205 tmp_rlba_32 = cpu_to_be32(rlba); 1206 tmp_len = cpu_to_be32(lba_length); 1207 memcpy(response, &tmp_rlba_32, sizeof(u32)); 1208 memcpy(&response[4], &tmp_len, sizeof(u32)); 1209 } else { 1210 tmp_rlba = cpu_to_be64(rlba); 1211 tmp_len = cpu_to_be32(lba_length); 1212 memcpy(response, &tmp_rlba, sizeof(u64)); 1213 memcpy(&response[8], &tmp_len, sizeof(u32)); 1214 response[12] = (p_type_lut[id_ns->dps & 0x3] << 1) | prot_en; 1215 /* P_I_Exponent = 0x0 | LBPPBE = 0x0 */ 1216 /* LBPME = 0 | LBPRZ = 0 | LALBA = 0x00 */ 1217 /* Bytes 16-31 - Reserved */ 1218 } 1219} 1220 1221/* Start Stop Unit Helper Functions */ 1222 1223static int nvme_trans_send_activate_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr, 1224 u8 buffer_id) 1225{ 1226 struct nvme_command c; 1227 int nvme_sc; 1228 1229 memset(&c, 0, sizeof(c)); 1230 c.common.opcode = nvme_admin_activate_fw; 1231 c.common.cdw10[0] = cpu_to_le32(buffer_id | NVME_FWACT_REPL_ACTV); 1232 1233 nvme_sc = nvme_submit_sync_cmd(ns->queue, &c, NULL, 0); 1234 return nvme_trans_status_code(hdr, nvme_sc); 1235} 1236 1237static int nvme_trans_send_download_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr, 1238 u8 opcode, u32 tot_len, u32 offset, 1239 u8 buffer_id) 1240{ 1241 int nvme_sc; 1242 struct nvme_command c; 1243 1244 if (hdr->iovec_count > 0) { 1245 /* Assuming SGL is not allowed for this command */ 1246 return nvme_trans_completion(hdr, 1247 SAM_STAT_CHECK_CONDITION, 1248 ILLEGAL_REQUEST, 1249 SCSI_ASC_INVALID_CDB, 1250 SCSI_ASCQ_CAUSE_NOT_REPORTABLE); 1251 } 1252 1253 memset(&c, 0, sizeof(c)); 1254 c.common.opcode = nvme_admin_download_fw; 1255 c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1); 1256 c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS); 1257 1258 nvme_sc = 
nvme_submit_user_cmd(ns->ctrl->admin_q, &c, 1259 hdr->dxferp, tot_len, NULL, 0); 1260 return nvme_trans_status_code(hdr, nvme_sc); 1261} 1262 1263/* Mode Select Helper Functions */ 1264 1265static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10, 1266 u16 *bd_len, u8 *llbaa) 1267{ 1268 if (cdb10) { 1269 /* 10 Byte CDB */ 1270 *bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) + 1271 parm_list[MODE_SELECT_10_BD_OFFSET + 1]; 1272 *llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] & 1273 MODE_SELECT_10_LLBAA_MASK; 1274 } else { 1275 /* 6 Byte CDB */ 1276 *bd_len = parm_list[MODE_SELECT_6_BD_OFFSET]; 1277 } 1278} 1279 1280static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list, 1281 u16 idx, u16 bd_len, u8 llbaa) 1282{ 1283 /* Store block descriptor info if a FORMAT UNIT comes later */ 1284 /* TODO Saving 1st BD info; what to do if multiple BD received? */ 1285 if (llbaa == 0) { 1286 /* Standard Block Descriptor - spc4r34 7.5.5.1 */ 1287 ns->mode_select_num_blocks = 1288 (parm_list[idx + 1] << 16) + 1289 (parm_list[idx + 2] << 8) + 1290 (parm_list[idx + 3]); 1291 1292 ns->mode_select_block_len = 1293 (parm_list[idx + 5] << 16) + 1294 (parm_list[idx + 6] << 8) + 1295 (parm_list[idx + 7]); 1296 } else { 1297 /* Long LBA Block Descriptor - sbc3r27 6.4.2.3 */ 1298 ns->mode_select_num_blocks = 1299 (((u64)parm_list[idx + 0]) << 56) + 1300 (((u64)parm_list[idx + 1]) << 48) + 1301 (((u64)parm_list[idx + 2]) << 40) + 1302 (((u64)parm_list[idx + 3]) << 32) + 1303 (((u64)parm_list[idx + 4]) << 24) + 1304 (((u64)parm_list[idx + 5]) << 16) + 1305 (((u64)parm_list[idx + 6]) << 8) + 1306 ((u64)parm_list[idx + 7]); 1307 1308 ns->mode_select_block_len = 1309 (parm_list[idx + 12] << 24) + 1310 (parm_list[idx + 13] << 16) + 1311 (parm_list[idx + 14] << 8) + 1312 (parm_list[idx + 15]); 1313 } 1314} 1315 1316static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr, 1317 u8 *mode_page, u8 page_code) 1318{ 1319 int res = 0; 1320 int nvme_sc; 1321 unsigned dword11; 1322 1323 switch (page_code) { 1324 case MODE_PAGE_CACHING: 1325 dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 
1 : 0); 1326 nvme_sc = nvme_set_features(ns->ctrl, NVME_FEAT_VOLATILE_WC, 1327 dword11, NULL, 0, NULL); 1328 res = nvme_trans_status_code(hdr, nvme_sc); 1329 break; 1330 case MODE_PAGE_CONTROL: 1331 break; 1332 case MODE_PAGE_POWER_CONDITION: 1333 /* Verify the OS is not trying to set timers */ 1334 if ((mode_page[2] & 0x01) != 0 || (mode_page[3] & 0x0F) != 0) { 1335 res = nvme_trans_completion(hdr, 1336 SAM_STAT_CHECK_CONDITION, 1337 ILLEGAL_REQUEST, 1338 SCSI_ASC_INVALID_PARAMETER, 1339 SCSI_ASCQ_CAUSE_NOT_REPORTABLE); 1340 break; 1341 } 1342 break; 1343 default: 1344 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, 1345 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, 1346 SCSI_ASCQ_CAUSE_NOT_REPORTABLE); 1347 break; 1348 } 1349 1350 return res; 1351} 1352 1353static int nvme_trans_modesel_data(struct nvme_ns *ns, struct sg_io_hdr *hdr, 1354 u8 *cmd, u16 parm_list_len, u8 pf, 1355 u8 sp, u8 cdb10) 1356{ 1357 int res; 1358 u8 *parm_list; 1359 u16 bd_len; 1360 u8 llbaa = 0; 1361 u16 index, saved_index; 1362 u8 page_code; 1363 u16 mp_size; 1364 1365 /* Get parm list from data-in/out buffer */ 1366 parm_list = kmalloc(parm_list_len, GFP_KERNEL); 1367 if (parm_list == NULL) { 1368 res = -ENOMEM; 1369 goto out; 1370 } 1371 1372 res = nvme_trans_copy_from_user(hdr, parm_list, parm_list_len); 1373 if (res) 1374 goto out_mem; 1375 1376 nvme_trans_modesel_get_bd_len(parm_list, cdb10, &bd_len, &llbaa); 1377 index = (cdb10) ? (MODE_SELECT_10_MPH_SIZE) : (MODE_SELECT_6_MPH_SIZE); 1378 1379 if (bd_len != 0) { 1380 /* Block Descriptors present, parse */ 1381 nvme_trans_modesel_save_bd(ns, parm_list, index, bd_len, llbaa); 1382 index += bd_len; 1383 } 1384 saved_index = index; 1385 1386 /* Multiple mode pages may be present; iterate through all */ 1387 /* In 1st Iteration, don't do NVME Command, only check for CDB errors */ 1388 do { 1389 page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK; 1390 mp_size = parm_list[index + 1] + 2; 1391 if ((page_code != MODE_PAGE_CACHING) && 1392 (page_code != MODE_PAGE_CONTROL) && 1393 (page_code != MODE_PAGE_POWER_CONDITION)) { 1394 res = nvme_trans_completion(hdr, 1395 SAM_STAT_CHECK_CONDITION, 1396 ILLEGAL_REQUEST, 1397 SCSI_ASC_INVALID_CDB, 1398 SCSI_ASCQ_CAUSE_NOT_REPORTABLE); 1399 goto out_mem; 1400 } 1401 index += mp_size; 1402 } while (index < parm_list_len); 1403 1404 /* In 2nd Iteration, do the NVME Commands */ 1405 index = saved_index; 1406 do { 1407 page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK; 1408 mp_size = parm_list[index + 1] + 2; 1409 res = nvme_trans_modesel_get_mp(ns, hdr, &parm_list[index], 1410 page_code); 1411 if (res) 1412 break; 1413 index += mp_size; 1414 } while (index < parm_list_len); 1415 1416 out_mem: 1417 kfree(parm_list); 1418 out: 1419 return res; 1420} 1421 1422/* Format Unit Helper Functions */ 1423 1424static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns, 1425 struct sg_io_hdr *hdr) 1426{ 1427 int res = 0; 1428 int nvme_sc; 1429 u8 flbas; 1430 1431 /* 1432 * SCSI Expects a MODE SELECT would have been issued prior to 1433 * a FORMAT UNIT, and the block size and number would be used 1434 * from the block descriptor in it. If a MODE SELECT had not 1435 * been issued, FORMAT shall use the current values for both. 
1436 */ 1437 1438 if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) { 1439 struct nvme_id_ns *id_ns; 1440 1441 nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns); 1442 res = nvme_trans_status_code(hdr, nvme_sc); 1443 if (res) 1444 return res; 1445 1446 if (ns->mode_select_num_blocks == 0) 1447 ns->mode_select_num_blocks = le64_to_cpu(id_ns->ncap); 1448 if (ns->mode_select_block_len == 0) { 1449 flbas = (id_ns->flbas) & 0x0F; 1450 ns->mode_select_block_len = 1451 (1 << (id_ns->lbaf[flbas].ds)); 1452 } 1453 1454 kfree(id_ns); 1455 } 1456 1457 return 0; 1458} 1459 1460static int nvme_trans_fmt_get_parm_header(struct sg_io_hdr *hdr, u8 len, 1461 u8 format_prot_info, u8 *nvme_pf_code) 1462{ 1463 int res; 1464 u8 *parm_list; 1465 u8 pf_usage, pf_code; 1466 1467 parm_list = kmalloc(len, GFP_KERNEL); 1468 if (parm_list == NULL) { 1469 res = -ENOMEM; 1470 goto out; 1471 } 1472 res = nvme_trans_copy_from_user(hdr, parm_list, len); 1473 if (res) 1474 goto out_mem; 1475 1476 if ((parm_list[FORMAT_UNIT_IMMED_OFFSET] & 1477 FORMAT_UNIT_IMMED_MASK) != 0) { 1478 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, 1479 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, 1480 SCSI_ASCQ_CAUSE_NOT_REPORTABLE); 1481 goto out_mem; 1482 } 1483 1484 if (len == FORMAT_UNIT_LONG_PARM_LIST_LEN && 1485 (parm_list[FORMAT_UNIT_PROT_INT_OFFSET] & 0x0F) != 0) { 1486 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, 1487 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, 1488 SCSI_ASCQ_CAUSE_NOT_REPORTABLE); 1489 goto out_mem; 1490 } 1491 pf_usage = parm_list[FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET] & 1492 FORMAT_UNIT_PROT_FIELD_USAGE_MASK; 1493 pf_code = (pf_usage << 2) | format_prot_info; 1494 switch (pf_code) { 1495 case 0: 1496 *nvme_pf_code = 0; 1497 break; 1498 case 2: 1499 *nvme_pf_code = 1; 1500 break; 1501 case 3: 1502 *nvme_pf_code = 2; 1503 break; 1504 case 7: 1505 *nvme_pf_code = 3; 1506 break; 1507 default: 1508 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, 1509 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, 1510 SCSI_ASCQ_CAUSE_NOT_REPORTABLE); 1511 break; 1512 } 1513 1514 out_mem: 1515 kfree(parm_list); 1516 out: 1517 return res; 1518} 1519 1520static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr, 1521 u8 prot_info) 1522{ 1523 int res; 1524 int nvme_sc; 1525 struct nvme_id_ns *id_ns; 1526 u8 i; 1527 u8 nlbaf; 1528 u8 selected_lbaf = 0xFF; 1529 u32 cdw10 = 0; 1530 struct nvme_command c; 1531 1532 /* Loop thru LBAF's in id_ns to match reqd lbaf, put in cdw10 */ 1533 nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns); 1534 res = nvme_trans_status_code(hdr, nvme_sc); 1535 if (res) 1536 return res; 1537 1538 nlbaf = id_ns->nlbaf; 1539 1540 for (i = 0; i < nlbaf; i++) { 1541 if (ns->mode_select_block_len == (1 << (id_ns->lbaf[i].ds))) { 1542 selected_lbaf = i; 1543 break; 1544 } 1545 } 1546 if (selected_lbaf > 0x0F) { 1547 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, 1548 ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER, 1549 SCSI_ASCQ_CAUSE_NOT_REPORTABLE); 1550 } 1551 if (ns->mode_select_num_blocks != le64_to_cpu(id_ns->ncap)) { 1552 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, 1553 ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER, 1554 SCSI_ASCQ_CAUSE_NOT_REPORTABLE); 1555 } 1556 1557 cdw10 |= prot_info << 5; 1558 cdw10 |= selected_lbaf & 0x0F; 1559 memset(&c, 0, sizeof(c)); 1560 c.format.opcode = nvme_admin_format_nvm; 1561 c.format.nsid = cpu_to_le32(ns->ns_id); 1562 c.format.cdw10 = cpu_to_le32(cdw10); 1563 1564 nvme_sc = 
nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, NULL, 0); 1565 res = nvme_trans_status_code(hdr, nvme_sc); 1566 1567 kfree(id_ns); 1568 return res; 1569} 1570 1571static inline u32 nvme_trans_io_get_num_cmds(struct sg_io_hdr *hdr, 1572 struct nvme_trans_io_cdb *cdb_info, 1573 u32 max_blocks) 1574{ 1575 /* If using iovecs, send one nvme command per vector */ 1576 if (hdr->iovec_count > 0) 1577 return hdr->iovec_count; 1578 else if (cdb_info->xfer_len > max_blocks) 1579 return ((cdb_info->xfer_len - 1) / max_blocks) + 1; 1580 else 1581 return 1; 1582} 1583 1584static u16 nvme_trans_io_get_control(struct nvme_ns *ns, 1585 struct nvme_trans_io_cdb *cdb_info) 1586{ 1587 u16 control = 0; 1588 1589 /* When Protection information support is added, implement here */ 1590 1591 if (cdb_info->fua > 0) 1592 control |= NVME_RW_FUA; 1593 1594 return control; 1595} 1596 1597static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr, 1598 struct nvme_trans_io_cdb *cdb_info, u8 is_write) 1599{ 1600 int nvme_sc = NVME_SC_SUCCESS; 1601 u32 num_cmds; 1602 u64 unit_len; 1603 u64 unit_num_blocks; /* Number of blocks to xfer in each nvme cmd */ 1604 u32 retcode; 1605 u32 i = 0; 1606 u64 nvme_offset = 0; 1607 void __user *next_mapping_addr; 1608 struct nvme_command c; 1609 u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read); 1610 u16 control; 1611 u32 max_blocks = queue_max_hw_sectors(ns->queue); 1612 1613 num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks); 1614 1615 /* 1616 * This loop handles two cases. 1617 * First, when an SGL is used in the form of an iovec list: 1618 * - Use iov_base as the next mapping address for the nvme command_id 1619 * - Use iov_len as the data transfer length for the command. 1620 * Second, when we have a single buffer 1621 * - If larger than max_blocks, split into chunks, offset 1622 * each nvme command accordingly. 
1623 */ 1624 for (i = 0; i < num_cmds; i++) { 1625 memset(&c, 0, sizeof(c)); 1626 if (hdr->iovec_count > 0) { 1627 struct sg_iovec sgl; 1628 1629 retcode = copy_from_user(&sgl, hdr->dxferp + 1630 i * sizeof(struct sg_iovec), 1631 sizeof(struct sg_iovec)); 1632 if (retcode) 1633 return -EFAULT; 1634 unit_len = sgl.iov_len; 1635 unit_num_blocks = unit_len >> ns->lba_shift; 1636 next_mapping_addr = sgl.iov_base; 1637 } else { 1638 unit_num_blocks = min((u64)max_blocks, 1639 (cdb_info->xfer_len - nvme_offset)); 1640 unit_len = unit_num_blocks << ns->lba_shift; 1641 next_mapping_addr = hdr->dxferp + 1642 ((1 << ns->lba_shift) * nvme_offset); 1643 } 1644 1645 c.rw.opcode = opcode; 1646 c.rw.nsid = cpu_to_le32(ns->ns_id); 1647 c.rw.slba = cpu_to_le64(cdb_info->lba + nvme_offset); 1648 c.rw.length = cpu_to_le16(unit_num_blocks - 1); 1649 control = nvme_trans_io_get_control(ns, cdb_info); 1650 c.rw.control = cpu_to_le16(control); 1651 1652 if (get_capacity(ns->disk) - unit_num_blocks < 1653 cdb_info->lba + nvme_offset) { 1654 nvme_sc = NVME_SC_LBA_RANGE; 1655 break; 1656 } 1657 nvme_sc = nvme_submit_user_cmd(ns->queue, &c, 1658 next_mapping_addr, unit_len, NULL, 0); 1659 if (nvme_sc) 1660 break; 1661 1662 nvme_offset += unit_num_blocks; 1663 } 1664 1665 return nvme_trans_status_code(hdr, nvme_sc); 1666} 1667 1668 1669/* SCSI Command Translation Functions */ 1670 1671static int nvme_trans_io(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 is_write, 1672 u8 *cmd) 1673{ 1674 int res = 0; 1675 struct nvme_trans_io_cdb cdb_info = { 0, }; 1676 u8 opcode = cmd[0]; 1677 u64 xfer_bytes; 1678 u64 sum_iov_len = 0; 1679 struct sg_iovec sgl; 1680 int i; 1681 size_t not_copied; 1682 1683 /* 1684 * The FUA and WPROTECT fields are not supported in 6-byte CDBs, 1685 * but always in the same place for all others. 
1686 */ 1687 switch (opcode) { 1688 case WRITE_6: 1689 case READ_6: 1690 break; 1691 default: 1692 cdb_info.fua = cmd[1] & 0x8; 1693 cdb_info.prot_info = (cmd[1] & 0xe0) >> 5; 1694 if (cdb_info.prot_info && !ns->pi_type) { 1695 return nvme_trans_completion(hdr, 1696 SAM_STAT_CHECK_CONDITION, 1697 ILLEGAL_REQUEST, 1698 SCSI_ASC_INVALID_CDB, 1699 SCSI_ASCQ_CAUSE_NOT_REPORTABLE); 1700 } 1701 } 1702 1703 switch (opcode) { 1704 case WRITE_6: 1705 case READ_6: 1706 cdb_info.lba = get_unaligned_be24(&cmd[1]); 1707 cdb_info.xfer_len = cmd[4]; 1708 if (cdb_info.xfer_len == 0) 1709 cdb_info.xfer_len = 256; 1710 break; 1711 case WRITE_10: 1712 case READ_10: 1713 cdb_info.lba = get_unaligned_be32(&cmd[2]); 1714 cdb_info.xfer_len = get_unaligned_be16(&cmd[7]); 1715 break; 1716 case WRITE_12: 1717 case READ_12: 1718 cdb_info.lba = get_unaligned_be32(&cmd[2]); 1719 cdb_info.xfer_len = get_unaligned_be32(&cmd[6]); 1720 break; 1721 case WRITE_16: 1722 case READ_16: 1723 cdb_info.lba = get_unaligned_be64(&cmd[2]); 1724 cdb_info.xfer_len = get_unaligned_be32(&cmd[10]); 1725 break; 1726 default: 1727 /* Will never really reach here */ 1728 res = -EIO; 1729 goto out; 1730 } 1731 1732 /* Calculate total length of transfer (in bytes) */ 1733 if (hdr->iovec_count > 0) { 1734 for (i = 0; i < hdr->iovec_count; i++) { 1735 not_copied = copy_from_user(&sgl, hdr->dxferp + 1736 i * sizeof(struct sg_iovec), 1737 sizeof(struct sg_iovec)); 1738 if (not_copied) 1739 return -EFAULT; 1740 sum_iov_len += sgl.iov_len; 1741 /* IO vector sizes should be multiples of block size */ 1742 if (sgl.iov_len % (1 << ns->lba_shift) != 0) { 1743 res = nvme_trans_completion(hdr, 1744 SAM_STAT_CHECK_CONDITION, 1745 ILLEGAL_REQUEST, 1746 SCSI_ASC_INVALID_PARAMETER, 1747 SCSI_ASCQ_CAUSE_NOT_REPORTABLE); 1748 goto out; 1749 } 1750 } 1751 } else { 1752 sum_iov_len = hdr->dxfer_len; 1753 } 1754 1755 /* As Per sg ioctl howto, if the lengths differ, use the lower one */ 1756 xfer_bytes = min(((u64)hdr->dxfer_len), sum_iov_len); 1757 1758 /* If block count and actual data buffer size dont match, error out */ 1759 if (xfer_bytes != (cdb_info.xfer_len << ns->lba_shift)) { 1760 res = -EINVAL; 1761 goto out; 1762 } 1763 1764 /* Check for 0 length transfer - it is not illegal */ 1765 if (cdb_info.xfer_len == 0) 1766 goto out; 1767 1768 /* Send NVMe IO Command(s) */ 1769 res = nvme_trans_do_nvme_io(ns, hdr, &cdb_info, is_write); 1770 if (res) 1771 goto out; 1772 1773 out: 1774 return res; 1775} 1776 1777static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr, 1778 u8 *cmd) 1779{ 1780 int res = 0; 1781 u8 evpd; 1782 u8 page_code; 1783 int alloc_len; 1784 u8 *inq_response; 1785 1786 evpd = cmd[1] & 0x01; 1787 page_code = cmd[2]; 1788 alloc_len = get_unaligned_be16(&cmd[3]); 1789 1790 inq_response = kmalloc(max(alloc_len, STANDARD_INQUIRY_LENGTH), 1791 GFP_KERNEL); 1792 if (inq_response == NULL) { 1793 res = -ENOMEM; 1794 goto out_mem; 1795 } 1796 1797 if (evpd == 0) { 1798 if (page_code == INQ_STANDARD_INQUIRY_PAGE) { 1799 res = nvme_trans_standard_inquiry_page(ns, hdr, 1800 inq_response, alloc_len); 1801 } else { 1802 res = nvme_trans_completion(hdr, 1803 SAM_STAT_CHECK_CONDITION, 1804 ILLEGAL_REQUEST, 1805 SCSI_ASC_INVALID_CDB, 1806 SCSI_ASCQ_CAUSE_NOT_REPORTABLE); 1807 } 1808 } else { 1809 switch (page_code) { 1810 case VPD_SUPPORTED_PAGES: 1811 res = nvme_trans_supported_vpd_pages(ns, hdr, 1812 inq_response, alloc_len); 1813 break; 1814 case VPD_SERIAL_NUMBER: 1815 res = nvme_trans_unit_serial_page(ns, hdr, inq_response, 1816 
alloc_len); 1817 break; 1818 case VPD_DEVICE_IDENTIFIERS: 1819 res = nvme_trans_device_id_page(ns, hdr, inq_response, 1820 alloc_len); 1821 break; 1822 case VPD_EXTENDED_INQUIRY: 1823 res = nvme_trans_ext_inq_page(ns, hdr, alloc_len); 1824 break; 1825 case VPD_BLOCK_LIMITS: 1826 res = nvme_trans_bdev_limits_page(ns, hdr, inq_response, 1827 alloc_len); 1828 break; 1829 case VPD_BLOCK_DEV_CHARACTERISTICS: 1830 res = nvme_trans_bdev_char_page(ns, hdr, alloc_len); 1831 break; 1832 default: 1833 res = nvme_trans_completion(hdr, 1834 SAM_STAT_CHECK_CONDITION, 1835 ILLEGAL_REQUEST, 1836 SCSI_ASC_INVALID_CDB, 1837 SCSI_ASCQ_CAUSE_NOT_REPORTABLE); 1838 break; 1839 } 1840 } 1841 kfree(inq_response); 1842 out_mem: 1843 return res; 1844} 1845 1846static int nvme_trans_log_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr, 1847 u8 *cmd) 1848{ 1849 int res; 1850 u16 alloc_len; 1851 u8 pc; 1852 u8 page_code; 1853 1854 if (cmd[1] != LOG_SENSE_CDB_SP_NOT_ENABLED) { 1855 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, 1856 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, 1857 SCSI_ASCQ_CAUSE_NOT_REPORTABLE); 1858 goto out; 1859 } 1860 1861 page_code = cmd[2] & LOG_SENSE_CDB_PAGE_CODE_MASK; 1862 pc = (cmd[2] & LOG_SENSE_CDB_PC_MASK) >> LOG_SENSE_CDB_PC_SHIFT; 1863 if (pc != LOG_SENSE_CDB_PC_CUMULATIVE_VALUES) { 1864 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, 1865 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, 1866 SCSI_ASCQ_CAUSE_NOT_REPORTABLE); 1867 goto out; 1868 } 1869 alloc_len = get_unaligned_be16(&cmd[7]); 1870 switch (page_code) { 1871 case LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE: 1872 res = nvme_trans_log_supp_pages(ns, hdr, alloc_len); 1873 break; 1874 case LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE: 1875 res = nvme_trans_log_info_exceptions(ns, hdr, alloc_len); 1876 break; 1877 case LOG_PAGE_TEMPERATURE_PAGE: 1878 res = nvme_trans_log_temperature(ns, hdr, alloc_len); 1879 break; 1880 default: 1881 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, 1882 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, 1883 SCSI_ASCQ_CAUSE_NOT_REPORTABLE); 1884 break; 1885 } 1886 1887 out: 1888 return res; 1889} 1890 1891static int nvme_trans_mode_select(struct nvme_ns *ns, struct sg_io_hdr *hdr, 1892 u8 *cmd) 1893{ 1894 u8 cdb10 = 0; 1895 u16 parm_list_len; 1896 u8 page_format; 1897 u8 save_pages; 1898 1899 page_format = cmd[1] & MODE_SELECT_CDB_PAGE_FORMAT_MASK; 1900 save_pages = cmd[1] & MODE_SELECT_CDB_SAVE_PAGES_MASK; 1901 1902 if (cmd[0] == MODE_SELECT) { 1903 parm_list_len = cmd[4]; 1904 } else { 1905 parm_list_len = cmd[7]; 1906 cdb10 = 1; 1907 } 1908 1909 if (parm_list_len != 0) { 1910 /* 1911 * According to SPC-4 r24, a paramter list length field of 0 1912 * shall not be considered an error 1913 */ 1914 return nvme_trans_modesel_data(ns, hdr, cmd, parm_list_len, 1915 page_format, save_pages, cdb10); 1916 } 1917 1918 return 0; 1919} 1920 1921static int nvme_trans_mode_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr, 1922 u8 *cmd) 1923{ 1924 int res = 0; 1925 u16 alloc_len; 1926 u8 cdb10 = 0; 1927 1928 if (cmd[0] == MODE_SENSE) { 1929 alloc_len = cmd[4]; 1930 } else { 1931 alloc_len = get_unaligned_be16(&cmd[7]); 1932 cdb10 = 1; 1933 } 1934 1935 if ((cmd[2] & MODE_SENSE_PAGE_CONTROL_MASK) != 1936 MODE_SENSE_PC_CURRENT_VALUES) { 1937 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, 1938 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, 1939 SCSI_ASCQ_CAUSE_NOT_REPORTABLE); 1940 goto out; 1941 } 1942 1943 switch (cmd[2] & MODE_SENSE_PAGE_CODE_MASK) { 1944 case MODE_PAGE_CACHING: 1945 res = 
static int nvme_trans_mode_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 *cmd)
{
	int res = 0;
	u16 alloc_len;
	u8 cdb10 = 0;

	if (cmd[0] == MODE_SENSE) {
		alloc_len = cmd[4];
	} else {
		alloc_len = get_unaligned_be16(&cmd[7]);
		cdb10 = 1;
	}

	if ((cmd[2] & MODE_SENSE_PAGE_CONTROL_MASK) !=
			MODE_SENSE_PC_CURRENT_VALUES) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}

	switch (cmd[2] & MODE_SENSE_PAGE_CODE_MASK) {
	case MODE_PAGE_CACHING:
		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
						cdb10,
						&nvme_trans_fill_caching_page,
						MODE_PAGE_CACHING_LEN);
		break;
	case MODE_PAGE_CONTROL:
		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
						cdb10,
						&nvme_trans_fill_control_page,
						MODE_PAGE_CONTROL_LEN);
		break;
	case MODE_PAGE_POWER_CONDITION:
		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
						cdb10,
						&nvme_trans_fill_pow_cnd_page,
						MODE_PAGE_POW_CND_LEN);
		break;
	case MODE_PAGE_INFO_EXCEP:
		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
						cdb10,
						&nvme_trans_fill_inf_exc_page,
						MODE_PAGE_INF_EXC_LEN);
		break;
	case MODE_PAGE_RETURN_ALL:
		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
						cdb10,
						&nvme_trans_fill_all_pages,
						MODE_PAGE_ALL_LEN);
		break;
	default:
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		break;
	}

 out:
	return res;
}

static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd, u8 cdb16)
{
	int res;
	int nvme_sc;
	u32 alloc_len;
	u32 resp_size;
	u32 xfer_len;
	struct nvme_id_ns *id_ns;
	u8 *response;

	if (cdb16) {
		alloc_len = get_unaligned_be32(&cmd[10]);
		resp_size = READ_CAP_16_RESP_SIZE;
	} else {
		alloc_len = READ_CAP_10_RESP_SIZE;
		resp_size = READ_CAP_10_RESP_SIZE;
	}

	nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		return res;

	response = kzalloc(resp_size, GFP_KERNEL);
	if (response == NULL) {
		res = -ENOMEM;
		goto out_free_id;
	}
	nvme_trans_fill_read_cap(response, id_ns, cdb16);

	xfer_len = min(alloc_len, resp_size);
	res = nvme_trans_copy_to_user(hdr, response, xfer_len);

	kfree(response);
 out_free_id:
	kfree(id_ns);
	return res;
}

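/*
 * REPORT LUNS translation: the response advertises one LUN per possible
 * namespace, using the controller's Number of Namespaces field (id_ctrl->nn)
 * from Identify Controller, with LUN IDs counted up from 0.  For example,
 * with nn == 2 the payload is an 8-byte header whose first word carries the
 * LUN list length (2 * LUN_ENTRY_SIZE = 16) followed by the 8-byte entries
 * for LUNs 0 and 1.
 */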
static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res;
	int nvme_sc;
	u32 alloc_len, xfer_len, resp_size;
	u8 *response;
	struct nvme_id_ctrl *id_ctrl;
	u32 ll_length, lun_id;
	u8 lun_id_offset = REPORT_LUNS_FIRST_LUN_OFFSET;
	__be32 tmp_len;

	switch (cmd[2]) {
	default:
		return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
	case ALL_LUNS_RETURNED:
	case ALL_WELL_KNOWN_LUNS_RETURNED:
	case RESTRICTED_LUNS_RETURNED:
		nvme_sc = nvme_identify_ctrl(ns->ctrl, &id_ctrl);
		res = nvme_trans_status_code(hdr, nvme_sc);
		if (res)
			return res;

		ll_length = le32_to_cpu(id_ctrl->nn) * LUN_ENTRY_SIZE;
		resp_size = ll_length + LUN_DATA_HEADER_SIZE;

		alloc_len = get_unaligned_be32(&cmd[6]);
		if (alloc_len < resp_size) {
			res = nvme_trans_completion(hdr,
					SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
			goto out_free_id;
		}

		response = kzalloc(resp_size, GFP_KERNEL);
		if (response == NULL) {
			res = -ENOMEM;
			goto out_free_id;
		}

		/* The first LUN ID will always be 0 per the SAM spec */
		for (lun_id = 0; lun_id < le32_to_cpu(id_ctrl->nn); lun_id++) {
			/*
			 * Set the LUN Id and then increment to the next LUN
			 * location in the parameter data.
			 */
			__be64 tmp_id = cpu_to_be64(lun_id);
			memcpy(&response[lun_id_offset], &tmp_id, sizeof(u64));
			lun_id_offset += LUN_ENTRY_SIZE;
		}
		tmp_len = cpu_to_be32(ll_length);
		memcpy(response, &tmp_len, sizeof(u32));
	}

	xfer_len = min(alloc_len, resp_size);
	res = nvme_trans_copy_to_user(hdr, response, xfer_len);

	kfree(response);
 out_free_id:
	kfree(id_ctrl);
	return res;
}

static int nvme_trans_request_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res;
	u8 alloc_len, xfer_len, resp_size;
	u8 desc_format;
	u8 *response;

	desc_format = cmd[1] & 0x01;
	alloc_len = cmd[4];

	resp_size = ((desc_format) ? (DESC_FMT_SENSE_DATA_SIZE) :
					(FIXED_FMT_SENSE_DATA_SIZE));
	response = kzalloc(resp_size, GFP_KERNEL);
	if (response == NULL) {
		res = -ENOMEM;
		goto out;
	}

	if (desc_format) {
		/* Descriptor Format Sense Data */
		response[0] = DESC_FORMAT_SENSE_DATA;
		response[1] = NO_SENSE;
		/* TODO How is LOW POWER CONDITION ON handled? (byte 2) */
		response[2] = SCSI_ASC_NO_SENSE;
		response[3] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		/* SDAT_OVFL = 0 | Additional Sense Length = 0 */
	} else {
		/* Fixed Format Sense Data */
		response[0] = FIXED_SENSE_DATA;
		/* Byte 1 = Obsolete */
		response[2] = NO_SENSE; /* FM, EOM, ILI, SDAT_OVFL = 0 */
		/* Bytes 3-6 - Information - set to zero */
		response[7] = FIXED_SENSE_DATA_ADD_LENGTH;
		/* Bytes 8-11 - Cmd Specific Information - set to zero */
		response[12] = SCSI_ASC_NO_SENSE;
		response[13] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		/* Byte 14 = Field Replaceable Unit Code = 0 */
		/* Bytes 15-17 - SKSV=0; Sense Key Specific = 0 */
	}

	xfer_len = min(alloc_len, resp_size);
	res = nvme_trans_copy_to_user(hdr, response, xfer_len);

	kfree(response);
 out:
	return res;
}

static int nvme_trans_security_protocol(struct nvme_ns *ns,
					struct sg_io_hdr *hdr,
					u8 *cmd)
{
	return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
}

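/*
 * SYNCHRONIZE CACHE translation: maps directly to an NVMe Flush command
 * (nvme_cmd_flush) for this namespace, submitted synchronously on the I/O
 * queue.
 */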
static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
					struct sg_io_hdr *hdr)
{
	int nvme_sc;
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_cmd_flush;
	c.common.nsid = cpu_to_le32(ns->ns_id);

	nvme_sc = nvme_submit_sync_cmd(ns->queue, &c, NULL, 0);
	return nvme_trans_status_code(hdr, nvme_sc);
}

static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res;
	u8 parm_hdr_len = 0;
	u8 nvme_pf_code = 0;
	u8 format_prot_info, long_list, format_data;

	format_prot_info = (cmd[1] & 0xc0) >> 6;
	long_list = cmd[1] & 0x20;
	format_data = cmd[1] & 0x10;

	if (format_data != 0) {
		if (format_prot_info != 0) {
			if (long_list == 0)
				parm_hdr_len = FORMAT_UNIT_SHORT_PARM_LIST_LEN;
			else
				parm_hdr_len = FORMAT_UNIT_LONG_PARM_LIST_LEN;
		}
	} else if (format_data == 0 && format_prot_info != 0) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}

	/* Get parm header from data-in/out buffer */
	/*
	 * According to the translation spec, the only fields in the parameter
	 * list we are concerned with are in the header. So allocate only that.
	 */
	if (parm_hdr_len > 0) {
		res = nvme_trans_fmt_get_parm_header(hdr, parm_hdr_len,
					format_prot_info, &nvme_pf_code);
		if (res)
			goto out;
	}

	/* Attempt to activate any previously downloaded firmware image */
	res = nvme_trans_send_activate_fw_cmd(ns, hdr, 0);

	/* Determine Block size and count and send format command */
	res = nvme_trans_fmt_set_blk_size_count(ns, hdr);
	if (res)
		goto out;

	res = nvme_trans_fmt_send_cmd(ns, hdr, nvme_pf_code);

 out:
	return res;
}

static int nvme_trans_test_unit_ready(struct nvme_ns *ns,
					struct sg_io_hdr *hdr,
					u8 *cmd)
{
	if (nvme_ctrl_ready(ns->ctrl))
		return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					    NOT_READY, SCSI_ASC_LUN_NOT_READY,
					    SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
	else
		return nvme_trans_completion(hdr, SAM_STAT_GOOD, NO_SENSE, 0, 0);
}

static int nvme_trans_write_buffer(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = 0;
	u32 buffer_offset, parm_list_length;
	u8 buffer_id, mode;

	parm_list_length = get_unaligned_be24(&cmd[6]);
	if (parm_list_length % BYTES_TO_DWORDS != 0) {
		/* NVMe expects Firmware file to be a whole number of DWORDS */
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}
	buffer_id = cmd[2];
	if (buffer_id > NVME_MAX_FIRMWARE_SLOT) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}
	mode = cmd[1] & 0x1f;
	buffer_offset = get_unaligned_be24(&cmd[3]);

	switch (mode) {
	case DOWNLOAD_SAVE_ACTIVATE:
		res = nvme_trans_send_download_fw_cmd(ns, hdr, nvme_admin_download_fw,
						parm_list_length, buffer_offset,
						buffer_id);
		if (res)
			goto out;
		res = nvme_trans_send_activate_fw_cmd(ns, hdr, buffer_id);
		break;
	case DOWNLOAD_SAVE_DEFER_ACTIVATE:
		res = nvme_trans_send_download_fw_cmd(ns, hdr, nvme_admin_download_fw,
						parm_list_length, buffer_offset,
						buffer_id);
		break;
	case ACTIVATE_DEFERRED_MICROCODE:
		res = nvme_trans_send_activate_fw_cmd(ns, hdr, buffer_id);
		break;
	default:
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		break;
	}

 out:
	return res;
}

struct scsi_unmap_blk_desc {
	__be64	slba;
	__be32	nlb;
	u32	resv;
};

struct scsi_unmap_parm_list {
	__be16	unmap_data_len;
	__be16	unmap_blk_desc_data_len;
	u32	resv;
	struct scsi_unmap_blk_desc desc[0];
};

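/*
 * UNMAP translation: each SCSI unmap block descriptor (slba/nlb above) is
 * converted to an nvme_dsm_range entry and the whole list is submitted as a
 * single Dataset Management command with the Deallocate attribute
 * (NVME_DSMGMT_AD).  At most 256 descriptors are accepted.
 */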
static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	struct scsi_unmap_parm_list *plist;
	struct nvme_dsm_range *range;
	struct nvme_command c;
	int i, nvme_sc, res;
	u16 ndesc, list_len;

	list_len = get_unaligned_be16(&cmd[7]);
	if (!list_len)
		return -EINVAL;

	plist = kmalloc(list_len, GFP_KERNEL);
	if (!plist)
		return -ENOMEM;

	res = nvme_trans_copy_from_user(hdr, plist, list_len);
	if (res)
		goto out;

	ndesc = be16_to_cpu(plist->unmap_blk_desc_data_len) >> 4;
	if (!ndesc || ndesc > 256) {
		res = -EINVAL;
		goto out;
	}

	range = kcalloc(ndesc, sizeof(*range), GFP_KERNEL);
	if (!range) {
		res = -ENOMEM;
		goto out;
	}

	for (i = 0; i < ndesc; i++) {
		range[i].nlb = cpu_to_le32(be32_to_cpu(plist->desc[i].nlb));
		range[i].slba = cpu_to_le64(be64_to_cpu(plist->desc[i].slba));
		range[i].cattr = 0;
	}

	memset(&c, 0, sizeof(c));
	c.dsm.opcode = nvme_cmd_dsm;
	c.dsm.nsid = cpu_to_le32(ns->ns_id);
	c.dsm.nr = cpu_to_le32(ndesc - 1);
	c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	nvme_sc = nvme_submit_sync_cmd(ns->queue, &c, range,
			ndesc * sizeof(*range));
	res = nvme_trans_status_code(hdr, nvme_sc);

	kfree(range);
 out:
	kfree(plist);
	return res;
}

static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
{
	u8 cmd[BLK_MAX_CDB];
	int retcode;
	unsigned int opcode;

	if (hdr->cmdp == NULL)
		return -EMSGSIZE;
	if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
		return -EFAULT;

	/*
	 * Prime the hdr with good status for scsi commands that don't require
	 * an nvme command for translation.
	 */
	retcode = nvme_trans_status_code(hdr, NVME_SC_SUCCESS);
	if (retcode)
		return retcode;

	opcode = cmd[0];

	switch (opcode) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
		retcode = nvme_trans_io(ns, hdr, 0, cmd);
		break;
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
		retcode = nvme_trans_io(ns, hdr, 1, cmd);
		break;
	case INQUIRY:
		retcode = nvme_trans_inquiry(ns, hdr, cmd);
		break;
	case LOG_SENSE:
		retcode = nvme_trans_log_sense(ns, hdr, cmd);
		break;
	case MODE_SELECT:
	case MODE_SELECT_10:
		retcode = nvme_trans_mode_select(ns, hdr, cmd);
		break;
	case MODE_SENSE:
	case MODE_SENSE_10:
		retcode = nvme_trans_mode_sense(ns, hdr, cmd);
		break;
	case READ_CAPACITY:
		retcode = nvme_trans_read_capacity(ns, hdr, cmd, 0);
		break;
	case SERVICE_ACTION_IN_16:
		switch (cmd[1]) {
		case SAI_READ_CAPACITY_16:
			retcode = nvme_trans_read_capacity(ns, hdr, cmd, 1);
			break;
		default:
			goto out;
		}
		break;
	case REPORT_LUNS:
		retcode = nvme_trans_report_luns(ns, hdr, cmd);
		break;
	case REQUEST_SENSE:
		retcode = nvme_trans_request_sense(ns, hdr, cmd);
		break;
	case SECURITY_PROTOCOL_IN:
	case SECURITY_PROTOCOL_OUT:
		retcode = nvme_trans_security_protocol(ns, hdr, cmd);
		break;
	case SYNCHRONIZE_CACHE:
		retcode = nvme_trans_synchronize_cache(ns, hdr);
		break;
	case FORMAT_UNIT:
		retcode = nvme_trans_format_unit(ns, hdr, cmd);
		break;
	case TEST_UNIT_READY:
		retcode = nvme_trans_test_unit_ready(ns, hdr, cmd);
		break;
	case WRITE_BUFFER:
		retcode = nvme_trans_write_buffer(ns, hdr, cmd);
		break;
	case UNMAP:
		retcode = nvme_trans_unmap(ns, hdr, cmd);
		break;
	default:
 out:
		retcode = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		break;
	}
	return retcode;
}

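/*
 * Illustrative userspace sketch (not part of this driver): one way a program
 * might reach nvme_sg_io() below, by issuing the SG_IO ioctl from scsi/sg.h
 * against an NVMe block device node.  The device path, function name and the
 * 6-byte INQUIRY CDB values are example assumptions only.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/sg.h>
 *
 *	int nvme_sg_inquiry_example(void)
 *	{
 *		unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };  // INQUIRY, 96-byte allocation
 *		unsigned char buf[96], sense[32];
 *		struct sg_io_hdr io;
 *		int fd = open("/dev/nvme0n1", O_RDONLY);  // needs CAP_SYS_ADMIN
 *
 *		memset(&io, 0, sizeof(io));
 *		io.interface_id = 'S';                    // checked by nvme_sg_io()
 *		io.cmd_len = sizeof(cdb);
 *		io.cmdp = cdb;
 *		io.dxfer_direction = SG_DXFER_FROM_DEV;
 *		io.dxfer_len = sizeof(buf);
 *		io.dxferp = buf;
 *		io.mx_sb_len = sizeof(sense);
 *		io.sbp = sense;
 *		return ioctl(fd, SG_IO, &io);
 *	}
 */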
int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
{
	struct sg_io_hdr hdr;
	int retcode;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&hdr, u_hdr, sizeof(hdr)))
		return -EFAULT;
	if (hdr.interface_id != 'S')
		return -EINVAL;
	if (hdr.cmd_len > BLK_MAX_CDB)
		return -EINVAL;

	/*
	 * A positive return code means an NVMe status, which has been
	 * translated to sense data.
	 */
	retcode = nvme_scsi_translate(ns, &hdr);
	if (retcode < 0)
		return retcode;
	if (copy_to_user(u_hdr, &hdr, sizeof(sg_io_hdr_t)) > 0)
		return -EFAULT;
	return 0;
}

int nvme_sg_get_version_num(int __user *ip)
{
	return put_user(sg_version_num, ip);
}