Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, at v3.13-rc6 (3044 lines, 85 kB)
/*
 * NVM Express device driver
 * Copyright (c) 2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

/*
 * Refer to the SCSI-NVMe Translation spec for details on how
 * each command is translated.
 */

#include <linux/nvme.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <scsi/sg.h>
#include <scsi/scsi.h>


static int sg_version_num = 30534;	/* 2 digits for each component */

#define SNTI_TRANSLATION_SUCCESS	0
#define SNTI_INTERNAL_ERROR		1

/* VPD Page Codes */
#define VPD_SUPPORTED_PAGES		0x00
#define VPD_SERIAL_NUMBER		0x80
#define VPD_DEVICE_IDENTIFIERS		0x83
#define VPD_EXTENDED_INQUIRY		0x86
#define VPD_BLOCK_DEV_CHARACTERISTICS	0xB1

/* CDB offsets */
#define REPORT_LUNS_CDB_ALLOC_LENGTH_OFFSET		6
#define REPORT_LUNS_SR_OFFSET				2
#define READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET		10
#define REQUEST_SENSE_CDB_ALLOC_LENGTH_OFFSET		4
#define REQUEST_SENSE_DESC_OFFSET			1
#define REQUEST_SENSE_DESC_MASK				0x01
#define DESCRIPTOR_FORMAT_SENSE_DATA_TYPE		1
#define INQUIRY_EVPD_BYTE_OFFSET			1
#define INQUIRY_PAGE_CODE_BYTE_OFFSET			2
#define INQUIRY_EVPD_BIT_MASK				1
#define INQUIRY_CDB_ALLOCATION_LENGTH_OFFSET		3
#define START_STOP_UNIT_CDB_IMMED_OFFSET		1
#define START_STOP_UNIT_CDB_IMMED_MASK			0x1
#define START_STOP_UNIT_CDB_POWER_COND_MOD_OFFSET	3
#define START_STOP_UNIT_CDB_POWER_COND_MOD_MASK		0xF
#define START_STOP_UNIT_CDB_POWER_COND_OFFSET		4
#define START_STOP_UNIT_CDB_POWER_COND_MASK		0xF0
#define START_STOP_UNIT_CDB_NO_FLUSH_OFFSET		4
#define START_STOP_UNIT_CDB_NO_FLUSH_MASK		0x4
#define START_STOP_UNIT_CDB_START_OFFSET		4
#define START_STOP_UNIT_CDB_START_MASK			0x1
#define WRITE_BUFFER_CDB_MODE_OFFSET			1
#define WRITE_BUFFER_CDB_MODE_MASK			0x1F
#define WRITE_BUFFER_CDB_BUFFER_ID_OFFSET		2
#define WRITE_BUFFER_CDB_BUFFER_OFFSET_OFFSET		3
#define WRITE_BUFFER_CDB_PARM_LIST_LENGTH_OFFSET	6
#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_OFFSET		1
#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_MASK		0xC0
#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_SHIFT		6
#define FORMAT_UNIT_CDB_LONG_LIST_OFFSET		1
#define FORMAT_UNIT_CDB_LONG_LIST_MASK			0x20
#define FORMAT_UNIT_CDB_FORMAT_DATA_OFFSET		1
#define FORMAT_UNIT_CDB_FORMAT_DATA_MASK		0x10
#define FORMAT_UNIT_SHORT_PARM_LIST_LEN			4
#define FORMAT_UNIT_LONG_PARM_LIST_LEN			8
#define FORMAT_UNIT_PROT_INT_OFFSET			3
#define FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET		0
#define FORMAT_UNIT_PROT_FIELD_USAGE_MASK		0x07
#define UNMAP_CDB_PARAM_LIST_LENGTH_OFFSET		7

/* Misc. defines */
#define NIBBLE_SHIFT				4
#define FIXED_SENSE_DATA			0x70
#define DESC_FORMAT_SENSE_DATA			0x72
#define FIXED_SENSE_DATA_ADD_LENGTH		10
#define LUN_ENTRY_SIZE				8
#define LUN_DATA_HEADER_SIZE			8
#define ALL_LUNS_RETURNED			0x02
#define ALL_WELL_KNOWN_LUNS_RETURNED		0x01
#define RESTRICTED_LUNS_RETURNED		0x00
#define NVME_POWER_STATE_START_VALID		0x00
#define NVME_POWER_STATE_ACTIVE			0x01
#define NVME_POWER_STATE_IDLE			0x02
#define NVME_POWER_STATE_STANDBY		0x03
#define NVME_POWER_STATE_LU_CONTROL		0x07
#define POWER_STATE_0				0
#define POWER_STATE_1				1
#define POWER_STATE_2				2
#define POWER_STATE_3				3
#define DOWNLOAD_SAVE_ACTIVATE			0x05
#define DOWNLOAD_SAVE_DEFER_ACTIVATE		0x0E
#define ACTIVATE_DEFERRED_MICROCODE		0x0F
#define FORMAT_UNIT_IMMED_MASK			0x2
#define FORMAT_UNIT_IMMED_OFFSET		1
#define KELVIN_TEMP_FACTOR			273
#define FIXED_FMT_SENSE_DATA_SIZE		18
#define DESC_FMT_SENSE_DATA_SIZE		8

/* SCSI/NVMe defines and bit masks */
#define INQ_STANDARD_INQUIRY_PAGE		0x00
#define INQ_SUPPORTED_VPD_PAGES_PAGE		0x00
#define INQ_UNIT_SERIAL_NUMBER_PAGE		0x80
#define INQ_DEVICE_IDENTIFICATION_PAGE		0x83
#define INQ_EXTENDED_INQUIRY_DATA_PAGE		0x86
#define INQ_BDEV_CHARACTERISTICS_PAGE		0xB1
#define INQ_SERIAL_NUMBER_LENGTH		0x14
#define INQ_NUM_SUPPORTED_VPD_PAGES		5
#define VERSION_SPC_4				0x06
#define ACA_UNSUPPORTED				0
#define STANDARD_INQUIRY_LENGTH			36
#define ADDITIONAL_STD_INQ_LENGTH		31
#define EXTENDED_INQUIRY_DATA_PAGE_LENGTH	0x3C
#define RESERVED_FIELD				0

/* SCSI READ/WRITE Defines */
#define IO_CDB_WP_MASK				0xE0
#define IO_CDB_WP_SHIFT				5
#define IO_CDB_FUA_MASK				0x8
#define IO_6_CDB_LBA_OFFSET			0
#define IO_6_CDB_LBA_MASK			0x001FFFFF
#define IO_6_CDB_TX_LEN_OFFSET			4
#define IO_6_DEFAULT_TX_LEN			256
#define IO_10_CDB_LBA_OFFSET			2
#define IO_10_CDB_TX_LEN_OFFSET			7
#define IO_10_CDB_WP_OFFSET			1
#define IO_10_CDB_FUA_OFFSET			1
#define IO_12_CDB_LBA_OFFSET			2
#define IO_12_CDB_TX_LEN_OFFSET			6
#define IO_12_CDB_WP_OFFSET			1
#define IO_12_CDB_FUA_OFFSET			1
#define IO_16_CDB_FUA_OFFSET			1
#define IO_16_CDB_WP_OFFSET			1
#define IO_16_CDB_LBA_OFFSET			2
#define IO_16_CDB_TX_LEN_OFFSET			10

/* Mode Sense/Select defines */
#define MODE_PAGE_INFO_EXCEP			0x1C
#define MODE_PAGE_CACHING			0x08
#define MODE_PAGE_CONTROL			0x0A
#define MODE_PAGE_POWER_CONDITION		0x1A
#define MODE_PAGE_RETURN_ALL			0x3F
#define MODE_PAGE_BLK_DES_LEN			0x08
#define MODE_PAGE_LLBAA_BLK_DES_LEN		0x10
#define MODE_PAGE_CACHING_LEN			0x14
#define MODE_PAGE_CONTROL_LEN			0x0C
#define MODE_PAGE_POW_CND_LEN			0x28
#define MODE_PAGE_INF_EXC_LEN			0x0C
#define MODE_PAGE_ALL_LEN			0x54
#define MODE_SENSE6_MPH_SIZE			4
#define MODE_SENSE6_ALLOC_LEN_OFFSET		4
#define MODE_SENSE_PAGE_CONTROL_OFFSET		2
#define MODE_SENSE_PAGE_CONTROL_MASK		0xC0
#define MODE_SENSE_PAGE_CODE_OFFSET		2
#define MODE_SENSE_PAGE_CODE_MASK		0x3F
#define MODE_SENSE_LLBAA_OFFSET			1
#define MODE_SENSE_LLBAA_MASK			0x10
#define MODE_SENSE_LLBAA_SHIFT			4
#define MODE_SENSE_DBD_OFFSET			1
#define MODE_SENSE_DBD_MASK			8
#define MODE_SENSE_DBD_SHIFT			3
#define MODE_SENSE10_MPH_SIZE			8
#define MODE_SENSE10_ALLOC_LEN_OFFSET		7
#define MODE_SELECT_CDB_PAGE_FORMAT_OFFSET	1
#define MODE_SELECT_CDB_SAVE_PAGES_OFFSET		1
#define MODE_SELECT_6_CDB_PARAM_LIST_LENGTH_OFFSET	4
#define MODE_SELECT_10_CDB_PARAM_LIST_LENGTH_OFFSET	7
#define MODE_SELECT_CDB_PAGE_FORMAT_MASK		0x10
#define MODE_SELECT_CDB_SAVE_PAGES_MASK			0x1
#define MODE_SELECT_6_BD_OFFSET				3
#define MODE_SELECT_10_BD_OFFSET			6
#define MODE_SELECT_10_LLBAA_OFFSET			4
#define MODE_SELECT_10_LLBAA_MASK			1
#define MODE_SELECT_6_MPH_SIZE				4
#define MODE_SELECT_10_MPH_SIZE				8
#define CACHING_MODE_PAGE_WCE_MASK			0x04
#define MODE_SENSE_BLK_DESC_ENABLED			0
#define MODE_SENSE_BLK_DESC_COUNT			1
#define MODE_SELECT_PAGE_CODE_MASK			0x3F
#define SHORT_DESC_BLOCK				8
#define LONG_DESC_BLOCK					16
#define MODE_PAGE_POW_CND_LEN_FIELD			0x26
#define MODE_PAGE_INF_EXC_LEN_FIELD			0x0A
#define MODE_PAGE_CACHING_LEN_FIELD			0x12
#define MODE_PAGE_CONTROL_LEN_FIELD			0x0A
#define MODE_SENSE_PC_CURRENT_VALUES			0

/* Log Sense defines */
#define LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE	0x00
#define LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH	0x07
#define LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE	0x2F
#define LOG_PAGE_TEMPERATURE_PAGE		0x0D
#define LOG_SENSE_CDB_SP_OFFSET			1
#define LOG_SENSE_CDB_SP_NOT_ENABLED		0
#define LOG_SENSE_CDB_PC_OFFSET			2
#define LOG_SENSE_CDB_PC_MASK			0xC0
#define LOG_SENSE_CDB_PC_SHIFT			6
#define LOG_SENSE_CDB_PC_CUMULATIVE_VALUES	1
#define LOG_SENSE_CDB_PAGE_CODE_MASK		0x3F
#define LOG_SENSE_CDB_ALLOC_LENGTH_OFFSET	7
#define REMAINING_INFO_EXCP_PAGE_LENGTH		0x8
#define LOG_INFO_EXCP_PAGE_LENGTH		0xC
#define REMAINING_TEMP_PAGE_LENGTH		0xC
#define LOG_TEMP_PAGE_LENGTH			0x10
#define LOG_TEMP_UNKNOWN			0xFF
#define SUPPORTED_LOG_PAGES_PAGE_LENGTH		0x3

/* Read Capacity defines */
#define READ_CAP_10_RESP_SIZE			8
#define READ_CAP_16_RESP_SIZE			32

/* NVMe Namespace and Command Defines */
#define NVME_GET_SMART_LOG_PAGE			0x02
#define NVME_GET_FEAT_TEMP_THRESH		0x04
#define BYTES_TO_DWORDS				4
#define NVME_MAX_FIRMWARE_SLOT			7

/* Report LUNs defines */
#define REPORT_LUNS_FIRST_LUN_OFFSET		8

/* SCSI ADDITIONAL SENSE Codes */

#define SCSI_ASC_NO_SENSE			0x00
#define SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT	0x03
#define SCSI_ASC_LUN_NOT_READY			0x04
#define SCSI_ASC_WARNING			0x0B
#define SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED	0x10
#define SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED	0x10
#define SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED	0x10
#define SCSI_ASC_UNRECOVERED_READ_ERROR		0x11
#define SCSI_ASC_MISCOMPARE_DURING_VERIFY	0x1D
#define SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID	0x20
#define SCSI_ASC_ILLEGAL_COMMAND		0x20
#define SCSI_ASC_ILLEGAL_BLOCK			0x21
#define SCSI_ASC_INVALID_CDB			0x24
#define SCSI_ASC_INVALID_LUN			0x25
#define SCSI_ASC_INVALID_PARAMETER		0x26
#define SCSI_ASC_FORMAT_COMMAND_FAILED		0x31
#define SCSI_ASC_INTERNAL_TARGET_FAILURE	0x44

/* SCSI ADDITIONAL SENSE Code Qualifiers */

#define SCSI_ASCQ_CAUSE_NOT_REPORTABLE		0x00
#define SCSI_ASCQ_FORMAT_COMMAND_FAILED		0x01
#define SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED	0x01
#define SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED	0x02
#define SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED	0x03
#define SCSI_ASCQ_FORMAT_IN_PROGRESS		0x04
#define SCSI_ASCQ_POWER_LOSS_EXPECTED		0x08
#define SCSI_ASCQ_INVALID_LUN_ID		0x09
/**
 * DEVICE_SPECIFIC_PARAMETER in mode parameter header (see sbc2r16) to
 * enable DPOFUA support type 0x10 value.
 */
#define DEVICE_SPECIFIC_PARAMETER	0
#define VPD_ID_DESCRIPTOR_LENGTH sizeof(VPD_IDENTIFICATION_DESCRIPTOR)

/* MACROs to extract information from CDBs */

#define GET_OPCODE(cdb)		cdb[0]

#define GET_U8_FROM_CDB(cdb, index) (cdb[index] << 0)

#define GET_U16_FROM_CDB(cdb, index) ((cdb[index] << 8) | (cdb[index + 1] << 0))

#define GET_U24_FROM_CDB(cdb, index) ((cdb[index] << 16) | \
				(cdb[index + 1] << 8) | \
				(cdb[index + 2] << 0))

#define GET_U32_FROM_CDB(cdb, index) ((cdb[index] << 24) | \
				(cdb[index + 1] << 16) | \
				(cdb[index + 2] << 8) | \
				(cdb[index + 3] << 0))

#define GET_U64_FROM_CDB(cdb, index) ((((u64)cdb[index]) << 56) | \
				(((u64)cdb[index + 1]) << 48) | \
				(((u64)cdb[index + 2]) << 40) | \
				(((u64)cdb[index + 3]) << 32) | \
				(((u64)cdb[index + 4]) << 24) | \
				(((u64)cdb[index + 5]) << 16) | \
				(((u64)cdb[index + 6]) << 8) | \
				(((u64)cdb[index + 7]) << 0))
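/*
 * Worked example (illustrative only): for a READ(10) CDB of
 * 28 00 00 00 12 34 00 00 08 00, GET_U32_FROM_CDB(cdb, 2) assembles the
 * big-endian LBA field as (0x00 << 24) | (0x00 << 16) | (0x12 << 8) | 0x34
 * = 0x1234, and GET_U16_FROM_CDB(cdb, 7) yields the TRANSFER LENGTH 0x0008.
 * All multi-byte SCSI CDB fields are big-endian, so each macro shifts the
 * lowest-indexed (most significant) byte highest.
 */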
/* Inquiry Helper Macros */
#define GET_INQ_EVPD_BIT(cdb) \
	((GET_U8_FROM_CDB(cdb, INQUIRY_EVPD_BYTE_OFFSET) & \
	INQUIRY_EVPD_BIT_MASK) ? 1 : 0)

#define GET_INQ_PAGE_CODE(cdb) \
	(GET_U8_FROM_CDB(cdb, INQUIRY_PAGE_CODE_BYTE_OFFSET))

#define GET_INQ_ALLOC_LENGTH(cdb) \
	(GET_U16_FROM_CDB(cdb, INQUIRY_CDB_ALLOCATION_LENGTH_OFFSET))

/* Report LUNs Helper Macros */
#define GET_REPORT_LUNS_ALLOC_LENGTH(cdb) \
	(GET_U32_FROM_CDB(cdb, REPORT_LUNS_CDB_ALLOC_LENGTH_OFFSET))

/* Read Capacity Helper Macros */
#define GET_READ_CAP_16_ALLOC_LENGTH(cdb) \
	(GET_U32_FROM_CDB(cdb, READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET))

#define IS_READ_CAP_16(cdb) \
	((cdb[0] == SERVICE_ACTION_IN && cdb[1] == SAI_READ_CAPACITY_16) ? 1 : 0)

/* Request Sense Helper Macros */
#define GET_REQUEST_SENSE_ALLOC_LENGTH(cdb) \
	(GET_U8_FROM_CDB(cdb, REQUEST_SENSE_CDB_ALLOC_LENGTH_OFFSET))

/* Mode Sense Helper Macros */
#define GET_MODE_SENSE_DBD(cdb) \
	((GET_U8_FROM_CDB(cdb, MODE_SENSE_DBD_OFFSET) & MODE_SENSE_DBD_MASK) >> \
	MODE_SENSE_DBD_SHIFT)

#define GET_MODE_SENSE_LLBAA(cdb) \
	((GET_U8_FROM_CDB(cdb, MODE_SENSE_LLBAA_OFFSET) & \
	MODE_SENSE_LLBAA_MASK) >> MODE_SENSE_LLBAA_SHIFT)

#define GET_MODE_SENSE_MPH_SIZE(cdb10) \
	(cdb10 ? MODE_SENSE10_MPH_SIZE : MODE_SENSE6_MPH_SIZE)


/*
 * Struct to gather data that needs to be extracted from a SCSI CDB.
 * Not conforming to any particular CDB variant, but compatible with all.
 */
struct nvme_trans_io_cdb {
	u8 fua;
	u8 prot_info;
	u64 lba;
	u32 xfer_len;
};


/* Internal Helper Functions */


/* Copy data to userspace memory */

static int nvme_trans_copy_to_user(struct sg_io_hdr *hdr, void *from,
				unsigned long n)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	unsigned long not_copied;
	int i;
	void *index = from;
	size_t remaining = n;
	size_t xfer_len;

	if (hdr->iovec_count > 0) {
		struct sg_iovec sgl;

		for (i = 0; i < hdr->iovec_count; i++) {
			not_copied = copy_from_user(&sgl, hdr->dxferp +
						i * sizeof(struct sg_iovec),
						sizeof(struct sg_iovec));
			if (not_copied)
				return -EFAULT;
			xfer_len = min(remaining, sgl.iov_len);
			not_copied = copy_to_user(sgl.iov_base, index,
								xfer_len);
			if (not_copied) {
				res = -EFAULT;
				break;
			}
			index += xfer_len;
			remaining -= xfer_len;
			if (remaining == 0)
				break;
		}
		return res;
	}
	not_copied = copy_to_user(hdr->dxferp, from, n);
	if (not_copied)
		res = -EFAULT;
	return res;
}
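/*
 * Usage sketch (illustrative, not part of this driver): the iovec path in
 * the copy helpers is taken when userspace issues ioctl(fd, SG_IO, &hdr)
 * with hdr.iovec_count > 0, in which case hdr.dxferp points at an array of
 * struct sg_iovec rather than at a flat data buffer. Each vector is
 * consumed in order until the transfer length is exhausted.
 */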
/* Copy data from userspace memory */

static int nvme_trans_copy_from_user(struct sg_io_hdr *hdr, void *to,
				unsigned long n)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	unsigned long not_copied;
	int i;
	void *index = to;
	size_t remaining = n;
	size_t xfer_len;

	if (hdr->iovec_count > 0) {
		struct sg_iovec sgl;

		for (i = 0; i < hdr->iovec_count; i++) {
			not_copied = copy_from_user(&sgl, hdr->dxferp +
						i * sizeof(struct sg_iovec),
						sizeof(struct sg_iovec));
			if (not_copied)
				return -EFAULT;
			xfer_len = min(remaining, sgl.iov_len);
			not_copied = copy_from_user(index, sgl.iov_base,
								xfer_len);
			if (not_copied) {
				res = -EFAULT;
				break;
			}
			index += xfer_len;
			remaining -= xfer_len;
			if (remaining == 0)
				break;
		}
		return res;
	}

	not_copied = copy_from_user(to, hdr->dxferp, n);
	if (not_copied)
		res = -EFAULT;
	return res;
}

/* Status/Sense Buffer Writeback */

static int nvme_trans_completion(struct sg_io_hdr *hdr, u8 status, u8 sense_key,
				u8 asc, u8 ascq)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u8 xfer_len;
	u8 resp[DESC_FMT_SENSE_DATA_SIZE];

	if (scsi_status_is_good(status)) {
		hdr->status = SAM_STAT_GOOD;
		hdr->masked_status = GOOD;
		hdr->host_status = DID_OK;
		hdr->driver_status = DRIVER_OK;
		hdr->sb_len_wr = 0;
	} else {
		hdr->status = status;
		hdr->masked_status = status >> 1;
		hdr->host_status = DID_OK;
		hdr->driver_status = DRIVER_OK;

		memset(resp, 0, DESC_FMT_SENSE_DATA_SIZE);
		resp[0] = DESC_FORMAT_SENSE_DATA;
		resp[1] = sense_key;
		resp[2] = asc;
		resp[3] = ascq;

		xfer_len = min_t(u8, hdr->mx_sb_len, DESC_FMT_SENSE_DATA_SIZE);
		hdr->sb_len_wr = xfer_len;
		if (copy_to_user(hdr->sbp, resp, xfer_len) > 0)
			res = -EFAULT;
	}

	return res;
}

static int nvme_trans_status_code(struct sg_io_hdr *hdr, int nvme_sc)
{
	u8 status, sense_key, asc, ascq;
	int res = SNTI_TRANSLATION_SUCCESS;

	/* For non-nvme (Linux) errors, simply return the error code */
	if (nvme_sc < 0)
		return nvme_sc;

	/* Mask DNR, More, and reserved fields */
	nvme_sc &= 0x7FF;

	switch (nvme_sc) {
	/* Generic Command Status */
	case NVME_SC_SUCCESS:
		status = SAM_STAT_GOOD;
		sense_key = NO_SENSE;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_INVALID_OPCODE:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_ILLEGAL_COMMAND;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_INVALID_FIELD:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_INVALID_CDB;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_DATA_XFER_ERROR:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_POWER_LOSS:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_WARNING;
		ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
		break;
	case NVME_SC_INTERNAL:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = HARDWARE_ERROR;
		asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_ABORT_REQ:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_ABORT_QUEUE:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_FUSED_FAIL:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_FUSED_MISSING:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_INVALID_NS:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
		ascq = SCSI_ASCQ_INVALID_LUN_ID;
		break;
	case NVME_SC_LBA_RANGE:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_ILLEGAL_BLOCK;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_CAP_EXCEEDED:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_NS_NOT_READY:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = NOT_READY;
		asc = SCSI_ASC_LUN_NOT_READY;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;

	/* Command Specific Status */
	case NVME_SC_INVALID_FORMAT:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
		ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
		break;
	case NVME_SC_BAD_ATTRIBUTES:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_INVALID_CDB;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;

	/* Media Errors */
	case NVME_SC_WRITE_FAULT:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_READ_ERROR:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_GUARD_CHECK:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
		ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
		break;
	case NVME_SC_APPTAG_CHECK:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
		ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
		break;
	case NVME_SC_REFTAG_CHECK:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
		ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
		break;
	case NVME_SC_COMPARE_FAILED:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MISCOMPARE;
		asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_ACCESS_DENIED:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
		ascq = SCSI_ASCQ_INVALID_LUN_ID;
		break;

	/* Unspecified/Default */
	case NVME_SC_CMDID_CONFLICT:
	case NVME_SC_CMD_SEQ_ERROR:
	case NVME_SC_CQ_INVALID:
	case NVME_SC_QID_INVALID:
	case NVME_SC_QUEUE_SIZE:
	case NVME_SC_ABORT_LIMIT:
	case NVME_SC_ABORT_MISSING:
	case NVME_SC_ASYNC_LIMIT:
	case NVME_SC_FIRMWARE_SLOT:
	case NVME_SC_FIRMWARE_IMAGE:
	case NVME_SC_INVALID_VECTOR:
	case NVME_SC_INVALID_LOG_PAGE:
	default:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	}

	res = nvme_trans_completion(hdr, status, sense_key, asc, ascq);

	return res;
}
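/*
 * Example of the mapping above (illustrative): an NVMe "LBA Out of Range"
 * completion (NVME_SC_LBA_RANGE) surfaces to the SG_IO caller as
 * CHECK CONDITION with sense key ILLEGAL REQUEST and ASC 0x21
 * (SCSI_ASC_ILLEGAL_BLOCK); any status code not listed collapses into the
 * default CHECK CONDITION / ILLEGAL REQUEST / NO SENSE tuple.
 */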
/* INQUIRY Helper Functions */

static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *inq_response,
					int alloc_len)
{
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ns *id_ns;
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	int xfer_len;
	u8 resp_data_format = 0x02;
	u8 protect;
	u8 cmdque = 0x01 << 1;

	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
				&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out_dma;
	}

	/* nvme ns identify - use DPS value for PROTECT field */
	nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	/*
	 * If nvme_sc was -ve, res will be -ve here.
	 * If nvme_sc was +ve, the status would have been translated, and res
	 * can only be 0 or -ve.
	 * - If 0 && nvme_sc > 0, then go into next if where res gets nvme_sc
	 * - If -ve, return because it's a Linux error.
	 */
	if (res)
		goto out_free;
	if (nvme_sc) {
		res = nvme_sc;
		goto out_free;
	}
	id_ns = mem;
	(id_ns->dps) ? (protect = 0x01) : (protect = 0);
	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
	inq_response[2] = VERSION_SPC_4;
	inq_response[3] = resp_data_format;	/*normaca=0 | hisup=0 */
	inq_response[4] = ADDITIONAL_STD_INQ_LENGTH;
	inq_response[5] = protect;	/* sccs=0 | acc=0 | tpgs=0 | pc3=0 */
	inq_response[7] = cmdque;	/* wbus16=0 | sync=0 | vs=0 */
	strncpy(&inq_response[8], "NVMe    ", 8);
	strncpy(&inq_response[16], dev->model, 16);
	strncpy(&inq_response[32], dev->firmware_rev, 4);

	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

 out_free:
	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
			dma_addr);
 out_dma:
	return res;
}

static int nvme_trans_supported_vpd_pages(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *inq_response,
					int alloc_len)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int xfer_len;

	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
	inq_response[1] = INQ_SUPPORTED_VPD_PAGES_PAGE;   /* Page Code */
	inq_response[3] = INQ_NUM_SUPPORTED_VPD_PAGES;    /* Page Length */
	inq_response[4] = INQ_SUPPORTED_VPD_PAGES_PAGE;
	inq_response[5] = INQ_UNIT_SERIAL_NUMBER_PAGE;
	inq_response[6] = INQ_DEVICE_IDENTIFICATION_PAGE;
	inq_response[7] = INQ_EXTENDED_INQUIRY_DATA_PAGE;
	inq_response[8] = INQ_BDEV_CHARACTERISTICS_PAGE;

	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

	return res;
}

static int nvme_trans_unit_serial_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *inq_response,
					int alloc_len)
{
	struct nvme_dev *dev = ns->dev;
	int res = SNTI_TRANSLATION_SUCCESS;
	int xfer_len;

	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
	inq_response[1] = INQ_UNIT_SERIAL_NUMBER_PAGE; /* Page Code */
	inq_response[3] = INQ_SERIAL_NUMBER_LENGTH;    /* Page Length */
	strncpy(&inq_response[4], dev->serial, INQ_SERIAL_NUMBER_LENGTH);

	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

	return res;
}

static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 *inq_response, int alloc_len)
{
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ctrl *id_ctrl;
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	u8 ieee[4];
	int xfer_len;
	__be32 tmp_id = cpu_to_be32(ns->ns_id);

	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
					&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out_dma;
	}

	/* nvme controller identify */
	nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_free;
	if (nvme_sc) {
		res = nvme_sc;
		goto out_free;
	}
	id_ctrl = mem;

	/* Since SCSI tried to save 4 bits... [SPC-4(r34) Table 591] */
	ieee[0] = id_ctrl->ieee[0] << 4;
	ieee[1] = id_ctrl->ieee[0] >> 4 | id_ctrl->ieee[1] << 4;
	ieee[2] = id_ctrl->ieee[1] >> 4 | id_ctrl->ieee[2] << 4;
	ieee[3] = id_ctrl->ieee[2] >> 4;
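	/*
	 * Worked example of the nibble shuffle above (illustrative values):
	 * if id_ctrl->ieee[] = {0x34, 0x12, 0xAB} (OUI 0xAB1234 stored
	 * little-endian), then ieee[] becomes {0x40, 0x23, 0xB1, 0x0A} and
	 * bytes 8-11 of the descriptor read 6A B1 23 40: the NAA 6h nibble,
	 * the 24-bit OUI shifted left one nibble, and a zero nibble that
	 * starts the vendor-specific identifier.
	 */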
	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
	inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE;    /* Page Code */
	inq_response[3] = 20;	/* Page Length */
	/* Designation Descriptor start */
	inq_response[4] = 0x01;	/* Proto ID=0h | Code set=1h */
	inq_response[5] = 0x03;	/* PIV=0b | Asso=00b | Designator Type=3h */
	inq_response[6] = 0x00;	/* Rsvd */
	inq_response[7] = 16;	/* Designator Length */
	/* Designator start */
	inq_response[8] = 0x60 | ieee[3]; /* NAA=6h | IEEE ID MSB, High nibble*/
	inq_response[9] = ieee[2];	/* IEEE ID */
	inq_response[10] = ieee[1];	/* IEEE ID */
	inq_response[11] = ieee[0];	/* IEEE ID| Vendor Specific ID... */
	inq_response[12] = (dev->pci_dev->vendor & 0xFF00) >> 8;
	inq_response[13] = (dev->pci_dev->vendor & 0x00FF);
	inq_response[14] = dev->serial[0];
	inq_response[15] = dev->serial[1];
	inq_response[16] = dev->model[0];
	inq_response[17] = dev->model[1];
	memcpy(&inq_response[18], &tmp_id, sizeof(u32));
	/* Last 2 bytes are zero */

	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

 out_free:
	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
			dma_addr);
 out_dma:
	return res;
}

static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					int alloc_len)
{
	u8 *inq_response;
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ctrl *id_ctrl;
	struct nvme_id_ns *id_ns;
	int xfer_len;
	u8 microcode = 0x80;
	u8 spt;
	u8 spt_lut[8] = {0, 0, 2, 1, 4, 6, 5, 7};
	u8 grd_chk, app_chk, ref_chk, protect;
	u8 uask_sup = 0x20;
	u8 v_sup;
	u8 luiclr = 0x01;

	inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
	if (inq_response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}

	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
					&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out_dma;
	}

	/* nvme ns identify */
	nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_free;
	if (nvme_sc) {
		res = nvme_sc;
		goto out_free;
	}
	id_ns = mem;
	spt = spt_lut[(id_ns->dpc) & 0x07] << 3;
	(id_ns->dps) ? (protect = 0x01) : (protect = 0);
	grd_chk = protect << 2;
	app_chk = protect << 1;
	ref_chk = protect;

	/* nvme controller identify */
	nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_free;
	if (nvme_sc) {
		res = nvme_sc;
		goto out_free;
	}
	id_ctrl = mem;
	v_sup = id_ctrl->vwc;

	memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
	inq_response[1] = INQ_EXTENDED_INQUIRY_DATA_PAGE;    /* Page Code */
	inq_response[2] = 0x00;    /* Page Length MSB */
	inq_response[3] = 0x3C;    /* Page Length LSB */
	inq_response[4] = microcode | spt | grd_chk | app_chk | ref_chk;
	inq_response[5] = uask_sup;
	inq_response[6] = v_sup;
	inq_response[7] = luiclr;
	inq_response[8] = 0;
	inq_response[9] = 0;

	xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

 out_free:
	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
			dma_addr);
 out_dma:
	kfree(inq_response);
 out_mem:
	return res;
}

static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					int alloc_len)
{
	u8 *inq_response;
	int res = SNTI_TRANSLATION_SUCCESS;
	int xfer_len;

	inq_response = kzalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
	if (inq_response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}

	inq_response[1] = INQ_BDEV_CHARACTERISTICS_PAGE;    /* Page Code */
	inq_response[2] = 0x00;    /* Page Length MSB */
	inq_response[3] = 0x3C;    /* Page Length LSB */
	inq_response[4] = 0x00;    /* Medium Rotation Rate MSB */
	inq_response[5] = 0x01;    /* Medium Rotation Rate LSB */
	inq_response[6] = 0x00;    /* Form Factor */

	xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

	kfree(inq_response);
 out_mem:
	return res;
}

/* LOG SENSE Helper Functions */

static int nvme_trans_log_supp_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					int alloc_len)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int xfer_len;
	u8 *log_response;

	log_response = kzalloc(LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH, GFP_KERNEL);
	if (log_response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}

	log_response[0] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
	/* Subpage=0x00, Page Length MSB=0 */
	log_response[3] = SUPPORTED_LOG_PAGES_PAGE_LENGTH;
	log_response[4] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
	log_response[5] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
	log_response[6] = LOG_PAGE_TEMPERATURE_PAGE;

	xfer_len = min(alloc_len, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH);
	res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);

	kfree(log_response);
 out_mem:
	return res;
}

static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, int alloc_len)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int xfer_len;
	u8 *log_response;
	struct nvme_command c;
	struct nvme_dev *dev = ns->dev;
	struct nvme_smart_log *smart_log;
	dma_addr_t dma_addr;
	void *mem;
	u8 temp_c;
	u16 temp_k;

	log_response = kzalloc(LOG_INFO_EXCP_PAGE_LENGTH, GFP_KERNEL);
	if (log_response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}

	mem = dma_alloc_coherent(&dev->pci_dev->dev,
					sizeof(struct nvme_smart_log),
					&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out_dma;
	}

	/* Get SMART Log Page */
	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_get_log_page;
	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
	c.common.prp1 = cpu_to_le64(dma_addr);
	c.common.cdw10[0] = cpu_to_le32(((sizeof(struct nvme_smart_log) /
			BYTES_TO_DWORDS) << 16) | NVME_GET_SMART_LOG_PAGE);
	res = nvme_submit_admin_cmd(dev, &c, NULL);
	if (res != NVME_SC_SUCCESS) {
		temp_c = LOG_TEMP_UNKNOWN;
	} else {
		smart_log = mem;
		temp_k = (smart_log->temperature[1] << 8) +
				(smart_log->temperature[0]);
		temp_c = temp_k - KELVIN_TEMP_FACTOR;
	}

	log_response[0] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
	/* Subpage=0x00, Page Length MSB=0 */
	log_response[3] = REMAINING_INFO_EXCP_PAGE_LENGTH;
	/* Informational Exceptions Log Parameter 1 Start */
	/* Parameter Code=0x0000 bytes 4,5 */
	log_response[6] = 0x23;	/* DU=0, TSD=1, ETC=0, TMC=0, FMT_AND_LNK=11b */
	log_response[7] = 0x04;	/* PARAMETER LENGTH */
	/* Add sense Code and qualifier = 0x00 each */
	/* Use Temperature from NVMe Get Log Page, convert to C from K */
	log_response[10] = temp_c;

	xfer_len = min(alloc_len, LOG_INFO_EXCP_PAGE_LENGTH);
	res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);

	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log),
			mem, dma_addr);
 out_dma:
	kfree(log_response);
 out_mem:
	return res;
}
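/*
 * Note on the temperature math above and in nvme_trans_log_temperature()
 * below: the NVMe SMART / Health log reports a little-endian temperature
 * in Kelvin, while the SCSI log pages expect degrees Celsius. Illustrative
 * conversion: raw bytes 3B 01 give temp_k = (0x01 << 8) + 0x3B = 315, and
 * 315 - KELVIN_TEMP_FACTOR (273) = 42 C.
 */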
static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					int alloc_len)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int xfer_len;
	u8 *log_response;
	struct nvme_command c;
	struct nvme_dev *dev = ns->dev;
	struct nvme_smart_log *smart_log;
	dma_addr_t dma_addr;
	void *mem;
	u32 feature_resp;
	u8 temp_c_cur, temp_c_thresh;
	u16 temp_k;

	log_response = kzalloc(LOG_TEMP_PAGE_LENGTH, GFP_KERNEL);
	if (log_response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}

	mem = dma_alloc_coherent(&dev->pci_dev->dev,
					sizeof(struct nvme_smart_log),
					&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out_dma;
	}

	/* Get SMART Log Page */
	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_get_log_page;
	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
	c.common.prp1 = cpu_to_le64(dma_addr);
	c.common.cdw10[0] = cpu_to_le32(((sizeof(struct nvme_smart_log) /
			BYTES_TO_DWORDS) << 16) | NVME_GET_SMART_LOG_PAGE);
	res = nvme_submit_admin_cmd(dev, &c, NULL);
	if (res != NVME_SC_SUCCESS) {
		temp_c_cur = LOG_TEMP_UNKNOWN;
	} else {
		smart_log = mem;
		temp_k = (smart_log->temperature[1] << 8) +
				(smart_log->temperature[0]);
		temp_c_cur = temp_k - KELVIN_TEMP_FACTOR;
	}

	/* Get Features for Temp Threshold */
	res = nvme_get_features(dev, NVME_FEAT_TEMP_THRESH, 0, 0,
								&feature_resp);
	if (res != NVME_SC_SUCCESS)
		temp_c_thresh = LOG_TEMP_UNKNOWN;
	else
		temp_c_thresh = (feature_resp & 0xFFFF) - KELVIN_TEMP_FACTOR;

	log_response[0] = LOG_PAGE_TEMPERATURE_PAGE;
	/* Subpage=0x00, Page Length MSB=0 */
	log_response[3] = REMAINING_TEMP_PAGE_LENGTH;
	/* Temperature Log Parameter 1 (Temperature) Start */
	/* Parameter Code = 0x0000 */
	log_response[6] = 0x01;	/* Format and Linking = 01b */
	log_response[7] = 0x02;	/* Parameter Length */
	/* Use Temperature from NVMe Get Log Page, convert to C from K */
	log_response[9] = temp_c_cur;
	/* Temperature Log Parameter 2 (Reference Temperature) Start */
	log_response[11] = 0x01;	/* Parameter Code = 0x0001 */
	log_response[12] = 0x01;	/* Format and Linking = 01b */
	log_response[13] = 0x02;	/* Parameter Length */
	/* Use Temperature Thresh from NVMe Get Log Page, convert to C from K */
	log_response[15] = temp_c_thresh;

	xfer_len = min(alloc_len, LOG_TEMP_PAGE_LENGTH);
	res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);

	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log),
			mem, dma_addr);
 out_dma:
	kfree(log_response);
 out_mem:
	return res;
}

/* MODE SENSE Helper Functions */

static int nvme_trans_fill_mode_parm_hdr(u8 *resp, int len, u8 cdb10, u8 llbaa,
					u16 mode_data_length, u16 blk_desc_len)
{
	/* Quick check to make sure I don't stomp on my own memory... */
	if ((cdb10 && len < 8) || (!cdb10 && len < 4))
		return SNTI_INTERNAL_ERROR;

	if (cdb10) {
		resp[0] = (mode_data_length & 0xFF00) >> 8;
		resp[1] = (mode_data_length & 0x00FF);
		/* resp[2] and [3] are zero */
		resp[4] = llbaa;
		resp[5] = RESERVED_FIELD;
		resp[6] = (blk_desc_len & 0xFF00) >> 8;
		resp[7] = (blk_desc_len & 0x00FF);
	} else {
		resp[0] = (mode_data_length & 0x00FF);
		/* resp[1] and [2] are zero */
		resp[3] = (blk_desc_len & 0x00FF);
	}

	return SNTI_TRANSLATION_SUCCESS;
}
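/*
 * Illustrative sizing example for the header above: a MODE SENSE(10)
 * (cdb10 = 1) reply carrying one short block descriptor (8 bytes) and the
 * caching mode page (MODE_PAGE_CACHING_LEN = 0x14 = 20 bytes) is
 * 8 + 8 + 20 = 36 bytes total, and MODE DATA LENGTH excludes its own two
 * bytes: 3 + (3 * 1) + 8 + 20 = 34, matching the calculation in
 * nvme_trans_mode_page_create() below (spc4r34 Table 440).
 */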
static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 *resp, int len, u8 llbaa)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ns *id_ns;
	u8 flbas;
	u32 lba_length;

	if (llbaa == 0 && len < MODE_PAGE_BLK_DES_LEN)
		return SNTI_INTERNAL_ERROR;
	else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN)
		return SNTI_INTERNAL_ERROR;

	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
					&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out;
	}

	/* nvme ns identify */
	nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_dma;
	if (nvme_sc) {
		res = nvme_sc;
		goto out_dma;
	}
	id_ns = mem;
	flbas = (id_ns->flbas) & 0x0F;
	lba_length = (1 << (id_ns->lbaf[flbas].ds));

	if (llbaa == 0) {
		__be32 tmp_cap = cpu_to_be32(le64_to_cpu(id_ns->ncap));
		/* Byte 4 is reserved */
		__be32 tmp_len = cpu_to_be32(lba_length & 0x00FFFFFF);

		memcpy(resp, &tmp_cap, sizeof(u32));
		memcpy(&resp[4], &tmp_len, sizeof(u32));
	} else {
		__be64 tmp_cap = cpu_to_be64(le64_to_cpu(id_ns->ncap));
		__be32 tmp_len = cpu_to_be32(lba_length);

		memcpy(resp, &tmp_cap, sizeof(u64));
		/* Bytes 8, 9, 10, 11 are reserved */
		memcpy(&resp[12], &tmp_len, sizeof(u32));
	}

 out_dma:
	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
			dma_addr);
 out:
	return res;
}

static int nvme_trans_fill_control_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *resp,
					int len)
{
	if (len < MODE_PAGE_CONTROL_LEN)
		return SNTI_INTERNAL_ERROR;

	resp[0] = MODE_PAGE_CONTROL;
	resp[1] = MODE_PAGE_CONTROL_LEN_FIELD;
	resp[2] = 0x0E;		/* TST=000b, TMF_ONLY=0, DPICZ=1,
				 * D_SENSE=1, GLTSD=1, RLEC=0 */
	resp[3] = 0x12;		/* Q_ALGO_MODIFIER=1h, NUAR=0, QERR=01b */
	/* Byte 4:  VS=0, RAC=0, UA_INT=0, SWP=0 */
	resp[5] = 0x40;		/* ATO=0, TAS=1, ATMPE=0, RWWP=0, AUTOLOAD=0 */
	/* resp[6] and [7] are obsolete, thus zero */
	resp[8] = 0xFF;		/* Busy timeout period = 0xffff */
	resp[9] = 0xFF;
	/* Bytes 10,11: Extended selftest completion time = 0x0000 */

	return SNTI_TRANSLATION_SUCCESS;
}

static int nvme_trans_fill_caching_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr,
					u8 *resp, int len)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	u32 feature_resp;
	u8 vwc;

	if (len < MODE_PAGE_CACHING_LEN)
		return SNTI_INTERNAL_ERROR;

	nvme_sc = nvme_get_features(dev, NVME_FEAT_VOLATILE_WC, 0, 0,
								&feature_resp);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out;
	if (nvme_sc) {
		res = nvme_sc;
		goto out;
	}
	vwc = feature_resp & 0x00000001;

	resp[0] = MODE_PAGE_CACHING;
	resp[1] = MODE_PAGE_CACHING_LEN_FIELD;
	resp[2] = vwc << 2;

 out:
	return res;
}

static int nvme_trans_fill_pow_cnd_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *resp,
					int len)
{
	int res = SNTI_TRANSLATION_SUCCESS;

	if (len < MODE_PAGE_POW_CND_LEN)
		return SNTI_INTERNAL_ERROR;

	resp[0] = MODE_PAGE_POWER_CONDITION;
	resp[1] = MODE_PAGE_POW_CND_LEN_FIELD;
	/* All other bytes are zero */

	return res;
}

static int nvme_trans_fill_inf_exc_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *resp,
					int len)
{
	int res = SNTI_TRANSLATION_SUCCESS;

	if (len < MODE_PAGE_INF_EXC_LEN)
		return SNTI_INTERNAL_ERROR;

	resp[0] = MODE_PAGE_INFO_EXCEP;
	resp[1] = MODE_PAGE_INF_EXC_LEN_FIELD;
	resp[2] = 0x88;
	/* All other bytes are zero */

	return res;
}

static int nvme_trans_fill_all_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 *resp, int len)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u16 mode_pages_offset_1 = 0;
	u16 mode_pages_offset_2, mode_pages_offset_3, mode_pages_offset_4;

	mode_pages_offset_2 = mode_pages_offset_1 + MODE_PAGE_CACHING_LEN;
	mode_pages_offset_3 = mode_pages_offset_2 + MODE_PAGE_CONTROL_LEN;
	mode_pages_offset_4 = mode_pages_offset_3 + MODE_PAGE_POW_CND_LEN;

	res = nvme_trans_fill_caching_page(ns, hdr, &resp[mode_pages_offset_1],
					MODE_PAGE_CACHING_LEN);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out;
	res = nvme_trans_fill_control_page(ns, hdr, &resp[mode_pages_offset_2],
					MODE_PAGE_CONTROL_LEN);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out;
	res = nvme_trans_fill_pow_cnd_page(ns, hdr, &resp[mode_pages_offset_3],
					MODE_PAGE_POW_CND_LEN);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out;
	res = nvme_trans_fill_inf_exc_page(ns, hdr, &resp[mode_pages_offset_4],
					MODE_PAGE_INF_EXC_LEN);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out;

 out:
	return res;
}

static inline int nvme_trans_get_blk_desc_len(u8 dbd, u8 llbaa)
{
	if (dbd == MODE_SENSE_BLK_DESC_ENABLED) {
		/* SPC-4: len = 8 x Num_of_descriptors if llbaa = 0, 16x if 1 */
		return 8 * (llbaa + 1) * MODE_SENSE_BLK_DESC_COUNT;
	} else {
		return 0;
	}
}
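/*
 * The MODE SENSE response assembled by nvme_trans_mode_page_create() below
 * is laid out as [mode parameter header][optional block descriptor(s)]
 * [mode page(s)]; the fill callback passed in writes only the mode page
 * region, so the same scaffolding serves every supported page.
 */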
static int nvme_trans_mode_page_create(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *cmd,
					u16 alloc_len, u8 cdb10,
					int (*mode_page_fill_func)
					(struct nvme_ns *,
					struct sg_io_hdr *hdr, u8 *, int),
					u16 mode_pages_tot_len)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int xfer_len;
	u8 *response;
	u8 dbd, llbaa;
	u16 resp_size;
	int mph_size;
	u16 mode_pages_offset_1;
	u16 blk_desc_len, blk_desc_offset, mode_data_length;

	dbd = GET_MODE_SENSE_DBD(cmd);
	llbaa = GET_MODE_SENSE_LLBAA(cmd);
	mph_size = GET_MODE_SENSE_MPH_SIZE(cdb10);
	blk_desc_len = nvme_trans_get_blk_desc_len(dbd, llbaa);

	resp_size = mph_size + blk_desc_len + mode_pages_tot_len;
	/* Refer spc4r34 Table 440 for calculation of Mode data Length field */
	mode_data_length = 3 + (3 * cdb10) + blk_desc_len + mode_pages_tot_len;

	blk_desc_offset = mph_size;
	mode_pages_offset_1 = blk_desc_offset + blk_desc_len;

	response = kzalloc(resp_size, GFP_KERNEL);
	if (response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}

	res = nvme_trans_fill_mode_parm_hdr(&response[0], mph_size, cdb10,
					llbaa, mode_data_length, blk_desc_len);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out_free;
	if (blk_desc_len > 0) {
		res = nvme_trans_fill_blk_desc(ns, hdr,
					&response[blk_desc_offset],
					blk_desc_len, llbaa);
		if (res != SNTI_TRANSLATION_SUCCESS)
			goto out_free;
	}
	res = mode_page_fill_func(ns, hdr, &response[mode_pages_offset_1],
					mode_pages_tot_len);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out_free;

	xfer_len = min(alloc_len, resp_size);
	res = nvme_trans_copy_to_user(hdr, response, xfer_len);

 out_free:
	kfree(response);
 out_mem:
	return res;
}

/* Read Capacity Helper Functions */

static void nvme_trans_fill_read_cap(u8 *response, struct nvme_id_ns *id_ns,
					u8 cdb16)
{
	u8 flbas;
	u32 lba_length;
	u64 rlba;
	u8 prot_en;
	u8 p_type_lut[4] = {0, 0, 1, 2};
	__be64 tmp_rlba;
	__be32 tmp_rlba_32;
	__be32 tmp_len;

	flbas = (id_ns->flbas) & 0x0F;
	lba_length = (1 << (id_ns->lbaf[flbas].ds));
	rlba = le64_to_cpup(&id_ns->nsze) - 1;
	(id_ns->dps) ? (prot_en = 0x01) : (prot_en = 0);
	if (!cdb16) {
		if (rlba > 0xFFFFFFFF)
			rlba = 0xFFFFFFFF;
		tmp_rlba_32 = cpu_to_be32(rlba);
		tmp_len = cpu_to_be32(lba_length);
		memcpy(response, &tmp_rlba_32, sizeof(u32));
		memcpy(&response[4], &tmp_len, sizeof(u32));
	} else {
		tmp_rlba = cpu_to_be64(rlba);
		tmp_len = cpu_to_be32(lba_length);
		memcpy(response, &tmp_rlba, sizeof(u64));
		memcpy(&response[8], &tmp_len, sizeof(u32));
		response[12] = (p_type_lut[id_ns->dps & 0x3] << 1) | prot_en;
		/* P_I_Exponent = 0x0 | LBPPBE = 0x0 */
		/* LBPME = 0 | LBPRZ = 0 | LALBA = 0x00 */
		/* Bytes 16-31 - Reserved */
	}
}

/* Start Stop Unit Helper Functions */

static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
						u8 pc, u8 pcmod, u8 start)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ctrl *id_ctrl;
	int lowest_pow_st;	/* max npss = lowest power consumption */
	unsigned ps_desired = 0;

	/* NVMe Controller Identify */
	mem = dma_alloc_coherent(&dev->pci_dev->dev,
				sizeof(struct nvme_id_ctrl),
				&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out;
	}
	nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_dma;
	if (nvme_sc) {
		res = nvme_sc;
		goto out_dma;
	}
	id_ctrl = mem;
	lowest_pow_st = id_ctrl->npss - 1;

	switch (pc) {
	case NVME_POWER_STATE_START_VALID:
		/* Action unspecified if POWER CONDITION MODIFIER != 0 */
		if (pcmod == 0 && start == 0x1)
			ps_desired = POWER_STATE_0;
		if (pcmod == 0 && start == 0x0)
			ps_desired = lowest_pow_st;
		break;
	case NVME_POWER_STATE_ACTIVE:
		/* Action unspecified if POWER CONDITION MODIFIER != 0 */
		if (pcmod == 0)
			ps_desired = POWER_STATE_0;
		break;
	case NVME_POWER_STATE_IDLE:
		/* Action unspecified if POWER CONDITION MODIFIER != [0,1,2] */
		/* min of desired state and (lps-1) because lps is STOP */
		if (pcmod == 0x0)
			ps_desired = min(POWER_STATE_1, (lowest_pow_st - 1));
		else if (pcmod == 0x1)
			ps_desired = min(POWER_STATE_2, (lowest_pow_st - 1));
		else if (pcmod == 0x2)
			ps_desired = min(POWER_STATE_3, (lowest_pow_st - 1));
		break;
	case NVME_POWER_STATE_STANDBY:
		/* Action unspecified if POWER CONDITION MODIFIER != [0,1] */
		if (pcmod == 0x0)
			ps_desired = max(0, (lowest_pow_st - 2));
		else if (pcmod == 0x1)
			ps_desired = max(0, (lowest_pow_st - 1));
		break;
	case NVME_POWER_STATE_LU_CONTROL:
	default:
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		break;
	}
	nvme_sc = nvme_set_features(dev, NVME_FEAT_POWER_MGMT, ps_desired, 0,
				    NULL);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_dma;
	if (nvme_sc)
		res = nvme_sc;
 out_dma:
	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem,
			dma_addr);
 out:
	return res;
}

/* Write Buffer Helper Functions */
/* Also using this for Format Unit with hdr passed as NULL, and buffer_id, 0 */
static int nvme_trans_send_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 opcode, u32 tot_len, u32 offset,
					u8 buffer_id)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	struct nvme_command c;
	struct nvme_iod *iod = NULL;
	unsigned length;

	memset(&c, 0, sizeof(c));
	c.common.opcode = opcode;
	if (opcode == nvme_admin_download_fw) {
		if (hdr->iovec_count > 0) {
			/* Assuming SGL is not allowed for this command */
			res = nvme_trans_completion(hdr,
						SAM_STAT_CHECK_CONDITION,
						ILLEGAL_REQUEST,
						SCSI_ASC_INVALID_CDB,
						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
			goto out;
		}
		iod = nvme_map_user_pages(dev, DMA_TO_DEVICE,
				(unsigned long)hdr->dxferp, tot_len);
		if (IS_ERR(iod)) {
			res = PTR_ERR(iod);
			goto out;
		}
		length = nvme_setup_prps(dev, &c.common, iod, tot_len,
								GFP_KERNEL);
		if (length != tot_len) {
			res = -ENOMEM;
			goto out_unmap;
		}

		c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
		c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);
	} else if (opcode == nvme_admin_activate_fw) {
		u32 cdw10 = buffer_id | NVME_FWACT_REPL_ACTV;
		c.common.cdw10[0] = cpu_to_le32(cdw10);
	}

	nvme_sc = nvme_submit_admin_cmd(dev, &c, NULL);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_unmap;
	if (nvme_sc)
		res = nvme_sc;

 out_unmap:
	if (opcode == nvme_admin_download_fw) {
		nvme_unmap_user_pages(dev, DMA_TO_DEVICE, iod);
		nvme_free_iod(dev, iod);
	}
 out:
	return res;
}

/* Mode Select Helper Functions */

static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10,
						u16 *bd_len, u8 *llbaa)
{
	if (cdb10) {
		/* 10 Byte CDB */
		*bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) +
			parm_list[MODE_SELECT_10_BD_OFFSET + 1];
		*llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] &
			MODE_SELECT_10_LLBAA_MASK;
	} else {
		/* 6 Byte CDB */
		*bd_len = parm_list[MODE_SELECT_6_BD_OFFSET];
	}
}

static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
					u16 idx, u16 bd_len, u8 llbaa)
{
	u16 bd_num;

	bd_num = bd_len / ((llbaa == 0) ?
			SHORT_DESC_BLOCK : LONG_DESC_BLOCK);
	/* Store block descriptor info if a FORMAT UNIT comes later */
	/* TODO Saving 1st BD info; what to do if multiple BD received? */
	if (llbaa == 0) {
		/* Standard Block Descriptor - spc4r34 7.5.5.1 */
		ns->mode_select_num_blocks =
				(parm_list[idx + 1] << 16) +
				(parm_list[idx + 2] << 8) +
				(parm_list[idx + 3]);

		ns->mode_select_block_len =
				(parm_list[idx + 5] << 16) +
				(parm_list[idx + 6] << 8) +
				(parm_list[idx + 7]);
	} else {
		/* Long LBA Block Descriptor - sbc3r27 6.4.2.3 */
		ns->mode_select_num_blocks =
				(((u64)parm_list[idx + 0]) << 56) +
				(((u64)parm_list[idx + 1]) << 48) +
				(((u64)parm_list[idx + 2]) << 40) +
				(((u64)parm_list[idx + 3]) << 32) +
				(((u64)parm_list[idx + 4]) << 24) +
				(((u64)parm_list[idx + 5]) << 16) +
				(((u64)parm_list[idx + 6]) << 8) +
				((u64)parm_list[idx + 7]);

		ns->mode_select_block_len =
				(parm_list[idx + 12] << 24) +
				(parm_list[idx + 13] << 16) +
				(parm_list[idx + 14] << 8) +
				(parm_list[idx + 15]);
	}
}

static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 *mode_page, u8 page_code)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	unsigned dword11;

	switch (page_code) {
	case MODE_PAGE_CACHING:
		dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 1 : 0);
		nvme_sc = nvme_set_features(dev, NVME_FEAT_VOLATILE_WC, dword11,
					    0, NULL);
		res = nvme_trans_status_code(hdr, nvme_sc);
		if (res)
			break;
		if (nvme_sc) {
			res = nvme_sc;
			break;
		}
		break;
	case MODE_PAGE_CONTROL:
		break;
	case MODE_PAGE_POWER_CONDITION:
		/* Verify the OS is not trying to set timers */
		if ((mode_page[2] & 0x01) != 0 || (mode_page[3] & 0x0F) != 0) {
			res = nvme_trans_completion(hdr,
						SAM_STAT_CHECK_CONDITION,
						ILLEGAL_REQUEST,
						SCSI_ASC_INVALID_PARAMETER,
						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
			if (!res)
				res = SNTI_INTERNAL_ERROR;
			break;
		}
		break;
	default:
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		if (!res)
			res = SNTI_INTERNAL_ERROR;
		break;
	}

	return res;
}
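/*
 * nvme_trans_modesel_data() below walks the mode page list twice: the first
 * pass only validates that every page code is one this translation layer
 * supports (caching, control, power condition), so that no NVMe Set
 * Features command is issued for a parameter list that would fail partway
 * through; the second pass performs the actual NVMe commands.
 */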
static int nvme_trans_modesel_data(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 *cmd, u16 parm_list_len, u8 pf,
					u8 sp, u8 cdb10)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u8 *parm_list;
	u16 bd_len;
	u8 llbaa = 0;
	u16 index, saved_index;
	u8 page_code;
	u16 mp_size;

	/* Get parm list from data-in/out buffer */
	parm_list = kmalloc(parm_list_len, GFP_KERNEL);
	if (parm_list == NULL) {
		res = -ENOMEM;
		goto out;
	}

	res = nvme_trans_copy_from_user(hdr, parm_list, parm_list_len);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out_mem;

	nvme_trans_modesel_get_bd_len(parm_list, cdb10, &bd_len, &llbaa);
	index = (cdb10) ? (MODE_SELECT_10_MPH_SIZE) : (MODE_SELECT_6_MPH_SIZE);

	if (bd_len != 0) {
		/* Block Descriptors present, parse */
		nvme_trans_modesel_save_bd(ns, parm_list, index, bd_len, llbaa);
		index += bd_len;
	}
	saved_index = index;

	/* Multiple mode pages may be present; iterate through all */
	/* In 1st Iteration, don't do NVME Command, only check for CDB errors */
	do {
		page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK;
		mp_size = parm_list[index + 1] + 2;
		if ((page_code != MODE_PAGE_CACHING) &&
		    (page_code != MODE_PAGE_CONTROL) &&
		    (page_code != MODE_PAGE_POWER_CONDITION)) {
			res = nvme_trans_completion(hdr,
						SAM_STAT_CHECK_CONDITION,
						ILLEGAL_REQUEST,
						SCSI_ASC_INVALID_CDB,
						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
			goto out_mem;
		}
		index += mp_size;
	} while (index < parm_list_len);

	/* In 2nd Iteration, do the NVME Commands */
	index = saved_index;
	do {
		page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK;
		mp_size = parm_list[index + 1] + 2;
		res = nvme_trans_modesel_get_mp(ns, hdr, &parm_list[index],
								page_code);
		if (res != SNTI_TRANSLATION_SUCCESS)
			break;
		index += mp_size;
	} while (index < parm_list_len);

 out_mem:
	kfree(parm_list);
 out:
	return res;
}

/* Format Unit Helper Functions */

static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
					struct sg_io_hdr *hdr)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ns *id_ns;
	u8 flbas;

	/*
	 * SCSI expects that a MODE SELECT would have been issued prior to
	 * a FORMAT UNIT, and that the block size and number would be used
	 * from the block descriptor in it. If a MODE SELECT had not
	 * been issued, FORMAT shall use the current values for both.
	 */
1786 */ 1787 1788 if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) { 1789 mem = dma_alloc_coherent(&dev->pci_dev->dev, 1790 sizeof(struct nvme_id_ns), &dma_addr, GFP_KERNEL); 1791 if (mem == NULL) { 1792 res = -ENOMEM; 1793 goto out; 1794 } 1795 /* nvme ns identify */ 1796 nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr); 1797 res = nvme_trans_status_code(hdr, nvme_sc); 1798 if (res) 1799 goto out_dma; 1800 if (nvme_sc) { 1801 res = nvme_sc; 1802 goto out_dma; 1803 } 1804 id_ns = mem; 1805 1806 if (ns->mode_select_num_blocks == 0) 1807 ns->mode_select_num_blocks = le64_to_cpu(id_ns->ncap); 1808 if (ns->mode_select_block_len == 0) { 1809 flbas = (id_ns->flbas) & 0x0F; 1810 ns->mode_select_block_len = 1811 (1 << (id_ns->lbaf[flbas].ds)); 1812 } 1813 out_dma: 1814 dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), 1815 mem, dma_addr); 1816 } 1817 out: 1818 return res; 1819} 1820 1821static int nvme_trans_fmt_get_parm_header(struct sg_io_hdr *hdr, u8 len, 1822 u8 format_prot_info, u8 *nvme_pf_code) 1823{ 1824 int res = SNTI_TRANSLATION_SUCCESS; 1825 u8 *parm_list; 1826 u8 pf_usage, pf_code; 1827 1828 parm_list = kmalloc(len, GFP_KERNEL); 1829 if (parm_list == NULL) { 1830 res = -ENOMEM; 1831 goto out; 1832 } 1833 res = nvme_trans_copy_from_user(hdr, parm_list, len); 1834 if (res != SNTI_TRANSLATION_SUCCESS) 1835 goto out_mem; 1836 1837 if ((parm_list[FORMAT_UNIT_IMMED_OFFSET] & 1838 FORMAT_UNIT_IMMED_MASK) != 0) { 1839 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, 1840 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, 1841 SCSI_ASCQ_CAUSE_NOT_REPORTABLE); 1842 goto out_mem; 1843 } 1844 1845 if (len == FORMAT_UNIT_LONG_PARM_LIST_LEN && 1846 (parm_list[FORMAT_UNIT_PROT_INT_OFFSET] & 0x0F) != 0) { 1847 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, 1848 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, 1849 SCSI_ASCQ_CAUSE_NOT_REPORTABLE); 1850 goto out_mem; 1851 } 1852 pf_usage = parm_list[FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET] & 1853 FORMAT_UNIT_PROT_FIELD_USAGE_MASK; 1854 pf_code = (pf_usage << 2) | format_prot_info; 1855 switch (pf_code) { 1856 case 0: 1857 *nvme_pf_code = 0; 1858 break; 1859 case 2: 1860 *nvme_pf_code = 1; 1861 break; 1862 case 3: 1863 *nvme_pf_code = 2; 1864 break; 1865 case 7: 1866 *nvme_pf_code = 3; 1867 break; 1868 default: 1869 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, 1870 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, 1871 SCSI_ASCQ_CAUSE_NOT_REPORTABLE); 1872 break; 1873 } 1874 1875 out_mem: 1876 kfree(parm_list); 1877 out: 1878 return res; 1879} 1880 1881static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr, 1882 u8 prot_info) 1883{ 1884 int res = SNTI_TRANSLATION_SUCCESS; 1885 int nvme_sc; 1886 struct nvme_dev *dev = ns->dev; 1887 dma_addr_t dma_addr; 1888 void *mem; 1889 struct nvme_id_ns *id_ns; 1890 u8 i; 1891 u8 flbas, nlbaf; 1892 u8 selected_lbaf = 0xFF; 1893 u32 cdw10 = 0; 1894 struct nvme_command c; 1895 1896 /* Loop thru LBAF's in id_ns to match reqd lbaf, put in cdw10 */ 1897 mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), 1898 &dma_addr, GFP_KERNEL); 1899 if (mem == NULL) { 1900 res = -ENOMEM; 1901 goto out; 1902 } 1903 /* nvme ns identify */ 1904 nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr); 1905 res = nvme_trans_status_code(hdr, nvme_sc); 1906 if (res) 1907 goto out_dma; 1908 if (nvme_sc) { 1909 res = nvme_sc; 1910 goto out_dma; 1911 } 1912 id_ns = mem; 1913 flbas = (id_ns->flbas) & 0x0F; 1914 nlbaf = id_ns->nlbaf; 1915 1916 for (i = 0; i < nlbaf; 

static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 prot_info)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ns *id_ns;
	u8 i;
	u8 flbas, nlbaf;
	u8 selected_lbaf = 0xFF;
	u32 cdw10 = 0;
	struct nvme_command c;

	/* Loop through LBAFs in id_ns to match the reqd LBAF, put it in cdw10 */
	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
							&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out;
	}
	/* nvme ns identify */
	nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_dma;
	if (nvme_sc) {
		res = nvme_sc;
		goto out_dma;
	}
	id_ns = mem;
	flbas = (id_ns->flbas) & 0x0F;
	nlbaf = id_ns->nlbaf;

	for (i = 0; i < nlbaf; i++) {
		if (ns->mode_select_block_len == (1 << (id_ns->lbaf[i].ds))) {
			selected_lbaf = i;
			break;
		}
	}
	if (selected_lbaf > 0x0F) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out_dma;
	}
	if (ns->mode_select_num_blocks != le64_to_cpu(id_ns->ncap)) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out_dma;
	}

	cdw10 |= prot_info << 5;
	cdw10 |= selected_lbaf & 0x0F;
	memset(&c, 0, sizeof(c));
	c.format.opcode = nvme_admin_format_nvm;
	c.format.nsid = cpu_to_le32(ns->ns_id);
	c.format.cdw10 = cpu_to_le32(cdw10);

	nvme_sc = nvme_submit_admin_cmd(dev, &c, NULL);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_dma;
	if (nvme_sc)
		res = nvme_sc;

 out_dma:
	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
			  dma_addr);
 out:
	return res;
}

/* Read/Write Helper Functions */

static inline void nvme_trans_get_io_cdb6(u8 *cmd,
					struct nvme_trans_io_cdb *cdb_info)
{
	cdb_info->fua = 0;
	cdb_info->prot_info = 0;
	cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_6_CDB_LBA_OFFSET) &
					IO_6_CDB_LBA_MASK;
	cdb_info->xfer_len = GET_U8_FROM_CDB(cmd, IO_6_CDB_TX_LEN_OFFSET);

	/* sbc3r27 sec 5.32 - TRANSFER LEN of 0 implies a 256 block transfer */
	if (cdb_info->xfer_len == 0)
		cdb_info->xfer_len = IO_6_DEFAULT_TX_LEN;
}

static inline void nvme_trans_get_io_cdb10(u8 *cmd,
					struct nvme_trans_io_cdb *cdb_info)
{
	cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_10_CDB_FUA_OFFSET) &
					IO_CDB_FUA_MASK;
	cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_10_CDB_WP_OFFSET) &
					IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
	cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_10_CDB_LBA_OFFSET);
	cdb_info->xfer_len = GET_U16_FROM_CDB(cmd, IO_10_CDB_TX_LEN_OFFSET);
}

static inline void nvme_trans_get_io_cdb12(u8 *cmd,
					struct nvme_trans_io_cdb *cdb_info)
{
	cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_12_CDB_FUA_OFFSET) &
					IO_CDB_FUA_MASK;
	cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_12_CDB_WP_OFFSET) &
					IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
	cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_12_CDB_LBA_OFFSET);
	cdb_info->xfer_len = GET_U32_FROM_CDB(cmd, IO_12_CDB_TX_LEN_OFFSET);
}

static inline void nvme_trans_get_io_cdb16(u8 *cmd,
					struct nvme_trans_io_cdb *cdb_info)
{
	cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_16_CDB_FUA_OFFSET) &
					IO_CDB_FUA_MASK;
	cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_16_CDB_WP_OFFSET) &
					IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
	cdb_info->lba = GET_U64_FROM_CDB(cmd, IO_16_CDB_LBA_OFFSET);
	cdb_info->xfer_len = GET_U32_FROM_CDB(cmd, IO_16_CDB_TX_LEN_OFFSET);
}

static inline u32 nvme_trans_io_get_num_cmds(struct sg_io_hdr *hdr,
					struct nvme_trans_io_cdb *cdb_info,
					u32 max_blocks)
{
	/* If using iovecs, send one nvme command per vector */
	if (hdr->iovec_count > 0)
		return hdr->iovec_count;
	else if (cdb_info->xfer_len > max_blocks)
		return ((cdb_info->xfer_len - 1) / max_blocks) + 1;
	else
		return 1;
}
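
/*
 * Example: a 3000-block READ(10) against a device whose max_hw_sectors
 * allows max_blocks = 1024, with no iovec list, yields
 * ((3000 - 1) / 1024) + 1 = 3 NVMe commands -- two full 1024-block
 * transfers and a final 952-block remainder.
 */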

static u16 nvme_trans_io_get_control(struct nvme_ns *ns,
					struct nvme_trans_io_cdb *cdb_info)
{
	u16 control = 0;

	/* When Protection information support is added, implement here */

	if (cdb_info->fua > 0)
		control |= NVME_RW_FUA;

	return control;
}

static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
				struct nvme_trans_io_cdb *cdb_info, u8 is_write)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	struct nvme_queue *nvmeq;
	u32 num_cmds;
	struct nvme_iod *iod;
	u64 unit_len;
	u64 unit_num_blocks;	/* Number of blocks to xfer in each nvme cmd */
	u32 retcode;
	u32 i = 0;
	u64 nvme_offset = 0;
	void __user *next_mapping_addr;
	struct nvme_command c;
	u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read);
	u16 control;
	u32 max_blocks = nvme_block_nr(ns, dev->max_hw_sectors);

	num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks);

	/*
	 * This loop handles two cases.
	 * First, when an SGL is used in the form of an iovec list:
	 *  - Use iov_base as the next mapping address for the nvme command_id
	 *  - Use iov_len as the data transfer length for the command.
	 * Second, when we have a single buffer:
	 *  - If larger than max_blocks, split into chunks and offset
	 *    each nvme command accordingly.
	 */
	for (i = 0; i < num_cmds; i++) {
		memset(&c, 0, sizeof(c));
		if (hdr->iovec_count > 0) {
			struct sg_iovec sgl;

			retcode = copy_from_user(&sgl, hdr->dxferp +
					i * sizeof(struct sg_iovec),
					sizeof(struct sg_iovec));
			if (retcode)
				return -EFAULT;
			unit_len = sgl.iov_len;
			unit_num_blocks = unit_len >> ns->lba_shift;
			next_mapping_addr = sgl.iov_base;
		} else {
			unit_num_blocks = min((u64)max_blocks,
					(cdb_info->xfer_len - nvme_offset));
			unit_len = unit_num_blocks << ns->lba_shift;
			next_mapping_addr = hdr->dxferp +
					((1 << ns->lba_shift) * nvme_offset);
		}
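
		/*
		 * NVMe block counts are 0's based: a length field of 0 means
		 * one block, so each chunk is encoded as unit_num_blocks - 1
		 * (e.g. a 256-block chunk becomes 255).
		 */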
		c.rw.opcode = opcode;
		c.rw.nsid = cpu_to_le32(ns->ns_id);
		c.rw.slba = cpu_to_le64(cdb_info->lba + nvme_offset);
		c.rw.length = cpu_to_le16(unit_num_blocks - 1);
		control = nvme_trans_io_get_control(ns, cdb_info);
		c.rw.control = cpu_to_le16(control);

		iod = nvme_map_user_pages(dev,
			(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
			(unsigned long)next_mapping_addr, unit_len);
		if (IS_ERR(iod)) {
			res = PTR_ERR(iod);
			goto out;
		}
		retcode = nvme_setup_prps(dev, &c.common, iod, unit_len,
							GFP_KERNEL);
		if (retcode != unit_len) {
			nvme_unmap_user_pages(dev,
				(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
				iod);
			nvme_free_iod(dev, iod);
			res = -ENOMEM;
			goto out;
		}

		nvme_offset += unit_num_blocks;

		nvmeq = get_nvmeq(dev);
		/*
		 * Since nvme_submit_sync_cmd sleeps, we can't keep
		 * preemption disabled.  We may be preempted at any
		 * point, and be rescheduled to a different CPU.  That
		 * will cause cacheline bouncing, but no additional
		 * races since q_lock already protects against other
		 * CPUs.
		 */
		put_nvmeq(nvmeq);
		nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL,
						NVME_IO_TIMEOUT);
		if (nvme_sc != NVME_SC_SUCCESS) {
			nvme_unmap_user_pages(dev,
				(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
				iod);
			nvme_free_iod(dev, iod);
			res = nvme_trans_status_code(hdr, nvme_sc);
			goto out;
		}
		nvme_unmap_user_pages(dev,
			(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
			iod);
		nvme_free_iod(dev, iod);
	}
	res = nvme_trans_status_code(hdr, NVME_SC_SUCCESS);

 out:
	return res;
}

/* SCSI Command Translation Functions */

static int nvme_trans_io(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 is_write,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	struct nvme_trans_io_cdb cdb_info;
	u8 opcode = cmd[0];
	u64 xfer_bytes;
	u64 sum_iov_len = 0;
	struct sg_iovec sgl;
	int i;
	size_t not_copied;

	/* Extract Fields from CDB */
	switch (opcode) {
	case WRITE_6:
	case READ_6:
		nvme_trans_get_io_cdb6(cmd, &cdb_info);
		break;
	case WRITE_10:
	case READ_10:
		nvme_trans_get_io_cdb10(cmd, &cdb_info);
		break;
	case WRITE_12:
	case READ_12:
		nvme_trans_get_io_cdb12(cmd, &cdb_info);
		break;
	case WRITE_16:
	case READ_16:
		nvme_trans_get_io_cdb16(cmd, &cdb_info);
		break;
	default:
		/* Will never really reach here */
		res = SNTI_INTERNAL_ERROR;
		goto out;
	}

	/* Calculate total length of transfer (in bytes) */
	if (hdr->iovec_count > 0) {
		for (i = 0; i < hdr->iovec_count; i++) {
			not_copied = copy_from_user(&sgl, hdr->dxferp +
						i * sizeof(struct sg_iovec),
						sizeof(struct sg_iovec));
			if (not_copied)
				return -EFAULT;
			sum_iov_len += sgl.iov_len;
			/* IO vector sizes should be multiples of block size */
			if (sgl.iov_len % (1 << ns->lba_shift) != 0) {
				res = nvme_trans_completion(hdr,
						SAM_STAT_CHECK_CONDITION,
						ILLEGAL_REQUEST,
						SCSI_ASC_INVALID_PARAMETER,
						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
				goto out;
			}
		}
	} else {
		sum_iov_len = hdr->dxfer_len;
	}

	/* As per the sg ioctl howto, if the lengths differ, use the lower one */
	xfer_bytes = min(((u64)hdr->dxfer_len), sum_iov_len);

	/* If the block count and actual data buffer size don't match, error out */
	if (xfer_bytes != (cdb_info.xfer_len << ns->lba_shift)) {
		res = -EINVAL;
		goto out;
	}

	/* Check for a 0 length transfer - it is not illegal */
	if (cdb_info.xfer_len == 0)
		goto out;

	/* Send NVMe IO Command(s) */
	res = nvme_trans_do_nvme_io(ns, hdr, &cdb_info, is_write);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out;

 out:
	return res;
}
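
/*
 * Example of the length check above: a READ(10) for 16 blocks on a
 * namespace with 512-byte LBAs must be backed by exactly 16 << 9 = 8192
 * bytes of buffer space -- e.g. dxfer_len = 8192, or two 4096-byte
 * iovecs.  Anything else fails with -EINVAL before any NVMe command is
 * built.
 */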

static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u8 evpd;
	u8 page_code;
	int alloc_len;
	u8 *inq_response;

	evpd = GET_INQ_EVPD_BIT(cmd);
	page_code = GET_INQ_PAGE_CODE(cmd);
	alloc_len = GET_INQ_ALLOC_LENGTH(cmd);

	inq_response = kmalloc(STANDARD_INQUIRY_LENGTH, GFP_KERNEL);
	if (inq_response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}

	if (evpd == 0) {
		if (page_code == INQ_STANDARD_INQUIRY_PAGE) {
			res = nvme_trans_standard_inquiry_page(ns, hdr,
						inq_response, alloc_len);
		} else {
			res = nvme_trans_completion(hdr,
						SAM_STAT_CHECK_CONDITION,
						ILLEGAL_REQUEST,
						SCSI_ASC_INVALID_CDB,
						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		}
	} else {
		switch (page_code) {
		case VPD_SUPPORTED_PAGES:
			res = nvme_trans_supported_vpd_pages(ns, hdr,
						inq_response, alloc_len);
			break;
		case VPD_SERIAL_NUMBER:
			res = nvme_trans_unit_serial_page(ns, hdr, inq_response,
								alloc_len);
			break;
		case VPD_DEVICE_IDENTIFIERS:
			res = nvme_trans_device_id_page(ns, hdr, inq_response,
								alloc_len);
			break;
		case VPD_EXTENDED_INQUIRY:
			res = nvme_trans_ext_inq_page(ns, hdr, alloc_len);
			break;
		case VPD_BLOCK_DEV_CHARACTERISTICS:
			res = nvme_trans_bdev_char_page(ns, hdr, alloc_len);
			break;
		default:
			res = nvme_trans_completion(hdr,
						SAM_STAT_CHECK_CONDITION,
						ILLEGAL_REQUEST,
						SCSI_ASC_INVALID_CDB,
						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
			break;
		}
	}
	kfree(inq_response);
 out_mem:
	return res;
}

static int nvme_trans_log_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u16 alloc_len;
	u8 sp;
	u8 pc;
	u8 page_code;

	sp = GET_U8_FROM_CDB(cmd, LOG_SENSE_CDB_SP_OFFSET);
	if (sp != LOG_SENSE_CDB_SP_NOT_ENABLED) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}
	pc = GET_U8_FROM_CDB(cmd, LOG_SENSE_CDB_PC_OFFSET);
	page_code = pc & LOG_SENSE_CDB_PAGE_CODE_MASK;
	pc = (pc & LOG_SENSE_CDB_PC_MASK) >> LOG_SENSE_CDB_PC_SHIFT;
	if (pc != LOG_SENSE_CDB_PC_CUMULATIVE_VALUES) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}
	alloc_len = GET_U16_FROM_CDB(cmd, LOG_SENSE_CDB_ALLOC_LENGTH_OFFSET);
	switch (page_code) {
	case LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE:
		res = nvme_trans_log_supp_pages(ns, hdr, alloc_len);
		break;
	case LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE:
		res = nvme_trans_log_info_exceptions(ns, hdr, alloc_len);
		break;
	case LOG_PAGE_TEMPERATURE_PAGE:
		res = nvme_trans_log_temperature(ns, hdr, alloc_len);
		break;
	default:
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		break;
	}

 out:
	return res;
}

static int nvme_trans_mode_select(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u8 cdb10 = 0;
	u16 parm_list_len;
	u8 page_format;
	u8 save_pages;

	page_format = GET_U8_FROM_CDB(cmd, MODE_SELECT_CDB_PAGE_FORMAT_OFFSET);
	page_format &= MODE_SELECT_CDB_PAGE_FORMAT_MASK;

	save_pages = GET_U8_FROM_CDB(cmd, MODE_SELECT_CDB_SAVE_PAGES_OFFSET);
	save_pages &= MODE_SELECT_CDB_SAVE_PAGES_MASK;

	if (GET_OPCODE(cmd) == MODE_SELECT) {
		parm_list_len = GET_U8_FROM_CDB(cmd,
				MODE_SELECT_6_CDB_PARAM_LIST_LENGTH_OFFSET);
	} else {
		parm_list_len = GET_U16_FROM_CDB(cmd,
				MODE_SELECT_10_CDB_PARAM_LIST_LENGTH_OFFSET);
		cdb10 = 1;
	}

	if (parm_list_len != 0) {
		/*
		 * According to SPC-4 r24, a parameter list length field of 0
		 * shall not be considered an error
		 */
		res = nvme_trans_modesel_data(ns, hdr, cmd, parm_list_len,
						page_format, save_pages, cdb10);
	}

	return res;
}
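
/*
 * MODE SENSE below dispatches on the PC and PAGE CODE fields of CDB
 * byte 2.  For example, byte 2 = 0x3F decodes to PC = 0 (current
 * values, the only setting accepted here) and page code 0x3F, the
 * "return all pages" request served by the MODE_PAGE_RETURN_ALL arm.
 */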

static int nvme_trans_mode_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u16 alloc_len;
	u8 cdb10 = 0;
	u8 page_code;
	u8 pc;

	if (GET_OPCODE(cmd) == MODE_SENSE) {
		alloc_len = GET_U8_FROM_CDB(cmd, MODE_SENSE6_ALLOC_LEN_OFFSET);
	} else {
		alloc_len = GET_U16_FROM_CDB(cmd,
						MODE_SENSE10_ALLOC_LEN_OFFSET);
		cdb10 = 1;
	}

	pc = GET_U8_FROM_CDB(cmd, MODE_SENSE_PAGE_CONTROL_OFFSET) &
						MODE_SENSE_PAGE_CONTROL_MASK;
	if (pc != MODE_SENSE_PC_CURRENT_VALUES) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}

	page_code = GET_U8_FROM_CDB(cmd, MODE_SENSE_PAGE_CODE_OFFSET) &
					MODE_SENSE_PAGE_CODE_MASK;
	switch (page_code) {
	case MODE_PAGE_CACHING:
		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
						cdb10,
						&nvme_trans_fill_caching_page,
						MODE_PAGE_CACHING_LEN);
		break;
	case MODE_PAGE_CONTROL:
		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
						cdb10,
						&nvme_trans_fill_control_page,
						MODE_PAGE_CONTROL_LEN);
		break;
	case MODE_PAGE_POWER_CONDITION:
		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
						cdb10,
						&nvme_trans_fill_pow_cnd_page,
						MODE_PAGE_POW_CND_LEN);
		break;
	case MODE_PAGE_INFO_EXCEP:
		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
						cdb10,
						&nvme_trans_fill_inf_exc_page,
						MODE_PAGE_INF_EXC_LEN);
		break;
	case MODE_PAGE_RETURN_ALL:
		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
						cdb10,
						&nvme_trans_fill_all_pages,
						MODE_PAGE_ALL_LEN);
		break;
	default:
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		break;
	}

 out:
	return res;
}

static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	u32 alloc_len = READ_CAP_10_RESP_SIZE;
	u32 resp_size = READ_CAP_10_RESP_SIZE;
	u32 xfer_len;
	u8 cdb16;
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ns *id_ns;
	u8 *response;

	cdb16 = IS_READ_CAP_16(cmd);
	if (cdb16) {
		alloc_len = GET_READ_CAP_16_ALLOC_LENGTH(cmd);
		resp_size = READ_CAP_16_RESP_SIZE;
	}

	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
							&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out;
	}
	/* nvme ns identify */
	nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_dma;
	if (nvme_sc) {
		res = nvme_sc;
		goto out_dma;
	}
	id_ns = mem;

	response = kzalloc(resp_size, GFP_KERNEL);
	if (response == NULL) {
		res = -ENOMEM;
		goto out_dma;
	}
	nvme_trans_fill_read_cap(response, id_ns, cdb16);

	xfer_len = min(alloc_len, resp_size);
	res = nvme_trans_copy_to_user(hdr, response, xfer_len);

	kfree(response);
 out_dma:
	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
			  dma_addr);
 out:
	return res;
}
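
/*
 * READ CAPACITY(10) has no allocation length field and always gets the
 * fixed 8-byte response (READ_CAP_10_RESP_SIZE), while READ
 * CAPACITY(16) -- reached via SERVICE ACTION IN -- builds the longer
 * READ_CAP_16_RESP_SIZE response (32 bytes per SBC-3) and the min()
 * above trims it to the CDB's allocation length.
 */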

static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	u32 alloc_len, xfer_len, resp_size;
	u8 select_report;
	u8 *response;
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ctrl *id_ctrl;
	u32 ll_length, lun_id;
	u32 lun_id_offset = REPORT_LUNS_FIRST_LUN_OFFSET;
	__be32 tmp_len;

	alloc_len = GET_REPORT_LUNS_ALLOC_LENGTH(cmd);
	select_report = GET_U8_FROM_CDB(cmd, REPORT_LUNS_SR_OFFSET);

	if ((select_report != ALL_LUNS_RETURNED) &&
	    (select_report != ALL_WELL_KNOWN_LUNS_RETURNED) &&
	    (select_report != RESTRICTED_LUNS_RETURNED)) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	} else {
		/* NVMe Controller Identify */
		mem = dma_alloc_coherent(&dev->pci_dev->dev,
					sizeof(struct nvme_id_ctrl),
					&dma_addr, GFP_KERNEL);
		if (mem == NULL) {
			res = -ENOMEM;
			goto out;
		}
		nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
		res = nvme_trans_status_code(hdr, nvme_sc);
		if (res)
			goto out_dma;
		if (nvme_sc) {
			res = nvme_sc;
			goto out_dma;
		}
		id_ctrl = mem;
		ll_length = le32_to_cpu(id_ctrl->nn) * LUN_ENTRY_SIZE;
		resp_size = ll_length + LUN_DATA_HEADER_SIZE;

		if (alloc_len < resp_size) {
			res = nvme_trans_completion(hdr,
					SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
			goto out_dma;
		}

		response = kzalloc(resp_size, GFP_KERNEL);
		if (response == NULL) {
			res = -ENOMEM;
			goto out_dma;
		}

		/* The first LUN ID will always be 0 per the SAM spec */
		for (lun_id = 0; lun_id < le32_to_cpu(id_ctrl->nn); lun_id++) {
			/*
			 * Set the LUN Id and then increment to the next LUN
			 * location in the parameter data.
			 */
			__be64 tmp_id = cpu_to_be64(lun_id);
			memcpy(&response[lun_id_offset], &tmp_id, sizeof(u64));
			lun_id_offset += LUN_ENTRY_SIZE;
		}
		tmp_len = cpu_to_be32(ll_length);
		memcpy(response, &tmp_len, sizeof(u32));
	}

	xfer_len = min(alloc_len, resp_size);
	res = nvme_trans_copy_to_user(hdr, response, xfer_len);

	kfree(response);
 out_dma:
	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem,
			  dma_addr);
 out:
	return res;
}
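
/*
 * The REPORT LUNS parameter data built above follows the SAM-5 layout:
 * a 4-byte LUN LIST LENGTH, 4 reserved bytes, then one 8-byte entry per
 * LUN.  With the usual 8-byte LUN_ENTRY_SIZE and 8-byte header, a
 * controller reporting nn = 4 namespaces yields ll_length = 32 and
 * resp_size = 40.
 */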

static int nvme_trans_request_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u8 alloc_len, xfer_len, resp_size;
	u8 desc_format;
	u8 *response;

	alloc_len = GET_REQUEST_SENSE_ALLOC_LENGTH(cmd);
	desc_format = GET_U8_FROM_CDB(cmd, REQUEST_SENSE_DESC_OFFSET);
	desc_format &= REQUEST_SENSE_DESC_MASK;

	resp_size = ((desc_format) ? (DESC_FMT_SENSE_DATA_SIZE) :
					(FIXED_FMT_SENSE_DATA_SIZE));
	response = kzalloc(resp_size, GFP_KERNEL);
	if (response == NULL) {
		res = -ENOMEM;
		goto out;
	}

	if (desc_format == DESCRIPTOR_FORMAT_SENSE_DATA_TYPE) {
		/* Descriptor Format Sense Data */
		response[0] = DESC_FORMAT_SENSE_DATA;
		response[1] = NO_SENSE;
		/* TODO How is LOW POWER CONDITION ON handled? (byte 2) */
		response[2] = SCSI_ASC_NO_SENSE;
		response[3] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		/* SDAT_OVFL = 0 | Additional Sense Length = 0 */
	} else {
		/* Fixed Format Sense Data */
		response[0] = FIXED_SENSE_DATA;
		/* Byte 1 = Obsolete */
		response[2] = NO_SENSE;	/* FM, EOM, ILI, SDAT_OVFL = 0 */
		/* Bytes 3-6 - Information - set to zero */
		response[7] = FIXED_SENSE_DATA_ADD_LENGTH;
		/* Bytes 8-11 - Cmd Specific Information - set to zero */
		response[12] = SCSI_ASC_NO_SENSE;
		response[13] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		/* Byte 14 = Field Replaceable Unit Code = 0 */
		/* Bytes 15-17 - SKSV=0; Sense Key Specific = 0 */
	}

	xfer_len = min(alloc_len, resp_size);
	res = nvme_trans_copy_to_user(hdr, response, xfer_len);

	kfree(response);
 out:
	return res;
}
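
/*
 * The two sense formats above differ mainly in size: the
 * descriptor-format reply is just the 8-byte header (no descriptors
 * follow, so the additional sense length stays 0), while the
 * fixed-format reply carries SPC-4's standard 18-byte layout, with
 * byte 7 announcing the additional bytes that follow it.
 */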

static int nvme_trans_security_protocol(struct nvme_ns *ns,
					struct sg_io_hdr *hdr,
					u8 *cmd)
{
	return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
}

static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_queue *nvmeq;
	struct nvme_command c;
	u8 immed, pcmod, pc, no_flush, start;

	immed = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_IMMED_OFFSET);
	pcmod = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_POWER_COND_MOD_OFFSET);
	pc = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_POWER_COND_OFFSET);
	no_flush = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_NO_FLUSH_OFFSET);
	start = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_START_OFFSET);

	immed &= START_STOP_UNIT_CDB_IMMED_MASK;
	pcmod &= START_STOP_UNIT_CDB_POWER_COND_MOD_MASK;
	pc = (pc & START_STOP_UNIT_CDB_POWER_COND_MASK) >> NIBBLE_SHIFT;
	no_flush &= START_STOP_UNIT_CDB_NO_FLUSH_MASK;
	start &= START_STOP_UNIT_CDB_START_MASK;

	if (immed != 0) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
	} else {
		if (no_flush == 0) {
			/* Issue NVME FLUSH command prior to START STOP UNIT */
			memset(&c, 0, sizeof(c));
			c.common.opcode = nvme_cmd_flush;
			c.common.nsid = cpu_to_le32(ns->ns_id);

			nvmeq = get_nvmeq(ns->dev);
			put_nvmeq(nvmeq);
			nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL,
							NVME_IO_TIMEOUT);

			res = nvme_trans_status_code(hdr, nvme_sc);
			if (res)
				goto out;
			if (nvme_sc) {
				res = nvme_sc;
				goto out;
			}
		}
		/* Setup the expected power state transition */
		res = nvme_trans_power_state(ns, hdr, pc, pcmod, start);
	}

 out:
	return res;
}

static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_command c;
	struct nvme_queue *nvmeq;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_cmd_flush;
	c.common.nsid = cpu_to_le32(ns->ns_id);

	nvmeq = get_nvmeq(ns->dev);
	put_nvmeq(nvmeq);
	nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);

	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out;
	if (nvme_sc)
		res = nvme_sc;

 out:
	return res;
}

static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u8 parm_hdr_len = 0;
	u8 nvme_pf_code = 0;
	u8 format_prot_info, long_list, format_data;

	format_prot_info = GET_U8_FROM_CDB(cmd,
				FORMAT_UNIT_CDB_FORMAT_PROT_INFO_OFFSET);
	long_list = GET_U8_FROM_CDB(cmd, FORMAT_UNIT_CDB_LONG_LIST_OFFSET);
	format_data = GET_U8_FROM_CDB(cmd, FORMAT_UNIT_CDB_FORMAT_DATA_OFFSET);

	format_prot_info = (format_prot_info &
				FORMAT_UNIT_CDB_FORMAT_PROT_INFO_MASK) >>
				FORMAT_UNIT_CDB_FORMAT_PROT_INFO_SHIFT;
	long_list &= FORMAT_UNIT_CDB_LONG_LIST_MASK;
	format_data &= FORMAT_UNIT_CDB_FORMAT_DATA_MASK;

	if (format_data != 0) {
		if (format_prot_info != 0) {
			if (long_list == 0)
				parm_hdr_len = FORMAT_UNIT_SHORT_PARM_LIST_LEN;
			else
				parm_hdr_len = FORMAT_UNIT_LONG_PARM_LIST_LEN;
		}
	} else if (format_data == 0 && format_prot_info != 0) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}

	/* Get parm header from data-in/out buffer */
	/*
	 * According to the translation spec, the only fields in the parameter
	 * list we are concerned with are in the header.  So allocate only that.
	 */
	if (parm_hdr_len > 0) {
		res = nvme_trans_fmt_get_parm_header(hdr, parm_hdr_len,
					format_prot_info, &nvme_pf_code);
		if (res != SNTI_TRANSLATION_SUCCESS)
			goto out;
	}

	/* Attempt to activate any previously downloaded firmware image */
	res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw, 0, 0, 0);

	/* Determine Block size and count and send format command */
	res = nvme_trans_fmt_set_blk_size_count(ns, hdr);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out;

	res = nvme_trans_fmt_send_cmd(ns, hdr, nvme_pf_code);

 out:
	return res;
}

static int nvme_trans_test_unit_ready(struct nvme_ns *ns,
					struct sg_io_hdr *hdr,
					u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	struct nvme_dev *dev = ns->dev;

	if (!(readl(&dev->bar->csts) & NVME_CSTS_RDY))
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					    NOT_READY, SCSI_ASC_LUN_NOT_READY,
					    SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
	else
		res = nvme_trans_completion(hdr, SAM_STAT_GOOD, NO_SENSE, 0, 0);

	return res;
}
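
/*
 * WRITE BUFFER below carries firmware images.  The supported modes map
 * onto the two NVMe admin commands as follows: "download and activate"
 * issues Firmware Image Download and then Firmware Activate, "download
 * with deferred activate" issues only the download, and "activate
 * deferred microcode" issues only the activate.  The payload must be a
 * whole number of dwords, since NVMe transfers firmware in dword units.
 */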

static int nvme_trans_write_buffer(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u32 buffer_offset, parm_list_length;
	u8 buffer_id, mode;

	parm_list_length =
		GET_U24_FROM_CDB(cmd, WRITE_BUFFER_CDB_PARM_LIST_LENGTH_OFFSET);
	if (parm_list_length % BYTES_TO_DWORDS != 0) {
		/* NVMe expects the firmware file to be a whole number of DWORDS */
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}
	buffer_id = GET_U8_FROM_CDB(cmd, WRITE_BUFFER_CDB_BUFFER_ID_OFFSET);
	if (buffer_id > NVME_MAX_FIRMWARE_SLOT) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}
	mode = GET_U8_FROM_CDB(cmd, WRITE_BUFFER_CDB_MODE_OFFSET) &
		WRITE_BUFFER_CDB_MODE_MASK;
	buffer_offset =
		GET_U24_FROM_CDB(cmd, WRITE_BUFFER_CDB_BUFFER_OFFSET_OFFSET);

	switch (mode) {
	case DOWNLOAD_SAVE_ACTIVATE:
		res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_download_fw,
						parm_list_length, buffer_offset,
						buffer_id);
		if (res != SNTI_TRANSLATION_SUCCESS)
			goto out;
		res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw,
						parm_list_length, buffer_offset,
						buffer_id);
		break;
	case DOWNLOAD_SAVE_DEFER_ACTIVATE:
		res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_download_fw,
						parm_list_length, buffer_offset,
						buffer_id);
		break;
	case ACTIVATE_DEFERRED_MICROCODE:
		res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw,
						parm_list_length, buffer_offset,
						buffer_id);
		break;
	default:
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		break;
	}

 out:
	return res;
}

struct scsi_unmap_blk_desc {
	__be64	slba;
	__be32	nlb;
	u32	resv;
};

struct scsi_unmap_parm_list {
	__be16	unmap_data_len;
	__be16	unmap_blk_desc_data_len;
	u32	resv;
	struct scsi_unmap_blk_desc desc[0];
};

static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	struct nvme_dev *dev = ns->dev;
	struct scsi_unmap_parm_list *plist;
	struct nvme_dsm_range *range;
	struct nvme_queue *nvmeq;
	struct nvme_command c;
	int i, nvme_sc, res = -ENOMEM;
	u16 ndesc, list_len;
	dma_addr_t dma_addr;

	list_len = GET_U16_FROM_CDB(cmd, UNMAP_CDB_PARAM_LIST_LENGTH_OFFSET);
	if (!list_len)
		return -EINVAL;

	plist = kmalloc(list_len, GFP_KERNEL);
	if (!plist)
		return -ENOMEM;

	res = nvme_trans_copy_from_user(hdr, plist, list_len);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out;

	ndesc = be16_to_cpu(plist->unmap_blk_desc_data_len) >> 4;
	if (!ndesc || ndesc > 256) {
		res = -EINVAL;
		goto out;
	}

	range = dma_alloc_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
							&dma_addr, GFP_KERNEL);
	if (!range) {
		res = -ENOMEM;
		goto out;
	}

	for (i = 0; i < ndesc; i++) {
		range[i].nlb = cpu_to_le32(be32_to_cpu(plist->desc[i].nlb));
		range[i].slba = cpu_to_le64(be64_to_cpu(plist->desc[i].slba));
		range[i].cattr = 0;
	}

	memset(&c, 0, sizeof(c));
	c.dsm.opcode = nvme_cmd_dsm;
	c.dsm.nsid = cpu_to_le32(ns->ns_id);
	c.dsm.prp1 = cpu_to_le64(dma_addr);
	c.dsm.nr = cpu_to_le32(ndesc - 1);
	c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	nvmeq = get_nvmeq(dev);
	put_nvmeq(nvmeq);

	nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
	res = nvme_trans_status_code(hdr, nvme_sc);

	dma_free_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
			  range, dma_addr);
 out:
	kfree(plist);
	return res;
}
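
/*
 * Each SCSI unmap block descriptor is 16 bytes (8-byte LBA, 4-byte
 * block count, 4 reserved), which is why ndesc is derived with a >> 4.
 * A parameter list whose block descriptor data length is 48 therefore
 * becomes three nvme_dsm_range entries, sent as a single Dataset
 * Management deallocate with nr = 2 (the range count is 0's based).
 */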

static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
{
	u8 cmd[BLK_MAX_CDB];
	int retcode;
	unsigned int opcode;

	if (hdr->cmdp == NULL)
		return -EMSGSIZE;
	if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
		return -EFAULT;

	opcode = cmd[0];

	switch (opcode) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
		retcode = nvme_trans_io(ns, hdr, 0, cmd);
		break;
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
		retcode = nvme_trans_io(ns, hdr, 1, cmd);
		break;
	case INQUIRY:
		retcode = nvme_trans_inquiry(ns, hdr, cmd);
		break;
	case LOG_SENSE:
		retcode = nvme_trans_log_sense(ns, hdr, cmd);
		break;
	case MODE_SELECT:
	case MODE_SELECT_10:
		retcode = nvme_trans_mode_select(ns, hdr, cmd);
		break;
	case MODE_SENSE:
	case MODE_SENSE_10:
		retcode = nvme_trans_mode_sense(ns, hdr, cmd);
		break;
	case READ_CAPACITY:
		retcode = nvme_trans_read_capacity(ns, hdr, cmd);
		break;
	case SERVICE_ACTION_IN:
		if (IS_READ_CAP_16(cmd))
			retcode = nvme_trans_read_capacity(ns, hdr, cmd);
		else
			goto out;
		break;
	case REPORT_LUNS:
		retcode = nvme_trans_report_luns(ns, hdr, cmd);
		break;
	case REQUEST_SENSE:
		retcode = nvme_trans_request_sense(ns, hdr, cmd);
		break;
	case SECURITY_PROTOCOL_IN:
	case SECURITY_PROTOCOL_OUT:
		retcode = nvme_trans_security_protocol(ns, hdr, cmd);
		break;
	case START_STOP:
		retcode = nvme_trans_start_stop(ns, hdr, cmd);
		break;
	case SYNCHRONIZE_CACHE:
		retcode = nvme_trans_synchronize_cache(ns, hdr, cmd);
		break;
	case FORMAT_UNIT:
		retcode = nvme_trans_format_unit(ns, hdr, cmd);
		break;
	case TEST_UNIT_READY:
		retcode = nvme_trans_test_unit_ready(ns, hdr, cmd);
		break;
	case WRITE_BUFFER:
		retcode = nvme_trans_write_buffer(ns, hdr, cmd);
		break;
	case UNMAP:
		retcode = nvme_trans_unmap(ns, hdr, cmd);
		break;
	default:
 out:
		retcode = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		break;
	}
	return retcode;
}

int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
{
	struct sg_io_hdr hdr;
	int retcode;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&hdr, u_hdr, sizeof(hdr)))
		return -EFAULT;
	if (hdr.interface_id != 'S')
		return -EINVAL;
	if (hdr.cmd_len > BLK_MAX_CDB)
		return -EINVAL;

	retcode = nvme_scsi_translate(ns, &hdr);
	if (retcode < 0)
		return retcode;
	if (retcode > 0)
		retcode = SNTI_TRANSLATION_SUCCESS;
	if (copy_to_user(u_hdr, &hdr, sizeof(sg_io_hdr_t)) > 0)
		return -EFAULT;

	return retcode;
}

int nvme_sg_get_version_num(int __user *ip)
{
	return put_user(sg_version_num, ip);
}