Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v4.18-rc8 3677 lines 96 kB view raw
1/* 2 * Driver for sTec s1120 PCIe SSDs. sTec was acquired in 2013 by HGST and HGST 3 * was acquired by Western Digital in 2012. 4 * 5 * Copyright 2012 sTec, Inc. 6 * Copyright (c) 2017 Western Digital Corporation or its affiliates. 7 * 8 * This file is part of the Linux kernel, and is made available under 9 * the terms of the GNU General Public License version 2. 10 */ 11 12#include <linux/kernel.h> 13#include <linux/module.h> 14#include <linux/init.h> 15#include <linux/pci.h> 16#include <linux/slab.h> 17#include <linux/spinlock.h> 18#include <linux/blkdev.h> 19#include <linux/blk-mq.h> 20#include <linux/sched.h> 21#include <linux/interrupt.h> 22#include <linux/compiler.h> 23#include <linux/workqueue.h> 24#include <linux/delay.h> 25#include <linux/time.h> 26#include <linux/hdreg.h> 27#include <linux/dma-mapping.h> 28#include <linux/completion.h> 29#include <linux/scatterlist.h> 30#include <linux/version.h> 31#include <linux/err.h> 32#include <linux/aer.h> 33#include <linux/wait.h> 34#include <linux/stringify.h> 35#include <scsi/scsi.h> 36#include <scsi/sg.h> 37#include <linux/io.h> 38#include <linux/uaccess.h> 39#include <asm/unaligned.h> 40 41#include "skd_s1120.h" 42 43static int skd_dbg_level; 44static int skd_isr_comp_limit = 4; 45 46#define SKD_ASSERT(expr) \ 47 do { \ 48 if (unlikely(!(expr))) { \ 49 pr_err("Assertion failed! %s,%s,%s,line=%d\n", \ 50 # expr, __FILE__, __func__, __LINE__); \ 51 } \ 52 } while (0) 53 54#define DRV_NAME "skd" 55#define PFX DRV_NAME ": " 56 57MODULE_LICENSE("GPL"); 58 59MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver"); 60 61#define PCI_VENDOR_ID_STEC 0x1B39 62#define PCI_DEVICE_ID_S1120 0x0001 63 64#define SKD_FUA_NV (1 << 1) 65#define SKD_MINORS_PER_DEVICE 16 66 67#define SKD_MAX_QUEUE_DEPTH 200u 68 69#define SKD_PAUSE_TIMEOUT (5 * 1000) 70 71#define SKD_N_FITMSG_BYTES (512u) 72#define SKD_MAX_REQ_PER_MSG 14 73 74#define SKD_N_SPECIAL_FITMSG_BYTES (128u) 75 76/* SG elements are 32 bytes, so we can make this 4096 and still be under the 77 * 128KB limit. That allows 4096*4K = 16M xfer size 78 */ 79#define SKD_N_SG_PER_REQ_DEFAULT 256u 80 81#define SKD_N_COMPLETION_ENTRY 256u 82#define SKD_N_READ_CAP_BYTES (8u) 83 84#define SKD_N_INTERNAL_BYTES (512u) 85 86#define SKD_SKCOMP_SIZE \ 87 ((sizeof(struct fit_completion_entry_v1) + \ 88 sizeof(struct fit_comp_error_info)) * SKD_N_COMPLETION_ENTRY) 89 90/* 5 bits of uniqifier, 0xF800 */ 91#define SKD_ID_TABLE_MASK (3u << 8u) 92#define SKD_ID_RW_REQUEST (0u << 8u) 93#define SKD_ID_INTERNAL (1u << 8u) 94#define SKD_ID_FIT_MSG (3u << 8u) 95#define SKD_ID_SLOT_MASK 0x00FFu 96#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu 97 98#define SKD_N_MAX_SECTORS 2048u 99 100#define SKD_MAX_RETRIES 2u 101 102#define SKD_TIMER_SECONDS(seconds) (seconds) 103#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60)) 104 105#define INQ_STD_NBYTES 36 106 107enum skd_drvr_state { 108 SKD_DRVR_STATE_LOAD, 109 SKD_DRVR_STATE_IDLE, 110 SKD_DRVR_STATE_BUSY, 111 SKD_DRVR_STATE_STARTING, 112 SKD_DRVR_STATE_ONLINE, 113 SKD_DRVR_STATE_PAUSING, 114 SKD_DRVR_STATE_PAUSED, 115 SKD_DRVR_STATE_RESTARTING, 116 SKD_DRVR_STATE_RESUMING, 117 SKD_DRVR_STATE_STOPPING, 118 SKD_DRVR_STATE_FAULT, 119 SKD_DRVR_STATE_DISAPPEARED, 120 SKD_DRVR_STATE_PROTOCOL_MISMATCH, 121 SKD_DRVR_STATE_BUSY_ERASE, 122 SKD_DRVR_STATE_BUSY_SANITIZE, 123 SKD_DRVR_STATE_BUSY_IMMINENT, 124 SKD_DRVR_STATE_WAIT_BOOT, 125 SKD_DRVR_STATE_SYNCING, 126}; 127 128#define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u) 129#define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u) 130#define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u) 131#define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u) 132#define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u) 133#define SKD_START_WAIT_SECONDS 90u 134 135enum skd_req_state { 136 SKD_REQ_STATE_IDLE, 137 SKD_REQ_STATE_SETUP, 138 SKD_REQ_STATE_BUSY, 139 SKD_REQ_STATE_COMPLETED, 140 SKD_REQ_STATE_TIMEOUT, 141}; 142 143enum skd_check_status_action { 144 SKD_CHECK_STATUS_REPORT_GOOD, 145 SKD_CHECK_STATUS_REPORT_SMART_ALERT, 146 SKD_CHECK_STATUS_REQUEUE_REQUEST, 147 SKD_CHECK_STATUS_REPORT_ERROR, 148 SKD_CHECK_STATUS_BUSY_IMMINENT, 149}; 150 151struct skd_msg_buf { 152 struct fit_msg_hdr fmh; 153 struct skd_scsi_request scsi[SKD_MAX_REQ_PER_MSG]; 154}; 155 156struct skd_fitmsg_context { 157 u32 id; 158 159 u32 length; 160 161 struct skd_msg_buf *msg_buf; 162 dma_addr_t mb_dma_address; 163}; 164 165struct skd_request_context { 166 enum skd_req_state state; 167 168 u16 id; 169 u32 fitmsg_id; 170 171 u8 flush_cmd; 172 173 enum dma_data_direction data_dir; 174 struct scatterlist *sg; 175 u32 n_sg; 176 u32 sg_byte_count; 177 178 struct fit_sg_descriptor *sksg_list; 179 dma_addr_t sksg_dma_address; 180 181 struct fit_completion_entry_v1 completion; 182 183 struct fit_comp_error_info err_info; 184 185 blk_status_t status; 186}; 187 188struct skd_special_context { 189 struct skd_request_context req; 190 191 void *data_buf; 192 dma_addr_t db_dma_address; 193 194 struct skd_msg_buf *msg_buf; 195 dma_addr_t mb_dma_address; 196}; 197 198typedef enum skd_irq_type { 199 SKD_IRQ_LEGACY, 200 SKD_IRQ_MSI, 201 SKD_IRQ_MSIX 202} skd_irq_type_t; 203 204#define SKD_MAX_BARS 2 205 206struct skd_device { 207 void __iomem *mem_map[SKD_MAX_BARS]; 208 resource_size_t mem_phys[SKD_MAX_BARS]; 209 u32 mem_size[SKD_MAX_BARS]; 210 211 struct skd_msix_entry *msix_entries; 212 213 struct pci_dev *pdev; 214 int pcie_error_reporting_is_enabled; 215 216 spinlock_t lock; 217 struct gendisk *disk; 218 struct blk_mq_tag_set tag_set; 219 struct request_queue *queue; 220 struct skd_fitmsg_context *skmsg; 221 struct device *class_dev; 222 int gendisk_on; 223 int sync_done; 224 225 u32 devno; 226 u32 major; 227 char isr_name[30]; 228 229 enum skd_drvr_state state; 230 u32 drive_state; 231 232 u32 cur_max_queue_depth; 233 u32 queue_low_water_mark; 234 u32 dev_max_queue_depth; 235 236 u32 num_fitmsg_context; 237 u32 num_req_context; 238 239 struct skd_fitmsg_context *skmsg_table; 240 241 struct skd_special_context internal_skspcl; 242 u32 read_cap_blocksize; 243 u32 read_cap_last_lba; 244 int read_cap_is_valid; 245 int inquiry_is_valid; 246 u8 inq_serial_num[13]; /*12 chars plus null term */ 247 248 u8 skcomp_cycle; 249 u32 skcomp_ix; 250 struct kmem_cache *msgbuf_cache; 251 struct kmem_cache *sglist_cache; 252 struct kmem_cache *databuf_cache; 253 struct fit_completion_entry_v1 *skcomp_table; 254 struct fit_comp_error_info *skerr_table; 255 dma_addr_t cq_dma_address; 256 257 wait_queue_head_t waitq; 258 259 struct timer_list timer; 260 u32 timer_countdown; 261 u32 timer_substate; 262 263 int sgs_per_request; 264 u32 last_mtd; 265 266 u32 proto_ver; 267 268 int dbg_level; 269 u32 connect_time_stamp; 270 int connect_retries; 271#define SKD_MAX_CONNECT_RETRIES 16 272 u32 drive_jiffies; 273 274 u32 timo_slot; 275 276 struct work_struct start_queue; 277 struct work_struct completion_worker; 278}; 279 280#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF) 281#define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF) 282#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF) 283 284static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset) 285{ 286 u32 val = readl(skdev->mem_map[1] + offset); 287 288 if (unlikely(skdev->dbg_level >= 2)) 289 dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val); 290 return val; 291} 292 293static inline void skd_reg_write32(struct skd_device *skdev, u32 val, 294 u32 offset) 295{ 296 writel(val, skdev->mem_map[1] + offset); 297 if (unlikely(skdev->dbg_level >= 2)) 298 dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val); 299} 300 301static inline void skd_reg_write64(struct skd_device *skdev, u64 val, 302 u32 offset) 303{ 304 writeq(val, skdev->mem_map[1] + offset); 305 if (unlikely(skdev->dbg_level >= 2)) 306 dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset, 307 val); 308} 309 310 311#define SKD_IRQ_DEFAULT SKD_IRQ_MSIX 312static int skd_isr_type = SKD_IRQ_DEFAULT; 313 314module_param(skd_isr_type, int, 0444); 315MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability." 316 " (0==legacy, 1==MSI, 2==MSI-X, default==1)"); 317 318#define SKD_MAX_REQ_PER_MSG_DEFAULT 1 319static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT; 320 321module_param(skd_max_req_per_msg, int, 0444); 322MODULE_PARM_DESC(skd_max_req_per_msg, 323 "Maximum SCSI requests packed in a single message." 324 " (1-" __stringify(SKD_MAX_REQ_PER_MSG) ", default==1)"); 325 326#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64 327#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64" 328static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT; 329 330module_param(skd_max_queue_depth, int, 0444); 331MODULE_PARM_DESC(skd_max_queue_depth, 332 "Maximum SCSI requests issued to s1120." 333 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")"); 334 335static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT; 336module_param(skd_sgs_per_request, int, 0444); 337MODULE_PARM_DESC(skd_sgs_per_request, 338 "Maximum SG elements per block request." 339 " (1-4096, default==256)"); 340 341static int skd_max_pass_thru = 1; 342module_param(skd_max_pass_thru, int, 0444); 343MODULE_PARM_DESC(skd_max_pass_thru, 344 "Maximum SCSI pass-thru at a time. IGNORED"); 345 346module_param(skd_dbg_level, int, 0444); 347MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)"); 348 349module_param(skd_isr_comp_limit, int, 0444); 350MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4"); 351 352/* Major device number dynamically assigned. */ 353static u32 skd_major; 354 355static void skd_destruct(struct skd_device *skdev); 356static const struct block_device_operations skd_blockdev_ops; 357static void skd_send_fitmsg(struct skd_device *skdev, 358 struct skd_fitmsg_context *skmsg); 359static void skd_send_special_fitmsg(struct skd_device *skdev, 360 struct skd_special_context *skspcl); 361static bool skd_preop_sg_list(struct skd_device *skdev, 362 struct skd_request_context *skreq); 363static void skd_postop_sg_list(struct skd_device *skdev, 364 struct skd_request_context *skreq); 365 366static void skd_restart_device(struct skd_device *skdev); 367static int skd_quiesce_dev(struct skd_device *skdev); 368static int skd_unquiesce_dev(struct skd_device *skdev); 369static void skd_disable_interrupts(struct skd_device *skdev); 370static void skd_isr_fwstate(struct skd_device *skdev); 371static void skd_recover_requests(struct skd_device *skdev); 372static void skd_soft_reset(struct skd_device *skdev); 373 374const char *skd_drive_state_to_str(int state); 375const char *skd_skdev_state_to_str(enum skd_drvr_state state); 376static void skd_log_skdev(struct skd_device *skdev, const char *event); 377static void skd_log_skreq(struct skd_device *skdev, 378 struct skd_request_context *skreq, const char *event); 379 380/* 381 ***************************************************************************** 382 * READ/WRITE REQUESTS 383 ***************************************************************************** 384 */ 385static void skd_inc_in_flight(struct request *rq, void *data, bool reserved) 386{ 387 int *count = data; 388 389 count++; 390} 391 392static int skd_in_flight(struct skd_device *skdev) 393{ 394 int count = 0; 395 396 blk_mq_tagset_busy_iter(&skdev->tag_set, skd_inc_in_flight, &count); 397 398 return count; 399} 400 401static void 402skd_prep_rw_cdb(struct skd_scsi_request *scsi_req, 403 int data_dir, unsigned lba, 404 unsigned count) 405{ 406 if (data_dir == READ) 407 scsi_req->cdb[0] = READ_10; 408 else 409 scsi_req->cdb[0] = WRITE_10; 410 411 scsi_req->cdb[1] = 0; 412 scsi_req->cdb[2] = (lba & 0xff000000) >> 24; 413 scsi_req->cdb[3] = (lba & 0xff0000) >> 16; 414 scsi_req->cdb[4] = (lba & 0xff00) >> 8; 415 scsi_req->cdb[5] = (lba & 0xff); 416 scsi_req->cdb[6] = 0; 417 scsi_req->cdb[7] = (count & 0xff00) >> 8; 418 scsi_req->cdb[8] = count & 0xff; 419 scsi_req->cdb[9] = 0; 420} 421 422static void 423skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req, 424 struct skd_request_context *skreq) 425{ 426 skreq->flush_cmd = 1; 427 428 scsi_req->cdb[0] = SYNCHRONIZE_CACHE; 429 scsi_req->cdb[1] = 0; 430 scsi_req->cdb[2] = 0; 431 scsi_req->cdb[3] = 0; 432 scsi_req->cdb[4] = 0; 433 scsi_req->cdb[5] = 0; 434 scsi_req->cdb[6] = 0; 435 scsi_req->cdb[7] = 0; 436 scsi_req->cdb[8] = 0; 437 scsi_req->cdb[9] = 0; 438} 439 440/* 441 * Return true if and only if all pending requests should be failed. 442 */ 443static bool skd_fail_all(struct request_queue *q) 444{ 445 struct skd_device *skdev = q->queuedata; 446 447 SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE); 448 449 skd_log_skdev(skdev, "req_not_online"); 450 switch (skdev->state) { 451 case SKD_DRVR_STATE_PAUSING: 452 case SKD_DRVR_STATE_PAUSED: 453 case SKD_DRVR_STATE_STARTING: 454 case SKD_DRVR_STATE_RESTARTING: 455 case SKD_DRVR_STATE_WAIT_BOOT: 456 /* In case of starting, we haven't started the queue, 457 * so we can't get here... but requests are 458 * possibly hanging out waiting for us because we 459 * reported the dev/skd0 already. They'll wait 460 * forever if connect doesn't complete. 461 * What to do??? delay dev/skd0 ?? 462 */ 463 case SKD_DRVR_STATE_BUSY: 464 case SKD_DRVR_STATE_BUSY_IMMINENT: 465 case SKD_DRVR_STATE_BUSY_ERASE: 466 return false; 467 468 case SKD_DRVR_STATE_BUSY_SANITIZE: 469 case SKD_DRVR_STATE_STOPPING: 470 case SKD_DRVR_STATE_SYNCING: 471 case SKD_DRVR_STATE_FAULT: 472 case SKD_DRVR_STATE_DISAPPEARED: 473 default: 474 return true; 475 } 476} 477 478static blk_status_t skd_mq_queue_rq(struct blk_mq_hw_ctx *hctx, 479 const struct blk_mq_queue_data *mqd) 480{ 481 struct request *const req = mqd->rq; 482 struct request_queue *const q = req->q; 483 struct skd_device *skdev = q->queuedata; 484 struct skd_fitmsg_context *skmsg; 485 struct fit_msg_hdr *fmh; 486 const u32 tag = blk_mq_unique_tag(req); 487 struct skd_request_context *const skreq = blk_mq_rq_to_pdu(req); 488 struct skd_scsi_request *scsi_req; 489 unsigned long flags = 0; 490 const u32 lba = blk_rq_pos(req); 491 const u32 count = blk_rq_sectors(req); 492 const int data_dir = rq_data_dir(req); 493 494 if (unlikely(skdev->state != SKD_DRVR_STATE_ONLINE)) 495 return skd_fail_all(q) ? BLK_STS_IOERR : BLK_STS_RESOURCE; 496 497 blk_mq_start_request(req); 498 499 WARN_ONCE(tag >= skd_max_queue_depth, "%#x > %#x (nr_requests = %lu)\n", 500 tag, skd_max_queue_depth, q->nr_requests); 501 502 SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE); 503 504 dev_dbg(&skdev->pdev->dev, 505 "new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba, 506 lba, count, count, data_dir); 507 508 skreq->id = tag + SKD_ID_RW_REQUEST; 509 skreq->flush_cmd = 0; 510 skreq->n_sg = 0; 511 skreq->sg_byte_count = 0; 512 513 skreq->fitmsg_id = 0; 514 515 skreq->data_dir = data_dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 516 517 if (req->bio && !skd_preop_sg_list(skdev, skreq)) { 518 dev_dbg(&skdev->pdev->dev, "error Out\n"); 519 skreq->status = BLK_STS_RESOURCE; 520 blk_mq_complete_request(req); 521 return BLK_STS_OK; 522 } 523 524 dma_sync_single_for_device(&skdev->pdev->dev, skreq->sksg_dma_address, 525 skreq->n_sg * 526 sizeof(struct fit_sg_descriptor), 527 DMA_TO_DEVICE); 528 529 /* Either a FIT msg is in progress or we have to start one. */ 530 if (skd_max_req_per_msg == 1) { 531 skmsg = NULL; 532 } else { 533 spin_lock_irqsave(&skdev->lock, flags); 534 skmsg = skdev->skmsg; 535 } 536 if (!skmsg) { 537 skmsg = &skdev->skmsg_table[tag]; 538 skdev->skmsg = skmsg; 539 540 /* Initialize the FIT msg header */ 541 fmh = &skmsg->msg_buf->fmh; 542 memset(fmh, 0, sizeof(*fmh)); 543 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; 544 skmsg->length = sizeof(*fmh); 545 } else { 546 fmh = &skmsg->msg_buf->fmh; 547 } 548 549 skreq->fitmsg_id = skmsg->id; 550 551 scsi_req = &skmsg->msg_buf->scsi[fmh->num_protocol_cmds_coalesced]; 552 memset(scsi_req, 0, sizeof(*scsi_req)); 553 554 scsi_req->hdr.tag = skreq->id; 555 scsi_req->hdr.sg_list_dma_address = 556 cpu_to_be64(skreq->sksg_dma_address); 557 558 if (req_op(req) == REQ_OP_FLUSH) { 559 skd_prep_zerosize_flush_cdb(scsi_req, skreq); 560 SKD_ASSERT(skreq->flush_cmd == 1); 561 } else { 562 skd_prep_rw_cdb(scsi_req, data_dir, lba, count); 563 } 564 565 if (req->cmd_flags & REQ_FUA) 566 scsi_req->cdb[1] |= SKD_FUA_NV; 567 568 scsi_req->hdr.sg_list_len_bytes = cpu_to_be32(skreq->sg_byte_count); 569 570 /* Complete resource allocations. */ 571 skreq->state = SKD_REQ_STATE_BUSY; 572 573 skmsg->length += sizeof(struct skd_scsi_request); 574 fmh->num_protocol_cmds_coalesced++; 575 576 dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id, 577 skd_in_flight(skdev)); 578 579 /* 580 * If the FIT msg buffer is full send it. 581 */ 582 if (skd_max_req_per_msg == 1) { 583 skd_send_fitmsg(skdev, skmsg); 584 } else { 585 if (mqd->last || 586 fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) { 587 skd_send_fitmsg(skdev, skmsg); 588 skdev->skmsg = NULL; 589 } 590 spin_unlock_irqrestore(&skdev->lock, flags); 591 } 592 593 return BLK_STS_OK; 594} 595 596static enum blk_eh_timer_return skd_timed_out(struct request *req, 597 bool reserved) 598{ 599 struct skd_device *skdev = req->q->queuedata; 600 601 dev_err(&skdev->pdev->dev, "request with tag %#x timed out\n", 602 blk_mq_unique_tag(req)); 603 604 return BLK_EH_RESET_TIMER; 605} 606 607static void skd_complete_rq(struct request *req) 608{ 609 struct skd_request_context *skreq = blk_mq_rq_to_pdu(req); 610 611 blk_mq_end_request(req, skreq->status); 612} 613 614static bool skd_preop_sg_list(struct skd_device *skdev, 615 struct skd_request_context *skreq) 616{ 617 struct request *req = blk_mq_rq_from_pdu(skreq); 618 struct scatterlist *sgl = &skreq->sg[0], *sg; 619 int n_sg; 620 int i; 621 622 skreq->sg_byte_count = 0; 623 624 WARN_ON_ONCE(skreq->data_dir != DMA_TO_DEVICE && 625 skreq->data_dir != DMA_FROM_DEVICE); 626 627 n_sg = blk_rq_map_sg(skdev->queue, req, sgl); 628 if (n_sg <= 0) 629 return false; 630 631 /* 632 * Map scatterlist to PCI bus addresses. 633 * Note PCI might change the number of entries. 634 */ 635 n_sg = pci_map_sg(skdev->pdev, sgl, n_sg, skreq->data_dir); 636 if (n_sg <= 0) 637 return false; 638 639 SKD_ASSERT(n_sg <= skdev->sgs_per_request); 640 641 skreq->n_sg = n_sg; 642 643 for_each_sg(sgl, sg, n_sg, i) { 644 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; 645 u32 cnt = sg_dma_len(sg); 646 uint64_t dma_addr = sg_dma_address(sg); 647 648 sgd->control = FIT_SGD_CONTROL_NOT_LAST; 649 sgd->byte_count = cnt; 650 skreq->sg_byte_count += cnt; 651 sgd->host_side_addr = dma_addr; 652 sgd->dev_side_addr = 0; 653 } 654 655 skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL; 656 skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST; 657 658 if (unlikely(skdev->dbg_level > 1)) { 659 dev_dbg(&skdev->pdev->dev, 660 "skreq=%x sksg_list=%p sksg_dma=%llx\n", 661 skreq->id, skreq->sksg_list, skreq->sksg_dma_address); 662 for (i = 0; i < n_sg; i++) { 663 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; 664 665 dev_dbg(&skdev->pdev->dev, 666 " sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n", 667 i, sgd->byte_count, sgd->control, 668 sgd->host_side_addr, sgd->next_desc_ptr); 669 } 670 } 671 672 return true; 673} 674 675static void skd_postop_sg_list(struct skd_device *skdev, 676 struct skd_request_context *skreq) 677{ 678 /* 679 * restore the next ptr for next IO request so we 680 * don't have to set it every time. 681 */ 682 skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr = 683 skreq->sksg_dma_address + 684 ((skreq->n_sg) * sizeof(struct fit_sg_descriptor)); 685 pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, skreq->data_dir); 686} 687 688/* 689 ***************************************************************************** 690 * TIMER 691 ***************************************************************************** 692 */ 693 694static void skd_timer_tick_not_online(struct skd_device *skdev); 695 696static void skd_start_queue(struct work_struct *work) 697{ 698 struct skd_device *skdev = container_of(work, typeof(*skdev), 699 start_queue); 700 701 /* 702 * Although it is safe to call blk_start_queue() from interrupt 703 * context, blk_mq_start_hw_queues() must not be called from 704 * interrupt context. 705 */ 706 blk_mq_start_hw_queues(skdev->queue); 707} 708 709static void skd_timer_tick(struct timer_list *t) 710{ 711 struct skd_device *skdev = from_timer(skdev, t, timer); 712 unsigned long reqflags; 713 u32 state; 714 715 if (skdev->state == SKD_DRVR_STATE_FAULT) 716 /* The driver has declared fault, and we want it to 717 * stay that way until driver is reloaded. 718 */ 719 return; 720 721 spin_lock_irqsave(&skdev->lock, reqflags); 722 723 state = SKD_READL(skdev, FIT_STATUS); 724 state &= FIT_SR_DRIVE_STATE_MASK; 725 if (state != skdev->drive_state) 726 skd_isr_fwstate(skdev); 727 728 if (skdev->state != SKD_DRVR_STATE_ONLINE) 729 skd_timer_tick_not_online(skdev); 730 731 mod_timer(&skdev->timer, (jiffies + HZ)); 732 733 spin_unlock_irqrestore(&skdev->lock, reqflags); 734} 735 736static void skd_timer_tick_not_online(struct skd_device *skdev) 737{ 738 switch (skdev->state) { 739 case SKD_DRVR_STATE_IDLE: 740 case SKD_DRVR_STATE_LOAD: 741 break; 742 case SKD_DRVR_STATE_BUSY_SANITIZE: 743 dev_dbg(&skdev->pdev->dev, 744 "drive busy sanitize[%x], driver[%x]\n", 745 skdev->drive_state, skdev->state); 746 /* If we've been in sanitize for 3 seconds, we figure we're not 747 * going to get anymore completions, so recover requests now 748 */ 749 if (skdev->timer_countdown > 0) { 750 skdev->timer_countdown--; 751 return; 752 } 753 skd_recover_requests(skdev); 754 break; 755 756 case SKD_DRVR_STATE_BUSY: 757 case SKD_DRVR_STATE_BUSY_IMMINENT: 758 case SKD_DRVR_STATE_BUSY_ERASE: 759 dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n", 760 skdev->state, skdev->timer_countdown); 761 if (skdev->timer_countdown > 0) { 762 skdev->timer_countdown--; 763 return; 764 } 765 dev_dbg(&skdev->pdev->dev, 766 "busy[%x], timedout=%d, restarting device.", 767 skdev->state, skdev->timer_countdown); 768 skd_restart_device(skdev); 769 break; 770 771 case SKD_DRVR_STATE_WAIT_BOOT: 772 case SKD_DRVR_STATE_STARTING: 773 if (skdev->timer_countdown > 0) { 774 skdev->timer_countdown--; 775 return; 776 } 777 /* For now, we fault the drive. Could attempt resets to 778 * revcover at some point. */ 779 skdev->state = SKD_DRVR_STATE_FAULT; 780 781 dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n", 782 skdev->drive_state); 783 784 /*start the queue so we can respond with error to requests */ 785 /* wakeup anyone waiting for startup complete */ 786 schedule_work(&skdev->start_queue); 787 skdev->gendisk_on = -1; 788 wake_up_interruptible(&skdev->waitq); 789 break; 790 791 case SKD_DRVR_STATE_ONLINE: 792 /* shouldn't get here. */ 793 break; 794 795 case SKD_DRVR_STATE_PAUSING: 796 case SKD_DRVR_STATE_PAUSED: 797 break; 798 799 case SKD_DRVR_STATE_RESTARTING: 800 if (skdev->timer_countdown > 0) { 801 skdev->timer_countdown--; 802 return; 803 } 804 /* For now, we fault the drive. Could attempt resets to 805 * revcover at some point. */ 806 skdev->state = SKD_DRVR_STATE_FAULT; 807 dev_err(&skdev->pdev->dev, 808 "DriveFault Reconnect Timeout (%x)\n", 809 skdev->drive_state); 810 811 /* 812 * Recovering does two things: 813 * 1. completes IO with error 814 * 2. reclaims dma resources 815 * When is it safe to recover requests? 816 * - if the drive state is faulted 817 * - if the state is still soft reset after out timeout 818 * - if the drive registers are dead (state = FF) 819 * If it is "unsafe", we still need to recover, so we will 820 * disable pci bus mastering and disable our interrupts. 821 */ 822 823 if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) || 824 (skdev->drive_state == FIT_SR_DRIVE_FAULT) || 825 (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK)) 826 /* It never came out of soft reset. Try to 827 * recover the requests and then let them 828 * fail. This is to mitigate hung processes. */ 829 skd_recover_requests(skdev); 830 else { 831 dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n", 832 skdev->drive_state); 833 pci_disable_device(skdev->pdev); 834 skd_disable_interrupts(skdev); 835 skd_recover_requests(skdev); 836 } 837 838 /*start the queue so we can respond with error to requests */ 839 /* wakeup anyone waiting for startup complete */ 840 schedule_work(&skdev->start_queue); 841 skdev->gendisk_on = -1; 842 wake_up_interruptible(&skdev->waitq); 843 break; 844 845 case SKD_DRVR_STATE_RESUMING: 846 case SKD_DRVR_STATE_STOPPING: 847 case SKD_DRVR_STATE_SYNCING: 848 case SKD_DRVR_STATE_FAULT: 849 case SKD_DRVR_STATE_DISAPPEARED: 850 default: 851 break; 852 } 853} 854 855static int skd_start_timer(struct skd_device *skdev) 856{ 857 int rc; 858 859 timer_setup(&skdev->timer, skd_timer_tick, 0); 860 861 rc = mod_timer(&skdev->timer, (jiffies + HZ)); 862 if (rc) 863 dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc); 864 return rc; 865} 866 867static void skd_kill_timer(struct skd_device *skdev) 868{ 869 del_timer_sync(&skdev->timer); 870} 871 872/* 873 ***************************************************************************** 874 * INTERNAL REQUESTS -- generated by driver itself 875 ***************************************************************************** 876 */ 877 878static int skd_format_internal_skspcl(struct skd_device *skdev) 879{ 880 struct skd_special_context *skspcl = &skdev->internal_skspcl; 881 struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0]; 882 struct fit_msg_hdr *fmh; 883 uint64_t dma_address; 884 struct skd_scsi_request *scsi; 885 886 fmh = &skspcl->msg_buf->fmh; 887 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; 888 fmh->num_protocol_cmds_coalesced = 1; 889 890 scsi = &skspcl->msg_buf->scsi[0]; 891 memset(scsi, 0, sizeof(*scsi)); 892 dma_address = skspcl->req.sksg_dma_address; 893 scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address); 894 skspcl->req.n_sg = 1; 895 sgd->control = FIT_SGD_CONTROL_LAST; 896 sgd->byte_count = 0; 897 sgd->host_side_addr = skspcl->db_dma_address; 898 sgd->dev_side_addr = 0; 899 sgd->next_desc_ptr = 0LL; 900 901 return 1; 902} 903 904#define WR_BUF_SIZE SKD_N_INTERNAL_BYTES 905 906static void skd_send_internal_skspcl(struct skd_device *skdev, 907 struct skd_special_context *skspcl, 908 u8 opcode) 909{ 910 struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0]; 911 struct skd_scsi_request *scsi; 912 unsigned char *buf = skspcl->data_buf; 913 int i; 914 915 if (skspcl->req.state != SKD_REQ_STATE_IDLE) 916 /* 917 * A refresh is already in progress. 918 * Just wait for it to finish. 919 */ 920 return; 921 922 skspcl->req.state = SKD_REQ_STATE_BUSY; 923 924 scsi = &skspcl->msg_buf->scsi[0]; 925 scsi->hdr.tag = skspcl->req.id; 926 927 memset(scsi->cdb, 0, sizeof(scsi->cdb)); 928 929 switch (opcode) { 930 case TEST_UNIT_READY: 931 scsi->cdb[0] = TEST_UNIT_READY; 932 sgd->byte_count = 0; 933 scsi->hdr.sg_list_len_bytes = 0; 934 break; 935 936 case READ_CAPACITY: 937 scsi->cdb[0] = READ_CAPACITY; 938 sgd->byte_count = SKD_N_READ_CAP_BYTES; 939 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); 940 break; 941 942 case INQUIRY: 943 scsi->cdb[0] = INQUIRY; 944 scsi->cdb[1] = 0x01; /* evpd */ 945 scsi->cdb[2] = 0x80; /* serial number page */ 946 scsi->cdb[4] = 0x10; 947 sgd->byte_count = 16; 948 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); 949 break; 950 951 case SYNCHRONIZE_CACHE: 952 scsi->cdb[0] = SYNCHRONIZE_CACHE; 953 sgd->byte_count = 0; 954 scsi->hdr.sg_list_len_bytes = 0; 955 break; 956 957 case WRITE_BUFFER: 958 scsi->cdb[0] = WRITE_BUFFER; 959 scsi->cdb[1] = 0x02; 960 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8; 961 scsi->cdb[8] = WR_BUF_SIZE & 0xFF; 962 sgd->byte_count = WR_BUF_SIZE; 963 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); 964 /* fill incrementing byte pattern */ 965 for (i = 0; i < sgd->byte_count; i++) 966 buf[i] = i & 0xFF; 967 break; 968 969 case READ_BUFFER: 970 scsi->cdb[0] = READ_BUFFER; 971 scsi->cdb[1] = 0x02; 972 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8; 973 scsi->cdb[8] = WR_BUF_SIZE & 0xFF; 974 sgd->byte_count = WR_BUF_SIZE; 975 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); 976 memset(skspcl->data_buf, 0, sgd->byte_count); 977 break; 978 979 default: 980 SKD_ASSERT("Don't know what to send"); 981 return; 982 983 } 984 skd_send_special_fitmsg(skdev, skspcl); 985} 986 987static void skd_refresh_device_data(struct skd_device *skdev) 988{ 989 struct skd_special_context *skspcl = &skdev->internal_skspcl; 990 991 skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY); 992} 993 994static int skd_chk_read_buf(struct skd_device *skdev, 995 struct skd_special_context *skspcl) 996{ 997 unsigned char *buf = skspcl->data_buf; 998 int i; 999 1000 /* check for incrementing byte pattern */ 1001 for (i = 0; i < WR_BUF_SIZE; i++) 1002 if (buf[i] != (i & 0xFF)) 1003 return 1; 1004 1005 return 0; 1006} 1007 1008static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key, 1009 u8 code, u8 qual, u8 fruc) 1010{ 1011 /* If the check condition is of special interest, log a message */ 1012 if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02) 1013 && (code == 0x04) && (qual == 0x06)) { 1014 dev_err(&skdev->pdev->dev, 1015 "*** LOST_WRITE_DATA ERROR *** key/asc/ascq/fruc %02x/%02x/%02x/%02x\n", 1016 key, code, qual, fruc); 1017 } 1018} 1019 1020static void skd_complete_internal(struct skd_device *skdev, 1021 struct fit_completion_entry_v1 *skcomp, 1022 struct fit_comp_error_info *skerr, 1023 struct skd_special_context *skspcl) 1024{ 1025 u8 *buf = skspcl->data_buf; 1026 u8 status; 1027 int i; 1028 struct skd_scsi_request *scsi = &skspcl->msg_buf->scsi[0]; 1029 1030 lockdep_assert_held(&skdev->lock); 1031 1032 SKD_ASSERT(skspcl == &skdev->internal_skspcl); 1033 1034 dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]); 1035 1036 dma_sync_single_for_cpu(&skdev->pdev->dev, 1037 skspcl->db_dma_address, 1038 skspcl->req.sksg_list[0].byte_count, 1039 DMA_BIDIRECTIONAL); 1040 1041 skspcl->req.completion = *skcomp; 1042 skspcl->req.state = SKD_REQ_STATE_IDLE; 1043 1044 status = skspcl->req.completion.status; 1045 1046 skd_log_check_status(skdev, status, skerr->key, skerr->code, 1047 skerr->qual, skerr->fruc); 1048 1049 switch (scsi->cdb[0]) { 1050 case TEST_UNIT_READY: 1051 if (status == SAM_STAT_GOOD) 1052 skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER); 1053 else if ((status == SAM_STAT_CHECK_CONDITION) && 1054 (skerr->key == MEDIUM_ERROR)) 1055 skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER); 1056 else { 1057 if (skdev->state == SKD_DRVR_STATE_STOPPING) { 1058 dev_dbg(&skdev->pdev->dev, 1059 "TUR failed, don't send anymore state 0x%x\n", 1060 skdev->state); 1061 return; 1062 } 1063 dev_dbg(&skdev->pdev->dev, 1064 "**** TUR failed, retry skerr\n"); 1065 skd_send_internal_skspcl(skdev, skspcl, 1066 TEST_UNIT_READY); 1067 } 1068 break; 1069 1070 case WRITE_BUFFER: 1071 if (status == SAM_STAT_GOOD) 1072 skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER); 1073 else { 1074 if (skdev->state == SKD_DRVR_STATE_STOPPING) { 1075 dev_dbg(&skdev->pdev->dev, 1076 "write buffer failed, don't send anymore state 0x%x\n", 1077 skdev->state); 1078 return; 1079 } 1080 dev_dbg(&skdev->pdev->dev, 1081 "**** write buffer failed, retry skerr\n"); 1082 skd_send_internal_skspcl(skdev, skspcl, 1083 TEST_UNIT_READY); 1084 } 1085 break; 1086 1087 case READ_BUFFER: 1088 if (status == SAM_STAT_GOOD) { 1089 if (skd_chk_read_buf(skdev, skspcl) == 0) 1090 skd_send_internal_skspcl(skdev, skspcl, 1091 READ_CAPACITY); 1092 else { 1093 dev_err(&skdev->pdev->dev, 1094 "*** W/R Buffer mismatch %d ***\n", 1095 skdev->connect_retries); 1096 if (skdev->connect_retries < 1097 SKD_MAX_CONNECT_RETRIES) { 1098 skdev->connect_retries++; 1099 skd_soft_reset(skdev); 1100 } else { 1101 dev_err(&skdev->pdev->dev, 1102 "W/R Buffer Connect Error\n"); 1103 return; 1104 } 1105 } 1106 1107 } else { 1108 if (skdev->state == SKD_DRVR_STATE_STOPPING) { 1109 dev_dbg(&skdev->pdev->dev, 1110 "read buffer failed, don't send anymore state 0x%x\n", 1111 skdev->state); 1112 return; 1113 } 1114 dev_dbg(&skdev->pdev->dev, 1115 "**** read buffer failed, retry skerr\n"); 1116 skd_send_internal_skspcl(skdev, skspcl, 1117 TEST_UNIT_READY); 1118 } 1119 break; 1120 1121 case READ_CAPACITY: 1122 skdev->read_cap_is_valid = 0; 1123 if (status == SAM_STAT_GOOD) { 1124 skdev->read_cap_last_lba = 1125 (buf[0] << 24) | (buf[1] << 16) | 1126 (buf[2] << 8) | buf[3]; 1127 skdev->read_cap_blocksize = 1128 (buf[4] << 24) | (buf[5] << 16) | 1129 (buf[6] << 8) | buf[7]; 1130 1131 dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n", 1132 skdev->read_cap_last_lba, 1133 skdev->read_cap_blocksize); 1134 1135 set_capacity(skdev->disk, skdev->read_cap_last_lba + 1); 1136 1137 skdev->read_cap_is_valid = 1; 1138 1139 skd_send_internal_skspcl(skdev, skspcl, INQUIRY); 1140 } else if ((status == SAM_STAT_CHECK_CONDITION) && 1141 (skerr->key == MEDIUM_ERROR)) { 1142 skdev->read_cap_last_lba = ~0; 1143 set_capacity(skdev->disk, skdev->read_cap_last_lba + 1); 1144 dev_dbg(&skdev->pdev->dev, "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n"); 1145 skd_send_internal_skspcl(skdev, skspcl, INQUIRY); 1146 } else { 1147 dev_dbg(&skdev->pdev->dev, "**** READCAP failed, retry TUR\n"); 1148 skd_send_internal_skspcl(skdev, skspcl, 1149 TEST_UNIT_READY); 1150 } 1151 break; 1152 1153 case INQUIRY: 1154 skdev->inquiry_is_valid = 0; 1155 if (status == SAM_STAT_GOOD) { 1156 skdev->inquiry_is_valid = 1; 1157 1158 for (i = 0; i < 12; i++) 1159 skdev->inq_serial_num[i] = buf[i + 4]; 1160 skdev->inq_serial_num[12] = 0; 1161 } 1162 1163 if (skd_unquiesce_dev(skdev) < 0) 1164 dev_dbg(&skdev->pdev->dev, "**** failed, to ONLINE device\n"); 1165 /* connection is complete */ 1166 skdev->connect_retries = 0; 1167 break; 1168 1169 case SYNCHRONIZE_CACHE: 1170 if (status == SAM_STAT_GOOD) 1171 skdev->sync_done = 1; 1172 else 1173 skdev->sync_done = -1; 1174 wake_up_interruptible(&skdev->waitq); 1175 break; 1176 1177 default: 1178 SKD_ASSERT("we didn't send this"); 1179 } 1180} 1181 1182/* 1183 ***************************************************************************** 1184 * FIT MESSAGES 1185 ***************************************************************************** 1186 */ 1187 1188static void skd_send_fitmsg(struct skd_device *skdev, 1189 struct skd_fitmsg_context *skmsg) 1190{ 1191 u64 qcmd; 1192 1193 dev_dbg(&skdev->pdev->dev, "dma address 0x%llx, busy=%d\n", 1194 skmsg->mb_dma_address, skd_in_flight(skdev)); 1195 dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf); 1196 1197 qcmd = skmsg->mb_dma_address; 1198 qcmd |= FIT_QCMD_QID_NORMAL; 1199 1200 if (unlikely(skdev->dbg_level > 1)) { 1201 u8 *bp = (u8 *)skmsg->msg_buf; 1202 int i; 1203 for (i = 0; i < skmsg->length; i += 8) { 1204 dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i, 1205 &bp[i]); 1206 if (i == 0) 1207 i = 64 - 8; 1208 } 1209 } 1210 1211 if (skmsg->length > 256) 1212 qcmd |= FIT_QCMD_MSGSIZE_512; 1213 else if (skmsg->length > 128) 1214 qcmd |= FIT_QCMD_MSGSIZE_256; 1215 else if (skmsg->length > 64) 1216 qcmd |= FIT_QCMD_MSGSIZE_128; 1217 else 1218 /* 1219 * This makes no sense because the FIT msg header is 1220 * 64 bytes. If the msg is only 64 bytes long it has 1221 * no payload. 1222 */ 1223 qcmd |= FIT_QCMD_MSGSIZE_64; 1224 1225 dma_sync_single_for_device(&skdev->pdev->dev, skmsg->mb_dma_address, 1226 skmsg->length, DMA_TO_DEVICE); 1227 1228 /* Make sure skd_msg_buf is written before the doorbell is triggered. */ 1229 smp_wmb(); 1230 1231 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND); 1232} 1233 1234static void skd_send_special_fitmsg(struct skd_device *skdev, 1235 struct skd_special_context *skspcl) 1236{ 1237 u64 qcmd; 1238 1239 WARN_ON_ONCE(skspcl->req.n_sg != 1); 1240 1241 if (unlikely(skdev->dbg_level > 1)) { 1242 u8 *bp = (u8 *)skspcl->msg_buf; 1243 int i; 1244 1245 for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) { 1246 dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i, 1247 &bp[i]); 1248 if (i == 0) 1249 i = 64 - 8; 1250 } 1251 1252 dev_dbg(&skdev->pdev->dev, 1253 "skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n", 1254 skspcl, skspcl->req.id, skspcl->req.sksg_list, 1255 skspcl->req.sksg_dma_address); 1256 for (i = 0; i < skspcl->req.n_sg; i++) { 1257 struct fit_sg_descriptor *sgd = 1258 &skspcl->req.sksg_list[i]; 1259 1260 dev_dbg(&skdev->pdev->dev, 1261 " sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n", 1262 i, sgd->byte_count, sgd->control, 1263 sgd->host_side_addr, sgd->next_desc_ptr); 1264 } 1265 } 1266 1267 /* 1268 * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr 1269 * and one 64-byte SSDI command. 1270 */ 1271 qcmd = skspcl->mb_dma_address; 1272 qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128; 1273 1274 dma_sync_single_for_device(&skdev->pdev->dev, skspcl->mb_dma_address, 1275 SKD_N_SPECIAL_FITMSG_BYTES, DMA_TO_DEVICE); 1276 dma_sync_single_for_device(&skdev->pdev->dev, 1277 skspcl->req.sksg_dma_address, 1278 1 * sizeof(struct fit_sg_descriptor), 1279 DMA_TO_DEVICE); 1280 dma_sync_single_for_device(&skdev->pdev->dev, 1281 skspcl->db_dma_address, 1282 skspcl->req.sksg_list[0].byte_count, 1283 DMA_BIDIRECTIONAL); 1284 1285 /* Make sure skd_msg_buf is written before the doorbell is triggered. */ 1286 smp_wmb(); 1287 1288 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND); 1289} 1290 1291/* 1292 ***************************************************************************** 1293 * COMPLETION QUEUE 1294 ***************************************************************************** 1295 */ 1296 1297static void skd_complete_other(struct skd_device *skdev, 1298 struct fit_completion_entry_v1 *skcomp, 1299 struct fit_comp_error_info *skerr); 1300 1301struct sns_info { 1302 u8 type; 1303 u8 stat; 1304 u8 key; 1305 u8 asc; 1306 u8 ascq; 1307 u8 mask; 1308 enum skd_check_status_action action; 1309}; 1310 1311static struct sns_info skd_chkstat_table[] = { 1312 /* Good */ 1313 { 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c, 1314 SKD_CHECK_STATUS_REPORT_GOOD }, 1315 1316 /* Smart alerts */ 1317 { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */ 1318 SKD_CHECK_STATUS_REPORT_SMART_ALERT }, 1319 { 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */ 1320 SKD_CHECK_STATUS_REPORT_SMART_ALERT }, 1321 { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */ 1322 SKD_CHECK_STATUS_REPORT_SMART_ALERT }, 1323 1324 /* Retry (with limits) */ 1325 { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */ 1326 SKD_CHECK_STATUS_REQUEUE_REQUEST }, 1327 { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */ 1328 SKD_CHECK_STATUS_REQUEUE_REQUEST }, 1329 { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */ 1330 SKD_CHECK_STATUS_REQUEUE_REQUEST }, 1331 { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */ 1332 SKD_CHECK_STATUS_REQUEUE_REQUEST }, 1333 1334 /* Busy (or about to be) */ 1335 { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */ 1336 SKD_CHECK_STATUS_BUSY_IMMINENT }, 1337}; 1338 1339/* 1340 * Look up status and sense data to decide how to handle the error 1341 * from the device. 1342 * mask says which fields must match e.g., mask=0x18 means check 1343 * type and stat, ignore key, asc, ascq. 1344 */ 1345 1346static enum skd_check_status_action 1347skd_check_status(struct skd_device *skdev, 1348 u8 cmp_status, struct fit_comp_error_info *skerr) 1349{ 1350 int i; 1351 1352 dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n", 1353 skerr->key, skerr->code, skerr->qual, skerr->fruc); 1354 1355 dev_dbg(&skdev->pdev->dev, 1356 "stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n", 1357 skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual, 1358 skerr->fruc); 1359 1360 /* Does the info match an entry in the good category? */ 1361 for (i = 0; i < ARRAY_SIZE(skd_chkstat_table); i++) { 1362 struct sns_info *sns = &skd_chkstat_table[i]; 1363 1364 if (sns->mask & 0x10) 1365 if (skerr->type != sns->type) 1366 continue; 1367 1368 if (sns->mask & 0x08) 1369 if (cmp_status != sns->stat) 1370 continue; 1371 1372 if (sns->mask & 0x04) 1373 if (skerr->key != sns->key) 1374 continue; 1375 1376 if (sns->mask & 0x02) 1377 if (skerr->code != sns->asc) 1378 continue; 1379 1380 if (sns->mask & 0x01) 1381 if (skerr->qual != sns->ascq) 1382 continue; 1383 1384 if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) { 1385 dev_err(&skdev->pdev->dev, 1386 "SMART Alert: sense key/asc/ascq %02x/%02x/%02x\n", 1387 skerr->key, skerr->code, skerr->qual); 1388 } 1389 return sns->action; 1390 } 1391 1392 /* No other match, so nonzero status means error, 1393 * zero status means good 1394 */ 1395 if (cmp_status) { 1396 dev_dbg(&skdev->pdev->dev, "status check: error\n"); 1397 return SKD_CHECK_STATUS_REPORT_ERROR; 1398 } 1399 1400 dev_dbg(&skdev->pdev->dev, "status check good default\n"); 1401 return SKD_CHECK_STATUS_REPORT_GOOD; 1402} 1403 1404static void skd_resolve_req_exception(struct skd_device *skdev, 1405 struct skd_request_context *skreq, 1406 struct request *req) 1407{ 1408 u8 cmp_status = skreq->completion.status; 1409 1410 switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) { 1411 case SKD_CHECK_STATUS_REPORT_GOOD: 1412 case SKD_CHECK_STATUS_REPORT_SMART_ALERT: 1413 skreq->status = BLK_STS_OK; 1414 blk_mq_complete_request(req); 1415 break; 1416 1417 case SKD_CHECK_STATUS_BUSY_IMMINENT: 1418 skd_log_skreq(skdev, skreq, "retry(busy)"); 1419 blk_requeue_request(skdev->queue, req); 1420 dev_info(&skdev->pdev->dev, "drive BUSY imminent\n"); 1421 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT; 1422 skdev->timer_countdown = SKD_TIMER_MINUTES(20); 1423 skd_quiesce_dev(skdev); 1424 break; 1425 1426 case SKD_CHECK_STATUS_REQUEUE_REQUEST: 1427 if ((unsigned long) ++req->special < SKD_MAX_RETRIES) { 1428 skd_log_skreq(skdev, skreq, "retry"); 1429 blk_requeue_request(skdev->queue, req); 1430 break; 1431 } 1432 /* fall through */ 1433 1434 case SKD_CHECK_STATUS_REPORT_ERROR: 1435 default: 1436 skreq->status = BLK_STS_IOERR; 1437 blk_mq_complete_request(req); 1438 break; 1439 } 1440} 1441 1442static void skd_release_skreq(struct skd_device *skdev, 1443 struct skd_request_context *skreq) 1444{ 1445 /* 1446 * Reclaim the skd_request_context 1447 */ 1448 skreq->state = SKD_REQ_STATE_IDLE; 1449} 1450 1451static int skd_isr_completion_posted(struct skd_device *skdev, 1452 int limit, int *enqueued) 1453{ 1454 struct fit_completion_entry_v1 *skcmp; 1455 struct fit_comp_error_info *skerr; 1456 u16 req_id; 1457 u32 tag; 1458 u16 hwq = 0; 1459 struct request *rq; 1460 struct skd_request_context *skreq; 1461 u16 cmp_cntxt; 1462 u8 cmp_status; 1463 u8 cmp_cycle; 1464 u32 cmp_bytes; 1465 int rc = 0; 1466 int processed = 0; 1467 1468 lockdep_assert_held(&skdev->lock); 1469 1470 for (;; ) { 1471 SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY); 1472 1473 skcmp = &skdev->skcomp_table[skdev->skcomp_ix]; 1474 cmp_cycle = skcmp->cycle; 1475 cmp_cntxt = skcmp->tag; 1476 cmp_status = skcmp->status; 1477 cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes); 1478 1479 skerr = &skdev->skerr_table[skdev->skcomp_ix]; 1480 1481 dev_dbg(&skdev->pdev->dev, 1482 "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d busy=%d rbytes=0x%x proto=%d\n", 1483 skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle, 1484 cmp_cntxt, cmp_status, skd_in_flight(skdev), 1485 cmp_bytes, skdev->proto_ver); 1486 1487 if (cmp_cycle != skdev->skcomp_cycle) { 1488 dev_dbg(&skdev->pdev->dev, "end of completions\n"); 1489 break; 1490 } 1491 /* 1492 * Update the completion queue head index and possibly 1493 * the completion cycle count. 8-bit wrap-around. 1494 */ 1495 skdev->skcomp_ix++; 1496 if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) { 1497 skdev->skcomp_ix = 0; 1498 skdev->skcomp_cycle++; 1499 } 1500 1501 /* 1502 * The command context is a unique 32-bit ID. The low order 1503 * bits help locate the request. The request is usually a 1504 * r/w request (see skd_start() above) or a special request. 1505 */ 1506 req_id = cmp_cntxt; 1507 tag = req_id & SKD_ID_SLOT_AND_TABLE_MASK; 1508 1509 /* Is this other than a r/w request? */ 1510 if (tag >= skdev->num_req_context) { 1511 /* 1512 * This is not a completion for a r/w request. 1513 */ 1514 WARN_ON_ONCE(blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], 1515 tag)); 1516 skd_complete_other(skdev, skcmp, skerr); 1517 continue; 1518 } 1519 1520 rq = blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], tag); 1521 if (WARN(!rq, "No request for tag %#x -> %#x\n", cmp_cntxt, 1522 tag)) 1523 continue; 1524 skreq = blk_mq_rq_to_pdu(rq); 1525 1526 /* 1527 * Make sure the request ID for the slot matches. 1528 */ 1529 if (skreq->id != req_id) { 1530 dev_err(&skdev->pdev->dev, 1531 "Completion mismatch comp_id=0x%04x skreq=0x%04x new=0x%04x\n", 1532 req_id, skreq->id, cmp_cntxt); 1533 1534 continue; 1535 } 1536 1537 SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY); 1538 1539 skreq->completion = *skcmp; 1540 if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) { 1541 skreq->err_info = *skerr; 1542 skd_log_check_status(skdev, cmp_status, skerr->key, 1543 skerr->code, skerr->qual, 1544 skerr->fruc); 1545 } 1546 /* Release DMA resources for the request. */ 1547 if (skreq->n_sg > 0) 1548 skd_postop_sg_list(skdev, skreq); 1549 1550 skd_release_skreq(skdev, skreq); 1551 1552 /* 1553 * Capture the outcome and post it back to the native request. 1554 */ 1555 if (likely(cmp_status == SAM_STAT_GOOD)) { 1556 skreq->status = BLK_STS_OK; 1557 blk_mq_complete_request(rq); 1558 } else { 1559 skd_resolve_req_exception(skdev, skreq, rq); 1560 } 1561 1562 /* skd_isr_comp_limit equal zero means no limit */ 1563 if (limit) { 1564 if (++processed >= limit) { 1565 rc = 1; 1566 break; 1567 } 1568 } 1569 } 1570 1571 if (skdev->state == SKD_DRVR_STATE_PAUSING && 1572 skd_in_flight(skdev) == 0) { 1573 skdev->state = SKD_DRVR_STATE_PAUSED; 1574 wake_up_interruptible(&skdev->waitq); 1575 } 1576 1577 return rc; 1578} 1579 1580static void skd_complete_other(struct skd_device *skdev, 1581 struct fit_completion_entry_v1 *skcomp, 1582 struct fit_comp_error_info *skerr) 1583{ 1584 u32 req_id = 0; 1585 u32 req_table; 1586 u32 req_slot; 1587 struct skd_special_context *skspcl; 1588 1589 lockdep_assert_held(&skdev->lock); 1590 1591 req_id = skcomp->tag; 1592 req_table = req_id & SKD_ID_TABLE_MASK; 1593 req_slot = req_id & SKD_ID_SLOT_MASK; 1594 1595 dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table, 1596 req_id, req_slot); 1597 1598 /* 1599 * Based on the request id, determine how to dispatch this completion. 1600 * This swich/case is finding the good cases and forwarding the 1601 * completion entry. Errors are reported below the switch. 1602 */ 1603 switch (req_table) { 1604 case SKD_ID_RW_REQUEST: 1605 /* 1606 * The caller, skd_isr_completion_posted() above, 1607 * handles r/w requests. The only way we get here 1608 * is if the req_slot is out of bounds. 1609 */ 1610 break; 1611 1612 case SKD_ID_INTERNAL: 1613 if (req_slot == 0) { 1614 skspcl = &skdev->internal_skspcl; 1615 if (skspcl->req.id == req_id && 1616 skspcl->req.state == SKD_REQ_STATE_BUSY) { 1617 skd_complete_internal(skdev, 1618 skcomp, skerr, skspcl); 1619 return; 1620 } 1621 } 1622 break; 1623 1624 case SKD_ID_FIT_MSG: 1625 /* 1626 * These id's should never appear in a completion record. 1627 */ 1628 break; 1629 1630 default: 1631 /* 1632 * These id's should never appear anywhere; 1633 */ 1634 break; 1635 } 1636 1637 /* 1638 * If we get here it is a bad or stale id. 1639 */ 1640} 1641 1642static void skd_reset_skcomp(struct skd_device *skdev) 1643{ 1644 memset(skdev->skcomp_table, 0, SKD_SKCOMP_SIZE); 1645 1646 skdev->skcomp_ix = 0; 1647 skdev->skcomp_cycle = 1; 1648} 1649 1650/* 1651 ***************************************************************************** 1652 * INTERRUPTS 1653 ***************************************************************************** 1654 */ 1655static void skd_completion_worker(struct work_struct *work) 1656{ 1657 struct skd_device *skdev = 1658 container_of(work, struct skd_device, completion_worker); 1659 unsigned long flags; 1660 int flush_enqueued = 0; 1661 1662 spin_lock_irqsave(&skdev->lock, flags); 1663 1664 /* 1665 * pass in limit=0, which means no limit.. 1666 * process everything in compq 1667 */ 1668 skd_isr_completion_posted(skdev, 0, &flush_enqueued); 1669 schedule_work(&skdev->start_queue); 1670 1671 spin_unlock_irqrestore(&skdev->lock, flags); 1672} 1673 1674static void skd_isr_msg_from_dev(struct skd_device *skdev); 1675 1676static irqreturn_t 1677skd_isr(int irq, void *ptr) 1678{ 1679 struct skd_device *skdev = ptr; 1680 u32 intstat; 1681 u32 ack; 1682 int rc = 0; 1683 int deferred = 0; 1684 int flush_enqueued = 0; 1685 1686 spin_lock(&skdev->lock); 1687 1688 for (;; ) { 1689 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST); 1690 1691 ack = FIT_INT_DEF_MASK; 1692 ack &= intstat; 1693 1694 dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat, 1695 ack); 1696 1697 /* As long as there is an int pending on device, keep 1698 * running loop. When none, get out, but if we've never 1699 * done any processing, call completion handler? 1700 */ 1701 if (ack == 0) { 1702 /* No interrupts on device, but run the completion 1703 * processor anyway? 1704 */ 1705 if (rc == 0) 1706 if (likely (skdev->state 1707 == SKD_DRVR_STATE_ONLINE)) 1708 deferred = 1; 1709 break; 1710 } 1711 1712 rc = IRQ_HANDLED; 1713 1714 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST); 1715 1716 if (likely((skdev->state != SKD_DRVR_STATE_LOAD) && 1717 (skdev->state != SKD_DRVR_STATE_STOPPING))) { 1718 if (intstat & FIT_ISH_COMPLETION_POSTED) { 1719 /* 1720 * If we have already deferred completion 1721 * processing, don't bother running it again 1722 */ 1723 if (deferred == 0) 1724 deferred = 1725 skd_isr_completion_posted(skdev, 1726 skd_isr_comp_limit, &flush_enqueued); 1727 } 1728 1729 if (intstat & FIT_ISH_FW_STATE_CHANGE) { 1730 skd_isr_fwstate(skdev); 1731 if (skdev->state == SKD_DRVR_STATE_FAULT || 1732 skdev->state == 1733 SKD_DRVR_STATE_DISAPPEARED) { 1734 spin_unlock(&skdev->lock); 1735 return rc; 1736 } 1737 } 1738 1739 if (intstat & FIT_ISH_MSG_FROM_DEV) 1740 skd_isr_msg_from_dev(skdev); 1741 } 1742 } 1743 1744 if (unlikely(flush_enqueued)) 1745 schedule_work(&skdev->start_queue); 1746 1747 if (deferred) 1748 schedule_work(&skdev->completion_worker); 1749 else if (!flush_enqueued) 1750 schedule_work(&skdev->start_queue); 1751 1752 spin_unlock(&skdev->lock); 1753 1754 return rc; 1755} 1756 1757static void skd_drive_fault(struct skd_device *skdev) 1758{ 1759 skdev->state = SKD_DRVR_STATE_FAULT; 1760 dev_err(&skdev->pdev->dev, "Drive FAULT\n"); 1761} 1762 1763static void skd_drive_disappeared(struct skd_device *skdev) 1764{ 1765 skdev->state = SKD_DRVR_STATE_DISAPPEARED; 1766 dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n"); 1767} 1768 1769static void skd_isr_fwstate(struct skd_device *skdev) 1770{ 1771 u32 sense; 1772 u32 state; 1773 u32 mtd; 1774 int prev_driver_state = skdev->state; 1775 1776 sense = SKD_READL(skdev, FIT_STATUS); 1777 state = sense & FIT_SR_DRIVE_STATE_MASK; 1778 1779 dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n", 1780 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, 1781 skd_drive_state_to_str(state), state); 1782 1783 skdev->drive_state = state; 1784 1785 switch (skdev->drive_state) { 1786 case FIT_SR_DRIVE_INIT: 1787 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) { 1788 skd_disable_interrupts(skdev); 1789 break; 1790 } 1791 if (skdev->state == SKD_DRVR_STATE_RESTARTING) 1792 skd_recover_requests(skdev); 1793 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) { 1794 skdev->timer_countdown = SKD_STARTING_TIMO; 1795 skdev->state = SKD_DRVR_STATE_STARTING; 1796 skd_soft_reset(skdev); 1797 break; 1798 } 1799 mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0); 1800 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 1801 skdev->last_mtd = mtd; 1802 break; 1803 1804 case FIT_SR_DRIVE_ONLINE: 1805 skdev->cur_max_queue_depth = skd_max_queue_depth; 1806 if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth) 1807 skdev->cur_max_queue_depth = skdev->dev_max_queue_depth; 1808 1809 skdev->queue_low_water_mark = 1810 skdev->cur_max_queue_depth * 2 / 3 + 1; 1811 if (skdev->queue_low_water_mark < 1) 1812 skdev->queue_low_water_mark = 1; 1813 dev_info(&skdev->pdev->dev, 1814 "Queue depth limit=%d dev=%d lowat=%d\n", 1815 skdev->cur_max_queue_depth, 1816 skdev->dev_max_queue_depth, 1817 skdev->queue_low_water_mark); 1818 1819 skd_refresh_device_data(skdev); 1820 break; 1821 1822 case FIT_SR_DRIVE_BUSY: 1823 skdev->state = SKD_DRVR_STATE_BUSY; 1824 skdev->timer_countdown = SKD_BUSY_TIMO; 1825 skd_quiesce_dev(skdev); 1826 break; 1827 case FIT_SR_DRIVE_BUSY_SANITIZE: 1828 /* set timer for 3 seconds, we'll abort any unfinished 1829 * commands after that expires 1830 */ 1831 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; 1832 skdev->timer_countdown = SKD_TIMER_SECONDS(3); 1833 schedule_work(&skdev->start_queue); 1834 break; 1835 case FIT_SR_DRIVE_BUSY_ERASE: 1836 skdev->state = SKD_DRVR_STATE_BUSY_ERASE; 1837 skdev->timer_countdown = SKD_BUSY_TIMO; 1838 break; 1839 case FIT_SR_DRIVE_OFFLINE: 1840 skdev->state = SKD_DRVR_STATE_IDLE; 1841 break; 1842 case FIT_SR_DRIVE_SOFT_RESET: 1843 switch (skdev->state) { 1844 case SKD_DRVR_STATE_STARTING: 1845 case SKD_DRVR_STATE_RESTARTING: 1846 /* Expected by a caller of skd_soft_reset() */ 1847 break; 1848 default: 1849 skdev->state = SKD_DRVR_STATE_RESTARTING; 1850 break; 1851 } 1852 break; 1853 case FIT_SR_DRIVE_FW_BOOTING: 1854 dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n"); 1855 skdev->state = SKD_DRVR_STATE_WAIT_BOOT; 1856 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO; 1857 break; 1858 1859 case FIT_SR_DRIVE_DEGRADED: 1860 case FIT_SR_PCIE_LINK_DOWN: 1861 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD: 1862 break; 1863 1864 case FIT_SR_DRIVE_FAULT: 1865 skd_drive_fault(skdev); 1866 skd_recover_requests(skdev); 1867 schedule_work(&skdev->start_queue); 1868 break; 1869 1870 /* PCIe bus returned all Fs? */ 1871 case 0xFF: 1872 dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state, 1873 sense); 1874 skd_drive_disappeared(skdev); 1875 skd_recover_requests(skdev); 1876 schedule_work(&skdev->start_queue); 1877 break; 1878 default: 1879 /* 1880 * Uknown FW State. Wait for a state we recognize. 1881 */ 1882 break; 1883 } 1884 dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n", 1885 skd_skdev_state_to_str(prev_driver_state), prev_driver_state, 1886 skd_skdev_state_to_str(skdev->state), skdev->state); 1887} 1888 1889static void skd_recover_request(struct request *req, void *data, bool reserved) 1890{ 1891 struct skd_device *const skdev = data; 1892 struct skd_request_context *skreq = blk_mq_rq_to_pdu(req); 1893 1894 if (skreq->state != SKD_REQ_STATE_BUSY) 1895 return; 1896 1897 skd_log_skreq(skdev, skreq, "recover"); 1898 1899 /* Release DMA resources for the request. */ 1900 if (skreq->n_sg > 0) 1901 skd_postop_sg_list(skdev, skreq); 1902 1903 skreq->state = SKD_REQ_STATE_IDLE; 1904 skreq->status = BLK_STS_IOERR; 1905 blk_mq_complete_request(req); 1906} 1907 1908static void skd_recover_requests(struct skd_device *skdev) 1909{ 1910 blk_mq_tagset_busy_iter(&skdev->tag_set, skd_recover_request, skdev); 1911} 1912 1913static void skd_isr_msg_from_dev(struct skd_device *skdev) 1914{ 1915 u32 mfd; 1916 u32 mtd; 1917 u32 data; 1918 1919 mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); 1920 1921 dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd, 1922 skdev->last_mtd); 1923 1924 /* ignore any mtd that is an ack for something we didn't send */ 1925 if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd)) 1926 return; 1927 1928 switch (FIT_MXD_TYPE(mfd)) { 1929 case FIT_MTD_FITFW_INIT: 1930 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd); 1931 1932 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) { 1933 dev_err(&skdev->pdev->dev, "protocol mismatch\n"); 1934 dev_err(&skdev->pdev->dev, " got=%d support=%d\n", 1935 skdev->proto_ver, FIT_PROTOCOL_VERSION_1); 1936 dev_err(&skdev->pdev->dev, " please upgrade driver\n"); 1937 skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH; 1938 skd_soft_reset(skdev); 1939 break; 1940 } 1941 mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0); 1942 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 1943 skdev->last_mtd = mtd; 1944 break; 1945 1946 case FIT_MTD_GET_CMDQ_DEPTH: 1947 skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd); 1948 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0, 1949 SKD_N_COMPLETION_ENTRY); 1950 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 1951 skdev->last_mtd = mtd; 1952 break; 1953 1954 case FIT_MTD_SET_COMPQ_DEPTH: 1955 SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG); 1956 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0); 1957 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 1958 skdev->last_mtd = mtd; 1959 break; 1960 1961 case FIT_MTD_SET_COMPQ_ADDR: 1962 skd_reset_skcomp(skdev); 1963 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno); 1964 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 1965 skdev->last_mtd = mtd; 1966 break; 1967 1968 case FIT_MTD_CMD_LOG_HOST_ID: 1969 /* hardware interface overflows in y2106 */ 1970 skdev->connect_time_stamp = (u32)ktime_get_real_seconds(); 1971 data = skdev->connect_time_stamp & 0xFFFF; 1972 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data); 1973 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 1974 skdev->last_mtd = mtd; 1975 break; 1976 1977 case FIT_MTD_CMD_LOG_TIME_STAMP_LO: 1978 skdev->drive_jiffies = FIT_MXD_DATA(mfd); 1979 data = (skdev->connect_time_stamp >> 16) & 0xFFFF; 1980 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data); 1981 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 1982 skdev->last_mtd = mtd; 1983 break; 1984 1985 case FIT_MTD_CMD_LOG_TIME_STAMP_HI: 1986 skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16); 1987 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0); 1988 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 1989 skdev->last_mtd = mtd; 1990 1991 dev_err(&skdev->pdev->dev, "Time sync driver=0x%x device=0x%x\n", 1992 skdev->connect_time_stamp, skdev->drive_jiffies); 1993 break; 1994 1995 case FIT_MTD_ARM_QUEUE: 1996 skdev->last_mtd = 0; 1997 /* 1998 * State should be, or soon will be, FIT_SR_DRIVE_ONLINE. 1999 */ 2000 break; 2001 2002 default: 2003 break; 2004 } 2005} 2006 2007static void skd_disable_interrupts(struct skd_device *skdev) 2008{ 2009 u32 sense; 2010 2011 sense = SKD_READL(skdev, FIT_CONTROL); 2012 sense &= ~FIT_CR_ENABLE_INTERRUPTS; 2013 SKD_WRITEL(skdev, sense, FIT_CONTROL); 2014 dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense); 2015 2016 /* Note that the 1s is written. A 1-bit means 2017 * disable, a 0 means enable. 2018 */ 2019 SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST); 2020} 2021 2022static void skd_enable_interrupts(struct skd_device *skdev) 2023{ 2024 u32 val; 2025 2026 /* unmask interrupts first */ 2027 val = FIT_ISH_FW_STATE_CHANGE + 2028 FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV; 2029 2030 /* Note that the compliment of mask is written. A 1-bit means 2031 * disable, a 0 means enable. */ 2032 SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST); 2033 dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val); 2034 2035 val = SKD_READL(skdev, FIT_CONTROL); 2036 val |= FIT_CR_ENABLE_INTERRUPTS; 2037 dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val); 2038 SKD_WRITEL(skdev, val, FIT_CONTROL); 2039} 2040 2041/* 2042 ***************************************************************************** 2043 * START, STOP, RESTART, QUIESCE, UNQUIESCE 2044 ***************************************************************************** 2045 */ 2046 2047static void skd_soft_reset(struct skd_device *skdev) 2048{ 2049 u32 val; 2050 2051 val = SKD_READL(skdev, FIT_CONTROL); 2052 val |= (FIT_CR_SOFT_RESET); 2053 dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val); 2054 SKD_WRITEL(skdev, val, FIT_CONTROL); 2055} 2056 2057static void skd_start_device(struct skd_device *skdev) 2058{ 2059 unsigned long flags; 2060 u32 sense; 2061 u32 state; 2062 2063 spin_lock_irqsave(&skdev->lock, flags); 2064 2065 /* ack all ghost interrupts */ 2066 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); 2067 2068 sense = SKD_READL(skdev, FIT_STATUS); 2069 2070 dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense); 2071 2072 state = sense & FIT_SR_DRIVE_STATE_MASK; 2073 skdev->drive_state = state; 2074 skdev->last_mtd = 0; 2075 2076 skdev->state = SKD_DRVR_STATE_STARTING; 2077 skdev->timer_countdown = SKD_STARTING_TIMO; 2078 2079 skd_enable_interrupts(skdev); 2080 2081 switch (skdev->drive_state) { 2082 case FIT_SR_DRIVE_OFFLINE: 2083 dev_err(&skdev->pdev->dev, "Drive offline...\n"); 2084 break; 2085 2086 case FIT_SR_DRIVE_FW_BOOTING: 2087 dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n"); 2088 skdev->state = SKD_DRVR_STATE_WAIT_BOOT; 2089 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO; 2090 break; 2091 2092 case FIT_SR_DRIVE_BUSY_SANITIZE: 2093 dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n"); 2094 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; 2095 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; 2096 break; 2097 2098 case FIT_SR_DRIVE_BUSY_ERASE: 2099 dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n"); 2100 skdev->state = SKD_DRVR_STATE_BUSY_ERASE; 2101 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; 2102 break; 2103 2104 case FIT_SR_DRIVE_INIT: 2105 case FIT_SR_DRIVE_ONLINE: 2106 skd_soft_reset(skdev); 2107 break; 2108 2109 case FIT_SR_DRIVE_BUSY: 2110 dev_err(&skdev->pdev->dev, "Drive Busy...\n"); 2111 skdev->state = SKD_DRVR_STATE_BUSY; 2112 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; 2113 break; 2114 2115 case FIT_SR_DRIVE_SOFT_RESET: 2116 dev_err(&skdev->pdev->dev, "drive soft reset in prog\n"); 2117 break; 2118 2119 case FIT_SR_DRIVE_FAULT: 2120 /* Fault state is bad...soft reset won't do it... 2121 * Hard reset, maybe, but does it work on device? 2122 * For now, just fault so the system doesn't hang. 2123 */ 2124 skd_drive_fault(skdev); 2125 /*start the queue so we can respond with error to requests */ 2126 dev_dbg(&skdev->pdev->dev, "starting queue\n"); 2127 schedule_work(&skdev->start_queue); 2128 skdev->gendisk_on = -1; 2129 wake_up_interruptible(&skdev->waitq); 2130 break; 2131 2132 case 0xFF: 2133 /* Most likely the device isn't there or isn't responding 2134 * to the BAR1 addresses. */ 2135 skd_drive_disappeared(skdev); 2136 /*start the queue so we can respond with error to requests */ 2137 dev_dbg(&skdev->pdev->dev, 2138 "starting queue to error-out reqs\n"); 2139 schedule_work(&skdev->start_queue); 2140 skdev->gendisk_on = -1; 2141 wake_up_interruptible(&skdev->waitq); 2142 break; 2143 2144 default: 2145 dev_err(&skdev->pdev->dev, "Start: unknown state %x\n", 2146 skdev->drive_state); 2147 break; 2148 } 2149 2150 state = SKD_READL(skdev, FIT_CONTROL); 2151 dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state); 2152 2153 state = SKD_READL(skdev, FIT_INT_STATUS_HOST); 2154 dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state); 2155 2156 state = SKD_READL(skdev, FIT_INT_MASK_HOST); 2157 dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state); 2158 2159 state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); 2160 dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state); 2161 2162 state = SKD_READL(skdev, FIT_HW_VERSION); 2163 dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state); 2164 2165 spin_unlock_irqrestore(&skdev->lock, flags); 2166} 2167 2168static void skd_stop_device(struct skd_device *skdev) 2169{ 2170 unsigned long flags; 2171 struct skd_special_context *skspcl = &skdev->internal_skspcl; 2172 u32 dev_state; 2173 int i; 2174 2175 spin_lock_irqsave(&skdev->lock, flags); 2176 2177 if (skdev->state != SKD_DRVR_STATE_ONLINE) { 2178 dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__); 2179 goto stop_out; 2180 } 2181 2182 if (skspcl->req.state != SKD_REQ_STATE_IDLE) { 2183 dev_err(&skdev->pdev->dev, "%s no special\n", __func__); 2184 goto stop_out; 2185 } 2186 2187 skdev->state = SKD_DRVR_STATE_SYNCING; 2188 skdev->sync_done = 0; 2189 2190 skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE); 2191 2192 spin_unlock_irqrestore(&skdev->lock, flags); 2193 2194 wait_event_interruptible_timeout(skdev->waitq, 2195 (skdev->sync_done), (10 * HZ)); 2196 2197 spin_lock_irqsave(&skdev->lock, flags); 2198 2199 switch (skdev->sync_done) { 2200 case 0: 2201 dev_err(&skdev->pdev->dev, "%s no sync\n", __func__); 2202 break; 2203 case 1: 2204 dev_err(&skdev->pdev->dev, "%s sync done\n", __func__); 2205 break; 2206 default: 2207 dev_err(&skdev->pdev->dev, "%s sync error\n", __func__); 2208 } 2209 2210stop_out: 2211 skdev->state = SKD_DRVR_STATE_STOPPING; 2212 spin_unlock_irqrestore(&skdev->lock, flags); 2213 2214 skd_kill_timer(skdev); 2215 2216 spin_lock_irqsave(&skdev->lock, flags); 2217 skd_disable_interrupts(skdev); 2218 2219 /* ensure all ints on device are cleared */ 2220 /* soft reset the device to unload with a clean slate */ 2221 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); 2222 SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL); 2223 2224 spin_unlock_irqrestore(&skdev->lock, flags); 2225 2226 /* poll every 100ms, 1 second timeout */ 2227 for (i = 0; i < 10; i++) { 2228 dev_state = 2229 SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK; 2230 if (dev_state == FIT_SR_DRIVE_INIT) 2231 break; 2232 set_current_state(TASK_INTERRUPTIBLE); 2233 schedule_timeout(msecs_to_jiffies(100)); 2234 } 2235 2236 if (dev_state != FIT_SR_DRIVE_INIT) 2237 dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__, 2238 dev_state); 2239} 2240 2241/* assume spinlock is held */ 2242static void skd_restart_device(struct skd_device *skdev) 2243{ 2244 u32 state; 2245 2246 /* ack all ghost interrupts */ 2247 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); 2248 2249 state = SKD_READL(skdev, FIT_STATUS); 2250 2251 dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state); 2252 2253 state &= FIT_SR_DRIVE_STATE_MASK; 2254 skdev->drive_state = state; 2255 skdev->last_mtd = 0; 2256 2257 skdev->state = SKD_DRVR_STATE_RESTARTING; 2258 skdev->timer_countdown = SKD_RESTARTING_TIMO; 2259 2260 skd_soft_reset(skdev); 2261} 2262 2263/* assume spinlock is held */ 2264static int skd_quiesce_dev(struct skd_device *skdev) 2265{ 2266 int rc = 0; 2267 2268 switch (skdev->state) { 2269 case SKD_DRVR_STATE_BUSY: 2270 case SKD_DRVR_STATE_BUSY_IMMINENT: 2271 dev_dbg(&skdev->pdev->dev, "stopping queue\n"); 2272 blk_mq_stop_hw_queues(skdev->queue); 2273 break; 2274 case SKD_DRVR_STATE_ONLINE: 2275 case SKD_DRVR_STATE_STOPPING: 2276 case SKD_DRVR_STATE_SYNCING: 2277 case SKD_DRVR_STATE_PAUSING: 2278 case SKD_DRVR_STATE_PAUSED: 2279 case SKD_DRVR_STATE_STARTING: 2280 case SKD_DRVR_STATE_RESTARTING: 2281 case SKD_DRVR_STATE_RESUMING: 2282 default: 2283 rc = -EINVAL; 2284 dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n", 2285 skdev->state); 2286 } 2287 return rc; 2288} 2289 2290/* assume spinlock is held */ 2291static int skd_unquiesce_dev(struct skd_device *skdev) 2292{ 2293 int prev_driver_state = skdev->state; 2294 2295 skd_log_skdev(skdev, "unquiesce"); 2296 if (skdev->state == SKD_DRVR_STATE_ONLINE) { 2297 dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n"); 2298 return 0; 2299 } 2300 if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) { 2301 /* 2302 * If there has been an state change to other than 2303 * ONLINE, we will rely on controller state change 2304 * to come back online and restart the queue. 2305 * The BUSY state means that driver is ready to 2306 * continue normal processing but waiting for controller 2307 * to become available. 2308 */ 2309 skdev->state = SKD_DRVR_STATE_BUSY; 2310 dev_dbg(&skdev->pdev->dev, "drive BUSY state\n"); 2311 return 0; 2312 } 2313 2314 /* 2315 * Drive has just come online, driver is either in startup, 2316 * paused performing a task, or bust waiting for hardware. 2317 */ 2318 switch (skdev->state) { 2319 case SKD_DRVR_STATE_PAUSED: 2320 case SKD_DRVR_STATE_BUSY: 2321 case SKD_DRVR_STATE_BUSY_IMMINENT: 2322 case SKD_DRVR_STATE_BUSY_ERASE: 2323 case SKD_DRVR_STATE_STARTING: 2324 case SKD_DRVR_STATE_RESTARTING: 2325 case SKD_DRVR_STATE_FAULT: 2326 case SKD_DRVR_STATE_IDLE: 2327 case SKD_DRVR_STATE_LOAD: 2328 skdev->state = SKD_DRVR_STATE_ONLINE; 2329 dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n", 2330 skd_skdev_state_to_str(prev_driver_state), 2331 prev_driver_state, skd_skdev_state_to_str(skdev->state), 2332 skdev->state); 2333 dev_dbg(&skdev->pdev->dev, 2334 "**** device ONLINE...starting block queue\n"); 2335 dev_dbg(&skdev->pdev->dev, "starting queue\n"); 2336 dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n"); 2337 schedule_work(&skdev->start_queue); 2338 skdev->gendisk_on = 1; 2339 wake_up_interruptible(&skdev->waitq); 2340 break; 2341 2342 case SKD_DRVR_STATE_DISAPPEARED: 2343 default: 2344 dev_dbg(&skdev->pdev->dev, 2345 "**** driver state %d, not implemented\n", 2346 skdev->state); 2347 return -EBUSY; 2348 } 2349 return 0; 2350} 2351 2352/* 2353 ***************************************************************************** 2354 * PCIe MSI/MSI-X INTERRUPT HANDLERS 2355 ***************************************************************************** 2356 */ 2357 2358static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data) 2359{ 2360 struct skd_device *skdev = skd_host_data; 2361 unsigned long flags; 2362 2363 spin_lock_irqsave(&skdev->lock, flags); 2364 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", 2365 SKD_READL(skdev, FIT_INT_STATUS_HOST)); 2366 dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq, 2367 SKD_READL(skdev, FIT_INT_STATUS_HOST)); 2368 SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST); 2369 spin_unlock_irqrestore(&skdev->lock, flags); 2370 return IRQ_HANDLED; 2371} 2372 2373static irqreturn_t skd_statec_isr(int irq, void *skd_host_data) 2374{ 2375 struct skd_device *skdev = skd_host_data; 2376 unsigned long flags; 2377 2378 spin_lock_irqsave(&skdev->lock, flags); 2379 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", 2380 SKD_READL(skdev, FIT_INT_STATUS_HOST)); 2381 SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST); 2382 skd_isr_fwstate(skdev); 2383 spin_unlock_irqrestore(&skdev->lock, flags); 2384 return IRQ_HANDLED; 2385} 2386 2387static irqreturn_t skd_comp_q(int irq, void *skd_host_data) 2388{ 2389 struct skd_device *skdev = skd_host_data; 2390 unsigned long flags; 2391 int flush_enqueued = 0; 2392 int deferred; 2393 2394 spin_lock_irqsave(&skdev->lock, flags); 2395 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", 2396 SKD_READL(skdev, FIT_INT_STATUS_HOST)); 2397 SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST); 2398 deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit, 2399 &flush_enqueued); 2400 if (flush_enqueued) 2401 schedule_work(&skdev->start_queue); 2402 2403 if (deferred) 2404 schedule_work(&skdev->completion_worker); 2405 else if (!flush_enqueued) 2406 schedule_work(&skdev->start_queue); 2407 2408 spin_unlock_irqrestore(&skdev->lock, flags); 2409 2410 return IRQ_HANDLED; 2411} 2412 2413static irqreturn_t skd_msg_isr(int irq, void *skd_host_data) 2414{ 2415 struct skd_device *skdev = skd_host_data; 2416 unsigned long flags; 2417 2418 spin_lock_irqsave(&skdev->lock, flags); 2419 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", 2420 SKD_READL(skdev, FIT_INT_STATUS_HOST)); 2421 SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST); 2422 skd_isr_msg_from_dev(skdev); 2423 spin_unlock_irqrestore(&skdev->lock, flags); 2424 return IRQ_HANDLED; 2425} 2426 2427static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data) 2428{ 2429 struct skd_device *skdev = skd_host_data; 2430 unsigned long flags; 2431 2432 spin_lock_irqsave(&skdev->lock, flags); 2433 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", 2434 SKD_READL(skdev, FIT_INT_STATUS_HOST)); 2435 SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST); 2436 spin_unlock_irqrestore(&skdev->lock, flags); 2437 return IRQ_HANDLED; 2438} 2439 2440/* 2441 ***************************************************************************** 2442 * PCIe MSI/MSI-X SETUP 2443 ***************************************************************************** 2444 */ 2445 2446struct skd_msix_entry { 2447 char isr_name[30]; 2448}; 2449 2450struct skd_init_msix_entry { 2451 const char *name; 2452 irq_handler_t handler; 2453}; 2454 2455#define SKD_MAX_MSIX_COUNT 13 2456#define SKD_MIN_MSIX_COUNT 7 2457#define SKD_BASE_MSIX_IRQ 4 2458 2459static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = { 2460 { "(DMA 0)", skd_reserved_isr }, 2461 { "(DMA 1)", skd_reserved_isr }, 2462 { "(DMA 2)", skd_reserved_isr }, 2463 { "(DMA 3)", skd_reserved_isr }, 2464 { "(State Change)", skd_statec_isr }, 2465 { "(COMPL_Q)", skd_comp_q }, 2466 { "(MSG)", skd_msg_isr }, 2467 { "(Reserved)", skd_reserved_isr }, 2468 { "(Reserved)", skd_reserved_isr }, 2469 { "(Queue Full 0)", skd_qfull_isr }, 2470 { "(Queue Full 1)", skd_qfull_isr }, 2471 { "(Queue Full 2)", skd_qfull_isr }, 2472 { "(Queue Full 3)", skd_qfull_isr }, 2473}; 2474 2475static int skd_acquire_msix(struct skd_device *skdev) 2476{ 2477 int i, rc; 2478 struct pci_dev *pdev = skdev->pdev; 2479 2480 rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT, 2481 PCI_IRQ_MSIX); 2482 if (rc < 0) { 2483 dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc); 2484 goto out; 2485 } 2486 2487 skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT, 2488 sizeof(struct skd_msix_entry), GFP_KERNEL); 2489 if (!skdev->msix_entries) { 2490 rc = -ENOMEM; 2491 dev_err(&skdev->pdev->dev, "msix table allocation error\n"); 2492 goto out; 2493 } 2494 2495 /* Enable MSI-X vectors for the base queue */ 2496 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) { 2497 struct skd_msix_entry *qentry = &skdev->msix_entries[i]; 2498 2499 snprintf(qentry->isr_name, sizeof(qentry->isr_name), 2500 "%s%d-msix %s", DRV_NAME, skdev->devno, 2501 msix_entries[i].name); 2502 2503 rc = devm_request_irq(&skdev->pdev->dev, 2504 pci_irq_vector(skdev->pdev, i), 2505 msix_entries[i].handler, 0, 2506 qentry->isr_name, skdev); 2507 if (rc) { 2508 dev_err(&skdev->pdev->dev, 2509 "Unable to register(%d) MSI-X handler %d: %s\n", 2510 rc, i, qentry->isr_name); 2511 goto msix_out; 2512 } 2513 } 2514 2515 dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n", 2516 SKD_MAX_MSIX_COUNT); 2517 return 0; 2518 2519msix_out: 2520 while (--i >= 0) 2521 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev); 2522out: 2523 kfree(skdev->msix_entries); 2524 skdev->msix_entries = NULL; 2525 return rc; 2526} 2527 2528static int skd_acquire_irq(struct skd_device *skdev) 2529{ 2530 struct pci_dev *pdev = skdev->pdev; 2531 unsigned int irq_flag = PCI_IRQ_LEGACY; 2532 int rc; 2533 2534 if (skd_isr_type == SKD_IRQ_MSIX) { 2535 rc = skd_acquire_msix(skdev); 2536 if (!rc) 2537 return 0; 2538 2539 dev_err(&skdev->pdev->dev, 2540 "failed to enable MSI-X, re-trying with MSI %d\n", rc); 2541 } 2542 2543 snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME, 2544 skdev->devno); 2545 2546 if (skd_isr_type != SKD_IRQ_LEGACY) 2547 irq_flag |= PCI_IRQ_MSI; 2548 rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag); 2549 if (rc < 0) { 2550 dev_err(&skdev->pdev->dev, 2551 "failed to allocate the MSI interrupt %d\n", rc); 2552 return rc; 2553 } 2554 2555 rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 2556 pdev->msi_enabled ? 0 : IRQF_SHARED, 2557 skdev->isr_name, skdev); 2558 if (rc) { 2559 pci_free_irq_vectors(pdev); 2560 dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n", 2561 rc); 2562 return rc; 2563 } 2564 2565 return 0; 2566} 2567 2568static void skd_release_irq(struct skd_device *skdev) 2569{ 2570 struct pci_dev *pdev = skdev->pdev; 2571 2572 if (skdev->msix_entries) { 2573 int i; 2574 2575 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) { 2576 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), 2577 skdev); 2578 } 2579 2580 kfree(skdev->msix_entries); 2581 skdev->msix_entries = NULL; 2582 } else { 2583 devm_free_irq(&pdev->dev, pdev->irq, skdev); 2584 } 2585 2586 pci_free_irq_vectors(pdev); 2587} 2588 2589/* 2590 ***************************************************************************** 2591 * CONSTRUCT 2592 ***************************************************************************** 2593 */ 2594 2595static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s, 2596 dma_addr_t *dma_handle, gfp_t gfp, 2597 enum dma_data_direction dir) 2598{ 2599 struct device *dev = &skdev->pdev->dev; 2600 void *buf; 2601 2602 buf = kmem_cache_alloc(s, gfp); 2603 if (!buf) 2604 return NULL; 2605 *dma_handle = dma_map_single(dev, buf, 2606 kmem_cache_size(s), dir); 2607 if (dma_mapping_error(dev, *dma_handle)) { 2608 kmem_cache_free(s, buf); 2609 buf = NULL; 2610 } 2611 return buf; 2612} 2613 2614static void skd_free_dma(struct skd_device *skdev, struct kmem_cache *s, 2615 void *vaddr, dma_addr_t dma_handle, 2616 enum dma_data_direction dir) 2617{ 2618 if (!vaddr) 2619 return; 2620 2621 dma_unmap_single(&skdev->pdev->dev, dma_handle, 2622 kmem_cache_size(s), dir); 2623 kmem_cache_free(s, vaddr); 2624} 2625 2626static int skd_cons_skcomp(struct skd_device *skdev) 2627{ 2628 int rc = 0; 2629 struct fit_completion_entry_v1 *skcomp; 2630 2631 dev_dbg(&skdev->pdev->dev, 2632 "comp pci_alloc, total bytes %zd entries %d\n", 2633 SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY); 2634 2635 skcomp = pci_zalloc_consistent(skdev->pdev, SKD_SKCOMP_SIZE, 2636 &skdev->cq_dma_address); 2637 2638 if (skcomp == NULL) { 2639 rc = -ENOMEM; 2640 goto err_out; 2641 } 2642 2643 skdev->skcomp_table = skcomp; 2644 skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp + 2645 sizeof(*skcomp) * 2646 SKD_N_COMPLETION_ENTRY); 2647 2648err_out: 2649 return rc; 2650} 2651 2652static int skd_cons_skmsg(struct skd_device *skdev) 2653{ 2654 int rc = 0; 2655 u32 i; 2656 2657 dev_dbg(&skdev->pdev->dev, 2658 "skmsg_table kcalloc, struct %lu, count %u total %lu\n", 2659 sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context, 2660 sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context); 2661 2662 skdev->skmsg_table = kcalloc(skdev->num_fitmsg_context, 2663 sizeof(struct skd_fitmsg_context), 2664 GFP_KERNEL); 2665 if (skdev->skmsg_table == NULL) { 2666 rc = -ENOMEM; 2667 goto err_out; 2668 } 2669 2670 for (i = 0; i < skdev->num_fitmsg_context; i++) { 2671 struct skd_fitmsg_context *skmsg; 2672 2673 skmsg = &skdev->skmsg_table[i]; 2674 2675 skmsg->id = i + SKD_ID_FIT_MSG; 2676 2677 skmsg->msg_buf = pci_alloc_consistent(skdev->pdev, 2678 SKD_N_FITMSG_BYTES, 2679 &skmsg->mb_dma_address); 2680 2681 if (skmsg->msg_buf == NULL) { 2682 rc = -ENOMEM; 2683 goto err_out; 2684 } 2685 2686 WARN(((uintptr_t)skmsg->msg_buf | skmsg->mb_dma_address) & 2687 (FIT_QCMD_ALIGN - 1), 2688 "not aligned: msg_buf %p mb_dma_address %#llx\n", 2689 skmsg->msg_buf, skmsg->mb_dma_address); 2690 memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES); 2691 } 2692 2693err_out: 2694 return rc; 2695} 2696 2697static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev, 2698 u32 n_sg, 2699 dma_addr_t *ret_dma_addr) 2700{ 2701 struct fit_sg_descriptor *sg_list; 2702 2703 sg_list = skd_alloc_dma(skdev, skdev->sglist_cache, ret_dma_addr, 2704 GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE); 2705 2706 if (sg_list != NULL) { 2707 uint64_t dma_address = *ret_dma_addr; 2708 u32 i; 2709 2710 for (i = 0; i < n_sg - 1; i++) { 2711 uint64_t ndp_off; 2712 ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor); 2713 2714 sg_list[i].next_desc_ptr = dma_address + ndp_off; 2715 } 2716 sg_list[i].next_desc_ptr = 0LL; 2717 } 2718 2719 return sg_list; 2720} 2721 2722static void skd_free_sg_list(struct skd_device *skdev, 2723 struct fit_sg_descriptor *sg_list, 2724 dma_addr_t dma_addr) 2725{ 2726 if (WARN_ON_ONCE(!sg_list)) 2727 return; 2728 2729 skd_free_dma(skdev, skdev->sglist_cache, sg_list, dma_addr, 2730 DMA_TO_DEVICE); 2731} 2732 2733static int skd_init_request(struct blk_mq_tag_set *set, struct request *rq, 2734 unsigned int hctx_idx, unsigned int numa_node) 2735{ 2736 struct skd_device *skdev = set->driver_data; 2737 struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq); 2738 2739 skreq->state = SKD_REQ_STATE_IDLE; 2740 skreq->sg = (void *)(skreq + 1); 2741 sg_init_table(skreq->sg, skd_sgs_per_request); 2742 skreq->sksg_list = skd_cons_sg_list(skdev, skd_sgs_per_request, 2743 &skreq->sksg_dma_address); 2744 2745 return skreq->sksg_list ? 0 : -ENOMEM; 2746} 2747 2748static void skd_exit_request(struct blk_mq_tag_set *set, struct request *rq, 2749 unsigned int hctx_idx) 2750{ 2751 struct skd_device *skdev = set->driver_data; 2752 struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq); 2753 2754 skd_free_sg_list(skdev, skreq->sksg_list, skreq->sksg_dma_address); 2755} 2756 2757static int skd_cons_sksb(struct skd_device *skdev) 2758{ 2759 int rc = 0; 2760 struct skd_special_context *skspcl; 2761 2762 skspcl = &skdev->internal_skspcl; 2763 2764 skspcl->req.id = 0 + SKD_ID_INTERNAL; 2765 skspcl->req.state = SKD_REQ_STATE_IDLE; 2766 2767 skspcl->data_buf = skd_alloc_dma(skdev, skdev->databuf_cache, 2768 &skspcl->db_dma_address, 2769 GFP_DMA | __GFP_ZERO, 2770 DMA_BIDIRECTIONAL); 2771 if (skspcl->data_buf == NULL) { 2772 rc = -ENOMEM; 2773 goto err_out; 2774 } 2775 2776 skspcl->msg_buf = skd_alloc_dma(skdev, skdev->msgbuf_cache, 2777 &skspcl->mb_dma_address, 2778 GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE); 2779 if (skspcl->msg_buf == NULL) { 2780 rc = -ENOMEM; 2781 goto err_out; 2782 } 2783 2784 skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1, 2785 &skspcl->req.sksg_dma_address); 2786 if (skspcl->req.sksg_list == NULL) { 2787 rc = -ENOMEM; 2788 goto err_out; 2789 } 2790 2791 if (!skd_format_internal_skspcl(skdev)) { 2792 rc = -EINVAL; 2793 goto err_out; 2794 } 2795 2796err_out: 2797 return rc; 2798} 2799 2800static const struct blk_mq_ops skd_mq_ops = { 2801 .queue_rq = skd_mq_queue_rq, 2802 .complete = skd_complete_rq, 2803 .timeout = skd_timed_out, 2804 .init_request = skd_init_request, 2805 .exit_request = skd_exit_request, 2806}; 2807 2808static int skd_cons_disk(struct skd_device *skdev) 2809{ 2810 int rc = 0; 2811 struct gendisk *disk; 2812 struct request_queue *q; 2813 unsigned long flags; 2814 2815 disk = alloc_disk(SKD_MINORS_PER_DEVICE); 2816 if (!disk) { 2817 rc = -ENOMEM; 2818 goto err_out; 2819 } 2820 2821 skdev->disk = disk; 2822 sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno); 2823 2824 disk->major = skdev->major; 2825 disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE; 2826 disk->fops = &skd_blockdev_ops; 2827 disk->private_data = skdev; 2828 2829 memset(&skdev->tag_set, 0, sizeof(skdev->tag_set)); 2830 skdev->tag_set.ops = &skd_mq_ops; 2831 skdev->tag_set.nr_hw_queues = 1; 2832 skdev->tag_set.queue_depth = skd_max_queue_depth; 2833 skdev->tag_set.cmd_size = sizeof(struct skd_request_context) + 2834 skdev->sgs_per_request * sizeof(struct scatterlist); 2835 skdev->tag_set.numa_node = NUMA_NO_NODE; 2836 skdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | 2837 BLK_MQ_F_SG_MERGE | 2838 BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_FIFO); 2839 skdev->tag_set.driver_data = skdev; 2840 rc = blk_mq_alloc_tag_set(&skdev->tag_set); 2841 if (rc) 2842 goto err_out; 2843 q = blk_mq_init_queue(&skdev->tag_set); 2844 if (IS_ERR(q)) { 2845 blk_mq_free_tag_set(&skdev->tag_set); 2846 rc = PTR_ERR(q); 2847 goto err_out; 2848 } 2849 q->queuedata = skdev; 2850 2851 skdev->queue = q; 2852 disk->queue = q; 2853 2854 blk_queue_write_cache(q, true, true); 2855 blk_queue_max_segments(q, skdev->sgs_per_request); 2856 blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS); 2857 2858 /* set optimal I/O size to 8KB */ 2859 blk_queue_io_opt(q, 8192); 2860 2861 blk_queue_flag_set(QUEUE_FLAG_NONROT, q); 2862 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); 2863 2864 blk_queue_rq_timeout(q, 8 * HZ); 2865 2866 spin_lock_irqsave(&skdev->lock, flags); 2867 dev_dbg(&skdev->pdev->dev, "stopping queue\n"); 2868 blk_mq_stop_hw_queues(skdev->queue); 2869 spin_unlock_irqrestore(&skdev->lock, flags); 2870 2871err_out: 2872 return rc; 2873} 2874 2875#define SKD_N_DEV_TABLE 16u 2876static u32 skd_next_devno; 2877 2878static struct skd_device *skd_construct(struct pci_dev *pdev) 2879{ 2880 struct skd_device *skdev; 2881 int blk_major = skd_major; 2882 size_t size; 2883 int rc; 2884 2885 skdev = kzalloc(sizeof(*skdev), GFP_KERNEL); 2886 2887 if (!skdev) { 2888 dev_err(&pdev->dev, "memory alloc failure\n"); 2889 return NULL; 2890 } 2891 2892 skdev->state = SKD_DRVR_STATE_LOAD; 2893 skdev->pdev = pdev; 2894 skdev->devno = skd_next_devno++; 2895 skdev->major = blk_major; 2896 skdev->dev_max_queue_depth = 0; 2897 2898 skdev->num_req_context = skd_max_queue_depth; 2899 skdev->num_fitmsg_context = skd_max_queue_depth; 2900 skdev->cur_max_queue_depth = 1; 2901 skdev->queue_low_water_mark = 1; 2902 skdev->proto_ver = 99; 2903 skdev->sgs_per_request = skd_sgs_per_request; 2904 skdev->dbg_level = skd_dbg_level; 2905 2906 spin_lock_init(&skdev->lock); 2907 2908 INIT_WORK(&skdev->start_queue, skd_start_queue); 2909 INIT_WORK(&skdev->completion_worker, skd_completion_worker); 2910 2911 size = max(SKD_N_FITMSG_BYTES, SKD_N_SPECIAL_FITMSG_BYTES); 2912 skdev->msgbuf_cache = kmem_cache_create("skd-msgbuf", size, 0, 2913 SLAB_HWCACHE_ALIGN, NULL); 2914 if (!skdev->msgbuf_cache) 2915 goto err_out; 2916 WARN_ONCE(kmem_cache_size(skdev->msgbuf_cache) < size, 2917 "skd-msgbuf: %d < %zd\n", 2918 kmem_cache_size(skdev->msgbuf_cache), size); 2919 size = skd_sgs_per_request * sizeof(struct fit_sg_descriptor); 2920 skdev->sglist_cache = kmem_cache_create("skd-sglist", size, 0, 2921 SLAB_HWCACHE_ALIGN, NULL); 2922 if (!skdev->sglist_cache) 2923 goto err_out; 2924 WARN_ONCE(kmem_cache_size(skdev->sglist_cache) < size, 2925 "skd-sglist: %d < %zd\n", 2926 kmem_cache_size(skdev->sglist_cache), size); 2927 size = SKD_N_INTERNAL_BYTES; 2928 skdev->databuf_cache = kmem_cache_create("skd-databuf", size, 0, 2929 SLAB_HWCACHE_ALIGN, NULL); 2930 if (!skdev->databuf_cache) 2931 goto err_out; 2932 WARN_ONCE(kmem_cache_size(skdev->databuf_cache) < size, 2933 "skd-databuf: %d < %zd\n", 2934 kmem_cache_size(skdev->databuf_cache), size); 2935 2936 dev_dbg(&skdev->pdev->dev, "skcomp\n"); 2937 rc = skd_cons_skcomp(skdev); 2938 if (rc < 0) 2939 goto err_out; 2940 2941 dev_dbg(&skdev->pdev->dev, "skmsg\n"); 2942 rc = skd_cons_skmsg(skdev); 2943 if (rc < 0) 2944 goto err_out; 2945 2946 dev_dbg(&skdev->pdev->dev, "sksb\n"); 2947 rc = skd_cons_sksb(skdev); 2948 if (rc < 0) 2949 goto err_out; 2950 2951 dev_dbg(&skdev->pdev->dev, "disk\n"); 2952 rc = skd_cons_disk(skdev); 2953 if (rc < 0) 2954 goto err_out; 2955 2956 dev_dbg(&skdev->pdev->dev, "VICTORY\n"); 2957 return skdev; 2958 2959err_out: 2960 dev_dbg(&skdev->pdev->dev, "construct failed\n"); 2961 skd_destruct(skdev); 2962 return NULL; 2963} 2964 2965/* 2966 ***************************************************************************** 2967 * DESTRUCT (FREE) 2968 ***************************************************************************** 2969 */ 2970 2971static void skd_free_skcomp(struct skd_device *skdev) 2972{ 2973 if (skdev->skcomp_table) 2974 pci_free_consistent(skdev->pdev, SKD_SKCOMP_SIZE, 2975 skdev->skcomp_table, skdev->cq_dma_address); 2976 2977 skdev->skcomp_table = NULL; 2978 skdev->cq_dma_address = 0; 2979} 2980 2981static void skd_free_skmsg(struct skd_device *skdev) 2982{ 2983 u32 i; 2984 2985 if (skdev->skmsg_table == NULL) 2986 return; 2987 2988 for (i = 0; i < skdev->num_fitmsg_context; i++) { 2989 struct skd_fitmsg_context *skmsg; 2990 2991 skmsg = &skdev->skmsg_table[i]; 2992 2993 if (skmsg->msg_buf != NULL) { 2994 pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES, 2995 skmsg->msg_buf, 2996 skmsg->mb_dma_address); 2997 } 2998 skmsg->msg_buf = NULL; 2999 skmsg->mb_dma_address = 0; 3000 } 3001 3002 kfree(skdev->skmsg_table); 3003 skdev->skmsg_table = NULL; 3004} 3005 3006static void skd_free_sksb(struct skd_device *skdev) 3007{ 3008 struct skd_special_context *skspcl = &skdev->internal_skspcl; 3009 3010 skd_free_dma(skdev, skdev->databuf_cache, skspcl->data_buf, 3011 skspcl->db_dma_address, DMA_BIDIRECTIONAL); 3012 3013 skspcl->data_buf = NULL; 3014 skspcl->db_dma_address = 0; 3015 3016 skd_free_dma(skdev, skdev->msgbuf_cache, skspcl->msg_buf, 3017 skspcl->mb_dma_address, DMA_TO_DEVICE); 3018 3019 skspcl->msg_buf = NULL; 3020 skspcl->mb_dma_address = 0; 3021 3022 skd_free_sg_list(skdev, skspcl->req.sksg_list, 3023 skspcl->req.sksg_dma_address); 3024 3025 skspcl->req.sksg_list = NULL; 3026 skspcl->req.sksg_dma_address = 0; 3027} 3028 3029static void skd_free_disk(struct skd_device *skdev) 3030{ 3031 struct gendisk *disk = skdev->disk; 3032 3033 if (disk && (disk->flags & GENHD_FL_UP)) 3034 del_gendisk(disk); 3035 3036 if (skdev->queue) { 3037 blk_cleanup_queue(skdev->queue); 3038 skdev->queue = NULL; 3039 if (disk) 3040 disk->queue = NULL; 3041 } 3042 3043 if (skdev->tag_set.tags) 3044 blk_mq_free_tag_set(&skdev->tag_set); 3045 3046 put_disk(disk); 3047 skdev->disk = NULL; 3048} 3049 3050static void skd_destruct(struct skd_device *skdev) 3051{ 3052 if (skdev == NULL) 3053 return; 3054 3055 cancel_work_sync(&skdev->start_queue); 3056 3057 dev_dbg(&skdev->pdev->dev, "disk\n"); 3058 skd_free_disk(skdev); 3059 3060 dev_dbg(&skdev->pdev->dev, "sksb\n"); 3061 skd_free_sksb(skdev); 3062 3063 dev_dbg(&skdev->pdev->dev, "skmsg\n"); 3064 skd_free_skmsg(skdev); 3065 3066 dev_dbg(&skdev->pdev->dev, "skcomp\n"); 3067 skd_free_skcomp(skdev); 3068 3069 kmem_cache_destroy(skdev->databuf_cache); 3070 kmem_cache_destroy(skdev->sglist_cache); 3071 kmem_cache_destroy(skdev->msgbuf_cache); 3072 3073 dev_dbg(&skdev->pdev->dev, "skdev\n"); 3074 kfree(skdev); 3075} 3076 3077/* 3078 ***************************************************************************** 3079 * BLOCK DEVICE (BDEV) GLUE 3080 ***************************************************************************** 3081 */ 3082 3083static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo) 3084{ 3085 struct skd_device *skdev; 3086 u64 capacity; 3087 3088 skdev = bdev->bd_disk->private_data; 3089 3090 dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n", 3091 bdev->bd_disk->disk_name, current->comm); 3092 3093 if (skdev->read_cap_is_valid) { 3094 capacity = get_capacity(skdev->disk); 3095 geo->heads = 64; 3096 geo->sectors = 255; 3097 geo->cylinders = (capacity) / (255 * 64); 3098 3099 return 0; 3100 } 3101 return -EIO; 3102} 3103 3104static int skd_bdev_attach(struct device *parent, struct skd_device *skdev) 3105{ 3106 dev_dbg(&skdev->pdev->dev, "add_disk\n"); 3107 device_add_disk(parent, skdev->disk); 3108 return 0; 3109} 3110 3111static const struct block_device_operations skd_blockdev_ops = { 3112 .owner = THIS_MODULE, 3113 .getgeo = skd_bdev_getgeo, 3114}; 3115 3116/* 3117 ***************************************************************************** 3118 * PCIe DRIVER GLUE 3119 ***************************************************************************** 3120 */ 3121 3122static const struct pci_device_id skd_pci_tbl[] = { 3123 { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120, 3124 PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, 3125 { 0 } /* terminate list */ 3126}; 3127 3128MODULE_DEVICE_TABLE(pci, skd_pci_tbl); 3129 3130static char *skd_pci_info(struct skd_device *skdev, char *str) 3131{ 3132 int pcie_reg; 3133 3134 strcpy(str, "PCIe ("); 3135 pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP); 3136 3137 if (pcie_reg) { 3138 3139 char lwstr[6]; 3140 uint16_t pcie_lstat, lspeed, lwidth; 3141 3142 pcie_reg += 0x12; 3143 pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat); 3144 lspeed = pcie_lstat & (0xF); 3145 lwidth = (pcie_lstat & 0x3F0) >> 4; 3146 3147 if (lspeed == 1) 3148 strcat(str, "2.5GT/s "); 3149 else if (lspeed == 2) 3150 strcat(str, "5.0GT/s "); 3151 else 3152 strcat(str, "<unknown> "); 3153 snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth); 3154 strcat(str, lwstr); 3155 } 3156 return str; 3157} 3158 3159static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3160{ 3161 int i; 3162 int rc = 0; 3163 char pci_str[32]; 3164 struct skd_device *skdev; 3165 3166 dev_dbg(&pdev->dev, "vendor=%04X device=%04x\n", pdev->vendor, 3167 pdev->device); 3168 3169 rc = pci_enable_device(pdev); 3170 if (rc) 3171 return rc; 3172 rc = pci_request_regions(pdev, DRV_NAME); 3173 if (rc) 3174 goto err_out; 3175 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 3176 if (!rc) { 3177 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { 3178 dev_err(&pdev->dev, "consistent DMA mask error %d\n", 3179 rc); 3180 } 3181 } else { 3182 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3183 if (rc) { 3184 dev_err(&pdev->dev, "DMA mask error %d\n", rc); 3185 goto err_out_regions; 3186 } 3187 } 3188 3189 if (!skd_major) { 3190 rc = register_blkdev(0, DRV_NAME); 3191 if (rc < 0) 3192 goto err_out_regions; 3193 BUG_ON(!rc); 3194 skd_major = rc; 3195 } 3196 3197 skdev = skd_construct(pdev); 3198 if (skdev == NULL) { 3199 rc = -ENOMEM; 3200 goto err_out_regions; 3201 } 3202 3203 skd_pci_info(skdev, pci_str); 3204 dev_info(&pdev->dev, "%s 64bit\n", pci_str); 3205 3206 pci_set_master(pdev); 3207 rc = pci_enable_pcie_error_reporting(pdev); 3208 if (rc) { 3209 dev_err(&pdev->dev, 3210 "bad enable of PCIe error reporting rc=%d\n", rc); 3211 skdev->pcie_error_reporting_is_enabled = 0; 3212 } else 3213 skdev->pcie_error_reporting_is_enabled = 1; 3214 3215 pci_set_drvdata(pdev, skdev); 3216 3217 for (i = 0; i < SKD_MAX_BARS; i++) { 3218 skdev->mem_phys[i] = pci_resource_start(pdev, i); 3219 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i); 3220 skdev->mem_map[i] = ioremap(skdev->mem_phys[i], 3221 skdev->mem_size[i]); 3222 if (!skdev->mem_map[i]) { 3223 dev_err(&pdev->dev, 3224 "Unable to map adapter memory!\n"); 3225 rc = -ENODEV; 3226 goto err_out_iounmap; 3227 } 3228 dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n", 3229 skdev->mem_map[i], (uint64_t)skdev->mem_phys[i], 3230 skdev->mem_size[i]); 3231 } 3232 3233 rc = skd_acquire_irq(skdev); 3234 if (rc) { 3235 dev_err(&pdev->dev, "interrupt resource error %d\n", rc); 3236 goto err_out_iounmap; 3237 } 3238 3239 rc = skd_start_timer(skdev); 3240 if (rc) 3241 goto err_out_timer; 3242 3243 init_waitqueue_head(&skdev->waitq); 3244 3245 skd_start_device(skdev); 3246 3247 rc = wait_event_interruptible_timeout(skdev->waitq, 3248 (skdev->gendisk_on), 3249 (SKD_START_WAIT_SECONDS * HZ)); 3250 if (skdev->gendisk_on > 0) { 3251 /* device came on-line after reset */ 3252 skd_bdev_attach(&pdev->dev, skdev); 3253 rc = 0; 3254 } else { 3255 /* we timed out, something is wrong with the device, 3256 don't add the disk structure */ 3257 dev_err(&pdev->dev, "error: waiting for s1120 timed out %d!\n", 3258 rc); 3259 /* in case of no error; we timeout with ENXIO */ 3260 if (!rc) 3261 rc = -ENXIO; 3262 goto err_out_timer; 3263 } 3264 3265 return rc; 3266 3267err_out_timer: 3268 skd_stop_device(skdev); 3269 skd_release_irq(skdev); 3270 3271err_out_iounmap: 3272 for (i = 0; i < SKD_MAX_BARS; i++) 3273 if (skdev->mem_map[i]) 3274 iounmap(skdev->mem_map[i]); 3275 3276 if (skdev->pcie_error_reporting_is_enabled) 3277 pci_disable_pcie_error_reporting(pdev); 3278 3279 skd_destruct(skdev); 3280 3281err_out_regions: 3282 pci_release_regions(pdev); 3283 3284err_out: 3285 pci_disable_device(pdev); 3286 pci_set_drvdata(pdev, NULL); 3287 return rc; 3288} 3289 3290static void skd_pci_remove(struct pci_dev *pdev) 3291{ 3292 int i; 3293 struct skd_device *skdev; 3294 3295 skdev = pci_get_drvdata(pdev); 3296 if (!skdev) { 3297 dev_err(&pdev->dev, "no device data for PCI\n"); 3298 return; 3299 } 3300 skd_stop_device(skdev); 3301 skd_release_irq(skdev); 3302 3303 for (i = 0; i < SKD_MAX_BARS; i++) 3304 if (skdev->mem_map[i]) 3305 iounmap(skdev->mem_map[i]); 3306 3307 if (skdev->pcie_error_reporting_is_enabled) 3308 pci_disable_pcie_error_reporting(pdev); 3309 3310 skd_destruct(skdev); 3311 3312 pci_release_regions(pdev); 3313 pci_disable_device(pdev); 3314 pci_set_drvdata(pdev, NULL); 3315 3316 return; 3317} 3318 3319static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state) 3320{ 3321 int i; 3322 struct skd_device *skdev; 3323 3324 skdev = pci_get_drvdata(pdev); 3325 if (!skdev) { 3326 dev_err(&pdev->dev, "no device data for PCI\n"); 3327 return -EIO; 3328 } 3329 3330 skd_stop_device(skdev); 3331 3332 skd_release_irq(skdev); 3333 3334 for (i = 0; i < SKD_MAX_BARS; i++) 3335 if (skdev->mem_map[i]) 3336 iounmap(skdev->mem_map[i]); 3337 3338 if (skdev->pcie_error_reporting_is_enabled) 3339 pci_disable_pcie_error_reporting(pdev); 3340 3341 pci_release_regions(pdev); 3342 pci_save_state(pdev); 3343 pci_disable_device(pdev); 3344 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 3345 return 0; 3346} 3347 3348static int skd_pci_resume(struct pci_dev *pdev) 3349{ 3350 int i; 3351 int rc = 0; 3352 struct skd_device *skdev; 3353 3354 skdev = pci_get_drvdata(pdev); 3355 if (!skdev) { 3356 dev_err(&pdev->dev, "no device data for PCI\n"); 3357 return -1; 3358 } 3359 3360 pci_set_power_state(pdev, PCI_D0); 3361 pci_enable_wake(pdev, PCI_D0, 0); 3362 pci_restore_state(pdev); 3363 3364 rc = pci_enable_device(pdev); 3365 if (rc) 3366 return rc; 3367 rc = pci_request_regions(pdev, DRV_NAME); 3368 if (rc) 3369 goto err_out; 3370 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 3371 if (!rc) { 3372 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { 3373 3374 dev_err(&pdev->dev, "consistent DMA mask error %d\n", 3375 rc); 3376 } 3377 } else { 3378 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3379 if (rc) { 3380 3381 dev_err(&pdev->dev, "DMA mask error %d\n", rc); 3382 goto err_out_regions; 3383 } 3384 } 3385 3386 pci_set_master(pdev); 3387 rc = pci_enable_pcie_error_reporting(pdev); 3388 if (rc) { 3389 dev_err(&pdev->dev, 3390 "bad enable of PCIe error reporting rc=%d\n", rc); 3391 skdev->pcie_error_reporting_is_enabled = 0; 3392 } else 3393 skdev->pcie_error_reporting_is_enabled = 1; 3394 3395 for (i = 0; i < SKD_MAX_BARS; i++) { 3396 3397 skdev->mem_phys[i] = pci_resource_start(pdev, i); 3398 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i); 3399 skdev->mem_map[i] = ioremap(skdev->mem_phys[i], 3400 skdev->mem_size[i]); 3401 if (!skdev->mem_map[i]) { 3402 dev_err(&pdev->dev, "Unable to map adapter memory!\n"); 3403 rc = -ENODEV; 3404 goto err_out_iounmap; 3405 } 3406 dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n", 3407 skdev->mem_map[i], (uint64_t)skdev->mem_phys[i], 3408 skdev->mem_size[i]); 3409 } 3410 rc = skd_acquire_irq(skdev); 3411 if (rc) { 3412 dev_err(&pdev->dev, "interrupt resource error %d\n", rc); 3413 goto err_out_iounmap; 3414 } 3415 3416 rc = skd_start_timer(skdev); 3417 if (rc) 3418 goto err_out_timer; 3419 3420 init_waitqueue_head(&skdev->waitq); 3421 3422 skd_start_device(skdev); 3423 3424 return rc; 3425 3426err_out_timer: 3427 skd_stop_device(skdev); 3428 skd_release_irq(skdev); 3429 3430err_out_iounmap: 3431 for (i = 0; i < SKD_MAX_BARS; i++) 3432 if (skdev->mem_map[i]) 3433 iounmap(skdev->mem_map[i]); 3434 3435 if (skdev->pcie_error_reporting_is_enabled) 3436 pci_disable_pcie_error_reporting(pdev); 3437 3438err_out_regions: 3439 pci_release_regions(pdev); 3440 3441err_out: 3442 pci_disable_device(pdev); 3443 return rc; 3444} 3445 3446static void skd_pci_shutdown(struct pci_dev *pdev) 3447{ 3448 struct skd_device *skdev; 3449 3450 dev_err(&pdev->dev, "%s called\n", __func__); 3451 3452 skdev = pci_get_drvdata(pdev); 3453 if (!skdev) { 3454 dev_err(&pdev->dev, "no device data for PCI\n"); 3455 return; 3456 } 3457 3458 dev_err(&pdev->dev, "calling stop\n"); 3459 skd_stop_device(skdev); 3460} 3461 3462static struct pci_driver skd_driver = { 3463 .name = DRV_NAME, 3464 .id_table = skd_pci_tbl, 3465 .probe = skd_pci_probe, 3466 .remove = skd_pci_remove, 3467 .suspend = skd_pci_suspend, 3468 .resume = skd_pci_resume, 3469 .shutdown = skd_pci_shutdown, 3470}; 3471 3472/* 3473 ***************************************************************************** 3474 * LOGGING SUPPORT 3475 ***************************************************************************** 3476 */ 3477 3478const char *skd_drive_state_to_str(int state) 3479{ 3480 switch (state) { 3481 case FIT_SR_DRIVE_OFFLINE: 3482 return "OFFLINE"; 3483 case FIT_SR_DRIVE_INIT: 3484 return "INIT"; 3485 case FIT_SR_DRIVE_ONLINE: 3486 return "ONLINE"; 3487 case FIT_SR_DRIVE_BUSY: 3488 return "BUSY"; 3489 case FIT_SR_DRIVE_FAULT: 3490 return "FAULT"; 3491 case FIT_SR_DRIVE_DEGRADED: 3492 return "DEGRADED"; 3493 case FIT_SR_PCIE_LINK_DOWN: 3494 return "INK_DOWN"; 3495 case FIT_SR_DRIVE_SOFT_RESET: 3496 return "SOFT_RESET"; 3497 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD: 3498 return "NEED_FW"; 3499 case FIT_SR_DRIVE_INIT_FAULT: 3500 return "INIT_FAULT"; 3501 case FIT_SR_DRIVE_BUSY_SANITIZE: 3502 return "BUSY_SANITIZE"; 3503 case FIT_SR_DRIVE_BUSY_ERASE: 3504 return "BUSY_ERASE"; 3505 case FIT_SR_DRIVE_FW_BOOTING: 3506 return "FW_BOOTING"; 3507 default: 3508 return "???"; 3509 } 3510} 3511 3512const char *skd_skdev_state_to_str(enum skd_drvr_state state) 3513{ 3514 switch (state) { 3515 case SKD_DRVR_STATE_LOAD: 3516 return "LOAD"; 3517 case SKD_DRVR_STATE_IDLE: 3518 return "IDLE"; 3519 case SKD_DRVR_STATE_BUSY: 3520 return "BUSY"; 3521 case SKD_DRVR_STATE_STARTING: 3522 return "STARTING"; 3523 case SKD_DRVR_STATE_ONLINE: 3524 return "ONLINE"; 3525 case SKD_DRVR_STATE_PAUSING: 3526 return "PAUSING"; 3527 case SKD_DRVR_STATE_PAUSED: 3528 return "PAUSED"; 3529 case SKD_DRVR_STATE_RESTARTING: 3530 return "RESTARTING"; 3531 case SKD_DRVR_STATE_RESUMING: 3532 return "RESUMING"; 3533 case SKD_DRVR_STATE_STOPPING: 3534 return "STOPPING"; 3535 case SKD_DRVR_STATE_SYNCING: 3536 return "SYNCING"; 3537 case SKD_DRVR_STATE_FAULT: 3538 return "FAULT"; 3539 case SKD_DRVR_STATE_DISAPPEARED: 3540 return "DISAPPEARED"; 3541 case SKD_DRVR_STATE_BUSY_ERASE: 3542 return "BUSY_ERASE"; 3543 case SKD_DRVR_STATE_BUSY_SANITIZE: 3544 return "BUSY_SANITIZE"; 3545 case SKD_DRVR_STATE_BUSY_IMMINENT: 3546 return "BUSY_IMMINENT"; 3547 case SKD_DRVR_STATE_WAIT_BOOT: 3548 return "WAIT_BOOT"; 3549 3550 default: 3551 return "???"; 3552 } 3553} 3554 3555static const char *skd_skreq_state_to_str(enum skd_req_state state) 3556{ 3557 switch (state) { 3558 case SKD_REQ_STATE_IDLE: 3559 return "IDLE"; 3560 case SKD_REQ_STATE_SETUP: 3561 return "SETUP"; 3562 case SKD_REQ_STATE_BUSY: 3563 return "BUSY"; 3564 case SKD_REQ_STATE_COMPLETED: 3565 return "COMPLETED"; 3566 case SKD_REQ_STATE_TIMEOUT: 3567 return "TIMEOUT"; 3568 default: 3569 return "???"; 3570 } 3571} 3572 3573static void skd_log_skdev(struct skd_device *skdev, const char *event) 3574{ 3575 dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event); 3576 dev_dbg(&skdev->pdev->dev, " drive_state=%s(%d) driver_state=%s(%d)\n", 3577 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, 3578 skd_skdev_state_to_str(skdev->state), skdev->state); 3579 dev_dbg(&skdev->pdev->dev, " busy=%d limit=%d dev=%d lowat=%d\n", 3580 skd_in_flight(skdev), skdev->cur_max_queue_depth, 3581 skdev->dev_max_queue_depth, skdev->queue_low_water_mark); 3582 dev_dbg(&skdev->pdev->dev, " cycle=%d cycle_ix=%d\n", 3583 skdev->skcomp_cycle, skdev->skcomp_ix); 3584} 3585 3586static void skd_log_skreq(struct skd_device *skdev, 3587 struct skd_request_context *skreq, const char *event) 3588{ 3589 struct request *req = blk_mq_rq_from_pdu(skreq); 3590 u32 lba = blk_rq_pos(req); 3591 u32 count = blk_rq_sectors(req); 3592 3593 dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event); 3594 dev_dbg(&skdev->pdev->dev, " state=%s(%d) id=0x%04x fitmsg=0x%04x\n", 3595 skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id, 3596 skreq->fitmsg_id); 3597 dev_dbg(&skdev->pdev->dev, " sg_dir=%d n_sg=%d\n", 3598 skreq->data_dir, skreq->n_sg); 3599 3600 dev_dbg(&skdev->pdev->dev, 3601 "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba, lba, 3602 count, count, (int)rq_data_dir(req)); 3603} 3604 3605/* 3606 ***************************************************************************** 3607 * MODULE GLUE 3608 ***************************************************************************** 3609 */ 3610 3611static int __init skd_init(void) 3612{ 3613 BUILD_BUG_ON(sizeof(struct fit_completion_entry_v1) != 8); 3614 BUILD_BUG_ON(sizeof(struct fit_comp_error_info) != 32); 3615 BUILD_BUG_ON(sizeof(struct skd_command_header) != 16); 3616 BUILD_BUG_ON(sizeof(struct skd_scsi_request) != 32); 3617 BUILD_BUG_ON(sizeof(struct driver_inquiry_data) != 44); 3618 BUILD_BUG_ON(offsetof(struct skd_msg_buf, fmh) != 0); 3619 BUILD_BUG_ON(offsetof(struct skd_msg_buf, scsi) != 64); 3620 BUILD_BUG_ON(sizeof(struct skd_msg_buf) != SKD_N_FITMSG_BYTES); 3621 3622 switch (skd_isr_type) { 3623 case SKD_IRQ_LEGACY: 3624 case SKD_IRQ_MSI: 3625 case SKD_IRQ_MSIX: 3626 break; 3627 default: 3628 pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n", 3629 skd_isr_type, SKD_IRQ_DEFAULT); 3630 skd_isr_type = SKD_IRQ_DEFAULT; 3631 } 3632 3633 if (skd_max_queue_depth < 1 || 3634 skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) { 3635 pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n", 3636 skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT); 3637 skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT; 3638 } 3639 3640 if (skd_max_req_per_msg < 1 || 3641 skd_max_req_per_msg > SKD_MAX_REQ_PER_MSG) { 3642 pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n", 3643 skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT); 3644 skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT; 3645 } 3646 3647 if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) { 3648 pr_err(PFX "skd_sg_per_request %d invalid, re-set to %d\n", 3649 skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT); 3650 skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT; 3651 } 3652 3653 if (skd_dbg_level < 0 || skd_dbg_level > 2) { 3654 pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n", 3655 skd_dbg_level, 0); 3656 skd_dbg_level = 0; 3657 } 3658 3659 if (skd_isr_comp_limit < 0) { 3660 pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n", 3661 skd_isr_comp_limit, 0); 3662 skd_isr_comp_limit = 0; 3663 } 3664 3665 return pci_register_driver(&skd_driver); 3666} 3667 3668static void __exit skd_exit(void) 3669{ 3670 pci_unregister_driver(&skd_driver); 3671 3672 if (skd_major) 3673 unregister_blkdev(skd_major, DRV_NAME); 3674} 3675 3676module_init(skd_init); 3677module_exit(skd_exit);