/* Copyright 2012 STEC, Inc.
 *
 * This file is licensed under the terms of the 3-clause
 * BSD License (http://opensource.org/licenses/BSD-3-Clause)
 * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
 * at your option. Both licenses are also available in the LICENSE file
 * distributed with this project. This file may not be copied, modified,
 * or distributed except in accordance with those terms.
 * Gordoni Waidhofer <gwaidhofer@stec-inc.com>
 *	Initial Driver Design!
 * Thomas Swann <tswann@stec-inc.com>
 *	Interrupt handling.
 * Ramprasad Chinthekindi <rchinthekindi@stec-inc.com>
 *	biomode implementation.
 * Akhil Bhansali <abhansali@stec-inc.com>
 *	Added support for DISCARD / FLUSH and FUA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/hdreg.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/version.h>
#include <linux/err.h>
#include <linux/aer.h>
#include <linux/ctype.h>
#include <linux/wait.h>
#include <linux/uio.h>
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include "skd_s1120.h"

static int skd_dbg_level;
static int skd_isr_comp_limit = 4;

enum {
	STEC_LINK_2_5GTS = 0,
	STEC_LINK_5GTS = 1,
	STEC_LINK_8GTS = 2,
	STEC_LINK_UNKNOWN = 0xFF
};

enum {
	SKD_FLUSH_INITIALIZER,
	SKD_FLUSH_ZERO_SIZE_FIRST,
	SKD_FLUSH_DATA_SECOND,
};

#define SKD_ASSERT(expr) \
	do { \
		if (unlikely(!(expr))) { \
			pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
			       # expr, __FILE__, __func__, __LINE__); \
		} \
	} while (0)

#define DRV_NAME "skd"
#define DRV_VERSION "2.2.1"
#define DRV_BUILD_ID "0260"
#define PFX DRV_NAME ": "
#define DRV_BIN_VERSION 0x100
#define DRV_VER_COMPL "2.2.1." DRV_BUILD_ID

MODULE_AUTHOR("bug-reports: support@stec-inc.com");
MODULE_LICENSE("Dual BSD/GPL");

MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);

#define PCI_VENDOR_ID_STEC	0x1B39
#define PCI_DEVICE_ID_S1120	0x0001

#define SKD_FUA_NV		(1 << 1)
#define SKD_MINORS_PER_DEVICE	16

#define SKD_MAX_QUEUE_DEPTH	200u

#define SKD_PAUSE_TIMEOUT	(5 * 1000)

#define SKD_N_FITMSG_BYTES	(512u)

#define SKD_N_SPECIAL_CONTEXT	32u
#define SKD_N_SPECIAL_FITMSG_BYTES	(128u)

/* SG elements are 32 bytes, so we can make this 4096 and still be under the
 * 128KB limit. That allows 4096*4K = 16M xfer size.
 */
#define SKD_N_SG_PER_REQ_DEFAULT 256u
#define SKD_N_SG_PER_SPECIAL	256u

#define SKD_N_COMPLETION_ENTRY	256u
#define SKD_N_READ_CAP_BYTES	(8u)

#define SKD_N_INTERNAL_BYTES	(512u)

/* 5 bits of uniquifier, 0xF800 */
#define SKD_ID_INCR		(0x400)
#define SKD_ID_TABLE_MASK	(3u << 8u)
#define  SKD_ID_RW_REQUEST	(0u << 8u)
#define  SKD_ID_INTERNAL	(1u << 8u)
#define  SKD_ID_SPECIAL_REQUEST	(2u << 8u)
#define  SKD_ID_FIT_MSG		(3u << 8u)
#define SKD_ID_SLOT_MASK	0x00FFu
#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
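/*
 * Worked example (added for clarity, not in the original source): a
 * context id decomposes into a slot (SKD_ID_SLOT_MASK, bits 0-7), a
 * table type (SKD_ID_TABLE_MASK, bits 8-9), and high-order uniquifier
 * bits advanced by SKD_ID_INCR each time the context is reused. E.g.
 * slot 5 of the r/w request table starts as id 0x0005; after one
 * "id += SKD_ID_INCR" it is 0x0405, while
 * (id & SKD_ID_SLOT_AND_TABLE_MASK) still recovers 0x0005. Stale
 * completions are detectable because their uniquifier bits no longer
 * match the context's current id.
 */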

#define SKD_N_TIMEOUT_SLOT	4u
#define SKD_TIMEOUT_SLOT_MASK	3u

#define SKD_N_MAX_SECTORS	2048u

#define SKD_MAX_RETRIES		2u

#define SKD_TIMER_SECONDS(seconds) (seconds)
#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))

#define INQ_STD_NBYTES 36

enum skd_drvr_state {
	SKD_DRVR_STATE_LOAD,
	SKD_DRVR_STATE_IDLE,
	SKD_DRVR_STATE_BUSY,
	SKD_DRVR_STATE_STARTING,
	SKD_DRVR_STATE_ONLINE,
	SKD_DRVR_STATE_PAUSING,
	SKD_DRVR_STATE_PAUSED,
	SKD_DRVR_STATE_DRAINING_TIMEOUT,
	SKD_DRVR_STATE_RESTARTING,
	SKD_DRVR_STATE_RESUMING,
	SKD_DRVR_STATE_STOPPING,
	SKD_DRVR_STATE_FAULT,
	SKD_DRVR_STATE_DISAPPEARED,
	SKD_DRVR_STATE_PROTOCOL_MISMATCH,
	SKD_DRVR_STATE_BUSY_ERASE,
	SKD_DRVR_STATE_BUSY_SANITIZE,
	SKD_DRVR_STATE_BUSY_IMMINENT,
	SKD_DRVR_STATE_WAIT_BOOT,
	SKD_DRVR_STATE_SYNCING,
};

#define SKD_WAIT_BOOT_TIMO	SKD_TIMER_SECONDS(90u)
#define SKD_STARTING_TIMO	SKD_TIMER_SECONDS(8u)
#define SKD_RESTARTING_TIMO	SKD_TIMER_MINUTES(4u)
#define SKD_DRAINING_TIMO	SKD_TIMER_SECONDS(6u)
#define SKD_BUSY_TIMO		SKD_TIMER_MINUTES(20u)
#define SKD_STARTED_BUSY_TIMO	SKD_TIMER_SECONDS(60u)
#define SKD_START_WAIT_SECONDS	90u

enum skd_req_state {
	SKD_REQ_STATE_IDLE,
	SKD_REQ_STATE_SETUP,
	SKD_REQ_STATE_BUSY,
	SKD_REQ_STATE_COMPLETED,
	SKD_REQ_STATE_TIMEOUT,
	SKD_REQ_STATE_ABORTED,
};

enum skd_fit_msg_state {
	SKD_MSG_STATE_IDLE,
	SKD_MSG_STATE_BUSY,
};

enum skd_check_status_action {
	SKD_CHECK_STATUS_REPORT_GOOD,
	SKD_CHECK_STATUS_REPORT_SMART_ALERT,
	SKD_CHECK_STATUS_REQUEUE_REQUEST,
	SKD_CHECK_STATUS_REPORT_ERROR,
	SKD_CHECK_STATUS_BUSY_IMMINENT,
};

struct skd_fitmsg_context {
	enum skd_fit_msg_state state;

	struct skd_fitmsg_context *next;

	u32 id;
	u16 outstanding;

	u32 length;
	u32 offset;

	u8 *msg_buf;
	dma_addr_t mb_dma_address;
};

struct skd_request_context {
	enum skd_req_state state;

	struct skd_request_context *next;

	u16 id;
	u32 fitmsg_id;

	struct request *req;
	u8 flush_cmd;

	u32 timeout_stamp;
	u8 sg_data_dir;
	struct scatterlist *sg;
	u32 n_sg;
	u32 sg_byte_count;

	struct fit_sg_descriptor *sksg_list;
	dma_addr_t sksg_dma_address;

	struct fit_completion_entry_v1 completion;

	struct fit_comp_error_info err_info;

};
#define SKD_DATA_DIR_HOST_TO_CARD	1
#define SKD_DATA_DIR_CARD_TO_HOST	2

struct skd_special_context {
	struct skd_request_context req;

	u8 orphaned;

	void *data_buf;
	dma_addr_t db_dma_address;

	u8 *msg_buf;
	dma_addr_t mb_dma_address;
};
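/*
 * Summary (added for clarity, not in the original source): skd_sg_io
 * below carries the state of one SG_IO pass-through ioctl -- the user's
 * sg_io_hdr, a kernel copy of the CDB, the iovec list describing the
 * user data, and the special context the request rides on. no_iov_iov
 * backs the common single-buffer case so no allocation is needed when
 * iovec_count == 0.
 */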

struct skd_sg_io {
	fmode_t mode;
	void __user *argp;

	struct sg_io_hdr sg;

	u8 cdb[16];

	u32 dxfer_len;
	u32 iovcnt;
	struct sg_iovec *iov;
	struct sg_iovec no_iov_iov;

	struct skd_special_context *skspcl;
};

typedef enum skd_irq_type {
	SKD_IRQ_LEGACY,
	SKD_IRQ_MSI,
	SKD_IRQ_MSIX
} skd_irq_type_t;

#define SKD_MAX_BARS	2

struct skd_device {
	volatile void __iomem *mem_map[SKD_MAX_BARS];
	resource_size_t mem_phys[SKD_MAX_BARS];
	u32 mem_size[SKD_MAX_BARS];

	struct skd_msix_entry *msix_entries;

	struct pci_dev *pdev;
	int pcie_error_reporting_is_enabled;

	spinlock_t lock;
	struct gendisk *disk;
	struct request_queue *queue;
	struct device *class_dev;
	int gendisk_on;
	int sync_done;

	atomic_t device_count;
	u32 devno;
	u32 major;
	char name[32];
	char isr_name[30];

	enum skd_drvr_state state;
	u32 drive_state;

	u32 in_flight;
	u32 cur_max_queue_depth;
	u32 queue_low_water_mark;
	u32 dev_max_queue_depth;

	u32 num_fitmsg_context;
	u32 num_req_context;

	u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
	u32 timeout_stamp;
	struct skd_fitmsg_context *skmsg_free_list;
	struct skd_fitmsg_context *skmsg_table;

	struct skd_request_context *skreq_free_list;
	struct skd_request_context *skreq_table;

	struct skd_special_context *skspcl_free_list;
	struct skd_special_context *skspcl_table;

	struct skd_special_context internal_skspcl;
	u32 read_cap_blocksize;
	u32 read_cap_last_lba;
	int read_cap_is_valid;
	int inquiry_is_valid;
	u8 inq_serial_num[13];	/* 12 chars plus null term */
	u8 id_str[80];		/* holds a composite name (pci + sernum) */

	u8 skcomp_cycle;
	u32 skcomp_ix;
	struct fit_completion_entry_v1 *skcomp_table;
	struct fit_comp_error_info *skerr_table;
	dma_addr_t cq_dma_address;

	wait_queue_head_t waitq;

	struct timer_list timer;
	u32 timer_countdown;
	u32 timer_substate;

	int n_special;
	int sgs_per_request;
	u32 last_mtd;

	u32 proto_ver;

	int dbg_level;
	u32 connect_time_stamp;
	int connect_retries;
#define SKD_MAX_CONNECT_RETRIES 16
	u32 drive_jiffies;

	u32 timo_slot;

	struct work_struct completion_worker;
};

#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
#define SKD_READL(DEV, OFF)	  skd_reg_read32(DEV, OFF)
#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)

static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
{
	u32 val;

	if (likely(skdev->dbg_level < 2))
		return readl(skdev->mem_map[1] + offset);
	else {
		barrier();
		val = readl(skdev->mem_map[1] + offset);
		barrier();
		pr_debug("%s:%s:%d offset %x = %x\n",
			 skdev->name, __func__, __LINE__, offset, val);
		return val;
	}

}

static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
				   u32 offset)
{
	if (likely(skdev->dbg_level < 2)) {
		writel(val, skdev->mem_map[1] + offset);
		barrier();
	} else {
		barrier();
		writel(val, skdev->mem_map[1] + offset);
		barrier();
		pr_debug("%s:%s:%d offset %x = %x\n",
			 skdev->name, __func__, __LINE__, offset, val);
	}
}

static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
				   u32 offset)
{
	if (likely(skdev->dbg_level < 2)) {
		writeq(val, skdev->mem_map[1] + offset);
		barrier();
	} else {
		barrier();
		writeq(val, skdev->mem_map[1] + offset);
		barrier();
		pr_debug("%s:%s:%d offset %x = %016llx\n",
			 skdev->name, __func__, __LINE__, offset, val);
	}
}
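/*
 * Usage sketch (added, not in the original source): all chip access in
 * this driver goes through the wrappers above against BAR 1, e.g.
 *
 *	state = SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
 *	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
 *
 * At dbg_level >= 2 every access is bracketed by barriers and logged.
 */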

#define SKD_IRQ_DEFAULT SKD_IRQ_MSI
static int skd_isr_type = SKD_IRQ_DEFAULT;

module_param(skd_isr_type, int, 0444);
MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
		 " (0==legacy, 1==MSI, 2==MSI-X, default==1)");

#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;

module_param(skd_max_req_per_msg, int, 0444);
MODULE_PARM_DESC(skd_max_req_per_msg,
		 "Maximum SCSI requests packed in a single message."
		 " (1-14, default==1)");

#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;

module_param(skd_max_queue_depth, int, 0444);
MODULE_PARM_DESC(skd_max_queue_depth,
		 "Maximum SCSI requests issued to s1120."
		 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");

static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
module_param(skd_sgs_per_request, int, 0444);
MODULE_PARM_DESC(skd_sgs_per_request,
		 "Maximum SG elements per block request."
		 " (1-4096, default==256)");

static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
module_param(skd_max_pass_thru, int, 0444);
MODULE_PARM_DESC(skd_max_pass_thru,
		 "Maximum SCSI pass-thru at a time." " (1-50, default==32)");

module_param(skd_dbg_level, int, 0444);
MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");

module_param(skd_isr_comp_limit, int, 0444);
MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");

/* Major device number dynamically assigned. */
static u32 skd_major;

static void skd_destruct(struct skd_device *skdev);
static const struct block_device_operations skd_blockdev_ops;
static void skd_send_fitmsg(struct skd_device *skdev,
			    struct skd_fitmsg_context *skmsg);
static void skd_send_special_fitmsg(struct skd_device *skdev,
				    struct skd_special_context *skspcl);
static void skd_request_fn(struct request_queue *rq);
static void skd_end_request(struct skd_device *skdev,
			    struct skd_request_context *skreq, int error);
static int skd_preop_sg_list(struct skd_device *skdev,
			     struct skd_request_context *skreq);
static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq);

static void skd_restart_device(struct skd_device *skdev);
static int skd_quiesce_dev(struct skd_device *skdev);
static int skd_unquiesce_dev(struct skd_device *skdev);
static void skd_release_special(struct skd_device *skdev,
				struct skd_special_context *skspcl);
static void skd_disable_interrupts(struct skd_device *skdev);
static void skd_isr_fwstate(struct skd_device *skdev);
static void skd_recover_requests(struct skd_device *skdev, int requeue);
static void skd_soft_reset(struct skd_device *skdev);

static const char *skd_name(struct skd_device *skdev);
const char *skd_drive_state_to_str(int state);
const char *skd_skdev_state_to_str(enum skd_drvr_state state);
static void skd_log_skdev(struct skd_device *skdev, const char *event);
static void skd_log_skmsg(struct skd_device *skdev,
			  struct skd_fitmsg_context *skmsg, const char *event);
static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event);
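/*
 * Example (added, illustrative only): the tunables above are read-only
 * (0444) module parameters, so they are set at load time, e.g.
 *
 *	modprobe skd skd_isr_type=2 skd_max_queue_depth=128 \
 *		skd_max_req_per_msg=4
 *
 * and can be inspected afterwards under /sys/module/skd/parameters/.
 */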

/*
 *****************************************************************************
 * READ/WRITE REQUESTS
 *****************************************************************************
 */
static void skd_fail_all_pending(struct skd_device *skdev)
{
	struct request_queue *q = skdev->queue;
	struct request *req;

	for (;; ) {
		req = blk_peek_request(q);
		if (req == NULL)
			break;
		blk_start_request(req);
		__blk_end_request_all(req, -EIO);
	}
}

static void
skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
		int data_dir, unsigned lba,
		unsigned count)
{
	if (data_dir == READ)
		scsi_req->cdb[0] = 0x28;
	else
		scsi_req->cdb[0] = 0x2a;

	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
	scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
	scsi_req->cdb[4] = (lba & 0xff00) >> 8;
	scsi_req->cdb[5] = (lba & 0xff);
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = (count & 0xff00) >> 8;
	scsi_req->cdb[8] = count & 0xff;
	scsi_req->cdb[9] = 0;
}

static void
skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
			    struct skd_request_context *skreq)
{
	skreq->flush_cmd = 1;

	scsi_req->cdb[0] = 0x35;
	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = 0;
	scsi_req->cdb[3] = 0;
	scsi_req->cdb[4] = 0;
	scsi_req->cdb[5] = 0;
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = 0;
	scsi_req->cdb[8] = 0;
	scsi_req->cdb[9] = 0;
}
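/*
 * Worked example (added, not in the original source): 0x28/0x2a are the
 * SCSI READ(10)/WRITE(10) opcodes and 0x35 is SYNCHRONIZE CACHE(10).
 * For a read at lba=0x12345678 with count=8 sectors the CDB built above
 * is
 *
 *	28 00 12 34 56 78 00 00 08 00
 *
 * i.e. a big-endian 32-bit LBA in bytes 2-5 and a big-endian 16-bit
 * transfer length in bytes 7-8.
 */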

static void skd_request_fn_not_online(struct request_queue *q);

static void skd_request_fn(struct request_queue *q)
{
	struct skd_device *skdev = q->queuedata;
	struct skd_fitmsg_context *skmsg = NULL;
	struct fit_msg_hdr *fmh = NULL;
	struct skd_request_context *skreq;
	struct request *req = NULL;
	struct skd_scsi_request *scsi_req;
	unsigned long io_flags;
	int error;
	u32 lba;
	u32 count;
	int data_dir;
	u32 be_lba;
	u32 be_count;
	u64 be_dmaa;
	u64 cmdctxt;
	u32 timo_slot;
	void *cmd_ptr;
	int flush, fua;

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		skd_request_fn_not_online(q);
		return;
	}

	if (blk_queue_stopped(skdev->queue)) {
		if (skdev->skmsg_free_list == NULL ||
		    skdev->skreq_free_list == NULL ||
		    skdev->in_flight >= skdev->queue_low_water_mark)
			/* There is still some kind of shortage */
			return;

		queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
	}

	/*
	 * Stop conditions:
	 *  - There are no more native requests
	 *  - There are already the maximum number of requests in progress
	 *  - There are no more skd_request_context entries
	 *  - There are no more FIT msg buffers
	 */
	for (;; ) {

		flush = fua = 0;

		req = blk_peek_request(q);

		/* Are there any native requests to start? */
		if (req == NULL)
			break;

		lba = (u32)blk_rq_pos(req);
		count = blk_rq_sectors(req);
		data_dir = rq_data_dir(req);
		io_flags = req->cmd_flags;

		if (req_op(req) == REQ_OP_FLUSH)
			flush++;

		if (io_flags & REQ_FUA)
			fua++;

		pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
			 "count=%u(0x%x) dir=%d\n",
			 skdev->name, __func__, __LINE__,
			 req, lba, lba, count, count, data_dir);

		/* At this point we know there is a request */

		/* Are too many requests already in progress? */
		if (skdev->in_flight >= skdev->cur_max_queue_depth) {
			pr_debug("%s:%s:%d qdepth %d, limit %d\n",
				 skdev->name, __func__, __LINE__,
				 skdev->in_flight, skdev->cur_max_queue_depth);
			break;
		}

		/* Is a skd_request_context available? */
		skreq = skdev->skreq_free_list;
		if (skreq == NULL) {
			pr_debug("%s:%s:%d Out of req=%p\n",
				 skdev->name, __func__, __LINE__, q);
			break;
		}
		SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
		SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);

		/* Now we check to see if we can get a fit msg */
		if (skmsg == NULL) {
			if (skdev->skmsg_free_list == NULL) {
				pr_debug("%s:%s:%d Out of msg\n",
					 skdev->name, __func__, __LINE__);
				break;
			}
		}

		skreq->flush_cmd = 0;
		skreq->n_sg = 0;
		skreq->sg_byte_count = 0;

		/*
		 * OK to now dequeue request from q.
		 *
		 * At this point we are committed to either start or reject
		 * the native request. Note that skd_request_context is
		 * available but is still at the head of the free list.
		 */
		blk_start_request(req);
		skreq->req = req;
		skreq->fitmsg_id = 0;

		/* Either a FIT msg is in progress or we have to start one. */
		if (skmsg == NULL) {
			/* Are there any FIT msg buffers available? */
			skmsg = skdev->skmsg_free_list;
			if (skmsg == NULL) {
				pr_debug("%s:%s:%d Out of msg skdev=%p\n",
					 skdev->name, __func__, __LINE__,
					 skdev);
				break;
			}
			SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
			SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);

			skdev->skmsg_free_list = skmsg->next;

			skmsg->state = SKD_MSG_STATE_BUSY;
			skmsg->id += SKD_ID_INCR;

			/* Initialize the FIT msg header */
			fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
			memset(fmh, 0, sizeof(*fmh));
			fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
			skmsg->length = sizeof(*fmh);
		}

		skreq->fitmsg_id = skmsg->id;

		/*
		 * Note that a FIT msg may have just been started
		 * but contains no SoFIT requests yet.
		 */

		/*
		 * Transcode the request, checking as we go. The outcome of
		 * the transcoding is represented by the error variable.
		 */
		cmd_ptr = &skmsg->msg_buf[skmsg->length];
		memset(cmd_ptr, 0, 32);

		be_lba = cpu_to_be32(lba);
		be_count = cpu_to_be32(count);
		be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
		cmdctxt = skreq->id + SKD_ID_INCR;

		scsi_req = cmd_ptr;
		scsi_req->hdr.tag = cmdctxt;
		scsi_req->hdr.sg_list_dma_address = be_dmaa;

		if (data_dir == READ)
			skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
		else
			skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;

		if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
			skd_prep_zerosize_flush_cdb(scsi_req, skreq);
			SKD_ASSERT(skreq->flush_cmd == 1);

		} else {
			skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
		}

		if (fua)
			scsi_req->cdb[1] |= SKD_FUA_NV;

		if (!req->bio)
			goto skip_sg;

		error = skd_preop_sg_list(skdev, skreq);

		if (error != 0) {
			/*
			 * Complete the native request with error.
			 * Note that the request context is still at the
			 * head of the free list, and that the SoFIT request
			 * was encoded into the FIT msg buffer but the FIT
			 * msg length has not been updated. In short, the
			 * only resource that has been allocated but might
			 * not be used is that the FIT msg could be empty.
			 */
			pr_debug("%s:%s:%d error Out\n",
				 skdev->name, __func__, __LINE__);
			skd_end_request(skdev, skreq, error);
			continue;
		}

skip_sg:
		scsi_req->hdr.sg_list_len_bytes =
			cpu_to_be32(skreq->sg_byte_count);

		/* Complete resource allocations. */
		skdev->skreq_free_list = skreq->next;
		skreq->state = SKD_REQ_STATE_BUSY;
		skreq->id += SKD_ID_INCR;

		skmsg->length += sizeof(struct skd_scsi_request);
		fmh->num_protocol_cmds_coalesced++;

		/*
		 * Update the active request counts.
		 * Capture the timeout timestamp.
		 */
		skreq->timeout_stamp = skdev->timeout_stamp;
		timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
		skdev->timeout_slot[timo_slot]++;
		skdev->in_flight++;
		pr_debug("%s:%s:%d req=0x%x busy=%d\n",
			 skdev->name, __func__, __LINE__,
			 skreq->id, skdev->in_flight);

		/*
		 * If the FIT msg buffer is full send it.
		 */
		if (skmsg->length >= SKD_N_FITMSG_BYTES ||
		    fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
			skd_send_fitmsg(skdev, skmsg);
			skmsg = NULL;
			fmh = NULL;
		}
	}

	/*
	 * Is a FIT msg in progress? If it is empty put the buffer back
	 * on the free list. If it is non-empty send what we got.
	 * This minimizes latency when there are fewer requests than
	 * what fits in a FIT msg.
	 */
	if (skmsg != NULL) {
		/* Bigger than just a FIT msg header? */
		if (skmsg->length > sizeof(struct fit_msg_hdr)) {
			pr_debug("%s:%s:%d sending msg=%p, len %d\n",
				 skdev->name, __func__, __LINE__,
				 skmsg, skmsg->length);
			skd_send_fitmsg(skdev, skmsg);
		} else {
			/*
			 * The FIT msg is empty. It means we got started
			 * on the msg, but the requests were rejected.
			 */
			skmsg->state = SKD_MSG_STATE_IDLE;
			skmsg->id += SKD_ID_INCR;
			skmsg->next = skdev->skmsg_free_list;
			skdev->skmsg_free_list = skmsg;
		}
		skmsg = NULL;
		fmh = NULL;
	}

	/*
	 * If req is non-NULL it means there is something to do but
	 * we are out of a resource.
	 */
	if (req)
		blk_stop_queue(skdev->queue);
}

static void skd_end_request(struct skd_device *skdev,
			    struct skd_request_context *skreq, int error)
{
	if (unlikely(error)) {
		struct request *req = skreq->req;
		char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
		u32 lba = (u32)blk_rq_pos(req);
		u32 count = blk_rq_sectors(req);

		pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
		       skd_name(skdev), cmd, lba, count, skreq->id);
	} else
		pr_debug("%s:%s:%d id=0x%x error=%d\n",
			 skdev->name, __func__, __LINE__, skreq->id, error);

	__blk_end_request_all(skreq->req, error);
}
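/*
 * Note (added for clarity, not in the original source): sksg_list is a
 * pre-allocated array of fit_sg_descriptor entries that the device
 * fetches by DMA at sksg_dma_address. skd_preop_sg_list() below maps
 * the block layer scatterlist and copies each segment's bus address and
 * length into that array; entries chain through next_desc_ptr, and the
 * final entry is marked FIT_SGD_CONTROL_LAST.
 */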

static int skd_preop_sg_list(struct skd_device *skdev,
			     struct skd_request_context *skreq)
{
	struct request *req = skreq->req;
	int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
	int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
	struct scatterlist *sg = &skreq->sg[0];
	int n_sg;
	int i;

	skreq->sg_byte_count = 0;

	/* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
		     skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */

	n_sg = blk_rq_map_sg(skdev->queue, req, sg);
	if (n_sg <= 0)
		return -EINVAL;

	/*
	 * Map scatterlist to PCI bus addresses.
	 * Note PCI might change the number of entries.
	 */
	n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
	if (n_sg <= 0)
		return -EINVAL;

	SKD_ASSERT(n_sg <= skdev->sgs_per_request);

	skreq->n_sg = n_sg;

	for (i = 0; i < n_sg; i++) {
		struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
		u32 cnt = sg_dma_len(&sg[i]);
		uint64_t dma_addr = sg_dma_address(&sg[i]);

		sgd->control = FIT_SGD_CONTROL_NOT_LAST;
		sgd->byte_count = cnt;
		skreq->sg_byte_count += cnt;
		sgd->host_side_addr = dma_addr;
		sgd->dev_side_addr = 0;
	}

	skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
	skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;

	if (unlikely(skdev->dbg_level > 1)) {
		pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
			 skdev->name, __func__, __LINE__,
			 skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
		for (i = 0; i < n_sg; i++) {
			struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
			pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
				 "addr=0x%llx next=0x%llx\n",
				 skdev->name, __func__, __LINE__,
				 i, sgd->byte_count, sgd->control,
				 sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	return 0;
}

static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq)
{
	int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
	int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;

	/*
	 * restore the next ptr for next IO request so we
	 * don't have to set it every time.
	 */
	skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
		skreq->sksg_dma_address +
		((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
	pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
}

static void skd_request_fn_not_online(struct request_queue *q)
{
	struct skd_device *skdev = q->queuedata;
	int error;

	SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);

	skd_log_skdev(skdev, "req_not_online");
	switch (skdev->state) {
	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_WAIT_BOOT:
		/* In case of starting, we haven't started the queue,
		 * so we can't get here... but requests are
		 * possibly hanging out waiting for us because we
		 * reported the dev/skd0 already. They'll wait
		 * forever if connect doesn't complete.
		 * What to do??? delay dev/skd0 ??
		 */
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		return;

	case SKD_DRVR_STATE_BUSY_SANITIZE:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		error = -EIO;
		break;
	}

	/* If we get here, terminate all pending block requests
	 * with EIO and any scsi pass thru with appropriate sense
	 */

	skd_fail_all_pending(skdev);
}

/*
 *****************************************************************************
 * TIMER
 *****************************************************************************
 */

static void skd_timer_tick_not_online(struct skd_device *skdev);

static void skd_timer_tick(ulong arg)
{
	struct skd_device *skdev = (struct skd_device *)arg;

	u32 timo_slot;
	u32 overdue_timestamp;
	unsigned long reqflags;
	u32 state;

	if (skdev->state == SKD_DRVR_STATE_FAULT)
		/* The driver has declared fault, and we want it to
		 * stay that way until driver is reloaded.
		 */
		return;

	spin_lock_irqsave(&skdev->lock, reqflags);

	state = SKD_READL(skdev, FIT_STATUS);
	state &= FIT_SR_DRIVE_STATE_MASK;
	if (state != skdev->drive_state)
		skd_isr_fwstate(skdev);

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		skd_timer_tick_not_online(skdev);
		goto timer_func_out;
	}
	skdev->timeout_stamp++;
	timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;

	/*
	 * All requests that happened during the previous use of
	 * this slot should be done by now. The previous use was
	 * over 7 seconds ago.
	 */
	if (skdev->timeout_slot[timo_slot] == 0)
		goto timer_func_out;

	/* Something is overdue */
	overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT;

	pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n",
		 skdev->name, __func__, __LINE__,
		 skdev->timeout_slot[timo_slot], skdev->in_flight);
	pr_err("(%s): Overdue IOs (%d), busy %d\n",
	       skd_name(skdev), skdev->timeout_slot[timo_slot],
	       skdev->in_flight);

	skdev->timer_countdown = SKD_DRAINING_TIMO;
	skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
	skdev->timo_slot = timo_slot;
	blk_stop_queue(skdev->queue);

timer_func_out:
	mod_timer(&skdev->timer, (jiffies + HZ));

	spin_unlock_irqrestore(&skdev->lock, reqflags);
}
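/*
 * Worked example (added, not in the original source): the timer fires
 * once per second and advances timeout_stamp. Each request is stamped
 * on submission, so it is counted in slot (stamp & SKD_TIMEOUT_SLOT_MASK).
 * When the tick wraps back to the same slot, SKD_N_TIMEOUT_SLOT ticks
 * later, a non-zero counter means at least one request from the earlier
 * pass has not completed, and the drain/restart path above is taken.
 */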

static void skd_timer_tick_not_online(struct skd_device *skdev)
{
	switch (skdev->state) {
	case SKD_DRVR_STATE_IDLE:
	case SKD_DRVR_STATE_LOAD:
		break;
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n",
			 skdev->name, __func__, __LINE__,
			 skdev->drive_state, skdev->state);
		/* If we've been in sanitize for 3 seconds, we figure we're not
		 * going to get anymore completions, so recover requests now
		 */
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		skd_recover_requests(skdev, 0);
		break;

	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
		pr_debug("%s:%s:%d busy[%x], countdown=%d\n",
			 skdev->name, __func__, __LINE__,
			 skdev->state, skdev->timer_countdown);
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.",
			 skdev->name, __func__, __LINE__,
			 skdev->state, skdev->timer_countdown);
		skd_restart_device(skdev);
		break;

	case SKD_DRVR_STATE_WAIT_BOOT:
	case SKD_DRVR_STATE_STARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/* For now, we fault the drive. Could attempt resets to
		 * recover at some point. */
		skdev->state = SKD_DRVR_STATE_FAULT;

		pr_err("(%s): DriveFault Connect Timeout (%x)\n",
		       skd_name(skdev), skdev->drive_state);

		/* start the queue so we can respond with error to requests */
		/* wakeup anyone waiting for startup complete */
		blk_start_queue(skdev->queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case SKD_DRVR_STATE_ONLINE:
		/* shouldn't get here. */
		break;

	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
		break;

	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		pr_debug("%s:%s:%d "
			 "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
			 skdev->name, __func__, __LINE__,
			 skdev->timo_slot,
			 skdev->timer_countdown,
			 skdev->in_flight,
			 skdev->timeout_slot[skdev->timo_slot]);
		/* if the slot has cleared we can let the I/O continue */
		if (skdev->timeout_slot[skdev->timo_slot] == 0) {
			pr_debug("%s:%s:%d Slot drained, starting queue.\n",
				 skdev->name, __func__, __LINE__);
			skdev->state = SKD_DRVR_STATE_ONLINE;
			blk_start_queue(skdev->queue);
			return;
		}
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		skd_restart_device(skdev);
		break;

	case SKD_DRVR_STATE_RESTARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/* For now, we fault the drive. Could attempt resets to
		 * recover at some point. */
		skdev->state = SKD_DRVR_STATE_FAULT;
		pr_err("(%s): DriveFault Reconnect Timeout (%x)\n",
		       skd_name(skdev), skdev->drive_state);

		/*
		 * Recovering does two things:
		 * 1. completes IO with error
		 * 2. reclaims dma resources
		 * When is it safe to recover requests?
		 * - if the drive state is faulted
		 * - if the state is still soft reset after our timeout
		 * - if the drive registers are dead (state = FF)
		 * If it is "unsafe", we still need to recover, so we will
		 * disable pci bus mastering and disable our interrupts.
		 */

		if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
		    (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
		    (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
			/* It never came out of soft reset. Try to
			 * recover the requests and then let them
			 * fail. This is to mitigate hung processes.
			 */
			skd_recover_requests(skdev, 0);
		else {
			pr_err("(%s): Disable BusMaster (%x)\n",
			       skd_name(skdev), skdev->drive_state);
			pci_disable_device(skdev->pdev);
			skd_disable_interrupts(skdev);
			skd_recover_requests(skdev, 0);
		}

		/* start the queue so we can respond with error to requests */
		/* wakeup anyone waiting for startup complete */
		blk_start_queue(skdev->queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case SKD_DRVR_STATE_RESUMING:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		break;
	}
}

static int skd_start_timer(struct skd_device *skdev)
{
	int rc;

	init_timer(&skdev->timer);
	setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);

	rc = mod_timer(&skdev->timer, (jiffies + HZ));
	if (rc)
		pr_err("%s: failed to start timer %d\n",
		       __func__, rc);
	return rc;
}

static void skd_kill_timer(struct skd_device *skdev)
{
	del_timer_sync(&skdev->timer);
}

/*
 *****************************************************************************
 * IOCTL
 *****************************************************************************
 */
static int skd_ioctl_sg_io(struct skd_device *skdev,
			   fmode_t mode, void __user *argp);
static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
					struct skd_sg_io *sksgio);
static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
				   struct skd_sg_io *sksgio);
static int skd_sg_io_prep_buffering(struct skd_device *skdev,
				    struct skd_sg_io *sksgio);
static int skd_sg_io_copy_buffer(struct skd_device *skdev,
				 struct skd_sg_io *sksgio, int dxfer_dir);
static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
				 struct skd_sg_io *sksgio);
static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
static int skd_sg_io_release_skspcl(struct skd_device *skdev,
				    struct skd_sg_io *sksgio);
static int skd_sg_io_put_status(struct skd_device *skdev,
				struct skd_sg_io *sksgio);

static void skd_complete_special(struct skd_device *skdev,
				 volatile struct fit_completion_entry_v1
				 *skcomp,
				 volatile struct fit_comp_error_info *skerr,
				 struct skd_special_context *skspcl);
%d\n", 1242 skdev->name, __func__, __LINE__, disk->disk_name, rc); 1243 return rc; 1244} 1245 1246static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode, 1247 void __user *argp) 1248{ 1249 int rc; 1250 struct skd_sg_io sksgio; 1251 1252 memset(&sksgio, 0, sizeof(sksgio)); 1253 sksgio.mode = mode; 1254 sksgio.argp = argp; 1255 sksgio.iov = &sksgio.no_iov_iov; 1256 1257 switch (skdev->state) { 1258 case SKD_DRVR_STATE_ONLINE: 1259 case SKD_DRVR_STATE_BUSY_IMMINENT: 1260 break; 1261 1262 default: 1263 pr_debug("%s:%s:%d drive not online\n", 1264 skdev->name, __func__, __LINE__); 1265 rc = -ENXIO; 1266 goto out; 1267 } 1268 1269 rc = skd_sg_io_get_and_check_args(skdev, &sksgio); 1270 if (rc) 1271 goto out; 1272 1273 rc = skd_sg_io_obtain_skspcl(skdev, &sksgio); 1274 if (rc) 1275 goto out; 1276 1277 rc = skd_sg_io_prep_buffering(skdev, &sksgio); 1278 if (rc) 1279 goto out; 1280 1281 rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV); 1282 if (rc) 1283 goto out; 1284 1285 rc = skd_sg_io_send_fitmsg(skdev, &sksgio); 1286 if (rc) 1287 goto out; 1288 1289 rc = skd_sg_io_await(skdev, &sksgio); 1290 if (rc) 1291 goto out; 1292 1293 rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV); 1294 if (rc) 1295 goto out; 1296 1297 rc = skd_sg_io_put_status(skdev, &sksgio); 1298 if (rc) 1299 goto out; 1300 1301 rc = 0; 1302 1303out: 1304 skd_sg_io_release_skspcl(skdev, &sksgio); 1305 1306 if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov) 1307 kfree(sksgio.iov); 1308 return rc; 1309} 1310 1311static int skd_sg_io_get_and_check_args(struct skd_device *skdev, 1312 struct skd_sg_io *sksgio) 1313{ 1314 struct sg_io_hdr *sgp = &sksgio->sg; 1315 int i, acc; 1316 1317 if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) { 1318 pr_debug("%s:%s:%d access sg failed %p\n", 1319 skdev->name, __func__, __LINE__, sksgio->argp); 1320 return -EFAULT; 1321 } 1322 1323 if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) { 1324 pr_debug("%s:%s:%d copy_from_user sg failed %p\n", 1325 skdev->name, __func__, __LINE__, sksgio->argp); 1326 return -EFAULT; 1327 } 1328 1329 if (sgp->interface_id != SG_INTERFACE_ID_ORIG) { 1330 pr_debug("%s:%s:%d interface_id invalid 0x%x\n", 1331 skdev->name, __func__, __LINE__, sgp->interface_id); 1332 return -EINVAL; 1333 } 1334 1335 if (sgp->cmd_len > sizeof(sksgio->cdb)) { 1336 pr_debug("%s:%s:%d cmd_len invalid %d\n", 1337 skdev->name, __func__, __LINE__, sgp->cmd_len); 1338 return -EINVAL; 1339 } 1340 1341 if (sgp->iovec_count > 256) { 1342 pr_debug("%s:%s:%d iovec_count invalid %d\n", 1343 skdev->name, __func__, __LINE__, sgp->iovec_count); 1344 return -EINVAL; 1345 } 1346 1347 if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) { 1348 pr_debug("%s:%s:%d dxfer_len invalid %d\n", 1349 skdev->name, __func__, __LINE__, sgp->dxfer_len); 1350 return -EINVAL; 1351 } 1352 1353 switch (sgp->dxfer_direction) { 1354 case SG_DXFER_NONE: 1355 acc = -1; 1356 break; 1357 1358 case SG_DXFER_TO_DEV: 1359 acc = VERIFY_READ; 1360 break; 1361 1362 case SG_DXFER_FROM_DEV: 1363 case SG_DXFER_TO_FROM_DEV: 1364 acc = VERIFY_WRITE; 1365 break; 1366 1367 default: 1368 pr_debug("%s:%s:%d dxfer_dir invalid %d\n", 1369 skdev->name, __func__, __LINE__, sgp->dxfer_direction); 1370 return -EINVAL; 1371 } 1372 1373 if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) { 1374 pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n", 1375 skdev->name, __func__, __LINE__, sgp->cmdp); 1376 return -EFAULT; 1377 } 1378 1379 if (sgp->mx_sb_len != 0) { 1380 if 

static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
					struct skd_sg_io *sksgio)
{
	struct sg_io_hdr *sgp = &sksgio->sg;
	int i, acc;

	if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
		pr_debug("%s:%s:%d access sg failed %p\n",
			 skdev->name, __func__, __LINE__, sksgio->argp);
		return -EFAULT;
	}

	if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
		pr_debug("%s:%s:%d copy_from_user sg failed %p\n",
			 skdev->name, __func__, __LINE__, sksgio->argp);
		return -EFAULT;
	}

	if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
		pr_debug("%s:%s:%d interface_id invalid 0x%x\n",
			 skdev->name, __func__, __LINE__, sgp->interface_id);
		return -EINVAL;
	}

	if (sgp->cmd_len > sizeof(sksgio->cdb)) {
		pr_debug("%s:%s:%d cmd_len invalid %d\n",
			 skdev->name, __func__, __LINE__, sgp->cmd_len);
		return -EINVAL;
	}

	if (sgp->iovec_count > 256) {
		pr_debug("%s:%s:%d iovec_count invalid %d\n",
			 skdev->name, __func__, __LINE__, sgp->iovec_count);
		return -EINVAL;
	}

	if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
		pr_debug("%s:%s:%d dxfer_len invalid %d\n",
			 skdev->name, __func__, __LINE__, sgp->dxfer_len);
		return -EINVAL;
	}

	switch (sgp->dxfer_direction) {
	case SG_DXFER_NONE:
		acc = -1;
		break;

	case SG_DXFER_TO_DEV:
		acc = VERIFY_READ;
		break;

	case SG_DXFER_FROM_DEV:
	case SG_DXFER_TO_FROM_DEV:
		acc = VERIFY_WRITE;
		break;

	default:
		pr_debug("%s:%s:%d dxfer_dir invalid %d\n",
			 skdev->name, __func__, __LINE__, sgp->dxfer_direction);
		return -EINVAL;
	}

	if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
		pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n",
			 skdev->name, __func__, __LINE__, sgp->cmdp);
		return -EFAULT;
	}

	if (sgp->mx_sb_len != 0) {
		if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
			pr_debug("%s:%s:%d access sbp failed %p\n",
				 skdev->name, __func__, __LINE__, sgp->sbp);
			return -EFAULT;
		}
	}

	if (sgp->iovec_count == 0) {
		sksgio->iov[0].iov_base = sgp->dxferp;
		sksgio->iov[0].iov_len = sgp->dxfer_len;
		sksgio->iovcnt = 1;
		sksgio->dxfer_len = sgp->dxfer_len;
	} else {
		struct sg_iovec *iov;
		uint nbytes = sizeof(*iov) * sgp->iovec_count;
		size_t iov_data_len;

		iov = kmalloc(nbytes, GFP_KERNEL);
		if (iov == NULL) {
			pr_debug("%s:%s:%d alloc iovec failed %d\n",
				 skdev->name, __func__, __LINE__,
				 sgp->iovec_count);
			return -ENOMEM;
		}
		sksgio->iov = iov;
		sksgio->iovcnt = sgp->iovec_count;

		if (copy_from_user(iov, sgp->dxferp, nbytes)) {
			pr_debug("%s:%s:%d copy_from_user iovec failed %p\n",
				 skdev->name, __func__, __LINE__, sgp->dxferp);
			return -EFAULT;
		}

		/*
		 * Sum up the vecs, making sure they don't overflow
		 */
		iov_data_len = 0;
		for (i = 0; i < sgp->iovec_count; i++) {
			if (iov_data_len + iov[i].iov_len < iov_data_len)
				return -EINVAL;
			iov_data_len += iov[i].iov_len;
		}

		/* SG_IO howto says that the shorter of the two wins */
		if (sgp->dxfer_len < iov_data_len) {
			sksgio->iovcnt = iov_shorten((struct iovec *)iov,
						     sgp->iovec_count,
						     sgp->dxfer_len);
			sksgio->dxfer_len = sgp->dxfer_len;
		} else
			sksgio->dxfer_len = iov_data_len;
	}

	if (sgp->dxfer_direction != SG_DXFER_NONE) {
		struct sg_iovec *iov = sksgio->iov;
		for (i = 0; i < sksgio->iovcnt; i++, iov++) {
			if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
				pr_debug("%s:%s:%d access data failed %p/%d\n",
					 skdev->name, __func__, __LINE__,
					 iov->iov_base, (int)iov->iov_len);
				return -EFAULT;
			}
		}
	}

	return 0;
}
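/*
 * Example (added, not in the original source): if userspace passes
 * dxfer_len=4096 with three iovecs of 2048 bytes each (6144 total), the
 * "shorter of the two wins" rule above uses iov_shorten() to trim the
 * list down to the first two iovecs and the transfer length stays 4096;
 * in the opposite case the iovec total becomes the effective length.
 */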

static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
				   struct skd_sg_io *sksgio)
{
	struct skd_special_context *skspcl = NULL;
	int rc;

	for (;;) {
		ulong flags;

		spin_lock_irqsave(&skdev->lock, flags);
		skspcl = skdev->skspcl_free_list;
		if (skspcl != NULL) {
			skdev->skspcl_free_list =
				(struct skd_special_context *)skspcl->req.next;
			skspcl->req.id += SKD_ID_INCR;
			skspcl->req.state = SKD_REQ_STATE_SETUP;
			skspcl->orphaned = 0;
			skspcl->req.n_sg = 0;
		}
		spin_unlock_irqrestore(&skdev->lock, flags);

		if (skspcl != NULL) {
			rc = 0;
			break;
		}

		pr_debug("%s:%s:%d blocking\n",
			 skdev->name, __func__, __LINE__);

		rc = wait_event_interruptible_timeout(
				skdev->waitq,
				(skdev->skspcl_free_list != NULL),
				msecs_to_jiffies(sksgio->sg.timeout));

		pr_debug("%s:%s:%d unblocking, rc=%d\n",
			 skdev->name, __func__, __LINE__, rc);

		if (rc <= 0) {
			if (rc == 0)
				rc = -ETIMEDOUT;
			else
				rc = -EINTR;
			break;
		}
		/*
		 * If we get here, rc > 0, meaning
		 * wait_event_interruptible_timeout() returned with time
		 * left, hence the sought event -- a non-empty free list --
		 * happened. Retry the allocation.
		 */
	}
	sksgio->skspcl = skspcl;

	return rc;
}

static int skd_skreq_prep_buffering(struct skd_device *skdev,
				    struct skd_request_context *skreq,
				    u32 dxfer_len)
{
	u32 resid = dxfer_len;

	/*
	 * The DMA engine must have aligned addresses and byte counts.
	 */
	resid += (-resid) & 3;
	skreq->sg_byte_count = resid;

	skreq->n_sg = 0;

	while (resid > 0) {
		u32 nbytes = PAGE_SIZE;
		u32 ix = skreq->n_sg;
		struct scatterlist *sg = &skreq->sg[ix];
		struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
		struct page *page;

		if (nbytes > resid)
			nbytes = resid;

		page = alloc_page(GFP_KERNEL);
		if (page == NULL)
			return -ENOMEM;

		sg_set_page(sg, page, nbytes, 0);

		/* TODO: This should be going through a pci_???()
		 * routine to do proper mapping. */
		sksg->control = FIT_SGD_CONTROL_NOT_LAST;
		sksg->byte_count = nbytes;

		sksg->host_side_addr = sg_phys(sg);

		sksg->dev_side_addr = 0;
		sksg->next_desc_ptr = skreq->sksg_dma_address +
				      (ix + 1) * sizeof(*sksg);

		skreq->n_sg++;
		resid -= nbytes;
	}

	if (skreq->n_sg > 0) {
		u32 ix = skreq->n_sg - 1;
		struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];

		sksg->control = FIT_SGD_CONTROL_LAST;
		sksg->next_desc_ptr = 0;
	}

	if (unlikely(skdev->dbg_level > 1)) {
		u32 i;

		pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
			 skdev->name, __func__, __LINE__,
			 skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
		for (i = 0; i < skreq->n_sg; i++) {
			struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];

			pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
				 "addr=0x%llx next=0x%llx\n",
				 skdev->name, __func__, __LINE__,
				 i, sgd->byte_count, sgd->control,
				 sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	return 0;
}

static int skd_sg_io_prep_buffering(struct skd_device *skdev,
				    struct skd_sg_io *sksgio)
{
	struct skd_special_context *skspcl = sksgio->skspcl;
	struct skd_request_context *skreq = &skspcl->req;
	u32 dxfer_len = sksgio->dxfer_len;
	int rc;

	rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
	/*
	 * Eventually, errors or not, skd_release_special() is called
	 * to recover allocations including partial allocations.
	 */
	return rc;
}

static int skd_sg_io_copy_buffer(struct skd_device *skdev,
				 struct skd_sg_io *sksgio, int dxfer_dir)
{
	struct skd_special_context *skspcl = sksgio->skspcl;
	u32 iov_ix = 0;
	struct sg_iovec curiov;
	u32 sksg_ix = 0;
	u8 *bufp = NULL;
	u32 buf_len = 0;
	u32 resid = sksgio->dxfer_len;
	int rc;

	curiov.iov_len = 0;
	curiov.iov_base = NULL;

	if (dxfer_dir != sksgio->sg.dxfer_direction) {
		if (dxfer_dir != SG_DXFER_TO_DEV ||
		    sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
			return 0;
	}

	while (resid > 0) {
		u32 nbytes = PAGE_SIZE;

		if (curiov.iov_len == 0) {
			curiov = sksgio->iov[iov_ix++];
			continue;
		}

		if (buf_len == 0) {
			struct page *page;
			page = sg_page(&skspcl->req.sg[sksg_ix++]);
			bufp = page_address(page);
			buf_len = PAGE_SIZE;
		}

		nbytes = min_t(u32, nbytes, resid);
		nbytes = min_t(u32, nbytes, curiov.iov_len);
		nbytes = min_t(u32, nbytes, buf_len);

		if (dxfer_dir == SG_DXFER_TO_DEV)
			rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
		else
			rc = __copy_to_user(curiov.iov_base, bufp, nbytes);

		if (rc)
			return -EFAULT;

		resid -= nbytes;
		curiov.iov_len -= nbytes;
		curiov.iov_base += nbytes;
		buf_len -= nbytes;
	}

	return 0;
}

static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
				 struct skd_sg_io *sksgio)
{
	struct skd_special_context *skspcl = sksgio->skspcl;
	struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
	struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];

	memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);

	/* Initialize the FIT msg header */
	fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
	fmh->num_protocol_cmds_coalesced = 1;

	/* Initialize the SCSI request */
	if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
		scsi_req->hdr.sg_list_dma_address =
			cpu_to_be64(skspcl->req.sksg_dma_address);
	scsi_req->hdr.tag = skspcl->req.id;
	scsi_req->hdr.sg_list_len_bytes =
		cpu_to_be32(skspcl->req.sg_byte_count);
	memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));

	skspcl->req.state = SKD_REQ_STATE_BUSY;
	skd_send_special_fitmsg(skdev, skspcl);

	return 0;
}

static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
{
	unsigned long flags;
	int rc;

	rc = wait_event_interruptible_timeout(skdev->waitq,
					      (sksgio->skspcl->req.state !=
					       SKD_REQ_STATE_BUSY),
					      msecs_to_jiffies(sksgio->sg.timeout));

	spin_lock_irqsave(&skdev->lock, flags);

	if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
		pr_debug("%s:%s:%d skspcl %p aborted\n",
			 skdev->name, __func__, __LINE__, sksgio->skspcl);

		/* Build check cond, sense and let command finish. */
		/* For a timeout, we must fabricate completion and sense
		 * data to complete the command */
		sksgio->skspcl->req.completion.status =
			SAM_STAT_CHECK_CONDITION;

		memset(&sksgio->skspcl->req.err_info, 0,
		       sizeof(sksgio->skspcl->req.err_info));
		sksgio->skspcl->req.err_info.type = 0x70;
		sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
		sksgio->skspcl->req.err_info.code = 0x44;
		sksgio->skspcl->req.err_info.qual = 0;
		rc = 0;
	} else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
		/* No longer on the adapter. We finish. */
		rc = 0;
	else {
		/* Something's gone wrong. Still busy. Timeout or
		 * user interrupted (control-C). Mark as an orphan
		 * so it will be disposed when completed. */
		sksgio->skspcl->orphaned = 1;
		sksgio->skspcl = NULL;
		if (rc == 0) {
			pr_debug("%s:%s:%d timed out %p (%u ms)\n",
				 skdev->name, __func__, __LINE__,
				 sksgio, sksgio->sg.timeout);
			rc = -ETIMEDOUT;
		} else {
			pr_debug("%s:%s:%d cntlc %p\n",
				 skdev->name, __func__, __LINE__, sksgio);
			rc = -EINTR;
		}
	}

	spin_unlock_irqrestore(&skdev->lock, flags);

	return rc;
}

static int skd_sg_io_put_status(struct skd_device *skdev,
				struct skd_sg_io *sksgio)
{
	struct sg_io_hdr *sgp = &sksgio->sg;
	struct skd_special_context *skspcl = sksgio->skspcl;
	int resid = 0;

	u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);

	sgp->status = skspcl->req.completion.status;
	resid = sksgio->dxfer_len - nb;

	sgp->masked_status = sgp->status & STATUS_MASK;
	sgp->msg_status = 0;
	sgp->host_status = 0;
	sgp->driver_status = 0;
	sgp->resid = resid;
	if (sgp->masked_status || sgp->host_status || sgp->driver_status)
		sgp->info |= SG_INFO_CHECK;

	pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n",
		 skdev->name, __func__, __LINE__,
		 sgp->status, sgp->masked_status, sgp->resid);

	if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
		if (sgp->mx_sb_len > 0) {
			struct fit_comp_error_info *ei = &skspcl->req.err_info;
			u32 nbytes = sizeof(*ei);

			nbytes = min_t(u32, nbytes, sgp->mx_sb_len);

			sgp->sb_len_wr = nbytes;

			if (__copy_to_user(sgp->sbp, ei, nbytes)) {
				pr_debug("%s:%s:%d copy_to_user sense failed %p\n",
					 skdev->name, __func__, __LINE__,
					 sgp->sbp);
				return -EFAULT;
			}
		}
	}

	if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
		pr_debug("%s:%s:%d copy_to_user sg failed %p\n",
			 skdev->name, __func__, __LINE__, sksgio->argp);
		return -EFAULT;
	}

	return 0;
}

static int skd_sg_io_release_skspcl(struct skd_device *skdev,
				    struct skd_sg_io *sksgio)
{
	struct skd_special_context *skspcl = sksgio->skspcl;

	if (skspcl != NULL) {
		ulong flags;

		sksgio->skspcl = NULL;

		spin_lock_irqsave(&skdev->lock, flags);
		skd_release_special(skdev, skspcl);
		spin_unlock_irqrestore(&skdev->lock, flags);
	}

	return 0;
}

/*
 *****************************************************************************
 * INTERNAL REQUESTS -- generated by driver itself
 *****************************************************************************
 */
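/*
 * Overview (added for clarity, not in the original source): the driver
 * probes the drive with a chain of internal commands, each completion
 * queuing the next one from skd_complete_internal():
 *
 *	TEST_UNIT_READY -> WRITE_BUFFER -> READ_BUFFER (pattern verify)
 *		-> READ_CAPACITY -> INQUIRY (serial number page)
 *
 * A successful INQUIRY unquiesces the device; a WRITE/READ_BUFFER
 * pattern mismatch triggers a soft reset and a retry, bounded by
 * SKD_MAX_CONNECT_RETRIES.
 */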

static int skd_format_internal_skspcl(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;
	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
	struct fit_msg_hdr *fmh;
	uint64_t dma_address;
	struct skd_scsi_request *scsi;

	fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
	fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
	fmh->num_protocol_cmds_coalesced = 1;

	scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
	memset(scsi, 0, sizeof(*scsi));
	dma_address = skspcl->req.sksg_dma_address;
	scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
	sgd->control = FIT_SGD_CONTROL_LAST;
	sgd->byte_count = 0;
	sgd->host_side_addr = skspcl->db_dma_address;
	sgd->dev_side_addr = 0;
	sgd->next_desc_ptr = 0LL;

	return 1;
}

#define WR_BUF_SIZE SKD_N_INTERNAL_BYTES

static void skd_send_internal_skspcl(struct skd_device *skdev,
				     struct skd_special_context *skspcl,
				     u8 opcode)
{
	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
	struct skd_scsi_request *scsi;
	unsigned char *buf = skspcl->data_buf;
	int i;

	if (skspcl->req.state != SKD_REQ_STATE_IDLE)
		/*
		 * A refresh is already in progress.
		 * Just wait for it to finish.
		 */
		return;

	SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
	skspcl->req.state = SKD_REQ_STATE_BUSY;
	skspcl->req.id += SKD_ID_INCR;

	scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
	scsi->hdr.tag = skspcl->req.id;

	memset(scsi->cdb, 0, sizeof(scsi->cdb));

	switch (opcode) {
	case TEST_UNIT_READY:
		scsi->cdb[0] = TEST_UNIT_READY;
		sgd->byte_count = 0;
		scsi->hdr.sg_list_len_bytes = 0;
		break;

	case READ_CAPACITY:
		scsi->cdb[0] = READ_CAPACITY;
		sgd->byte_count = SKD_N_READ_CAP_BYTES;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		break;

	case INQUIRY:
		scsi->cdb[0] = INQUIRY;
		scsi->cdb[1] = 0x01;	/* evpd */
		scsi->cdb[2] = 0x80;	/* serial number page */
		scsi->cdb[4] = 0x10;
		sgd->byte_count = 16;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		break;

	case SYNCHRONIZE_CACHE:
		scsi->cdb[0] = SYNCHRONIZE_CACHE;
		sgd->byte_count = 0;
		scsi->hdr.sg_list_len_bytes = 0;
		break;

	case WRITE_BUFFER:
		scsi->cdb[0] = WRITE_BUFFER;
		scsi->cdb[1] = 0x02;
		scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
		scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
		sgd->byte_count = WR_BUF_SIZE;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		/* fill incrementing byte pattern */
		for (i = 0; i < sgd->byte_count; i++)
			buf[i] = i & 0xFF;
		break;

	case READ_BUFFER:
		scsi->cdb[0] = READ_BUFFER;
		scsi->cdb[1] = 0x02;
		scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
		scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
		sgd->byte_count = WR_BUF_SIZE;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		memset(skspcl->data_buf, 0, sgd->byte_count);
		break;

	default:
		SKD_ASSERT("Don't know what to send");
		return;

	}
	skd_send_special_fitmsg(skdev, skspcl);
}

static void skd_refresh_device_data(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;

	skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
}

static int skd_chk_read_buf(struct skd_device *skdev,
			    struct skd_special_context *skspcl)
{
	unsigned char *buf = skspcl->data_buf;
	int i;

	/* check for incrementing byte pattern */
	for (i = 0; i < WR_BUF_SIZE; i++)
		if (buf[i] != (i & 0xFF))
			return 1;

	return 0;
}

static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
				 u8 code, u8 qual, u8 fruc)
{
	/* If the check condition is of special interest, log a message */
	if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
	    && (code == 0x04) && (qual == 0x06)) {
		pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/"
		       "ascq/fruc %02x/%02x/%02x/%02x\n",
		       skd_name(skdev), key, code, qual, fruc);
	}
}

static void skd_complete_internal(struct skd_device *skdev,
				  volatile struct fit_completion_entry_v1
				  *skcomp,
				  volatile struct fit_comp_error_info *skerr,
				  struct skd_special_context *skspcl)
{
	u8 *buf = skspcl->data_buf;
	u8 status;
	int i;
	struct skd_scsi_request *scsi =
		(struct skd_scsi_request *)&skspcl->msg_buf[64];

	SKD_ASSERT(skspcl == &skdev->internal_skspcl);

	pr_debug("%s:%s:%d complete internal %x\n",
		 skdev->name, __func__, __LINE__, scsi->cdb[0]);

	skspcl->req.completion = *skcomp;
	skspcl->req.state = SKD_REQ_STATE_IDLE;
	skspcl->req.id += SKD_ID_INCR;

	status = skspcl->req.completion.status;

	skd_log_check_status(skdev, status, skerr->key, skerr->code,
			     skerr->qual, skerr->fruc);

	switch (scsi->cdb[0]) {
	case TEST_UNIT_READY:
		if (status == SAM_STAT_GOOD)
			skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
		else if ((status == SAM_STAT_CHECK_CONDITION) &&
			 (skerr->key == MEDIUM_ERROR))
			skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
		else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n",
					 skdev->name, __func__, __LINE__,
					 skdev->state);
				return;
			}
			pr_debug("%s:%s:%d **** TUR failed, retry skerr\n",
				 skdev->name, __func__, __LINE__);
			skd_send_internal_skspcl(skdev, skspcl, 0x00);
		}
		break;

	case WRITE_BUFFER:
		if (status == SAM_STAT_GOOD)
			skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
		else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				pr_debug("%s:%s:%d write buffer failed, don't send anymore state 0x%x\n",
					 skdev->name, __func__, __LINE__,
					 skdev->state);
				return;
			}
			pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n",
				 skdev->name, __func__, __LINE__);
			skd_send_internal_skspcl(skdev, skspcl, 0x00);
		}
		break;

	case READ_BUFFER:
		if (status == SAM_STAT_GOOD) {
			if (skd_chk_read_buf(skdev, skspcl) == 0)
				skd_send_internal_skspcl(skdev, skspcl,
							 READ_CAPACITY);
			else {
				pr_err("(%s):*** W/R Buffer mismatch %d ***\n",
				       skd_name(skdev), skdev->connect_retries);
				if (skdev->connect_retries <
				    SKD_MAX_CONNECT_RETRIES) {
					skdev->connect_retries++;
					skd_soft_reset(skdev);
				} else {
					pr_err("(%s): W/R Buffer Connect Error\n",
					       skd_name(skdev));
					return;
				}
			}

		} else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				pr_debug("%s:%s:%d "
					 "read buffer failed, don't send anymore state 0x%x\n",
					 skdev->name, __func__, __LINE__,
					 skdev->state);
				return;
			}
			pr_debug("%s:%s:%d "
				 "**** read buffer failed, retry skerr\n",
				 skdev->name, __func__, __LINE__);
			skd_send_internal_skspcl(skdev, skspcl, 0x00);
		}
		break;

	case READ_CAPACITY:
		skdev->read_cap_is_valid = 0;
		if (status == SAM_STAT_GOOD) {
			skdev->read_cap_last_lba =
				(buf[0] << 24) | (buf[1] << 16) |
				(buf[2] << 8) | buf[3];
			skdev->read_cap_blocksize =
				(buf[4] << 24) | (buf[5] << 16) |
				(buf[6] << 8) | buf[7];

			pr_debug("%s:%s:%d last lba %d, bs %d\n",
				 skdev->name, __func__, __LINE__,
				 skdev->read_cap_last_lba,
				 skdev->read_cap_blocksize);

			set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);

			skdev->read_cap_is_valid = 1;

			skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
		} else if ((status == SAM_STAT_CHECK_CONDITION) &&
			   (skerr->key == MEDIUM_ERROR)) {
			skdev->read_cap_last_lba = ~0;
			set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
			pr_debug("%s:%s:%d "
				 "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n",
				 skdev->name, __func__, __LINE__);
			skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
		} else {
			pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n",
				 skdev->name, __func__, __LINE__);
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case INQUIRY:
		skdev->inquiry_is_valid = 0;
		if (status == SAM_STAT_GOOD) {
			skdev->inquiry_is_valid = 1;

			for (i = 0; i < 12; i++)
				skdev->inq_serial_num[i] = buf[i + 4];
			skdev->inq_serial_num[12] = 0;
		}

		if (skd_unquiesce_dev(skdev) < 0)
			pr_debug("%s:%s:%d **** failed to ONLINE device\n",
				 skdev->name, __func__, __LINE__);
		/* connection is complete */
		skdev->connect_retries = 0;
		break;

	case SYNCHRONIZE_CACHE:
		if (status == SAM_STAT_GOOD)
			skdev->sync_done = 1;
		else
			skdev->sync_done = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	default:
		SKD_ASSERT("we didn't send this");
	}
}
__LINE__); 2046 skd_send_internal_skspcl(skdev, skspcl, 0x00); 2047 } 2048 break; 2049 2050 case READ_CAPACITY: 2051 skdev->read_cap_is_valid = 0; 2052 if (status == SAM_STAT_GOOD) { 2053 skdev->read_cap_last_lba = 2054 (buf[0] << 24) | (buf[1] << 16) | 2055 (buf[2] << 8) | buf[3]; 2056 skdev->read_cap_blocksize = 2057 (buf[4] << 24) | (buf[5] << 16) | 2058 (buf[6] << 8) | buf[7]; 2059 2060 pr_debug("%s:%s:%d last lba %d, bs %d\n", 2061 skdev->name, __func__, __LINE__, 2062 skdev->read_cap_last_lba, 2063 skdev->read_cap_blocksize); 2064 2065 set_capacity(skdev->disk, skdev->read_cap_last_lba + 1); 2066 2067 skdev->read_cap_is_valid = 1; 2068 2069 skd_send_internal_skspcl(skdev, skspcl, INQUIRY); 2070 } else if ((status == SAM_STAT_CHECK_CONDITION) && 2071 (skerr->key == MEDIUM_ERROR)) { 2072 skdev->read_cap_last_lba = ~0; 2073 set_capacity(skdev->disk, skdev->read_cap_last_lba + 1); 2074 pr_debug("%s:%s:%d " 2075 "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n", 2076 skdev->name, __func__, __LINE__); 2077 skd_send_internal_skspcl(skdev, skspcl, INQUIRY); 2078 } else { 2079 pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n", 2080 skdev->name, __func__, __LINE__); 2081 skd_send_internal_skspcl(skdev, skspcl, 2082 TEST_UNIT_READY); 2083 } 2084 break; 2085 2086 case INQUIRY: 2087 skdev->inquiry_is_valid = 0; 2088 if (status == SAM_STAT_GOOD) { 2089 skdev->inquiry_is_valid = 1; 2090 2091 for (i = 0; i < 12; i++) 2092 skdev->inq_serial_num[i] = buf[i + 4]; 2093 skdev->inq_serial_num[12] = 0; 2094 } 2095 2096 if (skd_unquiesce_dev(skdev) < 0) 2097 pr_debug("%s:%s:%d **** failed, to ONLINE device\n", 2098 skdev->name, __func__, __LINE__); 2099 /* connection is complete */ 2100 skdev->connect_retries = 0; 2101 break; 2102 2103 case SYNCHRONIZE_CACHE: 2104 if (status == SAM_STAT_GOOD) 2105 skdev->sync_done = 1; 2106 else 2107 skdev->sync_done = -1; 2108 wake_up_interruptible(&skdev->waitq); 2109 break; 2110 2111 default: 2112 SKD_ASSERT("we didn't send this"); 2113 } 2114} 2115 2116/* 2117 ***************************************************************************** 2118 * FIT MESSAGES 2119 ***************************************************************************** 2120 */ 2121 2122static void skd_send_fitmsg(struct skd_device *skdev, 2123 struct skd_fitmsg_context *skmsg) 2124{ 2125 u64 qcmd; 2126 struct fit_msg_hdr *fmh; 2127 2128 pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n", 2129 skdev->name, __func__, __LINE__, 2130 skmsg->mb_dma_address, skdev->in_flight); 2131 pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n", 2132 skdev->name, __func__, __LINE__, 2133 skmsg->msg_buf, skmsg->offset); 2134 2135 qcmd = skmsg->mb_dma_address; 2136 qcmd |= FIT_QCMD_QID_NORMAL; 2137 2138 fmh = (struct fit_msg_hdr *)skmsg->msg_buf; 2139 skmsg->outstanding = fmh->num_protocol_cmds_coalesced; 2140 2141 if (unlikely(skdev->dbg_level > 1)) { 2142 u8 *bp = (u8 *)skmsg->msg_buf; 2143 int i; 2144 for (i = 0; i < skmsg->length; i += 8) { 2145 pr_debug("%s:%s:%d msg[%2d] %8ph\n", 2146 skdev->name, __func__, __LINE__, i, &bp[i]); 2147 if (i == 0) 2148 i = 64 - 8; 2149 } 2150 } 2151 2152 if (skmsg->length > 256) 2153 qcmd |= FIT_QCMD_MSGSIZE_512; 2154 else if (skmsg->length > 128) 2155 qcmd |= FIT_QCMD_MSGSIZE_256; 2156 else if (skmsg->length > 64) 2157 qcmd |= FIT_QCMD_MSGSIZE_128; 2158 else 2159 /* 2160 * This makes no sense because the FIT msg header is 2161 * 64 bytes. If the msg is only 64 bytes long it has 2162 * no payload. 
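 * (Illustrative aside, not part of the original comment: even a
 * message coalescing a single command is the 64-byte FIT header
 * plus one 64-byte command, i.e. 128 bytes, so in practice
 * FIT_QCMD_MSGSIZE_128 is the smallest size actually selected.)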
2163 */ 2164 qcmd |= FIT_QCMD_MSGSIZE_64; 2165 2166 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND); 2167} 2168 2169static void skd_send_special_fitmsg(struct skd_device *skdev, 2170 struct skd_special_context *skspcl) 2171{ 2172 u64 qcmd; 2173 2174 if (unlikely(skdev->dbg_level > 1)) { 2175 u8 *bp = (u8 *)skspcl->msg_buf; 2176 int i; 2177 2178 for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) { 2179 pr_debug("%s:%s:%d spcl[%2d] %8ph\n", 2180 skdev->name, __func__, __LINE__, i, &bp[i]); 2181 if (i == 0) 2182 i = 64 - 8; 2183 } 2184 2185 pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n", 2186 skdev->name, __func__, __LINE__, 2187 skspcl, skspcl->req.id, skspcl->req.sksg_list, 2188 skspcl->req.sksg_dma_address); 2189 for (i = 0; i < skspcl->req.n_sg; i++) { 2190 struct fit_sg_descriptor *sgd = 2191 &skspcl->req.sksg_list[i]; 2192 2193 pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x " 2194 "addr=0x%llx next=0x%llx\n", 2195 skdev->name, __func__, __LINE__, 2196 i, sgd->byte_count, sgd->control, 2197 sgd->host_side_addr, sgd->next_desc_ptr); 2198 } 2199 } 2200 2201 /* 2202 * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr 2203 * and one 64-byte SSDI command. 2204 */ 2205 qcmd = skspcl->mb_dma_address; 2206 qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128; 2207 2208 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND); 2209} 2210 2211/* 2212 ***************************************************************************** 2213 * COMPLETION QUEUE 2214 ***************************************************************************** 2215 */ 2216 2217static void skd_complete_other(struct skd_device *skdev, 2218 volatile struct fit_completion_entry_v1 *skcomp, 2219 volatile struct fit_comp_error_info *skerr); 2220 2221struct sns_info { 2222 u8 type; 2223 u8 stat; 2224 u8 key; 2225 u8 asc; 2226 u8 ascq; 2227 u8 mask; 2228 enum skd_check_status_action action; 2229}; 2230 2231static struct sns_info skd_chkstat_table[] = { 2232 /* Good */ 2233 { 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c, 2234 SKD_CHECK_STATUS_REPORT_GOOD }, 2235 2236 /* Smart alerts */ 2237 { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */ 2238 SKD_CHECK_STATUS_REPORT_SMART_ALERT }, 2239 { 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */ 2240 SKD_CHECK_STATUS_REPORT_SMART_ALERT }, 2241 { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */ 2242 SKD_CHECK_STATUS_REPORT_SMART_ALERT }, 2243 2244 /* Retry (with limits) */ 2245 { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */ 2246 SKD_CHECK_STATUS_REQUEUE_REQUEST }, 2247 { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */ 2248 SKD_CHECK_STATUS_REQUEUE_REQUEST }, 2249 { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */ 2250 SKD_CHECK_STATUS_REQUEUE_REQUEST }, 2251 { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */ 2252 SKD_CHECK_STATUS_REQUEUE_REQUEST }, 2253 2254 /* Busy (or about to be) */ 2255 { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */ 2256 SKD_CHECK_STATUS_BUSY_IMMINENT }, 2257}; 2258 2259/* 2260 * Look up status and sense data to decide how to handle the error 2261 * from the device. 2262 * mask says which fields must match e.g., mask=0x18 means check 2263 * type and stat, ignore key, asc, ascq. 
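 * A second worked example, added for illustration: the DMA-error
 * entry above uses mask=0x1C, i.e. type (0x70), stat (0x02) and
 * key (0x0B) must all match while asc/ascq are ignored, so any
 * sense code reported under key 0x0B requeues the request.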
2264 */ 2265 2266static enum skd_check_status_action 2267skd_check_status(struct skd_device *skdev, 2268 u8 cmp_status, volatile struct fit_comp_error_info *skerr) 2269{ 2270 int i, n; 2271 2272 pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n", 2273 skd_name(skdev), skerr->key, skerr->code, skerr->qual, 2274 skerr->fruc); 2275 2276 pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n", 2277 skdev->name, __func__, __LINE__, skerr->type, cmp_status, 2278 skerr->key, skerr->code, skerr->qual, skerr->fruc); 2279 2280 /* Does the info match an entry in the good category? */ 2281 n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]); 2282 for (i = 0; i < n; i++) { 2283 struct sns_info *sns = &skd_chkstat_table[i]; 2284 2285 if (sns->mask & 0x10) 2286 if (skerr->type != sns->type) 2287 continue; 2288 2289 if (sns->mask & 0x08) 2290 if (cmp_status != sns->stat) 2291 continue; 2292 2293 if (sns->mask & 0x04) 2294 if (skerr->key != sns->key) 2295 continue; 2296 2297 if (sns->mask & 0x02) 2298 if (skerr->code != sns->asc) 2299 continue; 2300 2301 if (sns->mask & 0x01) 2302 if (skerr->qual != sns->ascq) 2303 continue; 2304 2305 if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) { 2306 pr_err("(%s): SMART Alert: sense key/asc/ascq " 2307 "%02x/%02x/%02x\n", 2308 skd_name(skdev), skerr->key, 2309 skerr->code, skerr->qual); 2310 } 2311 return sns->action; 2312 } 2313 2314 /* No other match, so nonzero status means error, 2315 * zero status means good 2316 */ 2317 if (cmp_status) { 2318 pr_debug("%s:%s:%d status check: error\n", 2319 skdev->name, __func__, __LINE__); 2320 return SKD_CHECK_STATUS_REPORT_ERROR; 2321 } 2322 2323 pr_debug("%s:%s:%d status check good default\n", 2324 skdev->name, __func__, __LINE__); 2325 return SKD_CHECK_STATUS_REPORT_GOOD; 2326} 2327 2328static void skd_resolve_req_exception(struct skd_device *skdev, 2329 struct skd_request_context *skreq) 2330{ 2331 u8 cmp_status = skreq->completion.status; 2332 2333 switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) { 2334 case SKD_CHECK_STATUS_REPORT_GOOD: 2335 case SKD_CHECK_STATUS_REPORT_SMART_ALERT: 2336 skd_end_request(skdev, skreq, 0); 2337 break; 2338 2339 case SKD_CHECK_STATUS_BUSY_IMMINENT: 2340 skd_log_skreq(skdev, skreq, "retry(busy)"); 2341 blk_requeue_request(skdev->queue, skreq->req); 2342 pr_info("(%s) drive BUSY imminent\n", skd_name(skdev)); 2343 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT; 2344 skdev->timer_countdown = SKD_TIMER_MINUTES(20); 2345 skd_quiesce_dev(skdev); 2346 break; 2347 2348 case SKD_CHECK_STATUS_REQUEUE_REQUEST: 2349 if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) { 2350 skd_log_skreq(skdev, skreq, "retry"); 2351 blk_requeue_request(skdev->queue, skreq->req); 2352 break; 2353 } 2354 /* fall through to report error */ 2355 2356 case SKD_CHECK_STATUS_REPORT_ERROR: 2357 default: 2358 skd_end_request(skdev, skreq, -EIO); 2359 break; 2360 } 2361} 2362 2363/* assume spinlock is already held */ 2364static void skd_release_skreq(struct skd_device *skdev, 2365 struct skd_request_context *skreq) 2366{ 2367 u32 msg_slot; 2368 struct skd_fitmsg_context *skmsg; 2369 2370 u32 timo_slot; 2371 2372 /* 2373 * Reclaim the FIT msg buffer if this is 2374 * the first of the requests it carried to 2375 * be completed. The FIT msg buffer used to 2376 * send this request cannot be reused until 2377 * we are sure the s1120 card has copied 2378 * it to its memory. The FIT msg might have 2379 * contained several requests. 
As soon as 2380 * any of them are completed we know that 2381 * the entire FIT msg was transferred. 2382 * Only the first completed request will 2383 * match the FIT msg buffer id. The FIT 2384 * msg buffer id is immediately updated. 2385 * When subsequent requests complete the FIT 2386 * msg buffer id won't match, so we know 2387 * quite cheaply that it is already done. 2388 */ 2389 msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK; 2390 SKD_ASSERT(msg_slot < skdev->num_fitmsg_context); 2391 2392 skmsg = &skdev->skmsg_table[msg_slot]; 2393 if (skmsg->id == skreq->fitmsg_id) { 2394 SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY); 2395 SKD_ASSERT(skmsg->outstanding > 0); 2396 skmsg->outstanding--; 2397 if (skmsg->outstanding == 0) { 2398 skmsg->state = SKD_MSG_STATE_IDLE; 2399 skmsg->id += SKD_ID_INCR; 2400 skmsg->next = skdev->skmsg_free_list; 2401 skdev->skmsg_free_list = skmsg; 2402 } 2403 } 2404 2405 /* 2406 * Decrease the number of active requests. 2407 * Also decrements the count in the timeout slot. 2408 */ 2409 SKD_ASSERT(skdev->in_flight > 0); 2410 skdev->in_flight -= 1; 2411 2412 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; 2413 SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0); 2414 skdev->timeout_slot[timo_slot] -= 1; 2415 2416 /* 2417 * Reset backpointer 2418 */ 2419 skreq->req = NULL; 2420 2421 /* 2422 * Reclaim the skd_request_context 2423 */ 2424 skreq->state = SKD_REQ_STATE_IDLE; 2425 skreq->id += SKD_ID_INCR; 2426 skreq->next = skdev->skreq_free_list; 2427 skdev->skreq_free_list = skreq; 2428} 2429 2430#define DRIVER_INQ_EVPD_PAGE_CODE 0xDA 2431 2432static void skd_do_inq_page_00(struct skd_device *skdev, 2433 volatile struct fit_completion_entry_v1 *skcomp, 2434 volatile struct fit_comp_error_info *skerr, 2435 uint8_t *cdb, uint8_t *buf) 2436{ 2437 uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size; 2438 2439 /* Caller requested "supported pages". The driver needs to insert 2440 * its page. 2441 */ 2442 pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n", 2443 skdev->name, __func__, __LINE__); 2444 2445 /* If the device rejected the request because the CDB was 2446 * improperly formed, then just leave. 2447 */ 2448 if (skcomp->status == SAM_STAT_CHECK_CONDITION && 2449 skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24) 2450 return; 2451 2452 /* Get the amount of space the caller allocated */ 2453 max_bytes = (cdb[3] << 8) | cdb[4]; 2454 2455 /* Get the number of pages actually returned by the device */ 2456 drive_pages = (buf[2] << 8) | buf[3]; 2457 drive_bytes = drive_pages + 4; 2458 new_size = drive_pages + 1; 2459 2460 /* Supported pages must be in numerical order, so find where 2461 * the driver page needs to be inserted into the list of 2462 * pages returned by the device. 2463 */ 2464 for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) { 2465 if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE) 2466 return; /* Device using this page code. abort */ 2467 else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE) 2468 break; 2469 } 2470 2471 if (insert_pt < max_bytes) { 2472 uint16_t u; 2473 2474 /* Shift everything up one byte to make room. 
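 * (Worked example, added for illustration: if the device returned
 * supported pages 00h, 80h and 83h, which all sort below the
 * driver's page code DAh, insert_pt ends up just past the last
 * page and DAh is appended; any device page sorting above DAh
 * would instead be shifted up one byte by the loop below.)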
*/ 2475 for (u = new_size + 3; u > insert_pt; u--) 2476 buf[u] = buf[u - 1]; 2477 buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE; 2478 2479 /* SCSI byte order increment of num_returned_bytes by 1 */ 2480 skcomp->num_returned_bytes = 2481 be32_to_cpu(skcomp->num_returned_bytes) + 1; 2482 skcomp->num_returned_bytes = 2483 cpu_to_be32(skcomp->num_returned_bytes); 2484 } 2485 2486 /* update page length field to reflect the driver's page too */ 2487 buf[2] = (uint8_t)((new_size >> 8) & 0xFF); 2488 buf[3] = (uint8_t)((new_size >> 0) & 0xFF); 2489} 2490 2491static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width) 2492{ 2493 int pcie_reg; 2494 u16 pci_bus_speed; 2495 u8 pci_lanes; 2496 2497 pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP); 2498 if (pcie_reg) { 2499 u16 linksta; 2500 pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta); 2501 2502 pci_bus_speed = linksta & 0xF; 2503 pci_lanes = (linksta & 0x3F0) >> 4; 2504 } else { 2505 *speed = STEC_LINK_UNKNOWN; 2506 *width = 0xFF; 2507 return; 2508 } 2509 2510 switch (pci_bus_speed) { 2511 case 1: 2512 *speed = STEC_LINK_2_5GTS; 2513 break; 2514 case 2: 2515 *speed = STEC_LINK_5GTS; 2516 break; 2517 case 3: 2518 *speed = STEC_LINK_8GTS; 2519 break; 2520 default: 2521 *speed = STEC_LINK_UNKNOWN; 2522 break; 2523 } 2524 2525 if (pci_lanes <= 0x20) 2526 *width = pci_lanes; 2527 else 2528 *width = 0xFF; 2529} 2530 2531static void skd_do_inq_page_da(struct skd_device *skdev, 2532 volatile struct fit_completion_entry_v1 *skcomp, 2533 volatile struct fit_comp_error_info *skerr, 2534 uint8_t *cdb, uint8_t *buf) 2535{ 2536 struct pci_dev *pdev = skdev->pdev; 2537 unsigned max_bytes; 2538 struct driver_inquiry_data inq; 2539 u16 val; 2540 2541 pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n", 2542 skdev->name, __func__, __LINE__); 2543 2544 memset(&inq, 0, sizeof(inq)); 2545 2546 inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE; 2547 2548 skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes); 2549 inq.pcie_bus_number = cpu_to_be16(pdev->bus->number); 2550 inq.pcie_device_number = PCI_SLOT(pdev->devfn); 2551 inq.pcie_function_number = PCI_FUNC(pdev->devfn); 2552 2553 pci_read_config_word(pdev, PCI_VENDOR_ID, &val); 2554 inq.pcie_vendor_id = cpu_to_be16(val); 2555 2556 pci_read_config_word(pdev, PCI_DEVICE_ID, &val); 2557 inq.pcie_device_id = cpu_to_be16(val); 2558 2559 pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val); 2560 inq.pcie_subsystem_vendor_id = cpu_to_be16(val); 2561 2562 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val); 2563 inq.pcie_subsystem_device_id = cpu_to_be16(val); 2564 2565 /* Driver version, fixed length, padded with spaces on the right */ 2566 inq.driver_version_length = sizeof(inq.driver_version); 2567 memset(&inq.driver_version, ' ', sizeof(inq.driver_version)); 2568 memcpy(inq.driver_version, DRV_VER_COMPL, 2569 min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL))); 2570 2571 inq.page_length = cpu_to_be16((sizeof(inq) - 4)); 2572 2573 /* Clear the error set by the device */ 2574 skcomp->status = SAM_STAT_GOOD; 2575 memset((void *)skerr, 0, sizeof(*skerr)); 2576 2577 /* copy response into output buffer */ 2578 max_bytes = (cdb[3] << 8) | cdb[4]; 2579 memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq))); 2580 2581 skcomp->num_returned_bytes = 2582 cpu_to_be32(min_t(uint16_t, max_bytes, sizeof(inq))); 2583} 2584 2585static void skd_do_driver_inq(struct skd_device *skdev, 2586 volatile struct fit_completion_entry_v1 *skcomp, 2587 volatile struct fit_comp_error_info
*skerr, 2588 uint8_t *cdb, uint8_t *buf) 2589{ 2590 if (!buf) 2591 return; 2592 else if (cdb[0] != INQUIRY) 2593 return; /* Not an INQUIRY */ 2594 else if ((cdb[1] & 1) == 0) 2595 return; /* EVPD not set */ 2596 else if (cdb[2] == 0) 2597 /* Need to add driver's page to supported pages list */ 2598 skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf); 2599 else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE) 2600 /* Caller requested driver's page */ 2601 skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf); 2602} 2603 2604static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg) 2605{ 2606 if (!sg) 2607 return NULL; 2608 if (!sg_page(sg)) 2609 return NULL; 2610 return sg_virt(sg); 2611} 2612 2613static void skd_process_scsi_inq(struct skd_device *skdev, 2614 volatile struct fit_completion_entry_v1 2615 *skcomp, 2616 volatile struct fit_comp_error_info *skerr, 2617 struct skd_special_context *skspcl) 2618{ 2619 uint8_t *buf; 2620 struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf; 2621 struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1]; 2622 2623 dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg, 2624 skspcl->req.sg_data_dir); 2625 buf = skd_sg_1st_page_ptr(skspcl->req.sg); 2626 2627 if (buf) 2628 skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf); 2629} 2630 2631 2632static int skd_isr_completion_posted(struct skd_device *skdev, 2633 int limit, int *enqueued) 2634{ 2635 volatile struct fit_completion_entry_v1 *skcmp = NULL; 2636 volatile struct fit_comp_error_info *skerr; 2637 u16 req_id; 2638 u32 req_slot; 2639 struct skd_request_context *skreq; 2640 u16 cmp_cntxt = 0; 2641 u8 cmp_status = 0; 2642 u8 cmp_cycle = 0; 2643 u32 cmp_bytes = 0; 2644 int rc = 0; 2645 int processed = 0; 2646 2647 for (;; ) { 2648 SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY); 2649 2650 skcmp = &skdev->skcomp_table[skdev->skcomp_ix]; 2651 cmp_cycle = skcmp->cycle; 2652 cmp_cntxt = skcmp->tag; 2653 cmp_status = skcmp->status; 2654 cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes); 2655 2656 skerr = &skdev->skerr_table[skdev->skcomp_ix]; 2657 2658 pr_debug("%s:%s:%d " 2659 "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d " 2660 "busy=%d rbytes=0x%x proto=%d\n", 2661 skdev->name, __func__, __LINE__, skdev->skcomp_cycle, 2662 skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status, 2663 skdev->in_flight, cmp_bytes, skdev->proto_ver); 2664 2665 if (cmp_cycle != skdev->skcomp_cycle) { 2666 pr_debug("%s:%s:%d end of completions\n", 2667 skdev->name, __func__, __LINE__); 2668 break; 2669 } 2670 /* 2671 * Update the completion queue head index and possibly 2672 * the completion cycle count. 8-bit wrap-around. 2673 */ 2674 skdev->skcomp_ix++; 2675 if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) { 2676 skdev->skcomp_ix = 0; 2677 skdev->skcomp_cycle++; 2678 } 2679 2680 /* 2681 * The command context is a unique 32-bit ID. The low order 2682 * bits help locate the request. The request is usually a 2683 * r/w request (see skd_start() above) or a special request. 2684 */ 2685 req_id = cmp_cntxt; 2686 req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK; 2687 2688 /* Is this other than a r/w request? */ 2689 if (req_slot >= skdev->num_req_context) { 2690 /* 2691 * This is not a completion for a r/w request. 2692 */ 2693 skd_complete_other(skdev, skcmp, skerr); 2694 continue; 2695 } 2696 2697 skreq = &skdev->skreq_table[req_slot]; 2698 2699 /* 2700 * Make sure the request ID for the slot matches. 
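 * (Illustrative, assuming the SKD_ID_* layout defined above: the
 * id stored in the slot is bumped by SKD_ID_INCR every time the
 * slot is recycled, e.g. 0x0005 becomes 0x0405 after one release,
 * so a completion still carrying 0x0005 is recognized as stale
 * and dropped below.)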
2701 */ 2702 if (skreq->id != req_id) { 2703 pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n", 2704 skdev->name, __func__, __LINE__, 2705 req_id, skreq->id); 2706 { 2707 u16 new_id = cmp_cntxt; 2708 pr_err("(%s): Completion mismatch " 2709 "comp_id=0x%04x skreq=0x%04x new=0x%04x\n", 2710 skd_name(skdev), req_id, 2711 skreq->id, new_id); 2712 2713 continue; 2714 } 2715 } 2716 2717 SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY); 2718 2719 if (skreq->state == SKD_REQ_STATE_ABORTED) { 2720 pr_debug("%s:%s:%d reclaim req %p id=%04x\n", 2721 skdev->name, __func__, __LINE__, 2722 skreq, skreq->id); 2723 /* a previously timed out command can 2724 * now be cleaned up */ 2725 skd_release_skreq(skdev, skreq); 2726 continue; 2727 } 2728 2729 skreq->completion = *skcmp; 2730 if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) { 2731 skreq->err_info = *skerr; 2732 skd_log_check_status(skdev, cmp_status, skerr->key, 2733 skerr->code, skerr->qual, 2734 skerr->fruc); 2735 } 2736 /* Release DMA resources for the request. */ 2737 if (skreq->n_sg > 0) 2738 skd_postop_sg_list(skdev, skreq); 2739 2740 if (!skreq->req) { 2741 pr_debug("%s:%s:%d NULL backptr skdreq %p, " 2742 "req=0x%x req_id=0x%x\n", 2743 skdev->name, __func__, __LINE__, 2744 skreq, skreq->id, req_id); 2745 } else { 2746 /* 2747 * Capture the outcome and post it back to the 2748 * native request. 2749 */ 2750 if (likely(cmp_status == SAM_STAT_GOOD)) 2751 skd_end_request(skdev, skreq, 0); 2752 else 2753 skd_resolve_req_exception(skdev, skreq); 2754 } 2755 2756 /* 2757 * Release the skreq, its FIT msg (if one), timeout slot, 2758 * and queue depth. 2759 */ 2760 skd_release_skreq(skdev, skreq); 2761 2762 /* skd_isr_comp_limit equal to zero means no limit */ 2763 if (limit) { 2764 if (++processed >= limit) { 2765 rc = 1; 2766 break; 2767 } 2768 } 2769 } 2770 2771 if ((skdev->state == SKD_DRVR_STATE_PAUSING) 2772 && (skdev->in_flight) == 0) { 2773 skdev->state = SKD_DRVR_STATE_PAUSED; 2774 wake_up_interruptible(&skdev->waitq); 2775 } 2776 2777 return rc; 2778} 2779 2780static void skd_complete_other(struct skd_device *skdev, 2781 volatile struct fit_completion_entry_v1 *skcomp, 2782 volatile struct fit_comp_error_info *skerr) 2783{ 2784 u32 req_id = 0; 2785 u32 req_table; 2786 u32 req_slot; 2787 struct skd_special_context *skspcl; 2788 2789 req_id = skcomp->tag; 2790 req_table = req_id & SKD_ID_TABLE_MASK; 2791 req_slot = req_id & SKD_ID_SLOT_MASK; 2792 2793 pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n", 2794 skdev->name, __func__, __LINE__, 2795 req_table, req_id, req_slot); 2796 2797 /* 2798 * Based on the request id, determine how to dispatch this completion. 2799 * This switch/case is finding the good cases and forwarding the 2800 * completion entry. Errors are reported below the switch. 2801 */ 2802 switch (req_table) { 2803 case SKD_ID_RW_REQUEST: 2804 /* 2805 * The caller, skd_isr_completion_posted() above, 2806 * handles r/w requests. The only way we get here 2807 * is if the req_slot is out of bounds. 2808 */ 2809 break; 2810 2811 case SKD_ID_SPECIAL_REQUEST: 2812 /* 2813 * Make sure the req_slot is in bounds and that the id 2814 * matches.
2815 */ 2816 if (req_slot < skdev->n_special) { 2817 skspcl = &skdev->skspcl_table[req_slot]; 2818 if (skspcl->req.id == req_id && 2819 skspcl->req.state == SKD_REQ_STATE_BUSY) { 2820 skd_complete_special(skdev, 2821 skcomp, skerr, skspcl); 2822 return; 2823 } 2824 } 2825 break; 2826 2827 case SKD_ID_INTERNAL: 2828 if (req_slot == 0) { 2829 skspcl = &skdev->internal_skspcl; 2830 if (skspcl->req.id == req_id && 2831 skspcl->req.state == SKD_REQ_STATE_BUSY) { 2832 skd_complete_internal(skdev, 2833 skcomp, skerr, skspcl); 2834 return; 2835 } 2836 } 2837 break; 2838 2839 case SKD_ID_FIT_MSG: 2840 /* 2841 * These ids should never appear in a completion record. 2842 */ 2843 break; 2844 2845 default: 2846 /* 2847 * These ids should never appear anywhere. 2848 */ 2849 break; 2850 } 2851 2852 /* 2853 * If we get here it is a bad or stale id. 2854 */ 2855} 2856 2857static void skd_complete_special(struct skd_device *skdev, 2858 volatile struct fit_completion_entry_v1 2859 *skcomp, 2860 volatile struct fit_comp_error_info *skerr, 2861 struct skd_special_context *skspcl) 2862{ 2863 pr_debug("%s:%s:%d completing special request %p\n", 2864 skdev->name, __func__, __LINE__, skspcl); 2865 if (skspcl->orphaned) { 2866 /* Discard orphaned request */ 2867 /* ?: Can this release directly or does it need 2868 * to use a worker? */ 2869 pr_debug("%s:%s:%d release orphaned %p\n", 2870 skdev->name, __func__, __LINE__, skspcl); 2871 skd_release_special(skdev, skspcl); 2872 return; 2873 } 2874 2875 skd_process_scsi_inq(skdev, skcomp, skerr, skspcl); 2876 2877 skspcl->req.state = SKD_REQ_STATE_COMPLETED; 2878 skspcl->req.completion = *skcomp; 2879 skspcl->req.err_info = *skerr; 2880 2881 skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key, 2882 skerr->code, skerr->qual, skerr->fruc); 2883 2884 wake_up_interruptible(&skdev->waitq); 2885} 2886 2887/* assume spinlock is already held */ 2888static void skd_release_special(struct skd_device *skdev, 2889 struct skd_special_context *skspcl) 2890{ 2891 int i, was_depleted; 2892 2893 for (i = 0; i < skspcl->req.n_sg; i++) { 2894 struct page *page = sg_page(&skspcl->req.sg[i]); 2895 __free_page(page); 2896 } 2897 2898 was_depleted = (skdev->skspcl_free_list == NULL); 2899 2900 skspcl->req.state = SKD_REQ_STATE_IDLE; 2901 skspcl->req.id += SKD_ID_INCR; 2902 skspcl->req.next = 2903 (struct skd_request_context *)skdev->skspcl_free_list; 2904 skdev->skspcl_free_list = (struct skd_special_context *)skspcl; 2905 2906 if (was_depleted) { 2907 pr_debug("%s:%s:%d skspcl was depleted\n", 2908 skdev->name, __func__, __LINE__); 2909 /* Free list was depleted. There might be waiters.
*/ 2910 wake_up_interruptible(&skdev->waitq); 2911 } 2912} 2913 2914static void skd_reset_skcomp(struct skd_device *skdev) 2915{ 2916 u32 nbytes; 2917 struct fit_completion_entry_v1 *skcomp; 2918 2919 nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY; 2920 nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY; 2921 2922 memset(skdev->skcomp_table, 0, nbytes); 2923 2924 skdev->skcomp_ix = 0; 2925 skdev->skcomp_cycle = 1; 2926} 2927 2928/* 2929 ***************************************************************************** 2930 * INTERRUPTS 2931 ***************************************************************************** 2932 */ 2933static void skd_completion_worker(struct work_struct *work) 2934{ 2935 struct skd_device *skdev = 2936 container_of(work, struct skd_device, completion_worker); 2937 unsigned long flags; 2938 int flush_enqueued = 0; 2939 2940 spin_lock_irqsave(&skdev->lock, flags); 2941 2942 /* 2943 * pass in limit=0, which means no limit; 2944 * process everything in compq 2945 */ 2946 skd_isr_completion_posted(skdev, 0, &flush_enqueued); 2947 skd_request_fn(skdev->queue); 2948 2949 spin_unlock_irqrestore(&skdev->lock, flags); 2950} 2951 2952static void skd_isr_msg_from_dev(struct skd_device *skdev); 2953 2954static irqreturn_t 2955skd_isr(int irq, void *ptr) 2956{ 2957 struct skd_device *skdev; 2958 u32 intstat; 2959 u32 ack; 2960 int rc = 0; 2961 int deferred = 0; 2962 int flush_enqueued = 0; 2963 2964 skdev = (struct skd_device *)ptr; 2965 spin_lock(&skdev->lock); 2966 2967 for (;; ) { 2968 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST); 2969 2970 ack = FIT_INT_DEF_MASK; 2971 ack &= intstat; 2972 2973 pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n", 2974 skdev->name, __func__, __LINE__, intstat, ack); 2975 2976 /* As long as there is an int pending on device, keep 2977 * running the loop. When none, get out, but if we've never 2978 * done any processing, call the completion handler? 2979 */ 2980 if (ack == 0) { 2981 /* No interrupts on device, but run the completion 2982 * processor anyway?
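 * (A plausible rationale, not spelled out in the original: on a
 * shared legacy INTx line the interrupt may have been raised for
 * another device, so scheduling one deferred completion pass
 * while ONLINE is a cheap guard against missed work.)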
2983 */ 2984 if (rc == 0) 2985 if (likely (skdev->state 2986 == SKD_DRVR_STATE_ONLINE)) 2987 deferred = 1; 2988 break; 2989 } 2990 2991 rc = IRQ_HANDLED; 2992 2993 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST); 2994 2995 if (likely((skdev->state != SKD_DRVR_STATE_LOAD) && 2996 (skdev->state != SKD_DRVR_STATE_STOPPING))) { 2997 if (intstat & FIT_ISH_COMPLETION_POSTED) { 2998 /* 2999 * If we have already deferred completion 3000 * processing, don't bother running it again 3001 */ 3002 if (deferred == 0) 3003 deferred = 3004 skd_isr_completion_posted(skdev, 3005 skd_isr_comp_limit, &flush_enqueued); 3006 } 3007 3008 if (intstat & FIT_ISH_FW_STATE_CHANGE) { 3009 skd_isr_fwstate(skdev); 3010 if (skdev->state == SKD_DRVR_STATE_FAULT || 3011 skdev->state == 3012 SKD_DRVR_STATE_DISAPPEARED) { 3013 spin_unlock(&skdev->lock); 3014 return rc; 3015 } 3016 } 3017 3018 if (intstat & FIT_ISH_MSG_FROM_DEV) 3019 skd_isr_msg_from_dev(skdev); 3020 } 3021 } 3022 3023 if (unlikely(flush_enqueued)) 3024 skd_request_fn(skdev->queue); 3025 3026 if (deferred) 3027 schedule_work(&skdev->completion_worker); 3028 else if (!flush_enqueued) 3029 skd_request_fn(skdev->queue); 3030 3031 spin_unlock(&skdev->lock); 3032 3033 return rc; 3034} 3035 3036static void skd_drive_fault(struct skd_device *skdev) 3037{ 3038 skdev->state = SKD_DRVR_STATE_FAULT; 3039 pr_err("(%s): Drive FAULT\n", skd_name(skdev)); 3040} 3041 3042static void skd_drive_disappeared(struct skd_device *skdev) 3043{ 3044 skdev->state = SKD_DRVR_STATE_DISAPPEARED; 3045 pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev)); 3046} 3047 3048static void skd_isr_fwstate(struct skd_device *skdev) 3049{ 3050 u32 sense; 3051 u32 state; 3052 u32 mtd; 3053 int prev_driver_state = skdev->state; 3054 3055 sense = SKD_READL(skdev, FIT_STATUS); 3056 state = sense & FIT_SR_DRIVE_STATE_MASK; 3057 3058 pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n", 3059 skd_name(skdev), 3060 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, 3061 skd_drive_state_to_str(state), state); 3062 3063 skdev->drive_state = state; 3064 3065 switch (skdev->drive_state) { 3066 case FIT_SR_DRIVE_INIT: 3067 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) { 3068 skd_disable_interrupts(skdev); 3069 break; 3070 } 3071 if (skdev->state == SKD_DRVR_STATE_RESTARTING) 3072 skd_recover_requests(skdev, 0); 3073 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) { 3074 skdev->timer_countdown = SKD_STARTING_TIMO; 3075 skdev->state = SKD_DRVR_STATE_STARTING; 3076 skd_soft_reset(skdev); 3077 break; 3078 } 3079 mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0); 3080 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 3081 skdev->last_mtd = mtd; 3082 break; 3083 3084 case FIT_SR_DRIVE_ONLINE: 3085 skdev->cur_max_queue_depth = skd_max_queue_depth; 3086 if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth) 3087 skdev->cur_max_queue_depth = skdev->dev_max_queue_depth; 3088 3089 skdev->queue_low_water_mark = 3090 skdev->cur_max_queue_depth * 2 / 3 + 1; 3091 if (skdev->queue_low_water_mark < 1) 3092 skdev->queue_low_water_mark = 1; 3093 pr_info( 3094 "(%s): Queue depth limit=%d dev=%d lowat=%d\n", 3095 skd_name(skdev), 3096 skdev->cur_max_queue_depth, 3097 skdev->dev_max_queue_depth, skdev->queue_low_water_mark); 3098 3099 skd_refresh_device_data(skdev); 3100 break; 3101 3102 case FIT_SR_DRIVE_BUSY: 3103 skdev->state = SKD_DRVR_STATE_BUSY; 3104 skdev->timer_countdown = SKD_BUSY_TIMO; 3105 skd_quiesce_dev(skdev); 3106 break; 3107 case FIT_SR_DRIVE_BUSY_SANITIZE: 3108 /* set timer for 3 seconds, we'll abort any unfinished 
3109 * commands after that expires 3110 */ 3111 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; 3112 skdev->timer_countdown = SKD_TIMER_SECONDS(3); 3113 blk_start_queue(skdev->queue); 3114 break; 3115 case FIT_SR_DRIVE_BUSY_ERASE: 3116 skdev->state = SKD_DRVR_STATE_BUSY_ERASE; 3117 skdev->timer_countdown = SKD_BUSY_TIMO; 3118 break; 3119 case FIT_SR_DRIVE_OFFLINE: 3120 skdev->state = SKD_DRVR_STATE_IDLE; 3121 break; 3122 case FIT_SR_DRIVE_SOFT_RESET: 3123 switch (skdev->state) { 3124 case SKD_DRVR_STATE_STARTING: 3125 case SKD_DRVR_STATE_RESTARTING: 3126 /* Expected by a caller of skd_soft_reset() */ 3127 break; 3128 default: 3129 skdev->state = SKD_DRVR_STATE_RESTARTING; 3130 break; 3131 } 3132 break; 3133 case FIT_SR_DRIVE_FW_BOOTING: 3134 pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n", 3135 skdev->name, __func__, __LINE__, skdev->name); 3136 skdev->state = SKD_DRVR_STATE_WAIT_BOOT; 3137 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO; 3138 break; 3139 3140 case FIT_SR_DRIVE_DEGRADED: 3141 case FIT_SR_PCIE_LINK_DOWN: 3142 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD: 3143 break; 3144 3145 case FIT_SR_DRIVE_FAULT: 3146 skd_drive_fault(skdev); 3147 skd_recover_requests(skdev, 0); 3148 blk_start_queue(skdev->queue); 3149 break; 3150 3151 /* PCIe bus returned all Fs? */ 3152 case 0xFF: 3153 pr_info("(%s): state=0x%x sense=0x%x\n", 3154 skd_name(skdev), state, sense); 3155 skd_drive_disappeared(skdev); 3156 skd_recover_requests(skdev, 0); 3157 blk_start_queue(skdev->queue); 3158 break; 3159 default: 3160 /* 3161 * Unknown FW state. Wait for a state we recognize. 3162 */ 3163 break; 3164 } 3165 pr_err("(%s): Driver state %s(%d)=>%s(%d)\n", 3166 skd_name(skdev), 3167 skd_skdev_state_to_str(prev_driver_state), prev_driver_state, 3168 skd_skdev_state_to_str(skdev->state), skdev->state); 3169} 3170 3171static void skd_recover_requests(struct skd_device *skdev, int requeue) 3172{ 3173 int i; 3174 3175 for (i = 0; i < skdev->num_req_context; i++) { 3176 struct skd_request_context *skreq = &skdev->skreq_table[i]; 3177 3178 if (skreq->state == SKD_REQ_STATE_BUSY) { 3179 skd_log_skreq(skdev, skreq, "recover"); 3180 3181 SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0); 3182 SKD_ASSERT(skreq->req != NULL); 3183 3184 /* Release DMA resources for the request.
*/ 3185 if (skreq->n_sg > 0) 3186 skd_postop_sg_list(skdev, skreq); 3187 3188 if (requeue && 3189 (unsigned long) ++skreq->req->special < 3190 SKD_MAX_RETRIES) 3191 blk_requeue_request(skdev->queue, skreq->req); 3192 else 3193 skd_end_request(skdev, skreq, -EIO); 3194 3195 skreq->req = NULL; 3196 3197 skreq->state = SKD_REQ_STATE_IDLE; 3198 skreq->id += SKD_ID_INCR; 3199 } 3200 if (i > 0) 3201 skreq[-1].next = skreq; 3202 skreq->next = NULL; 3203 } 3204 skdev->skreq_free_list = skdev->skreq_table; 3205 3206 for (i = 0; i < skdev->num_fitmsg_context; i++) { 3207 struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i]; 3208 3209 if (skmsg->state == SKD_MSG_STATE_BUSY) { 3210 skd_log_skmsg(skdev, skmsg, "salvaged"); 3211 SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0); 3212 skmsg->state = SKD_MSG_STATE_IDLE; 3213 skmsg->id += SKD_ID_INCR; 3214 } 3215 if (i > 0) 3216 skmsg[-1].next = skmsg; 3217 skmsg->next = NULL; 3218 } 3219 skdev->skmsg_free_list = skdev->skmsg_table; 3220 3221 for (i = 0; i < skdev->n_special; i++) { 3222 struct skd_special_context *skspcl = &skdev->skspcl_table[i]; 3223 3224 /* If orphaned, reclaim it because it has already been reported 3225 * to the process as an error (it was just waiting for 3226 * a completion that didn't come, and now it will never come) 3227 * If busy, change to a state that will cause it to error 3228 * out in the wait routine and let it do the normal 3229 * reporting and reclaiming 3230 */ 3231 if (skspcl->req.state == SKD_REQ_STATE_BUSY) { 3232 if (skspcl->orphaned) { 3233 pr_debug("%s:%s:%d orphaned %p\n", 3234 skdev->name, __func__, __LINE__, 3235 skspcl); 3236 skd_release_special(skdev, skspcl); 3237 } else { 3238 pr_debug("%s:%s:%d not orphaned %p\n", 3239 skdev->name, __func__, __LINE__, 3240 skspcl); 3241 skspcl->req.state = SKD_REQ_STATE_ABORTED; 3242 } 3243 } 3244 } 3245 skdev->skspcl_free_list = skdev->skspcl_table; 3246 3247 for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++) 3248 skdev->timeout_slot[i] = 0; 3249 3250 skdev->in_flight = 0; 3251} 3252 3253static void skd_isr_msg_from_dev(struct skd_device *skdev) 3254{ 3255 u32 mfd; 3256 u32 mtd; 3257 u32 data; 3258 3259 mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); 3260 3261 pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n", 3262 skdev->name, __func__, __LINE__, mfd, skdev->last_mtd); 3263 3264 /* ignore any mtd that is an ack for something we didn't send */ 3265 if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd)) 3266 return; 3267 3268 switch (FIT_MXD_TYPE(mfd)) { 3269 case FIT_MTD_FITFW_INIT: 3270 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd); 3271 3272 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) { 3273 pr_err("(%s): protocol mismatch\n", 3274 skdev->name); 3275 pr_err("(%s): got=%d support=%d\n", 3276 skdev->name, skdev->proto_ver, 3277 FIT_PROTOCOL_VERSION_1); 3278 pr_err("(%s): please upgrade driver\n", 3279 skdev->name); 3280 skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH; 3281 skd_soft_reset(skdev); 3282 break; 3283 } 3284 mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0); 3285 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 3286 skdev->last_mtd = mtd; 3287 break; 3288 3289 case FIT_MTD_GET_CMDQ_DEPTH: 3290 skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd); 3291 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0, 3292 SKD_N_COMPLETION_ENTRY); 3293 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 3294 skdev->last_mtd = mtd; 3295 break; 3296 3297 case FIT_MTD_SET_COMPQ_DEPTH: 3298 SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG); 3299 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0); 3300 
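			/*
			 * (Sketch of the init handshake implied by the cases
			 * in this switch, added for orientation: FITFW_INIT ->
			 * GET_CMDQ_DEPTH -> SET_COMPQ_DEPTH -> SET_COMPQ_ADDR
			 * -> CMD_LOG_HOST_ID -> CMD_LOG_TIME_STAMP_LO/HI ->
			 * ARM_QUEUE; each mtd written here is acknowledged by
			 * the next FIT_MSG_FROM_DEVICE interrupt, and last_mtd
			 * is used to drop stale acks.)
			 */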
SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 3301 skdev->last_mtd = mtd; 3302 break; 3303 3304 case FIT_MTD_SET_COMPQ_ADDR: 3305 skd_reset_skcomp(skdev); 3306 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno); 3307 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 3308 skdev->last_mtd = mtd; 3309 break; 3310 3311 case FIT_MTD_CMD_LOG_HOST_ID: 3312 skdev->connect_time_stamp = get_seconds(); 3313 data = skdev->connect_time_stamp & 0xFFFF; 3314 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data); 3315 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 3316 skdev->last_mtd = mtd; 3317 break; 3318 3319 case FIT_MTD_CMD_LOG_TIME_STAMP_LO: 3320 skdev->drive_jiffies = FIT_MXD_DATA(mfd); 3321 data = (skdev->connect_time_stamp >> 16) & 0xFFFF; 3322 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data); 3323 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 3324 skdev->last_mtd = mtd; 3325 break; 3326 3327 case FIT_MTD_CMD_LOG_TIME_STAMP_HI: 3328 skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16); 3329 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0); 3330 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 3331 skdev->last_mtd = mtd; 3332 3333 pr_err("(%s): Time sync driver=0x%x device=0x%x\n", 3334 skd_name(skdev), 3335 skdev->connect_time_stamp, skdev->drive_jiffies); 3336 break; 3337 3338 case FIT_MTD_ARM_QUEUE: 3339 skdev->last_mtd = 0; 3340 /* 3341 * State should be, or soon will be, FIT_SR_DRIVE_ONLINE. 3342 */ 3343 break; 3344 3345 default: 3346 break; 3347 } 3348} 3349 3350static void skd_disable_interrupts(struct skd_device *skdev) 3351{ 3352 u32 sense; 3353 3354 sense = SKD_READL(skdev, FIT_CONTROL); 3355 sense &= ~FIT_CR_ENABLE_INTERRUPTS; 3356 SKD_WRITEL(skdev, sense, FIT_CONTROL); 3357 pr_debug("%s:%s:%d sense 0x%x\n", 3358 skdev->name, __func__, __LINE__, sense); 3359 3360 /* Note that all 1s are written. A 1-bit means 3361 * disable, a 0 means enable. 3362 */ 3363 SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST); 3364} 3365 3366static void skd_enable_interrupts(struct skd_device *skdev) 3367{ 3368 u32 val; 3369 3370 /* unmask interrupts first */ 3371 val = FIT_ISH_FW_STATE_CHANGE + 3372 FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV; 3373 3374 /* Note that the complement of mask is written. A 1-bit means 3375 * disable, a 0 means enable.
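 * (Added example: with val = FIT_ISH_FW_STATE_CHANGE +
 * FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV as set above,
 * writing ~val leaves exactly those three sources unmasked and
 * disables every other interrupt bit.)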
*/ 3376 SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST); 3377 pr_debug("%s:%s:%d interrupt mask=0x%x\n", 3378 skdev->name, __func__, __LINE__, ~val); 3379 3380 val = SKD_READL(skdev, FIT_CONTROL); 3381 val |= FIT_CR_ENABLE_INTERRUPTS; 3382 pr_debug("%s:%s:%d control=0x%x\n", 3383 skdev->name, __func__, __LINE__, val); 3384 SKD_WRITEL(skdev, val, FIT_CONTROL); 3385} 3386 3387/* 3388 ***************************************************************************** 3389 * START, STOP, RESTART, QUIESCE, UNQUIESCE 3390 ***************************************************************************** 3391 */ 3392 3393static void skd_soft_reset(struct skd_device *skdev) 3394{ 3395 u32 val; 3396 3397 val = SKD_READL(skdev, FIT_CONTROL); 3398 val |= (FIT_CR_SOFT_RESET); 3399 pr_debug("%s:%s:%d control=0x%x\n", 3400 skdev->name, __func__, __LINE__, val); 3401 SKD_WRITEL(skdev, val, FIT_CONTROL); 3402} 3403 3404static void skd_start_device(struct skd_device *skdev) 3405{ 3406 unsigned long flags; 3407 u32 sense; 3408 u32 state; 3409 3410 spin_lock_irqsave(&skdev->lock, flags); 3411 3412 /* ack all ghost interrupts */ 3413 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); 3414 3415 sense = SKD_READL(skdev, FIT_STATUS); 3416 3417 pr_debug("%s:%s:%d initial status=0x%x\n", 3418 skdev->name, __func__, __LINE__, sense); 3419 3420 state = sense & FIT_SR_DRIVE_STATE_MASK; 3421 skdev->drive_state = state; 3422 skdev->last_mtd = 0; 3423 3424 skdev->state = SKD_DRVR_STATE_STARTING; 3425 skdev->timer_countdown = SKD_STARTING_TIMO; 3426 3427 skd_enable_interrupts(skdev); 3428 3429 switch (skdev->drive_state) { 3430 case FIT_SR_DRIVE_OFFLINE: 3431 pr_err("(%s): Drive offline...\n", skd_name(skdev)); 3432 break; 3433 3434 case FIT_SR_DRIVE_FW_BOOTING: 3435 pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n", 3436 skdev->name, __func__, __LINE__, skdev->name); 3437 skdev->state = SKD_DRVR_STATE_WAIT_BOOT; 3438 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO; 3439 break; 3440 3441 case FIT_SR_DRIVE_BUSY_SANITIZE: 3442 pr_info("(%s): Start: BUSY_SANITIZE\n", 3443 skd_name(skdev)); 3444 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; 3445 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; 3446 break; 3447 3448 case FIT_SR_DRIVE_BUSY_ERASE: 3449 pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev)); 3450 skdev->state = SKD_DRVR_STATE_BUSY_ERASE; 3451 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; 3452 break; 3453 3454 case FIT_SR_DRIVE_INIT: 3455 case FIT_SR_DRIVE_ONLINE: 3456 skd_soft_reset(skdev); 3457 break; 3458 3459 case FIT_SR_DRIVE_BUSY: 3460 pr_err("(%s): Drive Busy...\n", skd_name(skdev)); 3461 skdev->state = SKD_DRVR_STATE_BUSY; 3462 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; 3463 break; 3464 3465 case FIT_SR_DRIVE_SOFT_RESET: 3466 pr_err("(%s) drive soft reset in prog\n", 3467 skd_name(skdev)); 3468 break; 3469 3470 case FIT_SR_DRIVE_FAULT: 3471 /* Fault state is bad...soft reset won't do it... 3472 * Hard reset, maybe, but does it work on device? 3473 * For now, just fault so the system doesn't hang. 3474 */ 3475 skd_drive_fault(skdev); 3476 /*start the queue so we can respond with error to requests */ 3477 pr_debug("%s:%s:%d starting %s queue\n", 3478 skdev->name, __func__, __LINE__, skdev->name); 3479 blk_start_queue(skdev->queue); 3480 skdev->gendisk_on = -1; 3481 wake_up_interruptible(&skdev->waitq); 3482 break; 3483 3484 case 0xFF: 3485 /* Most likely the device isn't there or isn't responding 3486 * to the BAR1 addresses. 
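 * (Background note, added: reads returning all Fs are the usual
 * PCIe signature of a device that has dropped off the bus, which
 * is why the 0xFF drive state maps to DISAPPEARED here and in
 * skd_isr_fwstate() above.)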
*/ 3487 skd_drive_disappeared(skdev); 3488 /*start the queue so we can respond with error to requests */ 3489 pr_debug("%s:%s:%d starting %s queue to error-out reqs\n", 3490 skdev->name, __func__, __LINE__, skdev->name); 3491 blk_start_queue(skdev->queue); 3492 skdev->gendisk_on = -1; 3493 wake_up_interruptible(&skdev->waitq); 3494 break; 3495 3496 default: 3497 pr_err("(%s) Start: unknown state %x\n", 3498 skd_name(skdev), skdev->drive_state); 3499 break; 3500 } 3501 3502 state = SKD_READL(skdev, FIT_CONTROL); 3503 pr_debug("%s:%s:%d FIT Control Status=0x%x\n", 3504 skdev->name, __func__, __LINE__, state); 3505 3506 state = SKD_READL(skdev, FIT_INT_STATUS_HOST); 3507 pr_debug("%s:%s:%d Intr Status=0x%x\n", 3508 skdev->name, __func__, __LINE__, state); 3509 3510 state = SKD_READL(skdev, FIT_INT_MASK_HOST); 3511 pr_debug("%s:%s:%d Intr Mask=0x%x\n", 3512 skdev->name, __func__, __LINE__, state); 3513 3514 state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); 3515 pr_debug("%s:%s:%d Msg from Dev=0x%x\n", 3516 skdev->name, __func__, __LINE__, state); 3517 3518 state = SKD_READL(skdev, FIT_HW_VERSION); 3519 pr_debug("%s:%s:%d HW version=0x%x\n", 3520 skdev->name, __func__, __LINE__, state); 3521 3522 spin_unlock_irqrestore(&skdev->lock, flags); 3523} 3524 3525static void skd_stop_device(struct skd_device *skdev) 3526{ 3527 unsigned long flags; 3528 struct skd_special_context *skspcl = &skdev->internal_skspcl; 3529 u32 dev_state; 3530 int i; 3531 3532 spin_lock_irqsave(&skdev->lock, flags); 3533 3534 if (skdev->state != SKD_DRVR_STATE_ONLINE) { 3535 pr_err("(%s): skd_stop_device not online no sync\n", 3536 skd_name(skdev)); 3537 goto stop_out; 3538 } 3539 3540 if (skspcl->req.state != SKD_REQ_STATE_IDLE) { 3541 pr_err("(%s): skd_stop_device no special\n", 3542 skd_name(skdev)); 3543 goto stop_out; 3544 } 3545 3546 skdev->state = SKD_DRVR_STATE_SYNCING; 3547 skdev->sync_done = 0; 3548 3549 skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE); 3550 3551 spin_unlock_irqrestore(&skdev->lock, flags); 3552 3553 wait_event_interruptible_timeout(skdev->waitq, 3554 (skdev->sync_done), (10 * HZ)); 3555 3556 spin_lock_irqsave(&skdev->lock, flags); 3557 3558 switch (skdev->sync_done) { 3559 case 0: 3560 pr_err("(%s): skd_stop_device no sync\n", 3561 skd_name(skdev)); 3562 break; 3563 case 1: 3564 pr_err("(%s): skd_stop_device sync done\n", 3565 skd_name(skdev)); 3566 break; 3567 default: 3568 pr_err("(%s): skd_stop_device sync error\n", 3569 skd_name(skdev)); 3570 } 3571 3572stop_out: 3573 skdev->state = SKD_DRVR_STATE_STOPPING; 3574 spin_unlock_irqrestore(&skdev->lock, flags); 3575 3576 skd_kill_timer(skdev); 3577 3578 spin_lock_irqsave(&skdev->lock, flags); 3579 skd_disable_interrupts(skdev); 3580 3581 /* ensure all ints on device are cleared */ 3582 /* soft reset the device to unload with a clean slate */ 3583 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); 3584 SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL); 3585 3586 spin_unlock_irqrestore(&skdev->lock, flags); 3587 3588 /* poll every 100ms, 1 second timeout */ 3589 for (i = 0; i < 10; i++) { 3590 dev_state = 3591 SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK; 3592 if (dev_state == FIT_SR_DRIVE_INIT) 3593 break; 3594 set_current_state(TASK_INTERRUPTIBLE); 3595 schedule_timeout(msecs_to_jiffies(100)); 3596 } 3597 3598 if (dev_state != FIT_SR_DRIVE_INIT) 3599 pr_err("(%s): skd_stop_device state error 0x%02x\n", 3600 skd_name(skdev), dev_state); 3601} 3602 3603/* assume spinlock is held */ 3604static void skd_restart_device(struct 
skd_device *skdev) 3605{ 3606 u32 state; 3607 3608 /* ack all ghost interrupts */ 3609 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); 3610 3611 state = SKD_READL(skdev, FIT_STATUS); 3612 3613 pr_debug("%s:%s:%d drive status=0x%x\n", 3614 skdev->name, __func__, __LINE__, state); 3615 3616 state &= FIT_SR_DRIVE_STATE_MASK; 3617 skdev->drive_state = state; 3618 skdev->last_mtd = 0; 3619 3620 skdev->state = SKD_DRVR_STATE_RESTARTING; 3621 skdev->timer_countdown = SKD_RESTARTING_TIMO; 3622 3623 skd_soft_reset(skdev); 3624} 3625 3626/* assume spinlock is held */ 3627static int skd_quiesce_dev(struct skd_device *skdev) 3628{ 3629 int rc = 0; 3630 3631 switch (skdev->state) { 3632 case SKD_DRVR_STATE_BUSY: 3633 case SKD_DRVR_STATE_BUSY_IMMINENT: 3634 pr_debug("%s:%s:%d stopping %s queue\n", 3635 skdev->name, __func__, __LINE__, skdev->name); 3636 blk_stop_queue(skdev->queue); 3637 break; 3638 case SKD_DRVR_STATE_ONLINE: 3639 case SKD_DRVR_STATE_STOPPING: 3640 case SKD_DRVR_STATE_SYNCING: 3641 case SKD_DRVR_STATE_PAUSING: 3642 case SKD_DRVR_STATE_PAUSED: 3643 case SKD_DRVR_STATE_STARTING: 3644 case SKD_DRVR_STATE_RESTARTING: 3645 case SKD_DRVR_STATE_RESUMING: 3646 default: 3647 rc = -EINVAL; 3648 pr_debug("%s:%s:%d state [%d] not implemented\n", 3649 skdev->name, __func__, __LINE__, skdev->state); 3650 } 3651 return rc; 3652} 3653 3654/* assume spinlock is held */ 3655static int skd_unquiesce_dev(struct skd_device *skdev) 3656{ 3657 int prev_driver_state = skdev->state; 3658 3659 skd_log_skdev(skdev, "unquiesce"); 3660 if (skdev->state == SKD_DRVR_STATE_ONLINE) { 3661 pr_debug("%s:%s:%d **** device already ONLINE\n", 3662 skdev->name, __func__, __LINE__); 3663 return 0; 3664 } 3665 if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) { 3666 /* 3667 * If there has been a state change to other than 3668 * ONLINE, we will rely on the controller state change 3669 * to come back online and restart the queue. 3670 * The BUSY state means that the driver is ready to 3671 * continue normal processing but is waiting for the 3672 * controller to become available. 3673 */ 3674 skdev->state = SKD_DRVR_STATE_BUSY; 3675 pr_debug("%s:%s:%d drive BUSY state\n", 3676 skdev->name, __func__, __LINE__); 3677 return 0; 3678 } 3679 3680 /* 3681 * Drive has just come online, driver is either in startup, 3682 * paused performing a task, or busy waiting for hardware.
3683 */ 3684 switch (skdev->state) { 3685 case SKD_DRVR_STATE_PAUSED: 3686 case SKD_DRVR_STATE_BUSY: 3687 case SKD_DRVR_STATE_BUSY_IMMINENT: 3688 case SKD_DRVR_STATE_BUSY_ERASE: 3689 case SKD_DRVR_STATE_STARTING: 3690 case SKD_DRVR_STATE_RESTARTING: 3691 case SKD_DRVR_STATE_FAULT: 3692 case SKD_DRVR_STATE_IDLE: 3693 case SKD_DRVR_STATE_LOAD: 3694 skdev->state = SKD_DRVR_STATE_ONLINE; 3695 pr_err("(%s): Driver state %s(%d)=>%s(%d)\n", 3696 skd_name(skdev), 3697 skd_skdev_state_to_str(prev_driver_state), 3698 prev_driver_state, skd_skdev_state_to_str(skdev->state), 3699 skdev->state); 3700 pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n", 3701 skdev->name, __func__, __LINE__); 3702 pr_debug("%s:%s:%d starting %s queue\n", 3703 skdev->name, __func__, __LINE__, skdev->name); 3704 pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev)); 3705 blk_start_queue(skdev->queue); 3706 skdev->gendisk_on = 1; 3707 wake_up_interruptible(&skdev->waitq); 3708 break; 3709 3710 case SKD_DRVR_STATE_DISAPPEARED: 3711 default: 3712 pr_debug("%s:%s:%d **** driver state %d, not implemented \n", 3713 skdev->name, __func__, __LINE__, 3714 skdev->state); 3715 return -EBUSY; 3716 } 3717 return 0; 3718} 3719 3720/* 3721 ***************************************************************************** 3722 * PCIe MSI/MSI-X INTERRUPT HANDLERS 3723 ***************************************************************************** 3724 */ 3725 3726static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data) 3727{ 3728 struct skd_device *skdev = skd_host_data; 3729 unsigned long flags; 3730 3731 spin_lock_irqsave(&skdev->lock, flags); 3732 pr_debug("%s:%s:%d MSIX = 0x%x\n", 3733 skdev->name, __func__, __LINE__, 3734 SKD_READL(skdev, FIT_INT_STATUS_HOST)); 3735 pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev), 3736 irq, SKD_READL(skdev, FIT_INT_STATUS_HOST)); 3737 SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST); 3738 spin_unlock_irqrestore(&skdev->lock, flags); 3739 return IRQ_HANDLED; 3740} 3741 3742static irqreturn_t skd_statec_isr(int irq, void *skd_host_data) 3743{ 3744 struct skd_device *skdev = skd_host_data; 3745 unsigned long flags; 3746 3747 spin_lock_irqsave(&skdev->lock, flags); 3748 pr_debug("%s:%s:%d MSIX = 0x%x\n", 3749 skdev->name, __func__, __LINE__, 3750 SKD_READL(skdev, FIT_INT_STATUS_HOST)); 3751 SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST); 3752 skd_isr_fwstate(skdev); 3753 spin_unlock_irqrestore(&skdev->lock, flags); 3754 return IRQ_HANDLED; 3755} 3756 3757static irqreturn_t skd_comp_q(int irq, void *skd_host_data) 3758{ 3759 struct skd_device *skdev = skd_host_data; 3760 unsigned long flags; 3761 int flush_enqueued = 0; 3762 int deferred; 3763 3764 spin_lock_irqsave(&skdev->lock, flags); 3765 pr_debug("%s:%s:%d MSIX = 0x%x\n", 3766 skdev->name, __func__, __LINE__, 3767 SKD_READL(skdev, FIT_INT_STATUS_HOST)); 3768 SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST); 3769 deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit, 3770 &flush_enqueued); 3771 if (flush_enqueued) 3772 skd_request_fn(skdev->queue); 3773 3774 if (deferred) 3775 schedule_work(&skdev->completion_worker); 3776 else if (!flush_enqueued) 3777 skd_request_fn(skdev->queue); 3778 3779 spin_unlock_irqrestore(&skdev->lock, flags); 3780 3781 return IRQ_HANDLED; 3782} 3783 3784static irqreturn_t skd_msg_isr(int irq, void *skd_host_data) 3785{ 3786 struct skd_device *skdev = skd_host_data; 3787 unsigned long flags; 3788 3789 spin_lock_irqsave(&skdev->lock, flags); 
3790 pr_debug("%s:%s:%d MSIX = 0x%x\n", 3791 skdev->name, __func__, __LINE__, 3792 SKD_READL(skdev, FIT_INT_STATUS_HOST)); 3793 SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST); 3794 skd_isr_msg_from_dev(skdev); 3795 spin_unlock_irqrestore(&skdev->lock, flags); 3796 return IRQ_HANDLED; 3797} 3798 3799static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data) 3800{ 3801 struct skd_device *skdev = skd_host_data; 3802 unsigned long flags; 3803 3804 spin_lock_irqsave(&skdev->lock, flags); 3805 pr_debug("%s:%s:%d MSIX = 0x%x\n", 3806 skdev->name, __func__, __LINE__, 3807 SKD_READL(skdev, FIT_INT_STATUS_HOST)); 3808 SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST); 3809 spin_unlock_irqrestore(&skdev->lock, flags); 3810 return IRQ_HANDLED; 3811} 3812 3813/* 3814 ***************************************************************************** 3815 * PCIe MSI/MSI-X SETUP 3816 ***************************************************************************** 3817 */ 3818 3819struct skd_msix_entry { 3820 char isr_name[30]; 3821}; 3822 3823struct skd_init_msix_entry { 3824 const char *name; 3825 irq_handler_t handler; 3826}; 3827 3828#define SKD_MAX_MSIX_COUNT 13 3829#define SKD_MIN_MSIX_COUNT 7 3830#define SKD_BASE_MSIX_IRQ 4 3831 3832static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = { 3833 { "(DMA 0)", skd_reserved_isr }, 3834 { "(DMA 1)", skd_reserved_isr }, 3835 { "(DMA 2)", skd_reserved_isr }, 3836 { "(DMA 3)", skd_reserved_isr }, 3837 { "(State Change)", skd_statec_isr }, 3838 { "(COMPL_Q)", skd_comp_q }, 3839 { "(MSG)", skd_msg_isr }, 3840 { "(Reserved)", skd_reserved_isr }, 3841 { "(Reserved)", skd_reserved_isr }, 3842 { "(Queue Full 0)", skd_qfull_isr }, 3843 { "(Queue Full 1)", skd_qfull_isr }, 3844 { "(Queue Full 2)", skd_qfull_isr }, 3845 { "(Queue Full 3)", skd_qfull_isr }, 3846}; 3847 3848static int skd_acquire_msix(struct skd_device *skdev) 3849{ 3850 int i, rc; 3851 struct pci_dev *pdev = skdev->pdev; 3852 3853 rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT, 3854 PCI_IRQ_MSIX); 3855 if (rc < 0) { 3856 pr_err("(%s): failed to enable MSI-X %d\n", 3857 skd_name(skdev), rc); 3858 goto out; 3859 } 3860 3861 skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT, 3862 sizeof(struct skd_msix_entry), GFP_KERNEL); 3863 if (!skdev->msix_entries) { 3864 rc = -ENOMEM; 3865 pr_err("(%s): msix table allocation error\n", 3866 skd_name(skdev)); 3867 goto out; 3868 } 3869 3870 /* Enable MSI-X vectors for the base queue */ 3871 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) { 3872 struct skd_msix_entry *qentry = &skdev->msix_entries[i]; 3873 3874 snprintf(qentry->isr_name, sizeof(qentry->isr_name), 3875 "%s%d-msix %s", DRV_NAME, skdev->devno, 3876 msix_entries[i].name); 3877 3878 rc = devm_request_irq(&skdev->pdev->dev, 3879 pci_irq_vector(skdev->pdev, i), 3880 msix_entries[i].handler, 0, 3881 qentry->isr_name, skdev); 3882 if (rc) { 3883 pr_err("(%s): Unable to register(%d) MSI-X " 3884 "handler %d: %s\n", 3885 skd_name(skdev), rc, i, qentry->isr_name); 3886 goto msix_out; 3887 } 3888 } 3889 3890 pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n", 3891 skdev->name, __func__, __LINE__, 3892 pci_name(pdev), skdev->name, SKD_MAX_MSIX_COUNT); 3893 return 0; 3894 3895msix_out: 3896 while (--i >= 0) 3897 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev); 3898out: 3899 kfree(skdev->msix_entries); 3900 skdev->msix_entries = NULL; 3901 return rc; 3902} 3903 3904static int skd_acquire_irq(struct skd_device *skdev) 3905{ 3906 struct pci_dev *pdev = 
skdev->pdev; 3907 unsigned int irq_flag = PCI_IRQ_LEGACY; 3908 int rc; 3909 3910 if (skd_isr_type == SKD_IRQ_MSIX) { 3911 rc = skd_acquire_msix(skdev); 3912 if (!rc) 3913 return 0; 3914 3915 pr_err("(%s): failed to enable MSI-X, re-trying with MSI %d\n", 3916 skd_name(skdev), rc); 3917 } 3918 3919 snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME, 3920 skdev->devno); 3921 3922 if (skd_isr_type != SKD_IRQ_LEGACY) 3923 irq_flag |= PCI_IRQ_MSI; 3924 rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag); 3925 if (rc < 0) { 3926 pr_err("(%s): failed to allocate the MSI interrupt %d\n", 3927 skd_name(skdev), rc); 3928 return rc; 3929 } 3930 3931 rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 3932 pdev->msi_enabled ? 0 : IRQF_SHARED, 3933 skdev->isr_name, skdev); 3934 if (rc) { 3935 pci_free_irq_vectors(pdev); 3936 pr_err("(%s): failed to allocate interrupt %d\n", 3937 skd_name(skdev), rc); 3938 return rc; 3939 } 3940 3941 return 0; 3942} 3943 3944static void skd_release_irq(struct skd_device *skdev) 3945{ 3946 struct pci_dev *pdev = skdev->pdev; 3947 3948 if (skdev->msix_entries) { 3949 int i; 3950 3951 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) { 3952 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), 3953 skdev); 3954 } 3955 3956 kfree(skdev->msix_entries); 3957 skdev->msix_entries = NULL; 3958 } else { 3959 devm_free_irq(&pdev->dev, pdev->irq, skdev); 3960 } 3961 3962 pci_free_irq_vectors(pdev); 3963} 3964 3965/* 3966 ***************************************************************************** 3967 * CONSTRUCT 3968 ***************************************************************************** 3969 */ 3970 3971static int skd_cons_skcomp(struct skd_device *skdev) 3972{ 3973 int rc = 0; 3974 struct fit_completion_entry_v1 *skcomp; 3975 u32 nbytes; 3976 3977 nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY; 3978 nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY; 3979 3980 pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n", 3981 skdev->name, __func__, __LINE__, 3982 nbytes, SKD_N_COMPLETION_ENTRY); 3983 3984 skcomp = pci_zalloc_consistent(skdev->pdev, nbytes, 3985 &skdev->cq_dma_address); 3986 3987 if (skcomp == NULL) { 3988 rc = -ENOMEM; 3989 goto err_out; 3990 } 3991 3992 skdev->skcomp_table = skcomp; 3993 skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp + 3994 sizeof(*skcomp) * 3995 SKD_N_COMPLETION_ENTRY); 3996 3997err_out: 3998 return rc; 3999} 4000 4001static int skd_cons_skmsg(struct skd_device *skdev) 4002{ 4003 int rc = 0; 4004 u32 i; 4005 4006 pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n", 4007 skdev->name, __func__, __LINE__, 4008 sizeof(struct skd_fitmsg_context), 4009 skdev->num_fitmsg_context, 4010 sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context); 4011 4012 skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context) 4013 *skdev->num_fitmsg_context, GFP_KERNEL); 4014 if (skdev->skmsg_table == NULL) { 4015 rc = -ENOMEM; 4016 goto err_out; 4017 } 4018 4019 for (i = 0; i < skdev->num_fitmsg_context; i++) { 4020 struct skd_fitmsg_context *skmsg; 4021 4022 skmsg = &skdev->skmsg_table[i]; 4023 4024 skmsg->id = i + SKD_ID_FIT_MSG; 4025 4026 skmsg->state = SKD_MSG_STATE_IDLE; 4027 skmsg->msg_buf = pci_alloc_consistent(skdev->pdev, 4028 SKD_N_FITMSG_BYTES + 64, 4029 &skmsg->mb_dma_address); 4030 4031 if (skmsg->msg_buf == NULL) { 4032 rc = -ENOMEM; 4033 goto err_out; 4034 } 4035 4036 skmsg->offset = (u32)((u64)skmsg->msg_buf & 4037 (~FIT_QCMD_BASE_ADDRESS_MASK)); 4038 
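		/*
		 * (Illustrative note, assuming FIT_QCMD_BASE_ADDRESS_MASK
		 * clears the low six bits: the buffer was allocated with 64
		 * bytes of slack, and the next few lines round msg_buf and
		 * mb_dma_address up to the next 64-byte boundary, with the
		 * discarded low bits remembered in skmsg->offset.)
		 */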
skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK; 4039 skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf & 4040 FIT_QCMD_BASE_ADDRESS_MASK); 4041 skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK; 4042 skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK; 4043 memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES); 4044 4045 skmsg->next = &skmsg[1]; 4046 } 4047 4048 /* Free list is in order starting with the 0th entry. */ 4049 skdev->skmsg_table[i - 1].next = NULL; 4050 skdev->skmsg_free_list = skdev->skmsg_table; 4051 4052err_out: 4053 return rc; 4054} 4055 4056static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev, 4057 u32 n_sg, 4058 dma_addr_t *ret_dma_addr) 4059{ 4060 struct fit_sg_descriptor *sg_list; 4061 u32 nbytes; 4062 4063 nbytes = sizeof(*sg_list) * n_sg; 4064 4065 sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr); 4066 4067 if (sg_list != NULL) { 4068 uint64_t dma_address = *ret_dma_addr; 4069 u32 i; 4070 4071 memset(sg_list, 0, nbytes); 4072 4073 for (i = 0; i < n_sg - 1; i++) { 4074 uint64_t ndp_off; 4075 ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor); 4076 4077 sg_list[i].next_desc_ptr = dma_address + ndp_off; 4078 } 4079 sg_list[i].next_desc_ptr = 0LL; 4080 } 4081 4082 return sg_list; 4083} 4084 4085static int skd_cons_skreq(struct skd_device *skdev) 4086{ 4087 int rc = 0; 4088 u32 i; 4089 4090 pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n", 4091 skdev->name, __func__, __LINE__, 4092 sizeof(struct skd_request_context), 4093 skdev->num_req_context, 4094 sizeof(struct skd_request_context) * skdev->num_req_context); 4095 4096 skdev->skreq_table = kzalloc(sizeof(struct skd_request_context) 4097 * skdev->num_req_context, GFP_KERNEL); 4098 if (skdev->skreq_table == NULL) { 4099 rc = -ENOMEM; 4100 goto err_out; 4101 } 4102 4103 pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n", 4104 skdev->name, __func__, __LINE__, 4105 skdev->sgs_per_request, sizeof(struct scatterlist), 4106 skdev->sgs_per_request * sizeof(struct scatterlist)); 4107 4108 for (i = 0; i < skdev->num_req_context; i++) { 4109 struct skd_request_context *skreq; 4110 4111 skreq = &skdev->skreq_table[i]; 4112 4113 skreq->id = i + SKD_ID_RW_REQUEST; 4114 skreq->state = SKD_REQ_STATE_IDLE; 4115 4116 skreq->sg = kzalloc(sizeof(struct scatterlist) * 4117 skdev->sgs_per_request, GFP_KERNEL); 4118 if (skreq->sg == NULL) { 4119 rc = -ENOMEM; 4120 goto err_out; 4121 } 4122 sg_init_table(skreq->sg, skdev->sgs_per_request); 4123 4124 skreq->sksg_list = skd_cons_sg_list(skdev, 4125 skdev->sgs_per_request, 4126 &skreq->sksg_dma_address); 4127 4128 if (skreq->sksg_list == NULL) { 4129 rc = -ENOMEM; 4130 goto err_out; 4131 } 4132 4133 skreq->next = &skreq[1]; 4134 } 4135 4136 /* Free list is in order starting with the 0th entry. 
*/ 4137 skdev->skreq_table[i - 1].next = NULL; 4138 skdev->skreq_free_list = skdev->skreq_table; 4139 4140err_out: 4141 return rc; 4142} 4143 4144static int skd_cons_skspcl(struct skd_device *skdev) 4145{ 4146 int rc = 0; 4147 u32 i, nbytes; 4148 4149 pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n", 4150 skdev->name, __func__, __LINE__, 4151 sizeof(struct skd_special_context), 4152 skdev->n_special, 4153 sizeof(struct skd_special_context) * skdev->n_special); 4154 4155 skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context) 4156 * skdev->n_special, GFP_KERNEL); 4157 if (skdev->skspcl_table == NULL) { 4158 rc = -ENOMEM; 4159 goto err_out; 4160 } 4161 4162 for (i = 0; i < skdev->n_special; i++) { 4163 struct skd_special_context *skspcl; 4164 4165 skspcl = &skdev->skspcl_table[i]; 4166 4167 skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST; 4168 skspcl->req.state = SKD_REQ_STATE_IDLE; 4169 4170 skspcl->req.next = &skspcl[1].req; 4171 4172 nbytes = SKD_N_SPECIAL_FITMSG_BYTES; 4173 4174 skspcl->msg_buf = 4175 pci_zalloc_consistent(skdev->pdev, nbytes, 4176 &skspcl->mb_dma_address); 4177 if (skspcl->msg_buf == NULL) { 4178 rc = -ENOMEM; 4179 goto err_out; 4180 } 4181 4182 skspcl->req.sg = kzalloc(sizeof(struct scatterlist) * 4183 SKD_N_SG_PER_SPECIAL, GFP_KERNEL); 4184 if (skspcl->req.sg == NULL) { 4185 rc = -ENOMEM; 4186 goto err_out; 4187 } 4188 4189 skspcl->req.sksg_list = skd_cons_sg_list(skdev, 4190 SKD_N_SG_PER_SPECIAL, 4191 &skspcl->req. 4192 sksg_dma_address); 4193 if (skspcl->req.sksg_list == NULL) { 4194 rc = -ENOMEM; 4195 goto err_out; 4196 } 4197 } 4198 4199 /* Free list is in order starting with the 0th entry. */ 4200 skdev->skspcl_table[i - 1].req.next = NULL; 4201 skdev->skspcl_free_list = skdev->skspcl_table; 4202 4203 return rc; 4204 4205err_out: 4206 return rc; 4207} 4208 4209static int skd_cons_sksb(struct skd_device *skdev) 4210{ 4211 int rc = 0; 4212 struct skd_special_context *skspcl; 4213 u32 nbytes; 4214 4215 skspcl = &skdev->internal_skspcl; 4216 4217 skspcl->req.id = 0 + SKD_ID_INTERNAL; 4218 skspcl->req.state = SKD_REQ_STATE_IDLE; 4219 4220 nbytes = SKD_N_INTERNAL_BYTES; 4221 4222 skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes, 4223 &skspcl->db_dma_address); 4224 if (skspcl->data_buf == NULL) { 4225 rc = -ENOMEM; 4226 goto err_out; 4227 } 4228 4229 nbytes = SKD_N_SPECIAL_FITMSG_BYTES; 4230 skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes, 4231 &skspcl->mb_dma_address); 4232 if (skspcl->msg_buf == NULL) { 4233 rc = -ENOMEM; 4234 goto err_out; 4235 } 4236 4237 skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1, 4238 &skspcl->req.sksg_dma_address); 4239 if (skspcl->req.sksg_list == NULL) { 4240 rc = -ENOMEM; 4241 goto err_out; 4242 } 4243 4244 if (!skd_format_internal_skspcl(skdev)) { 4245 rc = -EINVAL; 4246 goto err_out; 4247 } 4248 4249err_out: 4250 return rc; 4251} 4252 4253static int skd_cons_disk(struct skd_device *skdev) 4254{ 4255 int rc = 0; 4256 struct gendisk *disk; 4257 struct request_queue *q; 4258 unsigned long flags; 4259 4260 disk = alloc_disk(SKD_MINORS_PER_DEVICE); 4261 if (!disk) { 4262 rc = -ENOMEM; 4263 goto err_out; 4264 } 4265 4266 skdev->disk = disk; 4267 sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno); 4268 4269 disk->major = skdev->major; 4270 disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE; 4271 disk->fops = &skd_blockdev_ops; 4272 disk->private_data = skdev; 4273 4274 q = blk_init_queue(skd_request_fn, &skdev->lock); 4275 if (!q) { 4276 rc = -ENOMEM; 4277 goto err_out; 4278 } 
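 /* Wire the new queue to the device and gendisk, then describe the hardware to the block layer: a write-back cache with FUA support, at most sgs_per_request segments and SKD_N_MAX_SECTORS per request, an 8K optimal-I/O-size hint, and a non-rotational (SSD) device that does not contribute entropy. The queue is stopped immediately after setup, so no requests are dispatched before the device has been started. */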
4279 4280 skdev->queue = q; 4281 disk->queue = q; 4282 q->queuedata = skdev; 4283 4284 blk_queue_write_cache(q, true, true); 4285 blk_queue_max_segments(q, skdev->sgs_per_request); 4286 blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS); 4287 4288 /* set sysfs optimal_io_size to 8K */ 4289 blk_queue_io_opt(q, 8192); 4290 4291 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); 4292 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); 4293 4294 spin_lock_irqsave(&skdev->lock, flags); 4295 pr_debug("%s:%s:%d stopping %s queue\n", 4296 skdev->name, __func__, __LINE__, skdev->name); 4297 blk_stop_queue(skdev->queue); 4298 spin_unlock_irqrestore(&skdev->lock, flags); 4299 4300err_out: 4301 return rc; 4302} 4303 4304#define SKD_N_DEV_TABLE 16u 4305static u32 skd_next_devno; 4306 4307static struct skd_device *skd_construct(struct pci_dev *pdev) 4308{ 4309 struct skd_device *skdev; 4310 int blk_major = skd_major; 4311 int rc; 4312 4313 skdev = kzalloc(sizeof(*skdev), GFP_KERNEL); 4314 4315 if (!skdev) { 4316 pr_err(PFX "(%s): memory alloc failure\n", 4317 pci_name(pdev)); 4318 return NULL; 4319 } 4320 4321 skdev->state = SKD_DRVR_STATE_LOAD; 4322 skdev->pdev = pdev; 4323 skdev->devno = skd_next_devno++; 4324 skdev->major = blk_major; 4325 sprintf(skdev->name, DRV_NAME "%d", skdev->devno); 4326 skdev->dev_max_queue_depth = 0; 4327 4328 skdev->num_req_context = skd_max_queue_depth; 4329 skdev->num_fitmsg_context = skd_max_queue_depth; 4330 skdev->n_special = skd_max_pass_thru; 4331 skdev->cur_max_queue_depth = 1; 4332 skdev->queue_low_water_mark = 1; 4333 skdev->proto_ver = 99; 4334 skdev->sgs_per_request = skd_sgs_per_request; 4335 skdev->dbg_level = skd_dbg_level; 4336 4337 atomic_set(&skdev->device_count, 0); 4338 4339 spin_lock_init(&skdev->lock); 4340 4341 INIT_WORK(&skdev->completion_worker, skd_completion_worker); 4342 4343 pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__); 4344 rc = skd_cons_skcomp(skdev); 4345 if (rc < 0) 4346 goto err_out; 4347 4348 pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__); 4349 rc = skd_cons_skmsg(skdev); 4350 if (rc < 0) 4351 goto err_out; 4352 4353 pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__); 4354 rc = skd_cons_skreq(skdev); 4355 if (rc < 0) 4356 goto err_out; 4357 4358 pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__); 4359 rc = skd_cons_skspcl(skdev); 4360 if (rc < 0) 4361 goto err_out; 4362 4363 pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__); 4364 rc = skd_cons_sksb(skdev); 4365 if (rc < 0) 4366 goto err_out; 4367 4368 pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__); 4369 rc = skd_cons_disk(skdev); 4370 if (rc < 0) 4371 goto err_out; 4372 4373 pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__); 4374 return skdev; 4375 4376err_out: 4377 pr_debug("%s:%s:%d construct failed\n", 4378 skdev->name, __func__, __LINE__); 4379 skd_destruct(skdev); 4380 return NULL; 4381} 4382 4383/* 4384 ***************************************************************************** 4385 * DESTRUCT (FREE) 4386 ***************************************************************************** 4387 */ 4388 4389static void skd_free_skcomp(struct skd_device *skdev) 4390{ 4391 if (skdev->skcomp_table != NULL) { 4392 u32 nbytes; 4393 4394 nbytes = sizeof(skdev->skcomp_table[0]) * 4395 SKD_N_COMPLETION_ENTRY; 4396 pci_free_consistent(skdev->pdev, nbytes, 4397 skdev->skcomp_table, skdev->cq_dma_address); 4398 } 4399 4400 skdev->skcomp_table = NULL; 4401 skdev->cq_dma_address = 0; 4402} 4403 4404static void
skd_free_skmsg(struct skd_device *skdev) 4405{ 4406 u32 i; 4407 4408 if (skdev->skmsg_table == NULL) 4409 return; 4410 4411 for (i = 0; i < skdev->num_fitmsg_context; i++) { 4412 struct skd_fitmsg_context *skmsg; 4413 4414 skmsg = &skdev->skmsg_table[i]; 4415 4416 if (skmsg->msg_buf != NULL) { 4417 skmsg->msg_buf += skmsg->offset; 4418 skmsg->mb_dma_address += skmsg->offset; 4419 pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES, 4420 skmsg->msg_buf, 4421 skmsg->mb_dma_address); 4422 } 4423 skmsg->msg_buf = NULL; 4424 skmsg->mb_dma_address = 0; 4425 } 4426 4427 kfree(skdev->skmsg_table); 4428 skdev->skmsg_table = NULL; 4429} 4430 4431static void skd_free_sg_list(struct skd_device *skdev, 4432 struct fit_sg_descriptor *sg_list, 4433 u32 n_sg, dma_addr_t dma_addr) 4434{ 4435 if (sg_list != NULL) { 4436 u32 nbytes; 4437 4438 nbytes = sizeof(*sg_list) * n_sg; 4439 4440 pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr); 4441 } 4442} 4443 4444static void skd_free_skreq(struct skd_device *skdev) 4445{ 4446 u32 i; 4447 4448 if (skdev->skreq_table == NULL) 4449 return; 4450 4451 for (i = 0; i < skdev->num_req_context; i++) { 4452 struct skd_request_context *skreq; 4453 4454 skreq = &skdev->skreq_table[i]; 4455 4456 skd_free_sg_list(skdev, skreq->sksg_list, 4457 skdev->sgs_per_request, 4458 skreq->sksg_dma_address); 4459 4460 skreq->sksg_list = NULL; 4461 skreq->sksg_dma_address = 0; 4462 4463 kfree(skreq->sg); 4464 } 4465 4466 kfree(skdev->skreq_table); 4467 skdev->skreq_table = NULL; 4468} 4469 4470static void skd_free_skspcl(struct skd_device *skdev) 4471{ 4472 u32 i; 4473 u32 nbytes; 4474 4475 if (skdev->skspcl_table == NULL) 4476 return; 4477 4478 for (i = 0; i < skdev->n_special; i++) { 4479 struct skd_special_context *skspcl; 4480 4481 skspcl = &skdev->skspcl_table[i]; 4482 4483 if (skspcl->msg_buf != NULL) { 4484 nbytes = SKD_N_SPECIAL_FITMSG_BYTES; 4485 pci_free_consistent(skdev->pdev, nbytes, 4486 skspcl->msg_buf, 4487 skspcl->mb_dma_address); 4488 } 4489 4490 skspcl->msg_buf = NULL; 4491 skspcl->mb_dma_address = 0; 4492 4493 skd_free_sg_list(skdev, skspcl->req.sksg_list, 4494 SKD_N_SG_PER_SPECIAL, 4495 skspcl->req.sksg_dma_address); 4496 4497 skspcl->req.sksg_list = NULL; 4498 skspcl->req.sksg_dma_address = 0; 4499 4500 kfree(skspcl->req.sg); 4501 } 4502 4503 kfree(skdev->skspcl_table); 4504 skdev->skspcl_table = NULL; 4505} 4506 4507static void skd_free_sksb(struct skd_device *skdev) 4508{ 4509 struct skd_special_context *skspcl; 4510 u32 nbytes; 4511 4512 skspcl = &skdev->internal_skspcl; 4513 4514 if (skspcl->data_buf != NULL) { 4515 nbytes = SKD_N_INTERNAL_BYTES; 4516 4517 pci_free_consistent(skdev->pdev, nbytes, 4518 skspcl->data_buf, skspcl->db_dma_address); 4519 } 4520 4521 skspcl->data_buf = NULL; 4522 skspcl->db_dma_address = 0; 4523 4524 if (skspcl->msg_buf != NULL) { 4525 nbytes = SKD_N_SPECIAL_FITMSG_BYTES; 4526 pci_free_consistent(skdev->pdev, nbytes, 4527 skspcl->msg_buf, skspcl->mb_dma_address); 4528 } 4529 4530 skspcl->msg_buf = NULL; 4531 skspcl->mb_dma_address = 0; 4532 4533 skd_free_sg_list(skdev, skspcl->req.sksg_list, 1, 4534 skspcl->req.sksg_dma_address); 4535 4536 skspcl->req.sksg_list = NULL; 4537 skspcl->req.sksg_dma_address = 0; 4538} 4539 4540static void skd_free_disk(struct skd_device *skdev) 4541{ 4542 struct gendisk *disk = skdev->disk; 4543 4544 if (disk != NULL) { 4545 struct request_queue *q = disk->queue; 4546 4547 if (disk->flags & GENHD_FL_UP) 4548 del_gendisk(disk); 4549 if (q) 4550 blk_cleanup_queue(q); 4551 put_disk(disk); 4552 } 4553 
skdev->disk = NULL; 4554} 4555 4556static void skd_destruct(struct skd_device *skdev) 4557{ 4558 if (skdev == NULL) 4559 return; 4560 4561 4562 pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__); 4563 skd_free_disk(skdev); 4564 4565 pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__); 4566 skd_free_sksb(skdev); 4567 4568 pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__); 4569 skd_free_skspcl(skdev); 4570 4571 pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__); 4572 skd_free_skreq(skdev); 4573 4574 pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__); 4575 skd_free_skmsg(skdev); 4576 4577 pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__); 4578 skd_free_skcomp(skdev); 4579 4580 pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__); 4581 kfree(skdev); 4582} 4583 4584/* 4585 ***************************************************************************** 4586 * BLOCK DEVICE (BDEV) GLUE 4587 ***************************************************************************** 4588 */ 4589 4590static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo) 4591{ 4592 struct skd_device *skdev; 4593 u64 capacity; 4594 4595 skdev = bdev->bd_disk->private_data; 4596 4597 pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n", 4598 skdev->name, __func__, __LINE__, 4599 bdev->bd_disk->disk_name, current->comm); 4600 4601 if (skdev->read_cap_is_valid) { 4602 capacity = get_capacity(skdev->disk); 4603 geo->heads = 64; 4604 geo->sectors = 255; 4605 geo->cylinders = (capacity) / (255 * 64); 4606 4607 return 0; 4608 } 4609 return -EIO; 4610} 4611 4612static int skd_bdev_attach(struct device *parent, struct skd_device *skdev) 4613{ 4614 pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__); 4615 device_add_disk(parent, skdev->disk); 4616 return 0; 4617} 4618 4619static const struct block_device_operations skd_blockdev_ops = { 4620 .owner = THIS_MODULE, 4621 .ioctl = skd_bdev_ioctl, 4622 .getgeo = skd_bdev_getgeo, 4623}; 4624 4625 4626/* 4627 ***************************************************************************** 4628 * PCIe DRIVER GLUE 4629 ***************************************************************************** 4630 */ 4631 4632static const struct pci_device_id skd_pci_tbl[] = { 4633 { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120, 4634 PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, 4635 { 0 } /* terminate list */ 4636}; 4637 4638MODULE_DEVICE_TABLE(pci, skd_pci_tbl); 4639 4640static char *skd_pci_info(struct skd_device *skdev, char *str) 4641{ 4642 int pcie_reg; 4643 4644 strcpy(str, "PCIe ("); 4645 pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP); 4646 4647 if (pcie_reg) { 4648 4649 char lwstr[6]; 4650 uint16_t pcie_lstat, lspeed, lwidth; 4651 4652 pcie_reg += 0x12; 4653 pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat); 4654 lspeed = pcie_lstat & (0xF); 4655 lwidth = (pcie_lstat & 0x3F0) >> 4; 4656 4657 if (lspeed == 1) 4658 strcat(str, "2.5GT/s "); 4659 else if (lspeed == 2) 4660 strcat(str, "5.0GT/s "); 4661 else 4662 strcat(str, "<unknown> "); 4663 snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth); 4664 strcat(str, lwstr); 4665 } 4666 return str; 4667} 4668 4669static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 4670{ 4671 int i; 4672 int rc = 0; 4673 char pci_str[32]; 4674 struct skd_device *skdev; 4675 4676 pr_info("STEC s1120 Driver(%s) version %s-b%s\n", 4677 DRV_NAME, DRV_VERSION, DRV_BUILD_ID); 4678 pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n", 4679 pci_name(pdev), 
pdev->vendor, pdev->device); 4680 4681 rc = pci_enable_device(pdev); 4682 if (rc) 4683 return rc; 4684 rc = pci_request_regions(pdev, DRV_NAME); 4685 if (rc) 4686 goto err_out; 4687 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 4688 if (!rc) { 4689 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { 4690 4691 pr_err("(%s): consistent DMA mask error %d\n", 4692 pci_name(pdev), rc); 4693 } 4694 } else { 4695 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 4696 if (rc) { 4697 4698 pr_err("(%s): DMA mask error %d\n", 4699 pci_name(pdev), rc); 4700 goto err_out_regions; 4701 } 4702 } 4703 4704 if (!skd_major) { 4705 rc = register_blkdev(0, DRV_NAME); 4706 if (rc < 0) 4707 goto err_out_regions; 4708 BUG_ON(!rc); 4709 skd_major = rc; 4710 } 4711 4712 skdev = skd_construct(pdev); 4713 if (skdev == NULL) { 4714 rc = -ENOMEM; 4715 goto err_out_regions; 4716 } 4717 4718 skd_pci_info(skdev, pci_str); 4719 pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str); 4720 4721 pci_set_master(pdev); 4722 rc = pci_enable_pcie_error_reporting(pdev); 4723 if (rc) { 4724 pr_err( 4725 "(%s): bad enable of PCIe error reporting rc=%d\n", 4726 skd_name(skdev), rc); 4727 skdev->pcie_error_reporting_is_enabled = 0; 4728 } else 4729 skdev->pcie_error_reporting_is_enabled = 1; 4730 4731 4732 pci_set_drvdata(pdev, skdev); 4733 4734 for (i = 0; i < SKD_MAX_BARS; i++) { 4735 skdev->mem_phys[i] = pci_resource_start(pdev, i); 4736 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i); 4737 skdev->mem_map[i] = ioremap(skdev->mem_phys[i], 4738 skdev->mem_size[i]); 4739 if (!skdev->mem_map[i]) { 4740 pr_err("(%s): Unable to map adapter memory!\n", 4741 skd_name(skdev)); 4742 rc = -ENODEV; 4743 goto err_out_iounmap; 4744 } 4745 pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n", 4746 skdev->name, __func__, __LINE__, 4747 skdev->mem_map[i], 4748 (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]); 4749 } 4750 4751 rc = skd_acquire_irq(skdev); 4752 if (rc) { 4753 pr_err("(%s): interrupt resource error %d\n", 4754 skd_name(skdev), rc); 4755 goto err_out_iounmap; 4756 } 4757 4758 rc = skd_start_timer(skdev); 4759 if (rc) 4760 goto err_out_timer; 4761 4762 init_waitqueue_head(&skdev->waitq); 4763 4764 skd_start_device(skdev); 4765 4766 rc = wait_event_interruptible_timeout(skdev->waitq, 4767 (skdev->gendisk_on), 4768 (SKD_START_WAIT_SECONDS * HZ)); 4769 if (skdev->gendisk_on > 0) { 4770 /* device came on-line after reset */ 4771 skd_bdev_attach(&pdev->dev, skdev); 4772 rc = 0; 4773 } else { 4774 /* we timed out; something is wrong with the device, 4775 don't add the disk structure */ 4776 pr_err( 4777 "(%s): error: waiting for s1120 timed out %d!\n", 4778 skd_name(skdev), rc); 4779 /* in case of no error, we timed out; fail with ENXIO */ 4780 if (!rc) 4781 rc = -ENXIO; 4782 goto err_out_timer; 4783 } 4784 4785 4786#ifdef SKD_VMK_POLL_HANDLER 4787 if (skdev->irq_type == SKD_IRQ_MSIX) { 4788 /* MSIX completion handler is being used for coredump */ 4789 vmklnx_scsi_register_poll_handler(skdev->scsi_host, 4790 skdev->msix_entries[5].vector, 4791 skd_comp_q, skdev); 4792 } else { 4793 vmklnx_scsi_register_poll_handler(skdev->scsi_host, 4794 skdev->pdev->irq, skd_isr, 4795 skdev); 4796 } 4797#endif /* SKD_VMK_POLL_HANDLER */ 4798 4799 return rc; 4800 4801err_out_timer: 4802 skd_stop_device(skdev); 4803 skd_release_irq(skdev); 4804 4805err_out_iounmap: 4806 for (i = 0; i < SKD_MAX_BARS; i++) 4807 if (skdev->mem_map[i]) 4808 iounmap(skdev->mem_map[i]); 4809 4810 if (skdev->pcie_error_reporting_is_enabled) 4811
pci_disable_pcie_error_reporting(pdev); 4812 4813 skd_destruct(skdev); 4814 4815err_out_regions: 4816 pci_release_regions(pdev); 4817 4818err_out: 4819 pci_disable_device(pdev); 4820 pci_set_drvdata(pdev, NULL); 4821 return rc; 4822} 4823 4824static void skd_pci_remove(struct pci_dev *pdev) 4825{ 4826 int i; 4827 struct skd_device *skdev; 4828 4829 skdev = pci_get_drvdata(pdev); 4830 if (!skdev) { 4831 pr_err("%s: no device data for PCI\n", pci_name(pdev)); 4832 return; 4833 } 4834 skd_stop_device(skdev); 4835 skd_release_irq(skdev); 4836 4837 for (i = 0; i < SKD_MAX_BARS; i++) 4838 if (skdev->mem_map[i]) 4839 iounmap((u32 *)skdev->mem_map[i]); 4840 4841 if (skdev->pcie_error_reporting_is_enabled) 4842 pci_disable_pcie_error_reporting(pdev); 4843 4844 skd_destruct(skdev); 4845 4846 pci_release_regions(pdev); 4847 pci_disable_device(pdev); 4848 pci_set_drvdata(pdev, NULL); 4849 4850 return; 4851} 4852 4853static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state) 4854{ 4855 int i; 4856 struct skd_device *skdev; 4857 4858 skdev = pci_get_drvdata(pdev); 4859 if (!skdev) { 4860 pr_err("%s: no device data for PCI\n", pci_name(pdev)); 4861 return -EIO; 4862 } 4863 4864 skd_stop_device(skdev); 4865 4866 skd_release_irq(skdev); 4867 4868 for (i = 0; i < SKD_MAX_BARS; i++) 4869 if (skdev->mem_map[i]) 4870 iounmap((u32 *)skdev->mem_map[i]); 4871 4872 if (skdev->pcie_error_reporting_is_enabled) 4873 pci_disable_pcie_error_reporting(pdev); 4874 4875 pci_release_regions(pdev); 4876 pci_save_state(pdev); 4877 pci_disable_device(pdev); 4878 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 4879 return 0; 4880} 4881 4882static int skd_pci_resume(struct pci_dev *pdev) 4883{ 4884 int i; 4885 int rc = 0; 4886 struct skd_device *skdev; 4887 4888 skdev = pci_get_drvdata(pdev); 4889 if (!skdev) { 4890 pr_err("%s: no device data for PCI\n", pci_name(pdev)); 4891 return -1; 4892 } 4893 4894 pci_set_power_state(pdev, PCI_D0); 4895 pci_enable_wake(pdev, PCI_D0, 0); 4896 pci_restore_state(pdev); 4897 4898 rc = pci_enable_device(pdev); 4899 if (rc) 4900 return rc; 4901 rc = pci_request_regions(pdev, DRV_NAME); 4902 if (rc) 4903 goto err_out; 4904 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 4905 if (!rc) { 4906 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { 4907 4908 pr_err("(%s): consistent DMA mask error %d\n", 4909 pci_name(pdev), rc); 4910 } 4911 } else { 4912 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 4913 if (rc) { 4914 4915 pr_err("(%s): DMA mask error %d\n", 4916 pci_name(pdev), rc); 4917 goto err_out_regions; 4918 } 4919 } 4920 4921 pci_set_master(pdev); 4922 rc = pci_enable_pcie_error_reporting(pdev); 4923 if (rc) { 4924 pr_err("(%s): bad enable of PCIe error reporting rc=%d\n", 4925 skdev->name, rc); 4926 skdev->pcie_error_reporting_is_enabled = 0; 4927 } else 4928 skdev->pcie_error_reporting_is_enabled = 1; 4929 4930 for (i = 0; i < SKD_MAX_BARS; i++) { 4931 4932 skdev->mem_phys[i] = pci_resource_start(pdev, i); 4933 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i); 4934 skdev->mem_map[i] = ioremap(skdev->mem_phys[i], 4935 skdev->mem_size[i]); 4936 if (!skdev->mem_map[i]) { 4937 pr_err("(%s): Unable to map adapter memory!\n", 4938 skd_name(skdev)); 4939 rc = -ENODEV; 4940 goto err_out_iounmap; 4941 } 4942 pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n", 4943 skdev->name, __func__, __LINE__, 4944 skdev->mem_map[i], 4945 (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]); 4946 } 4947 rc = skd_acquire_irq(skdev); 4948 if (rc) { 4949 4950 pr_err("(%s): interrupt 
resource error %d\n", 4951 pci_name(pdev), rc); 4952 goto err_out_iounmap; 4953 } 4954 4955 rc = skd_start_timer(skdev); 4956 if (rc) 4957 goto err_out_timer; 4958 4959 init_waitqueue_head(&skdev->waitq); 4960 4961 skd_start_device(skdev); 4962 4963 return rc; 4964 4965err_out_timer: 4966 skd_stop_device(skdev); 4967 skd_release_irq(skdev); 4968 4969err_out_iounmap: 4970 for (i = 0; i < SKD_MAX_BARS; i++) 4971 if (skdev->mem_map[i]) 4972 iounmap(skdev->mem_map[i]); 4973 4974 if (skdev->pcie_error_reporting_is_enabled) 4975 pci_disable_pcie_error_reporting(pdev); 4976 4977err_out_regions: 4978 pci_release_regions(pdev); 4979 4980err_out: 4981 pci_disable_device(pdev); 4982 return rc; 4983} 4984 4985static void skd_pci_shutdown(struct pci_dev *pdev) 4986{ 4987 struct skd_device *skdev; 4988 4989 pr_err("skd_pci_shutdown called\n"); 4990 4991 skdev = pci_get_drvdata(pdev); 4992 if (!skdev) { 4993 pr_err("%s: no device data for PCI\n", pci_name(pdev)); 4994 return; 4995 } 4996 4997 pr_err("%s: calling stop\n", skd_name(skdev)); 4998 skd_stop_device(skdev); 4999} 5000 5001static struct pci_driver skd_driver = { 5002 .name = DRV_NAME, 5003 .id_table = skd_pci_tbl, 5004 .probe = skd_pci_probe, 5005 .remove = skd_pci_remove, 5006 .suspend = skd_pci_suspend, 5007 .resume = skd_pci_resume, 5008 .shutdown = skd_pci_shutdown, 5009}; 5010 5011/* 5012 ***************************************************************************** 5013 * LOGGING SUPPORT 5014 ***************************************************************************** 5015 */ 5016 5017static const char *skd_name(struct skd_device *skdev) 5018{ 5019 memset(skdev->id_str, 0, sizeof(skdev->id_str)); 5020 5021 if (skdev->inquiry_is_valid) 5022 snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]", 5023 skdev->name, skdev->inq_serial_num, 5024 pci_name(skdev->pdev)); 5025 else 5026 snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]", 5027 skdev->name, pci_name(skdev->pdev)); 5028 5029 return skdev->id_str; 5030} 5031 5032const char *skd_drive_state_to_str(int state) 5033{ 5034 switch (state) { 5035 case FIT_SR_DRIVE_OFFLINE: 5036 return "OFFLINE"; 5037 case FIT_SR_DRIVE_INIT: 5038 return "INIT"; 5039 case FIT_SR_DRIVE_ONLINE: 5040 return "ONLINE"; 5041 case FIT_SR_DRIVE_BUSY: 5042 return "BUSY"; 5043 case FIT_SR_DRIVE_FAULT: 5044 return "FAULT"; 5045 case FIT_SR_DRIVE_DEGRADED: 5046 return "DEGRADED"; 5047 case FIT_SR_PCIE_LINK_DOWN: 5048 return "INK_DOWN"; 5049 case FIT_SR_DRIVE_SOFT_RESET: 5050 return "SOFT_RESET"; 5051 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD: 5052 return "NEED_FW"; 5053 case FIT_SR_DRIVE_INIT_FAULT: 5054 return "INIT_FAULT"; 5055 case FIT_SR_DRIVE_BUSY_SANITIZE: 5056 return "BUSY_SANITIZE"; 5057 case FIT_SR_DRIVE_BUSY_ERASE: 5058 return "BUSY_ERASE"; 5059 case FIT_SR_DRIVE_FW_BOOTING: 5060 return "FW_BOOTING"; 5061 default: 5062 return "???"; 5063 } 5064} 5065 5066const char *skd_skdev_state_to_str(enum skd_drvr_state state) 5067{ 5068 switch (state) { 5069 case SKD_DRVR_STATE_LOAD: 5070 return "LOAD"; 5071 case SKD_DRVR_STATE_IDLE: 5072 return "IDLE"; 5073 case SKD_DRVR_STATE_BUSY: 5074 return "BUSY"; 5075 case SKD_DRVR_STATE_STARTING: 5076 return "STARTING"; 5077 case SKD_DRVR_STATE_ONLINE: 5078 return "ONLINE"; 5079 case SKD_DRVR_STATE_PAUSING: 5080 return "PAUSING"; 5081 case SKD_DRVR_STATE_PAUSED: 5082 return "PAUSED"; 5083 case SKD_DRVR_STATE_DRAINING_TIMEOUT: 5084 return "DRAINING_TIMEOUT"; 5085 case SKD_DRVR_STATE_RESTARTING: 5086 return "RESTARTING"; 5087 case SKD_DRVR_STATE_RESUMING: 5088 return 
"RESUMING"; 5089 case SKD_DRVR_STATE_STOPPING: 5090 return "STOPPING"; 5091 case SKD_DRVR_STATE_SYNCING: 5092 return "SYNCING"; 5093 case SKD_DRVR_STATE_FAULT: 5094 return "FAULT"; 5095 case SKD_DRVR_STATE_DISAPPEARED: 5096 return "DISAPPEARED"; 5097 case SKD_DRVR_STATE_BUSY_ERASE: 5098 return "BUSY_ERASE"; 5099 case SKD_DRVR_STATE_BUSY_SANITIZE: 5100 return "BUSY_SANITIZE"; 5101 case SKD_DRVR_STATE_BUSY_IMMINENT: 5102 return "BUSY_IMMINENT"; 5103 case SKD_DRVR_STATE_WAIT_BOOT: 5104 return "WAIT_BOOT"; 5105 5106 default: 5107 return "???"; 5108 } 5109} 5110 5111static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state) 5112{ 5113 switch (state) { 5114 case SKD_MSG_STATE_IDLE: 5115 return "IDLE"; 5116 case SKD_MSG_STATE_BUSY: 5117 return "BUSY"; 5118 default: 5119 return "???"; 5120 } 5121} 5122 5123static const char *skd_skreq_state_to_str(enum skd_req_state state) 5124{ 5125 switch (state) { 5126 case SKD_REQ_STATE_IDLE: 5127 return "IDLE"; 5128 case SKD_REQ_STATE_SETUP: 5129 return "SETUP"; 5130 case SKD_REQ_STATE_BUSY: 5131 return "BUSY"; 5132 case SKD_REQ_STATE_COMPLETED: 5133 return "COMPLETED"; 5134 case SKD_REQ_STATE_TIMEOUT: 5135 return "TIMEOUT"; 5136 case SKD_REQ_STATE_ABORTED: 5137 return "ABORTED"; 5138 default: 5139 return "???"; 5140 } 5141} 5142 5143static void skd_log_skdev(struct skd_device *skdev, const char *event) 5144{ 5145 pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n", 5146 skdev->name, __func__, __LINE__, skdev->name, skdev, event); 5147 pr_debug("%s:%s:%d drive_state=%s(%d) driver_state=%s(%d)\n", 5148 skdev->name, __func__, __LINE__, 5149 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, 5150 skd_skdev_state_to_str(skdev->state), skdev->state); 5151 pr_debug("%s:%s:%d busy=%d limit=%d dev=%d lowat=%d\n", 5152 skdev->name, __func__, __LINE__, 5153 skdev->in_flight, skdev->cur_max_queue_depth, 5154 skdev->dev_max_queue_depth, skdev->queue_low_water_mark); 5155 pr_debug("%s:%s:%d timestamp=0x%x cycle=%d cycle_ix=%d\n", 5156 skdev->name, __func__, __LINE__, 5157 skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix); 5158} 5159 5160static void skd_log_skmsg(struct skd_device *skdev, 5161 struct skd_fitmsg_context *skmsg, const char *event) 5162{ 5163 pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n", 5164 skdev->name, __func__, __LINE__, skdev->name, skmsg, event); 5165 pr_debug("%s:%s:%d state=%s(%d) id=0x%04x length=%d\n", 5166 skdev->name, __func__, __LINE__, 5167 skd_skmsg_state_to_str(skmsg->state), skmsg->state, 5168 skmsg->id, skmsg->length); 5169} 5170 5171static void skd_log_skreq(struct skd_device *skdev, 5172 struct skd_request_context *skreq, const char *event) 5173{ 5174 pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n", 5175 skdev->name, __func__, __LINE__, skdev->name, skreq, event); 5176 pr_debug("%s:%s:%d state=%s(%d) id=0x%04x fitmsg=0x%04x\n", 5177 skdev->name, __func__, __LINE__, 5178 skd_skreq_state_to_str(skreq->state), skreq->state, 5179 skreq->id, skreq->fitmsg_id); 5180 pr_debug("%s:%s:%d timo=0x%x sg_dir=%d n_sg=%d\n", 5181 skdev->name, __func__, __LINE__, 5182 skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg); 5183 5184 if (skreq->req != NULL) { 5185 struct request *req = skreq->req; 5186 u32 lba = (u32)blk_rq_pos(req); 5187 u32 count = blk_rq_sectors(req); 5188 5189 pr_debug("%s:%s:%d " 5190 "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", 5191 skdev->name, __func__, __LINE__, 5192 req, lba, lba, count, count, 5193 (int)rq_data_dir(req)); 5194 } else 5195 pr_debug("%s:%s:%d req=NULL\n", 5196 skdev->name, 
__func__, __LINE__); 5197} 5198 5199/* 5200 ***************************************************************************** 5201 * MODULE GLUE 5202 ***************************************************************************** 5203 */ 5204 5205static int __init skd_init(void) 5206{ 5207 pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID); 5208 5209 switch (skd_isr_type) { 5210 case SKD_IRQ_LEGACY: 5211 case SKD_IRQ_MSI: 5212 case SKD_IRQ_MSIX: 5213 break; 5214 default: 5215 pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n", 5216 skd_isr_type, SKD_IRQ_DEFAULT); 5217 skd_isr_type = SKD_IRQ_DEFAULT; 5218 } 5219 5220 if (skd_max_queue_depth < 1 || 5221 skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) { 5222 pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n", 5223 skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT); 5224 skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT; 5225 } 5226 5227 if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) { 5228 pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n", 5229 skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT); 5230 skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT; 5231 } 5232 5233 if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) { 5234 pr_err(PFX "skd_sg_per_request %d invalid, re-set to %d\n", 5235 skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT); 5236 skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT; 5237 } 5238 5239 if (skd_dbg_level < 0 || skd_dbg_level > 2) { 5240 pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n", 5241 skd_dbg_level, 0); 5242 skd_dbg_level = 0; 5243 } 5244 5245 if (skd_isr_comp_limit < 0) { 5246 pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n", 5247 skd_isr_comp_limit, 0); 5248 skd_isr_comp_limit = 0; 5249 } 5250 5251 if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) { 5252 pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n", 5253 skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT); 5254 skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT; 5255 } 5256 5257 return pci_register_driver(&skd_driver); 5258} 5259 5260static void __exit skd_exit(void) 5261{ 5262 pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID); 5263 5264 pci_unregister_driver(&skd_driver); 5265 5266 if (skd_major) 5267 unregister_blkdev(skd_major, DRV_NAME); 5268} 5269 5270module_init(skd_init); 5271module_exit(skd_exit);