Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

scsi: myrb: Add Mylex RAID controller (block interface)

This patch adds support for the Mylex DAC960 RAID controller,
supporting the older, block-based interface only. The driver is a
re-implementation of the original DAC960 driver.

Signed-off-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

authored by

Hannes Reinecke and committed by
Martin K. Petersen
081ff398 e6760cc4

+4636
+6
MAINTAINERS
··· 9892 9892 F: drivers/gpu/drm/mxsfb/ 9893 9893 F: Documentation/devicetree/bindings/display/mxsfb.txt 9894 9894 9895 + MYLEX DAC960 PCI RAID Controller 9896 + M: Hannes Reinecke <hare@kernel.org> 9897 + L: linux-scsi@vger.kernel.org 9898 + S: Supported 9899 + F: drivers/scsi/myrb.* 9900 + 9895 9901 MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE) 9896 9902 M: Chris Lee <christopher.lee@cspi.com> 9897 9903 L: netdev@vger.kernel.org
+15
drivers/scsi/Kconfig
··· 557 557 substantial, so users of MultiMaster Host Adapters may not 558 558 wish to include it. 559 559 560 + config SCSI_MYRB 561 + tristate "Mylex DAC960/DAC1100 PCI RAID Controller (Block Interface)" 562 + depends on PCI 563 + select RAID_ATTRS 564 + help 565 + This driver adds support for the Mylex DAC960, AcceleRAID, and 566 + eXtremeRAID PCI RAID controllers. This driver supports the 567 + older, block based interface. 568 + This driver is a reimplementation of the original DAC960 569 + driver. If you have used the DAC960 driver you should enable 570 + this module. 571 + 572 + To compile this driver as a module, choose M here: the 573 + module will be called myrb. 574 + 560 575 config VMWARE_PVSCSI 561 576 tristate "VMware PVSCSI driver support" 562 577 depends on PCI && SCSI && X86
+1
drivers/scsi/Makefile
··· 106 106 obj-$(CONFIG_SCSI_QLOGICPTI) += qlogicpti.o 107 107 obj-$(CONFIG_SCSI_MESH) += mesh.o 108 108 obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o 109 + obj-$(CONFIG_SCSI_MYRB) += myrb.o 109 110 obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o 110 111 obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o 111 112 obj-$(CONFIG_SCSI_3W_SAS) += 3w-sas.o
+3656
drivers/scsi/myrb.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers 4 + * 5 + * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com> 6 + * 7 + * Based on the original DAC960 driver, 8 + * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com> 9 + * Portions Copyright 2002 by Mylex (An IBM Business Unit) 10 + * 11 + */ 12 + 13 + #include <linux/module.h> 14 + #include <linux/types.h> 15 + #include <linux/delay.h> 16 + #include <linux/interrupt.h> 17 + #include <linux/pci.h> 18 + #include <linux/raid_class.h> 19 + #include <asm/unaligned.h> 20 + #include <scsi/scsi.h> 21 + #include <scsi/scsi_host.h> 22 + #include <scsi/scsi_device.h> 23 + #include <scsi/scsi_cmnd.h> 24 + #include <scsi/scsi_tcq.h> 25 + #include "myrb.h" 26 + 27 + static struct raid_template *myrb_raid_template; 28 + 29 + static void myrb_monitor(struct work_struct *work); 30 + static inline void myrb_translate_devstate(void *DeviceState); 31 + 32 + static inline int myrb_logical_channel(struct Scsi_Host *shost) 33 + { 34 + return shost->max_channel - 1; 35 + } 36 + 37 + static struct myrb_devstate_name_entry { 38 + enum myrb_devstate state; 39 + const char *name; 40 + } myrb_devstate_name_list[] = { 41 + { MYRB_DEVICE_DEAD, "Dead" }, 42 + { MYRB_DEVICE_WO, "WriteOnly" }, 43 + { MYRB_DEVICE_ONLINE, "Online" }, 44 + { MYRB_DEVICE_CRITICAL, "Critical" }, 45 + { MYRB_DEVICE_STANDBY, "Standby" }, 46 + { MYRB_DEVICE_OFFLINE, "Offline" }, 47 + }; 48 + 49 + static const char *myrb_devstate_name(enum myrb_devstate state) 50 + { 51 + struct myrb_devstate_name_entry *entry = myrb_devstate_name_list; 52 + int i; 53 + 54 + for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) { 55 + if (entry[i].state == state) 56 + return entry[i].name; 57 + } 58 + return "Unknown"; 59 + } 60 + 61 + static struct myrb_raidlevel_name_entry { 62 + enum myrb_raidlevel level; 63 + const char *name; 64 + } myrb_raidlevel_name_list[] = { 65 + { 
MYRB_RAID_LEVEL0, "RAID0" }, 66 + { MYRB_RAID_LEVEL1, "RAID1" }, 67 + { MYRB_RAID_LEVEL3, "RAID3" }, 68 + { MYRB_RAID_LEVEL5, "RAID5" }, 69 + { MYRB_RAID_LEVEL6, "RAID6" }, 70 + { MYRB_RAID_JBOD, "JBOD" }, 71 + }; 72 + 73 + static const char *myrb_raidlevel_name(enum myrb_raidlevel level) 74 + { 75 + struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list; 76 + int i; 77 + 78 + for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) { 79 + if (entry[i].level == level) 80 + return entry[i].name; 81 + } 82 + return NULL; 83 + } 84 + 85 + /** 86 + * myrb_create_mempools - allocates auxiliary data structures 87 + * 88 + * Return: true on success, false otherwise. 89 + */ 90 + static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb) 91 + { 92 + size_t elem_size, elem_align; 93 + 94 + elem_align = sizeof(struct myrb_sge); 95 + elem_size = cb->host->sg_tablesize * elem_align; 96 + cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev, 97 + elem_size, elem_align, 0); 98 + if (cb->sg_pool == NULL) { 99 + shost_printk(KERN_ERR, cb->host, 100 + "Failed to allocate SG pool\n"); 101 + return false; 102 + } 103 + 104 + cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev, 105 + sizeof(struct myrb_dcdb), 106 + sizeof(unsigned int), 0); 107 + if (!cb->dcdb_pool) { 108 + dma_pool_destroy(cb->sg_pool); 109 + cb->sg_pool = NULL; 110 + shost_printk(KERN_ERR, cb->host, 111 + "Failed to allocate DCDB pool\n"); 112 + return false; 113 + } 114 + 115 + snprintf(cb->work_q_name, sizeof(cb->work_q_name), 116 + "myrb_wq_%d", cb->host->host_no); 117 + cb->work_q = create_singlethread_workqueue(cb->work_q_name); 118 + if (!cb->work_q) { 119 + dma_pool_destroy(cb->dcdb_pool); 120 + cb->dcdb_pool = NULL; 121 + dma_pool_destroy(cb->sg_pool); 122 + cb->sg_pool = NULL; 123 + shost_printk(KERN_ERR, cb->host, 124 + "Failed to create workqueue\n"); 125 + return false; 126 + } 127 + 128 + /* 129 + * Initialize the Monitoring Timer. 
130 + */ 131 + INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor); 132 + queue_delayed_work(cb->work_q, &cb->monitor_work, 1); 133 + 134 + return true; 135 + } 136 + 137 + /** 138 + * myrb_destroy_mempools - tears down the memory pools for the controller 139 + */ 140 + static void myrb_destroy_mempools(struct myrb_hba *cb) 141 + { 142 + cancel_delayed_work_sync(&cb->monitor_work); 143 + destroy_workqueue(cb->work_q); 144 + 145 + dma_pool_destroy(cb->sg_pool); 146 + dma_pool_destroy(cb->dcdb_pool); 147 + } 148 + 149 + /** 150 + * myrb_reset_cmd - reset command block 151 + */ 152 + static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk) 153 + { 154 + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; 155 + 156 + memset(mbox, 0, sizeof(union myrb_cmd_mbox)); 157 + cmd_blk->status = 0; 158 + } 159 + 160 + /** 161 + * myrb_qcmd - queues command block for execution 162 + */ 163 + static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk) 164 + { 165 + void __iomem *base = cb->io_base; 166 + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; 167 + union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox; 168 + 169 + cb->write_cmd_mbox(next_mbox, mbox); 170 + if (cb->prev_cmd_mbox1->words[0] == 0 || 171 + cb->prev_cmd_mbox2->words[0] == 0) 172 + cb->get_cmd_mbox(base); 173 + cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1; 174 + cb->prev_cmd_mbox1 = next_mbox; 175 + if (++next_mbox > cb->last_cmd_mbox) 176 + next_mbox = cb->first_cmd_mbox; 177 + cb->next_cmd_mbox = next_mbox; 178 + } 179 + 180 + /** 181 + * myrb_exec_cmd - executes command block and waits for completion. 
182 + * 183 + * Return: command status 184 + */ 185 + static unsigned short myrb_exec_cmd(struct myrb_hba *cb, 186 + struct myrb_cmdblk *cmd_blk) 187 + { 188 + DECLARE_COMPLETION_ONSTACK(cmpl); 189 + unsigned long flags; 190 + 191 + cmd_blk->completion = &cmpl; 192 + 193 + spin_lock_irqsave(&cb->queue_lock, flags); 194 + cb->qcmd(cb, cmd_blk); 195 + spin_unlock_irqrestore(&cb->queue_lock, flags); 196 + 197 + WARN_ON(in_interrupt()); 198 + wait_for_completion(&cmpl); 199 + return cmd_blk->status; 200 + } 201 + 202 + /** 203 + * myrb_exec_type3 - executes a type 3 command and waits for completion. 204 + * 205 + * Return: command status 206 + */ 207 + static unsigned short myrb_exec_type3(struct myrb_hba *cb, 208 + enum myrb_cmd_opcode op, dma_addr_t addr) 209 + { 210 + struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk; 211 + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; 212 + unsigned short status; 213 + 214 + mutex_lock(&cb->dcmd_mutex); 215 + myrb_reset_cmd(cmd_blk); 216 + mbox->type3.id = MYRB_DCMD_TAG; 217 + mbox->type3.opcode = op; 218 + mbox->type3.addr = addr; 219 + status = myrb_exec_cmd(cb, cmd_blk); 220 + mutex_unlock(&cb->dcmd_mutex); 221 + return status; 222 + } 223 + 224 + /** 225 + * myrb_exec_type3D - executes a type 3D command and waits for completion. 
226 + * 227 + * Return: command status 228 + */ 229 + static unsigned short myrb_exec_type3D(struct myrb_hba *cb, 230 + enum myrb_cmd_opcode op, struct scsi_device *sdev, 231 + struct myrb_pdev_state *pdev_info) 232 + { 233 + struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk; 234 + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; 235 + unsigned short status; 236 + dma_addr_t pdev_info_addr; 237 + 238 + pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info, 239 + sizeof(struct myrb_pdev_state), 240 + DMA_FROM_DEVICE); 241 + if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr)) 242 + return MYRB_STATUS_SUBSYS_FAILED; 243 + 244 + mutex_lock(&cb->dcmd_mutex); 245 + myrb_reset_cmd(cmd_blk); 246 + mbox->type3D.id = MYRB_DCMD_TAG; 247 + mbox->type3D.opcode = op; 248 + mbox->type3D.channel = sdev->channel; 249 + mbox->type3D.target = sdev->id; 250 + mbox->type3D.addr = pdev_info_addr; 251 + status = myrb_exec_cmd(cb, cmd_blk); 252 + mutex_unlock(&cb->dcmd_mutex); 253 + dma_unmap_single(&cb->pdev->dev, pdev_info_addr, 254 + sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE); 255 + if (status == MYRB_STATUS_SUCCESS && 256 + mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD) 257 + myrb_translate_devstate(pdev_info); 258 + 259 + return status; 260 + } 261 + 262 + static char *myrb_event_msg[] = { 263 + "killed because write recovery failed", 264 + "killed because of SCSI bus reset failure", 265 + "killed because of double check condition", 266 + "killed because it was removed", 267 + "killed because of gross error on SCSI chip", 268 + "killed because of bad tag returned from drive", 269 + "killed because of timeout on SCSI command", 270 + "killed because of reset SCSI command issued from system", 271 + "killed because busy or parity error count exceeded limit", 272 + "killed because of 'kill drive' command from system", 273 + "killed because of selection timeout", 274 + "killed due to SCSI phase sequence error", 275 + "killed due to unknown status", 276 + }; 277 + 278 + /** 279 + * 
myrb_get_event - get event log from HBA 280 + * @cb: pointer to the hba structure 281 + * @event: number of the event 282 + * 283 + * Execute a type 3E command and logs the event message 284 + */ 285 + static void myrb_get_event(struct myrb_hba *cb, unsigned int event) 286 + { 287 + struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk; 288 + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; 289 + struct myrb_log_entry *ev_buf; 290 + dma_addr_t ev_addr; 291 + unsigned short status; 292 + 293 + ev_buf = dma_alloc_coherent(&cb->pdev->dev, 294 + sizeof(struct myrb_log_entry), 295 + &ev_addr, GFP_KERNEL); 296 + if (!ev_buf) 297 + return; 298 + 299 + myrb_reset_cmd(cmd_blk); 300 + mbox->type3E.id = MYRB_MCMD_TAG; 301 + mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION; 302 + mbox->type3E.optype = DAC960_V1_GetEventLogEntry; 303 + mbox->type3E.opqual = 1; 304 + mbox->type3E.ev_seq = event; 305 + mbox->type3E.addr = ev_addr; 306 + status = myrb_exec_cmd(cb, cmd_blk); 307 + if (status != MYRB_STATUS_SUCCESS) 308 + shost_printk(KERN_INFO, cb->host, 309 + "Failed to get event log %d, status %04x\n", 310 + event, status); 311 + 312 + else if (ev_buf->seq_num == event) { 313 + struct scsi_sense_hdr sshdr; 314 + 315 + memset(&sshdr, 0, sizeof(sshdr)); 316 + scsi_normalize_sense(ev_buf->sense, 32, &sshdr); 317 + 318 + if (sshdr.sense_key == VENDOR_SPECIFIC && 319 + sshdr.asc == 0x80 && 320 + sshdr.ascq < ARRAY_SIZE(myrb_event_msg)) 321 + shost_printk(KERN_CRIT, cb->host, 322 + "Physical drive %d:%d: %s\n", 323 + ev_buf->channel, ev_buf->target, 324 + myrb_event_msg[sshdr.ascq]); 325 + else 326 + shost_printk(KERN_CRIT, cb->host, 327 + "Physical drive %d:%d: Sense: %X/%02X/%02X\n", 328 + ev_buf->channel, ev_buf->target, 329 + sshdr.sense_key, sshdr.asc, sshdr.ascq); 330 + } 331 + 332 + dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry), 333 + ev_buf, ev_addr); 334 + } 335 + 336 + /** 337 + * myrb_get_errtable - retrieves the error table from the controller 338 + * 339 + * Executes 
a type 3 command and logs the error table from the controller. 340 + */ 341 + static void myrb_get_errtable(struct myrb_hba *cb) 342 + { 343 + struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk; 344 + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; 345 + unsigned short status; 346 + struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS]; 347 + 348 + memcpy(&old_table, cb->err_table, sizeof(old_table)); 349 + 350 + myrb_reset_cmd(cmd_blk); 351 + mbox->type3.id = MYRB_MCMD_TAG; 352 + mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE; 353 + mbox->type3.addr = cb->err_table_addr; 354 + status = myrb_exec_cmd(cb, cmd_blk); 355 + if (status == MYRB_STATUS_SUCCESS) { 356 + struct myrb_error_entry *table = cb->err_table; 357 + struct myrb_error_entry *new, *old; 358 + size_t err_table_offset; 359 + struct scsi_device *sdev; 360 + 361 + shost_for_each_device(sdev, cb->host) { 362 + if (sdev->channel >= myrb_logical_channel(cb->host)) 363 + continue; 364 + err_table_offset = sdev->channel * MYRB_MAX_TARGETS 365 + + sdev->id; 366 + new = table + err_table_offset; 367 + old = &old_table[err_table_offset]; 368 + if (new->parity_err == old->parity_err && 369 + new->soft_err == old->soft_err && 370 + new->hard_err == old->hard_err && 371 + new->misc_err == old->misc_err) 372 + continue; 373 + sdev_printk(KERN_CRIT, sdev, 374 + "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n", 375 + new->parity_err, new->soft_err, 376 + new->hard_err, new->misc_err); 377 + } 378 + } 379 + } 380 + 381 + /** 382 + * myrb_get_ldev_info - retrieves the logical device table from the controller 383 + * 384 + * Executes a type 3 command and updates the logical device table. 
385 + * 386 + * Return: command status 387 + */ 388 + static unsigned short myrb_get_ldev_info(struct myrb_hba *cb) 389 + { 390 + unsigned short status; 391 + int ldev_num, ldev_cnt = cb->enquiry->ldev_count; 392 + struct Scsi_Host *shost = cb->host; 393 + 394 + status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO, 395 + cb->ldev_info_addr); 396 + if (status != MYRB_STATUS_SUCCESS) 397 + return status; 398 + 399 + for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) { 400 + struct myrb_ldev_info *old = NULL; 401 + struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num; 402 + struct scsi_device *sdev; 403 + 404 + sdev = scsi_device_lookup(shost, myrb_logical_channel(shost), 405 + ldev_num, 0); 406 + if (!sdev) { 407 + if (new->state == MYRB_DEVICE_OFFLINE) 408 + continue; 409 + shost_printk(KERN_INFO, shost, 410 + "Adding Logical Drive %d in state %s\n", 411 + ldev_num, myrb_devstate_name(new->state)); 412 + scsi_add_device(shost, myrb_logical_channel(shost), 413 + ldev_num, 0); 414 + continue; 415 + } 416 + old = sdev->hostdata; 417 + if (new->state != old->state) 418 + shost_printk(KERN_INFO, shost, 419 + "Logical Drive %d is now %s\n", 420 + ldev_num, myrb_devstate_name(new->state)); 421 + if (new->wb_enabled != old->wb_enabled) 422 + sdev_printk(KERN_INFO, sdev, 423 + "Logical Drive is now WRITE %s\n", 424 + (new->wb_enabled ? "BACK" : "THRU")); 425 + memcpy(old, new, sizeof(*new)); 426 + scsi_device_put(sdev); 427 + } 428 + return status; 429 + } 430 + 431 + /** 432 + * myrb_get_rbld_progress - get rebuild progress information 433 + * 434 + * Executes a type 3 command and returns the rebuild progress 435 + * information. 
436 + * 437 + * Return: command status 438 + */ 439 + static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb, 440 + struct myrb_rbld_progress *rbld) 441 + { 442 + struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk; 443 + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; 444 + struct myrb_rbld_progress *rbld_buf; 445 + dma_addr_t rbld_addr; 446 + unsigned short status; 447 + 448 + rbld_buf = dma_alloc_coherent(&cb->pdev->dev, 449 + sizeof(struct myrb_rbld_progress), 450 + &rbld_addr, GFP_KERNEL); 451 + if (!rbld_buf) 452 + return MYRB_STATUS_RBLD_NOT_CHECKED; 453 + 454 + myrb_reset_cmd(cmd_blk); 455 + mbox->type3.id = MYRB_MCMD_TAG; 456 + mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS; 457 + mbox->type3.addr = rbld_addr; 458 + status = myrb_exec_cmd(cb, cmd_blk); 459 + if (rbld) 460 + memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress)); 461 + dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress), 462 + rbld_buf, rbld_addr); 463 + return status; 464 + } 465 + 466 + /** 467 + * myrb_update_rbld_progress - updates the rebuild status 468 + * 469 + * Updates the rebuild status for the attached logical devices. 
470 + * 471 + */ 472 + static void myrb_update_rbld_progress(struct myrb_hba *cb) 473 + { 474 + struct myrb_rbld_progress rbld_buf; 475 + unsigned short status; 476 + 477 + status = myrb_get_rbld_progress(cb, &rbld_buf); 478 + if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS && 479 + cb->last_rbld_status == MYRB_STATUS_SUCCESS) 480 + status = MYRB_STATUS_RBLD_SUCCESS; 481 + if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) { 482 + unsigned int blocks_done = 483 + rbld_buf.ldev_size - rbld_buf.blocks_left; 484 + struct scsi_device *sdev; 485 + 486 + sdev = scsi_device_lookup(cb->host, 487 + myrb_logical_channel(cb->host), 488 + rbld_buf.ldev_num, 0); 489 + if (!sdev) 490 + return; 491 + 492 + switch (status) { 493 + case MYRB_STATUS_SUCCESS: 494 + sdev_printk(KERN_INFO, sdev, 495 + "Rebuild in Progress, %d%% completed\n", 496 + (100 * (blocks_done >> 7)) 497 + / (rbld_buf.ldev_size >> 7)); 498 + break; 499 + case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE: 500 + sdev_printk(KERN_INFO, sdev, 501 + "Rebuild Failed due to Logical Drive Failure\n"); 502 + break; 503 + case MYRB_STATUS_RBLD_FAILED_BADBLOCKS: 504 + sdev_printk(KERN_INFO, sdev, 505 + "Rebuild Failed due to Bad Blocks on Other Drives\n"); 506 + break; 507 + case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED: 508 + sdev_printk(KERN_INFO, sdev, 509 + "Rebuild Failed due to Failure of Drive Being Rebuilt\n"); 510 + break; 511 + case MYRB_STATUS_RBLD_SUCCESS: 512 + sdev_printk(KERN_INFO, sdev, 513 + "Rebuild Completed Successfully\n"); 514 + break; 515 + case MYRB_STATUS_RBLD_SUCCESS_TERMINATED: 516 + sdev_printk(KERN_INFO, sdev, 517 + "Rebuild Successfully Terminated\n"); 518 + break; 519 + default: 520 + break; 521 + } 522 + scsi_device_put(sdev); 523 + } 524 + cb->last_rbld_status = status; 525 + } 526 + 527 + /** 528 + * myrb_get_cc_progress - retrieve the rebuild status 529 + * 530 + * Execute a type 3 Command and fetch the rebuild / consistency check 531 + * status. 
532 + */ 533 + static void myrb_get_cc_progress(struct myrb_hba *cb) 534 + { 535 + struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk; 536 + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; 537 + struct myrb_rbld_progress *rbld_buf; 538 + dma_addr_t rbld_addr; 539 + unsigned short status; 540 + 541 + rbld_buf = dma_alloc_coherent(&cb->pdev->dev, 542 + sizeof(struct myrb_rbld_progress), 543 + &rbld_addr, GFP_KERNEL); 544 + if (!rbld_buf) { 545 + cb->need_cc_status = true; 546 + return; 547 + } 548 + myrb_reset_cmd(cmd_blk); 549 + mbox->type3.id = MYRB_MCMD_TAG; 550 + mbox->type3.opcode = MYRB_CMD_REBUILD_STAT; 551 + mbox->type3.addr = rbld_addr; 552 + status = myrb_exec_cmd(cb, cmd_blk); 553 + if (status == MYRB_STATUS_SUCCESS) { 554 + unsigned int ldev_num = rbld_buf->ldev_num; 555 + unsigned int ldev_size = rbld_buf->ldev_size; 556 + unsigned int blocks_done = 557 + ldev_size - rbld_buf->blocks_left; 558 + struct scsi_device *sdev; 559 + 560 + sdev = scsi_device_lookup(cb->host, 561 + myrb_logical_channel(cb->host), 562 + ldev_num, 0); 563 + if (sdev) { 564 + sdev_printk(KERN_INFO, sdev, 565 + "Consistency Check in Progress: %d%% completed\n", 566 + (100 * (blocks_done >> 7)) 567 + / (ldev_size >> 7)); 568 + scsi_device_put(sdev); 569 + } 570 + } 571 + dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress), 572 + rbld_buf, rbld_addr); 573 + } 574 + 575 + /** 576 + * myrb_bgi_control - updates background initialisation status 577 + * 578 + * Executes a type 3B command and updates the background initialisation status 579 + */ 580 + static void myrb_bgi_control(struct myrb_hba *cb) 581 + { 582 + struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk; 583 + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; 584 + struct myrb_bgi_status *bgi, *last_bgi; 585 + dma_addr_t bgi_addr; 586 + struct scsi_device *sdev = NULL; 587 + unsigned short status; 588 + 589 + bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status), 590 + &bgi_addr, GFP_KERNEL); 591 + if (!bgi) { 592 + 
shost_printk(KERN_ERR, cb->host, 593 + "Failed to allocate bgi memory\n"); 594 + return; 595 + } 596 + myrb_reset_cmd(cmd_blk); 597 + mbox->type3B.id = MYRB_DCMD_TAG; 598 + mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL; 599 + mbox->type3B.optype = 0x20; 600 + mbox->type3B.addr = bgi_addr; 601 + status = myrb_exec_cmd(cb, cmd_blk); 602 + last_bgi = &cb->bgi_status; 603 + sdev = scsi_device_lookup(cb->host, 604 + myrb_logical_channel(cb->host), 605 + bgi->ldev_num, 0); 606 + switch (status) { 607 + case MYRB_STATUS_SUCCESS: 608 + switch (bgi->status) { 609 + case MYRB_BGI_INVALID: 610 + break; 611 + case MYRB_BGI_STARTED: 612 + if (!sdev) 613 + break; 614 + sdev_printk(KERN_INFO, sdev, 615 + "Background Initialization Started\n"); 616 + break; 617 + case MYRB_BGI_INPROGRESS: 618 + if (!sdev) 619 + break; 620 + if (bgi->blocks_done == last_bgi->blocks_done && 621 + bgi->ldev_num == last_bgi->ldev_num) 622 + break; 623 + sdev_printk(KERN_INFO, sdev, 624 + "Background Initialization in Progress: %d%% completed\n", 625 + (100 * (bgi->blocks_done >> 7)) 626 + / (bgi->ldev_size >> 7)); 627 + break; 628 + case MYRB_BGI_SUSPENDED: 629 + if (!sdev) 630 + break; 631 + sdev_printk(KERN_INFO, sdev, 632 + "Background Initialization Suspended\n"); 633 + break; 634 + case MYRB_BGI_CANCELLED: 635 + if (!sdev) 636 + break; 637 + sdev_printk(KERN_INFO, sdev, 638 + "Background Initialization Cancelled\n"); 639 + break; 640 + } 641 + memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status)); 642 + break; 643 + case MYRB_STATUS_BGI_SUCCESS: 644 + if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS) 645 + sdev_printk(KERN_INFO, sdev, 646 + "Background Initialization Completed Successfully\n"); 647 + cb->bgi_status.status = MYRB_BGI_INVALID; 648 + break; 649 + case MYRB_STATUS_BGI_ABORTED: 650 + if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS) 651 + sdev_printk(KERN_INFO, sdev, 652 + "Background Initialization Aborted\n"); 653 + /* Fallthrough */ 654 + case 
MYRB_STATUS_NO_BGI_INPROGRESS: 655 + cb->bgi_status.status = MYRB_BGI_INVALID; 656 + break; 657 + } 658 + if (sdev) 659 + scsi_device_put(sdev); 660 + dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status), 661 + bgi, bgi_addr); 662 + } 663 + 664 + /** 665 + * myrb_hba_enquiry - updates the controller status 666 + * 667 + * Executes a DAC_V1_Enquiry command and updates the controller status. 668 + * 669 + * Return: command status 670 + */ 671 + static unsigned short myrb_hba_enquiry(struct myrb_hba *cb) 672 + { 673 + struct myrb_enquiry old, *new; 674 + unsigned short status; 675 + 676 + memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry)); 677 + 678 + status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr); 679 + if (status != MYRB_STATUS_SUCCESS) 680 + return status; 681 + 682 + new = cb->enquiry; 683 + if (new->ldev_count > old.ldev_count) { 684 + int ldev_num = old.ldev_count - 1; 685 + 686 + while (++ldev_num < new->ldev_count) 687 + shost_printk(KERN_CRIT, cb->host, 688 + "Logical Drive %d Now Exists\n", 689 + ldev_num); 690 + } 691 + if (new->ldev_count < old.ldev_count) { 692 + int ldev_num = new->ldev_count - 1; 693 + 694 + while (++ldev_num < old.ldev_count) 695 + shost_printk(KERN_CRIT, cb->host, 696 + "Logical Drive %d No Longer Exists\n", 697 + ldev_num); 698 + } 699 + if (new->status.deferred != old.status.deferred) 700 + shost_printk(KERN_CRIT, cb->host, 701 + "Deferred Write Error Flag is now %s\n", 702 + (new->status.deferred ? 
"TRUE" : "FALSE")); 703 + if (new->ev_seq != old.ev_seq) { 704 + cb->new_ev_seq = new->ev_seq; 705 + cb->need_err_info = true; 706 + shost_printk(KERN_INFO, cb->host, 707 + "Event log %d/%d (%d/%d) available\n", 708 + cb->old_ev_seq, cb->new_ev_seq, 709 + old.ev_seq, new->ev_seq); 710 + } 711 + if ((new->ldev_critical > 0 && 712 + new->ldev_critical != old.ldev_critical) || 713 + (new->ldev_offline > 0 && 714 + new->ldev_offline != old.ldev_offline) || 715 + (new->ldev_count != old.ldev_count)) { 716 + shost_printk(KERN_INFO, cb->host, 717 + "Logical drive count changed (%d/%d/%d)\n", 718 + new->ldev_critical, 719 + new->ldev_offline, 720 + new->ldev_count); 721 + cb->need_ldev_info = true; 722 + } 723 + if (new->pdev_dead > 0 || 724 + new->pdev_dead != old.pdev_dead || 725 + time_after_eq(jiffies, cb->secondary_monitor_time 726 + + MYRB_SECONDARY_MONITOR_INTERVAL)) { 727 + cb->need_bgi_status = cb->bgi_status_supported; 728 + cb->secondary_monitor_time = jiffies; 729 + } 730 + if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS || 731 + new->rbld == MYRB_BG_RBLD_IN_PROGRESS || 732 + old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS || 733 + old.rbld == MYRB_BG_RBLD_IN_PROGRESS) { 734 + cb->need_rbld = true; 735 + cb->rbld_first = (new->ldev_critical < old.ldev_critical); 736 + } 737 + if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS) 738 + switch (new->rbld) { 739 + case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS: 740 + shost_printk(KERN_INFO, cb->host, 741 + "Consistency Check Completed Successfully\n"); 742 + break; 743 + case MYRB_STDBY_RBLD_IN_PROGRESS: 744 + case MYRB_BG_RBLD_IN_PROGRESS: 745 + break; 746 + case MYRB_BG_CHECK_IN_PROGRESS: 747 + cb->need_cc_status = true; 748 + break; 749 + case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR: 750 + shost_printk(KERN_INFO, cb->host, 751 + "Consistency Check Completed with Error\n"); 752 + break; 753 + case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED: 754 + shost_printk(KERN_INFO, cb->host, 755 + "Consistency Check Failed - Physical Device 
Failed\n"); 756 + break; 757 + case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED: 758 + shost_printk(KERN_INFO, cb->host, 759 + "Consistency Check Failed - Logical Drive Failed\n"); 760 + break; 761 + case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER: 762 + shost_printk(KERN_INFO, cb->host, 763 + "Consistency Check Failed - Other Causes\n"); 764 + break; 765 + case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED: 766 + shost_printk(KERN_INFO, cb->host, 767 + "Consistency Check Successfully Terminated\n"); 768 + break; 769 + } 770 + else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS) 771 + cb->need_cc_status = true; 772 + 773 + return MYRB_STATUS_SUCCESS; 774 + } 775 + 776 + /** 777 + * myrb_set_pdev_state - sets the device state for a physical device 778 + * 779 + * Return: command status 780 + */ 781 + static unsigned short myrb_set_pdev_state(struct myrb_hba *cb, 782 + struct scsi_device *sdev, enum myrb_devstate state) 783 + { 784 + struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk; 785 + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; 786 + unsigned short status; 787 + 788 + mutex_lock(&cb->dcmd_mutex); 789 + mbox->type3D.opcode = MYRB_CMD_START_DEVICE; 790 + mbox->type3D.id = MYRB_DCMD_TAG; 791 + mbox->type3D.channel = sdev->channel; 792 + mbox->type3D.target = sdev->id; 793 + mbox->type3D.state = state & 0x1F; 794 + status = myrb_exec_cmd(cb, cmd_blk); 795 + mutex_unlock(&cb->dcmd_mutex); 796 + 797 + return status; 798 + } 799 + 800 + /** 801 + * myrb_enable_mmio - enables the Memory Mailbox Interface 802 + * 803 + * PD and P controller types have no memory mailbox, but still need the 804 + * other dma mapped memory. 805 + * 806 + * Return: true on success, false otherwise. 
807 + */ 808 + static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn) 809 + { 810 + void __iomem *base = cb->io_base; 811 + struct pci_dev *pdev = cb->pdev; 812 + size_t err_table_size; 813 + size_t ldev_info_size; 814 + union myrb_cmd_mbox *cmd_mbox_mem; 815 + struct myrb_stat_mbox *stat_mbox_mem; 816 + union myrb_cmd_mbox mbox; 817 + unsigned short status; 818 + 819 + memset(&mbox, 0, sizeof(union myrb_cmd_mbox)); 820 + 821 + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { 822 + dev_err(&pdev->dev, "DMA mask out of range\n"); 823 + return false; 824 + } 825 + 826 + cb->enquiry = dma_alloc_coherent(&pdev->dev, 827 + sizeof(struct myrb_enquiry), 828 + &cb->enquiry_addr, GFP_KERNEL); 829 + if (!cb->enquiry) 830 + return false; 831 + 832 + err_table_size = sizeof(struct myrb_error_entry) * 833 + MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS; 834 + cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size, 835 + &cb->err_table_addr, GFP_KERNEL); 836 + if (!cb->err_table) 837 + return false; 838 + 839 + ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS; 840 + cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size, 841 + &cb->ldev_info_addr, GFP_KERNEL); 842 + if (!cb->ldev_info_buf) 843 + return false; 844 + 845 + /* 846 + * Skip mailbox initialisation for PD and P Controllers 847 + */ 848 + if (!mmio_init_fn) 849 + return true; 850 + 851 + /* These are the base addresses for the command memory mailbox array */ 852 + cb->cmd_mbox_size = MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox); 853 + cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev, 854 + cb->cmd_mbox_size, 855 + &cb->cmd_mbox_addr, 856 + GFP_KERNEL); 857 + if (!cb->first_cmd_mbox) 858 + return false; 859 + 860 + cmd_mbox_mem = cb->first_cmd_mbox; 861 + cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1; 862 + cb->last_cmd_mbox = cmd_mbox_mem; 863 + cb->next_cmd_mbox = cb->first_cmd_mbox; 864 + cb->prev_cmd_mbox1 = cb->last_cmd_mbox; 865 + cb->prev_cmd_mbox2 = 
cb->last_cmd_mbox - 1; 866 + 867 + /* These are the base addresses for the status memory mailbox array */ 868 + cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT * 869 + sizeof(struct myrb_stat_mbox); 870 + cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev, 871 + cb->stat_mbox_size, 872 + &cb->stat_mbox_addr, 873 + GFP_KERNEL); 874 + if (!cb->first_stat_mbox) 875 + return false; 876 + 877 + stat_mbox_mem = cb->first_stat_mbox; 878 + stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1; 879 + cb->last_stat_mbox = stat_mbox_mem; 880 + cb->next_stat_mbox = cb->first_stat_mbox; 881 + 882 + /* Enable the Memory Mailbox Interface. */ 883 + cb->dual_mode_interface = true; 884 + mbox.typeX.opcode = 0x2B; 885 + mbox.typeX.id = 0; 886 + mbox.typeX.opcode2 = 0x14; 887 + mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr; 888 + mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr; 889 + 890 + status = mmio_init_fn(pdev, base, &mbox); 891 + if (status != MYRB_STATUS_SUCCESS) { 892 + cb->dual_mode_interface = false; 893 + mbox.typeX.opcode2 = 0x10; 894 + status = mmio_init_fn(pdev, base, &mbox); 895 + if (status != MYRB_STATUS_SUCCESS) { 896 + dev_err(&pdev->dev, 897 + "Failed to enable mailbox, statux %02X\n", 898 + status); 899 + return false; 900 + } 901 + } 902 + return true; 903 + } 904 + 905 + /** 906 + * myrb_get_hba_config - reads the configuration information 907 + * 908 + * Reads the configuration information from the controller and 909 + * initializes the controller structure. 
910 + * 911 + * Return: 0 on success, errno otherwise 912 + */ 913 + static int myrb_get_hba_config(struct myrb_hba *cb) 914 + { 915 + struct myrb_enquiry2 *enquiry2; 916 + dma_addr_t enquiry2_addr; 917 + struct myrb_config2 *config2; 918 + dma_addr_t config2_addr; 919 + struct Scsi_Host *shost = cb->host; 920 + struct pci_dev *pdev = cb->pdev; 921 + int pchan_max = 0, pchan_cur = 0; 922 + unsigned short status; 923 + int ret = -ENODEV, memsize = 0; 924 + 925 + enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2), 926 + &enquiry2_addr, GFP_KERNEL); 927 + if (!enquiry2) { 928 + shost_printk(KERN_ERR, cb->host, 929 + "Failed to allocate V1 enquiry2 memory\n"); 930 + return -ENOMEM; 931 + } 932 + config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2), 933 + &config2_addr, GFP_KERNEL); 934 + if (!config2) { 935 + shost_printk(KERN_ERR, cb->host, 936 + "Failed to allocate V1 config2 memory\n"); 937 + dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2), 938 + enquiry2, enquiry2_addr); 939 + return -ENOMEM; 940 + } 941 + mutex_lock(&cb->dma_mutex); 942 + status = myrb_hba_enquiry(cb); 943 + mutex_unlock(&cb->dma_mutex); 944 + if (status != MYRB_STATUS_SUCCESS) { 945 + shost_printk(KERN_WARNING, cb->host, 946 + "Failed it issue V1 Enquiry\n"); 947 + goto out_free; 948 + } 949 + 950 + status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr); 951 + if (status != MYRB_STATUS_SUCCESS) { 952 + shost_printk(KERN_WARNING, cb->host, 953 + "Failed to issue V1 Enquiry2\n"); 954 + goto out_free; 955 + } 956 + 957 + status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr); 958 + if (status != MYRB_STATUS_SUCCESS) { 959 + shost_printk(KERN_WARNING, cb->host, 960 + "Failed to issue ReadConfig2\n"); 961 + goto out_free; 962 + } 963 + 964 + status = myrb_get_ldev_info(cb); 965 + if (status != MYRB_STATUS_SUCCESS) { 966 + shost_printk(KERN_WARNING, cb->host, 967 + "Failed to get logical drive information\n"); 968 + goto out_free; 969 + 
} 970 + 971 + /* 972 + * Initialize the Controller Model Name and Full Model Name fields. 973 + */ 974 + switch (enquiry2->hw.sub_model) { 975 + case DAC960_V1_P_PD_PU: 976 + if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA) 977 + strcpy(cb->model_name, "DAC960PU"); 978 + else 979 + strcpy(cb->model_name, "DAC960PD"); 980 + break; 981 + case DAC960_V1_PL: 982 + strcpy(cb->model_name, "DAC960PL"); 983 + break; 984 + case DAC960_V1_PG: 985 + strcpy(cb->model_name, "DAC960PG"); 986 + break; 987 + case DAC960_V1_PJ: 988 + strcpy(cb->model_name, "DAC960PJ"); 989 + break; 990 + case DAC960_V1_PR: 991 + strcpy(cb->model_name, "DAC960PR"); 992 + break; 993 + case DAC960_V1_PT: 994 + strcpy(cb->model_name, "DAC960PT"); 995 + break; 996 + case DAC960_V1_PTL0: 997 + strcpy(cb->model_name, "DAC960PTL0"); 998 + break; 999 + case DAC960_V1_PRL: 1000 + strcpy(cb->model_name, "DAC960PRL"); 1001 + break; 1002 + case DAC960_V1_PTL1: 1003 + strcpy(cb->model_name, "DAC960PTL1"); 1004 + break; 1005 + case DAC960_V1_1164P: 1006 + strcpy(cb->model_name, "eXtremeRAID 1100"); 1007 + break; 1008 + default: 1009 + shost_printk(KERN_WARNING, cb->host, 1010 + "Unknown Model %X\n", 1011 + enquiry2->hw.sub_model); 1012 + goto out; 1013 + } 1014 + /* 1015 + * Initialize the Controller Firmware Version field and verify that it 1016 + * is a supported firmware version. 1017 + * The supported firmware versions are: 1018 + * 1019 + * DAC1164P 5.06 and above 1020 + * DAC960PTL/PRL/PJ/PG 4.06 and above 1021 + * DAC960PU/PD/PL 3.51 and above 1022 + * DAC960PU/PD/PL/P 2.73 and above 1023 + */ 1024 + #if defined(CONFIG_ALPHA) 1025 + /* 1026 + * DEC Alpha machines were often equipped with DAC960 cards that were 1027 + * OEMed from Mylex, and had their own custom firmware. Version 2.70, 1028 + * the last custom FW revision to be released by DEC for these older 1029 + * controllers, appears to work quite well with this driver. 
1030 + * 1031 + * Cards tested successfully were several versions each of the PD and 1032 + * PU, called by DEC the KZPSC and KZPAC, respectively, and having 1033 + * the Manufacturer Numbers (from Mylex), usually on a sticker on the 1034 + * back of the board, of: 1035 + * 1036 + * KZPSC: D040347 (1-channel) or D040348 (2-channel) 1037 + * or D040349 (3-channel) 1038 + * KZPAC: D040395 (1-channel) or D040396 (2-channel) 1039 + * or D040397 (3-channel) 1040 + */ 1041 + # define FIRMWARE_27X "2.70" 1042 + #else 1043 + # define FIRMWARE_27X "2.73" 1044 + #endif 1045 + 1046 + if (enquiry2->fw.major_version == 0) { 1047 + enquiry2->fw.major_version = cb->enquiry->fw_major_version; 1048 + enquiry2->fw.minor_version = cb->enquiry->fw_minor_version; 1049 + enquiry2->fw.firmware_type = '0'; 1050 + enquiry2->fw.turn_id = 0; 1051 + } 1052 + sprintf(cb->fw_version, "%d.%02d-%c-%02d", 1053 + enquiry2->fw.major_version, 1054 + enquiry2->fw.minor_version, 1055 + enquiry2->fw.firmware_type, 1056 + enquiry2->fw.turn_id); 1057 + if (!((enquiry2->fw.major_version == 5 && 1058 + enquiry2->fw.minor_version >= 6) || 1059 + (enquiry2->fw.major_version == 4 && 1060 + enquiry2->fw.minor_version >= 6) || 1061 + (enquiry2->fw.major_version == 3 && 1062 + enquiry2->fw.minor_version >= 51) || 1063 + (enquiry2->fw.major_version == 2 && 1064 + strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) { 1065 + shost_printk(KERN_WARNING, cb->host, 1066 + "Firmware Version '%s' unsupported\n", 1067 + cb->fw_version); 1068 + goto out; 1069 + } 1070 + /* 1071 + * Initialize the Channels, Targets, Memory Size, and SAF-TE 1072 + * Enclosure Management Enabled fields. 
1073 + */ 1074 + switch (enquiry2->hw.model) { 1075 + case MYRB_5_CHANNEL_BOARD: 1076 + pchan_max = 5; 1077 + break; 1078 + case MYRB_3_CHANNEL_BOARD: 1079 + case MYRB_3_CHANNEL_ASIC_DAC: 1080 + pchan_max = 3; 1081 + break; 1082 + case MYRB_2_CHANNEL_BOARD: 1083 + pchan_max = 2; 1084 + break; 1085 + default: 1086 + pchan_max = enquiry2->cfg_chan; 1087 + break; 1088 + } 1089 + pchan_cur = enquiry2->cur_chan; 1090 + if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT) 1091 + cb->bus_width = 32; 1092 + else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT) 1093 + cb->bus_width = 16; 1094 + else 1095 + cb->bus_width = 8; 1096 + cb->ldev_block_size = enquiry2->ldev_block_size; 1097 + shost->max_channel = pchan_cur; 1098 + shost->max_id = enquiry2->max_targets; 1099 + memsize = enquiry2->mem_size >> 20; 1100 + cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE); 1101 + /* 1102 + * Initialize the Controller Queue Depth, Driver Queue Depth, 1103 + * Logical Drive Count, Maximum Blocks per Command, Controller 1104 + * Scatter/Gather Limit, and Driver Scatter/Gather Limit. 1105 + * The Driver Queue Depth must be at most one less than the 1106 + * Controller Queue Depth to allow for an automatic drive 1107 + * rebuild operation. 1108 + */ 1109 + shost->can_queue = cb->enquiry->max_tcq; 1110 + if (shost->can_queue < 3) 1111 + shost->can_queue = enquiry2->max_cmds; 1112 + if (shost->can_queue < 3) 1113 + /* Play safe and disable TCQ */ 1114 + shost->can_queue = 1; 1115 + 1116 + if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2) 1117 + shost->can_queue = MYRB_CMD_MBOX_COUNT - 2; 1118 + shost->max_sectors = enquiry2->max_sectors; 1119 + shost->sg_tablesize = enquiry2->max_sge; 1120 + if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT) 1121 + shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT; 1122 + /* 1123 + * Initialize the Stripe Size, Segment Size, and Geometry Translation. 
1124 + */ 1125 + cb->stripe_size = config2->blocks_per_stripe * config2->block_factor 1126 + >> (10 - MYRB_BLKSIZE_BITS); 1127 + cb->segment_size = config2->blocks_per_cacheline * config2->block_factor 1128 + >> (10 - MYRB_BLKSIZE_BITS); 1129 + /* Assume 255/63 translation */ 1130 + cb->ldev_geom_heads = 255; 1131 + cb->ldev_geom_sectors = 63; 1132 + if (config2->drive_geometry) { 1133 + cb->ldev_geom_heads = 128; 1134 + cb->ldev_geom_sectors = 32; 1135 + } 1136 + 1137 + /* 1138 + * Initialize the Background Initialization Status. 1139 + */ 1140 + if ((cb->fw_version[0] == '4' && 1141 + strcmp(cb->fw_version, "4.08") >= 0) || 1142 + (cb->fw_version[0] == '5' && 1143 + strcmp(cb->fw_version, "5.08") >= 0)) { 1144 + cb->bgi_status_supported = true; 1145 + myrb_bgi_control(cb); 1146 + } 1147 + cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS; 1148 + ret = 0; 1149 + 1150 + out: 1151 + shost_printk(KERN_INFO, cb->host, 1152 + "Configuring %s PCI RAID Controller\n", cb->model_name); 1153 + shost_printk(KERN_INFO, cb->host, 1154 + " Firmware Version: %s, Memory Size: %dMB\n", 1155 + cb->fw_version, memsize); 1156 + if (cb->io_addr == 0) 1157 + shost_printk(KERN_INFO, cb->host, 1158 + " I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n", 1159 + (unsigned long)cb->pci_addr, cb->irq); 1160 + else 1161 + shost_printk(KERN_INFO, cb->host, 1162 + " I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n", 1163 + (unsigned long)cb->io_addr, (unsigned long)cb->pci_addr, 1164 + cb->irq); 1165 + shost_printk(KERN_INFO, cb->host, 1166 + " Controller Queue Depth: %d, Maximum Blocks per Command: %d\n", 1167 + cb->host->can_queue, cb->host->max_sectors); 1168 + shost_printk(KERN_INFO, cb->host, 1169 + " Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n", 1170 + cb->host->can_queue, cb->host->sg_tablesize, 1171 + MYRB_SCATTER_GATHER_LIMIT); 1172 + shost_printk(KERN_INFO, cb->host, 1173 + " Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: 
%d/%d%s\n", 1174 + cb->stripe_size, cb->segment_size, 1175 + cb->ldev_geom_heads, cb->ldev_geom_sectors, 1176 + cb->safte_enabled ? 1177 + " SAF-TE Enclosure Management Enabled" : ""); 1178 + shost_printk(KERN_INFO, cb->host, 1179 + " Physical: %d/%d channels %d/%d/%d devices\n", 1180 + pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead, 1181 + cb->host->max_id); 1182 + 1183 + shost_printk(KERN_INFO, cb->host, 1184 + " Logical: 1/1 channels, %d/%d disks\n", 1185 + cb->enquiry->ldev_count, MYRB_MAX_LDEVS); 1186 + 1187 + out_free: 1188 + dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2), 1189 + enquiry2, enquiry2_addr); 1190 + dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2), 1191 + config2, config2_addr); 1192 + 1193 + return ret; 1194 + } 1195 + 1196 + /** 1197 + * myrb_unmap - unmaps controller structures 1198 + */ 1199 + static void myrb_unmap(struct myrb_hba *cb) 1200 + { 1201 + if (cb->ldev_info_buf) { 1202 + size_t ldev_info_size = sizeof(struct myrb_ldev_info) * 1203 + MYRB_MAX_LDEVS; 1204 + dma_free_coherent(&cb->pdev->dev, ldev_info_size, 1205 + cb->ldev_info_buf, cb->ldev_info_addr); 1206 + cb->ldev_info_buf = NULL; 1207 + } 1208 + if (cb->err_table) { 1209 + size_t err_table_size = sizeof(struct myrb_error_entry) * 1210 + MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS; 1211 + dma_free_coherent(&cb->pdev->dev, err_table_size, 1212 + cb->err_table, cb->err_table_addr); 1213 + cb->err_table = NULL; 1214 + } 1215 + if (cb->enquiry) { 1216 + dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry), 1217 + cb->enquiry, cb->enquiry_addr); 1218 + cb->enquiry = NULL; 1219 + } 1220 + if (cb->first_stat_mbox) { 1221 + dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size, 1222 + cb->first_stat_mbox, cb->stat_mbox_addr); 1223 + cb->first_stat_mbox = NULL; 1224 + } 1225 + if (cb->first_cmd_mbox) { 1226 + dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size, 1227 + cb->first_cmd_mbox, cb->cmd_mbox_addr); 1228 + cb->first_cmd_mbox = NULL; 1229 + } 1230 + } 1231 
/**
 * myrb_cleanup - cleanup controller structures
 */
static void myrb_cleanup(struct myrb_hba *cb)
{
	struct pci_dev *pdev = cb->pdev;

	/* Free the memory mailbox, status, and related structures */
	myrb_unmap(cb);

	if (cb->mmio_base) {
		cb->disable_intr(cb->io_base);
		iounmap(cb->mmio_base);
	}
	if (cb->irq)
		free_irq(cb->irq, cb);
	if (cb->io_addr)
		release_region(cb->io_addr, 0x80);
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	scsi_host_put(cb->host);
}

/*
 * myrb_host_reset - SCSI EH host reset handler: issues a controller
 * (soft) reset via the board-specific reset method.
 */
static int myrb_host_reset(struct scsi_cmnd *scmd)
{
	struct Scsi_Host *shost = scmd->device->host;
	struct myrb_hba *cb = shost_priv(shost);

	cb->reset(cb->io_base);
	return SUCCESS;
}

/*
 * myrb_pthru_queuecommand - queue a SCSI command to a physical device
 * by wrapping it in a Direct CDB (DCDB) mailbox command.
 *
 * The DCDB format supports only a single data segment, so any command
 * that maps to more than one SG element is failed with DID_ERROR.
 */
static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct myrb_hba *cb = shost_priv(shost);
	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_dcdb *dcdb;
	dma_addr_t dcdb_addr;
	struct scsi_device *sdev = scmd->device;
	struct scatterlist *sgl;
	unsigned long flags;
	int nsge;

	myrb_reset_cmd(cmd_blk);
	dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
	if (!dcdb)
		return SCSI_MLQUEUE_HOST_BUSY;
	/* NOTE(review): a negative scsi_dma_map() (mapping failure) falls
	 * through to the submission path below — confirm intended. */
	nsge = scsi_dma_map(scmd);
	if (nsge > 1) {
		/* DCDB can describe only one data segment */
		dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
		scmd->result = (DID_ERROR << 16);
		scmd->scsi_done(scmd);
		return 0;
	}

	mbox->type3.opcode = MYRB_CMD_DCDB;
	/* Tags 0..2 are reserved for driver-internal commands */
	mbox->type3.id = scmd->request->tag + 3;
	mbox->type3.addr = dcdb_addr;
	dcdb->channel = sdev->channel;
	dcdb->target = sdev->id;
	switch (scmd->sc_data_direction) {
	case DMA_NONE:
		dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
		break;
	case DMA_TO_DEVICE:
		dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
		break;
	case DMA_FROM_DEVICE:
		dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
		break;
	default:
		dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
		break;
	}
	dcdb->early_status = false;
	/* Map the midlayer timeout (seconds) onto the coarse DCDB buckets */
	if (scmd->request->timeout <= 10)
		dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
	else if (scmd->request->timeout <= 60)
		dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
	else if (scmd->request->timeout <= 600)
		dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
	else
		dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
	dcdb->no_autosense = false;
	dcdb->allow_disconnect = true;
	sgl = scsi_sglist(scmd);
	dcdb->dma_addr = sg_dma_address(sgl);
	/* Transfer length is split into a 16-bit low part and 4 high bits */
	if (sg_dma_len(sgl) > USHRT_MAX) {
		dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
		dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
	} else {
		dcdb->xfer_len_lo = sg_dma_len(sgl);
		dcdb->xfer_len_hi4 = 0;
	}
	dcdb->cdb_len = scmd->cmd_len;
	dcdb->sense_len = sizeof(dcdb->sense);
	memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);

	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return 0;
}

/*
 * myrb_inquiry - emulate INQUIRY for the logical-drive channel.
 * Returns a synthetic standard INQUIRY page with vendor "MYLEX",
 * the controller model name, and parts of the firmware version.
 */
static void myrb_inquiry(struct myrb_hba *cb,
		struct scsi_cmnd *scmd)
{
	unsigned char inq[36] = {
		0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
		0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20,
	};

	/* Advertise wide SCSI capability bits based on the bus width */
	if (cb->bus_width > 16)
		inq[7] |= 1 << 6;
	if (cb->bus_width > 8)
		inq[7] |= 1 << 5;
	memcpy(&inq[16], cb->model_name, 16);
	/* Revision field built from "M.mm-t-tt" fw_version characters */
	memcpy(&inq[32], cb->fw_version, 1);
	memcpy(&inq[33], &cb->fw_version[2], 2);
	memcpy(&inq[35], &cb->fw_version[7], 1);

	scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
}

/*
 * myrb_mode_sense - emulate MODE SENSE (caching page 0x08) for a
 * logical drive; honors the DBD bit to omit the block descriptor.
 */
static void
myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
		struct myrb_ldev_info *ldev_info)
{
	unsigned char modes[32], *mode_pg;
	bool dbd;
	size_t mode_len;

	dbd = (scmd->cmnd[1] & 0x08) == 0x08;
	if (dbd) {
		mode_len = 24;
		mode_pg = &modes[4];
	} else {
		mode_len = 32;
		mode_pg = &modes[12];
	}
	memset(modes, 0, sizeof(modes));
	modes[0] = mode_len - 1;
	if (!dbd) {
		unsigned char *block_desc = &modes[4];

		modes[3] = 8;
		put_unaligned_be32(ldev_info->size, &block_desc[0]);
		put_unaligned_be32(cb->ldev_block_size, &block_desc[5]);
	}
	mode_pg[0] = 0x08;
	mode_pg[1] = 0x12;
	/* WCE bit reflects the logical drive's write-back setting */
	if (ldev_info->wb_enabled)
		mode_pg[2] |= 0x04;
	if (cb->segment_size) {
		mode_pg[2] |= 0x08;
		put_unaligned_be16(cb->segment_size, &mode_pg[14]);
	}

	scsi_sg_copy_from_buffer(scmd, modes, mode_len);
}

/*
 * myrb_request_sense - emulate REQUEST SENSE: report NO SENSE.
 */
static void myrb_request_sense(struct myrb_hba *cb,
		struct scsi_cmnd *scmd)
{
	scsi_build_sense_buffer(0, scmd->sense_buffer,
				NO_SENSE, 0, 0);
	scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
				 SCSI_SENSE_BUFFERSIZE);
}

/*
 * myrb_read_capacity - emulate READ CAPACITY(10) from the cached
 * logical drive size and block size.
 */
static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
		struct myrb_ldev_info *ldev_info)
{
	unsigned char data[8];

	dev_dbg(&scmd->device->sdev_gendev,
		"Capacity %u, blocksize %u\n",
		ldev_info->size, cb->ldev_block_size);
	put_unaligned_be32(ldev_info->size - 1, &data[0]);
	put_unaligned_be32(cb->ldev_block_size, &data[4]);
	scsi_sg_copy_from_buffer(scmd, data, 8);
}

static int
myrb_ldev_queuecommand(struct Scsi_Host *shost, 1421 + struct scsi_cmnd *scmd) 1422 + { 1423 + struct myrb_hba *cb = shost_priv(shost); 1424 + struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd); 1425 + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; 1426 + struct myrb_ldev_info *ldev_info; 1427 + struct scsi_device *sdev = scmd->device; 1428 + struct scatterlist *sgl; 1429 + unsigned long flags; 1430 + u64 lba; 1431 + u32 block_cnt; 1432 + int nsge; 1433 + 1434 + ldev_info = sdev->hostdata; 1435 + if (ldev_info->state != MYRB_DEVICE_ONLINE && 1436 + ldev_info->state != MYRB_DEVICE_WO) { 1437 + dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n", 1438 + sdev->id, ldev_info ? ldev_info->state : 0xff); 1439 + scmd->result = (DID_BAD_TARGET << 16); 1440 + scmd->scsi_done(scmd); 1441 + return 0; 1442 + } 1443 + switch (scmd->cmnd[0]) { 1444 + case TEST_UNIT_READY: 1445 + scmd->result = (DID_OK << 16); 1446 + scmd->scsi_done(scmd); 1447 + return 0; 1448 + case INQUIRY: 1449 + if (scmd->cmnd[1] & 1) { 1450 + /* Illegal request, invalid field in CDB */ 1451 + scsi_build_sense_buffer(0, scmd->sense_buffer, 1452 + ILLEGAL_REQUEST, 0x24, 0); 1453 + scmd->result = (DRIVER_SENSE << 24) | 1454 + SAM_STAT_CHECK_CONDITION; 1455 + } else { 1456 + myrb_inquiry(cb, scmd); 1457 + scmd->result = (DID_OK << 16); 1458 + } 1459 + scmd->scsi_done(scmd); 1460 + return 0; 1461 + case SYNCHRONIZE_CACHE: 1462 + scmd->result = (DID_OK << 16); 1463 + scmd->scsi_done(scmd); 1464 + return 0; 1465 + case MODE_SENSE: 1466 + if ((scmd->cmnd[2] & 0x3F) != 0x3F && 1467 + (scmd->cmnd[2] & 0x3F) != 0x08) { 1468 + /* Illegal request, invalid field in CDB */ 1469 + scsi_build_sense_buffer(0, scmd->sense_buffer, 1470 + ILLEGAL_REQUEST, 0x24, 0); 1471 + scmd->result = (DRIVER_SENSE << 24) | 1472 + SAM_STAT_CHECK_CONDITION; 1473 + } else { 1474 + myrb_mode_sense(cb, scmd, ldev_info); 1475 + scmd->result = (DID_OK << 16); 1476 + } 1477 + scmd->scsi_done(scmd); 1478 + return 0; 1479 + case READ_CAPACITY: 
1480 + if ((scmd->cmnd[1] & 1) || 1481 + (scmd->cmnd[8] & 1)) { 1482 + /* Illegal request, invalid field in CDB */ 1483 + scsi_build_sense_buffer(0, scmd->sense_buffer, 1484 + ILLEGAL_REQUEST, 0x24, 0); 1485 + scmd->result = (DRIVER_SENSE << 24) | 1486 + SAM_STAT_CHECK_CONDITION; 1487 + scmd->scsi_done(scmd); 1488 + return 0; 1489 + } 1490 + lba = get_unaligned_be32(&scmd->cmnd[2]); 1491 + if (lba) { 1492 + /* Illegal request, invalid field in CDB */ 1493 + scsi_build_sense_buffer(0, scmd->sense_buffer, 1494 + ILLEGAL_REQUEST, 0x24, 0); 1495 + scmd->result = (DRIVER_SENSE << 24) | 1496 + SAM_STAT_CHECK_CONDITION; 1497 + scmd->scsi_done(scmd); 1498 + return 0; 1499 + } 1500 + myrb_read_capacity(cb, scmd, ldev_info); 1501 + scmd->scsi_done(scmd); 1502 + return 0; 1503 + case REQUEST_SENSE: 1504 + myrb_request_sense(cb, scmd); 1505 + scmd->result = (DID_OK << 16); 1506 + return 0; 1507 + case SEND_DIAGNOSTIC: 1508 + if (scmd->cmnd[1] != 0x04) { 1509 + /* Illegal request, invalid field in CDB */ 1510 + scsi_build_sense_buffer(0, scmd->sense_buffer, 1511 + ILLEGAL_REQUEST, 0x24, 0); 1512 + scmd->result = (DRIVER_SENSE << 24) | 1513 + SAM_STAT_CHECK_CONDITION; 1514 + } else { 1515 + /* Assume good status */ 1516 + scmd->result = (DID_OK << 16); 1517 + } 1518 + scmd->scsi_done(scmd); 1519 + return 0; 1520 + case READ_6: 1521 + if (ldev_info->state == MYRB_DEVICE_WO) { 1522 + /* Data protect, attempt to read invalid data */ 1523 + scsi_build_sense_buffer(0, scmd->sense_buffer, 1524 + DATA_PROTECT, 0x21, 0x06); 1525 + scmd->result = (DRIVER_SENSE << 24) | 1526 + SAM_STAT_CHECK_CONDITION; 1527 + scmd->scsi_done(scmd); 1528 + return 0; 1529 + } 1530 + case WRITE_6: 1531 + lba = (((scmd->cmnd[1] & 0x1F) << 16) | 1532 + (scmd->cmnd[2] << 8) | 1533 + scmd->cmnd[3]); 1534 + block_cnt = scmd->cmnd[4]; 1535 + break; 1536 + case READ_10: 1537 + if (ldev_info->state == MYRB_DEVICE_WO) { 1538 + /* Data protect, attempt to read invalid data */ 1539 + scsi_build_sense_buffer(0, 
scmd->sense_buffer, 1540 + DATA_PROTECT, 0x21, 0x06); 1541 + scmd->result = (DRIVER_SENSE << 24) | 1542 + SAM_STAT_CHECK_CONDITION; 1543 + scmd->scsi_done(scmd); 1544 + return 0; 1545 + } 1546 + case WRITE_10: 1547 + case VERIFY: /* 0x2F */ 1548 + case WRITE_VERIFY: /* 0x2E */ 1549 + lba = get_unaligned_be32(&scmd->cmnd[2]); 1550 + block_cnt = get_unaligned_be16(&scmd->cmnd[7]); 1551 + break; 1552 + case READ_12: 1553 + if (ldev_info->state == MYRB_DEVICE_WO) { 1554 + /* Data protect, attempt to read invalid data */ 1555 + scsi_build_sense_buffer(0, scmd->sense_buffer, 1556 + DATA_PROTECT, 0x21, 0x06); 1557 + scmd->result = (DRIVER_SENSE << 24) | 1558 + SAM_STAT_CHECK_CONDITION; 1559 + scmd->scsi_done(scmd); 1560 + return 0; 1561 + } 1562 + case WRITE_12: 1563 + case VERIFY_12: /* 0xAF */ 1564 + case WRITE_VERIFY_12: /* 0xAE */ 1565 + lba = get_unaligned_be32(&scmd->cmnd[2]); 1566 + block_cnt = get_unaligned_be32(&scmd->cmnd[6]); 1567 + break; 1568 + default: 1569 + /* Illegal request, invalid opcode */ 1570 + scsi_build_sense_buffer(0, scmd->sense_buffer, 1571 + ILLEGAL_REQUEST, 0x20, 0); 1572 + scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; 1573 + scmd->scsi_done(scmd); 1574 + return 0; 1575 + } 1576 + 1577 + myrb_reset_cmd(cmd_blk); 1578 + mbox->type5.id = scmd->request->tag + 3; 1579 + if (scmd->sc_data_direction == DMA_NONE) 1580 + goto submit; 1581 + nsge = scsi_dma_map(scmd); 1582 + if (nsge == 1) { 1583 + sgl = scsi_sglist(scmd); 1584 + if (scmd->sc_data_direction == DMA_FROM_DEVICE) 1585 + mbox->type5.opcode = MYRB_CMD_READ; 1586 + else 1587 + mbox->type5.opcode = MYRB_CMD_WRITE; 1588 + 1589 + mbox->type5.ld.xfer_len = block_cnt; 1590 + mbox->type5.ld.ldev_num = sdev->id; 1591 + mbox->type5.lba = lba; 1592 + mbox->type5.addr = (u32)sg_dma_address(sgl); 1593 + } else { 1594 + struct myrb_sge *hw_sgl; 1595 + dma_addr_t hw_sgl_addr; 1596 + int i; 1597 + 1598 + hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr); 1599 + if (!hw_sgl) 
1600 + return SCSI_MLQUEUE_HOST_BUSY; 1601 + 1602 + cmd_blk->sgl = hw_sgl; 1603 + cmd_blk->sgl_addr = hw_sgl_addr; 1604 + 1605 + if (scmd->sc_data_direction == DMA_FROM_DEVICE) 1606 + mbox->type5.opcode = MYRB_CMD_READ_SG; 1607 + else 1608 + mbox->type5.opcode = MYRB_CMD_WRITE_SG; 1609 + 1610 + mbox->type5.ld.xfer_len = block_cnt; 1611 + mbox->type5.ld.ldev_num = sdev->id; 1612 + mbox->type5.lba = lba; 1613 + mbox->type5.addr = hw_sgl_addr; 1614 + mbox->type5.sg_count = nsge; 1615 + 1616 + scsi_for_each_sg(scmd, sgl, nsge, i) { 1617 + hw_sgl->sge_addr = (u32)sg_dma_address(sgl); 1618 + hw_sgl->sge_count = (u32)sg_dma_len(sgl); 1619 + hw_sgl++; 1620 + } 1621 + } 1622 + submit: 1623 + spin_lock_irqsave(&cb->queue_lock, flags); 1624 + cb->qcmd(cb, cmd_blk); 1625 + spin_unlock_irqrestore(&cb->queue_lock, flags); 1626 + 1627 + return 0; 1628 + } 1629 + 1630 + static int myrb_queuecommand(struct Scsi_Host *shost, 1631 + struct scsi_cmnd *scmd) 1632 + { 1633 + struct scsi_device *sdev = scmd->device; 1634 + 1635 + if (sdev->channel > myrb_logical_channel(shost)) { 1636 + scmd->result = (DID_BAD_TARGET << 16); 1637 + scmd->scsi_done(scmd); 1638 + return 0; 1639 + } 1640 + if (sdev->channel == myrb_logical_channel(shost)) 1641 + return myrb_ldev_queuecommand(shost, scmd); 1642 + 1643 + return myrb_pthru_queuecommand(shost, scmd); 1644 + } 1645 + 1646 + static int myrb_ldev_slave_alloc(struct scsi_device *sdev) 1647 + { 1648 + struct myrb_hba *cb = shost_priv(sdev->host); 1649 + struct myrb_ldev_info *ldev_info; 1650 + unsigned short ldev_num = sdev->id; 1651 + enum raid_level level; 1652 + 1653 + ldev_info = cb->ldev_info_buf + ldev_num; 1654 + if (!ldev_info) 1655 + return -ENXIO; 1656 + 1657 + sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL); 1658 + if (!sdev->hostdata) 1659 + return -ENOMEM; 1660 + dev_dbg(&sdev->sdev_gendev, 1661 + "slave alloc ldev %d state %x\n", 1662 + ldev_num, ldev_info->state); 1663 + memcpy(sdev->hostdata, ldev_info, 1664 + 
	       sizeof(*ldev_info));
	/* Map the controller's RAID level encoding onto the generic
	 * raid_class levels for sysfs reporting. */
	switch (ldev_info->raid_level) {
	case MYRB_RAID_LEVEL0:
		level = RAID_LEVEL_LINEAR;
		break;
	case MYRB_RAID_LEVEL1:
		level = RAID_LEVEL_1;
		break;
	case MYRB_RAID_LEVEL3:
		level = RAID_LEVEL_3;
		break;
	case MYRB_RAID_LEVEL5:
		level = RAID_LEVEL_5;
		break;
	case MYRB_RAID_LEVEL6:
		level = RAID_LEVEL_6;
		break;
	case MYRB_RAID_JBOD:
		level = RAID_LEVEL_JBOD;
		break;
	default:
		level = RAID_LEVEL_UNKNOWN;
		break;
	}
	raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
	return 0;
}

/*
 * myrb_pdev_slave_alloc - allocate per-device data for a physical
 * device; queries the controller for the device's current state and
 * rejects absent devices. The allocated state is owned by the sdev
 * (freed in myrb_slave_destroy).
 */
static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
{
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_pdev_state *pdev_info;
	unsigned short status;

	/* NOTE(review): '>' allows id == MYRB_MAX_TARGETS — confirm
	 * whether ids are 0-based and this should be '>='. */
	if (sdev->id > MYRB_MAX_TARGETS)
		return -ENXIO;

	pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
	if (!pdev_info)
		return -ENOMEM;

	status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
				  sdev, pdev_info);
	if (status != MYRB_STATUS_SUCCESS) {
		dev_dbg(&sdev->sdev_gendev,
			"Failed to get device state, status %x\n",
			status);
		kfree(pdev_info);
		return -ENXIO;
	}
	if (!pdev_info->present) {
		dev_dbg(&sdev->sdev_gendev,
			"device not present, skip\n");
		kfree(pdev_info);
		return -ENXIO;
	}
	dev_dbg(&sdev->sdev_gendev,
		"slave alloc pdev %d:%d state %x\n",
		sdev->channel, sdev->id, pdev_info->state);
	sdev->hostdata = pdev_info;

	return 0;
}

/*
 * myrb_slave_alloc - dispatch slave allocation to the logical or
 * physical path; only LUN 0 is supported.
 */
static int myrb_slave_alloc(struct scsi_device *sdev)
{
	if (sdev->channel > myrb_logical_channel(sdev->host))
		return -ENXIO;

	if (sdev->lun > 0)
		return -ENXIO;

	if (sdev->channel == myrb_logical_channel(sdev->host))
		return myrb_ldev_slave_alloc(sdev);

	return myrb_pdev_slave_alloc(sdev);
}

/*
 * myrb_slave_configure - final per-device setup; physical devices are
 * hidden from upper-layer drivers (they are accessed via the RAID sets).
 */
static int myrb_slave_configure(struct scsi_device *sdev)
{
	struct myrb_ldev_info *ldev_info;

	if (sdev->channel > myrb_logical_channel(sdev->host))
		return -ENXIO;

	if (sdev->channel < myrb_logical_channel(sdev->host)) {
		/* Physical device: no upper-level driver attach */
		sdev->no_uld_attach = 1;
		return 0;
	}
	if (sdev->lun != 0)
		return -ENXIO;

	ldev_info = sdev->hostdata;
	if (!ldev_info)
		return -ENXIO;
	if (ldev_info->state != MYRB_DEVICE_ONLINE)
		sdev_printk(KERN_INFO, sdev,
			    "Logical drive is %s\n",
			    myrb_devstate_name(ldev_info->state));

	sdev->tagged_supported = 1;
	return 0;
}

/*
 * myrb_slave_destroy - free the per-device state allocated in
 * myrb_slave_alloc (kfree(NULL) is a no-op).
 */
static void myrb_slave_destroy(struct scsi_device *sdev)
{
	kfree(sdev->hostdata);
}

/*
 * myrb_biosparam - report the BIOS disk geometry derived from the
 * controller's configured geometry translation.
 */
static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
		sector_t capacity, int geom[])
{
	struct myrb_hba *cb = shost_priv(sdev->host);

	geom[0] = cb->ldev_geom_heads;
	geom[1] = cb->ldev_geom_sectors;
	geom[2] = sector_div(capacity, geom[0] * geom[1]);

	return 0;
}

/*
 * raid_state_show - sysfs 'raid_state' attribute: reports the cached
 * state for logical drives, or a freshly queried state for physical
 * devices.
 */
static ssize_t raid_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	int ret;

	if (!sdev->hostdata)
		return snprintf(buf, 16, "Unknown\n");

	if (sdev->channel == myrb_logical_channel(sdev->host)) {
		struct myrb_ldev_info *ldev_info = sdev->hostdata;
		const char *name;

		name = myrb_devstate_name(ldev_info->state);
		if (name)
			ret = snprintf(buf, 32, "%s\n", name);
		else
			ret = snprintf(buf, 32, "Invalid (%02X)\n",
1804 + ldev_info->state); 1805 + } else { 1806 + struct myrb_pdev_state *pdev_info = sdev->hostdata; 1807 + unsigned short status; 1808 + const char *name; 1809 + 1810 + status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE, 1811 + sdev, pdev_info); 1812 + if (status != MYRB_STATUS_SUCCESS) 1813 + sdev_printk(KERN_INFO, sdev, 1814 + "Failed to get device state, status %x\n", 1815 + status); 1816 + 1817 + if (!pdev_info->present) 1818 + name = "Removed"; 1819 + else 1820 + name = myrb_devstate_name(pdev_info->state); 1821 + if (name) 1822 + ret = snprintf(buf, 32, "%s\n", name); 1823 + else 1824 + ret = snprintf(buf, 32, "Invalid (%02X)\n", 1825 + pdev_info->state); 1826 + } 1827 + return ret; 1828 + } 1829 + 1830 + static ssize_t raid_state_store(struct device *dev, 1831 + struct device_attribute *attr, const char *buf, size_t count) 1832 + { 1833 + struct scsi_device *sdev = to_scsi_device(dev); 1834 + struct myrb_hba *cb = shost_priv(sdev->host); 1835 + struct myrb_pdev_state *pdev_info; 1836 + enum myrb_devstate new_state; 1837 + unsigned short status; 1838 + 1839 + if (!strncmp(buf, "kill", 4) || 1840 + !strncmp(buf, "offline", 7)) 1841 + new_state = MYRB_DEVICE_DEAD; 1842 + else if (!strncmp(buf, "online", 6)) 1843 + new_state = MYRB_DEVICE_ONLINE; 1844 + else if (!strncmp(buf, "standby", 7)) 1845 + new_state = MYRB_DEVICE_STANDBY; 1846 + else 1847 + return -EINVAL; 1848 + 1849 + pdev_info = sdev->hostdata; 1850 + if (!pdev_info) { 1851 + sdev_printk(KERN_INFO, sdev, 1852 + "Failed - no physical device information\n"); 1853 + return -ENXIO; 1854 + } 1855 + if (!pdev_info->present) { 1856 + sdev_printk(KERN_INFO, sdev, 1857 + "Failed - device not present\n"); 1858 + return -ENXIO; 1859 + } 1860 + 1861 + if (pdev_info->state == new_state) 1862 + return count; 1863 + 1864 + status = myrb_set_pdev_state(cb, sdev, new_state); 1865 + switch (status) { 1866 + case MYRB_STATUS_SUCCESS: 1867 + break; 1868 + case MYRB_STATUS_START_DEVICE_FAILED: 1869 + 
sdev_printk(KERN_INFO, sdev, 1870 + "Failed - Unable to Start Device\n"); 1871 + count = -EAGAIN; 1872 + break; 1873 + case MYRB_STATUS_NO_DEVICE: 1874 + sdev_printk(KERN_INFO, sdev, 1875 + "Failed - No Device at Address\n"); 1876 + count = -ENODEV; 1877 + break; 1878 + case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET: 1879 + sdev_printk(KERN_INFO, sdev, 1880 + "Failed - Invalid Channel or Target or Modifier\n"); 1881 + count = -EINVAL; 1882 + break; 1883 + case MYRB_STATUS_CHANNEL_BUSY: 1884 + sdev_printk(KERN_INFO, sdev, 1885 + "Failed - Channel Busy\n"); 1886 + count = -EBUSY; 1887 + break; 1888 + default: 1889 + sdev_printk(KERN_INFO, sdev, 1890 + "Failed - Unexpected Status %04X\n", status); 1891 + count = -EIO; 1892 + break; 1893 + } 1894 + return count; 1895 + } 1896 + static DEVICE_ATTR_RW(raid_state); 1897 + 1898 + static ssize_t raid_level_show(struct device *dev, 1899 + struct device_attribute *attr, char *buf) 1900 + { 1901 + struct scsi_device *sdev = to_scsi_device(dev); 1902 + 1903 + if (sdev->channel == myrb_logical_channel(sdev->host)) { 1904 + struct myrb_ldev_info *ldev_info = sdev->hostdata; 1905 + const char *name; 1906 + 1907 + if (!ldev_info) 1908 + return -ENXIO; 1909 + 1910 + name = myrb_raidlevel_name(ldev_info->raid_level); 1911 + if (!name) 1912 + return snprintf(buf, 32, "Invalid (%02X)\n", 1913 + ldev_info->state); 1914 + return snprintf(buf, 32, "%s\n", name); 1915 + } 1916 + return snprintf(buf, 32, "Physical Drive\n"); 1917 + } 1918 + static DEVICE_ATTR_RO(raid_level); 1919 + 1920 + static ssize_t rebuild_show(struct device *dev, 1921 + struct device_attribute *attr, char *buf) 1922 + { 1923 + struct scsi_device *sdev = to_scsi_device(dev); 1924 + struct myrb_hba *cb = shost_priv(sdev->host); 1925 + struct myrb_rbld_progress rbld_buf; 1926 + unsigned char status; 1927 + 1928 + if (sdev->channel < myrb_logical_channel(sdev->host)) 1929 + return snprintf(buf, 32, "physical device - not rebuilding\n"); 1930 + 1931 + status = 
myrb_get_rbld_progress(cb, &rbld_buf); 1932 + 1933 + if (rbld_buf.ldev_num != sdev->id || 1934 + status != MYRB_STATUS_SUCCESS) 1935 + return snprintf(buf, 32, "not rebuilding\n"); 1936 + 1937 + return snprintf(buf, 32, "rebuilding block %u of %u\n", 1938 + rbld_buf.ldev_size - rbld_buf.blocks_left, 1939 + rbld_buf.ldev_size); 1940 + } 1941 + 1942 + static ssize_t rebuild_store(struct device *dev, 1943 + struct device_attribute *attr, const char *buf, size_t count) 1944 + { 1945 + struct scsi_device *sdev = to_scsi_device(dev); 1946 + struct myrb_hba *cb = shost_priv(sdev->host); 1947 + struct myrb_cmdblk *cmd_blk; 1948 + union myrb_cmd_mbox *mbox; 1949 + unsigned short status; 1950 + int rc, start; 1951 + const char *msg; 1952 + 1953 + rc = kstrtoint(buf, 0, &start); 1954 + if (rc) 1955 + return rc; 1956 + 1957 + if (sdev->channel >= myrb_logical_channel(sdev->host)) 1958 + return -ENXIO; 1959 + 1960 + status = myrb_get_rbld_progress(cb, NULL); 1961 + if (start) { 1962 + if (status == MYRB_STATUS_SUCCESS) { 1963 + sdev_printk(KERN_INFO, sdev, 1964 + "Rebuild Not Initiated; already in progress\n"); 1965 + return -EALREADY; 1966 + } 1967 + mutex_lock(&cb->dcmd_mutex); 1968 + cmd_blk = &cb->dcmd_blk; 1969 + myrb_reset_cmd(cmd_blk); 1970 + mbox = &cmd_blk->mbox; 1971 + mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC; 1972 + mbox->type3D.id = MYRB_DCMD_TAG; 1973 + mbox->type3D.channel = sdev->channel; 1974 + mbox->type3D.target = sdev->id; 1975 + status = myrb_exec_cmd(cb, cmd_blk); 1976 + mutex_unlock(&cb->dcmd_mutex); 1977 + } else { 1978 + struct pci_dev *pdev = cb->pdev; 1979 + unsigned char *rate; 1980 + dma_addr_t rate_addr; 1981 + 1982 + if (status != MYRB_STATUS_SUCCESS) { 1983 + sdev_printk(KERN_INFO, sdev, 1984 + "Rebuild Not Cancelled; not in progress\n"); 1985 + return 0; 1986 + } 1987 + 1988 + rate = dma_alloc_coherent(&pdev->dev, sizeof(char), 1989 + &rate_addr, GFP_KERNEL); 1990 + if (rate == NULL) { 1991 + sdev_printk(KERN_INFO, sdev, 1992 + "Cancellation 
of Rebuild Failed - Out of Memory\n"); 1993 + return -ENOMEM; 1994 + } 1995 + mutex_lock(&cb->dcmd_mutex); 1996 + cmd_blk = &cb->dcmd_blk; 1997 + myrb_reset_cmd(cmd_blk); 1998 + mbox = &cmd_blk->mbox; 1999 + mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL; 2000 + mbox->type3R.id = MYRB_DCMD_TAG; 2001 + mbox->type3R.rbld_rate = 0xFF; 2002 + mbox->type3R.addr = rate_addr; 2003 + status = myrb_exec_cmd(cb, cmd_blk); 2004 + dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr); 2005 + mutex_unlock(&cb->dcmd_mutex); 2006 + } 2007 + if (status == MYRB_STATUS_SUCCESS) { 2008 + sdev_printk(KERN_INFO, sdev, "Rebuild %s\n", 2009 + start ? "Initiated" : "Cancelled"); 2010 + return count; 2011 + } 2012 + if (!start) { 2013 + sdev_printk(KERN_INFO, sdev, 2014 + "Rebuild Not Cancelled, status 0x%x\n", 2015 + status); 2016 + return -EIO; 2017 + } 2018 + 2019 + switch (status) { 2020 + case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE: 2021 + msg = "Attempt to Rebuild Online or Unresponsive Drive"; 2022 + break; 2023 + case MYRB_STATUS_RBLD_NEW_DISK_FAILED: 2024 + msg = "New Disk Failed During Rebuild"; 2025 + break; 2026 + case MYRB_STATUS_INVALID_ADDRESS: 2027 + msg = "Invalid Device Address"; 2028 + break; 2029 + case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS: 2030 + msg = "Already in Progress"; 2031 + break; 2032 + default: 2033 + msg = NULL; 2034 + break; 2035 + } 2036 + if (msg) 2037 + sdev_printk(KERN_INFO, sdev, 2038 + "Rebuild Failed - %s\n", msg); 2039 + else 2040 + sdev_printk(KERN_INFO, sdev, 2041 + "Rebuild Failed, status 0x%x\n", status); 2042 + 2043 + return -EIO; 2044 + } 2045 + static DEVICE_ATTR_RW(rebuild); 2046 + 2047 + static ssize_t consistency_check_store(struct device *dev, 2048 + struct device_attribute *attr, const char *buf, size_t count) 2049 + { 2050 + struct scsi_device *sdev = to_scsi_device(dev); 2051 + struct myrb_hba *cb = shost_priv(sdev->host); 2052 + struct myrb_rbld_progress rbld_buf; 2053 + struct myrb_cmdblk *cmd_blk; 2054 + union myrb_cmd_mbox 
*mbox; 2055 + unsigned short ldev_num = 0xFFFF; 2056 + unsigned short status; 2057 + int rc, start; 2058 + const char *msg; 2059 + 2060 + rc = kstrtoint(buf, 0, &start); 2061 + if (rc) 2062 + return rc; 2063 + 2064 + if (sdev->channel < myrb_logical_channel(sdev->host)) 2065 + return -ENXIO; 2066 + 2067 + status = myrb_get_rbld_progress(cb, &rbld_buf); 2068 + if (start) { 2069 + if (status == MYRB_STATUS_SUCCESS) { 2070 + sdev_printk(KERN_INFO, sdev, 2071 + "Check Consistency Not Initiated; already in progress\n"); 2072 + return -EALREADY; 2073 + } 2074 + mutex_lock(&cb->dcmd_mutex); 2075 + cmd_blk = &cb->dcmd_blk; 2076 + myrb_reset_cmd(cmd_blk); 2077 + mbox = &cmd_blk->mbox; 2078 + mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC; 2079 + mbox->type3C.id = MYRB_DCMD_TAG; 2080 + mbox->type3C.ldev_num = sdev->id; 2081 + mbox->type3C.auto_restore = true; 2082 + 2083 + status = myrb_exec_cmd(cb, cmd_blk); 2084 + mutex_unlock(&cb->dcmd_mutex); 2085 + } else { 2086 + struct pci_dev *pdev = cb->pdev; 2087 + unsigned char *rate; 2088 + dma_addr_t rate_addr; 2089 + 2090 + if (ldev_num != sdev->id) { 2091 + sdev_printk(KERN_INFO, sdev, 2092 + "Check Consistency Not Cancelled; not in progress\n"); 2093 + return 0; 2094 + } 2095 + rate = dma_alloc_coherent(&pdev->dev, sizeof(char), 2096 + &rate_addr, GFP_KERNEL); 2097 + if (rate == NULL) { 2098 + sdev_printk(KERN_INFO, sdev, 2099 + "Cancellation of Check Consistency Failed - Out of Memory\n"); 2100 + return -ENOMEM; 2101 + } 2102 + mutex_lock(&cb->dcmd_mutex); 2103 + cmd_blk = &cb->dcmd_blk; 2104 + myrb_reset_cmd(cmd_blk); 2105 + mbox = &cmd_blk->mbox; 2106 + mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL; 2107 + mbox->type3R.id = MYRB_DCMD_TAG; 2108 + mbox->type3R.rbld_rate = 0xFF; 2109 + mbox->type3R.addr = rate_addr; 2110 + status = myrb_exec_cmd(cb, cmd_blk); 2111 + dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr); 2112 + mutex_unlock(&cb->dcmd_mutex); 2113 + } 2114 + if (status == MYRB_STATUS_SUCCESS) { 
2115 + sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n", 2116 + start ? "Initiated" : "Cancelled"); 2117 + return count; 2118 + } 2119 + if (!start) { 2120 + sdev_printk(KERN_INFO, sdev, 2121 + "Check Consistency Not Cancelled, status 0x%x\n", 2122 + status); 2123 + return -EIO; 2124 + } 2125 + 2126 + switch (status) { 2127 + case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE: 2128 + msg = "Dependent Physical Device is DEAD"; 2129 + break; 2130 + case MYRB_STATUS_RBLD_NEW_DISK_FAILED: 2131 + msg = "New Disk Failed During Rebuild"; 2132 + break; 2133 + case MYRB_STATUS_INVALID_ADDRESS: 2134 + msg = "Invalid or Nonredundant Logical Drive"; 2135 + break; 2136 + case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS: 2137 + msg = "Already in Progress"; 2138 + break; 2139 + default: 2140 + msg = NULL; 2141 + break; 2142 + } 2143 + if (msg) 2144 + sdev_printk(KERN_INFO, sdev, 2145 + "Check Consistency Failed - %s\n", msg); 2146 + else 2147 + sdev_printk(KERN_INFO, sdev, 2148 + "Check Consistency Failed, status 0x%x\n", status); 2149 + 2150 + return -EIO; 2151 + } 2152 + 2153 + static ssize_t consistency_check_show(struct device *dev, 2154 + struct device_attribute *attr, char *buf) 2155 + { 2156 + return rebuild_show(dev, attr, buf); 2157 + } 2158 + static DEVICE_ATTR_RW(consistency_check); 2159 + 2160 + static ssize_t ctlr_num_show(struct device *dev, 2161 + struct device_attribute *attr, char *buf) 2162 + { 2163 + struct Scsi_Host *shost = class_to_shost(dev); 2164 + struct myrb_hba *cb = shost_priv(shost); 2165 + 2166 + return snprintf(buf, 20, "%d\n", cb->ctlr_num); 2167 + } 2168 + static DEVICE_ATTR_RO(ctlr_num); 2169 + 2170 + static ssize_t firmware_show(struct device *dev, 2171 + struct device_attribute *attr, char *buf) 2172 + { 2173 + struct Scsi_Host *shost = class_to_shost(dev); 2174 + struct myrb_hba *cb = shost_priv(shost); 2175 + 2176 + return snprintf(buf, 16, "%s\n", cb->fw_version); 2177 + } 2178 + static DEVICE_ATTR_RO(firmware); 2179 + 2180 + static ssize_t 
model_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrb_hba *cb = shost_priv(shost);

	return snprintf(buf, 16, "%s\n", cb->model_name);
}
static DEVICE_ATTR_RO(model);

/* Host attribute: flush the controller cache on any write to the node */
static ssize_t flush_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrb_hba *cb = shost_priv(shost);
	unsigned short status;

	status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
	if (status == MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_INFO, shost,
			     "Cache Flush Completed\n");
		return count;
	}
	shost_printk(KERN_INFO, shost,
		     "Cache Flush Failed, status %x\n", status);
	return -EIO;
}
static DEVICE_ATTR_WO(flush_cache);

/* Per-device (scsi_device) sysfs attributes */
static struct device_attribute *myrb_sdev_attrs[] = {
	&dev_attr_rebuild,
	&dev_attr_consistency_check,
	&dev_attr_raid_state,
	&dev_attr_raid_level,
	NULL,
};

/* Per-host (Scsi_Host) sysfs attributes */
static struct device_attribute *myrb_shost_attrs[] = {
	&dev_attr_ctlr_num,
	&dev_attr_model,
	&dev_attr_firmware,
	&dev_attr_flush_cache,
	NULL,
};

struct scsi_host_template myrb_template = {
	.module = THIS_MODULE,
	.name = "DAC960",
	.proc_name = "myrb",
	.queuecommand = myrb_queuecommand,
	.eh_host_reset_handler = myrb_host_reset,
	.slave_alloc = myrb_slave_alloc,
	.slave_configure = myrb_slave_configure,
	.slave_destroy = myrb_slave_destroy,
	.bios_param = myrb_biosparam,
	.cmd_size = sizeof(struct myrb_cmdblk),
	.shost_attrs = myrb_shost_attrs,
	.sdev_attrs = myrb_sdev_attrs,
	.this_id = -1,
};

/**
 * myrb_is_raid - return boolean indicating device is raid volume
 * @dev: the device struct object
 */
static int myrb_is_raid(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	/* Logical drives live on the last (virtual) channel */
	return sdev->channel == myrb_logical_channel(sdev->host);
}

/**
 * myrb_get_resync - get raid volume resync percent complete
 * @dev: the device struct object
 */
static void myrb_get_resync(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_rbld_progress rbld_buf;
	unsigned int percent_complete = 0;
	unsigned short status;
	unsigned int ldev_size = 0, remaining = 0;

	if (sdev->channel < myrb_logical_channel(sdev->host))
		return;
	status = myrb_get_rbld_progress(cb, &rbld_buf);
	if (status == MYRB_STATUS_SUCCESS) {
		if (rbld_buf.ldev_num == sdev->id) {
			ldev_size = rbld_buf.ldev_size;
			remaining = rbld_buf.blocks_left;
		}
	}
	if (remaining && ldev_size)
		percent_complete = (ldev_size - remaining) * 100 / ldev_size;
	raid_set_resync(myrb_raid_template, dev, percent_complete);
}

/**
 * myrb_get_state - get raid volume status
 * @dev: the device struct object
 */
static void myrb_get_state(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_ldev_info *ldev_info = sdev->hostdata;
	enum raid_state state = RAID_STATE_UNKNOWN;
	unsigned short status;

	if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info)
		state = RAID_STATE_UNKNOWN;
	else {
		status = myrb_get_rbld_progress(cb, NULL);
		if (status == MYRB_STATUS_SUCCESS)
			state = RAID_STATE_RESYNCING;
		else {
			switch (ldev_info->state) {
			case MYRB_DEVICE_ONLINE:
				state = RAID_STATE_ACTIVE;
				break;
			case MYRB_DEVICE_WO:
			case MYRB_DEVICE_CRITICAL:
				state = RAID_STATE_DEGRADED;
				break;
			default:
				state = RAID_STATE_OFFLINE;
			}
		}
	}
	raid_set_state(myrb_raid_template, dev, state);
}

struct raid_function_template myrb_raid_functions = {
	.cookie = &myrb_template,
	.is_raid = myrb_is_raid,
	.get_resync = myrb_get_resync,
	.get_state = myrb_get_state,
};

/*
 * Complete a SCSI command: release per-command DMA resources and map
 * the controller status to a SCSI midlayer result.
 */
static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
		struct scsi_cmnd *scmd)
{
	unsigned short status;

	if (!cmd_blk)
		return;

	scsi_dma_unmap(scmd);

	if (cmd_blk->dcdb) {
		memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64);
		dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
			      cmd_blk->dcdb_addr);
		cmd_blk->dcdb = NULL;
	}
	if (cmd_blk->sgl) {
		dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
		cmd_blk->sgl = NULL;
		cmd_blk->sgl_addr = 0;
	}
	status = cmd_blk->status;
	switch (status) {
	case MYRB_STATUS_SUCCESS:
	case MYRB_STATUS_DEVICE_BUSY:
		scmd->result = (DID_OK << 16) | status;
		break;
	case MYRB_STATUS_BAD_DATA:
		dev_dbg(&scmd->device->sdev_gendev,
			"Bad Data Encountered\n");
		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			/* Unrecovered read error */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
					MEDIUM_ERROR, 0x11, 0);
		else
			/* Write error */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
					MEDIUM_ERROR, 0x0C, 0);
		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
		break;
	case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
		scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			/*
Unrecovered read error, auto-reallocation failed */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
					MEDIUM_ERROR, 0x11, 0x04);
		else
			/* Write error, auto-reallocation failed */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
					MEDIUM_ERROR, 0x0C, 0x02);
		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
		break;
	case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
		dev_dbg(&scmd->device->sdev_gendev,
			"Logical Drive Nonexistent or Offline");
		scmd->result = (DID_BAD_TARGET << 16);
		break;
	case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
		dev_dbg(&scmd->device->sdev_gendev,
			"Attempt to Access Beyond End of Logical Drive");
		/* Logical block address out of range */
		scsi_build_sense_buffer(0, scmd->sense_buffer,
				NOT_READY, 0x21, 0);
		break;
	case MYRB_STATUS_DEVICE_NONRESPONSIVE:
		dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
		scmd->result = (DID_BAD_TARGET << 16);
		break;
	default:
		scmd_printk(KERN_ERR, scmd,
			    "Unexpected Error Status %04X", status);
		scmd->result = (DID_ERROR << 16);
		break;
	}
	scmd->scsi_done(scmd);
}

/* Complete an internal (driver-issued) command block */
static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	if (!cmd_blk)
		return;

	if (cmd_blk->completion) {
		complete(cmd_blk->completion);
		cmd_blk->completion = NULL;
	}
}

/*
 * Periodic monitoring work: service at most one outstanding state
 * query per invocation (event log, error table, rebuild progress,
 * logical drive info, consistency check, background init), falling
 * back to a controller enquiry when nothing is pending.  Reschedules
 * itself with a short interval while work remains.
 */
static void myrb_monitor(struct work_struct *work)
{
	struct myrb_hba *cb = container_of(work,
			struct myrb_hba, monitor_work.work);
	struct Scsi_Host *shost = cb->host;
	unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;

	dev_dbg(&shost->shost_gendev, "monitor tick\n");

	if (cb->new_ev_seq > cb->old_ev_seq) {
		int event = cb->old_ev_seq;

		dev_dbg(&shost->shost_gendev,
			"get event log no %d/%d\n",
			cb->new_ev_seq, event);
		myrb_get_event(cb, event);
		cb->old_ev_seq = event + 1;
		interval = 10;
	} else if (cb->need_err_info) {
		cb->need_err_info = false;
		dev_dbg(&shost->shost_gendev, "get error table\n");
		myrb_get_errtable(cb);
		interval = 10;
	} else if (cb->need_rbld && cb->rbld_first) {
		cb->need_rbld = false;
		dev_dbg(&shost->shost_gendev,
			"get rebuild progress\n");
		myrb_update_rbld_progress(cb);
		interval = 10;
	} else if (cb->need_ldev_info) {
		cb->need_ldev_info = false;
		dev_dbg(&shost->shost_gendev,
			"get logical drive info\n");
		myrb_get_ldev_info(cb);
		interval = 10;
	} else if (cb->need_rbld) {
		cb->need_rbld = false;
		dev_dbg(&shost->shost_gendev,
			"get rebuild progress\n");
		myrb_update_rbld_progress(cb);
		interval = 10;
	} else if (cb->need_cc_status) {
		cb->need_cc_status = false;
		dev_dbg(&shost->shost_gendev,
			"get consistency check progress\n");
		myrb_get_cc_progress(cb);
		interval = 10;
	} else if (cb->need_bgi_status) {
		cb->need_bgi_status = false;
		dev_dbg(&shost->shost_gendev, "get background init status\n");
		myrb_bgi_control(cb);
		interval = 10;
	} else {
		dev_dbg(&shost->shost_gendev, "new enquiry\n");
		mutex_lock(&cb->dma_mutex);
		myrb_hba_enquiry(cb);
		mutex_unlock(&cb->dma_mutex);
		if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
		    cb->need_err_info || cb->need_rbld ||
		    cb->need_ldev_info || cb->need_cc_status ||
		    cb->need_bgi_status) {
			dev_dbg(&shost->shost_gendev,
				"reschedule monitor\n");
			interval = 0;
		}
	}
	if (interval > 1)
		cb->primary_monitor_time = jiffies;
	queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
}

/**
 * myrb_err_status - reports controller BIOS messages
 * @cb: myrb_hba instance
 * @error: message code read from the Error Status Register
 * @parm0: first message parameter
 * @parm1: second message parameter
 *
 * Controller BIOS messages are passed through the Error Status Register
 * when the driver performs the BIOS handshaking.
 *
 * Return: true for fatal errors and false otherwise.
 */
bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
		unsigned char parm0, unsigned char parm1)
{
	struct pci_dev *pdev = cb->pdev;

	switch (error) {
	case 0x00:
		dev_info(&pdev->dev,
			 "Physical Device %d:%d Not Responding\n",
			 parm1, parm0);
		break;
	case 0x08:
		dev_notice(&pdev->dev, "Spinning Up Drives\n");
		break;
	case 0x30:
		dev_notice(&pdev->dev, "Configuration Checksum Error\n");
		break;
	case 0x60:
		dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
		break;
	case 0x70:
		dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
		break;
	case 0x90:
		dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
			   parm1, parm0);
		break;
	case 0xA0:
		dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
		break;
	case 0xB0:
		dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
		break;
	case 0xD0:
		dev_notice(&pdev->dev, "New Controller Configuration Found\n");
		break;
	case 0xF0:
		dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
		return true;
	default:
		dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
			error);
		return true;
	}
	return false;
}

/*
 * Hardware-specific functions
 */

/*
 * DAC960 LA Series Controllers
 */

static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
}

static inline void
DAC960_LA_ack_hw_mbox_status(void __iomem *base)
{
	writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
}

static inline void DAC960_LA_gen_intr(void __iomem *base)
{
	writeb(DAC960_LA_IDB_GEN_IRQ, base + DAC960_LA_IDB_OFFSET);
}

static inline void DAC960_LA_reset_ctrl(void __iomem *base)
{
	writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
}

static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
}

static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);

	return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
}

static inline bool DAC960_LA_init_in_progress(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);

	return !(idb & DAC960_LA_IDB_INIT_DONE);
}

static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
{
	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
}

static inline void DAC960_LA_ack_mem_mbox_intr(void __iomem *base)
{
	writeb(DAC960_LA_ODB_MMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
}

/* Acknowledge both hardware and memory mailbox interrupts in one write */
static inline void DAC960_LA_ack_intr(void __iomem *base)
{
	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
	       base + DAC960_LA_ODB_OFFSET);
}

static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);

	return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
}

static inline bool DAC960_LA_mem_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);

	return odb & DAC960_LA_ODB_MMBOX_STS_AVAIL;
}

static inline void DAC960_LA_enable_intr(void __iomem *base)
{
	unsigned char odb = 0xFF;

	odb &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
	writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
}

static inline void DAC960_LA_disable_intr(void __iomem *base)
{
	unsigned char odb = 0xFF;

	odb |= DAC960_LA_IRQMASK_DISABLE_IRQ;
	writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
}

static inline bool DAC960_LA_intr_enabled(void __iomem *base)
{
	unsigned char imask = readb(base + DAC960_LA_IRQMASK_OFFSET);

	return !(imask & DAC960_LA_IRQMASK_DISABLE_IRQ);
}

/*
 * Copy a command into the memory mailbox; word 0 (which contains the
 * opcode and marks the slot busy) is written last.
 */
static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
		union myrb_cmd_mbox *mbox)
{
	mem_mbox->words[1] = mbox->words[1];
	mem_mbox->words[2] = mbox->words[2];
	mem_mbox->words[3] = mbox->words[3];
	/* Memory barrier to prevent reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Memory barrier to force PCI access */
	mb();
}

static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
}

static inline unsigned char DAC960_LA_read_status_cmd_ident(void __iomem *base)
{
	return readb(base + DAC960_LA_STSID_OFFSET);
}

static inline unsigned short DAC960_LA_read_status(void __iomem *base)
{
	return readw(base + DAC960_LA_STS_OFFSET);
}

/*
 * Fetch a pending BIOS error message, if any; clears the pending bit.
 * Returns true when *error/*param0/*param1 have been filled in.
 */
static inline bool
DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);

	if (!(errsts & DAC960_LA_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_LA_ERRSTS_PENDING;

	*error = errsts;
	*param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_LA_CMDID_OFFSET);
	writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
	return true;
}

/*
 * Execute one command synchronously through the hardware mailbox;
 * used during initialization before the memory mailbox is enabled.
 */
static inline unsigned short
DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	unsigned short status;
	int timeout = 0;

	/* Wait for the hardware mailbox to drain */
	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (!DAC960_LA_hw_mbox_is_full(base))
			break;
		udelay(10);
		timeout++;
	}
	if (DAC960_LA_hw_mbox_is_full(base)) {
		dev_err(&pdev->dev,
			"Timeout waiting for empty mailbox\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	DAC960_LA_write_hw_mbox(base, mbox);
	DAC960_LA_hw_mbox_new_cmd(base);
	/* Wait for the controller to post completion status */
	timeout = 0;
	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_LA_hw_mbox_status_available(base))
			break;
		udelay(10);
		timeout++;
	}
	if (!DAC960_LA_hw_mbox_status_available(base)) {
		dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	status = DAC960_LA_read_status(base);
	DAC960_LA_ack_hw_mbox_intr(base);
	DAC960_LA_ack_hw_mbox_status(base);

	return status;
}

/*
 * Bring an LA-series controller online: wait for BIOS initialization,
 * enable the memory mailbox interface and hook up the ops vectors.
 */
static int DAC960_LA_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	DAC960_LA_disable_intr(base);
	DAC960_LA_ack_hw_mbox_status(base);
	udelay(1000);
	timeout = 0;
	while (DAC960_LA_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_LA_read_error_status(base, &error,
					      &parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -ENODEV;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_LA_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_LA_enable_intr(base);
	cb->qcmd = myrb_qcmd;
	cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
	if (cb->dual_mode_interface)
		cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
	else
		cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
	cb->disable_intr = DAC960_LA_disable_intr;
	cb->reset = DAC960_LA_reset_ctrl;

	return 0;
}

/*
 * Interrupt handler: walk the status mailbox ring and complete each
 * command it references.  Tags 1/2 are the internal direct/monitor
 * commands; anything else maps back to a SCSI command tag.
 */
static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	struct myrb_stat_mbox *next_stat_mbox;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	DAC960_LA_ack_intr(base);
	next_stat_mbox = cb->next_stat_mbox;
	while (next_stat_mbox->valid) {
		unsigned char id = next_stat_mbox->id;
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = next_stat_mbox->status;
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
		/* Advance the ring pointer, wrapping at the end */
		if (++next_stat_mbox > cb->last_stat_mbox)
			next_stat_mbox = cb->first_stat_mbox;

		if (cmd_blk) {
			if (id < 3)
				myrb_handle_cmdblk(cb, cmd_blk);
			else
				myrb_handle_scsi(cb, cmd_blk, scmd);
		}
	}
	cb->next_stat_mbox = next_stat_mbox;
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}

struct myrb_privdata DAC960_LA_privdata = {
	.hw_init = DAC960_LA_hw_init,
	.irq_handler = DAC960_LA_intr_handler,
	.mmio_size = DAC960_LA_mmio_size,
};

/*
 * DAC960 PG Series Controllers
 */
static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
{
	writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
}

static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
{
	writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
}

static inline void DAC960_PG_gen_intr(void __iomem *base)
{
	writel(DAC960_PG_IDB_GEN_IRQ, base + DAC960_PG_IDB_OFFSET);
}

static inline void DAC960_PG_reset_ctrl(void __iomem *base)
{
	writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
}

static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
{
	writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
}

static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
{
	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);

	return idb & DAC960_PG_IDB_HWMBOX_FULL;
}

static inline bool DAC960_PG_init_in_progress(void __iomem *base)
{
	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);

	return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
}

static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
{
	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
}

static inline void DAC960_PG_ack_mem_mbox_intr(void __iomem *base)
{
	writel(DAC960_PG_ODB_MMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
}

/* Acknowledge both hardware and memory mailbox interrupts in one write */
static inline void DAC960_PG_ack_intr(void __iomem *base)
{
	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
	       base + DAC960_PG_ODB_OFFSET);
}

static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);

	return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
}

static inline bool DAC960_PG_mem_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);

	return odb & DAC960_PG_ODB_MMBOX_STS_AVAIL;
}

static inline void DAC960_PG_enable_intr(void __iomem *base)
{
	unsigned int imask = (unsigned int)-1;

	imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
}

static inline void DAC960_PG_disable_intr(void __iomem *base)
{
	unsigned int imask = (unsigned int)-1;

	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
}

static inline bool DAC960_PG_intr_enabled(void __iomem *base)
{
	unsigned int imask = readl(base + DAC960_PG_IRQMASK_OFFSET);

	return !(imask & DAC960_PG_IRQMASK_DISABLE_IRQ);
}

/*
 * Copy a command into the memory mailbox; word 0 (which contains the
 * opcode and marks the slot busy) is written last.
 */
static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
		union myrb_cmd_mbox *mbox)
{
	mem_mbox->words[1] = mbox->words[1];
	mem_mbox->words[2] = mbox->words[2];
	mem_mbox->words[3] = mbox->words[3];
	/* Memory barrier to prevent reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Memory barrier to force PCI access */
	mb();
}

static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
}

static inline unsigned char
DAC960_PG_read_status_cmd_ident(void __iomem *base)
{
	return readb(base + DAC960_PG_STSID_OFFSET);
}

static inline unsigned short
DAC960_PG_read_status(void __iomem *base)
{
	return readw(base + DAC960_PG_STS_OFFSET);
}

/*
 * Fetch a pending BIOS error message, if any; clears the pending bit.
 * Returns true when *error/*param0/*param1 have been filled in.
 */
static inline bool
DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);

	if (!(errsts & DAC960_PG_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_PG_ERRSTS_PENDING;
	*error = errsts;
	*param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_PG_CMDID_OFFSET);
	writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
	return true;
}

/*
 * Execute one command synchronously through the hardware mailbox;
 * used during initialization before the memory mailbox is enabled.
 */
static inline unsigned short
DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	unsigned short status;
	int timeout = 0;

	/* Wait for the hardware mailbox to drain */
	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (!DAC960_PG_hw_mbox_is_full(base))
			break;
		udelay(10);
		timeout++;
	}
	if (DAC960_PG_hw_mbox_is_full(base)) {
		dev_err(&pdev->dev,
			"Timeout waiting for empty mailbox\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	DAC960_PG_write_hw_mbox(base, mbox);
DAC960_PG_hw_mbox_new_cmd(base); 2981 + 2982 + timeout = 0; 2983 + while (timeout < MYRB_MAILBOX_TIMEOUT) { 2984 + if (DAC960_PG_hw_mbox_status_available(base)) 2985 + break; 2986 + udelay(10); 2987 + timeout++; 2988 + } 2989 + if (!DAC960_PG_hw_mbox_status_available(base)) { 2990 + dev_err(&pdev->dev, 2991 + "Timeout waiting for mailbox status\n"); 2992 + return MYRB_STATUS_SUBSYS_TIMEOUT; 2993 + } 2994 + status = DAC960_PG_read_status(base); 2995 + DAC960_PG_ack_hw_mbox_intr(base); 2996 + DAC960_PG_ack_hw_mbox_status(base); 2997 + 2998 + return status; 2999 + } 3000 + 3001 + static int DAC960_PG_hw_init(struct pci_dev *pdev, 3002 + struct myrb_hba *cb, void __iomem *base) 3003 + { 3004 + int timeout = 0; 3005 + unsigned char error, parm0, parm1; 3006 + 3007 + DAC960_PG_disable_intr(base); 3008 + DAC960_PG_ack_hw_mbox_status(base); 3009 + udelay(1000); 3010 + while (DAC960_PG_init_in_progress(base) && 3011 + timeout < MYRB_MAILBOX_TIMEOUT) { 3012 + if (DAC960_PG_read_error_status(base, &error, 3013 + &parm0, &parm1) && 3014 + myrb_err_status(cb, error, parm0, parm1)) 3015 + return -EIO; 3016 + udelay(10); 3017 + timeout++; 3018 + } 3019 + if (timeout == MYRB_MAILBOX_TIMEOUT) { 3020 + dev_err(&pdev->dev, 3021 + "Timeout waiting for Controller Initialisation\n"); 3022 + return -ETIMEDOUT; 3023 + } 3024 + if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) { 3025 + dev_err(&pdev->dev, 3026 + "Unable to Enable Memory Mailbox Interface\n"); 3027 + DAC960_PG_reset_ctrl(base); 3028 + return -ENODEV; 3029 + } 3030 + DAC960_PG_enable_intr(base); 3031 + cb->qcmd = myrb_qcmd; 3032 + cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox; 3033 + if (cb->dual_mode_interface) 3034 + cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd; 3035 + else 3036 + cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd; 3037 + cb->disable_intr = DAC960_PG_disable_intr; 3038 + cb->reset = DAC960_PG_reset_ctrl; 3039 + 3040 + return 0; 3041 + } 3042 + 3043 + static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg) 
3044 + { 3045 + struct myrb_hba *cb = arg; 3046 + void __iomem *base = cb->io_base; 3047 + struct myrb_stat_mbox *next_stat_mbox; 3048 + unsigned long flags; 3049 + 3050 + spin_lock_irqsave(&cb->queue_lock, flags); 3051 + DAC960_PG_ack_intr(base); 3052 + next_stat_mbox = cb->next_stat_mbox; 3053 + while (next_stat_mbox->valid) { 3054 + unsigned char id = next_stat_mbox->id; 3055 + struct scsi_cmnd *scmd = NULL; 3056 + struct myrb_cmdblk *cmd_blk = NULL; 3057 + 3058 + if (id == MYRB_DCMD_TAG) 3059 + cmd_blk = &cb->dcmd_blk; 3060 + else if (id == MYRB_MCMD_TAG) 3061 + cmd_blk = &cb->mcmd_blk; 3062 + else { 3063 + scmd = scsi_host_find_tag(cb->host, id - 3); 3064 + if (scmd) 3065 + cmd_blk = scsi_cmd_priv(scmd); 3066 + } 3067 + if (cmd_blk) 3068 + cmd_blk->status = next_stat_mbox->status; 3069 + else 3070 + dev_err(&cb->pdev->dev, 3071 + "Unhandled command completion %d\n", id); 3072 + 3073 + memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox)); 3074 + if (++next_stat_mbox > cb->last_stat_mbox) 3075 + next_stat_mbox = cb->first_stat_mbox; 3076 + 3077 + if (id < 3) 3078 + myrb_handle_cmdblk(cb, cmd_blk); 3079 + else 3080 + myrb_handle_scsi(cb, cmd_blk, scmd); 3081 + } 3082 + cb->next_stat_mbox = next_stat_mbox; 3083 + spin_unlock_irqrestore(&cb->queue_lock, flags); 3084 + return IRQ_HANDLED; 3085 + } 3086 + 3087 + struct myrb_privdata DAC960_PG_privdata = { 3088 + .hw_init = DAC960_PG_hw_init, 3089 + .irq_handler = DAC960_PG_intr_handler, 3090 + .mmio_size = DAC960_PG_mmio_size, 3091 + }; 3092 + 3093 + 3094 + /* 3095 + * DAC960 PD Series Controllers 3096 + */ 3097 + 3098 + static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base) 3099 + { 3100 + writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET); 3101 + } 3102 + 3103 + static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base) 3104 + { 3105 + writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET); 3106 + } 3107 + 3108 + static inline void DAC960_PD_gen_intr(void __iomem 
*base) 3109 + { 3110 + writeb(DAC960_PD_IDB_GEN_IRQ, base + DAC960_PD_IDB_OFFSET); 3111 + } 3112 + 3113 + static inline void DAC960_PD_reset_ctrl(void __iomem *base) 3114 + { 3115 + writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET); 3116 + } 3117 + 3118 + static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base) 3119 + { 3120 + unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET); 3121 + 3122 + return idb & DAC960_PD_IDB_HWMBOX_FULL; 3123 + } 3124 + 3125 + static inline bool DAC960_PD_init_in_progress(void __iomem *base) 3126 + { 3127 + unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET); 3128 + 3129 + return idb & DAC960_PD_IDB_INIT_IN_PROGRESS; 3130 + } 3131 + 3132 + static inline void DAC960_PD_ack_intr(void __iomem *base) 3133 + { 3134 + writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET); 3135 + } 3136 + 3137 + static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base) 3138 + { 3139 + unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET); 3140 + 3141 + return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL; 3142 + } 3143 + 3144 + static inline void DAC960_PD_enable_intr(void __iomem *base) 3145 + { 3146 + writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET); 3147 + } 3148 + 3149 + static inline void DAC960_PD_disable_intr(void __iomem *base) 3150 + { 3151 + writeb(0, base + DAC960_PD_IRQEN_OFFSET); 3152 + } 3153 + 3154 + static inline bool DAC960_PD_intr_enabled(void __iomem *base) 3155 + { 3156 + unsigned char imask = readb(base + DAC960_PD_IRQEN_OFFSET); 3157 + 3158 + return imask & DAC960_PD_IRQMASK_ENABLE_IRQ; 3159 + } 3160 + 3161 + static inline void DAC960_PD_write_cmd_mbox(void __iomem *base, 3162 + union myrb_cmd_mbox *mbox) 3163 + { 3164 + writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET); 3165 + writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET); 3166 + writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET); 3167 + writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET); 3168 + } 3169 + 
3170 + static inline unsigned char 3171 + DAC960_PD_read_status_cmd_ident(void __iomem *base) 3172 + { 3173 + return readb(base + DAC960_PD_STSID_OFFSET); 3174 + } 3175 + 3176 + static inline unsigned short 3177 + DAC960_PD_read_status(void __iomem *base) 3178 + { 3179 + return readw(base + DAC960_PD_STS_OFFSET); 3180 + } 3181 + 3182 + static inline bool 3183 + DAC960_PD_read_error_status(void __iomem *base, unsigned char *error, 3184 + unsigned char *param0, unsigned char *param1) 3185 + { 3186 + unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET); 3187 + 3188 + if (!(errsts & DAC960_PD_ERRSTS_PENDING)) 3189 + return false; 3190 + errsts &= ~DAC960_PD_ERRSTS_PENDING; 3191 + *error = errsts; 3192 + *param0 = readb(base + DAC960_PD_CMDOP_OFFSET); 3193 + *param1 = readb(base + DAC960_PD_CMDID_OFFSET); 3194 + writeb(0, base + DAC960_PD_ERRSTS_OFFSET); 3195 + return true; 3196 + } 3197 + 3198 + static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk) 3199 + { 3200 + void __iomem *base = cb->io_base; 3201 + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; 3202 + 3203 + while (DAC960_PD_hw_mbox_is_full(base)) 3204 + udelay(1); 3205 + DAC960_PD_write_cmd_mbox(base, mbox); 3206 + DAC960_PD_hw_mbox_new_cmd(base); 3207 + } 3208 + 3209 + static int DAC960_PD_hw_init(struct pci_dev *pdev, 3210 + struct myrb_hba *cb, void __iomem *base) 3211 + { 3212 + int timeout = 0; 3213 + unsigned char error, parm0, parm1; 3214 + 3215 + if (!request_region(cb->io_addr, 0x80, "myrb")) { 3216 + dev_err(&pdev->dev, "IO port 0x%lx busy\n", 3217 + (unsigned long)cb->io_addr); 3218 + return -EBUSY; 3219 + } 3220 + DAC960_PD_disable_intr(base); 3221 + DAC960_PD_ack_hw_mbox_status(base); 3222 + udelay(1000); 3223 + while (DAC960_PD_init_in_progress(base) && 3224 + timeout < MYRB_MAILBOX_TIMEOUT) { 3225 + if (DAC960_PD_read_error_status(base, &error, 3226 + &parm0, &parm1) && 3227 + myrb_err_status(cb, error, parm0, parm1)) 3228 + return -EIO; 3229 + udelay(10); 3230 + 
timeout++; 3231 + } 3232 + if (timeout == MYRB_MAILBOX_TIMEOUT) { 3233 + dev_err(&pdev->dev, 3234 + "Timeout waiting for Controller Initialisation\n"); 3235 + return -ETIMEDOUT; 3236 + } 3237 + if (!myrb_enable_mmio(cb, NULL)) { 3238 + dev_err(&pdev->dev, 3239 + "Unable to Enable Memory Mailbox Interface\n"); 3240 + DAC960_PD_reset_ctrl(base); 3241 + return -ENODEV; 3242 + } 3243 + DAC960_PD_enable_intr(base); 3244 + cb->qcmd = DAC960_PD_qcmd; 3245 + cb->disable_intr = DAC960_PD_disable_intr; 3246 + cb->reset = DAC960_PD_reset_ctrl; 3247 + 3248 + return 0; 3249 + } 3250 + 3251 + static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg) 3252 + { 3253 + struct myrb_hba *cb = arg; 3254 + void __iomem *base = cb->io_base; 3255 + unsigned long flags; 3256 + 3257 + spin_lock_irqsave(&cb->queue_lock, flags); 3258 + while (DAC960_PD_hw_mbox_status_available(base)) { 3259 + unsigned char id = DAC960_PD_read_status_cmd_ident(base); 3260 + struct scsi_cmnd *scmd = NULL; 3261 + struct myrb_cmdblk *cmd_blk = NULL; 3262 + 3263 + if (id == MYRB_DCMD_TAG) 3264 + cmd_blk = &cb->dcmd_blk; 3265 + else if (id == MYRB_MCMD_TAG) 3266 + cmd_blk = &cb->mcmd_blk; 3267 + else { 3268 + scmd = scsi_host_find_tag(cb->host, id - 3); 3269 + if (scmd) 3270 + cmd_blk = scsi_cmd_priv(scmd); 3271 + } 3272 + if (cmd_blk) 3273 + cmd_blk->status = DAC960_PD_read_status(base); 3274 + else 3275 + dev_err(&cb->pdev->dev, 3276 + "Unhandled command completion %d\n", id); 3277 + 3278 + DAC960_PD_ack_intr(base); 3279 + DAC960_PD_ack_hw_mbox_status(base); 3280 + 3281 + if (id < 3) 3282 + myrb_handle_cmdblk(cb, cmd_blk); 3283 + else 3284 + myrb_handle_scsi(cb, cmd_blk, scmd); 3285 + } 3286 + spin_unlock_irqrestore(&cb->queue_lock, flags); 3287 + return IRQ_HANDLED; 3288 + } 3289 + 3290 + struct myrb_privdata DAC960_PD_privdata = { 3291 + .hw_init = DAC960_PD_hw_init, 3292 + .irq_handler = DAC960_PD_intr_handler, 3293 + .mmio_size = DAC960_PD_mmio_size, 3294 + }; 3295 + 3296 + 3297 + /* 3298 + * DAC960 P 
Series Controllers 3299 + * 3300 + * Similar to the DAC960 PD Series Controllers, but some commands have 3301 + * to be translated. 3302 + */ 3303 + 3304 + static inline void myrb_translate_enquiry(void *enq) 3305 + { 3306 + memcpy(enq + 132, enq + 36, 64); 3307 + memset(enq + 36, 0, 96); 3308 + } 3309 + 3310 + static inline void myrb_translate_devstate(void *state) 3311 + { 3312 + memcpy(state + 2, state + 3, 1); 3313 + memmove(state + 4, state + 5, 2); 3314 + memmove(state + 6, state + 8, 4); 3315 + } 3316 + 3317 + static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk) 3318 + { 3319 + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; 3320 + int ldev_num = mbox->type5.ld.ldev_num; 3321 + 3322 + mbox->bytes[3] &= 0x7; 3323 + mbox->bytes[3] |= mbox->bytes[7] << 6; 3324 + mbox->bytes[7] = ldev_num; 3325 + } 3326 + 3327 + static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk) 3328 + { 3329 + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; 3330 + int ldev_num = mbox->bytes[7]; 3331 + 3332 + mbox->bytes[7] = mbox->bytes[3] >> 6; 3333 + mbox->bytes[3] &= 0x7; 3334 + mbox->bytes[3] |= ldev_num << 3; 3335 + } 3336 + 3337 + static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk) 3338 + { 3339 + void __iomem *base = cb->io_base; 3340 + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; 3341 + 3342 + switch (mbox->common.opcode) { 3343 + case MYRB_CMD_ENQUIRY: 3344 + mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD; 3345 + break; 3346 + case MYRB_CMD_GET_DEVICE_STATE: 3347 + mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD; 3348 + break; 3349 + case MYRB_CMD_READ: 3350 + mbox->common.opcode = MYRB_CMD_READ_OLD; 3351 + myrb_translate_to_rw_command(cmd_blk); 3352 + break; 3353 + case MYRB_CMD_WRITE: 3354 + mbox->common.opcode = MYRB_CMD_WRITE_OLD; 3355 + myrb_translate_to_rw_command(cmd_blk); 3356 + break; 3357 + case MYRB_CMD_READ_SG: 3358 + mbox->common.opcode = MYRB_CMD_READ_SG_OLD; 3359 + myrb_translate_to_rw_command(cmd_blk); 
3360 + break; 3361 + case MYRB_CMD_WRITE_SG: 3362 + mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD; 3363 + myrb_translate_to_rw_command(cmd_blk); 3364 + break; 3365 + default: 3366 + break; 3367 + } 3368 + while (DAC960_PD_hw_mbox_is_full(base)) 3369 + udelay(1); 3370 + DAC960_PD_write_cmd_mbox(base, mbox); 3371 + DAC960_PD_hw_mbox_new_cmd(base); 3372 + } 3373 + 3374 + 3375 + static int DAC960_P_hw_init(struct pci_dev *pdev, 3376 + struct myrb_hba *cb, void __iomem *base) 3377 + { 3378 + int timeout = 0; 3379 + unsigned char error, parm0, parm1; 3380 + 3381 + if (!request_region(cb->io_addr, 0x80, "myrb")) { 3382 + dev_err(&pdev->dev, "IO port 0x%lx busy\n", 3383 + (unsigned long)cb->io_addr); 3384 + return -EBUSY; 3385 + } 3386 + DAC960_PD_disable_intr(base); 3387 + DAC960_PD_ack_hw_mbox_status(base); 3388 + udelay(1000); 3389 + while (DAC960_PD_init_in_progress(base) && 3390 + timeout < MYRB_MAILBOX_TIMEOUT) { 3391 + if (DAC960_PD_read_error_status(base, &error, 3392 + &parm0, &parm1) && 3393 + myrb_err_status(cb, error, parm0, parm1)) 3394 + return -EAGAIN; 3395 + udelay(10); 3396 + timeout++; 3397 + } 3398 + if (timeout == MYRB_MAILBOX_TIMEOUT) { 3399 + dev_err(&pdev->dev, 3400 + "Timeout waiting for Controller Initialisation\n"); 3401 + return -ETIMEDOUT; 3402 + } 3403 + if (!myrb_enable_mmio(cb, NULL)) { 3404 + dev_err(&pdev->dev, 3405 + "Unable to allocate DMA mapped memory\n"); 3406 + DAC960_PD_reset_ctrl(base); 3407 + return -ETIMEDOUT; 3408 + } 3409 + DAC960_PD_enable_intr(base); 3410 + cb->qcmd = DAC960_P_qcmd; 3411 + cb->disable_intr = DAC960_PD_disable_intr; 3412 + cb->reset = DAC960_PD_reset_ctrl; 3413 + 3414 + return 0; 3415 + } 3416 + 3417 + static irqreturn_t DAC960_P_intr_handler(int irq, void *arg) 3418 + { 3419 + struct myrb_hba *cb = arg; 3420 + void __iomem *base = cb->io_base; 3421 + unsigned long flags; 3422 + 3423 + spin_lock_irqsave(&cb->queue_lock, flags); 3424 + while (DAC960_PD_hw_mbox_status_available(base)) { 3425 + unsigned char id = 
DAC960_PD_read_status_cmd_ident(base); 3426 + struct scsi_cmnd *scmd = NULL; 3427 + struct myrb_cmdblk *cmd_blk = NULL; 3428 + union myrb_cmd_mbox *mbox; 3429 + enum myrb_cmd_opcode op; 3430 + 3431 + 3432 + if (id == MYRB_DCMD_TAG) 3433 + cmd_blk = &cb->dcmd_blk; 3434 + else if (id == MYRB_MCMD_TAG) 3435 + cmd_blk = &cb->mcmd_blk; 3436 + else { 3437 + scmd = scsi_host_find_tag(cb->host, id - 3); 3438 + if (scmd) 3439 + cmd_blk = scsi_cmd_priv(scmd); 3440 + } 3441 + if (cmd_blk) 3442 + cmd_blk->status = DAC960_PD_read_status(base); 3443 + else 3444 + dev_err(&cb->pdev->dev, 3445 + "Unhandled command completion %d\n", id); 3446 + 3447 + DAC960_PD_ack_intr(base); 3448 + DAC960_PD_ack_hw_mbox_status(base); 3449 + 3450 + if (!cmd_blk) 3451 + continue; 3452 + 3453 + mbox = &cmd_blk->mbox; 3454 + op = mbox->common.opcode; 3455 + switch (op) { 3456 + case MYRB_CMD_ENQUIRY_OLD: 3457 + mbox->common.opcode = MYRB_CMD_ENQUIRY; 3458 + myrb_translate_enquiry(cb->enquiry); 3459 + break; 3460 + case MYRB_CMD_READ_OLD: 3461 + mbox->common.opcode = MYRB_CMD_READ; 3462 + myrb_translate_from_rw_command(cmd_blk); 3463 + break; 3464 + case MYRB_CMD_WRITE_OLD: 3465 + mbox->common.opcode = MYRB_CMD_WRITE; 3466 + myrb_translate_from_rw_command(cmd_blk); 3467 + break; 3468 + case MYRB_CMD_READ_SG_OLD: 3469 + mbox->common.opcode = MYRB_CMD_READ_SG; 3470 + myrb_translate_from_rw_command(cmd_blk); 3471 + break; 3472 + case MYRB_CMD_WRITE_SG_OLD: 3473 + mbox->common.opcode = MYRB_CMD_WRITE_SG; 3474 + myrb_translate_from_rw_command(cmd_blk); 3475 + break; 3476 + default: 3477 + break; 3478 + } 3479 + if (id < 3) 3480 + myrb_handle_cmdblk(cb, cmd_blk); 3481 + else 3482 + myrb_handle_scsi(cb, cmd_blk, scmd); 3483 + } 3484 + spin_unlock_irqrestore(&cb->queue_lock, flags); 3485 + return IRQ_HANDLED; 3486 + } 3487 + 3488 + struct myrb_privdata DAC960_P_privdata = { 3489 + .hw_init = DAC960_P_hw_init, 3490 + .irq_handler = DAC960_P_intr_handler, 3491 + .mmio_size = DAC960_PD_mmio_size, 3492 + }; 3493 
/*
 * Allocate and initialise an HBA for one PCI controller: allocate the
 * Scsi_Host, map the register window, run the series-specific hw_init
 * from @entry->driver_data and hook up the interrupt handler.
 * Returns the HBA on success, NULL on failure (after myrb_cleanup()).
 */
static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
		const struct pci_device_id *entry)
{
	struct myrb_privdata *privdata =
		(struct myrb_privdata *)entry->driver_data;
	irq_handler_t irq_handler = privdata->irq_handler;
	unsigned int mmio_size = privdata->mmio_size;
	struct Scsi_Host *shost;
	struct myrb_hba *cb = NULL;

	shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
	if (!shost) {
		dev_err(&pdev->dev, "Unable to allocate Controller\n");
		return NULL;
	}
	shost->max_cmd_len = 12;
	shost->max_lun = 256;
	cb = shost_priv(shost);
	mutex_init(&cb->dcmd_mutex);
	mutex_init(&cb->dma_mutex);
	cb->pdev = pdev;

	if (pci_enable_device(pdev))
		goto failure;

	/* PD and P series expose an I/O port BAR 0 and MMIO in BAR 1. */
	if (privdata->hw_init == DAC960_PD_hw_init ||
	    privdata->hw_init == DAC960_P_hw_init) {
		cb->io_addr = pci_resource_start(pdev, 0);
		cb->pci_addr = pci_resource_start(pdev, 1);
	} else
		cb->pci_addr = pci_resource_start(pdev, 0);

	pci_set_drvdata(pdev, cb);
	spin_lock_init(&cb->queue_lock);
	/* Map at least a page, page-aligned; io_base keeps the offset. */
	if (mmio_size < PAGE_SIZE)
		mmio_size = PAGE_SIZE;
	cb->mmio_base = ioremap_nocache(cb->pci_addr & PAGE_MASK, mmio_size);
	if (cb->mmio_base == NULL) {
		dev_err(&pdev->dev,
			"Unable to map Controller Register Window\n");
		goto failure;
	}

	cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
	if (privdata->hw_init(pdev, cb, cb->io_base))
		goto failure;

	if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
		dev_err(&pdev->dev,
			"Unable to acquire IRQ Channel %d\n", pdev->irq);
		goto failure;
	}
	cb->irq = pdev->irq;
	return cb;

failure:
	dev_err(&pdev->dev,
		"Failed to initialize Controller\n");
	/* myrb_cleanup() tears down whatever was set up above. */
	myrb_cleanup(cb);
	return NULL;
}

/*
 * PCI probe entry point: detect the controller, read its configuration,
 * set up the mempools and register the SCSI host.
 */
static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
{
	struct myrb_hba *cb;
	int ret;

	cb = myrb_detect(dev, entry);
	if (!cb)
		return -ENODEV;

	ret = myrb_get_hba_config(cb);
	if (ret < 0) {
		myrb_cleanup(cb);
		return ret;
	}

	if (!myrb_create_mempools(dev, cb)) {
		ret = -ENOMEM;
		goto failed;
	}

	ret = scsi_add_host(cb->host, &dev->dev);
	if (ret) {
		dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
		myrb_destroy_mempools(cb);
		goto failed;
	}
	scsi_scan_host(cb->host);
	return 0;
failed:
	myrb_cleanup(cb);
	return ret;
}


/* PCI remove: flush the controller cache, then tear everything down. */
static void myrb_remove(struct pci_dev *pdev)
{
	struct myrb_hba *cb = pci_get_drvdata(pdev);

	shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...");
	myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
	myrb_cleanup(cb);
	myrb_destroy_mempools(cb);
}


static const struct pci_device_id myrb_id_table[] = {
	{
		/*
		 * DAC960 LA boards carry a DEC 21285 bridge; match on the
		 * Mylex subsystem IDs.
		 */
		PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
			       PCI_DEVICE_ID_DEC_21285,
			       PCI_VENDOR_ID_MYLEX,
			       PCI_DEVICE_ID_MYLEX_DAC960_LA),
		.driver_data	= (unsigned long) &DAC960_LA_privdata,
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
	},
	{0, },
};

MODULE_DEVICE_TABLE(pci, myrb_id_table);

static struct pci_driver myrb_pci_driver = {
	.name		= "myrb",
	.id_table	= myrb_id_table,
	.probe		= myrb_probe,
	.remove		= myrb_remove,
};

static int __init myrb_init_module(void)
{
	int ret;

	/* The raid class must exist before any controller is probed. */
	myrb_raid_template = raid_class_attach(&myrb_raid_functions);
	if (!myrb_raid_template)
		return -ENODEV;

	ret = pci_register_driver(&myrb_pci_driver);
	if (ret)
		raid_class_release(myrb_raid_template);

	return ret;
}

static void __exit myrb_cleanup_module(void)
{
	pci_unregister_driver(&myrb_pci_driver);
	raid_class_release(myrb_raid_template);
}

module_init(myrb_init_module);
module_exit(myrb_cleanup_module);

MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
MODULE_LICENSE("GPL");
+958
drivers/scsi/myrb.h
/* SPDX-License-Identifier: GPL-2.0
 *
 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
 *
 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
 *
 * Based on the original DAC960 driver,
 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
 *
 */

#ifndef MYRB_H
#define MYRB_H

#define MYRB_MAX_LDEVS			32
#define MYRB_MAX_CHANNELS		3
#define MYRB_MAX_TARGETS		16
#define MYRB_MAX_PHYSICAL_DEVICES	45
#define MYRB_SCATTER_GATHER_LIMIT	32
#define MYRB_CMD_MBOX_COUNT		256
#define MYRB_STAT_MBOX_COUNT		1024

#define MYRB_BLKSIZE_BITS		9
/* Polling iterations (10us each) when waiting on the hardware mailbox. */
#define MYRB_MAILBOX_TIMEOUT		1000000

/* Reserved command identifiers for internal (non-SCSI) driver commands. */
#define MYRB_DCMD_TAG			1
#define MYRB_MCMD_TAG			2

#define MYRB_PRIMARY_MONITOR_INTERVAL (10 * HZ)
#define MYRB_SECONDARY_MONITOR_INTERVAL (60 * HZ)

/*
 * DAC960 V1 Firmware Command Opcodes.
 */
enum myrb_cmd_opcode {
	/* I/O Commands */
	MYRB_CMD_READ_EXTENDED =	0x33,
	MYRB_CMD_WRITE_EXTENDED =	0x34,
	MYRB_CMD_READAHEAD_EXTENDED =	0x35,
	MYRB_CMD_READ_EXTENDED_SG =	0xB3,
	MYRB_CMD_WRITE_EXTENDED_SG =	0xB4,
	MYRB_CMD_READ =			0x36,
	MYRB_CMD_READ_SG =		0xB6,
	MYRB_CMD_WRITE =		0x37,
	MYRB_CMD_WRITE_SG =		0xB7,
	MYRB_CMD_DCDB =			0x04,
	MYRB_CMD_DCDB_SG =		0x84,
	MYRB_CMD_FLUSH =		0x0A,
	/* Controller Status Related Commands */
	MYRB_CMD_ENQUIRY =		0x53,
	MYRB_CMD_ENQUIRY2 =		0x1C,
	MYRB_CMD_GET_LDRV_ELEMENT =	0x55,
	MYRB_CMD_GET_LDEV_INFO =	0x19,
	MYRB_CMD_IOPORTREAD =		0x39,
	MYRB_CMD_IOPORTWRITE =		0x3A,
	MYRB_CMD_GET_SD_STATS =		0x3E,
	MYRB_CMD_GET_PD_STATS =		0x3F,
	MYRB_CMD_EVENT_LOG_OPERATION =	0x72,
	/* Device Related Commands */
	MYRB_CMD_START_DEVICE =		0x10,
	MYRB_CMD_GET_DEVICE_STATE =	0x50,
	MYRB_CMD_STOP_CHANNEL =		0x13,
	MYRB_CMD_START_CHANNEL =	0x12,
	MYRB_CMD_RESET_CHANNEL =	0x1A,
	/* Commands Associated with Data Consistency and Errors */
	MYRB_CMD_REBUILD =		0x09,
	MYRB_CMD_REBUILD_ASYNC =	0x16,
	MYRB_CMD_CHECK_CONSISTENCY =	0x0F,
	MYRB_CMD_CHECK_CONSISTENCY_ASYNC = 0x1E,
	MYRB_CMD_REBUILD_STAT =		0x0C,
	MYRB_CMD_GET_REBUILD_PROGRESS =	0x27,
	MYRB_CMD_REBUILD_CONTROL =	0x1F,
	MYRB_CMD_READ_BADBLOCK_TABLE =	0x0B,
	MYRB_CMD_READ_BADDATA_TABLE =	0x25,
	MYRB_CMD_CLEAR_BADDATA_TABLE =	0x26,
	MYRB_CMD_GET_ERROR_TABLE =	0x17,
	MYRB_CMD_ADD_CAPACITY_ASYNC =	0x2A,
	MYRB_CMD_BGI_CONTROL =		0x2B,
	/* Configuration Related Commands */
	MYRB_CMD_READ_CONFIG2 =		0x3D,
	MYRB_CMD_WRITE_CONFIG2 =	0x3C,
	MYRB_CMD_READ_CONFIG_ONDISK =	0x4A,
	MYRB_CMD_WRITE_CONFIG_ONDISK =	0x4B,
	MYRB_CMD_READ_CONFIG =		0x4E,
	MYRB_CMD_READ_BACKUP_CONFIG =	0x4D,
	MYRB_CMD_WRITE_CONFIG =		0x4F,
	MYRB_CMD_ADD_CONFIG =		0x4C,
	MYRB_CMD_READ_CONFIG_LABEL =	0x48,
	MYRB_CMD_WRITE_CONFIG_LABEL =	0x49,
	/* Firmware Upgrade Related Commands */
	MYRB_CMD_LOAD_IMAGE =		0x20,
	MYRB_CMD_STORE_IMAGE =		0x21,
	MYRB_CMD_PROGRAM_IMAGE =	0x22,
	/* Diagnostic Commands */
	MYRB_CMD_SET_DIAGNOSTIC_MODE =	0x31,
	MYRB_CMD_RUN_DIAGNOSTIC =	0x32,
	/* Subsystem Service Commands */
	MYRB_CMD_GET_SUBSYS_DATA =	0x70,
	MYRB_CMD_SET_SUBSYS_PARAM =	0x71,
	/* Version 2.xx Firmware Commands (used by the P series) */
	MYRB_CMD_ENQUIRY_OLD =		0x05,
	MYRB_CMD_GET_DEVICE_STATE_OLD =	0x14,
	MYRB_CMD_READ_OLD =		0x02,
	MYRB_CMD_WRITE_OLD =		0x03,
	MYRB_CMD_READ_SG_OLD =		0x82,
	MYRB_CMD_WRITE_SG_OLD =		0x83
} __packed;

/*
 * DAC960 V1 Firmware Command Status Codes.
 *
 * Note: several codes share a numeric value; their meaning depends on
 * the command class given in the trailing comment.
 */
#define MYRB_STATUS_SUCCESS			0x0000	/* Common */
#define MYRB_STATUS_CHECK_CONDITION		0x0002	/* Common */
#define MYRB_STATUS_NO_DEVICE			0x0102	/* Common */
#define MYRB_STATUS_INVALID_ADDRESS		0x0105	/* Common */
#define MYRB_STATUS_INVALID_PARAM		0x0105	/* Common */
#define MYRB_STATUS_IRRECOVERABLE_DATA_ERROR	0x0001	/* I/O */
#define MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE 0x0002	/* I/O */
#define MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV	0x0105	/* I/O */
#define MYRB_STATUS_BAD_DATA			0x010C	/* I/O */
#define MYRB_STATUS_DEVICE_BUSY			0x0008	/* DCDB */
#define MYRB_STATUS_DEVICE_NONRESPONSIVE	0x000E	/* DCDB */
#define MYRB_STATUS_COMMAND_TERMINATED		0x000F	/* DCDB */
#define MYRB_STATUS_START_DEVICE_FAILED		0x0002	/* Device */
#define MYRB_STATUS_INVALID_CHANNEL_OR_TARGET	0x0105	/* Device */
#define MYRB_STATUS_CHANNEL_BUSY		0x0106	/* Device */
#define MYRB_STATUS_OUT_OF_MEMORY		0x0107	/* Device */
#define MYRB_STATUS_CHANNEL_NOT_STOPPED		0x0002	/* Device */
#define MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE 0x0002	/* Consistency */
#define MYRB_STATUS_RBLD_BADBLOCKS		0x0003	/* Consistency */
#define MYRB_STATUS_RBLD_NEW_DISK_FAILED	0x0004	/* Consistency */
#define MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS	0x0106	/* Consistency */
#define MYRB_STATUS_DEPENDENT_DISK_DEAD		0x0002	/* Consistency */
#define MYRB_STATUS_INCONSISTENT_BLOCKS		0x0003	/* Consistency */
#define MYRB_STATUS_INVALID_OR_NONREDUNDANT_LDRV 0x0105 /* Consistency */
#define MYRB_STATUS_NO_RBLD_OR_CHECK_INPROGRESS	0x0105	/* Consistency */
#define MYRB_STATUS_RBLD_IN_PROGRESS_DATA_VALID	0x0000	/* Consistency */
#define MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE	0x0002	/* Consistency */
#define MYRB_STATUS_RBLD_FAILED_BADBLOCKS	0x0003	/* Consistency */
#define MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED 0x0004	/* Consistency */
#define MYRB_STATUS_RBLD_SUCCESS		0x0100	/* Consistency */
#define MYRB_STATUS_RBLD_SUCCESS_TERMINATED	0x0107	/* Consistency */
#define MYRB_STATUS_RBLD_NOT_CHECKED		0x0108	/* Consistency */
#define MYRB_STATUS_BGI_SUCCESS			0x0100	/* Consistency */
#define MYRB_STATUS_BGI_ABORTED			0x0005	/* Consistency */
#define MYRB_STATUS_NO_BGI_INPROGRESS		0x0105	/* Consistency */
#define MYRB_STATUS_ADD_CAPACITY_INPROGRESS	0x0004	/* Consistency */
#define MYRB_STATUS_ADD_CAPACITY_FAILED_OR_SUSPENDED 0x00F4 /* Consistency */
#define MYRB_STATUS_CONFIG2_CSUM_ERROR		0x0002	/* Configuration */
#define MYRB_STATUS_CONFIGURATION_SUSPENDED	0x0106	/* Configuration */
#define MYRB_STATUS_FAILED_TO_CONFIGURE_NVRAM	0x0105	/* Configuration */
#define MYRB_STATUS_CONFIGURATION_NOT_SAVED	0x0106	/* Configuration */
#define MYRB_STATUS_SUBSYS_NOTINSTALLED		0x0001	/* Subsystem */
#define MYRB_STATUS_SUBSYS_FAILED		0x0002	/* Subsystem */
#define MYRB_STATUS_SUBSYS_BUSY			0x0106	/* Subsystem */
#define MYRB_STATUS_SUBSYS_TIMEOUT		0x0108	/* Subsystem */

/*
 * DAC960 V1 Firmware Enquiry Command reply structure.
 */
struct myrb_enquiry {
	unsigned char ldev_count;			/* Byte 0 */
	unsigned int rsvd1:24;				/* Bytes 1-3 */
	unsigned int ldev_sizes[32];			/* Bytes 4-131 */
	unsigned short flash_age;			/* Bytes 132-133 */
	struct {
		unsigned char deferred:1;		/* Byte 134 Bit 0 */
		unsigned char low_bat:1;		/* Byte 134 Bit 1 */
		unsigned char rsvd2:6;			/* Byte 134 Bits 2-7 */
	} status;
	unsigned char rsvd3:8;				/* Byte 135 */
	unsigned char fw_minor_version;			/* Byte 136 */
	unsigned char fw_major_version;			/* Byte 137 */
	enum {
		MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS =	0x00,
		MYRB_STDBY_RBLD_IN_PROGRESS =			0x01,
		MYRB_BG_RBLD_IN_PROGRESS =			0x02,
		MYRB_BG_CHECK_IN_PROGRESS =			0x03,
		MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR =		0xFF,
		MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED =	0xF0,
		MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED =	0xF1,
		MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER =		0xF2,
		MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED =	0xF3
	} __packed rbld;				/* Byte 138 */
	unsigned char max_tcq;				/* Byte 139 */
	unsigned char ldev_offline;			/* Byte 140 */
	unsigned char rsvd4:8;				/* Byte 141 */
	unsigned short ev_seq;				/* Bytes 142-143 */
	unsigned char ldev_critical;			/* Byte 144 */
	unsigned int rsvd5:24;				/* Bytes 145-147 */
	unsigned char pdev_dead;			/* Byte 148 */
	unsigned char rsvd6:8;				/* Byte 149 */
	unsigned char rbld_count;			/* Byte 150 */
	struct {
		unsigned char rsvd7:3;			/* Byte 151 Bits 0-2 */
		unsigned char bbu_present:1;		/* Byte 151 Bit 3 */
		unsigned char rsvd8:4;			/* Byte 151 Bits 4-7 */
	} misc;
	struct {
		unsigned char target;
		unsigned char channel;
	} dead_drives[21];				/* Bytes 152-194 */
	unsigned char rsvd9[62];			/* Bytes 195-255 */
} __packed;

/*
 * DAC960 V1 Firmware Enquiry2 Command reply structure.
 */
struct myrb_enquiry2 {
	struct {
		/* Controller sub-model (board variant) */
		enum {
			DAC960_V1_P_PD_PU =	0x01,
			DAC960_V1_PL =		0x02,
			DAC960_V1_PG =		0x10,
			DAC960_V1_PJ =		0x11,
			DAC960_V1_PR =		0x12,
			DAC960_V1_PT =		0x13,
			DAC960_V1_PTL0 =	0x14,
			DAC960_V1_PRL =		0x15,
			DAC960_V1_PTL1 =	0x16,
			DAC960_V1_1164P =	0x20
		} __packed sub_model;		/* Byte 0 */
		unsigned char actual_channels;	/* Byte 1 */
		enum {
			MYRB_5_CHANNEL_BOARD =		0x01,
			MYRB_3_CHANNEL_BOARD =		0x02,
			MYRB_2_CHANNEL_BOARD =		0x03,
			MYRB_3_CHANNEL_ASIC_DAC =	0x04
		} __packed model;		/* Byte 2 */
		enum {
			MYRB_EISA_CONTROLLER =	0x01,
			MYRB_MCA_CONTROLLER =	0x02,
			MYRB_PCI_CONTROLLER =	0x03,
			MYRB_SCSI_TO_SCSI =	0x08
		} __packed controller;		/* Byte 3 */
	} hw;					/* Bytes 0-3 */
	/* MajorVersion.MinorVersion-FirmwareType-TurnID */
	struct {
		unsigned char major_version;	/* Byte 4 */
		unsigned char minor_version;	/* Byte 5 */
		unsigned char turn_id;		/* Byte 6 */
		char firmware_type;		/* Byte 7 */
	} fw;					/* Bytes 4-7 */
	unsigned int rsvd1;			/* Bytes 8-11 */
	unsigned char cfg_chan;			/* Byte 12 */
	unsigned char cur_chan;			/* Byte 13 */
	unsigned char max_targets;		/* Byte 14 */
	unsigned char max_tcq;			/* Byte 15 */
	unsigned char max_ldev;			/* Byte 16 */
	unsigned char max_arms;			/* Byte 17 */
	unsigned char max_spans;		/* Byte 18 */
	unsigned char rsvd2;			/* Byte 19 */
	unsigned int rsvd3;			/* Bytes 20-23 */
	unsigned int mem_size;			/* Bytes 24-27 */
	unsigned int cache_size;		/* Bytes 28-31 */
	unsigned int flash_size;		/* Bytes 32-35 */
	unsigned int nvram_size;		/* Bytes 36-39 */
	struct {
		enum {
			MYRB_RAM_TYPE_DRAM =	0x0,
			MYRB_RAM_TYPE_EDO =	0x1,
			MYRB_RAM_TYPE_SDRAM =	0x2,
			MYRB_RAM_TYPE_Last =	0x7
		} __packed ram:3;		/* Byte 40 Bits 0-2 */
		enum {
			MYRB_ERR_CORR_None =	0x0,
			MYRB_ERR_CORR_Parity =	0x1,
			MYRB_ERR_CORR_ECC =	0x2,
			MYRB_ERR_CORR_Last =	0x7
		} __packed ec:3;		/* Byte 40 Bits 3-5 */
		unsigned char fast_page:1;	/* Byte 40 Bit 6 */
		unsigned char low_power:1;	/* Byte 40 Bit 7 */
		unsigned char rsvd4;		/* Byte 41 */
	} mem_type;
	unsigned short clock_speed;		/* Bytes 42-43 */
	unsigned short mem_speed;		/* Bytes 44-45 */
	unsigned short hw_speed;		/* Bytes 46-47 */
	unsigned char rsvd5[12];		/* Bytes 48-59 */
	unsigned short max_cmds;		/* Bytes 60-61 */
	unsigned short max_sge;			/* Bytes 62-63 */
	unsigned short max_drv_cmds;		/* Bytes 64-65 */
	unsigned short max_io_desc;		/* Bytes 66-67 */
	unsigned short max_sectors;		/* Bytes 68-69 */
	unsigned char latency;			/* Byte 70 */
	unsigned char rsvd6;			/* Byte 71 */
	unsigned char scsi_tmo;			/* Byte 72 */
	unsigned char rsvd7;			/* Byte 73 */
	unsigned short min_freelines;		/* Bytes 74-75 */
	unsigned char rsvd8[8];			/* Bytes 76-83 */
	unsigned char rbld_rate_const;		/* Byte 84 */
	unsigned char rsvd9[11];		/* Bytes 85-95 */
	unsigned short pdrv_block_size;		/* Bytes 96-97 */
	unsigned short ldev_block_size;		/* Bytes 98-99 */
	unsigned short max_blocks_per_cmd;	/* Bytes 100-101 */
	unsigned short block_factor;		/* Bytes 102-103 */
	unsigned short cacheline_size;		/* Bytes 104-105 */
	struct {
		enum {
			MYRB_WIDTH_NARROW_8BIT =	0x0,
			MYRB_WIDTH_WIDE_16BIT =		0x1,
			MYRB_WIDTH_WIDE_32BIT =		0x2
		} __packed bus_width:2;		/* Byte 106 Bits 0-1 */
		enum {
			MYRB_SCSI_SPEED_FAST =		0x0,
			MYRB_SCSI_SPEED_ULTRA =		0x1,
			MYRB_SCSI_SPEED_ULTRA2 =	0x2
		} __packed bus_speed:2;		/* Byte 106 Bits 2-3 */
		unsigned char differential:1;	/* Byte 106 Bit 4 */
		unsigned char rsvd10:3;		/* Byte 106 Bits 5-7 */
	} scsi_cap;
	unsigned char rsvd11[5];		/* Bytes 107-111 */
	unsigned short fw_build;		/* Bytes 112-113 */
	enum {
		MYRB_FAULT_AEMI =	0x01,
		MYRB_FAULT_OEM1 =	0x02,
		MYRB_FAULT_OEM2 =	0x04,
		MYRB_FAULT_OEM3 =	0x08,
		MYRB_FAULT_CONNER =	0x10,
		MYRB_FAULT_SAFTE =	0x20
	} __packed fault_mgmt;			/* Byte 114 */
	unsigned char rsvd12;			/* Byte 115 */
	struct {
		unsigned int clustering:1;	/* Byte 116 Bit 0 */
		unsigned int online_RAID_expansion:1; /* Byte 116 Bit 1 */
		unsigned int readahead:1;	/* Byte 116 Bit 2 */
		unsigned int bgi:1;		/* Byte 116 Bit 3 */
		unsigned int rsvd13:28;		/* Bytes 116-119 */
	} fw_features;
	unsigned char rsvd14[8];		/* Bytes 120-127 */
} __packed;

/*
 * DAC960 V1 Firmware Logical Drive State type.
 */
enum myrb_devstate {
	MYRB_DEVICE_DEAD =	0x00,
	MYRB_DEVICE_WO =	0x02,
	MYRB_DEVICE_ONLINE =	0x03,
	MYRB_DEVICE_CRITICAL =	0x04,
	MYRB_DEVICE_STANDBY =	0x10,
	MYRB_DEVICE_OFFLINE =	0xFF
} __packed;

/*
 * DAC960 V1 RAID Levels
 */
enum myrb_raidlevel {
	MYRB_RAID_LEVEL0 =	0x0,	/* RAID 0 */
	MYRB_RAID_LEVEL1 =	0x1,	/* RAID 1 */
	MYRB_RAID_LEVEL3 =	0x3,	/* RAID 3 */
	MYRB_RAID_LEVEL5 =	0x5,	/* RAID 5 */
	MYRB_RAID_LEVEL6 =	0x6,	/* RAID 6 */
	MYRB_RAID_JBOD =	0x7,	/* RAID 7 (JBOD) */
} __packed;

/*
 * DAC960 V1 Firmware Logical Drive Information structure.
 */
struct myrb_ldev_info {
	unsigned int size;		/* Bytes 0-3 */
	enum myrb_devstate state;	/* Byte 4 */
	unsigned int raid_level:7;	/* Byte 5 Bits 0-6 */
	unsigned int wb_enabled:1;	/* Byte 5 Bit 7 */
	unsigned int rsvd:16;		/* Bytes 6-7 */
};

/*
 * DAC960 V1 Firmware Perform Event Log Operation Types.
 */
#define DAC960_V1_GetEventLogEntry		0x00

/*
 * DAC960 V1 Firmware Get Event Log Entry Command reply structure.
 */
struct myrb_log_entry {
	unsigned char msg_type;			/* Byte 0 */
	unsigned char msg_len;			/* Byte 1 */
	unsigned char target:5;			/* Byte 2 Bits 0-4 */
	unsigned char channel:3;		/* Byte 2 Bits 5-7 */
	unsigned char lun:6;			/* Byte 3 Bits 0-5 */
	unsigned char rsvd1:2;			/* Byte 3 Bits 6-7 */
	unsigned short seq_num;			/* Bytes 4-5 */
	unsigned char sense[26];		/* Bytes 6-31 */
};

/*
 * DAC960 V1 Firmware Get Device State Command reply structure.
 * The structure is padded by 2 bytes for compatibility with Version 2.xx
 * Firmware.
 */
struct myrb_pdev_state {
	unsigned int present:1;			/* Byte 0 Bit 0 */
	unsigned int :7;			/* Byte 0 Bits 1-7 */
	enum {
		MYRB_TYPE_OTHER =		0x0,
		MYRB_TYPE_DISK =		0x1,
		MYRB_TYPE_TAPE =		0x2,
		MYRB_TYPE_CDROM_OR_WORM =	0x3
	} __packed devtype:2;			/* Byte 1 Bits 0-1 */
	unsigned int rsvd1:1;			/* Byte 1 Bit 2 */
	unsigned int fast20:1;			/* Byte 1 Bit 3 */
	unsigned int sync:1;			/* Byte 1 Bit 4 */
	unsigned int fast:1;			/* Byte 1 Bit 5 */
	unsigned int wide:1;			/* Byte 1 Bit 6 */
	unsigned int tcq_supported:1;		/* Byte 1 Bit 7 */
	enum myrb_devstate state;		/* Byte 2 */
	unsigned int rsvd2:8;			/* Byte 3 */
	/*
	 * NOTE(review): declared as a full 32-bit field although the
	 * comment says "Byte 4" — looks like this should be an 8-bit
	 * field; verify against the firmware layout before changing.
	 */
	unsigned int sync_multiplier;		/* Byte 4 */
	unsigned int sync_offset:5;		/* Byte 5 Bits 0-4 */
	unsigned int rsvd3:3;			/* Byte 5 Bits 5-7 */
	unsigned int size;			/* Bytes 6-9 */
	unsigned int rsvd4:16;			/* Bytes 10-11 */
} __packed;

/*
 * DAC960 V1 Firmware Get Rebuild Progress Command reply structure.
 */
struct myrb_rbld_progress {
	unsigned int ldev_num;			/* Bytes 0-3 */
	unsigned int ldev_size;			/* Bytes 4-7 */
	unsigned int blocks_left;		/* Bytes 8-11 */
};

/*
 * DAC960 V1 Firmware Background Initialization Status Command reply structure.
 */
struct myrb_bgi_status {
	unsigned int ldev_size;			/* Bytes 0-3 */
	unsigned int blocks_done;		/* Bytes 4-7 */
	unsigned char rsvd1[12];		/* Bytes 8-19 */
	unsigned int ldev_num;			/* Bytes 20-23 */
	unsigned char raid_level;		/* Byte 24 */
	enum {
		MYRB_BGI_INVALID =	0x00,
		MYRB_BGI_STARTED =	0x02,
		MYRB_BGI_INPROGRESS =	0x04,
		MYRB_BGI_SUSPENDED =	0x05,
		MYRB_BGI_CANCELLED =	0x06
	} __packed status;			/* Byte 25 */
	unsigned char rsvd2[6];			/* Bytes 26-31 */
};

/*
 * DAC960 V1 Firmware Error Table Entry structure.
 * Per-device error counters maintained by the firmware.
 */
struct myrb_error_entry {
	unsigned char parity_err;		/* Byte 0 */
	unsigned char soft_err;			/* Byte 1 */
	unsigned char hard_err;			/* Byte 2 */
	unsigned char misc_err;			/* Byte 3 */
};

/*
 * DAC960 V1 Firmware Read Config2 Command reply structure.
 */
struct myrb_config2 {
	unsigned rsvd1:1;				/* Byte 0 Bit 0 */
	unsigned active_negation:1;			/* Byte 0 Bit 1 */
	unsigned rsvd2:5;				/* Byte 0 Bits 2-6 */
	unsigned no_rescan_on_reset_during_scan:1;	/* Byte 0 Bit 7 */
	unsigned StorageWorks_support:1;		/* Byte 1 Bit 0 */
	unsigned HewlettPackard_support:1;		/* Byte 1 Bit 1 */
	unsigned no_disconnect_on_first_command:1;	/* Byte 1 Bit 2 */
	unsigned rsvd3:2;				/* Byte 1 Bits 3-4 */
	unsigned AEMI_ARM:1;				/* Byte 1 Bit 5 */
	unsigned AEMI_OFM:1;				/* Byte 1 Bit 6 */
	unsigned rsvd4:1;				/* Byte 1 Bit 7 */
	enum {
		MYRB_OEMID_MYLEX =	0x00,
		MYRB_OEMID_IBM =	0x08,
		MYRB_OEMID_HP =		0x0A,
		MYRB_OEMID_DEC =	0x0C,
		MYRB_OEMID_SIEMENS =	0x10,
		MYRB_OEMID_INTEL =	0x12
	} __packed OEMID;				/* Byte 2 */
	unsigned char oem_model_number;			/* Byte 3 */
	unsigned char physical_sector;			/* Byte 4 */
	unsigned char logical_sector;			/* Byte 5 */
	unsigned char block_factor;			/* Byte 6 */
	unsigned readahead_enabled:1;			/* Byte 7 Bit 0 */
	unsigned low_BIOS_delay:1;			/* Byte 7 Bit 1 */
	unsigned rsvd5:2;				/* Byte 7 Bits 2-3 */
	unsigned restrict_reassign_to_one_sector:1;	/* Byte 7 Bit 4 */
	unsigned rsvd6:1;				/* Byte 7 Bit 5 */
	unsigned FUA_during_write_recovery:1;		/* Byte 7 Bit 6 */
	unsigned enable_LeftSymmetricRAID5Algorithm:1;	/* Byte 7 Bit 7 */
	unsigned char default_rebuild_rate;		/* Byte 8 */
	unsigned char rsvd7;				/* Byte 9 */
	unsigned char blocks_per_cacheline;		/* Byte 10 */
	unsigned char blocks_per_stripe;		/* Byte 11 */
	/* One parameter byte per SCSI channel (array covers bytes 12-17) */
	struct {
		enum {
			MYRB_SPEED_ASYNC =		0x0,
			MYRB_SPEED_SYNC_8MHz =		0x1,
			MYRB_SPEED_SYNC_5MHz =		0x2,
			MYRB_SPEED_SYNC_10_OR_20MHz =	0x3
		} __packed speed:2;			/* Bits 0-1 */
		unsigned force_8bit:1;			/* Bit 2 */
		unsigned disable_fast20:1;		/* Bit 3 */
		unsigned rsvd8:3;			/* Bits 4-6 */
		unsigned enable_tcq:1;			/* Bit 7 */
	} __packed channelparam[6];			/* Bytes 12-17 */
	unsigned char SCSIInitiatorID;			/* Byte 18 */
	unsigned char rsvd9;				/* Byte 19 */
	enum {
		MYRB_STARTUP_CONTROLLER_SPINUP =	0x00,
		MYRB_STARTUP_POWERON_SPINUP =		0x01
	} __packed startup;				/* Byte 20 */
	unsigned char simultaneous_device_spinup_count;	/* Byte 21 */
	unsigned char seconds_delay_between_spinups;	/* Byte 22 */
	unsigned char rsvd10[29];			/* Bytes 23-51 */
	unsigned BIOS_disabled:1;			/* Byte 52 Bit 0 */
	unsigned CDROM_boot_enabled:1;			/* Byte 52 Bit 1 */
	unsigned rsvd11:3;				/* Byte 52 Bits 2-4 */
	enum {
		MYRB_GEOM_128_32 =	0x0,
		MYRB_GEOM_255_63 =	0x1,
		MYRB_GEOM_RESERVED1 =	0x2,
		MYRB_GEOM_RESERVED2 =	0x3
	} __packed drive_geometry:2;			/* Byte 52 Bits 5-6 */
	unsigned rsvd12:1;				/* Byte 52 Bit 7 */
	unsigned char rsvd13[9];			/* Bytes 53-61 */
	unsigned short csum;				/* Bytes 62-63 */
};

/*
 * DAC960 V1 Firmware DCDB request
structure.
 * Direct CDB: a pass-through SCSI command handed to the firmware; the
 * transfer length is split into xfer_len_lo plus the top four bits in
 * xfer_len_hi4.
 */
struct myrb_dcdb {
	unsigned target:4;				/* Byte 0 Bits 0-3 */
	unsigned channel:4;				/* Byte 0 Bits 4-7 */
	enum {
		MYRB_DCDB_XFER_NONE =			0,
		MYRB_DCDB_XFER_DEVICE_TO_SYSTEM =	1,
		MYRB_DCDB_XFER_SYSTEM_TO_DEVICE =	2,
		MYRB_DCDB_XFER_ILLEGAL =		3
	} __packed data_xfer:2;				/* Byte 1 Bits 0-1 */
	unsigned early_status:1;			/* Byte 1 Bit 2 */
	unsigned rsvd1:1;				/* Byte 1 Bit 3 */
	enum {
		MYRB_DCDB_TMO_24_HRS =	0,
		MYRB_DCDB_TMO_10_SECS =	1,
		MYRB_DCDB_TMO_60_SECS =	2,
		MYRB_DCDB_TMO_10_MINS =	3
	} __packed timeout:2;				/* Byte 1 Bits 4-5 */
	unsigned no_autosense:1;			/* Byte 1 Bit 6 */
	unsigned allow_disconnect:1;			/* Byte 1 Bit 7 */
	unsigned short xfer_len_lo;			/* Bytes 2-3 */
	u32 dma_addr;					/* Bytes 4-7 */
	unsigned char cdb_len:4;			/* Byte 8 Bits 0-3 */
	unsigned char xfer_len_hi4:4;			/* Byte 8 Bits 4-7 */
	unsigned char sense_len;			/* Byte 9 */
	unsigned char cdb[12];				/* Bytes 10-21 */
	unsigned char sense[64];			/* Bytes 22-85 */
	unsigned char status;				/* Byte 86 */
	unsigned char rsvd2;				/* Byte 87 */
};

/*
 * DAC960 V1 Firmware Scatter/Gather List Type 1 32 Bit Address
 * 32 Bit Byte Count structure.
 */
struct myrb_sge {
	u32 sge_addr;		/* Bytes 0-3 */
	u32 sge_count;		/* Bytes 4-7 */
};

/*
 * 13 Byte DAC960 V1 Firmware Command Mailbox structure.
 * Bytes 13-15 are not used. The structure is padded to 16 bytes for
 * efficient access.
 */
union myrb_cmd_mbox {
	unsigned int words[4];				/* Words 0-3 */
	unsigned char bytes[16];			/* Bytes 0-15 */
	/* Fields common to all mailbox formats */
	struct {
		enum myrb_cmd_opcode opcode;		/* Byte 0 */
		unsigned char id;			/* Byte 1 */
		unsigned char rsvd[14];			/* Bytes 2-15 */
	} __packed common;
	struct {
		enum myrb_cmd_opcode opcode;		/* Byte 0 */
		unsigned char id;			/* Byte 1 */
		unsigned char rsvd1[6];			/* Bytes 2-7 */
		u32 addr;				/* Bytes 8-11 */
		unsigned char rsvd2[4];			/* Bytes 12-15 */
	} __packed type3;
	struct {
		enum myrb_cmd_opcode opcode;		/* Byte 0 */
		unsigned char id;			/* Byte 1 */
		unsigned char optype;			/* Byte 2 */
		unsigned char rsvd1[5];			/* Bytes 3-7 */
		u32 addr;				/* Bytes 8-11 */
		unsigned char rsvd2[4];			/* Bytes 12-15 */
	} __packed type3B;
	struct {
		enum myrb_cmd_opcode opcode;		/* Byte 0 */
		unsigned char id;			/* Byte 1 */
		unsigned char rsvd1[5];			/* Bytes 2-6 */
		unsigned char ldev_num:6;		/* Byte 7 Bits 0-6 */
		unsigned char auto_restore:1;		/* Byte 7 Bit 7 */
		unsigned char rsvd2[8];			/* Bytes 8-15 */
	} __packed type3C;
	struct {
		enum myrb_cmd_opcode opcode;		/* Byte 0 */
		unsigned char id;			/* Byte 1 */
		unsigned char channel;			/* Byte 2 */
		unsigned char target;			/* Byte 3 */
		enum myrb_devstate state;		/* Byte 4 */
		unsigned char rsvd1[3];			/* Bytes 5-7 */
		u32 addr;				/* Bytes 8-11 */
		unsigned char rsvd2[4];			/* Bytes 12-15 */
	} __packed type3D;
	struct {
		enum myrb_cmd_opcode opcode;		/* Byte 0 */
		unsigned char id;			/* Byte 1 */
		unsigned char optype;			/* Byte 2 */
		unsigned char opqual;			/* Byte 3 */
		unsigned short ev_seq;			/* Bytes 4-5 */
		unsigned char rsvd1[2];			/* Bytes 6-7 */
		u32 addr;				/* Bytes 8-11 */
		unsigned char rsvd2[4];			/* Bytes 12-15 */
	} __packed type3E;
	struct {
		enum myrb_cmd_opcode opcode;		/* Byte 0 */
		unsigned char id;			/* Byte 1 */
		unsigned char rsvd1[2];			/* Bytes 2-3 */
		unsigned char rbld_rate;		/* Byte 4 */
		unsigned char rsvd2[3];			/* Bytes 5-7 */
		u32 addr;				/* Bytes 8-11 */
		unsigned char rsvd3[4];			/* Bytes 12-15 */
	} __packed type3R;
	struct {
		enum myrb_cmd_opcode opcode;		/* Byte 0 */
		unsigned char id;			/* Byte 1 */
		unsigned short xfer_len;		/* Bytes 2-3 */
		unsigned int lba;			/* Bytes 4-7 */
		u32 addr;				/* Bytes 8-11 */
		unsigned char ldev_num;			/* Byte 12 */
		unsigned char rsvd[3];			/* Bytes 13-15 */
	} __packed type4;
	struct {
		enum myrb_cmd_opcode opcode;		/* Byte 0 */
		unsigned char id;			/* Byte 1 */
		struct {
			unsigned short xfer_len:11;	/* Bytes 2-3 */
			unsigned char ldev_num:5;	/* Byte 3 Bits 3-7 */
		} __packed ld;
		unsigned int lba;			/* Bytes 4-7 */
		u32 addr;				/* Bytes 8-11 */
		unsigned char sg_count:6;		/* Byte 12 Bits 0-5 */
		enum {
			MYRB_SGL_ADDR32_COUNT32 =	0x0,
			MYRB_SGL_ADDR32_COUNT16 =	0x1,
			MYRB_SGL_COUNT32_ADDR32 =	0x2,
			MYRB_SGL_COUNT16_ADDR32 =	0x3
		} __packed sg_type:2;			/* Byte 12 Bits 6-7 */
		unsigned char rsvd[3];			/* Bytes 13-15 */
	} __packed type5;
	struct {
		enum myrb_cmd_opcode opcode;		/* Byte 0 */
		unsigned char id;			/* Byte 1 */
		unsigned char opcode2;			/* Byte 2 */
		unsigned char rsvd1:8;			/* Byte 3 */
		u32 cmd_mbox_addr;			/* Bytes 4-7 */
		u32 stat_mbox_addr;			/* Bytes 8-11 */
		unsigned char rsvd2[4];			/* Bytes 12-15 */
	} __packed typeX;
};

/*
 * DAC960 V1 Firmware Controller Status Mailbox structure.
 */
struct myrb_stat_mbox {
	unsigned char id;		/* Byte 0 */
	unsigned char rsvd:7;		/* Byte 1 Bits 0-6 */
	unsigned char valid:1;		/* Byte 1 Bit 7 */
	unsigned short status;		/* Bytes 2-3 */
};

/*
 * Driver-side command block: command mailbox copy, completion status and
 * per-command DMA buffers (DCDB and scatter/gather list).
 */
struct myrb_cmdblk {
	union myrb_cmd_mbox mbox;
	unsigned short status;
	struct completion *completion;
	struct myrb_dcdb *dcdb;
	dma_addr_t dcdb_addr;
	struct myrb_sge *sgl;
	dma_addr_t sgl_addr;
};

/*
 * Per-controller (HBA) instance state.
 */
struct myrb_hba {
	unsigned int ldev_block_size;
	unsigned char ldev_geom_heads;
	unsigned char ldev_geom_sectors;
	unsigned char bus_width;
	unsigned short stripe_size;
	unsigned short segment_size;
	unsigned short new_ev_seq;
	unsigned short old_ev_seq;
	bool dual_mode_interface;
	bool bgi_status_supported;
	bool safte_enabled;
	bool need_ldev_info;
	bool need_err_info;
	bool need_rbld;
	bool need_cc_status;
	bool need_bgi_status;
	bool rbld_first;

	struct pci_dev *pdev;
	struct Scsi_Host *host;

	struct workqueue_struct *work_q;
	char work_q_name[20];
	struct delayed_work monitor_work;
	unsigned long primary_monitor_time;
	unsigned long secondary_monitor_time;

	struct dma_pool *sg_pool;
	struct dma_pool *dcdb_pool;

	spinlock_t queue_lock;

	/* Controller-family specific hooks, filled in at probe time */
	void (*qcmd)(struct myrb_hba *cs, struct myrb_cmdblk *cmd_blk);
	void (*write_cmd_mbox)(union myrb_cmd_mbox *next_mbox,
			       union myrb_cmd_mbox *cmd_mbox);
	void (*get_cmd_mbox)(void __iomem *base);
	void (*disable_intr)(void __iomem *base);
	void (*reset)(void __iomem *base);

	unsigned int ctlr_num;
	unsigned char model_name[20];
	unsigned char fw_version[12];

	unsigned int irq;
	phys_addr_t io_addr;
	phys_addr_t pci_addr;
	void __iomem *io_base;
	void __iomem *mmio_base;

	/* DMA-coherent ring of command mailboxes shared with the firmware */
	size_t cmd_mbox_size;
	dma_addr_t cmd_mbox_addr;
	union myrb_cmd_mbox *first_cmd_mbox;
	union myrb_cmd_mbox *last_cmd_mbox;
	union myrb_cmd_mbox *next_cmd_mbox;
	union myrb_cmd_mbox *prev_cmd_mbox1;
	union myrb_cmd_mbox *prev_cmd_mbox2;

	/* DMA-coherent ring of status mailboxes written by the firmware */
	size_t stat_mbox_size;
	dma_addr_t stat_mbox_addr;
	struct myrb_stat_mbox *first_stat_mbox;
	struct myrb_stat_mbox *last_stat_mbox;
	struct myrb_stat_mbox *next_stat_mbox;

	struct myrb_cmdblk dcmd_blk;
	struct myrb_cmdblk mcmd_blk;
	struct mutex dcmd_mutex;

	struct myrb_enquiry *enquiry;
	dma_addr_t enquiry_addr;

	struct myrb_error_entry *err_table;
	dma_addr_t err_table_addr;

	unsigned short last_rbld_status;

	struct myrb_ldev_info *ldev_info_buf;
	dma_addr_t ldev_info_addr;

	struct myrb_bgi_status bgi_status;

	struct mutex dma_mutex;
};

/*
 * DAC960 LA Series Controller Interface Register Offsets.
 */
#define DAC960_LA_mmio_size 0x80

enum DAC960_LA_reg_offset {
	DAC960_LA_IRQMASK_OFFSET =	0x34,
	DAC960_LA_CMDOP_OFFSET =	0x50,
	DAC960_LA_CMDID_OFFSET =	0x51,
	DAC960_LA_MBOX2_OFFSET =	0x52,
	DAC960_LA_MBOX3_OFFSET =	0x53,
	DAC960_LA_MBOX4_OFFSET =	0x54,
	DAC960_LA_MBOX5_OFFSET =	0x55,
	DAC960_LA_MBOX6_OFFSET =	0x56,
	DAC960_LA_MBOX7_OFFSET =	0x57,
	DAC960_LA_MBOX8_OFFSET =	0x58,
	DAC960_LA_MBOX9_OFFSET =	0x59,
	DAC960_LA_MBOX10_OFFSET =	0x5A,
	DAC960_LA_MBOX11_OFFSET =	0x5B,
	DAC960_LA_MBOX12_OFFSET =	0x5C,
	DAC960_LA_STSID_OFFSET =	0x5D,
	DAC960_LA_STS_OFFSET =		0x5E,
	DAC960_LA_IDB_OFFSET =		0x60,
	DAC960_LA_ODB_OFFSET =		0x61,
	DAC960_LA_ERRSTS_OFFSET =	0x63,
};

/*
 * DAC960 LA Series Inbound Door Bell Register.
 */
#define DAC960_LA_IDB_HWMBOX_NEW_CMD 0x01
#define DAC960_LA_IDB_HWMBOX_ACK_STS 0x02
#define DAC960_LA_IDB_GEN_IRQ 0x04
#define DAC960_LA_IDB_CTRL_RESET 0x08
#define DAC960_LA_IDB_MMBOX_NEW_CMD 0x10

/* NOTE: the same bit values are reused for the read-back (status) view */
#define DAC960_LA_IDB_HWMBOX_EMPTY 0x01
#define DAC960_LA_IDB_INIT_DONE 0x02

/*
 * DAC960 LA Series Outbound Door Bell Register.
 */
#define DAC960_LA_ODB_HWMBOX_ACK_IRQ 0x01
#define DAC960_LA_ODB_MMBOX_ACK_IRQ 0x02
/* NOTE: same bit values as the *_ACK_IRQ defines, used for the read view */
#define DAC960_LA_ODB_HWMBOX_STS_AVAIL 0x01
#define DAC960_LA_ODB_MMBOX_STS_AVAIL 0x02

/*
 * DAC960 LA Series Interrupt Mask Register.
 */
#define DAC960_LA_IRQMASK_DISABLE_IRQ 0x04

/*
 * DAC960 LA Series Error Status Register.
 */
#define DAC960_LA_ERRSTS_PENDING 0x02

/*
 * DAC960 PG Series Controller Interface Register Offsets.
 */
#define DAC960_PG_mmio_size 0x2000

enum DAC960_PG_reg_offset {
	DAC960_PG_IDB_OFFSET =		0x0020,
	DAC960_PG_ODB_OFFSET =		0x002C,
	DAC960_PG_IRQMASK_OFFSET =	0x0034,
	DAC960_PG_CMDOP_OFFSET =	0x1000,
	DAC960_PG_CMDID_OFFSET =	0x1001,
	DAC960_PG_MBOX2_OFFSET =	0x1002,
	DAC960_PG_MBOX3_OFFSET =	0x1003,
	DAC960_PG_MBOX4_OFFSET =	0x1004,
	DAC960_PG_MBOX5_OFFSET =	0x1005,
	DAC960_PG_MBOX6_OFFSET =	0x1006,
	DAC960_PG_MBOX7_OFFSET =	0x1007,
	DAC960_PG_MBOX8_OFFSET =	0x1008,
	DAC960_PG_MBOX9_OFFSET =	0x1009,
	DAC960_PG_MBOX10_OFFSET =	0x100A,
	DAC960_PG_MBOX11_OFFSET =	0x100B,
	DAC960_PG_MBOX12_OFFSET =	0x100C,
	DAC960_PG_STSID_OFFSET =	0x1018,
	DAC960_PG_STS_OFFSET =		0x101A,
	DAC960_PG_ERRSTS_OFFSET =	0x103F,
};

/*
 * DAC960 PG Series Inbound Door Bell Register.
 */
#define DAC960_PG_IDB_HWMBOX_NEW_CMD 0x01
#define DAC960_PG_IDB_HWMBOX_ACK_STS 0x02
#define DAC960_PG_IDB_GEN_IRQ 0x04
#define DAC960_PG_IDB_CTRL_RESET 0x08
#define DAC960_PG_IDB_MMBOX_NEW_CMD 0x10

/* NOTE: the same bit values are reused for the read-back (status) view */
#define DAC960_PG_IDB_HWMBOX_FULL 0x01
#define DAC960_PG_IDB_INIT_IN_PROGRESS 0x02

/*
 * DAC960 PG Series Outbound Door Bell Register.
 */
#define DAC960_PG_ODB_HWMBOX_ACK_IRQ 0x01
#define DAC960_PG_ODB_MMBOX_ACK_IRQ 0x02
/* NOTE: same bit values as the *_ACK_IRQ defines, used for the read view */
#define DAC960_PG_ODB_HWMBOX_STS_AVAIL 0x01
#define DAC960_PG_ODB_MMBOX_STS_AVAIL 0x02

/*
 * DAC960 PG Series Interrupt Mask Register.
 */
#define DAC960_PG_IRQMASK_MSI_MASK1 0x03
#define DAC960_PG_IRQMASK_DISABLE_IRQ 0x04
#define DAC960_PG_IRQMASK_MSI_MASK2 0xF8

/*
 * DAC960 PG Series Error Status Register.
 */
#define DAC960_PG_ERRSTS_PENDING 0x04

/*
 * DAC960 PD Series Controller Interface Register Offsets.
 */
#define DAC960_PD_mmio_size 0x80

enum DAC960_PD_reg_offset {
	DAC960_PD_CMDOP_OFFSET =	0x00,
	DAC960_PD_CMDID_OFFSET =	0x01,
	DAC960_PD_MBOX2_OFFSET =	0x02,
	DAC960_PD_MBOX3_OFFSET =	0x03,
	DAC960_PD_MBOX4_OFFSET =	0x04,
	DAC960_PD_MBOX5_OFFSET =	0x05,
	DAC960_PD_MBOX6_OFFSET =	0x06,
	DAC960_PD_MBOX7_OFFSET =	0x07,
	DAC960_PD_MBOX8_OFFSET =	0x08,
	DAC960_PD_MBOX9_OFFSET =	0x09,
	DAC960_PD_MBOX10_OFFSET =	0x0A,
	DAC960_PD_MBOX11_OFFSET =	0x0B,
	DAC960_PD_MBOX12_OFFSET =	0x0C,
	DAC960_PD_STSID_OFFSET =	0x0D,
	DAC960_PD_STS_OFFSET =		0x0E,
	DAC960_PD_ERRSTS_OFFSET =	0x3F,
	DAC960_PD_IDB_OFFSET =		0x40,
	DAC960_PD_ODB_OFFSET =		0x41,
	DAC960_PD_IRQEN_OFFSET =	0x43,
};

/*
 * DAC960 PD Series Inbound Door Bell Register.
 */
#define DAC960_PD_IDB_HWMBOX_NEW_CMD 0x01
#define DAC960_PD_IDB_HWMBOX_ACK_STS 0x02
#define DAC960_PD_IDB_GEN_IRQ 0x04
#define DAC960_PD_IDB_CTRL_RESET 0x08

/* NOTE: the same bit values are reused for the read-back (status) view */
#define DAC960_PD_IDB_HWMBOX_FULL 0x01
#define DAC960_PD_IDB_INIT_IN_PROGRESS 0x02

/*
 * DAC960 PD Series Outbound Door Bell Register.
 */
#define DAC960_PD_ODB_HWMBOX_ACK_IRQ 0x01
#define DAC960_PD_ODB_HWMBOX_STS_AVAIL 0x01

/*
 * DAC960 PD Series Interrupt Enable Register.
 */
#define DAC960_PD_IRQMASK_ENABLE_IRQ 0x01

/*
 * DAC960 PD Series Error Status Register.
 */
#define DAC960_PD_ERRSTS_PENDING 0x04

/* Per-board-family hardware initialization hook */
typedef int (*myrb_hw_init_t)(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base);
/* Per-board-family memory mailbox setup hook; returns a myrb status code */
typedef unsigned short (*mbox_mmio_init_t)(struct pci_dev *pdev,
					   void __iomem *base,
					   union myrb_cmd_mbox *mbox);

/*
 * Board-type specific probe data: hardware init hook, interrupt handler
 * and size of the MMIO register window.
 */
struct myrb_privdata {
	myrb_hw_init_t		hw_init;
	irq_handler_t		irq_handler;
	unsigned int		mmio_size;
};

#endif /* MYRB_H */