Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.17-rc4 3364 lines 88 kB view raw
1/*************************************************************************** 2 dpti.c - description 3 ------------------- 4 begin : Thu Sep 7 2000 5 copyright : (C) 2000 by Adaptec 6 7 July 30, 2001 First version being submitted 8 for inclusion in the kernel. V2.4 9 10 See Documentation/scsi/dpti.txt for history, notes, license info 11 and credits 12 ***************************************************************************/ 13 14/*************************************************************************** 15 * * 16 * This program is free software; you can redistribute it and/or modify * 17 * it under the terms of the GNU General Public License as published by * 18 * the Free Software Foundation; either version 2 of the License, or * 19 * (at your option) any later version. * 20 * * 21 ***************************************************************************/ 22/*************************************************************************** 23 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp> 24 - Support 2.6 kernel and DMA-mapping 25 - ioctl fix for raid tools 26 - use schedule_timeout in long long loop 27 **************************************************************************/ 28 29/*#define DEBUG 1 */ 30/*#define UARTDELAY 1 */ 31 32/* On the real kernel ADDR32 should always be zero for 2.4. GFP_HIGH allocates 33 high pages. 
Keep the macro around because of the broken unmerged ia64 tree */ 34 35#define ADDR32 (0) 36 37#include <linux/module.h> 38 39MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn"); 40MODULE_DESCRIPTION("Adaptec I2O RAID Driver"); 41 42//////////////////////////////////////////////////////////////// 43 44#include <linux/ioctl.h> /* For SCSI-Passthrough */ 45#include <asm/uaccess.h> 46 47#include <linux/stat.h> 48#include <linux/slab.h> /* for kmalloc() */ 49#include <linux/config.h> /* for CONFIG_PCI */ 50#include <linux/pci.h> /* for PCI support */ 51#include <linux/proc_fs.h> 52#include <linux/blkdev.h> 53#include <linux/delay.h> /* for udelay */ 54#include <linux/interrupt.h> 55#include <linux/kernel.h> /* for printk */ 56#include <linux/sched.h> 57#include <linux/reboot.h> 58#include <linux/spinlock.h> 59#include <linux/smp_lock.h> 60#include <linux/dma-mapping.h> 61 62#include <linux/timer.h> 63#include <linux/string.h> 64#include <linux/ioport.h> 65#include <linux/mutex.h> 66 67#include <asm/processor.h> /* for boot_cpu_data */ 68#include <asm/pgtable.h> 69#include <asm/io.h> /* for virt_to_bus, etc. 
*/ 70 71#include <scsi/scsi.h> 72#include <scsi/scsi_cmnd.h> 73#include <scsi/scsi_device.h> 74#include <scsi/scsi_host.h> 75#include <scsi/scsi_tcq.h> 76 77#include "dpt/dptsig.h" 78#include "dpti.h" 79 80/*============================================================================ 81 * Create a binary signature - this is read by dptsig 82 * Needed for our management apps 83 *============================================================================ 84 */ 85static dpt_sig_S DPTI_sig = { 86 {'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, 87#ifdef __i386__ 88 PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, 89#elif defined(__ia64__) 90 PROC_INTEL, PROC_IA64, 91#elif defined(__sparc__) 92 PROC_ULTRASPARC, PROC_ULTRASPARC, 93#elif defined(__alpha__) 94 PROC_ALPHA, PROC_ALPHA, 95#else 96 (-1),(-1), 97#endif 98 FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL, 99 ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION, 100 DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver" 101}; 102 103 104 105 106/*============================================================================ 107 * Globals 108 *============================================================================ 109 */ 110 111static DEFINE_MUTEX(adpt_configuration_lock); 112 113static struct i2o_sys_tbl *sys_tbl = NULL; 114static int sys_tbl_ind = 0; 115static int sys_tbl_len = 0; 116 117static adpt_hba* hba_chain = NULL; 118static int hba_count = 0; 119 120static struct file_operations adpt_fops = { 121 .ioctl = adpt_ioctl, 122 .open = adpt_open, 123 .release = adpt_close 124}; 125 126#ifdef REBOOT_NOTIFIER 127static struct notifier_block adpt_reboot_notifier = 128{ 129 adpt_reboot_event, 130 NULL, 131 0 132}; 133#endif 134 135/* Structures and definitions for synchronous message posting. 
136 * See adpt_i2o_post_wait() for description 137 * */ 138struct adpt_i2o_post_wait_data 139{ 140 int status; 141 u32 id; 142 adpt_wait_queue_head_t *wq; 143 struct adpt_i2o_post_wait_data *next; 144}; 145 146static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL; 147static u32 adpt_post_wait_id = 0; 148static DEFINE_SPINLOCK(adpt_post_wait_lock); 149 150 151/*============================================================================ 152 * Functions 153 *============================================================================ 154 */ 155 156static u8 adpt_read_blink_led(adpt_hba* host) 157{ 158 if(host->FwDebugBLEDflag_P != 0) { 159 if( readb(host->FwDebugBLEDflag_P) == 0xbc ){ 160 return readb(host->FwDebugBLEDvalue_P); 161 } 162 } 163 return 0; 164} 165 166/*============================================================================ 167 * Scsi host template interface functions 168 *============================================================================ 169 */ 170 171static struct pci_device_id dptids[] = { 172 { PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, 173 { PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, 174 { 0, } 175}; 176MODULE_DEVICE_TABLE(pci,dptids); 177 178static int adpt_detect(struct scsi_host_template* sht) 179{ 180 struct pci_dev *pDev = NULL; 181 adpt_hba* pHba; 182 183 adpt_init(); 184 185 PINFO("Detecting Adaptec I2O RAID controllers...\n"); 186 187 /* search for all Adatpec I2O RAID cards */ 188 while ((pDev = pci_find_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) { 189 if(pDev->device == PCI_DPT_DEVICE_ID || 190 pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){ 191 if(adpt_install_hba(sht, pDev) ){ 192 PERROR("Could not Init an I2O RAID device\n"); 193 PERROR("Will not try to detect others.\n"); 194 return hba_count-1; 195 } 196 } 197 } 198 199 /* In INIT state, Activate IOPs */ 200 for (pHba = hba_chain; pHba; pHba = pHba->next) { 201 // Activate does get status , init outbound, and 
get hrt 202 if (adpt_i2o_activate_hba(pHba) < 0) { 203 adpt_i2o_delete_hba(pHba); 204 } 205 } 206 207 208 /* Active IOPs in HOLD state */ 209 210rebuild_sys_tab: 211 if (hba_chain == NULL) 212 return 0; 213 214 /* 215 * If build_sys_table fails, we kill everything and bail 216 * as we can't init the IOPs w/o a system table 217 */ 218 if (adpt_i2o_build_sys_table() < 0) { 219 adpt_i2o_sys_shutdown(); 220 return 0; 221 } 222 223 PDEBUG("HBA's in HOLD state\n"); 224 225 /* If IOP don't get online, we need to rebuild the System table */ 226 for (pHba = hba_chain; pHba; pHba = pHba->next) { 227 if (adpt_i2o_online_hba(pHba) < 0) { 228 adpt_i2o_delete_hba(pHba); 229 goto rebuild_sys_tab; 230 } 231 } 232 233 /* Active IOPs now in OPERATIONAL state */ 234 PDEBUG("HBA's in OPERATIONAL state\n"); 235 236 printk("dpti: If you have a lot of devices this could take a few minutes.\n"); 237 for (pHba = hba_chain; pHba; pHba = pHba->next) { 238 printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name); 239 if (adpt_i2o_lct_get(pHba) < 0){ 240 adpt_i2o_delete_hba(pHba); 241 continue; 242 } 243 244 if (adpt_i2o_parse_lct(pHba) < 0){ 245 adpt_i2o_delete_hba(pHba); 246 continue; 247 } 248 adpt_inquiry(pHba); 249 } 250 251 for (pHba = hba_chain; pHba; pHba = pHba->next) { 252 if( adpt_scsi_register(pHba,sht) < 0){ 253 adpt_i2o_delete_hba(pHba); 254 continue; 255 } 256 pHba->initialized = TRUE; 257 pHba->state &= ~DPTI_STATE_RESET; 258 } 259 260 // Register our control device node 261 // nodes will need to be created in /dev to access this 262 // the nodes can not be created from within the driver 263 if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) { 264 adpt_i2o_sys_shutdown(); 265 return 0; 266 } 267 return hba_count; 268} 269 270 271/* 272 * scsi_unregister will be called AFTER we return. 
273 */ 274static int adpt_release(struct Scsi_Host *host) 275{ 276 adpt_hba* pHba = (adpt_hba*) host->hostdata[0]; 277// adpt_i2o_quiesce_hba(pHba); 278 adpt_i2o_delete_hba(pHba); 279 scsi_unregister(host); 280 return 0; 281} 282 283 284static void adpt_inquiry(adpt_hba* pHba) 285{ 286 u32 msg[14]; 287 u32 *mptr; 288 u32 *lenptr; 289 int direction; 290 int scsidir; 291 u32 len; 292 u32 reqlen; 293 u8* buf; 294 u8 scb[16]; 295 s32 rcode; 296 297 memset(msg, 0, sizeof(msg)); 298 buf = (u8*)kmalloc(80,GFP_KERNEL|ADDR32); 299 if(!buf){ 300 printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name); 301 return; 302 } 303 memset((void*)buf, 0, 36); 304 305 len = 36; 306 direction = 0x00000000; 307 scsidir =0x40000000; // DATA IN (iop<--dev) 308 309 reqlen = 14; // SINGLE SGE 310 /* Stick the headers on */ 311 msg[0] = reqlen<<16 | SGL_OFFSET_12; 312 msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID); 313 msg[2] = 0; 314 msg[3] = 0; 315 // Adaptec/DPT Private stuff 316 msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16; 317 msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/; 318 /* Direction, disconnect ok | sense data | simple queue , CDBLen */ 319 // I2O_SCB_FLAG_ENABLE_DISCONNECT | 320 // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG | 321 // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE; 322 msg[6] = scsidir|0x20a00000| 6 /* cmd len*/; 323 324 mptr=msg+7; 325 326 memset(scb, 0, sizeof(scb)); 327 // Write SCSI command into the message - always 16 byte block 328 scb[0] = INQUIRY; 329 scb[1] = 0; 330 scb[2] = 0; 331 scb[3] = 0; 332 scb[4] = 36; 333 scb[5] = 0; 334 // Don't care about the rest of scb 335 336 memcpy(mptr, scb, sizeof(scb)); 337 mptr+=4; 338 lenptr=mptr++; /* Remember me - fill in when we know */ 339 340 /* Now fill in the SGList and command */ 341 *lenptr = len; 342 *mptr++ = 0xD0000000|direction|len; 343 *mptr++ = virt_to_bus(buf); 344 345 // Send it on it's way 346 rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120); 347 if (rcode != 0) { 348 sprintf(pHba->detail, "Adaptec I2O RAID"); 349 
printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode); 350 if (rcode != -ETIME && rcode != -EINTR) 351 kfree(buf); 352 } else { 353 memset(pHba->detail, 0, sizeof(pHba->detail)); 354 memcpy(&(pHba->detail), "Vendor: Adaptec ", 16); 355 memcpy(&(pHba->detail[16]), " Model: ", 8); 356 memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16); 357 memcpy(&(pHba->detail[40]), " FW: ", 4); 358 memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4); 359 pHba->detail[48] = '\0'; /* precautionary */ 360 kfree(buf); 361 } 362 adpt_i2o_status_get(pHba); 363 return ; 364} 365 366 367static int adpt_slave_configure(struct scsi_device * device) 368{ 369 struct Scsi_Host *host = device->host; 370 adpt_hba* pHba; 371 372 pHba = (adpt_hba *) host->hostdata[0]; 373 374 if (host->can_queue && device->tagged_supported) { 375 scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG, 376 host->can_queue - 1); 377 } else { 378 scsi_adjust_queue_depth(device, 0, 1); 379 } 380 return 0; 381} 382 383static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *)) 384{ 385 adpt_hba* pHba = NULL; 386 struct adpt_device* pDev = NULL; /* dpt per device information */ 387 388 cmd->scsi_done = done; 389 /* 390 * SCSI REQUEST_SENSE commands will be executed automatically by the 391 * Host Adapter for any errors, so they should not be executed 392 * explicitly unless the Sense Data is zero indicating that no error 393 * occurred. 394 */ 395 396 if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) { 397 cmd->result = (DID_OK << 16); 398 cmd->scsi_done(cmd); 399 return 0; 400 } 401 402 pHba = (adpt_hba*)cmd->device->host->hostdata[0]; 403 if (!pHba) { 404 return FAILED; 405 } 406 407 rmb(); 408 /* 409 * TODO: I need to block here if I am processing ioctl cmds 410 * but if the outstanding cmds all finish before the ioctl, 411 * the scsi-core will not know to start sending cmds to me again. 
412 * I need to a way to restart the scsi-cores queues or should I block 413 * calling scsi_done on the outstanding cmds instead 414 * for now we don't set the IOCTL state 415 */ 416 if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) { 417 pHba->host->last_reset = jiffies; 418 pHba->host->resetting = 1; 419 return 1; 420 } 421 422 // TODO if the cmd->device if offline then I may need to issue a bus rescan 423 // followed by a get_lct to see if the device is there anymore 424 if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) { 425 /* 426 * First command request for this device. Set up a pointer 427 * to the device structure. This should be a TEST_UNIT_READY 428 * command from scan_scsis_single. 429 */ 430 if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) { 431 // TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response 432 // with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue. 433 cmd->result = (DID_NO_CONNECT << 16); 434 cmd->scsi_done(cmd); 435 return 0; 436 } 437 cmd->device->hostdata = pDev; 438 } 439 pDev->pScsi_dev = cmd->device; 440 441 /* 442 * If we are being called from when the device is being reset, 443 * delay processing of the command until later. 
444 */ 445 if (pDev->state & DPTI_DEV_RESET ) { 446 return FAILED; 447 } 448 return adpt_scsi_to_i2o(pHba, cmd, pDev); 449} 450 451static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev, 452 sector_t capacity, int geom[]) 453{ 454 int heads=-1; 455 int sectors=-1; 456 int cylinders=-1; 457 458 // *** First lets set the default geometry **** 459 460 // If the capacity is less than ox2000 461 if (capacity < 0x2000 ) { // floppy 462 heads = 18; 463 sectors = 2; 464 } 465 // else if between 0x2000 and 0x20000 466 else if (capacity < 0x20000) { 467 heads = 64; 468 sectors = 32; 469 } 470 // else if between 0x20000 and 0x40000 471 else if (capacity < 0x40000) { 472 heads = 65; 473 sectors = 63; 474 } 475 // else if between 0x4000 and 0x80000 476 else if (capacity < 0x80000) { 477 heads = 128; 478 sectors = 63; 479 } 480 // else if greater than 0x80000 481 else { 482 heads = 255; 483 sectors = 63; 484 } 485 cylinders = sector_div(capacity, heads * sectors); 486 487 // Special case if CDROM 488 if(sdev->type == 5) { // CDROM 489 heads = 252; 490 sectors = 63; 491 cylinders = 1111; 492 } 493 494 geom[0] = heads; 495 geom[1] = sectors; 496 geom[2] = cylinders; 497 498 PDEBUG("adpt_bios_param: exit\n"); 499 return 0; 500} 501 502 503static const char *adpt_info(struct Scsi_Host *host) 504{ 505 adpt_hba* pHba; 506 507 pHba = (adpt_hba *) host->hostdata[0]; 508 return (char *) (pHba->detail); 509} 510 511static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, 512 int length, int inout) 513{ 514 struct adpt_device* d; 515 int id; 516 int chan; 517 int len = 0; 518 int begin = 0; 519 int pos = 0; 520 adpt_hba* pHba; 521 int unit; 522 523 *start = buffer; 524 if (inout == TRUE) { 525 /* 526 * The user has done a write and wants us to take the 527 * data in the buffer and do something with it. 
528 * proc_scsiwrite calls us with inout = 1 529 * 530 * Read data from buffer (writing to us) - NOT SUPPORTED 531 */ 532 return -EINVAL; 533 } 534 535 /* 536 * inout = 0 means the user has done a read and wants information 537 * returned, so we write information about the cards into the buffer 538 * proc_scsiread() calls us with inout = 0 539 */ 540 541 // Find HBA (host bus adapter) we are looking for 542 mutex_lock(&adpt_configuration_lock); 543 for (pHba = hba_chain; pHba; pHba = pHba->next) { 544 if (pHba->host == host) { 545 break; /* found adapter */ 546 } 547 } 548 mutex_unlock(&adpt_configuration_lock); 549 if (pHba == NULL) { 550 return 0; 551 } 552 host = pHba->host; 553 554 len = sprintf(buffer , "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION); 555 len += sprintf(buffer+len, "%s\n", pHba->detail); 556 len += sprintf(buffer+len, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n", 557 pHba->host->host_no, pHba->name, host->irq); 558 len += sprintf(buffer+len, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n", 559 host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize); 560 561 pos = begin + len; 562 563 /* CHECKPOINT */ 564 if(pos > offset + length) { 565 goto stop_output; 566 } 567 if(pos <= offset) { 568 /* 569 * If we haven't even written to where we last left 570 * off (the last time we were called), reset the 571 * beginning pointer. 
572 */ 573 len = 0; 574 begin = pos; 575 } 576 len += sprintf(buffer+len, "Devices:\n"); 577 for(chan = 0; chan < MAX_CHANNEL; chan++) { 578 for(id = 0; id < MAX_ID; id++) { 579 d = pHba->channel[chan].device[id]; 580 while(d){ 581 len += sprintf(buffer+len,"\t%-24.24s", d->pScsi_dev->vendor); 582 len += sprintf(buffer+len," Rev: %-8.8s\n", d->pScsi_dev->rev); 583 pos = begin + len; 584 585 586 /* CHECKPOINT */ 587 if(pos > offset + length) { 588 goto stop_output; 589 } 590 if(pos <= offset) { 591 len = 0; 592 begin = pos; 593 } 594 595 unit = d->pI2o_dev->lct_data.tid; 596 len += sprintf(buffer+len, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d) (%s)\n\n", 597 unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun, 598 scsi_device_online(d->pScsi_dev)? "online":"offline"); 599 pos = begin + len; 600 601 /* CHECKPOINT */ 602 if(pos > offset + length) { 603 goto stop_output; 604 } 605 if(pos <= offset) { 606 len = 0; 607 begin = pos; 608 } 609 610 d = d->next_lun; 611 } 612 } 613 } 614 615 /* 616 * begin is where we last checked our position with regards to offset 617 * begin is always less than offset. len is relative to begin. 
It 618 * is the number of bytes written past begin 619 * 620 */ 621stop_output: 622 /* stop the output and calculate the correct length */ 623 *(buffer + len) = '\0'; 624 625 *start = buffer + (offset - begin); /* Start of wanted data */ 626 len -= (offset - begin); 627 if(len > length) { 628 len = length; 629 } else if(len < 0){ 630 len = 0; 631 **start = '\0'; 632 } 633 return len; 634} 635 636 637/*=========================================================================== 638 * Error Handling routines 639 *=========================================================================== 640 */ 641 642static int adpt_abort(struct scsi_cmnd * cmd) 643{ 644 adpt_hba* pHba = NULL; /* host bus adapter structure */ 645 struct adpt_device* dptdevice; /* dpt per device information */ 646 u32 msg[5]; 647 int rcode; 648 649 if(cmd->serial_number == 0){ 650 return FAILED; 651 } 652 pHba = (adpt_hba*) cmd->device->host->hostdata[0]; 653 printk(KERN_INFO"%s: Trying to Abort cmd=%ld\n",pHba->name, cmd->serial_number); 654 if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) { 655 printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name); 656 return FAILED; 657 } 658 659 memset(msg, 0, sizeof(msg)); 660 msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0; 661 msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid; 662 msg[2] = 0; 663 msg[3]= 0; 664 msg[4] = (u32)cmd; 665 if (pHba->host) 666 spin_lock_irq(pHba->host->host_lock); 667 rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER); 668 if (pHba->host) 669 spin_unlock_irq(pHba->host->host_lock); 670 if (rcode != 0) { 671 if(rcode == -EOPNOTSUPP ){ 672 printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name); 673 return FAILED; 674 } 675 printk(KERN_INFO"%s: Abort cmd=%ld failed.\n",pHba->name, cmd->serial_number); 676 return FAILED; 677 } 678 printk(KERN_INFO"%s: Abort cmd=%ld complete.\n",pHba->name, cmd->serial_number); 679 return SUCCESS; 680} 681 682 683#define I2O_DEVICE_RESET 0x27 684// This is the 
same for BLK and SCSI devices 685// NOTE this is wrong in the i2o.h definitions 686// This is not currently supported by our adapter but we issue it anyway 687static int adpt_device_reset(struct scsi_cmnd* cmd) 688{ 689 adpt_hba* pHba; 690 u32 msg[4]; 691 u32 rcode; 692 int old_state; 693 struct adpt_device* d = cmd->device->hostdata; 694 695 pHba = (void*) cmd->device->host->hostdata[0]; 696 printk(KERN_INFO"%s: Trying to reset device\n",pHba->name); 697 if (!d) { 698 printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name); 699 return FAILED; 700 } 701 memset(msg, 0, sizeof(msg)); 702 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0; 703 msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid); 704 msg[2] = 0; 705 msg[3] = 0; 706 707 if (pHba->host) 708 spin_lock_irq(pHba->host->host_lock); 709 old_state = d->state; 710 d->state |= DPTI_DEV_RESET; 711 rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER); 712 d->state = old_state; 713 if (pHba->host) 714 spin_unlock_irq(pHba->host->host_lock); 715 if (rcode != 0) { 716 if(rcode == -EOPNOTSUPP ){ 717 printk(KERN_INFO"%s: Device reset not supported\n",pHba->name); 718 return FAILED; 719 } 720 printk(KERN_INFO"%s: Device reset failed\n",pHba->name); 721 return FAILED; 722 } else { 723 printk(KERN_INFO"%s: Device reset successful\n",pHba->name); 724 return SUCCESS; 725 } 726} 727 728 729#define I2O_HBA_BUS_RESET 0x87 730// This version of bus reset is called by the eh_error handler 731static int adpt_bus_reset(struct scsi_cmnd* cmd) 732{ 733 adpt_hba* pHba; 734 u32 msg[4]; 735 u32 rcode; 736 737 pHba = (adpt_hba*)cmd->device->host->hostdata[0]; 738 memset(msg, 0, sizeof(msg)); 739 printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid ); 740 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0; 741 msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid); 742 msg[2] = 0; 743 msg[3] = 0; 744 if (pHba->host) 745 
spin_lock_irq(pHba->host->host_lock); 746 rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER); 747 if (pHba->host) 748 spin_unlock_irq(pHba->host->host_lock); 749 if (rcode != 0) { 750 printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name); 751 return FAILED; 752 } else { 753 printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name); 754 return SUCCESS; 755 } 756} 757 758// This version of reset is called by the eh_error_handler 759static int __adpt_reset(struct scsi_cmnd* cmd) 760{ 761 adpt_hba* pHba; 762 int rcode; 763 pHba = (adpt_hba*)cmd->device->host->hostdata[0]; 764 printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid ); 765 rcode = adpt_hba_reset(pHba); 766 if(rcode == 0){ 767 printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name); 768 return SUCCESS; 769 } else { 770 printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode); 771 return FAILED; 772 } 773} 774 775static int adpt_reset(struct scsi_cmnd* cmd) 776{ 777 int rc; 778 779 spin_lock_irq(cmd->device->host->host_lock); 780 rc = __adpt_reset(cmd); 781 spin_unlock_irq(cmd->device->host->host_lock); 782 783 return rc; 784} 785 786// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset 787static int adpt_hba_reset(adpt_hba* pHba) 788{ 789 int rcode; 790 791 pHba->state |= DPTI_STATE_RESET; 792 793 // Activate does get status , init outbound, and get hrt 794 if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) { 795 printk(KERN_ERR "%s: Could not activate\n", pHba->name); 796 adpt_i2o_delete_hba(pHba); 797 return rcode; 798 } 799 800 if ((rcode=adpt_i2o_build_sys_table()) < 0) { 801 adpt_i2o_delete_hba(pHba); 802 return rcode; 803 } 804 PDEBUG("%s: in HOLD state\n",pHba->name); 805 806 if ((rcode=adpt_i2o_online_hba(pHba)) < 0) { 807 adpt_i2o_delete_hba(pHba); 808 return rcode; 809 } 810 PDEBUG("%s: in OPERATIONAL state\n",pHba->name); 811 812 if 
((rcode=adpt_i2o_lct_get(pHba)) < 0){ 813 adpt_i2o_delete_hba(pHba); 814 return rcode; 815 } 816 817 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){ 818 adpt_i2o_delete_hba(pHba); 819 return rcode; 820 } 821 pHba->state &= ~DPTI_STATE_RESET; 822 823 adpt_fail_posted_scbs(pHba); 824 return 0; /* return success */ 825} 826 827/*=========================================================================== 828 * 829 *=========================================================================== 830 */ 831 832 833static void adpt_i2o_sys_shutdown(void) 834{ 835 adpt_hba *pHba, *pNext; 836 struct adpt_i2o_post_wait_data *p1, *old; 837 838 printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n"); 839 printk(KERN_INFO" This could take a few minutes if there are many devices attached\n"); 840 /* Delete all IOPs from the controller chain */ 841 /* They should have already been released by the 842 * scsi-core 843 */ 844 for (pHba = hba_chain; pHba; pHba = pNext) { 845 pNext = pHba->next; 846 adpt_i2o_delete_hba(pHba); 847 } 848 849 /* Remove any timedout entries from the wait queue. */ 850// spin_lock_irqsave(&adpt_post_wait_lock, flags); 851 /* Nothing should be outstanding at this point so just 852 * free them 853 */ 854 for(p1 = adpt_post_wait_queue; p1;) { 855 old = p1; 856 p1 = p1->next; 857 kfree(old); 858 } 859// spin_unlock_irqrestore(&adpt_post_wait_lock, flags); 860 adpt_post_wait_queue = NULL; 861 862 printk(KERN_INFO "Adaptec I2O controllers down.\n"); 863} 864 865/* 866 * reboot/shutdown notification. 
867 * 868 * - Quiesce each IOP in the system 869 * 870 */ 871 872#ifdef REBOOT_NOTIFIER 873static int adpt_reboot_event(struct notifier_block *n, ulong code, void *p) 874{ 875 876 if(code != SYS_RESTART && code != SYS_HALT && code != SYS_POWER_OFF) 877 return NOTIFY_DONE; 878 879 adpt_i2o_sys_shutdown(); 880 881 return NOTIFY_DONE; 882} 883#endif 884 885 886static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev) 887{ 888 889 adpt_hba* pHba = NULL; 890 adpt_hba* p = NULL; 891 ulong base_addr0_phys = 0; 892 ulong base_addr1_phys = 0; 893 u32 hba_map0_area_size = 0; 894 u32 hba_map1_area_size = 0; 895 void __iomem *base_addr_virt = NULL; 896 void __iomem *msg_addr_virt = NULL; 897 898 int raptorFlag = FALSE; 899 900 if(pci_enable_device(pDev)) { 901 return -EINVAL; 902 } 903 904 if (pci_request_regions(pDev, "dpt_i2o")) { 905 PERROR("dpti: adpt_config_hba: pci request region failed\n"); 906 return -EINVAL; 907 } 908 909 pci_set_master(pDev); 910 if (pci_set_dma_mask(pDev, DMA_64BIT_MASK) && 911 pci_set_dma_mask(pDev, DMA_32BIT_MASK)) 912 return -EINVAL; 913 914 base_addr0_phys = pci_resource_start(pDev,0); 915 hba_map0_area_size = pci_resource_len(pDev,0); 916 917 // Check if standard PCI card or single BAR Raptor 918 if(pDev->device == PCI_DPT_DEVICE_ID){ 919 if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){ 920 // Raptor card with this device id needs 4M 921 hba_map0_area_size = 0x400000; 922 } else { // Not Raptor - it is a PCI card 923 if(hba_map0_area_size > 0x100000 ){ 924 hba_map0_area_size = 0x100000; 925 } 926 } 927 } else {// Raptor split BAR config 928 // Use BAR1 in this configuration 929 base_addr1_phys = pci_resource_start(pDev,1); 930 hba_map1_area_size = pci_resource_len(pDev,1); 931 raptorFlag = TRUE; 932 } 933 934 base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size); 935 if (!base_addr_virt) { 936 pci_release_regions(pDev); 937 PERROR("dpti: adpt_config_hba: io remap failed\n"); 938 return 
-EINVAL; 939 } 940 941 if(raptorFlag == TRUE) { 942 msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size ); 943 if (!msg_addr_virt) { 944 PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n"); 945 iounmap(base_addr_virt); 946 pci_release_regions(pDev); 947 return -EINVAL; 948 } 949 } else { 950 msg_addr_virt = base_addr_virt; 951 } 952 953 // Allocate and zero the data structure 954 pHba = kmalloc(sizeof(adpt_hba), GFP_KERNEL); 955 if( pHba == NULL) { 956 if(msg_addr_virt != base_addr_virt){ 957 iounmap(msg_addr_virt); 958 } 959 iounmap(base_addr_virt); 960 pci_release_regions(pDev); 961 return -ENOMEM; 962 } 963 memset(pHba, 0, sizeof(adpt_hba)); 964 965 mutex_lock(&adpt_configuration_lock); 966 967 if(hba_chain != NULL){ 968 for(p = hba_chain; p->next; p = p->next); 969 p->next = pHba; 970 } else { 971 hba_chain = pHba; 972 } 973 pHba->next = NULL; 974 pHba->unit = hba_count; 975 sprintf(pHba->name, "dpti%d", hba_count); 976 hba_count++; 977 978 mutex_unlock(&adpt_configuration_lock); 979 980 pHba->pDev = pDev; 981 pHba->base_addr_phys = base_addr0_phys; 982 983 // Set up the Virtual Base Address of the I2O Device 984 pHba->base_addr_virt = base_addr_virt; 985 pHba->msg_addr_virt = msg_addr_virt; 986 pHba->irq_mask = base_addr_virt+0x30; 987 pHba->post_port = base_addr_virt+0x40; 988 pHba->reply_port = base_addr_virt+0x44; 989 990 pHba->hrt = NULL; 991 pHba->lct = NULL; 992 pHba->lct_size = 0; 993 pHba->status_block = NULL; 994 pHba->post_count = 0; 995 pHba->state = DPTI_STATE_RESET; 996 pHba->pDev = pDev; 997 pHba->devices = NULL; 998 999 // Initializing the spinlocks 1000 spin_lock_init(&pHba->state_lock); 1001 spin_lock_init(&adpt_post_wait_lock); 1002 1003 if(raptorFlag == 0){ 1004 printk(KERN_INFO"Adaptec I2O RAID controller %d at %p size=%x irq=%d\n", 1005 hba_count-1, base_addr_virt, hba_map0_area_size, pDev->irq); 1006 } else { 1007 printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d\n",hba_count-1, pDev->irq); 1008 printk(KERN_INFO" BAR0 
%p - size= %x\n",base_addr_virt,hba_map0_area_size); 1009 printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size); 1010 } 1011 1012 if (request_irq (pDev->irq, adpt_isr, SA_SHIRQ, pHba->name, pHba)) { 1013 printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq); 1014 adpt_i2o_delete_hba(pHba); 1015 return -EINVAL; 1016 } 1017 1018 return 0; 1019} 1020 1021 1022static void adpt_i2o_delete_hba(adpt_hba* pHba) 1023{ 1024 adpt_hba* p1; 1025 adpt_hba* p2; 1026 struct i2o_device* d; 1027 struct i2o_device* next; 1028 int i; 1029 int j; 1030 struct adpt_device* pDev; 1031 struct adpt_device* pNext; 1032 1033 1034 mutex_lock(&adpt_configuration_lock); 1035 // scsi_unregister calls our adpt_release which 1036 // does a quiese 1037 if(pHba->host){ 1038 free_irq(pHba->host->irq, pHba); 1039 } 1040 p2 = NULL; 1041 for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){ 1042 if(p1 == pHba) { 1043 if(p2) { 1044 p2->next = p1->next; 1045 } else { 1046 hba_chain = p1->next; 1047 } 1048 break; 1049 } 1050 } 1051 1052 hba_count--; 1053 mutex_unlock(&adpt_configuration_lock); 1054 1055 iounmap(pHba->base_addr_virt); 1056 pci_release_regions(pHba->pDev); 1057 if(pHba->msg_addr_virt != pHba->base_addr_virt){ 1058 iounmap(pHba->msg_addr_virt); 1059 } 1060 kfree(pHba->hrt); 1061 kfree(pHba->lct); 1062 kfree(pHba->status_block); 1063 kfree(pHba->reply_pool); 1064 1065 for(d = pHba->devices; d ; d = next){ 1066 next = d->next; 1067 kfree(d); 1068 } 1069 for(i = 0 ; i < pHba->top_scsi_channel ; i++){ 1070 for(j = 0; j < MAX_ID; j++){ 1071 if(pHba->channel[i].device[j] != NULL){ 1072 for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){ 1073 pNext = pDev->next_lun; 1074 kfree(pDev); 1075 } 1076 } 1077 } 1078 } 1079 kfree(pHba); 1080 1081 if(hba_count <= 0){ 1082 unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER); 1083 } 1084} 1085 1086 1087static int adpt_init(void) 1088{ 1089 printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n"); 1090#ifdef 
REBOOT_NOTIFIER 1091 register_reboot_notifier(&adpt_reboot_notifier); 1092#endif 1093 1094 return 0; 1095} 1096 1097 1098static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun) 1099{ 1100 struct adpt_device* d; 1101 1102 if(chan < 0 || chan >= MAX_CHANNEL) 1103 return NULL; 1104 1105 if( pHba->channel[chan].device == NULL){ 1106 printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n"); 1107 return NULL; 1108 } 1109 1110 d = pHba->channel[chan].device[id]; 1111 if(!d || d->tid == 0) { 1112 return NULL; 1113 } 1114 1115 /* If it is the only lun at that address then this should match*/ 1116 if(d->scsi_lun == lun){ 1117 return d; 1118 } 1119 1120 /* else we need to look through all the luns */ 1121 for(d=d->next_lun ; d ; d = d->next_lun){ 1122 if(d->scsi_lun == lun){ 1123 return d; 1124 } 1125 } 1126 return NULL; 1127} 1128 1129 1130static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout) 1131{ 1132 // I used my own version of the WAIT_QUEUE_HEAD 1133 // to handle some version differences 1134 // When embedded in the kernel this could go back to the vanilla one 1135 ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post); 1136 int status = 0; 1137 ulong flags = 0; 1138 struct adpt_i2o_post_wait_data *p1, *p2; 1139 struct adpt_i2o_post_wait_data *wait_data = 1140 kmalloc(sizeof(struct adpt_i2o_post_wait_data),GFP_KERNEL); 1141 DECLARE_WAITQUEUE(wait, current); 1142 1143 if (!wait_data) 1144 return -ENOMEM; 1145 1146 /* 1147 * The spin locking is needed to keep anyone from playing 1148 * with the queue pointers and id while we do the same 1149 */ 1150 spin_lock_irqsave(&adpt_post_wait_lock, flags); 1151 // TODO we need a MORE unique way of getting ids 1152 // to support async LCT get 1153 wait_data->next = adpt_post_wait_queue; 1154 adpt_post_wait_queue = wait_data; 1155 adpt_post_wait_id++; 1156 adpt_post_wait_id &= 0x7fff; 1157 wait_data->id = adpt_post_wait_id; 1158 
spin_unlock_irqrestore(&adpt_post_wait_lock, flags); 1159 1160 wait_data->wq = &adpt_wq_i2o_post; 1161 wait_data->status = -ETIMEDOUT; 1162 1163 add_wait_queue(&adpt_wq_i2o_post, &wait); 1164 1165 msg[2] |= 0x80000000 | ((u32)wait_data->id); 1166 timeout *= HZ; 1167 if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){ 1168 set_current_state(TASK_INTERRUPTIBLE); 1169 if(pHba->host) 1170 spin_unlock_irq(pHba->host->host_lock); 1171 if (!timeout) 1172 schedule(); 1173 else{ 1174 timeout = schedule_timeout(timeout); 1175 if (timeout == 0) { 1176 // I/O issued, but cannot get result in 1177 // specified time. Freeing resorces is 1178 // dangerous. 1179 status = -ETIME; 1180 } 1181 } 1182 if(pHba->host) 1183 spin_lock_irq(pHba->host->host_lock); 1184 } 1185 remove_wait_queue(&adpt_wq_i2o_post, &wait); 1186 1187 if(status == -ETIMEDOUT){ 1188 printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit); 1189 // We will have to free the wait_data memory during shutdown 1190 return status; 1191 } 1192 1193 /* Remove the entry from the queue. 
*/ 1194 p2 = NULL; 1195 spin_lock_irqsave(&adpt_post_wait_lock, flags); 1196 for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) { 1197 if(p1 == wait_data) { 1198 if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) { 1199 status = -EOPNOTSUPP; 1200 } 1201 if(p2) { 1202 p2->next = p1->next; 1203 } else { 1204 adpt_post_wait_queue = p1->next; 1205 } 1206 break; 1207 } 1208 } 1209 spin_unlock_irqrestore(&adpt_post_wait_lock, flags); 1210 1211 kfree(wait_data); 1212 1213 return status; 1214} 1215 1216 1217static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len) 1218{ 1219 1220 u32 m = EMPTY_QUEUE; 1221 u32 __iomem *msg; 1222 ulong timeout = jiffies + 30*HZ; 1223 do { 1224 rmb(); 1225 m = readl(pHba->post_port); 1226 if (m != EMPTY_QUEUE) { 1227 break; 1228 } 1229 if(time_after(jiffies,timeout)){ 1230 printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit); 1231 return -ETIMEDOUT; 1232 } 1233 schedule_timeout_uninterruptible(1); 1234 } while(m == EMPTY_QUEUE); 1235 1236 msg = pHba->msg_addr_virt + m; 1237 memcpy_toio(msg, data, len); 1238 wmb(); 1239 1240 //post message 1241 writel(m, pHba->post_port); 1242 wmb(); 1243 1244 return 0; 1245} 1246 1247 1248static void adpt_i2o_post_wait_complete(u32 context, int status) 1249{ 1250 struct adpt_i2o_post_wait_data *p1 = NULL; 1251 /* 1252 * We need to search through the adpt_post_wait 1253 * queue to see if the given message is still 1254 * outstanding. If not, it means that the IOP 1255 * took longer to respond to the message than we 1256 * had allowed and timer has already expired. 1257 * Not much we can do about that except log 1258 * it for debug purposes, increase timeout, and recompile 1259 * 1260 * Lock needed to keep anyone from moving queue pointers 1261 * around while we're looking through them. 
1262 */ 1263 1264 context &= 0x7fff; 1265 1266 spin_lock(&adpt_post_wait_lock); 1267 for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) { 1268 if(p1->id == context) { 1269 p1->status = status; 1270 spin_unlock(&adpt_post_wait_lock); 1271 wake_up_interruptible(p1->wq); 1272 return; 1273 } 1274 } 1275 spin_unlock(&adpt_post_wait_lock); 1276 // If this happens we lose commands that probably really completed 1277 printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context); 1278 printk(KERN_DEBUG" Tasks in wait queue:\n"); 1279 for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) { 1280 printk(KERN_DEBUG" %d\n",p1->id); 1281 } 1282 return; 1283} 1284 1285static s32 adpt_i2o_reset_hba(adpt_hba* pHba) 1286{ 1287 u32 msg[8]; 1288 u8* status; 1289 u32 m = EMPTY_QUEUE ; 1290 ulong timeout = jiffies + (TMOUT_IOPRESET*HZ); 1291 1292 if(pHba->initialized == FALSE) { // First time reset should be quick 1293 timeout = jiffies + (25*HZ); 1294 } else { 1295 adpt_i2o_quiesce_hba(pHba); 1296 } 1297 1298 do { 1299 rmb(); 1300 m = readl(pHba->post_port); 1301 if (m != EMPTY_QUEUE) { 1302 break; 1303 } 1304 if(time_after(jiffies,timeout)){ 1305 printk(KERN_WARNING"Timeout waiting for message!\n"); 1306 return -ETIMEDOUT; 1307 } 1308 schedule_timeout_uninterruptible(1); 1309 } while (m == EMPTY_QUEUE); 1310 1311 status = (u8*)kmalloc(4, GFP_KERNEL|ADDR32); 1312 if(status == NULL) { 1313 adpt_send_nop(pHba, m); 1314 printk(KERN_ERR"IOP reset failed - no free memory.\n"); 1315 return -ENOMEM; 1316 } 1317 memset(status,0,4); 1318 1319 msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0; 1320 msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID; 1321 msg[2]=0; 1322 msg[3]=0; 1323 msg[4]=0; 1324 msg[5]=0; 1325 msg[6]=virt_to_bus(status); 1326 msg[7]=0; 1327 1328 memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg)); 1329 wmb(); 1330 writel(m, pHba->post_port); 1331 wmb(); 1332 1333 while(*status == 0){ 1334 if(time_after(jiffies,timeout)){ 1335 printk(KERN_WARNING"%s: IOP Reset 
Timeout\n",pHba->name); 1336 kfree(status); 1337 return -ETIMEDOUT; 1338 } 1339 rmb(); 1340 schedule_timeout_uninterruptible(1); 1341 } 1342 1343 if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) { 1344 PDEBUG("%s: Reset in progress...\n", pHba->name); 1345 // Here we wait for message frame to become available 1346 // indicated that reset has finished 1347 do { 1348 rmb(); 1349 m = readl(pHba->post_port); 1350 if (m != EMPTY_QUEUE) { 1351 break; 1352 } 1353 if(time_after(jiffies,timeout)){ 1354 printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name); 1355 return -ETIMEDOUT; 1356 } 1357 schedule_timeout_uninterruptible(1); 1358 } while (m == EMPTY_QUEUE); 1359 // Flush the offset 1360 adpt_send_nop(pHba, m); 1361 } 1362 adpt_i2o_status_get(pHba); 1363 if(*status == 0x02 || 1364 pHba->status_block->iop_state != ADAPTER_STATE_RESET) { 1365 printk(KERN_WARNING"%s: Reset reject, trying to clear\n", 1366 pHba->name); 1367 } else { 1368 PDEBUG("%s: Reset completed.\n", pHba->name); 1369 } 1370 1371 kfree(status); 1372#ifdef UARTDELAY 1373 // This delay is to allow someone attached to the card through the debug UART to 1374 // set up the dump levels that they want before the rest of the initialization sequence 1375 adpt_delay(20000); 1376#endif 1377 return 0; 1378} 1379 1380 1381static int adpt_i2o_parse_lct(adpt_hba* pHba) 1382{ 1383 int i; 1384 int max; 1385 int tid; 1386 struct i2o_device *d; 1387 i2o_lct *lct = pHba->lct; 1388 u8 bus_no = 0; 1389 s16 scsi_id; 1390 s16 scsi_lun; 1391 u32 buf[10]; // larger than 7, or 8 ... 
1392 struct adpt_device* pDev; 1393 1394 if (lct == NULL) { 1395 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name); 1396 return -1; 1397 } 1398 1399 max = lct->table_size; 1400 max -= 3; 1401 max /= 9; 1402 1403 for(i=0;i<max;i++) { 1404 if( lct->lct_entry[i].user_tid != 0xfff){ 1405 /* 1406 * If we have hidden devices, we need to inform the upper layers about 1407 * the possible maximum id reference to handle device access when 1408 * an array is disassembled. This code has no other purpose but to 1409 * allow us future access to devices that are currently hidden 1410 * behind arrays, hotspares or have not been configured (JBOD mode). 1411 */ 1412 if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE && 1413 lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL && 1414 lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){ 1415 continue; 1416 } 1417 tid = lct->lct_entry[i].tid; 1418 // I2O_DPT_DEVICE_INFO_GROUP_NO; 1419 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) { 1420 continue; 1421 } 1422 bus_no = buf[0]>>16; 1423 scsi_id = buf[1]; 1424 scsi_lun = (buf[2]>>8 )&0xff; 1425 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it 1426 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no); 1427 continue; 1428 } 1429 if (scsi_id >= MAX_ID){ 1430 printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no); 1431 continue; 1432 } 1433 if(bus_no > pHba->top_scsi_channel){ 1434 pHba->top_scsi_channel = bus_no; 1435 } 1436 if(scsi_id > pHba->top_scsi_id){ 1437 pHba->top_scsi_id = scsi_id; 1438 } 1439 if(scsi_lun > pHba->top_scsi_lun){ 1440 pHba->top_scsi_lun = scsi_lun; 1441 } 1442 continue; 1443 } 1444 d = (struct i2o_device *)kmalloc(sizeof(struct i2o_device), GFP_KERNEL); 1445 if(d==NULL) 1446 { 1447 printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name); 1448 return -ENOMEM; 1449 } 1450 1451 d->controller = pHba; 1452 d->next = NULL; 1453 1454 memcpy(&d->lct_data, 
&lct->lct_entry[i], sizeof(i2o_lct_entry)); 1455 1456 d->flags = 0; 1457 tid = d->lct_data.tid; 1458 adpt_i2o_report_hba_unit(pHba, d); 1459 adpt_i2o_install_device(pHba, d); 1460 } 1461 bus_no = 0; 1462 for(d = pHba->devices; d ; d = d->next) { 1463 if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT || 1464 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){ 1465 tid = d->lct_data.tid; 1466 // TODO get the bus_no from hrt-but for now they are in order 1467 //bus_no = 1468 if(bus_no > pHba->top_scsi_channel){ 1469 pHba->top_scsi_channel = bus_no; 1470 } 1471 pHba->channel[bus_no].type = d->lct_data.class_id; 1472 pHba->channel[bus_no].tid = tid; 1473 if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0) 1474 { 1475 pHba->channel[bus_no].scsi_id = buf[1]; 1476 PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]); 1477 } 1478 // TODO remove - this is just until we get from hrt 1479 bus_no++; 1480 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it 1481 printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no); 1482 break; 1483 } 1484 } 1485 } 1486 1487 // Setup adpt_device table 1488 for(d = pHba->devices; d ; d = d->next) { 1489 if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE || 1490 d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL || 1491 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){ 1492 1493 tid = d->lct_data.tid; 1494 scsi_id = -1; 1495 // I2O_DPT_DEVICE_INFO_GROUP_NO; 1496 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) { 1497 bus_no = buf[0]>>16; 1498 scsi_id = buf[1]; 1499 scsi_lun = (buf[2]>>8 )&0xff; 1500 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it 1501 continue; 1502 } 1503 if (scsi_id >= MAX_ID) { 1504 continue; 1505 } 1506 if( pHba->channel[bus_no].device[scsi_id] == NULL){ 1507 pDev = kmalloc(sizeof(struct adpt_device),GFP_KERNEL); 1508 if(pDev == NULL) { 1509 return -ENOMEM; 1510 } 1511 pHba->channel[bus_no].device[scsi_id] = pDev; 1512 memset(pDev,0,sizeof(struct 
adpt_device)); 1513 } else { 1514 for( pDev = pHba->channel[bus_no].device[scsi_id]; 1515 pDev->next_lun; pDev = pDev->next_lun){ 1516 } 1517 pDev->next_lun = kmalloc(sizeof(struct adpt_device),GFP_KERNEL); 1518 if(pDev->next_lun == NULL) { 1519 return -ENOMEM; 1520 } 1521 memset(pDev->next_lun,0,sizeof(struct adpt_device)); 1522 pDev = pDev->next_lun; 1523 } 1524 pDev->tid = tid; 1525 pDev->scsi_channel = bus_no; 1526 pDev->scsi_id = scsi_id; 1527 pDev->scsi_lun = scsi_lun; 1528 pDev->pI2o_dev = d; 1529 d->owner = pDev; 1530 pDev->type = (buf[0])&0xff; 1531 pDev->flags = (buf[0]>>8)&0xff; 1532 if(scsi_id > pHba->top_scsi_id){ 1533 pHba->top_scsi_id = scsi_id; 1534 } 1535 if(scsi_lun > pHba->top_scsi_lun){ 1536 pHba->top_scsi_lun = scsi_lun; 1537 } 1538 } 1539 if(scsi_id == -1){ 1540 printk(KERN_WARNING"Could not find SCSI ID for %s\n", 1541 d->lct_data.identity_tag); 1542 } 1543 } 1544 } 1545 return 0; 1546} 1547 1548 1549/* 1550 * Each I2O controller has a chain of devices on it - these match 1551 * the useful parts of the LCT of the board. 
1552 */ 1553 1554static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d) 1555{ 1556 mutex_lock(&adpt_configuration_lock); 1557 d->controller=pHba; 1558 d->owner=NULL; 1559 d->next=pHba->devices; 1560 d->prev=NULL; 1561 if (pHba->devices != NULL){ 1562 pHba->devices->prev=d; 1563 } 1564 pHba->devices=d; 1565 *d->dev_name = 0; 1566 1567 mutex_unlock(&adpt_configuration_lock); 1568 return 0; 1569} 1570 1571static int adpt_open(struct inode *inode, struct file *file) 1572{ 1573 int minor; 1574 adpt_hba* pHba; 1575 1576 //TODO check for root access 1577 // 1578 minor = iminor(inode); 1579 if (minor >= hba_count) { 1580 return -ENXIO; 1581 } 1582 mutex_lock(&adpt_configuration_lock); 1583 for (pHba = hba_chain; pHba; pHba = pHba->next) { 1584 if (pHba->unit == minor) { 1585 break; /* found adapter */ 1586 } 1587 } 1588 if (pHba == NULL) { 1589 mutex_unlock(&adpt_configuration_lock); 1590 return -ENXIO; 1591 } 1592 1593// if(pHba->in_use){ 1594 // mutex_unlock(&adpt_configuration_lock); 1595// return -EBUSY; 1596// } 1597 1598 pHba->in_use = 1; 1599 mutex_unlock(&adpt_configuration_lock); 1600 1601 return 0; 1602} 1603 1604static int adpt_close(struct inode *inode, struct file *file) 1605{ 1606 int minor; 1607 adpt_hba* pHba; 1608 1609 minor = iminor(inode); 1610 if (minor >= hba_count) { 1611 return -ENXIO; 1612 } 1613 mutex_lock(&adpt_configuration_lock); 1614 for (pHba = hba_chain; pHba; pHba = pHba->next) { 1615 if (pHba->unit == minor) { 1616 break; /* found adapter */ 1617 } 1618 } 1619 mutex_unlock(&adpt_configuration_lock); 1620 if (pHba == NULL) { 1621 return -ENXIO; 1622 } 1623 1624 pHba->in_use = 0; 1625 1626 return 0; 1627} 1628 1629 1630static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg) 1631{ 1632 u32 msg[MAX_MESSAGE_SIZE]; 1633 u32* reply = NULL; 1634 u32 size = 0; 1635 u32 reply_size = 0; 1636 u32 __user *user_msg = arg; 1637 u32 __user * user_reply = NULL; 1638 void *sg_list[pHba->sg_tablesize]; 1639 u32 sg_offset = 0; 1640 u32 
sg_count = 0; 1641 int sg_index = 0; 1642 u32 i = 0; 1643 u32 rcode = 0; 1644 void *p = NULL; 1645 ulong flags = 0; 1646 1647 memset(&msg, 0, MAX_MESSAGE_SIZE*4); 1648 // get user msg size in u32s 1649 if(get_user(size, &user_msg[0])){ 1650 return -EFAULT; 1651 } 1652 size = size>>16; 1653 1654 user_reply = &user_msg[size]; 1655 if(size > MAX_MESSAGE_SIZE){ 1656 return -EFAULT; 1657 } 1658 size *= 4; // Convert to bytes 1659 1660 /* Copy in the user's I2O command */ 1661 if(copy_from_user(msg, user_msg, size)) { 1662 return -EFAULT; 1663 } 1664 get_user(reply_size, &user_reply[0]); 1665 reply_size = reply_size>>16; 1666 if(reply_size > REPLY_FRAME_SIZE){ 1667 reply_size = REPLY_FRAME_SIZE; 1668 } 1669 reply_size *= 4; 1670 reply = kmalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL); 1671 if(reply == NULL) { 1672 printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name); 1673 return -ENOMEM; 1674 } 1675 memset(reply,0,REPLY_FRAME_SIZE*4); 1676 sg_offset = (msg[0]>>4)&0xf; 1677 msg[2] = 0x40000000; // IOCTL context 1678 msg[3] = (u32)reply; 1679 memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize); 1680 if(sg_offset) { 1681 // TODO 64bit fix 1682 struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset); 1683 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element); 1684 if (sg_count > pHba->sg_tablesize){ 1685 printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count); 1686 kfree (reply); 1687 return -EINVAL; 1688 } 1689 1690 for(i = 0; i < sg_count; i++) { 1691 int sg_size; 1692 1693 if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) { 1694 printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count); 1695 rcode = -EINVAL; 1696 goto cleanup; 1697 } 1698 sg_size = sg[i].flag_count & 0xffffff; 1699 /* Allocate memory for the transfer */ 1700 p = kmalloc(sg_size, GFP_KERNEL|ADDR32); 1701 if(!p) { 1702 printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d 
buffer number %d of %d\n", 1703 pHba->name,sg_size,i,sg_count); 1704 rcode = -ENOMEM; 1705 goto cleanup; 1706 } 1707 sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame. 1708 /* Copy in the user's SG buffer if necessary */ 1709 if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) { 1710 // TODO 64bit fix 1711 if (copy_from_user(p,(void __user *)sg[i].addr_bus, sg_size)) { 1712 printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i); 1713 rcode = -EFAULT; 1714 goto cleanup; 1715 } 1716 } 1717 //TODO 64bit fix 1718 sg[i].addr_bus = (u32)virt_to_bus(p); 1719 } 1720 } 1721 1722 do { 1723 if(pHba->host) 1724 spin_lock_irqsave(pHba->host->host_lock, flags); 1725 // This state stops any new commands from enterring the 1726 // controller while processing the ioctl 1727// pHba->state |= DPTI_STATE_IOCTL; 1728// We can't set this now - The scsi subsystem sets host_blocked and 1729// the queue empties and stops. We need a way to restart the queue 1730 rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER); 1731 if (rcode != 0) 1732 printk("adpt_i2o_passthru: post wait failed %d %p\n", 1733 rcode, reply); 1734// pHba->state &= ~DPTI_STATE_IOCTL; 1735 if(pHba->host) 1736 spin_unlock_irqrestore(pHba->host->host_lock, flags); 1737 } while(rcode == -ETIMEDOUT); 1738 1739 if(rcode){ 1740 goto cleanup; 1741 } 1742 1743 if(sg_offset) { 1744 /* Copy back the Scatter Gather buffers back to user space */ 1745 u32 j; 1746 // TODO 64bit fix 1747 struct sg_simple_element* sg; 1748 int sg_size; 1749 1750 // re-acquire the original message to handle correctly the sg copy operation 1751 memset(&msg, 0, MAX_MESSAGE_SIZE*4); 1752 // get user msg size in u32s 1753 if(get_user(size, &user_msg[0])){ 1754 rcode = -EFAULT; 1755 goto cleanup; 1756 } 1757 size = size>>16; 1758 size *= 4; 1759 /* Copy in the user's I2O command */ 1760 if (copy_from_user (msg, user_msg, size)) { 1761 rcode = -EFAULT; 1762 goto cleanup; 1763 } 1764 sg_count = (size - 
sg_offset*4) / sizeof(struct sg_simple_element); 1765 1766 // TODO 64bit fix 1767 sg = (struct sg_simple_element*)(msg + sg_offset); 1768 for (j = 0; j < sg_count; j++) { 1769 /* Copy out the SG list to user's buffer if necessary */ 1770 if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) { 1771 sg_size = sg[j].flag_count & 0xffffff; 1772 // TODO 64bit fix 1773 if (copy_to_user((void __user *)sg[j].addr_bus,sg_list[j], sg_size)) { 1774 printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus); 1775 rcode = -EFAULT; 1776 goto cleanup; 1777 } 1778 } 1779 } 1780 } 1781 1782 /* Copy back the reply to user space */ 1783 if (reply_size) { 1784 // we wrote our own values for context - now restore the user supplied ones 1785 if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) { 1786 printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name); 1787 rcode = -EFAULT; 1788 } 1789 if(copy_to_user(user_reply, reply, reply_size)) { 1790 printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name); 1791 rcode = -EFAULT; 1792 } 1793 } 1794 1795 1796cleanup: 1797 if (rcode != -ETIME && rcode != -EINTR) 1798 kfree (reply); 1799 while(sg_index) { 1800 if(sg_list[--sg_index]) { 1801 if (rcode != -ETIME && rcode != -EINTR) 1802 kfree(sg_list[sg_index]); 1803 } 1804 } 1805 return rcode; 1806} 1807 1808 1809/* 1810 * This routine returns information about the system. This does not effect 1811 * any logic and if the info is wrong - it doesn't matter. 
1812 */ 1813 1814/* Get all the info we can not get from kernel services */ 1815static int adpt_system_info(void __user *buffer) 1816{ 1817 sysInfo_S si; 1818 1819 memset(&si, 0, sizeof(si)); 1820 1821 si.osType = OS_LINUX; 1822 si.osMajorVersion = 0; 1823 si.osMinorVersion = 0; 1824 si.osRevision = 0; 1825 si.busType = SI_PCI_BUS; 1826 si.processorFamily = DPTI_sig.dsProcessorFamily; 1827 1828#if defined __i386__ 1829 adpt_i386_info(&si); 1830#elif defined (__ia64__) 1831 adpt_ia64_info(&si); 1832#elif defined(__sparc__) 1833 adpt_sparc_info(&si); 1834#elif defined (__alpha__) 1835 adpt_alpha_info(&si); 1836#else 1837 si.processorType = 0xff ; 1838#endif 1839 if(copy_to_user(buffer, &si, sizeof(si))){ 1840 printk(KERN_WARNING"dpti: Could not copy buffer TO user\n"); 1841 return -EFAULT; 1842 } 1843 1844 return 0; 1845} 1846 1847#if defined __ia64__ 1848static void adpt_ia64_info(sysInfo_S* si) 1849{ 1850 // This is all the info we need for now 1851 // We will add more info as our new 1852 // managmenent utility requires it 1853 si->processorType = PROC_IA64; 1854} 1855#endif 1856 1857 1858#if defined __sparc__ 1859static void adpt_sparc_info(sysInfo_S* si) 1860{ 1861 // This is all the info we need for now 1862 // We will add more info as our new 1863 // managmenent utility requires it 1864 si->processorType = PROC_ULTRASPARC; 1865} 1866#endif 1867 1868#if defined __alpha__ 1869static void adpt_alpha_info(sysInfo_S* si) 1870{ 1871 // This is all the info we need for now 1872 // We will add more info as our new 1873 // managmenent utility requires it 1874 si->processorType = PROC_ALPHA; 1875} 1876#endif 1877 1878#if defined __i386__ 1879 1880static void adpt_i386_info(sysInfo_S* si) 1881{ 1882 // This is all the info we need for now 1883 // We will add more info as our new 1884 // managmenent utility requires it 1885 switch (boot_cpu_data.x86) { 1886 case CPU_386: 1887 si->processorType = PROC_386; 1888 break; 1889 case CPU_486: 1890 si->processorType = PROC_486; 
1891 break; 1892 case CPU_586: 1893 si->processorType = PROC_PENTIUM; 1894 break; 1895 default: // Just in case 1896 si->processorType = PROC_PENTIUM; 1897 break; 1898 } 1899} 1900 1901#endif 1902 1903 1904static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, 1905 ulong arg) 1906{ 1907 int minor; 1908 int error = 0; 1909 adpt_hba* pHba; 1910 ulong flags = 0; 1911 void __user *argp = (void __user *)arg; 1912 1913 minor = iminor(inode); 1914 if (minor >= DPTI_MAX_HBA){ 1915 return -ENXIO; 1916 } 1917 mutex_lock(&adpt_configuration_lock); 1918 for (pHba = hba_chain; pHba; pHba = pHba->next) { 1919 if (pHba->unit == minor) { 1920 break; /* found adapter */ 1921 } 1922 } 1923 mutex_unlock(&adpt_configuration_lock); 1924 if(pHba == NULL){ 1925 return -ENXIO; 1926 } 1927 1928 while((volatile u32) pHba->state & DPTI_STATE_RESET ) 1929 schedule_timeout_uninterruptible(2); 1930 1931 switch (cmd) { 1932 // TODO: handle 3 cases 1933 case DPT_SIGNATURE: 1934 if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) { 1935 return -EFAULT; 1936 } 1937 break; 1938 case I2OUSRCMD: 1939 return adpt_i2o_passthru(pHba, argp); 1940 1941 case DPT_CTRLINFO:{ 1942 drvrHBAinfo_S HbaInfo; 1943 1944#define FLG_OSD_PCI_VALID 0x0001 1945#define FLG_OSD_DMA 0x0002 1946#define FLG_OSD_I2O 0x0004 1947 memset(&HbaInfo, 0, sizeof(HbaInfo)); 1948 HbaInfo.drvrHBAnum = pHba->unit; 1949 HbaInfo.baseAddr = (ulong) pHba->base_addr_phys; 1950 HbaInfo.blinkState = adpt_read_blink_led(pHba); 1951 HbaInfo.pciBusNum = pHba->pDev->bus->number; 1952 HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn); 1953 HbaInfo.Interrupt = pHba->pDev->irq; 1954 HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O; 1955 if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){ 1956 printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name); 1957 return -EFAULT; 1958 } 1959 break; 1960 } 1961 case DPT_SYSINFO: 1962 return adpt_system_info(argp); 1963 case DPT_BLINKLED:{ 1964 u32 value; 1965 value = 
(u32)adpt_read_blink_led(pHba); 1966 if (copy_to_user(argp, &value, sizeof(value))) { 1967 return -EFAULT; 1968 } 1969 break; 1970 } 1971 case I2ORESETCMD: 1972 if(pHba->host) 1973 spin_lock_irqsave(pHba->host->host_lock, flags); 1974 adpt_hba_reset(pHba); 1975 if(pHba->host) 1976 spin_unlock_irqrestore(pHba->host->host_lock, flags); 1977 break; 1978 case I2ORESCANCMD: 1979 adpt_rescan(pHba); 1980 break; 1981 default: 1982 return -EINVAL; 1983 } 1984 1985 return error; 1986} 1987 1988 1989static irqreturn_t adpt_isr(int irq, void *dev_id, struct pt_regs *regs) 1990{ 1991 struct scsi_cmnd* cmd; 1992 adpt_hba* pHba = dev_id; 1993 u32 m; 1994 void __iomem *reply; 1995 u32 status=0; 1996 u32 context; 1997 ulong flags = 0; 1998 int handled = 0; 1999 2000 if (pHba == NULL){ 2001 printk(KERN_WARNING"adpt_isr: NULL dev_id\n"); 2002 return IRQ_NONE; 2003 } 2004 if(pHba->host) 2005 spin_lock_irqsave(pHba->host->host_lock, flags); 2006 2007 while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) { 2008 m = readl(pHba->reply_port); 2009 if(m == EMPTY_QUEUE){ 2010 // Try twice then give up 2011 rmb(); 2012 m = readl(pHba->reply_port); 2013 if(m == EMPTY_QUEUE){ 2014 // This really should not happen 2015 printk(KERN_ERR"dpti: Could not get reply frame\n"); 2016 goto out; 2017 } 2018 } 2019 reply = bus_to_virt(m); 2020 2021 if (readl(reply) & MSG_FAIL) { 2022 u32 old_m = readl(reply+28); 2023 void __iomem *msg; 2024 u32 old_context; 2025 PDEBUG("%s: Failed message\n",pHba->name); 2026 if(old_m >= 0x100000){ 2027 printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m); 2028 writel(m,pHba->reply_port); 2029 continue; 2030 } 2031 // Transaction context is 0 in failed reply frame 2032 msg = pHba->msg_addr_virt + old_m; 2033 old_context = readl(msg+12); 2034 writel(old_context, reply+12); 2035 adpt_send_nop(pHba, old_m); 2036 } 2037 context = readl(reply+8); 2038 if(context & 0x40000000){ // IOCTL 2039 void *p = (void *)readl(reply+12); 2040 if( p != NULL) { 
2041 memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4); 2042 } 2043 // All IOCTLs will also be post wait 2044 } 2045 if(context & 0x80000000){ // Post wait message 2046 status = readl(reply+16); 2047 if(status >> 24){ 2048 status &= 0xffff; /* Get detail status */ 2049 } else { 2050 status = I2O_POST_WAIT_OK; 2051 } 2052 if(!(context & 0x40000000)) { 2053 cmd = (struct scsi_cmnd*) readl(reply+12); 2054 if(cmd != NULL) { 2055 printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context); 2056 } 2057 } 2058 adpt_i2o_post_wait_complete(context, status); 2059 } else { // SCSI message 2060 cmd = (struct scsi_cmnd*) readl(reply+12); 2061 if(cmd != NULL){ 2062 if(cmd->serial_number != 0) { // If not timedout 2063 adpt_i2o_to_scsi(reply, cmd); 2064 } 2065 } 2066 } 2067 writel(m, pHba->reply_port); 2068 wmb(); 2069 rmb(); 2070 } 2071 handled = 1; 2072out: if(pHba->host) 2073 spin_unlock_irqrestore(pHba->host->host_lock, flags); 2074 return IRQ_RETVAL(handled); 2075} 2076 2077static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d) 2078{ 2079 int i; 2080 u32 msg[MAX_MESSAGE_SIZE]; 2081 u32* mptr; 2082 u32 *lenptr; 2083 int direction; 2084 int scsidir; 2085 u32 len; 2086 u32 reqlen; 2087 s32 rcode; 2088 2089 memset(msg, 0 , sizeof(msg)); 2090 len = cmd->request_bufflen; 2091 direction = 0x00000000; 2092 2093 scsidir = 0x00000000; // DATA NO XFER 2094 if(len) { 2095 /* 2096 * Set SCBFlags to indicate if data is being transferred 2097 * in or out, or no data transfer 2098 * Note: Do not have to verify index is less than 0 since 2099 * cmd->cmnd[0] is an unsigned char 2100 */ 2101 switch(cmd->sc_data_direction){ 2102 case DMA_FROM_DEVICE: 2103 scsidir =0x40000000; // DATA IN (iop<--dev) 2104 break; 2105 case DMA_TO_DEVICE: 2106 direction=0x04000000; // SGL OUT 2107 scsidir =0x80000000; // DATA OUT (iop-->dev) 2108 break; 2109 case DMA_NONE: 2110 break; 2111 case DMA_BIDIRECTIONAL: 2112 scsidir 
=0x40000000; // DATA IN (iop<--dev) 2113 // Assume In - and continue; 2114 break; 2115 default: 2116 printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n", 2117 pHba->name, cmd->cmnd[0]); 2118 cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8); 2119 cmd->scsi_done(cmd); 2120 return 0; 2121 } 2122 } 2123 // msg[0] is set later 2124 // I2O_CMD_SCSI_EXEC 2125 msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid); 2126 msg[2] = 0; 2127 msg[3] = (u32)cmd; /* We want the SCSI control block back */ 2128 // Our cards use the transaction context as the tag for queueing 2129 // Adaptec/DPT Private stuff 2130 msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16); 2131 msg[5] = d->tid; 2132 /* Direction, disconnect ok | sense data | simple queue , CDBLen */ 2133 // I2O_SCB_FLAG_ENABLE_DISCONNECT | 2134 // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG | 2135 // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE; 2136 msg[6] = scsidir|0x20a00000|cmd->cmd_len; 2137 2138 mptr=msg+7; 2139 2140 // Write SCSI command into the message - always 16 byte block 2141 memset(mptr, 0, 16); 2142 memcpy(mptr, cmd->cmnd, cmd->cmd_len); 2143 mptr+=4; 2144 lenptr=mptr++; /* Remember me - fill in when we know */ 2145 reqlen = 14; // SINGLE SGE 2146 /* Now fill in the SGList and command */ 2147 if(cmd->use_sg) { 2148 struct scatterlist *sg = (struct scatterlist *)cmd->request_buffer; 2149 int sg_count = pci_map_sg(pHba->pDev, sg, cmd->use_sg, 2150 cmd->sc_data_direction); 2151 2152 2153 len = 0; 2154 for(i = 0 ; i < sg_count; i++) { 2155 *mptr++ = direction|0x10000000|sg_dma_len(sg); 2156 len+=sg_dma_len(sg); 2157 *mptr++ = sg_dma_address(sg); 2158 sg++; 2159 } 2160 /* Make this an end of list */ 2161 mptr[-2] = direction|0xD0000000|sg_dma_len(sg-1); 2162 reqlen = mptr - msg; 2163 *lenptr = len; 2164 2165 if(cmd->underflow && len != cmd->underflow){ 2166 printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n", 2167 len, cmd->underflow); 2168 } 2169 } else { 2170 *lenptr = len = cmd->request_bufflen; 2171 if(len == 0) { 2172 reqlen = 
12; 2173 } else { 2174 *mptr++ = 0xD0000000|direction|cmd->request_bufflen; 2175 *mptr++ = pci_map_single(pHba->pDev, 2176 cmd->request_buffer, 2177 cmd->request_bufflen, 2178 cmd->sc_data_direction); 2179 } 2180 } 2181 2182 /* Stick the headers on */ 2183 msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0); 2184 2185 // Send it on it's way 2186 rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2); 2187 if (rcode == 0) { 2188 return 0; 2189 } 2190 return rcode; 2191} 2192 2193 2194static s32 adpt_scsi_register(adpt_hba* pHba,struct scsi_host_template * sht) 2195{ 2196 struct Scsi_Host *host = NULL; 2197 2198 host = scsi_register(sht, sizeof(adpt_hba*)); 2199 if (host == NULL) { 2200 printk ("%s: scsi_register returned NULL\n",pHba->name); 2201 return -1; 2202 } 2203 host->hostdata[0] = (unsigned long)pHba; 2204 pHba->host = host; 2205 2206 host->irq = pHba->pDev->irq; 2207 /* no IO ports, so don't have to set host->io_port and 2208 * host->n_io_port 2209 */ 2210 host->io_port = 0; 2211 host->n_io_port = 0; 2212 /* see comments in hosts.h */ 2213 host->max_id = 16; 2214 host->max_lun = 256; 2215 host->max_channel = pHba->top_scsi_channel + 1; 2216 host->cmd_per_lun = 1; 2217 host->unique_id = (uint) pHba; 2218 host->sg_tablesize = pHba->sg_tablesize; 2219 host->can_queue = pHba->post_fifo_size; 2220 2221 return 0; 2222} 2223 2224 2225static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd) 2226{ 2227 adpt_hba* pHba; 2228 u32 hba_status; 2229 u32 dev_status; 2230 u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits 2231 // I know this would look cleaner if I just read bytes 2232 // but the model I have been using for all the rest of the 2233 // io is in 4 byte words - so I keep that model 2234 u16 detailed_status = readl(reply+16) &0xffff; 2235 dev_status = (detailed_status & 0xff); 2236 hba_status = detailed_status >> 8; 2237 2238 // calculate resid for sg 2239 cmd->resid = cmd->request_bufflen - readl(reply+5); 2240 
	pHba = (adpt_hba*) cmd->device->host->hostdata[0];

	cmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false

	if(!(reply_flags & MSG_FAIL)) {
		/* Map the I2O detailed status onto a SCSI midlayer DID_* code. */
		switch(detailed_status & I2O_SCSI_DSC_MASK) {
		case I2O_SCSI_DSC_SUCCESS:
			cmd->result = (DID_OK << 16);
			// handle underflow
			if(readl(reply+5) < cmd->underflow ) {
				cmd->result = (DID_ERROR <<16);
				printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
			}
			break;
		case I2O_SCSI_DSC_REQUEST_ABORTED:
			cmd->result = (DID_ABORT << 16);
			break;
		case I2O_SCSI_DSC_PATH_INVALID:
		case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
		case I2O_SCSI_DSC_SELECTION_TIMEOUT:
		case I2O_SCSI_DSC_COMMAND_TIMEOUT:
		case I2O_SCSI_DSC_NO_ADAPTER:
		case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
			printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
				pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_TIME_OUT << 16);
			break;
		case I2O_SCSI_DSC_ADAPTER_BUSY:
		case I2O_SCSI_DSC_BUS_BUSY:
			cmd->result = (DID_BUS_BUSY << 16);
			break;
		case I2O_SCSI_DSC_SCSI_BUS_RESET:
		case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
			cmd->result = (DID_RESET << 16);
			break;
		case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
			printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
			cmd->result = (DID_PARITY << 16);
			break;
		/* Everything else (including unknown codes) is a generic error. */
		case I2O_SCSI_DSC_UNABLE_TO_ABORT:
		case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
		case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
		case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_AUTOSENSE_FAILED:
		case I2O_SCSI_DSC_DATA_OVERRUN:
		case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
		case I2O_SCSI_DSC_SEQUENCE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
		case I2O_SCSI_DSC_PROVIDE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_TERMINATED:
		case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
		case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
		case I2O_SCSI_DSC_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_INVALID_CDB:
		case I2O_SCSI_DSC_LUN_INVALID:
		case I2O_SCSI_DSC_SCSI_TID_INVALID:
		case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
		case I2O_SCSI_DSC_NO_NEXUS:
		case I2O_SCSI_DSC_CDB_RECEIVED:
		case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
		case I2O_SCSI_DSC_QUEUE_FROZEN:
		case I2O_SCSI_DSC_REQUEST_INVALID:
		default:
			printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
				pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
				hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_ERROR << 16);
			break;
		}

		// copy over the request sense data if it was a check
		// condition status
		if(dev_status == 0x02 /*CHECK_CONDITION*/) {
			u32 len = sizeof(cmd->sense_buffer);
			len = (len > 40) ?  40 : len;
			// Copy over the sense data
			memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
			if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
			   cmd->sense_buffer[2] == DATA_PROTECT ){
				/* This is to handle an array failed */
				cmd->result = (DID_TIME_OUT << 16);
				printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
					pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
					hba_status, dev_status, cmd->cmnd[0]);

			}
		}
	} else {
		/* In this condition we could not talk to the tid
		 * the card rejected it.  We should signal a retry
		 * for a limited number of retries.
		 */
		cmd->result = (DID_TIME_OUT << 16);
		printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
			pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
			((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
	}

	cmd->result |= (dev_status);

	if(cmd->scsi_done != NULL){
		cmd->scsi_done(cmd);
	}
	return cmd->result;
}


/*
 * Re-read the LCT from the IOP and re-parse it, taking the host lock (if a
 * Scsi_Host is registered) so the table is not consumed concurrently.
 * Returns 0 on success or the negative error from the LCT get/reparse step.
 */
static s32 adpt_rescan(adpt_hba* pHba)
{
	s32 rcode;
	ulong flags = 0;

	if(pHba->host)
		spin_lock_irqsave(pHba->host->host_lock, flags);
	if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
		goto out;
	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
		goto out;
	rcode = 0;
out:	if(pHba->host)
		spin_unlock_irqrestore(pHba->host->host_lock, flags);
	return rcode;
}


/*
 * Walk the (freshly fetched) Logical Configuration Table and reconcile it
 * with the driver's per-channel device lists: new storage-class entries get
 * adpt_device/i2o_device structures allocated, existing ones are marked
 * online (and their tid refreshed if it changed), and devices that vanished
 * from the LCT are set offline.  Returns 0 on success, negative on error.
 */
static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	s16 scsi_lun;
	u32 buf[10]; // at least 8 u32's
	struct adpt_device* pDev = NULL;
	struct i2o_device* pI2o_dev = NULL;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	/* Entry count: (table_size - 3 header words) / 9 words per entry. */
	max = lct->table_size;
	max -= 3;
	max /= 9;

	// Mark each drive as unscanned
	for (d = pHba->devices; d; d = d->next) {
		pDev =(struct adpt_device*) d->owner;
		if(!pDev){
			continue;
		}
		pDev->state |= DPTI_DEV_UNSCANNED;
	}

	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			continue;
		}

		if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
			tid = 
lct->lct_entry[i].tid;
			/* Query bus/id/lun for this tid from scalar group 0x8000. */
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				printk(KERN_ERR"%s: Could not query device\n",pHba->name);
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = (buf[2]>>8 )&0xff;
			/* NOTE(review): channel[bus_no] is indexed here BEFORE the
			 * bus_no >= MAX_CHANNEL check further down — confirm the
			 * firmware cannot report an out-of-range bus number. */
			pDev = pHba->channel[bus_no].device[scsi_id];
			/* da lun */
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					break;
				}
				pDev = pDev->next_lun;
			}
			if(!pDev ) { // Something new add it
				d = (struct i2o_device *)kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
				if(d==NULL)
				{
					printk(KERN_CRIT "Out of memory for I2O device data.\n");
					return -ENOMEM;
				}

				d->controller = pHba;
				d->next = NULL;

				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

				d->flags = 0;
				adpt_i2o_report_hba_unit(pHba, d);
				adpt_i2o_install_device(pHba, d);

				if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
					printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
					continue;
				}
				pDev = pHba->channel[bus_no].device[scsi_id];
				/* Insert at head slot if empty, else append to the
				 * per-id lun chain. */
				if( pDev == NULL){
					pDev = kmalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					while (pDev->next_lun) {
						pDev = pDev->next_lun;
					}
					pDev = pDev->next_lun = kmalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
				}
				memset(pDev,0,sizeof(struct adpt_device));
				pDev->tid = d->lct_data.tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				// Too late, SCSI system has made up its mind, but what the hey ...
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
				continue;
			} // end of new i2o device
			// We found an old device - check it
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					if(!scsi_device_online(pDev->pScsi_dev)) {
						printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n",
								pHba->name,bus_no,scsi_id,scsi_lun);
						if (pDev->pScsi_dev) {
							scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
						}
					}
					d = pDev->pI2o_dev;
					if(d->lct_data.tid != tid) { // something changed
						pDev->tid = tid;
						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
						if (pDev->pScsi_dev) {
							pDev->pScsi_dev->changed = TRUE;
							pDev->pScsi_dev->removable = TRUE;
						}
					}
					// Found it - mark it scanned
					pDev->state = DPTI_DEV_ONLINE;
					break;
				}
				pDev = pDev->next_lun;
			}
		}
	}
	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
		pDev =(struct adpt_device*) pI2o_dev->owner;
		if(!pDev){
			continue;
		}
		// Drive offline drives that previously existed but could not be found
		// in the LCT table
		if (pDev->state & DPTI_DEV_UNSCANNED){
			pDev->state = DPTI_DEV_OFFLINE;
			printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
			if (pDev->pScsi_dev) {
				scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
			}
		}
	}
	return 0;
}

/*
 * Complete every outstanding command on every device of this host with
 * DID_OK | QUEUE_FULL so the midlayer will requeue them (used after a
 * controller reset has invalidated all posted SCBs).
 */
static void adpt_fail_posted_scbs(adpt_hba* pHba)
{
	struct scsi_cmnd*	cmd = NULL;
	struct scsi_device*	d = NULL;

	shost_for_each_device(d, pHba->host) {
		unsigned long flags;
		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			/* serial_number == 0 means the command is not active. */
			if(cmd->serial_number == 0){
				continue;
			}
			cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
			cmd->scsi_done(cmd);
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
}


/*============================================================================
 *  Routines from i2o subsystem
 *============================================================================
 */



/*
 *	Bring an I2O controller into HOLD state. See the spec.
 */
static int adpt_i2o_activate_hba(adpt_hba* pHba)
{
	int rcode;

	if(pHba->initialized ) {
		/* Already initialized once: only reset if the IOP stopped
		 * responding or is in a state that requires it. */
		if (adpt_i2o_status_get(pHba) < 0) {
			if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
				printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
				return rcode;
			}
			if (adpt_i2o_status_get(pHba) < 0) {
				printk(KERN_INFO "HBA not responding.\n");
				return -1;
			}
		}

		if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
			return -1;
		}

		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
			adpt_i2o_reset_hba(pHba);
			if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
				return -1;
			}
		}
	} else {
		if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
			printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
			return rcode;
		}

	}

	if (adpt_i2o_init_outbound_q(pHba) < 0) {
		return -1;
	}

	/* In HOLD state */

	if (adpt_i2o_hrt_get(pHba) < 0) {
		return -1;
	}

	return 0;
}

/*
 *	Bring a controller online into OPERATIONAL state.
2609 */ 2610 2611static int adpt_i2o_online_hba(adpt_hba* pHba) 2612{ 2613 if (adpt_i2o_systab_send(pHba) < 0) { 2614 adpt_i2o_delete_hba(pHba); 2615 return -1; 2616 } 2617 /* In READY state */ 2618 2619 if (adpt_i2o_enable_hba(pHba) < 0) { 2620 adpt_i2o_delete_hba(pHba); 2621 return -1; 2622 } 2623 2624 /* In OPERATIONAL state */ 2625 return 0; 2626} 2627 2628static s32 adpt_send_nop(adpt_hba*pHba,u32 m) 2629{ 2630 u32 __iomem *msg; 2631 ulong timeout = jiffies + 5*HZ; 2632 2633 while(m == EMPTY_QUEUE){ 2634 rmb(); 2635 m = readl(pHba->post_port); 2636 if(m != EMPTY_QUEUE){ 2637 break; 2638 } 2639 if(time_after(jiffies,timeout)){ 2640 printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name); 2641 return 2; 2642 } 2643 schedule_timeout_uninterruptible(1); 2644 } 2645 msg = (u32 __iomem *)(pHba->msg_addr_virt + m); 2646 writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]); 2647 writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]); 2648 writel( 0,&msg[2]); 2649 wmb(); 2650 2651 writel(m, pHba->post_port); 2652 wmb(); 2653 return 0; 2654} 2655 2656static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba) 2657{ 2658 u8 *status; 2659 u32 __iomem *msg = NULL; 2660 int i; 2661 ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ; 2662 u32* ptr; 2663 u32 outbound_frame; // This had to be a 32 bit address 2664 u32 m; 2665 2666 do { 2667 rmb(); 2668 m = readl(pHba->post_port); 2669 if (m != EMPTY_QUEUE) { 2670 break; 2671 } 2672 2673 if(time_after(jiffies,timeout)){ 2674 printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name); 2675 return -ETIMEDOUT; 2676 } 2677 schedule_timeout_uninterruptible(1); 2678 } while(m == EMPTY_QUEUE); 2679 2680 msg=(u32 __iomem *)(pHba->msg_addr_virt+m); 2681 2682 status = kmalloc(4,GFP_KERNEL|ADDR32); 2683 if (status==NULL) { 2684 adpt_send_nop(pHba, m); 2685 printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n", 2686 pHba->name); 2687 return -ENOMEM; 2688 } 2689 memset(status, 0, 4); 2690 2691 
writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]); 2692 writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]); 2693 writel(0, &msg[2]); 2694 writel(0x0106, &msg[3]); /* Transaction context */ 2695 writel(4096, &msg[4]); /* Host page frame size */ 2696 writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */ 2697 writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */ 2698 writel(virt_to_bus(status), &msg[7]); 2699 2700 writel(m, pHba->post_port); 2701 wmb(); 2702 2703 // Wait for the reply status to come back 2704 do { 2705 if (*status) { 2706 if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) { 2707 break; 2708 } 2709 } 2710 rmb(); 2711 if(time_after(jiffies,timeout)){ 2712 printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name); 2713 return -ETIMEDOUT; 2714 } 2715 schedule_timeout_uninterruptible(1); 2716 } while (1); 2717 2718 // If the command was successful, fill the fifo with our reply 2719 // message packets 2720 if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) { 2721 kfree(status); 2722 return -2; 2723 } 2724 kfree(status); 2725 2726 kfree(pHba->reply_pool); 2727 2728 pHba->reply_pool = (u32*)kmalloc(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, GFP_KERNEL|ADDR32); 2729 if(!pHba->reply_pool){ 2730 printk(KERN_ERR"%s: Could not allocate reply pool\n",pHba->name); 2731 return -1; 2732 } 2733 memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4); 2734 2735 ptr = pHba->reply_pool; 2736 for(i = 0; i < pHba->reply_fifo_size; i++) { 2737 outbound_frame = (u32)virt_to_bus(ptr); 2738 writel(outbound_frame, pHba->reply_port); 2739 wmb(); 2740 ptr += REPLY_FRAME_SIZE; 2741 } 2742 adpt_i2o_status_get(pHba); 2743 return 0; 2744} 2745 2746 2747/* 2748 * I2O System Table. Contains information about 2749 * all the IOPs in the system. Used to inform IOPs 2750 * about each other's existence. 
 *
 * sys_tbl_ver is the CurrentChangeIndicator that is
 * used by IOPs to track changes.
 */



/*
 * Fetch the IOP's status block via ExecStatusGet: claim an inbound message
 * frame, post the request with the status block's bus address, then poll the
 * last byte of the block (the IOP sets it to 0xff on completion).  On
 * success the queue sizes and SG table size in pHba are (re)derived from
 * the returned block.  Returns 0 or a negative errno on alloc/timeout.
 */
static s32 adpt_i2o_status_get(adpt_hba* pHba)
{
	ulong timeout;
	u32 m;
	u32 __iomem *msg;
	u8 *status_block=NULL;
	ulong status_block_bus;

	if(pHba->status_block == NULL) {
		pHba->status_block = (i2o_status_block*)
			kmalloc(sizeof(i2o_status_block),GFP_KERNEL|ADDR32);
		if(pHba->status_block == NULL) {
			printk(KERN_ERR
			"dpti%d: Get Status Block failed; Out of memory. \n",
			pHba->unit);
			return -ENOMEM;
		}
	}
	memset(pHba->status_block, 0, sizeof(i2o_status_block));
	status_block = (u8*)(pHba->status_block);
	status_block_bus = virt_to_bus(pHba->status_block);
	timeout = jiffies+TMOUT_GETSTATUS*HZ;
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message !\n",
					pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m==EMPTY_QUEUE);


	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);

	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
	writel(1, &msg[2]);
	writel(0, &msg[3]);
	writel(0, &msg[4]);
	writel(0, &msg[5]);
	writel(((u32)status_block_bus)&0xffffffff, &msg[6]);
	writel(0, &msg[7]);
	writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes

	//post message
	writel(m, pHba->post_port);
	wmb();

	/* Byte 87 is the last byte of the 88-byte block; the IOP writes
	 * 0xff there when the DMA of the status block is complete. */
	while(status_block[87]!=0xff){
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR"dpti%d: Get status timeout.\n",
				pHba->unit);
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	// Set up our number of outbound and inbound messages
	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
	}

	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
	}

	// Calculate the Scatter Gather list size
	pHba->sg_tablesize = (pHba->status_block->inbound_frame_size * 4 -40)/ sizeof(struct sg_simple_element);
	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
		pHba->sg_tablesize = SG_LIST_ELEMENTS;
	}


#ifdef DEBUG
	printk("dpti%d: State = ",pHba->unit);
	switch(pHba->status_block->iop_state) {
		case 0x01:
			printk("INIT\n");
			break;
		case 0x02:
			printk("RESET\n");
			break;
		case 0x04:
			printk("HOLD\n");
			break;
		case 0x05:
			printk("READY\n");
			break;
		case 0x08:
			printk("OPERATIONAL\n");
			break;
		case 0x10:
			printk("FAILED\n");
			break;
		case 0x11:
			printk("FAULTED\n");
			break;
		default:
			printk("%x (unknown!!)\n",pHba->status_block->iop_state);
	}
#endif
	return 0;
}

/*
 * Get the IOP's Logical Configuration Table.  The buffer starts at the
 * size the IOP advertised in the status block and is grown and re-fetched
 * until the whole table fits.
 */
static int adpt_i2o_lct_get(adpt_hba* pHba)
{
	u32 msg[8];
	int ret;
	u32 buf[16];

	if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
		pHba->lct_size = pHba->status_block->expected_lct_size;
	}
	do {
		if (pHba->lct == NULL) {
			pHba->lct = kmalloc(pHba->lct_size, GFP_KERNEL|ADDR32);
			if(pHba->lct == NULL) {
				printk(KERN_CRIT "%s: Lct Get failed. 
Out of memory.\n",
				pHba->name);
				return -ENOMEM;
			}
		}
		memset(pHba->lct, 0, pHba->lct_size);

		msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
		msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2] = 0;
		msg[3] = 0;
		msg[4] = 0xFFFFFFFF;	/* All devices */
		msg[5] = 0x00000000;	/* Report now */
		msg[6] = 0xD0000000|pHba->lct_size;
		msg[7] = virt_to_bus(pHba->lct);

		if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
			/* NOTE(review): the format string below is missing the
			 * closing ')' after %#10x. */
			printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n",
				pHba->name, ret);
			printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
			return ret;
		}

		/* Table bigger than our buffer: grow and loop to re-fetch. */
		if ((pHba->lct->table_size << 2) > pHba->lct_size) {
			pHba->lct_size = pHba->lct->table_size << 2;
			kfree(pHba->lct);
			pHba->lct = NULL;
		}
	} while (pHba->lct == NULL);

	PDEBUG("%s: Hardware resource table read.\n", pHba->name);


	// I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
	if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
		pHba->FwDebugBufferSize = buf[1];
		pHba->FwDebugBuffer_P = pHba->base_addr_virt + buf[0];
		pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P + FW_DEBUG_FLAGS_OFFSET;
		pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P + FW_DEBUG_BLED_OFFSET;
		pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
		pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P + FW_DEBUG_STR_LENGTH_OFFSET;
		pHba->FwDebugBuffer_P += buf[2];
		pHba->FwDebugFlags = 0;
	}

	return 0;
}

/*
 * (Re)build the global I2O system table describing every registered HBA
 * (hba_chain/hba_count).  Each entry is filled from that HBA's freshly
 * refreshed status block; HBAs whose status cannot be read are dropped
 * from the table.  Returns 0 or -ENOMEM.
 */
static int adpt_i2o_build_sys_table(void)
{
	adpt_hba* pHba = NULL;
	int count = 0;

	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
				(hba_count) * sizeof(struct i2o_sys_tbl_entry);

	kfree(sys_tbl);

	sys_tbl = kmalloc(sys_tbl_len, GFP_KERNEL|ADDR32);
	if(!sys_tbl) {
		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
		return -ENOMEM;
	}
	memset(sys_tbl, 0, sys_tbl_len);

	sys_tbl->num_entries = hba_count;
	sys_tbl->version = I2OVERSION;
	sys_tbl->change_ind = sys_tbl_ind++;

	for(pHba = hba_chain; pHba; pHba = pHba->next) {
		// Get updated Status Block so we have the latest information
		if (adpt_i2o_status_get(pHba)) {
			sys_tbl->num_entries--;
			continue; // try next one
		}

		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
		sys_tbl->iops[count].iop_id = pHba->unit + 2;
		sys_tbl->iops[count].seg_num = 0;
		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
		sys_tbl->iops[count].inbound_low = (u32)virt_to_bus(pHba->post_port);
		sys_tbl->iops[count].inbound_high = (u32)((u64)virt_to_bus(pHba->post_port)>>32);

		count++;
	}

#ifdef DEBUG
{
	u32 *table = (u32*)sys_tbl;
	printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
	for(count = 0; count < (sys_tbl_len >>2); count++) {
		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
			count, table[count]);
	}
}
#endif

	return 0;
}


/*
 *	 Dump the information block associated with a given unit (TID)
 */

static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
{
	char buf[64];
	int unit = d->lct_data.tid;

	printk(KERN_INFO "TID %3.3d ", unit);

	/* Scalar group 0xF100: field 3 = vendor, 4 = device, 6 = revision. */
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
	{
		buf[16]=0;
		printk(" Vendor: %-12.12s", buf);
	}
	if(adpt_i2o_query_scalar(pHba, 
unit, 0xF100, 4, buf, 16)>=0)
	{
		buf[16]=0;
		printk(" Device: %-12.12s", buf);
	}
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
	{
		buf[8]=0;
		printk(" Rev: %-12.12s\n", buf);
	}
#ifdef DEBUG
	 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
	 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
	 printk(KERN_INFO "\tFlags: ");

	 if(d->lct_data.device_flags&(1<<0))
		  printk("C");	// ConfigDialog requested
	 if(d->lct_data.device_flags&(1<<1))
		  printk("U");	// Multi-user capable
	 if(!(d->lct_data.device_flags&(1<<4)))
		  printk("P");	// Peer service enabled!
	 if(!(d->lct_data.device_flags&(1<<5)))
		  printk("M");	// Mgmt service enabled!
	 printk("\n");
#endif
}

#ifdef DEBUG
/*
 *	Do i2o class name lookup.  Unmatched class codes fall through to
 *	idx 16 ("Unknown").
 */
static const char *adpt_i2o_get_class_name(int class)
{
	int idx = 16;
	static char *i2o_class_name[] = {
		"Executive",
		"Device Driver Module",
		"Block Device",
		"Tape Device",
		"LAN Interface",
		"WAN Interface",
		"Fibre Channel Port",
		"Fibre Channel Device",
		"SCSI Device",
		"ATE Port",
		"ATE Device",
		"Floppy Controller",
		"Floppy Device",
		"Secondary Bus Port",
		"Peer Transport Agent",
		"Peer Transport",
		"Unknown"
	};

	switch(class&0xFFF) {
	case I2O_CLASS_EXECUTIVE:
		idx = 0; break;
	case I2O_CLASS_DDM:
		idx = 1; break;
	case I2O_CLASS_RANDOM_BLOCK_STORAGE:
		idx = 2; break;
	case I2O_CLASS_SEQUENTIAL_STORAGE:
		idx = 3; break;
	case I2O_CLASS_LAN:
		idx = 4; break;
	case I2O_CLASS_WAN:
		idx = 5; break;
	case I2O_CLASS_FIBRE_CHANNEL_PORT:
		idx = 6; break;
	case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
		idx = 7; break;
	case I2O_CLASS_SCSI_PERIPHERAL:
		idx = 8; break;
	case I2O_CLASS_ATE_PORT:
		idx = 9; break;
	case I2O_CLASS_ATE_PERIPHERAL:
		idx = 10; break;
	case I2O_CLASS_FLOPPY_CONTROLLER:
		idx = 11; break;
	case I2O_CLASS_FLOPPY_DEVICE:
		idx = 12; break;
	case I2O_CLASS_BUS_ADAPTER_PORT:
		idx = 13; break;
	case I2O_CLASS_PEER_TRANSPORT_AGENT:
		idx = 14; break;
	case I2O_CLASS_PEER_TRANSPORT:
		idx = 15; break;
	}
	return i2o_class_name[idx];
}
#endif


/*
 * Fetch the Hardware Resource Table.  Like the LCT fetch, the buffer is
 * grown and the request repeated until the IOP's reported table fits.
 * Returns 0, -ENOMEM, or the adpt_i2o_post_wait() error.
 */
static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
{
	u32 msg[6];
	int ret, size = sizeof(i2o_hrt);

	do {
		if (pHba->hrt == NULL) {
			pHba->hrt=kmalloc(size, GFP_KERNEL|ADDR32);
			if (pHba->hrt == NULL) {
				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
				return -ENOMEM;
			}
		}

		msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
		msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2]= 0;
		msg[3]= 0;
		msg[4]= (0xD0000000 | size);    /* Simple transaction */
		msg[5]= virt_to_bus(pHba->hrt);   /* Dump it here */

		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
			return ret;
		}

		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
			size = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
			kfree(pHba->hrt);
			pHba->hrt = NULL;
		}
	} while(pHba->hrt == NULL);
	return 0;
}

/*
 *	 Query one scalar group value or a whole scalar group.
3138 */ 3139static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid, 3140 int group, int field, void *buf, int buflen) 3141{ 3142 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field }; 3143 u8 *resblk; 3144 3145 int size; 3146 3147 /* 8 bytes for header */ 3148 resblk = kmalloc(sizeof(u8) * (8+buflen), GFP_KERNEL|ADDR32); 3149 if (resblk == NULL) { 3150 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name); 3151 return -ENOMEM; 3152 } 3153 3154 if (field == -1) /* whole group */ 3155 opblk[4] = -1; 3156 3157 size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid, 3158 opblk, sizeof(opblk), resblk, sizeof(u8)*(8+buflen)); 3159 if (size == -ETIME) { 3160 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name); 3161 return -ETIME; 3162 } else if (size == -EINTR) { 3163 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name); 3164 return -EINTR; 3165 } 3166 3167 memcpy(buf, resblk+8, buflen); /* cut off header */ 3168 3169 kfree(resblk); 3170 if (size < 0) 3171 return size; 3172 3173 return buflen; 3174} 3175 3176 3177/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET 3178 * 3179 * This function can be used for all UtilParamsGet/Set operations. 3180 * The OperationBlock is given in opblk-buffer, 3181 * and results are returned in resblk-buffer. 3182 * Note that the minimum sized resblk is 8 bytes and contains 3183 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize. 
 */
static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
		  void *opblk, int oplen, void *resblk, int reslen)
{
	u32 msg[9];
	u32 *res = (u32 *)resblk;
	int wait_status;

	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
	msg[1] = cmd << 24 | HOST_TID << 12 | tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = 0;
	msg[5] = 0x54000000 | oplen;	/* OperationBlock */
	msg[6] = virt_to_bus(opblk);
	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
	msg[8] = virt_to_bus(resblk);

	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk);
   		return wait_status; 	/* -DetailedStatus */
	}

	/* res[1] packs ErrorInfoSize (byte 3), BlockStatus (byte 2) and
	 * BlockSize (low 16 bits) of the result header. */
	if (res[1]&0x00FF0000) { 	/* BlockStatus != SUCCESS */
		printk(KERN_WARNING "%s: %s - Error:\n  ErrorInfoSize = 0x%02x, "
			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
			pHba->name,
			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
							 : "PARAMS_GET",
			res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
		return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
	}

	 return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
}


/*
 * Send SysQuiesce to stop the IOP's external operations.  A no-op unless
 * the IOP is in READY or OPERATIONAL state.
 */
static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
{
	u32 msg[4];
	int ret;

	adpt_i2o_status_get(pHba);

	/* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */

	if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
   	   (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
		return 0;
	}

	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;

	if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
		printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
				pHba->unit, -ret);
	} else {
		printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
	}

	/* Refresh the cached status block to reflect the new IOP state. */
	adpt_i2o_status_get(pHba);
	return ret;
}


/*
 * Enable IOP. Allows the IOP to resume external operations.
 */
static int adpt_i2o_enable_hba(adpt_hba* pHba)
{
	u32 msg[4];
	int ret;

	adpt_i2o_status_get(pHba);
	if(!pHba->status_block){
		return -ENOMEM;
	}
	/* Enable only allowed on READY state */
	if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
		return 0;

	if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
		return -EINVAL;

	msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]= 0;
	msg[3]= 0;

	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
		printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
			pHba->name, ret);
	} else {
		PDEBUG("%s: Enabled.\n", pHba->name);
	}

	adpt_i2o_status_get(pHba);
	return ret;
}


/*
 * Send the global system table (sys_tbl, built by
 * adpt_i2o_build_sys_table()) to this IOP via ExecSysTabSet.
 * Returns 0 or the adpt_i2o_post_wait() error.
 */
static int adpt_i2o_systab_send(adpt_hba* pHba)
{
	 u32 msg[12];
	 int ret;

	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
	msg[5] = 0;				   /* Segment 0 */

	/*
	 * Provide three SGL-elements:
	 * System table (SysTab), Private memory space declaration and
	 * Private i/o space declaration
	 */
	msg[6] = 0x54000000 | sys_tbl_len;
	msg[7] = virt_to_phys(sys_tbl);
	msg[8] = 0x54000000 | 0;
	msg[9] = 0;
	msg[10] = 0xD4000000 | 0;
	msg[11] = 0;

	if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
			pHba->name, ret);
	}
#ifdef DEBUG
	else {
		PINFO("%s: SysTab set.\n", pHba->name);
	}
#endif

	return ret;
 }
3326/*============================================================================ 3327 * 3328 *============================================================================ 3329 */ 3330 3331 3332#ifdef UARTDELAY 3333 3334static static void adpt_delay(int millisec) 3335{ 3336 int i; 3337 for (i = 0; i < millisec; i++) { 3338 udelay(1000); /* delay for one millisecond */ 3339 } 3340} 3341 3342#endif 3343 3344static struct scsi_host_template driver_template = { 3345 .name = "dpt_i2o", 3346 .proc_name = "dpt_i2o", 3347 .proc_info = adpt_proc_info, 3348 .detect = adpt_detect, 3349 .release = adpt_release, 3350 .info = adpt_info, 3351 .queuecommand = adpt_queue, 3352 .eh_abort_handler = adpt_abort, 3353 .eh_device_reset_handler = adpt_device_reset, 3354 .eh_bus_reset_handler = adpt_bus_reset, 3355 .eh_host_reset_handler = adpt_reset, 3356 .bios_param = adpt_bios_param, 3357 .slave_configure = adpt_slave_configure, 3358 .can_queue = MAX_TO_IOP_MESSAGES, 3359 .this_id = 7, 3360 .cmd_per_lun = 1, 3361 .use_clustering = ENABLE_CLUSTERING, 3362}; 3363#include "scsi_module.c" 3364MODULE_LICENSE("GPL");