Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6

+2389 -813
+23
Documentation/scsi/ChangeLog.megaraid_sas
···
 1 + 1 Release Date : Wed Feb 03 14:31:44 PST 2006 - Sumant Patro <Sumant.Patro@lsil.com>
 2 + 2 Current Version : 00.00.02.04
 3 + 3 Older Version : 00.00.02.04
 4 +
 5 + i. Support for 1078 type (ppc IOP) controller, device id : 0x60 added.
 6 + During initialization, depending on the device id, the template members
 7 + are initialized with function pointers specific to the ppc or
 8 + xscale controllers.
 9 +
10 + -Sumant Patro <Sumant.Patro@lsil.com>
11 +
12 + 1 Release Date : Fri Feb 03 14:16:25 PST 2006 - Sumant Patro
13 + <Sumant.Patro@lsil.com>
14 + 2 Current Version : 00.00.02.04
15 + 3 Older Version : 00.00.02.02
16 + i. Register 16 byte CDB capability with scsi midlayer
17 +
18 + "This patch properly registers the 16 byte command length capability of the
19 + megaraid_sas controlled hardware with the scsi midlayer. All megaraid_sas
20 + hardware supports 16 byte CDB's."
21 +
22 + -Joshua Giles <joshua_giles@dell.com>
23 +
 1 24 1 Release Date : Mon Jan 23 14:09:01 PST 2006 - Sumant Patro <Sumant.Patro@lsil.com>
 2 25 2 Current Version : 00.00.02.02
 3 26 3 Older Version : 00.00.02.01
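The first entry above describes a dispatch pattern worth spelling out: during init the driver binds a set of IOP-specific register accessors once, keyed by PCI device id, so the I/O paths never branch on controller type. A minimal C sketch of that template idea follows; the struct layout and function names are illustrative assumptions, not megaraid_sas's exact code.

/* Sketch of the device-id-keyed template the changelog describes.
 * All names below are hypothetical stand-ins for the driver's own. */
struct instance_template {
	void (*enable_intr)(void __iomem *reg_set);
	u32 (*read_fw_status_reg)(void __iomem *reg_set);
};

static void enable_intr_xscale(void __iomem *r) { /* xscale IOP MMIO */ }
static void enable_intr_ppc(void __iomem *r) { /* ppc IOP MMIO */ }
static u32 read_fw_status_xscale(void __iomem *r) { return 0; /* stub */ }
static u32 read_fw_status_ppc(void __iomem *r) { return 0; /* stub */ }

static const struct instance_template template_xscale = {
	.enable_intr = enable_intr_xscale,
	.read_fw_status_reg = read_fw_status_xscale,
};

static const struct instance_template template_ppc = {
	.enable_intr = enable_intr_ppc,
	.read_fw_status_reg = read_fw_status_ppc,
};

/* Chosen once during initialization; 0x60 is the 1078 (ppc IOP) device id
 * named in the changelog, anything else falls back to the xscale template. */
static const struct instance_template *select_template(struct pci_dev *pdev)
{
	return (pdev->device == 0x0060) ? &template_ppc : &template_xscale;
}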
+2 -113
drivers/message/fusion/mptbase.c
··· 452 452 } else if (func == MPI_FUNCTION_EVENT_ACK) { 453 453 dprintk((MYIOC_s_INFO_FMT "mpt_base_reply, EventAck reply received\n", 454 454 ioc->name)); 455 - } else if (func == MPI_FUNCTION_CONFIG || 456 - func == MPI_FUNCTION_TOOLBOX) { 455 + } else if (func == MPI_FUNCTION_CONFIG) { 457 456 CONFIGPARMS *pCfg; 458 457 unsigned long flags; 459 458 ··· 5326 5327 } 5327 5328 5328 5329 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 5329 - /** 5330 - * mpt_toolbox - Generic function to issue toolbox message 5331 - * @ioc - Pointer to an adapter structure 5332 - * @cfg - Pointer to a toolbox structure. Struct contains 5333 - * action, page address, direction, physical address 5334 - * and pointer to a configuration page header 5335 - * Page header is updated. 5336 - * 5337 - * Returns 0 for success 5338 - * -EPERM if not allowed due to ISR context 5339 - * -EAGAIN if no msg frames currently available 5340 - * -EFAULT for non-successful reply or no reply (timeout) 5341 - */ 5342 - int 5343 - mpt_toolbox(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg) 5344 - { 5345 - ToolboxIstwiReadWriteRequest_t *pReq; 5346 - MPT_FRAME_HDR *mf; 5347 - struct pci_dev *pdev; 5348 - unsigned long flags; 5349 - int rc; 5350 - u32 flagsLength; 5351 - int in_isr; 5352 - 5353 - /* Prevent calling wait_event() (below), if caller happens 5354 - * to be in ISR context, because that is fatal! 5355 - */ 5356 - in_isr = in_interrupt(); 5357 - if (in_isr) { 5358 - dcprintk((MYIOC_s_WARN_FMT "toobox request not allowed in ISR context!\n", 5359 - ioc->name)); 5360 - return -EPERM; 5361 - } 5362 - 5363 - /* Get and Populate a free Frame 5364 - */ 5365 - if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { 5366 - dcprintk((MYIOC_s_WARN_FMT "mpt_toolbox: no msg frames!\n", 5367 - ioc->name)); 5368 - return -EAGAIN; 5369 - } 5370 - pReq = (ToolboxIstwiReadWriteRequest_t *)mf; 5371 - pReq->Tool = pCfg->action; 5372 - pReq->Reserved = 0; 5373 - pReq->ChainOffset = 0; 5374 - pReq->Function = MPI_FUNCTION_TOOLBOX; 5375 - pReq->Reserved1 = 0; 5376 - pReq->Reserved2 = 0; 5377 - pReq->MsgFlags = 0; 5378 - pReq->Flags = pCfg->dir; 5379 - pReq->BusNum = 0; 5380 - pReq->Reserved3 = 0; 5381 - pReq->NumAddressBytes = 0x01; 5382 - pReq->Reserved4 = 0; 5383 - pReq->DataLength = cpu_to_le16(0x04); 5384 - pdev = ioc->pcidev; 5385 - if (pdev->devfn & 1) 5386 - pReq->DeviceAddr = 0xB2; 5387 - else 5388 - pReq->DeviceAddr = 0xB0; 5389 - pReq->Addr1 = 0; 5390 - pReq->Addr2 = 0; 5391 - pReq->Addr3 = 0; 5392 - pReq->Reserved5 = 0; 5393 - 5394 - /* Add a SGE to the config request. 
5395 - */ 5396 - 5397 - flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | 4; 5398 - 5399 - mpt_add_sge((char *)&pReq->SGL, flagsLength, pCfg->physAddr); 5400 - 5401 - dcprintk((MYIOC_s_INFO_FMT "Sending Toolbox request, Tool=%x\n", 5402 - ioc->name, pReq->Tool)); 5403 - 5404 - /* Append pCfg pointer to end of mf 5405 - */ 5406 - *((void **) (((u8 *) mf) + (ioc->req_sz - sizeof(void *)))) = (void *) pCfg; 5407 - 5408 - /* Initalize the timer 5409 - */ 5410 - init_timer(&pCfg->timer); 5411 - pCfg->timer.data = (unsigned long) ioc; 5412 - pCfg->timer.function = mpt_timer_expired; 5413 - pCfg->wait_done = 0; 5414 - 5415 - /* Set the timer; ensure 10 second minimum */ 5416 - if (pCfg->timeout < 10) 5417 - pCfg->timer.expires = jiffies + HZ*10; 5418 - else 5419 - pCfg->timer.expires = jiffies + HZ*pCfg->timeout; 5420 - 5421 - /* Add to end of Q, set timer and then issue this command */ 5422 - spin_lock_irqsave(&ioc->FreeQlock, flags); 5423 - list_add_tail(&pCfg->linkage, &ioc->configQ); 5424 - spin_unlock_irqrestore(&ioc->FreeQlock, flags); 5425 - 5426 - add_timer(&pCfg->timer); 5427 - mpt_put_msg_frame(mpt_base_index, ioc, mf); 5428 - wait_event(mpt_waitq, pCfg->wait_done); 5429 - 5430 - /* mf has been freed - do not access */ 5431 - 5432 - rc = pCfg->status; 5433 - 5434 - return rc; 5435 - } 5436 - 5437 - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 5438 5330 /* 5439 5331 * mpt_timer_expired - Call back for timer process. 5440 5332 * Used only internal config functionality. ··· 6032 6142 if (ioc->events && (ioc->eventTypes & ( 1 << event))) { 6033 6143 int idx; 6034 6144 6035 - idx = ioc->eventContext % ioc->eventLogSize; 6145 + idx = ioc->eventContext % MPTCTL_EVENT_LOG_SIZE; 6036 6146 6037 6147 ioc->events[idx].event = event; 6038 6148 ioc->events[idx].eventContext = ioc->eventContext; ··· 6430 6540 EXPORT_SYMBOL(mpt_stm_index); 6431 6541 EXPORT_SYMBOL(mpt_HardResetHandler); 6432 6542 EXPORT_SYMBOL(mpt_config); 6433 - EXPORT_SYMBOL(mpt_toolbox); 6434 6543 EXPORT_SYMBOL(mpt_findImVolumes); 6435 6544 EXPORT_SYMBOL(mpt_read_ioc_pg_3); 6436 6545 EXPORT_SYMBOL(mpt_alloc_fw_memory);
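In the mptbase.c hunk above (and its twin in mptscsih.c further down), the event-log index switches from the per-adapter ioc->eventLogSize field to the compile-time constant MPTCTL_EVENT_LOG_SIZE, so the code that fills the log and the ioctl code that drains it agree on one capacity. The log is a fixed-size ring: eventContext only ever increments, and the modulo wraps it onto a slot while the stored context preserves ordering. A standalone sketch of that indexing, with an illustrative capacity and slot layout (the real ones live in mptctl.h):

/* Ring-buffer indexing sketch; capacity and slot struct are illustrative. */
#include <stdint.h>

#define MPTCTL_EVENT_LOG_SIZE 0x32	/* assumed capacity for this sketch */

struct event_slot {
	uint32_t event;
	uint32_t eventContext;
};

static void log_event(struct event_slot *log, uint32_t *ctx, uint32_t event)
{
	int idx = *ctx % MPTCTL_EVENT_LOG_SIZE;	/* wrap onto a slot */

	log[idx].event = event;
	log[idx].eventContext = *ctx;	/* slot remembers its sequence number */
	(*ctx)++;			/* context itself never wraps back */
}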
+1 -1
drivers/message/fusion/mptbase.h
···
 616  616 * increments by 32 bytes
 617  617 */
 618  618 int errata_flag_1064;
      619 + int aen_event_read_flag; /* flag to indicate event log was read*/
 619  620 u8 FirstWhoInit;
 620  621 u8 upload_fw; /* If set, do a fw upload */
 621  622 u8 reload_fw; /* Force a FW Reload on next reset */
···
1027 1026 extern void mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buf, int *size, int len, int showlan);
1028 1027 extern int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag);
1029 1028 extern int mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *cfg);
1030      - extern int mpt_toolbox(MPT_ADAPTER *ioc, CONFIGPARMS *cfg);
1031 1029 extern void mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size);
1032 1030 extern void mpt_free_fw_memory(MPT_ADAPTER *ioc);
1033 1031 extern int mpt_findImVolumes(MPT_ADAPTER *ioc);
+201 -42
drivers/message/fusion/mptctl.c
··· 136 136 */ 137 137 static int mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase); 138 138 139 + /* 140 + * Event Handler function 141 + */ 142 + static int mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply); 143 + struct fasync_struct *async_queue=NULL; 144 + 139 145 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 140 146 /* 141 147 * Scatter gather list (SGL) sizes and limits... ··· 391 385 } 392 386 393 387 /* Now wait for the command to complete */ 394 - ii = wait_event_interruptible_timeout(mptctl_wait, 388 + ii = wait_event_timeout(mptctl_wait, 395 389 ioctl->wait_done == 1, 396 390 HZ*5 /* 5 second timeout */); 397 391 398 392 if(ii <=0 && (ioctl->wait_done != 1 )) { 393 + mpt_free_msg_frame(hd->ioc, mf); 399 394 ioctl->wait_done = 0; 400 395 retval = -1; /* return failure */ 401 396 } 402 397 403 398 mptctl_bus_reset_done: 404 399 405 - mpt_free_msg_frame(hd->ioc, mf); 406 400 mptctl_free_tm_flags(ioctl->ioc); 407 401 return retval; 408 402 } ··· 475 469 } 476 470 477 471 return 1; 472 + } 473 + 474 + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 475 + /* ASYNC Event Notification Support */ 476 + static int 477 + mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) 478 + { 479 + u8 event; 480 + 481 + event = le32_to_cpu(pEvReply->Event) & 0xFF; 482 + 483 + dctlprintk(("%s() called\n", __FUNCTION__)); 484 + if(async_queue == NULL) 485 + return 1; 486 + 487 + /* Raise SIGIO for persistent events. 488 + * TODO - this define is not in MPI spec yet, 489 + * but they plan to set it to 0x21 490 + */ 491 + if (event == 0x21 ) { 492 + ioc->aen_event_read_flag=1; 493 + dctlprintk(("Raised SIGIO to application\n")); 494 + devtprintk(("Raised SIGIO to application\n")); 495 + kill_fasync(&async_queue, SIGIO, POLL_IN); 496 + return 1; 497 + } 498 + 499 + /* This flag is set after SIGIO was raised, and 500 + * remains set until the application has read 501 + * the event log via ioctl=MPTEVENTREPORT 502 + */ 503 + if(ioc->aen_event_read_flag) 504 + return 1; 505 + 506 + /* Signal only for the events that are 507 + * requested for by the application 508 + */ 509 + if (ioc->events && (ioc->eventTypes & ( 1 << event))) { 510 + ioc->aen_event_read_flag=1; 511 + dctlprintk(("Raised SIGIO to application\n")); 512 + devtprintk(("Raised SIGIO to application\n")); 513 + kill_fasync(&async_queue, SIGIO, POLL_IN); 514 + } 515 + return 1; 516 + } 517 + 518 + static int 519 + mptctl_fasync(int fd, struct file *filep, int mode) 520 + { 521 + MPT_ADAPTER *ioc; 522 + 523 + list_for_each_entry(ioc, &ioc_list, list) 524 + ioc->aen_event_read_flag=0; 525 + 526 + dctlprintk(("%s() called\n", __FUNCTION__)); 527 + return fasync_helper(fd, filep, mode, &async_queue); 528 + } 529 + 530 + static int 531 + mptctl_release(struct inode *inode, struct file *filep) 532 + { 533 + dctlprintk(("%s() called\n", __FUNCTION__)); 534 + return fasync_helper(-1, filep, 0, &async_queue); 478 535 } 479 536 480 537 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ ··· 743 674 u16 iocstat; 744 675 pFWDownloadReply_t ReplyMsg = NULL; 745 676 746 - dctlprintk((KERN_INFO "mptctl_do_fwdl called. mptctl_id = %xh.\n", mptctl_id)); 677 + dctlprintk(("mptctl_do_fwdl called. 
mptctl_id = %xh.\n", mptctl_id)); 747 678 748 - dctlprintk((KERN_INFO "DbG: kfwdl.bufp = %p\n", ufwbuf)); 749 - dctlprintk((KERN_INFO "DbG: kfwdl.fwlen = %d\n", (int)fwlen)); 750 - dctlprintk((KERN_INFO "DbG: kfwdl.ioc = %04xh\n", ioc)); 679 + dctlprintk(("DbG: kfwdl.bufp = %p\n", ufwbuf)); 680 + dctlprintk(("DbG: kfwdl.fwlen = %d\n", (int)fwlen)); 681 + dctlprintk(("DbG: kfwdl.ioc = %04xh\n", ioc)); 751 682 752 - if ((ioc = mpt_verify_adapter(ioc, &iocp)) < 0) { 753 - dctlprintk(("%s@%d::_ioctl_fwdl - ioc%d not found!\n", 754 - __FILE__, __LINE__, ioc)); 683 + if (mpt_verify_adapter(ioc, &iocp) < 0) { 684 + dctlprintk(("ioctl_fwdl - ioc%d not found!\n", 685 + ioc)); 755 686 return -ENODEV; /* (-6) No such device or address */ 756 - } 687 + } else { 757 688 758 - /* Valid device. Get a message frame and construct the FW download message. 759 - */ 760 - if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL) 761 - return -EAGAIN; 689 + /* Valid device. Get a message frame and construct the FW download message. 690 + */ 691 + if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL) 692 + return -EAGAIN; 693 + } 762 694 dlmsg = (FWDownload_t*) mf; 763 695 ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL; 764 696 sgOut = (char *) (ptsge + 1); ··· 772 702 dlmsg->ChainOffset = 0; 773 703 dlmsg->Function = MPI_FUNCTION_FW_DOWNLOAD; 774 704 dlmsg->Reserved1[0] = dlmsg->Reserved1[1] = dlmsg->Reserved1[2] = 0; 775 - dlmsg->MsgFlags = 0; 705 + if (iocp->facts.MsgVersion >= MPI_VERSION_01_05) 706 + dlmsg->MsgFlags = MPI_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT; 707 + else 708 + dlmsg->MsgFlags = 0; 709 + 776 710 777 711 /* Set up the Transaction SGE. 778 712 */ ··· 828 754 goto fwdl_out; 829 755 } 830 756 831 - dctlprintk((KERN_INFO "DbG: sgl buffer = %p, sgfrags = %d\n", sgl, numfrags)); 757 + dctlprintk(("DbG: sgl buffer = %p, sgfrags = %d\n", sgl, numfrags)); 832 758 833 759 /* 834 760 * Parse SG list, copying sgl itself, ··· 877 803 /* 878 804 * Finally, perform firmware download. 
879 805 */ 880 - iocp->ioctl->wait_done = 0; 806 + ReplyMsg = NULL; 881 807 mpt_put_msg_frame(mptctl_id, iocp, mf); 882 808 883 809 /* Now wait for the command to complete */ 884 - ret = wait_event_interruptible_timeout(mptctl_wait, 810 + ret = wait_event_timeout(mptctl_wait, 885 811 iocp->ioctl->wait_done == 1, 886 812 HZ*60); 887 813 ··· 1219 1145 /* Fill in the data and return the structure to the calling 1220 1146 * program 1221 1147 */ 1222 - if (ioc->bus_type == FC) 1148 + if (ioc->bus_type == SAS) 1149 + karg->adapterType = MPT_IOCTL_INTERFACE_SAS; 1150 + else if (ioc->bus_type == FC) 1223 1151 karg->adapterType = MPT_IOCTL_INTERFACE_FC; 1224 1152 else 1225 1153 karg->adapterType = MPT_IOCTL_INTERFACE_SCSI; ··· 1246 1170 karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn ); 1247 1171 karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn ); 1248 1172 } else if (cim_rev == 2) { 1249 - /* Get the PCI bus, device, function and segment ID numbers 1173 + /* Get the PCI bus, device, function and segment ID numbers 1250 1174 for the IOC */ 1251 1175 karg->pciInfo.u.bits.busNumber = pdev->bus->number; 1252 1176 karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn ); 1253 - karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn ); 1254 1177 karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn ); 1255 1178 karg->pciInfo.segmentID = pci_domain_nr(pdev->bus); 1256 1179 } ··· 1575 1500 return -ENODEV; 1576 1501 } 1577 1502 1578 - karg.eventEntries = ioc->eventLogSize; 1503 + karg.eventEntries = MPTCTL_EVENT_LOG_SIZE; 1579 1504 karg.eventTypes = ioc->eventTypes; 1580 1505 1581 1506 /* Copy the data from kernel memory to user memory ··· 1625 1550 memset(ioc->events, 0, sz); 1626 1551 ioc->alloc_total += sz; 1627 1552 1628 - ioc->eventLogSize = MPTCTL_EVENT_LOG_SIZE; 1629 1553 ioc->eventContext = 0; 1630 1554 } 1631 1555 ··· 1664 1590 maxEvents = numBytes/sizeof(MPT_IOCTL_EVENTS); 1665 1591 1666 1592 1667 - max = ioc->eventLogSize < maxEvents ? ioc->eventLogSize : maxEvents; 1593 + max = MPTCTL_EVENT_LOG_SIZE < maxEvents ? MPTCTL_EVENT_LOG_SIZE : maxEvents; 1668 1594 1669 1595 /* If fewer than 1 event is requested, there must have 1670 1596 * been some type of error. 1671 1597 */ 1672 1598 if ((max < 1) || !ioc->events) 1673 1599 return -ENODATA; 1600 + 1601 + /* reset this flag so SIGIO can restart */ 1602 + ioc->aen_event_read_flag=0; 1674 1603 1675 1604 /* Copy the data from kernel memory to user memory 1676 1605 */ ··· 1894 1817 case MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR: 1895 1818 case MPI_FUNCTION_FW_DOWNLOAD: 1896 1819 case MPI_FUNCTION_FC_PRIMITIVE_SEND: 1820 + case MPI_FUNCTION_TOOLBOX: 1821 + case MPI_FUNCTION_SAS_IO_UNIT_CONTROL: 1897 1822 break; 1898 1823 1899 1824 case MPI_FUNCTION_SCSI_IO_REQUEST: ··· 1916 1837 goto done_free_mem; 1917 1838 } 1918 1839 1919 - pScsiReq->MsgFlags = mpt_msg_flags(); 1840 + pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH; 1841 + pScsiReq->MsgFlags |= mpt_msg_flags(); 1842 + 1920 1843 1921 1844 /* verify that app has not requested 1922 1845 * more sense data than driver ··· 1969 1888 } 1970 1889 break; 1971 1890 1891 + case MPI_FUNCTION_SMP_PASSTHROUGH: 1892 + /* Check mf->PassthruFlags to determine if 1893 + * transfer is ImmediateMode or not. 1894 + * Immediate mode returns data in the ReplyFrame. 1895 + * Else, we are sending request and response data 1896 + * in two SGLs at the end of the mf. 
1897 + */ 1898 + break; 1899 + 1900 + case MPI_FUNCTION_SATA_PASSTHROUGH: 1901 + if (!ioc->sh) { 1902 + printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - " 1903 + "SCSI driver is not loaded. \n", 1904 + __FILE__, __LINE__); 1905 + rc = -EFAULT; 1906 + goto done_free_mem; 1907 + } 1908 + break; 1909 + 1972 1910 case MPI_FUNCTION_RAID_ACTION: 1973 1911 /* Just add a SGE 1974 1912 */ ··· 2000 1900 int scsidir = MPI_SCSIIO_CONTROL_READ; 2001 1901 int dataSize; 2002 1902 2003 - pScsiReq->MsgFlags = mpt_msg_flags(); 1903 + pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH; 1904 + pScsiReq->MsgFlags |= mpt_msg_flags(); 1905 + 2004 1906 2005 1907 /* verify that app has not requested 2006 1908 * more sense data than driver ··· 2232 2130 2233 2131 /* Now wait for the command to complete */ 2234 2132 timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT; 2235 - timeout = wait_event_interruptible_timeout(mptctl_wait, 2133 + timeout = wait_event_timeout(mptctl_wait, 2236 2134 ioc->ioctl->wait_done == 1, 2237 2135 HZ*timeout); 2238 2136 ··· 2348 2246 hp_host_info_t __user *uarg = (void __user *) arg; 2349 2247 MPT_ADAPTER *ioc; 2350 2248 struct pci_dev *pdev; 2351 - char *pbuf; 2249 + char *pbuf=NULL; 2352 2250 dma_addr_t buf_dma; 2353 2251 hp_host_info_t karg; 2354 2252 CONFIGPARMS cfg; 2355 2253 ConfigPageHeader_t hdr; 2356 2254 int iocnum; 2357 2255 int rc, cim_rev; 2256 + ToolboxIstwiReadWriteRequest_t *IstwiRWRequest; 2257 + MPT_FRAME_HDR *mf = NULL; 2258 + MPIHeader_t *mpi_hdr; 2358 2259 2359 2260 dctlprintk((": mptctl_hp_hostinfo called.\n")); 2360 2261 /* Reset long to int. Should affect IA64 and SPARC only ··· 2475 2370 2476 2371 karg.base_io_addr = pci_resource_start(pdev, 0); 2477 2372 2478 - if (ioc->bus_type == FC) 2373 + if ((ioc->bus_type == SAS) || (ioc->bus_type == FC)) 2479 2374 karg.bus_phys_width = HP_BUS_WIDTH_UNK; 2480 2375 else 2481 2376 karg.bus_phys_width = HP_BUS_WIDTH_16; ··· 2493 2388 } 2494 2389 } 2495 2390 2496 - cfg.pageAddr = 0; 2497 - cfg.action = MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL; 2498 - cfg.dir = MPI_TB_ISTWI_FLAGS_READ; 2499 - cfg.timeout = 10; 2500 - pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma); 2501 - if (pbuf) { 2502 - cfg.physAddr = buf_dma; 2503 - if ((mpt_toolbox(ioc, &cfg)) == 0) { 2504 - karg.rsvd = *(u32 *)pbuf; 2505 - } 2506 - pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma); 2507 - pbuf = NULL; 2391 + /* 2392 + * Gather ISTWI(Industry Standard Two Wire Interface) Data 2393 + */ 2394 + if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) { 2395 + dfailprintk((MYIOC_s_WARN_FMT "%s, no msg frames!!\n", 2396 + ioc->name,__FUNCTION__)); 2397 + goto out; 2508 2398 } 2399 + 2400 + IstwiRWRequest = (ToolboxIstwiReadWriteRequest_t *)mf; 2401 + mpi_hdr = (MPIHeader_t *) mf; 2402 + memset(IstwiRWRequest,0,sizeof(ToolboxIstwiReadWriteRequest_t)); 2403 + IstwiRWRequest->Function = MPI_FUNCTION_TOOLBOX; 2404 + IstwiRWRequest->Tool = MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL; 2405 + IstwiRWRequest->MsgContext = mpi_hdr->MsgContext; 2406 + IstwiRWRequest->Flags = MPI_TB_ISTWI_FLAGS_READ; 2407 + IstwiRWRequest->NumAddressBytes = 0x01; 2408 + IstwiRWRequest->DataLength = cpu_to_le16(0x04); 2409 + if (pdev->devfn & 1) 2410 + IstwiRWRequest->DeviceAddr = 0xB2; 2411 + else 2412 + IstwiRWRequest->DeviceAddr = 0xB0; 2413 + 2414 + pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma); 2415 + if (!pbuf) 2416 + goto out; 2417 + mpt_add_sge((char *)&IstwiRWRequest->SGL, 2418 + (MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma); 2419 + 2420 + ioc->ioctl->wait_done = 0; 
2421 + mpt_put_msg_frame(mptctl_id, ioc, mf); 2422 + 2423 + rc = wait_event_timeout(mptctl_wait, 2424 + ioc->ioctl->wait_done == 1, 2425 + HZ*MPT_IOCTL_DEFAULT_TIMEOUT /* 10 sec */); 2426 + 2427 + if(rc <=0 && (ioc->ioctl->wait_done != 1 )) { 2428 + /* 2429 + * Now we need to reset the board 2430 + */ 2431 + mpt_free_msg_frame(ioc, mf); 2432 + mptctl_timeout_expired(ioc->ioctl); 2433 + goto out; 2434 + } 2435 + 2436 + /* 2437 + *ISTWI Data Definition 2438 + * pbuf[0] = FW_VERSION = 0x4 2439 + * pbuf[1] = Bay Count = 6 or 4 or 2, depending on 2440 + * the config, you should be seeing one out of these three values 2441 + * pbuf[2] = Drive Installed Map = bit pattern depend on which 2442 + * bays have drives in them 2443 + * pbuf[3] = Checksum (0x100 = (byte0 + byte2 + byte3) 2444 + */ 2445 + if (ioc->ioctl->status & MPT_IOCTL_STATUS_RF_VALID) 2446 + karg.rsvd = *(u32 *)pbuf; 2447 + 2448 + out: 2449 + if (pbuf) 2450 + pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma); 2509 2451 2510 2452 /* Copy the data from kernel memory to user memory 2511 2453 */ ··· 2611 2459 2612 2460 /* There is nothing to do for FCP parts. 2613 2461 */ 2614 - if (ioc->bus_type == FC) 2462 + if ((ioc->bus_type == SAS) || (ioc->bus_type == FC)) 2615 2463 return 0; 2616 2464 2617 2465 if ((ioc->spi_data.sdp0length == 0) || (ioc->sh == NULL)) ··· 2721 2569 static struct file_operations mptctl_fops = { 2722 2570 .owner = THIS_MODULE, 2723 2571 .llseek = no_llseek, 2572 + .release = mptctl_release, 2573 + .fasync = mptctl_fasync, 2724 2574 .unlocked_ioctl = mptctl_ioctl, 2725 2575 #ifdef CONFIG_COMPAT 2726 2576 .compat_ioctl = compat_mpctl_ioctl, ··· 2965 2811 dprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n")); 2966 2812 } else { 2967 2813 /* FIXME! */ 2814 + } 2815 + 2816 + if (mpt_event_register(mptctl_id, mptctl_event_process) == 0) { 2817 + devtprintk((KERN_INFO MYNAM 2818 + ": Registered for IOC event notifications\n")); 2968 2819 } 2969 2820 2970 2821 return 0;
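The mptctl.c changes above wire /dev/mptctl into the kernel's fasync machinery: an application puts its open descriptor into O_ASYNC mode (which lands in mptctl_fasync()), the driver's new mptctl_event_process() raises SIGIO through kill_fasync() when a persistent or requested event arrives, and aen_event_read_flag holds off further signals until the log is drained through the MPTEVENTREPORT ioctl. A minimal user-space sketch of that flow, assuming only the standard fcntl()/signal() conventions and eliding the driver-specific ioctl argument structure:

/* User-space side of the SIGIO notification added above (sketch; error
 * handling trimmed, MPTEVENTREPORT argument setup elided). */
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static volatile sig_atomic_t event_pending;

static void on_sigio(int sig)
{
	event_pending = 1;	/* defer real work out of the handler */
}

int main(void)
{
	int fd = open("/dev/mptctl", O_RDWR);

	signal(SIGIO, on_sigio);
	fcntl(fd, F_SETOWN, getpid());	/* deliver SIGIO to this process */
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);	/* -> mptctl_fasync() */

	for (;;) {
		pause();	/* woken by kill_fasync() in the driver */
		if (event_pending) {
			event_pending = 0;
			/* drain events with ioctl(fd, MPTEVENTREPORT, ...);
			 * that read clears aen_event_read_flag, re-arming
			 * SIGIO for the next event. */
		}
	}
}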
+3 -1
drivers/message/fusion/mptctl.h
···
169 169 * Read only.
170 170 * Data starts at offset 0xC
171 171 */
172     - #define MPT_IOCTL_INTERFACE_FC (0x01)
172 172 #define MPT_IOCTL_INTERFACE_SCSI (0x00)
    173 + #define MPT_IOCTL_INTERFACE_FC (0x01)
    174 + #define MPT_IOCTL_INTERFACE_FC_IP (0x02)
    175 + #define MPT_IOCTL_INTERFACE_SAS (0x03)
174 176 #define MPT_IOCTL_VERSION_LENGTH (32)
175 177
176 178 struct mpt_ioctl_iocinfo {
+1 -1
drivers/message/fusion/mptscsih.c
···
2489 2489 int idx;
2490 2490 MPT_ADAPTER *ioc = hd->ioc;
2491 2491
2492      - idx = ioc->eventContext % ioc->eventLogSize;
2492      + idx = ioc->eventContext % MPTCTL_EVENT_LOG_SIZE;
2493 2493 ioc->events[idx].event = MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE;
2494 2494 ioc->events[idx].eventContext = ioc->eventContext;
2495 2495
+30 -46
drivers/s390/scsi/zfcp_dbf.c
··· 710 710 _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, 711 711 struct zfcp_adapter *adapter, 712 712 struct scsi_cmnd *scsi_cmnd, 713 - struct zfcp_fsf_req *new_fsf_req) 713 + struct zfcp_fsf_req *fsf_req, 714 + struct zfcp_fsf_req *old_fsf_req) 714 715 { 715 - struct zfcp_fsf_req *fsf_req = 716 - (struct zfcp_fsf_req *)scsi_cmnd->host_scribble; 717 716 struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf; 718 717 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; 719 718 unsigned long flags; ··· 726 727 if (offset == 0) { 727 728 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); 728 729 strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE); 729 - if (scsi_cmnd->device) { 730 - rec->scsi_id = scsi_cmnd->device->id; 731 - rec->scsi_lun = scsi_cmnd->device->lun; 730 + if (scsi_cmnd != NULL) { 731 + if (scsi_cmnd->device) { 732 + rec->scsi_id = scsi_cmnd->device->id; 733 + rec->scsi_lun = scsi_cmnd->device->lun; 734 + } 735 + rec->scsi_result = scsi_cmnd->result; 736 + rec->scsi_cmnd = (unsigned long)scsi_cmnd; 737 + rec->scsi_serial = scsi_cmnd->serial_number; 738 + memcpy(rec->scsi_opcode, &scsi_cmnd->cmnd, 739 + min((int)scsi_cmnd->cmd_len, 740 + ZFCP_DBF_SCSI_OPCODE)); 741 + rec->scsi_retries = scsi_cmnd->retries; 742 + rec->scsi_allowed = scsi_cmnd->allowed; 732 743 } 733 - rec->scsi_result = scsi_cmnd->result; 734 - rec->scsi_cmnd = (unsigned long)scsi_cmnd; 735 - rec->scsi_serial = scsi_cmnd->serial_number; 736 - memcpy(rec->scsi_opcode, 737 - &scsi_cmnd->cmnd, 738 - min((int)scsi_cmnd->cmd_len, 739 - ZFCP_DBF_SCSI_OPCODE)); 740 - rec->scsi_retries = scsi_cmnd->retries; 741 - rec->scsi_allowed = scsi_cmnd->allowed; 742 744 if (fsf_req != NULL) { 743 745 fcp_rsp = (struct fcp_rsp_iu *) 744 746 &(fsf_req->qtcb->bottom.io.fcp_rsp); ··· 772 772 rec->fsf_seqno = fsf_req->seq_no; 773 773 rec->fsf_issued = fsf_req->issued; 774 774 } 775 - if (new_fsf_req != NULL) { 776 - rec->type.new_fsf_req.fsf_reqid = 777 - (unsigned long) 778 - new_fsf_req; 779 - rec->type.new_fsf_req.fsf_seqno = 780 - new_fsf_req->seq_no; 781 - rec->type.new_fsf_req.fsf_issued = 782 - new_fsf_req->issued; 783 - } 775 + rec->type.old_fsf_reqid = 776 + (unsigned long) old_fsf_req; 784 777 } else { 785 778 strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE); 786 779 dump->total_size = buflen; ··· 794 801 inline void 795 802 zfcp_scsi_dbf_event_result(const char *tag, int level, 796 803 struct zfcp_adapter *adapter, 797 - struct scsi_cmnd *scsi_cmnd) 804 + struct scsi_cmnd *scsi_cmnd, 805 + struct zfcp_fsf_req *fsf_req) 798 806 { 799 - _zfcp_scsi_dbf_event_common("rslt", 800 - tag, level, adapter, scsi_cmnd, NULL); 807 + _zfcp_scsi_dbf_event_common("rslt", tag, level, 808 + adapter, scsi_cmnd, fsf_req, NULL); 801 809 } 802 810 803 811 inline void 804 812 zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, 805 813 struct scsi_cmnd *scsi_cmnd, 806 - struct zfcp_fsf_req *new_fsf_req) 814 + struct zfcp_fsf_req *new_fsf_req, 815 + struct zfcp_fsf_req *old_fsf_req) 807 816 { 808 - _zfcp_scsi_dbf_event_common("abrt", 809 - tag, 1, adapter, scsi_cmnd, new_fsf_req); 817 + _zfcp_scsi_dbf_event_common("abrt", tag, 1, 818 + adapter, scsi_cmnd, new_fsf_req, old_fsf_req); 810 819 } 811 820 812 821 inline void ··· 818 823 struct zfcp_adapter *adapter = unit->port->adapter; 819 824 820 825 _zfcp_scsi_dbf_event_common(flag == FCP_TARGET_RESET ? 
"trst" : "lrst", 821 - tag, 1, adapter, scsi_cmnd, NULL); 826 + tag, 1, adapter, scsi_cmnd, NULL, NULL); 822 827 } 823 828 824 829 static int ··· 851 856 rec->scsi_retries); 852 857 len += zfcp_dbf_view(out_buf + len, "scsi_allowed", "0x%02x", 853 858 rec->scsi_allowed); 859 + if (strncmp(rec->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0) { 860 + len += zfcp_dbf_view(out_buf + len, "old_fsf_reqid", "0x%0Lx", 861 + rec->type.old_fsf_reqid); 862 + } 854 863 len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx", 855 864 rec->fsf_reqid); 856 865 len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x", ··· 882 883 min((int)rec->type.fcp.sns_info_len, 883 884 ZFCP_DBF_SCSI_FCP_SNS_INFO), 0, 884 885 rec->type.fcp.sns_info_len); 885 - } else if (strncmp(rec->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0) { 886 - len += zfcp_dbf_view(out_buf + len, "fsf_reqid_abort", "0x%0Lx", 887 - rec->type.new_fsf_req.fsf_reqid); 888 - len += zfcp_dbf_view(out_buf + len, "fsf_seqno_abort", "0x%08x", 889 - rec->type.new_fsf_req.fsf_seqno); 890 - len += zfcp_dbf_stck(out_buf + len, "fsf_issued", 891 - rec->type.new_fsf_req.fsf_issued); 892 - } else if ((strncmp(rec->tag, "trst", ZFCP_DBF_TAG_SIZE) == 0) || 893 - (strncmp(rec->tag, "lrst", ZFCP_DBF_TAG_SIZE) == 0)) { 894 - len += zfcp_dbf_view(out_buf + len, "fsf_reqid_reset", "0x%0Lx", 895 - rec->type.new_fsf_req.fsf_reqid); 896 - len += zfcp_dbf_view(out_buf + len, "fsf_seqno_reset", "0x%08x", 897 - rec->type.new_fsf_req.fsf_seqno); 898 - len += zfcp_dbf_stck(out_buf + len, "fsf_issued", 899 - rec->type.new_fsf_req.fsf_issued); 900 886 } 901 887 902 888 len += sprintf(out_buf + len, "\n");
+1 -12
drivers/s390/scsi/zfcp_def.h
···
152 152 #define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP 100
153 153 #define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7
154 154
155     - /* Retry 5 times every 2 second, then every minute */
156     - #define ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES 5
157     - #define ZFCP_EXCHANGE_PORT_DATA_SHORT_SLEEP 200
158     - #define ZFCP_EXCHANGE_PORT_DATA_LONG_SLEEP 6000
159     -
160 155 /* timeout value for "default timer" for fsf requests */
161 156 #define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ);
162 157
···
424 429 u32 fsf_seqno;
425 430 u64 fsf_issued;
426 431 union {
427     - struct {
428     - u64 fsf_reqid;
429     - u32 fsf_seqno;
430     - u64 fsf_issued;
431     - } new_fsf_req;
    432 + u64 old_fsf_reqid;
432 433 struct {
433 434 u8 rsp_validity;
434 435 u8 rsp_scsi_status;
···
906 915 wwn_t peer_wwnn; /* P2P peer WWNN */
907 916 wwn_t peer_wwpn; /* P2P peer WWPN */
908 917 u32 peer_d_id; /* P2P peer D_ID */
909     - wwn_t physical_wwpn; /* WWPN of physical port */
910     - u32 physical_s_id; /* local FC port ID */
911 918 struct ccw_device *ccw_device; /* S/390 ccw device */
912 919 u8 fc_service_class;
913 920 u32 hydra_version; /* Hydra version */
+30 -52
drivers/s390/scsi/zfcp_erp.c
··· 2246 2246 { 2247 2247 int retval; 2248 2248 2249 - if ((atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, 2250 - &erp_action->adapter->status)) && 2251 - (erp_action->adapter->adapter_features & 2252 - FSF_FEATURE_HBAAPI_MANAGEMENT)) { 2253 - zfcp_erp_adapter_strategy_open_fsf_xport(erp_action); 2254 - atomic_set(&erp_action->adapter->erp_counter, 0); 2255 - return ZFCP_ERP_FAILED; 2256 - } 2257 - 2258 2249 retval = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action); 2259 2250 if (retval == ZFCP_ERP_FAILED) 2260 2251 return ZFCP_ERP_FAILED; ··· 2257 2266 return zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action); 2258 2267 } 2259 2268 2260 - /* 2261 - * function: 2262 - * 2263 - * purpose: 2264 - * 2265 - * returns: 2266 - */ 2267 2269 static int 2268 2270 zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action) 2269 2271 { ··· 2334 2350 zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action) 2335 2351 { 2336 2352 int ret; 2337 - int retries; 2338 - int sleep; 2339 - struct zfcp_adapter *adapter = erp_action->adapter; 2353 + struct zfcp_adapter *adapter; 2340 2354 2355 + adapter = erp_action->adapter; 2341 2356 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); 2342 2357 2343 - retries = 0; 2344 - do { 2345 - write_lock(&adapter->erp_lock); 2346 - zfcp_erp_action_to_running(erp_action); 2347 - write_unlock(&adapter->erp_lock); 2348 - zfcp_erp_timeout_init(erp_action); 2349 - ret = zfcp_fsf_exchange_port_data(erp_action, adapter, NULL); 2350 - if (ret == -EOPNOTSUPP) { 2351 - debug_text_event(adapter->erp_dbf, 3, "a_xport_notsupp"); 2352 - return ZFCP_ERP_SUCCEEDED; 2353 - } else if (ret) { 2354 - debug_text_event(adapter->erp_dbf, 3, "a_xport_failed"); 2355 - return ZFCP_ERP_FAILED; 2356 - } 2357 - debug_text_event(adapter->erp_dbf, 6, "a_xport_ok"); 2358 + write_lock(&adapter->erp_lock); 2359 + zfcp_erp_action_to_running(erp_action); 2360 + write_unlock(&adapter->erp_lock); 2358 2361 2359 - down(&adapter->erp_ready_sem); 2360 - if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) { 2361 - ZFCP_LOG_INFO("error: exchange of port data " 2362 - "for adapter %s timed out\n", 2363 - zfcp_get_busid_by_adapter(adapter)); 2364 - break; 2365 - } 2366 - if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, 2367 - &adapter->status)) 2368 - break; 2362 + zfcp_erp_timeout_init(erp_action); 2363 + ret = zfcp_fsf_exchange_port_data(erp_action, adapter, NULL); 2364 + if (ret == -EOPNOTSUPP) { 2365 + debug_text_event(adapter->erp_dbf, 3, "a_xport_notsupp"); 2366 + return ZFCP_ERP_SUCCEEDED; 2367 + } else if (ret) { 2368 + debug_text_event(adapter->erp_dbf, 3, "a_xport_failed"); 2369 + return ZFCP_ERP_FAILED; 2370 + } 2371 + debug_text_event(adapter->erp_dbf, 6, "a_xport_ok"); 2369 2372 2370 - if (retries < ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES) { 2371 - sleep = ZFCP_EXCHANGE_PORT_DATA_SHORT_SLEEP; 2372 - retries++; 2373 - } else 2374 - sleep = ZFCP_EXCHANGE_PORT_DATA_LONG_SLEEP; 2375 - schedule_timeout(sleep); 2376 - } while (1); 2373 + ret = ZFCP_ERP_SUCCEEDED; 2374 + down(&adapter->erp_ready_sem); 2375 + if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) { 2376 + ZFCP_LOG_INFO("error: exchange port data timed out (adapter " 2377 + "%s)\n", zfcp_get_busid_by_adapter(adapter)); 2378 + ret = ZFCP_ERP_FAILED; 2379 + } 2380 + if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status)) { 2381 + ZFCP_LOG_INFO("error: exchange port data failed (adapter " 2382 + "%s\n", zfcp_get_busid_by_adapter(adapter)); 2383 + ret = 
ZFCP_ERP_FAILED; 2384 + } 2377 2385 2378 - return ZFCP_ERP_SUCCEEDED; 2386 + return ret; 2379 2387 } 2380 2388 2381 2389 /* ··· 3415 3439 "(adapter %s, wwpn=0x%016Lx)\n", 3416 3440 zfcp_get_busid_by_port(port), 3417 3441 port->wwpn); 3442 + else 3443 + scsi_flush_work(adapter->scsi_host); 3418 3444 } 3419 3445 zfcp_port_put(port); 3420 3446 break;
+3 -2
drivers/s390/scsi/zfcp_ext.h
···
194 194 extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *);
195 195
196 196 extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *,
197     - struct scsi_cmnd *);
    197 + struct scsi_cmnd *,
    198 + struct zfcp_fsf_req *);
198 199 extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
199     - struct scsi_cmnd *,
    200 + struct scsi_cmnd *, struct zfcp_fsf_req *,
200 201 struct zfcp_fsf_req *);
201 202 extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
202 203 struct scsi_cmnd *);
+44 -36
drivers/s390/scsi/zfcp_fsf.c
··· 388 388 case FSF_PROT_LINK_DOWN: 389 389 zfcp_fsf_link_down_info_eval(adapter, 390 390 &prot_status_qual->link_down_info); 391 + zfcp_erp_adapter_reopen(adapter, 0); 391 392 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 392 393 break; 393 394 ··· 559 558 560 559 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); 561 560 562 - if (link_down == NULL) { 563 - zfcp_erp_adapter_reopen(adapter, 0); 564 - return; 565 - } 561 + if (link_down == NULL) 562 + goto out; 566 563 567 564 switch (link_down->error_code) { 568 565 case FSF_PSQ_LINK_NO_LIGHT: ··· 642 643 link_down->explanation_code, 643 644 link_down->vendor_specific_code); 644 645 645 - switch (link_down->error_code) { 646 - case FSF_PSQ_LINK_NO_LIGHT: 647 - case FSF_PSQ_LINK_WRAP_PLUG: 648 - case FSF_PSQ_LINK_NO_FCP: 649 - case FSF_PSQ_LINK_FIRMWARE_UPDATE: 650 - zfcp_erp_adapter_reopen(adapter, 0); 651 - break; 652 - default: 653 - zfcp_erp_adapter_failed(adapter); 654 - } 646 + out: 647 + zfcp_erp_adapter_failed(adapter); 655 648 } 656 649 657 650 /* ··· 2295 2304 return retval; 2296 2305 } 2297 2306 2307 + /** 2308 + * zfcp_fsf_exchange_port_evaluate 2309 + * @fsf_req: fsf_req which belongs to xchg port data request 2310 + * @xchg_ok: specifies if xchg port data was incomplete or complete (0/1) 2311 + */ 2312 + static void 2313 + zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok) 2314 + { 2315 + struct zfcp_adapter *adapter; 2316 + struct fsf_qtcb *qtcb; 2317 + struct fsf_qtcb_bottom_port *bottom, *data; 2318 + struct Scsi_Host *shost; 2319 + 2320 + adapter = fsf_req->adapter; 2321 + qtcb = fsf_req->qtcb; 2322 + bottom = &qtcb->bottom.port; 2323 + shost = adapter->scsi_host; 2324 + 2325 + data = (struct fsf_qtcb_bottom_port*) fsf_req->data; 2326 + if (data) 2327 + memcpy(data, bottom, sizeof(struct fsf_qtcb_bottom_port)); 2328 + 2329 + if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) 2330 + fc_host_permanent_port_name(shost) = bottom->wwpn; 2331 + else 2332 + fc_host_permanent_port_name(shost) = fc_host_port_name(shost); 2333 + fc_host_maxframe_size(shost) = bottom->maximum_frame_size; 2334 + fc_host_supported_speeds(shost) = bottom->supported_speed; 2335 + } 2298 2336 2299 2337 /** 2300 2338 * zfcp_fsf_exchange_port_data_handler - handler for exchange_port_data request ··· 2332 2312 static void 2333 2313 zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *fsf_req) 2334 2314 { 2335 - struct zfcp_adapter *adapter = fsf_req->adapter; 2336 - struct Scsi_Host *shost = adapter->scsi_host; 2337 - struct fsf_qtcb *qtcb = fsf_req->qtcb; 2338 - struct fsf_qtcb_bottom_port *bottom, *data; 2315 + struct zfcp_adapter *adapter; 2316 + struct fsf_qtcb *qtcb; 2317 + 2318 + adapter = fsf_req->adapter; 2319 + qtcb = fsf_req->qtcb; 2339 2320 2340 2321 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) 2341 2322 return; 2342 2323 2343 2324 switch (qtcb->header.fsf_status) { 2344 2325 case FSF_GOOD: 2326 + zfcp_fsf_exchange_port_evaluate(fsf_req, 1); 2345 2327 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); 2346 - 2347 - bottom = &qtcb->bottom.port; 2348 - data = (struct fsf_qtcb_bottom_port*) fsf_req->data; 2349 - if (data) 2350 - memcpy(data, bottom, sizeof(struct fsf_qtcb_bottom_port)); 2351 - if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) 2352 - fc_host_permanent_port_name(shost) = bottom->wwpn; 2353 - else 2354 - fc_host_permanent_port_name(shost) = 2355 - fc_host_port_name(shost); 2356 - fc_host_maxframe_size(shost) = bottom->maximum_frame_size; 2357 - 
fc_host_supported_speeds(shost) = bottom->supported_speed; 2358 2328 break; 2359 - 2360 2329 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: 2330 + zfcp_fsf_exchange_port_evaluate(fsf_req, 0); 2361 2331 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); 2362 - 2363 2332 zfcp_fsf_link_down_info_eval(adapter, 2364 2333 &qtcb->header.fsf_status_qual.link_down_info); 2365 2334 break; 2366 - 2367 2335 default: 2368 2336 debug_text_event(adapter->erp_dbf, 0, "xchg-port-ng"); 2369 2337 debug_event(adapter->erp_dbf, 0, ··· 4211 4203 ZFCP_LOG_DEBUG("scpnt->result =0x%x\n", scpnt->result); 4212 4204 4213 4205 if (scpnt->result != 0) 4214 - zfcp_scsi_dbf_event_result("erro", 3, fsf_req->adapter, scpnt); 4206 + zfcp_scsi_dbf_event_result("erro", 3, fsf_req->adapter, scpnt, fsf_req); 4215 4207 else if (scpnt->retries > 0) 4216 - zfcp_scsi_dbf_event_result("retr", 4, fsf_req->adapter, scpnt); 4208 + zfcp_scsi_dbf_event_result("retr", 4, fsf_req->adapter, scpnt, fsf_req); 4217 4209 else 4218 - zfcp_scsi_dbf_event_result("norm", 6, fsf_req->adapter, scpnt); 4210 + zfcp_scsi_dbf_event_result("norm", 6, fsf_req->adapter, scpnt, fsf_req); 4219 4211 4220 4212 /* cleanup pointer (need this especially for abort) */ 4221 4213 scpnt->host_scribble = NULL;
+10 -5
drivers/s390/scsi/zfcp_scsi.c
···
242 242 if ((scpnt->device != NULL) && (scpnt->device->host != NULL))
243 243 zfcp_scsi_dbf_event_result("fail", 4,
244 244 (struct zfcp_adapter*) scpnt->device->host->hostdata[0],
245     - scpnt);
    245 + scpnt, NULL);
246 246 /* return directly */
247 247 scpnt->scsi_done(scpnt);
248 248 }
···
446 446 old_fsf_req = (struct zfcp_fsf_req *) scpnt->host_scribble;
447 447 if (!old_fsf_req) {
448 448 write_unlock_irqrestore(&adapter->abort_lock, flags);
449     - zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, new_fsf_req);
    449 + zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, NULL);
450 450 retval = SUCCESS;
451 451 goto out;
452 452 }
···
460 460 adapter, unit, 0);
461 461 if (!new_fsf_req) {
462 462 ZFCP_LOG_INFO("error: initiation of Abort FCP Cmnd failed\n");
    463 + zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL,
    464 + old_fsf_req);
463 465 retval = FAILED;
464 466 goto out;
465 467 }
···
472 470
473 471 /* status should be valid since signals were not permitted */
474 472 if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) {
475     - zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, new_fsf_req);
    473 + zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, new_fsf_req,
    474 + NULL);
476 475 retval = SUCCESS;
477 476 } else if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) {
478     - zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, new_fsf_req);
    477 + zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, new_fsf_req,
    478 + NULL);
479 479 retval = SUCCESS;
480 480 } else {
481     - zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, new_fsf_req);
    481 + zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, new_fsf_req,
    482 + NULL);
482 483 retval = FAILED;
483 484 }
484 485 zfcp_fsf_req_free(new_fsf_req);
-4
drivers/s390/scsi/zfcp_sysfs_adapter.c
···
 55  55 ZFCP_DEFINE_ADAPTER_ATTR(peer_wwnn, "0x%016llx\n", adapter->peer_wwnn);
 56  56 ZFCP_DEFINE_ADAPTER_ATTR(peer_wwpn, "0x%016llx\n", adapter->peer_wwpn);
 57  57 ZFCP_DEFINE_ADAPTER_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id);
 58      - ZFCP_DEFINE_ADAPTER_ATTR(physical_wwpn, "0x%016llx\n", adapter->physical_wwpn);
 59      - ZFCP_DEFINE_ADAPTER_ATTR(physical_s_id, "0x%06x\n", adapter->physical_s_id);
 60  58 ZFCP_DEFINE_ADAPTER_ATTR(card_version, "0x%04x\n", adapter->hydra_version);
 61  59 ZFCP_DEFINE_ADAPTER_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version);
 62  60 ZFCP_DEFINE_ADAPTER_ATTR(hardware_version, "0x%08x\n",
···
239 241 &dev_attr_peer_wwnn.attr,
240 242 &dev_attr_peer_wwpn.attr,
241 243 &dev_attr_peer_d_id.attr,
242     - &dev_attr_physical_wwpn.attr,
243     - &dev_attr_physical_s_id.attr,
244 244 &dev_attr_card_version.attr,
245 245 &dev_attr_lic_version.attr,
246 246 &dev_attr_status.attr,
+4 -3
drivers/scsi/3w-9xxx.c
···
  61   61 Add support for embedded firmware error strings.
  62   62 2.26.02.003 - Correctly handle single sgl's with use_sg=1.
  63   63 2.26.02.004 - Add support for 9550SX controllers.
       64 + 2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
  64   65 */
  65   66
···
  85   84 #include "3w-9xxx.h"
  86   85
  87   86 /* Globals */
  88      - #define TW_DRIVER_VERSION "2.26.02.004"
       87 + #define TW_DRIVER_VERSION "2.26.02.005"
  89   88 static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
  90   89 static unsigned int twa_device_extension_count;
  91   90 static int twa_major = -1;
···
1409 1408 dma_addr_t mapping;
1410 1409 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1411 1410 struct pci_dev *pdev = tw_dev->tw_pci_dev;
1412      - int retval = 0;
     1411 + dma_addr_t retval = 0;
1414 1413 if (cmd->request_bufflen == 0) {
1415 1414 retval = 0;
···
1799 1798 int i, sg_count;
1800 1799 struct scsi_cmnd *srb = NULL;
1801 1800 struct scatterlist *sglist = NULL;
1802      - u32 buffaddr = 0x0;
     1801 + dma_addr_t buffaddr = 0x0;
1803 1802 int retval = 1;
1804 1803 if (tw_dev->srb[request_id]) {
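The two type changes in this hunk are the whole 2.26.02.005 fix: pci_map_single() hands back a dma_addr_t, which is 64 bits wide on systems that can address more than 4 GB, and the old int/u32 temporaries silently dropped the upper 32 bits before the address reached the controller. A standalone sketch of the failure mode, assuming a 64-bit dma_addr_t:

/* Truncation sketch: storing a >4GB bus address in a 32-bit variable
 * (the pre-2.26.02.005 behavior) points the device at the wrong page. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;	/* 64-bit on large-memory systems */

int main(void)
{
	dma_addr_t mapping = 0x100001000ULL;		/* bus address above 4 GB */
	uint32_t old_retval = (uint32_t)mapping;	/* old code: keeps 0x00001000 only */
	dma_addr_t new_retval = mapping;		/* fixed code: full address survives */

	printf("mapped %#llx, old code used %#x, fixed code uses %#llx\n",
	       (unsigned long long)mapping, old_retval,
	       (unsigned long long)new_retval);
	return 0;
}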
+85 -132
drivers/scsi/aacraid/aachba.c
··· 173 173 int status = 0; 174 174 struct fib * fibptr; 175 175 176 - if (!(fibptr = fib_alloc(dev))) 176 + if (!(fibptr = aac_fib_alloc(dev))) 177 177 return -ENOMEM; 178 178 179 - fib_init(fibptr); 179 + aac_fib_init(fibptr); 180 180 { 181 181 struct aac_get_config_status *dinfo; 182 182 dinfo = (struct aac_get_config_status *) fib_data(fibptr); ··· 186 186 dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data)); 187 187 } 188 188 189 - status = fib_send(ContainerCommand, 189 + status = aac_fib_send(ContainerCommand, 190 190 fibptr, 191 191 sizeof (struct aac_get_config_status), 192 192 FsaNormal, ··· 209 209 status = -EINVAL; 210 210 } 211 211 } 212 - fib_complete(fibptr); 212 + aac_fib_complete(fibptr); 213 213 /* Send a CT_COMMIT_CONFIG to enable discovery of devices */ 214 214 if (status >= 0) { 215 215 if (commit == 1) { 216 216 struct aac_commit_config * dinfo; 217 - fib_init(fibptr); 217 + aac_fib_init(fibptr); 218 218 dinfo = (struct aac_commit_config *) fib_data(fibptr); 219 219 220 220 dinfo->command = cpu_to_le32(VM_ContainerConfig); 221 221 dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG); 222 222 223 - status = fib_send(ContainerCommand, 223 + status = aac_fib_send(ContainerCommand, 224 224 fibptr, 225 225 sizeof (struct aac_commit_config), 226 226 FsaNormal, 227 227 1, 1, 228 228 NULL, NULL); 229 - fib_complete(fibptr); 229 + aac_fib_complete(fibptr); 230 230 } else if (commit == 0) { 231 231 printk(KERN_WARNING 232 232 "aac_get_config_status: Foreign device configurations are being ignored\n"); 233 233 } 234 234 } 235 - fib_free(fibptr); 235 + aac_fib_free(fibptr); 236 236 return status; 237 237 } 238 238 ··· 255 255 256 256 instance = dev->scsi_host_ptr->unique_id; 257 257 258 - if (!(fibptr = fib_alloc(dev))) 258 + if (!(fibptr = aac_fib_alloc(dev))) 259 259 return -ENOMEM; 260 260 261 - fib_init(fibptr); 261 + aac_fib_init(fibptr); 262 262 dinfo = (struct aac_get_container_count *) fib_data(fibptr); 263 263 dinfo->command = cpu_to_le32(VM_ContainerConfig); 264 264 dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT); 265 265 266 - status = fib_send(ContainerCommand, 266 + status = aac_fib_send(ContainerCommand, 267 267 fibptr, 268 268 sizeof (struct aac_get_container_count), 269 269 FsaNormal, ··· 272 272 if (status >= 0) { 273 273 dresp = (struct aac_get_container_count_resp *)fib_data(fibptr); 274 274 maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries); 275 - fib_complete(fibptr); 275 + aac_fib_complete(fibptr); 276 276 } 277 277 278 278 if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS) ··· 280 280 fsa_dev_ptr = (struct fsa_dev_info *) kmalloc( 281 281 sizeof(*fsa_dev_ptr) * maximum_num_containers, GFP_KERNEL); 282 282 if (!fsa_dev_ptr) { 283 - fib_free(fibptr); 283 + aac_fib_free(fibptr); 284 284 return -ENOMEM; 285 285 } 286 286 memset(fsa_dev_ptr, 0, sizeof(*fsa_dev_ptr) * maximum_num_containers); ··· 294 294 295 295 fsa_dev_ptr[index].devname[0] = '\0'; 296 296 297 - fib_init(fibptr); 297 + aac_fib_init(fibptr); 298 298 dinfo = (struct aac_query_mount *) fib_data(fibptr); 299 299 300 300 dinfo->command = cpu_to_le32(VM_NameServe); 301 301 dinfo->count = cpu_to_le32(index); 302 302 dinfo->type = cpu_to_le32(FT_FILESYS); 303 303 304 - status = fib_send(ContainerCommand, 304 + status = aac_fib_send(ContainerCommand, 305 305 fibptr, 306 306 sizeof (struct aac_query_mount), 307 307 FsaNormal, ··· 319 319 dinfo->count = cpu_to_le32(index); 320 320 dinfo->type = cpu_to_le32(FT_FILESYS); 321 321 322 - if 
(fib_send(ContainerCommand, 322 + if (aac_fib_send(ContainerCommand, 323 323 fibptr, 324 324 sizeof(struct aac_query_mount), 325 325 FsaNormal, ··· 347 347 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) 348 348 fsa_dev_ptr[index].ro = 1; 349 349 } 350 - fib_complete(fibptr); 350 + aac_fib_complete(fibptr); 351 351 /* 352 352 * If there are no more containers, then stop asking. 353 353 */ ··· 355 355 break; 356 356 } 357 357 } 358 - fib_free(fibptr); 358 + aac_fib_free(fibptr); 359 359 return status; 360 360 } 361 361 ··· 413 413 414 414 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 415 415 416 - fib_complete(fibptr); 417 - fib_free(fibptr); 416 + aac_fib_complete(fibptr); 417 + aac_fib_free(fibptr); 418 418 scsicmd->scsi_done(scsicmd); 419 419 } 420 420 ··· 430 430 431 431 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 432 432 433 - if (!(cmd_fibcontext = fib_alloc(dev))) 433 + if (!(cmd_fibcontext = aac_fib_alloc(dev))) 434 434 return -ENOMEM; 435 435 436 - fib_init(cmd_fibcontext); 436 + aac_fib_init(cmd_fibcontext); 437 437 dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext); 438 438 439 439 dinfo->command = cpu_to_le32(VM_ContainerConfig); ··· 441 441 dinfo->cid = cpu_to_le32(cid); 442 442 dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data)); 443 443 444 - status = fib_send(ContainerCommand, 444 + status = aac_fib_send(ContainerCommand, 445 445 cmd_fibcontext, 446 446 sizeof (struct aac_get_name), 447 447 FsaNormal, ··· 455 455 if (status == -EINPROGRESS) 456 456 return 0; 457 457 458 - printk(KERN_WARNING "aac_get_container_name: fib_send failed with status: %d.\n", status); 459 - fib_complete(cmd_fibcontext); 460 - fib_free(cmd_fibcontext); 458 + printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status); 459 + aac_fib_complete(cmd_fibcontext); 460 + aac_fib_free(cmd_fibcontext); 461 461 return -1; 462 462 } 463 463 464 464 /** 465 - * probe_container - query a logical volume 465 + * aac_probe_container - query a logical volume 466 466 * @dev: device to query 467 467 * @cid: container identifier 468 468 * ··· 470 470 * is updated in the struct fsa_dev_info structure rather than returned. 
471 471 */ 472 472 473 - int probe_container(struct aac_dev *dev, int cid) 473 + int aac_probe_container(struct aac_dev *dev, int cid) 474 474 { 475 475 struct fsa_dev_info *fsa_dev_ptr; 476 476 int status; ··· 482 482 fsa_dev_ptr = dev->fsa_dev; 483 483 instance = dev->scsi_host_ptr->unique_id; 484 484 485 - if (!(fibptr = fib_alloc(dev))) 485 + if (!(fibptr = aac_fib_alloc(dev))) 486 486 return -ENOMEM; 487 487 488 - fib_init(fibptr); 488 + aac_fib_init(fibptr); 489 489 490 490 dinfo = (struct aac_query_mount *)fib_data(fibptr); 491 491 ··· 493 493 dinfo->count = cpu_to_le32(cid); 494 494 dinfo->type = cpu_to_le32(FT_FILESYS); 495 495 496 - status = fib_send(ContainerCommand, 496 + status = aac_fib_send(ContainerCommand, 497 497 fibptr, 498 498 sizeof(struct aac_query_mount), 499 499 FsaNormal, 500 500 1, 1, 501 501 NULL, NULL); 502 502 if (status < 0) { 503 - printk(KERN_WARNING "aacraid: probe_container query failed.\n"); 503 + printk(KERN_WARNING "aacraid: aac_probe_container query failed.\n"); 504 504 goto error; 505 505 } 506 506 ··· 512 512 dinfo->count = cpu_to_le32(cid); 513 513 dinfo->type = cpu_to_le32(FT_FILESYS); 514 514 515 - if (fib_send(ContainerCommand, 515 + if (aac_fib_send(ContainerCommand, 516 516 fibptr, 517 517 sizeof(struct aac_query_mount), 518 518 FsaNormal, ··· 535 535 } 536 536 537 537 error: 538 - fib_complete(fibptr); 539 - fib_free(fibptr); 538 + aac_fib_complete(fibptr); 539 + aac_fib_free(fibptr); 540 540 541 541 return status; 542 542 } ··· 700 700 struct aac_bus_info *command; 701 701 struct aac_bus_info_response *bus_info; 702 702 703 - if (!(fibptr = fib_alloc(dev))) 703 + if (!(fibptr = aac_fib_alloc(dev))) 704 704 return -ENOMEM; 705 705 706 - fib_init(fibptr); 706 + aac_fib_init(fibptr); 707 707 info = (struct aac_adapter_info *) fib_data(fibptr); 708 708 memset(info,0,sizeof(*info)); 709 709 710 - rcode = fib_send(RequestAdapterInfo, 710 + rcode = aac_fib_send(RequestAdapterInfo, 711 711 fibptr, 712 712 sizeof(*info), 713 713 FsaNormal, ··· 716 716 NULL); 717 717 718 718 if (rcode < 0) { 719 - fib_complete(fibptr); 720 - fib_free(fibptr); 719 + aac_fib_complete(fibptr); 720 + aac_fib_free(fibptr); 721 721 return rcode; 722 722 } 723 723 memcpy(&dev->adapter_info, info, sizeof(*info)); ··· 725 725 if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) { 726 726 struct aac_supplement_adapter_info * info; 727 727 728 - fib_init(fibptr); 728 + aac_fib_init(fibptr); 729 729 730 730 info = (struct aac_supplement_adapter_info *) fib_data(fibptr); 731 731 732 732 memset(info,0,sizeof(*info)); 733 733 734 - rcode = fib_send(RequestSupplementAdapterInfo, 734 + rcode = aac_fib_send(RequestSupplementAdapterInfo, 735 735 fibptr, 736 736 sizeof(*info), 737 737 FsaNormal, ··· 748 748 * GetBusInfo 749 749 */ 750 750 751 - fib_init(fibptr); 751 + aac_fib_init(fibptr); 752 752 753 753 bus_info = (struct aac_bus_info_response *) fib_data(fibptr); 754 754 ··· 761 761 command->MethodId = cpu_to_le32(1); 762 762 command->CtlCmd = cpu_to_le32(GetBusInfo); 763 763 764 - rcode = fib_send(ContainerCommand, 764 + rcode = aac_fib_send(ContainerCommand, 765 765 fibptr, 766 766 sizeof (*bus_info), 767 767 FsaNormal, ··· 891 891 } 892 892 } 893 893 894 - fib_complete(fibptr); 895 - fib_free(fibptr); 894 + aac_fib_complete(fibptr); 895 + aac_fib_free(fibptr); 896 896 897 897 return rcode; 898 898 } ··· 976 976 ? 
sizeof(scsicmd->sense_buffer) 977 977 : sizeof(dev->fsa_dev[cid].sense_data)); 978 978 } 979 - fib_complete(fibptr); 980 - fib_free(fibptr); 979 + aac_fib_complete(fibptr); 980 + aac_fib_free(fibptr); 981 981 982 982 scsicmd->scsi_done(scsicmd); 983 983 } ··· 1062 1062 /* 1063 1063 * Alocate and initialize a Fib 1064 1064 */ 1065 - if (!(cmd_fibcontext = fib_alloc(dev))) { 1065 + if (!(cmd_fibcontext = aac_fib_alloc(dev))) { 1066 1066 return -1; 1067 1067 } 1068 1068 1069 - fib_init(cmd_fibcontext); 1069 + aac_fib_init(cmd_fibcontext); 1070 1070 1071 1071 if (dev->raw_io_interface) { 1072 1072 struct aac_raw_io *readcmd; ··· 1086 1086 /* 1087 1087 * Now send the Fib to the adapter 1088 1088 */ 1089 - status = fib_send(ContainerRawIo, 1089 + status = aac_fib_send(ContainerRawIo, 1090 1090 cmd_fibcontext, 1091 1091 fibsize, 1092 1092 FsaNormal, ··· 1112 1112 /* 1113 1113 * Now send the Fib to the adapter 1114 1114 */ 1115 - status = fib_send(ContainerCommand64, 1115 + status = aac_fib_send(ContainerCommand64, 1116 1116 cmd_fibcontext, 1117 1117 fibsize, 1118 1118 FsaNormal, ··· 1136 1136 /* 1137 1137 * Now send the Fib to the adapter 1138 1138 */ 1139 - status = fib_send(ContainerCommand, 1139 + status = aac_fib_send(ContainerCommand, 1140 1140 cmd_fibcontext, 1141 1141 fibsize, 1142 1142 FsaNormal, ··· 1153 1153 if (status == -EINPROGRESS) 1154 1154 return 0; 1155 1155 1156 - printk(KERN_WARNING "aac_read: fib_send failed with status: %d.\n", status); 1156 + printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status); 1157 1157 /* 1158 1158 * For some reason, the Fib didn't queue, return QUEUE_FULL 1159 1159 */ 1160 1160 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL; 1161 1161 scsicmd->scsi_done(scsicmd); 1162 - fib_complete(cmd_fibcontext); 1163 - fib_free(cmd_fibcontext); 1162 + aac_fib_complete(cmd_fibcontext); 1163 + aac_fib_free(cmd_fibcontext); 1164 1164 return 0; 1165 1165 } 1166 1166 ··· 1228 1228 /* 1229 1229 * Allocate and initialize a Fib then setup a BlockWrite command 1230 1230 */ 1231 - if (!(cmd_fibcontext = fib_alloc(dev))) { 1231 + if (!(cmd_fibcontext = aac_fib_alloc(dev))) { 1232 1232 scsicmd->result = DID_ERROR << 16; 1233 1233 scsicmd->scsi_done(scsicmd); 1234 1234 return 0; 1235 1235 } 1236 - fib_init(cmd_fibcontext); 1236 + aac_fib_init(cmd_fibcontext); 1237 1237 1238 1238 if (dev->raw_io_interface) { 1239 1239 struct aac_raw_io *writecmd; ··· 1253 1253 /* 1254 1254 * Now send the Fib to the adapter 1255 1255 */ 1256 - status = fib_send(ContainerRawIo, 1256 + status = aac_fib_send(ContainerRawIo, 1257 1257 cmd_fibcontext, 1258 1258 fibsize, 1259 1259 FsaNormal, ··· 1279 1279 /* 1280 1280 * Now send the Fib to the adapter 1281 1281 */ 1282 - status = fib_send(ContainerCommand64, 1282 + status = aac_fib_send(ContainerCommand64, 1283 1283 cmd_fibcontext, 1284 1284 fibsize, 1285 1285 FsaNormal, ··· 1305 1305 /* 1306 1306 * Now send the Fib to the adapter 1307 1307 */ 1308 - status = fib_send(ContainerCommand, 1308 + status = aac_fib_send(ContainerCommand, 1309 1309 cmd_fibcontext, 1310 1310 fibsize, 1311 1311 FsaNormal, ··· 1322 1322 return 0; 1323 1323 } 1324 1324 1325 - printk(KERN_WARNING "aac_write: fib_send failed with status: %d\n", status); 1325 + printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status); 1326 1326 /* 1327 1327 * For some reason, the Fib didn't queue, return QUEUE_FULL 1328 1328 */ 1329 1329 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL; 
1330 1330 scsicmd->scsi_done(scsicmd); 1331 1331 1332 - fib_complete(cmd_fibcontext); 1333 - fib_free(cmd_fibcontext); 1332 + aac_fib_complete(cmd_fibcontext); 1333 + aac_fib_free(cmd_fibcontext); 1334 1334 return 0; 1335 1335 } 1336 1336 ··· 1369 1369 sizeof(cmd->sense_buffer))); 1370 1370 } 1371 1371 1372 - fib_complete(fibptr); 1373 - fib_free(fibptr); 1372 + aac_fib_complete(fibptr); 1373 + aac_fib_free(fibptr); 1374 1374 cmd->scsi_done(cmd); 1375 1375 } 1376 1376 ··· 1407 1407 * Allocate and initialize a Fib 1408 1408 */ 1409 1409 if (!(cmd_fibcontext = 1410 - fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) 1410 + aac_fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) 1411 1411 return SCSI_MLQUEUE_HOST_BUSY; 1412 1412 1413 - fib_init(cmd_fibcontext); 1413 + aac_fib_init(cmd_fibcontext); 1414 1414 1415 1415 synchronizecmd = fib_data(cmd_fibcontext); 1416 1416 synchronizecmd->command = cpu_to_le32(VM_ContainerConfig); ··· 1422 1422 /* 1423 1423 * Now send the Fib to the adapter 1424 1424 */ 1425 - status = fib_send(ContainerCommand, 1425 + status = aac_fib_send(ContainerCommand, 1426 1426 cmd_fibcontext, 1427 1427 sizeof(struct aac_synchronize), 1428 1428 FsaNormal, ··· 1437 1437 return 0; 1438 1438 1439 1439 printk(KERN_WARNING 1440 - "aac_synchronize: fib_send failed with status: %d.\n", status); 1441 - fib_complete(cmd_fibcontext); 1442 - fib_free(cmd_fibcontext); 1440 + "aac_synchronize: aac_fib_send failed with status: %d.\n", status); 1441 + aac_fib_complete(cmd_fibcontext); 1442 + aac_fib_free(cmd_fibcontext); 1443 1443 return SCSI_MLQUEUE_HOST_BUSY; 1444 1444 } 1445 1445 ··· 1465 1465 * itself. 1466 1466 */ 1467 1467 if (scmd_id(scsicmd) != host->this_id) { 1468 - if ((scsicmd->device->channel == 0) ){ 1468 + if ((scsicmd->device->channel == CONTAINER_CHANNEL)) { 1469 1469 if( (scsicmd->device->id >= dev->maximum_num_containers) || (scsicmd->device->lun != 0)){ 1470 1470 scsicmd->result = DID_NO_CONNECT << 16; 1471 1471 scsicmd->scsi_done(scsicmd); ··· 1488 1488 case READ_CAPACITY: 1489 1489 case TEST_UNIT_READY: 1490 1490 spin_unlock_irq(host->host_lock); 1491 - probe_container(dev, cid); 1491 + aac_probe_container(dev, cid); 1492 1492 if ((fsa_dev_ptr[cid].valid & 1) == 0) 1493 1493 fsa_dev_ptr[cid].valid = 0; 1494 1494 spin_lock_irq(host->host_lock); ··· 1935 1935 case SRB_STATUS_ERROR_RECOVERY: 1936 1936 case SRB_STATUS_PENDING: 1937 1937 case SRB_STATUS_SUCCESS: 1938 - if(scsicmd->cmnd[0] == INQUIRY ){ 1939 - u8 b; 1940 - u8 b1; 1941 - /* We can't expose disk devices because we can't tell whether they 1942 - * are the raw container drives or stand alone drives. If they have 1943 - * the removable bit set then we should expose them though. 
1944 - */ 1945 - b = (*(u8*)scsicmd->buffer)&0x1f; 1946 - b1 = ((u8*)scsicmd->buffer)[1]; 1947 - if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER 1948 - || (b==TYPE_DISK && (b1&0x80)) ){ 1949 - scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; 1950 - /* 1951 - * We will allow disk devices if in RAID/SCSI mode and 1952 - * the channel is 2 1953 - */ 1954 - } else if ((dev->raid_scsi_mode) && 1955 - (scmd_channel(scsicmd) == 2)) { 1956 - scsicmd->result = DID_OK << 16 | 1957 - COMMAND_COMPLETE << 8; 1958 - } else { 1959 - scsicmd->result = DID_NO_CONNECT << 16 | 1960 - COMMAND_COMPLETE << 8; 1961 - } 1962 - } else { 1963 - scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; 1964 - } 1938 + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; 1965 1939 break; 1966 1940 case SRB_STATUS_DATA_OVERRUN: 1967 1941 switch(scsicmd->cmnd[0]){ ··· 1955 1981 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8; 1956 1982 break; 1957 1983 case INQUIRY: { 1958 - u8 b; 1959 - u8 b1; 1960 - /* We can't expose disk devices because we can't tell whether they 1961 - * are the raw container drives or stand alone drives 1962 - */ 1963 - b = (*(u8*)scsicmd->buffer)&0x0f; 1964 - b1 = ((u8*)scsicmd->buffer)[1]; 1965 - if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER 1966 - || (b==TYPE_DISK && (b1&0x80)) ){ 1967 - scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; 1968 - /* 1969 - * We will allow disk devices if in RAID/SCSI mode and 1970 - * the channel is 2 1971 - */ 1972 - } else if ((dev->raid_scsi_mode) && 1973 - (scmd_channel(scsicmd) == 2)) { 1974 - scsicmd->result = DID_OK << 16 | 1975 - COMMAND_COMPLETE << 8; 1976 - } else { 1977 - scsicmd->result = DID_NO_CONNECT << 16 | 1978 - COMMAND_COMPLETE << 8; 1979 - } 1984 + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; 1980 1985 break; 1981 1986 } 1982 1987 default: ··· 2042 2089 */ 2043 2090 scsicmd->result |= le32_to_cpu(srbreply->scsi_status); 2044 2091 2045 - fib_complete(fibptr); 2046 - fib_free(fibptr); 2092 + aac_fib_complete(fibptr); 2093 + aac_fib_free(fibptr); 2047 2094 scsicmd->scsi_done(scsicmd); 2048 2095 } 2049 2096 ··· 2095 2142 /* 2096 2143 * Allocate and initialize a Fib then setup a BlockWrite command 2097 2144 */ 2098 - if (!(cmd_fibcontext = fib_alloc(dev))) { 2145 + if (!(cmd_fibcontext = aac_fib_alloc(dev))) { 2099 2146 return -1; 2100 2147 } 2101 - fib_init(cmd_fibcontext); 2148 + aac_fib_init(cmd_fibcontext); 2102 2149 2103 2150 srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext); 2104 2151 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); ··· 2132 2179 /* 2133 2180 * Now send the Fib to the adapter 2134 2181 */ 2135 - status = fib_send(ScsiPortCommand64, cmd_fibcontext, 2182 + status = aac_fib_send(ScsiPortCommand64, cmd_fibcontext, 2136 2183 fibsize, FsaNormal, 0, 1, 2137 2184 (fib_callback) aac_srb_callback, 2138 2185 (void *) scsicmd); ··· 2154 2201 /* 2155 2202 * Now send the Fib to the adapter 2156 2203 */ 2157 - status = fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, FsaNormal, 0, 1, 2204 + status = aac_fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, FsaNormal, 0, 1, 2158 2205 (fib_callback) aac_srb_callback, (void *) scsicmd); 2159 2206 } 2160 2207 /* ··· 2164 2211 return 0; 2165 2212 } 2166 2213 2167 - printk(KERN_WARNING "aac_srb: fib_send failed with status: %d\n", status); 2168 - fib_complete(cmd_fibcontext); 2169 - fib_free(cmd_fibcontext); 2214 + printk(KERN_WARNING "aac_srb: aac_fib_send failed with status: 
%d\n", status); 2215 + aac_fib_complete(cmd_fibcontext); 2216 + aac_fib_free(cmd_fibcontext); 2170 2217 2171 2218 return -1; 2172 2219 }
+9 -9
drivers/scsi/aacraid/aacraid.h
··· 1774 1774 struct scsi_cmnd; 1775 1775 1776 1776 const char *aac_driverinfo(struct Scsi_Host *); 1777 - struct fib *fib_alloc(struct aac_dev *dev); 1778 - int fib_setup(struct aac_dev *dev); 1779 - void fib_map_free(struct aac_dev *dev); 1780 - void fib_free(struct fib * context); 1781 - void fib_init(struct fib * context); 1777 + struct fib *aac_fib_alloc(struct aac_dev *dev); 1778 + int aac_fib_setup(struct aac_dev *dev); 1779 + void aac_fib_map_free(struct aac_dev *dev); 1780 + void aac_fib_free(struct fib * context); 1781 + void aac_fib_init(struct fib * context); 1782 1782 void aac_printf(struct aac_dev *dev, u32 val); 1783 - int fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt); 1783 + int aac_fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt); 1784 1784 int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry); 1785 1785 void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum); 1786 - int fib_complete(struct fib * context); 1786 + int aac_fib_complete(struct fib * context); 1787 1787 #define fib_data(fibctx) ((void *)(fibctx)->hw_fib->data) 1788 1788 struct aac_dev *aac_init_adapter(struct aac_dev *dev); 1789 1789 int aac_get_config_status(struct aac_dev *dev); ··· 1799 1799 unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index); 1800 1800 int aac_command_thread(struct aac_dev * dev); 1801 1801 int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx); 1802 - int fib_adapter_complete(struct fib * fibptr, unsigned short size); 1802 + int aac_fib_adapter_complete(struct fib * fibptr, unsigned short size); 1803 1803 struct aac_driver_ident* aac_get_driver_ident(int devtype); 1804 1804 int aac_get_adapter_info(struct aac_dev* dev); 1805 1805 int aac_send_shutdown(struct aac_dev *dev); 1806 - int probe_container(struct aac_dev *dev, int cid); 1806 + int aac_probe_container(struct aac_dev *dev, int cid); 1807 1807 extern int numacb; 1808 1808 extern int acbsize; 1809 1809 extern char aac_driver_version[];
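The header now exports every FIB primitive with a driver prefix, so these generic names can no longer collide with identically named symbols elsewhere in the kernel. Code built against the old names could, hypothetically, be bridged with inline wrappers; this shim is illustration only and is not part of the patch:

/* Hypothetical compatibility shim, NOT part of this patch: sketch of
 * mapping the old names onto the renamed API (types from aacraid.h). */
static inline struct fib *fib_alloc(struct aac_dev *dev)
{
	return aac_fib_alloc(dev);
}

static inline void fib_free(struct fib *context)
{
	aac_fib_free(context);
}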
+11 -11
drivers/scsi/aacraid/commctrl.c
··· 63 63 unsigned size; 64 64 int retval; 65 65 66 - fibptr = fib_alloc(dev); 66 + fibptr = aac_fib_alloc(dev); 67 67 if(fibptr == NULL) { 68 68 return -ENOMEM; 69 69 } ··· 73 73 * First copy in the header so that we can check the size field. 74 74 */ 75 75 if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) { 76 - fib_free(fibptr); 76 + aac_fib_free(fibptr); 77 77 return -EFAULT; 78 78 } 79 79 /* ··· 110 110 */ 111 111 kfib->header.XferState = 0; 112 112 } else { 113 - retval = fib_send(le16_to_cpu(kfib->header.Command), fibptr, 113 + retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr, 114 114 le16_to_cpu(kfib->header.Size) , FsaNormal, 115 115 1, 1, NULL, NULL); 116 116 if (retval) { 117 117 goto cleanup; 118 118 } 119 - if (fib_complete(fibptr) != 0) { 119 + if (aac_fib_complete(fibptr) != 0) { 120 120 retval = -EINVAL; 121 121 goto cleanup; 122 122 } ··· 138 138 fibptr->hw_fib_pa = hw_fib_pa; 139 139 fibptr->hw_fib = hw_fib; 140 140 } 141 - fib_free(fibptr); 141 + aac_fib_free(fibptr); 142 142 return retval; 143 143 } 144 144 ··· 464 464 /* 465 465 * Allocate and initialize a Fib then setup a BlockWrite command 466 466 */ 467 - if (!(srbfib = fib_alloc(dev))) { 467 + if (!(srbfib = aac_fib_alloc(dev))) { 468 468 return -ENOMEM; 469 469 } 470 - fib_init(srbfib); 470 + aac_fib_init(srbfib); 471 471 472 472 srbcmd = (struct aac_srb*) fib_data(srbfib); 473 473 ··· 601 601 602 602 srbcmd->count = cpu_to_le32(byte_count); 603 603 psg->count = cpu_to_le32(sg_indx+1); 604 - status = fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL); 604 + status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL); 605 605 } else { 606 606 struct user_sgmap* upsg = &user_srbcmd->sg; 607 607 struct sgmap* psg = &srbcmd->sg; ··· 649 649 } 650 650 srbcmd->count = cpu_to_le32(byte_count); 651 651 psg->count = cpu_to_le32(sg_indx+1); 652 - status = fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); 652 + status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); 653 653 } 654 654 655 655 if (status != 0){ ··· 684 684 for(i=0; i <= sg_indx; i++){ 685 685 kfree(sg_list[i]); 686 686 } 687 - fib_complete(srbfib); 688 - fib_free(srbfib); 687 + aac_fib_complete(srbfib); 688 + aac_fib_free(srbfib); 689 689 690 690 return rcode; 691 691 }
+6 -6
drivers/scsi/aacraid/comminit.c
··· 185 185 struct aac_close *cmd; 186 186 int status; 187 187 188 - fibctx = fib_alloc(dev); 188 + fibctx = aac_fib_alloc(dev); 189 189 if (!fibctx) 190 190 return -ENOMEM; 191 - fib_init(fibctx); 191 + aac_fib_init(fibctx); 192 192 193 193 cmd = (struct aac_close *) fib_data(fibctx); 194 194 195 195 cmd->command = cpu_to_le32(VM_CloseAll); 196 196 cmd->cid = cpu_to_le32(0xffffffff); 197 197 198 - status = fib_send(ContainerCommand, 198 + status = aac_fib_send(ContainerCommand, 199 199 fibctx, 200 200 sizeof(struct aac_close), 201 201 FsaNormal, ··· 203 203 NULL, NULL); 204 204 205 205 if (status == 0) 206 - fib_complete(fibctx); 207 - fib_free(fibctx); 206 + aac_fib_complete(fibctx); 207 + aac_fib_free(fibctx); 208 208 return status; 209 209 } 210 210 ··· 427 427 /* 428 428 * Initialize the list of fibs 429 429 */ 430 - if(fib_setup(dev)<0){ 430 + if (aac_fib_setup(dev) < 0) { 431 431 kfree(dev->queues); 432 432 return NULL; 433 433 }
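aac_send_shutdown above shows the synchronous FIB lifecycle under the new names: allocate, init, fill, send with wait set, complete on success, free unconditionally. A stub-based skeleton of that ordering, with every aac_fib_* call replaced by a no-op stand-in so only the control flow remains:

#include <stdio.h>

/* Skeleton of the synchronous FIB sequence in aac_send_shutdown();
 * the stubs stand in for the aac_fib_* API. */
struct fib { int unused; };

static struct fib *fib_alloc_stub(void) { static struct fib f; return &f; }
static void fib_init_stub(struct fib *f) { (void)f; }
static int fib_send_stub(struct fib *f) { (void)f; return 0; }
static void fib_complete_stub(struct fib *f) { (void)f; }
static void fib_free_stub(struct fib *f) { (void)f; }

static int send_shutdown_model(void)
{
	struct fib *fibctx = fib_alloc_stub();	/* aac_fib_alloc() */
	int status;

	if (!fibctx)
		return -1;			/* -ENOMEM in the driver */
	fib_init_stub(fibctx);			/* aac_fib_init() */
	/* fill in the VM_CloseAll command here */
	status = fib_send_stub(fibctx);		/* aac_fib_send(), wait=1 */
	if (status == 0)
		fib_complete_stub(fibctx);	/* only a finished FIB is completed */
	fib_free_stub(fibctx);			/* but it is always freed */
	return status;
}

int main(void)
{
	printf("%d\n", send_shutdown_model());	/* prints 0 */
	return 0;
}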
+26 -24
drivers/scsi/aacraid/commsup.c
··· 67 67 } 68 68 69 69 /** 70 - * fib_map_free - free the fib objects 70 + * aac_fib_map_free - free the fib objects 71 71 * @dev: Adapter to free 72 72 * 73 73 * Free the PCI mappings and the memory allocated for FIB blocks 74 74 * on this adapter. 75 75 */ 76 76 77 - void fib_map_free(struct aac_dev *dev) 77 + void aac_fib_map_free(struct aac_dev *dev) 78 78 { 79 79 pci_free_consistent(dev->pdev, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB), dev->hw_fib_va, dev->hw_fib_pa); 80 80 } 81 81 82 82 /** 83 - * fib_setup - setup the fibs 83 + * aac_fib_setup - setup the fibs 84 84 * @dev: Adapter to set up 85 85 * 86 86 * Allocate the PCI space for the fibs, map it and then intialise the 87 87 * fib area, the unmapped fib data and also the free list 88 88 */ 89 89 90 - int fib_setup(struct aac_dev * dev) 90 + int aac_fib_setup(struct aac_dev * dev) 91 91 { 92 92 struct fib *fibptr; 93 93 struct hw_fib *hw_fib_va; ··· 134 134 } 135 135 136 136 /** 137 - * fib_alloc - allocate a fib 137 + * aac_fib_alloc - allocate a fib 138 138 * @dev: Adapter to allocate the fib for 139 139 * 140 140 * Allocate a fib from the adapter fib pool. If the pool is empty we 141 141 * return NULL. 142 142 */ 143 143 144 - struct fib * fib_alloc(struct aac_dev *dev) 144 + struct fib *aac_fib_alloc(struct aac_dev *dev) 145 145 { 146 146 struct fib * fibptr; 147 147 unsigned long flags; ··· 170 170 } 171 171 172 172 /** 173 - * fib_free - free a fib 173 + * aac_fib_free - free a fib 174 174 * @fibptr: fib to free up 175 175 * 176 176 * Frees up a fib and places it on the appropriate queue 177 177 * (either free or timed out) 178 178 */ 179 179 180 - void fib_free(struct fib * fibptr) 180 + void aac_fib_free(struct fib *fibptr) 181 181 { 182 182 unsigned long flags; 183 183 ··· 188 188 fibptr->dev->timeout_fib = fibptr; 189 189 } else { 190 190 if (fibptr->hw_fib->header.XferState != 0) { 191 - printk(KERN_WARNING "fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n", 191 + printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n", 192 192 (void*)fibptr, 193 193 le32_to_cpu(fibptr->hw_fib->header.XferState)); 194 194 } ··· 199 199 } 200 200 201 201 /** 202 - * fib_init - initialise a fib 202 + * aac_fib_init - initialise a fib 203 203 * @fibptr: The fib to initialize 204 204 * 205 205 * Set up the generic fib fields ready for use 206 206 */ 207 207 208 - void fib_init(struct fib *fibptr) 208 + void aac_fib_init(struct fib *fibptr) 209 209 { 210 210 struct hw_fib *hw_fib = fibptr->hw_fib; 211 211 ··· 362 362 */ 363 363 364 364 /** 365 - * fib_send - send a fib to the adapter 365 + * aac_fib_send - send a fib to the adapter 366 366 * @command: Command to send 367 367 * @fibptr: The fib 368 368 * @size: Size of fib data area ··· 378 378 * response FIB is received from the adapter. 
379 379 */ 380 380 381 - int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data) 381 + int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, 382 + int priority, int wait, int reply, fib_callback callback, 383 + void *callback_data) 382 384 { 383 385 struct aac_dev * dev = fibptr->dev; 384 386 struct hw_fib * hw_fib = fibptr->hw_fib; ··· 495 493 q->numpending++; 496 494 *(q->headers.producer) = cpu_to_le32(index + 1); 497 495 spin_unlock_irqrestore(q->lock, qflags); 498 - dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index)); 496 + dprintk((KERN_DEBUG "aac_fib_send: inserting a queue entry at index %d.\n",index)); 499 497 if (!(nointr & aac_config.irq_mod)) 500 498 aac_adapter_notify(dev, AdapNormCmdQueue); 501 499 } ··· 522 520 list_del(&fibptr->queue); 523 521 spin_unlock_irqrestore(q->lock, qflags); 524 522 if (wait == -1) { 525 - printk(KERN_ERR "aacraid: fib_send: first asynchronous command timed out.\n" 523 + printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n" 526 524 "Usually a result of a PCI interrupt routing problem;\n" 527 525 "update mother board BIOS or consider utilizing one of\n" 528 526 "the SAFE mode kernel options (acpi, apic etc)\n"); ··· 626 624 } 627 625 628 626 /** 629 - * fib_adapter_complete - complete adapter issued fib 627 + * aac_fib_adapter_complete - complete adapter issued fib 630 628 * @fibptr: fib to complete 631 629 * @size: size of fib 632 630 * ··· 634 632 * the adapter. 635 633 */ 636 634 637 - int fib_adapter_complete(struct fib * fibptr, unsigned short size) 635 + int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size) 638 636 { 639 637 struct hw_fib * hw_fib = fibptr->hw_fib; 640 638 struct aac_dev * dev = fibptr->dev; ··· 685 683 } 686 684 else 687 685 { 688 - printk(KERN_WARNING "fib_adapter_complete: Unknown xferstate detected.\n"); 686 + printk(KERN_WARNING "aac_fib_adapter_complete: Unknown xferstate detected.\n"); 689 687 BUG(); 690 688 } 691 689 return 0; 692 690 } 693 691 694 692 /** 695 - * fib_complete - fib completion handler 693 + * aac_fib_complete - fib completion handler 696 694 * @fib: FIB to complete 697 695 * 698 696 * Will do all necessary work to complete a FIB. 699 697 */ 700 698 701 - int fib_complete(struct fib * fibptr) 699 + int aac_fib_complete(struct fib *fibptr) 702 700 { 703 701 struct hw_fib * hw_fib = fibptr->hw_fib; 704 702 ··· 997 995 if (!dev || !dev->scsi_host_ptr) 998 996 return; 999 997 /* 1000 - * force reload of disk info via probe_container 998 + * force reload of disk info via aac_probe_container 1001 999 */ 1002 1000 if ((device_config_needed == CHANGE) 1003 1001 && (dev->fsa_dev[container].valid == 1)) 1004 1002 dev->fsa_dev[container].valid = 2; 1005 1003 if ((device_config_needed == CHANGE) || 1006 1004 (device_config_needed == ADD)) 1007 - probe_container(dev, container); 1005 + aac_probe_container(dev, container); 1008 1006 device = scsi_device_lookup(dev->scsi_host_ptr, 1009 1007 CONTAINER_TO_CHANNEL(container), 1010 1008 CONTAINER_TO_ID(container), ··· 1106 1104 /* Handle Driver Notify Events */ 1107 1105 aac_handle_aif(dev, fib); 1108 1106 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); 1109 - fib_adapter_complete(fib, (u16)sizeof(u32)); 1107 + aac_fib_adapter_complete(fib, (u16)sizeof(u32)); 1110 1108 } else { 1111 1109 struct list_head *entry; 1112 1110 /* The u32 here is important and intended. 
We are using ··· 1243 1241 * Set the status of this FIB 1244 1242 */ 1245 1243 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); 1246 - fib_adapter_complete(fib, sizeof(u32)); 1244 + aac_fib_adapter_complete(fib, sizeof(u32)); 1247 1245 spin_unlock_irqrestore(&dev->fib_lock, flagv); 1248 1246 /* Free up the remaining resources */ 1249 1247 hw_fib_p = hw_fib_pool;
+1 -1
drivers/scsi/aacraid/dpcsup.c
··· 206 206 * Set the status of this FIB 207 207 */ 208 208 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); 209 - fib_adapter_complete(fib, sizeof(u32)); 209 + aac_fib_adapter_complete(fib, sizeof(u32)); 210 210 spin_lock_irqsave(q->lock, flags); 211 211 } 212 212 }
+39 -11
drivers/scsi/aacraid/linit.c
··· 385 385 386 386 static int aac_slave_configure(struct scsi_device *sdev) 387 387 { 388 - struct Scsi_Host *host = sdev->host; 388 + if (sdev_channel(sdev) == CONTAINER_CHANNEL) { 389 + sdev->skip_ms_page_8 = 1; 390 + sdev->skip_ms_page_3f = 1; 391 + } 392 + if ((sdev->type == TYPE_DISK) && 393 + (sdev_channel(sdev) != CONTAINER_CHANNEL)) { 394 + struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata; 395 + if (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) 396 + sdev->no_uld_attach = 1; 397 + } 398 + if (sdev->tagged_supported && (sdev->type == TYPE_DISK) && 399 + (sdev_channel(sdev) == CONTAINER_CHANNEL)) { 400 + struct scsi_device * dev; 401 + struct Scsi_Host *host = sdev->host; 402 + unsigned num_lsu = 0; 403 + unsigned num_one = 0; 404 + unsigned depth; 389 405 390 - if (sdev->tagged_supported) 391 - scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, 128); 392 - else 406 + __shost_for_each_device(dev, host) { 407 + if (dev->tagged_supported && (dev->type == TYPE_DISK) && 408 + (sdev_channel(dev) == CONTAINER_CHANNEL)) 409 + ++num_lsu; 410 + else 411 + ++num_one; 412 + } 413 + if (num_lsu == 0) 414 + ++num_lsu; 415 + depth = (host->can_queue - num_one) / num_lsu; 416 + if (depth > 256) 417 + depth = 256; 418 + else if (depth < 2) 419 + depth = 2; 420 + scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth); 421 + if (!(((struct aac_dev *)host->hostdata)->adapter_info.options & 422 + AAC_OPT_NEW_COMM)) 423 + blk_queue_max_segment_size(sdev->request_queue, 65536); 424 + } else 393 425 scsi_adjust_queue_depth(sdev, 0, 1); 394 - 395 - if (!(((struct aac_dev *)host->hostdata)->adapter_info.options 396 - & AAC_OPT_NEW_COMM)) 397 - blk_queue_max_segment_size(sdev->request_queue, 65536); 398 426 399 427 return 0; 400 428 } ··· 898 870 899 871 /* 900 872 * max channel will be the physical channels plus 1 virtual channel 901 - * all containers are on the virtual channel 0 873 + * all containers are on the virtual channel 0 (CONTAINER_CHANNEL) 902 874 * physical channels are address by their actual physical number+1 903 875 */ 904 876 if (aac->nondasd_support == 1) ··· 941 913 aac_adapter_disable_int(aac); 942 914 free_irq(pdev->irq, aac); 943 915 out_unmap: 944 - fib_map_free(aac); 916 + aac_fib_map_free(aac); 945 917 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys); 946 918 kfree(aac->queues); 947 919 iounmap(aac->regs.sa); ··· 975 947 976 948 aac_send_shutdown(aac); 977 949 aac_adapter_disable_int(aac); 978 - fib_map_free(aac); 950 + aac_fib_map_free(aac); 979 951 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, 980 952 aac->comm_phys); 981 953 kfree(aac->queues);
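The reworked aac_slave_configure stops pinning every tagged device at depth 128: each non-array device now reserves one slot of the host's can_queue budget, and the remainder is split evenly across the container ("LSU") devices, clamped to [2, 256]. The arithmetic, lifted into a standalone model with illustrative numbers:

#include <stdio.h>

/* Standalone model of the queue-depth split in the new
 * aac_slave_configure(); numbers in main() are illustrative. */
static unsigned int container_depth(unsigned int can_queue,
				    unsigned int num_lsu,
				    unsigned int num_one)
{
	unsigned int depth;

	if (num_lsu == 0)	/* same guard as the driver: avoid /0 */
		num_lsu = 1;
	depth = (can_queue - num_one) / num_lsu;
	if (depth > 256)
		depth = 256;
	else if (depth < 2)
		depth = 2;
	return depth;
}

int main(void)
{
	/* e.g. 512 slots, 4 arrays, 8 other devices */
	printf("%u\n", container_depth(512, 4, 8));	/* prints 126 */
	return 0;
}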
+1 -1
drivers/scsi/gdth.c
··· 2816 2816 } 2817 2817 #endif 2818 2818 2819 - } else { 2819 + } else if (scp->request_bufflen) { 2820 2820 scp->SCp.Status = GDTH_MAP_SINGLE; 2821 2821 scp->SCp.Message = (read_write == 1 ? 2822 2822 PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
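The gdth one-liner keeps zero-length requests out of the single-buffer DMA path: only scp->request_bufflen != 0 now falls through to the pci_map_single() setup. A toy model of the resulting three-way mapping decision (the enum values are illustrative, not the driver's constants):

#include <stdio.h>

/* Toy model of the mapping decision after the fix.  The point of the
 * new middle arm is that a command carrying no data (TEST UNIT READY,
 * for instance) must not reach the single-buffer mapping path. */
enum map_kind { MAP_NONE, MAP_SG, MAP_SINGLE };

static enum map_kind pick_mapping(unsigned int use_sg, unsigned int bufflen)
{
	if (use_sg)
		return MAP_SG;		/* scatter-gather list */
	if (bufflen)			/* the new guard */
		return MAP_SINGLE;	/* single linear buffer */
	return MAP_NONE;		/* nothing to map */
}

int main(void)
{
	printf("%d %d %d\n", pick_mapping(4, 4096),
	       pick_mapping(0, 512), pick_mapping(0, 0));	/* 1 2 0 */
	return 0;
}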
+5 -44
drivers/scsi/ipr.c
··· 4236 4236 } 4237 4237 4238 4238 /** 4239 - * ipr_save_ioafp_mode_select - Save adapters mode select data 4240 - * @ioa_cfg: ioa config struct 4241 - * @scsi_cmd: scsi command struct 4242 - * 4243 - * This function saves mode select data for the adapter to 4244 - * use following an adapter reset. 4245 - * 4246 - * Return value: 4247 - * 0 on success / SCSI_MLQUEUE_HOST_BUSY on failure 4248 - **/ 4249 - static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg, 4250 - struct scsi_cmnd *scsi_cmd) 4251 - { 4252 - if (!ioa_cfg->saved_mode_pages) { 4253 - ioa_cfg->saved_mode_pages = kmalloc(sizeof(struct ipr_mode_pages), 4254 - GFP_ATOMIC); 4255 - if (!ioa_cfg->saved_mode_pages) { 4256 - dev_err(&ioa_cfg->pdev->dev, 4257 - "IOA mode select buffer allocation failed\n"); 4258 - return SCSI_MLQUEUE_HOST_BUSY; 4259 - } 4260 - } 4261 - 4262 - memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]); 4263 - ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4]; 4264 - return 0; 4265 - } 4266 - 4267 - /** 4268 4239 * ipr_queuecommand - Queue a mid-layer request 4269 4240 * @scsi_cmd: scsi command struct 4270 4241 * @done: done function ··· 4308 4337 if (scsi_cmd->cmnd[0] >= 0xC0 && 4309 4338 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) 4310 4339 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 4311 - 4312 - if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT) 4313 - rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd); 4314 4340 4315 4341 if (likely(rc == 0)) 4316 4342 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); ··· 4797 4829 int length; 4798 4830 4799 4831 ENTER; 4800 - if (ioa_cfg->saved_mode_pages) { 4801 - memcpy(mode_pages, ioa_cfg->saved_mode_pages, 4802 - ioa_cfg->saved_mode_page_len); 4803 - length = ioa_cfg->saved_mode_page_len; 4804 - } else { 4805 - ipr_scsi_bus_speed_limit(ioa_cfg); 4806 - ipr_check_term_power(ioa_cfg, mode_pages); 4807 - ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages); 4808 - length = mode_pages->hdr.length + 1; 4809 - mode_pages->hdr.length = 0; 4810 - } 4832 + ipr_scsi_bus_speed_limit(ioa_cfg); 4833 + ipr_check_term_power(ioa_cfg, mode_pages); 4834 + ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages); 4835 + length = mode_pages->hdr.length + 1; 4836 + mode_pages->hdr.length = 0; 4811 4837 4812 4838 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11, 4813 4839 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), ··· 5931 5969 } 5932 5970 5933 5971 ipr_free_dump(ioa_cfg); 5934 - kfree(ioa_cfg->saved_mode_pages); 5935 5972 kfree(ioa_cfg->trace); 5936 5973 } 5937 5974
+2 -3
drivers/scsi/ipr.h
··· 36 36 /* 37 37 * Literals 38 38 */ 39 - #define IPR_DRIVER_VERSION "2.1.1" 40 - #define IPR_DRIVER_DATE "(November 15, 2005)" 39 + #define IPR_DRIVER_VERSION "2.1.2" 40 + #define IPR_DRIVER_DATE "(February 8, 2006)" 41 41 42 42 /* 43 43 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding ··· 1000 1000 struct Scsi_Host *host; 1001 1001 struct pci_dev *pdev; 1002 1002 struct ipr_sglist *ucode_sglist; 1003 - struct ipr_mode_pages *saved_mode_pages; 1004 1003 u8 saved_mode_page_len; 1005 1004 1006 1005 struct work_struct work_q;
+41 -37
drivers/scsi/iscsi_tcp.c
··· 146 146 spin_unlock_irqrestore(&session->lock, flags); 147 147 set_bit(SUSPEND_BIT, &conn->suspend_tx); 148 148 set_bit(SUSPEND_BIT, &conn->suspend_rx); 149 - iscsi_conn_error(iscsi_handle(conn), err); 149 + iscsi_conn_error(conn->cls_conn, err); 150 150 } 151 151 152 152 static inline int ··· 244 244 if (sc->sc_data_direction == DMA_TO_DEVICE) { 245 245 struct iscsi_data_task *dtask, *n; 246 246 /* WRITE: cleanup Data-Out's if any */ 247 - spin_lock(&conn->lock); 248 247 list_for_each_entry_safe(dtask, n, &ctask->dataqueue, item) { 249 248 list_del(&dtask->item); 250 249 mempool_free(dtask, ctask->datapool); 251 250 } 252 - spin_unlock(&conn->lock); 253 251 } 254 252 ctask->xmstate = XMSTATE_IDLE; 255 253 ctask->r2t = NULL; ··· 687 689 break; 688 690 689 691 if (!conn->in.datalen) { 690 - rc = iscsi_recv_pdu(iscsi_handle(conn), hdr, 692 + rc = iscsi_recv_pdu(conn->cls_conn, hdr, 691 693 NULL, 0); 692 694 if (conn->login_mtask != mtask) { 693 695 spin_lock(&session->lock); ··· 735 737 if (!conn->in.datalen) { 736 738 struct iscsi_mgmt_task *mtask; 737 739 738 - rc = iscsi_recv_pdu(iscsi_handle(conn), hdr, 740 + rc = iscsi_recv_pdu(conn->cls_conn, hdr, 739 741 NULL, 0); 740 742 mtask = (struct iscsi_mgmt_task *) 741 743 session->mgmt_cmds[conn->in.itt - ··· 759 761 rc = iscsi_check_assign_cmdsn(session, 760 762 (struct iscsi_nopin*)hdr); 761 763 if (!rc && hdr->ttt != ISCSI_RESERVED_TAG) 762 - rc = iscsi_recv_pdu(iscsi_handle(conn), 764 + rc = iscsi_recv_pdu(conn->cls_conn, 763 765 hdr, NULL, 0); 764 766 } else 765 767 rc = ISCSI_ERR_PROTO; ··· 1042 1044 goto exit; 1043 1045 } 1044 1046 1045 - rc = iscsi_recv_pdu(iscsi_handle(conn), conn->in.hdr, 1047 + rc = iscsi_recv_pdu(conn->cls_conn, conn->in.hdr, 1046 1048 conn->data, conn->in.datalen); 1047 1049 1048 1050 if (!rc && conn->datadgst_en && ··· 2426 2428 } 2427 2429 2428 2430 static struct iscsi_cls_conn * 2429 - iscsi_conn_create(struct Scsi_Host *shost, uint32_t conn_idx) 2431 + iscsi_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx) 2430 2432 { 2433 + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); 2431 2434 struct iscsi_session *session = iscsi_hostdata(shost->hostdata); 2432 2435 struct iscsi_conn *conn; 2433 2436 struct iscsi_cls_conn *cls_conn; 2434 2437 2435 - cls_conn = iscsi_create_conn(hostdata_session(shost->hostdata), 2436 - conn_idx); 2438 + cls_conn = iscsi_create_conn(cls_session, conn_idx); 2437 2439 if (!cls_conn) 2438 2440 return NULL; 2439 2441 conn = cls_conn->dd_data; 2442 + memset(conn, 0, sizeof(*conn)); 2440 2443 2441 - memset(conn, 0, sizeof(struct iscsi_conn)); 2444 + conn->cls_conn = cls_conn; 2442 2445 conn->c_stage = ISCSI_CONN_INITIAL_STAGE; 2443 2446 conn->in_progress = IN_PROGRESS_WAIT_HEADER; 2444 2447 conn->id = conn_idx; ··· 2450 2451 conn->hdr_size = sizeof(struct iscsi_hdr); 2451 2452 conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH; 2452 2453 conn->max_recv_dlength = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH; 2453 - 2454 - spin_lock_init(&conn->lock); 2455 2454 2456 2455 /* initialize general xmit PDU commands queue */ 2457 2456 conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*), ··· 2622 2625 } 2623 2626 2624 2627 static int 2625 - iscsi_conn_bind(iscsi_sessionh_t sessionh, iscsi_connh_t connh, 2626 - uint32_t transport_fd, int is_leading) 2628 + iscsi_conn_bind(struct iscsi_cls_session *cls_session, 2629 + struct iscsi_cls_conn *cls_conn, uint32_t transport_fd, 2630 + int is_leading) 2627 2631 { 2628 - struct iscsi_session *session = 
iscsi_ptr(sessionh); 2629 - struct iscsi_conn *tmp = ERR_PTR(-EEXIST), *conn = iscsi_ptr(connh); 2632 + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); 2633 + struct iscsi_session *session = iscsi_hostdata(shost->hostdata); 2634 + struct iscsi_conn *tmp = ERR_PTR(-EEXIST), *conn = cls_conn->dd_data; 2630 2635 struct sock *sk; 2631 2636 struct socket *sock; 2632 2637 int err; ··· 2702 2703 } 2703 2704 2704 2705 static int 2705 - iscsi_conn_start(iscsi_connh_t connh) 2706 + iscsi_conn_start(struct iscsi_cls_conn *cls_conn) 2706 2707 { 2707 - struct iscsi_conn *conn = iscsi_ptr(connh); 2708 + struct iscsi_conn *conn = cls_conn->dd_data; 2708 2709 struct iscsi_session *session = conn->session; 2709 2710 struct sock *sk; 2710 2711 ··· 2753 2754 } 2754 2755 2755 2756 static void 2756 - iscsi_conn_stop(iscsi_connh_t connh, int flag) 2757 + iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) 2757 2758 { 2758 - struct iscsi_conn *conn = iscsi_ptr(connh); 2759 + struct iscsi_conn *conn = cls_conn->dd_data; 2759 2760 struct iscsi_session *session = conn->session; 2760 2761 struct sock *sk; 2761 2762 unsigned long flags; ··· 3252 3253 3253 3254 static struct iscsi_transport iscsi_tcp_transport; 3254 3255 3255 - static struct Scsi_Host * 3256 + static struct iscsi_cls_session * 3256 3257 iscsi_session_create(struct scsi_transport_template *scsit, 3257 - uint32_t initial_cmdsn) 3258 + uint32_t initial_cmdsn, uint32_t *sid) 3258 3259 { 3259 3260 struct Scsi_Host *shost; 3260 3261 struct iscsi_session *session; ··· 3267 3268 session = iscsi_hostdata(shost->hostdata); 3268 3269 memset(session, 0, sizeof(struct iscsi_session)); 3269 3270 session->host = shost; 3270 - session->state = ISCSI_STATE_LOGGED_IN; 3271 + session->state = ISCSI_STATE_FREE; 3271 3272 session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX; 3272 3273 session->cmds_max = ISCSI_XMIT_CMDS_MAX; 3273 3274 session->cmdsn = initial_cmdsn; 3274 3275 session->exp_cmdsn = initial_cmdsn + 1; 3275 3276 session->max_cmdsn = initial_cmdsn + 1; 3276 3277 session->max_r2t = 1; 3278 + *sid = shost->host_no; 3277 3279 3278 3280 /* initialize SCSI PDU commands pool */ 3279 3281 if (iscsi_pool_init(&session->cmdpool, session->cmds_max, ··· 3311 3311 if (iscsi_r2tpool_alloc(session)) 3312 3312 goto r2tpool_alloc_fail; 3313 3313 3314 - return shost; 3314 + return hostdata_session(shost->hostdata); 3315 3315 3316 3316 r2tpool_alloc_fail: 3317 3317 for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) 3318 3318 kfree(session->mgmt_cmds[cmd_i]->data); 3319 - iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds); 3320 3319 immdata_alloc_fail: 3320 + iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds); 3321 3321 mgmtpool_alloc_fail: 3322 3322 iscsi_pool_free(&session->cmdpool, (void**)session->cmds); 3323 3323 cmdpool_alloc_fail: 3324 + iscsi_transport_destroy_session(shost); 3324 3325 return NULL; 3325 3326 } 3326 3327 3327 3328 static void 3328 - iscsi_session_destroy(struct Scsi_Host *shost) 3329 + iscsi_session_destroy(struct iscsi_cls_session *cls_session) 3329 3330 { 3331 + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); 3330 3332 struct iscsi_session *session = iscsi_hostdata(shost->hostdata); 3331 3333 int cmd_i; 3332 3334 struct iscsi_data_task *dtask, *n; ··· 3352 3350 } 3353 3351 3354 3352 static int 3355 - iscsi_conn_set_param(iscsi_connh_t connh, enum iscsi_param param, 3353 + iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, 3356 3354 uint32_t value) 3357 3355 { 3358 - 
struct iscsi_conn *conn = iscsi_ptr(connh); 3356 + struct iscsi_conn *conn = cls_conn->dd_data; 3359 3357 struct iscsi_session *session = conn->session; 3360 3358 3361 3359 spin_lock_bh(&session->lock); ··· 3497 3495 } 3498 3496 3499 3497 static int 3500 - iscsi_session_get_param(struct Scsi_Host *shost, 3498 + iscsi_session_get_param(struct iscsi_cls_session *cls_session, 3501 3499 enum iscsi_param param, uint32_t *value) 3502 3500 { 3501 + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); 3503 3502 struct iscsi_session *session = iscsi_hostdata(shost->hostdata); 3504 3503 3505 3504 switch(param) { ··· 3542 3539 } 3543 3540 3544 3541 static int 3545 - iscsi_conn_get_param(void *data, enum iscsi_param param, uint32_t *value) 3542 + iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, 3543 + enum iscsi_param param, uint32_t *value) 3546 3544 { 3547 - struct iscsi_conn *conn = data; 3545 + struct iscsi_conn *conn = cls_conn->dd_data; 3548 3546 3549 3547 switch(param) { 3550 3548 case ISCSI_PARAM_MAX_RECV_DLENGTH: ··· 3568 3564 } 3569 3565 3570 3566 static void 3571 - iscsi_conn_get_stats(iscsi_connh_t connh, struct iscsi_stats *stats) 3567 + iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats) 3572 3568 { 3573 - struct iscsi_conn *conn = iscsi_ptr(connh); 3569 + struct iscsi_conn *conn = cls_conn->dd_data; 3574 3570 3575 3571 stats->txdata_octets = conn->txdata_octets; 3576 3572 stats->rxdata_octets = conn->rxdata_octets; ··· 3591 3587 } 3592 3588 3593 3589 static int 3594 - iscsi_conn_send_pdu(iscsi_connh_t connh, struct iscsi_hdr *hdr, char *data, 3595 - uint32_t data_size) 3590 + iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr, 3591 + char *data, uint32_t data_size) 3596 3592 { 3597 - struct iscsi_conn *conn = iscsi_ptr(connh); 3593 + struct iscsi_conn *conn = cls_conn->dd_data; 3598 3594 int rc; 3599 3595 3600 3596 mutex_lock(&conn->xmitmutex);
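The iscsi_tcp conversion swaps the opaque iscsi_sessionh_t/iscsi_connh_t handles for typed class objects whose trailing dd_data area carries the driver's private struct, with a cls_conn back-pointer added to struct iscsi_conn in iscsi_tcp.h below. A self-contained model of that embedding, assuming the transport class allocates the class object and private data as one block:

#include <stdio.h>
#include <stdlib.h>

/* Model of the dd_data pattern: the class object is allocated with
 * room for the driver's private struct behind it, dd_data points at
 * that room, and the private struct keeps a back-pointer. */
struct cls_conn {
	int id;
	void *dd_data;			/* driver-private area follows */
};

struct drv_conn {
	struct cls_conn *cls_conn;	/* back-pointer, as in iscsi_tcp.h */
	int c_stage;
};

static struct cls_conn *create_conn(int id)
{
	struct cls_conn *cls = malloc(sizeof(*cls) + sizeof(struct drv_conn));
	struct drv_conn *conn;

	if (!cls)
		return NULL;
	cls->id = id;
	cls->dd_data = cls + 1;		/* private data right behind */
	conn = cls->dd_data;
	conn->cls_conn = cls;		/* driver can reach the class object */
	conn->c_stage = 0;
	return cls;
}

int main(void)
{
	struct cls_conn *cls = create_conn(7);
	struct drv_conn *conn;

	if (!cls)
		return 1;
	conn = cls->dd_data;
	printf("%d\n", conn->cls_conn->id);	/* prints 7 */
	free(cls);
	return 0;
}

This back-pointer is what lets iscsi_conn_error() and iscsi_recv_pdu() above take conn->cls_conn directly instead of a handle.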
+3 -1
drivers/scsi/iscsi_tcp.h
··· 113 113 int datadgst; 114 114 }; 115 115 116 + struct iscsi_cls_conn; 117 + 116 118 struct iscsi_conn { 119 + struct iscsi_cls_conn *cls_conn; /* ptr to class connection */ 117 120 struct iscsi_hdr hdr; /* header placeholder */ 118 121 char hdrext[4*sizeof(__u16) + 119 122 sizeof(__u32)]; ··· 146 143 struct iscsi_mgmt_task *login_mtask; /* mtask used for login/text */ 147 144 struct iscsi_mgmt_task *mtask; /* xmit mtask in progress */ 148 145 struct iscsi_cmd_task *ctask; /* xmit ctask in progress */ 149 - spinlock_t lock; /* FIXME: to be removed */ 150 146 151 147 /* old values for socket callbacks */ 152 148 void (*old_data_ready)(struct sock *, int);
+1 -1
drivers/scsi/megaraid.c
··· 5049 5049 MODULE_DEVICE_TABLE(pci, megaraid_pci_tbl); 5050 5050 5051 5051 static struct pci_driver megaraid_pci_driver = { 5052 - .name = "megaraid", 5052 + .name = "megaraid_legacy", 5053 5053 .id_table = megaraid_pci_tbl, 5054 5054 .probe = megaraid_probe_one, 5055 5055 .remove = __devexit_p(megaraid_remove_one),
+1 -1
drivers/scsi/megaraid.h
··· 5 5 #include <linux/mutex.h> 6 6 7 7 #define MEGARAID_VERSION \ 8 - "v2.00.3 (Release Date: Wed Feb 19 08:51:30 EST 2003)\n" 8 + "v2.00.4 (Release Date: Thu Feb 9 08:51:30 EST 2006)\n" 9 9 10 10 /* 11 11 * Driver features - change the values to enable or disable features in the
+99 -2
drivers/scsi/megaraid/megaraid_sas.c
··· 10 10 * 2 of the License, or (at your option) any later version. 11 11 * 12 12 * FILE : megaraid_sas.c 13 - * Version : v00.00.02.02 13 + * Version : v00.00.02.04 14 14 * 15 15 * Authors: 16 16 * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsil.com> ··· 59 59 PCI_ANY_ID, 60 60 PCI_ANY_ID, 61 61 }, 62 + { 63 + PCI_VENDOR_ID_LSI_LOGIC, 64 + PCI_DEVICE_ID_LSI_SAS1078R, // ppc IOP 65 + PCI_ANY_ID, 66 + PCI_ANY_ID, 67 + }, 62 68 { 63 69 PCI_VENDOR_ID_DELL, 64 70 PCI_DEVICE_ID_DELL_PERC5, // xscale IOP ··· 202 196 /** 203 197 * This is the end of set of functions & definitions specific 204 198 * to xscale (deviceid : 1064R, PERC5) controllers 199 + */ 200 + 201 + /** 202 + * The following functions are defined for ppc (deviceid : 0x60) 203 + * controllers 204 + */ 205 + 206 + /** 207 + * megasas_enable_intr_ppc - Enables interrupts 208 + * @regs: MFI register set 209 + */ 210 + static inline void 211 + megasas_enable_intr_ppc(struct megasas_register_set __iomem * regs) 212 + { 213 + writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); 214 + 215 + writel(~0x80000004, &(regs)->outbound_intr_mask); 216 + 217 + /* Dummy readl to force pci flush */ 218 + readl(&regs->outbound_intr_mask); 219 + } 220 + 221 + /** 222 + * megasas_read_fw_status_reg_ppc - returns the current FW status value 223 + * @regs: MFI register set 224 + */ 225 + static u32 226 + megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs) 227 + { 228 + return readl(&(regs)->outbound_scratch_pad); 229 + } 230 + 231 + /** 232 + * megasas_clear_interrupt_ppc - Check & clear interrupt 233 + * @regs: MFI register set 234 + */ 235 + static int 236 + megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs) 237 + { 238 + u32 status; 239 + /* 240 + * Check if it is our interrupt 241 + */ 242 + status = readl(&regs->outbound_intr_status); 243 + 244 + if (!(status & MFI_REPLY_1078_MESSAGE_INTERRUPT)) { 245 + return 1; 246 + } 247 + 248 + /* 249 + * Clear the interrupt by writing back the same value 250 + */ 251 + writel(status, &regs->outbound_doorbell_clear); 252 + 253 + return 0; 254 + } 255 + /** 256 + * megasas_fire_cmd_ppc - Sends command to the FW 257 + * @frame_phys_addr : Physical address of cmd 258 + * @frame_count : Number of frames for the command 259 + * @regs : MFI register set 260 + */ 261 + static inline void 262 + megasas_fire_cmd_ppc(dma_addr_t frame_phys_addr, u32 frame_count, struct megasas_register_set __iomem *regs) 263 + { 264 + writel((frame_phys_addr | (frame_count<<1))|1, 265 + &(regs)->inbound_queue_port); 266 + } 267 + 268 + static struct megasas_instance_template megasas_instance_template_ppc = { 269 + 270 + .fire_cmd = megasas_fire_cmd_ppc, 271 + .enable_intr = megasas_enable_intr_ppc, 272 + .clear_intr = megasas_clear_intr_ppc, 273 + .read_fw_status_reg = megasas_read_fw_status_reg_ppc, 274 + }; 275 + 276 + /** 277 + * This is the end of set of functions & definitions 278 + * specific to ppc (deviceid : 0x60) controllers 205 279 */ 206 280 207 281 /** ··· 1693 1607 1694 1608 reg_set = instance->reg_set; 1695 1609 1696 - instance->instancet = &megasas_instance_template_xscale; 1610 + switch(instance->pdev->device) 1611 + { 1612 + case PCI_DEVICE_ID_LSI_SAS1078R: 1613 + instance->instancet = &megasas_instance_template_ppc; 1614 + break; 1615 + case PCI_DEVICE_ID_LSI_SAS1064R: 1616 + case PCI_DEVICE_ID_DELL_PERC5: 1617 + default: 1618 + instance->instancet = &megasas_instance_template_xscale; 1619 + break; 1620 + } 1697 1621 1698 1622 /* 1699 1623 * We expect the FW state to be READY ··· 
2079 1983 host->max_channel = MEGASAS_MAX_CHANNELS - 1; 2080 1984 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL; 2081 1985 host->max_lun = MEGASAS_MAX_LUN; 1986 + host->max_cmd_len = 16; 2082 1987 2083 1988 /* 2084 1989 * Notify the mid-layer about the new controller
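megasas_probe_one no longer hard-wires the xscale template: the instance now dispatches through a per-IOP table of function pointers selected by PCI device ID, with unknown IDs falling back to xscale. A standalone model of the selection (only the 1078 ID, 0x60, is defined by this patch; the 1064R value below is illustrative):

#include <stdio.h>

/* Standalone model of the instance-template selection added above. */
#define ID_SAS1078R 0x0060
#define ID_SAS1064R 0x0411	/* illustrative */

struct instance_template {
	const char *name;
	void (*enable_intr)(void);
};

static void enable_intr_xscale(void) { puts("xscale enable_intr"); }
static void enable_intr_ppc(void)    { puts("ppc enable_intr"); }

static const struct instance_template tmpl_xscale = { "xscale", enable_intr_xscale };
static const struct instance_template tmpl_ppc    = { "ppc",    enable_intr_ppc };

static const struct instance_template *pick_template(unsigned int device)
{
	switch (device) {
	case ID_SAS1078R:
		return &tmpl_ppc;
	case ID_SAS1064R:
	default:		/* unknown IDs fall back to xscale */
		return &tmpl_xscale;
	}
}

int main(void)
{
	pick_template(ID_SAS1078R)->enable_intr();	/* ppc enable_intr */
	return 0;
}

The rest of the driver then calls through instance->instancet without caring which IOP is underneath.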
+34 -19
drivers/scsi/megaraid/megaraid_sas.h
··· 18 18 /** 19 19 * MegaRAID SAS Driver meta data 20 20 */ 21 - #define MEGASAS_VERSION "00.00.02.02" 22 - #define MEGASAS_RELDATE "Jan 23, 2006" 23 - #define MEGASAS_EXT_VERSION "Mon Jan 23 14:09:01 PST 2006" 21 + #define MEGASAS_VERSION "00.00.02.04" 22 + #define MEGASAS_RELDATE "Feb 03, 2006" 23 + #define MEGASAS_EXT_VERSION "Fri Feb 03 14:31:44 PST 2006" 24 24 /* 25 25 * ===================================== 26 26 * MegaRAID SAS MFI firmware definitions ··· 553 553 #define MFI_OB_INTR_STATUS_MASK 0x00000002 554 554 #define MFI_POLL_TIMEOUT_SECS 10 555 555 556 + #define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000 557 + #define PCI_DEVICE_ID_LSI_SAS1078R 0x00000060 558 + 556 559 struct megasas_register_set { 560 + u32 reserved_0[4]; /*0000h*/ 557 561 558 - u32 reserved_0[4]; /*0000h */ 562 + u32 inbound_msg_0; /*0010h*/ 563 + u32 inbound_msg_1; /*0014h*/ 564 + u32 outbound_msg_0; /*0018h*/ 565 + u32 outbound_msg_1; /*001Ch*/ 559 566 560 - u32 inbound_msg_0; /*0010h */ 561 - u32 inbound_msg_1; /*0014h */ 562 - u32 outbound_msg_0; /*0018h */ 563 - u32 outbound_msg_1; /*001Ch */ 567 + u32 inbound_doorbell; /*0020h*/ 568 + u32 inbound_intr_status; /*0024h*/ 569 + u32 inbound_intr_mask; /*0028h*/ 564 570 565 - u32 inbound_doorbell; /*0020h */ 566 - u32 inbound_intr_status; /*0024h */ 567 - u32 inbound_intr_mask; /*0028h */ 571 + u32 outbound_doorbell; /*002Ch*/ 572 + u32 outbound_intr_status; /*0030h*/ 573 + u32 outbound_intr_mask; /*0034h*/ 568 574 569 - u32 outbound_doorbell; /*002Ch */ 570 - u32 outbound_intr_status; /*0030h */ 571 - u32 outbound_intr_mask; /*0034h */ 575 + u32 reserved_1[2]; /*0038h*/ 572 576 573 - u32 reserved_1[2]; /*0038h */ 577 + u32 inbound_queue_port; /*0040h*/ 578 + u32 outbound_queue_port; /*0044h*/ 574 579 575 - u32 inbound_queue_port; /*0040h */ 576 - u32 outbound_queue_port; /*0044h */ 580 + u32 reserved_2[22]; /*0048h*/ 577 581 578 - u32 reserved_2; /*004Ch */ 582 + u32 outbound_doorbell_clear; /*00A0h*/ 579 583 580 - u32 index_registers[1004]; /*0050h */ 584 + u32 reserved_3[3]; /*00A4h*/ 585 + 586 + u32 outbound_scratch_pad ; /*00B0h*/ 587 + 588 + u32 reserved_4[3]; /*00B4h*/ 589 + 590 + u32 inbound_low_queue_port ; /*00C0h*/ 591 + 592 + u32 inbound_high_queue_port ; /*00C4h*/ 593 + 594 + u32 reserved_5; /*00C8h*/ 595 + u32 index_registers[820]; /*00CCh*/ 581 596 582 597 } __attribute__ ((packed)); 583 598
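The reshaped register map carves the 1078's doorbell-clear, scratch-pad and high/low queue-port registers out of what used to be reserved and index space, and the offset comments can be desk-checked mechanically. A sketch using C11 _Static_assert (not available to the 2.6-era kernel itself); the model struct mirrors the header's layout:

#include <stddef.h>
#include <stdint.h>

/* Desk-check of the offset comments in the new register map. */
struct megasas_register_set_model {
	uint32_t reserved_0[4];				/* 0000h */
	uint32_t inbound_msg_0, inbound_msg_1;		/* 0010h, 0014h */
	uint32_t outbound_msg_0, outbound_msg_1;	/* 0018h, 001Ch */
	uint32_t inbound_doorbell;			/* 0020h */
	uint32_t inbound_intr_status;			/* 0024h */
	uint32_t inbound_intr_mask;			/* 0028h */
	uint32_t outbound_doorbell;			/* 002Ch */
	uint32_t outbound_intr_status;			/* 0030h */
	uint32_t outbound_intr_mask;			/* 0034h */
	uint32_t reserved_1[2];				/* 0038h */
	uint32_t inbound_queue_port;			/* 0040h */
	uint32_t outbound_queue_port;			/* 0044h */
	uint32_t reserved_2[22];			/* 0048h */
	uint32_t outbound_doorbell_clear;		/* 00A0h */
	uint32_t reserved_3[3];				/* 00A4h */
	uint32_t outbound_scratch_pad;			/* 00B0h */
	uint32_t reserved_4[3];				/* 00B4h */
	uint32_t inbound_low_queue_port;		/* 00C0h */
	uint32_t inbound_high_queue_port;		/* 00C4h */
	uint32_t reserved_5;				/* 00C8h */
	uint32_t index_registers[820];			/* 00CCh */
};

_Static_assert(offsetof(struct megasas_register_set_model,
			outbound_doorbell_clear) == 0xA0, "doorbell clear");
_Static_assert(offsetof(struct megasas_register_set_model,
			outbound_scratch_pad) == 0xB0, "scratch pad");
_Static_assert(offsetof(struct megasas_register_set_model,
			index_registers) == 0xCC, "index registers");

int main(void) { return 0; }	/* compiling is the check */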
+271 -5
drivers/scsi/qla2xxx/qla_attr.c
··· 7 7 #include "qla_def.h" 8 8 9 9 #include <linux/vmalloc.h> 10 - #include <scsi/scsi_transport_fc.h> 11 10 12 11 /* SYSFS attributes --------------------------------------------------------- */ 13 12 ··· 113 114 struct device, kobj))); 114 115 unsigned long flags; 115 116 116 - if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size) 117 + if (!capable(CAP_SYS_ADMIN) || off != 0) 117 118 return 0; 118 119 119 120 /* Read NVRAM. */ ··· 122 123 ha->nvram_size); 123 124 spin_unlock_irqrestore(&ha->hardware_lock, flags); 124 125 125 - return (count); 126 + return ha->nvram_size; 126 127 } 127 128 128 129 static ssize_t ··· 174 175 .mode = S_IRUSR | S_IWUSR, 175 176 .owner = THIS_MODULE, 176 177 }, 177 - .size = 0, 178 + .size = 512, 178 179 .read = qla2x00_sysfs_read_nvram, 179 180 .write = qla2x00_sysfs_write_nvram, 181 + }; 182 + 183 + static ssize_t 184 + qla2x00_sysfs_read_optrom(struct kobject *kobj, char *buf, loff_t off, 185 + size_t count) 186 + { 187 + struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj, 188 + struct device, kobj))); 189 + 190 + if (ha->optrom_state != QLA_SREADING) 191 + return 0; 192 + if (off > ha->optrom_size) 193 + return 0; 194 + if (off + count > ha->optrom_size) 195 + count = ha->optrom_size - off; 196 + 197 + memcpy(buf, &ha->optrom_buffer[off], count); 198 + 199 + return count; 200 + } 201 + 202 + static ssize_t 203 + qla2x00_sysfs_write_optrom(struct kobject *kobj, char *buf, loff_t off, 204 + size_t count) 205 + { 206 + struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj, 207 + struct device, kobj))); 208 + 209 + if (ha->optrom_state != QLA_SWRITING) 210 + return -EINVAL; 211 + if (off > ha->optrom_size) 212 + return -ERANGE; 213 + if (off + count > ha->optrom_size) 214 + count = ha->optrom_size - off; 215 + 216 + memcpy(&ha->optrom_buffer[off], buf, count); 217 + 218 + return count; 219 + } 220 + 221 + static struct bin_attribute sysfs_optrom_attr = { 222 + .attr = { 223 + .name = "optrom", 224 + .mode = S_IRUSR | S_IWUSR, 225 + .owner = THIS_MODULE, 226 + }, 227 + .size = OPTROM_SIZE_24XX, 228 + .read = qla2x00_sysfs_read_optrom, 229 + .write = qla2x00_sysfs_write_optrom, 230 + }; 231 + 232 + static ssize_t 233 + qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj, char *buf, loff_t off, 234 + size_t count) 235 + { 236 + struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj, 237 + struct device, kobj))); 238 + int val; 239 + 240 + if (off) 241 + return 0; 242 + 243 + if (sscanf(buf, "%d", &val) != 1) 244 + return -EINVAL; 245 + 246 + switch (val) { 247 + case 0: 248 + if (ha->optrom_state != QLA_SREADING && 249 + ha->optrom_state != QLA_SWRITING) 250 + break; 251 + 252 + ha->optrom_state = QLA_SWAITING; 253 + vfree(ha->optrom_buffer); 254 + ha->optrom_buffer = NULL; 255 + break; 256 + case 1: 257 + if (ha->optrom_state != QLA_SWAITING) 258 + break; 259 + 260 + ha->optrom_state = QLA_SREADING; 261 + ha->optrom_buffer = (uint8_t *)vmalloc(ha->optrom_size); 262 + if (ha->optrom_buffer == NULL) { 263 + qla_printk(KERN_WARNING, ha, 264 + "Unable to allocate memory for optrom retrieval " 265 + "(%x).\n", ha->optrom_size); 266 + 267 + ha->optrom_state = QLA_SWAITING; 268 + return count; 269 + } 270 + 271 + memset(ha->optrom_buffer, 0, ha->optrom_size); 272 + ha->isp_ops.read_optrom(ha, ha->optrom_buffer, 0, 273 + ha->optrom_size); 274 + break; 275 + case 2: 276 + if (ha->optrom_state != QLA_SWAITING) 277 + break; 278 + 279 + ha->optrom_state = QLA_SWRITING; 280 + ha->optrom_buffer = (uint8_t 
*)vmalloc(ha->optrom_size); 281 + if (ha->optrom_buffer == NULL) { 282 + qla_printk(KERN_WARNING, ha, 283 + "Unable to allocate memory for optrom update " 284 + "(%x).\n", ha->optrom_size); 285 + 286 + ha->optrom_state = QLA_SWAITING; 287 + return count; 288 + } 289 + memset(ha->optrom_buffer, 0, ha->optrom_size); 290 + break; 291 + case 3: 292 + if (ha->optrom_state != QLA_SWRITING) 293 + break; 294 + 295 + ha->isp_ops.write_optrom(ha, ha->optrom_buffer, 0, 296 + ha->optrom_size); 297 + break; 298 + } 299 + return count; 300 + } 301 + 302 + static struct bin_attribute sysfs_optrom_ctl_attr = { 303 + .attr = { 304 + .name = "optrom_ctl", 305 + .mode = S_IWUSR, 306 + .owner = THIS_MODULE, 307 + }, 308 + .size = 0, 309 + .write = qla2x00_sysfs_write_optrom_ctl, 180 310 }; 181 311 182 312 void ··· 314 186 struct Scsi_Host *host = ha->host; 315 187 316 188 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_fw_dump_attr); 317 - sysfs_nvram_attr.size = ha->nvram_size; 318 189 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr); 190 + sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_optrom_attr); 191 + sysfs_create_bin_file(&host->shost_gendev.kobj, 192 + &sysfs_optrom_ctl_attr); 319 193 } 320 194 321 195 void ··· 327 197 328 198 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_fw_dump_attr); 329 199 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr); 200 + sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_optrom_attr); 201 + sysfs_remove_bin_file(&host->shost_gendev.kobj, 202 + &sysfs_optrom_ctl_attr); 203 + 204 + if (ha->beacon_blink_led == 1) 205 + ha->isp_ops.beacon_off(ha); 330 206 } 331 207 332 208 /* Scsi_Host attributes. */ ··· 520 384 return strlen(buf); 521 385 } 522 386 387 + static ssize_t 388 + qla2x00_beacon_show(struct class_device *cdev, char *buf) 389 + { 390 + scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev)); 391 + int len = 0; 392 + 393 + if (ha->beacon_blink_led) 394 + len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n"); 395 + else 396 + len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n"); 397 + return len; 398 + } 399 + 400 + static ssize_t 401 + qla2x00_beacon_store(struct class_device *cdev, const char *buf, 402 + size_t count) 403 + { 404 + scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev)); 405 + int val = 0; 406 + int rval; 407 + 408 + if (IS_QLA2100(ha) || IS_QLA2200(ha)) 409 + return -EPERM; 410 + 411 + if (test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags)) { 412 + qla_printk(KERN_WARNING, ha, 413 + "Abort ISP active -- ignoring beacon request.\n"); 414 + return -EBUSY; 415 + } 416 + 417 + if (sscanf(buf, "%d", &val) != 1) 418 + return -EINVAL; 419 + 420 + if (val) 421 + rval = ha->isp_ops.beacon_on(ha); 422 + else 423 + rval = ha->isp_ops.beacon_off(ha); 424 + 425 + if (rval != QLA_SUCCESS) 426 + count = 0; 427 + 428 + return count; 429 + } 430 + 523 431 static CLASS_DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, 524 432 NULL); 525 433 static CLASS_DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); ··· 578 398 qla2x00_zio_store); 579 399 static CLASS_DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show, 580 400 qla2x00_zio_timer_store); 401 + static CLASS_DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show, 402 + qla2x00_beacon_store); 581 403 582 404 struct class_device_attribute *qla2x00_host_attrs[] = { 583 405 &class_device_attr_driver_version, ··· 593 411 &class_device_attr_state, 594 412 &class_device_attr_zio, 595 413 &class_device_attr_zio_timer, 
414 + &class_device_attr_beacon, 596 415 NULL, 597 416 }; 598 417 ··· 606 423 607 424 fc_host_port_id(shost) = ha->d_id.b.domain << 16 | 608 425 ha->d_id.b.area << 8 | ha->d_id.b.al_pa; 426 + } 427 + 428 + static void 429 + qla2x00_get_host_speed(struct Scsi_Host *shost) 430 + { 431 + scsi_qla_host_t *ha = to_qla_host(shost); 432 + uint32_t speed = 0; 433 + 434 + switch (ha->link_data_rate) { 435 + case LDR_1GB: 436 + speed = 1; 437 + break; 438 + case LDR_2GB: 439 + speed = 2; 440 + break; 441 + case LDR_4GB: 442 + speed = 4; 443 + break; 444 + } 445 + fc_host_speed(shost) = speed; 446 + } 447 + 448 + static void 449 + qla2x00_get_host_port_type(struct Scsi_Host *shost) 450 + { 451 + scsi_qla_host_t *ha = to_qla_host(shost); 452 + uint32_t port_type = FC_PORTTYPE_UNKNOWN; 453 + 454 + switch (ha->current_topology) { 455 + case ISP_CFG_NL: 456 + port_type = FC_PORTTYPE_LPORT; 457 + break; 458 + case ISP_CFG_FL: 459 + port_type = FC_PORTTYPE_NLPORT; 460 + break; 461 + case ISP_CFG_N: 462 + port_type = FC_PORTTYPE_PTP; 463 + break; 464 + case ISP_CFG_F: 465 + port_type = FC_PORTTYPE_NPORT; 466 + break; 467 + } 468 + fc_host_port_type(shost) = port_type; 609 469 } 610 470 611 471 static void ··· 738 512 return 0; 739 513 } 740 514 515 + static struct fc_host_statistics * 516 + qla2x00_get_fc_host_stats(struct Scsi_Host *shost) 517 + { 518 + scsi_qla_host_t *ha = to_qla_host(shost); 519 + int rval; 520 + uint16_t mb_stat[1]; 521 + link_stat_t stat_buf; 522 + struct fc_host_statistics *pfc_host_stat; 523 + 524 + pfc_host_stat = &ha->fc_host_stat; 525 + memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics)); 526 + 527 + if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) { 528 + rval = qla24xx_get_isp_stats(ha, (uint32_t *)&stat_buf, 529 + sizeof(stat_buf) / 4, mb_stat); 530 + } else { 531 + rval = qla2x00_get_link_status(ha, ha->loop_id, &stat_buf, 532 + mb_stat); 533 + } 534 + if (rval != 0) { 535 + qla_printk(KERN_WARNING, ha, 536 + "Unable to retrieve host statistics (%d).\n", mb_stat[0]); 537 + return pfc_host_stat; 538 + } 539 + 540 + pfc_host_stat->link_failure_count = stat_buf.link_fail_cnt; 541 + pfc_host_stat->loss_of_sync_count = stat_buf.loss_sync_cnt; 542 + pfc_host_stat->loss_of_signal_count = stat_buf.loss_sig_cnt; 543 + pfc_host_stat->prim_seq_protocol_err_count = stat_buf.prim_seq_err_cnt; 544 + pfc_host_stat->invalid_tx_word_count = stat_buf.inval_xmit_word_cnt; 545 + pfc_host_stat->invalid_crc_count = stat_buf.inval_crc_cnt; 546 + 547 + return pfc_host_stat; 548 + } 549 + 741 550 struct fc_function_template qla2xxx_transport_functions = { 742 551 743 552 .show_host_node_name = 1, ··· 781 520 782 521 .get_host_port_id = qla2x00_get_host_port_id, 783 522 .show_host_port_id = 1, 523 + .get_host_speed = qla2x00_get_host_speed, 524 + .show_host_speed = 1, 525 + .get_host_port_type = qla2x00_get_host_port_type, 526 + .show_host_port_type = 1, 784 527 785 528 .dd_fcrport_size = sizeof(struct fc_port *), 786 529 .show_rport_supported_classes = 1, ··· 801 536 .show_rport_dev_loss_tmo = 1, 802 537 803 538 .issue_fc_host_lip = qla2x00_issue_lip, 539 + .get_fc_host_stats = qla2x00_get_fc_host_stats, 804 540 }; 805 541 806 542 void
+42 -2
drivers/scsi/qla2xxx/qla_def.h
··· 29 29 #include <scsi/scsi_host.h> 30 30 #include <scsi/scsi_device.h> 31 31 #include <scsi/scsi_cmnd.h> 32 + #include <scsi/scsi_transport_fc.h> 32 33 33 34 #if defined(CONFIG_SCSI_QLA2XXX_EMBEDDED_FIRMWARE) 34 35 #if defined(CONFIG_SCSI_QLA21XX) || defined(CONFIG_SCSI_QLA21XX_MODULE) ··· 180 179 #define WRT_REG_BYTE(addr, data) writeb(data,addr) 181 180 #define WRT_REG_WORD(addr, data) writew(data,addr) 182 181 #define WRT_REG_DWORD(addr, data) writel(data,addr) 182 + 183 + /* 184 + * The ISP2312 v2 chip cannot access the FLASH/GPIO registers via MMIO in an 185 + * 133Mhz slot. 186 + */ 187 + #define RD_REG_WORD_PIO(addr) (inw((unsigned long)addr)) 188 + #define WRT_REG_WORD_PIO(addr, data) (outw(data,(unsigned long)addr)) 183 189 184 190 /* 185 191 * Fibre Channel device definitions. ··· 440 432 #define GPIO_LED_GREEN_ON_AMBER_OFF 0x0040 441 433 #define GPIO_LED_GREEN_OFF_AMBER_ON 0x0080 442 434 #define GPIO_LED_GREEN_ON_AMBER_ON 0x00C0 435 + #define GPIO_LED_ALL_OFF 0x0000 436 + #define GPIO_LED_RED_ON_OTHER_OFF 0x0001 /* isp2322 */ 437 + #define GPIO_LED_RGA_ON 0x00C1 /* isp2322: red green amber */ 443 438 444 439 union { 445 440 struct { ··· 2210 2199 2211 2200 void (*fw_dump) (struct scsi_qla_host *, int); 2212 2201 void (*ascii_fw_dump) (struct scsi_qla_host *); 2202 + 2203 + int (*beacon_on) (struct scsi_qla_host *); 2204 + int (*beacon_off) (struct scsi_qla_host *); 2205 + void (*beacon_blink) (struct scsi_qla_host *); 2206 + 2207 + uint8_t * (*read_optrom) (struct scsi_qla_host *, uint8_t *, 2208 + uint32_t, uint32_t); 2209 + int (*write_optrom) (struct scsi_qla_host *, uint8_t *, uint32_t, 2210 + uint32_t); 2213 2211 }; 2214 2212 2215 2213 /* ··· 2351 2331 uint16_t min_external_loopid; /* First external loop Id */ 2352 2332 2353 2333 uint16_t link_data_rate; /* F/W operating speed */ 2334 + #define LDR_1GB 0 2335 + #define LDR_2GB 1 2336 + #define LDR_4GB 3 2337 + #define LDR_UNKNOWN 0xFFFF 2354 2338 2355 2339 uint8_t current_topology; 2356 2340 uint8_t prev_topology; ··· 2510 2486 uint8_t *port_name; 2511 2487 uint32_t isp_abort_cnt; 2512 2488 2489 + /* Option ROM information. */ 2490 + char *optrom_buffer; 2491 + uint32_t optrom_size; 2492 + int optrom_state; 2493 + #define QLA_SWAITING 0 2494 + #define QLA_SREADING 1 2495 + #define QLA_SWRITING 2 2496 + 2513 2497 /* Needed for BEACON */ 2514 2498 uint16_t beacon_blink_led; 2515 - uint16_t beacon_green_on; 2499 + uint8_t beacon_color_state; 2500 + #define QLA_LED_GRN_ON 0x01 2501 + #define QLA_LED_YLW_ON 0x02 2502 + #define QLA_LED_ABR_ON 0x04 2503 + #define QLA_LED_ALL_ON 0x07 /* yellow, green, amber. */ 2504 + /* ISP2322: red, green, amber. */ 2516 2505 2517 2506 uint16_t zio_mode; 2518 2507 uint16_t zio_timer; 2508 + struct fc_host_statistics fc_host_stat; 2519 2509 } scsi_qla_host_t; 2520 2510 2521 2511 ··· 2595 2557 /* 2596 2558 * Flash support definitions 2597 2559 */ 2598 - #define FLASH_IMAGE_SIZE 131072 2560 + #define OPTROM_SIZE_2300 0x20000 2561 + #define OPTROM_SIZE_2322 0x100000 2562 + #define OPTROM_SIZE_24XX 0x100000 2599 2563 2600 2564 #include "qla_gbl.h" 2601 2565 #include "qla_dbg.h"
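The new LDR_* values mirror the firmware's link-rate encoding; note the hole, 4 Gb/s is code 3 rather than 2, and anything else reads as unknown. qla2x00_get_host_speed in qla_attr.c above folds them to Gb/s exactly like this standalone model:

#include <stdio.h>

/* Model of the LDR_* rate codes added to qla_def.h. */
#define LDR_1GB     0
#define LDR_2GB     1
#define LDR_4GB     3
#define LDR_UNKNOWN 0xFFFF

static unsigned int rate_to_gbps(unsigned int ldr)
{
	switch (ldr) {
	case LDR_1GB: return 1;
	case LDR_2GB: return 2;
	case LDR_4GB: return 4;
	default:      return 0;	/* unknown / link down */
	}
}

int main(void)
{
	printf("%u %u\n", rate_to_gbps(LDR_4GB), rate_to_gbps(LDR_UNKNOWN));
	return 0;	/* prints "4 0" */
}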
+25 -2
drivers/scsi/qla2xxx/qla_gbl.h
··· 75 75 extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int); 76 76 extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *, int); 77 77 78 - extern void qla2x00_blink_led(scsi_qla_host_t *); 79 - 80 78 extern int qla2x00_down_timeout(struct semaphore *, unsigned long); 81 79 82 80 extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *); 81 + 82 + extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *); 83 83 84 84 /* 85 85 * Global Function Prototypes in qla_iocb.c source file. ··· 185 185 extern int 186 186 qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map); 187 187 188 + extern int 189 + qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, link_stat_t *, 190 + uint16_t *); 191 + 192 + extern int 193 + qla24xx_get_isp_stats(scsi_qla_host_t *, uint32_t *, uint32_t, uint16_t *); 194 + 188 195 extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *); 189 196 extern int qla24xx_abort_target(fc_port_t *); 190 197 ··· 234 227 uint32_t); 235 228 extern int qla24xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, 236 229 uint32_t); 230 + 231 + extern int qla2x00_beacon_on(struct scsi_qla_host *); 232 + extern int qla2x00_beacon_off(struct scsi_qla_host *); 233 + extern void qla2x00_beacon_blink(struct scsi_qla_host *); 234 + extern int qla24xx_beacon_on(struct scsi_qla_host *); 235 + extern int qla24xx_beacon_off(struct scsi_qla_host *); 236 + extern void qla24xx_beacon_blink(struct scsi_qla_host *); 237 + 238 + extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *, 239 + uint32_t, uint32_t); 240 + extern int qla2x00_write_optrom_data(struct scsi_qla_host *, uint8_t *, 241 + uint32_t, uint32_t); 242 + extern uint8_t *qla24xx_read_optrom_data(struct scsi_qla_host *, uint8_t *, 243 + uint32_t, uint32_t); 244 + extern int qla24xx_write_optrom_data(struct scsi_qla_host *, uint8_t *, 245 + uint32_t, uint32_t); 237 246 238 247 /* 239 248 * Global Function Prototypes in qla_dbg.c source file.
-1
drivers/scsi/qla2xxx/qla_init.c
··· 8 8 9 9 #include <linux/delay.h> 10 10 #include <linux/vmalloc.h> 11 - #include <scsi/scsi_transport_fc.h> 12 11 13 12 #include "qla_devtbl.h" 14 13
+1
drivers/scsi/qla2xxx/qla_iocb.c
··· 814 814 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 815 815 816 816 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); 817 + host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 817 818 818 819 /* Load SCSI command packet. */ 819 820 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
+2 -2
drivers/scsi/qla2xxx/qla_isr.c
··· 402 402 break; 403 403 404 404 case MBA_LOOP_UP: /* Loop Up Event */ 405 - ha->link_data_rate = 0; 406 405 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 407 406 link_speed = link_speeds[0]; 407 + ha->link_data_rate = LDR_1GB; 408 408 } else { 409 409 link_speed = link_speeds[LS_UNKNOWN]; 410 410 if (mb[1] < 5) ··· 436 436 } 437 437 438 438 ha->flags.management_server_logged_in = 0; 439 - ha->link_data_rate = 0; 439 + ha->link_data_rate = LDR_UNKNOWN; 440 440 if (ql2xfdmienable) 441 441 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); 442 442
+104 -4
drivers/scsi/qla2xxx/qla_mbx.c
··· 7 7 #include "qla_def.h" 8 8 9 9 #include <linux/delay.h> 10 - #include <scsi/scsi_transport_fc.h> 11 10 12 11 static void 13 12 qla2x00_mbx_sem_timeout(unsigned long data) ··· 1873 1874 mcp->mb[3] = LSW(id_list_dma); 1874 1875 mcp->mb[6] = MSW(MSD(id_list_dma)); 1875 1876 mcp->mb[7] = LSW(MSD(id_list_dma)); 1876 - mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2; 1877 + mcp->mb[8] = 0; 1878 + mcp->out_mb |= MBX_8|MBX_7|MBX_6|MBX_3|MBX_2; 1877 1879 } else { 1878 1880 mcp->mb[1] = MSW(id_list_dma); 1879 1881 mcp->mb[2] = LSW(id_list_dma); ··· 2017 2017 2018 2018 return rval; 2019 2019 } 2020 + #endif 2020 2021 2021 - uint8_t 2022 + /* 2023 + * qla2x00_get_link_status 2024 + * 2025 + * Input: 2026 + * ha = adapter block pointer. 2027 + * loop_id = device loop ID. 2028 + * ret_buf = pointer to link status return buffer. 2029 + * 2030 + * Returns: 2031 + * 0 = success. 2032 + * BIT_0 = mem alloc error. 2033 + * BIT_1 = mailbox error. 2034 + */ 2035 + int 2036 + qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id, 2037 + link_stat_t *ret_buf, uint16_t *status) 2038 + { 2039 + int rval; 2040 + mbx_cmd_t mc; 2041 + mbx_cmd_t *mcp = &mc; 2042 + link_stat_t *stat_buf; 2043 + dma_addr_t stat_buf_dma; 2044 + 2045 + DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no);) 2046 + 2047 + stat_buf = dma_pool_alloc(ha->s_dma_pool, GFP_ATOMIC, &stat_buf_dma); 2048 + if (stat_buf == NULL) { 2049 + DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n", 2050 + __func__, ha->host_no)); 2051 + return BIT_0; 2052 + } 2053 + memset(stat_buf, 0, sizeof(link_stat_t)); 2054 + 2055 + mcp->mb[0] = MBC_GET_LINK_STATUS; 2056 + mcp->mb[2] = MSW(stat_buf_dma); 2057 + mcp->mb[3] = LSW(stat_buf_dma); 2058 + mcp->mb[6] = MSW(MSD(stat_buf_dma)); 2059 + mcp->mb[7] = LSW(MSD(stat_buf_dma)); 2060 + mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 2061 + mcp->in_mb = MBX_0; 2062 + if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) { 2063 + mcp->mb[1] = loop_id; 2064 + mcp->mb[4] = 0; 2065 + mcp->mb[10] = 0; 2066 + mcp->out_mb |= MBX_10|MBX_4|MBX_1; 2067 + mcp->in_mb |= MBX_1; 2068 + } else if (HAS_EXTENDED_IDS(ha)) { 2069 + mcp->mb[1] = loop_id; 2070 + mcp->mb[10] = 0; 2071 + mcp->out_mb |= MBX_10|MBX_1; 2072 + } else { 2073 + mcp->mb[1] = loop_id << 8; 2074 + mcp->out_mb |= MBX_1; 2075 + } 2076 + mcp->tov = 30; 2077 + mcp->flags = IOCTL_CMD; 2078 + rval = qla2x00_mailbox_command(ha, mcp); 2079 + 2080 + if (rval == QLA_SUCCESS) { 2081 + if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 2082 + DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n", 2083 + __func__, ha->host_no, mcp->mb[0]);) 2084 + status[0] = mcp->mb[0]; 2085 + rval = BIT_1; 2086 + } else { 2087 + /* copy over data -- firmware data is LE. 
*/ 2088 + ret_buf->link_fail_cnt = 2089 + le32_to_cpu(stat_buf->link_fail_cnt); 2090 + ret_buf->loss_sync_cnt = 2091 + le32_to_cpu(stat_buf->loss_sync_cnt); 2092 + ret_buf->loss_sig_cnt = 2093 + le32_to_cpu(stat_buf->loss_sig_cnt); 2094 + ret_buf->prim_seq_err_cnt = 2095 + le32_to_cpu(stat_buf->prim_seq_err_cnt); 2096 + ret_buf->inval_xmit_word_cnt = 2097 + le32_to_cpu(stat_buf->inval_xmit_word_cnt); 2098 + ret_buf->inval_crc_cnt = 2099 + le32_to_cpu(stat_buf->inval_crc_cnt); 2100 + 2101 + DEBUG11(printk("%s(%ld): stat dump: fail_cnt=%d " 2102 + "loss_sync=%d loss_sig=%d seq_err=%d " 2103 + "inval_xmt_word=%d inval_crc=%d.\n", __func__, 2104 + ha->host_no, stat_buf->link_fail_cnt, 2105 + stat_buf->loss_sync_cnt, stat_buf->loss_sig_cnt, 2106 + stat_buf->prim_seq_err_cnt, 2107 + stat_buf->inval_xmit_word_cnt, 2108 + stat_buf->inval_crc_cnt);) 2109 + } 2110 + } else { 2111 + /* Failed. */ 2112 + DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2113 + ha->host_no, rval);) 2114 + rval = BIT_1; 2115 + } 2116 + 2117 + dma_pool_free(ha->s_dma_pool, stat_buf, stat_buf_dma); 2118 + 2119 + return rval; 2120 + } 2121 + 2122 + int 2022 2123 qla24xx_get_isp_stats(scsi_qla_host_t *ha, uint32_t *dwbuf, uint32_t dwords, 2023 2124 uint16_t *status) 2024 2125 { ··· 2181 2080 2182 2081 return rval; 2183 2082 } 2184 - #endif 2185 2083 2186 2084 int 2187 2085 qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
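With qla2x00_get_link_status() moved out of the #ifdef block it becomes callable from the driver's attribute paths. A hedged caller sketch based on the header comment above (report_link_counters() is a hypothetical consumer; per the comment, 0 means success, BIT_0 an allocation failure, BIT_1 a mailbox failure):

        link_stat_t stats;
        uint16_t mb_stat[1];
        int rval;

        rval = qla2x00_get_link_status(ha, fcport->loop_id, &stats, mb_stat);
        if (rval == QLA_SUCCESS)
                report_link_counters(&stats);   /* counters already in CPU order */
        else if (rval & BIT_1)
                DEBUG2_3_11(printk("get_link_status failed, mb0=%x.\n",
                    mb_stat[0]));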
+42 -1
drivers/scsi/qla2xxx/qla_os.c
··· 366 366 goto qc_fail_command; 367 367 } 368 368 369 + /* Close window on fcport/rport state-transitioning. */ 370 + if (!*(fc_port_t **)rport->dd_data) { 371 + cmd->result = DID_IMM_RETRY << 16; 372 + goto qc_fail_command; 373 + } 374 + 369 375 if (atomic_read(&fcport->state) != FCS_ONLINE) { 370 376 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 371 377 atomic_read(&ha->loop_state) == LOOP_DEAD) { ··· 424 418 rval = fc_remote_port_chkready(rport); 425 419 if (rval) { 426 420 cmd->result = rval; 421 + goto qc24_fail_command; 422 + } 423 + 424 + /* Close window on fcport/rport state-transitioning. */ 425 + if (!*(fc_port_t **)rport->dd_data) { 426 + cmd->result = DID_IMM_RETRY << 16; 427 427 goto qc24_fail_command; 428 428 } 429 429 ··· 525 513 * Success (Adapter is online) : 0 526 514 * Failed (Adapter is offline/disabled) : 1 527 515 */ 528 - static int 516 + int 529 517 qla2x00_wait_for_hba_online(scsi_qla_host_t *ha) 530 518 { 531 519 int return_status; ··· 1324 1312 ha->ports = MAX_BUSES; 1325 1313 ha->init_cb_size = sizeof(init_cb_t); 1326 1314 ha->mgmt_svr_loop_id = MANAGEMENT_SERVER; 1315 + ha->link_data_rate = LDR_UNKNOWN; 1316 + ha->optrom_size = OPTROM_SIZE_2300; 1327 1317 1328 1318 /* Assign ISP specific operations. */ 1329 1319 ha->isp_ops.pci_config = qla2100_pci_config; ··· 1353 1339 ha->isp_ops.write_nvram = qla2x00_write_nvram_data; 1354 1340 ha->isp_ops.fw_dump = qla2100_fw_dump; 1355 1341 ha->isp_ops.ascii_fw_dump = qla2100_ascii_fw_dump; 1342 + ha->isp_ops.read_optrom = qla2x00_read_optrom_data; 1343 + ha->isp_ops.write_optrom = qla2x00_write_optrom_data; 1356 1344 if (IS_QLA2100(ha)) { 1357 1345 host->max_id = MAX_TARGETS_2100; 1358 1346 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; ··· 1380 1364 ha->isp_ops.intr_handler = qla2300_intr_handler; 1381 1365 ha->isp_ops.fw_dump = qla2300_fw_dump; 1382 1366 ha->isp_ops.ascii_fw_dump = qla2300_ascii_fw_dump; 1367 + ha->isp_ops.beacon_on = qla2x00_beacon_on; 1368 + ha->isp_ops.beacon_off = qla2x00_beacon_off; 1369 + ha->isp_ops.beacon_blink = qla2x00_beacon_blink; 1383 1370 ha->gid_list_info_size = 6; 1371 + if (IS_QLA2322(ha) || IS_QLA6322(ha)) 1372 + ha->optrom_size = OPTROM_SIZE_2322; 1384 1373 } else if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) { 1385 1374 host->max_id = MAX_TARGETS_2200; 1386 1375 ha->mbx_count = MAILBOX_REGISTER_COUNT; ··· 1421 1400 ha->isp_ops.write_nvram = qla24xx_write_nvram_data; 1422 1401 ha->isp_ops.fw_dump = qla24xx_fw_dump; 1423 1402 ha->isp_ops.ascii_fw_dump = qla24xx_ascii_fw_dump; 1403 + ha->isp_ops.read_optrom = qla24xx_read_optrom_data; 1404 + ha->isp_ops.write_optrom = qla24xx_write_optrom_data; 1405 + ha->isp_ops.beacon_on = qla24xx_beacon_on; 1406 + ha->isp_ops.beacon_off = qla24xx_beacon_off; 1407 + ha->isp_ops.beacon_blink = qla24xx_beacon_blink; 1424 1408 ha->gid_list_info_size = 8; 1409 + ha->optrom_size = OPTROM_SIZE_24XX; 1425 1410 } 1426 1411 host->can_queue = ha->request_q_length + 128; 1427 1412 ··· 1684 1657 spin_lock_irqsave(&fcport->rport_lock, flags); 1685 1658 fcport->drport = rport; 1686 1659 fcport->rport = NULL; 1660 + *(fc_port_t **)rport->dd_data = NULL; 1687 1661 spin_unlock_irqrestore(&fcport->rport_lock, flags); 1688 1662 set_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags); 1689 1663 } else { 1690 1664 spin_lock_irqsave(&fcport->rport_lock, flags); 1691 1665 fcport->rport = NULL; 1666 + *(fc_port_t **)rport->dd_data = NULL; 1692 1667 spin_unlock_irqrestore(&fcport->rport_lock, flags); 1693 1668 fc_remote_port_delete(rport); 1694 1669 } ··· 2095 2066 ha->fw_dumped = 0; 
2096 2067 ha->fw_dump_reading = 0; 2097 2068 ha->fw_dump_buffer = NULL; 2069 + 2070 + vfree(ha->optrom_buffer); 2098 2071 } 2099 2072 2100 2073 /* ··· 2345 2314 if (!ha->interrupts_on) 2346 2315 ha->isp_ops.enable_intrs(ha); 2347 2316 2317 + if (test_and_clear_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags)) 2318 + ha->isp_ops.beacon_blink(ha); 2319 + 2348 2320 ha->dpc_active = 0; 2349 2321 } /* End of while(1) */ 2350 2322 ··· 2525 2491 atomic_read(&ha->loop_down_timer))); 2526 2492 } 2527 2493 2494 + /* Check if beacon LED needs to be blinked */ 2495 + if (ha->beacon_blink_led == 1) { 2496 + set_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags); 2497 + start_dpc++; 2498 + } 2499 + 2528 2500 /* Schedule the DPC routine if needed */ 2529 2501 if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || 2530 2502 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || ··· 2539 2499 start_dpc || 2540 2500 test_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags) || 2541 2501 test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) || 2502 + test_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags) || 2542 2503 test_bit(RELOGIN_NEEDED, &ha->dpc_flags)) && 2543 2504 ha->dpc_wait && !ha->dpc_active) { 2544 2505
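The two new dd_data checks in the queuecommand paths close a race against rport teardown: dd_data is non-NULL only while the fcport is published to the transport. For reference, the producer side, as a simplified sketch of qla2x00_reg_remote_port() in qla_init.c (not shown in this diff):

        rport = fc_remote_port_add(ha->host, 0, &rport_ids);
        if (!rport) {
                qla_printk(KERN_WARNING, ha, "Unable to allocate rport.\n");
                return;
        }
        spin_lock_irqsave(&fcport->rport_lock, flags);
        fcport->rport = rport;
        *((fc_port_t **)rport->dd_data) = fcport;       /* publish */
        spin_unlock_irqrestore(&fcport->rport_lock, flags);

Between the NULL stores in the teardown hunk above and this publish, any command arriving sees a NULL dd_data and is completed with DID_IMM_RETRY.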
-2
drivers/scsi/qla2xxx/qla_rscn.c
··· 6 6 */ 7 7 #include "qla_def.h" 8 8 9 - #include <scsi/scsi_transport_fc.h> 10 - 11 9 /** 12 10 * IO descriptor handle definitions. 13 11 *
+963
drivers/scsi/qla2xxx/qla_sup.c
··· 695 695 696 696 return ret; 697 697 } 698 + 699 + 700 + static inline void 701 + qla2x00_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags) 702 + { 703 + if (IS_QLA2322(ha)) { 704 + /* Flip all colors. */ 705 + if (ha->beacon_color_state == QLA_LED_ALL_ON) { 706 + /* Turn off. */ 707 + ha->beacon_color_state = 0; 708 + *pflags = GPIO_LED_ALL_OFF; 709 + } else { 710 + /* Turn on. */ 711 + ha->beacon_color_state = QLA_LED_ALL_ON; 712 + *pflags = GPIO_LED_RGA_ON; 713 + } 714 + } else { 715 + /* Flip green led only. */ 716 + if (ha->beacon_color_state == QLA_LED_GRN_ON) { 717 + /* Turn off. */ 718 + ha->beacon_color_state = 0; 719 + *pflags = GPIO_LED_GREEN_OFF_AMBER_OFF; 720 + } else { 721 + /* Turn on. */ 722 + ha->beacon_color_state = QLA_LED_GRN_ON; 723 + *pflags = GPIO_LED_GREEN_ON_AMBER_OFF; 724 + } 725 + } 726 + } 727 + 728 + void 729 + qla2x00_beacon_blink(struct scsi_qla_host *ha) 730 + { 731 + uint16_t gpio_enable; 732 + uint16_t gpio_data; 733 + uint16_t led_color = 0; 734 + unsigned long flags; 735 + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 736 + 737 + if (ha->pio_address) 738 + reg = (struct device_reg_2xxx __iomem *)ha->pio_address; 739 + 740 + spin_lock_irqsave(&ha->hardware_lock, flags); 741 + 742 + /* Save the Original GPIOE. */ 743 + if (ha->pio_address) { 744 + gpio_enable = RD_REG_WORD_PIO(&reg->gpioe); 745 + gpio_data = RD_REG_WORD_PIO(&reg->gpiod); 746 + } else { 747 + gpio_enable = RD_REG_WORD(&reg->gpioe); 748 + gpio_data = RD_REG_WORD(&reg->gpiod); 749 + } 750 + 751 + /* Set the modified gpio_enable values */ 752 + gpio_enable |= GPIO_LED_MASK; 753 + 754 + if (ha->pio_address) { 755 + WRT_REG_WORD_PIO(&reg->gpioe, gpio_enable); 756 + } else { 757 + WRT_REG_WORD(&reg->gpioe, gpio_enable); 758 + RD_REG_WORD(&reg->gpioe); 759 + } 760 + 761 + qla2x00_flip_colors(ha, &led_color); 762 + 763 + /* Clear out any previously set LED color. */ 764 + gpio_data &= ~GPIO_LED_MASK; 765 + 766 + /* Set the new input LED color to GPIOD. */ 767 + gpio_data |= led_color; 768 + 769 + /* Set the modified gpio_data values */ 770 + if (ha->pio_address) { 771 + WRT_REG_WORD_PIO(&reg->gpiod, gpio_data); 772 + } else { 773 + WRT_REG_WORD(&reg->gpiod, gpio_data); 774 + RD_REG_WORD(&reg->gpiod); 775 + } 776 + 777 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 778 + } 779 + 780 + int 781 + qla2x00_beacon_on(struct scsi_qla_host *ha) 782 + { 783 + uint16_t gpio_enable; 784 + uint16_t gpio_data; 785 + unsigned long flags; 786 + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 787 + 788 + ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; 789 + ha->fw_options[1] |= FO1_DISABLE_GPIO6_7; 790 + 791 + if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS) { 792 + qla_printk(KERN_WARNING, ha, 793 + "Unable to update fw options (beacon on).\n"); 794 + return QLA_FUNCTION_FAILED; 795 + } 796 + 797 + if (ha->pio_address) 798 + reg = (struct device_reg_2xxx __iomem *)ha->pio_address; 799 + 800 + /* Turn off LEDs. */ 801 + spin_lock_irqsave(&ha->hardware_lock, flags); 802 + if (ha->pio_address) { 803 + gpio_enable = RD_REG_WORD_PIO(&reg->gpioe); 804 + gpio_data = RD_REG_WORD_PIO(&reg->gpiod); 805 + } else { 806 + gpio_enable = RD_REG_WORD(&reg->gpioe); 807 + gpio_data = RD_REG_WORD(&reg->gpiod); 808 + } 809 + gpio_enable |= GPIO_LED_MASK; 810 + 811 + /* Set the modified gpio_enable values. 
*/ 812 + if (ha->pio_address) { 813 + WRT_REG_WORD_PIO(&reg->gpioe, gpio_enable); 814 + } else { 815 + WRT_REG_WORD(&reg->gpioe, gpio_enable); 816 + RD_REG_WORD(&reg->gpioe); 817 + } 818 + 819 + /* Clear out previously set LED colour. */ 820 + gpio_data &= ~GPIO_LED_MASK; 821 + if (ha->pio_address) { 822 + WRT_REG_WORD_PIO(&reg->gpiod, gpio_data); 823 + } else { 824 + WRT_REG_WORD(&reg->gpiod, gpio_data); 825 + RD_REG_WORD(&reg->gpiod); 826 + } 827 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 828 + 829 + /* 830 + * Let the per HBA timer kick off the blinking process based on 831 + * the following flags. No need to do anything else now. 832 + */ 833 + ha->beacon_blink_led = 1; 834 + ha->beacon_color_state = 0; 835 + 836 + return QLA_SUCCESS; 837 + } 838 + 839 + int 840 + qla2x00_beacon_off(struct scsi_qla_host *ha) 841 + { 842 + int rval = QLA_SUCCESS; 843 + 844 + ha->beacon_blink_led = 0; 845 + 846 + /* Set the on flag so when it gets flipped it will be off. */ 847 + if (IS_QLA2322(ha)) 848 + ha->beacon_color_state = QLA_LED_ALL_ON; 849 + else 850 + ha->beacon_color_state = QLA_LED_GRN_ON; 851 + 852 + ha->isp_ops.beacon_blink(ha); /* This turns green LED off */ 853 + 854 + ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; 855 + ha->fw_options[1] &= ~FO1_DISABLE_GPIO6_7; 856 + 857 + rval = qla2x00_set_fw_options(ha, ha->fw_options); 858 + if (rval != QLA_SUCCESS) 859 + qla_printk(KERN_WARNING, ha, 860 + "Unable to update fw options (beacon off).\n"); 861 + return rval; 862 + } 863 + 864 + 865 + static inline void 866 + qla24xx_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags) 867 + { 868 + /* Flip all colors. */ 869 + if (ha->beacon_color_state == QLA_LED_ALL_ON) { 870 + /* Turn off. */ 871 + ha->beacon_color_state = 0; 872 + *pflags = 0; 873 + } else { 874 + /* Turn on. */ 875 + ha->beacon_color_state = QLA_LED_ALL_ON; 876 + *pflags = GPDX_LED_YELLOW_ON | GPDX_LED_AMBER_ON; 877 + } 878 + } 879 + 880 + void 881 + qla24xx_beacon_blink(struct scsi_qla_host *ha) 882 + { 883 + uint16_t led_color = 0; 884 + uint32_t gpio_data; 885 + unsigned long flags; 886 + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 887 + 888 + /* Save the Original GPIOD. */ 889 + spin_lock_irqsave(&ha->hardware_lock, flags); 890 + gpio_data = RD_REG_DWORD(&reg->gpiod); 891 + 892 + /* Enable the gpio_data reg for update. */ 893 + gpio_data |= GPDX_LED_UPDATE_MASK; 894 + 895 + WRT_REG_DWORD(&reg->gpiod, gpio_data); 896 + gpio_data = RD_REG_DWORD(&reg->gpiod); 897 + 898 + /* Set the color bits. */ 899 + qla24xx_flip_colors(ha, &led_color); 900 + 901 + /* Clear out any previously set LED color. */ 902 + gpio_data &= ~GPDX_LED_COLOR_MASK; 903 + 904 + /* Set the new input LED color to GPIOD. */ 905 + gpio_data |= led_color; 906 + 907 + /* Set the modified gpio_data values. 
*/ 908 + WRT_REG_DWORD(&reg->gpiod, gpio_data); 909 + gpio_data = RD_REG_DWORD(&reg->gpiod); 910 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 911 + } 912 + 913 + int 914 + qla24xx_beacon_on(struct scsi_qla_host *ha) 915 + { 916 + uint32_t gpio_data; 917 + unsigned long flags; 918 + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 919 + 920 + if (ha->beacon_blink_led == 0) { 921 + /* Enable firmware for update */ 922 + ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL; 923 + 924 + if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS) 925 + return QLA_FUNCTION_FAILED; 926 + 927 + if (qla2x00_get_fw_options(ha, ha->fw_options) != 928 + QLA_SUCCESS) { 929 + qla_printk(KERN_WARNING, ha, 930 + "Unable to update fw options (beacon on).\n"); 931 + return QLA_FUNCTION_FAILED; 932 + } 933 + 934 + spin_lock_irqsave(&ha->hardware_lock, flags); 935 + gpio_data = RD_REG_DWORD(&reg->gpiod); 936 + 937 + /* Enable the gpio_data reg for update. */ 938 + gpio_data |= GPDX_LED_UPDATE_MASK; 939 + WRT_REG_DWORD(&reg->gpiod, gpio_data); 940 + RD_REG_DWORD(&reg->gpiod); 941 + 942 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 943 + } 944 + 945 + /* So all colors blink together. */ 946 + ha->beacon_color_state = 0; 947 + 948 + /* Let the per HBA timer kick off the blinking process. */ 949 + ha->beacon_blink_led = 1; 950 + 951 + return QLA_SUCCESS; 952 + } 953 + 954 + int 955 + qla24xx_beacon_off(struct scsi_qla_host *ha) 956 + { 957 + uint32_t gpio_data; 958 + unsigned long flags; 959 + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 960 + 961 + ha->beacon_blink_led = 0; 962 + ha->beacon_color_state = QLA_LED_ALL_ON; 963 + 964 + ha->isp_ops.beacon_blink(ha); /* Will flip to all off. */ 965 + 966 + /* Give control back to firmware. */ 967 + spin_lock_irqsave(&ha->hardware_lock, flags); 968 + gpio_data = RD_REG_DWORD(&reg->gpiod); 969 + 970 + /* Disable the gpio_data reg for update. */ 971 + gpio_data &= ~GPDX_LED_UPDATE_MASK; 972 + WRT_REG_DWORD(&reg->gpiod, gpio_data); 973 + RD_REG_DWORD(&reg->gpiod); 974 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 975 + 976 + ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL; 977 + 978 + if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS) { 979 + qla_printk(KERN_WARNING, ha, 980 + "Unable to update fw options (beacon off).\n"); 981 + return QLA_FUNCTION_FAILED; 982 + } 983 + 984 + if (qla2x00_get_fw_options(ha, ha->fw_options) != QLA_SUCCESS) { 985 + qla_printk(KERN_WARNING, ha, 986 + "Unable to get fw options (beacon off).\n"); 987 + return QLA_FUNCTION_FAILED; 988 + } 989 + 990 + return QLA_SUCCESS; 991 + } 992 + 993 + 994 + /* 995 + * Flash support routines 996 + */ 997 + 998 + /** 999 + * qla2x00_flash_enable() - Setup flash for reading and writing. 1000 + * @ha: HA context 1001 + */ 1002 + static void 1003 + qla2x00_flash_enable(scsi_qla_host_t *ha) 1004 + { 1005 + uint16_t data; 1006 + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1007 + 1008 + data = RD_REG_WORD(&reg->ctrl_status); 1009 + data |= CSR_FLASH_ENABLE; 1010 + WRT_REG_WORD(&reg->ctrl_status, data); 1011 + RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */ 1012 + } 1013 + 1014 + /** 1015 + * qla2x00_flash_disable() - Disable flash and allow RISC to run. 
1016 + * @ha: HA context 1017 + */ 1018 + static void 1019 + qla2x00_flash_disable(scsi_qla_host_t *ha) 1020 + { 1021 + uint16_t data; 1022 + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1023 + 1024 + data = RD_REG_WORD(&reg->ctrl_status); 1025 + data &= ~(CSR_FLASH_ENABLE); 1026 + WRT_REG_WORD(&reg->ctrl_status, data); 1027 + RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */ 1028 + } 1029 + 1030 + /** 1031 + * qla2x00_read_flash_byte() - Reads a byte from flash 1032 + * @ha: HA context 1033 + * @addr: Address in flash to read 1034 + * 1035 + * A word is read from the chip, but, only the lower byte is valid. 1036 + * 1037 + * Returns the byte read from flash @addr. 1038 + */ 1039 + static uint8_t 1040 + qla2x00_read_flash_byte(scsi_qla_host_t *ha, uint32_t addr) 1041 + { 1042 + uint16_t data; 1043 + uint16_t bank_select; 1044 + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1045 + 1046 + bank_select = RD_REG_WORD(&reg->ctrl_status); 1047 + 1048 + if (IS_QLA2322(ha) || IS_QLA6322(ha)) { 1049 + /* Specify 64K address range: */ 1050 + /* clear out Module Select and Flash Address bits [19:16]. */ 1051 + bank_select &= ~0xf8; 1052 + bank_select |= addr >> 12 & 0xf0; 1053 + bank_select |= CSR_FLASH_64K_BANK; 1054 + WRT_REG_WORD(&reg->ctrl_status, bank_select); 1055 + RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */ 1056 + 1057 + WRT_REG_WORD(&reg->flash_address, (uint16_t)addr); 1058 + data = RD_REG_WORD(&reg->flash_data); 1059 + 1060 + return (uint8_t)data; 1061 + } 1062 + 1063 + /* Setup bit 16 of flash address. */ 1064 + if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) { 1065 + bank_select |= CSR_FLASH_64K_BANK; 1066 + WRT_REG_WORD(&reg->ctrl_status, bank_select); 1067 + RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */ 1068 + } else if (((addr & BIT_16) == 0) && 1069 + (bank_select & CSR_FLASH_64K_BANK)) { 1070 + bank_select &= ~(CSR_FLASH_64K_BANK); 1071 + WRT_REG_WORD(&reg->ctrl_status, bank_select); 1072 + RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */ 1073 + } 1074 + 1075 + /* Always perform IO mapped accesses to the FLASH registers. */ 1076 + if (ha->pio_address) { 1077 + uint16_t data2; 1078 + 1079 + reg = (struct device_reg_2xxx __iomem *)ha->pio_address; 1080 + WRT_REG_WORD_PIO(&reg->flash_address, (uint16_t)addr); 1081 + do { 1082 + data = RD_REG_WORD_PIO(&reg->flash_data); 1083 + barrier(); 1084 + cpu_relax(); 1085 + data2 = RD_REG_WORD_PIO(&reg->flash_data); 1086 + } while (data != data2); 1087 + } else { 1088 + WRT_REG_WORD(&reg->flash_address, (uint16_t)addr); 1089 + data = qla2x00_debounce_register(&reg->flash_data); 1090 + } 1091 + 1092 + return (uint8_t)data; 1093 + } 1094 + 1095 + /** 1096 + * qla2x00_write_flash_byte() - Write a byte to flash 1097 + * @ha: HA context 1098 + * @addr: Address in flash to write 1099 + * @data: Data to write 1100 + */ 1101 + static void 1102 + qla2x00_write_flash_byte(scsi_qla_host_t *ha, uint32_t addr, uint8_t data) 1103 + { 1104 + uint16_t bank_select; 1105 + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1106 + 1107 + bank_select = RD_REG_WORD(&reg->ctrl_status); 1108 + if (IS_QLA2322(ha) || IS_QLA6322(ha)) { 1109 + /* Specify 64K address range: */ 1110 + /* clear out Module Select and Flash Address bits [19:16]. */ 1111 + bank_select &= ~0xf8; 1112 + bank_select |= addr >> 12 & 0xf0; 1113 + bank_select |= CSR_FLASH_64K_BANK; 1114 + WRT_REG_WORD(&reg->ctrl_status, bank_select); 1115 + RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. 
*/ 1116 + 1117 + WRT_REG_WORD(&reg->flash_address, (uint16_t)addr); 1118 + RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */ 1119 + WRT_REG_WORD(&reg->flash_data, (uint16_t)data); 1120 + RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */ 1121 + 1122 + return; 1123 + } 1124 + 1125 + /* Setup bit 16 of flash address. */ 1126 + if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) { 1127 + bank_select |= CSR_FLASH_64K_BANK; 1128 + WRT_REG_WORD(&reg->ctrl_status, bank_select); 1129 + RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */ 1130 + } else if (((addr & BIT_16) == 0) && 1131 + (bank_select & CSR_FLASH_64K_BANK)) { 1132 + bank_select &= ~(CSR_FLASH_64K_BANK); 1133 + WRT_REG_WORD(&reg->ctrl_status, bank_select); 1134 + RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */ 1135 + } 1136 + 1137 + /* Always perform IO mapped accesses to the FLASH registers. */ 1138 + if (ha->pio_address) { 1139 + reg = (struct device_reg_2xxx __iomem *)ha->pio_address; 1140 + WRT_REG_WORD_PIO(&reg->flash_address, (uint16_t)addr); 1141 + WRT_REG_WORD_PIO(&reg->flash_data, (uint16_t)data); 1142 + } else { 1143 + WRT_REG_WORD(&reg->flash_address, (uint16_t)addr); 1144 + RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */ 1145 + WRT_REG_WORD(&reg->flash_data, (uint16_t)data); 1146 + RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */ 1147 + } 1148 + } 1149 + 1150 + /** 1151 + * qla2x00_poll_flash() - Polls flash for completion. 1152 + * @ha: HA context 1153 + * @addr: Address in flash to poll 1154 + * @poll_data: Data to be polled 1155 + * @man_id: Flash manufacturer ID 1156 + * @flash_id: Flash ID 1157 + * 1158 + * This function polls the device until bit 7 of what is read matches data 1159 + * bit 7 or until data bit 5 becomes a 1. If that happens, the flash ROM timed 1160 + * out (a fatal error). The flash book recommends reading bit 7 again after 1161 + * reading bit 5 as a 1. 1162 + * 1163 + * Returns 0 on success, else non-zero. 1164 + */ 1165 + static int 1166 + qla2x00_poll_flash(scsi_qla_host_t *ha, uint32_t addr, uint8_t poll_data, 1167 + uint8_t man_id, uint8_t flash_id) 1168 + { 1169 + int status; 1170 + uint8_t flash_data; 1171 + uint32_t cnt; 1172 + 1173 + status = 1; 1174 + 1175 + /* Wait for 30 seconds for command to finish. */ 1176 + poll_data &= BIT_7; 1177 + for (cnt = 3000000; cnt; cnt--) { 1178 + flash_data = qla2x00_read_flash_byte(ha, addr); 1179 + if ((flash_data & BIT_7) == poll_data) { 1180 + status = 0; 1181 + break; 1182 + } 1183 + 1184 + if (man_id != 0x40 && man_id != 0xda) { 1185 + if ((flash_data & BIT_5) && cnt > 2) 1186 + cnt = 2; 1187 + } 1188 + udelay(10); 1189 + barrier(); 1190 + } 1191 + return status; 1192 + } 1193 + 1194 + #define IS_OEM_001(ha) \ 1195 + ((ha)->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2322 && \ 1196 + (ha)->pdev->subsystem_vendor == 0x1028 && \ 1197 + (ha)->pdev->subsystem_device == 0x0170) 1198 + 1199 + /** 1200 + * qla2x00_program_flash_address() - Programs a flash address 1201 + * @ha: HA context 1202 + * @addr: Address in flash to program 1203 + * @data: Data to be written in flash 1204 + * @man_id: Flash manufacturer ID 1205 + * @flash_id: Flash ID 1206 + * 1207 + * Returns 0 on success, else non-zero. 1208 + */ 1209 + static int 1210 + qla2x00_program_flash_address(scsi_qla_host_t *ha, uint32_t addr, uint8_t data, 1211 + uint8_t man_id, uint8_t flash_id) 1212 + { 1213 + /* Write Program Command Sequence. 
*/ 1214 + if (IS_OEM_001(ha)) { 1215 + qla2x00_write_flash_byte(ha, 0xaaa, 0xaa); 1216 + qla2x00_write_flash_byte(ha, 0x555, 0x55); 1217 + qla2x00_write_flash_byte(ha, 0xaaa, 0xa0); 1218 + qla2x00_write_flash_byte(ha, addr, data); 1219 + } else { 1220 + if (man_id == 0xda && flash_id == 0xc1) { 1221 + qla2x00_write_flash_byte(ha, addr, data); 1222 + if (addr & 0x7e) 1223 + return 0; 1224 + } else { 1225 + qla2x00_write_flash_byte(ha, 0x5555, 0xaa); 1226 + qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); 1227 + qla2x00_write_flash_byte(ha, 0x5555, 0xa0); 1228 + qla2x00_write_flash_byte(ha, addr, data); 1229 + } 1230 + } 1231 + 1232 + udelay(150); 1233 + 1234 + /* Wait for write to complete. */ 1235 + return qla2x00_poll_flash(ha, addr, data, man_id, flash_id); 1236 + } 1237 + 1238 + /** 1239 + * qla2x00_erase_flash() - Erase the flash. 1240 + * @ha: HA context 1241 + * @man_id: Flash manufacturer ID 1242 + * @flash_id: Flash ID 1243 + * 1244 + * Returns 0 on success, else non-zero. 1245 + */ 1246 + static int 1247 + qla2x00_erase_flash(scsi_qla_host_t *ha, uint8_t man_id, uint8_t flash_id) 1248 + { 1249 + /* Individual Sector Erase Command Sequence */ 1250 + if (IS_OEM_001(ha)) { 1251 + qla2x00_write_flash_byte(ha, 0xaaa, 0xaa); 1252 + qla2x00_write_flash_byte(ha, 0x555, 0x55); 1253 + qla2x00_write_flash_byte(ha, 0xaaa, 0x80); 1254 + qla2x00_write_flash_byte(ha, 0xaaa, 0xaa); 1255 + qla2x00_write_flash_byte(ha, 0x555, 0x55); 1256 + qla2x00_write_flash_byte(ha, 0xaaa, 0x10); 1257 + } else { 1258 + qla2x00_write_flash_byte(ha, 0x5555, 0xaa); 1259 + qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); 1260 + qla2x00_write_flash_byte(ha, 0x5555, 0x80); 1261 + qla2x00_write_flash_byte(ha, 0x5555, 0xaa); 1262 + qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); 1263 + qla2x00_write_flash_byte(ha, 0x5555, 0x10); 1264 + } 1265 + 1266 + udelay(150); 1267 + 1268 + /* Wait for erase to complete. */ 1269 + return qla2x00_poll_flash(ha, 0x00, 0x80, man_id, flash_id); 1270 + } 1271 + 1272 + /** 1273 + * qla2x00_erase_flash_sector() - Erase a flash sector. 1274 + * @ha: HA context 1275 + * @addr: Flash sector to erase 1276 + * @sec_mask: Sector address mask 1277 + * @man_id: Flash manufacturer ID 1278 + * @flash_id: Flash ID 1279 + * 1280 + * Returns 0 on success, else non-zero. 1281 + */ 1282 + static int 1283 + qla2x00_erase_flash_sector(scsi_qla_host_t *ha, uint32_t addr, 1284 + uint32_t sec_mask, uint8_t man_id, uint8_t flash_id) 1285 + { 1286 + /* Individual Sector Erase Command Sequence */ 1287 + qla2x00_write_flash_byte(ha, 0x5555, 0xaa); 1288 + qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); 1289 + qla2x00_write_flash_byte(ha, 0x5555, 0x80); 1290 + qla2x00_write_flash_byte(ha, 0x5555, 0xaa); 1291 + qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); 1292 + if (man_id == 0x1f && flash_id == 0x13) 1293 + qla2x00_write_flash_byte(ha, addr & sec_mask, 0x10); 1294 + else 1295 + qla2x00_write_flash_byte(ha, addr & sec_mask, 0x30); 1296 + 1297 + udelay(150); 1298 + 1299 + /* Wait for erase to complete. */ 1300 + return qla2x00_poll_flash(ha, addr, 0x80, man_id, flash_id); 1301 + } 1302 + 1303 + /** 1304 + * qla2x00_get_flash_manufacturer() - Read manufacturer ID from flash chip. 
1305 + * @man_id: Flash manufacturer ID 1306 + * @flash_id: Flash ID 1307 + */ 1308 + static void 1309 + qla2x00_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id, 1310 + uint8_t *flash_id) 1311 + { 1312 + qla2x00_write_flash_byte(ha, 0x5555, 0xaa); 1313 + qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); 1314 + qla2x00_write_flash_byte(ha, 0x5555, 0x90); 1315 + *man_id = qla2x00_read_flash_byte(ha, 0x0000); 1316 + *flash_id = qla2x00_read_flash_byte(ha, 0x0001); 1317 + qla2x00_write_flash_byte(ha, 0x5555, 0xaa); 1318 + qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); 1319 + qla2x00_write_flash_byte(ha, 0x5555, 0xf0); 1320 + } 1321 + 1322 + 1323 + static inline void 1324 + qla2x00_suspend_hba(struct scsi_qla_host *ha) 1325 + { 1326 + int cnt; 1327 + unsigned long flags; 1328 + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1329 + 1330 + /* Suspend HBA. */ 1331 + scsi_block_requests(ha->host); 1332 + ha->isp_ops.disable_intrs(ha); 1333 + set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 1334 + 1335 + /* Pause RISC. */ 1336 + spin_lock_irqsave(&ha->hardware_lock, flags); 1337 + WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC); 1338 + RD_REG_WORD(&reg->hccr); 1339 + if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { 1340 + for (cnt = 0; cnt < 30000; cnt++) { 1341 + if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0) 1342 + break; 1343 + udelay(100); 1344 + } 1345 + } else { 1346 + udelay(10); 1347 + } 1348 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 1349 + } 1350 + 1351 + static inline void 1352 + qla2x00_resume_hba(struct scsi_qla_host *ha) 1353 + { 1354 + /* Resume HBA. */ 1355 + clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 1356 + set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 1357 + up(ha->dpc_wait); 1358 + qla2x00_wait_for_hba_online(ha); 1359 + scsi_unblock_requests(ha->host); 1360 + } 1361 + 1362 + uint8_t * 1363 + qla2x00_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, 1364 + uint32_t offset, uint32_t length) 1365 + { 1366 + unsigned long flags; 1367 + uint32_t addr, midpoint; 1368 + uint8_t *data; 1369 + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1370 + 1371 + /* Suspend HBA. */ 1372 + qla2x00_suspend_hba(ha); 1373 + 1374 + /* Go with read. */ 1375 + spin_lock_irqsave(&ha->hardware_lock, flags); 1376 + midpoint = ha->optrom_size / 2; 1377 + 1378 + qla2x00_flash_enable(ha); 1379 + WRT_REG_WORD(&reg->nvram, 0); 1380 + RD_REG_WORD(&reg->nvram); /* PCI Posting. */ 1381 + for (addr = offset, data = buf; addr < length; addr++, data++) { 1382 + if (addr == midpoint) { 1383 + WRT_REG_WORD(&reg->nvram, NVR_SELECT); 1384 + RD_REG_WORD(&reg->nvram); /* PCI Posting. */ 1385 + } 1386 + 1387 + *data = qla2x00_read_flash_byte(ha, addr); 1388 + } 1389 + qla2x00_flash_disable(ha); 1390 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 1391 + 1392 + /* Resume HBA. */ 1393 + qla2x00_resume_hba(ha); 1394 + 1395 + return buf; 1396 + } 1397 + 1398 + int 1399 + qla2x00_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, 1400 + uint32_t offset, uint32_t length) 1401 + { 1402 + 1403 + int rval; 1404 + unsigned long flags; 1405 + uint8_t man_id, flash_id, sec_number, data; 1406 + uint16_t wd; 1407 + uint32_t addr, liter, sec_mask, rest_addr; 1408 + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1409 + 1410 + /* Suspend HBA. */ 1411 + qla2x00_suspend_hba(ha); 1412 + 1413 + rval = QLA_SUCCESS; 1414 + sec_number = 0; 1415 + 1416 + /* Reset ISP chip. 
*/ 1417 + spin_lock_irqsave(&ha->hardware_lock, flags); 1418 + WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET); 1419 + pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); 1420 + 1421 + /* Go with write. */ 1422 + qla2x00_flash_enable(ha); 1423 + do { /* Loop once to provide quick error exit */ 1424 + /* Structure of flash memory based on manufacturer */ 1425 + if (IS_OEM_001(ha)) { 1426 + /* OEM variant with special flash part. */ 1427 + man_id = flash_id = 0; 1428 + rest_addr = 0xffff; 1429 + sec_mask = 0x10000; 1430 + goto update_flash; 1431 + } 1432 + qla2x00_get_flash_manufacturer(ha, &man_id, &flash_id); 1433 + switch (man_id) { 1434 + case 0x20: /* ST flash. */ 1435 + if (flash_id == 0xd2 || flash_id == 0xe3) { 1436 + /* 1437 + * ST m29w008at part - 64kb sector size with 1438 + * 32kb,8kb,8kb,16kb sectors at memory address 1439 + * 0xf0000. 1440 + */ 1441 + rest_addr = 0xffff; 1442 + sec_mask = 0x10000; 1443 + break; 1444 + } 1445 + /* 1446 + * ST m29w010b part - 16kb sector size 1447 + * Default to 16kb sectors 1448 + */ 1449 + rest_addr = 0x3fff; 1450 + sec_mask = 0x1c000; 1451 + break; 1452 + case 0x40: /* Mostel flash. */ 1453 + /* Mostel v29c51001 part - 512 byte sector size. */ 1454 + rest_addr = 0x1ff; 1455 + sec_mask = 0x1fe00; 1456 + break; 1457 + case 0xbf: /* SST flash. */ 1458 + /* SST39sf10 part - 4kb sector size. */ 1459 + rest_addr = 0xfff; 1460 + sec_mask = 0x1f000; 1461 + break; 1462 + case 0xda: /* Winbond flash. */ 1463 + /* Winbond W29EE011 part - 256 byte sector size. */ 1464 + rest_addr = 0x7f; 1465 + sec_mask = 0x1ff80; 1466 + break; 1467 + case 0xc2: /* Macronix flash. */ 1468 + /* 64k sector size. */ 1469 + if (flash_id == 0x38 || flash_id == 0x4f) { 1470 + rest_addr = 0xffff; 1471 + sec_mask = 0x10000; 1472 + break; 1473 + } 1474 + /* Fall through... */ 1475 + 1476 + case 0x1f: /* Atmel flash. */ 1477 + /* 512k sector size. */ 1478 + if (flash_id == 0x13) { 1479 + rest_addr = 0x7fffffff; 1480 + sec_mask = 0x80000000; 1481 + break; 1482 + } 1483 + /* Fall through... */ 1484 + 1485 + case 0x01: /* AMD flash. */ 1486 + if (flash_id == 0x38 || flash_id == 0x40 || 1487 + flash_id == 0x4f) { 1488 + /* Am29LV081 part - 64kb sector size. */ 1489 + /* Am29LV002BT part - 64kb sector size. */ 1490 + rest_addr = 0xffff; 1491 + sec_mask = 0x10000; 1492 + break; 1493 + } else if (flash_id == 0x3e) { 1494 + /* 1495 + * Am29LV008b part - 64kb sector size with 1496 + * 32kb,8kb,8kb,16kb sector at memory address 1497 + * h0xf0000. 1498 + */ 1499 + rest_addr = 0xffff; 1500 + sec_mask = 0x10000; 1501 + break; 1502 + } else if (flash_id == 0x20 || flash_id == 0x6e) { 1503 + /* 1504 + * Am29LV010 part or AM29f010 - 16kb sector 1505 + * size. 1506 + */ 1507 + rest_addr = 0x3fff; 1508 + sec_mask = 0x1c000; 1509 + break; 1510 + } else if (flash_id == 0x6d) { 1511 + /* Am29LV001 part - 8kb sector size. */ 1512 + rest_addr = 0x1fff; 1513 + sec_mask = 0x1e000; 1514 + break; 1515 + } 1516 + default: 1517 + /* Default to 16 kb sector size. */ 1518 + rest_addr = 0x3fff; 1519 + sec_mask = 0x1c000; 1520 + break; 1521 + } 1522 + 1523 + update_flash: 1524 + if (IS_QLA2322(ha) || IS_QLA6322(ha)) { 1525 + if (qla2x00_erase_flash(ha, man_id, flash_id)) { 1526 + rval = QLA_FUNCTION_FAILED; 1527 + break; 1528 + } 1529 + } 1530 + 1531 + for (addr = offset, liter = 0; liter < length; liter++, 1532 + addr++) { 1533 + data = buf[liter]; 1534 + /* Are we at the beginning of a sector? 
*/ 1535 + if ((addr & rest_addr) == 0) { 1536 + if (IS_QLA2322(ha) || IS_QLA6322(ha)) { 1537 + if (addr >= 0x10000UL) { 1538 + if (((addr >> 12) & 0xf0) && 1539 + ((man_id == 0x01 && 1540 + flash_id == 0x3e) || 1541 + (man_id == 0x20 && 1542 + flash_id == 0xd2))) { 1543 + sec_number++; 1544 + if (sec_number == 1) { 1545 + rest_addr = 1546 + 0x7fff; 1547 + sec_mask = 1548 + 0x18000; 1549 + } else if ( 1550 + sec_number == 2 || 1551 + sec_number == 3) { 1552 + rest_addr = 1553 + 0x1fff; 1554 + sec_mask = 1555 + 0x1e000; 1556 + } else if ( 1557 + sec_number == 4) { 1558 + rest_addr = 1559 + 0x3fff; 1560 + sec_mask = 1561 + 0x1c000; 1562 + } 1563 + } 1564 + } 1565 + } else if (addr == ha->optrom_size / 2) { 1566 + WRT_REG_WORD(&reg->nvram, NVR_SELECT); 1567 + RD_REG_WORD(&reg->nvram); 1568 + } 1569 + 1570 + if (flash_id == 0xda && man_id == 0xc1) { 1571 + qla2x00_write_flash_byte(ha, 0x5555, 1572 + 0xaa); 1573 + qla2x00_write_flash_byte(ha, 0x2aaa, 1574 + 0x55); 1575 + qla2x00_write_flash_byte(ha, 0x5555, 1576 + 0xa0); 1577 + } else if (!IS_QLA2322(ha) && !IS_QLA6322(ha)) { 1578 + /* Then erase it */ 1579 + if (qla2x00_erase_flash_sector(ha, 1580 + addr, sec_mask, man_id, 1581 + flash_id)) { 1582 + rval = QLA_FUNCTION_FAILED; 1583 + break; 1584 + } 1585 + if (man_id == 0x01 && flash_id == 0x6d) 1586 + sec_number++; 1587 + } 1588 + } 1589 + 1590 + if (man_id == 0x01 && flash_id == 0x6d) { 1591 + if (sec_number == 1 && 1592 + addr == (rest_addr - 1)) { 1593 + rest_addr = 0x0fff; 1594 + sec_mask = 0x1f000; 1595 + } else if (sec_number == 3 && (addr & 0x7ffe)) { 1596 + rest_addr = 0x3fff; 1597 + sec_mask = 0x1c000; 1598 + } 1599 + } 1600 + 1601 + if (qla2x00_program_flash_address(ha, addr, data, 1602 + man_id, flash_id)) { 1603 + rval = QLA_FUNCTION_FAILED; 1604 + break; 1605 + } 1606 + } 1607 + } while (0); 1608 + qla2x00_flash_disable(ha); 1609 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 1610 + 1611 + /* Resume HBA. */ 1612 + qla2x00_resume_hba(ha); 1613 + 1614 + return rval; 1615 + } 1616 + 1617 + uint8_t * 1618 + qla24xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, 1619 + uint32_t offset, uint32_t length) 1620 + { 1621 + /* Suspend HBA. */ 1622 + scsi_block_requests(ha->host); 1623 + ha->isp_ops.disable_intrs(ha); 1624 + set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 1625 + 1626 + /* Go with read. */ 1627 + qla24xx_read_flash_data(ha, (uint32_t *)buf, offset >> 2, length >> 2); 1628 + 1629 + /* Resume HBA. */ 1630 + clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 1631 + ha->isp_ops.enable_intrs(ha); 1632 + scsi_unblock_requests(ha->host); 1633 + 1634 + return buf; 1635 + } 1636 + 1637 + int 1638 + qla24xx_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, 1639 + uint32_t offset, uint32_t length) 1640 + { 1641 + int rval; 1642 + 1643 + /* Suspend HBA. */ 1644 + scsi_block_requests(ha->host); 1645 + ha->isp_ops.disable_intrs(ha); 1646 + set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 1647 + 1648 + /* Go with write. */ 1649 + rval = qla24xx_write_flash_data(ha, (uint32_t *)buf, offset >> 2, 1650 + length >> 2); 1651 + 1652 + /* Resume HBA -- RISC reset needed. */ 1653 + clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 1654 + set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 1655 + up(ha->dpc_wait); 1656 + qla2x00_wait_for_hba_online(ha); 1657 + scsi_unblock_requests(ha->host); 1658 + 1659 + return rval; 1660 + }
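The new optrom entry points are designed to be driven with a buffer of ha->optrom_size bytes (set per-ISP in qla_os.c above) and released via the vfree() added to the teardown path. A minimal usage sketch (buffer management here is illustrative; the sysfs attribute code in qla_attr.c is the intended consumer):

        ha->optrom_buffer = vmalloc(ha->optrom_size);
        if (ha->optrom_buffer) {
                memset(ha->optrom_buffer, 0, ha->optrom_size);
                ha->isp_ops.read_optrom(ha, ha->optrom_buffer, 0,
                    ha->optrom_size);
        }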
+59
drivers/scsi/scsi_lib.c
··· 16 16 #include <linux/init.h> 17 17 #include <linux/pci.h> 18 18 #include <linux/delay.h> 19 + #include <linux/hardirq.h> 19 20 20 21 #include <scsi/scsi.h> 21 22 #include <scsi/scsi_dbg.h> ··· 2249 2248 device_for_each_child(dev, NULL, target_unblock); 2250 2249 } 2251 2250 EXPORT_SYMBOL_GPL(scsi_target_unblock); 2251 + 2252 + 2253 + struct work_queue_work { 2254 + struct work_struct work; 2255 + void (*fn)(void *); 2256 + void *data; 2257 + }; 2258 + 2259 + static void execute_in_process_context_work(void *data) 2260 + { 2261 + void (*fn)(void *data); 2262 + struct work_queue_work *wqw = data; 2263 + 2264 + fn = wqw->fn; 2265 + data = wqw->data; 2266 + 2267 + kfree(wqw); 2268 + 2269 + fn(data); 2270 + } 2271 + 2272 + /** 2273 + * scsi_execute_in_process_context - reliably execute the routine with user context 2274 + * @fn: the function to execute 2275 + * @data: data to pass to the function 2276 + * 2277 + * Executes the function immediately if process context is available, 2278 + * otherwise schedules the function for delayed execution. 2279 + * 2280 + * Returns: 0 - function was executed 2281 + * 1 - function was scheduled for execution 2282 + * <0 - error 2283 + */ 2284 + int scsi_execute_in_process_context(void (*fn)(void *data), void *data) 2285 + { 2286 + struct work_queue_work *wqw; 2287 + 2288 + if (!in_interrupt()) { 2289 + fn(data); 2290 + return 0; 2291 + } 2292 + 2293 + wqw = kmalloc(sizeof(struct work_queue_work), GFP_ATOMIC); 2294 + 2295 + if (unlikely(!wqw)) { 2296 + printk(KERN_ERR "Failed to allocate memory\n"); 2297 + WARN_ON(1); 2298 + return -ENOMEM; 2299 + } 2300 + 2301 + INIT_WORK(&wqw->work, execute_in_process_context_work, wqw); 2302 + wqw->fn = fn; 2303 + wqw->data = data; 2304 + schedule_work(&wqw->work); 2305 + 2306 + return 1; 2307 + } 2308 + EXPORT_SYMBOL_GPL(scsi_execute_in_process_context);
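A hedged usage sketch for the new helper; my_release() and obj are hypothetical, the point being that a release path which might run in IRQ context no longer needs its own workqueue plumbing:

        static void my_release(void *data)
        {
                /* May sleep: when the original caller was in interrupt
                 * context this runs later via schedule_work(). */
                kfree(data);
        }

        /* obj: previously kmalloc()ed; callable from any context. */
        scsi_execute_in_process_context(my_release, obj);

The scsi_scan.c and scsi_sysfs.c hunks below are the two in-tree conversions to this helper.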
+4 -22
drivers/scsi/scsi_scan.c
··· 387 387 return found_target; 388 388 } 389 389 390 - struct work_queue_wrapper { 391 - struct work_struct work; 392 - struct scsi_target *starget; 393 - }; 394 - 395 - static void scsi_target_reap_work(void *data) { 396 - struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data; 397 - struct scsi_target *starget = wqw->starget; 390 + static void scsi_target_reap_usercontext(void *data) 391 + { 392 + struct scsi_target *starget = data; 398 393 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 399 394 unsigned long flags; 400 - 401 - kfree(wqw); 402 395 403 396 spin_lock_irqsave(shost->host_lock, flags); 404 397 ··· 421 428 */ 422 429 void scsi_target_reap(struct scsi_target *starget) 423 430 { 424 - struct work_queue_wrapper *wqw = 425 - kzalloc(sizeof(struct work_queue_wrapper), GFP_ATOMIC); 426 - 427 - if (!wqw) { 428 - starget_printk(KERN_ERR, starget, 429 - "Failed to allocate memory in scsi_reap_target()\n"); 430 - return; 431 - } 432 - 433 - INIT_WORK(&wqw->work, scsi_target_reap_work, wqw); 434 - wqw->starget = starget; 435 - schedule_work(&wqw->work); 431 + scsi_execute_in_process_context(scsi_target_reap_usercontext, starget); 436 432 } 437 433 438 434 /**
+8 -1
drivers/scsi/scsi_sysfs.c
··· 217 217 put_device(&sdev->sdev_gendev); 218 218 } 219 219 220 - static void scsi_device_dev_release(struct device *dev) 220 + static void scsi_device_dev_release_usercontext(void *data) 221 221 { 222 + struct device *dev = data; 222 223 struct scsi_device *sdev; 223 224 struct device *parent; 224 225 struct scsi_target *starget; ··· 238 237 239 238 if (sdev->request_queue) { 240 239 sdev->request_queue->queuedata = NULL; 240 + /* user context needed to free queue */ 241 241 scsi_free_queue(sdev->request_queue); 242 242 /* temporary expedient, try to catch use of queue lock 243 243 * after free of sdev */ ··· 252 250 253 251 if (parent) 254 252 put_device(parent); 253 + } 254 + 255 + static void scsi_device_dev_release(struct device *dev) 256 + { 257 + scsi_execute_in_process_context(scsi_device_dev_release_usercontext, dev); 255 258 } 256 259 257 260 static struct class sdev_class = {
+130 -130
drivers/scsi/scsi_transport_iscsi.c
··· 39 39 struct iscsi_transport *iscsi_transport; 40 40 struct list_head list; 41 41 /* 42 - * List of sessions for this transport 43 - */ 44 - struct list_head sessions; 45 - /* 46 42 * based on transport capabilities, at register time we set these 47 43 * bits to tell the transport class it wants attributes displayed 48 44 * in sysfs or that it can support different iSCSI Data-Path ··· 160 164 #define Z_MAX_ERROR 16 161 165 #define Z_HIWAT_ERROR 12 162 166 167 + static LIST_HEAD(sesslist); 168 + static DEFINE_SPINLOCK(sesslock); 163 169 static LIST_HEAD(connlist); 164 170 static DEFINE_SPINLOCK(connlock); 171 + 172 + static struct iscsi_cls_session *iscsi_session_lookup(uint64_t handle) 173 + { 174 + unsigned long flags; 175 + struct iscsi_cls_session *sess; 176 + 177 + spin_lock_irqsave(&sesslock, flags); 178 + list_for_each_entry(sess, &sesslist, sess_list) { 179 + if (sess == iscsi_ptr(handle)) { 180 + spin_unlock_irqrestore(&sesslock, flags); 181 + return sess; 182 + } 183 + } 184 + spin_unlock_irqrestore(&sesslock, flags); 185 + return NULL; 186 + } 187 + 188 + static struct iscsi_cls_conn *iscsi_conn_lookup(uint64_t handle) 189 + { 190 + unsigned long flags; 191 + struct iscsi_cls_conn *conn; 192 + 193 + spin_lock_irqsave(&connlock, flags); 194 + list_for_each_entry(conn, &connlist, conn_list) { 195 + if (conn == iscsi_ptr(handle)) { 196 + spin_unlock_irqrestore(&connlock, flags); 197 + return conn; 198 + } 199 + } 200 + spin_unlock_irqrestore(&connlock, flags); 201 + return NULL; 202 + } 165 203 166 204 /* 167 205 * The following functions can be used by LLDs that allocate ··· 395 365 { 396 366 struct iscsi_cls_session *session; 397 367 struct Scsi_Host *shost; 368 + unsigned long flags; 398 369 399 370 shost = scsi_host_alloc(transport->host_template, 400 371 hostdata_privsize(transport)); ··· 420 389 goto remove_host; 421 390 422 391 *(unsigned long*)shost->hostdata = (unsigned long)session; 392 + spin_lock_irqsave(&sesslock, flags); 393 + list_add(&session->sess_list, &sesslist); 394 + spin_unlock_irqrestore(&sesslock, flags); 423 395 return shost; 424 396 425 397 remove_host: ··· 444 410 int iscsi_transport_destroy_session(struct Scsi_Host *shost) 445 411 { 446 412 struct iscsi_cls_session *session; 413 + unsigned long flags; 447 414 448 415 scsi_remove_host(shost); 449 416 session = hostdata_session(shost->hostdata); 417 + spin_lock_irqsave(&sesslock, flags); 418 + list_del(&session->sess_list); 419 + spin_unlock_irqrestore(&sesslock, flags); 450 420 iscsi_destroy_session(session); 451 421 /* ref from host alloc */ 452 422 scsi_host_put(shost); ··· 462 424 /* 463 425 * iscsi interface functions 464 426 */ 465 - static struct iscsi_cls_conn* 466 - iscsi_if_find_conn(uint64_t key) 467 - { 468 - unsigned long flags; 469 - struct iscsi_cls_conn *conn; 470 - 471 - spin_lock_irqsave(&connlock, flags); 472 - list_for_each_entry(conn, &connlist, conn_list) 473 - if (conn->connh == key) { 474 - spin_unlock_irqrestore(&connlock, flags); 475 - return conn; 476 - } 477 - spin_unlock_irqrestore(&connlock, flags); 478 - return NULL; 479 - } 480 - 481 427 static struct iscsi_internal * 482 428 iscsi_if_transport_lookup(struct iscsi_transport *tt) 483 429 { ··· 526 504 if (!zp) 527 505 return NULL; 528 506 507 + zp->size = size; 508 + zp->hiwat = hiwat; 509 + INIT_LIST_HEAD(&zp->freequeue); 510 + spin_lock_init(&zp->freelock); 511 + atomic_set(&zp->allocated, 0); 512 + 529 513 zp->pool = mempool_create(max, mempool_zone_alloc_skb, 530 514 mempool_zone_free_skb, zp); 531 515 if (!zp->pool) { 
532 516 kfree(zp); 533 517 return NULL; 534 518 } 535 - 536 - zp->size = size; 537 - zp->hiwat = hiwat; 538 - 539 - INIT_LIST_HEAD(&zp->freequeue); 540 - spin_lock_init(&zp->freelock); 541 - atomic_set(&zp->allocated, 0); 542 519 543 520 return zp; 544 521 } ··· 580 559 return 0; 581 560 } 582 561 583 - int iscsi_recv_pdu(iscsi_connh_t connh, struct iscsi_hdr *hdr, 562 + int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, 584 563 char *data, uint32_t data_size) 585 564 { 586 565 struct nlmsghdr *nlh; 587 566 struct sk_buff *skb; 588 567 struct iscsi_uevent *ev; 589 - struct iscsi_cls_conn *conn; 590 568 char *pdu; 591 569 int len = NLMSG_SPACE(sizeof(*ev) + sizeof(struct iscsi_hdr) + 592 570 data_size); 593 - 594 - conn = iscsi_if_find_conn(connh); 595 - BUG_ON(!conn); 596 571 597 572 mempool_zone_complete(conn->z_pdu); 598 573 599 574 skb = mempool_zone_get_skb(conn->z_pdu); 600 575 if (!skb) { 601 - iscsi_conn_error(connh, ISCSI_ERR_CONN_FAILED); 576 + iscsi_conn_error(conn, ISCSI_ERR_CONN_FAILED); 602 577 dev_printk(KERN_ERR, &conn->dev, "iscsi: can not deliver " 603 578 "control PDU: OOM\n"); 604 579 return -ENOMEM; ··· 607 590 ev->type = ISCSI_KEVENT_RECV_PDU; 608 591 if (atomic_read(&conn->z_pdu->allocated) >= conn->z_pdu->hiwat) 609 592 ev->iferror = -ENOMEM; 610 - ev->r.recv_req.conn_handle = connh; 593 + ev->r.recv_req.conn_handle = iscsi_handle(conn); 611 594 pdu = (char*)ev + sizeof(*ev); 612 595 memcpy(pdu, hdr, sizeof(struct iscsi_hdr)); 613 596 memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size); ··· 616 599 } 617 600 EXPORT_SYMBOL_GPL(iscsi_recv_pdu); 618 601 619 - void iscsi_conn_error(iscsi_connh_t connh, enum iscsi_err error) 602 + void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error) 620 603 { 621 604 struct nlmsghdr *nlh; 622 605 struct sk_buff *skb; 623 606 struct iscsi_uevent *ev; 624 - struct iscsi_cls_conn *conn; 625 607 int len = NLMSG_SPACE(sizeof(*ev)); 626 - 627 - conn = iscsi_if_find_conn(connh); 628 - BUG_ON(!conn); 629 608 630 609 mempool_zone_complete(conn->z_error); 631 610 ··· 639 626 if (atomic_read(&conn->z_error->allocated) >= conn->z_error->hiwat) 640 627 ev->iferror = -ENOMEM; 641 628 ev->r.connerror.error = error; 642 - ev->r.connerror.conn_handle = connh; 629 + ev->r.connerror.conn_handle = iscsi_handle(conn); 643 630 644 631 iscsi_unicast_skb(conn->z_error, skb); 645 632 ··· 675 662 } 676 663 677 664 static int 678 - iscsi_if_get_stats(struct iscsi_transport *transport, struct sk_buff *skb, 679 - struct nlmsghdr *nlh) 665 + iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh) 680 666 { 681 667 struct iscsi_uevent *ev = NLMSG_DATA(nlh); 682 668 struct iscsi_stats *stats; ··· 689 677 ISCSI_STATS_CUSTOM_MAX); 690 678 int err = 0; 691 679 692 - conn = iscsi_if_find_conn(ev->u.get_stats.conn_handle); 680 + conn = iscsi_conn_lookup(ev->u.get_stats.conn_handle); 693 681 if (!conn) 694 682 return -EEXIST; 695 683 ··· 719 707 ((char*)evstat + sizeof(*evstat)); 720 708 memset(stats, 0, sizeof(*stats)); 721 709 722 - transport->get_stats(ev->u.get_stats.conn_handle, stats); 710 + transport->get_stats(conn, stats); 723 711 actual_size = NLMSG_SPACE(sizeof(struct iscsi_uevent) + 724 712 sizeof(struct iscsi_stats) + 725 713 sizeof(struct iscsi_stats_custom) * 726 714 stats->custom_length); 727 715 actual_size -= sizeof(*nlhstat); 728 716 actual_size = NLMSG_LENGTH(actual_size); 729 - skb_trim(skb, NLMSG_ALIGN(actual_size)); 717 + skb_trim(skbstat, NLMSG_ALIGN(actual_size)); 730 718 
nlhstat->nlmsg_len = actual_size; 731 719 732 720 err = iscsi_unicast_skb(conn->z_pdu, skbstat); ··· 739 727 iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev) 740 728 { 741 729 struct iscsi_transport *transport = priv->iscsi_transport; 742 - struct Scsi_Host *shost; 730 + struct iscsi_cls_session *session; 731 + uint32_t sid; 743 732 744 - if (!transport->create_session) 745 - return -EINVAL; 746 - 747 - shost = transport->create_session(&priv->t, 748 - ev->u.c_session.initial_cmdsn); 749 - if (!shost) 733 + session = transport->create_session(&priv->t, 734 + ev->u.c_session.initial_cmdsn, 735 + &sid); 736 + if (!session) 750 737 return -ENOMEM; 751 738 752 - ev->r.c_session_ret.session_handle = iscsi_handle(iscsi_hostdata(shost->hostdata)); 753 - ev->r.c_session_ret.sid = shost->host_no; 739 + ev->r.c_session_ret.session_handle = iscsi_handle(session); 740 + ev->r.c_session_ret.sid = sid; 754 741 return 0; 755 742 } 756 743 757 744 static int 758 - iscsi_if_destroy_session(struct iscsi_internal *priv, struct iscsi_uevent *ev) 745 + iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev) 759 746 { 760 - struct iscsi_transport *transport = priv->iscsi_transport; 761 - 762 - struct Scsi_Host *shost; 763 - 764 - if (!transport->destroy_session) 765 - return -EINVAL; 766 - 767 - shost = scsi_host_lookup(ev->u.d_session.sid); 768 - if (shost == ERR_PTR(-ENXIO)) 769 - return -EEXIST; 770 - 771 - if (transport->destroy_session) 772 - transport->destroy_session(shost); 773 - /* ref from host lookup */ 774 - scsi_host_put(shost); 775 - return 0; 776 - } 777 - 778 - static int 779 - iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev){ 780 - struct Scsi_Host *shost; 781 747 struct iscsi_cls_conn *conn; 748 + struct iscsi_cls_session *session; 782 749 unsigned long flags; 783 750 784 - if (!transport->create_conn) 751 + session = iscsi_session_lookup(ev->u.c_conn.session_handle); 752 + if (!session) 785 753 return -EINVAL; 786 754 787 - shost = scsi_host_lookup(ev->u.c_conn.sid); 788 - if (shost == ERR_PTR(-ENXIO)) 789 - return -EEXIST; 790 - 791 - conn = transport->create_conn(shost, ev->u.c_conn.cid); 755 + conn = transport->create_conn(session, ev->u.c_conn.cid); 792 756 if (!conn) 793 - goto release_ref; 757 + return -ENOMEM; 794 758 795 759 conn->z_pdu = mempool_zone_init(Z_MAX_PDU, 796 760 NLMSG_SPACE(sizeof(struct iscsi_uevent) + ··· 788 800 goto free_pdu_pool; 789 801 } 790 802 791 - ev->r.handle = conn->connh = iscsi_handle(conn->dd_data); 803 + ev->r.handle = iscsi_handle(conn); 792 804 793 805 spin_lock_irqsave(&connlock, flags); 794 806 list_add(&conn->conn_list, &connlist); 795 807 conn->active = 1; 796 808 spin_unlock_irqrestore(&connlock, flags); 797 809 798 - scsi_host_put(shost); 799 810 return 0; 800 811 801 812 free_pdu_pool: ··· 802 815 destroy_conn: 803 816 if (transport->destroy_conn) 804 817 transport->destroy_conn(conn->dd_data); 805 - release_ref: 806 - scsi_host_put(shost); 807 818 return -ENOMEM; 808 819 } 809 820 ··· 812 827 struct iscsi_cls_conn *conn; 813 828 struct mempool_zone *z_error, *z_pdu; 814 829 815 - conn = iscsi_if_find_conn(ev->u.d_conn.conn_handle); 830 + conn = iscsi_conn_lookup(ev->u.d_conn.conn_handle); 816 831 if (!conn) 817 - return -EEXIST; 818 - 819 - if (!transport->destroy_conn) 820 832 return -EINVAL; 821 - 822 833 spin_lock_irqsave(&connlock, flags); 823 834 conn->active = 0; 824 835 list_del(&conn->conn_list); ··· 839 858 struct iscsi_uevent *ev = NLMSG_DATA(nlh); 
840 859 struct iscsi_transport *transport = NULL; 841 860 struct iscsi_internal *priv; 842 - 843 - if (NETLINK_CREDS(skb)->uid) 844 - return -EPERM; 861 + struct iscsi_cls_session *session; 862 + struct iscsi_cls_conn *conn; 845 863 846 864 priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle)); 847 865 if (!priv) 848 866 return -EINVAL; 849 867 transport = priv->iscsi_transport; 850 868 851 - daemon_pid = NETLINK_CREDS(skb)->pid; 869 + if (!try_module_get(transport->owner)) 870 + return -EINVAL; 852 871 853 872 switch (nlh->nlmsg_type) { 854 873 case ISCSI_UEVENT_CREATE_SESSION: 855 874 err = iscsi_if_create_session(priv, ev); 856 875 break; 857 876 case ISCSI_UEVENT_DESTROY_SESSION: 858 - err = iscsi_if_destroy_session(priv, ev); 877 + session = iscsi_session_lookup(ev->u.d_session.session_handle); 878 + if (session) 879 + transport->destroy_session(session); 880 + else 881 + err = -EINVAL; 859 882 break; 860 883 case ISCSI_UEVENT_CREATE_CONN: 861 884 err = iscsi_if_create_conn(transport, ev); ··· 868 883 err = iscsi_if_destroy_conn(transport, ev); 869 884 break; 870 885 case ISCSI_UEVENT_BIND_CONN: 871 - if (!iscsi_if_find_conn(ev->u.b_conn.conn_handle)) 872 - return -EEXIST; 873 - ev->r.retcode = transport->bind_conn( 874 - ev->u.b_conn.session_handle, 875 - ev->u.b_conn.conn_handle, 876 - ev->u.b_conn.transport_fd, 877 - ev->u.b_conn.is_leading); 886 + session = iscsi_session_lookup(ev->u.b_conn.session_handle); 887 + conn = iscsi_conn_lookup(ev->u.b_conn.conn_handle); 888 + 889 + if (session && conn) 890 + ev->r.retcode = transport->bind_conn(session, conn, 891 + ev->u.b_conn.transport_fd, 892 + ev->u.b_conn.is_leading); 893 + else 894 + err = -EINVAL; 878 895 break; 879 896 case ISCSI_UEVENT_SET_PARAM: 880 - if (!iscsi_if_find_conn(ev->u.set_param.conn_handle)) 881 - return -EEXIST; 882 - ev->r.retcode = transport->set_param( 883 - ev->u.set_param.conn_handle, 884 - ev->u.set_param.param, ev->u.set_param.value); 897 + conn = iscsi_conn_lookup(ev->u.set_param.conn_handle); 898 + if (conn) 899 + ev->r.retcode = transport->set_param(conn, 900 + ev->u.set_param.param, ev->u.set_param.value); 901 + else 902 + err = -EINVAL; 885 903 break; 886 904 case ISCSI_UEVENT_START_CONN: 887 - if (!iscsi_if_find_conn(ev->u.start_conn.conn_handle)) 888 - return -EEXIST; 889 - ev->r.retcode = transport->start_conn( 890 - ev->u.start_conn.conn_handle); 905 + conn = iscsi_conn_lookup(ev->u.start_conn.conn_handle); 906 + if (conn) 907 + ev->r.retcode = transport->start_conn(conn); 908 + else 909 + err = -EINVAL; 910 + 891 911 break; 892 912 case ISCSI_UEVENT_STOP_CONN: 893 - if (!iscsi_if_find_conn(ev->u.stop_conn.conn_handle)) 894 - return -EEXIST; 895 - transport->stop_conn(ev->u.stop_conn.conn_handle, 896 - ev->u.stop_conn.flag); 913 + conn = iscsi_conn_lookup(ev->u.stop_conn.conn_handle); 914 + if (conn) 915 + transport->stop_conn(conn, ev->u.stop_conn.flag); 916 + else 917 + err = -EINVAL; 897 918 break; 898 919 case ISCSI_UEVENT_SEND_PDU: 899 - if (!iscsi_if_find_conn(ev->u.send_pdu.conn_handle)) 900 - return -EEXIST; 901 - ev->r.retcode = transport->send_pdu( 902 - ev->u.send_pdu.conn_handle, 903 - (struct iscsi_hdr*)((char*)ev + sizeof(*ev)), 904 - (char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size, 905 - ev->u.send_pdu.data_size); 920 + conn = iscsi_conn_lookup(ev->u.send_pdu.conn_handle); 921 + if (conn) 922 + ev->r.retcode = transport->send_pdu(conn, 923 + (struct iscsi_hdr*)((char*)ev + sizeof(*ev)), 924 + (char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size, 925 + 
ev->u.send_pdu.data_size); 926 + else 927 + err = -EINVAL; 906 928 break; 907 929 case ISCSI_UEVENT_GET_STATS: 908 - err = iscsi_if_get_stats(transport, skb, nlh); 930 + err = iscsi_if_get_stats(transport, nlh); 909 931 break; 910 932 default: 911 933 err = -EINVAL; 912 934 break; 913 935 } 914 936 937 + module_put(transport->owner); 915 938 return err; 916 939 } 917 940 918 941 /* Get message from skb (based on rtnetlink_rcv_skb). Each message is 919 942 * processed by iscsi_if_recv_msg. Malformed skbs with wrong length are 920 - * discarded silently. */ 943 + * or invalid creds discarded silently. */ 921 944 static void 922 945 iscsi_if_rx(struct sock *sk, int len) 923 946 { ··· 933 940 934 941 mutex_lock(&rx_queue_mutex); 935 942 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { 943 + if (NETLINK_CREDS(skb)->uid) { 944 + skb_pull(skb, skb->len); 945 + goto free_skb; 946 + } 947 + daemon_pid = NETLINK_CREDS(skb)->pid; 948 + 936 949 while (skb->len >= NLMSG_SPACE(0)) { 937 950 int err; 938 951 uint32_t rlen; ··· 950 951 skb->len < nlh->nlmsg_len) { 951 952 break; 952 953 } 954 + 953 955 ev = NLMSG_DATA(nlh); 954 956 rlen = NLMSG_ALIGN(nlh->nlmsg_len); 955 957 if (rlen > skb->len) 956 958 rlen = skb->len; 959 + 957 960 err = iscsi_if_recv_msg(skb, nlh); 958 961 if (err) { 959 962 ev->type = ISCSI_KEVENT_IF_ERROR; ··· 979 978 } while (err < 0 && err != -ECONNREFUSED); 980 979 skb_pull(skb, rlen); 981 980 } 981 + free_skb: 982 982 kfree_skb(skb); 983 983 } 984 984 mutex_unlock(&rx_queue_mutex); ··· 999 997 struct iscsi_cls_conn *conn = iscsi_cdev_to_conn(cdev); \ 1000 998 struct iscsi_transport *t = conn->transport; \ 1001 999 \ 1002 - t->get_conn_param(conn->dd_data, param, &value); \ 1000 + t->get_conn_param(conn, param, &value); \ 1003 1001 return snprintf(buf, 20, format"\n", value); \ 1004 1002 } 1005 1003 ··· 1026 1024 { \ 1027 1025 uint32_t value = 0; \ 1028 1026 struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \ 1029 - struct Scsi_Host *shost = iscsi_session_to_shost(session); \ 1030 1027 struct iscsi_transport *t = session->transport; \ 1031 1028 \ 1032 - t->get_session_param(shost, param, &value); \ 1029 + t->get_session_param(session, param, &value); \ 1033 1030 return snprintf(buf, 20, format"\n", value); \ 1034 1031 } 1035 1032 ··· 1122 1121 return NULL; 1123 1122 memset(priv, 0, sizeof(*priv)); 1124 1123 INIT_LIST_HEAD(&priv->list); 1125 - INIT_LIST_HEAD(&priv->sessions); 1126 1124 priv->iscsi_transport = tt; 1127 1125 1128 1126 priv->cdev.class = &iscsi_transport_class;
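A note on the lookup helpers introduced above: the 64-bit handles exchanged with the userspace daemon are raw pointer values, so iscsi_session_lookup() and iscsi_conn_lookup() never dereference them directly; a handle is honored only if it compares equal to an object still linked on sesslist or connlist. An illustration of the failure mode this prevents (stale_handle is hypothetical):

        /* A handle captured before a DESTROY_CONN event matches nothing on
         * connlist afterwards, so the request fails with -EINVAL instead of
         * dereferencing freed memory. */
        conn = iscsi_conn_lookup(stale_handle);
        if (!conn)
                return -EINVAL;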
+1 -1
drivers/scsi/sym53c8xx_2/sym_hipd.c
··· 3588 3588 3589 3589 if (pm) { 3590 3590 dp_scr = scr_to_cpu(pm->ret); 3591 - dp_ofs -= scr_to_cpu(pm->sg.size); 3591 + dp_ofs -= scr_to_cpu(pm->sg.size) & 0x00ffffff; 3592 3592 } 3593 3593 3594 3594 /*
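
The one-line sym_hipd.c change masks the scatter entry with 0x00ffffff before using it as a byte count. A plausible reading, and an assumption not stated in the diff itself: the sg.size word doubles as a SCRIPTS move instruction word whose top byte is not part of the transfer length, so only the low 24 bits may be subtracted from the offset. A sketch of the masking, with an invented macro name (the driver open-codes the constant):

#include <stdint.h>

/* Keep only the 24-bit byte count; the top byte of the word belongs
 * to the chip, not to the segment length. */
#define SYM_LEN_MASK 0x00ffffffu	/* assumed name, for illustration */

static inline uint32_t sym_seg_len(uint32_t sg_size_word)
{
	return sg_size_word & SYM_LEN_MASK;
}
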
-3
include/scsi/iscsi_if.h
··· 163 163 }; 164 164 #define ISCSI_PARAM_MAX 14 165 165 166 - typedef uint64_t iscsi_sessionh_t; /* iSCSI Data-Path session handle */ 167 - typedef uint64_t iscsi_connh_t; /* iSCSI Data-Path connection handle */ 168 - 169 166 #define iscsi_ptr(_handle) ((void*)(unsigned long)_handle) 170 167 #define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr) 171 168 #define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
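
With the iscsi_sessionh_t/iscsi_connh_t typedefs gone, handles crossing the netlink interface are plain kernel pointers packed into a uint64_t via the two macros kept above. A small round-trip sketch; the exdrv_* wrappers are invented, the macros are from the header:

#include <scsi/iscsi_if.h>
#include <scsi/scsi_transport_iscsi.h>

static uint64_t exdrv_conn_to_handle(struct iscsi_cls_conn *conn)
{
	return iscsi_handle(conn);	/* pointer -> uint64_t */
}

static struct iscsi_cls_conn *exdrv_handle_to_conn(uint64_t handle)
{
	return iscsi_ptr(handle);	/* uint64_t -> pointer */
}
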
+2
include/scsi/scsi.h
··· 433 433 /* Used to obtain the PCI location of a device */ 434 434 #define SCSI_IOCTL_GET_PCI 0x5387 435 435 436 + int scsi_execute_in_process_context(void (*fn)(void *data), void *data); 437 + 436 438 #endif /* _SCSI_SCSI_H */
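
Only the prototype of scsi_execute_in_process_context() appears here; the declared contract is a callback plus an opaque data pointer. A hedged usage sketch, assuming the helper arranges for fn to run where sleeping is allowed; my_rescan(), my_complete() and struct my_dev are invented:

#include <scsi/scsi.h>

struct my_dev {
	int needs_rescan;
};

/* Runs in process context, so it may sleep. */
static void my_rescan(void *data)
{
	struct my_dev *dev = data;

	dev->needs_rescan = 0;
	/* ... blocking rescan work would go here ... */
}

/* Caller, possibly in completion (interrupt) context: */
static void my_complete(struct my_dev *dev)
{
	scsi_execute_in_process_context(my_rescan, dev);
}
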
+18 -16
include/scsi/scsi_transport_iscsi.h
··· 63 63 int max_lun; 64 64 unsigned int max_conn; 65 65 unsigned int max_cmd_len; 66 - struct Scsi_Host *(*create_session) (struct scsi_transport_template *t, 67 - uint32_t initial_cmdsn); 68 - void (*destroy_session) (struct Scsi_Host *shost); 69 - struct iscsi_cls_conn *(*create_conn) (struct Scsi_Host *shost, 66 + struct iscsi_cls_session *(*create_session) 67 + (struct scsi_transport_template *t, uint32_t sn, uint32_t *sid); 68 + void (*destroy_session) (struct iscsi_cls_session *session); 69 + struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess, 70 70 uint32_t cid); 71 - int (*bind_conn) (iscsi_sessionh_t session, iscsi_connh_t conn, 71 + int (*bind_conn) (struct iscsi_cls_session *session, 72 + struct iscsi_cls_conn *cls_conn, 72 73 uint32_t transport_fd, int is_leading); 73 - int (*start_conn) (iscsi_connh_t conn); 74 - void (*stop_conn) (iscsi_connh_t conn, int flag); 74 + int (*start_conn) (struct iscsi_cls_conn *conn); 75 + void (*stop_conn) (struct iscsi_cls_conn *conn, int flag); 75 76 void (*destroy_conn) (struct iscsi_cls_conn *conn); 76 - int (*set_param) (iscsi_connh_t conn, enum iscsi_param param, 77 + int (*set_param) (struct iscsi_cls_conn *conn, enum iscsi_param param, 77 78 uint32_t value); 78 - int (*get_conn_param) (void *conndata, enum iscsi_param param, 79 + int (*get_conn_param) (struct iscsi_cls_conn *conn, 80 + enum iscsi_param param, 79 81 uint32_t *value); 80 - int (*get_session_param) (struct Scsi_Host *shost, 82 + int (*get_session_param) (struct iscsi_cls_session *session, 81 83 enum iscsi_param param, uint32_t *value); 82 - int (*send_pdu) (iscsi_connh_t conn, struct iscsi_hdr *hdr, 84 + int (*send_pdu) (struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, 83 85 char *data, uint32_t data_size); 84 - void (*get_stats) (iscsi_connh_t conn, struct iscsi_stats *stats); 86 + void (*get_stats) (struct iscsi_cls_conn *conn, 87 + struct iscsi_stats *stats); 85 88 }; 86 89 87 90 /* ··· 96 93 /* 97 94 * control plane upcalls 98 95 */ 99 - extern void iscsi_conn_error(iscsi_connh_t conn, enum iscsi_err error); 100 - extern int iscsi_recv_pdu(iscsi_connh_t conn, struct iscsi_hdr *hdr, 96 + extern void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error); 97 + extern int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, 101 98 char *data, uint32_t data_size); 102 99 103 100 struct iscsi_cls_conn { 104 101 struct list_head conn_list; /* item in connlist */ 105 102 void *dd_data; /* LLD private data */ 106 103 struct iscsi_transport *transport; 107 - iscsi_connh_t connh; 108 104 int active; /* must be accessed with the connlock */ 109 105 struct device dev; /* sysfs transport/container device */ 110 106 struct mempool_zone *z_error; ··· 115 113 container_of(_dev, struct iscsi_cls_conn, dev) 116 114 117 115 struct iscsi_cls_session { 118 - struct list_head list; /* item in session_list */ 116 + struct list_head sess_list; /* item in session_list */ 119 117 struct iscsi_transport *transport; 120 118 struct device dev; /* sysfs transport/container device */ 121 119 };
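
After this conversion every data-path callback takes a struct pointer instead of a 64-bit handle, and driver-private state hangs off dd_data. A sketch of how an LLD's connection ops look under the new signatures; the exdrv_* names and struct exdrv_conn are invented, and a real transport would fill in the remaining fields of the ops table:

#include <scsi/scsi_transport_iscsi.h>

struct exdrv_conn {
	int active;
};

static int exdrv_start_conn(struct iscsi_cls_conn *cls_conn)
{
	struct exdrv_conn *conn = cls_conn->dd_data;	/* LLD private data */

	conn->active = 1;
	return 0;
}

static void exdrv_stop_conn(struct iscsi_cls_conn *cls_conn, int flag)
{
	struct exdrv_conn *conn = cls_conn->dd_data;

	conn->active = 0;
}

static struct iscsi_transport exdrv_iscsi_transport = {
	.start_conn = exdrv_start_conn,
	.stop_conn  = exdrv_stop_conn,
};
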