Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6

+2389 -813
+23
Documentation/scsi/ChangeLog.megaraid_sas
··· 1 1 Release Date : Mon Jan 23 14:09:01 PST 2006 - Sumant Patro <Sumant.Patro@lsil.com> 2 2 Current Version : 00.00.02.02 3 3 Older Version : 00.00.02.01
··· 1 + 1 Release Date : Wed Feb 03 14:31:44 PST 2006 - Sumant Patro <Sumant.Patro@lsil.com> 2 + 2 Current Version : 00.00.02.04 3 + 3 Older Version : 00.00.02.04 4 + 5 + i. Support for 1078 type (ppc IOP) controller, device id : 0x60 added. 6 + During initialization, depending on the device id, the template members 7 + are initialized with function pointers specific to the ppc or 8 + xscale controllers. 9 + 10 + -Sumant Patro <Sumant.Patro@lsil.com> 11 + 12 + 1 Release Date : Fri Feb 03 14:16:25 PST 2006 - Sumant Patro 13 + <Sumant.Patro@lsil.com> 14 + 2 Current Version : 00.00.02.04 15 + 3 Older Version : 00.00.02.02 16 + i. Register 16 byte CDB capability with scsi midlayer 17 + 18 + "This patch properly registers the 16 byte command length capability of the 19 + megaraid_sas controlled hardware with the scsi midlayer. All megaraid_sas 20 + hardware supports 16 byte CDB's." 21 + 22 + -Joshua Giles <joshua_giles@dell.com> 23 + 24 1 Release Date : Mon Jan 23 14:09:01 PST 2006 - Sumant Patro <Sumant.Patro@lsil.com> 25 2 Current Version : 00.00.02.02 26 3 Older Version : 00.00.02.01
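Note: the 00.00.02.04 entry above describes selecting controller-specific function pointers by PCI device id at init time. A minimal sketch of that template-dispatch pattern, with illustrative struct and function names (the real driver's template carries more operations):

/* Sketch of the dispatch pattern described in the changelog:
 * controller-specific operations are collected in a template struct,
 * and the probe path selects a template by PCI device id. */
#include <stdio.h>

struct instance_template {
    const char *name;
    void (*enable_intr)(void);                /* controller-specific register pokes */
    void (*fire_cmd)(unsigned long frame_phys_addr);
};

static void ppc_enable_intr(void)             { printf("ppc: unmask doorbell\n"); }
static void ppc_fire_cmd(unsigned long a)     { printf("ppc: post 0x%lx\n", a); }
static void xscale_enable_intr(void)          { printf("xscale: unmask OMR\n"); }
static void xscale_fire_cmd(unsigned long a)  { printf("xscale: post 0x%lx\n", a); }

static const struct instance_template tmpl_ppc =
    { "ppc1078", ppc_enable_intr, ppc_fire_cmd };
static const struct instance_template tmpl_xscale =
    { "xscale", xscale_enable_intr, xscale_fire_cmd };

int main(void)
{
    unsigned short device_id = 0x60;          /* 1078 (ppc IOP), per the changelog */
    const struct instance_template *t =
        (device_id == 0x60) ? &tmpl_ppc : &tmpl_xscale;

    /* From here on, common code never branches on the device id. */
    t->enable_intr();
    t->fire_cmd(0x1000);
    return 0;
}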
+2 -113
drivers/message/fusion/mptbase.c
··· 452 } else if (func == MPI_FUNCTION_EVENT_ACK) { 453 dprintk((MYIOC_s_INFO_FMT "mpt_base_reply, EventAck reply received\n", 454 ioc->name)); 455 - } else if (func == MPI_FUNCTION_CONFIG || 456 - func == MPI_FUNCTION_TOOLBOX) { 457 CONFIGPARMS *pCfg; 458 unsigned long flags; 459 ··· 5326 } 5327 5328 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 5329 - /** 5330 - * mpt_toolbox - Generic function to issue toolbox message 5331 - * @ioc - Pointer to an adapter structure 5332 - * @cfg - Pointer to a toolbox structure. Struct contains 5333 - * action, page address, direction, physical address 5334 - * and pointer to a configuration page header 5335 - * Page header is updated. 5336 - * 5337 - * Returns 0 for success 5338 - * -EPERM if not allowed due to ISR context 5339 - * -EAGAIN if no msg frames currently available 5340 - * -EFAULT for non-successful reply or no reply (timeout) 5341 - */ 5342 - int 5343 - mpt_toolbox(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg) 5344 - { 5345 - ToolboxIstwiReadWriteRequest_t *pReq; 5346 - MPT_FRAME_HDR *mf; 5347 - struct pci_dev *pdev; 5348 - unsigned long flags; 5349 - int rc; 5350 - u32 flagsLength; 5351 - int in_isr; 5352 - 5353 - /* Prevent calling wait_event() (below), if caller happens 5354 - * to be in ISR context, because that is fatal! 5355 - */ 5356 - in_isr = in_interrupt(); 5357 - if (in_isr) { 5358 - dcprintk((MYIOC_s_WARN_FMT "toobox request not allowed in ISR context!\n", 5359 - ioc->name)); 5360 - return -EPERM; 5361 - } 5362 - 5363 - /* Get and Populate a free Frame 5364 - */ 5365 - if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { 5366 - dcprintk((MYIOC_s_WARN_FMT "mpt_toolbox: no msg frames!\n", 5367 - ioc->name)); 5368 - return -EAGAIN; 5369 - } 5370 - pReq = (ToolboxIstwiReadWriteRequest_t *)mf; 5371 - pReq->Tool = pCfg->action; 5372 - pReq->Reserved = 0; 5373 - pReq->ChainOffset = 0; 5374 - pReq->Function = MPI_FUNCTION_TOOLBOX; 5375 - pReq->Reserved1 = 0; 5376 - pReq->Reserved2 = 0; 5377 - pReq->MsgFlags = 0; 5378 - pReq->Flags = pCfg->dir; 5379 - pReq->BusNum = 0; 5380 - pReq->Reserved3 = 0; 5381 - pReq->NumAddressBytes = 0x01; 5382 - pReq->Reserved4 = 0; 5383 - pReq->DataLength = cpu_to_le16(0x04); 5384 - pdev = ioc->pcidev; 5385 - if (pdev->devfn & 1) 5386 - pReq->DeviceAddr = 0xB2; 5387 - else 5388 - pReq->DeviceAddr = 0xB0; 5389 - pReq->Addr1 = 0; 5390 - pReq->Addr2 = 0; 5391 - pReq->Addr3 = 0; 5392 - pReq->Reserved5 = 0; 5393 - 5394 - /* Add a SGE to the config request. 
5395 - */ 5396 - 5397 - flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | 4; 5398 - 5399 - mpt_add_sge((char *)&pReq->SGL, flagsLength, pCfg->physAddr); 5400 - 5401 - dcprintk((MYIOC_s_INFO_FMT "Sending Toolbox request, Tool=%x\n", 5402 - ioc->name, pReq->Tool)); 5403 - 5404 - /* Append pCfg pointer to end of mf 5405 - */ 5406 - *((void **) (((u8 *) mf) + (ioc->req_sz - sizeof(void *)))) = (void *) pCfg; 5407 - 5408 - /* Initalize the timer 5409 - */ 5410 - init_timer(&pCfg->timer); 5411 - pCfg->timer.data = (unsigned long) ioc; 5412 - pCfg->timer.function = mpt_timer_expired; 5413 - pCfg->wait_done = 0; 5414 - 5415 - /* Set the timer; ensure 10 second minimum */ 5416 - if (pCfg->timeout < 10) 5417 - pCfg->timer.expires = jiffies + HZ*10; 5418 - else 5419 - pCfg->timer.expires = jiffies + HZ*pCfg->timeout; 5420 - 5421 - /* Add to end of Q, set timer and then issue this command */ 5422 - spin_lock_irqsave(&ioc->FreeQlock, flags); 5423 - list_add_tail(&pCfg->linkage, &ioc->configQ); 5424 - spin_unlock_irqrestore(&ioc->FreeQlock, flags); 5425 - 5426 - add_timer(&pCfg->timer); 5427 - mpt_put_msg_frame(mpt_base_index, ioc, mf); 5428 - wait_event(mpt_waitq, pCfg->wait_done); 5429 - 5430 - /* mf has been freed - do not access */ 5431 - 5432 - rc = pCfg->status; 5433 - 5434 - return rc; 5435 - } 5436 - 5437 - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 5438 /* 5439 * mpt_timer_expired - Call back for timer process. 5440 * Used only internal config functionality. ··· 6032 if (ioc->events && (ioc->eventTypes & ( 1 << event))) { 6033 int idx; 6034 6035 - idx = ioc->eventContext % ioc->eventLogSize; 6036 6037 ioc->events[idx].event = event; 6038 ioc->events[idx].eventContext = ioc->eventContext; ··· 6430 EXPORT_SYMBOL(mpt_stm_index); 6431 EXPORT_SYMBOL(mpt_HardResetHandler); 6432 EXPORT_SYMBOL(mpt_config); 6433 - EXPORT_SYMBOL(mpt_toolbox); 6434 EXPORT_SYMBOL(mpt_findImVolumes); 6435 EXPORT_SYMBOL(mpt_read_ioc_pg_3); 6436 EXPORT_SYMBOL(mpt_alloc_fw_memory);
··· 452 } else if (func == MPI_FUNCTION_EVENT_ACK) { 453 dprintk((MYIOC_s_INFO_FMT "mpt_base_reply, EventAck reply received\n", 454 ioc->name)); 455 + } else if (func == MPI_FUNCTION_CONFIG) { 456 CONFIGPARMS *pCfg; 457 unsigned long flags; 458 ··· 5327 } 5328 5329 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 5330 /* 5331 * mpt_timer_expired - Call back for timer process. 5332 * Used only internal config functionality. ··· 6142 if (ioc->events && (ioc->eventTypes & ( 1 << event))) { 6143 int idx; 6144 6145 + idx = ioc->eventContext % MPTCTL_EVENT_LOG_SIZE; 6146 6147 ioc->events[idx].event = event; 6148 ioc->events[idx].eventContext = ioc->eventContext; ··· 6540 EXPORT_SYMBOL(mpt_stm_index); 6541 EXPORT_SYMBOL(mpt_HardResetHandler); 6542 EXPORT_SYMBOL(mpt_config); 6543 EXPORT_SYMBOL(mpt_findImVolumes); 6544 EXPORT_SYMBOL(mpt_read_ioc_pg_3); 6545 EXPORT_SYMBOL(mpt_alloc_fw_memory);
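Note: the hunk above (and the matching one in mptscsih.c below) indexes the event ring with the compile-time MPTCTL_EVENT_LOG_SIZE instead of a per-IOC eventLogSize field. A self-contained model of that fixed-size ring, with an illustrative record layout:

/* A monotonically increasing context counter is reduced mod a
 * compile-time log size, so old entries are overwritten once the ring
 * wraps. MPTCTL_EVENT_LOG_SIZE mirrors the driver constant. */
#include <stdio.h>
#include <stdint.h>

#define MPTCTL_EVENT_LOG_SIZE 10

struct event_rec { uint32_t event; uint32_t context; };

static struct event_rec events[MPTCTL_EVENT_LOG_SIZE];
static uint32_t event_context;                /* never reset while the ring lives */

static void log_event(uint32_t event)
{
    int idx = event_context % MPTCTL_EVENT_LOG_SIZE;
    events[idx].event = event;
    events[idx].eventContext = event_context, events[idx].context = event_context;
    event_context++;
}

int main(void)
{
    for (uint32_t e = 0; e < 25; e++)
        log_event(0x20 + e);
    /* slot 0 now holds the 21st event (context 20), not the 1st */
    printf("slot 0: event=0x%x context=%u\n",
           (unsigned)events[0].event, (unsigned)events[0].context);
    return 0;
}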
+1 -1
drivers/message/fusion/mptbase.h
··· 616 * increments by 32 bytes 617 */ 618 int errata_flag_1064; 619 u8 FirstWhoInit; 620 u8 upload_fw; /* If set, do a fw upload */ 621 u8 reload_fw; /* Force a FW Reload on next reset */ ··· 1027 extern void mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buf, int *size, int len, int showlan); 1028 extern int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag); 1029 extern int mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *cfg); 1030 - extern int mpt_toolbox(MPT_ADAPTER *ioc, CONFIGPARMS *cfg); 1031 extern void mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size); 1032 extern void mpt_free_fw_memory(MPT_ADAPTER *ioc); 1033 extern int mpt_findImVolumes(MPT_ADAPTER *ioc);
··· 616 * increments by 32 bytes 617 */ 618 int errata_flag_1064; 619 + int aen_event_read_flag; /* flag to indicate event log was read*/ 620 u8 FirstWhoInit; 621 u8 upload_fw; /* If set, do a fw upload */ 622 u8 reload_fw; /* Force a FW Reload on next reset */ ··· 1026 extern void mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buf, int *size, int len, int showlan); 1027 extern int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag); 1028 extern int mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *cfg); 1029 extern void mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size); 1030 extern void mpt_free_fw_memory(MPT_ADAPTER *ioc); 1031 extern int mpt_findImVolumes(MPT_ADAPTER *ioc);
+201 -42
drivers/message/fusion/mptctl.c
··· 136 */ 137 static int mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase); 138 139 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 140 /* 141 * Scatter gather list (SGL) sizes and limits... ··· 391 } 392 393 /* Now wait for the command to complete */ 394 - ii = wait_event_interruptible_timeout(mptctl_wait, 395 ioctl->wait_done == 1, 396 HZ*5 /* 5 second timeout */); 397 398 if(ii <=0 && (ioctl->wait_done != 1 )) { 399 ioctl->wait_done = 0; 400 retval = -1; /* return failure */ 401 } 402 403 mptctl_bus_reset_done: 404 405 - mpt_free_msg_frame(hd->ioc, mf); 406 mptctl_free_tm_flags(ioctl->ioc); 407 return retval; 408 } ··· 475 } 476 477 return 1; 478 } 479 480 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ ··· 743 u16 iocstat; 744 pFWDownloadReply_t ReplyMsg = NULL; 745 746 - dctlprintk((KERN_INFO "mptctl_do_fwdl called. mptctl_id = %xh.\n", mptctl_id)); 747 748 - dctlprintk((KERN_INFO "DbG: kfwdl.bufp = %p\n", ufwbuf)); 749 - dctlprintk((KERN_INFO "DbG: kfwdl.fwlen = %d\n", (int)fwlen)); 750 - dctlprintk((KERN_INFO "DbG: kfwdl.ioc = %04xh\n", ioc)); 751 752 - if ((ioc = mpt_verify_adapter(ioc, &iocp)) < 0) { 753 - dctlprintk(("%s@%d::_ioctl_fwdl - ioc%d not found!\n", 754 - __FILE__, __LINE__, ioc)); 755 return -ENODEV; /* (-6) No such device or address */ 756 - } 757 758 - /* Valid device. Get a message frame and construct the FW download message. 759 - */ 760 - if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL) 761 - return -EAGAIN; 762 dlmsg = (FWDownload_t*) mf; 763 ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL; 764 sgOut = (char *) (ptsge + 1); ··· 772 dlmsg->ChainOffset = 0; 773 dlmsg->Function = MPI_FUNCTION_FW_DOWNLOAD; 774 dlmsg->Reserved1[0] = dlmsg->Reserved1[1] = dlmsg->Reserved1[2] = 0; 775 - dlmsg->MsgFlags = 0; 776 777 /* Set up the Transaction SGE. 778 */ ··· 828 goto fwdl_out; 829 } 830 831 - dctlprintk((KERN_INFO "DbG: sgl buffer = %p, sgfrags = %d\n", sgl, numfrags)); 832 833 /* 834 * Parse SG list, copying sgl itself, ··· 877 /* 878 * Finally, perform firmware download. 
879 */ 880 - iocp->ioctl->wait_done = 0; 881 mpt_put_msg_frame(mptctl_id, iocp, mf); 882 883 /* Now wait for the command to complete */ 884 - ret = wait_event_interruptible_timeout(mptctl_wait, 885 iocp->ioctl->wait_done == 1, 886 HZ*60); 887 ··· 1219 /* Fill in the data and return the structure to the calling 1220 * program 1221 */ 1222 - if (ioc->bus_type == FC) 1223 karg->adapterType = MPT_IOCTL_INTERFACE_FC; 1224 else 1225 karg->adapterType = MPT_IOCTL_INTERFACE_SCSI; ··· 1246 karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn ); 1247 karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn ); 1248 } else if (cim_rev == 2) { 1249 - /* Get the PCI bus, device, function and segment ID numbers 1250 for the IOC */ 1251 karg->pciInfo.u.bits.busNumber = pdev->bus->number; 1252 karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn ); 1253 - karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn ); 1254 karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn ); 1255 karg->pciInfo.segmentID = pci_domain_nr(pdev->bus); 1256 } ··· 1575 return -ENODEV; 1576 } 1577 1578 - karg.eventEntries = ioc->eventLogSize; 1579 karg.eventTypes = ioc->eventTypes; 1580 1581 /* Copy the data from kernel memory to user memory ··· 1625 memset(ioc->events, 0, sz); 1626 ioc->alloc_total += sz; 1627 1628 - ioc->eventLogSize = MPTCTL_EVENT_LOG_SIZE; 1629 ioc->eventContext = 0; 1630 } 1631 ··· 1664 maxEvents = numBytes/sizeof(MPT_IOCTL_EVENTS); 1665 1666 1667 - max = ioc->eventLogSize < maxEvents ? ioc->eventLogSize : maxEvents; 1668 1669 /* If fewer than 1 event is requested, there must have 1670 * been some type of error. 1671 */ 1672 if ((max < 1) || !ioc->events) 1673 return -ENODATA; 1674 1675 /* Copy the data from kernel memory to user memory 1676 */ ··· 1894 case MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR: 1895 case MPI_FUNCTION_FW_DOWNLOAD: 1896 case MPI_FUNCTION_FC_PRIMITIVE_SEND: 1897 break; 1898 1899 case MPI_FUNCTION_SCSI_IO_REQUEST: ··· 1916 goto done_free_mem; 1917 } 1918 1919 - pScsiReq->MsgFlags = mpt_msg_flags(); 1920 1921 /* verify that app has not requested 1922 * more sense data than driver ··· 1969 } 1970 break; 1971 1972 case MPI_FUNCTION_RAID_ACTION: 1973 /* Just add a SGE 1974 */ ··· 2000 int scsidir = MPI_SCSIIO_CONTROL_READ; 2001 int dataSize; 2002 2003 - pScsiReq->MsgFlags = mpt_msg_flags(); 2004 2005 /* verify that app has not requested 2006 * more sense data than driver ··· 2232 2233 /* Now wait for the command to complete */ 2234 timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT; 2235 - timeout = wait_event_interruptible_timeout(mptctl_wait, 2236 ioc->ioctl->wait_done == 1, 2237 HZ*timeout); 2238 ··· 2348 hp_host_info_t __user *uarg = (void __user *) arg; 2349 MPT_ADAPTER *ioc; 2350 struct pci_dev *pdev; 2351 - char *pbuf; 2352 dma_addr_t buf_dma; 2353 hp_host_info_t karg; 2354 CONFIGPARMS cfg; 2355 ConfigPageHeader_t hdr; 2356 int iocnum; 2357 int rc, cim_rev; 2358 2359 dctlprintk((": mptctl_hp_hostinfo called.\n")); 2360 /* Reset long to int. 
Should affect IA64 and SPARC only ··· 2475 2476 karg.base_io_addr = pci_resource_start(pdev, 0); 2477 2478 - if (ioc->bus_type == FC) 2479 karg.bus_phys_width = HP_BUS_WIDTH_UNK; 2480 else 2481 karg.bus_phys_width = HP_BUS_WIDTH_16; ··· 2493 } 2494 } 2495 2496 - cfg.pageAddr = 0; 2497 - cfg.action = MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL; 2498 - cfg.dir = MPI_TB_ISTWI_FLAGS_READ; 2499 - cfg.timeout = 10; 2500 - pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma); 2501 - if (pbuf) { 2502 - cfg.physAddr = buf_dma; 2503 - if ((mpt_toolbox(ioc, &cfg)) == 0) { 2504 - karg.rsvd = *(u32 *)pbuf; 2505 - } 2506 - pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma); 2507 - pbuf = NULL; 2508 } 2509 2510 /* Copy the data from kernel memory to user memory 2511 */ ··· 2611 2612 /* There is nothing to do for FCP parts. 2613 */ 2614 - if (ioc->bus_type == FC) 2615 return 0; 2616 2617 if ((ioc->spi_data.sdp0length == 0) || (ioc->sh == NULL)) ··· 2721 static struct file_operations mptctl_fops = { 2722 .owner = THIS_MODULE, 2723 .llseek = no_llseek, 2724 .unlocked_ioctl = mptctl_ioctl, 2725 #ifdef CONFIG_COMPAT 2726 .compat_ioctl = compat_mpctl_ioctl, ··· 2965 dprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n")); 2966 } else { 2967 /* FIXME! */ 2968 } 2969 2970 return 0;
··· 136 */ 137 static int mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase); 138 139 + /* 140 + * Event Handler function 141 + */ 142 + static int mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply); 143 + struct fasync_struct *async_queue=NULL; 144 + 145 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 146 /* 147 * Scatter gather list (SGL) sizes and limits... ··· 385 } 386 387 /* Now wait for the command to complete */ 388 + ii = wait_event_timeout(mptctl_wait, 389 ioctl->wait_done == 1, 390 HZ*5 /* 5 second timeout */); 391 392 if(ii <=0 && (ioctl->wait_done != 1 )) { 393 + mpt_free_msg_frame(hd->ioc, mf); 394 ioctl->wait_done = 0; 395 retval = -1; /* return failure */ 396 } 397 398 mptctl_bus_reset_done: 399 400 mptctl_free_tm_flags(ioctl->ioc); 401 return retval; 402 } ··· 469 } 470 471 return 1; 472 + } 473 + 474 + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 475 + /* ASYNC Event Notification Support */ 476 + static int 477 + mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) 478 + { 479 + u8 event; 480 + 481 + event = le32_to_cpu(pEvReply->Event) & 0xFF; 482 + 483 + dctlprintk(("%s() called\n", __FUNCTION__)); 484 + if(async_queue == NULL) 485 + return 1; 486 + 487 + /* Raise SIGIO for persistent events. 488 + * TODO - this define is not in MPI spec yet, 489 + * but they plan to set it to 0x21 490 + */ 491 + if (event == 0x21 ) { 492 + ioc->aen_event_read_flag=1; 493 + dctlprintk(("Raised SIGIO to application\n")); 494 + devtprintk(("Raised SIGIO to application\n")); 495 + kill_fasync(&async_queue, SIGIO, POLL_IN); 496 + return 1; 497 + } 498 + 499 + /* This flag is set after SIGIO was raised, and 500 + * remains set until the application has read 501 + * the event log via ioctl=MPTEVENTREPORT 502 + */ 503 + if(ioc->aen_event_read_flag) 504 + return 1; 505 + 506 + /* Signal only for the events that are 507 + * requested for by the application 508 + */ 509 + if (ioc->events && (ioc->eventTypes & ( 1 << event))) { 510 + ioc->aen_event_read_flag=1; 511 + dctlprintk(("Raised SIGIO to application\n")); 512 + devtprintk(("Raised SIGIO to application\n")); 513 + kill_fasync(&async_queue, SIGIO, POLL_IN); 514 + } 515 + return 1; 516 + } 517 + 518 + static int 519 + mptctl_fasync(int fd, struct file *filep, int mode) 520 + { 521 + MPT_ADAPTER *ioc; 522 + 523 + list_for_each_entry(ioc, &ioc_list, list) 524 + ioc->aen_event_read_flag=0; 525 + 526 + dctlprintk(("%s() called\n", __FUNCTION__)); 527 + return fasync_helper(fd, filep, mode, &async_queue); 528 + } 529 + 530 + static int 531 + mptctl_release(struct inode *inode, struct file *filep) 532 + { 533 + dctlprintk(("%s() called\n", __FUNCTION__)); 534 + return fasync_helper(-1, filep, 0, &async_queue); 535 } 536 537 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ ··· 674 u16 iocstat; 675 pFWDownloadReply_t ReplyMsg = NULL; 676 677 + dctlprintk(("mptctl_do_fwdl called. mptctl_id = %xh.\n", mptctl_id)); 678 679 + dctlprintk(("DbG: kfwdl.bufp = %p\n", ufwbuf)); 680 + dctlprintk(("DbG: kfwdl.fwlen = %d\n", (int)fwlen)); 681 + dctlprintk(("DbG: kfwdl.ioc = %04xh\n", ioc)); 682 683 + if (mpt_verify_adapter(ioc, &iocp) < 0) { 684 + dctlprintk(("ioctl_fwdl - ioc%d not found!\n", 685 + ioc)); 686 return -ENODEV; /* (-6) No such device or address */ 687 + } else { 688 689 + /* Valid device. Get a message frame and construct the FW download message. 
690 + */ 691 + if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL) 692 + return -EAGAIN; 693 + } 694 dlmsg = (FWDownload_t*) mf; 695 ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL; 696 sgOut = (char *) (ptsge + 1); ··· 702 dlmsg->ChainOffset = 0; 703 dlmsg->Function = MPI_FUNCTION_FW_DOWNLOAD; 704 dlmsg->Reserved1[0] = dlmsg->Reserved1[1] = dlmsg->Reserved1[2] = 0; 705 + if (iocp->facts.MsgVersion >= MPI_VERSION_01_05) 706 + dlmsg->MsgFlags = MPI_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT; 707 + else 708 + dlmsg->MsgFlags = 0; 709 + 710 711 /* Set up the Transaction SGE. 712 */ ··· 754 goto fwdl_out; 755 } 756 757 + dctlprintk(("DbG: sgl buffer = %p, sgfrags = %d\n", sgl, numfrags)); 758 759 /* 760 * Parse SG list, copying sgl itself, ··· 803 /* 804 * Finally, perform firmware download. 805 */ 806 + ReplyMsg = NULL; 807 mpt_put_msg_frame(mptctl_id, iocp, mf); 808 809 /* Now wait for the command to complete */ 810 + ret = wait_event_timeout(mptctl_wait, 811 iocp->ioctl->wait_done == 1, 812 HZ*60); 813 ··· 1145 /* Fill in the data and return the structure to the calling 1146 * program 1147 */ 1148 + if (ioc->bus_type == SAS) 1149 + karg->adapterType = MPT_IOCTL_INTERFACE_SAS; 1150 + else if (ioc->bus_type == FC) 1151 karg->adapterType = MPT_IOCTL_INTERFACE_FC; 1152 else 1153 karg->adapterType = MPT_IOCTL_INTERFACE_SCSI; ··· 1170 karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn ); 1171 karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn ); 1172 } else if (cim_rev == 2) { 1173 + /* Get the PCI bus, device, function and segment ID numbers 1174 for the IOC */ 1175 karg->pciInfo.u.bits.busNumber = pdev->bus->number; 1176 karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn ); 1177 karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn ); 1178 karg->pciInfo.segmentID = pci_domain_nr(pdev->bus); 1179 } ··· 1500 return -ENODEV; 1501 } 1502 1503 + karg.eventEntries = MPTCTL_EVENT_LOG_SIZE; 1504 karg.eventTypes = ioc->eventTypes; 1505 1506 /* Copy the data from kernel memory to user memory ··· 1550 memset(ioc->events, 0, sz); 1551 ioc->alloc_total += sz; 1552 1553 ioc->eventContext = 0; 1554 } 1555 ··· 1590 maxEvents = numBytes/sizeof(MPT_IOCTL_EVENTS); 1591 1592 1593 + max = MPTCTL_EVENT_LOG_SIZE < maxEvents ? MPTCTL_EVENT_LOG_SIZE : maxEvents; 1594 1595 /* If fewer than 1 event is requested, there must have 1596 * been some type of error. 1597 */ 1598 if ((max < 1) || !ioc->events) 1599 return -ENODATA; 1600 + 1601 + /* reset this flag so SIGIO can restart */ 1602 + ioc->aen_event_read_flag=0; 1603 1604 /* Copy the data from kernel memory to user memory 1605 */ ··· 1817 case MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR: 1818 case MPI_FUNCTION_FW_DOWNLOAD: 1819 case MPI_FUNCTION_FC_PRIMITIVE_SEND: 1820 + case MPI_FUNCTION_TOOLBOX: 1821 + case MPI_FUNCTION_SAS_IO_UNIT_CONTROL: 1822 break; 1823 1824 case MPI_FUNCTION_SCSI_IO_REQUEST: ··· 1837 goto done_free_mem; 1838 } 1839 1840 + pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH; 1841 + pScsiReq->MsgFlags |= mpt_msg_flags(); 1842 + 1843 1844 /* verify that app has not requested 1845 * more sense data than driver ··· 1888 } 1889 break; 1890 1891 + case MPI_FUNCTION_SMP_PASSTHROUGH: 1892 + /* Check mf->PassthruFlags to determine if 1893 + * transfer is ImmediateMode or not. 1894 + * Immediate mode returns data in the ReplyFrame. 1895 + * Else, we are sending request and response data 1896 + * in two SGLs at the end of the mf. 
1897 + */ 1898 + break; 1899 + 1900 + case MPI_FUNCTION_SATA_PASSTHROUGH: 1901 + if (!ioc->sh) { 1902 + printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - " 1903 + "SCSI driver is not loaded. \n", 1904 + __FILE__, __LINE__); 1905 + rc = -EFAULT; 1906 + goto done_free_mem; 1907 + } 1908 + break; 1909 + 1910 case MPI_FUNCTION_RAID_ACTION: 1911 /* Just add a SGE 1912 */ ··· 1900 int scsidir = MPI_SCSIIO_CONTROL_READ; 1901 int dataSize; 1902 1903 + pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH; 1904 + pScsiReq->MsgFlags |= mpt_msg_flags(); 1905 + 1906 1907 /* verify that app has not requested 1908 * more sense data than driver ··· 2130 2131 /* Now wait for the command to complete */ 2132 timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT; 2133 + timeout = wait_event_timeout(mptctl_wait, 2134 ioc->ioctl->wait_done == 1, 2135 HZ*timeout); 2136 ··· 2246 hp_host_info_t __user *uarg = (void __user *) arg; 2247 MPT_ADAPTER *ioc; 2248 struct pci_dev *pdev; 2249 + char *pbuf=NULL; 2250 dma_addr_t buf_dma; 2251 hp_host_info_t karg; 2252 CONFIGPARMS cfg; 2253 ConfigPageHeader_t hdr; 2254 int iocnum; 2255 int rc, cim_rev; 2256 + ToolboxIstwiReadWriteRequest_t *IstwiRWRequest; 2257 + MPT_FRAME_HDR *mf = NULL; 2258 + MPIHeader_t *mpi_hdr; 2259 2260 dctlprintk((": mptctl_hp_hostinfo called.\n")); 2261 /* Reset long to int. Should affect IA64 and SPARC only ··· 2370 2371 karg.base_io_addr = pci_resource_start(pdev, 0); 2372 2373 + if ((ioc->bus_type == SAS) || (ioc->bus_type == FC)) 2374 karg.bus_phys_width = HP_BUS_WIDTH_UNK; 2375 else 2376 karg.bus_phys_width = HP_BUS_WIDTH_16; ··· 2388 } 2389 } 2390 2391 + /* 2392 + * Gather ISTWI(Industry Standard Two Wire Interface) Data 2393 + */ 2394 + if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) { 2395 + dfailprintk((MYIOC_s_WARN_FMT "%s, no msg frames!!\n", 2396 + ioc->name,__FUNCTION__)); 2397 + goto out; 2398 } 2399 + 2400 + IstwiRWRequest = (ToolboxIstwiReadWriteRequest_t *)mf; 2401 + mpi_hdr = (MPIHeader_t *) mf; 2402 + memset(IstwiRWRequest,0,sizeof(ToolboxIstwiReadWriteRequest_t)); 2403 + IstwiRWRequest->Function = MPI_FUNCTION_TOOLBOX; 2404 + IstwiRWRequest->Tool = MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL; 2405 + IstwiRWRequest->MsgContext = mpi_hdr->MsgContext; 2406 + IstwiRWRequest->Flags = MPI_TB_ISTWI_FLAGS_READ; 2407 + IstwiRWRequest->NumAddressBytes = 0x01; 2408 + IstwiRWRequest->DataLength = cpu_to_le16(0x04); 2409 + if (pdev->devfn & 1) 2410 + IstwiRWRequest->DeviceAddr = 0xB2; 2411 + else 2412 + IstwiRWRequest->DeviceAddr = 0xB0; 2413 + 2414 + pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma); 2415 + if (!pbuf) 2416 + goto out; 2417 + mpt_add_sge((char *)&IstwiRWRequest->SGL, 2418 + (MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma); 2419 + 2420 + ioc->ioctl->wait_done = 0; 2421 + mpt_put_msg_frame(mptctl_id, ioc, mf); 2422 + 2423 + rc = wait_event_timeout(mptctl_wait, 2424 + ioc->ioctl->wait_done == 1, 2425 + HZ*MPT_IOCTL_DEFAULT_TIMEOUT /* 10 sec */); 2426 + 2427 + if(rc <=0 && (ioc->ioctl->wait_done != 1 )) { 2428 + /* 2429 + * Now we need to reset the board 2430 + */ 2431 + mpt_free_msg_frame(ioc, mf); 2432 + mptctl_timeout_expired(ioc->ioctl); 2433 + goto out; 2434 + } 2435 + 2436 + /* 2437 + *ISTWI Data Definition 2438 + * pbuf[0] = FW_VERSION = 0x4 2439 + * pbuf[1] = Bay Count = 6 or 4 or 2, depending on 2440 + * the config, you should be seeing one out of these three values 2441 + * pbuf[2] = Drive Installed Map = bit pattern depend on which 2442 + * bays have drives in them 2443 + * pbuf[3] = Checksum (0x100 = (byte0 + 
byte2 + byte3) 2444 + */ 2445 + if (ioc->ioctl->status & MPT_IOCTL_STATUS_RF_VALID) 2446 + karg.rsvd = *(u32 *)pbuf; 2447 + 2448 + out: 2449 + if (pbuf) 2450 + pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma); 2451 2452 /* Copy the data from kernel memory to user memory 2453 */ ··· 2459 2460 /* There is nothing to do for FCP parts. 2461 */ 2462 + if ((ioc->bus_type == SAS) || (ioc->bus_type == FC)) 2463 return 0; 2464 2465 if ((ioc->spi_data.sdp0length == 0) || (ioc->sh == NULL)) ··· 2569 static struct file_operations mptctl_fops = { 2570 .owner = THIS_MODULE, 2571 .llseek = no_llseek, 2572 + .release = mptctl_release, 2573 + .fasync = mptctl_fasync, 2574 .unlocked_ioctl = mptctl_ioctl, 2575 #ifdef CONFIG_COMPAT 2576 .compat_ioctl = compat_mpctl_ioctl, ··· 2811 dprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n")); 2812 } else { 2813 /* FIXME! */ 2814 + } 2815 + 2816 + if (mpt_event_register(mptctl_id, mptctl_event_process) == 0) { 2817 + devtprintk((KERN_INFO MYNAM 2818 + ": Registered for IOC event notifications\n")); 2819 } 2820 2821 return 0;
+3 -1
drivers/message/fusion/mptctl.h
··· 169 * Read only. 170 * Data starts at offset 0xC 171 */ 172 - #define MPT_IOCTL_INTERFACE_FC (0x01) 173 #define MPT_IOCTL_INTERFACE_SCSI (0x00) 174 #define MPT_IOCTL_VERSION_LENGTH (32) 175 176 struct mpt_ioctl_iocinfo {
··· 169 * Read only. 170 * Data starts at offset 0xC 171 */ 172 #define MPT_IOCTL_INTERFACE_SCSI (0x00) 173 + #define MPT_IOCTL_INTERFACE_FC (0x01) 174 + #define MPT_IOCTL_INTERFACE_FC_IP (0x02) 175 + #define MPT_IOCTL_INTERFACE_SAS (0x03) 176 #define MPT_IOCTL_VERSION_LENGTH (32) 177 178 struct mpt_ioctl_iocinfo {
+1 -1
drivers/message/fusion/mptscsih.c
··· 2489 int idx; 2490 MPT_ADAPTER *ioc = hd->ioc; 2491 2492 - idx = ioc->eventContext % ioc->eventLogSize; 2493 ioc->events[idx].event = MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE; 2494 ioc->events[idx].eventContext = ioc->eventContext; 2495
··· 2489 int idx; 2490 MPT_ADAPTER *ioc = hd->ioc; 2491 2492 + idx = ioc->eventContext % MPTCTL_EVENT_LOG_SIZE; 2493 ioc->events[idx].event = MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE; 2494 ioc->events[idx].eventContext = ioc->eventContext; 2495
+30 -46
drivers/s390/scsi/zfcp_dbf.c
··· 710 _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, 711 struct zfcp_adapter *adapter, 712 struct scsi_cmnd *scsi_cmnd, 713 - struct zfcp_fsf_req *new_fsf_req) 714 { 715 - struct zfcp_fsf_req *fsf_req = 716 - (struct zfcp_fsf_req *)scsi_cmnd->host_scribble; 717 struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf; 718 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; 719 unsigned long flags; ··· 726 if (offset == 0) { 727 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); 728 strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE); 729 - if (scsi_cmnd->device) { 730 - rec->scsi_id = scsi_cmnd->device->id; 731 - rec->scsi_lun = scsi_cmnd->device->lun; 732 } 733 - rec->scsi_result = scsi_cmnd->result; 734 - rec->scsi_cmnd = (unsigned long)scsi_cmnd; 735 - rec->scsi_serial = scsi_cmnd->serial_number; 736 - memcpy(rec->scsi_opcode, 737 - &scsi_cmnd->cmnd, 738 - min((int)scsi_cmnd->cmd_len, 739 - ZFCP_DBF_SCSI_OPCODE)); 740 - rec->scsi_retries = scsi_cmnd->retries; 741 - rec->scsi_allowed = scsi_cmnd->allowed; 742 if (fsf_req != NULL) { 743 fcp_rsp = (struct fcp_rsp_iu *) 744 &(fsf_req->qtcb->bottom.io.fcp_rsp); ··· 772 rec->fsf_seqno = fsf_req->seq_no; 773 rec->fsf_issued = fsf_req->issued; 774 } 775 - if (new_fsf_req != NULL) { 776 - rec->type.new_fsf_req.fsf_reqid = 777 - (unsigned long) 778 - new_fsf_req; 779 - rec->type.new_fsf_req.fsf_seqno = 780 - new_fsf_req->seq_no; 781 - rec->type.new_fsf_req.fsf_issued = 782 - new_fsf_req->issued; 783 - } 784 } else { 785 strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE); 786 dump->total_size = buflen; ··· 794 inline void 795 zfcp_scsi_dbf_event_result(const char *tag, int level, 796 struct zfcp_adapter *adapter, 797 - struct scsi_cmnd *scsi_cmnd) 798 { 799 - _zfcp_scsi_dbf_event_common("rslt", 800 - tag, level, adapter, scsi_cmnd, NULL); 801 } 802 803 inline void 804 zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, 805 struct scsi_cmnd *scsi_cmnd, 806 - struct zfcp_fsf_req *new_fsf_req) 807 { 808 - _zfcp_scsi_dbf_event_common("abrt", 809 - tag, 1, adapter, scsi_cmnd, new_fsf_req); 810 } 811 812 inline void ··· 818 struct zfcp_adapter *adapter = unit->port->adapter; 819 820 _zfcp_scsi_dbf_event_common(flag == FCP_TARGET_RESET ? 
"trst" : "lrst", 821 - tag, 1, adapter, scsi_cmnd, NULL); 822 } 823 824 static int ··· 851 rec->scsi_retries); 852 len += zfcp_dbf_view(out_buf + len, "scsi_allowed", "0x%02x", 853 rec->scsi_allowed); 854 len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx", 855 rec->fsf_reqid); 856 len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x", ··· 882 min((int)rec->type.fcp.sns_info_len, 883 ZFCP_DBF_SCSI_FCP_SNS_INFO), 0, 884 rec->type.fcp.sns_info_len); 885 - } else if (strncmp(rec->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0) { 886 - len += zfcp_dbf_view(out_buf + len, "fsf_reqid_abort", "0x%0Lx", 887 - rec->type.new_fsf_req.fsf_reqid); 888 - len += zfcp_dbf_view(out_buf + len, "fsf_seqno_abort", "0x%08x", 889 - rec->type.new_fsf_req.fsf_seqno); 890 - len += zfcp_dbf_stck(out_buf + len, "fsf_issued", 891 - rec->type.new_fsf_req.fsf_issued); 892 - } else if ((strncmp(rec->tag, "trst", ZFCP_DBF_TAG_SIZE) == 0) || 893 - (strncmp(rec->tag, "lrst", ZFCP_DBF_TAG_SIZE) == 0)) { 894 - len += zfcp_dbf_view(out_buf + len, "fsf_reqid_reset", "0x%0Lx", 895 - rec->type.new_fsf_req.fsf_reqid); 896 - len += zfcp_dbf_view(out_buf + len, "fsf_seqno_reset", "0x%08x", 897 - rec->type.new_fsf_req.fsf_seqno); 898 - len += zfcp_dbf_stck(out_buf + len, "fsf_issued", 899 - rec->type.new_fsf_req.fsf_issued); 900 } 901 902 len += sprintf(out_buf + len, "\n");
··· 710 _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, 711 struct zfcp_adapter *adapter, 712 struct scsi_cmnd *scsi_cmnd, 713 + struct zfcp_fsf_req *fsf_req, 714 + struct zfcp_fsf_req *old_fsf_req) 715 { 716 struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf; 717 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; 718 unsigned long flags; ··· 727 if (offset == 0) { 728 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); 729 strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE); 730 + if (scsi_cmnd != NULL) { 731 + if (scsi_cmnd->device) { 732 + rec->scsi_id = scsi_cmnd->device->id; 733 + rec->scsi_lun = scsi_cmnd->device->lun; 734 + } 735 + rec->scsi_result = scsi_cmnd->result; 736 + rec->scsi_cmnd = (unsigned long)scsi_cmnd; 737 + rec->scsi_serial = scsi_cmnd->serial_number; 738 + memcpy(rec->scsi_opcode, &scsi_cmnd->cmnd, 739 + min((int)scsi_cmnd->cmd_len, 740 + ZFCP_DBF_SCSI_OPCODE)); 741 + rec->scsi_retries = scsi_cmnd->retries; 742 + rec->scsi_allowed = scsi_cmnd->allowed; 743 } 744 if (fsf_req != NULL) { 745 fcp_rsp = (struct fcp_rsp_iu *) 746 &(fsf_req->qtcb->bottom.io.fcp_rsp); ··· 772 rec->fsf_seqno = fsf_req->seq_no; 773 rec->fsf_issued = fsf_req->issued; 774 } 775 + rec->type.old_fsf_reqid = 776 + (unsigned long) old_fsf_req; 777 } else { 778 strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE); 779 dump->total_size = buflen; ··· 801 inline void 802 zfcp_scsi_dbf_event_result(const char *tag, int level, 803 struct zfcp_adapter *adapter, 804 + struct scsi_cmnd *scsi_cmnd, 805 + struct zfcp_fsf_req *fsf_req) 806 { 807 + _zfcp_scsi_dbf_event_common("rslt", tag, level, 808 + adapter, scsi_cmnd, fsf_req, NULL); 809 } 810 811 inline void 812 zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, 813 struct scsi_cmnd *scsi_cmnd, 814 + struct zfcp_fsf_req *new_fsf_req, 815 + struct zfcp_fsf_req *old_fsf_req) 816 { 817 + _zfcp_scsi_dbf_event_common("abrt", tag, 1, 818 + adapter, scsi_cmnd, new_fsf_req, old_fsf_req); 819 } 820 821 inline void ··· 823 struct zfcp_adapter *adapter = unit->port->adapter; 824 825 _zfcp_scsi_dbf_event_common(flag == FCP_TARGET_RESET ? "trst" : "lrst", 826 + tag, 1, adapter, scsi_cmnd, NULL, NULL); 827 } 828 829 static int ··· 856 rec->scsi_retries); 857 len += zfcp_dbf_view(out_buf + len, "scsi_allowed", "0x%02x", 858 rec->scsi_allowed); 859 + if (strncmp(rec->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0) { 860 + len += zfcp_dbf_view(out_buf + len, "old_fsf_reqid", "0x%0Lx", 861 + rec->type.old_fsf_reqid); 862 + } 863 len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx", 864 rec->fsf_reqid); 865 len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x", ··· 883 min((int)rec->type.fcp.sns_info_len, 884 ZFCP_DBF_SCSI_FCP_SNS_INFO), 0, 885 rec->type.fcp.sns_info_len); 886 } 887 888 len += sprintf(out_buf + len, "\n");
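Note: the reworked _zfcp_scsi_dbf_event_common takes the fsf_req explicitly and records at most the old request's id, so the record union shrinks to a single u64 interpreted by the record tag. A small model of that tag-discriminated record, with loosely matching field names:

/* The union payload is interpreted by the record's tag: the "abrt"
 * formatter prints old_fsf_reqid, other tags leave it untouched. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define TAG_SIZE 4

struct scsi_dbf_record {
    char tag[TAG_SIZE + 1];
    uint64_t fsf_reqid;
    union {
        uint64_t old_fsf_reqid;                    /* valid for "abrt" only */
        struct { uint8_t rsp_scsi_status; } fcp;   /* valid for "rslt" */
    } type;
};

static void view(const struct scsi_dbf_record *rec)
{
    if (strncmp(rec->tag, "abrt", TAG_SIZE) == 0)
        printf("old_fsf_reqid 0x%llx\n",
               (unsigned long long)rec->type.old_fsf_reqid);
    printf("fsf_reqid     0x%llx\n", (unsigned long long)rec->fsf_reqid);
}

int main(void)
{
    struct scsi_dbf_record rec = { "abrt", 0x2000,
                                   { .old_fsf_reqid = 0x1000 } };
    view(&rec);
    return 0;
}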
+1 -12
drivers/s390/scsi/zfcp_def.h
··· 152 #define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP 100 153 #define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7 154 155 - /* Retry 5 times every 2 second, then every minute */ 156 - #define ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES 5 157 - #define ZFCP_EXCHANGE_PORT_DATA_SHORT_SLEEP 200 158 - #define ZFCP_EXCHANGE_PORT_DATA_LONG_SLEEP 6000 159 - 160 /* timeout value for "default timer" for fsf requests */ 161 #define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ); 162 ··· 424 u32 fsf_seqno; 425 u64 fsf_issued; 426 union { 427 - struct { 428 - u64 fsf_reqid; 429 - u32 fsf_seqno; 430 - u64 fsf_issued; 431 - } new_fsf_req; 432 struct { 433 u8 rsp_validity; 434 u8 rsp_scsi_status; ··· 906 wwn_t peer_wwnn; /* P2P peer WWNN */ 907 wwn_t peer_wwpn; /* P2P peer WWPN */ 908 u32 peer_d_id; /* P2P peer D_ID */ 909 - wwn_t physical_wwpn; /* WWPN of physical port */ 910 - u32 physical_s_id; /* local FC port ID */ 911 struct ccw_device *ccw_device; /* S/390 ccw device */ 912 u8 fc_service_class; 913 u32 hydra_version; /* Hydra version */
··· 152 #define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP 100 153 #define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7 154 155 /* timeout value for "default timer" for fsf requests */ 156 #define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ); 157 ··· 429 u32 fsf_seqno; 430 u64 fsf_issued; 431 union { 432 + u64 old_fsf_reqid; 433 struct { 434 u8 rsp_validity; 435 u8 rsp_scsi_status; ··· 915 wwn_t peer_wwnn; /* P2P peer WWNN */ 916 wwn_t peer_wwpn; /* P2P peer WWPN */ 917 u32 peer_d_id; /* P2P peer D_ID */ 918 struct ccw_device *ccw_device; /* S/390 ccw device */ 919 u8 fc_service_class; 920 u32 hydra_version; /* Hydra version */
+30 -52
drivers/s390/scsi/zfcp_erp.c
··· 2246 { 2247 int retval; 2248 2249 - if ((atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, 2250 - &erp_action->adapter->status)) && 2251 - (erp_action->adapter->adapter_features & 2252 - FSF_FEATURE_HBAAPI_MANAGEMENT)) { 2253 - zfcp_erp_adapter_strategy_open_fsf_xport(erp_action); 2254 - atomic_set(&erp_action->adapter->erp_counter, 0); 2255 - return ZFCP_ERP_FAILED; 2256 - } 2257 - 2258 retval = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action); 2259 if (retval == ZFCP_ERP_FAILED) 2260 return ZFCP_ERP_FAILED; ··· 2257 return zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action); 2258 } 2259 2260 - /* 2261 - * function: 2262 - * 2263 - * purpose: 2264 - * 2265 - * returns: 2266 - */ 2267 static int 2268 zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action) 2269 { ··· 2334 zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action) 2335 { 2336 int ret; 2337 - int retries; 2338 - int sleep; 2339 - struct zfcp_adapter *adapter = erp_action->adapter; 2340 2341 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); 2342 2343 - retries = 0; 2344 - do { 2345 - write_lock(&adapter->erp_lock); 2346 - zfcp_erp_action_to_running(erp_action); 2347 - write_unlock(&adapter->erp_lock); 2348 - zfcp_erp_timeout_init(erp_action); 2349 - ret = zfcp_fsf_exchange_port_data(erp_action, adapter, NULL); 2350 - if (ret == -EOPNOTSUPP) { 2351 - debug_text_event(adapter->erp_dbf, 3, "a_xport_notsupp"); 2352 - return ZFCP_ERP_SUCCEEDED; 2353 - } else if (ret) { 2354 - debug_text_event(adapter->erp_dbf, 3, "a_xport_failed"); 2355 - return ZFCP_ERP_FAILED; 2356 - } 2357 - debug_text_event(adapter->erp_dbf, 6, "a_xport_ok"); 2358 2359 - down(&adapter->erp_ready_sem); 2360 - if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) { 2361 - ZFCP_LOG_INFO("error: exchange of port data " 2362 - "for adapter %s timed out\n", 2363 - zfcp_get_busid_by_adapter(adapter)); 2364 - break; 2365 - } 2366 - if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, 2367 - &adapter->status)) 2368 - break; 2369 2370 - if (retries < ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES) { 2371 - sleep = ZFCP_EXCHANGE_PORT_DATA_SHORT_SLEEP; 2372 - retries++; 2373 - } else 2374 - sleep = ZFCP_EXCHANGE_PORT_DATA_LONG_SLEEP; 2375 - schedule_timeout(sleep); 2376 - } while (1); 2377 2378 - return ZFCP_ERP_SUCCEEDED; 2379 } 2380 2381 /* ··· 3415 "(adapter %s, wwpn=0x%016Lx)\n", 3416 zfcp_get_busid_by_port(port), 3417 port->wwpn); 3418 } 3419 zfcp_port_put(port); 3420 break;
··· 2246 { 2247 int retval; 2248 2249 retval = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action); 2250 if (retval == ZFCP_ERP_FAILED) 2251 return ZFCP_ERP_FAILED; ··· 2266 return zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action); 2267 } 2268 2269 static int 2270 zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action) 2271 { ··· 2350 zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action) 2351 { 2352 int ret; 2353 + struct zfcp_adapter *adapter; 2354 2355 + adapter = erp_action->adapter; 2356 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); 2357 2358 + write_lock(&adapter->erp_lock); 2359 + zfcp_erp_action_to_running(erp_action); 2360 + write_unlock(&adapter->erp_lock); 2361 2362 + zfcp_erp_timeout_init(erp_action); 2363 + ret = zfcp_fsf_exchange_port_data(erp_action, adapter, NULL); 2364 + if (ret == -EOPNOTSUPP) { 2365 + debug_text_event(adapter->erp_dbf, 3, "a_xport_notsupp"); 2366 + return ZFCP_ERP_SUCCEEDED; 2367 + } else if (ret) { 2368 + debug_text_event(adapter->erp_dbf, 3, "a_xport_failed"); 2369 + return ZFCP_ERP_FAILED; 2370 + } 2371 + debug_text_event(adapter->erp_dbf, 6, "a_xport_ok"); 2372 2373 + ret = ZFCP_ERP_SUCCEEDED; 2374 + down(&adapter->erp_ready_sem); 2375 + if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) { 2376 + ZFCP_LOG_INFO("error: exchange port data timed out (adapter " 2377 + "%s)\n", zfcp_get_busid_by_adapter(adapter)); 2378 + ret = ZFCP_ERP_FAILED; 2379 + } 2380 + if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status)) { 2381 + ZFCP_LOG_INFO("error: exchange port data failed (adapter " 2382 + "%s\n", zfcp_get_busid_by_adapter(adapter)); 2383 + ret = ZFCP_ERP_FAILED; 2384 + } 2385 2386 + return ret; 2387 } 2388 2389 /* ··· 3439 "(adapter %s, wwpn=0x%016Lx)\n", 3440 zfcp_get_busid_by_port(port), 3441 port->wwpn); 3442 + else 3443 + scsi_flush_work(adapter->scsi_host); 3444 } 3445 zfcp_port_put(port); 3446 break;
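Note: the xport strategy above drops the retry loop entirely: one exchange-port request is issued, the thread blocks on erp_ready_sem, and success is judged from the XPORT_OK bit the reply handler sets. A user-space sketch of that single-shot shape, using a POSIX semaphore and an atomic status word as stand-ins for the adapter state:

/* Issue the request once, block on a readiness semaphore, then decide
 * from the status bit the completion path set. The handler thread
 * stands in for the FSF reply path. Build with -pthread. */
#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define XPORT_OK 0x1

static sem_t erp_ready_sem;
static atomic_uint status;

static void *fsf_handler(void *arg)            /* reply path */
{
    (void)arg;
    usleep(1000);                              /* request "completes" */
    atomic_fetch_or(&status, XPORT_OK);
    sem_post(&erp_ready_sem);
    return NULL;
}

int main(void)
{
    pthread_t t;
    sem_init(&erp_ready_sem, 0, 0);
    pthread_create(&t, NULL, fsf_handler, NULL);

    sem_wait(&erp_ready_sem);                  /* down(&adapter->erp_ready_sem) */
    if (!(atomic_load(&status) & XPORT_OK)) {
        puts("exchange port data failed");     /* ZFCP_ERP_FAILED */
        return 1;
    }
    puts("exchange port data succeeded");      /* ZFCP_ERP_SUCCEEDED */
    pthread_join(t, NULL);
    return 0;
}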
+3 -2
drivers/s390/scsi/zfcp_ext.h
··· 194 extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *); 195 196 extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *, 197 - struct scsi_cmnd *); 198 extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *, 199 - struct scsi_cmnd *, 200 struct zfcp_fsf_req *); 201 extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *, 202 struct scsi_cmnd *);
··· 194 extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *); 195 196 extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *, 197 + struct scsi_cmnd *, 198 + struct zfcp_fsf_req *); 199 extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *, 200 + struct scsi_cmnd *, struct zfcp_fsf_req *, 201 struct zfcp_fsf_req *); 202 extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *, 203 struct scsi_cmnd *);
+44 -36
drivers/s390/scsi/zfcp_fsf.c
··· 388 case FSF_PROT_LINK_DOWN: 389 zfcp_fsf_link_down_info_eval(adapter, 390 &prot_status_qual->link_down_info); 391 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 392 break; 393 ··· 559 560 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); 561 562 - if (link_down == NULL) { 563 - zfcp_erp_adapter_reopen(adapter, 0); 564 - return; 565 - } 566 567 switch (link_down->error_code) { 568 case FSF_PSQ_LINK_NO_LIGHT: ··· 642 link_down->explanation_code, 643 link_down->vendor_specific_code); 644 645 - switch (link_down->error_code) { 646 - case FSF_PSQ_LINK_NO_LIGHT: 647 - case FSF_PSQ_LINK_WRAP_PLUG: 648 - case FSF_PSQ_LINK_NO_FCP: 649 - case FSF_PSQ_LINK_FIRMWARE_UPDATE: 650 - zfcp_erp_adapter_reopen(adapter, 0); 651 - break; 652 - default: 653 - zfcp_erp_adapter_failed(adapter); 654 - } 655 } 656 657 /* ··· 2295 return retval; 2296 } 2297 2298 2299 /** 2300 * zfcp_fsf_exchange_port_data_handler - handler for exchange_port_data request ··· 2332 static void 2333 zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *fsf_req) 2334 { 2335 - struct zfcp_adapter *adapter = fsf_req->adapter; 2336 - struct Scsi_Host *shost = adapter->scsi_host; 2337 - struct fsf_qtcb *qtcb = fsf_req->qtcb; 2338 - struct fsf_qtcb_bottom_port *bottom, *data; 2339 2340 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) 2341 return; 2342 2343 switch (qtcb->header.fsf_status) { 2344 case FSF_GOOD: 2345 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); 2346 - 2347 - bottom = &qtcb->bottom.port; 2348 - data = (struct fsf_qtcb_bottom_port*) fsf_req->data; 2349 - if (data) 2350 - memcpy(data, bottom, sizeof(struct fsf_qtcb_bottom_port)); 2351 - if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) 2352 - fc_host_permanent_port_name(shost) = bottom->wwpn; 2353 - else 2354 - fc_host_permanent_port_name(shost) = 2355 - fc_host_port_name(shost); 2356 - fc_host_maxframe_size(shost) = bottom->maximum_frame_size; 2357 - fc_host_supported_speeds(shost) = bottom->supported_speed; 2358 break; 2359 - 2360 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: 2361 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); 2362 - 2363 zfcp_fsf_link_down_info_eval(adapter, 2364 &qtcb->header.fsf_status_qual.link_down_info); 2365 break; 2366 - 2367 default: 2368 debug_text_event(adapter->erp_dbf, 0, "xchg-port-ng"); 2369 debug_event(adapter->erp_dbf, 0, ··· 4211 ZFCP_LOG_DEBUG("scpnt->result =0x%x\n", scpnt->result); 4212 4213 if (scpnt->result != 0) 4214 - zfcp_scsi_dbf_event_result("erro", 3, fsf_req->adapter, scpnt); 4215 else if (scpnt->retries > 0) 4216 - zfcp_scsi_dbf_event_result("retr", 4, fsf_req->adapter, scpnt); 4217 else 4218 - zfcp_scsi_dbf_event_result("norm", 6, fsf_req->adapter, scpnt); 4219 4220 /* cleanup pointer (need this especially for abort) */ 4221 scpnt->host_scribble = NULL;
··· 388 case FSF_PROT_LINK_DOWN: 389 zfcp_fsf_link_down_info_eval(adapter, 390 &prot_status_qual->link_down_info); 391 + zfcp_erp_adapter_reopen(adapter, 0); 392 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 393 break; 394 ··· 558 559 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); 560 561 + if (link_down == NULL) 562 + goto out; 563 564 switch (link_down->error_code) { 565 case FSF_PSQ_LINK_NO_LIGHT: ··· 643 link_down->explanation_code, 644 link_down->vendor_specific_code); 645 646 + out: 647 + zfcp_erp_adapter_failed(adapter); 648 } 649 650 /* ··· 2304 return retval; 2305 } 2306 2307 + /** 2308 + * zfcp_fsf_exchange_port_evaluate 2309 + * @fsf_req: fsf_req which belongs to xchg port data request 2310 + * @xchg_ok: specifies if xchg port data was incomplete or complete (0/1) 2311 + */ 2312 + static void 2313 + zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok) 2314 + { 2315 + struct zfcp_adapter *adapter; 2316 + struct fsf_qtcb *qtcb; 2317 + struct fsf_qtcb_bottom_port *bottom, *data; 2318 + struct Scsi_Host *shost; 2319 + 2320 + adapter = fsf_req->adapter; 2321 + qtcb = fsf_req->qtcb; 2322 + bottom = &qtcb->bottom.port; 2323 + shost = adapter->scsi_host; 2324 + 2325 + data = (struct fsf_qtcb_bottom_port*) fsf_req->data; 2326 + if (data) 2327 + memcpy(data, bottom, sizeof(struct fsf_qtcb_bottom_port)); 2328 + 2329 + if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) 2330 + fc_host_permanent_port_name(shost) = bottom->wwpn; 2331 + else 2332 + fc_host_permanent_port_name(shost) = fc_host_port_name(shost); 2333 + fc_host_maxframe_size(shost) = bottom->maximum_frame_size; 2334 + fc_host_supported_speeds(shost) = bottom->supported_speed; 2335 + } 2336 2337 /** 2338 * zfcp_fsf_exchange_port_data_handler - handler for exchange_port_data request ··· 2312 static void 2313 zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *fsf_req) 2314 { 2315 + struct zfcp_adapter *adapter; 2316 + struct fsf_qtcb *qtcb; 2317 + 2318 + adapter = fsf_req->adapter; 2319 + qtcb = fsf_req->qtcb; 2320 2321 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) 2322 return; 2323 2324 switch (qtcb->header.fsf_status) { 2325 case FSF_GOOD: 2326 + zfcp_fsf_exchange_port_evaluate(fsf_req, 1); 2327 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); 2328 break; 2329 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: 2330 + zfcp_fsf_exchange_port_evaluate(fsf_req, 0); 2331 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); 2332 zfcp_fsf_link_down_info_eval(adapter, 2333 &qtcb->header.fsf_status_qual.link_down_info); 2334 break; 2335 default: 2336 debug_text_event(adapter->erp_dbf, 0, "xchg-port-ng"); 2337 debug_event(adapter->erp_dbf, 0, ··· 4203 ZFCP_LOG_DEBUG("scpnt->result =0x%x\n", scpnt->result); 4204 4205 if (scpnt->result != 0) 4206 + zfcp_scsi_dbf_event_result("erro", 3, fsf_req->adapter, scpnt, fsf_req); 4207 else if (scpnt->retries > 0) 4208 + zfcp_scsi_dbf_event_result("retr", 4, fsf_req->adapter, scpnt, fsf_req); 4209 else 4210 + zfcp_scsi_dbf_event_result("norm", 6, fsf_req->adapter, scpnt, fsf_req); 4211 4212 /* cleanup pointer (need this especially for abort) */ 4213 scpnt->host_scribble = NULL;
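Note: zfcp_fsf_exchange_port_data_handler now funnels both the FSF_GOOD and the FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE branches through the new zfcp_fsf_exchange_port_evaluate helper, removing the duplicated copy-out code. A reduced sketch of that handler shape:

/* Both status branches share one evaluate helper; only the link-down
 * bookkeeping differs. Status names are the FSF ones. */
#include <stdio.h>

enum { FSF_GOOD, FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE };

static void exchange_port_evaluate(int xchg_ok)
{
    /* copy bottom.port into the caller's buffer, update fc_host_* */
    printf("evaluate(xchg_ok=%d)\n", xchg_ok);
}

static void exchange_port_data_handler(int fsf_status)
{
    switch (fsf_status) {
    case FSF_GOOD:
        exchange_port_evaluate(1);
        break;
    case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
        exchange_port_evaluate(0);
        /* plus link-down evaluation */
        break;
    }
}

int main(void)
{
    exchange_port_data_handler(FSF_GOOD);
    exchange_port_data_handler(FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE);
    return 0;
}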
+10 -5
drivers/s390/scsi/zfcp_scsi.c
··· 242 if ((scpnt->device != NULL) && (scpnt->device->host != NULL)) 243 zfcp_scsi_dbf_event_result("fail", 4, 244 (struct zfcp_adapter*) scpnt->device->host->hostdata[0], 245 - scpnt); 246 /* return directly */ 247 scpnt->scsi_done(scpnt); 248 } ··· 446 old_fsf_req = (struct zfcp_fsf_req *) scpnt->host_scribble; 447 if (!old_fsf_req) { 448 write_unlock_irqrestore(&adapter->abort_lock, flags); 449 - zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, new_fsf_req); 450 retval = SUCCESS; 451 goto out; 452 } ··· 460 adapter, unit, 0); 461 if (!new_fsf_req) { 462 ZFCP_LOG_INFO("error: initiation of Abort FCP Cmnd failed\n"); 463 retval = FAILED; 464 goto out; 465 } ··· 472 473 /* status should be valid since signals were not permitted */ 474 if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) { 475 - zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, new_fsf_req); 476 retval = SUCCESS; 477 } else if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) { 478 - zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, new_fsf_req); 479 retval = SUCCESS; 480 } else { 481 - zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, new_fsf_req); 482 retval = FAILED; 483 } 484 zfcp_fsf_req_free(new_fsf_req);
··· 242 if ((scpnt->device != NULL) && (scpnt->device->host != NULL)) 243 zfcp_scsi_dbf_event_result("fail", 4, 244 (struct zfcp_adapter*) scpnt->device->host->hostdata[0], 245 + scpnt, NULL); 246 /* return directly */ 247 scpnt->scsi_done(scpnt); 248 } ··· 446 old_fsf_req = (struct zfcp_fsf_req *) scpnt->host_scribble; 447 if (!old_fsf_req) { 448 write_unlock_irqrestore(&adapter->abort_lock, flags); 449 + zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, NULL); 450 retval = SUCCESS; 451 goto out; 452 } ··· 460 adapter, unit, 0); 461 if (!new_fsf_req) { 462 ZFCP_LOG_INFO("error: initiation of Abort FCP Cmnd failed\n"); 463 + zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL, 464 + old_fsf_req); 465 retval = FAILED; 466 goto out; 467 } ··· 470 471 /* status should be valid since signals were not permitted */ 472 if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) { 473 + zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, new_fsf_req, 474 + NULL); 475 retval = SUCCESS; 476 } else if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) { 477 + zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, new_fsf_req, 478 + NULL); 479 retval = SUCCESS; 480 } else { 481 + zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, new_fsf_req, 482 + NULL); 483 retval = FAILED; 484 } 485 zfcp_fsf_req_free(new_fsf_req);
-4
drivers/s390/scsi/zfcp_sysfs_adapter.c
··· 55 ZFCP_DEFINE_ADAPTER_ATTR(peer_wwnn, "0x%016llx\n", adapter->peer_wwnn); 56 ZFCP_DEFINE_ADAPTER_ATTR(peer_wwpn, "0x%016llx\n", adapter->peer_wwpn); 57 ZFCP_DEFINE_ADAPTER_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id); 58 - ZFCP_DEFINE_ADAPTER_ATTR(physical_wwpn, "0x%016llx\n", adapter->physical_wwpn); 59 - ZFCP_DEFINE_ADAPTER_ATTR(physical_s_id, "0x%06x\n", adapter->physical_s_id); 60 ZFCP_DEFINE_ADAPTER_ATTR(card_version, "0x%04x\n", adapter->hydra_version); 61 ZFCP_DEFINE_ADAPTER_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version); 62 ZFCP_DEFINE_ADAPTER_ATTR(hardware_version, "0x%08x\n", ··· 239 &dev_attr_peer_wwnn.attr, 240 &dev_attr_peer_wwpn.attr, 241 &dev_attr_peer_d_id.attr, 242 - &dev_attr_physical_wwpn.attr, 243 - &dev_attr_physical_s_id.attr, 244 &dev_attr_card_version.attr, 245 &dev_attr_lic_version.attr, 246 &dev_attr_status.attr,
··· 55 ZFCP_DEFINE_ADAPTER_ATTR(peer_wwnn, "0x%016llx\n", adapter->peer_wwnn); 56 ZFCP_DEFINE_ADAPTER_ATTR(peer_wwpn, "0x%016llx\n", adapter->peer_wwpn); 57 ZFCP_DEFINE_ADAPTER_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id); 58 ZFCP_DEFINE_ADAPTER_ATTR(card_version, "0x%04x\n", adapter->hydra_version); 59 ZFCP_DEFINE_ADAPTER_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version); 60 ZFCP_DEFINE_ADAPTER_ATTR(hardware_version, "0x%08x\n", ··· 241 &dev_attr_peer_wwnn.attr, 242 &dev_attr_peer_wwpn.attr, 243 &dev_attr_peer_d_id.attr, 244 &dev_attr_card_version.attr, 245 &dev_attr_lic_version.attr, 246 &dev_attr_status.attr,
+4 -3
drivers/scsi/3w-9xxx.c
··· 61 Add support for embedded firmware error strings. 62 2.26.02.003 - Correctly handle single sgl's with use_sg=1. 63 2.26.02.004 - Add support for 9550SX controllers. 64 */ 65 66 #include <linux/module.h> ··· 85 #include "3w-9xxx.h" 86 87 /* Globals */ 88 - #define TW_DRIVER_VERSION "2.26.02.004" 89 static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT]; 90 static unsigned int twa_device_extension_count; 91 static int twa_major = -1; ··· 1409 dma_addr_t mapping; 1410 struct scsi_cmnd *cmd = tw_dev->srb[request_id]; 1411 struct pci_dev *pdev = tw_dev->tw_pci_dev; 1412 - int retval = 0; 1413 1414 if (cmd->request_bufflen == 0) { 1415 retval = 0; ··· 1799 int i, sg_count; 1800 struct scsi_cmnd *srb = NULL; 1801 struct scatterlist *sglist = NULL; 1802 - u32 buffaddr = 0x0; 1803 int retval = 1; 1804 1805 if (tw_dev->srb[request_id]) {
··· 61 Add support for embedded firmware error strings. 62 2.26.02.003 - Correctly handle single sgl's with use_sg=1. 63 2.26.02.004 - Add support for 9550SX controllers. 64 + 2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher. 65 */ 66 67 #include <linux/module.h> ··· 84 #include "3w-9xxx.h" 85 86 /* Globals */ 87 + #define TW_DRIVER_VERSION "2.26.02.005" 88 static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT]; 89 static unsigned int twa_device_extension_count; 90 static int twa_major = -1; ··· 1408 dma_addr_t mapping; 1409 struct scsi_cmnd *cmd = tw_dev->srb[request_id]; 1410 struct pci_dev *pdev = tw_dev->tw_pci_dev; 1411 + dma_addr_t retval = 0; 1412 1413 if (cmd->request_bufflen == 0) { 1414 retval = 0; ··· 1798 int i, sg_count; 1799 struct scsi_cmnd *srb = NULL; 1800 struct scatterlist *sglist = NULL; 1801 + dma_addr_t buffaddr = 0x0; 1802 int retval = 1; 1803 1804 if (tw_dev->srb[request_id]) {
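Note: the 2.26.02.005 fix replaces int/u32 locals with dma_addr_t so single-buffer (use_sg == 0) mappings survive on systems with 4GB or more of memory. A self-contained demonstration of the truncation the old types caused, with dma_addr_t standing in as a 64-bit typedef:

/* Storing a 64-bit bus address in a 32-bit variable silently drops the
 * high bits once buffers land above 4GB, so the controller would DMA
 * to the wrong place. */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;                  /* 64-bit on large-memory systems */

int main(void)
{
    dma_addr_t mapping = 0x100001000ULL;      /* a mapping above 4GB */

    uint32_t truncated = (uint32_t)mapping;   /* the old int/u32 locals */
    dma_addr_t kept = mapping;                /* the fixed code */

    printf("real  0x%llx\n", (unsigned long long)mapping);
    printf("u32   0x%x   <- high bits lost\n", truncated);
    printf("fixed 0x%llx\n", (unsigned long long)kept);
    return 0;
}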
+85 -132
drivers/scsi/aacraid/aachba.c
··· 173 int status = 0; 174 struct fib * fibptr; 175 176 - if (!(fibptr = fib_alloc(dev))) 177 return -ENOMEM; 178 179 - fib_init(fibptr); 180 { 181 struct aac_get_config_status *dinfo; 182 dinfo = (struct aac_get_config_status *) fib_data(fibptr); ··· 186 dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data)); 187 } 188 189 - status = fib_send(ContainerCommand, 190 fibptr, 191 sizeof (struct aac_get_config_status), 192 FsaNormal, ··· 209 status = -EINVAL; 210 } 211 } 212 - fib_complete(fibptr); 213 /* Send a CT_COMMIT_CONFIG to enable discovery of devices */ 214 if (status >= 0) { 215 if (commit == 1) { 216 struct aac_commit_config * dinfo; 217 - fib_init(fibptr); 218 dinfo = (struct aac_commit_config *) fib_data(fibptr); 219 220 dinfo->command = cpu_to_le32(VM_ContainerConfig); 221 dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG); 222 223 - status = fib_send(ContainerCommand, 224 fibptr, 225 sizeof (struct aac_commit_config), 226 FsaNormal, 227 1, 1, 228 NULL, NULL); 229 - fib_complete(fibptr); 230 } else if (commit == 0) { 231 printk(KERN_WARNING 232 "aac_get_config_status: Foreign device configurations are being ignored\n"); 233 } 234 } 235 - fib_free(fibptr); 236 return status; 237 } 238 ··· 255 256 instance = dev->scsi_host_ptr->unique_id; 257 258 - if (!(fibptr = fib_alloc(dev))) 259 return -ENOMEM; 260 261 - fib_init(fibptr); 262 dinfo = (struct aac_get_container_count *) fib_data(fibptr); 263 dinfo->command = cpu_to_le32(VM_ContainerConfig); 264 dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT); 265 266 - status = fib_send(ContainerCommand, 267 fibptr, 268 sizeof (struct aac_get_container_count), 269 FsaNormal, ··· 272 if (status >= 0) { 273 dresp = (struct aac_get_container_count_resp *)fib_data(fibptr); 274 maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries); 275 - fib_complete(fibptr); 276 } 277 278 if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS) ··· 280 fsa_dev_ptr = (struct fsa_dev_info *) kmalloc( 281 sizeof(*fsa_dev_ptr) * maximum_num_containers, GFP_KERNEL); 282 if (!fsa_dev_ptr) { 283 - fib_free(fibptr); 284 return -ENOMEM; 285 } 286 memset(fsa_dev_ptr, 0, sizeof(*fsa_dev_ptr) * maximum_num_containers); ··· 294 295 fsa_dev_ptr[index].devname[0] = '\0'; 296 297 - fib_init(fibptr); 298 dinfo = (struct aac_query_mount *) fib_data(fibptr); 299 300 dinfo->command = cpu_to_le32(VM_NameServe); 301 dinfo->count = cpu_to_le32(index); 302 dinfo->type = cpu_to_le32(FT_FILESYS); 303 304 - status = fib_send(ContainerCommand, 305 fibptr, 306 sizeof (struct aac_query_mount), 307 FsaNormal, ··· 319 dinfo->count = cpu_to_le32(index); 320 dinfo->type = cpu_to_le32(FT_FILESYS); 321 322 - if (fib_send(ContainerCommand, 323 fibptr, 324 sizeof(struct aac_query_mount), 325 FsaNormal, ··· 347 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) 348 fsa_dev_ptr[index].ro = 1; 349 } 350 - fib_complete(fibptr); 351 /* 352 * If there are no more containers, then stop asking. 
353 */ ··· 355 break; 356 } 357 } 358 - fib_free(fibptr); 359 return status; 360 } 361 ··· 413 414 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 415 416 - fib_complete(fibptr); 417 - fib_free(fibptr); 418 scsicmd->scsi_done(scsicmd); 419 } 420 ··· 430 431 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 432 433 - if (!(cmd_fibcontext = fib_alloc(dev))) 434 return -ENOMEM; 435 436 - fib_init(cmd_fibcontext); 437 dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext); 438 439 dinfo->command = cpu_to_le32(VM_ContainerConfig); ··· 441 dinfo->cid = cpu_to_le32(cid); 442 dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data)); 443 444 - status = fib_send(ContainerCommand, 445 cmd_fibcontext, 446 sizeof (struct aac_get_name), 447 FsaNormal, ··· 455 if (status == -EINPROGRESS) 456 return 0; 457 458 - printk(KERN_WARNING "aac_get_container_name: fib_send failed with status: %d.\n", status); 459 - fib_complete(cmd_fibcontext); 460 - fib_free(cmd_fibcontext); 461 return -1; 462 } 463 464 /** 465 - * probe_container - query a logical volume 466 * @dev: device to query 467 * @cid: container identifier 468 * ··· 470 * is updated in the struct fsa_dev_info structure rather than returned. 471 */ 472 473 - int probe_container(struct aac_dev *dev, int cid) 474 { 475 struct fsa_dev_info *fsa_dev_ptr; 476 int status; ··· 482 fsa_dev_ptr = dev->fsa_dev; 483 instance = dev->scsi_host_ptr->unique_id; 484 485 - if (!(fibptr = fib_alloc(dev))) 486 return -ENOMEM; 487 488 - fib_init(fibptr); 489 490 dinfo = (struct aac_query_mount *)fib_data(fibptr); 491 ··· 493 dinfo->count = cpu_to_le32(cid); 494 dinfo->type = cpu_to_le32(FT_FILESYS); 495 496 - status = fib_send(ContainerCommand, 497 fibptr, 498 sizeof(struct aac_query_mount), 499 FsaNormal, 500 1, 1, 501 NULL, NULL); 502 if (status < 0) { 503 - printk(KERN_WARNING "aacraid: probe_container query failed.\n"); 504 goto error; 505 } 506 ··· 512 dinfo->count = cpu_to_le32(cid); 513 dinfo->type = cpu_to_le32(FT_FILESYS); 514 515 - if (fib_send(ContainerCommand, 516 fibptr, 517 sizeof(struct aac_query_mount), 518 FsaNormal, ··· 535 } 536 537 error: 538 - fib_complete(fibptr); 539 - fib_free(fibptr); 540 541 return status; 542 } ··· 700 struct aac_bus_info *command; 701 struct aac_bus_info_response *bus_info; 702 703 - if (!(fibptr = fib_alloc(dev))) 704 return -ENOMEM; 705 706 - fib_init(fibptr); 707 info = (struct aac_adapter_info *) fib_data(fibptr); 708 memset(info,0,sizeof(*info)); 709 710 - rcode = fib_send(RequestAdapterInfo, 711 fibptr, 712 sizeof(*info), 713 FsaNormal, ··· 716 NULL); 717 718 if (rcode < 0) { 719 - fib_complete(fibptr); 720 - fib_free(fibptr); 721 return rcode; 722 } 723 memcpy(&dev->adapter_info, info, sizeof(*info)); ··· 725 if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) { 726 struct aac_supplement_adapter_info * info; 727 728 - fib_init(fibptr); 729 730 info = (struct aac_supplement_adapter_info *) fib_data(fibptr); 731 732 memset(info,0,sizeof(*info)); 733 734 - rcode = fib_send(RequestSupplementAdapterInfo, 735 fibptr, 736 sizeof(*info), 737 FsaNormal, ··· 748 * GetBusInfo 749 */ 750 751 - fib_init(fibptr); 752 753 bus_info = (struct aac_bus_info_response *) fib_data(fibptr); 754 ··· 761 command->MethodId = cpu_to_le32(1); 762 command->CtlCmd = cpu_to_le32(GetBusInfo); 763 764 - rcode = fib_send(ContainerCommand, 765 fibptr, 766 sizeof (*bus_info), 767 FsaNormal, ··· 891 } 892 } 893 894 - fib_complete(fibptr); 895 - fib_free(fibptr); 896 897 return rcode; 898 } ··· 
976 ? sizeof(scsicmd->sense_buffer) 977 : sizeof(dev->fsa_dev[cid].sense_data)); 978 } 979 - fib_complete(fibptr); 980 - fib_free(fibptr); 981 982 scsicmd->scsi_done(scsicmd); 983 } ··· 1062 /* 1063 * Alocate and initialize a Fib 1064 */ 1065 - if (!(cmd_fibcontext = fib_alloc(dev))) { 1066 return -1; 1067 } 1068 1069 - fib_init(cmd_fibcontext); 1070 1071 if (dev->raw_io_interface) { 1072 struct aac_raw_io *readcmd; ··· 1086 /* 1087 * Now send the Fib to the adapter 1088 */ 1089 - status = fib_send(ContainerRawIo, 1090 cmd_fibcontext, 1091 fibsize, 1092 FsaNormal, ··· 1112 /* 1113 * Now send the Fib to the adapter 1114 */ 1115 - status = fib_send(ContainerCommand64, 1116 cmd_fibcontext, 1117 fibsize, 1118 FsaNormal, ··· 1136 /* 1137 * Now send the Fib to the adapter 1138 */ 1139 - status = fib_send(ContainerCommand, 1140 cmd_fibcontext, 1141 fibsize, 1142 FsaNormal, ··· 1153 if (status == -EINPROGRESS) 1154 return 0; 1155 1156 - printk(KERN_WARNING "aac_read: fib_send failed with status: %d.\n", status); 1157 /* 1158 * For some reason, the Fib didn't queue, return QUEUE_FULL 1159 */ 1160 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL; 1161 scsicmd->scsi_done(scsicmd); 1162 - fib_complete(cmd_fibcontext); 1163 - fib_free(cmd_fibcontext); 1164 return 0; 1165 } 1166 ··· 1228 /* 1229 * Allocate and initialize a Fib then setup a BlockWrite command 1230 */ 1231 - if (!(cmd_fibcontext = fib_alloc(dev))) { 1232 scsicmd->result = DID_ERROR << 16; 1233 scsicmd->scsi_done(scsicmd); 1234 return 0; 1235 } 1236 - fib_init(cmd_fibcontext); 1237 1238 if (dev->raw_io_interface) { 1239 struct aac_raw_io *writecmd; ··· 1253 /* 1254 * Now send the Fib to the adapter 1255 */ 1256 - status = fib_send(ContainerRawIo, 1257 cmd_fibcontext, 1258 fibsize, 1259 FsaNormal, ··· 1279 /* 1280 * Now send the Fib to the adapter 1281 */ 1282 - status = fib_send(ContainerCommand64, 1283 cmd_fibcontext, 1284 fibsize, 1285 FsaNormal, ··· 1305 /* 1306 * Now send the Fib to the adapter 1307 */ 1308 - status = fib_send(ContainerCommand, 1309 cmd_fibcontext, 1310 fibsize, 1311 FsaNormal, ··· 1322 return 0; 1323 } 1324 1325 - printk(KERN_WARNING "aac_write: fib_send failed with status: %d\n", status); 1326 /* 1327 * For some reason, the Fib didn't queue, return QUEUE_FULL 1328 */ 1329 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL; 1330 scsicmd->scsi_done(scsicmd); 1331 1332 - fib_complete(cmd_fibcontext); 1333 - fib_free(cmd_fibcontext); 1334 return 0; 1335 } 1336 ··· 1369 sizeof(cmd->sense_buffer))); 1370 } 1371 1372 - fib_complete(fibptr); 1373 - fib_free(fibptr); 1374 cmd->scsi_done(cmd); 1375 } 1376 ··· 1407 * Allocate and initialize a Fib 1408 */ 1409 if (!(cmd_fibcontext = 1410 - fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) 1411 return SCSI_MLQUEUE_HOST_BUSY; 1412 1413 - fib_init(cmd_fibcontext); 1414 1415 synchronizecmd = fib_data(cmd_fibcontext); 1416 synchronizecmd->command = cpu_to_le32(VM_ContainerConfig); ··· 1422 /* 1423 * Now send the Fib to the adapter 1424 */ 1425 - status = fib_send(ContainerCommand, 1426 cmd_fibcontext, 1427 sizeof(struct aac_synchronize), 1428 FsaNormal, ··· 1437 return 0; 1438 1439 printk(KERN_WARNING 1440 - "aac_synchronize: fib_send failed with status: %d.\n", status); 1441 - fib_complete(cmd_fibcontext); 1442 - fib_free(cmd_fibcontext); 1443 return SCSI_MLQUEUE_HOST_BUSY; 1444 } 1445 ··· 1465 * itself. 
1466 */ 1467 if (scmd_id(scsicmd) != host->this_id) { 1468 - if ((scsicmd->device->channel == 0) ){ 1469 if( (scsicmd->device->id >= dev->maximum_num_containers) || (scsicmd->device->lun != 0)){ 1470 scsicmd->result = DID_NO_CONNECT << 16; 1471 scsicmd->scsi_done(scsicmd); ··· 1488 case READ_CAPACITY: 1489 case TEST_UNIT_READY: 1490 spin_unlock_irq(host->host_lock); 1491 - probe_container(dev, cid); 1492 if ((fsa_dev_ptr[cid].valid & 1) == 0) 1493 fsa_dev_ptr[cid].valid = 0; 1494 spin_lock_irq(host->host_lock); ··· 1935 case SRB_STATUS_ERROR_RECOVERY: 1936 case SRB_STATUS_PENDING: 1937 case SRB_STATUS_SUCCESS: 1938 - if(scsicmd->cmnd[0] == INQUIRY ){ 1939 - u8 b; 1940 - u8 b1; 1941 - /* We can't expose disk devices because we can't tell whether they 1942 - * are the raw container drives or stand alone drives. If they have 1943 - * the removable bit set then we should expose them though. 1944 - */ 1945 - b = (*(u8*)scsicmd->buffer)&0x1f; 1946 - b1 = ((u8*)scsicmd->buffer)[1]; 1947 - if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER 1948 - || (b==TYPE_DISK && (b1&0x80)) ){ 1949 - scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; 1950 - /* 1951 - * We will allow disk devices if in RAID/SCSI mode and 1952 - * the channel is 2 1953 - */ 1954 - } else if ((dev->raid_scsi_mode) && 1955 - (scmd_channel(scsicmd) == 2)) { 1956 - scsicmd->result = DID_OK << 16 | 1957 - COMMAND_COMPLETE << 8; 1958 - } else { 1959 - scsicmd->result = DID_NO_CONNECT << 16 | 1960 - COMMAND_COMPLETE << 8; 1961 - } 1962 - } else { 1963 - scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; 1964 - } 1965 break; 1966 case SRB_STATUS_DATA_OVERRUN: 1967 switch(scsicmd->cmnd[0]){ ··· 1955 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8; 1956 break; 1957 case INQUIRY: { 1958 - u8 b; 1959 - u8 b1; 1960 - /* We can't expose disk devices because we can't tell whether they 1961 - * are the raw container drives or stand alone drives 1962 - */ 1963 - b = (*(u8*)scsicmd->buffer)&0x0f; 1964 - b1 = ((u8*)scsicmd->buffer)[1]; 1965 - if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER 1966 - || (b==TYPE_DISK && (b1&0x80)) ){ 1967 - scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; 1968 - /* 1969 - * We will allow disk devices if in RAID/SCSI mode and 1970 - * the channel is 2 1971 - */ 1972 - } else if ((dev->raid_scsi_mode) && 1973 - (scmd_channel(scsicmd) == 2)) { 1974 - scsicmd->result = DID_OK << 16 | 1975 - COMMAND_COMPLETE << 8; 1976 - } else { 1977 - scsicmd->result = DID_NO_CONNECT << 16 | 1978 - COMMAND_COMPLETE << 8; 1979 - } 1980 break; 1981 } 1982 default: ··· 2042 */ 2043 scsicmd->result |= le32_to_cpu(srbreply->scsi_status); 2044 2045 - fib_complete(fibptr); 2046 - fib_free(fibptr); 2047 scsicmd->scsi_done(scsicmd); 2048 } 2049 ··· 2095 /* 2096 * Allocate and initialize a Fib then setup a BlockWrite command 2097 */ 2098 - if (!(cmd_fibcontext = fib_alloc(dev))) { 2099 return -1; 2100 } 2101 - fib_init(cmd_fibcontext); 2102 2103 srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext); 2104 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); ··· 2132 /* 2133 * Now send the Fib to the adapter 2134 */ 2135 - status = fib_send(ScsiPortCommand64, cmd_fibcontext, 2136 fibsize, FsaNormal, 0, 1, 2137 (fib_callback) aac_srb_callback, 2138 (void *) scsicmd); ··· 2154 /* 2155 * Now send the Fib to the adapter 2156 */ 2157 - status = fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, FsaNormal, 0, 1, 2158 (fib_callback) aac_srb_callback, (void *) 
scsicmd); 2159 } 2160 /* ··· 2164 return 0; 2165 } 2166 2167 - printk(KERN_WARNING "aac_srb: fib_send failed with status: %d\n", status); 2168 - fib_complete(cmd_fibcontext); 2169 - fib_free(cmd_fibcontext); 2170 2171 return -1; 2172 }
··· 173 int status = 0; 174 struct fib * fibptr; 175 176 + if (!(fibptr = aac_fib_alloc(dev))) 177 return -ENOMEM; 178 179 + aac_fib_init(fibptr); 180 { 181 struct aac_get_config_status *dinfo; 182 dinfo = (struct aac_get_config_status *) fib_data(fibptr); ··· 186 dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data)); 187 } 188 189 + status = aac_fib_send(ContainerCommand, 190 fibptr, 191 sizeof (struct aac_get_config_status), 192 FsaNormal, ··· 209 status = -EINVAL; 210 } 211 } 212 + aac_fib_complete(fibptr); 213 /* Send a CT_COMMIT_CONFIG to enable discovery of devices */ 214 if (status >= 0) { 215 if (commit == 1) { 216 struct aac_commit_config * dinfo; 217 + aac_fib_init(fibptr); 218 dinfo = (struct aac_commit_config *) fib_data(fibptr); 219 220 dinfo->command = cpu_to_le32(VM_ContainerConfig); 221 dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG); 222 223 + status = aac_fib_send(ContainerCommand, 224 fibptr, 225 sizeof (struct aac_commit_config), 226 FsaNormal, 227 1, 1, 228 NULL, NULL); 229 + aac_fib_complete(fibptr); 230 } else if (commit == 0) { 231 printk(KERN_WARNING 232 "aac_get_config_status: Foreign device configurations are being ignored\n"); 233 } 234 } 235 + aac_fib_free(fibptr); 236 return status; 237 } 238 ··· 255 256 instance = dev->scsi_host_ptr->unique_id; 257 258 + if (!(fibptr = aac_fib_alloc(dev))) 259 return -ENOMEM; 260 261 + aac_fib_init(fibptr); 262 dinfo = (struct aac_get_container_count *) fib_data(fibptr); 263 dinfo->command = cpu_to_le32(VM_ContainerConfig); 264 dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT); 265 266 + status = aac_fib_send(ContainerCommand, 267 fibptr, 268 sizeof (struct aac_get_container_count), 269 FsaNormal, ··· 272 if (status >= 0) { 273 dresp = (struct aac_get_container_count_resp *)fib_data(fibptr); 274 maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries); 275 + aac_fib_complete(fibptr); 276 } 277 278 if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS) ··· 280 fsa_dev_ptr = (struct fsa_dev_info *) kmalloc( 281 sizeof(*fsa_dev_ptr) * maximum_num_containers, GFP_KERNEL); 282 if (!fsa_dev_ptr) { 283 + aac_fib_free(fibptr); 284 return -ENOMEM; 285 } 286 memset(fsa_dev_ptr, 0, sizeof(*fsa_dev_ptr) * maximum_num_containers); ··· 294 295 fsa_dev_ptr[index].devname[0] = '\0'; 296 297 + aac_fib_init(fibptr); 298 dinfo = (struct aac_query_mount *) fib_data(fibptr); 299 300 dinfo->command = cpu_to_le32(VM_NameServe); 301 dinfo->count = cpu_to_le32(index); 302 dinfo->type = cpu_to_le32(FT_FILESYS); 303 304 + status = aac_fib_send(ContainerCommand, 305 fibptr, 306 sizeof (struct aac_query_mount), 307 FsaNormal, ··· 319 dinfo->count = cpu_to_le32(index); 320 dinfo->type = cpu_to_le32(FT_FILESYS); 321 322 + if (aac_fib_send(ContainerCommand, 323 fibptr, 324 sizeof(struct aac_query_mount), 325 FsaNormal, ··· 347 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) 348 fsa_dev_ptr[index].ro = 1; 349 } 350 + aac_fib_complete(fibptr); 351 /* 352 * If there are no more containers, then stop asking. 
353 */ ··· 355 break; 356 } 357 } 358 + aac_fib_free(fibptr); 359 return status; 360 } 361 ··· 413 414 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 415 416 + aac_fib_complete(fibptr); 417 + aac_fib_free(fibptr); 418 scsicmd->scsi_done(scsicmd); 419 } 420 ··· 430 431 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 432 433 + if (!(cmd_fibcontext = aac_fib_alloc(dev))) 434 return -ENOMEM; 435 436 + aac_fib_init(cmd_fibcontext); 437 dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext); 438 439 dinfo->command = cpu_to_le32(VM_ContainerConfig); ··· 441 dinfo->cid = cpu_to_le32(cid); 442 dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data)); 443 444 + status = aac_fib_send(ContainerCommand, 445 cmd_fibcontext, 446 sizeof (struct aac_get_name), 447 FsaNormal, ··· 455 if (status == -EINPROGRESS) 456 return 0; 457 458 + printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status); 459 + aac_fib_complete(cmd_fibcontext); 460 + aac_fib_free(cmd_fibcontext); 461 return -1; 462 } 463 464 /** 465 + * aac_probe_container - query a logical volume 466 * @dev: device to query 467 * @cid: container identifier 468 * ··· 470 * is updated in the struct fsa_dev_info structure rather than returned. 471 */ 472 473 + int aac_probe_container(struct aac_dev *dev, int cid) 474 { 475 struct fsa_dev_info *fsa_dev_ptr; 476 int status; ··· 482 fsa_dev_ptr = dev->fsa_dev; 483 instance = dev->scsi_host_ptr->unique_id; 484 485 + if (!(fibptr = aac_fib_alloc(dev))) 486 return -ENOMEM; 487 488 + aac_fib_init(fibptr); 489 490 dinfo = (struct aac_query_mount *)fib_data(fibptr); 491 ··· 493 dinfo->count = cpu_to_le32(cid); 494 dinfo->type = cpu_to_le32(FT_FILESYS); 495 496 + status = aac_fib_send(ContainerCommand, 497 fibptr, 498 sizeof(struct aac_query_mount), 499 FsaNormal, 500 1, 1, 501 NULL, NULL); 502 if (status < 0) { 503 + printk(KERN_WARNING "aacraid: aac_probe_container query failed.\n"); 504 goto error; 505 } 506 ··· 512 dinfo->count = cpu_to_le32(cid); 513 dinfo->type = cpu_to_le32(FT_FILESYS); 514 515 + if (aac_fib_send(ContainerCommand, 516 fibptr, 517 sizeof(struct aac_query_mount), 518 FsaNormal, ··· 535 } 536 537 error: 538 + aac_fib_complete(fibptr); 539 + aac_fib_free(fibptr); 540 541 return status; 542 } ··· 700 struct aac_bus_info *command; 701 struct aac_bus_info_response *bus_info; 702 703 + if (!(fibptr = aac_fib_alloc(dev))) 704 return -ENOMEM; 705 706 + aac_fib_init(fibptr); 707 info = (struct aac_adapter_info *) fib_data(fibptr); 708 memset(info,0,sizeof(*info)); 709 710 + rcode = aac_fib_send(RequestAdapterInfo, 711 fibptr, 712 sizeof(*info), 713 FsaNormal, ··· 716 NULL); 717 718 if (rcode < 0) { 719 + aac_fib_complete(fibptr); 720 + aac_fib_free(fibptr); 721 return rcode; 722 } 723 memcpy(&dev->adapter_info, info, sizeof(*info)); ··· 725 if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) { 726 struct aac_supplement_adapter_info * info; 727 728 + aac_fib_init(fibptr); 729 730 info = (struct aac_supplement_adapter_info *) fib_data(fibptr); 731 732 memset(info,0,sizeof(*info)); 733 734 + rcode = aac_fib_send(RequestSupplementAdapterInfo, 735 fibptr, 736 sizeof(*info), 737 FsaNormal, ··· 748 * GetBusInfo 749 */ 750 751 + aac_fib_init(fibptr); 752 753 bus_info = (struct aac_bus_info_response *) fib_data(fibptr); 754 ··· 761 command->MethodId = cpu_to_le32(1); 762 command->CtlCmd = cpu_to_le32(GetBusInfo); 763 764 + rcode = aac_fib_send(ContainerCommand, 765 fibptr, 766 sizeof (*bus_info), 767 
FsaNormal, ··· 891 } 892 } 893 894 + aac_fib_complete(fibptr); 895 + aac_fib_free(fibptr); 896 897 return rcode; 898 } ··· 976 ? sizeof(scsicmd->sense_buffer) 977 : sizeof(dev->fsa_dev[cid].sense_data)); 978 } 979 + aac_fib_complete(fibptr); 980 + aac_fib_free(fibptr); 981 982 scsicmd->scsi_done(scsicmd); 983 } ··· 1062 /* 1063 * Alocate and initialize a Fib 1064 */ 1065 + if (!(cmd_fibcontext = aac_fib_alloc(dev))) { 1066 return -1; 1067 } 1068 1069 + aac_fib_init(cmd_fibcontext); 1070 1071 if (dev->raw_io_interface) { 1072 struct aac_raw_io *readcmd; ··· 1086 /* 1087 * Now send the Fib to the adapter 1088 */ 1089 + status = aac_fib_send(ContainerRawIo, 1090 cmd_fibcontext, 1091 fibsize, 1092 FsaNormal, ··· 1112 /* 1113 * Now send the Fib to the adapter 1114 */ 1115 + status = aac_fib_send(ContainerCommand64, 1116 cmd_fibcontext, 1117 fibsize, 1118 FsaNormal, ··· 1136 /* 1137 * Now send the Fib to the adapter 1138 */ 1139 + status = aac_fib_send(ContainerCommand, 1140 cmd_fibcontext, 1141 fibsize, 1142 FsaNormal, ··· 1153 if (status == -EINPROGRESS) 1154 return 0; 1155 1156 + printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status); 1157 /* 1158 * For some reason, the Fib didn't queue, return QUEUE_FULL 1159 */ 1160 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL; 1161 scsicmd->scsi_done(scsicmd); 1162 + aac_fib_complete(cmd_fibcontext); 1163 + aac_fib_free(cmd_fibcontext); 1164 return 0; 1165 } 1166 ··· 1228 /* 1229 * Allocate and initialize a Fib then setup a BlockWrite command 1230 */ 1231 + if (!(cmd_fibcontext = aac_fib_alloc(dev))) { 1232 scsicmd->result = DID_ERROR << 16; 1233 scsicmd->scsi_done(scsicmd); 1234 return 0; 1235 } 1236 + aac_fib_init(cmd_fibcontext); 1237 1238 if (dev->raw_io_interface) { 1239 struct aac_raw_io *writecmd; ··· 1253 /* 1254 * Now send the Fib to the adapter 1255 */ 1256 + status = aac_fib_send(ContainerRawIo, 1257 cmd_fibcontext, 1258 fibsize, 1259 FsaNormal, ··· 1279 /* 1280 * Now send the Fib to the adapter 1281 */ 1282 + status = aac_fib_send(ContainerCommand64, 1283 cmd_fibcontext, 1284 fibsize, 1285 FsaNormal, ··· 1305 /* 1306 * Now send the Fib to the adapter 1307 */ 1308 + status = aac_fib_send(ContainerCommand, 1309 cmd_fibcontext, 1310 fibsize, 1311 FsaNormal, ··· 1322 return 0; 1323 } 1324 1325 + printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status); 1326 /* 1327 * For some reason, the Fib didn't queue, return QUEUE_FULL 1328 */ 1329 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL; 1330 scsicmd->scsi_done(scsicmd); 1331 1332 + aac_fib_complete(cmd_fibcontext); 1333 + aac_fib_free(cmd_fibcontext); 1334 return 0; 1335 } 1336 ··· 1369 sizeof(cmd->sense_buffer))); 1370 } 1371 1372 + aac_fib_complete(fibptr); 1373 + aac_fib_free(fibptr); 1374 cmd->scsi_done(cmd); 1375 } 1376 ··· 1407 * Allocate and initialize a Fib 1408 */ 1409 if (!(cmd_fibcontext = 1410 + aac_fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) 1411 return SCSI_MLQUEUE_HOST_BUSY; 1412 1413 + aac_fib_init(cmd_fibcontext); 1414 1415 synchronizecmd = fib_data(cmd_fibcontext); 1416 synchronizecmd->command = cpu_to_le32(VM_ContainerConfig); ··· 1422 /* 1423 * Now send the Fib to the adapter 1424 */ 1425 + status = aac_fib_send(ContainerCommand, 1426 cmd_fibcontext, 1427 sizeof(struct aac_synchronize), 1428 FsaNormal, ··· 1437 return 0; 1438 1439 printk(KERN_WARNING 1440 + "aac_synchronize: aac_fib_send failed with status: %d.\n", status); 1441 + 
aac_fib_complete(cmd_fibcontext); 1442 + aac_fib_free(cmd_fibcontext); 1443 return SCSI_MLQUEUE_HOST_BUSY; 1444 } 1445 ··· 1465 * itself. 1466 */ 1467 if (scmd_id(scsicmd) != host->this_id) { 1468 + if ((scsicmd->device->channel == CONTAINER_CHANNEL)) { 1469 if( (scsicmd->device->id >= dev->maximum_num_containers) || (scsicmd->device->lun != 0)){ 1470 scsicmd->result = DID_NO_CONNECT << 16; 1471 scsicmd->scsi_done(scsicmd); ··· 1488 case READ_CAPACITY: 1489 case TEST_UNIT_READY: 1490 spin_unlock_irq(host->host_lock); 1491 + aac_probe_container(dev, cid); 1492 if ((fsa_dev_ptr[cid].valid & 1) == 0) 1493 fsa_dev_ptr[cid].valid = 0; 1494 spin_lock_irq(host->host_lock); ··· 1935 case SRB_STATUS_ERROR_RECOVERY: 1936 case SRB_STATUS_PENDING: 1937 case SRB_STATUS_SUCCESS: 1938 + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; 1939 break; 1940 case SRB_STATUS_DATA_OVERRUN: 1941 switch(scsicmd->cmnd[0]){ ··· 1981 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8; 1982 break; 1983 case INQUIRY: { 1984 + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; 1985 break; 1986 } 1987 default: ··· 2089 */ 2090 scsicmd->result |= le32_to_cpu(srbreply->scsi_status); 2091 2092 + aac_fib_complete(fibptr); 2093 + aac_fib_free(fibptr); 2094 scsicmd->scsi_done(scsicmd); 2095 } 2096 ··· 2142 /* 2143 * Allocate and initialize a Fib then setup a BlockWrite command 2144 */ 2145 + if (!(cmd_fibcontext = aac_fib_alloc(dev))) { 2146 return -1; 2147 } 2148 + aac_fib_init(cmd_fibcontext); 2149 2150 srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext); 2151 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); ··· 2179 /* 2180 * Now send the Fib to the adapter 2181 */ 2182 + status = aac_fib_send(ScsiPortCommand64, cmd_fibcontext, 2183 fibsize, FsaNormal, 0, 1, 2184 (fib_callback) aac_srb_callback, 2185 (void *) scsicmd); ··· 2201 /* 2202 * Now send the Fib to the adapter 2203 */ 2204 + status = aac_fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, FsaNormal, 0, 1, 2205 (fib_callback) aac_srb_callback, (void *) scsicmd); 2206 } 2207 /* ··· 2211 return 0; 2212 } 2213 2214 + printk(KERN_WARNING "aac_srb: aac_fib_send failed with status: %d\n", status); 2215 + aac_fib_complete(cmd_fibcontext); 2216 + aac_fib_free(cmd_fibcontext); 2217 2218 return -1; 2219 }
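The aachba.c change is almost entirely the mechanical fib_* to aac_fib_* rename; the only behavioral edits are the CONTAINER_CHANNEL test in queuecommand and the removal of the INQUIRY device-type filtering from the SRB callback (that policy moves into aac_slave_configure() in linit.c below). A minimal sketch of the renamed FIB lifecycle as the container commands above use it, with the command struct borrowed from the hunks and error handling trimmed:

	struct fib *fibptr;
	int status;

	if (!(fibptr = aac_fib_alloc(dev)))	/* pool may be empty */
		return -ENOMEM;
	aac_fib_init(fibptr);			/* reset header state */
	/* ... build a request in fib_data(fibptr) ... */
	status = aac_fib_send(ContainerCommand, fibptr,
			      sizeof(struct aac_query_mount),
			      FsaNormal, 1, 1, NULL, NULL);	/* block for reply */
	if (status >= 0)
		aac_fib_complete(fibptr);	/* unwind reply state */
	aac_fib_free(fibptr);			/* return to the pool */
	return status;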
+9 -9
drivers/scsi/aacraid/aacraid.h
··· 1774 struct scsi_cmnd; 1775 1776 const char *aac_driverinfo(struct Scsi_Host *); 1777 - struct fib *fib_alloc(struct aac_dev *dev); 1778 - int fib_setup(struct aac_dev *dev); 1779 - void fib_map_free(struct aac_dev *dev); 1780 - void fib_free(struct fib * context); 1781 - void fib_init(struct fib * context); 1782 void aac_printf(struct aac_dev *dev, u32 val); 1783 - int fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt); 1784 int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry); 1785 void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum); 1786 - int fib_complete(struct fib * context); 1787 #define fib_data(fibctx) ((void *)(fibctx)->hw_fib->data) 1788 struct aac_dev *aac_init_adapter(struct aac_dev *dev); 1789 int aac_get_config_status(struct aac_dev *dev); ··· 1799 unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index); 1800 int aac_command_thread(struct aac_dev * dev); 1801 int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx); 1802 - int fib_adapter_complete(struct fib * fibptr, unsigned short size); 1803 struct aac_driver_ident* aac_get_driver_ident(int devtype); 1804 int aac_get_adapter_info(struct aac_dev* dev); 1805 int aac_send_shutdown(struct aac_dev *dev); 1806 - int probe_container(struct aac_dev *dev, int cid); 1807 extern int numacb; 1808 extern int acbsize; 1809 extern char aac_driver_version[];
··· 1774 struct scsi_cmnd; 1775 1776 const char *aac_driverinfo(struct Scsi_Host *); 1777 + struct fib *aac_fib_alloc(struct aac_dev *dev); 1778 + int aac_fib_setup(struct aac_dev *dev); 1779 + void aac_fib_map_free(struct aac_dev *dev); 1780 + void aac_fib_free(struct fib * context); 1781 + void aac_fib_init(struct fib * context); 1782 void aac_printf(struct aac_dev *dev, u32 val); 1783 + int aac_fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt); 1784 int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry); 1785 void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum); 1786 + int aac_fib_complete(struct fib * context); 1787 #define fib_data(fibctx) ((void *)(fibctx)->hw_fib->data) 1788 struct aac_dev *aac_init_adapter(struct aac_dev *dev); 1789 int aac_get_config_status(struct aac_dev *dev); ··· 1799 unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index); 1800 int aac_command_thread(struct aac_dev * dev); 1801 int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx); 1802 + int aac_fib_adapter_complete(struct fib * fibptr, unsigned short size); 1803 struct aac_driver_ident* aac_get_driver_ident(int devtype); 1804 int aac_get_adapter_info(struct aac_dev* dev); 1805 int aac_send_shutdown(struct aac_dev *dev); 1806 + int aac_probe_container(struct aac_dev *dev, int cid); 1807 extern int numacb; 1808 extern int acbsize; 1809 extern char aac_driver_version[];
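One name in this header deliberately keeps its old spelling: fib_data() is a macro, so it occupies no linker namespace and cannot clash with another driver's exported symbols the way the global fib_* functions could. A sketch of mixed usage under the new scheme, with the cast style taken from the aachba.c hunks:

	struct fib *fibptr = aac_fib_alloc(dev);	/* renamed function */
	struct aac_query_mount *dinfo;

	if (!fibptr)
		return -ENOMEM;
	aac_fib_init(fibptr);
	dinfo = (struct aac_query_mount *) fib_data(fibptr);	/* macro unchanged */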
+11 -11
drivers/scsi/aacraid/commctrl.c
··· 63 unsigned size; 64 int retval; 65 66 - fibptr = fib_alloc(dev); 67 if(fibptr == NULL) { 68 return -ENOMEM; 69 } ··· 73 * First copy in the header so that we can check the size field. 74 */ 75 if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) { 76 - fib_free(fibptr); 77 return -EFAULT; 78 } 79 /* ··· 110 */ 111 kfib->header.XferState = 0; 112 } else { 113 - retval = fib_send(le16_to_cpu(kfib->header.Command), fibptr, 114 le16_to_cpu(kfib->header.Size) , FsaNormal, 115 1, 1, NULL, NULL); 116 if (retval) { 117 goto cleanup; 118 } 119 - if (fib_complete(fibptr) != 0) { 120 retval = -EINVAL; 121 goto cleanup; 122 } ··· 138 fibptr->hw_fib_pa = hw_fib_pa; 139 fibptr->hw_fib = hw_fib; 140 } 141 - fib_free(fibptr); 142 return retval; 143 } 144 ··· 464 /* 465 * Allocate and initialize a Fib then setup a BlockWrite command 466 */ 467 - if (!(srbfib = fib_alloc(dev))) { 468 return -ENOMEM; 469 } 470 - fib_init(srbfib); 471 472 srbcmd = (struct aac_srb*) fib_data(srbfib); 473 ··· 601 602 srbcmd->count = cpu_to_le32(byte_count); 603 psg->count = cpu_to_le32(sg_indx+1); 604 - status = fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL); 605 } else { 606 struct user_sgmap* upsg = &user_srbcmd->sg; 607 struct sgmap* psg = &srbcmd->sg; ··· 649 } 650 srbcmd->count = cpu_to_le32(byte_count); 651 psg->count = cpu_to_le32(sg_indx+1); 652 - status = fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); 653 } 654 655 if (status != 0){ ··· 684 for(i=0; i <= sg_indx; i++){ 685 kfree(sg_list[i]); 686 } 687 - fib_complete(srbfib); 688 - fib_free(srbfib); 689 690 return rcode; 691 }
··· 63 unsigned size; 64 int retval; 65 66 + fibptr = aac_fib_alloc(dev); 67 if(fibptr == NULL) { 68 return -ENOMEM; 69 } ··· 73 * First copy in the header so that we can check the size field. 74 */ 75 if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) { 76 + aac_fib_free(fibptr); 77 return -EFAULT; 78 } 79 /* ··· 110 */ 111 kfib->header.XferState = 0; 112 } else { 113 + retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr, 114 le16_to_cpu(kfib->header.Size) , FsaNormal, 115 1, 1, NULL, NULL); 116 if (retval) { 117 goto cleanup; 118 } 119 + if (aac_fib_complete(fibptr) != 0) { 120 retval = -EINVAL; 121 goto cleanup; 122 } ··· 138 fibptr->hw_fib_pa = hw_fib_pa; 139 fibptr->hw_fib = hw_fib; 140 } 141 + aac_fib_free(fibptr); 142 return retval; 143 } 144 ··· 464 /* 465 * Allocate and initialize a Fib then setup a BlockWrite command 466 */ 467 + if (!(srbfib = aac_fib_alloc(dev))) { 468 return -ENOMEM; 469 } 470 + aac_fib_init(srbfib); 471 472 srbcmd = (struct aac_srb*) fib_data(srbfib); 473 ··· 601 602 srbcmd->count = cpu_to_le32(byte_count); 603 psg->count = cpu_to_le32(sg_indx+1); 604 + status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL); 605 } else { 606 struct user_sgmap* upsg = &user_srbcmd->sg; 607 struct sgmap* psg = &srbcmd->sg; ··· 649 } 650 srbcmd->count = cpu_to_le32(byte_count); 651 psg->count = cpu_to_le32(sg_indx+1); 652 + status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); 653 } 654 655 if (status != 0){ ··· 684 for(i=0; i <= sg_indx; i++){ 685 kfree(sg_list[i]); 686 } 687 + aac_fib_complete(srbfib); 688 + aac_fib_free(srbfib); 689 690 return rcode; 691 }
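ioctl_send_fib() above copies only sizeof(struct aac_fibhdr) from user space first, so the header's size field can be inspected before the rest of the buffer is trusted. A hedged sketch of that two-stage pattern; the actual bounds check sits in the elided lines 79-109, so the limit shown here is illustrative only:

	if (copy_from_user(kfib, arg, sizeof(struct aac_fibhdr)))
		return -EFAULT;			/* header unreadable */
	size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr);
	if (size > dev->max_fib_size)		/* illustrative limit */
		return -EINVAL;
	if (copy_from_user(kfib, arg, size))	/* now safe to copy it all */
		return -EFAULT;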
+6 -6
drivers/scsi/aacraid/comminit.c
··· 185 struct aac_close *cmd; 186 int status; 187 188 - fibctx = fib_alloc(dev); 189 if (!fibctx) 190 return -ENOMEM; 191 - fib_init(fibctx); 192 193 cmd = (struct aac_close *) fib_data(fibctx); 194 195 cmd->command = cpu_to_le32(VM_CloseAll); 196 cmd->cid = cpu_to_le32(0xffffffff); 197 198 - status = fib_send(ContainerCommand, 199 fibctx, 200 sizeof(struct aac_close), 201 FsaNormal, ··· 203 NULL, NULL); 204 205 if (status == 0) 206 - fib_complete(fibctx); 207 - fib_free(fibctx); 208 return status; 209 } 210 ··· 427 /* 428 * Initialize the list of fibs 429 */ 430 - if(fib_setup(dev)<0){ 431 kfree(dev->queues); 432 return NULL; 433 }
··· 185 struct aac_close *cmd; 186 int status; 187 188 + fibctx = aac_fib_alloc(dev); 189 if (!fibctx) 190 return -ENOMEM; 191 + aac_fib_init(fibctx); 192 193 cmd = (struct aac_close *) fib_data(fibctx); 194 195 cmd->command = cpu_to_le32(VM_CloseAll); 196 cmd->cid = cpu_to_le32(0xffffffff); 197 198 + status = aac_fib_send(ContainerCommand, 199 fibctx, 200 sizeof(struct aac_close), 201 FsaNormal, ··· 203 NULL, NULL); 204 205 if (status == 0) 206 + aac_fib_complete(fibctx); 207 + aac_fib_free(fibctx); 208 return status; 209 } 210 ··· 427 /* 428 * Initialize the list of fibs 429 */ 430 + if (aac_fib_setup(dev) < 0) { 431 kfree(dev->queues); 432 return NULL; 433 }
+26 -24
drivers/scsi/aacraid/commsup.c
··· 67 } 68 69 /** 70 - * fib_map_free - free the fib objects 71 * @dev: Adapter to free 72 * 73 * Free the PCI mappings and the memory allocated for FIB blocks 74 * on this adapter. 75 */ 76 77 - void fib_map_free(struct aac_dev *dev) 78 { 79 pci_free_consistent(dev->pdev, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB), dev->hw_fib_va, dev->hw_fib_pa); 80 } 81 82 /** 83 - * fib_setup - setup the fibs 84 * @dev: Adapter to set up 85 * 86 * Allocate the PCI space for the fibs, map it and then intialise the 87 * fib area, the unmapped fib data and also the free list 88 */ 89 90 - int fib_setup(struct aac_dev * dev) 91 { 92 struct fib *fibptr; 93 struct hw_fib *hw_fib_va; ··· 134 } 135 136 /** 137 - * fib_alloc - allocate a fib 138 * @dev: Adapter to allocate the fib for 139 * 140 * Allocate a fib from the adapter fib pool. If the pool is empty we 141 * return NULL. 142 */ 143 144 - struct fib * fib_alloc(struct aac_dev *dev) 145 { 146 struct fib * fibptr; 147 unsigned long flags; ··· 170 } 171 172 /** 173 - * fib_free - free a fib 174 * @fibptr: fib to free up 175 * 176 * Frees up a fib and places it on the appropriate queue 177 * (either free or timed out) 178 */ 179 180 - void fib_free(struct fib * fibptr) 181 { 182 unsigned long flags; 183 ··· 188 fibptr->dev->timeout_fib = fibptr; 189 } else { 190 if (fibptr->hw_fib->header.XferState != 0) { 191 - printk(KERN_WARNING "fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n", 192 (void*)fibptr, 193 le32_to_cpu(fibptr->hw_fib->header.XferState)); 194 } ··· 199 } 200 201 /** 202 - * fib_init - initialise a fib 203 * @fibptr: The fib to initialize 204 * 205 * Set up the generic fib fields ready for use 206 */ 207 208 - void fib_init(struct fib *fibptr) 209 { 210 struct hw_fib *hw_fib = fibptr->hw_fib; 211 ··· 362 */ 363 364 /** 365 - * fib_send - send a fib to the adapter 366 * @command: Command to send 367 * @fibptr: The fib 368 * @size: Size of fib data area ··· 378 * response FIB is received from the adapter. 379 */ 380 381 - int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data) 382 { 383 struct aac_dev * dev = fibptr->dev; 384 struct hw_fib * hw_fib = fibptr->hw_fib; ··· 495 q->numpending++; 496 *(q->headers.producer) = cpu_to_le32(index + 1); 497 spin_unlock_irqrestore(q->lock, qflags); 498 - dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index)); 499 if (!(nointr & aac_config.irq_mod)) 500 aac_adapter_notify(dev, AdapNormCmdQueue); 501 } ··· 522 list_del(&fibptr->queue); 523 spin_unlock_irqrestore(q->lock, qflags); 524 if (wait == -1) { 525 - printk(KERN_ERR "aacraid: fib_send: first asynchronous command timed out.\n" 526 "Usually a result of a PCI interrupt routing problem;\n" 527 "update mother board BIOS or consider utilizing one of\n" 528 "the SAFE mode kernel options (acpi, apic etc)\n"); ··· 626 } 627 628 /** 629 - * fib_adapter_complete - complete adapter issued fib 630 * @fibptr: fib to complete 631 * @size: size of fib 632 * ··· 634 * the adapter. 
635 */ 636 637 - int fib_adapter_complete(struct fib * fibptr, unsigned short size) 638 { 639 struct hw_fib * hw_fib = fibptr->hw_fib; 640 struct aac_dev * dev = fibptr->dev; ··· 685 } 686 else 687 { 688 - printk(KERN_WARNING "fib_adapter_complete: Unknown xferstate detected.\n"); 689 BUG(); 690 } 691 return 0; 692 } 693 694 /** 695 - * fib_complete - fib completion handler 696 * @fib: FIB to complete 697 * 698 * Will do all necessary work to complete a FIB. 699 */ 700 701 - int fib_complete(struct fib * fibptr) 702 { 703 struct hw_fib * hw_fib = fibptr->hw_fib; 704 ··· 997 if (!dev || !dev->scsi_host_ptr) 998 return; 999 /* 1000 - * force reload of disk info via probe_container 1001 */ 1002 if ((device_config_needed == CHANGE) 1003 && (dev->fsa_dev[container].valid == 1)) 1004 dev->fsa_dev[container].valid = 2; 1005 if ((device_config_needed == CHANGE) || 1006 (device_config_needed == ADD)) 1007 - probe_container(dev, container); 1008 device = scsi_device_lookup(dev->scsi_host_ptr, 1009 CONTAINER_TO_CHANNEL(container), 1010 CONTAINER_TO_ID(container), ··· 1106 /* Handle Driver Notify Events */ 1107 aac_handle_aif(dev, fib); 1108 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); 1109 - fib_adapter_complete(fib, (u16)sizeof(u32)); 1110 } else { 1111 struct list_head *entry; 1112 /* The u32 here is important and intended. We are using ··· 1243 * Set the status of this FIB 1244 */ 1245 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); 1246 - fib_adapter_complete(fib, sizeof(u32)); 1247 spin_unlock_irqrestore(&dev->fib_lock, flagv); 1248 /* Free up the remaining resources */ 1249 hw_fib_p = hw_fib_pool;
··· 67 } 68 69 /** 70 + * aac_fib_map_free - free the fib objects 71 * @dev: Adapter to free 72 * 73 * Free the PCI mappings and the memory allocated for FIB blocks 74 * on this adapter. 75 */ 76 77 + void aac_fib_map_free(struct aac_dev *dev) 78 { 79 pci_free_consistent(dev->pdev, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB), dev->hw_fib_va, dev->hw_fib_pa); 80 } 81 82 /** 83 + * aac_fib_setup - setup the fibs 84 * @dev: Adapter to set up 85 * 86 * Allocate the PCI space for the fibs, map it and then intialise the 87 * fib area, the unmapped fib data and also the free list 88 */ 89 90 + int aac_fib_setup(struct aac_dev * dev) 91 { 92 struct fib *fibptr; 93 struct hw_fib *hw_fib_va; ··· 134 } 135 136 /** 137 + * aac_fib_alloc - allocate a fib 138 * @dev: Adapter to allocate the fib for 139 * 140 * Allocate a fib from the adapter fib pool. If the pool is empty we 141 * return NULL. 142 */ 143 144 + struct fib *aac_fib_alloc(struct aac_dev *dev) 145 { 146 struct fib * fibptr; 147 unsigned long flags; ··· 170 } 171 172 /** 173 + * aac_fib_free - free a fib 174 * @fibptr: fib to free up 175 * 176 * Frees up a fib and places it on the appropriate queue 177 * (either free or timed out) 178 */ 179 180 + void aac_fib_free(struct fib *fibptr) 181 { 182 unsigned long flags; 183 ··· 188 fibptr->dev->timeout_fib = fibptr; 189 } else { 190 if (fibptr->hw_fib->header.XferState != 0) { 191 + printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n", 192 (void*)fibptr, 193 le32_to_cpu(fibptr->hw_fib->header.XferState)); 194 } ··· 199 } 200 201 /** 202 + * aac_fib_init - initialise a fib 203 * @fibptr: The fib to initialize 204 * 205 * Set up the generic fib fields ready for use 206 */ 207 208 + void aac_fib_init(struct fib *fibptr) 209 { 210 struct hw_fib *hw_fib = fibptr->hw_fib; 211 ··· 362 */ 363 364 /** 365 + * aac_fib_send - send a fib to the adapter 366 * @command: Command to send 367 * @fibptr: The fib 368 * @size: Size of fib data area ··· 378 * response FIB is received from the adapter. 379 */ 380 381 + int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, 382 + int priority, int wait, int reply, fib_callback callback, 383 + void *callback_data) 384 { 385 struct aac_dev * dev = fibptr->dev; 386 struct hw_fib * hw_fib = fibptr->hw_fib; ··· 493 q->numpending++; 494 *(q->headers.producer) = cpu_to_le32(index + 1); 495 spin_unlock_irqrestore(q->lock, qflags); 496 + dprintk((KERN_DEBUG "aac_fib_send: inserting a queue entry at index %d.\n",index)); 497 if (!(nointr & aac_config.irq_mod)) 498 aac_adapter_notify(dev, AdapNormCmdQueue); 499 } ··· 520 list_del(&fibptr->queue); 521 spin_unlock_irqrestore(q->lock, qflags); 522 if (wait == -1) { 523 + printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n" 524 "Usually a result of a PCI interrupt routing problem;\n" 525 "update mother board BIOS or consider utilizing one of\n" 526 "the SAFE mode kernel options (acpi, apic etc)\n"); ··· 624 } 625 626 /** 627 + * aac_fib_adapter_complete - complete adapter issued fib 628 * @fibptr: fib to complete 629 * @size: size of fib 630 * ··· 632 * the adapter. 
633 */ 634 635 + int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size) 636 { 637 struct hw_fib * hw_fib = fibptr->hw_fib; 638 struct aac_dev * dev = fibptr->dev; ··· 683 } 684 else 685 { 686 + printk(KERN_WARNING "aac_fib_adapter_complete: Unknown xferstate detected.\n"); 687 BUG(); 688 } 689 return 0; 690 } 691 692 /** 693 + * aac_fib_complete - fib completion handler 694 * @fib: FIB to complete 695 * 696 * Will do all necessary work to complete a FIB. 697 */ 698 699 + int aac_fib_complete(struct fib *fibptr) 700 { 701 struct hw_fib * hw_fib = fibptr->hw_fib; 702 ··· 995 if (!dev || !dev->scsi_host_ptr) 996 return; 997 /* 998 + * force reload of disk info via aac_probe_container 999 */ 1000 if ((device_config_needed == CHANGE) 1001 && (dev->fsa_dev[container].valid == 1)) 1002 dev->fsa_dev[container].valid = 2; 1003 if ((device_config_needed == CHANGE) || 1004 (device_config_needed == ADD)) 1005 + aac_probe_container(dev, container); 1006 device = scsi_device_lookup(dev->scsi_host_ptr, 1007 CONTAINER_TO_CHANNEL(container), 1008 CONTAINER_TO_ID(container), ··· 1104 /* Handle Driver Notify Events */ 1105 aac_handle_aif(dev, fib); 1106 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); 1107 + aac_fib_adapter_complete(fib, (u16)sizeof(u32)); 1108 } else { 1109 struct list_head *entry; 1110 /* The u32 here is important and intended. We are using ··· 1241 * Set the status of this FIB 1242 */ 1243 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); 1244 + aac_fib_adapter_complete(fib, sizeof(u32)); 1245 spin_unlock_irqrestore(&dev->fib_lock, flagv); 1246 /* Free up the remaining resources */ 1247 hw_fib_p = hw_fib_pool;
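aac_fib_send() serves both calling styles seen across this series: wait=1 blocks until the reply arrives, while wait=0 with a fib_callback queues the FIB and returns -EINPROGRESS. A minimal sketch of the asynchronous style, assuming the usual (void *ctxt, struct fib *) callback shape implied by the prototype in aacraid.h; my_done is an illustrative name:

	static void my_done(void *context, struct fib *fibptr)
	{
		/* runs from the reply path: consume fib_data(fibptr),
		 * then release the FIB here, not at the call site */
		aac_fib_complete(fibptr);
		aac_fib_free(fibptr);
	}

	status = aac_fib_send(ContainerCommand, fibptr, datasize,
			      FsaNormal, 0 /* don't wait */, 1,
			      my_done, context);
	if (status == -EINPROGRESS)
		return 0;	/* queued; completion is deferred */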
+1 -1
drivers/scsi/aacraid/dpcsup.c
··· 206 * Set the status of this FIB 207 */ 208 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); 209 - fib_adapter_complete(fib, sizeof(u32)); 210 spin_lock_irqsave(q->lock, flags); 211 } 212 }
··· 206 * Set the status of this FIB 207 */ 208 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); 209 + aac_fib_adapter_complete(fib, sizeof(u32)); 210 spin_lock_irqsave(q->lock, flags); 211 } 212 }
+39 -11
drivers/scsi/aacraid/linit.c
··· 385 386 static int aac_slave_configure(struct scsi_device *sdev) 387 { 388 - struct Scsi_Host *host = sdev->host; 389 390 - if (sdev->tagged_supported) 391 - scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, 128); 392 - else 393 scsi_adjust_queue_depth(sdev, 0, 1); 394 - 395 - if (!(((struct aac_dev *)host->hostdata)->adapter_info.options 396 - & AAC_OPT_NEW_COMM)) 397 - blk_queue_max_segment_size(sdev->request_queue, 65536); 398 399 return 0; 400 } ··· 898 899 /* 900 * max channel will be the physical channels plus 1 virtual channel 901 - * all containers are on the virtual channel 0 902 * physical channels are address by their actual physical number+1 903 */ 904 if (aac->nondasd_support == 1) ··· 941 aac_adapter_disable_int(aac); 942 free_irq(pdev->irq, aac); 943 out_unmap: 944 - fib_map_free(aac); 945 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys); 946 kfree(aac->queues); 947 iounmap(aac->regs.sa); ··· 975 976 aac_send_shutdown(aac); 977 aac_adapter_disable_int(aac); 978 - fib_map_free(aac); 979 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, 980 aac->comm_phys); 981 kfree(aac->queues);
··· 385 386 static int aac_slave_configure(struct scsi_device *sdev) 387 { 388 + if (sdev_channel(sdev) == CONTAINER_CHANNEL) { 389 + sdev->skip_ms_page_8 = 1; 390 + sdev->skip_ms_page_3f = 1; 391 + } 392 + if ((sdev->type == TYPE_DISK) && 393 + (sdev_channel(sdev) != CONTAINER_CHANNEL)) { 394 + struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata; 395 + if (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) 396 + sdev->no_uld_attach = 1; 397 + } 398 + if (sdev->tagged_supported && (sdev->type == TYPE_DISK) && 399 + (sdev_channel(sdev) == CONTAINER_CHANNEL)) { 400 + struct scsi_device * dev; 401 + struct Scsi_Host *host = sdev->host; 402 + unsigned num_lsu = 0; 403 + unsigned num_one = 0; 404 + unsigned depth; 405 406 + __shost_for_each_device(dev, host) { 407 + if (dev->tagged_supported && (dev->type == TYPE_DISK) && 408 + (sdev_channel(dev) == CONTAINER_CHANNEL)) 409 + ++num_lsu; 410 + else 411 + ++num_one; 412 + } 413 + if (num_lsu == 0) 414 + ++num_lsu; 415 + depth = (host->can_queue - num_one) / num_lsu; 416 + if (depth > 256) 417 + depth = 256; 418 + else if (depth < 2) 419 + depth = 2; 420 + scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth); 421 + if (!(((struct aac_dev *)host->hostdata)->adapter_info.options & 422 + AAC_OPT_NEW_COMM)) 423 + blk_queue_max_segment_size(sdev->request_queue, 65536); 424 + } else 425 scsi_adjust_queue_depth(sdev, 0, 1); 426 427 return 0; 428 } ··· 870 871 /* 872 * max channel will be the physical channels plus 1 virtual channel 873 + * all containers are on the virtual channel 0 (CONTAINER_CHANNEL) 874 * physical channels are address by their actual physical number+1 875 */ 876 if (aac->nondasd_support == 1) ··· 913 aac_adapter_disable_int(aac); 914 free_irq(pdev->irq, aac); 915 out_unmap: 916 + aac_fib_map_free(aac); 917 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys); 918 kfree(aac->queues); 919 iounmap(aac->regs.sa); ··· 947 948 aac_send_shutdown(aac); 949 aac_adapter_disable_int(aac); 950 + aac_fib_map_free(aac); 951 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, 952 aac->comm_phys); 953 kfree(aac->queues);
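The new aac_slave_configure() divides host->can_queue among the array devices on CONTAINER_CHANNEL after reserving one slot for every other device, then clamps the result to the range [2, 256]. A worked sketch of the arithmetic with made-up numbers:

	unsigned can_queue = 100, num_one = 2, num_lsu = 4;
	unsigned depth;

	if (num_lsu == 0)
		num_lsu = 1;			/* avoid divide-by-zero */
	depth = (can_queue - num_one) / num_lsu;	/* (100 - 2) / 4 = 24 */
	if (depth > 256)
		depth = 256;			/* per-device cap */
	else if (depth < 2)
		depth = 2;			/* floor keeps I/O overlapped */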
+1 -1
drivers/scsi/gdth.c
··· 2816 } 2817 #endif 2818 2819 - } else { 2820 scp->SCp.Status = GDTH_MAP_SINGLE; 2821 scp->SCp.Message = (read_write == 1 ? 2822 PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
··· 2816 } 2817 #endif 2818 2819 + } else if (scp->request_bufflen) { 2820 scp->SCp.Status = GDTH_MAP_SINGLE; 2821 scp->SCp.Message = (read_write == 1 ? 2822 PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
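The gdth fix narrows the fallback branch: a command with no scatter list and request_bufflen == 0 used to fall into the single-buffer path and hand a zero-length buffer to the DMA mapping code. A hedged sketch of the corrected shape; the scatter-gather arm is paraphrased from surrounding context, not shown in the hunk:

	if (scp->use_sg) {
		/* ... map the scatter-gather list ... */
	} else if (scp->request_bufflen) {
		scp->SCp.Status = GDTH_MAP_SINGLE;	/* flat buffer */
		/* ... pci_map_single(pdev, scp->request_buffer,
		 *		      scp->request_bufflen, dir) ... */
	}
	/* else: no data phase, nothing to map */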
+5 -44
drivers/scsi/ipr.c
··· 4236 } 4237 4238 /** 4239 - * ipr_save_ioafp_mode_select - Save adapters mode select data 4240 - * @ioa_cfg: ioa config struct 4241 - * @scsi_cmd: scsi command struct 4242 - * 4243 - * This function saves mode select data for the adapter to 4244 - * use following an adapter reset. 4245 - * 4246 - * Return value: 4247 - * 0 on success / SCSI_MLQUEUE_HOST_BUSY on failure 4248 - **/ 4249 - static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg, 4250 - struct scsi_cmnd *scsi_cmd) 4251 - { 4252 - if (!ioa_cfg->saved_mode_pages) { 4253 - ioa_cfg->saved_mode_pages = kmalloc(sizeof(struct ipr_mode_pages), 4254 - GFP_ATOMIC); 4255 - if (!ioa_cfg->saved_mode_pages) { 4256 - dev_err(&ioa_cfg->pdev->dev, 4257 - "IOA mode select buffer allocation failed\n"); 4258 - return SCSI_MLQUEUE_HOST_BUSY; 4259 - } 4260 - } 4261 - 4262 - memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]); 4263 - ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4]; 4264 - return 0; 4265 - } 4266 - 4267 - /** 4268 * ipr_queuecommand - Queue a mid-layer request 4269 * @scsi_cmd: scsi command struct 4270 * @done: done function ··· 4308 if (scsi_cmd->cmnd[0] >= 0xC0 && 4309 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) 4310 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 4311 - 4312 - if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT) 4313 - rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd); 4314 4315 if (likely(rc == 0)) 4316 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); ··· 4797 int length; 4798 4799 ENTER; 4800 - if (ioa_cfg->saved_mode_pages) { 4801 - memcpy(mode_pages, ioa_cfg->saved_mode_pages, 4802 - ioa_cfg->saved_mode_page_len); 4803 - length = ioa_cfg->saved_mode_page_len; 4804 - } else { 4805 - ipr_scsi_bus_speed_limit(ioa_cfg); 4806 - ipr_check_term_power(ioa_cfg, mode_pages); 4807 - ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages); 4808 - length = mode_pages->hdr.length + 1; 4809 - mode_pages->hdr.length = 0; 4810 - } 4811 4812 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11, 4813 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), ··· 5931 } 5932 5933 ipr_free_dump(ioa_cfg); 5934 - kfree(ioa_cfg->saved_mode_pages); 5935 kfree(ioa_cfg->trace); 5936 } 5937
··· 4236 } 4237 4238 /** 4239 * ipr_queuecommand - Queue a mid-layer request 4240 * @scsi_cmd: scsi command struct 4241 * @done: done function ··· 4337 if (scsi_cmd->cmnd[0] >= 0xC0 && 4338 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) 4339 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 4340 4341 if (likely(rc == 0)) 4342 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); ··· 4829 int length; 4830 4831 ENTER; 4832 + ipr_scsi_bus_speed_limit(ioa_cfg); 4833 + ipr_check_term_power(ioa_cfg, mode_pages); 4834 + ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages); 4835 + length = mode_pages->hdr.length + 1; 4836 + mode_pages->hdr.length = 0; 4837 4838 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11, 4839 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), ··· 5969 } 5970 5971 ipr_free_dump(ioa_cfg); 5972 kfree(ioa_cfg->trace); 5973 } 5974
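With ipr_save_ioafp_mode_select() removed, the adapter no longer caches a MODE_SELECT payload sent to the IOA resource and replays it after reset; the reset path now always recomputes the IOA mode pages from current bus limits, as the surviving hunk shows in condensed form:

	ipr_scsi_bus_speed_limit(ioa_cfg);		/* derive bus speeds */
	ipr_check_term_power(ioa_cfg, mode_pages);	/* term-power check */
	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
	length = mode_pages->hdr.length + 1;
	mode_pages->hdr.length = 0;	/* length byte is sent as zero */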
+2 -3
drivers/scsi/ipr.h
··· 36 /* 37 * Literals 38 */ 39 - #define IPR_DRIVER_VERSION "2.1.1" 40 - #define IPR_DRIVER_DATE "(November 15, 2005)" 41 42 /* 43 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding ··· 1000 struct Scsi_Host *host; 1001 struct pci_dev *pdev; 1002 struct ipr_sglist *ucode_sglist; 1003 - struct ipr_mode_pages *saved_mode_pages; 1004 u8 saved_mode_page_len; 1005 1006 struct work_struct work_q;
··· 36 /* 37 * Literals 38 */ 39 + #define IPR_DRIVER_VERSION "2.1.2" 40 + #define IPR_DRIVER_DATE "(February 8, 2006)" 41 42 /* 43 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding ··· 1000 struct Scsi_Host *host; 1001 struct pci_dev *pdev; 1002 struct ipr_sglist *ucode_sglist; 1003 u8 saved_mode_page_len; 1004 1005 struct work_struct work_q;
+41 -37
drivers/scsi/iscsi_tcp.c
··· 146 spin_unlock_irqrestore(&session->lock, flags); 147 set_bit(SUSPEND_BIT, &conn->suspend_tx); 148 set_bit(SUSPEND_BIT, &conn->suspend_rx); 149 - iscsi_conn_error(iscsi_handle(conn), err); 150 } 151 152 static inline int ··· 244 if (sc->sc_data_direction == DMA_TO_DEVICE) { 245 struct iscsi_data_task *dtask, *n; 246 /* WRITE: cleanup Data-Out's if any */ 247 - spin_lock(&conn->lock); 248 list_for_each_entry_safe(dtask, n, &ctask->dataqueue, item) { 249 list_del(&dtask->item); 250 mempool_free(dtask, ctask->datapool); 251 } 252 - spin_unlock(&conn->lock); 253 } 254 ctask->xmstate = XMSTATE_IDLE; 255 ctask->r2t = NULL; ··· 687 break; 688 689 if (!conn->in.datalen) { 690 - rc = iscsi_recv_pdu(iscsi_handle(conn), hdr, 691 NULL, 0); 692 if (conn->login_mtask != mtask) { 693 spin_lock(&session->lock); ··· 735 if (!conn->in.datalen) { 736 struct iscsi_mgmt_task *mtask; 737 738 - rc = iscsi_recv_pdu(iscsi_handle(conn), hdr, 739 NULL, 0); 740 mtask = (struct iscsi_mgmt_task *) 741 session->mgmt_cmds[conn->in.itt - ··· 759 rc = iscsi_check_assign_cmdsn(session, 760 (struct iscsi_nopin*)hdr); 761 if (!rc && hdr->ttt != ISCSI_RESERVED_TAG) 762 - rc = iscsi_recv_pdu(iscsi_handle(conn), 763 hdr, NULL, 0); 764 } else 765 rc = ISCSI_ERR_PROTO; ··· 1042 goto exit; 1043 } 1044 1045 - rc = iscsi_recv_pdu(iscsi_handle(conn), conn->in.hdr, 1046 conn->data, conn->in.datalen); 1047 1048 if (!rc && conn->datadgst_en && ··· 2426 } 2427 2428 static struct iscsi_cls_conn * 2429 - iscsi_conn_create(struct Scsi_Host *shost, uint32_t conn_idx) 2430 { 2431 struct iscsi_session *session = iscsi_hostdata(shost->hostdata); 2432 struct iscsi_conn *conn; 2433 struct iscsi_cls_conn *cls_conn; 2434 2435 - cls_conn = iscsi_create_conn(hostdata_session(shost->hostdata), 2436 - conn_idx); 2437 if (!cls_conn) 2438 return NULL; 2439 conn = cls_conn->dd_data; 2440 2441 - memset(conn, 0, sizeof(struct iscsi_conn)); 2442 conn->c_stage = ISCSI_CONN_INITIAL_STAGE; 2443 conn->in_progress = IN_PROGRESS_WAIT_HEADER; 2444 conn->id = conn_idx; ··· 2450 conn->hdr_size = sizeof(struct iscsi_hdr); 2451 conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH; 2452 conn->max_recv_dlength = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH; 2453 - 2454 - spin_lock_init(&conn->lock); 2455 2456 /* initialize general xmit PDU commands queue */ 2457 conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*), ··· 2622 } 2623 2624 static int 2625 - iscsi_conn_bind(iscsi_sessionh_t sessionh, iscsi_connh_t connh, 2626 - uint32_t transport_fd, int is_leading) 2627 { 2628 - struct iscsi_session *session = iscsi_ptr(sessionh); 2629 - struct iscsi_conn *tmp = ERR_PTR(-EEXIST), *conn = iscsi_ptr(connh); 2630 struct sock *sk; 2631 struct socket *sock; 2632 int err; ··· 2702 } 2703 2704 static int 2705 - iscsi_conn_start(iscsi_connh_t connh) 2706 { 2707 - struct iscsi_conn *conn = iscsi_ptr(connh); 2708 struct iscsi_session *session = conn->session; 2709 struct sock *sk; 2710 ··· 2753 } 2754 2755 static void 2756 - iscsi_conn_stop(iscsi_connh_t connh, int flag) 2757 { 2758 - struct iscsi_conn *conn = iscsi_ptr(connh); 2759 struct iscsi_session *session = conn->session; 2760 struct sock *sk; 2761 unsigned long flags; ··· 3252 3253 static struct iscsi_transport iscsi_tcp_transport; 3254 3255 - static struct Scsi_Host * 3256 iscsi_session_create(struct scsi_transport_template *scsit, 3257 - uint32_t initial_cmdsn) 3258 { 3259 struct Scsi_Host *shost; 3260 struct iscsi_session *session; ··· 3267 session = iscsi_hostdata(shost->hostdata); 3268 memset(session, 0, 
sizeof(struct iscsi_session)); 3269 session->host = shost; 3270 - session->state = ISCSI_STATE_LOGGED_IN; 3271 session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX; 3272 session->cmds_max = ISCSI_XMIT_CMDS_MAX; 3273 session->cmdsn = initial_cmdsn; 3274 session->exp_cmdsn = initial_cmdsn + 1; 3275 session->max_cmdsn = initial_cmdsn + 1; 3276 session->max_r2t = 1; 3277 3278 /* initialize SCSI PDU commands pool */ 3279 if (iscsi_pool_init(&session->cmdpool, session->cmds_max, ··· 3311 if (iscsi_r2tpool_alloc(session)) 3312 goto r2tpool_alloc_fail; 3313 3314 - return shost; 3315 3316 r2tpool_alloc_fail: 3317 for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) 3318 kfree(session->mgmt_cmds[cmd_i]->data); 3319 - iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds); 3320 immdata_alloc_fail: 3321 mgmtpool_alloc_fail: 3322 iscsi_pool_free(&session->cmdpool, (void**)session->cmds); 3323 cmdpool_alloc_fail: 3324 return NULL; 3325 } 3326 3327 static void 3328 - iscsi_session_destroy(struct Scsi_Host *shost) 3329 { 3330 struct iscsi_session *session = iscsi_hostdata(shost->hostdata); 3331 int cmd_i; 3332 struct iscsi_data_task *dtask, *n; ··· 3352 } 3353 3354 static int 3355 - iscsi_conn_set_param(iscsi_connh_t connh, enum iscsi_param param, 3356 uint32_t value) 3357 { 3358 - struct iscsi_conn *conn = iscsi_ptr(connh); 3359 struct iscsi_session *session = conn->session; 3360 3361 spin_lock_bh(&session->lock); ··· 3497 } 3498 3499 static int 3500 - iscsi_session_get_param(struct Scsi_Host *shost, 3501 enum iscsi_param param, uint32_t *value) 3502 { 3503 struct iscsi_session *session = iscsi_hostdata(shost->hostdata); 3504 3505 switch(param) { ··· 3542 } 3543 3544 static int 3545 - iscsi_conn_get_param(void *data, enum iscsi_param param, uint32_t *value) 3546 { 3547 - struct iscsi_conn *conn = data; 3548 3549 switch(param) { 3550 case ISCSI_PARAM_MAX_RECV_DLENGTH: ··· 3568 } 3569 3570 static void 3571 - iscsi_conn_get_stats(iscsi_connh_t connh, struct iscsi_stats *stats) 3572 { 3573 - struct iscsi_conn *conn = iscsi_ptr(connh); 3574 3575 stats->txdata_octets = conn->txdata_octets; 3576 stats->rxdata_octets = conn->rxdata_octets; ··· 3591 } 3592 3593 static int 3594 - iscsi_conn_send_pdu(iscsi_connh_t connh, struct iscsi_hdr *hdr, char *data, 3595 - uint32_t data_size) 3596 { 3597 - struct iscsi_conn *conn = iscsi_ptr(connh); 3598 int rc; 3599 3600 mutex_lock(&conn->xmitmutex);
··· 146 spin_unlock_irqrestore(&session->lock, flags); 147 set_bit(SUSPEND_BIT, &conn->suspend_tx); 148 set_bit(SUSPEND_BIT, &conn->suspend_rx); 149 + iscsi_conn_error(conn->cls_conn, err); 150 } 151 152 static inline int ··· 244 if (sc->sc_data_direction == DMA_TO_DEVICE) { 245 struct iscsi_data_task *dtask, *n; 246 /* WRITE: cleanup Data-Out's if any */ 247 list_for_each_entry_safe(dtask, n, &ctask->dataqueue, item) { 248 list_del(&dtask->item); 249 mempool_free(dtask, ctask->datapool); 250 } 251 } 252 ctask->xmstate = XMSTATE_IDLE; 253 ctask->r2t = NULL; ··· 689 break; 690 691 if (!conn->in.datalen) { 692 + rc = iscsi_recv_pdu(conn->cls_conn, hdr, 693 NULL, 0); 694 if (conn->login_mtask != mtask) { 695 spin_lock(&session->lock); ··· 737 if (!conn->in.datalen) { 738 struct iscsi_mgmt_task *mtask; 739 740 + rc = iscsi_recv_pdu(conn->cls_conn, hdr, 741 NULL, 0); 742 mtask = (struct iscsi_mgmt_task *) 743 session->mgmt_cmds[conn->in.itt - ··· 761 rc = iscsi_check_assign_cmdsn(session, 762 (struct iscsi_nopin*)hdr); 763 if (!rc && hdr->ttt != ISCSI_RESERVED_TAG) 764 + rc = iscsi_recv_pdu(conn->cls_conn, 765 hdr, NULL, 0); 766 } else 767 rc = ISCSI_ERR_PROTO; ··· 1044 goto exit; 1045 } 1046 1047 + rc = iscsi_recv_pdu(conn->cls_conn, conn->in.hdr, 1048 conn->data, conn->in.datalen); 1049 1050 if (!rc && conn->datadgst_en && ··· 2428 } 2429 2430 static struct iscsi_cls_conn * 2431 + iscsi_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx) 2432 { 2433 + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); 2434 struct iscsi_session *session = iscsi_hostdata(shost->hostdata); 2435 struct iscsi_conn *conn; 2436 struct iscsi_cls_conn *cls_conn; 2437 2438 + cls_conn = iscsi_create_conn(cls_session, conn_idx); 2439 if (!cls_conn) 2440 return NULL; 2441 conn = cls_conn->dd_data; 2442 + memset(conn, 0, sizeof(*conn)); 2443 2444 + conn->cls_conn = cls_conn; 2445 conn->c_stage = ISCSI_CONN_INITIAL_STAGE; 2446 conn->in_progress = IN_PROGRESS_WAIT_HEADER; 2447 conn->id = conn_idx; ··· 2451 conn->hdr_size = sizeof(struct iscsi_hdr); 2452 conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH; 2453 conn->max_recv_dlength = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH; 2454 2455 /* initialize general xmit PDU commands queue */ 2456 conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*), ··· 2625 } 2626 2627 static int 2628 + iscsi_conn_bind(struct iscsi_cls_session *cls_session, 2629 + struct iscsi_cls_conn *cls_conn, uint32_t transport_fd, 2630 + int is_leading) 2631 { 2632 + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); 2633 + struct iscsi_session *session = iscsi_hostdata(shost->hostdata); 2634 + struct iscsi_conn *tmp = ERR_PTR(-EEXIST), *conn = cls_conn->dd_data; 2635 struct sock *sk; 2636 struct socket *sock; 2637 int err; ··· 2703 } 2704 2705 static int 2706 + iscsi_conn_start(struct iscsi_cls_conn *cls_conn) 2707 { 2708 + struct iscsi_conn *conn = cls_conn->dd_data; 2709 struct iscsi_session *session = conn->session; 2710 struct sock *sk; 2711 ··· 2754 } 2755 2756 static void 2757 + iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) 2758 { 2759 + struct iscsi_conn *conn = cls_conn->dd_data; 2760 struct iscsi_session *session = conn->session; 2761 struct sock *sk; 2762 unsigned long flags; ··· 3253 3254 static struct iscsi_transport iscsi_tcp_transport; 3255 3256 + static struct iscsi_cls_session * 3257 iscsi_session_create(struct scsi_transport_template *scsit, 3258 + uint32_t initial_cmdsn, uint32_t *sid) 3259 { 3260 struct Scsi_Host *shost; 3261 
struct iscsi_session *session; ··· 3268 session = iscsi_hostdata(shost->hostdata); 3269 memset(session, 0, sizeof(struct iscsi_session)); 3270 session->host = shost; 3271 + session->state = ISCSI_STATE_FREE; 3272 session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX; 3273 session->cmds_max = ISCSI_XMIT_CMDS_MAX; 3274 session->cmdsn = initial_cmdsn; 3275 session->exp_cmdsn = initial_cmdsn + 1; 3276 session->max_cmdsn = initial_cmdsn + 1; 3277 session->max_r2t = 1; 3278 + *sid = shost->host_no; 3279 3280 /* initialize SCSI PDU commands pool */ 3281 if (iscsi_pool_init(&session->cmdpool, session->cmds_max, ··· 3311 if (iscsi_r2tpool_alloc(session)) 3312 goto r2tpool_alloc_fail; 3313 3314 + return hostdata_session(shost->hostdata); 3315 3316 r2tpool_alloc_fail: 3317 for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) 3318 kfree(session->mgmt_cmds[cmd_i]->data); 3319 immdata_alloc_fail: 3320 + iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds); 3321 mgmtpool_alloc_fail: 3322 iscsi_pool_free(&session->cmdpool, (void**)session->cmds); 3323 cmdpool_alloc_fail: 3324 + iscsi_transport_destroy_session(shost); 3325 return NULL; 3326 } 3327 3328 static void 3329 + iscsi_session_destroy(struct iscsi_cls_session *cls_session) 3330 { 3331 + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); 3332 struct iscsi_session *session = iscsi_hostdata(shost->hostdata); 3333 int cmd_i; 3334 struct iscsi_data_task *dtask, *n; ··· 3350 } 3351 3352 static int 3353 + iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, 3354 uint32_t value) 3355 { 3356 + struct iscsi_conn *conn = cls_conn->dd_data; 3357 struct iscsi_session *session = conn->session; 3358 3359 spin_lock_bh(&session->lock); ··· 3495 } 3496 3497 static int 3498 + iscsi_session_get_param(struct iscsi_cls_session *cls_session, 3499 enum iscsi_param param, uint32_t *value) 3500 { 3501 + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); 3502 struct iscsi_session *session = iscsi_hostdata(shost->hostdata); 3503 3504 switch(param) { ··· 3539 } 3540 3541 static int 3542 + iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, 3543 + enum iscsi_param param, uint32_t *value) 3544 { 3545 + struct iscsi_conn *conn = cls_conn->dd_data; 3546 3547 switch(param) { 3548 case ISCSI_PARAM_MAX_RECV_DLENGTH: ··· 3564 } 3565 3566 static void 3567 + iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats) 3568 { 3569 + struct iscsi_conn *conn = cls_conn->dd_data; 3570 3571 stats->txdata_octets = conn->txdata_octets; 3572 stats->rxdata_octets = conn->rxdata_octets; ··· 3587 } 3588 3589 static int 3590 + iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr, 3591 + char *data, uint32_t data_size) 3592 { 3593 + struct iscsi_conn *conn = cls_conn->dd_data; 3594 int rc; 3595 3596 mutex_lock(&conn->xmitmutex);
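The iscsi_tcp conversion swaps the opaque iscsi_connh_t/iscsi_sessionh_t handles for typed transport-class objects: per-connection driver state hangs off cls_conn->dd_data, and session state is reached through the class session's Scsi_Host. A minimal sketch of the two lookups now used throughout, with helper names exactly as in the hunks:

	static int example_conn_op(struct iscsi_cls_conn *cls_conn)
	{
		struct iscsi_conn *conn = cls_conn->dd_data;	/* driver data */
		struct iscsi_session *session = conn->session;
		/* ... operate on conn/session ... */
		return 0;
	}

	static int example_session_op(struct iscsi_cls_session *cls_session)
	{
		struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
		struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
		/* ... */
		return 0;
	}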
+3 -1
drivers/scsi/iscsi_tcp.h
··· 113 int datadgst; 114 }; 115 116 struct iscsi_conn { 117 struct iscsi_hdr hdr; /* header placeholder */ 118 char hdrext[4*sizeof(__u16) + 119 sizeof(__u32)]; ··· 146 struct iscsi_mgmt_task *login_mtask; /* mtask used for login/text */ 147 struct iscsi_mgmt_task *mtask; /* xmit mtask in progress */ 148 struct iscsi_cmd_task *ctask; /* xmit ctask in progress */ 149 - spinlock_t lock; /* FIXME: to be removed */ 150 151 /* old values for socket callbacks */ 152 void (*old_data_ready)(struct sock *, int);
··· 113 int datadgst; 114 }; 115 116 + struct iscsi_cls_conn; 117 + 118 struct iscsi_conn { 119 + struct iscsi_cls_conn *cls_conn; /* ptr to class connection */ 120 struct iscsi_hdr hdr; /* header placeholder */ 121 char hdrext[4*sizeof(__u16) + 122 sizeof(__u32)]; ··· 143 struct iscsi_mgmt_task *login_mtask; /* mtask used for login/text */ 144 struct iscsi_mgmt_task *mtask; /* xmit mtask in progress */ 145 struct iscsi_cmd_task *ctask; /* xmit ctask in progress */ 146 147 /* old values for socket callbacks */ 148 void (*old_data_ready)(struct sock *, int);
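
The iscsi_tcp changes above move the driver onto transport-class objects: the private struct iscsi_conn is carved out of cls_conn->dd_data at iscsi_conn_create() time, and the new cls_conn back-pointer lets the driver make upcalls such as iscsi_conn_error() and iscsi_recv_pdu() against the class object. Below is a minimal userspace sketch of that embedding pattern; the names my_cls_obj and my_priv are illustrative stand-ins, not the transport's real types.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Class-visible object; dd_data points at driver-private state. */
    struct my_cls_obj {
        int id;
        void *dd_data;
    };

    /* Driver-private state keeps a back-pointer for upcalls. */
    struct my_priv {
        struct my_cls_obj *cls;
        int c_stage;
    };

    static struct my_cls_obj *create_obj(int id)
    {
        struct my_cls_obj *cls;
        struct my_priv *priv;

        cls = malloc(sizeof(*cls) + sizeof(*priv));
        if (!cls)
            return NULL;
        cls->id = id;
        cls->dd_data = cls + 1;    /* private area follows the object */
        priv = cls->dd_data;
        memset(priv, 0, sizeof(*priv));
        priv->cls = cls;           /* back-pointer, as conn->cls_conn */
        return cls;
    }

    int main(void)
    {
        struct my_cls_obj *cls = create_obj(7);
        struct my_priv *priv;

        if (!cls)
            return 1;
        priv = cls->dd_data;
        printf("id %d, back-pointer ok: %d\n", cls->id, priv->cls == cls);
        free(cls);
        return 0;
    }
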
+1 -1
drivers/scsi/megaraid.c
··· 5049 MODULE_DEVICE_TABLE(pci, megaraid_pci_tbl); 5050 5051 static struct pci_driver megaraid_pci_driver = { 5052 - .name = "megaraid", 5053 .id_table = megaraid_pci_tbl, 5054 .probe = megaraid_probe_one, 5055 .remove = __devexit_p(megaraid_remove_one),
··· 5049 MODULE_DEVICE_TABLE(pci, megaraid_pci_tbl); 5050 5051 static struct pci_driver megaraid_pci_driver = { 5052 + .name = "megaraid_legacy", 5053 .id_table = megaraid_pci_tbl, 5054 .probe = megaraid_probe_one, 5055 .remove = __devexit_p(megaraid_remove_one),
+1 -1
drivers/scsi/megaraid.h
··· 5 #include <linux/mutex.h> 6 7 #define MEGARAID_VERSION \ 8 - "v2.00.3 (Release Date: Wed Feb 19 08:51:30 EST 2003)\n" 9 10 /* 11 * Driver features - change the values to enable or disable features in the
··· 5 #include <linux/mutex.h> 6 7 #define MEGARAID_VERSION \ 8 + "v2.00.4 (Release Date: Thu Feb 9 08:51:30 EST 2006)\n" 9 10 /* 11 * Driver features - change the values to enable or disable features in the
+99 -2
drivers/scsi/megaraid/megaraid_sas.c
··· 10 * 2 of the License, or (at your option) any later version. 11 * 12 * FILE : megaraid_sas.c 13 - * Version : v00.00.02.02 14 * 15 * Authors: 16 * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsil.com> ··· 59 PCI_ANY_ID, 60 PCI_ANY_ID, 61 }, 62 { 63 PCI_VENDOR_ID_DELL, 64 PCI_DEVICE_ID_DELL_PERC5, // xscale IOP ··· 202 /** 203 * This is the end of set of functions & definitions specific 204 * to xscale (deviceid : 1064R, PERC5) controllers 205 */ 206 207 /** ··· 1693 1694 reg_set = instance->reg_set; 1695 1696 - instance->instancet = &megasas_instance_template_xscale; 1697 1698 /* 1699 * We expect the FW state to be READY ··· 2079 host->max_channel = MEGASAS_MAX_CHANNELS - 1; 2080 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL; 2081 host->max_lun = MEGASAS_MAX_LUN; 2082 2083 /* 2084 * Notify the mid-layer about the new controller
··· 10 * 2 of the License, or (at your option) any later version. 11 * 12 * FILE : megaraid_sas.c 13 + * Version : v00.00.02.04 14 * 15 * Authors: 16 * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsil.com> ··· 59 PCI_ANY_ID, 60 PCI_ANY_ID, 61 }, 62 + { 63 + PCI_VENDOR_ID_LSI_LOGIC, 64 + PCI_DEVICE_ID_LSI_SAS1078R, // ppc IOP 65 + PCI_ANY_ID, 66 + PCI_ANY_ID, 67 + }, 68 { 69 PCI_VENDOR_ID_DELL, 70 PCI_DEVICE_ID_DELL_PERC5, // xscale IOP ··· 196 /** 197 * This is the end of set of functions & definitions specific 198 * to xscale (deviceid : 1064R, PERC5) controllers 199 + */ 200 + 201 + /** 202 + * The following functions are defined for ppc (deviceid : 0x60) 203 + * controllers 204 + */ 205 + 206 + /** 207 + * megasas_enable_intr_ppc - Enables interrupts 208 + * @regs: MFI register set 209 + */ 210 + static inline void 211 + megasas_enable_intr_ppc(struct megasas_register_set __iomem * regs) 212 + { 213 + writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); 214 + 215 + writel(~0x80000004, &(regs)->outbound_intr_mask); 216 + 217 + /* Dummy readl to force pci flush */ 218 + readl(&regs->outbound_intr_mask); 219 + } 220 + 221 + /** 222 + * megasas_read_fw_status_reg_ppc - returns the current FW status value 223 + * @regs: MFI register set 224 + */ 225 + static u32 226 + megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs) 227 + { 228 + return readl(&(regs)->outbound_scratch_pad); 229 + } 230 + 231 + /** 232 + * megasas_clear_interrupt_ppc - Check & clear interrupt 233 + * @regs: MFI register set 234 + */ 235 + static int 236 + megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs) 237 + { 238 + u32 status; 239 + /* 240 + * Check if it is our interrupt 241 + */ 242 + status = readl(&regs->outbound_intr_status); 243 + 244 + if (!(status & MFI_REPLY_1078_MESSAGE_INTERRUPT)) { 245 + return 1; 246 + } 247 + 248 + /* 249 + * Clear the interrupt by writing back the same value 250 + */ 251 + writel(status, &regs->outbound_doorbell_clear); 252 + 253 + return 0; 254 + } 255 + /** 256 + * megasas_fire_cmd_ppc - Sends command to the FW 257 + * @frame_phys_addr : Physical address of cmd 258 + * @frame_count : Number of frames for the command 259 + * @regs : MFI register set 260 + */ 261 + static inline void 262 + megasas_fire_cmd_ppc(dma_addr_t frame_phys_addr, u32 frame_count, struct megasas_register_set __iomem *regs) 263 + { 264 + writel((frame_phys_addr | (frame_count<<1))|1, 265 + &(regs)->inbound_queue_port); 266 + } 267 + 268 + static struct megasas_instance_template megasas_instance_template_ppc = { 269 + 270 + .fire_cmd = megasas_fire_cmd_ppc, 271 + .enable_intr = megasas_enable_intr_ppc, 272 + .clear_intr = megasas_clear_intr_ppc, 273 + .read_fw_status_reg = megasas_read_fw_status_reg_ppc, 274 + }; 275 + 276 + /** 277 + * This is the end of set of functions & definitions 278 + * specific to ppc (deviceid : 0x60) controllers 279 */ 280 281 /** ··· 1607 1608 reg_set = instance->reg_set; 1609 1610 + switch(instance->pdev->device) 1611 + { 1612 + case PCI_DEVICE_ID_LSI_SAS1078R: 1613 + instance->instancet = &megasas_instance_template_ppc; 1614 + break; 1615 + case PCI_DEVICE_ID_LSI_SAS1064R: 1616 + case PCI_DEVICE_ID_DELL_PERC5: 1617 + default: 1618 + instance->instancet = &megasas_instance_template_xscale; 1619 + break; 1620 + } 1621 1622 /* 1623 * We expect the FW state to be READY ··· 1983 host->max_channel = MEGASAS_MAX_CHANNELS - 1; 1984 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL; 1985 host->max_lun = MEGASAS_MAX_LUN; 1986 + host->max_cmd_len = 16; 1987 1988 /* 
1989 * Notify the mid-layer about the new controller
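
The megaraid_sas additions above follow a standard ops-table pattern: one megasas_instance_template per IOP family, selected once at init from the PCI device id so the I/O paths never branch on hardware type again. A self-contained sketch of the same dispatch, with illustrative names and the device id 0x60 for the ppc 1078 taken from the hunk:

    #include <stdio.h>

    struct instance_template {
        void (*fire_cmd)(unsigned addr);
        void (*enable_intr)(void);
    };

    static void fire_cmd_xscale(unsigned addr) { printf("xscale fire %#x\n", addr); }
    static void enable_intr_xscale(void)       { printf("xscale intr on\n"); }
    static void fire_cmd_ppc(unsigned addr)    { printf("ppc fire %#x\n", addr); }
    static void enable_intr_ppc(void)          { printf("ppc intr on\n"); }

    static const struct instance_template template_xscale = {
        .fire_cmd = fire_cmd_xscale, .enable_intr = enable_intr_xscale,
    };
    static const struct instance_template template_ppc = {
        .fire_cmd = fire_cmd_ppc, .enable_intr = enable_intr_ppc,
    };

    /* Select the ops table once, by device id, as megasas does at init. */
    static const struct instance_template *select_template(unsigned device_id)
    {
        switch (device_id) {
        case 0x0060:               /* SAS1078R, ppc IOP */
            return &template_ppc;
        default:                   /* SAS1064R / PERC5, xscale IOP */
            return &template_xscale;
        }
    }

    int main(void)
    {
        const struct instance_template *t = select_template(0x0060);

        t->enable_intr();
        t->fire_cmd(0x1000);
        return 0;
    }
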
+34 -19
drivers/scsi/megaraid/megaraid_sas.h
··· 18 /** 19 * MegaRAID SAS Driver meta data 20 */ 21 - #define MEGASAS_VERSION "00.00.02.02" 22 - #define MEGASAS_RELDATE "Jan 23, 2006" 23 - #define MEGASAS_EXT_VERSION "Mon Jan 23 14:09:01 PST 2006" 24 /* 25 * ===================================== 26 * MegaRAID SAS MFI firmware definitions ··· 553 #define MFI_OB_INTR_STATUS_MASK 0x00000002 554 #define MFI_POLL_TIMEOUT_SECS 10 555 556 struct megasas_register_set { 557 558 - u32 reserved_0[4]; /*0000h */ 559 560 - u32 inbound_msg_0; /*0010h */ 561 - u32 inbound_msg_1; /*0014h */ 562 - u32 outbound_msg_0; /*0018h */ 563 - u32 outbound_msg_1; /*001Ch */ 564 565 - u32 inbound_doorbell; /*0020h */ 566 - u32 inbound_intr_status; /*0024h */ 567 - u32 inbound_intr_mask; /*0028h */ 568 569 - u32 outbound_doorbell; /*002Ch */ 570 - u32 outbound_intr_status; /*0030h */ 571 - u32 outbound_intr_mask; /*0034h */ 572 573 - u32 reserved_1[2]; /*0038h */ 574 575 - u32 inbound_queue_port; /*0040h */ 576 - u32 outbound_queue_port; /*0044h */ 577 578 - u32 reserved_2; /*004Ch */ 579 580 - u32 index_registers[1004]; /*0050h */ 581 582 } __attribute__ ((packed)); 583
···
18 /**
19 * MegaRAID SAS Driver meta data
20 */
21 + #define MEGASAS_VERSION "00.00.02.04"
22 + #define MEGASAS_RELDATE "Feb 03, 2006"
23 + #define MEGASAS_EXT_VERSION "Fri Feb 03 14:31:44 PST 2006"
24 /*
25 * =====================================
26 * MegaRAID SAS MFI firmware definitions
···
553 #define MFI_OB_INTR_STATUS_MASK 0x00000002
554 #define MFI_POLL_TIMEOUT_SECS 10
555
556 + #define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
557 + #define PCI_DEVICE_ID_LSI_SAS1078R 0x00000060
558 +
559 struct megasas_register_set {
560 + u32 reserved_0[4]; /*0000h*/
561
562 + u32 inbound_msg_0; /*0010h*/
563 + u32 inbound_msg_1; /*0014h*/
564 + u32 outbound_msg_0; /*0018h*/
565 + u32 outbound_msg_1; /*001Ch*/
566
567 + u32 inbound_doorbell; /*0020h*/
568 + u32 inbound_intr_status; /*0024h*/
569 + u32 inbound_intr_mask; /*0028h*/
570
571 + u32 outbound_doorbell; /*002Ch*/
572 + u32 outbound_intr_status; /*0030h*/
573 + u32 outbound_intr_mask; /*0034h*/
574
575 + u32 reserved_1[2]; /*0038h*/
576
577 + u32 inbound_queue_port; /*0040h*/
578 + u32 outbound_queue_port; /*0044h*/
579
580 + u32 reserved_2[22]; /*0048h*/
581
582 + u32 outbound_doorbell_clear; /*00A0h*/
583
584 + u32 reserved_3[3]; /*00A4h*/
585
586 + u32 outbound_scratch_pad; /*00B0h*/
587
588 + u32 reserved_4[3]; /*00B4h*/
589
590 + u32 inbound_low_queue_port; /*00C0h*/
591
592 + u32 inbound_high_queue_port; /*00C4h*/
593
594 + u32 reserved_5; /*00C8h*/
595 + u32 index_registers[820]; /*00CCh*/
596
597 } __attribute__ ((packed));
598
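
The reserved arrays in the reworked register map are doing load-bearing work: reserved_2[22] must pad from 0048h to 00A0h, reserved_3[3] from 00A4h to 00B0h, and so on, or every register beyond them lands on the wrong offset. One way to check such layouts at compile time is offsetof plus a static assertion; a standalone C11 sketch over a trimmed copy of the struct (the kernel itself would use BUILD_BUG_ON):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Trimmed copy of the MFI register map, for layout checking only. */
    struct regs {
        uint32_t reserved_0[4];           /* 0000h */
        uint32_t inbound_msg_0;           /* 0010h */
        uint32_t inbound_msg_1;
        uint32_t outbound_msg_0;
        uint32_t outbound_msg_1;
        uint32_t inbound_doorbell;        /* 0020h */
        uint32_t inbound_intr_status;
        uint32_t inbound_intr_mask;
        uint32_t outbound_doorbell;       /* 002Ch */
        uint32_t outbound_intr_status;
        uint32_t outbound_intr_mask;
        uint32_t reserved_1[2];           /* 0038h */
        uint32_t inbound_queue_port;      /* 0040h */
        uint32_t outbound_queue_port;     /* 0044h */
        uint32_t reserved_2[22];          /* 0048h */
        uint32_t outbound_doorbell_clear; /* 00A0h */
        uint32_t reserved_3[3];           /* 00A4h */
        uint32_t outbound_scratch_pad;    /* 00B0h */
        uint32_t reserved_4[3];           /* 00B4h */
        uint32_t inbound_low_queue_port;  /* 00C0h */
        uint32_t inbound_high_queue_port; /* 00C4h */
        uint32_t reserved_5;              /* 00C8h */
        uint32_t index_registers[820];    /* 00CCh */
    } __attribute__ ((packed));

    /* Fails to compile if a reserved block is miscounted. */
    static_assert(offsetof(struct regs, outbound_doorbell_clear) == 0xA0, "bad pad");
    static_assert(offsetof(struct regs, outbound_scratch_pad)    == 0xB0, "bad pad");
    static_assert(offsetof(struct regs, inbound_low_queue_port)  == 0xC0, "bad pad");

    int main(void) { return 0; }
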
+271 -5
drivers/scsi/qla2xxx/qla_attr.c
··· 7 #include "qla_def.h" 8 9 #include <linux/vmalloc.h> 10 - #include <scsi/scsi_transport_fc.h> 11 12 /* SYSFS attributes --------------------------------------------------------- */ 13 ··· 113 struct device, kobj))); 114 unsigned long flags; 115 116 - if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size) 117 return 0; 118 119 /* Read NVRAM. */ ··· 122 ha->nvram_size); 123 spin_unlock_irqrestore(&ha->hardware_lock, flags); 124 125 - return (count); 126 } 127 128 static ssize_t ··· 174 .mode = S_IRUSR | S_IWUSR, 175 .owner = THIS_MODULE, 176 }, 177 - .size = 0, 178 .read = qla2x00_sysfs_read_nvram, 179 .write = qla2x00_sysfs_write_nvram, 180 }; 181 182 void ··· 314 struct Scsi_Host *host = ha->host; 315 316 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_fw_dump_attr); 317 - sysfs_nvram_attr.size = ha->nvram_size; 318 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr); 319 } 320 321 void ··· 327 328 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_fw_dump_attr); 329 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr); 330 } 331 332 /* Scsi_Host attributes. */ ··· 520 return strlen(buf); 521 } 522 523 static CLASS_DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, 524 NULL); 525 static CLASS_DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); ··· 578 qla2x00_zio_store); 579 static CLASS_DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show, 580 qla2x00_zio_timer_store); 581 582 struct class_device_attribute *qla2x00_host_attrs[] = { 583 &class_device_attr_driver_version, ··· 593 &class_device_attr_state, 594 &class_device_attr_zio, 595 &class_device_attr_zio_timer, 596 NULL, 597 }; 598 ··· 606 607 fc_host_port_id(shost) = ha->d_id.b.domain << 16 | 608 ha->d_id.b.area << 8 | ha->d_id.b.al_pa; 609 } 610 611 static void ··· 738 return 0; 739 } 740 741 struct fc_function_template qla2xxx_transport_functions = { 742 743 .show_host_node_name = 1, ··· 781 782 .get_host_port_id = qla2x00_get_host_port_id, 783 .show_host_port_id = 1, 784 785 .dd_fcrport_size = sizeof(struct fc_port *), 786 .show_rport_supported_classes = 1, ··· 801 .show_rport_dev_loss_tmo = 1, 802 803 .issue_fc_host_lip = qla2x00_issue_lip, 804 }; 805 806 void
··· 7 #include "qla_def.h" 8 9 #include <linux/vmalloc.h> 10 11 /* SYSFS attributes --------------------------------------------------------- */ 12 ··· 114 struct device, kobj))); 115 unsigned long flags; 116 117 + if (!capable(CAP_SYS_ADMIN) || off != 0) 118 return 0; 119 120 /* Read NVRAM. */ ··· 123 ha->nvram_size); 124 spin_unlock_irqrestore(&ha->hardware_lock, flags); 125 126 + return ha->nvram_size; 127 } 128 129 static ssize_t ··· 175 .mode = S_IRUSR | S_IWUSR, 176 .owner = THIS_MODULE, 177 }, 178 + .size = 512, 179 .read = qla2x00_sysfs_read_nvram, 180 .write = qla2x00_sysfs_write_nvram, 181 + }; 182 + 183 + static ssize_t 184 + qla2x00_sysfs_read_optrom(struct kobject *kobj, char *buf, loff_t off, 185 + size_t count) 186 + { 187 + struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj, 188 + struct device, kobj))); 189 + 190 + if (ha->optrom_state != QLA_SREADING) 191 + return 0; 192 + if (off > ha->optrom_size) 193 + return 0; 194 + if (off + count > ha->optrom_size) 195 + count = ha->optrom_size - off; 196 + 197 + memcpy(buf, &ha->optrom_buffer[off], count); 198 + 199 + return count; 200 + } 201 + 202 + static ssize_t 203 + qla2x00_sysfs_write_optrom(struct kobject *kobj, char *buf, loff_t off, 204 + size_t count) 205 + { 206 + struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj, 207 + struct device, kobj))); 208 + 209 + if (ha->optrom_state != QLA_SWRITING) 210 + return -EINVAL; 211 + if (off > ha->optrom_size) 212 + return -ERANGE; 213 + if (off + count > ha->optrom_size) 214 + count = ha->optrom_size - off; 215 + 216 + memcpy(&ha->optrom_buffer[off], buf, count); 217 + 218 + return count; 219 + } 220 + 221 + static struct bin_attribute sysfs_optrom_attr = { 222 + .attr = { 223 + .name = "optrom", 224 + .mode = S_IRUSR | S_IWUSR, 225 + .owner = THIS_MODULE, 226 + }, 227 + .size = OPTROM_SIZE_24XX, 228 + .read = qla2x00_sysfs_read_optrom, 229 + .write = qla2x00_sysfs_write_optrom, 230 + }; 231 + 232 + static ssize_t 233 + qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj, char *buf, loff_t off, 234 + size_t count) 235 + { 236 + struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj, 237 + struct device, kobj))); 238 + int val; 239 + 240 + if (off) 241 + return 0; 242 + 243 + if (sscanf(buf, "%d", &val) != 1) 244 + return -EINVAL; 245 + 246 + switch (val) { 247 + case 0: 248 + if (ha->optrom_state != QLA_SREADING && 249 + ha->optrom_state != QLA_SWRITING) 250 + break; 251 + 252 + ha->optrom_state = QLA_SWAITING; 253 + vfree(ha->optrom_buffer); 254 + ha->optrom_buffer = NULL; 255 + break; 256 + case 1: 257 + if (ha->optrom_state != QLA_SWAITING) 258 + break; 259 + 260 + ha->optrom_state = QLA_SREADING; 261 + ha->optrom_buffer = (uint8_t *)vmalloc(ha->optrom_size); 262 + if (ha->optrom_buffer == NULL) { 263 + qla_printk(KERN_WARNING, ha, 264 + "Unable to allocate memory for optrom retrieval " 265 + "(%x).\n", ha->optrom_size); 266 + 267 + ha->optrom_state = QLA_SWAITING; 268 + return count; 269 + } 270 + 271 + memset(ha->optrom_buffer, 0, ha->optrom_size); 272 + ha->isp_ops.read_optrom(ha, ha->optrom_buffer, 0, 273 + ha->optrom_size); 274 + break; 275 + case 2: 276 + if (ha->optrom_state != QLA_SWAITING) 277 + break; 278 + 279 + ha->optrom_state = QLA_SWRITING; 280 + ha->optrom_buffer = (uint8_t *)vmalloc(ha->optrom_size); 281 + if (ha->optrom_buffer == NULL) { 282 + qla_printk(KERN_WARNING, ha, 283 + "Unable to allocate memory for optrom update " 284 + "(%x).\n", ha->optrom_size); 285 + 286 + ha->optrom_state = QLA_SWAITING; 287 + 
return count; 288 + } 289 + memset(ha->optrom_buffer, 0, ha->optrom_size); 290 + break; 291 + case 3: 292 + if (ha->optrom_state != QLA_SWRITING) 293 + break; 294 + 295 + ha->isp_ops.write_optrom(ha, ha->optrom_buffer, 0, 296 + ha->optrom_size); 297 + break; 298 + } 299 + return count; 300 + } 301 + 302 + static struct bin_attribute sysfs_optrom_ctl_attr = { 303 + .attr = { 304 + .name = "optrom_ctl", 305 + .mode = S_IWUSR, 306 + .owner = THIS_MODULE, 307 + }, 308 + .size = 0, 309 + .write = qla2x00_sysfs_write_optrom_ctl, 310 }; 311 312 void ··· 186 struct Scsi_Host *host = ha->host; 187 188 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_fw_dump_attr); 189 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr); 190 + sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_optrom_attr); 191 + sysfs_create_bin_file(&host->shost_gendev.kobj, 192 + &sysfs_optrom_ctl_attr); 193 } 194 195 void ··· 197 198 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_fw_dump_attr); 199 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr); 200 + sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_optrom_attr); 201 + sysfs_remove_bin_file(&host->shost_gendev.kobj, 202 + &sysfs_optrom_ctl_attr); 203 + 204 + if (ha->beacon_blink_led == 1) 205 + ha->isp_ops.beacon_off(ha); 206 } 207 208 /* Scsi_Host attributes. */ ··· 384 return strlen(buf); 385 } 386 387 + static ssize_t 388 + qla2x00_beacon_show(struct class_device *cdev, char *buf) 389 + { 390 + scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev)); 391 + int len = 0; 392 + 393 + if (ha->beacon_blink_led) 394 + len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n"); 395 + else 396 + len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n"); 397 + return len; 398 + } 399 + 400 + static ssize_t 401 + qla2x00_beacon_store(struct class_device *cdev, const char *buf, 402 + size_t count) 403 + { 404 + scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev)); 405 + int val = 0; 406 + int rval; 407 + 408 + if (IS_QLA2100(ha) || IS_QLA2200(ha)) 409 + return -EPERM; 410 + 411 + if (test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags)) { 412 + qla_printk(KERN_WARNING, ha, 413 + "Abort ISP active -- ignoring beacon request.\n"); 414 + return -EBUSY; 415 + } 416 + 417 + if (sscanf(buf, "%d", &val) != 1) 418 + return -EINVAL; 419 + 420 + if (val) 421 + rval = ha->isp_ops.beacon_on(ha); 422 + else 423 + rval = ha->isp_ops.beacon_off(ha); 424 + 425 + if (rval != QLA_SUCCESS) 426 + count = 0; 427 + 428 + return count; 429 + } 430 + 431 static CLASS_DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, 432 NULL); 433 static CLASS_DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); ··· 398 qla2x00_zio_store); 399 static CLASS_DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show, 400 qla2x00_zio_timer_store); 401 + static CLASS_DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show, 402 + qla2x00_beacon_store); 403 404 struct class_device_attribute *qla2x00_host_attrs[] = { 405 &class_device_attr_driver_version, ··· 411 &class_device_attr_state, 412 &class_device_attr_zio, 413 &class_device_attr_zio_timer, 414 + &class_device_attr_beacon, 415 NULL, 416 }; 417 ··· 423 424 fc_host_port_id(shost) = ha->d_id.b.domain << 16 | 425 ha->d_id.b.area << 8 | ha->d_id.b.al_pa; 426 + } 427 + 428 + static void 429 + qla2x00_get_host_speed(struct Scsi_Host *shost) 430 + { 431 + scsi_qla_host_t *ha = to_qla_host(shost); 432 + uint32_t speed = 0; 433 + 434 + switch (ha->link_data_rate) { 435 + case LDR_1GB: 436 + speed = 1; 437 
+ break; 438 + case LDR_2GB: 439 + speed = 2; 440 + break; 441 + case LDR_4GB: 442 + speed = 4; 443 + break; 444 + } 445 + fc_host_speed(shost) = speed; 446 + } 447 + 448 + static void 449 + qla2x00_get_host_port_type(struct Scsi_Host *shost) 450 + { 451 + scsi_qla_host_t *ha = to_qla_host(shost); 452 + uint32_t port_type = FC_PORTTYPE_UNKNOWN; 453 + 454 + switch (ha->current_topology) { 455 + case ISP_CFG_NL: 456 + port_type = FC_PORTTYPE_LPORT; 457 + break; 458 + case ISP_CFG_FL: 459 + port_type = FC_PORTTYPE_NLPORT; 460 + break; 461 + case ISP_CFG_N: 462 + port_type = FC_PORTTYPE_PTP; 463 + break; 464 + case ISP_CFG_F: 465 + port_type = FC_PORTTYPE_NPORT; 466 + break; 467 + } 468 + fc_host_port_type(shost) = port_type; 469 } 470 471 static void ··· 512 return 0; 513 } 514 515 + static struct fc_host_statistics * 516 + qla2x00_get_fc_host_stats(struct Scsi_Host *shost) 517 + { 518 + scsi_qla_host_t *ha = to_qla_host(shost); 519 + int rval; 520 + uint16_t mb_stat[1]; 521 + link_stat_t stat_buf; 522 + struct fc_host_statistics *pfc_host_stat; 523 + 524 + pfc_host_stat = &ha->fc_host_stat; 525 + memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics)); 526 + 527 + if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) { 528 + rval = qla24xx_get_isp_stats(ha, (uint32_t *)&stat_buf, 529 + sizeof(stat_buf) / 4, mb_stat); 530 + } else { 531 + rval = qla2x00_get_link_status(ha, ha->loop_id, &stat_buf, 532 + mb_stat); 533 + } 534 + if (rval != 0) { 535 + qla_printk(KERN_WARNING, ha, 536 + "Unable to retrieve host statistics (%d).\n", mb_stat[0]); 537 + return pfc_host_stat; 538 + } 539 + 540 + pfc_host_stat->link_failure_count = stat_buf.link_fail_cnt; 541 + pfc_host_stat->loss_of_sync_count = stat_buf.loss_sync_cnt; 542 + pfc_host_stat->loss_of_signal_count = stat_buf.loss_sig_cnt; 543 + pfc_host_stat->prim_seq_protocol_err_count = stat_buf.prim_seq_err_cnt; 544 + pfc_host_stat->invalid_tx_word_count = stat_buf.inval_xmit_word_cnt; 545 + pfc_host_stat->invalid_crc_count = stat_buf.inval_crc_cnt; 546 + 547 + return pfc_host_stat; 548 + } 549 + 550 struct fc_function_template qla2xxx_transport_functions = { 551 552 .show_host_node_name = 1, ··· 520 521 .get_host_port_id = qla2x00_get_host_port_id, 522 .show_host_port_id = 1, 523 + .get_host_speed = qla2x00_get_host_speed, 524 + .show_host_speed = 1, 525 + .get_host_port_type = qla2x00_get_host_port_type, 526 + .show_host_port_type = 1, 527 528 .dd_fcrport_size = sizeof(struct fc_port *), 529 .show_rport_supported_classes = 1, ··· 536 .show_rport_dev_loss_tmo = 1, 537 538 .issue_fc_host_lip = qla2x00_issue_lip, 539 + .get_fc_host_stats = qla2x00_get_fc_host_stats, 540 }; 541 542 void
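
The optrom/optrom_ctl pair above is a small state machine driven from userspace: writing 1 to optrom_ctl snapshots the flash into a vmalloc'd buffer for reads, 0 frees it, 2 stages a buffer for writing, and 3 burns the staged image. A hedged sketch of the read side follows; the sysfs paths are assumptions for illustration (they depend on the host number and sysfs layout), not documented interfaces.

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Paths assumed for illustration; substitute the real hostN entry. */
    #define CTL "/sys/class/scsi_host/host0/device/optrom_ctl"
    #define ROM "/sys/class/scsi_host/host0/device/optrom"

    static int ctl_write(const char *cmd)
    {
        int fd = open(CTL, O_WRONLY);
        ssize_t n = -1;

        if (fd >= 0) {
            n = write(fd, cmd, 1);
            close(fd);
        }
        return n == 1 ? 0 : -1;
    }

    int main(void)
    {
        char buf[4096];
        ssize_t n;
        int fd;
        FILE *out = fopen("optrom.bin", "wb");

        if (!out || ctl_write("1"))        /* QLA_SWAITING -> QLA_SREADING */
            return 1;
        fd = open(ROM, O_RDONLY);
        while (fd >= 0 && (n = read(fd, buf, sizeof(buf))) > 0)
            fwrite(buf, 1, (size_t)n, out);
        if (fd >= 0)
            close(fd);
        fclose(out);
        return ctl_write("0") ? 1 : 0;     /* free the snapshot buffer */
    }
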
+42 -2
drivers/scsi/qla2xxx/qla_def.h
··· 29 #include <scsi/scsi_host.h> 30 #include <scsi/scsi_device.h> 31 #include <scsi/scsi_cmnd.h> 32 33 #if defined(CONFIG_SCSI_QLA2XXX_EMBEDDED_FIRMWARE) 34 #if defined(CONFIG_SCSI_QLA21XX) || defined(CONFIG_SCSI_QLA21XX_MODULE) ··· 180 #define WRT_REG_BYTE(addr, data) writeb(data,addr) 181 #define WRT_REG_WORD(addr, data) writew(data,addr) 182 #define WRT_REG_DWORD(addr, data) writel(data,addr) 183 184 /* 185 * Fibre Channel device definitions. ··· 440 #define GPIO_LED_GREEN_ON_AMBER_OFF 0x0040 441 #define GPIO_LED_GREEN_OFF_AMBER_ON 0x0080 442 #define GPIO_LED_GREEN_ON_AMBER_ON 0x00C0 443 444 union { 445 struct { ··· 2210 2211 void (*fw_dump) (struct scsi_qla_host *, int); 2212 void (*ascii_fw_dump) (struct scsi_qla_host *); 2213 }; 2214 2215 /* ··· 2351 uint16_t min_external_loopid; /* First external loop Id */ 2352 2353 uint16_t link_data_rate; /* F/W operating speed */ 2354 2355 uint8_t current_topology; 2356 uint8_t prev_topology; ··· 2510 uint8_t *port_name; 2511 uint32_t isp_abort_cnt; 2512 2513 /* Needed for BEACON */ 2514 uint16_t beacon_blink_led; 2515 - uint16_t beacon_green_on; 2516 2517 uint16_t zio_mode; 2518 uint16_t zio_timer; 2519 } scsi_qla_host_t; 2520 2521 ··· 2595 /* 2596 * Flash support definitions 2597 */ 2598 - #define FLASH_IMAGE_SIZE 131072 2599 2600 #include "qla_gbl.h" 2601 #include "qla_dbg.h"
···
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_device.h>
31 #include <scsi/scsi_cmnd.h>
32 + #include <scsi/scsi_transport_fc.h>
33
34 #if defined(CONFIG_SCSI_QLA2XXX_EMBEDDED_FIRMWARE)
35 #if defined(CONFIG_SCSI_QLA21XX) || defined(CONFIG_SCSI_QLA21XX_MODULE)
···
179 #define WRT_REG_BYTE(addr, data) writeb(data,addr)
180 #define WRT_REG_WORD(addr, data) writew(data,addr)
181 #define WRT_REG_DWORD(addr, data) writel(data,addr)
182 +
183 + /*
184 + * The ISP2312 v2 chip cannot access the FLASH/GPIO registers via MMIO in a
185 + * 133MHz slot.
186 + */
187 + #define RD_REG_WORD_PIO(addr) (inw((unsigned long)addr))
188 + #define WRT_REG_WORD_PIO(addr, data) (outw(data,(unsigned long)addr))
189
190 /*
191 * Fibre Channel device definitions.
···
432 #define GPIO_LED_GREEN_ON_AMBER_OFF 0x0040
433 #define GPIO_LED_GREEN_OFF_AMBER_ON 0x0080
434 #define GPIO_LED_GREEN_ON_AMBER_ON 0x00C0
435 + #define GPIO_LED_ALL_OFF 0x0000
436 + #define GPIO_LED_RED_ON_OTHER_OFF 0x0001 /* isp2322 */
437 + #define GPIO_LED_RGA_ON 0x00C1 /* isp2322: red green amber */
438
439 union {
440 struct {
···
2199
2200 void (*fw_dump) (struct scsi_qla_host *, int);
2201 void (*ascii_fw_dump) (struct scsi_qla_host *);
2202 +
2203 + int (*beacon_on) (struct scsi_qla_host *);
2204 + int (*beacon_off) (struct scsi_qla_host *);
2205 + void (*beacon_blink) (struct scsi_qla_host *);
2206 +
2207 + uint8_t * (*read_optrom) (struct scsi_qla_host *, uint8_t *,
2208 + uint32_t, uint32_t);
2209 + int (*write_optrom) (struct scsi_qla_host *, uint8_t *, uint32_t,
2210 + uint32_t);
2211 };
2212
2213 /*
···
2331 uint16_t min_external_loopid; /* First external loop Id */
2332
2333 uint16_t link_data_rate; /* F/W operating speed */
2334 + #define LDR_1GB 0
2335 + #define LDR_2GB 1
2336 + #define LDR_4GB 3
2337 + #define LDR_UNKNOWN 0xFFFF
2338
2339 uint8_t current_topology;
2340 uint8_t prev_topology;
···
2486 uint8_t *port_name;
2487 uint32_t isp_abort_cnt;
2488
2489 + /* Option ROM information. */
2490 + char *optrom_buffer;
2491 + uint32_t optrom_size;
2492 + int optrom_state;
2493 + #define QLA_SWAITING 0
2494 + #define QLA_SREADING 1
2495 + #define QLA_SWRITING 2
2496 +
2497 /* Needed for BEACON */
2498 uint16_t beacon_blink_led;
2499 + uint8_t beacon_color_state;
2500 + #define QLA_LED_GRN_ON 0x01
2501 + #define QLA_LED_YLW_ON 0x02
2502 + #define QLA_LED_ABR_ON 0x04
2503 + #define QLA_LED_ALL_ON 0x07 /* yellow, green, amber. */
2504 + /* ISP2322: red, green, amber. */
2505
2506 uint16_t zio_mode;
2507 uint16_t zio_timer;
2508 + struct fc_host_statistics fc_host_stat;
2509 } scsi_qla_host_t;
2510
2511
···
2557 /*
2558 * Flash support definitions
2559 */
2560 + #define OPTROM_SIZE_2300 0x20000
2561 + #define OPTROM_SIZE_2322 0x100000
2562 + #define OPTROM_SIZE_24XX 0x100000
2563
2564 #include "qla_gbl.h"
2565 #include "qla_dbg.h"
+25 -2
drivers/scsi/qla2xxx/qla_gbl.h
··· 75 extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int); 76 extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *, int); 77 78 - extern void qla2x00_blink_led(scsi_qla_host_t *); 79 - 80 extern int qla2x00_down_timeout(struct semaphore *, unsigned long); 81 82 extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *); 83 84 /* 85 * Global Function Prototypes in qla_iocb.c source file. ··· 185 extern int 186 qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map); 187 188 extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *); 189 extern int qla24xx_abort_target(fc_port_t *); 190 ··· 234 uint32_t); 235 extern int qla24xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, 236 uint32_t); 237 238 /* 239 * Global Function Prototypes in qla_dbg.c source file.
··· 75 extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int); 76 extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *, int); 77 78 extern int qla2x00_down_timeout(struct semaphore *, unsigned long); 79 80 extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *); 81 + 82 + extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *); 83 84 /* 85 * Global Function Prototypes in qla_iocb.c source file. ··· 185 extern int 186 qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map); 187 188 + extern int 189 + qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, link_stat_t *, 190 + uint16_t *); 191 + 192 + extern int 193 + qla24xx_get_isp_stats(scsi_qla_host_t *, uint32_t *, uint32_t, uint16_t *); 194 + 195 extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *); 196 extern int qla24xx_abort_target(fc_port_t *); 197 ··· 227 uint32_t); 228 extern int qla24xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, 229 uint32_t); 230 + 231 + extern int qla2x00_beacon_on(struct scsi_qla_host *); 232 + extern int qla2x00_beacon_off(struct scsi_qla_host *); 233 + extern void qla2x00_beacon_blink(struct scsi_qla_host *); 234 + extern int qla24xx_beacon_on(struct scsi_qla_host *); 235 + extern int qla24xx_beacon_off(struct scsi_qla_host *); 236 + extern void qla24xx_beacon_blink(struct scsi_qla_host *); 237 + 238 + extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *, 239 + uint32_t, uint32_t); 240 + extern int qla2x00_write_optrom_data(struct scsi_qla_host *, uint8_t *, 241 + uint32_t, uint32_t); 242 + extern uint8_t *qla24xx_read_optrom_data(struct scsi_qla_host *, uint8_t *, 243 + uint32_t, uint32_t); 244 + extern int qla24xx_write_optrom_data(struct scsi_qla_host *, uint8_t *, 245 + uint32_t, uint32_t); 246 247 /* 248 * Global Function Prototypes in qla_dbg.c source file.
-1
drivers/scsi/qla2xxx/qla_init.c
··· 8 9 #include <linux/delay.h> 10 #include <linux/vmalloc.h> 11 - #include <scsi/scsi_transport_fc.h> 12 13 #include "qla_devtbl.h" 14
··· 8 9 #include <linux/delay.h> 10 #include <linux/vmalloc.h> 11 12 #include "qla_devtbl.h" 13
+1
drivers/scsi/qla2xxx/qla_iocb.c
··· 814 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 815 816 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); 817 818 /* Load SCSI command packet. */ 819 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
··· 814 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 815 816 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); 817 + host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 818 819 /* Load SCSI command packet. */ 820 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
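
The added host_to_fcp_swap() call rewrites the LUN that int_to_scsilun() just built, because the SAM LUN layout is big-endian on the wire while the ISP24xx firmware picks the field up in host dword order. Below is a sketch of a dword-wise byte swap of an 8-byte LUN; the assumption that the driver's helper swaps within each 32-bit word is mine, taken from its usual definition, so treat the exact granularity as illustrative.

    #include <stdint.h>
    #include <stdio.h>

    /* Byte-swap each 32-bit word of a buffer in place -- a stand-in
     * for the dword-wise swap applied to the FCP LUN field. */
    static void swap_dwords(uint8_t *buf, uint32_t bsize)
    {
        uint32_t i;
        uint8_t t;

        for (i = 0; i + 4 <= bsize; i += 4) {
            t = buf[i];     buf[i]     = buf[i + 3]; buf[i + 3] = t;
            t = buf[i + 1]; buf[i + 1] = buf[i + 2]; buf[i + 2] = t;
        }
    }

    int main(void)
    {
        /* SAM single-level LUN 5: byte 1 carries the LUN number. */
        uint8_t lun[8] = { 0x00, 0x05, 0, 0, 0, 0, 0, 0 };
        int i;

        swap_dwords(lun, sizeof(lun));
        for (i = 0; i < 8; i++)
            printf("%02x ", lun[i]);
        printf("\n");   /* prints: 00 00 05 00 00 00 00 00 */
        return 0;
    }
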
+2 -2
drivers/scsi/qla2xxx/qla_isr.c
··· 402 break; 403 404 case MBA_LOOP_UP: /* Loop Up Event */ 405 - ha->link_data_rate = 0; 406 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 407 link_speed = link_speeds[0]; 408 } else { 409 link_speed = link_speeds[LS_UNKNOWN]; 410 if (mb[1] < 5) ··· 436 } 437 438 ha->flags.management_server_logged_in = 0; 439 - ha->link_data_rate = 0; 440 if (ql2xfdmienable) 441 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); 442
··· 402 break; 403 404 case MBA_LOOP_UP: /* Loop Up Event */ 405 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 406 link_speed = link_speeds[0]; 407 + ha->link_data_rate = LDR_1GB; 408 } else { 409 link_speed = link_speeds[LS_UNKNOWN]; 410 if (mb[1] < 5) ··· 436 } 437 438 ha->flags.management_server_logged_in = 0; 439 + ha->link_data_rate = LDR_UNKNOWN; 440 if (ql2xfdmienable) 441 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); 442
+104 -4
drivers/scsi/qla2xxx/qla_mbx.c
··· 7 #include "qla_def.h" 8 9 #include <linux/delay.h> 10 - #include <scsi/scsi_transport_fc.h> 11 12 static void 13 qla2x00_mbx_sem_timeout(unsigned long data) ··· 1873 mcp->mb[3] = LSW(id_list_dma); 1874 mcp->mb[6] = MSW(MSD(id_list_dma)); 1875 mcp->mb[7] = LSW(MSD(id_list_dma)); 1876 - mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2; 1877 } else { 1878 mcp->mb[1] = MSW(id_list_dma); 1879 mcp->mb[2] = LSW(id_list_dma); ··· 2017 2018 return rval; 2019 } 2020 2021 - uint8_t 2022 qla24xx_get_isp_stats(scsi_qla_host_t *ha, uint32_t *dwbuf, uint32_t dwords, 2023 uint16_t *status) 2024 { ··· 2181 2182 return rval; 2183 } 2184 - #endif 2185 2186 int 2187 qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
··· 7 #include "qla_def.h" 8 9 #include <linux/delay.h> 10 11 static void 12 qla2x00_mbx_sem_timeout(unsigned long data) ··· 1874 mcp->mb[3] = LSW(id_list_dma); 1875 mcp->mb[6] = MSW(MSD(id_list_dma)); 1876 mcp->mb[7] = LSW(MSD(id_list_dma)); 1877 + mcp->mb[8] = 0; 1878 + mcp->out_mb |= MBX_8|MBX_7|MBX_6|MBX_3|MBX_2; 1879 } else { 1880 mcp->mb[1] = MSW(id_list_dma); 1881 mcp->mb[2] = LSW(id_list_dma); ··· 2017 2018 return rval; 2019 } 2020 + #endif 2021 2022 + /* 2023 + * qla2x00_get_link_status 2024 + * 2025 + * Input: 2026 + * ha = adapter block pointer. 2027 + * loop_id = device loop ID. 2028 + * ret_buf = pointer to link status return buffer. 2029 + * 2030 + * Returns: 2031 + * 0 = success. 2032 + * BIT_0 = mem alloc error. 2033 + * BIT_1 = mailbox error. 2034 + */ 2035 + int 2036 + qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id, 2037 + link_stat_t *ret_buf, uint16_t *status) 2038 + { 2039 + int rval; 2040 + mbx_cmd_t mc; 2041 + mbx_cmd_t *mcp = &mc; 2042 + link_stat_t *stat_buf; 2043 + dma_addr_t stat_buf_dma; 2044 + 2045 + DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no);) 2046 + 2047 + stat_buf = dma_pool_alloc(ha->s_dma_pool, GFP_ATOMIC, &stat_buf_dma); 2048 + if (stat_buf == NULL) { 2049 + DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n", 2050 + __func__, ha->host_no)); 2051 + return BIT_0; 2052 + } 2053 + memset(stat_buf, 0, sizeof(link_stat_t)); 2054 + 2055 + mcp->mb[0] = MBC_GET_LINK_STATUS; 2056 + mcp->mb[2] = MSW(stat_buf_dma); 2057 + mcp->mb[3] = LSW(stat_buf_dma); 2058 + mcp->mb[6] = MSW(MSD(stat_buf_dma)); 2059 + mcp->mb[7] = LSW(MSD(stat_buf_dma)); 2060 + mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 2061 + mcp->in_mb = MBX_0; 2062 + if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) { 2063 + mcp->mb[1] = loop_id; 2064 + mcp->mb[4] = 0; 2065 + mcp->mb[10] = 0; 2066 + mcp->out_mb |= MBX_10|MBX_4|MBX_1; 2067 + mcp->in_mb |= MBX_1; 2068 + } else if (HAS_EXTENDED_IDS(ha)) { 2069 + mcp->mb[1] = loop_id; 2070 + mcp->mb[10] = 0; 2071 + mcp->out_mb |= MBX_10|MBX_1; 2072 + } else { 2073 + mcp->mb[1] = loop_id << 8; 2074 + mcp->out_mb |= MBX_1; 2075 + } 2076 + mcp->tov = 30; 2077 + mcp->flags = IOCTL_CMD; 2078 + rval = qla2x00_mailbox_command(ha, mcp); 2079 + 2080 + if (rval == QLA_SUCCESS) { 2081 + if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 2082 + DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n", 2083 + __func__, ha->host_no, mcp->mb[0]);) 2084 + status[0] = mcp->mb[0]; 2085 + rval = BIT_1; 2086 + } else { 2087 + /* copy over data -- firmware data is LE. */ 2088 + ret_buf->link_fail_cnt = 2089 + le32_to_cpu(stat_buf->link_fail_cnt); 2090 + ret_buf->loss_sync_cnt = 2091 + le32_to_cpu(stat_buf->loss_sync_cnt); 2092 + ret_buf->loss_sig_cnt = 2093 + le32_to_cpu(stat_buf->loss_sig_cnt); 2094 + ret_buf->prim_seq_err_cnt = 2095 + le32_to_cpu(stat_buf->prim_seq_err_cnt); 2096 + ret_buf->inval_xmit_word_cnt = 2097 + le32_to_cpu(stat_buf->inval_xmit_word_cnt); 2098 + ret_buf->inval_crc_cnt = 2099 + le32_to_cpu(stat_buf->inval_crc_cnt); 2100 + 2101 + DEBUG11(printk("%s(%ld): stat dump: fail_cnt=%d " 2102 + "loss_sync=%d loss_sig=%d seq_err=%d " 2103 + "inval_xmt_word=%d inval_crc=%d.\n", __func__, 2104 + ha->host_no, stat_buf->link_fail_cnt, 2105 + stat_buf->loss_sync_cnt, stat_buf->loss_sig_cnt, 2106 + stat_buf->prim_seq_err_cnt, 2107 + stat_buf->inval_xmit_word_cnt, 2108 + stat_buf->inval_crc_cnt);) 2109 + } 2110 + } else { 2111 + /* Failed. 
*/ 2112 + DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2113 + ha->host_no, rval);) 2114 + rval = BIT_1; 2115 + } 2116 + 2117 + dma_pool_free(ha->s_dma_pool, stat_buf, stat_buf_dma); 2118 + 2119 + return rval; 2120 + } 2121 + 2122 + int 2123 qla24xx_get_isp_stats(scsi_qla_host_t *ha, uint32_t *dwbuf, uint32_t dwords, 2124 uint16_t *status) 2125 { ··· 2080 2081 return rval; 2082 } 2083 2084 int 2085 qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
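
Note how qla2x00_get_link_status() copies every counter out of the DMA buffer through le32_to_cpu(): the firmware writes the statistics little-endian regardless of host byte order, so on a big-endian host a raw struct copy would scramble them. The same guarantee written out by hand in portable C:

    #include <stdint.h>
    #include <stdio.h>

    /* Portable read of a little-endian 32-bit counter, independent of
     * host endianness (what le32_to_cpu guarantees in the kernel). */
    static uint32_t get_le32(const uint8_t *p)
    {
        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    int main(void)
    {
        const uint8_t wire[4] = { 0x2a, 0x00, 0x00, 0x00 }; /* LE 42 */

        printf("link_fail_cnt = %u\n", get_le32(wire));
        return 0;
    }
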
+42 -1
drivers/scsi/qla2xxx/qla_os.c
··· 366 goto qc_fail_command; 367 } 368 369 if (atomic_read(&fcport->state) != FCS_ONLINE) { 370 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 371 atomic_read(&ha->loop_state) == LOOP_DEAD) { ··· 424 rval = fc_remote_port_chkready(rport); 425 if (rval) { 426 cmd->result = rval; 427 goto qc24_fail_command; 428 } 429 ··· 525 * Success (Adapter is online) : 0 526 * Failed (Adapter is offline/disabled) : 1 527 */ 528 - static int 529 qla2x00_wait_for_hba_online(scsi_qla_host_t *ha) 530 { 531 int return_status; ··· 1324 ha->ports = MAX_BUSES; 1325 ha->init_cb_size = sizeof(init_cb_t); 1326 ha->mgmt_svr_loop_id = MANAGEMENT_SERVER; 1327 1328 /* Assign ISP specific operations. */ 1329 ha->isp_ops.pci_config = qla2100_pci_config; ··· 1353 ha->isp_ops.write_nvram = qla2x00_write_nvram_data; 1354 ha->isp_ops.fw_dump = qla2100_fw_dump; 1355 ha->isp_ops.ascii_fw_dump = qla2100_ascii_fw_dump; 1356 if (IS_QLA2100(ha)) { 1357 host->max_id = MAX_TARGETS_2100; 1358 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; ··· 1380 ha->isp_ops.intr_handler = qla2300_intr_handler; 1381 ha->isp_ops.fw_dump = qla2300_fw_dump; 1382 ha->isp_ops.ascii_fw_dump = qla2300_ascii_fw_dump; 1383 ha->gid_list_info_size = 6; 1384 } else if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) { 1385 host->max_id = MAX_TARGETS_2200; 1386 ha->mbx_count = MAILBOX_REGISTER_COUNT; ··· 1421 ha->isp_ops.write_nvram = qla24xx_write_nvram_data; 1422 ha->isp_ops.fw_dump = qla24xx_fw_dump; 1423 ha->isp_ops.ascii_fw_dump = qla24xx_ascii_fw_dump; 1424 ha->gid_list_info_size = 8; 1425 } 1426 host->can_queue = ha->request_q_length + 128; 1427 ··· 1684 spin_lock_irqsave(&fcport->rport_lock, flags); 1685 fcport->drport = rport; 1686 fcport->rport = NULL; 1687 spin_unlock_irqrestore(&fcport->rport_lock, flags); 1688 set_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags); 1689 } else { 1690 spin_lock_irqsave(&fcport->rport_lock, flags); 1691 fcport->rport = NULL; 1692 spin_unlock_irqrestore(&fcport->rport_lock, flags); 1693 fc_remote_port_delete(rport); 1694 } ··· 2095 ha->fw_dumped = 0; 2096 ha->fw_dump_reading = 0; 2097 ha->fw_dump_buffer = NULL; 2098 } 2099 2100 /* ··· 2345 if (!ha->interrupts_on) 2346 ha->isp_ops.enable_intrs(ha); 2347 2348 ha->dpc_active = 0; 2349 } /* End of while(1) */ 2350 ··· 2525 atomic_read(&ha->loop_down_timer))); 2526 } 2527 2528 /* Schedule the DPC routine if needed */ 2529 if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || 2530 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || ··· 2539 start_dpc || 2540 test_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags) || 2541 test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) || 2542 test_bit(RELOGIN_NEEDED, &ha->dpc_flags)) && 2543 ha->dpc_wait && !ha->dpc_active) { 2544
··· 366 goto qc_fail_command; 367 } 368 369 + /* Close window on fcport/rport state-transitioning. */ 370 + if (!*(fc_port_t **)rport->dd_data) { 371 + cmd->result = DID_IMM_RETRY << 16; 372 + goto qc_fail_command; 373 + } 374 + 375 if (atomic_read(&fcport->state) != FCS_ONLINE) { 376 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 377 atomic_read(&ha->loop_state) == LOOP_DEAD) { ··· 418 rval = fc_remote_port_chkready(rport); 419 if (rval) { 420 cmd->result = rval; 421 + goto qc24_fail_command; 422 + } 423 + 424 + /* Close window on fcport/rport state-transitioning. */ 425 + if (!*(fc_port_t **)rport->dd_data) { 426 + cmd->result = DID_IMM_RETRY << 16; 427 goto qc24_fail_command; 428 } 429 ··· 513 * Success (Adapter is online) : 0 514 * Failed (Adapter is offline/disabled) : 1 515 */ 516 + int 517 qla2x00_wait_for_hba_online(scsi_qla_host_t *ha) 518 { 519 int return_status; ··· 1312 ha->ports = MAX_BUSES; 1313 ha->init_cb_size = sizeof(init_cb_t); 1314 ha->mgmt_svr_loop_id = MANAGEMENT_SERVER; 1315 + ha->link_data_rate = LDR_UNKNOWN; 1316 + ha->optrom_size = OPTROM_SIZE_2300; 1317 1318 /* Assign ISP specific operations. */ 1319 ha->isp_ops.pci_config = qla2100_pci_config; ··· 1339 ha->isp_ops.write_nvram = qla2x00_write_nvram_data; 1340 ha->isp_ops.fw_dump = qla2100_fw_dump; 1341 ha->isp_ops.ascii_fw_dump = qla2100_ascii_fw_dump; 1342 + ha->isp_ops.read_optrom = qla2x00_read_optrom_data; 1343 + ha->isp_ops.write_optrom = qla2x00_write_optrom_data; 1344 if (IS_QLA2100(ha)) { 1345 host->max_id = MAX_TARGETS_2100; 1346 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; ··· 1364 ha->isp_ops.intr_handler = qla2300_intr_handler; 1365 ha->isp_ops.fw_dump = qla2300_fw_dump; 1366 ha->isp_ops.ascii_fw_dump = qla2300_ascii_fw_dump; 1367 + ha->isp_ops.beacon_on = qla2x00_beacon_on; 1368 + ha->isp_ops.beacon_off = qla2x00_beacon_off; 1369 + ha->isp_ops.beacon_blink = qla2x00_beacon_blink; 1370 ha->gid_list_info_size = 6; 1371 + if (IS_QLA2322(ha) || IS_QLA6322(ha)) 1372 + ha->optrom_size = OPTROM_SIZE_2322; 1373 } else if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) { 1374 host->max_id = MAX_TARGETS_2200; 1375 ha->mbx_count = MAILBOX_REGISTER_COUNT; ··· 1400 ha->isp_ops.write_nvram = qla24xx_write_nvram_data; 1401 ha->isp_ops.fw_dump = qla24xx_fw_dump; 1402 ha->isp_ops.ascii_fw_dump = qla24xx_ascii_fw_dump; 1403 + ha->isp_ops.read_optrom = qla24xx_read_optrom_data; 1404 + ha->isp_ops.write_optrom = qla24xx_write_optrom_data; 1405 + ha->isp_ops.beacon_on = qla24xx_beacon_on; 1406 + ha->isp_ops.beacon_off = qla24xx_beacon_off; 1407 + ha->isp_ops.beacon_blink = qla24xx_beacon_blink; 1408 ha->gid_list_info_size = 8; 1409 + ha->optrom_size = OPTROM_SIZE_24XX; 1410 } 1411 host->can_queue = ha->request_q_length + 128; 1412 ··· 1657 spin_lock_irqsave(&fcport->rport_lock, flags); 1658 fcport->drport = rport; 1659 fcport->rport = NULL; 1660 + *(fc_port_t **)rport->dd_data = NULL; 1661 spin_unlock_irqrestore(&fcport->rport_lock, flags); 1662 set_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags); 1663 } else { 1664 spin_lock_irqsave(&fcport->rport_lock, flags); 1665 fcport->rport = NULL; 1666 + *(fc_port_t **)rport->dd_data = NULL; 1667 spin_unlock_irqrestore(&fcport->rport_lock, flags); 1668 fc_remote_port_delete(rport); 1669 } ··· 2066 ha->fw_dumped = 0; 2067 ha->fw_dump_reading = 0; 2068 ha->fw_dump_buffer = NULL; 2069 + 2070 + vfree(ha->optrom_buffer); 2071 } 2072 2073 /* ··· 2314 if (!ha->interrupts_on) 2315 ha->isp_ops.enable_intrs(ha); 2316 2317 + if (test_and_clear_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags)) 2318 + 
ha->isp_ops.beacon_blink(ha); 2319 + 2320 ha->dpc_active = 0; 2321 } /* End of while(1) */ 2322 ··· 2491 atomic_read(&ha->loop_down_timer))); 2492 } 2493 2494 + /* Check if beacon LED needs to be blinked */ 2495 + if (ha->beacon_blink_led == 1) { 2496 + set_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags); 2497 + start_dpc++; 2498 + } 2499 + 2500 /* Schedule the DPC routine if needed */ 2501 if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || 2502 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || ··· 2499 start_dpc || 2500 test_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags) || 2501 test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) || 2502 + test_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags) || 2503 test_bit(RELOGIN_NEEDED, &ha->dpc_flags)) && 2504 ha->dpc_wait && !ha->dpc_active) { 2505
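
The qla_os.c hunks close a teardown race: the fc_port pointer parked in rport->dd_data is now cleared under rport_lock before the remote port is unbound, and queuecommand fails fast with DID_IMM_RETRY when it reads NULL, so the midlayer requeues the command instead of touching a half-torn-down port. A compressed sketch of the pattern, with simplified locking and illustrative types:

    #include <pthread.h>
    #include <stdio.h>

    #define DID_IMM_RETRY 0x0c   /* host byte; 0x0c in scsi.h of this era */

    struct fcport { int state; };

    struct rport {
        pthread_mutex_t lock;
        struct fcport *dd_data;  /* NULL while teardown is in flight */
    };

    /* Issue path: bail out with a retryable status if the pointer is
     * gone, instead of dereferencing a half-torn-down fcport. */
    static int queuecommand(struct rport *rport)
    {
        struct fcport *fcport = rport->dd_data;

        if (!fcport)
            return DID_IMM_RETRY << 16;
        /* ... normal command start using fcport ... */
        return 0;
    }

    /* Teardown path: clear the pointer under the lock first. */
    static void block_rport(struct rport *rport)
    {
        pthread_mutex_lock(&rport->lock);
        rport->dd_data = NULL;
        pthread_mutex_unlock(&rport->lock);
        /* ... the fc_remote_port_delete() equivalent runs after this ... */
    }

    int main(void)
    {
        struct fcport port = { .state = 1 };
        struct rport rport = { PTHREAD_MUTEX_INITIALIZER, &port };

        printf("online: %#x\n", queuecommand(&rport));
        block_rport(&rport);
        printf("torn down: %#x\n", queuecommand(&rport));
        return 0;
    }
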
-2
drivers/scsi/qla2xxx/qla_rscn.c
··· 6 */ 7 #include "qla_def.h" 8 9 - #include <scsi/scsi_transport_fc.h> 10 - 11 /** 12 * IO descriptor handle definitions. 13 *
··· 6 */ 7 #include "qla_def.h" 8 9 /** 10 * IO descriptor handle definitions. 11 *
+963
drivers/scsi/qla2xxx/qla_sup.c
··· 695 696 return ret; 697 }
··· 695 696 return ret; 697 } 698 + 699 + 700 + static inline void 701 + qla2x00_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags) 702 + { 703 + if (IS_QLA2322(ha)) { 704 + /* Flip all colors. */ 705 + if (ha->beacon_color_state == QLA_LED_ALL_ON) { 706 + /* Turn off. */ 707 + ha->beacon_color_state = 0; 708 + *pflags = GPIO_LED_ALL_OFF; 709 + } else { 710 + /* Turn on. */ 711 + ha->beacon_color_state = QLA_LED_ALL_ON; 712 + *pflags = GPIO_LED_RGA_ON; 713 + } 714 + } else { 715 + /* Flip green led only. */ 716 + if (ha->beacon_color_state == QLA_LED_GRN_ON) { 717 + /* Turn off. */ 718 + ha->beacon_color_state = 0; 719 + *pflags = GPIO_LED_GREEN_OFF_AMBER_OFF; 720 + } else { 721 + /* Turn on. */ 722 + ha->beacon_color_state = QLA_LED_GRN_ON; 723 + *pflags = GPIO_LED_GREEN_ON_AMBER_OFF; 724 + } 725 + } 726 + } 727 + 728 + void 729 + qla2x00_beacon_blink(struct scsi_qla_host *ha) 730 + { 731 + uint16_t gpio_enable; 732 + uint16_t gpio_data; 733 + uint16_t led_color = 0; 734 + unsigned long flags; 735 + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 736 + 737 + if (ha->pio_address) 738 + reg = (struct device_reg_2xxx __iomem *)ha->pio_address; 739 + 740 + spin_lock_irqsave(&ha->hardware_lock, flags); 741 + 742 + /* Save the Original GPIOE. */ 743 + if (ha->pio_address) { 744 + gpio_enable = RD_REG_WORD_PIO(&reg->gpioe); 745 + gpio_data = RD_REG_WORD_PIO(&reg->gpiod); 746 + } else { 747 + gpio_enable = RD_REG_WORD(&reg->gpioe); 748 + gpio_data = RD_REG_WORD(&reg->gpiod); 749 + } 750 + 751 + /* Set the modified gpio_enable values */ 752 + gpio_enable |= GPIO_LED_MASK; 753 + 754 + if (ha->pio_address) { 755 + WRT_REG_WORD_PIO(&reg->gpioe, gpio_enable); 756 + } else { 757 + WRT_REG_WORD(&reg->gpioe, gpio_enable); 758 + RD_REG_WORD(&reg->gpioe); 759 + } 760 + 761 + qla2x00_flip_colors(ha, &led_color); 762 + 763 + /* Clear out any previously set LED color. */ 764 + gpio_data &= ~GPIO_LED_MASK; 765 + 766 + /* Set the new input LED color to GPIOD. */ 767 + gpio_data |= led_color; 768 + 769 + /* Set the modified gpio_data values */ 770 + if (ha->pio_address) { 771 + WRT_REG_WORD_PIO(&reg->gpiod, gpio_data); 772 + } else { 773 + WRT_REG_WORD(&reg->gpiod, gpio_data); 774 + RD_REG_WORD(&reg->gpiod); 775 + } 776 + 777 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 778 + } 779 + 780 + int 781 + qla2x00_beacon_on(struct scsi_qla_host *ha) 782 + { 783 + uint16_t gpio_enable; 784 + uint16_t gpio_data; 785 + unsigned long flags; 786 + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 787 + 788 + ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; 789 + ha->fw_options[1] |= FO1_DISABLE_GPIO6_7; 790 + 791 + if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS) { 792 + qla_printk(KERN_WARNING, ha, 793 + "Unable to update fw options (beacon on).\n"); 794 + return QLA_FUNCTION_FAILED; 795 + } 796 + 797 + if (ha->pio_address) 798 + reg = (struct device_reg_2xxx __iomem *)ha->pio_address; 799 + 800 + /* Turn off LEDs. */ 801 + spin_lock_irqsave(&ha->hardware_lock, flags); 802 + if (ha->pio_address) { 803 + gpio_enable = RD_REG_WORD_PIO(&reg->gpioe); 804 + gpio_data = RD_REG_WORD_PIO(&reg->gpiod); 805 + } else { 806 + gpio_enable = RD_REG_WORD(&reg->gpioe); 807 + gpio_data = RD_REG_WORD(&reg->gpiod); 808 + } 809 + gpio_enable |= GPIO_LED_MASK; 810 + 811 + /* Set the modified gpio_enable values. 
*/ 812 + if (ha->pio_address) { 813 + WRT_REG_WORD_PIO(&reg->gpioe, gpio_enable); 814 + } else { 815 + WRT_REG_WORD(&reg->gpioe, gpio_enable); 816 + RD_REG_WORD(&reg->gpioe); 817 + } 818 + 819 + /* Clear out previously set LED colour. */ 820 + gpio_data &= ~GPIO_LED_MASK; 821 + if (ha->pio_address) { 822 + WRT_REG_WORD_PIO(&reg->gpiod, gpio_data); 823 + } else { 824 + WRT_REG_WORD(&reg->gpiod, gpio_data); 825 + RD_REG_WORD(&reg->gpiod); 826 + } 827 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 828 + 829 + /* 830 + * Let the per HBA timer kick off the blinking process based on 831 + * the following flags. No need to do anything else now. 832 + */ 833 + ha->beacon_blink_led = 1; 834 + ha->beacon_color_state = 0; 835 + 836 + return QLA_SUCCESS; 837 + } 838 + 839 + int 840 + qla2x00_beacon_off(struct scsi_qla_host *ha) 841 + { 842 + int rval = QLA_SUCCESS; 843 + 844 + ha->beacon_blink_led = 0; 845 + 846 + /* Set the on flag so when it gets flipped it will be off. */ 847 + if (IS_QLA2322(ha)) 848 + ha->beacon_color_state = QLA_LED_ALL_ON; 849 + else 850 + ha->beacon_color_state = QLA_LED_GRN_ON; 851 + 852 + ha->isp_ops.beacon_blink(ha); /* This turns green LED off */ 853 + 854 + ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; 855 + ha->fw_options[1] &= ~FO1_DISABLE_GPIO6_7; 856 + 857 + rval = qla2x00_set_fw_options(ha, ha->fw_options); 858 + if (rval != QLA_SUCCESS) 859 + qla_printk(KERN_WARNING, ha, 860 + "Unable to update fw options (beacon off).\n"); 861 + return rval; 862 + } 863 + 864 + 865 + static inline void 866 + qla24xx_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags) 867 + { 868 + /* Flip all colors. */ 869 + if (ha->beacon_color_state == QLA_LED_ALL_ON) { 870 + /* Turn off. */ 871 + ha->beacon_color_state = 0; 872 + *pflags = 0; 873 + } else { 874 + /* Turn on. */ 875 + ha->beacon_color_state = QLA_LED_ALL_ON; 876 + *pflags = GPDX_LED_YELLOW_ON | GPDX_LED_AMBER_ON; 877 + } 878 + } 879 + 880 + void 881 + qla24xx_beacon_blink(struct scsi_qla_host *ha) 882 + { 883 + uint16_t led_color = 0; 884 + uint32_t gpio_data; 885 + unsigned long flags; 886 + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 887 + 888 + /* Save the Original GPIOD. */ 889 + spin_lock_irqsave(&ha->hardware_lock, flags); 890 + gpio_data = RD_REG_DWORD(&reg->gpiod); 891 + 892 + /* Enable the gpio_data reg for update. */ 893 + gpio_data |= GPDX_LED_UPDATE_MASK; 894 + 895 + WRT_REG_DWORD(&reg->gpiod, gpio_data); 896 + gpio_data = RD_REG_DWORD(&reg->gpiod); 897 + 898 + /* Set the color bits. */ 899 + qla24xx_flip_colors(ha, &led_color); 900 + 901 + /* Clear out any previously set LED color. */ 902 + gpio_data &= ~GPDX_LED_COLOR_MASK; 903 + 904 + /* Set the new input LED color to GPIOD. */ 905 + gpio_data |= led_color; 906 + 907 + /* Set the modified gpio_data values. 
*/ 908 + WRT_REG_DWORD(&reg->gpiod, gpio_data); 909 + gpio_data = RD_REG_DWORD(&reg->gpiod); 910 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 911 + } 912 + 913 + int 914 + qla24xx_beacon_on(struct scsi_qla_host *ha) 915 + { 916 + uint32_t gpio_data; 917 + unsigned long flags; 918 + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 919 + 920 + if (ha->beacon_blink_led == 0) { 921 + /* Enable firmware for update */ 922 + ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL; 923 + 924 + if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS) 925 + return QLA_FUNCTION_FAILED; 926 + 927 + if (qla2x00_get_fw_options(ha, ha->fw_options) != 928 + QLA_SUCCESS) { 929 + qla_printk(KERN_WARNING, ha, 930 + "Unable to update fw options (beacon on).\n"); 931 + return QLA_FUNCTION_FAILED; 932 + } 933 + 934 + spin_lock_irqsave(&ha->hardware_lock, flags); 935 + gpio_data = RD_REG_DWORD(&reg->gpiod); 936 + 937 + /* Enable the gpio_data reg for update. */ 938 + gpio_data |= GPDX_LED_UPDATE_MASK; 939 + WRT_REG_DWORD(&reg->gpiod, gpio_data); 940 + RD_REG_DWORD(&reg->gpiod); 941 + 942 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 943 + } 944 + 945 + /* So all colors blink together. */ 946 + ha->beacon_color_state = 0; 947 + 948 + /* Let the per HBA timer kick off the blinking process. */ 949 + ha->beacon_blink_led = 1; 950 + 951 + return QLA_SUCCESS; 952 + } 953 + 954 + int 955 + qla24xx_beacon_off(struct scsi_qla_host *ha) 956 + { 957 + uint32_t gpio_data; 958 + unsigned long flags; 959 + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 960 + 961 + ha->beacon_blink_led = 0; 962 + ha->beacon_color_state = QLA_LED_ALL_ON; 963 + 964 + ha->isp_ops.beacon_blink(ha); /* Will flip to all off. */ 965 + 966 + /* Give control back to firmware. */ 967 + spin_lock_irqsave(&ha->hardware_lock, flags); 968 + gpio_data = RD_REG_DWORD(&reg->gpiod); 969 + 970 + /* Disable the gpio_data reg for update. */ 971 + gpio_data &= ~GPDX_LED_UPDATE_MASK; 972 + WRT_REG_DWORD(&reg->gpiod, gpio_data); 973 + RD_REG_DWORD(&reg->gpiod); 974 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 975 + 976 + ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL; 977 + 978 + if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS) { 979 + qla_printk(KERN_WARNING, ha, 980 + "Unable to update fw options (beacon off).\n"); 981 + return QLA_FUNCTION_FAILED; 982 + } 983 + 984 + if (qla2x00_get_fw_options(ha, ha->fw_options) != QLA_SUCCESS) { 985 + qla_printk(KERN_WARNING, ha, 986 + "Unable to get fw options (beacon off).\n"); 987 + return QLA_FUNCTION_FAILED; 988 + } 989 + 990 + return QLA_SUCCESS; 991 + } 992 + 993 + 994 + /* 995 + * Flash support routines 996 + */ 997 + 998 + /** 999 + * qla2x00_flash_enable() - Setup flash for reading and writing. 1000 + * @ha: HA context 1001 + */ 1002 + static void 1003 + qla2x00_flash_enable(scsi_qla_host_t *ha) 1004 + { 1005 + uint16_t data; 1006 + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1007 + 1008 + data = RD_REG_WORD(&reg->ctrl_status); 1009 + data |= CSR_FLASH_ENABLE; 1010 + WRT_REG_WORD(&reg->ctrl_status, data); 1011 + RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */ 1012 + } 1013 + 1014 + /** 1015 + * qla2x00_flash_disable() - Disable flash and allow RISC to run. 
1016 + * @ha: HA context 1017 + */ 1018 + static void 1019 + qla2x00_flash_disable(scsi_qla_host_t *ha) 1020 + { 1021 + uint16_t data; 1022 + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1023 + 1024 + data = RD_REG_WORD(&reg->ctrl_status); 1025 + data &= ~(CSR_FLASH_ENABLE); 1026 + WRT_REG_WORD(&reg->ctrl_status, data); 1027 + RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */ 1028 + } 1029 + 1030 + /** 1031 + * qla2x00_read_flash_byte() - Reads a byte from flash 1032 + * @ha: HA context 1033 + * @addr: Address in flash to read 1034 + * 1035 + * A word is read from the chip, but, only the lower byte is valid. 1036 + * 1037 + * Returns the byte read from flash @addr. 1038 + */ 1039 + static uint8_t 1040 + qla2x00_read_flash_byte(scsi_qla_host_t *ha, uint32_t addr) 1041 + { 1042 + uint16_t data; 1043 + uint16_t bank_select; 1044 + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1045 + 1046 + bank_select = RD_REG_WORD(&reg->ctrl_status); 1047 + 1048 + if (IS_QLA2322(ha) || IS_QLA6322(ha)) { 1049 + /* Specify 64K address range: */ 1050 + /* clear out Module Select and Flash Address bits [19:16]. */ 1051 + bank_select &= ~0xf8; 1052 + bank_select |= addr >> 12 & 0xf0; 1053 + bank_select |= CSR_FLASH_64K_BANK; 1054 + WRT_REG_WORD(&reg->ctrl_status, bank_select); 1055 + RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */ 1056 + 1057 + WRT_REG_WORD(&reg->flash_address, (uint16_t)addr); 1058 + data = RD_REG_WORD(&reg->flash_data); 1059 + 1060 + return (uint8_t)data; 1061 + } 1062 + 1063 + /* Setup bit 16 of flash address. */ 1064 + if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) { 1065 + bank_select |= CSR_FLASH_64K_BANK; 1066 + WRT_REG_WORD(&reg->ctrl_status, bank_select); 1067 + RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */ 1068 + } else if (((addr & BIT_16) == 0) && 1069 + (bank_select & CSR_FLASH_64K_BANK)) { 1070 + bank_select &= ~(CSR_FLASH_64K_BANK); 1071 + WRT_REG_WORD(&reg->ctrl_status, bank_select); 1072 + RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */ 1073 + } 1074 + 1075 + /* Always perform IO mapped accesses to the FLASH registers. */ 1076 + if (ha->pio_address) { 1077 + uint16_t data2; 1078 + 1079 + reg = (struct device_reg_2xxx __iomem *)ha->pio_address; 1080 + WRT_REG_WORD_PIO(&reg->flash_address, (uint16_t)addr); 1081 + do { 1082 + data = RD_REG_WORD_PIO(&reg->flash_data); 1083 + barrier(); 1084 + cpu_relax(); 1085 + data2 = RD_REG_WORD_PIO(&reg->flash_data); 1086 + } while (data != data2); 1087 + } else { 1088 + WRT_REG_WORD(&reg->flash_address, (uint16_t)addr); 1089 + data = qla2x00_debounce_register(&reg->flash_data); 1090 + } 1091 + 1092 + return (uint8_t)data; 1093 + } 1094 + 1095 + /** 1096 + * qla2x00_write_flash_byte() - Write a byte to flash 1097 + * @ha: HA context 1098 + * @addr: Address in flash to write 1099 + * @data: Data to write 1100 + */ 1101 + static void 1102 + qla2x00_write_flash_byte(scsi_qla_host_t *ha, uint32_t addr, uint8_t data) 1103 + { 1104 + uint16_t bank_select; 1105 + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1106 + 1107 + bank_select = RD_REG_WORD(&reg->ctrl_status); 1108 + if (IS_QLA2322(ha) || IS_QLA6322(ha)) { 1109 + /* Specify 64K address range: */ 1110 + /* clear out Module Select and Flash Address bits [19:16]. */ 1111 + bank_select &= ~0xf8; 1112 + bank_select |= addr >> 12 & 0xf0; 1113 + bank_select |= CSR_FLASH_64K_BANK; 1114 + WRT_REG_WORD(&reg->ctrl_status, bank_select); 1115 + RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. 
*/ 1116 + 1117 + WRT_REG_WORD(&reg->flash_address, (uint16_t)addr); 1118 + RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */ 1119 + WRT_REG_WORD(&reg->flash_data, (uint16_t)data); 1120 + RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */ 1121 + 1122 + return; 1123 + } 1124 + 1125 + /* Setup bit 16 of flash address. */ 1126 + if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) { 1127 + bank_select |= CSR_FLASH_64K_BANK; 1128 + WRT_REG_WORD(&reg->ctrl_status, bank_select); 1129 + RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */ 1130 + } else if (((addr & BIT_16) == 0) && 1131 + (bank_select & CSR_FLASH_64K_BANK)) { 1132 + bank_select &= ~(CSR_FLASH_64K_BANK); 1133 + WRT_REG_WORD(&reg->ctrl_status, bank_select); 1134 + RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */ 1135 + } 1136 + 1137 + /* Always perform IO mapped accesses to the FLASH registers. */ 1138 + if (ha->pio_address) { 1139 + reg = (struct device_reg_2xxx __iomem *)ha->pio_address; 1140 + WRT_REG_WORD_PIO(&reg->flash_address, (uint16_t)addr); 1141 + WRT_REG_WORD_PIO(&reg->flash_data, (uint16_t)data); 1142 + } else { 1143 + WRT_REG_WORD(&reg->flash_address, (uint16_t)addr); 1144 + RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */ 1145 + WRT_REG_WORD(&reg->flash_data, (uint16_t)data); 1146 + RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */ 1147 + } 1148 + } 1149 + 1150 + /** 1151 + * qla2x00_poll_flash() - Polls flash for completion. 1152 + * @ha: HA context 1153 + * @addr: Address in flash to poll 1154 + * @poll_data: Data to be polled 1155 + * @man_id: Flash manufacturer ID 1156 + * @flash_id: Flash ID 1157 + * 1158 + * This function polls the device until bit 7 of what is read matches data 1159 + * bit 7 or until data bit 5 becomes a 1. If that hapens, the flash ROM timed 1160 + * out (a fatal error). The flash book recommeds reading bit 7 again after 1161 + * reading bit 5 as a 1. 1162 + * 1163 + * Returns 0 on success, else non-zero. 1164 + */ 1165 + static int 1166 + qla2x00_poll_flash(scsi_qla_host_t *ha, uint32_t addr, uint8_t poll_data, 1167 + uint8_t man_id, uint8_t flash_id) 1168 + { 1169 + int status; 1170 + uint8_t flash_data; 1171 + uint32_t cnt; 1172 + 1173 + status = 1; 1174 + 1175 + /* Wait for 30 seconds for command to finish. */ 1176 + poll_data &= BIT_7; 1177 + for (cnt = 3000000; cnt; cnt--) { 1178 + flash_data = qla2x00_read_flash_byte(ha, addr); 1179 + if ((flash_data & BIT_7) == poll_data) { 1180 + status = 0; 1181 + break; 1182 + } 1183 + 1184 + if (man_id != 0x40 && man_id != 0xda) { 1185 + if ((flash_data & BIT_5) && cnt > 2) 1186 + cnt = 2; 1187 + } 1188 + udelay(10); 1189 + barrier(); 1190 + } 1191 + return status; 1192 + } 1193 + 1194 + #define IS_OEM_001(ha) \ 1195 + ((ha)->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2322 && \ 1196 + (ha)->pdev->subsystem_vendor == 0x1028 && \ 1197 + (ha)->pdev->subsystem_device == 0x0170) 1198 + 1199 + /** 1200 + * qla2x00_program_flash_address() - Programs a flash address 1201 + * @ha: HA context 1202 + * @addr: Address in flash to program 1203 + * @data: Data to be written in flash 1204 + * @man_id: Flash manufacturer ID 1205 + * @flash_id: Flash ID 1206 + * 1207 + * Returns 0 on success, else non-zero. 1208 + */ 1209 + static int 1210 + qla2x00_program_flash_address(scsi_qla_host_t *ha, uint32_t addr, uint8_t data, 1211 + uint8_t man_id, uint8_t flash_id) 1212 + { 1213 + /* Write Program Command Sequence. 
*/ 1214 + if (IS_OEM_001(ha)) { 1215 + qla2x00_write_flash_byte(ha, 0xaaa, 0xaa); 1216 + qla2x00_write_flash_byte(ha, 0x555, 0x55); 1217 + qla2x00_write_flash_byte(ha, 0xaaa, 0xa0); 1218 + qla2x00_write_flash_byte(ha, addr, data); 1219 + } else { 1220 + if (man_id == 0xda && flash_id == 0xc1) { 1221 + qla2x00_write_flash_byte(ha, addr, data); 1222 + if (addr & 0x7e) 1223 + return 0; 1224 + } else { 1225 + qla2x00_write_flash_byte(ha, 0x5555, 0xaa); 1226 + qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); 1227 + qla2x00_write_flash_byte(ha, 0x5555, 0xa0); 1228 + qla2x00_write_flash_byte(ha, addr, data); 1229 + } 1230 + } 1231 + 1232 + udelay(150); 1233 + 1234 + /* Wait for write to complete. */ 1235 + return qla2x00_poll_flash(ha, addr, data, man_id, flash_id); 1236 + } 1237 + 1238 + /** 1239 + * qla2x00_erase_flash() - Erase the flash. 1240 + * @ha: HA context 1241 + * @man_id: Flash manufacturer ID 1242 + * @flash_id: Flash ID 1243 + * 1244 + * Returns 0 on success, else non-zero. 1245 + */ 1246 + static int 1247 + qla2x00_erase_flash(scsi_qla_host_t *ha, uint8_t man_id, uint8_t flash_id) 1248 + { 1249 + /* Individual Sector Erase Command Sequence */ 1250 + if (IS_OEM_001(ha)) { 1251 + qla2x00_write_flash_byte(ha, 0xaaa, 0xaa); 1252 + qla2x00_write_flash_byte(ha, 0x555, 0x55); 1253 + qla2x00_write_flash_byte(ha, 0xaaa, 0x80); 1254 + qla2x00_write_flash_byte(ha, 0xaaa, 0xaa); 1255 + qla2x00_write_flash_byte(ha, 0x555, 0x55); 1256 + qla2x00_write_flash_byte(ha, 0xaaa, 0x10); 1257 + } else { 1258 + qla2x00_write_flash_byte(ha, 0x5555, 0xaa); 1259 + qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); 1260 + qla2x00_write_flash_byte(ha, 0x5555, 0x80); 1261 + qla2x00_write_flash_byte(ha, 0x5555, 0xaa); 1262 + qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); 1263 + qla2x00_write_flash_byte(ha, 0x5555, 0x10); 1264 + } 1265 + 1266 + udelay(150); 1267 + 1268 + /* Wait for erase to complete. */ 1269 + return qla2x00_poll_flash(ha, 0x00, 0x80, man_id, flash_id); 1270 + } 1271 + 1272 + /** 1273 + * qla2x00_erase_flash_sector() - Erase a flash sector. 1274 + * @ha: HA context 1275 + * @addr: Flash sector to erase 1276 + * @sec_mask: Sector address mask 1277 + * @man_id: Flash manufacturer ID 1278 + * @flash_id: Flash ID 1279 + * 1280 + * Returns 0 on success, else non-zero. 1281 + */ 1282 + static int 1283 + qla2x00_erase_flash_sector(scsi_qla_host_t *ha, uint32_t addr, 1284 + uint32_t sec_mask, uint8_t man_id, uint8_t flash_id) 1285 + { 1286 + /* Individual Sector Erase Command Sequence */ 1287 + qla2x00_write_flash_byte(ha, 0x5555, 0xaa); 1288 + qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); 1289 + qla2x00_write_flash_byte(ha, 0x5555, 0x80); 1290 + qla2x00_write_flash_byte(ha, 0x5555, 0xaa); 1291 + qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); 1292 + if (man_id == 0x1f && flash_id == 0x13) 1293 + qla2x00_write_flash_byte(ha, addr & sec_mask, 0x10); 1294 + else 1295 + qla2x00_write_flash_byte(ha, addr & sec_mask, 0x30); 1296 + 1297 + udelay(150); 1298 + 1299 + /* Wait for erase to complete. */ 1300 + return qla2x00_poll_flash(ha, addr, 0x80, man_id, flash_id); 1301 + } 1302 + 1303 + /** 1304 + * qla2x00_get_flash_manufacturer() - Read manufacturer ID from flash chip. 
1305 + * @man_id: Flash manufacturer ID 1306 + * @flash_id: Flash ID 1307 + */ 1308 + static void 1309 + qla2x00_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id, 1310 + uint8_t *flash_id) 1311 + { 1312 + qla2x00_write_flash_byte(ha, 0x5555, 0xaa); 1313 + qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); 1314 + qla2x00_write_flash_byte(ha, 0x5555, 0x90); 1315 + *man_id = qla2x00_read_flash_byte(ha, 0x0000); 1316 + *flash_id = qla2x00_read_flash_byte(ha, 0x0001); 1317 + qla2x00_write_flash_byte(ha, 0x5555, 0xaa); 1318 + qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); 1319 + qla2x00_write_flash_byte(ha, 0x5555, 0xf0); 1320 + } 1321 + 1322 + 1323 + static inline void 1324 + qla2x00_suspend_hba(struct scsi_qla_host *ha) 1325 + { 1326 + int cnt; 1327 + unsigned long flags; 1328 + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1329 + 1330 + /* Suspend HBA. */ 1331 + scsi_block_requests(ha->host); 1332 + ha->isp_ops.disable_intrs(ha); 1333 + set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 1334 + 1335 + /* Pause RISC. */ 1336 + spin_lock_irqsave(&ha->hardware_lock, flags); 1337 + WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC); 1338 + RD_REG_WORD(&reg->hccr); 1339 + if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { 1340 + for (cnt = 0; cnt < 30000; cnt++) { 1341 + if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0) 1342 + break; 1343 + udelay(100); 1344 + } 1345 + } else { 1346 + udelay(10); 1347 + } 1348 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 1349 + } 1350 + 1351 + static inline void 1352 + qla2x00_resume_hba(struct scsi_qla_host *ha) 1353 + { 1354 + /* Resume HBA. */ 1355 + clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 1356 + set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 1357 + up(ha->dpc_wait); 1358 + qla2x00_wait_for_hba_online(ha); 1359 + scsi_unblock_requests(ha->host); 1360 + } 1361 + 1362 + uint8_t * 1363 + qla2x00_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, 1364 + uint32_t offset, uint32_t length) 1365 + { 1366 + unsigned long flags; 1367 + uint32_t addr, midpoint; 1368 + uint8_t *data; 1369 + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1370 + 1371 + /* Suspend HBA. */ 1372 + qla2x00_suspend_hba(ha); 1373 + 1374 + /* Go with read. */ 1375 + spin_lock_irqsave(&ha->hardware_lock, flags); 1376 + midpoint = ha->optrom_size / 2; 1377 + 1378 + qla2x00_flash_enable(ha); 1379 + WRT_REG_WORD(&reg->nvram, 0); 1380 + RD_REG_WORD(&reg->nvram); /* PCI Posting. */ 1381 + for (addr = offset, data = buf; addr < length; addr++, data++) { 1382 + if (addr == midpoint) { 1383 + WRT_REG_WORD(&reg->nvram, NVR_SELECT); 1384 + RD_REG_WORD(&reg->nvram); /* PCI Posting. */ 1385 + } 1386 + 1387 + *data = qla2x00_read_flash_byte(ha, addr); 1388 + } 1389 + qla2x00_flash_disable(ha); 1390 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 1391 + 1392 + /* Resume HBA. */ 1393 + qla2x00_resume_hba(ha); 1394 + 1395 + return buf; 1396 + } 1397 + 1398 + int 1399 + qla2x00_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, 1400 + uint32_t offset, uint32_t length) 1401 + { 1402 + 1403 + int rval; 1404 + unsigned long flags; 1405 + uint8_t man_id, flash_id, sec_number, data; 1406 + uint16_t wd; 1407 + uint32_t addr, liter, sec_mask, rest_addr; 1408 + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1409 + 1410 + /* Suspend HBA. */ 1411 + qla2x00_suspend_hba(ha); 1412 + 1413 + rval = QLA_SUCCESS; 1414 + sec_number = 0; 1415 + 1416 + /* Reset ISP chip. 
*/ 1417 + spin_lock_irqsave(&ha->hardware_lock, flags); 1418 + WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET); 1419 + pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); 1420 + 1421 + /* Go with write. */ 1422 + qla2x00_flash_enable(ha); 1423 + do { /* Loop once to provide quick error exit */ 1424 + /* Structure of flash memory based on manufacturer */ 1425 + if (IS_OEM_001(ha)) { 1426 + /* OEM variant with special flash part. */ 1427 + man_id = flash_id = 0; 1428 + rest_addr = 0xffff; 1429 + sec_mask = 0x10000; 1430 + goto update_flash; 1431 + } 1432 + qla2x00_get_flash_manufacturer(ha, &man_id, &flash_id); 1433 + switch (man_id) { 1434 + case 0x20: /* ST flash. */ 1435 + if (flash_id == 0xd2 || flash_id == 0xe3) { 1436 + /* 1437 + * ST m29w008at part - 64kb sector size with 1438 + * 32kb,8kb,8kb,16kb sectors at memory address 1439 + * 0xf0000. 1440 + */ 1441 + rest_addr = 0xffff; 1442 + sec_mask = 0x10000; 1443 + break; 1444 + } 1445 + /* 1446 + * ST m29w010b part - 16kb sector size 1447 + * Default to 16kb sectors 1448 + */ 1449 + rest_addr = 0x3fff; 1450 + sec_mask = 0x1c000; 1451 + break; 1452 + case 0x40: /* Mostel flash. */ 1453 + /* Mostel v29c51001 part - 512 byte sector size. */ 1454 + rest_addr = 0x1ff; 1455 + sec_mask = 0x1fe00; 1456 + break; 1457 + case 0xbf: /* SST flash. */ 1458 + /* SST39sf10 part - 4kb sector size. */ 1459 + rest_addr = 0xfff; 1460 + sec_mask = 0x1f000; 1461 + break; 1462 + case 0xda: /* Winbond flash. */ 1463 + /* Winbond W29EE011 part - 256 byte sector size. */ 1464 + rest_addr = 0x7f; 1465 + sec_mask = 0x1ff80; 1466 + break; 1467 + case 0xc2: /* Macronix flash. */ 1468 + /* 64k sector size. */ 1469 + if (flash_id == 0x38 || flash_id == 0x4f) { 1470 + rest_addr = 0xffff; 1471 + sec_mask = 0x10000; 1472 + break; 1473 + } 1474 + /* Fall through... */ 1475 + 1476 + case 0x1f: /* Atmel flash. */ 1477 + /* 512k sector size. */ 1478 + if (flash_id == 0x13) { 1479 + rest_addr = 0x7fffffff; 1480 + sec_mask = 0x80000000; 1481 + break; 1482 + } 1483 + /* Fall through... */ 1484 + 1485 + case 0x01: /* AMD flash. */ 1486 + if (flash_id == 0x38 || flash_id == 0x40 || 1487 + flash_id == 0x4f) { 1488 + /* Am29LV081 part - 64kb sector size. */ 1489 + /* Am29LV002BT part - 64kb sector size. */ 1490 + rest_addr = 0xffff; 1491 + sec_mask = 0x10000; 1492 + break; 1493 + } else if (flash_id == 0x3e) { 1494 + /* 1495 + * Am29LV008b part - 64kb sector size with 1496 + * 32kb,8kb,8kb,16kb sector at memory address 1497 + * h0xf0000. 1498 + */ 1499 + rest_addr = 0xffff; 1500 + sec_mask = 0x10000; 1501 + break; 1502 + } else if (flash_id == 0x20 || flash_id == 0x6e) { 1503 + /* 1504 + * Am29LV010 part or AM29f010 - 16kb sector 1505 + * size. 1506 + */ 1507 + rest_addr = 0x3fff; 1508 + sec_mask = 0x1c000; 1509 + break; 1510 + } else if (flash_id == 0x6d) { 1511 + /* Am29LV001 part - 8kb sector size. */ 1512 + rest_addr = 0x1fff; 1513 + sec_mask = 0x1e000; 1514 + break; 1515 + } 1516 + default: 1517 + /* Default to 16 kb sector size. */ 1518 + rest_addr = 0x3fff; 1519 + sec_mask = 0x1c000; 1520 + break; 1521 + } 1522 + 1523 + update_flash: 1524 + if (IS_QLA2322(ha) || IS_QLA6322(ha)) { 1525 + if (qla2x00_erase_flash(ha, man_id, flash_id)) { 1526 + rval = QLA_FUNCTION_FAILED; 1527 + break; 1528 + } 1529 + } 1530 + 1531 + for (addr = offset, liter = 0; liter < length; liter++, 1532 + addr++) { 1533 + data = buf[liter]; 1534 + /* Are we at the beginning of a sector? 
*/ 1535 + if ((addr & rest_addr) == 0) { 1536 + if (IS_QLA2322(ha) || IS_QLA6322(ha)) { 1537 + if (addr >= 0x10000UL) { 1538 + if (((addr >> 12) & 0xf0) && 1539 + ((man_id == 0x01 && 1540 + flash_id == 0x3e) || 1541 + (man_id == 0x20 && 1542 + flash_id == 0xd2))) { 1543 + sec_number++; 1544 + if (sec_number == 1) { 1545 + rest_addr = 1546 + 0x7fff; 1547 + sec_mask = 1548 + 0x18000; 1549 + } else if ( 1550 + sec_number == 2 || 1551 + sec_number == 3) { 1552 + rest_addr = 1553 + 0x1fff; 1554 + sec_mask = 1555 + 0x1e000; 1556 + } else if ( 1557 + sec_number == 4) { 1558 + rest_addr = 1559 + 0x3fff; 1560 + sec_mask = 1561 + 0x1c000; 1562 + } 1563 + } 1564 + } 1565 + } else if (addr == ha->optrom_size / 2) { 1566 + WRT_REG_WORD(&reg->nvram, NVR_SELECT); 1567 + RD_REG_WORD(&reg->nvram); 1568 + } 1569 + 1570 + if (flash_id == 0xda && man_id == 0xc1) { 1571 + qla2x00_write_flash_byte(ha, 0x5555, 1572 + 0xaa); 1573 + qla2x00_write_flash_byte(ha, 0x2aaa, 1574 + 0x55); 1575 + qla2x00_write_flash_byte(ha, 0x5555, 1576 + 0xa0); 1577 + } else if (!IS_QLA2322(ha) && !IS_QLA6322(ha)) { 1578 + /* Then erase it */ 1579 + if (qla2x00_erase_flash_sector(ha, 1580 + addr, sec_mask, man_id, 1581 + flash_id)) { 1582 + rval = QLA_FUNCTION_FAILED; 1583 + break; 1584 + } 1585 + if (man_id == 0x01 && flash_id == 0x6d) 1586 + sec_number++; 1587 + } 1588 + } 1589 + 1590 + if (man_id == 0x01 && flash_id == 0x6d) { 1591 + if (sec_number == 1 && 1592 + addr == (rest_addr - 1)) { 1593 + rest_addr = 0x0fff; 1594 + sec_mask = 0x1f000; 1595 + } else if (sec_number == 3 && (addr & 0x7ffe)) { 1596 + rest_addr = 0x3fff; 1597 + sec_mask = 0x1c000; 1598 + } 1599 + } 1600 + 1601 + if (qla2x00_program_flash_address(ha, addr, data, 1602 + man_id, flash_id)) { 1603 + rval = QLA_FUNCTION_FAILED; 1604 + break; 1605 + } 1606 + } 1607 + } while (0); 1608 + qla2x00_flash_disable(ha); 1609 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 1610 + 1611 + /* Resume HBA. */ 1612 + qla2x00_resume_hba(ha); 1613 + 1614 + return rval; 1615 + } 1616 + 1617 + uint8_t * 1618 + qla24xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, 1619 + uint32_t offset, uint32_t length) 1620 + { 1621 + /* Suspend HBA. */ 1622 + scsi_block_requests(ha->host); 1623 + ha->isp_ops.disable_intrs(ha); 1624 + set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 1625 + 1626 + /* Go with read. */ 1627 + qla24xx_read_flash_data(ha, (uint32_t *)buf, offset >> 2, length >> 2); 1628 + 1629 + /* Resume HBA. */ 1630 + clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 1631 + ha->isp_ops.enable_intrs(ha); 1632 + scsi_unblock_requests(ha->host); 1633 + 1634 + return buf; 1635 + } 1636 + 1637 + int 1638 + qla24xx_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, 1639 + uint32_t offset, uint32_t length) 1640 + { 1641 + int rval; 1642 + 1643 + /* Suspend HBA. */ 1644 + scsi_block_requests(ha->host); 1645 + ha->isp_ops.disable_intrs(ha); 1646 + set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 1647 + 1648 + /* Go with write. */ 1649 + rval = qla24xx_write_flash_data(ha, (uint32_t *)buf, offset >> 2, 1650 + length >> 2); 1651 + 1652 + /* Resume HBA -- RISC reset needed. */ 1653 + clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 1654 + set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 1655 + up(ha->dpc_wait); 1656 + qla2x00_wait_for_hba_online(ha); 1657 + scsi_unblock_requests(ha->host); 1658 + 1659 + return rval; 1660 + }
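The qla2xxx routines above implement the standard JEDEC data-polling handshake: after a program or erase command the flash returns the complement of the written data on DQ7 until the operation completes, and DQ5 going high flags an internal timeout, after which DQ7 must be sampled once more. A minimal userspace C sketch of the same state machine, assuming a hypothetical read_byte() callback in place of qla2x00_read_flash_byte(), might look like this:

#include <stdint.h>

/* Bounded DQ7/DQ5 data-polling loop, mirroring qla2x00_poll_flash().
 * read_byte() is an invented accessor; the real driver additionally
 * skips the DQ5 check for parts (man_id 0x40 and 0xda) that lack it. */
static int flash_poll(uint8_t (*read_byte)(uint32_t addr), uint32_t addr,
    uint8_t expected)
{
	uint8_t status;
	uint32_t cnt;

	expected &= 0x80;			/* DQ7 of the written datum */
	for (cnt = 3000000; cnt; cnt--) {
		status = read_byte(addr);
		if ((status & 0x80) == expected)
			return 0;		/* program/erase finished */
		if (status & 0x20) {		/* DQ5: internal timeout */
			/* Re-read DQ7 once more, as the datasheets advise. */
			status = read_byte(addr);
			return ((status & 0x80) == expected) ? 0 : 1;
		}
	}
	return 1;				/* polling timed out */
}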
+59
drivers/scsi/scsi_lib.c
··· 16 #include <linux/init.h> 17 #include <linux/pci.h> 18 #include <linux/delay.h> 19 20 #include <scsi/scsi.h> 21 #include <scsi/scsi_dbg.h> ··· 2249 device_for_each_child(dev, NULL, target_unblock); 2250 } 2251 EXPORT_SYMBOL_GPL(scsi_target_unblock);
··· 16 #include <linux/init.h> 17 #include <linux/pci.h> 18 #include <linux/delay.h> 19 + #include <linux/hardirq.h> 20 21 #include <scsi/scsi.h> 22 #include <scsi/scsi_dbg.h> ··· 2248 device_for_each_child(dev, NULL, target_unblock); 2249 } 2250 EXPORT_SYMBOL_GPL(scsi_target_unblock); 2251 + 2252 + 2253 + struct work_queue_work { 2254 + struct work_struct work; 2255 + void (*fn)(void *); 2256 + void *data; 2257 + }; 2258 + 2259 + static void execute_in_process_context_work(void *data) 2260 + { 2261 + void (*fn)(void *data); 2262 + struct work_queue_work *wqw = data; 2263 + 2264 + fn = wqw->fn; 2265 + data = wqw->data; 2266 + 2267 + kfree(wqw); 2268 + 2269 + fn(data); 2270 + } 2271 + 2272 + /** 2273 + * scsi_execute_in_process_context - reliably execute the routine with user context 2274 + * @fn: the function to execute 2275 + * @data: data to pass to the function 2276 + * 2277 + * Executes the function immediately if process context is available, 2278 + * otherwise schedules the function for delayed execution. 2279 + * 2280 + * Returns: 0 - function was executed 2281 + * 1 - function was scheduled for execution 2282 + * <0 - error 2283 + */ 2284 + int scsi_execute_in_process_context(void (*fn)(void *data), void *data) 2285 + { 2286 + struct work_queue_work *wqw; 2287 + 2288 + if (!in_interrupt()) { 2289 + fn(data); 2290 + return 0; 2291 + } 2292 + 2293 + wqw = kmalloc(sizeof(struct work_queue_work), GFP_ATOMIC); 2294 + 2295 + if (unlikely(!wqw)) { 2296 + printk(KERN_ERR "Failed to allocate memory\n"); 2297 + WARN_ON(1); 2298 + return -ENOMEM; 2299 + } 2300 + 2301 + INIT_WORK(&wqw->work, execute_in_process_context_work, wqw); 2302 + wqw->fn = fn; 2303 + wqw->data = data; 2304 + schedule_work(&wqw->work); 2305 + 2306 + return 1; 2307 + } 2308 + EXPORT_SYMBOL_GPL(scsi_execute_in_process_context);
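scsi_execute_in_process_context() exists so that release-style callbacks, which can fire in interrupt context, can still hand off work that must run with user context; the scsi_scan.c and scsi_sysfs.c hunks below convert two such callers. A hedged sketch of the calling pattern, with my_dev and my_dev_teardown as invented names rather than anything in this patch:

/* Teardown that must not run in an ISR: the helper either calls the
 * function directly (process context) or defers it via schedule_work(). */
struct my_dev {
	void *resources;
};

static void my_dev_teardown(void *data)
{
	struct my_dev *mydev = data;

	/* Guaranteed process context here. */
	kfree(mydev->resources);
	kfree(mydev);
}

static void my_dev_release(struct my_dev *mydev)
{
	/* Safe from any context; returns 0 if run now, 1 if deferred. */
	scsi_execute_in_process_context(my_dev_teardown, mydev);
}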
+4 -22
drivers/scsi/scsi_scan.c
··· 387 return found_target; 388 } 389 390 - struct work_queue_wrapper { 391 - struct work_struct work; 392 - struct scsi_target *starget; 393 - }; 394 - 395 - static void scsi_target_reap_work(void *data) { 396 - struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data; 397 - struct scsi_target *starget = wqw->starget; 398 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 399 unsigned long flags; 400 - 401 - kfree(wqw); 402 403 spin_lock_irqsave(shost->host_lock, flags); 404 ··· 421 */ 422 void scsi_target_reap(struct scsi_target *starget) 423 { 424 - struct work_queue_wrapper *wqw = 425 - kzalloc(sizeof(struct work_queue_wrapper), GFP_ATOMIC); 426 - 427 - if (!wqw) { 428 - starget_printk(KERN_ERR, starget, 429 - "Failed to allocate memory in scsi_reap_target()\n"); 430 - return; 431 - } 432 - 433 - INIT_WORK(&wqw->work, scsi_target_reap_work, wqw); 434 - wqw->starget = starget; 435 - schedule_work(&wqw->work); 436 } 437 438 /**
··· 387 return found_target; 388 } 389 390 + static void scsi_target_reap_usercontext(void *data) 391 + { 392 + struct scsi_target *starget = data; 393 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 394 unsigned long flags; 395 396 spin_lock_irqsave(shost->host_lock, flags); 397 ··· 428 */ 429 void scsi_target_reap(struct scsi_target *starget) 430 { 431 + scsi_execute_in_process_context(scsi_target_reap_usercontext, starget); 432 } 433 434 /**
+8 -1
drivers/scsi/scsi_sysfs.c
··· 217 put_device(&sdev->sdev_gendev); 218 } 219 220 - static void scsi_device_dev_release(struct device *dev) 221 { 222 struct scsi_device *sdev; 223 struct device *parent; 224 struct scsi_target *starget; ··· 238 239 if (sdev->request_queue) { 240 sdev->request_queue->queuedata = NULL; 241 scsi_free_queue(sdev->request_queue); 242 /* temporary expedient, try to catch use of queue lock 243 * after free of sdev */ ··· 252 253 if (parent) 254 put_device(parent); 255 } 256 257 static struct class sdev_class = {
··· 217 put_device(&sdev->sdev_gendev); 218 } 219 220 + static void scsi_device_dev_release_usercontext(void *data) 221 { 222 + struct device *dev = data; 223 struct scsi_device *sdev; 224 struct device *parent; 225 struct scsi_target *starget; ··· 237 238 if (sdev->request_queue) { 239 sdev->request_queue->queuedata = NULL; 240 + /* user context needed to free queue */ 241 scsi_free_queue(sdev->request_queue); 242 /* temporary expedient, try to catch use of queue lock 243 * after free of sdev */ ··· 250 251 if (parent) 252 put_device(parent); 253 + } 254 + 255 + static void scsi_device_dev_release(struct device *dev) 256 + { 257 + scsi_execute_in_process_context(scsi_device_dev_release_usercontext, dev); 258 } 259 260 static struct class sdev_class = {
+130 -130
drivers/scsi/scsi_transport_iscsi.c
··· 39 struct iscsi_transport *iscsi_transport; 40 struct list_head list; 41 /* 42 - * List of sessions for this transport 43 - */ 44 - struct list_head sessions; 45 - /* 46 * based on transport capabilities, at register time we set these 47 * bits to tell the transport class it wants attributes displayed 48 * in sysfs or that it can support different iSCSI Data-Path ··· 160 #define Z_MAX_ERROR 16 161 #define Z_HIWAT_ERROR 12 162 163 static LIST_HEAD(connlist); 164 static DEFINE_SPINLOCK(connlock); 165 166 /* 167 * The following functions can be used by LLDs that allocate ··· 395 { 396 struct iscsi_cls_session *session; 397 struct Scsi_Host *shost; 398 399 shost = scsi_host_alloc(transport->host_template, 400 hostdata_privsize(transport)); ··· 420 goto remove_host; 421 422 *(unsigned long*)shost->hostdata = (unsigned long)session; 423 return shost; 424 425 remove_host: ··· 444 int iscsi_transport_destroy_session(struct Scsi_Host *shost) 445 { 446 struct iscsi_cls_session *session; 447 448 scsi_remove_host(shost); 449 session = hostdata_session(shost->hostdata); 450 iscsi_destroy_session(session); 451 /* ref from host alloc */ 452 scsi_host_put(shost); ··· 462 /* 463 * iscsi interface functions 464 */ 465 - static struct iscsi_cls_conn* 466 - iscsi_if_find_conn(uint64_t key) 467 - { 468 - unsigned long flags; 469 - struct iscsi_cls_conn *conn; 470 - 471 - spin_lock_irqsave(&connlock, flags); 472 - list_for_each_entry(conn, &connlist, conn_list) 473 - if (conn->connh == key) { 474 - spin_unlock_irqrestore(&connlock, flags); 475 - return conn; 476 - } 477 - spin_unlock_irqrestore(&connlock, flags); 478 - return NULL; 479 - } 480 - 481 static struct iscsi_internal * 482 iscsi_if_transport_lookup(struct iscsi_transport *tt) 483 { ··· 526 if (!zp) 527 return NULL; 528 529 zp->pool = mempool_create(max, mempool_zone_alloc_skb, 530 mempool_zone_free_skb, zp); 531 if (!zp->pool) { 532 kfree(zp); 533 return NULL; 534 } 535 - 536 - zp->size = size; 537 - zp->hiwat = hiwat; 538 - 539 - INIT_LIST_HEAD(&zp->freequeue); 540 - spin_lock_init(&zp->freelock); 541 - atomic_set(&zp->allocated, 0); 542 543 return zp; 544 } ··· 580 return 0; 581 } 582 583 - int iscsi_recv_pdu(iscsi_connh_t connh, struct iscsi_hdr *hdr, 584 char *data, uint32_t data_size) 585 { 586 struct nlmsghdr *nlh; 587 struct sk_buff *skb; 588 struct iscsi_uevent *ev; 589 - struct iscsi_cls_conn *conn; 590 char *pdu; 591 int len = NLMSG_SPACE(sizeof(*ev) + sizeof(struct iscsi_hdr) + 592 data_size); 593 - 594 - conn = iscsi_if_find_conn(connh); 595 - BUG_ON(!conn); 596 597 mempool_zone_complete(conn->z_pdu); 598 599 skb = mempool_zone_get_skb(conn->z_pdu); 600 if (!skb) { 601 - iscsi_conn_error(connh, ISCSI_ERR_CONN_FAILED); 602 dev_printk(KERN_ERR, &conn->dev, "iscsi: can not deliver " 603 "control PDU: OOM\n"); 604 return -ENOMEM; ··· 607 ev->type = ISCSI_KEVENT_RECV_PDU; 608 if (atomic_read(&conn->z_pdu->allocated) >= conn->z_pdu->hiwat) 609 ev->iferror = -ENOMEM; 610 - ev->r.recv_req.conn_handle = connh; 611 pdu = (char*)ev + sizeof(*ev); 612 memcpy(pdu, hdr, sizeof(struct iscsi_hdr)); 613 memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size); ··· 616 } 617 EXPORT_SYMBOL_GPL(iscsi_recv_pdu); 618 619 - void iscsi_conn_error(iscsi_connh_t connh, enum iscsi_err error) 620 { 621 struct nlmsghdr *nlh; 622 struct sk_buff *skb; 623 struct iscsi_uevent *ev; 624 - struct iscsi_cls_conn *conn; 625 int len = NLMSG_SPACE(sizeof(*ev)); 626 - 627 - conn = iscsi_if_find_conn(connh); 628 - BUG_ON(!conn); 629 630 mempool_zone_complete(conn->z_error); 
631 ··· 639 if (atomic_read(&conn->z_error->allocated) >= conn->z_error->hiwat) 640 ev->iferror = -ENOMEM; 641 ev->r.connerror.error = error; 642 - ev->r.connerror.conn_handle = connh; 643 644 iscsi_unicast_skb(conn->z_error, skb); 645 ··· 675 } 676 677 static int 678 - iscsi_if_get_stats(struct iscsi_transport *transport, struct sk_buff *skb, 679 - struct nlmsghdr *nlh) 680 { 681 struct iscsi_uevent *ev = NLMSG_DATA(nlh); 682 struct iscsi_stats *stats; ··· 689 ISCSI_STATS_CUSTOM_MAX); 690 int err = 0; 691 692 - conn = iscsi_if_find_conn(ev->u.get_stats.conn_handle); 693 if (!conn) 694 return -EEXIST; 695 ··· 719 ((char*)evstat + sizeof(*evstat)); 720 memset(stats, 0, sizeof(*stats)); 721 722 - transport->get_stats(ev->u.get_stats.conn_handle, stats); 723 actual_size = NLMSG_SPACE(sizeof(struct iscsi_uevent) + 724 sizeof(struct iscsi_stats) + 725 sizeof(struct iscsi_stats_custom) * 726 stats->custom_length); 727 actual_size -= sizeof(*nlhstat); 728 actual_size = NLMSG_LENGTH(actual_size); 729 - skb_trim(skb, NLMSG_ALIGN(actual_size)); 730 nlhstat->nlmsg_len = actual_size; 731 732 err = iscsi_unicast_skb(conn->z_pdu, skbstat); ··· 739 iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev) 740 { 741 struct iscsi_transport *transport = priv->iscsi_transport; 742 - struct Scsi_Host *shost; 743 744 - if (!transport->create_session) 745 - return -EINVAL; 746 - 747 - shost = transport->create_session(&priv->t, 748 - ev->u.c_session.initial_cmdsn); 749 - if (!shost) 750 return -ENOMEM; 751 752 - ev->r.c_session_ret.session_handle = iscsi_handle(iscsi_hostdata(shost->hostdata)); 753 - ev->r.c_session_ret.sid = shost->host_no; 754 return 0; 755 } 756 757 static int 758 - iscsi_if_destroy_session(struct iscsi_internal *priv, struct iscsi_uevent *ev) 759 { 760 - struct iscsi_transport *transport = priv->iscsi_transport; 761 - 762 - struct Scsi_Host *shost; 763 - 764 - if (!transport->destroy_session) 765 - return -EINVAL; 766 - 767 - shost = scsi_host_lookup(ev->u.d_session.sid); 768 - if (shost == ERR_PTR(-ENXIO)) 769 - return -EEXIST; 770 - 771 - if (transport->destroy_session) 772 - transport->destroy_session(shost); 773 - /* ref from host lookup */ 774 - scsi_host_put(shost); 775 - return 0; 776 - } 777 - 778 - static int 779 - iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev){ 780 - struct Scsi_Host *shost; 781 struct iscsi_cls_conn *conn; 782 unsigned long flags; 783 784 - if (!transport->create_conn) 785 return -EINVAL; 786 787 - shost = scsi_host_lookup(ev->u.c_conn.sid); 788 - if (shost == ERR_PTR(-ENXIO)) 789 - return -EEXIST; 790 - 791 - conn = transport->create_conn(shost, ev->u.c_conn.cid); 792 if (!conn) 793 - goto release_ref; 794 795 conn->z_pdu = mempool_zone_init(Z_MAX_PDU, 796 NLMSG_SPACE(sizeof(struct iscsi_uevent) + ··· 788 goto free_pdu_pool; 789 } 790 791 - ev->r.handle = conn->connh = iscsi_handle(conn->dd_data); 792 793 spin_lock_irqsave(&connlock, flags); 794 list_add(&conn->conn_list, &connlist); 795 conn->active = 1; 796 spin_unlock_irqrestore(&connlock, flags); 797 798 - scsi_host_put(shost); 799 return 0; 800 801 free_pdu_pool: ··· 802 destroy_conn: 803 if (transport->destroy_conn) 804 transport->destroy_conn(conn->dd_data); 805 - release_ref: 806 - scsi_host_put(shost); 807 return -ENOMEM; 808 } 809 ··· 812 struct iscsi_cls_conn *conn; 813 struct mempool_zone *z_error, *z_pdu; 814 815 - conn = iscsi_if_find_conn(ev->u.d_conn.conn_handle); 816 if (!conn) 817 - return -EEXIST; 818 - 819 - if (!transport->destroy_conn) 
820 return -EINVAL; 821 - 822 spin_lock_irqsave(&connlock, flags); 823 conn->active = 0; 824 list_del(&conn->conn_list); ··· 839 struct iscsi_uevent *ev = NLMSG_DATA(nlh); 840 struct iscsi_transport *transport = NULL; 841 struct iscsi_internal *priv; 842 - 843 - if (NETLINK_CREDS(skb)->uid) 844 - return -EPERM; 845 846 priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle)); 847 if (!priv) 848 return -EINVAL; 849 transport = priv->iscsi_transport; 850 851 - daemon_pid = NETLINK_CREDS(skb)->pid; 852 853 switch (nlh->nlmsg_type) { 854 case ISCSI_UEVENT_CREATE_SESSION: 855 err = iscsi_if_create_session(priv, ev); 856 break; 857 case ISCSI_UEVENT_DESTROY_SESSION: 858 - err = iscsi_if_destroy_session(priv, ev); 859 break; 860 case ISCSI_UEVENT_CREATE_CONN: 861 err = iscsi_if_create_conn(transport, ev); ··· 868 err = iscsi_if_destroy_conn(transport, ev); 869 break; 870 case ISCSI_UEVENT_BIND_CONN: 871 - if (!iscsi_if_find_conn(ev->u.b_conn.conn_handle)) 872 - return -EEXIST; 873 - ev->r.retcode = transport->bind_conn( 874 - ev->u.b_conn.session_handle, 875 - ev->u.b_conn.conn_handle, 876 - ev->u.b_conn.transport_fd, 877 - ev->u.b_conn.is_leading); 878 break; 879 case ISCSI_UEVENT_SET_PARAM: 880 - if (!iscsi_if_find_conn(ev->u.set_param.conn_handle)) 881 - return -EEXIST; 882 - ev->r.retcode = transport->set_param( 883 - ev->u.set_param.conn_handle, 884 - ev->u.set_param.param, ev->u.set_param.value); 885 break; 886 case ISCSI_UEVENT_START_CONN: 887 - if (!iscsi_if_find_conn(ev->u.start_conn.conn_handle)) 888 - return -EEXIST; 889 - ev->r.retcode = transport->start_conn( 890 - ev->u.start_conn.conn_handle); 891 break; 892 case ISCSI_UEVENT_STOP_CONN: 893 - if (!iscsi_if_find_conn(ev->u.stop_conn.conn_handle)) 894 - return -EEXIST; 895 - transport->stop_conn(ev->u.stop_conn.conn_handle, 896 - ev->u.stop_conn.flag); 897 break; 898 case ISCSI_UEVENT_SEND_PDU: 899 - if (!iscsi_if_find_conn(ev->u.send_pdu.conn_handle)) 900 - return -EEXIST; 901 - ev->r.retcode = transport->send_pdu( 902 - ev->u.send_pdu.conn_handle, 903 - (struct iscsi_hdr*)((char*)ev + sizeof(*ev)), 904 - (char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size, 905 - ev->u.send_pdu.data_size); 906 break; 907 case ISCSI_UEVENT_GET_STATS: 908 - err = iscsi_if_get_stats(transport, skb, nlh); 909 break; 910 default: 911 err = -EINVAL; 912 break; 913 } 914 915 return err; 916 } 917 918 /* Get message from skb (based on rtnetlink_rcv_skb). Each message is 919 * processed by iscsi_if_recv_msg. Malformed skbs with wrong length are 920 - * discarded silently. 
*/ 921 static void 922 iscsi_if_rx(struct sock *sk, int len) 923 { ··· 933 934 mutex_lock(&rx_queue_mutex); 935 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { 936 while (skb->len >= NLMSG_SPACE(0)) { 937 int err; 938 uint32_t rlen; ··· 950 skb->len < nlh->nlmsg_len) { 951 break; 952 } 953 ev = NLMSG_DATA(nlh); 954 rlen = NLMSG_ALIGN(nlh->nlmsg_len); 955 if (rlen > skb->len) 956 rlen = skb->len; 957 err = iscsi_if_recv_msg(skb, nlh); 958 if (err) { 959 ev->type = ISCSI_KEVENT_IF_ERROR; ··· 979 } while (err < 0 && err != -ECONNREFUSED); 980 skb_pull(skb, rlen); 981 } 982 kfree_skb(skb); 983 } 984 mutex_unlock(&rx_queue_mutex); ··· 999 struct iscsi_cls_conn *conn = iscsi_cdev_to_conn(cdev); \ 1000 struct iscsi_transport *t = conn->transport; \ 1001 \ 1002 - t->get_conn_param(conn->dd_data, param, &value); \ 1003 return snprintf(buf, 20, format"\n", value); \ 1004 } 1005 ··· 1026 { \ 1027 uint32_t value = 0; \ 1028 struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \ 1029 - struct Scsi_Host *shost = iscsi_session_to_shost(session); \ 1030 struct iscsi_transport *t = session->transport; \ 1031 \ 1032 - t->get_session_param(shost, param, &value); \ 1033 return snprintf(buf, 20, format"\n", value); \ 1034 } 1035 ··· 1122 return NULL; 1123 memset(priv, 0, sizeof(*priv)); 1124 INIT_LIST_HEAD(&priv->list); 1125 - INIT_LIST_HEAD(&priv->sessions); 1126 priv->iscsi_transport = tt; 1127 1128 priv->cdev.class = &iscsi_transport_class;
··· 39 struct iscsi_transport *iscsi_transport; 40 struct list_head list; 41 /* 42 * based on transport capabilities, at register time we set these 43 * bits to tell the transport class it wants attributes displayed 44 * in sysfs or that it can support different iSCSI Data-Path ··· 164 #define Z_MAX_ERROR 16 165 #define Z_HIWAT_ERROR 12 166 167 + static LIST_HEAD(sesslist); 168 + static DEFINE_SPINLOCK(sesslock); 169 static LIST_HEAD(connlist); 170 static DEFINE_SPINLOCK(connlock); 171 + 172 + static struct iscsi_cls_session *iscsi_session_lookup(uint64_t handle) 173 + { 174 + unsigned long flags; 175 + struct iscsi_cls_session *sess; 176 + 177 + spin_lock_irqsave(&sesslock, flags); 178 + list_for_each_entry(sess, &sesslist, sess_list) { 179 + if (sess == iscsi_ptr(handle)) { 180 + spin_unlock_irqrestore(&sesslock, flags); 181 + return sess; 182 + } 183 + } 184 + spin_unlock_irqrestore(&sesslock, flags); 185 + return NULL; 186 + } 187 + 188 + static struct iscsi_cls_conn *iscsi_conn_lookup(uint64_t handle) 189 + { 190 + unsigned long flags; 191 + struct iscsi_cls_conn *conn; 192 + 193 + spin_lock_irqsave(&connlock, flags); 194 + list_for_each_entry(conn, &connlist, conn_list) { 195 + if (conn == iscsi_ptr(handle)) { 196 + spin_unlock_irqrestore(&connlock, flags); 197 + return conn; 198 + } 199 + } 200 + spin_unlock_irqrestore(&connlock, flags); 201 + return NULL; 202 + } 203 204 /* 205 * The following functions can be used by LLDs that allocate ··· 365 { 366 struct iscsi_cls_session *session; 367 struct Scsi_Host *shost; 368 + unsigned long flags; 369 370 shost = scsi_host_alloc(transport->host_template, 371 hostdata_privsize(transport)); ··· 389 goto remove_host; 390 391 *(unsigned long*)shost->hostdata = (unsigned long)session; 392 + spin_lock_irqsave(&sesslock, flags); 393 + list_add(&session->sess_list, &sesslist); 394 + spin_unlock_irqrestore(&sesslock, flags); 395 return shost; 396 397 remove_host: ··· 410 int iscsi_transport_destroy_session(struct Scsi_Host *shost) 411 { 412 struct iscsi_cls_session *session; 413 + unsigned long flags; 414 415 scsi_remove_host(shost); 416 session = hostdata_session(shost->hostdata); 417 + spin_lock_irqsave(&sesslock, flags); 418 + list_del(&session->sess_list); 419 + spin_unlock_irqrestore(&sesslock, flags); 420 iscsi_destroy_session(session); 421 /* ref from host alloc */ 422 scsi_host_put(shost); ··· 424 /* 425 * iscsi interface functions 426 */ 427 static struct iscsi_internal * 428 iscsi_if_transport_lookup(struct iscsi_transport *tt) 429 { ··· 504 if (!zp) 505 return NULL; 506 507 + zp->size = size; 508 + zp->hiwat = hiwat; 509 + INIT_LIST_HEAD(&zp->freequeue); 510 + spin_lock_init(&zp->freelock); 511 + atomic_set(&zp->allocated, 0); 512 + 513 zp->pool = mempool_create(max, mempool_zone_alloc_skb, 514 mempool_zone_free_skb, zp); 515 if (!zp->pool) { 516 kfree(zp); 517 return NULL; 518 } 519 520 return zp; 521 } ··· 559 return 0; 560 } 561 562 + int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, 563 char *data, uint32_t data_size) 564 { 565 struct nlmsghdr *nlh; 566 struct sk_buff *skb; 567 struct iscsi_uevent *ev; 568 char *pdu; 569 int len = NLMSG_SPACE(sizeof(*ev) + sizeof(struct iscsi_hdr) + 570 data_size); 571 572 mempool_zone_complete(conn->z_pdu); 573 574 skb = mempool_zone_get_skb(conn->z_pdu); 575 if (!skb) { 576 + iscsi_conn_error(conn, ISCSI_ERR_CONN_FAILED); 577 dev_printk(KERN_ERR, &conn->dev, "iscsi: can not deliver " 578 "control PDU: OOM\n"); 579 return -ENOMEM; ··· 590 ev->type = ISCSI_KEVENT_RECV_PDU; 591 if 
(atomic_read(&conn->z_pdu->allocated) >= conn->z_pdu->hiwat) 592 ev->iferror = -ENOMEM; 593 + ev->r.recv_req.conn_handle = iscsi_handle(conn); 594 pdu = (char*)ev + sizeof(*ev); 595 memcpy(pdu, hdr, sizeof(struct iscsi_hdr)); 596 memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size); ··· 599 } 600 EXPORT_SYMBOL_GPL(iscsi_recv_pdu); 601 602 + void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error) 603 { 604 struct nlmsghdr *nlh; 605 struct sk_buff *skb; 606 struct iscsi_uevent *ev; 607 int len = NLMSG_SPACE(sizeof(*ev)); 608 609 mempool_zone_complete(conn->z_error); 610 ··· 626 if (atomic_read(&conn->z_error->allocated) >= conn->z_error->hiwat) 627 ev->iferror = -ENOMEM; 628 ev->r.connerror.error = error; 629 + ev->r.connerror.conn_handle = iscsi_handle(conn); 630 631 iscsi_unicast_skb(conn->z_error, skb); 632 ··· 662 } 663 664 static int 665 + iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh) 666 { 667 struct iscsi_uevent *ev = NLMSG_DATA(nlh); 668 struct iscsi_stats *stats; ··· 677 ISCSI_STATS_CUSTOM_MAX); 678 int err = 0; 679 680 + conn = iscsi_conn_lookup(ev->u.get_stats.conn_handle); 681 if (!conn) 682 return -EEXIST; 683 ··· 707 ((char*)evstat + sizeof(*evstat)); 708 memset(stats, 0, sizeof(*stats)); 709 710 + transport->get_stats(conn, stats); 711 actual_size = NLMSG_SPACE(sizeof(struct iscsi_uevent) + 712 sizeof(struct iscsi_stats) + 713 sizeof(struct iscsi_stats_custom) * 714 stats->custom_length); 715 actual_size -= sizeof(*nlhstat); 716 actual_size = NLMSG_LENGTH(actual_size); 717 + skb_trim(skbstat, NLMSG_ALIGN(actual_size)); 718 nlhstat->nlmsg_len = actual_size; 719 720 err = iscsi_unicast_skb(conn->z_pdu, skbstat); ··· 727 iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev) 728 { 729 struct iscsi_transport *transport = priv->iscsi_transport; 730 + struct iscsi_cls_session *session; 731 + uint32_t sid; 732 733 + session = transport->create_session(&priv->t, 734 + ev->u.c_session.initial_cmdsn, 735 + &sid); 736 + if (!session) 737 return -ENOMEM; 738 739 + ev->r.c_session_ret.session_handle = iscsi_handle(session); 740 + ev->r.c_session_ret.sid = sid; 741 return 0; 742 } 743 744 static int 745 + iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev) 746 { 747 struct iscsi_cls_conn *conn; 748 + struct iscsi_cls_session *session; 749 unsigned long flags; 750 751 + session = iscsi_session_lookup(ev->u.c_conn.session_handle); 752 + if (!session) 753 return -EINVAL; 754 755 + conn = transport->create_conn(session, ev->u.c_conn.cid); 756 if (!conn) 757 + return -ENOMEM; 758 759 conn->z_pdu = mempool_zone_init(Z_MAX_PDU, 760 NLMSG_SPACE(sizeof(struct iscsi_uevent) + ··· 800 goto free_pdu_pool; 801 } 802 803 + ev->r.handle = iscsi_handle(conn); 804 805 spin_lock_irqsave(&connlock, flags); 806 list_add(&conn->conn_list, &connlist); 807 conn->active = 1; 808 spin_unlock_irqrestore(&connlock, flags); 809 810 return 0; 811 812 free_pdu_pool: ··· 815 destroy_conn: 816 if (transport->destroy_conn) 817 transport->destroy_conn(conn->dd_data); 818 return -ENOMEM; 819 } 820 ··· 827 struct iscsi_cls_conn *conn; 828 struct mempool_zone *z_error, *z_pdu; 829 830 + conn = iscsi_conn_lookup(ev->u.d_conn.conn_handle); 831 if (!conn) 832 return -EINVAL; 833 spin_lock_irqsave(&connlock, flags); 834 conn->active = 0; 835 list_del(&conn->conn_list); ··· 858 struct iscsi_uevent *ev = NLMSG_DATA(nlh); 859 struct iscsi_transport *transport = NULL; 860 struct iscsi_internal *priv; 861 + struct iscsi_cls_session 
*session; 862 + struct iscsi_cls_conn *conn; 863 864 priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle)); 865 if (!priv) 866 return -EINVAL; 867 transport = priv->iscsi_transport; 868 869 + if (!try_module_get(transport->owner)) 870 + return -EINVAL; 871 872 switch (nlh->nlmsg_type) { 873 case ISCSI_UEVENT_CREATE_SESSION: 874 err = iscsi_if_create_session(priv, ev); 875 break; 876 case ISCSI_UEVENT_DESTROY_SESSION: 877 + session = iscsi_session_lookup(ev->u.d_session.session_handle); 878 + if (session) 879 + transport->destroy_session(session); 880 + else 881 + err = -EINVAL; 882 break; 883 case ISCSI_UEVENT_CREATE_CONN: 884 err = iscsi_if_create_conn(transport, ev); ··· 883 err = iscsi_if_destroy_conn(transport, ev); 884 break; 885 case ISCSI_UEVENT_BIND_CONN: 886 + session = iscsi_session_lookup(ev->u.b_conn.session_handle); 887 + conn = iscsi_conn_lookup(ev->u.b_conn.conn_handle); 888 + 889 + if (session && conn) 890 + ev->r.retcode = transport->bind_conn(session, conn, 891 + ev->u.b_conn.transport_fd, 892 + ev->u.b_conn.is_leading); 893 + else 894 + err = -EINVAL; 895 break; 896 case ISCSI_UEVENT_SET_PARAM: 897 + conn = iscsi_conn_lookup(ev->u.set_param.conn_handle); 898 + if (conn) 899 + ev->r.retcode = transport->set_param(conn, 900 + ev->u.set_param.param, ev->u.set_param.value); 901 + else 902 + err = -EINVAL; 903 break; 904 case ISCSI_UEVENT_START_CONN: 905 + conn = iscsi_conn_lookup(ev->u.start_conn.conn_handle); 906 + if (conn) 907 + ev->r.retcode = transport->start_conn(conn); 908 + else 909 + err = -EINVAL; 910 + 911 break; 912 case ISCSI_UEVENT_STOP_CONN: 913 + conn = iscsi_conn_lookup(ev->u.stop_conn.conn_handle); 914 + if (conn) 915 + transport->stop_conn(conn, ev->u.stop_conn.flag); 916 + else 917 + err = -EINVAL; 918 break; 919 case ISCSI_UEVENT_SEND_PDU: 920 + conn = iscsi_conn_lookup(ev->u.send_pdu.conn_handle); 921 + if (conn) 922 + ev->r.retcode = transport->send_pdu(conn, 923 + (struct iscsi_hdr*)((char*)ev + sizeof(*ev)), 924 + (char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size, 925 + ev->u.send_pdu.data_size); 926 + else 927 + err = -EINVAL; 928 break; 929 case ISCSI_UEVENT_GET_STATS: 930 + err = iscsi_if_get_stats(transport, nlh); 931 break; 932 default: 933 err = -EINVAL; 934 break; 935 } 936 937 + module_put(transport->owner); 938 return err; 939 } 940 941 /* Get message from skb (based on rtnetlink_rcv_skb). Each message is 942 * processed by iscsi_if_recv_msg. Malformed skbs with wrong length are 943 + * discarded silently, as are those with invalid creds. 
*/ 944 static void 945 iscsi_if_rx(struct sock *sk, int len) 946 { ··· 940 941 mutex_lock(&rx_queue_mutex); 942 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { 943 + if (NETLINK_CREDS(skb)->uid) { 944 + skb_pull(skb, skb->len); 945 + goto free_skb; 946 + } 947 + daemon_pid = NETLINK_CREDS(skb)->pid; 948 + 949 while (skb->len >= NLMSG_SPACE(0)) { 950 int err; 951 uint32_t rlen; ··· 951 skb->len < nlh->nlmsg_len) { 952 break; 953 } 954 + 955 ev = NLMSG_DATA(nlh); 956 rlen = NLMSG_ALIGN(nlh->nlmsg_len); 957 if (rlen > skb->len) 958 rlen = skb->len; 959 + 960 err = iscsi_if_recv_msg(skb, nlh); 961 if (err) { 962 ev->type = ISCSI_KEVENT_IF_ERROR; ··· 978 } while (err < 0 && err != -ECONNREFUSED); 979 skb_pull(skb, rlen); 980 } 981 + free_skb: 982 kfree_skb(skb); 983 } 984 mutex_unlock(&rx_queue_mutex); ··· 997 struct iscsi_cls_conn *conn = iscsi_cdev_to_conn(cdev); \ 998 struct iscsi_transport *t = conn->transport; \ 999 \ 1000 + t->get_conn_param(conn, param, &value); \ 1001 return snprintf(buf, 20, format"\n", value); \ 1002 } 1003 ··· 1024 { \ 1025 uint32_t value = 0; \ 1026 struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \ 1027 struct iscsi_transport *t = session->transport; \ 1028 \ 1029 + t->get_session_param(session, param, &value); \ 1030 return snprintf(buf, 20, format"\n", value); \ 1031 } 1032 ··· 1121 return NULL; 1122 memset(priv, 0, sizeof(*priv)); 1123 INIT_LIST_HEAD(&priv->list); 1124 priv->iscsi_transport = tt; 1125 1126 priv->cdev.class = &iscsi_transport_class;
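The net effect of the scsi_transport_iscsi.c rework is that a 64-bit handle arriving over netlink is never dereferenced directly: it is first matched against the sesslist/connlist entries the kernel itself registered, and the owning transport module is pinned around the LLD call. Reduced to its essentials, with my_handle_start as an illustrative name that is not part of the patch:

/* Dispatch skeleton for one event type, following the pattern of
 * iscsi_if_recv_msg() above; iscsi_conn_lookup() is the validating
 * lookup this patch adds. */
static int my_handle_start(struct iscsi_transport *t, uint64_t handle)
{
	struct iscsi_cls_conn *conn;
	int err;

	if (!try_module_get(t->owner))
		return -EINVAL;

	conn = iscsi_conn_lookup(handle);	/* validate, never trust */
	err = conn ? t->start_conn(conn) : -EINVAL;

	module_put(t->owner);
	return err;
}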
+1 -1
drivers/scsi/sym53c8xx_2/sym_hipd.c
··· 3588 3589 if (pm) { 3590 dp_scr = scr_to_cpu(pm->ret); 3591 - dp_ofs -= scr_to_cpu(pm->sg.size); 3592 } 3593 3594 /*
··· 3588 3589 if (pm) { 3590 dp_scr = scr_to_cpu(pm->ret); 3591 + dp_ofs -= scr_to_cpu(pm->sg.size) & 0x00ffffff; 3592 } 3593 3594 /*
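The sym_hipd.c one-liner masks the scatter/gather size word down to its low 24 bits before it enters the data-pointer arithmetic. The sketch below rests on an assumption drawn from the driver's S/G builder: under the high-address DMA mode the top byte of the size word carries bus-address bits rather than a byte count, so an unmasked subtraction would corrupt the computed offset.

#include <stdint.h>

/* Assumed encoding (illustrative): bits [23:0] hold the segment byte
 * count, bits [31:24] hold bits [39:32] of a 64-bit bus address. */
static uint32_t sge_size_encode(uint64_t badd, uint32_t len)
{
	return (uint32_t)(((badd >> 8) & 0xff000000u) + len);
}

/* What the fix now applies before doing arithmetic on the size. */
static uint32_t sge_byte_count(uint32_t size)
{
	return size & 0x00ffffff;
}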
-3
include/scsi/iscsi_if.h
··· 163 }; 164 #define ISCSI_PARAM_MAX 14 165 166 - typedef uint64_t iscsi_sessionh_t; /* iSCSI Data-Path session handle */ 167 - typedef uint64_t iscsi_connh_t; /* iSCSI Data-Path connection handle */ 168 - 169 #define iscsi_ptr(_handle) ((void*)(unsigned long)_handle) 170 #define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr) 171 #define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
··· 163 }; 164 #define ISCSI_PARAM_MAX 14 165 166 #define iscsi_ptr(_handle) ((void*)(unsigned long)_handle) 167 #define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr) 168 #define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
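With the iscsi_sessionh_t/iscsi_connh_t typedefs gone, iscsi_ptr() and iscsi_handle() are the only bridge between kernel pointers and the 64-bit values carried over netlink. A small userspace-style demonstration of the round trip (struct conn and obj are illustrative, not from the header):

#include <stdint.h>

/* Macros copied from iscsi_if.h. */
#define iscsi_ptr(_handle) ((void*)(unsigned long)_handle)
#define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr)

struct conn { int id; };

int main(void)
{
	struct conn obj = { 7 };
	uint64_t handle = iscsi_handle(&obj);	/* pointer -> wire value */
	struct conn *back = iscsi_ptr(handle);	/* wire value -> pointer */

	/* back == &obj again, which is exactly why the transport class
	 * validates every handle against its own lists before use: an
	 * arbitrary value would otherwise become an arbitrary pointer. */
	return back == &obj ? 0 : 1;
}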
+2
include/scsi/scsi.h
··· 433 /* Used to obtain the PCI location of a device */ 434 #define SCSI_IOCTL_GET_PCI 0x5387 435 436 #endif /* _SCSI_SCSI_H */
··· 433 /* Used to obtain the PCI location of a device */ 434 #define SCSI_IOCTL_GET_PCI 0x5387 435 436 + int scsi_execute_in_process_context(void (*fn)(void *data), void *data); 437 + 438 #endif /* _SCSI_SCSI_H */
+18 -16
include/scsi/scsi_transport_iscsi.h
··· 63 int max_lun; 64 unsigned int max_conn; 65 unsigned int max_cmd_len; 66 - struct Scsi_Host *(*create_session) (struct scsi_transport_template *t, 67 - uint32_t initial_cmdsn); 68 - void (*destroy_session) (struct Scsi_Host *shost); 69 - struct iscsi_cls_conn *(*create_conn) (struct Scsi_Host *shost, 70 uint32_t cid); 71 - int (*bind_conn) (iscsi_sessionh_t session, iscsi_connh_t conn, 72 uint32_t transport_fd, int is_leading); 73 - int (*start_conn) (iscsi_connh_t conn); 74 - void (*stop_conn) (iscsi_connh_t conn, int flag); 75 void (*destroy_conn) (struct iscsi_cls_conn *conn); 76 - int (*set_param) (iscsi_connh_t conn, enum iscsi_param param, 77 uint32_t value); 78 - int (*get_conn_param) (void *conndata, enum iscsi_param param, 79 uint32_t *value); 80 - int (*get_session_param) (struct Scsi_Host *shost, 81 enum iscsi_param param, uint32_t *value); 82 - int (*send_pdu) (iscsi_connh_t conn, struct iscsi_hdr *hdr, 83 char *data, uint32_t data_size); 84 - void (*get_stats) (iscsi_connh_t conn, struct iscsi_stats *stats); 85 }; 86 87 /* ··· 96 /* 97 * control plane upcalls 98 */ 99 - extern void iscsi_conn_error(iscsi_connh_t conn, enum iscsi_err error); 100 - extern int iscsi_recv_pdu(iscsi_connh_t conn, struct iscsi_hdr *hdr, 101 char *data, uint32_t data_size); 102 103 struct iscsi_cls_conn { 104 struct list_head conn_list; /* item in connlist */ 105 void *dd_data; /* LLD private data */ 106 struct iscsi_transport *transport; 107 - iscsi_connh_t connh; 108 int active; /* must be accessed with the connlock */ 109 struct device dev; /* sysfs transport/container device */ 110 struct mempool_zone *z_error; ··· 115 container_of(_dev, struct iscsi_cls_conn, dev) 116 117 struct iscsi_cls_session { 118 - struct list_head list; /* item in session_list */ 119 struct iscsi_transport *transport; 120 struct device dev; /* sysfs transport/container device */ 121 };
··· 63 int max_lun; 64 unsigned int max_conn; 65 unsigned int max_cmd_len; 66 + struct iscsi_cls_session *(*create_session) 67 + (struct scsi_transport_template *t, uint32_t sn, uint32_t *sid); 68 + void (*destroy_session) (struct iscsi_cls_session *session); 69 + struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess, 70 uint32_t cid); 71 + int (*bind_conn) (struct iscsi_cls_session *session, 72 + struct iscsi_cls_conn *cls_conn, 73 uint32_t transport_fd, int is_leading); 74 + int (*start_conn) (struct iscsi_cls_conn *conn); 75 + void (*stop_conn) (struct iscsi_cls_conn *conn, int flag); 76 void (*destroy_conn) (struct iscsi_cls_conn *conn); 77 + int (*set_param) (struct iscsi_cls_conn *conn, enum iscsi_param param, 78 uint32_t value); 79 + int (*get_conn_param) (struct iscsi_cls_conn *conn, 80 + enum iscsi_param param, 81 uint32_t *value); 82 + int (*get_session_param) (struct iscsi_cls_session *session, 83 enum iscsi_param param, uint32_t *value); 84 + int (*send_pdu) (struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, 85 char *data, uint32_t data_size); 86 + void (*get_stats) (struct iscsi_cls_conn *conn, 87 + struct iscsi_stats *stats); 88 }; 89 90 /* ··· 93 /* 94 * control plane upcalls 95 */ 96 + extern void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error); 97 + extern int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, 98 char *data, uint32_t data_size); 99 100 struct iscsi_cls_conn { 101 struct list_head conn_list; /* item in connlist */ 102 void *dd_data; /* LLD private data */ 103 struct iscsi_transport *transport; 104 int active; /* must be accessed with the connlock */ 105 struct device dev; /* sysfs transport/container device */ 106 struct mempool_zone *z_error; ··· 113 container_of(_dev, struct iscsi_cls_conn, dev) 114 115 struct iscsi_cls_session { 116 + struct list_head sess_list; /* item in session_list */ 117 struct iscsi_transport *transport; 118 struct device dev; /* sysfs transport/container device */ 119 };
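For an LLD author, the header change means every template op now traffics in the class objects themselves rather than opaque handles. A hedged skeleton under invented names (my_conn, my_start_hw, my_iscsi_transport) showing two of the reworked signatures:

/* LLD state hangs off the class object's dd_data, as in the patch. */
struct my_conn {
	uint32_t max_recv_dlength;
};

static int my_start_hw(struct my_conn *conn);	/* invented LLD internal */

static int my_start_conn(struct iscsi_cls_conn *cls_conn)
{
	struct my_conn *conn = cls_conn->dd_data;

	return my_start_hw(conn);
}

static int my_get_conn_param(struct iscsi_cls_conn *cls_conn,
    enum iscsi_param param, uint32_t *value)
{
	struct my_conn *conn = cls_conn->dd_data;

	switch (param) {
	case ISCSI_PARAM_MAX_RECV_DLENGTH:
		*value = conn->max_recv_dlength;
		return 0;
	default:
		return -EINVAL;
	}
}

static struct iscsi_transport my_iscsi_transport = {
	.owner		= THIS_MODULE,
	.name		= "my_iscsi",
	.start_conn	= my_start_conn,
	.get_conn_param	= my_get_conn_param,
	/* remaining ops take the same struct pointer arguments */
};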