Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6:
[SCSI] gdth: update deprecated pci_find_device
[SCSI] gdth: scan for scsi devices
[SCSI] sym53c416: fix module parameters
[SCSI] lpfc 8.2.5 : Update lpfc driver version to 8.2.5
[SCSI] lpfc 8.2.5 : Fix buffer leaks
[SCSI] lpfc 8.2.5 : Miscellaneous discovery Fixes
[SCSI] lpfc 8.2.5 : Add MSI-X single message support
[SCSI] lpfc 8.2.5 : Miscellaneous Fixes
[SCSI] lpfc 8.2.5 : Correct ndlp referencing issues
[SCSI] update SG_ALL to avoid causing chaining
[SCSI] aic94xx: fix ABORT_TASK define conflict
[SCSI] fas216: Use scsi_eh API for REQUEST_SENSE invocation
[SCSI] ses: fix memory leaks
[SCSI] aacraid: informational sysfs value corrections
[SCSI] mpt fusion: Request I/O resources only when required
[SCSI] aacraid: ignore adapter reset check polarity
[SCSI] aacraid: add optional MSI support
[SCSI] mpt fusion: Avoid racing when mptsas and mptctl modules are loaded in parallel
[SCSI] MegaRAID driver management char device moved to misc
[SCSI] advansys: fix overrun_buf alignment bug

+1232 -370
+46 -8
drivers/message/fusion/mptbase.c
··· 1470 if (mpt_debug_level) 1471 printk(KERN_INFO MYNAM ": mpt_debug_level=%xh\n", mpt_debug_level); 1472 1473 - if (pci_enable_device(pdev)) 1474 - return r; 1475 - 1476 ioc = kzalloc(sizeof(MPT_ADAPTER), GFP_ATOMIC); 1477 if (ioc == NULL) { 1478 printk(KERN_ERR MYNAM ": ERROR - Insufficient memory to add adapter!\n"); ··· 1478 ioc->debug_level = mpt_debug_level; 1479 ioc->id = mpt_ids++; 1480 sprintf(ioc->name, "ioc%d", ioc->id); 1481 1482 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": mpt_adapter_install\n", ioc->name)); 1483 ··· 1669 ioc->active = 0; 1670 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 1671 1672 /* Set lookup ptr. */ 1673 list_add_tail(&ioc->list, &ioc_list); 1674 ··· 1805 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 1806 1807 pci_disable_device(pdev); 1808 pci_set_power_state(pdev, device_state); 1809 1810 return 0; ··· 1822 MPT_ADAPTER *ioc = pci_get_drvdata(pdev); 1823 u32 device_state = pdev->current_state; 1824 int recovery_state; 1825 - int err; 1826 1827 printk(MYIOC_s_INFO_FMT 1828 "pci-resume: pdev=0x%p, slot=%s, Previous operating state [D%d]\n", ··· 1829 1830 pci_set_power_state(pdev, 0); 1831 pci_restore_state(pdev); 1832 - err = pci_enable_device(pdev); 1833 - if (err) 1834 - return err; 1835 1836 /* enable interrupts */ 1837 CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM); ··· 1901 * -2 if READY but IOCFacts Failed 1902 * -3 if READY but PrimeIOCFifos Failed 1903 * -4 if READY but IOCInit Failed 1904 */ 1905 static int 1906 mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) ··· 2000 } 2001 } 2002 2003 /* 2004 * Device is reset now. It must have de-asserted the interrupt line 2005 * (if it was asserted) and it should be safe to register for the ··· 2035 irq_allocated = 1; 2036 ioc->pci_irq = ioc->pcidev->irq; 2037 pci_set_master(ioc->pcidev); /* ?? */ 2038 - pci_set_drvdata(ioc->pcidev, ioc); 2039 dprintk(ioc, printk(MYIOC_s_INFO_FMT "installed at interrupt " 2040 "%d\n", ioc->name, ioc->pcidev->irq)); 2041 } ··· 2415 iounmap(ioc->memmap); 2416 ioc->memmap = NULL; 2417 } 2418 2419 #if defined(CONFIG_MTRR) && 0 2420 if (ioc->mtrr_reg > 0) {
··· 1470 if (mpt_debug_level) 1471 printk(KERN_INFO MYNAM ": mpt_debug_level=%xh\n", mpt_debug_level); 1472 1473 ioc = kzalloc(sizeof(MPT_ADAPTER), GFP_ATOMIC); 1474 if (ioc == NULL) { 1475 printk(KERN_ERR MYNAM ": ERROR - Insufficient memory to add adapter!\n"); ··· 1481 ioc->debug_level = mpt_debug_level; 1482 ioc->id = mpt_ids++; 1483 sprintf(ioc->name, "ioc%d", ioc->id); 1484 + 1485 + ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM); 1486 + if (pci_enable_device_mem(pdev)) { 1487 + kfree(ioc); 1488 + printk(MYIOC_s_ERR_FMT "pci_enable_device_mem() " 1489 + "failed\n", ioc->name); 1490 + return r; 1491 + } 1492 + if (pci_request_selected_regions(pdev, ioc->bars, "mpt")) { 1493 + kfree(ioc); 1494 + printk(MYIOC_s_ERR_FMT "pci_request_selected_regions() with " 1495 + "MEM failed\n", ioc->name); 1496 + return r; 1497 + } 1498 1499 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": mpt_adapter_install\n", ioc->name)); 1500 ··· 1658 ioc->active = 0; 1659 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 1660 1661 + /* Set IOC ptr in the pcidev's driver data. */ 1662 + pci_set_drvdata(ioc->pcidev, ioc); 1663 + 1664 /* Set lookup ptr. */ 1665 list_add_tail(&ioc->list, &ioc_list); 1666 ··· 1791 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 1792 1793 pci_disable_device(pdev); 1794 + pci_release_selected_regions(pdev, ioc->bars); 1795 pci_set_power_state(pdev, device_state); 1796 1797 return 0; ··· 1807 MPT_ADAPTER *ioc = pci_get_drvdata(pdev); 1808 u32 device_state = pdev->current_state; 1809 int recovery_state; 1810 1811 printk(MYIOC_s_INFO_FMT 1812 "pci-resume: pdev=0x%p, slot=%s, Previous operating state [D%d]\n", ··· 1815 1816 pci_set_power_state(pdev, 0); 1817 pci_restore_state(pdev); 1818 + if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) { 1819 + ioc->bars = pci_select_bars(ioc->pcidev, IORESOURCE_MEM | 1820 + IORESOURCE_IO); 1821 + if (pci_enable_device(pdev)) 1822 + return 0; 1823 + } else { 1824 + ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM); 1825 + if (pci_enable_device_mem(pdev)) 1826 + return 0; 1827 + } 1828 + if (pci_request_selected_regions(pdev, ioc->bars, "mpt")) 1829 + return 0; 1830 1831 /* enable interrupts */ 1832 CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM); ··· 1878 * -2 if READY but IOCFacts Failed 1879 * -3 if READY but PrimeIOCFifos Failed 1880 * -4 if READY but IOCInit Failed 1881 + * -5 if failed to enable_device and/or request_selected_regions 1882 */ 1883 static int 1884 mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) ··· 1976 } 1977 } 1978 1979 + if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP) && 1980 + (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)) { 1981 + pci_release_selected_regions(ioc->pcidev, ioc->bars); 1982 + ioc->bars = pci_select_bars(ioc->pcidev, IORESOURCE_MEM | 1983 + IORESOURCE_IO); 1984 + if (pci_enable_device(ioc->pcidev)) 1985 + return -5; 1986 + if (pci_request_selected_regions(ioc->pcidev, ioc->bars, 1987 + "mpt")) 1988 + return -5; 1989 + } 1990 + 1991 /* 1992 * Device is reset now. It must have de-asserted the interrupt line 1993 * (if it was asserted) and it should be safe to register for the ··· 1999 irq_allocated = 1; 2000 ioc->pci_irq = ioc->pcidev->irq; 2001 pci_set_master(ioc->pcidev); /* ?? 
*/ 2002 dprintk(ioc, printk(MYIOC_s_INFO_FMT "installed at interrupt " 2003 "%d\n", ioc->name, ioc->pcidev->irq)); 2004 } ··· 2380 iounmap(ioc->memmap); 2381 ioc->memmap = NULL; 2382 } 2383 + 2384 + pci_disable_device(ioc->pcidev); 2385 + pci_release_selected_regions(ioc->pcidev, ioc->bars); 2386 2387 #if defined(CONFIG_MTRR) && 0 2388 if (ioc->mtrr_reg > 0) {
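
The mptbase.c change replaces the early blanket pci_enable_device() with pci_select_bars() + pci_enable_device_mem() + pci_request_selected_regions(), re-enabling full I/O decoding only when the firmware-download-boot flag requires it. A minimal sketch of the memory-only pattern, assuming a generic driver (the function name and "example" region label are illustrative, not from mptbase.c):

    #include <linux/pci.h>

    static int example_probe(struct pci_dev *pdev)
    {
        int bars, err;

        bars = pci_select_bars(pdev, IORESOURCE_MEM);   /* memory BARs only */

        err = pci_enable_device_mem(pdev);              /* leave I/O decoding off */
        if (err)
            return err;

        err = pci_request_selected_regions(pdev, bars, "example");
        if (err) {
            pci_disable_device(pdev);
            return err;
        }
        return 0;
        /* teardown mirrors the patch: pci_disable_device(pdev), then
         * pci_release_selected_regions(pdev, bars) */
    }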
+1
drivers/message/fusion/mptbase.h
··· 629 dma_addr_t HostPageBuffer_dma; 630 int mtrr_reg; 631 struct pci_dev *pcidev; /* struct pci_dev pointer */ 632 u8 __iomem *memmap; /* mmap address */ 633 struct Scsi_Host *sh; /* Scsi Host pointer */ 634 SpiCfgData spi_data; /* Scsi config. data */
··· 629 dma_addr_t HostPageBuffer_dma; 630 int mtrr_reg; 631 struct pci_dev *pcidev; /* struct pci_dev pointer */ 632 + int bars; /* bitmask of BAR's that must be configured */ 633 u8 __iomem *memmap; /* mmap address */ 634 struct Scsi_Host *sh; /* Scsi Host pointer */ 635 SpiCfgData spi_data; /* Scsi config. data */
+1 -1
drivers/scsi/Kconfig
··· 722 723 config SCSI_GDTH 724 tristate "Intel/ICP (former GDT SCSI Disk Array) RAID Controller support" 725 - depends on (ISA || EISA || PCI) && SCSI && ISA_DMA_API && PCI_LEGACY 726 ---help--- 727 Formerly called GDT SCSI Disk Array Controller Support. 728
··· 722 723 config SCSI_GDTH 724 tristate "Intel/ICP (former GDT SCSI Disk Array) RAID Controller support" 725 + depends on (ISA || EISA || PCI) && SCSI && ISA_DMA_API 726 ---help--- 727 Formerly called GDT SCSI Disk Array Controller Support. 728
+49 -21
drivers/scsi/aacraid/aachba.c
··· 144 */ 145 146 static int nondasd = -1; 147 - static int aac_cache = 0; 148 static int dacmode = -1; 149 - 150 int aac_commit = -1; 151 int startup_timeout = 180; 152 int aif_timeout = 120; 153 154 module_param(nondasd, int, S_IRUGO|S_IWUSR); 155 - MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on"); 156 module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR); 157 - MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n\tbit 0 - Disable FUA in WRITE SCSI commands\n\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n\tbit 2 - Disable only if Battery not protecting Cache"); 158 module_param(dacmode, int, S_IRUGO|S_IWUSR); 159 - MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC. 0=off, 1=on"); 160 module_param_named(commit, aac_commit, int, S_IRUGO|S_IWUSR); 161 - MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the adapter for foreign arrays.\nThis is typically needed in systems that do not have a BIOS. 0=off, 1=on"); 162 module_param(startup_timeout, int, S_IRUGO|S_IWUSR); 163 - MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for adapter to have it's kernel up and\nrunning. This is typically adjusted for large systems that do not have a BIOS."); 164 module_param(aif_timeout, int, S_IRUGO|S_IWUSR); 165 - MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for applications to pick up AIFs before\nderegistering them. This is typically adjusted for heavily burdened systems."); 166 167 int numacb = -1; 168 module_param(numacb, int, S_IRUGO|S_IWUSR); 169 - MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control blocks (FIB) allocated. Valid values are 512 and down. Default is to use suggestion from Firmware."); 170 171 int acbsize = -1; 172 module_param(acbsize, int, S_IRUGO|S_IWUSR); 173 - MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. Valid values are 512, 2048, 4096 and 8192. Default is to use suggestion from Firmware."); 174 175 int update_interval = 30 * 60; 176 module_param(update_interval, int, S_IRUGO|S_IWUSR); 177 - MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync updates issued to adapter."); 178 179 int check_interval = 24 * 60 * 60; 180 module_param(check_interval, int, S_IRUGO|S_IWUSR); 181 - MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health checks."); 182 183 int aac_check_reset = 1; 184 module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR); 185 - MODULE_PARM_DESC(aac_check_reset, "If adapter fails health check, reset the adapter. a value of -1 forces the reset to adapters programmed to ignore it."); 186 187 int expose_physicals = -1; 188 module_param(expose_physicals, int, S_IRUGO|S_IWUSR); 189 - MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays. 
-1=protect 0=off, 1=on"); 190 191 - int aac_reset_devices = 0; 192 module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR); 193 MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization."); 194 ··· 1341 (int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid), 1342 dev->supplement_adapter_info.VpdInfo.Tsid); 1343 } 1344 - if (!aac_check_reset || ((aac_check_reset != 1) && 1345 (dev->supplement_adapter_info.SupportedOptions2 & 1346 AAC_OPTION_IGNORE_RESET))) { 1347 printk(KERN_INFO "%s%d: Reset Adapter Ignored\n", ··· 1379 1380 if (nondasd != -1) 1381 dev->nondasd_support = (nondasd!=0); 1382 - if(dev->nondasd_support != 0) { 1383 printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id); 1384 - } 1385 1386 dev->dac_support = 0; 1387 if( (sizeof(dma_addr_t) > 4) && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){ 1388 - printk(KERN_INFO "%s%d: 64bit support enabled.\n", dev->name, dev->id); 1389 dev->dac_support = 1; 1390 } 1391 ··· 1396 if(dev->dac_support != 0) { 1397 if (!pci_set_dma_mask(dev->pdev, DMA_64BIT_MASK) && 1398 !pci_set_consistent_dma_mask(dev->pdev, DMA_64BIT_MASK)) { 1399 - printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n", 1400 - dev->name, dev->id); 1401 } else if (!pci_set_dma_mask(dev->pdev, DMA_32BIT_MASK) && 1402 !pci_set_consistent_dma_mask(dev->pdev, DMA_32BIT_MASK)) { 1403 printk(KERN_INFO"%s%d: DMA mask set failed, 64 Bit DAC disabled\n",
··· 144 */ 145 146 static int nondasd = -1; 147 + static int aac_cache; 148 static int dacmode = -1; 149 + int aac_msi; 150 int aac_commit = -1; 151 int startup_timeout = 180; 152 int aif_timeout = 120; 153 154 module_param(nondasd, int, S_IRUGO|S_IWUSR); 155 + MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices." 156 + " 0=off, 1=on"); 157 module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR); 158 + MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n" 159 + "\tbit 0 - Disable FUA in WRITE SCSI commands\n" 160 + "\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n" 161 + "\tbit 2 - Disable only if Battery not protecting Cache"); 162 module_param(dacmode, int, S_IRUGO|S_IWUSR); 163 + MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC." 164 + " 0=off, 1=on"); 165 module_param_named(commit, aac_commit, int, S_IRUGO|S_IWUSR); 166 + MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the" 167 + " adapter for foreign arrays.\n" 168 + "This is typically needed in systems that do not have a BIOS." 169 + " 0=off, 1=on"); 170 + module_param_named(msi, aac_msi, int, S_IRUGO|S_IWUSR); 171 + MODULE_PARM_DESC(msi, "IRQ handling." 172 + " 0=PIC(default), 1=MSI, 2=MSI-X(unsupported, uses MSI)"); 173 module_param(startup_timeout, int, S_IRUGO|S_IWUSR); 174 + MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for" 175 + " adapter to have it's kernel up and\n" 176 + "running. This is typically adjusted for large systems that do not" 177 + " have a BIOS."); 178 module_param(aif_timeout, int, S_IRUGO|S_IWUSR); 179 + MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for" 180 + " applications to pick up AIFs before\n" 181 + "deregistering them. This is typically adjusted for heavily burdened" 182 + " systems."); 183 184 int numacb = -1; 185 module_param(numacb, int, S_IRUGO|S_IWUSR); 186 + MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control" 187 + " blocks (FIB) allocated. Valid values are 512 and down. Default is" 188 + " to use suggestion from Firmware."); 189 190 int acbsize = -1; 191 module_param(acbsize, int, S_IRUGO|S_IWUSR); 192 + MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB)" 193 + " size. Valid values are 512, 2048, 4096 and 8192. Default is to use" 194 + " suggestion from Firmware."); 195 196 int update_interval = 30 * 60; 197 module_param(update_interval, int, S_IRUGO|S_IWUSR); 198 + MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync" 199 + " updates issued to adapter."); 200 201 int check_interval = 24 * 60 * 60; 202 module_param(check_interval, int, S_IRUGO|S_IWUSR); 203 + MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health" 204 + " checks."); 205 206 int aac_check_reset = 1; 207 module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR); 208 + MODULE_PARM_DESC(aac_check_reset, "If adapter fails health check, reset the" 209 + " adapter. a value of -1 forces the reset to adapters programmed to" 210 + " ignore it."); 211 212 int expose_physicals = -1; 213 module_param(expose_physicals, int, S_IRUGO|S_IWUSR); 214 + MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays." 
215 + " -1=protect 0=off, 1=on"); 216 217 + int aac_reset_devices; 218 module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR); 219 MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization."); 220 ··· 1315 (int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid), 1316 dev->supplement_adapter_info.VpdInfo.Tsid); 1317 } 1318 + if (!aac_check_reset || ((aac_check_reset == 1) && 1319 (dev->supplement_adapter_info.SupportedOptions2 & 1320 AAC_OPTION_IGNORE_RESET))) { 1321 printk(KERN_INFO "%s%d: Reset Adapter Ignored\n", ··· 1353 1354 if (nondasd != -1) 1355 dev->nondasd_support = (nondasd!=0); 1356 + if (dev->nondasd_support && !dev->in_reset) 1357 printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id); 1358 1359 dev->dac_support = 0; 1360 if( (sizeof(dma_addr_t) > 4) && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){ 1361 + if (!dev->in_reset) 1362 + printk(KERN_INFO "%s%d: 64bit support enabled.\n", 1363 + dev->name, dev->id); 1364 dev->dac_support = 1; 1365 } 1366 ··· 1369 if(dev->dac_support != 0) { 1370 if (!pci_set_dma_mask(dev->pdev, DMA_64BIT_MASK) && 1371 !pci_set_consistent_dma_mask(dev->pdev, DMA_64BIT_MASK)) { 1372 + if (!dev->in_reset) 1373 + printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n", 1374 + dev->name, dev->id); 1375 } else if (!pci_set_dma_mask(dev->pdev, DMA_32BIT_MASK) && 1376 !pci_set_consistent_dma_mask(dev->pdev, DMA_32BIT_MASK)) { 1377 printk(KERN_INFO"%s%d: DMA mask set failed, 64 Bit DAC disabled\n",
+2
drivers/scsi/aacraid/aacraid.h
··· 1026 u8 raw_io_64; 1027 u8 printf_enabled; 1028 u8 in_reset; 1029 }; 1030 1031 #define aac_adapter_interrupt(dev) \ ··· 1882 extern int aif_timeout; 1883 extern int expose_physicals; 1884 extern int aac_reset_devices; 1885 extern int aac_commit; 1886 extern int update_interval; 1887 extern int check_interval;
··· 1026 u8 raw_io_64; 1027 u8 printf_enabled; 1028 u8 in_reset; 1029 + u8 msi; 1030 }; 1031 1032 #define aac_adapter_interrupt(dev) \ ··· 1881 extern int aif_timeout; 1882 extern int expose_physicals; 1883 extern int aac_reset_devices; 1884 + extern int aac_msi; 1885 extern int aac_commit; 1886 extern int update_interval; 1887 extern int check_interval;
+1 -1
drivers/scsi/aacraid/commsup.c
··· 1458 1459 printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED); 1460 1461 - if (!aac_check_reset || ((aac_check_reset != 1) && 1462 (aac->supplement_adapter_info.SupportedOptions2 & 1463 AAC_OPTION_IGNORE_RESET))) 1464 goto out;
··· 1458 1459 printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED); 1460 1461 + if (!aac_check_reset || ((aac_check_reset == 1) && 1462 (aac->supplement_adapter_info.SupportedOptions2 & 1463 AAC_OPTION_IGNORE_RESET))) 1464 goto out;
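
Both this hunk and the matching one in aachba.c flip `aac_check_reset != 1` to `aac_check_reset == 1`, so only the default setting defers to an adapter programmed to ignore resets, while -1 still forces the reset. A hedged paraphrase of the corrected predicate (the flag value below is illustrative, not copied from aacraid headers):

    /* Illustrative value; the real AAC_OPTION_IGNORE_RESET lives in aacraid.h. */
    #define AAC_OPTION_IGNORE_RESET 0x00000002

    /* check_reset: 0 = never reset, 1 = reset unless the adapter asks to be
     * left alone (default), -1 = reset even adapters that ask. */
    static int skip_reset(int check_reset, unsigned int options2)
    {
        return !check_reset ||
               (check_reset == 1 && (options2 & AAC_OPTION_IGNORE_RESET));
    }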
+25 -22
drivers/scsi/aacraid/linit.c
··· 275 276 /** 277 * aac_get_driver_ident 278 - * @devtype: index into lookup table 279 * 280 - * Returns a pointer to the entry in the driver lookup table. 281 */ 282 283 struct aac_driver_ident* aac_get_driver_ident(int devtype) ··· 494 495 static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf) 496 { 497 - struct scsi_device * sdev = to_scsi_device(dev); 498 if (sdev_channel(sdev) != CONTAINER_CHANNEL) 499 return snprintf(buf, PAGE_SIZE, sdev->no_uld_attach 500 - ? "Hidden\n" : "JBOD"); 501 return snprintf(buf, PAGE_SIZE, "%s\n", 502 - get_container_type(((struct aac_dev *)(sdev->host->hostdata)) 503 - ->fsa_dev[sdev_id(sdev)].type)); 504 } 505 506 static struct device_attribute aac_raid_level_attr = { ··· 642 AAC_OPTION_MU_RESET) && 643 aac_check_reset && 644 ((aac_check_reset != 1) || 645 - (aac->supplement_adapter_info.SupportedOptions2 & 646 AAC_OPTION_IGNORE_RESET))) 647 aac_reset_adapter(aac, 2); /* Bypass wait for command quiesce */ 648 return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */ ··· 861 le32_to_cpu(dev->adapter_info.serial[0])); 862 if (len && 863 !memcmp(&dev->supplement_adapter_info.MfgPcbaSerialNo[ 864 - sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)+2-len], 865 - buf, len)) 866 len = snprintf(buf, PAGE_SIZE, "%.*s\n", 867 (int)sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo), 868 dev->supplement_adapter_info.MfgPcbaSerialNo); ··· 1005 1006 static struct scsi_host_template aac_driver_template = { 1007 .module = THIS_MODULE, 1008 - .name = "AAC", 1009 .proc_name = AAC_DRIVERNAME, 1010 - .info = aac_info, 1011 - .ioctl = aac_ioctl, 1012 #ifdef CONFIG_COMPAT 1013 .compat_ioctl = aac_compat_ioctl, 1014 #endif 1015 - .queuecommand = aac_queuecommand, 1016 - .bios_param = aac_biosparm, 1017 .shost_attrs = aac_attrs, 1018 .slave_configure = aac_slave_configure, 1019 .change_queue_depth = aac_change_queue_depth, 1020 .sdev_attrs = aac_dev_attrs, 1021 .eh_abort_handler = aac_eh_abort, 1022 .eh_host_reset_handler = aac_eh_reset, 1023 - .can_queue = AAC_NUM_IO_FIB, 1024 - .this_id = MAXIMUM_NUM_CONTAINERS, 1025 - .sg_tablesize = 16, 1026 - .max_sectors = 128, 1027 #if (AAC_NUM_IO_FIB > 256) 1028 .cmd_per_lun = 256, 1029 #else 1030 - .cmd_per_lun = AAC_NUM_IO_FIB, 1031 #endif 1032 .use_clustering = ENABLE_CLUSTERING, 1033 - .emulated = 1, 1034 }; 1035 1036 static void __aac_shutdown(struct aac_dev * aac) ··· 1040 aac_send_shutdown(aac); 1041 aac_adapter_disable_int(aac); 1042 free_irq(aac->pdev->irq, aac); 1043 } 1044 1045 static int __devinit aac_probe_one(struct pci_dev *pdev, ··· 1257 .id_table = aac_pci_tbl, 1258 .probe = aac_probe_one, 1259 .remove = __devexit_p(aac_remove_one), 1260 - .shutdown = aac_shutdown, 1261 }; 1262 1263 static int __init aac_init(void) ··· 1274 aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops); 1275 if (aac_cfg_major < 0) { 1276 printk(KERN_WARNING 1277 - "aacraid: unable to register \"aac\" device.\n"); 1278 } 1279 1280 return 0;
··· 275 276 /** 277 * aac_get_driver_ident 278 + * @devtype: index into lookup table 279 * 280 + * Returns a pointer to the entry in the driver lookup table. 281 */ 282 283 struct aac_driver_ident* aac_get_driver_ident(int devtype) ··· 494 495 static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf) 496 { 497 + struct scsi_device *sdev = to_scsi_device(dev); 498 + struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata); 499 if (sdev_channel(sdev) != CONTAINER_CHANNEL) 500 return snprintf(buf, PAGE_SIZE, sdev->no_uld_attach 501 + ? "Hidden\n" : 502 + ((aac->jbod && (sdev->type == TYPE_DISK)) ? "JBOD\n" : "")); 503 return snprintf(buf, PAGE_SIZE, "%s\n", 504 + get_container_type(aac->fsa_dev[sdev_id(sdev)].type)); 505 } 506 507 static struct device_attribute aac_raid_level_attr = { ··· 641 AAC_OPTION_MU_RESET) && 642 aac_check_reset && 643 ((aac_check_reset != 1) || 644 + !(aac->supplement_adapter_info.SupportedOptions2 & 645 AAC_OPTION_IGNORE_RESET))) 646 aac_reset_adapter(aac, 2); /* Bypass wait for command quiesce */ 647 return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */ ··· 860 le32_to_cpu(dev->adapter_info.serial[0])); 861 if (len && 862 !memcmp(&dev->supplement_adapter_info.MfgPcbaSerialNo[ 863 + sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)-len], 864 + buf, len-1)) 865 len = snprintf(buf, PAGE_SIZE, "%.*s\n", 866 (int)sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo), 867 dev->supplement_adapter_info.MfgPcbaSerialNo); ··· 1004 1005 static struct scsi_host_template aac_driver_template = { 1006 .module = THIS_MODULE, 1007 + .name = "AAC", 1008 .proc_name = AAC_DRIVERNAME, 1009 + .info = aac_info, 1010 + .ioctl = aac_ioctl, 1011 #ifdef CONFIG_COMPAT 1012 .compat_ioctl = aac_compat_ioctl, 1013 #endif 1014 + .queuecommand = aac_queuecommand, 1015 + .bios_param = aac_biosparm, 1016 .shost_attrs = aac_attrs, 1017 .slave_configure = aac_slave_configure, 1018 .change_queue_depth = aac_change_queue_depth, 1019 .sdev_attrs = aac_dev_attrs, 1020 .eh_abort_handler = aac_eh_abort, 1021 .eh_host_reset_handler = aac_eh_reset, 1022 + .can_queue = AAC_NUM_IO_FIB, 1023 + .this_id = MAXIMUM_NUM_CONTAINERS, 1024 + .sg_tablesize = 16, 1025 + .max_sectors = 128, 1026 #if (AAC_NUM_IO_FIB > 256) 1027 .cmd_per_lun = 256, 1028 #else 1029 + .cmd_per_lun = AAC_NUM_IO_FIB, 1030 #endif 1031 .use_clustering = ENABLE_CLUSTERING, 1032 + .emulated = 1, 1033 }; 1034 1035 static void __aac_shutdown(struct aac_dev * aac) ··· 1039 aac_send_shutdown(aac); 1040 aac_adapter_disable_int(aac); 1041 free_irq(aac->pdev->irq, aac); 1042 + if (aac->msi) 1043 + pci_disable_msi(aac->pdev); 1044 } 1045 1046 static int __devinit aac_probe_one(struct pci_dev *pdev, ··· 1254 .id_table = aac_pci_tbl, 1255 .probe = aac_probe_one, 1256 .remove = __devexit_p(aac_remove_one), 1257 + .shutdown = aac_shutdown, 1258 }; 1259 1260 static int __init aac_init(void) ··· 1271 aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops); 1272 if (aac_cfg_major < 0) { 1273 printk(KERN_WARNING 1274 + "aacraid: unable to register \"aac\" device.\n"); 1275 } 1276 1277 return 0;
+4 -1
drivers/scsi/aacraid/rx.c
··· 625 if (aac_init_adapter(dev) == NULL) 626 goto error_iounmap; 627 aac_adapter_comm(dev, dev->comm_interface); 628 - if (request_irq(dev->scsi_host_ptr->irq, dev->a_ops.adapter_intr, 629 IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) { 630 printk(KERN_ERR "%s%d: Interrupt unavailable.\n", 631 name, instance); 632 goto error_iounmap;
··· 625 if (aac_init_adapter(dev) == NULL) 626 goto error_iounmap; 627 aac_adapter_comm(dev, dev->comm_interface); 628 + dev->msi = aac_msi && !pci_enable_msi(dev->pdev); 629 + if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, 630 IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) { 631 + if (dev->msi) 632 + pci_disable_msi(dev->pdev); 633 printk(KERN_ERR "%s%d: Interrupt unavailable.\n", 634 name, instance); 635 goto error_iounmap;
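
Note the switch from dev->scsi_host_ptr->irq to dev->pdev->irq: pci_enable_msi() rewrites pdev->irq with the allocated vector, so the stale Scsi_Host copy must not be used. A sketch of the opt-in/fall-back shape, assuming a generic handler (all names here are placeholders):

    #include <linux/interrupt.h>
    #include <linux/pci.h>

    static int example_setup_irq(struct pci_dev *pdev, int want_msi,
                                 irq_handler_t handler, void *ctx, u8 *using_msi)
    {
        int err;

        *using_msi = want_msi && !pci_enable_msi(pdev);

        /* must read pdev->irq *after* pci_enable_msi(): it may have changed */
        err = request_irq(pdev->irq, handler, IRQF_SHARED, "example", ctx);
        if (err && *using_msi) {
            pci_disable_msi(pdev);      /* fall back cleanly to INTx */
            *using_msi = 0;
        }
        return err;
    }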
+3 -2
drivers/scsi/aacraid/sa.c
··· 31 #include <linux/kernel.h> 32 #include <linux/init.h> 33 #include <linux/types.h> 34 #include <linux/spinlock.h> 35 #include <linux/slab.h> 36 #include <linux/blkdev.h> ··· 386 387 if(aac_init_adapter(dev) == NULL) 388 goto error_irq; 389 - if (request_irq(dev->scsi_host_ptr->irq, dev->a_ops.adapter_intr, 390 IRQF_SHARED|IRQF_DISABLED, 391 "aacraid", (void *)dev ) < 0) { 392 printk(KERN_WARNING "%s%d: Interrupt unavailable.\n", ··· 404 405 error_irq: 406 aac_sa_disable_interrupt(dev); 407 - free_irq(dev->scsi_host_ptr->irq, (void *)dev); 408 409 error_iounmap: 410
··· 31 #include <linux/kernel.h> 32 #include <linux/init.h> 33 #include <linux/types.h> 34 + #include <linux/pci.h> 35 #include <linux/spinlock.h> 36 #include <linux/slab.h> 37 #include <linux/blkdev.h> ··· 385 386 if(aac_init_adapter(dev) == NULL) 387 goto error_irq; 388 + if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, 389 IRQF_SHARED|IRQF_DISABLED, 390 "aacraid", (void *)dev ) < 0) { 391 printk(KERN_WARNING "%s%d: Interrupt unavailable.\n", ··· 403 404 error_irq: 405 aac_sa_disable_interrupt(dev); 406 + free_irq(dev->pdev->irq, (void *)dev); 407 408 error_iounmap: 409
+11 -2
drivers/scsi/advansys.c
··· 566 ASC_SCSI_BIT_ID_TYPE unit_not_ready; 567 ASC_SCSI_BIT_ID_TYPE queue_full_or_busy; 568 ASC_SCSI_BIT_ID_TYPE start_motor; 569 - uchar overrun_buf[ASC_OVERRUN_BSIZE] __aligned(8); 570 dma_addr_t overrun_dma; 571 uchar scsi_reset_wait; 572 uchar chip_no; ··· 13833 */ 13834 if (ASC_NARROW_BOARD(boardp)) { 13835 ASC_DBG(2, "AscInitAsc1000Driver()\n"); 13836 warn_code = AscInitAsc1000Driver(asc_dvc_varp); 13837 13838 if (warn_code || asc_dvc_varp->err_code) { ··· 13846 "warn 0x%x, error 0x%x\n", 13847 asc_dvc_varp->init_state, warn_code, 13848 asc_dvc_varp->err_code); 13849 - if (asc_dvc_varp->err_code) 13850 ret = -ENODEV; 13851 } 13852 } else { 13853 if (advansys_wide_init_chip(shost)) ··· 13902 dma_unmap_single(board->dev, 13903 board->dvc_var.asc_dvc_var.overrun_dma, 13904 ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE); 13905 } else { 13906 iounmap(board->ioremap_addr); 13907 advansys_wide_free_mem(board);
··· 566 ASC_SCSI_BIT_ID_TYPE unit_not_ready; 567 ASC_SCSI_BIT_ID_TYPE queue_full_or_busy; 568 ASC_SCSI_BIT_ID_TYPE start_motor; 569 + uchar *overrun_buf; 570 dma_addr_t overrun_dma; 571 uchar scsi_reset_wait; 572 uchar chip_no; ··· 13833 */ 13834 if (ASC_NARROW_BOARD(boardp)) { 13835 ASC_DBG(2, "AscInitAsc1000Driver()\n"); 13836 + 13837 + asc_dvc_varp->overrun_buf = kzalloc(ASC_OVERRUN_BSIZE, GFP_KERNEL); 13838 + if (!asc_dvc_varp->overrun_buf) { 13839 + ret = -ENOMEM; 13840 + goto err_free_wide_mem; 13841 + } 13842 warn_code = AscInitAsc1000Driver(asc_dvc_varp); 13843 13844 if (warn_code || asc_dvc_varp->err_code) { ··· 13840 "warn 0x%x, error 0x%x\n", 13841 asc_dvc_varp->init_state, warn_code, 13842 asc_dvc_varp->err_code); 13843 + if (asc_dvc_varp->err_code) { 13844 ret = -ENODEV; 13845 + kfree(asc_dvc_varp->overrun_buf); 13846 + } 13847 } 13848 } else { 13849 if (advansys_wide_init_chip(shost)) ··· 13894 dma_unmap_single(board->dev, 13895 board->dvc_var.asc_dvc_var.overrun_dma, 13896 ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE); 13897 + kfree(board->dvc_var.asc_dvc_var.overrun_buf); 13898 } else { 13899 iounmap(board->ioremap_addr); 13900 advansys_wide_free_mem(board);
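
The advansys fix stops DMA-mapping a buffer embedded in ASC_DVC_VAR and instead maps a separately kzalloc'd one, which cannot share cachelines with CPU-written fields of the structure. A generic sketch under that assumption (helper name invented for illustration):

    #include <linux/dma-mapping.h>
    #include <linux/slab.h>

    static void *example_alloc_overrun(struct device *dev, size_t size,
                                       dma_addr_t *dma)
    {
        void *buf = kzalloc(size, GFP_KERNEL);  /* heap memory is safe to map */

        if (!buf)
            return NULL;
        *dma = dma_map_single(dev, buf, size, DMA_FROM_DEVICE);
        return buf;
        /* teardown: dma_unmap_single(dev, *dma, size, DMA_FROM_DEVICE),
         * then kfree(buf), as in the release path above */
    }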
+1 -1
drivers/scsi/aic94xx/aic94xx_sas.h
··· 292 #define INITIATE_SSP_TASK 0x00 293 #define INITIATE_LONG_SSP_TASK 0x01 294 #define INITIATE_BIDIR_SSP_TASK 0x02 295 - #define ABORT_TASK 0x03 296 #define INITIATE_SSP_TMF 0x04 297 #define SSP_TARG_GET_DATA 0x05 298 #define SSP_TARG_GET_DATA_GOOD 0x06
··· 292 #define INITIATE_SSP_TASK 0x00 293 #define INITIATE_LONG_SSP_TASK 0x01 294 #define INITIATE_BIDIR_SSP_TASK 0x02 295 + #define SCB_ABORT_TASK 0x03 296 #define INITIATE_SSP_TMF 0x04 297 #define SSP_TARG_GET_DATA 0x05 298 #define SSP_TARG_GET_DATA_GOOD 0x06
+1 -1
drivers/scsi/aic94xx/aic94xx_tmf.c
··· 369 return -ENOMEM; 370 scb = ascb->scb; 371 372 - scb->header.opcode = ABORT_TASK; 373 374 switch (task->task_proto) { 375 case SAS_PROTOCOL_SATA:
··· 369 return -ENOMEM; 370 scb = ascb->scb; 371 372 + scb->header.opcode = SCB_ABORT_TASK; 373 374 switch (task->task_proto) { 375 case SAS_PROTOCOL_SATA:
+3 -13
drivers/scsi/arm/fas216.c
··· 2018 * the upper layers to process. This would have been set 2019 * correctly by fas216_std_done. 2020 */ 2021 SCpnt->scsi_done(SCpnt); 2022 } 2023 ··· 2104 if (SCpnt->cmnd[0] == REQUEST_SENSE) 2105 goto done; 2106 2107 fas216_log_target(info, LOG_CONNECT, SCpnt->device->id, 2108 "requesting sense"); 2109 - memset(SCpnt->cmnd, 0, sizeof (SCpnt->cmnd)); 2110 - SCpnt->cmnd[0] = REQUEST_SENSE; 2111 - SCpnt->cmnd[1] = SCpnt->device->lun << 5; 2112 - SCpnt->cmnd[4] = sizeof(SCpnt->sense_buffer); 2113 - SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]); 2114 - SCpnt->SCp.buffer = NULL; 2115 - SCpnt->SCp.buffers_residual = 0; 2116 - SCpnt->SCp.ptr = (char *)SCpnt->sense_buffer; 2117 - SCpnt->SCp.this_residual = sizeof(SCpnt->sense_buffer); 2118 - SCpnt->SCp.phase = sizeof(SCpnt->sense_buffer); 2119 SCpnt->SCp.Message = 0; 2120 SCpnt->SCp.Status = 0; 2121 - SCpnt->request_bufflen = sizeof(SCpnt->sense_buffer); 2122 - SCpnt->sc_data_direction = DMA_FROM_DEVICE; 2123 - SCpnt->use_sg = 0; 2124 SCpnt->tag = 0; 2125 SCpnt->host_scribble = (void *)fas216_rq_sns_done; 2126
··· 2018 * the upper layers to process. This would have been set 2019 * correctly by fas216_std_done. 2020 */ 2021 + scsi_eh_restore_cmnd(SCpnt, &info->ses); 2022 SCpnt->scsi_done(SCpnt); 2023 } 2024 ··· 2103 if (SCpnt->cmnd[0] == REQUEST_SENSE) 2104 goto done; 2105 2106 + scsi_eh_prep_cmnd(SCpnt, &info->ses, NULL, 0, ~0); 2107 fas216_log_target(info, LOG_CONNECT, SCpnt->device->id, 2108 "requesting sense"); 2109 + init_SCp(SCpnt); 2110 SCpnt->SCp.Message = 0; 2111 SCpnt->SCp.Status = 0; 2112 SCpnt->tag = 0; 2113 SCpnt->host_scribble = (void *)fas216_rq_sns_done; 2114
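
Instead of hand-patching cmnd[], SCp and request_bufflen to issue the autosense REQUEST_SENSE, fas216 now lets the scsi_eh helpers snapshot and restore the command. The pairing, sketched with the driver plumbing elided (function names are illustrative):

    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_eh.h>

    /* NULL/0 keeps the default REQUEST_SENSE CDB; ~0 requests the whole
     * sense buffer. The original CDB and data buffer are saved in *ses. */
    static void example_start_autosense(struct scsi_cmnd *scmd,
                                        struct scsi_eh_save *ses)
    {
        scsi_eh_prep_cmnd(scmd, ses, NULL, 0, ~0);
        /* reissue scmd through the normal submission path here */
    }

    /* On completion, restore before handing the command back upstream. */
    static void example_finish_autosense(struct scsi_cmnd *scmd,
                                         struct scsi_eh_save *ses)
    {
        scsi_eh_restore_cmnd(scmd, ses);
        scmd->scsi_done(scmd);
    }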
+3
drivers/scsi/arm/fas216.h
··· 16 #define NO_IRQ 255 17 #endif 18 19 #include "queue.h" 20 #include "msgqueue.h" 21 ··· 313 314 /* miscellaneous */ 315 int internal_done; /* flag to indicate request done */ 316 unsigned long magic_end; 317 } FAS216_Info; 318
··· 16 #define NO_IRQ 255 17 #endif 18 19 + #include <scsi/scsi_eh.h> 20 + 21 #include "queue.h" 22 #include "msgqueue.h" 23 ··· 311 312 /* miscellaneous */ 313 int internal_done; /* flag to indicate request done */ 314 + struct scsi_eh_save *ses; /* holds request sense restore info */ 315 unsigned long magic_end; 316 } FAS216_Info; 317
+14 -2
drivers/scsi/gdth.c
··· 642 *cnt, vendor, device)); 643 644 pdev = NULL; 645 - while ((pdev = pci_find_device(vendor, device, pdev)) 646 != NULL) { 647 if (pci_enable_device(pdev)) 648 continue; 649 - if (*cnt >= MAXHA) 650 return; 651 /* GDT PCI controller found, resources are already in pdev */ 652 pcistr[*cnt].pdev = pdev; 653 pcistr[*cnt].irq = pdev->irq; ··· 4839 if (error) 4840 goto out_free_coal_stat; 4841 list_add_tail(&ha->list, &gdth_instances); 4842 return 0; 4843 4844 out_free_coal_stat: ··· 4969 if (error) 4970 goto out_free_coal_stat; 4971 list_add_tail(&ha->list, &gdth_instances); 4972 return 0; 4973 4974 out_free_ccb_phys: ··· 5109 if (error) 5110 goto out_free_coal_stat; 5111 list_add_tail(&ha->list, &gdth_instances); 5112 return 0; 5113 5114 out_free_coal_stat:
··· 642 *cnt, vendor, device)); 643 644 pdev = NULL; 645 + while ((pdev = pci_get_device(vendor, device, pdev)) 646 != NULL) { 647 if (pci_enable_device(pdev)) 648 continue; 649 + if (*cnt >= MAXHA) { 650 + pci_dev_put(pdev); 651 return; 652 + } 653 + 654 /* GDT PCI controller found, resources are already in pdev */ 655 pcistr[*cnt].pdev = pdev; 656 pcistr[*cnt].irq = pdev->irq; ··· 4836 if (error) 4837 goto out_free_coal_stat; 4838 list_add_tail(&ha->list, &gdth_instances); 4839 + 4840 + scsi_scan_host(shp); 4841 + 4842 return 0; 4843 4844 out_free_coal_stat: ··· 4963 if (error) 4964 goto out_free_coal_stat; 4965 list_add_tail(&ha->list, &gdth_instances); 4966 + 4967 + scsi_scan_host(shp); 4968 + 4969 return 0; 4970 4971 out_free_ccb_phys: ··· 5100 if (error) 5101 goto out_free_coal_stat; 5102 list_add_tail(&ha->list, &gdth_instances); 5103 + 5104 + scsi_scan_host(shp); 5105 + 5106 return 0; 5107 5108 out_free_coal_stat:
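
Unlike the deprecated pci_find_device(), pci_get_device() drops the reference on the device passed in and takes one on the device it returns, so the loop stays balanced by itself; only early exits need an explicit pci_dev_put(), which is exactly what the MAXHA bail-out gains. A sketch (wanted() is a hypothetical stand-in for gdth's per-board checks):

    #include <linux/pci.h>

    /* hypothetical predicate standing in for gdth's per-board checks */
    static bool wanted(struct pci_dev *pdev)
    {
        return !pci_enable_device(pdev);
    }

    static struct pci_dev *example_scan(unsigned int vendor, unsigned int device)
    {
        struct pci_dev *pdev = NULL;

        while ((pdev = pci_get_device(vendor, device, pdev)) != NULL) {
            if (!wanted(pdev))
                continue;           /* next iteration rebalances the refcount */
            return pdev;            /* held reference now belongs to the caller */
        }
        return NULL;
        /* any other early exit inside the loop must pci_dev_put(pdev) first */
    }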
+16 -3
drivers/scsi/lpfc/lpfc.h
··· 1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2007 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 307 308 uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */ 309 uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */ 310 struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN]; 311 struct lpfc_name fc_nodename; /* fc nodename */ 312 struct lpfc_name fc_portname; /* fc portname */ ··· 393 HBA_OVER_TEMP 394 }; 395 396 struct lpfc_hba { 397 struct lpfc_sli sli; 398 uint32_t sli_rev; /* SLI2 or SLI3 */ ··· 417 /* This flag is set while issuing */ 418 /* INIT_LINK mailbox command */ 419 #define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ 420 - #define LS_IGNORE_ERATT 0x3 /* intr handler should ignore ERATT */ 421 422 struct lpfc_sli2_slim *slim2p; 423 struct lpfc_dmabuf hbqslimp; ··· 495 wait_queue_head_t *work_wait; 496 struct task_struct *worker_thread; 497 498 uint32_t hbq_count; /* Count of configured HBQs */ 499 struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */ 500 ··· 565 mempool_t *nlp_mem_pool; 566 567 struct fc_host_statistics link_stats; 568 - uint8_t using_msi; 569 570 struct list_head port_list; 571 struct lpfc_vport *pport; /* physical lpfc_vport pointer */ ··· 606 unsigned long last_completion_time; 607 struct timer_list hb_tmofunc; 608 uint8_t hb_outstanding; 609 /* 610 * Following bit will be set for all buffer tags which are not 611 * associated with any HBQ.
··· 1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 + * Copyright (C) 2004-2008 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 307 308 uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */ 309 uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */ 310 + uint32_t fc_rscn_flush; /* flag use of fc_rscn_id_list */ 311 struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN]; 312 struct lpfc_name fc_nodename; /* fc nodename */ 313 struct lpfc_name fc_portname; /* fc portname */ ··· 392 HBA_OVER_TEMP 393 }; 394 395 + enum intr_type_t { 396 + NONE = 0, 397 + INTx, 398 + MSI, 399 + MSIX, 400 + }; 401 + 402 struct lpfc_hba { 403 struct lpfc_sli sli; 404 uint32_t sli_rev; /* SLI2 or SLI3 */ ··· 409 /* This flag is set while issuing */ 410 /* INIT_LINK mailbox command */ 411 #define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ 412 + #define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */ 413 414 struct lpfc_sli2_slim *slim2p; 415 struct lpfc_dmabuf hbqslimp; ··· 487 wait_queue_head_t *work_wait; 488 struct task_struct *worker_thread; 489 490 + uint32_t hbq_in_use; /* HBQs in use flag */ 491 + struct list_head hbqbuf_in_list; /* in-fly hbq buffer list */ 492 uint32_t hbq_count; /* Count of configured HBQs */ 493 struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */ 494 ··· 555 mempool_t *nlp_mem_pool; 556 557 struct fc_host_statistics link_stats; 558 + enum intr_type_t intr_type; 559 + struct msix_entry msix_entries[1]; 560 561 struct list_head port_list; 562 struct lpfc_vport *pport; /* physical lpfc_vport pointer */ ··· 595 unsigned long last_completion_time; 596 struct timer_list hb_tmofunc; 597 uint8_t hb_outstanding; 598 + /* ndlp reference management */ 599 + spinlock_t ndlp_lock; 600 /* 601 * Following bit will be set for all buffer tags which are not 602 * associated with any HBQ.
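
The new intr_type_t and one-entry msix_entries[] back the "Add MSI-X single message support" commit in this series. With the API of this kernel generation (pci_enable_msix() was later superseded), single-vector setup looks roughly like this (helper name invented):

    #include <linux/pci.h>

    static int example_enable_msix_one(struct pci_dev *pdev,
                                       struct msix_entry *entry)
    {
        entry->entry = 0;                       /* ask for table slot 0 only */
        return pci_enable_msix(pdev, entry, 1);
        /* on success, entry->vector is the IRQ to hand to request_irq();
         * undo with pci_disable_msix(pdev) */
    }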
+12 -7
drivers/scsi/lpfc/lpfc_attr.c
··· 1191 shost = lpfc_shost_from_vport(vport); 1192 spin_lock_irq(shost->host_lock); 1193 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) 1194 - if (ndlp->rport) 1195 ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo; 1196 spin_unlock_irq(shost->host_lock); 1197 } ··· 1592 # support this feature 1593 # 0 = MSI disabled (default) 1594 # 1 = MSI enabled 1595 - # Value range is [0,1]. Default value is 0. 1596 */ 1597 - LPFC_ATTR_R(use_msi, 0, 0, 1, "Use Message Signaled Interrupts, if possible"); 1598 1599 /* 1600 # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. ··· 1948 } 1949 1950 /* If HBA encountered an error attention, allow only DUMP 1951 - * mailbox command until the HBA is restarted. 1952 */ 1953 if ((phba->pport->stopped) && 1954 - (phba->sysfs_mbox.mbox->mb.mbxCommand 1955 - != MBX_DUMP_MEMORY)) { 1956 sysfs_mbox_idle(phba); 1957 spin_unlock_irq(&phba->hbalock); 1958 return -EPERM; ··· 2388 spin_lock_irq(shost->host_lock); 2389 /* Search for this, mapped, target ID */ 2390 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 2391 - if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && 2392 starget->id == ndlp->nlp_sid) { 2393 spin_unlock_irq(shost->host_lock); 2394 return ndlp;
··· 1191 shost = lpfc_shost_from_vport(vport); 1192 spin_lock_irq(shost->host_lock); 1193 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) 1194 + if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport) 1195 ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo; 1196 spin_unlock_irq(shost->host_lock); 1197 } ··· 1592 # support this feature 1593 # 0 = MSI disabled (default) 1594 # 1 = MSI enabled 1595 + # 2 = MSI-X enabled 1596 + # Value range is [0,2]. Default value is 0. 1597 */ 1598 + LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or " 1599 + "MSI-X (2), if possible"); 1600 1601 /* 1602 # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. ··· 1946 } 1947 1948 /* If HBA encountered an error attention, allow only DUMP 1949 + * or RESTART mailbox commands until the HBA is restarted. 1950 */ 1951 if ((phba->pport->stopped) && 1952 + (phba->sysfs_mbox.mbox->mb.mbxCommand != 1953 + MBX_DUMP_MEMORY && 1954 + phba->sysfs_mbox.mbox->mb.mbxCommand != 1955 + MBX_RESTART)) { 1956 sysfs_mbox_idle(phba); 1957 spin_unlock_irq(&phba->hbalock); 1958 return -EPERM; ··· 2384 spin_lock_irq(shost->host_lock); 2385 /* Search for this, mapped, target ID */ 2386 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 2387 + if (NLP_CHK_NODE_ACT(ndlp) && 2388 + ndlp->nlp_state == NLP_STE_MAPPED_NODE && 2389 starget->id == ndlp->nlp_sid) { 2390 spin_unlock_irq(shost->host_lock); 2391 return ndlp;
+5 -1
drivers/scsi/lpfc/lpfc_crtn.h
··· 1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2007 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * * ··· 53 void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 54 void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 55 void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 56 void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *); 57 void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int); 58 void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *); 59 void lpfc_set_disctmo(struct lpfc_vport *);
··· 1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 + * Copyright (C) 2004-2008 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * * ··· 53 void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 54 void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 55 void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 56 + void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *); 57 void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *); 58 + void lpfc_disable_node(struct lpfc_vport *, struct lpfc_nodelist *); 59 + struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *, 60 + struct lpfc_nodelist *, int); 61 void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int); 62 void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *); 63 void lpfc_set_disctmo(struct lpfc_vport *);
+30 -11
drivers/scsi/lpfc/lpfc_ct.c
··· 1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2007 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * * ··· 294 /* Save for completion so we can release these resources */ 295 geniocb->context1 = (uint8_t *) inp; 296 geniocb->context2 = (uint8_t *) outp; 297 - geniocb->context_un.ndlp = ndlp; 298 299 /* Fill in payload, bp points to frame payload */ 300 icmd->ulpCommand = CMD_GEN_REQUEST64_CR; ··· 489 */ 490 ndlp = lpfc_findnode_did(vport, 491 Did); 492 - if (ndlp && (ndlp->nlp_type & 493 - NLP_FCP_TARGET)) 494 lpfc_setup_disc_node 495 (vport, Did); 496 else if (lpfc_ns_cmd(vport, ··· 775 "0267 NameServer GFF Rsp " 776 "x%x Error (%d %d) Data: x%x x%x\n", 777 did, irsp->ulpStatus, irsp->un.ulpWord[4], 778 - vport->fc_flag, vport->fc_rscn_id_cnt) 779 } 780 781 /* This is a target port, unregistered port, or the GFF_ID failed */ ··· 1066 int rc = 0; 1067 1068 ndlp = lpfc_findnode_did(vport, NameServer_DID); 1069 - if (ndlp == NULL || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) { 1070 rc=1; 1071 goto ns_cmd_exit; 1072 } ··· 1216 cmpl = lpfc_cmpl_ct_cmd_rff_id; 1217 break; 1218 } 1219 - lpfc_nlp_get(ndlp); 1220 - 1221 if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) { 1222 /* On success, The cmpl function will free the buffers */ 1223 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, ··· 1226 cmdcode, ndlp->nlp_DID, 0); 1227 return 0; 1228 } 1229 - 1230 rc=6; 1231 lpfc_nlp_put(ndlp); 1232 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 1233 ns_cmd_free_bmp: 1234 kfree(bmp); ··· 1279 } 1280 1281 ndlp = lpfc_findnode_did(vport, FDMI_DID); 1282 if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) { 1283 /* FDMI rsp failed */ 1284 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, ··· 1305 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA); 1306 break; 1307 } 1308 lpfc_ct_free_iocb(phba, cmdiocb); 1309 return; 1310 } ··· 1663 bpl->tus.w = le32_to_cpu(bpl->tus.w); 1664 1665 cmpl = lpfc_cmpl_ct_cmd_fdmi; 1666 - lpfc_nlp_get(ndlp); 1667 1668 if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP, 0)) 1669 return 0; 1670 1671 lpfc_nlp_put(ndlp); 1672 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 1673 fdmi_cmd_free_bmp: 1674 kfree(bmp); ··· 1717 struct lpfc_nodelist *ndlp; 1718 1719 ndlp = lpfc_findnode_did(vport, FDMI_DID); 1720 - if (ndlp) { 1721 if (init_utsname()->nodename[0] != '\0') 1722 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); 1723 else
··· 1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 + * Copyright (C) 2004-2008 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * * ··· 294 /* Save for completion so we can release these resources */ 295 geniocb->context1 = (uint8_t *) inp; 296 geniocb->context2 = (uint8_t *) outp; 297 + geniocb->context_un.ndlp = lpfc_nlp_get(ndlp); 298 299 /* Fill in payload, bp points to frame payload */ 300 icmd->ulpCommand = CMD_GEN_REQUEST64_CR; ··· 489 */ 490 ndlp = lpfc_findnode_did(vport, 491 Did); 492 + if (ndlp && 493 + NLP_CHK_NODE_ACT(ndlp) 494 + && (ndlp->nlp_type & 495 + NLP_FCP_TARGET)) 496 lpfc_setup_disc_node 497 (vport, Did); 498 else if (lpfc_ns_cmd(vport, ··· 773 "0267 NameServer GFF Rsp " 774 "x%x Error (%d %d) Data: x%x x%x\n", 775 did, irsp->ulpStatus, irsp->un.ulpWord[4], 776 + vport->fc_flag, vport->fc_rscn_id_cnt); 777 } 778 779 /* This is a target port, unregistered port, or the GFF_ID failed */ ··· 1064 int rc = 0; 1065 1066 ndlp = lpfc_findnode_did(vport, NameServer_DID); 1067 + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) 1068 + || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) { 1069 rc=1; 1070 goto ns_cmd_exit; 1071 } ··· 1213 cmpl = lpfc_cmpl_ct_cmd_rff_id; 1214 break; 1215 } 1216 + /* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count 1217 + * to hold ndlp reference for the corresponding callback function. 1218 + */ 1219 if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) { 1220 /* On success, The cmpl function will free the buffers */ 1221 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, ··· 1222 cmdcode, ndlp->nlp_DID, 0); 1223 return 0; 1224 } 1225 rc=6; 1226 + 1227 + /* Decrement ndlp reference count to release ndlp reference held 1228 + * for the failed command's callback function. 1229 + */ 1230 lpfc_nlp_put(ndlp); 1231 + 1232 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 1233 ns_cmd_free_bmp: 1234 kfree(bmp); ··· 1271 } 1272 1273 ndlp = lpfc_findnode_did(vport, FDMI_DID); 1274 + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 1275 + goto fail_out; 1276 + 1277 if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) { 1278 /* FDMI rsp failed */ 1279 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, ··· 1294 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA); 1295 break; 1296 } 1297 + 1298 + fail_out: 1299 lpfc_ct_free_iocb(phba, cmdiocb); 1300 return; 1301 } ··· 1650 bpl->tus.w = le32_to_cpu(bpl->tus.w); 1651 1652 cmpl = lpfc_cmpl_ct_cmd_fdmi; 1653 1654 + /* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count 1655 + * to hold ndlp reference for the corresponding callback function. 1656 + */ 1657 if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP, 0)) 1658 return 0; 1659 1660 + /* Decrement ndlp reference count to release ndlp reference held 1661 + * for the failed command's callback function. 1662 + */ 1663 lpfc_nlp_put(ndlp); 1664 + 1665 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 1666 fdmi_cmd_free_bmp: 1667 kfree(bmp); ··· 1698 struct lpfc_nodelist *ndlp; 1699 1700 ndlp = lpfc_findnode_did(vport, FDMI_DID); 1701 + if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { 1702 if (init_utsname()->nodename[0] != '\0') 1703 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); 1704 else
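
The common shape of these lpfc_ct.c fixes: take the ndlp reference at the moment the pointer is published for the completion handler, and drop it on any path where that handler will never run. Restated as a self-contained kref sketch (struct and helper names invented; lpfc uses its own lpfc_nlp_get()/lpfc_nlp_put() wrappers):

    #include <linux/errno.h>
    #include <linux/kref.h>

    struct node { struct kref kref; };          /* stand-in for lpfc_nodelist */

    static void node_release(struct kref *kref) { /* free the node here */ }

    static int issue_async(struct node *n)
    {
        return -1;                              /* stub: pretend submit failed */
    }

    static int example_publish(struct node *n)
    {
        kref_get(&n->kref);                     /* reference owned by the callback */
        if (issue_async(n)) {
            kref_put(&n->kref, node_release);   /* callback won't fire: drop it */
            return -EIO;
        }
        return 0;   /* the completion handler does the kref_put() when it runs */
    }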
+49 -17
drivers/scsi/lpfc/lpfc_disc.h
··· 1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2007 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * * ··· 73 uint8_t nlp_fcp_info; /* class info, bits 0-3 */ 74 #define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */ 75 76 struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */ 77 struct fc_rport *rport; /* Corresponding FC transport 78 port structure */ ··· 91 }; 92 93 /* Defines for nlp_flag (uint32) */ 94 - #define NLP_PLOGI_SND 0x20 /* sent PLOGI request for this entry */ 95 - #define NLP_PRLI_SND 0x40 /* sent PRLI request for this entry */ 96 - #define NLP_ADISC_SND 0x80 /* sent ADISC request for this entry */ 97 - #define NLP_LOGO_SND 0x100 /* sent LOGO request for this entry */ 98 - #define NLP_RNID_SND 0x400 /* sent RNID request for this entry */ 99 - #define NLP_ELS_SND_MASK 0x7e0 /* sent ELS request for this entry */ 100 - #define NLP_DEFER_RM 0x10000 /* Remove this ndlp if no longer used */ 101 - #define NLP_DELAY_TMO 0x20000 /* delay timeout is running for node */ 102 - #define NLP_NPR_2B_DISC 0x40000 /* node is included in num_disc_nodes */ 103 - #define NLP_RCV_PLOGI 0x80000 /* Rcv'ed PLOGI from remote system */ 104 - #define NLP_LOGO_ACC 0x100000 /* Process LOGO after ACC completes */ 105 - #define NLP_TGT_NO_SCSIID 0x200000 /* good PRLI but no binding for scsid */ 106 - #define NLP_ACC_REGLOGIN 0x1000000 /* Issue Reg Login after successful 107 ACC */ 108 - #define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from 109 NPR list */ 110 - #define NLP_RM_DFLT_RPI 0x4000000 /* need to remove leftover dflt RPI */ 111 - #define NLP_NODEV_REMOVE 0x8000000 /* Defer removal till discovery ends */ 112 #define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ 113 114 /* There are 4 different double linked lists nodelist entries can reside on. 115 * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used
··· 1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 + * Copyright (C) 2004-2008 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * * ··· 73 uint8_t nlp_fcp_info; /* class info, bits 0-3 */ 74 #define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */ 75 76 + uint16_t nlp_usg_map; /* ndlp management usage bitmap */ 77 + #define NLP_USG_NODE_ACT_BIT 0x1 /* Indicate ndlp is actively used */ 78 + #define NLP_USG_IACT_REQ_BIT 0x2 /* Request to inactivate ndlp */ 79 + #define NLP_USG_FREE_REQ_BIT 0x4 /* Request to invoke ndlp memory free */ 80 + #define NLP_USG_FREE_ACK_BIT 0x8 /* Indicate ndlp memory free invoked */ 81 + 82 struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */ 83 struct fc_rport *rport; /* Corresponding FC transport 84 port structure */ ··· 85 }; 86 87 /* Defines for nlp_flag (uint32) */ 88 + #define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */ 89 + #define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */ 90 + #define NLP_ADISC_SND 0x00000080 /* sent ADISC request for this entry */ 91 + #define NLP_LOGO_SND 0x00000100 /* sent LOGO request for this entry */ 92 + #define NLP_RNID_SND 0x00000400 /* sent RNID request for this entry */ 93 + #define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */ 94 + #define NLP_DEFER_RM 0x00010000 /* Remove this ndlp if no longer used */ 95 + #define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */ 96 + #define NLP_NPR_2B_DISC 0x00040000 /* node is included in num_disc_nodes */ 97 + #define NLP_RCV_PLOGI 0x00080000 /* Rcv'ed PLOGI from remote system */ 98 + #define NLP_LOGO_ACC 0x00100000 /* Process LOGO after ACC completes */ 99 + #define NLP_TGT_NO_SCSIID 0x00200000 /* good PRLI but no binding for scsid */ 100 + #define NLP_ACC_REGLOGIN 0x01000000 /* Issue Reg Login after successful 101 ACC */ 102 + #define NLP_NPR_ADISC 0x02000000 /* Issue ADISC when dq'ed from 103 NPR list */ 104 + #define NLP_RM_DFLT_RPI 0x04000000 /* need to remove leftover dflt RPI */ 105 + #define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */ 106 #define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ 107 + #define NLP_SC_REQ 0x20000000 /* Target requires authentication */ 108 + 109 + /* ndlp usage management macros */ 110 + #define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \ 111 + & NLP_USG_NODE_ACT_BIT) \ 112 + && \ 113 + !((ndlp)->nlp_usg_map \ 114 + & NLP_USG_FREE_ACK_BIT)) 115 + #define NLP_SET_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \ 116 + |= NLP_USG_NODE_ACT_BIT) 117 + #define NLP_INT_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \ 118 + = NLP_USG_NODE_ACT_BIT) 119 + #define NLP_CLR_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \ 120 + &= ~NLP_USG_NODE_ACT_BIT) 121 + #define NLP_CHK_IACT_REQ(ndlp) ((ndlp)->nlp_usg_map \ 122 + & NLP_USG_IACT_REQ_BIT) 123 + #define NLP_SET_IACT_REQ(ndlp) ((ndlp)->nlp_usg_map \ 124 + |= NLP_USG_IACT_REQ_BIT) 125 + #define NLP_CHK_FREE_REQ(ndlp) ((ndlp)->nlp_usg_map \ 126 + & NLP_USG_FREE_REQ_BIT) 127 + #define NLP_SET_FREE_REQ(ndlp) ((ndlp)->nlp_usg_map \ 128 + |= NLP_USG_FREE_REQ_BIT) 129 + #define NLP_CHK_FREE_ACK(ndlp) ((ndlp)->nlp_usg_map \ 130 + & NLP_USG_FREE_ACK_BIT) 131 + #define NLP_SET_FREE_ACK(ndlp) ((ndlp)->nlp_usg_map \ 132 + |= NLP_USG_FREE_ACK_BIT) 133 134 /* There are 4 different double linked lists nodelist entries can reside on. 
135 * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used
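
Every fc_nodes walker touched by this series grows the same guard (see the lpfc_attr.c and lpfc_ct.c hunks). A cut-down, self-contained rendering of the idiom using the bit values from the hunk above:

    #include <linux/list.h>
    #include <linux/types.h>

    struct mini_ndlp {                          /* cut-down lpfc_nodelist */
        struct list_head nlp_listp;
        u16 nlp_usg_map;
    };

    #define USG_NODE_ACT_BIT 0x1                /* values mirror the hunk */
    #define USG_FREE_ACK_BIT 0x8
    #define CHK_NODE_ACT(n) (((n)->nlp_usg_map & USG_NODE_ACT_BIT) && \
                             !((n)->nlp_usg_map & USG_FREE_ACK_BIT))

    static void example_walk(struct list_head *nodes)
    {
        struct mini_ndlp *ndlp;

        list_for_each_entry(ndlp, nodes, nlp_listp) {
            if (!CHK_NODE_ACT(ndlp))
                continue;                       /* inactive or being freed */
            /* safe to dereference ndlp here */
        }
    }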
+272 -112
drivers/scsi/lpfc/lpfc_els.c
··· 1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2007 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 113 114 if (elsiocb == NULL) 115 return NULL; 116 icmd = &elsiocb->iocb; 117 118 /* fill in BDEs for command */ ··· 135 if (!prsp || !prsp->virt) 136 goto els_iocb_free_prsp_exit; 137 INIT_LIST_HEAD(&prsp->list); 138 - } else { 139 prsp = NULL; 140 - } 141 142 /* Allocate buffer for Buffer ptr list */ 143 pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); ··· 246 247 sp = &phba->fc_fabparam; 248 ndlp = lpfc_findnode_did(vport, Fabric_DID); 249 - if (!ndlp) { 250 err = 1; 251 goto fail; 252 } ··· 282 283 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; 284 mbox->vport = vport; 285 mbox->context2 = lpfc_nlp_get(ndlp); 286 287 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); ··· 296 return 0; 297 298 fail_issue_reg_login: 299 lpfc_nlp_put(ndlp); 300 mp = (struct lpfc_dmabuf *) mbox->context1; 301 lpfc_mbuf_free(phba, mp->virt, mp->phys); ··· 387 */ 388 list_for_each_entry_safe(np, next_np, 389 &vport->fc_nodes, nlp_listp) { 390 if ((np->nlp_state != NLP_STE_NPR_NODE) || 391 !(np->nlp_flag & NLP_NPR_ADISC)) 392 continue; ··· 464 mempool_free(mbox, phba->mbox_mem_pool); 465 goto fail; 466 } 467 lpfc_nlp_put(ndlp); 468 469 ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID); ··· 478 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 479 if (!ndlp) 480 goto fail; 481 - 482 lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID); 483 } 484 485 memcpy(&ndlp->nlp_portname, &sp->portName, 486 sizeof(struct lpfc_name)); 487 memcpy(&ndlp->nlp_nodename, &sp->nodeName, 488 sizeof(struct lpfc_name)); 489 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 490 spin_lock_irq(shost->host_lock); 491 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 492 spin_unlock_irq(shost->host_lock); 493 - } else { 494 - /* This side will wait for the PLOGI */ 495 lpfc_nlp_put(ndlp); 496 - } 497 498 /* If we are pt2pt with another NPort, force NPIV off! */ 499 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; ··· 746 if (!ndlp) 747 return 0; 748 lpfc_nlp_init(vport, ndlp, Fabric_DID); 749 - } else { 750 - lpfc_dequeue_node(vport, ndlp); 751 } 752 753 - if (lpfc_issue_els_flogi(vport, ndlp, 0)) { 754 /* This decrement of reference count to node shall kick off 755 * the release of the node. 756 */ 757 lpfc_nlp_put(ndlp); 758 - } 759 return 1; 760 } 761 ··· 778 if (!ndlp) 779 return 0; 780 lpfc_nlp_init(vport, ndlp, Fabric_DID); 781 - } else { 782 - lpfc_dequeue_node(vport, ndlp); 783 } 784 if (lpfc_issue_els_fdisc(vport, ndlp, 0)) { 785 /* decrement node reference count to trigger the release of 786 * the node. 
··· 845 */ 846 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName); 847 848 - if (new_ndlp == ndlp) 849 return ndlp; 850 851 if (!new_ndlp) { ··· 856 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC); 857 if (!new_ndlp) 858 return ndlp; 859 - 860 lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID); 861 } 862 863 lpfc_unreg_rpi(vport, new_ndlp); ··· 872 new_ndlp->nlp_flag |= NLP_NPR_2B_DISC; 873 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 874 875 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); 876 877 /* Move this back to NPR state */ ··· 946 irsp->un.elsreq64.remoteID); 947 948 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); 949 - if (!ndlp) { 950 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 951 "0136 PLOGI completes to NPort x%x " 952 "with no ndlp. Data: x%x x%x x%x\n", ··· 996 } 997 /* PLOGI failed */ 998 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 999 - if (lpfc_error_lost_link(irsp)) { 1000 rc = NLP_STE_FREED_NODE; 1001 - } else { 1002 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1003 NLP_EVT_CMPL_PLOGI); 1004 - } 1005 } else { 1006 /* Good status, call state machine */ 1007 prsp = list_entry(((struct lpfc_dmabuf *) ··· 1048 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 1049 1050 ndlp = lpfc_findnode_did(vport, did); 1051 - /* If ndlp if not NULL, we will bump the reference count on it */ 1052 1053 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 1054 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 1055 ELS_CMD_PLOGI); ··· 1132 } 1133 /* PRLI failed */ 1134 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1135 - if (lpfc_error_lost_link(irsp)) { 1136 goto out; 1137 - } else { 1138 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1139 NLP_EVT_CMPL_PRLI); 1140 - } 1141 - } else { 1142 /* Good status, call state machine */ 1143 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1144 NLP_EVT_CMPL_PRLI); 1145 - } 1146 - 1147 out: 1148 lpfc_els_free_iocb(phba, cmdiocb); 1149 return; ··· 1307 } 1308 /* ADISC failed */ 1309 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1310 - if (!lpfc_error_lost_link(irsp)) { 1311 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1312 NLP_EVT_CMPL_ADISC); 1313 - } 1314 - } else { 1315 /* Good status, call state machine */ 1316 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1317 NLP_EVT_CMPL_ADISC); 1318 - } 1319 1320 if (disc && vport->num_disc_nodes) { 1321 /* Check to see if there are more ADISCs to be sent */ ··· 1473 else 1474 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1475 NLP_EVT_CMPL_LOGO); 1476 - } else { 1477 /* Good status, call state machine. 1478 * This will unregister the rpi if needed. 
1479 */ 1480 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1481 NLP_EVT_CMPL_LOGO); 1482 - } 1483 - 1484 out: 1485 lpfc_els_free_iocb(phba, cmdiocb); 1486 return; ··· 1584 psli = &phba->sli; 1585 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 1586 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 1587 - ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 1588 - if (!ndlp) 1589 - return 1; 1590 1591 - lpfc_nlp_init(vport, ndlp, nportid); 1592 1593 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1594 ndlp->nlp_DID, ELS_CMD_SCR); ··· 1659 psli = &phba->sli; 1660 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 1661 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 1662 - ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 1663 - if (!ndlp) 1664 - return 1; 1665 1666 - lpfc_nlp_init(vport, ndlp, nportid); 1667 1668 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1669 ndlp->nlp_DID, ELS_CMD_RNID); ··· 1701 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); 1702 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 1703 ondlp = lpfc_findnode_did(vport, nportid); 1704 - if (ondlp) { 1705 memcpy(&fp->OportName, &ondlp->nlp_portname, 1706 sizeof(struct lpfc_name)); 1707 memcpy(&fp->OnodeName, &ondlp->nlp_nodename, ··· 1734 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 1735 { 1736 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1737 1738 spin_lock_irq(shost->host_lock); 1739 nlp->nlp_flag &= ~NLP_DELAY_TMO; ··· 1742 del_timer_sync(&nlp->nlp_delayfunc); 1743 nlp->nlp_last_elscmd = 0; 1744 1745 - if (!list_empty(&nlp->els_retry_evt.evt_listp)) 1746 list_del_init(&nlp->els_retry_evt.evt_listp); 1747 1748 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 1749 spin_lock_irq(shost->host_lock); ··· 1891 cmd = *elscmd++; 1892 } 1893 1894 - if (ndlp) 1895 did = ndlp->nlp_DID; 1896 else { 1897 /* We should only hit this case for retrying PLOGI */ 1898 did = irsp->un.elsreq64.remoteID; 1899 ndlp = lpfc_findnode_did(vport, did); 1900 - if (!ndlp && (cmd != ELS_CMD_PLOGI)) 1901 return 1; 1902 } 1903 ··· 1920 break; 1921 1922 case IOERR_ILLEGAL_COMMAND: 1923 - if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) && 1924 - (cmd == ELS_CMD_FDISC)) { 1925 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1926 - "0124 FDISC failed (3/6) " 1927 - "retrying...\n"); 1928 - lpfc_mbx_unreg_vpi(vport); 1929 - retry = 1; 1930 - /* FDISC retry policy */ 1931 - maxretry = 48; 1932 - if (cmdiocb->retry >= 32) 1933 - delay = 1000; 1934 - } 1935 break; 1936 1937 case IOERR_NO_RESOURCES: ··· 2014 break; 2015 2016 case LSRJT_LOGICAL_ERR: 2017 case LSRJT_PROTOCOL_ERR: 2018 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 2019 (cmd == ELS_CMD_FDISC) && ··· 2054 retry = 1; 2055 2056 if ((cmd == ELS_CMD_FLOGI) && 2057 - (phba->fc_topology != TOPOLOGY_LOOP)) { 2058 /* FLOGI retry policy */ 2059 retry = 1; 2060 maxretry = 48; ··· 2381 if ((rspiocb->iocb.ulpStatus == 0) 2382 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 2383 lpfc_unreg_rpi(vport, ndlp); 2384 mbox->context2 = lpfc_nlp_get(ndlp); 2385 mbox->vport = vport; 2386 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { ··· 2397 NLP_STE_REG_LOGIN_ISSUE); 2398 } 2399 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 2400 - != MBX_NOT_FINISHED) { 2401 goto out; 2402 - } 2403 2404 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 2405 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, ··· 2862 2863 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2864 list_for_each_entry_safe(ndlp, next_ndlp, 
&vport->fc_nodes, nlp_listp) { 2865 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 2866 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 2867 (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) { ··· 2901 2902 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 2903 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2904 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 2905 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 2906 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && ··· 2939 struct lpfc_hba *phba = vport->phba; 2940 int i; 2941 2942 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 2943 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); 2944 vport->fc_rscn_id_list[i] = NULL; ··· 2958 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); 2959 spin_unlock_irq(shost->host_lock); 2960 lpfc_can_disctmo(vport); 2961 } 2962 2963 int ··· 2969 D_ID rscn_did; 2970 uint32_t *lp; 2971 uint32_t payload_len, i; 2972 2973 ns_did.un.word = did; 2974 ··· 2981 if (vport->fc_flag & FC_RSCN_DISCOVERY) 2982 return did; 2983 2984 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 2985 lp = vport->fc_rscn_id_list[i]->virt; 2986 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); ··· 3000 switch (rscn_did.un.b.resv) { 3001 case 0: /* Single N_Port ID effected */ 3002 if (ns_did.un.word == rscn_did.un.word) 3003 - return did; 3004 break; 3005 case 1: /* Whole N_Port Area effected */ 3006 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 3007 && (ns_did.un.b.area == rscn_did.un.b.area)) 3008 - return did; 3009 break; 3010 case 2: /* Whole N_Port Domain effected */ 3011 if (ns_did.un.b.domain == rscn_did.un.b.domain) 3012 - return did; 3013 break; 3014 default: 3015 /* Unknown Identifier in RSCN node */ ··· 3018 "RSCN payload Data: x%x\n", 3019 rscn_did.un.word); 3020 case 3: /* Whole Fabric effected */ 3021 - return did; 3022 } 3023 } 3024 } 3025 return 0; 3026 } 3027 3028 static int ··· 3041 */ 3042 3043 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 3044 - if (ndlp->nlp_state == NLP_STE_UNUSED_NODE || 3045 lpfc_rscn_payload_check(vport, ndlp->nlp_DID) == 0) 3046 continue; 3047 ··· 3070 uint32_t *lp, *datap; 3071 IOCB_t *icmd; 3072 uint32_t payload_len, length, nportid, *cmd; 3073 - int rscn_cnt = vport->fc_rscn_id_cnt; 3074 int rscn_id = 0, hba_id = 0; 3075 int i; 3076 ··· 3083 /* RSCN received */ 3084 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 3085 "0214 RSCN received Data: x%x x%x x%x x%x\n", 3086 - vport->fc_flag, payload_len, *lp, rscn_cnt); 3087 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 3088 fc_host_post_event(shost, fc_get_event_number(), 3089 FCH_EVT_RSCN, lp[i]); ··· 3122 "0214 Ignore RSCN " 3123 "Data: x%x x%x x%x x%x\n", 3124 vport->fc_flag, payload_len, 3125 - *lp, rscn_cnt); 3126 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 3127 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", 3128 ndlp->nlp_DID, vport->port_state, ··· 3134 } 3135 } 3136 3137 /* If we are already processing an RSCN, save the received 3138 * RSCN payload buffer, cmdiocb->context2 to process later. 
3139 */ ··· 3167 if ((rscn_cnt) && 3168 (payload_len + length <= LPFC_BPL_SIZE)) { 3169 *cmd &= ELS_CMD_MASK; 3170 - *cmd |= be32_to_cpu(payload_len + length); 3171 memcpy(((uint8_t *)cmd) + length, lp, 3172 payload_len); 3173 } else { ··· 3178 */ 3179 cmdiocb->context2 = NULL; 3180 } 3181 - 3182 /* Deferred RSCN */ 3183 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 3184 "0235 Deferred RSCN " ··· 3194 vport->fc_rscn_id_cnt, vport->fc_flag, 3195 vport->port_state); 3196 } 3197 /* Send back ACC */ 3198 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 3199 - 3200 /* send RECOVERY event for ALL nodes that match RSCN payload */ 3201 lpfc_rscn_recovery_check(vport); 3202 spin_lock_irq(shost->host_lock); ··· 3205 spin_unlock_irq(shost->host_lock); 3206 return 0; 3207 } 3208 - 3209 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 3210 "RCV RSCN: did:x%x/ste:x%x flg:x%x", 3211 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); ··· 3213 vport->fc_flag |= FC_RSCN_MODE; 3214 spin_unlock_irq(shost->host_lock); 3215 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; 3216 /* 3217 * If we zero, cmdiocb->context2, the calling routine will 3218 * not try to free it. 3219 */ 3220 cmdiocb->context2 = NULL; 3221 - 3222 lpfc_set_disctmo(vport); 3223 - 3224 /* Send back ACC */ 3225 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 3226 - 3227 /* send RECOVERY event for ALL nodes that match RSCN payload */ 3228 lpfc_rscn_recovery_check(vport); 3229 - 3230 return lpfc_els_handle_rscn(vport); 3231 } 3232 ··· 3254 vport->num_disc_nodes = 0; 3255 3256 ndlp = lpfc_findnode_did(vport, NameServer_DID); 3257 - if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 3258 /* Good ndlp, issue CT Request to NameServer */ 3259 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0) 3260 /* Wait for NameServer query cmpl before we can ··· 3265 /* If login to NameServer does not exist, issue one */ 3266 /* Good status, issue PLOGI to NameServer */ 3267 ndlp = lpfc_findnode_did(vport, NameServer_DID); 3268 - if (ndlp) 3269 /* Wait for NameServer login cmpl before we can 3270 continue */ 3271 return 1; 3272 3273 - ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 3274 - if (!ndlp) { 3275 - lpfc_els_flush_rscn(vport); 3276 - return 0; 3277 } else { 3278 lpfc_nlp_init(vport, ndlp, NameServer_DID); 3279 - ndlp->nlp_type |= NLP_FABRIC; 3280 ndlp->nlp_prev_state = ndlp->nlp_state; 3281 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 3282 - lpfc_issue_els_plogi(vport, NameServer_DID, 0); 3283 - /* Wait for NameServer login cmpl before we can 3284 - continue */ 3285 - return 1; 3286 } 3287 } 3288 3289 lpfc_els_flush_rscn(vport); ··· 3792 3793 list_for_each_entry_safe(ndlp, next_ndlp, 3794 &vport->fc_nodes, nlp_listp) { 3795 if (ndlp->nlp_state != NLP_STE_NPR_NODE) 3796 continue; 3797 if (ndlp->nlp_type & NLP_FABRIC) { ··· 3819 */ 3820 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, 3821 nlp_listp) { 3822 if (ndlp->nlp_state != NLP_STE_NPR_NODE) 3823 continue; 3824 ··· 4060 uint32_t cmd, did, newnode, rjt_err = 0; 4061 IOCB_t *icmd = &elsiocb->iocb; 4062 4063 - if (vport == NULL || elsiocb->context2 == NULL) 4064 goto dropit; 4065 4066 newnode = 0; ··· 4095 lpfc_nlp_init(vport, ndlp, did); 4096 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 4097 newnode = 1; 4098 - if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) { 4099 ndlp->nlp_type |= NLP_FABRIC; 4100 } 4101 - } 4102 - else { 4103 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 4104 /* This is simular to the new node path */ 4105 - 
lpfc_nlp_get(ndlp); 4106 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 4107 newnode = 1; 4108 } ··· 4117 phba->fc_stat.elsRcvFrame++; 4118 if (elsiocb->context1) 4119 lpfc_nlp_put(elsiocb->context1); 4120 elsiocb->context1 = lpfc_nlp_get(ndlp); 4121 elsiocb->vport = vport; 4122 ··· 4138 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); 4139 4140 if (vport->port_state < LPFC_DISC_AUTH) { 4141 - rjt_err = LSRJT_UNABLE_TPC; 4142 - break; 4143 } 4144 4145 shost = lpfc_shost_from_vport(vport); ··· 4389 vport = lpfc_find_vport_by_vpid(phba, vpi); 4390 } 4391 } 4392 - /* If there are no BDEs associated 4393 - * with this IOCB, there is nothing to do. 4394 - */ 4395 if (icmd->ulpBdeCount == 0) 4396 return; 4397 4398 - /* type of ELS cmd is first 32bit word 4399 - * in packet 4400 - */ 4401 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 4402 elsiocb->context2 = bdeBuf1; 4403 } else { ··· 4452 } 4453 lpfc_nlp_init(vport, ndlp, NameServer_DID); 4454 ndlp->nlp_type |= NLP_FABRIC; 4455 } 4456 4457 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); ··· 4510 switch (mb->mbxStatus) { 4511 case 0x11: /* unsupported feature */ 4512 case 0x9603: /* max_vpi exceeded */ 4513 /* giving up on vport registration */ 4514 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 4515 spin_lock_irq(shost->host_lock); ··· 4524 spin_lock_irq(shost->host_lock); 4525 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 4526 spin_unlock_irq(shost->host_lock); 4527 - lpfc_initial_fdisc(vport); 4528 break; 4529 } 4530 ··· 4625 irsp->ulpStatus, irsp->un.ulpWord[4]); 4626 if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING) 4627 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 4628 - 4629 lpfc_nlp_put(ndlp); 4630 /* giving up on FDISC. Cancel discovery timer */ 4631 lpfc_can_disctmo(vport); ··· 4645 */ 4646 list_for_each_entry_safe(np, next_np, 4647 &vport->fc_nodes, nlp_listp) { 4648 - if (np->nlp_state != NLP_STE_NPR_NODE 4649 - || !(np->nlp_flag & NLP_NPR_ADISC)) 4650 continue; 4651 spin_lock_irq(shost->host_lock); 4652 np->nlp_flag &= ~NLP_NPR_ADISC; ··· 4753 { 4754 struct lpfc_vport *vport = cmdiocb->vport; 4755 IOCB_t *irsp; 4756 4757 irsp = &rspiocb->iocb; 4758 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, ··· 4763 4764 lpfc_els_free_iocb(phba, cmdiocb); 4765 vport->unreg_vpi_cmpl = VPORT_ERROR; 4766 } 4767 4768 int ··· 4845 repeat: 4846 iocb = NULL; 4847 spin_lock_irqsave(&phba->hbalock, iflags); 4848 - /* Post any pending iocb to the SLI layer */ 4849 if (atomic_read(&phba->fabric_iocb_count) == 0) { 4850 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), 4851 list); 4852 if (iocb) 4853 atomic_inc(&phba->fabric_iocb_count); 4854 } 4855 spin_unlock_irqrestore(&phba->hbalock, iflags); ··· 4897 int blocked; 4898 4899 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 4900 - /* Start a timer to unblock fabric 4901 - * iocbs after 100ms 4902 - */ 4903 if (!blocked) 4904 mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 ); 4905 ··· 4945 4946 atomic_dec(&phba->fabric_iocb_count); 4947 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) { 4948 - /* Post any pending iocbs to HBA */ 4949 - lpfc_resume_fabric_iocbs(phba); 4950 } 4951 } 4952 ··· 4965 ready = atomic_read(&phba->fabric_iocb_count) == 0 && 4966 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 4967 4968 spin_unlock_irqrestore(&phba->hbalock, iflags); 4969 if (ready) { 4970 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; ··· 4978 "Fabric sched2: ste:x%x", 4979 iocb->vport->port_state, 0, 0); 4980 4981 - 
atomic_inc(&phba->fabric_iocb_count); 4982 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); 4983 4984 if (ret == IOCB_ERROR) {
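The updated copy of lpfc_els.c follows. One fix worth calling out before reading it: in the fabric IOCB issue path above, fabric_iocb_count is tested under phba->hbalock, but the matching atomic_inc() only happens after the lock has been dropped, so two threads can both observe the count as zero and both claim the single outstanding fabric-IOCB slot. The new version increments the count while the lock is still held ("hold the position"). Below is a stand-alone sketch of the corrected pattern, with hypothetical claim_slot()/release_slot() names and a pthread mutex standing in for the driver's lock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
static int fabric_iocb_count;           /* 0 means the slot is free */

/* Claim the single outstanding-IOCB slot, or report it is busy. */
static bool claim_slot(void)
{
	bool ready;

	pthread_mutex_lock(&slot_lock);
	ready = (fabric_iocb_count == 0);
	if (ready)
		fabric_iocb_count++;    /* take the slot before unlocking */
	pthread_mutex_unlock(&slot_lock);
	return ready;
}

static void release_slot(void)
{
	pthread_mutex_lock(&slot_lock);
	fabric_iocb_count--;
	pthread_mutex_unlock(&slot_lock);
}

int main(void)
{
	if (claim_slot()) {             /* issue the iocb here */
		puts("slot claimed");
		release_slot();
	}
	return 0;
}

The atomic counter type alone does not help with this bug; what matters is that the test and the increment sit inside the same critical section.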
··· 1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 + * Copyright (C) 2004-2008 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 113 114 if (elsiocb == NULL) 115 return NULL; 116 + 117 icmd = &elsiocb->iocb; 118 119 /* fill in BDEs for command */ ··· 134 if (!prsp || !prsp->virt) 135 goto els_iocb_free_prsp_exit; 136 INIT_LIST_HEAD(&prsp->list); 137 + } else 138 prsp = NULL; 139 140 /* Allocate buffer for Buffer ptr list */ 141 pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); ··· 246 247 sp = &phba->fc_fabparam; 248 ndlp = lpfc_findnode_did(vport, Fabric_DID); 249 + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 250 err = 1; 251 goto fail; 252 } ··· 282 283 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; 284 mbox->vport = vport; 285 + /* increment the reference count on ndlp to hold reference 286 + * for the callback routine. 287 + */ 288 mbox->context2 = lpfc_nlp_get(ndlp); 289 290 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); ··· 293 return 0; 294 295 fail_issue_reg_login: 296 + /* decrement the reference count on ndlp just incremented 297 + * for the failed mbox command. 298 + */ 299 lpfc_nlp_put(ndlp); 300 mp = (struct lpfc_dmabuf *) mbox->context1; 301 lpfc_mbuf_free(phba, mp->virt, mp->phys); ··· 381 */ 382 list_for_each_entry_safe(np, next_np, 383 &vport->fc_nodes, nlp_listp) { 384 + if (!NLP_CHK_NODE_ACT(ndlp)) 385 + continue; 386 if ((np->nlp_state != NLP_STE_NPR_NODE) || 387 !(np->nlp_flag & NLP_NPR_ADISC)) 388 continue; ··· 456 mempool_free(mbox, phba->mbox_mem_pool); 457 goto fail; 458 } 459 + /* Decrement ndlp reference count indicating that ndlp can be 460 + * safely released when other references to it are done. 461 + */ 462 lpfc_nlp_put(ndlp); 463 464 ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID); ··· 467 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 468 if (!ndlp) 469 goto fail; 470 lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID); 471 + } else if (!NLP_CHK_NODE_ACT(ndlp)) { 472 + ndlp = lpfc_enable_node(vport, ndlp, 473 + NLP_STE_UNUSED_NODE); 474 + if(!ndlp) 475 + goto fail; 476 } 477 478 memcpy(&ndlp->nlp_portname, &sp->portName, 479 sizeof(struct lpfc_name)); 480 memcpy(&ndlp->nlp_nodename, &sp->nodeName, 481 sizeof(struct lpfc_name)); 482 + /* Set state will put ndlp onto node list if not already done */ 483 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 484 spin_lock_irq(shost->host_lock); 485 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 486 spin_unlock_irq(shost->host_lock); 487 + } else 488 + /* This side will wait for the PLOGI, decrement ndlp reference 489 + * count indicating that ndlp can be released when other 490 + * references to it are done. 491 + */ 492 lpfc_nlp_put(ndlp); 493 494 /* If we are pt2pt with another NPort, force NPIV off! */ 495 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; ··· 728 if (!ndlp) 729 return 0; 730 lpfc_nlp_init(vport, ndlp, Fabric_DID); 731 + /* Put ndlp onto node list */ 732 + lpfc_enqueue_node(vport, ndlp); 733 + } else if (!NLP_CHK_NODE_ACT(ndlp)) { 734 + /* re-setup ndlp without removing from node list */ 735 + ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 736 + if (!ndlp) 737 + return 0; 738 } 739 740 + if (lpfc_issue_els_flogi(vport, ndlp, 0)) 741 /* This decrement of reference count to node shall kick off 742 * the release of the node. 
743 */ 744 lpfc_nlp_put(ndlp); 745 + 746 return 1; 747 } 748 ··· 755 if (!ndlp) 756 return 0; 757 lpfc_nlp_init(vport, ndlp, Fabric_DID); 758 + /* Put ndlp onto node list */ 759 + lpfc_enqueue_node(vport, ndlp); 760 + } else if (!NLP_CHK_NODE_ACT(ndlp)) { 761 + /* re-setup ndlp without removing from node list */ 762 + ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 763 + if (!ndlp) 764 + return 0; 765 } 766 + 767 if (lpfc_issue_els_fdisc(vport, ndlp, 0)) { 768 /* decrement node reference count to trigger the release of 769 * the node. ··· 816 */ 817 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName); 818 819 + if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp)) 820 return ndlp; 821 822 if (!new_ndlp) { ··· 827 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC); 828 if (!new_ndlp) 829 return ndlp; 830 lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID); 831 + } else if (!NLP_CHK_NODE_ACT(new_ndlp)) { 832 + new_ndlp = lpfc_enable_node(vport, new_ndlp, 833 + NLP_STE_UNUSED_NODE); 834 + if (!new_ndlp) 835 + return ndlp; 836 } 837 838 lpfc_unreg_rpi(vport, new_ndlp); ··· 839 new_ndlp->nlp_flag |= NLP_NPR_2B_DISC; 840 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 841 842 + /* Set state will put new_ndlp on to node list if not already done */ 843 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); 844 845 /* Move this back to NPR state */ ··· 912 irsp->un.elsreq64.remoteID); 913 914 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); 915 + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 916 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 917 "0136 PLOGI completes to NPort x%x " 918 "with no ndlp. Data: x%x x%x x%x\n", ··· 962 } 963 /* PLOGI failed */ 964 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 965 + if (lpfc_error_lost_link(irsp)) 966 rc = NLP_STE_FREED_NODE; 967 + else 968 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, 969 NLP_EVT_CMPL_PLOGI); 970 } else { 971 /* Good status, call state machine */ 972 prsp = list_entry(((struct lpfc_dmabuf *) ··· 1015 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 1016 1017 ndlp = lpfc_findnode_did(vport, did); 1018 + if (ndlp && !NLP_CHK_NODE_ACT(ndlp)) 1019 + ndlp = NULL; 1020 1021 + /* If ndlp is not NULL, we will bump the reference count on it */ 1022 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 1023 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 1024 ELS_CMD_PLOGI); ··· 1097 } 1098 /* PRLI failed */ 1099 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1100 + if (lpfc_error_lost_link(irsp)) 1101 goto out; 1102 + else 1103 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1104 NLP_EVT_CMPL_PRLI); 1105 + } else 1106 /* Good status, call state machine */ 1107 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1108 NLP_EVT_CMPL_PRLI); 1109 out: 1110 lpfc_els_free_iocb(phba, cmdiocb); 1111 return; ··· 1275 } 1276 /* ADISC failed */ 1277 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1278 + if (!lpfc_error_lost_link(irsp)) 1279 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1280 NLP_EVT_CMPL_ADISC); 1281 + } else 1282 /* Good status, call state machine */ 1283 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1284 NLP_EVT_CMPL_ADISC); 1285 1286 if (disc && vport->num_disc_nodes) { 1287 /* Check to see if there are more ADISCs to be sent */ ··· 1443 else 1444 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1445 NLP_EVT_CMPL_LOGO); 1446 + } else 1447 /* Good status, call state machine. 1448 * This will unregister the rpi if needed. 
1449 */ 1450 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1451 NLP_EVT_CMPL_LOGO); 1452 out: 1453 lpfc_els_free_iocb(phba, cmdiocb); 1454 return; ··· 1556 psli = &phba->sli; 1557 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 1558 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 1559 1560 + ndlp = lpfc_findnode_did(vport, nportid); 1561 + if (!ndlp) { 1562 + ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 1563 + if (!ndlp) 1564 + return 1; 1565 + lpfc_nlp_init(vport, ndlp, nportid); 1566 + lpfc_enqueue_node(vport, ndlp); 1567 + } else if (!NLP_CHK_NODE_ACT(ndlp)) { 1568 + ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 1569 + if (!ndlp) 1570 + return 1; 1571 + } 1572 1573 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1574 ndlp->nlp_DID, ELS_CMD_SCR); ··· 1623 psli = &phba->sli; 1624 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 1625 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 1626 1627 + ndlp = lpfc_findnode_did(vport, nportid); 1628 + if (!ndlp) { 1629 + ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 1630 + if (!ndlp) 1631 + return 1; 1632 + lpfc_nlp_init(vport, ndlp, nportid); 1633 + lpfc_enqueue_node(vport, ndlp); 1634 + } else if (!NLP_CHK_NODE_ACT(ndlp)) { 1635 + ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 1636 + if (!ndlp) 1637 + return 1; 1638 + } 1639 1640 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1641 ndlp->nlp_DID, ELS_CMD_RNID); ··· 1657 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); 1658 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 1659 ondlp = lpfc_findnode_did(vport, nportid); 1660 + if (ondlp && NLP_CHK_NODE_ACT(ondlp)) { 1661 memcpy(&fp->OportName, &ondlp->nlp_portname, 1662 sizeof(struct lpfc_name)); 1663 memcpy(&fp->OnodeName, &ondlp->nlp_nodename, ··· 1690 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 1691 { 1692 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1693 + struct lpfc_work_evt *evtp; 1694 1695 spin_lock_irq(shost->host_lock); 1696 nlp->nlp_flag &= ~NLP_DELAY_TMO; ··· 1697 del_timer_sync(&nlp->nlp_delayfunc); 1698 nlp->nlp_last_elscmd = 0; 1699 1700 + if (!list_empty(&nlp->els_retry_evt.evt_listp)) { 1701 list_del_init(&nlp->els_retry_evt.evt_listp); 1702 + /* Decrement nlp reference count held for the delayed retry */ 1703 + evtp = &nlp->els_retry_evt; 1704 + lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); 1705 + } 1706 1707 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 1708 spin_lock_irq(shost->host_lock); ··· 1842 cmd = *elscmd++; 1843 } 1844 1845 + if (ndlp && NLP_CHK_NODE_ACT(ndlp)) 1846 did = ndlp->nlp_DID; 1847 else { 1848 /* We should only hit this case for retrying PLOGI */ 1849 did = irsp->un.elsreq64.remoteID; 1850 ndlp = lpfc_findnode_did(vport, did); 1851 + if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 1852 + && (cmd != ELS_CMD_PLOGI)) 1853 return 1; 1854 } 1855 ··· 1870 break; 1871 1872 case IOERR_ILLEGAL_COMMAND: 1873 + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1874 + "0124 Retry illegal cmd x%x " 1875 + "retry:x%x delay:x%x\n", 1876 + cmd, cmdiocb->retry, delay); 1877 + retry = 1; 1878 + /* All command's retry policy */ 1879 + maxretry = 8; 1880 + if (cmdiocb->retry > 2) 1881 + delay = 1000; 1882 break; 1883 1884 case IOERR_NO_RESOURCES: ··· 1967 break; 1968 1969 case LSRJT_LOGICAL_ERR: 1970 + /* There are some cases where switches return this 1971 + * error when they are not ready and should be returning 1972 + * Logical Busy. We should delay every time. 
1973 + */ 1974 + if (cmd == ELS_CMD_FDISC && 1975 + stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { 1976 + maxretry = 3; 1977 + delay = 1000; 1978 + retry = 1; 1979 + break; 1980 + } 1981 case LSRJT_PROTOCOL_ERR: 1982 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 1983 (cmd == ELS_CMD_FDISC) && ··· 1996 retry = 1; 1997 1998 if ((cmd == ELS_CMD_FLOGI) && 1999 + (phba->fc_topology != TOPOLOGY_LOOP) && 2000 + !lpfc_error_lost_link(irsp)) { 2001 /* FLOGI retry policy */ 2002 retry = 1; 2003 maxretry = 48; ··· 2322 if ((rspiocb->iocb.ulpStatus == 0) 2323 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 2324 lpfc_unreg_rpi(vport, ndlp); 2325 + /* Increment reference count to ndlp to hold the 2326 + * reference to ndlp for the callback function. 2327 + */ 2328 mbox->context2 = lpfc_nlp_get(ndlp); 2329 mbox->vport = vport; 2330 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { ··· 2335 NLP_STE_REG_LOGIN_ISSUE); 2336 } 2337 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 2338 + != MBX_NOT_FINISHED) 2339 goto out; 2340 + else 2341 + /* Decrement the ndlp reference count we 2342 + * set for this failed mailbox command. 2343 + */ 2344 + lpfc_nlp_put(ndlp); 2345 2346 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 2347 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, ··· 2796 2797 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2798 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2799 + if (!NLP_CHK_NODE_ACT(ndlp)) 2800 + continue; 2801 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 2802 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 2803 (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) { ··· 2833 2834 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 2835 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2836 + if (!NLP_CHK_NODE_ACT(ndlp)) 2837 + continue; 2838 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 2839 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 2840 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && ··· 2869 struct lpfc_hba *phba = vport->phba; 2870 int i; 2871 2872 + spin_lock_irq(shost->host_lock); 2873 + if (vport->fc_rscn_flush) { 2874 + /* Another thread is walking fc_rscn_id_list on this vport */ 2875 + spin_unlock_irq(shost->host_lock); 2876 + return; 2877 + } 2878 + /* Indicate we are walking lpfc_els_flush_rscn on this vport */ 2879 + vport->fc_rscn_flush = 1; 2880 + spin_unlock_irq(shost->host_lock); 2881 + 2882 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 2883 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); 2884 vport->fc_rscn_id_list[i] = NULL; ··· 2878 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); 2879 spin_unlock_irq(shost->host_lock); 2880 lpfc_can_disctmo(vport); 2881 + /* Indicate we are done walking this fc_rscn_id_list */ 2882 + vport->fc_rscn_flush = 0; 2883 } 2884 2885 int ··· 2887 D_ID rscn_did; 2888 uint32_t *lp; 2889 uint32_t payload_len, i; 2890 + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2891 2892 ns_did.un.word = did; 2893 ··· 2898 if (vport->fc_flag & FC_RSCN_DISCOVERY) 2899 return did; 2900 2901 + spin_lock_irq(shost->host_lock); 2902 + if (vport->fc_rscn_flush) { 2903 + /* Another thread is walking fc_rscn_id_list on this vport */ 2904 + spin_unlock_irq(shost->host_lock); 2905 + return 0; 2906 + } 2907 + /* Indicate we are walking fc_rscn_id_list on this vport */ 2908 + vport->fc_rscn_flush = 1; 2909 + spin_unlock_irq(shost->host_lock); 2910 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 2911 lp = vport->fc_rscn_id_list[i]->virt; 2912 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); ··· 2908 switch 
(rscn_did.un.b.resv) { 2909 case 0: /* Single N_Port ID effected */ 2910 if (ns_did.un.word == rscn_did.un.word) 2911 + goto return_did_out; 2912 break; 2913 case 1: /* Whole N_Port Area effected */ 2914 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 2915 && (ns_did.un.b.area == rscn_did.un.b.area)) 2916 + goto return_did_out; 2917 break; 2918 case 2: /* Whole N_Port Domain effected */ 2919 if (ns_did.un.b.domain == rscn_did.un.b.domain) 2920 + goto return_did_out; 2921 break; 2922 default: 2923 /* Unknown Identifier in RSCN node */ ··· 2926 "RSCN payload Data: x%x\n", 2927 rscn_did.un.word); 2928 case 3: /* Whole Fabric effected */ 2929 + goto return_did_out; 2930 } 2931 } 2932 } 2933 + /* Indicate we are done with walking fc_rscn_id_list on this vport */ 2934 + vport->fc_rscn_flush = 0; 2935 return 0; 2936 + return_did_out: 2937 + /* Indicate we are done with walking fc_rscn_id_list on this vport */ 2938 + vport->fc_rscn_flush = 0; 2939 + return did; 2940 } 2941 2942 static int ··· 2943 */ 2944 2945 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 2946 + if (!NLP_CHK_NODE_ACT(ndlp) || 2947 + ndlp->nlp_state == NLP_STE_UNUSED_NODE || 2948 lpfc_rscn_payload_check(vport, ndlp->nlp_DID) == 0) 2949 continue; 2950 ··· 2971 uint32_t *lp, *datap; 2972 IOCB_t *icmd; 2973 uint32_t payload_len, length, nportid, *cmd; 2974 + int rscn_cnt; 2975 int rscn_id = 0, hba_id = 0; 2976 int i; 2977 ··· 2984 /* RSCN received */ 2985 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2986 "0214 RSCN received Data: x%x x%x x%x x%x\n", 2987 + vport->fc_flag, payload_len, *lp, 2988 + vport->fc_rscn_id_cnt); 2989 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 2990 fc_host_post_event(shost, fc_get_event_number(), 2991 FCH_EVT_RSCN, lp[i]); ··· 3022 "0214 Ignore RSCN " 3023 "Data: x%x x%x x%x x%x\n", 3024 vport->fc_flag, payload_len, 3025 + *lp, vport->fc_rscn_id_cnt); 3026 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 3027 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", 3028 ndlp->nlp_DID, vport->port_state, ··· 3034 } 3035 } 3036 3037 + spin_lock_irq(shost->host_lock); 3038 + if (vport->fc_rscn_flush) { 3039 + /* Another thread is walking fc_rscn_id_list on this vport */ 3040 + spin_unlock_irq(shost->host_lock); 3041 + vport->fc_flag |= FC_RSCN_DISCOVERY; 3042 + return 0; 3043 + } 3044 + /* Indicate we are walking fc_rscn_id_list on this vport */ 3045 + vport->fc_rscn_flush = 1; 3046 + spin_unlock_irq(shost->host_lock); 3047 + /* Get the array count after sucessfully have the token */ 3048 + rscn_cnt = vport->fc_rscn_id_cnt; 3049 /* If we are already processing an RSCN, save the received 3050 * RSCN payload buffer, cmdiocb->context2 to process later. 
3051 */ ··· 3055 if ((rscn_cnt) && 3056 (payload_len + length <= LPFC_BPL_SIZE)) { 3057 *cmd &= ELS_CMD_MASK; 3058 + *cmd |= cpu_to_be32(payload_len + length); 3059 memcpy(((uint8_t *)cmd) + length, lp, 3060 payload_len); 3061 } else { ··· 3066 */ 3067 cmdiocb->context2 = NULL; 3068 } 3069 /* Deferred RSCN */ 3070 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 3071 "0235 Deferred RSCN " ··· 3083 vport->fc_rscn_id_cnt, vport->fc_flag, 3084 vport->port_state); 3085 } 3086 + /* Indicate we are done walking fc_rscn_id_list on this vport */ 3087 + vport->fc_rscn_flush = 0; 3088 /* Send back ACC */ 3089 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 3090 /* send RECOVERY event for ALL nodes that match RSCN payload */ 3091 lpfc_rscn_recovery_check(vport); 3092 spin_lock_irq(shost->host_lock); ··· 3093 spin_unlock_irq(shost->host_lock); 3094 return 0; 3095 } 3096 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 3097 "RCV RSCN: did:x%x/ste:x%x flg:x%x", 3098 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); ··· 3102 vport->fc_flag |= FC_RSCN_MODE; 3103 spin_unlock_irq(shost->host_lock); 3104 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; 3105 + /* Indicate we are done walking fc_rscn_id_list on this vport */ 3106 + vport->fc_rscn_flush = 0; 3107 /* 3108 * If we zero, cmdiocb->context2, the calling routine will 3109 * not try to free it. 3110 */ 3111 cmdiocb->context2 = NULL; 3112 lpfc_set_disctmo(vport); 3113 /* Send back ACC */ 3114 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 3115 /* send RECOVERY event for ALL nodes that match RSCN payload */ 3116 lpfc_rscn_recovery_check(vport); 3117 return lpfc_els_handle_rscn(vport); 3118 } 3119 ··· 3145 vport->num_disc_nodes = 0; 3146 3147 ndlp = lpfc_findnode_did(vport, NameServer_DID); 3148 + if (ndlp && NLP_CHK_NODE_ACT(ndlp) 3149 + && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 3150 /* Good ndlp, issue CT Request to NameServer */ 3151 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0) 3152 /* Wait for NameServer query cmpl before we can ··· 3155 /* If login to NameServer does not exist, issue one */ 3156 /* Good status, issue PLOGI to NameServer */ 3157 ndlp = lpfc_findnode_did(vport, NameServer_DID); 3158 + if (ndlp && NLP_CHK_NODE_ACT(ndlp)) 3159 /* Wait for NameServer login cmpl before we can 3160 continue */ 3161 return 1; 3162 3163 + if (ndlp) { 3164 + ndlp = lpfc_enable_node(vport, ndlp, 3165 + NLP_STE_PLOGI_ISSUE); 3166 + if (!ndlp) { 3167 + lpfc_els_flush_rscn(vport); 3168 + return 0; 3169 + } 3170 + ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; 3171 } else { 3172 + ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 3173 + if (!ndlp) { 3174 + lpfc_els_flush_rscn(vport); 3175 + return 0; 3176 + } 3177 lpfc_nlp_init(vport, ndlp, NameServer_DID); 3178 ndlp->nlp_prev_state = ndlp->nlp_state; 3179 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 3180 } 3181 + ndlp->nlp_type |= NLP_FABRIC; 3182 + lpfc_issue_els_plogi(vport, NameServer_DID, 0); 3183 + /* Wait for NameServer login cmpl before we can 3184 + * continue 3185 + */ 3186 + return 1; 3187 } 3188 3189 lpfc_els_flush_rscn(vport); ··· 3672 3673 list_for_each_entry_safe(ndlp, next_ndlp, 3674 &vport->fc_nodes, nlp_listp) { 3675 + if (!NLP_CHK_NODE_ACT(ndlp)) 3676 + continue; 3677 if (ndlp->nlp_state != NLP_STE_NPR_NODE) 3678 continue; 3679 if (ndlp->nlp_type & NLP_FABRIC) { ··· 3697 */ 3698 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, 3699 nlp_listp) { 3700 + if (!NLP_CHK_NODE_ACT(ndlp)) 3701 + continue; 3702 if (ndlp->nlp_state != 
NLP_STE_NPR_NODE) 3703 continue; 3704 ··· 3936 uint32_t cmd, did, newnode, rjt_err = 0; 3937 IOCB_t *icmd = &elsiocb->iocb; 3938 3939 + if (!vport || !(elsiocb->context2)) 3940 goto dropit; 3941 3942 newnode = 0; ··· 3971 lpfc_nlp_init(vport, ndlp, did); 3972 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 3973 newnode = 1; 3974 + if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 3975 ndlp->nlp_type |= NLP_FABRIC; 3976 + } else { 3977 + if (!NLP_CHK_NODE_ACT(ndlp)) { 3978 + ndlp = lpfc_enable_node(vport, ndlp, 3979 + NLP_STE_UNUSED_NODE); 3980 + if (!ndlp) 3981 + goto dropit; 3982 } 3983 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 3984 /* This is simular to the new node path */ 3985 + ndlp = lpfc_nlp_get(ndlp); 3986 + if (!ndlp) 3987 + goto dropit; 3988 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 3989 newnode = 1; 3990 } ··· 3987 phba->fc_stat.elsRcvFrame++; 3988 if (elsiocb->context1) 3989 lpfc_nlp_put(elsiocb->context1); 3990 + 3991 elsiocb->context1 = lpfc_nlp_get(ndlp); 3992 elsiocb->vport = vport; 3993 ··· 4007 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); 4008 4009 if (vport->port_state < LPFC_DISC_AUTH) { 4010 + if (!(phba->pport->fc_flag & FC_PT2PT) || 4011 + (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { 4012 + rjt_err = LSRJT_UNABLE_TPC; 4013 + break; 4014 + } 4015 + /* We get here, and drop thru, if we are PT2PT with 4016 + * another NPort and the other side has initiated 4017 + * the PLOGI before responding to our FLOGI. 4018 + */ 4019 } 4020 4021 shost = lpfc_shost_from_vport(vport); ··· 4251 vport = lpfc_find_vport_by_vpid(phba, vpi); 4252 } 4253 } 4254 + /* If there are no BDEs associated 4255 + * with this IOCB, there is nothing to do. 4256 + */ 4257 if (icmd->ulpBdeCount == 0) 4258 return; 4259 4260 + /* type of ELS cmd is first 32bit word 4261 + * in packet 4262 + */ 4263 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 4264 elsiocb->context2 = bdeBuf1; 4265 } else { ··· 4314 } 4315 lpfc_nlp_init(vport, ndlp, NameServer_DID); 4316 ndlp->nlp_type |= NLP_FABRIC; 4317 + } else if (!NLP_CHK_NODE_ACT(ndlp)) { 4318 + ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 4319 + if (!ndlp) { 4320 + if (phba->fc_topology == TOPOLOGY_LOOP) { 4321 + lpfc_disc_start(vport); 4322 + return; 4323 + } 4324 + lpfc_vport_set_state(vport, FC_VPORT_FAILED); 4325 + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4326 + "0348 NameServer login: node freed\n"); 4327 + return; 4328 + } 4329 } 4330 4331 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); ··· 4360 switch (mb->mbxStatus) { 4361 case 0x11: /* unsupported feature */ 4362 case 0x9603: /* max_vpi exceeded */ 4363 + case 0x9602: /* Link event since CLEAR_LA */ 4364 /* giving up on vport registration */ 4365 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 4366 spin_lock_irq(shost->host_lock); ··· 4373 spin_lock_irq(shost->host_lock); 4374 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 4375 spin_unlock_irq(shost->host_lock); 4376 + if (vport->port_type == LPFC_PHYSICAL_PORT) 4377 + lpfc_initial_flogi(vport); 4378 + else 4379 + lpfc_initial_fdisc(vport); 4380 break; 4381 } 4382 ··· 4471 irsp->ulpStatus, irsp->un.ulpWord[4]); 4472 if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING) 4473 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 4474 lpfc_nlp_put(ndlp); 4475 /* giving up on FDISC. 
Cancel discovery timer */ 4476 lpfc_can_disctmo(vport); ··· 4492 */ 4493 list_for_each_entry_safe(np, next_np, 4494 &vport->fc_nodes, nlp_listp) { 4495 + if (!NLP_CHK_NODE_ACT(ndlp) || 4496 + (np->nlp_state != NLP_STE_NPR_NODE) || 4497 + !(np->nlp_flag & NLP_NPR_ADISC)) 4498 continue; 4499 spin_lock_irq(shost->host_lock); 4500 np->nlp_flag &= ~NLP_NPR_ADISC; ··· 4599 { 4600 struct lpfc_vport *vport = cmdiocb->vport; 4601 IOCB_t *irsp; 4602 + struct lpfc_nodelist *ndlp; 4603 + ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 4604 4605 irsp = &rspiocb->iocb; 4606 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, ··· 4607 4608 lpfc_els_free_iocb(phba, cmdiocb); 4609 vport->unreg_vpi_cmpl = VPORT_ERROR; 4610 + 4611 + /* Trigger the release of the ndlp after logo */ 4612 + lpfc_nlp_put(ndlp); 4613 } 4614 4615 int ··· 4686 repeat: 4687 iocb = NULL; 4688 spin_lock_irqsave(&phba->hbalock, iflags); 4689 + /* Post any pending iocb to the SLI layer */ 4690 if (atomic_read(&phba->fabric_iocb_count) == 0) { 4691 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), 4692 list); 4693 if (iocb) 4694 + /* Increment fabric iocb count to hold the position */ 4695 atomic_inc(&phba->fabric_iocb_count); 4696 } 4697 spin_unlock_irqrestore(&phba->hbalock, iflags); ··· 4737 int blocked; 4738 4739 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 4740 + /* Start a timer to unblock fabric iocbs after 100ms */ 4741 if (!blocked) 4742 mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 ); 4743 ··· 4787 4788 atomic_dec(&phba->fabric_iocb_count); 4789 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) { 4790 + /* Post any pending iocbs to HBA */ 4791 + lpfc_resume_fabric_iocbs(phba); 4792 } 4793 } 4794 ··· 4807 ready = atomic_read(&phba->fabric_iocb_count) == 0 && 4808 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 4809 4810 + if (ready) 4811 + /* Increment fabric iocb count to hold the position */ 4812 + atomic_inc(&phba->fabric_iocb_count); 4813 spin_unlock_irqrestore(&phba->hbalock, iflags); 4814 if (ready) { 4815 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; ··· 4817 "Fabric sched2: ste:x%x", 4818 iocb->vport->port_state, 0, 0); 4819 4820 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); 4821 4822 if (ret == IOCB_ERROR) {
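Another pattern introduced in the new lpfc_els.c above: several paths race on vport->fc_rscn_id_list, since the RSCN receive path appends to it while lpfc_els_flush_rscn() and lpfc_rscn_payload_check() walk and free it. The patch serializes them with a one-bit token, vport->fc_rscn_flush: a walker sets it under shost->host_lock, drops the lock for the walk, and clears it when done; anyone who finds it already set backs off (the receive path defers the event via FC_RSCN_DISCOVERY). A stand-alone analogue, where host_lock, rscn_flush, and try_walk_rscn_list() are hypothetical stand-ins for the driver's fields:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static bool rscn_flush;                 /* token: someone owns the list */

/* Walk the RSCN list exclusively without holding the lock throughout. */
static bool try_walk_rscn_list(void (*walker)(void))
{
	pthread_mutex_lock(&host_lock);
	if (rscn_flush) {               /* another walker owns the list */
		pthread_mutex_unlock(&host_lock);
		return false;           /* caller defers and retries later */
	}
	rscn_flush = true;              /* take the token */
	pthread_mutex_unlock(&host_lock);

	walker();                       /* no concurrent walker possible */

	rscn_flush = false;             /* release the token */
	return true;
}

static void dump_list(void)
{
	puts("walking fc_rscn_id_list");
}

int main(void)
{
	return try_walk_rscn_list(dump_list) ? 0 : 1;
}

Note that the patch, like the sketch, clears the token with a plain store and no lock; that works because only the current token owner ever performs that store.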
+269 -59
drivers/scsi/lpfc/lpfc_hbadisc.c
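Both versions of lpfc_hbadisc.c follow, old first. The new code leans on a family of NLP_* usage-map accessors (NLP_CHK_NODE_ACT, NLP_CHK_FREE_REQ, NLP_SET_IACT_REQ, and friends) whose definitions live outside this diff, in lpfc_disc.h. For orientation only, bit-flag helpers of this kind usually take the shape sketched below; the bit values and exact predicates here are assumptions, not the driver's definitions:

/* Hypothetical shape of the nlp_usg_map accessors; the real macros
 * are in lpfc_disc.h and may differ. */
#define NLP_USG_NODE_ACT_BIT	0x1	/* node is active (usable) */
#define NLP_USG_IACT_REQ_BIT	0x2	/* inactivation requested */
#define NLP_USG_FREE_REQ_BIT	0x4	/* memory free requested */
#define NLP_USG_FREE_ACK_BIT	0x8	/* memory free acknowledged */

#define NLP_CHK_NODE_ACT(ndlp)	((ndlp)->nlp_usg_map & NLP_USG_NODE_ACT_BIT)
#define NLP_INT_NODE_ACT(ndlp)	((ndlp)->nlp_usg_map = NLP_USG_NODE_ACT_BIT)
#define NLP_CLR_NODE_ACT(ndlp)	((ndlp)->nlp_usg_map &= ~NLP_USG_NODE_ACT_BIT)
#define NLP_CHK_IACT_REQ(ndlp)	((ndlp)->nlp_usg_map & NLP_USG_IACT_REQ_BIT)
#define NLP_SET_IACT_REQ(ndlp)	((ndlp)->nlp_usg_map |= NLP_USG_IACT_REQ_BIT)
#define NLP_CHK_FREE_REQ(ndlp)	((ndlp)->nlp_usg_map & NLP_USG_FREE_REQ_BIT)
#define NLP_CHK_FREE_ACK(ndlp)	((ndlp)->nlp_usg_map & NLP_USG_FREE_ACK_BIT)
#define NLP_SET_FREE_ACK(ndlp)	((ndlp)->nlp_usg_map |= NLP_USG_FREE_ACK_BIT)

The state transitions on the map are made under the new per-HBA phba->ndlp_lock spinlock in the code below, so plain bit operations like these suffice.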
··· 1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2007 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 272 if (!(vport->load_flag & FC_UNLOADING) && 273 !(ndlp->nlp_flag & NLP_DELAY_TMO) && 274 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && 275 - (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) { 276 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 277 - } 278 } 279 280 ··· 565 int rc; 566 567 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 568 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 569 continue; 570 - 571 if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) || 572 ((vport->port_type == LPFC_NPIV_PORT) && 573 (ndlp->nlp_DID == NameServer_DID))) ··· 629 LPFC_MBOXQ_t *mb; 630 int i; 631 632 - if (phba->link_state == LPFC_LINK_DOWN) { 633 return 0; 634 - } 635 spin_lock_irq(&phba->hbalock); 636 if (phba->link_state > LPFC_LINK_DOWN) { 637 phba->link_state = LPFC_LINK_DOWN; ··· 683 struct lpfc_nodelist *ndlp; 684 685 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 686 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 687 continue; 688 - 689 if (ndlp->nlp_type & NLP_FABRIC) { 690 - /* On Linkup its safe to clean up the ndlp 691 - * from Fabric connections. 692 - */ 693 if (ndlp->nlp_DID != Fabric_DID) 694 lpfc_unreg_rpi(vport, ndlp); 695 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 696 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { 697 - /* Fail outstanding IO now since device is 698 - * marked for PLOGI. 699 - */ 700 lpfc_unreg_rpi(vport, ndlp); 701 } 702 } ··· 799 writel(control, phba->HCregaddr); 800 readl(phba->HCregaddr); /* flush */ 801 spin_unlock_irq(&phba->hbalock); 802 return; 803 - 804 - vport->num_disc_nodes = 0; 805 - /* go thru NPR nodes and issue ELS PLOGIs */ 806 - if (vport->fc_npr_cnt) 807 - lpfc_els_disc_plogi(vport); 808 - 809 - if (!vport->num_disc_nodes) { 810 - spin_lock_irq(shost->host_lock); 811 - vport->fc_flag &= ~FC_NDISC_ACTIVE; 812 - spin_unlock_irq(shost->host_lock); 813 - } 814 - 815 - vport->port_state = LPFC_VPORT_READY; 816 817 out: 818 /* Device Discovery completes */ ··· 1121 if (la->attType == AT_LINK_UP) { 1122 phba->fc_stat.LinkUp++; 1123 if (phba->link_flag & LS_LOOPBACK_MODE) { 1124 - lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1125 "1306 Link Up Event in loop back mode " 1126 "x%x received Data: x%x x%x x%x x%x\n", 1127 la->eventTag, phba->fc_eventTag, ··· 1138 lpfc_mbx_process_link_up(phba, la); 1139 } else { 1140 phba->fc_stat.LinkDown++; 1141 - lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 1142 "1305 Link Down Event x%x received " 1143 "Data: x%x x%x x%x\n", 1144 la->eventTag, phba->fc_eventTag, 1145 phba->pport->port_state, vport->fc_flag); 1146 lpfc_mbx_issue_link_down(phba); 1147 } 1148 ··· 1303 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1304 kfree(mp); 1305 mempool_free(pmb, phba->mbox_mem_pool); 1306 - lpfc_nlp_put(ndlp); 1307 1308 if (phba->fc_topology == TOPOLOGY_LOOP) { 1309 /* FLOGI failed, use loop map to make discovery list */ ··· 1310 1311 /* Start discovery */ 1312 lpfc_disc_start(vport); 1313 return; 1314 } 1315 ··· 1321 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 1322 "0258 Register Fabric login error: 0x%x\n", 1323 mb->mbxStatus); 1324 return; 1325 } 1326 1327 ndlp->nlp_rpi = mb->un.varWords[0]; 1328 ndlp->nlp_type |= NLP_FABRIC; 
1329 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 1330 - 1331 - lpfc_nlp_put(ndlp); /* Drop the reference from the mbox */ 1332 1333 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 1334 vports = lpfc_create_vport_work_array(phba); ··· 1359 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1360 kfree(mp); 1361 mempool_free(pmb, phba->mbox_mem_pool); 1362 return; 1363 } 1364 ··· 1471 * registered the port. 1472 */ 1473 if (ndlp->rport && ndlp->rport->dd_data && 1474 - ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) { 1475 lpfc_nlp_put(ndlp); 1476 - } 1477 1478 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, 1479 "rport add: did:x%x flg:x%x type x%x", ··· 1667 } 1668 1669 void 1670 lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 1671 { 1672 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); ··· 1691 list_del_init(&ndlp->nlp_listp); 1692 spin_unlock_irq(shost->host_lock); 1693 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, 1694 - NLP_STE_UNUSED_NODE); 1695 } 1696 1697 void ··· 2064 "Data: x%x x%x x%x\n", 2065 ndlp->nlp_DID, ndlp->nlp_flag, 2066 ndlp->nlp_state, ndlp->nlp_rpi); 2067 - lpfc_dequeue_node(vport, ndlp); 2068 2069 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ 2070 if ((mb = phba->sli.mbox_active)) { ··· 2100 } 2101 list_del(&mb->list); 2102 mempool_free(mb, phba->mbox_mem_pool); 2103 - lpfc_nlp_put(ndlp); 2104 } 2105 } 2106 spin_unlock_irq(&phba->hbalock); 2107 2108 - lpfc_els_abort(phba,ndlp); 2109 spin_lock_irq(shost->host_lock); 2110 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 2111 spin_unlock_irq(shost->host_lock); ··· 2167 } 2168 } 2169 } 2170 - 2171 lpfc_cleanup_node(vport, ndlp); 2172 2173 /* ··· 2291 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2292 spin_unlock_irq(shost->host_lock); 2293 return ndlp; 2294 } 2295 if (vport->fc_flag & FC_RSCN_MODE) { 2296 if (lpfc_rscn_payload_check(vport, did)) { 2297 /* If we've already recieved a PLOGI from this NPort ··· 2481 * continue discovery. 
2482 */ 2483 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 2484 !(vport->fc_flag & FC_RSCN_MODE)) { 2485 lpfc_issue_reg_vpi(phba, vport); 2486 return; ··· 2604 if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) { 2605 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, 2606 nlp_listp) { 2607 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || 2608 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) { 2609 lpfc_free_tx(phba, ndlp); ··· 2693 /* Start discovery by sending FLOGI, clean up old rpis */ 2694 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, 2695 nlp_listp) { 2696 if (ndlp->nlp_state != NLP_STE_NPR_NODE) 2697 continue; 2698 if (ndlp->nlp_type & NLP_FABRIC) { ··· 2741 "NameServer login\n"); 2742 /* Next look for NameServer ndlp */ 2743 ndlp = lpfc_findnode_did(vport, NameServer_DID); 2744 - if (ndlp) 2745 lpfc_els_abort(phba, ndlp); 2746 2747 /* ReStart discovery */ ··· 3020 ndlp->nlp_sid = NLP_NO_SID; 3021 INIT_LIST_HEAD(&ndlp->nlp_listp); 3022 kref_init(&ndlp->kref); 3023 3024 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 3025 "node init: did:x%x", ··· 3035 static void 3036 lpfc_nlp_release(struct kref *kref) 3037 { 3038 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist, 3039 kref); 3040 ··· 3044 "node release: did:x%x flg:x%x type:x%x", 3045 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); 3046 3047 lpfc_nlp_remove(ndlp->vport, ndlp); 3048 - mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool); 3049 } 3050 3051 /* This routine bumps the reference count for a ndlp structure to ensure ··· 3071 struct lpfc_nodelist * 3072 lpfc_nlp_get(struct lpfc_nodelist *ndlp) 3073 { 3074 if (ndlp) { 3075 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, 3076 "node get: did:x%x flg:x%x refcnt:x%x", 3077 ndlp->nlp_DID, ndlp->nlp_flag, 3078 atomic_read(&ndlp->kref.refcount)); 3079 - kref_get(&ndlp->kref); 3080 } 3081 return ndlp; 3082 } 3083 3084 - 3085 /* This routine decrements the reference count for a ndlp structure. If the 3086 - * count goes to 0, this indicates the the associated nodelist should be freed. 3087 */ 3088 int 3089 lpfc_nlp_put(struct lpfc_nodelist *ndlp) 3090 { 3091 - if (ndlp) { 3092 - lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, 3093 - "node put: did:x%x flg:x%x refcnt:x%x", 3094 - ndlp->nlp_DID, ndlp->nlp_flag, 3095 - atomic_read(&ndlp->kref.refcount)); 3096 } 3097 - return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0; 3098 } 3099 3100 /* This routine free's the specified nodelist if it is not in use 3101 - * by any other discovery thread. This routine returns 1 if the ndlp 3102 - * is not being used by anyone and has been freed. A return value of 3103 - * 0 indicates it is being used by another discovery thread and the 3104 - * refcount is left unchanged. 3105 */ 3106 int 3107 lpfc_nlp_not_used(struct lpfc_nodelist *ndlp) ··· 3181 "node not used: did:x%x flg:x%x refcnt:x%x", 3182 ndlp->nlp_DID, ndlp->nlp_flag, 3183 atomic_read(&ndlp->kref.refcount)); 3184 - 3185 - if (atomic_read(&ndlp->kref.refcount) == 1) { 3186 - lpfc_nlp_put(ndlp); 3187 - return 1; 3188 - } 3189 return 0; 3190 } 3191 -
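The updated lpfc_hbadisc.c follows. Its centerpiece, lpfc_enable_node(), revives an inactive ndlp in place: it zeroes the whole structure except the embedded list_head at its head, so the node keeps its position on vport->fc_nodes while every other field, the kref included, is re-initialized. The idiom only works because the list linkage is the first member of struct lpfc_nodelist, which is what the sizeof(struct list_head) offset in the memset relies on. A minimal stand-alone sketch, with a hypothetical struct node standing in for struct lpfc_nodelist:

#include <string.h>

struct list_head { struct list_head *next, *prev; };

struct node {
	struct list_head list;	/* must stay the first member */
	unsigned int did;
	unsigned int state;
};

/* Re-initialize everything after the list_head, as lpfc_enable_node()
 * does, then restore the fields that must survive. */
static void reinit_keep_links(struct node *n)
{
	unsigned int did = n->did;	/* identity to preserve */

	memset((char *)n + sizeof(struct list_head), 0,
	       sizeof(*n) - sizeof(struct list_head));
	n->did = did;
}

int main(void)
{
	struct node n = { { &n.list, &n.list }, 0x10300, 5 };

	reinit_keep_links(&n);
	return n.state;		/* 0: state was wiped, linkage kept */
}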
··· 1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 + * Copyright (C) 2004-2008 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 272 if (!(vport->load_flag & FC_UNLOADING) && 273 !(ndlp->nlp_flag & NLP_DELAY_TMO) && 274 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && 275 + (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) 276 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 277 } 278 279 ··· 566 int rc; 567 568 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 569 + if (!NLP_CHK_NODE_ACT(ndlp)) 570 + continue; 571 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 572 continue; 573 if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) || 574 ((vport->port_type == LPFC_NPIV_PORT) && 575 (ndlp->nlp_DID == NameServer_DID))) ··· 629 LPFC_MBOXQ_t *mb; 630 int i; 631 632 + if (phba->link_state == LPFC_LINK_DOWN) 633 return 0; 634 spin_lock_irq(&phba->hbalock); 635 if (phba->link_state > LPFC_LINK_DOWN) { 636 phba->link_state = LPFC_LINK_DOWN; ··· 684 struct lpfc_nodelist *ndlp; 685 686 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 687 + if (!NLP_CHK_NODE_ACT(ndlp)) 688 + continue; 689 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 690 continue; 691 if (ndlp->nlp_type & NLP_FABRIC) { 692 + /* On Linkup its safe to clean up the ndlp 693 + * from Fabric connections. 694 + */ 695 if (ndlp->nlp_DID != Fabric_DID) 696 lpfc_unreg_rpi(vport, ndlp); 697 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 698 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { 699 + /* Fail outstanding IO now since device is 700 + * marked for PLOGI. 701 + */ 702 lpfc_unreg_rpi(vport, ndlp); 703 } 704 } ··· 799 writel(control, phba->HCregaddr); 800 readl(phba->HCregaddr); /* flush */ 801 spin_unlock_irq(&phba->hbalock); 802 + mempool_free(pmb, phba->mbox_mem_pool); 803 return; 804 805 out: 806 /* Device Discovery completes */ ··· 1133 if (la->attType == AT_LINK_UP) { 1134 phba->fc_stat.LinkUp++; 1135 if (phba->link_flag & LS_LOOPBACK_MODE) { 1136 + lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 1137 "1306 Link Up Event in loop back mode " 1138 "x%x received Data: x%x x%x x%x x%x\n", 1139 la->eventTag, phba->fc_eventTag, ··· 1150 lpfc_mbx_process_link_up(phba, la); 1151 } else { 1152 phba->fc_stat.LinkDown++; 1153 + if (phba->link_flag & LS_LOOPBACK_MODE) { 1154 + lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 1155 + "1308 Link Down Event in loop back mode " 1156 + "x%x received " 1157 + "Data: x%x x%x x%x\n", 1158 + la->eventTag, phba->fc_eventTag, 1159 + phba->pport->port_state, vport->fc_flag); 1160 + } 1161 + else { 1162 + lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 1163 "1305 Link Down Event x%x received " 1164 "Data: x%x x%x x%x\n", 1165 la->eventTag, phba->fc_eventTag, 1166 phba->pport->port_state, vport->fc_flag); 1167 + } 1168 lpfc_mbx_issue_link_down(phba); 1169 } 1170 ··· 1305 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1306 kfree(mp); 1307 mempool_free(pmb, phba->mbox_mem_pool); 1308 1309 if (phba->fc_topology == TOPOLOGY_LOOP) { 1310 /* FLOGI failed, use loop map to make discovery list */ ··· 1313 1314 /* Start discovery */ 1315 lpfc_disc_start(vport); 1316 + /* Decrement the reference count to ndlp after the 1317 + * reference to the ndlp are done. 
1318 + */ 1319 + lpfc_nlp_put(ndlp); 1320 return; 1321 } 1322 ··· 1320 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 1321 "0258 Register Fabric login error: 0x%x\n", 1322 mb->mbxStatus); 1323 + /* Decrement the reference count to ndlp after the reference 1324 + * to the ndlp are done. 1325 + */ 1326 + lpfc_nlp_put(ndlp); 1327 return; 1328 } 1329 1330 ndlp->nlp_rpi = mb->un.varWords[0]; 1331 ndlp->nlp_type |= NLP_FABRIC; 1332 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 1333 1334 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 1335 vports = lpfc_create_vport_work_array(phba); ··· 1356 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1357 kfree(mp); 1358 mempool_free(pmb, phba->mbox_mem_pool); 1359 + 1360 + /* Drop the reference count from the mbox at the end after 1361 + * all the current reference to the ndlp have been done. 1362 + */ 1363 + lpfc_nlp_put(ndlp); 1364 return; 1365 } 1366 ··· 1463 * registered the port. 1464 */ 1465 if (ndlp->rport && ndlp->rport->dd_data && 1466 + ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) 1467 lpfc_nlp_put(ndlp); 1468 1469 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, 1470 "rport add: did:x%x flg:x%x type x%x", ··· 1660 } 1661 1662 void 1663 + lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 1664 + { 1665 + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1666 + 1667 + if (list_empty(&ndlp->nlp_listp)) { 1668 + spin_lock_irq(shost->host_lock); 1669 + list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes); 1670 + spin_unlock_irq(shost->host_lock); 1671 + } 1672 + } 1673 + 1674 + void 1675 lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 1676 { 1677 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); ··· 1672 list_del_init(&ndlp->nlp_listp); 1673 spin_unlock_irq(shost->host_lock); 1674 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, 1675 + NLP_STE_UNUSED_NODE); 1676 + } 1677 + 1678 + void 1679 + lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 1680 + { 1681 + if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0) 1682 + lpfc_cancel_retry_delay_tmo(vport, ndlp); 1683 + if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) 1684 + lpfc_nlp_counters(vport, ndlp->nlp_state, -1); 1685 + lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, 1686 + NLP_STE_UNUSED_NODE); 1687 + } 1688 + 1689 + struct lpfc_nodelist * 1690 + lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1691 + int state) 1692 + { 1693 + struct lpfc_hba *phba = vport->phba; 1694 + uint32_t did; 1695 + unsigned long flags; 1696 + 1697 + if (!ndlp) 1698 + return NULL; 1699 + 1700 + spin_lock_irqsave(&phba->ndlp_lock, flags); 1701 + /* The ndlp should not be in memory free mode */ 1702 + if (NLP_CHK_FREE_REQ(ndlp)) { 1703 + spin_unlock_irqrestore(&phba->ndlp_lock, flags); 1704 + lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, 1705 + "0277 lpfc_enable_node: ndlp:x%p " 1706 + "usgmap:x%x refcnt:%d\n", 1707 + (void *)ndlp, ndlp->nlp_usg_map, 1708 + atomic_read(&ndlp->kref.refcount)); 1709 + return NULL; 1710 + } 1711 + /* The ndlp should not already be in active mode */ 1712 + if (NLP_CHK_NODE_ACT(ndlp)) { 1713 + spin_unlock_irqrestore(&phba->ndlp_lock, flags); 1714 + lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, 1715 + "0278 lpfc_enable_node: ndlp:x%p " 1716 + "usgmap:x%x refcnt:%d\n", 1717 + (void *)ndlp, ndlp->nlp_usg_map, 1718 + atomic_read(&ndlp->kref.refcount)); 1719 + return NULL; 1720 + } 1721 + 1722 + /* Keep the original DID */ 1723 + did = ndlp->nlp_DID; 1724 + 1725 + /* 
re-initialize ndlp except of ndlp linked list pointer */ 1726 + memset((((char *)ndlp) + sizeof (struct list_head)), 0, 1727 + sizeof (struct lpfc_nodelist) - sizeof (struct list_head)); 1728 + INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); 1729 + INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp); 1730 + init_timer(&ndlp->nlp_delayfunc); 1731 + ndlp->nlp_delayfunc.function = lpfc_els_retry_delay; 1732 + ndlp->nlp_delayfunc.data = (unsigned long)ndlp; 1733 + ndlp->nlp_DID = did; 1734 + ndlp->vport = vport; 1735 + ndlp->nlp_sid = NLP_NO_SID; 1736 + /* ndlp management re-initialize */ 1737 + kref_init(&ndlp->kref); 1738 + NLP_INT_NODE_ACT(ndlp); 1739 + 1740 + spin_unlock_irqrestore(&phba->ndlp_lock, flags); 1741 + 1742 + if (state != NLP_STE_UNUSED_NODE) 1743 + lpfc_nlp_set_state(vport, ndlp, state); 1744 + 1745 + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 1746 + "node enable: did:x%x", 1747 + ndlp->nlp_DID, 0, 0); 1748 + return ndlp; 1749 } 1750 1751 void ··· 1972 "Data: x%x x%x x%x\n", 1973 ndlp->nlp_DID, ndlp->nlp_flag, 1974 ndlp->nlp_state, ndlp->nlp_rpi); 1975 + if (NLP_CHK_FREE_REQ(ndlp)) { 1976 + lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, 1977 + "0280 lpfc_cleanup_node: ndlp:x%p " 1978 + "usgmap:x%x refcnt:%d\n", 1979 + (void *)ndlp, ndlp->nlp_usg_map, 1980 + atomic_read(&ndlp->kref.refcount)); 1981 + lpfc_dequeue_node(vport, ndlp); 1982 + } else { 1983 + lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, 1984 + "0281 lpfc_cleanup_node: ndlp:x%p " 1985 + "usgmap:x%x refcnt:%d\n", 1986 + (void *)ndlp, ndlp->nlp_usg_map, 1987 + atomic_read(&ndlp->kref.refcount)); 1988 + lpfc_disable_node(vport, ndlp); 1989 + } 1990 1991 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ 1992 if ((mb = phba->sli.mbox_active)) { ··· 1994 } 1995 list_del(&mb->list); 1996 mempool_free(mb, phba->mbox_mem_pool); 1997 + /* We shall not invoke the lpfc_nlp_put to decrement 1998 + * the ndlp reference count as we are in the process 1999 + * of lpfc_nlp_release. 2000 + */ 2001 } 2002 } 2003 spin_unlock_irq(&phba->hbalock); 2004 2005 + lpfc_els_abort(phba, ndlp); 2006 + 2007 spin_lock_irq(shost->host_lock); 2008 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 2009 spin_unlock_irq(shost->host_lock); ··· 2057 } 2058 } 2059 } 2060 lpfc_cleanup_node(vport, ndlp); 2061 2062 /* ··· 2182 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2183 spin_unlock_irq(shost->host_lock); 2184 return ndlp; 2185 + } else if (!NLP_CHK_NODE_ACT(ndlp)) { 2186 + ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE); 2187 + if (!ndlp) 2188 + return NULL; 2189 + spin_lock_irq(shost->host_lock); 2190 + ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2191 + spin_unlock_irq(shost->host_lock); 2192 + return ndlp; 2193 } 2194 + 2195 if (vport->fc_flag & FC_RSCN_MODE) { 2196 if (lpfc_rscn_payload_check(vport, did)) { 2197 /* If we've already recieved a PLOGI from this NPort ··· 2363 * continue discovery. 
2364 */ 2365 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 2366 + !(vport->fc_flag & FC_PT2PT) && 2367 !(vport->fc_flag & FC_RSCN_MODE)) { 2368 lpfc_issue_reg_vpi(phba, vport); 2369 return; ··· 2485 if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) { 2486 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, 2487 nlp_listp) { 2488 + if (!NLP_CHK_NODE_ACT(ndlp)) 2489 + continue; 2490 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || 2491 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) { 2492 lpfc_free_tx(phba, ndlp); ··· 2572 /* Start discovery by sending FLOGI, clean up old rpis */ 2573 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, 2574 nlp_listp) { 2575 + if (!NLP_CHK_NODE_ACT(ndlp)) 2576 + continue; 2577 if (ndlp->nlp_state != NLP_STE_NPR_NODE) 2578 continue; 2579 if (ndlp->nlp_type & NLP_FABRIC) { ··· 2618 "NameServer login\n"); 2619 /* Next look for NameServer ndlp */ 2620 ndlp = lpfc_findnode_did(vport, NameServer_DID); 2621 + if (ndlp && NLP_CHK_NODE_ACT(ndlp)) 2622 lpfc_els_abort(phba, ndlp); 2623 2624 /* ReStart discovery */ ··· 2897 ndlp->nlp_sid = NLP_NO_SID; 2898 INIT_LIST_HEAD(&ndlp->nlp_listp); 2899 kref_init(&ndlp->kref); 2900 + NLP_INT_NODE_ACT(ndlp); 2901 2902 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 2903 "node init: did:x%x", ··· 2911 static void 2912 lpfc_nlp_release(struct kref *kref) 2913 { 2914 + struct lpfc_hba *phba; 2915 + unsigned long flags; 2916 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist, 2917 kref); 2918 ··· 2918 "node release: did:x%x flg:x%x type:x%x", 2919 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); 2920 2921 + lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 2922 + "0279 lpfc_nlp_release: ndlp:x%p " 2923 + "usgmap:x%x refcnt:%d\n", 2924 + (void *)ndlp, ndlp->nlp_usg_map, 2925 + atomic_read(&ndlp->kref.refcount)); 2926 + 2927 + /* remove ndlp from action. */ 2928 lpfc_nlp_remove(ndlp->vport, ndlp); 2929 + 2930 + /* clear the ndlp active flag for all release cases */ 2931 + phba = ndlp->vport->phba; 2932 + spin_lock_irqsave(&phba->ndlp_lock, flags); 2933 + NLP_CLR_NODE_ACT(ndlp); 2934 + spin_unlock_irqrestore(&phba->ndlp_lock, flags); 2935 + 2936 + /* free ndlp memory for final ndlp release */ 2937 + if (NLP_CHK_FREE_REQ(ndlp)) 2938 + mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool); 2939 } 2940 2941 /* This routine bumps the reference count for a ndlp structure to ensure ··· 2929 struct lpfc_nodelist * 2930 lpfc_nlp_get(struct lpfc_nodelist *ndlp) 2931 { 2932 + struct lpfc_hba *phba; 2933 + unsigned long flags; 2934 + 2935 if (ndlp) { 2936 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, 2937 "node get: did:x%x flg:x%x refcnt:x%x", 2938 ndlp->nlp_DID, ndlp->nlp_flag, 2939 atomic_read(&ndlp->kref.refcount)); 2940 + /* Check the ndlp usage flags to avoid incrementing the 2941 + * reference count of an ndlp that is in the process of 2942 + * being released. 2943 + */ 2944 + phba = ndlp->vport->phba; 2945 + spin_lock_irqsave(&phba->ndlp_lock, flags); 2946 + if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) { 2947 + spin_unlock_irqrestore(&phba->ndlp_lock, flags); 2948 + lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, 2949 + "0276 lpfc_nlp_get: ndlp:x%p " 2950 + "usgmap:x%x refcnt:%d\n", 2951 + (void *)ndlp, ndlp->nlp_usg_map, 2952 + atomic_read(&ndlp->kref.refcount)); 2953 + return NULL; 2954 + } else 2955 + kref_get(&ndlp->kref); 2956 + spin_unlock_irqrestore(&phba->ndlp_lock, flags); 2957 } 2958 return ndlp; 2959 } 2960 2961 /* This routine decrements the reference count for a ndlp structure.
If the 2962 + * count goes to 0, this indicates that the associated nodelist should be 2963 + * freed. Returning 1 indicates the ndlp resource has been released; on the 2964 + * other hand, returning 0 indicates the ndlp resource has not been released 2965 + * yet. 2966 */ 2967 int 2968 lpfc_nlp_put(struct lpfc_nodelist *ndlp) 2969 { 2970 + struct lpfc_hba *phba; 2971 + unsigned long flags; 2972 + 2973 + if (!ndlp) 2974 + return 1; 2975 + 2976 + lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, 2977 + "node put: did:x%x flg:x%x refcnt:x%x", 2978 + ndlp->nlp_DID, ndlp->nlp_flag, 2979 + atomic_read(&ndlp->kref.refcount)); 2980 + phba = ndlp->vport->phba; 2981 + spin_lock_irqsave(&phba->ndlp_lock, flags); 2982 + /* Check the ndlp memory free acknowledge flag to avoid the 2983 + * possible race condition that kref_put got invoked again 2984 + * after the previous one has done the ndlp memory free. 2985 + */ 2986 + if (NLP_CHK_FREE_ACK(ndlp)) { 2987 + spin_unlock_irqrestore(&phba->ndlp_lock, flags); 2988 + lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, 2989 + "0274 lpfc_nlp_put: ndlp:x%p " 2990 + "usgmap:x%x refcnt:%d\n", 2991 + (void *)ndlp, ndlp->nlp_usg_map, 2992 + atomic_read(&ndlp->kref.refcount)); 2993 + return 1; 2994 } 2995 + /* Check the ndlp inactivate log flag to avoid the possible 2996 + * race condition that kref_put got invoked again after the ndlp 2997 + * is already in the inactivating state. 2998 + */ 2999 + if (NLP_CHK_IACT_REQ(ndlp)) { 3000 + spin_unlock_irqrestore(&phba->ndlp_lock, flags); 3001 + lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, 3002 + "0275 lpfc_nlp_put: ndlp:x%p " 3003 + "usgmap:x%x refcnt:%d\n", 3004 + (void *)ndlp, ndlp->nlp_usg_map, 3005 + atomic_read(&ndlp->kref.refcount)); 3006 + return 1; 3007 + } 3008 + /* For the last put, mark the ndlp usage flags to make sure no 3009 + * other kref_get and kref_put on the same ndlp shall get 3010 + * in between the process when the final kref_put has been 3011 + * invoked on this ndlp. 3012 + */ 3013 + if (atomic_read(&ndlp->kref.refcount) == 1) { 3014 + /* Indicate ndlp is put to inactive state. */ 3015 + NLP_SET_IACT_REQ(ndlp); 3016 + /* Acknowledge ndlp memory free has been seen. */ 3017 + if (NLP_CHK_FREE_REQ(ndlp)) 3018 + NLP_SET_FREE_ACK(ndlp); 3019 + } 3020 + spin_unlock_irqrestore(&phba->ndlp_lock, flags); 3021 + /* Note: kref_put returns 1 when decrementing a reference 3022 + * count that was 1; it invokes the release callback function, 3023 + * but leaves the reference count at 1 (it does not actually 3024 + * perform the final decrement). Otherwise, it actually 3025 + * decrements the reference count and returns 0. 3026 + */ 3027 + return kref_put(&ndlp->kref, lpfc_nlp_release); 3028 } 3029 3030 /* This routine frees the specified nodelist if it is not in use 3031 + * by any other discovery thread. This routine returns 1 if the 3032 + * ndlp has been freed. A return value of 0 indicates the ndlp has 3033 + * not yet been released. 3034 */ 3035 int 3036 lpfc_nlp_not_used(struct lpfc_nodelist *ndlp) ··· 2968 "node not used: did:x%x flg:x%x refcnt:x%x", 2969 ndlp->nlp_DID, ndlp->nlp_flag, 2970 atomic_read(&ndlp->kref.refcount)); 2971 + if (atomic_read(&ndlp->kref.refcount) == 1) 2972 + if (lpfc_nlp_put(ndlp)) 2973 + return 1; 2974 return 0; 2975 }
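Editor's note: the lpfc_nlp_get()/lpfc_nlp_put() hunks above all follow one pattern: every kref operation is bracketed by the new per-HBA ndlp_lock and re-checks the usage flags, so a reference can never be taken on a node whose release has already begun. Below is a minimal sketch of that guarded-kref pattern under simplifying assumptions; struct node, NODE_ACTIVE and node_get() are invented stand-ins for the lpfc names, not driver code.

/* Sketch: refuse a new reference once release has started. */
#include <linux/kref.h>
#include <linux/spinlock.h>

struct node {
	struct kref kref;
	unsigned int flags;	/* NODE_ACTIVE set while the node is usable */
	spinlock_t *lock;	/* per-adapter lock, like phba->ndlp_lock */
};

#define NODE_ACTIVE 0x1

static struct node *node_get(struct node *np)
{
	unsigned long flags;

	if (!np)
		return NULL;
	spin_lock_irqsave(np->lock, flags);
	if (!(np->flags & NODE_ACTIVE)) {
		/* final put already ran: do not resurrect the node */
		spin_unlock_irqrestore(np->lock, flags);
		return NULL;
	}
	kref_get(&np->kref);
	spin_unlock_irqrestore(np->lock, flags);
	return np;
}

The lock only serializes the flag test against the final put; once kref_get() succeeds the caller holds a counted reference and may drop the lock.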
+17 -1
drivers/scsi/lpfc/lpfc_hw.h
··· 1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2007 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * * ··· 581 #define LSEXP_INVALID_O_SID 0x15 582 #define LSEXP_INVALID_OX_RX 0x17 583 #define LSEXP_CMD_IN_PROGRESS 0x19 584 #define LSEXP_INVALID_NPORT_ID 0x1F 585 #define LSEXP_INVALID_SEQ_ID 0x21 586 #define LSEXP_INVALID_XCHG 0x23 ··· 1377 #define CMD_QUE_XRI64_CX 0xB3 1378 #define CMD_IOCB_RCV_SEQ64_CX 0xB5 1379 #define CMD_IOCB_RCV_ELS64_CX 0xB7 1380 #define CMD_IOCB_RCV_CONT64_CX 0xBB 1381 1382 #define CMD_GEN_REQUEST64_CR 0xC2 1383 #define CMD_GEN_REQUEST64_CX 0xC3 1384 1385 #define CMD_MAX_IOCB_CMD 0xE6 1386 #define CMD_IOCB_MASK 0xff
··· 1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 + * Copyright (C) 2004-2008 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * * ··· 581 #define LSEXP_INVALID_O_SID 0x15 582 #define LSEXP_INVALID_OX_RX 0x17 583 #define LSEXP_CMD_IN_PROGRESS 0x19 584 + #define LSEXP_PORT_LOGIN_REQ 0x1E 585 #define LSEXP_INVALID_NPORT_ID 0x1F 586 #define LSEXP_INVALID_SEQ_ID 0x21 587 #define LSEXP_INVALID_XCHG 0x23 ··· 1376 #define CMD_QUE_XRI64_CX 0xB3 1377 #define CMD_IOCB_RCV_SEQ64_CX 0xB5 1378 #define CMD_IOCB_RCV_ELS64_CX 0xB7 1379 + #define CMD_IOCB_RET_XRI64_CX 0xB9 1380 #define CMD_IOCB_RCV_CONT64_CX 0xBB 1381 1382 #define CMD_GEN_REQUEST64_CR 0xC2 1383 #define CMD_GEN_REQUEST64_CX 0xC3 1384 + 1385 + /* Unhandled SLI-3 Commands */ 1386 + #define CMD_IOCB_XMIT_MSEQ64_CR 0xB0 1387 + #define CMD_IOCB_XMIT_MSEQ64_CX 0xB1 1388 + #define CMD_IOCB_RCV_SEQ_LIST64_CX 0xC1 1389 + #define CMD_IOCB_RCV_ELS_LIST64_CX 0xCD 1390 + #define CMD_IOCB_CLOSE_EXTENDED_CN 0xB6 1391 + #define CMD_IOCB_ABORT_EXTENDED_CN 0xBA 1392 + #define CMD_IOCB_RET_HBQE64_CN 0xCA 1393 + #define CMD_IOCB_FCP_IBIDIR64_CR 0xAC 1394 + #define CMD_IOCB_FCP_IBIDIR64_CX 0xAD 1395 + #define CMD_IOCB_FCP_ITASKMGT64_CX 0xAF 1396 + #define CMD_IOCB_LOGENTRY_CN 0x94 1397 + #define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96 1398 1399 #define CMD_MAX_IOCB_CMD 0xE6 1400 #define CMD_IOCB_MASK 0xff
+132 -22
drivers/scsi/lpfc/lpfc_init.c
··· 1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2007 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 461 int 462 lpfc_hba_down_prep(struct lpfc_hba *phba) 463 { 464 /* Disable interrupts */ 465 writel(0, phba->HCregaddr); 466 readl(phba->HCregaddr); /* flush */ 467 468 - lpfc_cleanup_discovery_resources(phba->pport); 469 return 0; 470 } 471 ··· 1432 lpfc_port_link_failure(vport); 1433 1434 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 1435 if (ndlp->nlp_type & NLP_FABRIC) 1436 lpfc_disc_state_machine(vport, ndlp, NULL, 1437 NLP_EVT_DEVICE_RECOVERY); 1438 lpfc_disc_state_machine(vport, ndlp, NULL, 1439 NLP_EVT_DEVICE_RM); 1440 } ··· 1471 if (i++ > 3000) { 1472 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1473 "0233 Nodelist not empty\n"); 1474 break; 1475 } 1476 ··· 1630 list_for_each_entry_safe(ndlp, next_ndlp, 1631 &vports[i]->fc_nodes, 1632 nlp_listp) { 1633 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 1634 continue; 1635 if (ndlp->nlp_type & NLP_FABRIC) { ··· 1741 1742 vport = (struct lpfc_vport *) shost->hostdata; 1743 vport->phba = phba; 1744 - 1745 vport->load_flag |= FC_LOADING; 1746 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 1747 1748 lpfc_get_vport_cfgparam(vport); 1749 shost->unique_id = instance; ··· 1925 spin_unlock_irq(shost->host_lock); 1926 } 1927 1928 static int __devinit 1929 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 1930 { ··· 1986 goto out_release_regions; 1987 1988 spin_lock_init(&phba->hbalock); 1989 1990 phba->pcidev = pdev; 1991 ··· 2087 2088 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 2089 2090 /* Initialize the SLI Layer to run with lpfc HBAs. */ 2091 lpfc_sli_setup(phba); 2092 lpfc_sli_queue_setup(phba); ··· 2164 lpfc_debugfs_initialize(vport); 2165 2166 pci_set_drvdata(pdev, shost); 2167 2168 - if (phba->cfg_use_msi) { 2169 retval = pci_enable_msi(phba->pcidev); 2170 if (!retval) 2171 - phba->using_msi = 1; 2172 else 2173 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2174 "0452 Enable MSI failed, continuing " 2175 "with IRQ\n"); 2176 } 2177 2178 - retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED, 2179 - LPFC_DRIVER_NAME, phba); 2180 - if (retval) { 2181 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2182 - "0451 Enable interrupt handler failed\n"); 2183 - error = retval; 2184 - goto out_disable_msi; 2185 } 2186 2187 phba->MBslimaddr = phba->slim_memmap_p; ··· 2238 out_free_irq: 2239 lpfc_stop_phba_timers(phba); 2240 phba->pport->work_port_events = 0; 2241 - free_irq(phba->pcidev->irq, phba); 2242 out_disable_msi: 2243 - if (phba->using_msi) 2244 pci_disable_msi(phba->pcidev); 2245 destroy_port(vport); 2246 out_kthread_stop: ··· 2318 2319 lpfc_debugfs_terminate(vport); 2320 2321 - /* Release the irq reservation */ 2322 - free_irq(phba->pcidev->irq, phba); 2323 - if (phba->using_msi) 2324 - pci_disable_msi(phba->pcidev); 2325 2326 pci_set_drvdata(pdev, NULL); 2327 scsi_host_put(shost); ··· 2383 pring = &psli->ring[psli->fcp_ring]; 2384 lpfc_sli_abort_iocb_ring(phba, pring); 2385 2386 - /* Release the irq reservation */ 2387 - free_irq(phba->pcidev->irq, phba); 2388 - if (phba->using_msi) 2389 - pci_disable_msi(phba->pcidev); 2390 2391 /* Request a slot reset. */ 2392 return PCI_ERS_RESULT_NEED_RESET;
··· 1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 + * Copyright (C) 2004-2008 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 461 int 462 lpfc_hba_down_prep(struct lpfc_hba *phba) 463 { 464 + struct lpfc_vport **vports; 465 + int i; 466 /* Disable interrupts */ 467 writel(0, phba->HCregaddr); 468 readl(phba->HCregaddr); /* flush */ 469 470 + if (phba->pport->load_flag & FC_UNLOADING) 471 + lpfc_cleanup_discovery_resources(phba->pport); 472 + else { 473 + vports = lpfc_create_vport_work_array(phba); 474 + if (vports != NULL) 475 + for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 476 + lpfc_cleanup_discovery_resources(vports[i]); 477 + lpfc_destroy_vport_work_array(phba, vports); 478 + } 479 return 0; 480 } 481 ··· 1422 lpfc_port_link_failure(vport); 1423 1424 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 1425 + if (!NLP_CHK_NODE_ACT(ndlp)) { 1426 + ndlp = lpfc_enable_node(vport, ndlp, 1427 + NLP_STE_UNUSED_NODE); 1428 + if (!ndlp) 1429 + continue; 1430 + spin_lock_irq(&phba->ndlp_lock); 1431 + NLP_SET_FREE_REQ(ndlp); 1432 + spin_unlock_irq(&phba->ndlp_lock); 1433 + /* Trigger the release of the ndlp memory */ 1434 + lpfc_nlp_put(ndlp); 1435 + continue; 1436 + } 1437 + spin_lock_irq(&phba->ndlp_lock); 1438 + if (NLP_CHK_FREE_REQ(ndlp)) { 1439 + /* The ndlp should not be in memory free mode already */ 1440 + spin_unlock_irq(&phba->ndlp_lock); 1441 + continue; 1442 + } else 1443 + /* Indicate request for freeing ndlp memory */ 1444 + NLP_SET_FREE_REQ(ndlp); 1445 + spin_unlock_irq(&phba->ndlp_lock); 1446 + 1447 if (ndlp->nlp_type & NLP_FABRIC) 1448 lpfc_disc_state_machine(vport, ndlp, NULL, 1449 NLP_EVT_DEVICE_RECOVERY); 1450 + 1451 lpfc_disc_state_machine(vport, ndlp, NULL, 1452 NLP_EVT_DEVICE_RM); 1453 } ··· 1438 if (i++ > 3000) { 1439 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1440 "0233 Nodelist not empty\n"); 1441 + list_for_each_entry_safe(ndlp, next_ndlp, 1442 + &vport->fc_nodes, nlp_listp) { 1443 + lpfc_printf_vlog(ndlp->vport, KERN_ERR, 1444 + LOG_NODE, 1445 + "0282: did:x%x ndlp:x%p " 1446 + "usgmap:x%x refcnt:%d\n", 1447 + ndlp->nlp_DID, (void *)ndlp, 1448 + ndlp->nlp_usg_map, 1449 + atomic_read( 1450 + &ndlp->kref.refcount)); 1451 + } 1452 break; 1453 } 1454 ··· 1586 list_for_each_entry_safe(ndlp, next_ndlp, 1587 &vports[i]->fc_nodes, 1588 nlp_listp) { 1589 + if (!NLP_CHK_NODE_ACT(ndlp)) 1590 + continue; 1591 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 1592 continue; 1593 if (ndlp->nlp_type & NLP_FABRIC) { ··· 1695 1696 vport = (struct lpfc_vport *) shost->hostdata; 1697 vport->phba = phba; 1698 vport->load_flag |= FC_LOADING; 1699 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 1700 + vport->fc_rscn_flush = 0; 1701 1702 lpfc_get_vport_cfgparam(vport); 1703 shost->unique_id = instance; ··· 1879 spin_unlock_irq(shost->host_lock); 1880 } 1881 1882 + static int 1883 + lpfc_enable_msix(struct lpfc_hba *phba) 1884 + { 1885 + int error; 1886 + 1887 + phba->msix_entries[0].entry = 0; 1888 + phba->msix_entries[0].vector = 0; 1889 + 1890 + error = pci_enable_msix(phba->pcidev, phba->msix_entries, 1891 + ARRAY_SIZE(phba->msix_entries)); 1892 + if (error) { 1893 + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1894 + "0420 Enable MSI-X failed (%d), continuing " 1895 + "with MSI\n", error); 1896 + pci_disable_msix(phba->pcidev); 
1897 + return error; 1898 + } 1899 + 1900 + error = request_irq(phba->msix_entries[0].vector, lpfc_intr_handler, 0, 1901 + LPFC_DRIVER_NAME, phba); 1902 + if (error) { 1903 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1904 + "0421 MSI-X request_irq failed (%d), " 1905 + "continuing with MSI\n", error); 1906 + pci_disable_msix(phba->pcidev); 1907 + } 1908 + return error; 1909 + } 1910 + 1911 + static void 1912 + lpfc_disable_msix(struct lpfc_hba *phba) 1913 + { 1914 + free_irq(phba->msix_entries[0].vector, phba); 1915 + pci_disable_msix(phba->pcidev); 1916 + } 1917 + 1918 static int __devinit 1919 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 1920 { ··· 1904 goto out_release_regions; 1905 1906 spin_lock_init(&phba->hbalock); 1907 + 1908 + /* Initialize ndlp management spinlock */ 1909 + spin_lock_init(&phba->ndlp_lock); 1910 1911 phba->pcidev = pdev; 1912 ··· 2002 2003 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 2004 2005 + INIT_LIST_HEAD(&phba->hbqbuf_in_list); 2006 + 2007 /* Initialize the SLI Layer to run with lpfc HBAs. */ 2008 lpfc_sli_setup(phba); 2009 lpfc_sli_queue_setup(phba); ··· 2077 lpfc_debugfs_initialize(vport); 2078 2079 pci_set_drvdata(pdev, shost); 2080 + phba->intr_type = NONE; 2081 2082 + if (phba->cfg_use_msi == 2) { 2083 + error = lpfc_enable_msix(phba); 2084 + if (!error) 2085 + phba->intr_type = MSIX; 2086 + } 2087 + 2088 + /* Fallback to MSI if MSI-X initialization failed */ 2089 + if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) { 2090 retval = pci_enable_msi(phba->pcidev); 2091 if (!retval) 2092 + phba->intr_type = MSI; 2093 else 2094 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2095 "0452 Enable MSI failed, continuing " 2096 "with IRQ\n"); 2097 } 2098 2099 + /* MSI-X is the only case that doesn't need to call request_irq */ 2100 + if (phba->intr_type != MSIX) { 2101 + retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, 2102 + IRQF_SHARED, LPFC_DRIVER_NAME, phba); 2103 + if (retval) { 2104 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0451 Enable " 2105 + "interrupt handler failed\n"); 2106 + error = retval; 2107 + goto out_disable_msi; 2108 + } else if (phba->intr_type != MSI) 2109 + phba->intr_type = INTx; 2110 } 2111 2112 phba->MBslimaddr = phba->slim_memmap_p; ··· 2139 out_free_irq: 2140 lpfc_stop_phba_timers(phba); 2141 phba->pport->work_port_events = 0; 2142 + 2143 + if (phba->intr_type == MSIX) 2144 + lpfc_disable_msix(phba); 2145 + else 2146 + free_irq(phba->pcidev->irq, phba); 2147 + 2148 out_disable_msi: 2149 + if (phba->intr_type == MSI) 2150 pci_disable_msi(phba->pcidev); 2151 destroy_port(vport); 2152 out_kthread_stop: ··· 2214 2215 lpfc_debugfs_terminate(vport); 2216 2217 + if (phba->intr_type == MSIX) 2218 + lpfc_disable_msix(phba); 2219 + else { 2220 + free_irq(phba->pcidev->irq, phba); 2221 + if (phba->intr_type == MSI) 2222 + pci_disable_msi(phba->pcidev); 2223 + } 2224 2225 pci_set_drvdata(pdev, NULL); 2226 scsi_host_put(shost); ··· 2276 pring = &psli->ring[psli->fcp_ring]; 2277 lpfc_sli_abort_iocb_ring(phba, pring); 2278 2279 + if (phba->intr_type == MSIX) 2280 + lpfc_disable_msix(phba); 2281 + else { 2282 + free_irq(phba->pcidev->irq, phba); 2283 + if (phba->intr_type == MSI) 2284 + pci_disable_msi(phba->pcidev); 2285 + } 2286 2287 /* Request a slot reset. */ 2288 return PCI_ERS_RESULT_NEED_RESET;
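Editor's note: the probe-path changes above implement a three-step interrupt ladder gated by cfg_use_msi (2 = try MSI-X first, 1 = try MSI, otherwise legacy INTx), recording the winner in intr_type so each teardown path frees the matching resource. A condensed sketch of the same ladder for a single vector follows; setup_irq_any() and the "drv" name are illustrative, while the PCI and IRQ calls are the standard 2.6-era kernel APIs.

#include <linux/pci.h>
#include <linux/interrupt.h>

/* Try MSI-X, then MSI, then shared INTx; returns 0 once an IRQ is wired up. */
static int setup_irq_any(struct pci_dev *pdev, struct msix_entry *entry,
			 irq_handler_t handler, void *data)
{
	entry->entry = 0;
	if (!pci_enable_msix(pdev, entry, 1)) {
		if (!request_irq(entry->vector, handler, 0, "drv", data))
			return 0;		/* running on MSI-X */
		pci_disable_msix(pdev);
	}
	if (!pci_enable_msi(pdev)) {
		if (!request_irq(pdev->irq, handler, 0, "drv", data))
			return 0;		/* running on MSI */
		pci_disable_msi(pdev);
	}
	/* last resort: legacy shared interrupt line */
	return request_irq(pdev->irq, handler, IRQF_SHARED, "drv", data);
}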
+7 -3
drivers/scsi/lpfc/lpfc_logmsg.h
··· 1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2005 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * * ··· 35 #define LOG_ALL_MSG 0xffff /* LOG all messages */ 36 37 #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ 38 { if (((mask) &(vport)->cfg_log_verbose) || (level[1] <= '3')) \ 39 dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \ 40 - fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } 41 42 #define lpfc_printf_log(phba, level, mask, fmt, arg...) \ 43 { if (((mask) &(phba)->pport->cfg_log_verbose) || (level[1] <= '3')) \ 44 dev_printk(level, &((phba)->pcidev)->dev, "%d:" \ 45 - fmt, phba->brd_no, ##arg); }
··· 1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 + * Copyright (C) 2004-2008 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * * ··· 35 #define LOG_ALL_MSG 0xffff /* LOG all messages */ 36 37 #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ 38 + do { \ 39 { if (((mask) &(vport)->cfg_log_verbose) || (level[1] <= '3')) \ 40 dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \ 41 + fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \ 42 + } while (0) 43 44 #define lpfc_printf_log(phba, level, mask, fmt, arg...) \ 45 + do { \ 46 { if (((mask) &(phba)->pport->cfg_log_verbose) || (level[1] <= '3')) \ 47 dev_printk(level, &((phba)->pcidev)->dev, "%d:" \ 48 + fmt, phba->brd_no, ##arg); } \ 49 + } while (0)
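Editor's note: the do { ... } while (0) wrappers added above matter because each macro expands to a bare if statement; used under an if/else, the trailing semicolon either breaks the build or leaves the else bound to the macro's hidden if. A tiny illustration, not driver code:

#include <linux/kernel.h>

#define LOG_BAD(cond, msg)	if (cond) printk(msg)			/* fragile */
#define LOG_OK(cond, msg)	do { if (cond) printk(msg); } while (0)

static void demo(int err)
{
	/* With LOG_BAD here, the ";" after the macro terminates its hidden
	 * if-statement, so the "else" below no longer parses (and without
	 * the semicolon it would bind to the wrong if).  LOG_OK expands to
	 * exactly one statement, so the if/else reads as written.
	 */
	if (err)
		LOG_OK(err < 0, "negative error\n");
	else
		printk("success\n");
}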
+12 -1
drivers/scsi/lpfc/lpfc_mem.c
··· 264 lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) 265 { 266 struct hbq_dmabuf *hbq_entry; 267 268 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 269 hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf); 270 if (hbq_entry->tag == -1) { 271 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) 272 (phba, hbq_entry); 273 } else { 274 lpfc_sli_free_hbq(phba, hbq_entry); 275 } 276 } else { 277 lpfc_mbuf_free(phba, mp->virt, mp->phys); 278 kfree(mp); 279 } 280 return; 281 } 282 -
··· 264 lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) 265 { 266 struct hbq_dmabuf *hbq_entry; 267 + unsigned long flags; 268 + 269 + if (!mp) 270 + return; 271 272 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 273 + /* Check whether HBQ is still in use */ 274 + spin_lock_irqsave(&phba->hbalock, flags); 275 + if (!phba->hbq_in_use) { 276 + spin_unlock_irqrestore(&phba->hbalock, flags); 277 + return; 278 + } 279 hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf); 280 + list_del(&hbq_entry->dbuf.list); 281 if (hbq_entry->tag == -1) { 282 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) 283 (phba, hbq_entry); 284 } else { 285 lpfc_sli_free_hbq(phba, hbq_entry); 286 } 287 + spin_unlock_irqrestore(&phba->hbalock, flags); 288 } else { 289 lpfc_mbuf_free(phba, mp->virt, mp->phys); 290 kfree(mp); 291 } 292 return; 293 }
+38 -17
drivers/scsi/lpfc/lpfc_nportdisc.c
··· 1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2007 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 249 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 250 struct lpfc_hba *phba = vport->phba; 251 struct lpfc_dmabuf *pcmd; 252 uint32_t *lp; 253 IOCB_t *icmd; 254 struct serv_parm *sp; ··· 436 del_timer_sync(&ndlp->nlp_delayfunc); 437 ndlp->nlp_last_elscmd = 0; 438 439 - if (!list_empty(&ndlp->els_retry_evt.evt_listp)) 440 list_del_init(&ndlp->els_retry_evt.evt_listp); 441 442 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { 443 spin_lock_irq(shost->host_lock); ··· 645 return 0; 646 } 647 648 - /* Check config parameter use-adisc or FCP-2 */ 649 - if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) || 650 - ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { 651 - spin_lock_irq(shost->host_lock); 652 - ndlp->nlp_flag |= NLP_NPR_ADISC; 653 - spin_unlock_irq(shost->host_lock); 654 - return 1; 655 } 656 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 657 lpfc_unreg_rpi(vport, ndlp); ··· 665 void *arg, uint32_t evt) 666 { 667 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 668 - "0253 Illegal State Transition: node x%x " 669 "event x%x, state x%x Data: x%x x%x\n", 670 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, 671 ndlp->nlp_flag); ··· 683 */ 684 if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) { 685 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 686 - "0253 Illegal State Transition: node x%x " 687 "event x%x, state x%x Data: x%x x%x\n", 688 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, 689 ndlp->nlp_flag); ··· 2153 uint32_t cur_state, rc; 2154 uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *, 2155 uint32_t); 2156 2157 - lpfc_nlp_get(ndlp); 2158 cur_state = ndlp->nlp_state; 2159 2160 /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */ ··· 2174 rc = (func) (vport, ndlp, arg, evt); 2175 2176 /* DSM out state <rc> on NPort <nlp_DID> */ 2177 - lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2178 "0212 DSM out state %d on NPort x%x Data: x%x\n", 2179 rc, ndlp->nlp_DID, ndlp->nlp_flag); 2180 2181 - lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, 2182 - "DSM out: ste:%d did:x%x flg:x%x", 2183 - rc, ndlp->nlp_DID, ndlp->nlp_flag); 2184 2185 - lpfc_nlp_put(ndlp); 2186 2187 return rc; 2188 }
··· 1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 + * Copyright (C) 2004-2008 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 249 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 250 struct lpfc_hba *phba = vport->phba; 251 struct lpfc_dmabuf *pcmd; 252 + struct lpfc_work_evt *evtp; 253 uint32_t *lp; 254 IOCB_t *icmd; 255 struct serv_parm *sp; ··· 435 del_timer_sync(&ndlp->nlp_delayfunc); 436 ndlp->nlp_last_elscmd = 0; 437 438 + if (!list_empty(&ndlp->els_retry_evt.evt_listp)) { 439 list_del_init(&ndlp->els_retry_evt.evt_listp); 440 + /* Decrement ndlp reference count held for the 441 + * delayed retry 442 + */ 443 + evtp = &ndlp->els_retry_evt; 444 + lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); 445 + } 446 447 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { 448 spin_lock_irq(shost->host_lock); ··· 638 return 0; 639 } 640 641 + if (!(vport->fc_flag & FC_PT2PT)) { 642 + /* Check config parameter use-adisc or FCP-2 */ 643 + if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) || 644 + ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { 645 + spin_lock_irq(shost->host_lock); 646 + ndlp->nlp_flag |= NLP_NPR_ADISC; 647 + spin_unlock_irq(shost->host_lock); 648 + return 1; 649 + } 650 } 651 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 652 lpfc_unreg_rpi(vport, ndlp); ··· 656 void *arg, uint32_t evt) 657 { 658 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 659 + "0271 Illegal State Transition: node x%x " 660 "event x%x, state x%x Data: x%x x%x\n", 661 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, 662 ndlp->nlp_flag); ··· 674 */ 675 if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) { 676 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 677 + "0272 Illegal State Transition: node x%x " 678 "event x%x, state x%x Data: x%x x%x\n", 679 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, 680 ndlp->nlp_flag); ··· 2144 uint32_t cur_state, rc; 2145 uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *, 2146 uint32_t); 2147 + uint32_t got_ndlp = 0; 2148 2149 + if (lpfc_nlp_get(ndlp)) 2150 + got_ndlp = 1; 2151 + 2152 cur_state = ndlp->nlp_state; 2153 2154 /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */ ··· 2162 rc = (func) (vport, ndlp, arg, evt); 2163 2164 /* DSM out state <rc> on NPort <nlp_DID> */ 2165 + if (got_ndlp) { 2166 + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2167 "0212 DSM out state %d on NPort x%x Data: x%x\n", 2168 rc, ndlp->nlp_DID, ndlp->nlp_flag); 2169 2170 + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, 2171 + "DSM out: ste:%d did:x%x flg:x%x", 2172 + rc, ndlp->nlp_DID, ndlp->nlp_flag); 2173 + /* Decrement the ndlp reference count held for this function */ 2174 + lpfc_nlp_put(ndlp); 2175 + } else { 2176 + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2177 + "0212 DSM out state %d on NPort free\n", rc); 2178 2179 + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, 2180 + "DSM out: ste:%d did:x%x flg:x%x", 2181 + rc, 0, 0); 2182 + } 2183 2184 return rc; 2185 }
+3 -1
drivers/scsi/lpfc/lpfc_scsi.c
··· 1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2007 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 1283 match = 0; 1284 spin_lock_irq(shost->host_lock); 1285 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 1286 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && 1287 i == ndlp->nlp_sid && 1288 ndlp->rport) {
··· 1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 + * Copyright (C) 2004-2008 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 1283 match = 0; 1284 spin_lock_irq(shost->host_lock); 1285 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 1286 + if (!NLP_CHK_NODE_ACT(ndlp)) 1287 + continue; 1288 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && 1289 i == ndlp->nlp_sid && 1290 ndlp->rport) {
+98 -7
drivers/scsi/lpfc/lpfc_sli.c
··· 1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2007 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 203 case CMD_IOCB_RCV_SEQ64_CX: 204 case CMD_IOCB_RCV_ELS64_CX: 205 case CMD_IOCB_RCV_CONT64_CX: 206 type = LPFC_UNSOL_IOCB; 207 break; 208 default: 209 type = LPFC_UNKNOWN_IOCB; ··· 546 { 547 struct lpfc_dmabuf *dmabuf, *next_dmabuf; 548 struct hbq_dmabuf *hbq_buf; 549 int i, hbq_count; 550 551 hbq_count = lpfc_sli_hbq_count(); 552 /* Return all memory used by all HBQs */ 553 for (i = 0; i < hbq_count; ++i) { 554 list_for_each_entry_safe(dmabuf, next_dmabuf, 555 &phba->hbqs[i].hbq_buffer_list, list) { ··· 562 } 563 phba->hbqs[i].buffer_count = 0; 564 } 565 } 566 567 static struct lpfc_hbq_entry * ··· 645 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) 646 { 647 uint32_t i, start, end; 648 struct hbq_dmabuf *hbq_buffer; 649 650 if (!phba->hbqs[hbqno].hbq_alloc_buffer) { ··· 656 end = count + start; 657 if (end > lpfc_hbq_defs[hbqno]->entry_count) { 658 end = lpfc_hbq_defs[hbqno]->entry_count; 659 } 660 661 /* Populate HBQ entries */ ··· 676 else 677 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 678 } 679 return 0; 680 } 681 ··· 962 uint32_t hbqno; 963 void *virt; /* virtual address ptr */ 964 dma_addr_t phys; /* mapped address */ 965 966 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); 967 - if (hbq_entry == NULL) 968 return NULL; 969 list_del(&hbq_entry->dbuf.list); 970 971 hbqno = tag >> 16; 972 new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 973 - if (new_hbq_entry == NULL) 974 return &hbq_entry->dbuf; 975 new_hbq_entry->tag = -1; 976 phys = new_hbq_entry->dbuf.phys; 977 virt = new_hbq_entry->dbuf.virt; ··· 993 hbq_entry->dbuf.phys = phys; 994 hbq_entry->dbuf.virt = virt; 995 lpfc_sli_free_hbq(phba, hbq_entry); 996 return &new_hbq_entry->dbuf; 997 } 998 ··· 1019 uint32_t Rctl, Type; 1020 uint32_t match, i; 1021 struct lpfc_iocbq *iocbq; 1022 1023 match = 0; 1024 irsp = &(saveq->iocb); ··· 1038 "0x%x\n", 1039 pring->ringno, 1040 irsp->un.asyncstat.evt_code); 1041 return 1; 1042 } 1043 ··· 2385 2386 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 2387 phba->link_state = LPFC_INIT_MBX_CMDS; 2388 2389 hbq_entry_index = 0; 2390 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { ··· 2497 if ((pmb->mb.un.varCfgPort.sli_mode == 3) && 2498 (!pmb->mb.un.varCfgPort.cMA)) { 2499 rc = -ENXIO; 2500 - goto do_prep_failed; 2501 } 2502 - return rc; 2503 2504 do_prep_failed: 2505 mempool_free(pmb, phba->mbox_mem_pool); ··· 2716 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2717 2718 /* Mbox command <mbxCommand> cannot issue */ 2719 - LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) 2720 return MBX_NOT_FINISHED; 2721 } 2722 2723 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && 2724 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { 2725 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2726 - LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) 2727 return MBX_NOT_FINISHED; 2728 } 2729
··· 1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 + * Copyright (C) 2004-2008 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 203 case CMD_IOCB_RCV_SEQ64_CX: 204 case CMD_IOCB_RCV_ELS64_CX: 205 case CMD_IOCB_RCV_CONT64_CX: 206 + case CMD_IOCB_RET_XRI64_CX: 207 type = LPFC_UNSOL_IOCB; 208 + break; 209 + case CMD_IOCB_XMIT_MSEQ64_CR: 210 + case CMD_IOCB_XMIT_MSEQ64_CX: 211 + case CMD_IOCB_RCV_SEQ_LIST64_CX: 212 + case CMD_IOCB_RCV_ELS_LIST64_CX: 213 + case CMD_IOCB_CLOSE_EXTENDED_CN: 214 + case CMD_IOCB_ABORT_EXTENDED_CN: 215 + case CMD_IOCB_RET_HBQE64_CN: 216 + case CMD_IOCB_FCP_IBIDIR64_CR: 217 + case CMD_IOCB_FCP_IBIDIR64_CX: 218 + case CMD_IOCB_FCP_ITASKMGT64_CX: 219 + case CMD_IOCB_LOGENTRY_CN: 220 + case CMD_IOCB_LOGENTRY_ASYNC_CN: 221 + printk("%s - Unhandled SLI-3 Command x%x\n", 222 + __FUNCTION__, iocb_cmnd); 223 + type = LPFC_UNKNOWN_IOCB; 224 break; 225 default: 226 type = LPFC_UNKNOWN_IOCB; ··· 529 { 530 struct lpfc_dmabuf *dmabuf, *next_dmabuf; 531 struct hbq_dmabuf *hbq_buf; 532 + unsigned long flags; 533 int i, hbq_count; 534 + uint32_t hbqno; 535 536 hbq_count = lpfc_sli_hbq_count(); 537 /* Return all memory used by all HBQs */ 538 + spin_lock_irqsave(&phba->hbalock, flags); 539 for (i = 0; i < hbq_count; ++i) { 540 list_for_each_entry_safe(dmabuf, next_dmabuf, 541 &phba->hbqs[i].hbq_buffer_list, list) { ··· 542 } 543 phba->hbqs[i].buffer_count = 0; 544 } 545 + /* Return all HBQ buffers that are in flight */ 546 + list_for_each_entry_safe(dmabuf, next_dmabuf, 547 + &phba->hbqbuf_in_list, list) { 548 + hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 549 + list_del(&hbq_buf->dbuf.list); 550 + if (hbq_buf->tag == -1) { 551 + (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) 552 + (phba, hbq_buf); 553 + } else { 554 + hbqno = hbq_buf->tag >> 16; 555 + if (hbqno >= LPFC_MAX_HBQS) 556 + (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) 557 + (phba, hbq_buf); 558 + else 559 + (phba->hbqs[hbqno].hbq_free_buffer)(phba, 560 + hbq_buf); 561 + } 562 + } 563 + 564 + /* Mark the HBQs not in use */ 565 + phba->hbq_in_use = 0; 566 + spin_unlock_irqrestore(&phba->hbalock, flags); 567 } 568 569 static struct lpfc_hbq_entry * ··· 603 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) 604 { 605 uint32_t i, start, end; 606 + unsigned long flags; 607 struct hbq_dmabuf *hbq_buffer; 608 609 if (!phba->hbqs[hbqno].hbq_alloc_buffer) { ··· 613 end = count + start; 614 if (end > lpfc_hbq_defs[hbqno]->entry_count) { 615 end = lpfc_hbq_defs[hbqno]->entry_count; 616 + } 617 + 618 + /* Check whether HBQ is still in use */ 619 + spin_lock_irqsave(&phba->hbalock, flags); 620 + if (!phba->hbq_in_use) { 621 + spin_unlock_irqrestore(&phba->hbalock, flags); 622 + return 0; 623 } 624 625 /* Populate HBQ entries */ ··· 626 else 627 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 628 } 629 + 630 + spin_unlock_irqrestore(&phba->hbalock, flags); 631 return 0; 632 } 633 ··· 910 uint32_t hbqno; 911 void *virt; /* virtual address ptr */ 912 dma_addr_t phys; /* mapped address */ 913 + unsigned long flags; 914 + 915 + /* Check whether HBQ is still in use */ 916 + spin_lock_irqsave(&phba->hbalock, flags); 917 + if (!phba->hbq_in_use) { 918 + spin_unlock_irqrestore(&phba->hbalock, flags); 919 + return NULL; 920 + } 921 922 hbq_entry = lpfc_sli_hbqbuf_find(phba,
tag); 923 + if (hbq_entry == NULL) { 924 + spin_unlock_irqrestore(&phba->hbalock, flags); 925 return NULL; 926 + } 927 list_del(&hbq_entry->dbuf.list); 928 929 hbqno = tag >> 16; 930 new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 931 + if (new_hbq_entry == NULL) { 932 + list_add_tail(&hbq_entry->dbuf.list, &phba->hbqbuf_in_list); 933 + spin_unlock_irqrestore(&phba->hbalock, flags); 934 return &hbq_entry->dbuf; 935 + } 936 new_hbq_entry->tag = -1; 937 phys = new_hbq_entry->dbuf.phys; 938 virt = new_hbq_entry->dbuf.virt; ··· 928 hbq_entry->dbuf.phys = phys; 929 hbq_entry->dbuf.virt = virt; 930 lpfc_sli_free_hbq(phba, hbq_entry); 931 + list_add_tail(&new_hbq_entry->dbuf.list, &phba->hbqbuf_in_list); 932 + spin_unlock_irqrestore(&phba->hbalock, flags); 933 + 934 return &new_hbq_entry->dbuf; 935 } 936 ··· 951 uint32_t Rctl, Type; 952 uint32_t match, i; 953 struct lpfc_iocbq *iocbq; 954 + struct lpfc_dmabuf *dmzbuf; 955 956 match = 0; 957 irsp = &(saveq->iocb); ··· 969 "0x%x\n", 970 pring->ringno, 971 irsp->un.asyncstat.evt_code); 972 + return 1; 973 + } 974 + 975 + if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) && 976 + (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { 977 + if (irsp->ulpBdeCount > 0) { 978 + dmzbuf = lpfc_sli_get_buff(phba, pring, 979 + irsp->un.ulpWord[3]); 980 + lpfc_in_buf_free(phba, dmzbuf); 981 + } 982 + 983 + if (irsp->ulpBdeCount > 1) { 984 + dmzbuf = lpfc_sli_get_buff(phba, pring, 985 + irsp->unsli3.sli3Words[3]); 986 + lpfc_in_buf_free(phba, dmzbuf); 987 + } 988 + 989 + if (irsp->ulpBdeCount > 2) { 990 + dmzbuf = lpfc_sli_get_buff(phba, pring, 991 + irsp->unsli3.sli3Words[7]); 992 + lpfc_in_buf_free(phba, dmzbuf); 993 + } 994 + 995 return 1; 996 } 997 ··· 2293 2294 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 2295 phba->link_state = LPFC_INIT_MBX_CMDS; 2296 + phba->hbq_in_use = 1; 2297 2298 hbq_entry_index = 0; 2299 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { ··· 2404 if ((pmb->mb.un.varCfgPort.sli_mode == 3) && 2405 (!pmb->mb.un.varCfgPort.cMA)) { 2406 rc = -ENXIO; 2407 } 2408 2409 do_prep_failed: 2410 mempool_free(pmb, phba->mbox_mem_pool); ··· 2625 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2626 2627 /* Mbox command <mbxCommand> cannot issue */ 2628 + LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 2629 return MBX_NOT_FINISHED; 2630 } 2631 2632 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && 2633 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { 2634 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2635 + LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 2636 return MBX_NOT_FINISHED; 2637 } 2638
+1 -1
drivers/scsi/lpfc/lpfc_version.h
··· 18 * included with this package. * 19 *******************************************************************/ 20 21 - #define LPFC_DRIVER_VERSION "8.2.4" 22 23 #define LPFC_DRIVER_NAME "lpfc" 24
··· 18 * included with this package. * 19 *******************************************************************/ 20 21 + #define LPFC_DRIVER_VERSION "8.2.5" 22 23 #define LPFC_DRIVER_NAME "lpfc" 24
+64 -6
drivers/scsi/lpfc/lpfc_vport.c
··· 1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2006 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 327 * up and ready to FDISC. 328 */ 329 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 330 - if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 331 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { 332 lpfc_set_disctmo(vport); 333 lpfc_initial_fdisc(vport); ··· 359 long timeout; 360 361 ndlp = lpfc_findnode_did(vport, Fabric_DID); 362 - if (ndlp && phba->link_state >= LPFC_LINK_UP) { 363 vport->unreg_vpi_cmpl = VPORT_INVAL; 364 timeout = msecs_to_jiffies(phba->fc_ratov * 2000); 365 if (!lpfc_issue_els_npiv_logo(vport, ndlp)) ··· 374 * calling lpfc_cleanup_rpis(vport, 1) 375 */ 376 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 377 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 378 continue; 379 lpfc_disc_state_machine(vport, ndlp, NULL, ··· 418 * up and ready to FDISC. 419 */ 420 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 421 - if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 422 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { 423 lpfc_set_disctmo(vport); 424 lpfc_initial_fdisc(vport); ··· 503 scsi_remove_host(lpfc_shost_from_vport(vport)); 504 505 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 506 - if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && 507 phba->link_state >= LPFC_LINK_UP) { 508 if (vport->cfg_enable_da_id) { 509 timeout = msecs_to_jiffies(phba->fc_ratov * 2000); ··· 558 if (!ndlp) 559 goto skip_logo; 560 lpfc_nlp_init(vport, ndlp, Fabric_DID); 561 } else { 562 lpfc_dequeue_node(vport, ndlp); 563 } 564 vport->unreg_vpi_cmpl = VPORT_INVAL; 565 timeout = msecs_to_jiffies(phba->fc_ratov * 2000); ··· 592 lpfc_sli_host_down(vport); 593 594 lpfc_stop_vport_timers(vport); 595 - lpfc_unreg_all_rpis(vport); 596 597 if (!(phba->pport->load_flag & FC_UNLOADING)) { 598 lpfc_unreg_default_rpis(vport); 599 /* 600 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi)
··· 1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 + * Copyright (C) 2004-2008 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 327 * up and ready to FDISC. 328 */ 329 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 330 + if (ndlp && NLP_CHK_NODE_ACT(ndlp) && 331 + ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 332 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { 333 lpfc_set_disctmo(vport); 334 lpfc_initial_fdisc(vport); ··· 358 long timeout; 359 360 ndlp = lpfc_findnode_did(vport, Fabric_DID); 361 + if (ndlp && NLP_CHK_NODE_ACT(ndlp) 362 + && phba->link_state >= LPFC_LINK_UP) { 363 vport->unreg_vpi_cmpl = VPORT_INVAL; 364 timeout = msecs_to_jiffies(phba->fc_ratov * 2000); 365 if (!lpfc_issue_els_npiv_logo(vport, ndlp)) ··· 372 * calling lpfc_cleanup_rpis(vport, 1) 373 */ 374 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 375 + if (!NLP_CHK_NODE_ACT(ndlp)) 376 + continue; 377 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 378 continue; 379 lpfc_disc_state_machine(vport, ndlp, NULL, ··· 414 * up and ready to FDISC. 415 */ 416 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 417 + if (ndlp && NLP_CHK_NODE_ACT(ndlp) 418 + && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 419 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { 420 lpfc_set_disctmo(vport); 421 lpfc_initial_fdisc(vport); ··· 498 scsi_remove_host(lpfc_shost_from_vport(vport)); 499 500 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 501 + 502 + /* In case of driver unload, we shall not perform fabric logo as the 503 + * worker thread already stopped at this stage and, in this case, we 504 + * can safely skip the fabric logo. 
505 + */ 506 + if (phba->pport->load_flag & FC_UNLOADING) { 507 + if (ndlp && NLP_CHK_NODE_ACT(ndlp) && 508 + ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && 509 + phba->link_state >= LPFC_LINK_UP) { 510 + /* First look for the Fabric ndlp */ 511 + ndlp = lpfc_findnode_did(vport, Fabric_DID); 512 + if (!ndlp) 513 + goto skip_logo; 514 + else if (!NLP_CHK_NODE_ACT(ndlp)) { 515 + ndlp = lpfc_enable_node(vport, ndlp, 516 + NLP_STE_UNUSED_NODE); 517 + if (!ndlp) 518 + goto skip_logo; 519 + } 520 + /* Remove ndlp from vport node list */ 521 + lpfc_dequeue_node(vport, ndlp); 522 + 523 + /* Indicate free memory when released */ 524 + spin_lock_irq(&phba->ndlp_lock); 525 + NLP_SET_FREE_REQ(ndlp); 526 + spin_unlock_irq(&phba->ndlp_lock); 527 + /* Kick off release ndlp when it can be safely done */ 528 + lpfc_nlp_put(ndlp); 529 + } 530 + goto skip_logo; 531 + } 532 + 533 + /* Otherwise, we will perform fabric logo as needed */ 534 + if (ndlp && NLP_CHK_NODE_ACT(ndlp) && 535 + ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && 536 phba->link_state >= LPFC_LINK_UP) { 537 if (vport->cfg_enable_da_id) { 538 timeout = msecs_to_jiffies(phba->fc_ratov * 2000); ··· 519 if (!ndlp) 520 goto skip_logo; 521 lpfc_nlp_init(vport, ndlp, Fabric_DID); 522 + /* Indicate free memory when released */ 523 + NLP_SET_FREE_REQ(ndlp); 524 } else { 525 + if (!NLP_CHK_NODE_ACT(ndlp)) 526 + ndlp = lpfc_enable_node(vport, ndlp, 527 + NLP_STE_UNUSED_NODE); 528 + if (!ndlp) 529 + goto skip_logo; 530 + 531 + /* Remove ndlp from vport node list */ 532 lpfc_dequeue_node(vport, ndlp); 533 + spin_lock_irq(&phba->ndlp_lock); 534 + if (!NLP_CHK_FREE_REQ(ndlp)) 535 + /* Indicate free memory when released */ 536 + NLP_SET_FREE_REQ(ndlp); 537 + else { 538 + /* Skip this if ndlp is already in free mode */ 539 + spin_unlock_irq(&phba->ndlp_lock); 540 + goto skip_logo; 541 + } 542 + spin_unlock_irq(&phba->ndlp_lock); 543 } 544 vport->unreg_vpi_cmpl = VPORT_INVAL; 545 timeout = msecs_to_jiffies(phba->fc_ratov * 2000); ··· 534 lpfc_sli_host_down(vport); 535 536 lpfc_stop_vport_timers(vport); 537 538 if (!(phba->pport->load_flag & FC_UNLOADING)) { 539 + lpfc_unreg_all_rpis(vport); 540 lpfc_unreg_default_rpis(vport); 541 /* 542 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi)
+13 -7
drivers/scsi/megaraid/megaraid_mm.c
··· 59 EXPORT_SYMBOL(mraid_mm_unregister_adp); 60 EXPORT_SYMBOL(mraid_mm_adapter_app_handle); 61 62 - static int majorno; 63 static uint32_t drvr_ver = 0x02200207; 64 65 static int adapters_count_g; ··· 73 .compat_ioctl = mraid_mm_compat_ioctl, 74 #endif 75 .owner = THIS_MODULE, 76 }; 77 78 /** ··· 1189 static int __init 1190 mraid_mm_init(void) 1191 { 1192 // Announce the driver version 1193 con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n", 1194 LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION)); 1195 1196 - majorno = register_chrdev(0, "megadev", &lsi_fops); 1197 - 1198 - if (majorno < 0) { 1199 - con_log(CL_ANN, ("megaraid cmm: cannot get major\n")); 1200 - return majorno; 1201 } 1202 1203 init_waitqueue_head(&wait_q); ··· 1236 { 1237 con_log(CL_DLEVEL1 , ("exiting common mod\n")); 1238 1239 - unregister_chrdev(majorno, "megadev"); 1240 } 1241 1242 module_init(mraid_mm_init);
··· 59 EXPORT_SYMBOL(mraid_mm_unregister_adp); 60 EXPORT_SYMBOL(mraid_mm_adapter_app_handle); 61 62 static uint32_t drvr_ver = 0x02200207; 63 64 static int adapters_count_g; ··· 74 .compat_ioctl = mraid_mm_compat_ioctl, 75 #endif 76 .owner = THIS_MODULE, 77 + }; 78 + 79 + static struct miscdevice megaraid_mm_dev = { 80 + .minor = MISC_DYNAMIC_MINOR, 81 + .name = "megadev0", 82 + .fops = &lsi_fops, 83 }; 84 85 /** ··· 1184 static int __init 1185 mraid_mm_init(void) 1186 { 1187 + int err; 1188 + 1189 // Announce the driver version 1190 con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n", 1191 LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION)); 1192 1193 + err = misc_register(&megaraid_mm_dev); 1194 + if (err < 0) { 1195 + con_log(CL_ANN, ("megaraid cmm: cannot register misc device\n")); 1196 + return err; 1197 } 1198 1199 init_waitqueue_head(&wait_q); ··· 1230 { 1231 con_log(CL_DLEVEL1 , ("exiting common mod\n")); 1232 1233 + misc_deregister(&megaraid_mm_dev); 1234 } 1235 1236 module_init(mraid_mm_init);
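Editor's note: switching from register_chrdev() to a miscdevice means the driver no longer occupies an entire character-device major; the node now lives under the misc major (10) with a dynamically assigned minor, and udev can create /dev/megadev0 from the registered name. A hypothetical userspace check of the result (the path assumes the usual udev naming):

#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

int main(void)
{
	struct stat st;

	/* "megadev0" matches the .name field registered above */
	if (stat("/dev/megadev0", &st) == 0 && S_ISCHR(st.st_mode))
		printf("megadev0: major %u, minor %u\n",
		       major(st.st_rdev), minor(st.st_rdev));
	return 0;
}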
+1
drivers/scsi/megaraid/megaraid_mm.h
··· 22 #include <linux/moduleparam.h> 23 #include <linux/pci.h> 24 #include <linux/list.h> 25 26 #include "mbox_defs.h" 27 #include "megaraid_ioctl.h"
··· 22 #include <linux/moduleparam.h> 23 #include <linux/pci.h> 24 #include <linux/list.h> 25 + #include <linux/miscdevice.h> 26 27 #include "mbox_defs.h" 28 #include "megaraid_ioctl.h"
+14 -9
drivers/scsi/ses.c
··· 416 int i, j, types, len, components = 0; 417 int err = -ENOMEM; 418 struct enclosure_device *edev; 419 - struct ses_component *scomp; 420 421 if (!scsi_device_enclosure(sdev)) { 422 /* not an enclosure, but might be in one */ 423 - edev = enclosure_find(&sdev->host->shost_gendev); 424 if (edev) { 425 ses_match_to_enclosure(edev, sdev); 426 class_device_put(&edev->cdev); ··· 456 if (!buf) 457 goto err_free; 458 459 - ses_dev->page1 = buf; 460 - ses_dev->page1_len = len; 461 - 462 result = ses_recv_diag(sdev, 1, buf, len); 463 if (result) 464 goto recv_failed; ··· 470 type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) 471 components += type_ptr[1]; 472 } 473 474 result = ses_recv_diag(sdev, 2, hdr_buf, INIT_ALLOC_SIZE); 475 if (result) ··· 489 goto recv_failed; 490 ses_dev->page2 = buf; 491 ses_dev->page2_len = len; 492 493 /* The additional information page --- allows us 494 * to match up the devices */ ··· 507 goto recv_failed; 508 ses_dev->page10 = buf; 509 ses_dev->page10_len = len; 510 511 no_page10: 512 - scomp = kmalloc(sizeof(struct ses_component) * components, GFP_KERNEL); 513 if (!scomp) 514 - goto err_free; 515 516 edev = enclosure_register(cdev->dev, sdev->sdev_gendev.bus_id, 517 components, &ses_enclosure_callbacks); ··· 523 524 edev->scratch = ses_dev; 525 for (i = 0; i < components; i++) 526 - edev->component[i].scratch = scomp++; 527 528 /* Page 7 for the descriptors is optional */ 529 - buf = NULL; 530 result = ses_recv_diag(sdev, 7, hdr_buf, INIT_ALLOC_SIZE); 531 if (result) 532 goto simple_populate; ··· 533 len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; 534 /* add 1 for trailing '\0' we'll use */ 535 buf = kzalloc(len + 1, GFP_KERNEL); 536 result = ses_recv_diag(sdev, 7, buf, len); 537 if (result) { 538 simple_populate: ··· 601 err = -ENODEV; 602 err_free: 603 kfree(buf); 604 kfree(ses_dev->page10); 605 kfree(ses_dev->page2); 606 kfree(ses_dev->page1); ··· 634 ses_dev = edev->scratch; 635 edev->scratch = NULL; 636 637 kfree(ses_dev->page1); 638 kfree(ses_dev->page2); 639 kfree(ses_dev);
··· 416 int i, j, types, len, components = 0; 417 int err = -ENOMEM; 418 struct enclosure_device *edev; 419 + struct ses_component *scomp = NULL; 420 421 if (!scsi_device_enclosure(sdev)) { 422 /* not an enclosure, but might be in one */ 423 + edev = enclosure_find(&sdev->host->shost_gendev); 424 if (edev) { 425 ses_match_to_enclosure(edev, sdev); 426 class_device_put(&edev->cdev); ··· 456 if (!buf) 457 goto err_free; 458 459 result = ses_recv_diag(sdev, 1, buf, len); 460 if (result) 461 goto recv_failed; ··· 473 type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) 474 components += type_ptr[1]; 475 } 476 + ses_dev->page1 = buf; 477 + ses_dev->page1_len = len; 478 + buf = NULL; 479 480 result = ses_recv_diag(sdev, 2, hdr_buf, INIT_ALLOC_SIZE); 481 if (result) ··· 489 goto recv_failed; 490 ses_dev->page2 = buf; 491 ses_dev->page2_len = len; 492 + buf = NULL; 493 494 /* The additional information page --- allows us 495 * to match up the devices */ ··· 506 goto recv_failed; 507 ses_dev->page10 = buf; 508 ses_dev->page10_len = len; 509 + buf = NULL; 510 511 no_page10: 512 + scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL); 513 if (!scomp) 514 + goto err_free; 515 516 edev = enclosure_register(cdev->dev, sdev->sdev_gendev.bus_id, 517 components, &ses_enclosure_callbacks); ··· 521 522 edev->scratch = ses_dev; 523 for (i = 0; i < components; i++) 524 + edev->component[i].scratch = scomp + i; 525 526 /* Page 7 for the descriptors is optional */ 527 result = ses_recv_diag(sdev, 7, hdr_buf, INIT_ALLOC_SIZE); 528 if (result) 529 goto simple_populate; ··· 532 len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; 533 /* add 1 for trailing '\0' we'll use */ 534 buf = kzalloc(len + 1, GFP_KERNEL); 535 + if (!buf) 536 + goto simple_populate; 537 result = ses_recv_diag(sdev, 7, buf, len); 538 if (result) { 539 simple_populate: ··· 598 err = -ENODEV; 599 err_free: 600 kfree(buf); 601 + kfree(scomp); 602 kfree(ses_dev->page10); 603 kfree(ses_dev->page2); 604 kfree(ses_dev->page1); ··· 630 ses_dev = edev->scratch; 631 edev->scratch = NULL; 632 633 + kfree(ses_dev->page10); 634 kfree(ses_dev->page1); 635 kfree(ses_dev->page2); 636 kfree(ses_dev);
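Editor's note: the ses.c leak fixes above all apply one ownership idiom: as soon as a buffer is parked in ses_dev->pageN, the local buf is reset to NULL, so the shared err_free label can kfree() unconditionally without leaking or double-freeing. A generic sketch of the idiom under invented names (dev_state, read_page(), PAGE_LEN), not the actual ses code:

#include <linux/slab.h>

#define PAGE_LEN 512			/* illustrative buffer size */

struct dev_state {
	char *page;			/* long-lived copy, freed at teardown */
};

int read_page(struct dev_state *st, char *buf);	/* assumed helper */

static int probe_pages(struct dev_state *st)
{
	char *buf;
	int err = -ENOMEM;

	buf = kzalloc(PAGE_LEN, GFP_KERNEL);
	if (!buf)
		goto err_free;
	err = read_page(st, buf);
	if (err)
		goto err_free;		/* buf is still ours: freed below */
	st->page = buf;
	buf = NULL;			/* ownership handed to st */
	return 0;
 err_free:
	kfree(buf);			/* kfree(NULL) is a safe no-op */
	return err;
}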
+8 -8
drivers/scsi/sym53c416.c
··· 187 #define sym53c416_base_2 sym53c416_2 188 #define sym53c416_base_3 sym53c416_3 189 190 - static unsigned int sym53c416_base[2] = {0,0}; 191 - static unsigned int sym53c416_base_1[2] = {0,0}; 192 - static unsigned int sym53c416_base_2[2] = {0,0}; 193 - static unsigned int sym53c416_base_3[2] = {0,0}; 194 195 #endif 196 ··· 621 int ints[3]; 622 623 ints[0] = 2; 624 - if(sym53c416_base) 625 { 626 ints[1] = sym53c416_base[0]; 627 ints[2] = sym53c416_base[1]; 628 sym53c416_setup(NULL, ints); 629 } 630 - if(sym53c416_base_1) 631 { 632 ints[1] = sym53c416_base_1[0]; 633 ints[2] = sym53c416_base_1[1]; 634 sym53c416_setup(NULL, ints); 635 } 636 - if(sym53c416_base_2) 637 { 638 ints[1] = sym53c416_base_2[0]; 639 ints[2] = sym53c416_base_2[1]; 640 sym53c416_setup(NULL, ints); 641 } 642 - if(sym53c416_base_3) 643 { 644 ints[1] = sym53c416_base_3[0]; 645 ints[2] = sym53c416_base_3[1];
··· 187 #define sym53c416_base_2 sym53c416_2 188 #define sym53c416_base_3 sym53c416_3 189 190 + static unsigned int sym53c416_base[2]; 191 + static unsigned int sym53c416_base_1[2]; 192 + static unsigned int sym53c416_base_2[2]; 193 + static unsigned int sym53c416_base_3[2]; 194 195 #endif 196 ··· 621 int ints[3]; 622 623 ints[0] = 2; 624 + if(sym53c416_base[0]) 625 { 626 ints[1] = sym53c416_base[0]; 627 ints[2] = sym53c416_base[1]; 628 sym53c416_setup(NULL, ints); 629 } 630 + if(sym53c416_base_1[0]) 631 { 632 ints[1] = sym53c416_base_1[0]; 633 ints[2] = sym53c416_base_1[1]; 634 sym53c416_setup(NULL, ints); 635 } 636 + if(sym53c416_base_2[0]) 637 { 638 ints[1] = sym53c416_base_2[0]; 639 ints[2] = sym53c416_base_2[1]; 640 sym53c416_setup(NULL, ints); 641 } 642 + if(sym53c416_base_3[0]) 643 { 644 ints[1] = sym53c416_base_3[0]; 645 ints[2] = sym53c416_base_3[1];
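Editor's note: the sym53c416 fix works because the name of a static array decays to a non-null pointer, so the old if (sym53c416_base) tests were always true even when no module parameter was given; testing element [0] actually detects a user-supplied base address. A minimal standalone illustration, not driver code:

#include <stdio.h>

static unsigned int base[2];	/* zero-filled, like the module parameters */

int main(void)
{
	if (base)		/* always true: decays to &base[0] */
		printf("taken even though no value was set\n");
	if (base[0])		/* correct: tests the stored value */
		printf("printed only when a base address was supplied\n");
	return 0;
}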
+6 -2
include/scsi/scsi_host.h
··· 6 #include <linux/types.h> 7 #include <linux/workqueue.h> 8 #include <linux/mutex.h> 9 10 struct request_queue; 11 struct block_device; ··· 26 * NONE: Self evident. Host adapter is not capable of scatter-gather. 27 * ALL: Means that the host adapter module can do scatter-gather, 28 * and that there is no limit to the size of the table to which 29 - * we scatter/gather data. 30 * Anything else: Indicates the maximum number of chains that can be 31 * used in one scatter-gather request. 32 */ 33 #define SG_NONE 0 34 - #define SG_ALL 0xff 35 36 #define MODE_UNKNOWN 0x00 37 #define MODE_INITIATOR 0x01
··· 6 #include <linux/types.h> 7 #include <linux/workqueue.h> 8 #include <linux/mutex.h> 9 + #include <scsi/scsi.h> 10 11 struct request_queue; 12 struct block_device; ··· 25 * NONE: Self evident. Host adapter is not capable of scatter-gather. 26 * ALL: Means that the host adapter module can do scatter-gather, 27 * and that there is no limit to the size of the table to which 28 + * we scatter/gather data. The value we set here is the maximum 29 + * single-element sglist. To use chained sglists, the adapter 30 + * has to set a value beyond ALL (and correctly use the chain 31 + * handling API). 32 * Anything else: Indicates the maximum number of chains that can be 33 * used in one scatter-gather request. 34 */ 35 #define SG_NONE 0 36 + #define SG_ALL SCSI_MAX_SG_SEGMENTS 37 38 #define MODE_UNKNOWN 0x00 39 #define MODE_INITIATOR 0x01
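Editor's note: with SG_ALL now tied to SCSI_MAX_SG_SEGMENTS, a driver that requests SG_ALL gets the largest single, unchained scatter-gather table; the old literal 0xff could exceed that and, per the merge summary, cause chaining on adapters that never opted into it. A hedged sketch of where the constant is consumed, using invented example_* names and the queuecommand signature of this kernel era:

#include <linux/module.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

static int example_queuecommand(struct scsi_cmnd *cmd,
				void (*done)(struct scsi_cmnd *))
{
	done(cmd);			/* placeholder completion */
	return 0;
}

static struct scsi_host_template example_template = {
	.module		= THIS_MODULE,
	.name		= "example",
	.queuecommand	= example_queuecommand,
	.this_id	= -1,
	.sg_tablesize	= SG_ALL,	/* largest unchained sglist */
	.cmd_per_lun	= 1,
	.use_clustering	= ENABLE_CLUSTERING,
};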