Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:

- add support for new devices (ufs, mvsas)

- a major set of fixes in lpfc

- get rid of a driver-specific ioctl in pmcraid

- a major rework of aha152x to get rid of the scsi_pointer.

- minor fixes and obvious changes including several spelling updates.

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (36 commits)
scsi: megaraid_sas: Target with invalid LUN ID is deleted during scan
scsi: ufs: ufshpb: Fix a NULL check on list iterator
scsi: sd: Clean up gendisk if device_add_disk() failed
scsi: message: fusion: Remove redundant variable dmp
scsi: mvsas: Add PCI ID of RocketRaid 2640
scsi: sd: sd_read_cpr() requires VPD pages
scsi: mpt3sas: Fail reset operation if config request timed out
scsi: sym53c500_cs: Stop using struct scsi_pointer
scsi: ufs: ufs-pci: Add support for Intel MTL
scsi: mpt3sas: Fix mpt3sas_check_same_4gb_region() kdoc comment
scsi: scsi_debug: Fix sdebug_blk_mq_poll() in_use_bm bitmap use
scsi: bnx2i: Fix spelling mistake "mis-match" -> "mismatch"
scsi: bnx2fc: Fix spelling mistake "mis-match" -> "mismatch"
scsi: zorro7xx: Fix a resource leak in zorro7xx_remove_one()
scsi: aic7xxx: Use standard PCI subsystem, subdevice defines
scsi: ufs: qcom: Drop custom Android boot parameters
scsi: core: sysfs: Remove comments that conflict with the actual logic
scsi: hisi_sas: Remove stray fallthrough annotation
scsi: virtio-scsi: Eliminate anonymous module_init & module_exit
scsi: isci: Fix spelling mistake "doesnt" -> "doesn't"
...

+467 -826
+2 -2
drivers/message/fusion/mptbase.c
··· 6658 6658 static int mpt_version_proc_show(struct seq_file *m, void *v) 6659 6659 { 6660 6660 u8 cb_idx; 6661 - int scsi, fc, sas, lan, ctl, targ, dmp; 6661 + int scsi, fc, sas, lan, ctl, targ; 6662 6662 char *drvname; 6663 6663 6664 6664 seq_printf(m, "%s-%s\n", "mptlinux", MPT_LINUX_VERSION_COMMON); 6665 6665 seq_printf(m, " Fusion MPT base driver\n"); 6666 6666 6667 - scsi = fc = sas = lan = ctl = targ = dmp = 0; 6667 + scsi = fc = sas = lan = ctl = targ = 0; 6668 6668 for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { 6669 6669 drvname = NULL; 6670 6670 if (MptCallbacks[cb_idx]) {
+113 -122
drivers/scsi/aha152x.c
··· 317 317 }; 318 318 319 319 struct aha152x_cmd_priv { 320 - struct scsi_pointer scsi_pointer; 320 + char *ptr; 321 + int this_residual; 322 + struct scatterlist *buffer; 323 + int status; 324 + int message; 325 + int sent_command; 326 + int phase; 321 327 }; 322 328 323 - static struct scsi_pointer *aha152x_scsi_pointer(struct scsi_cmnd *cmd) 329 + static struct aha152x_cmd_priv *aha152x_priv(struct scsi_cmnd *cmd) 324 330 { 325 - struct aha152x_cmd_priv *acmd = scsi_cmd_priv(cmd); 326 - 327 - return &acmd->scsi_pointer; 331 + return scsi_cmd_priv(cmd); 328 332 } 329 333 330 334 MODULE_AUTHOR("Jürgen Fischer"); ··· 894 890 static int setup_expected_interrupts(struct Scsi_Host *shpnt) 895 891 { 896 892 if(CURRENT_SC) { 897 - struct scsi_pointer *scsi_pointer = 898 - aha152x_scsi_pointer(CURRENT_SC); 893 + struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); 899 894 900 - scsi_pointer->phase |= 1 << 16; 895 + acp->phase |= 1 << 16; 901 896 902 - if (scsi_pointer->phase & selecting) { 897 + if (acp->phase & selecting) { 903 898 SETPORT(SSTAT1, SELTO); 904 899 SETPORT(SIMODE0, ENSELDO | (DISCONNECTED_SC ? ENSELDI : 0)); 905 900 SETPORT(SIMODE1, ENSELTIMO); 906 901 } else { 907 - SETPORT(SIMODE0, (scsi_pointer->phase & spiordy) ? ENSPIORDY : 0); 902 + SETPORT(SIMODE0, (acp->phase & spiordy) ? 
ENSPIORDY : 0); 908 903 SETPORT(SIMODE1, ENPHASEMIS | ENSCSIRST | ENSCSIPERR | ENBUSFREE); 909 904 } 910 905 } else if(STATE==seldi) { ··· 927 924 static int aha152x_internal_queue(struct scsi_cmnd *SCpnt, 928 925 struct completion *complete, int phase) 929 926 { 930 - struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(SCpnt); 927 + struct aha152x_cmd_priv *acp = aha152x_priv(SCpnt); 931 928 struct Scsi_Host *shpnt = SCpnt->device->host; 932 929 unsigned long flags; 933 930 934 - scsi_pointer->phase = not_issued | phase; 935 - scsi_pointer->Status = 0x1; /* Ilegal status by SCSI standard */ 936 - scsi_pointer->Message = 0; 937 - scsi_pointer->have_data_in = 0; 938 - scsi_pointer->sent_command = 0; 931 + acp->phase = not_issued | phase; 932 + acp->status = 0x1; /* Illegal status by SCSI standard */ 933 + acp->message = 0; 934 + acp->sent_command = 0; 939 935 940 - if (scsi_pointer->phase & (resetting | check_condition)) { 936 + if (acp->phase & (resetting | check_condition)) { 941 937 if (!SCpnt->host_scribble || SCSEM(SCpnt) || SCNEXT(SCpnt)) { 942 938 scmd_printk(KERN_ERR, SCpnt, "cannot reuse command\n"); 943 939 return FAILED; ··· 959 957 SCp.phase : current state of the command */ 960 958 961 959 if ((phase & resetting) || !scsi_sglist(SCpnt)) { 962 - scsi_pointer->ptr = NULL; 963 - scsi_pointer->this_residual = 0; 960 + acp->ptr = NULL; 961 + acp->this_residual = 0; 964 962 scsi_set_resid(SCpnt, 0); 965 - scsi_pointer->buffer = NULL; 963 + acp->buffer = NULL; 966 964 } else { 967 965 scsi_set_resid(SCpnt, scsi_bufflen(SCpnt)); 968 - scsi_pointer->buffer = scsi_sglist(SCpnt); 969 - scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer); 970 - scsi_pointer->this_residual = scsi_pointer->buffer->length; 966 + acp->buffer = scsi_sglist(SCpnt); 967 + acp->ptr = SG_ADDRESS(acp->buffer); 968 + acp->this_residual = acp->buffer->length; 971 969 } 972 970 973 971 DO_LOCK(flags); ··· 1017 1015 1018 1016 static void aha152x_scsi_done(struct scsi_cmnd *SCpnt) 1019 1017 
{ 1020 - if (aha152x_scsi_pointer(SCpnt)->phase & resetting) 1018 + if (aha152x_priv(SCpnt)->phase & resetting) 1021 1019 reset_done(SCpnt); 1022 1020 else 1023 1021 scsi_done(SCpnt); ··· 1103 1101 1104 1102 DO_LOCK(flags); 1105 1103 1106 - if (aha152x_scsi_pointer(SCpnt)->phase & resetted) { 1104 + if (aha152x_priv(SCpnt)->phase & resetted) { 1107 1105 HOSTDATA(shpnt)->commands--; 1108 1106 if (!HOSTDATA(shpnt)->commands) 1109 1107 SETPORT(PORTA, 0); ··· 1397 1395 SETPORT(SSTAT1, CLRBUSFREE); 1398 1396 1399 1397 if(CURRENT_SC) { 1400 - struct scsi_pointer *scsi_pointer = 1401 - aha152x_scsi_pointer(CURRENT_SC); 1398 + struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); 1402 1399 1403 1400 #if defined(AHA152X_STAT) 1404 1401 action++; 1405 1402 #endif 1406 - scsi_pointer->phase &= ~syncneg; 1403 + acp->phase &= ~syncneg; 1407 1404 1408 - if (scsi_pointer->phase & completed) { 1405 + if (acp->phase & completed) { 1409 1406 /* target sent COMMAND COMPLETE */ 1410 - done(shpnt, scsi_pointer->Status, DID_OK); 1407 + done(shpnt, acp->status, DID_OK); 1411 1408 1412 - } else if (scsi_pointer->phase & aborted) { 1413 - done(shpnt, scsi_pointer->Status, DID_ABORT); 1409 + } else if (acp->phase & aborted) { 1410 + done(shpnt, acp->status, DID_ABORT); 1414 1411 1415 - } else if (scsi_pointer->phase & resetted) { 1416 - done(shpnt, scsi_pointer->Status, DID_RESET); 1412 + } else if (acp->phase & resetted) { 1413 + done(shpnt, acp->status, DID_RESET); 1417 1414 1418 - } else if (scsi_pointer->phase & disconnected) { 1415 + } else if (acp->phase & disconnected) { 1419 1416 /* target sent DISCONNECT */ 1420 1417 #if defined(AHA152X_STAT) 1421 1418 HOSTDATA(shpnt)->disconnections++; 1422 1419 #endif 1423 1420 append_SC(&DISCONNECTED_SC, CURRENT_SC); 1424 - scsi_pointer->phase |= 1 << 16; 1421 + acp->phase |= 1 << 16; 1425 1422 CURRENT_SC = NULL; 1426 1423 1427 1424 } else { ··· 1439 1438 action++; 1440 1439 #endif 1441 1440 1442 - if (aha152x_scsi_pointer(DONE_SC)->phase & 
check_condition) { 1441 + if (aha152x_priv(DONE_SC)->phase & check_condition) { 1443 1442 struct scsi_cmnd *cmd = HOSTDATA(shpnt)->done_SC; 1444 1443 struct aha152x_scdata *sc = SCDATA(cmd); 1445 1444 1446 1445 scsi_eh_restore_cmnd(cmd, &sc->ses); 1447 1446 1448 - aha152x_scsi_pointer(cmd)->Status = SAM_STAT_CHECK_CONDITION; 1447 + aha152x_priv(cmd)->status = SAM_STAT_CHECK_CONDITION; 1449 1448 1450 1449 HOSTDATA(shpnt)->commands--; 1451 1450 if (!HOSTDATA(shpnt)->commands) 1452 1451 SETPORT(PORTA, 0); /* turn led off */ 1453 - } else if (aha152x_scsi_pointer(DONE_SC)->Status == 1454 - SAM_STAT_CHECK_CONDITION) { 1452 + } else if (aha152x_priv(DONE_SC)->status == SAM_STAT_CHECK_CONDITION) { 1455 1453 #if defined(AHA152X_STAT) 1456 1454 HOSTDATA(shpnt)->busfree_with_check_condition++; 1457 1455 #endif 1458 1456 1459 - if(!(aha152x_scsi_pointer(DONE_SC)->phase & not_issued)) { 1457 + if (!(aha152x_priv(DONE_SC)->phase & not_issued)) { 1460 1458 struct aha152x_scdata *sc; 1461 1459 struct scsi_cmnd *ptr = DONE_SC; 1462 1460 DONE_SC=NULL; ··· 1480 1480 if (!HOSTDATA(shpnt)->commands) 1481 1481 SETPORT(PORTA, 0); /* turn led off */ 1482 1482 1483 - if (!(aha152x_scsi_pointer(ptr)->phase & resetting)) { 1483 + if (!(aha152x_priv(ptr)->phase & resetting)) { 1484 1484 kfree(ptr->host_scribble); 1485 1485 ptr->host_scribble=NULL; 1486 1486 } ··· 1503 1503 DO_UNLOCK(flags); 1504 1504 1505 1505 if(CURRENT_SC) { 1506 - struct scsi_pointer *scsi_pointer = 1507 - aha152x_scsi_pointer(CURRENT_SC); 1506 + struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); 1508 1507 1509 1508 #if defined(AHA152X_STAT) 1510 1509 action++; 1511 1510 #endif 1512 - scsi_pointer->phase |= selecting; 1511 + acp->phase |= selecting; 1513 1512 1514 1513 /* clear selection timeout */ 1515 1514 SETPORT(SSTAT1, SELTO); ··· 1536 1537 */ 1537 1538 static void seldo_run(struct Scsi_Host *shpnt) 1538 1539 { 1539 - struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC); 1540 + struct 
aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); 1540 1541 1541 1542 SETPORT(SCSISIG, 0); 1542 1543 SETPORT(SSTAT1, CLRBUSFREE); 1543 1544 SETPORT(SSTAT1, CLRPHASECHG); 1544 1545 1545 - scsi_pointer->phase &= ~(selecting | not_issued); 1546 + acp->phase &= ~(selecting | not_issued); 1546 1547 1547 1548 SETPORT(SCSISEQ, 0); 1548 1549 ··· 1557 1558 1558 1559 ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun)); 1559 1560 1560 - if (scsi_pointer->phase & aborting) { 1561 + if (acp->phase & aborting) { 1561 1562 ADDMSGO(ABORT); 1562 - } else if (scsi_pointer->phase & resetting) { 1563 + } else if (acp->phase & resetting) { 1563 1564 ADDMSGO(BUS_DEVICE_RESET); 1564 1565 } else if (SYNCNEG==0 && SYNCHRONOUS) { 1565 - scsi_pointer->phase |= syncneg; 1566 + acp->phase |= syncneg; 1566 1567 MSGOLEN += spi_populate_sync_msg(&MSGO(MSGOLEN), 50, 8); 1567 1568 SYNCNEG=1; /* negotiation in progress */ 1568 1569 } ··· 1577 1578 */ 1578 1579 static void selto_run(struct Scsi_Host *shpnt) 1579 1580 { 1580 - struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC); 1581 + struct aha152x_cmd_priv *acp; 1581 1582 1582 1583 SETPORT(SCSISEQ, 0); 1583 1584 SETPORT(SSTAT1, CLRSELTIMO); ··· 1585 1586 if (!CURRENT_SC) 1586 1587 return; 1587 1588 1588 - scsi_pointer->phase &= ~selecting; 1589 + acp = aha152x_priv(CURRENT_SC); 1590 + acp->phase &= ~selecting; 1589 1591 1590 - if (scsi_pointer->phase & aborted) 1592 + if (acp->phase & aborted) 1591 1593 done(shpnt, SAM_STAT_GOOD, DID_ABORT); 1592 1594 else if (TESTLO(SSTAT0, SELINGO)) 1593 1595 done(shpnt, SAM_STAT_GOOD, DID_BUS_BUSY); ··· 1616 1616 SETPORT(SSTAT1, CLRPHASECHG); 1617 1617 1618 1618 if(CURRENT_SC) { 1619 - struct scsi_pointer *scsi_pointer = 1620 - aha152x_scsi_pointer(CURRENT_SC); 1619 + struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); 1621 1620 1622 - if (!(scsi_pointer->phase & not_issued)) 1621 + if (!(acp->phase & not_issued)) 1623 1622 scmd_printk(KERN_ERR, CURRENT_SC, 1624 1623 "command should not 
have been issued yet\n"); 1625 1624 ··· 1675 1676 static void msgi_run(struct Scsi_Host *shpnt) 1676 1677 { 1677 1678 for(;;) { 1678 - struct scsi_pointer *scsi_pointer; 1679 + struct aha152x_cmd_priv *acp; 1679 1680 int sstat1 = GETPORT(SSTAT1); 1680 1681 1681 1682 if(sstat1 & (PHASECHG|PHASEMIS|BUSFREE) || !(sstat1 & REQINIT)) ··· 1713 1714 continue; 1714 1715 } 1715 1716 1716 - scsi_pointer = aha152x_scsi_pointer(CURRENT_SC); 1717 - scsi_pointer->Message = MSGI(0); 1718 - scsi_pointer->phase &= ~disconnected; 1717 + acp = aha152x_priv(CURRENT_SC); 1718 + acp->message = MSGI(0); 1719 + acp->phase &= ~disconnected; 1719 1720 1720 1721 MSGILEN=0; 1721 1722 ··· 1723 1724 continue; 1724 1725 } 1725 1726 1726 - scsi_pointer = aha152x_scsi_pointer(CURRENT_SC); 1727 - scsi_pointer->Message = MSGI(0); 1727 + acp = aha152x_priv(CURRENT_SC); 1728 + acp->message = MSGI(0); 1728 1729 1729 1730 switch (MSGI(0)) { 1730 1731 case DISCONNECT: ··· 1732 1733 scmd_printk(KERN_WARNING, CURRENT_SC, 1733 1734 "target was not allowed to disconnect\n"); 1734 1735 1735 - scsi_pointer->phase |= disconnected; 1736 + acp->phase |= disconnected; 1736 1737 break; 1737 1738 1738 1739 case COMMAND_COMPLETE: 1739 - scsi_pointer->phase |= completed; 1740 + acp->phase |= completed; 1740 1741 break; 1741 1742 1742 1743 case MESSAGE_REJECT: ··· 1866 1867 */ 1867 1868 static void msgo_init(struct Scsi_Host *shpnt) 1868 1869 { 1869 - struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC); 1870 - 1871 1870 if(MSGOLEN==0) { 1872 - if ((scsi_pointer->phase & syncneg) && SYNCNEG==2 && 1873 - SYNCRATE==0) { 1871 + if ((aha152x_priv(CURRENT_SC)->phase & syncneg) && 1872 + SYNCNEG == 2 && SYNCRATE == 0) { 1874 1873 ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun)); 1875 1874 } else { 1876 1875 scmd_printk(KERN_INFO, CURRENT_SC, ··· 1885 1888 */ 1886 1889 static void msgo_run(struct Scsi_Host *shpnt) 1887 1890 { 1888 - struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC); 
1891 + struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); 1889 1892 1890 1893 while(MSGO_I<MSGOLEN) { 1891 1894 if (TESTLO(SSTAT0, SPIORDY)) ··· 1898 1901 1899 1902 1900 1903 if (MSGO(MSGO_I) & IDENTIFY_BASE) 1901 - scsi_pointer->phase |= identified; 1904 + acp->phase |= identified; 1902 1905 1903 1906 if (MSGO(MSGO_I)==ABORT) 1904 - scsi_pointer->phase |= aborted; 1907 + acp->phase |= aborted; 1905 1908 1906 1909 if (MSGO(MSGO_I)==BUS_DEVICE_RESET) 1907 - scsi_pointer->phase |= resetted; 1910 + acp->phase |= resetted; 1908 1911 1909 1912 SETPORT(SCSIDAT, MSGO(MSGO_I++)); 1910 1913 } ··· 1933 1936 */ 1934 1937 static void cmd_init(struct Scsi_Host *shpnt) 1935 1938 { 1936 - if (aha152x_scsi_pointer(CURRENT_SC)->sent_command) { 1939 + if (aha152x_priv(CURRENT_SC)->sent_command) { 1937 1940 scmd_printk(KERN_ERR, CURRENT_SC, 1938 1941 "command already sent\n"); 1939 1942 done(shpnt, SAM_STAT_GOOD, DID_ERROR); ··· 1964 1967 "command sent incompletely (%d/%d)\n", 1965 1968 CMD_I, CURRENT_SC->cmd_len); 1966 1969 else 1967 - aha152x_scsi_pointer(CURRENT_SC)->sent_command++; 1970 + aha152x_priv(CURRENT_SC)->sent_command++; 1968 1971 } 1969 1972 1970 1973 /* ··· 1976 1979 if (TESTLO(SSTAT0, SPIORDY)) 1977 1980 return; 1978 1981 1979 - aha152x_scsi_pointer(CURRENT_SC)->Status = GETPORT(SCSIDAT); 1982 + aha152x_priv(CURRENT_SC)->status = GETPORT(SCSIDAT); 1980 1983 1981 1984 } 1982 1985 ··· 2000 2003 2001 2004 static void datai_run(struct Scsi_Host *shpnt) 2002 2005 { 2003 - struct scsi_pointer *scsi_pointer; 2006 + struct aha152x_cmd_priv *acp; 2004 2007 unsigned long the_time; 2005 2008 int fifodata, data_count; 2006 2009 ··· 2038 2041 fifodata = GETPORT(FIFOSTAT); 2039 2042 } 2040 2043 2041 - scsi_pointer = aha152x_scsi_pointer(CURRENT_SC); 2042 - if (scsi_pointer->this_residual > 0) { 2043 - while (fifodata > 0 && scsi_pointer->this_residual > 0) { 2044 - data_count = fifodata > scsi_pointer->this_residual ? 
2045 - scsi_pointer->this_residual : 2046 - fifodata; 2044 + acp = aha152x_priv(CURRENT_SC); 2045 + if (acp->this_residual > 0) { 2046 + while (fifodata > 0 && acp->this_residual > 0) { 2047 + data_count = fifodata > acp->this_residual ? 2048 + acp->this_residual : fifodata; 2047 2049 fifodata -= data_count; 2048 2050 2049 2051 if (data_count & 1) { 2050 2052 SETPORT(DMACNTRL0, ENDMA|_8BIT); 2051 - *scsi_pointer->ptr++ = GETPORT(DATAPORT); 2052 - scsi_pointer->this_residual--; 2053 + *acp->ptr++ = GETPORT(DATAPORT); 2054 + acp->this_residual--; 2053 2055 DATA_LEN++; 2054 2056 SETPORT(DMACNTRL0, ENDMA); 2055 2057 } 2056 2058 2057 2059 if (data_count > 1) { 2058 2060 data_count >>= 1; 2059 - insw(DATAPORT, scsi_pointer->ptr, data_count); 2060 - scsi_pointer->ptr += 2 * data_count; 2061 - scsi_pointer->this_residual -= 2 * data_count; 2061 + insw(DATAPORT, acp->ptr, data_count); 2062 + acp->ptr += 2 * data_count; 2063 + acp->this_residual -= 2 * data_count; 2062 2064 DATA_LEN += 2 * data_count; 2063 2065 } 2064 2066 2065 - if (scsi_pointer->this_residual == 0 && 2066 - !sg_is_last(scsi_pointer->buffer)) { 2067 + if (acp->this_residual == 0 && 2068 + !sg_is_last(acp->buffer)) { 2067 2069 /* advance to next buffer */ 2068 - scsi_pointer->buffer = sg_next(scsi_pointer->buffer); 2069 - scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer); 2070 - scsi_pointer->this_residual = scsi_pointer->buffer->length; 2070 + acp->buffer = sg_next(acp->buffer); 2071 + acp->ptr = SG_ADDRESS(acp->buffer); 2072 + acp->this_residual = acp->buffer->length; 2071 2073 } 2072 2074 } 2073 2075 } else if (fifodata > 0) { ··· 2134 2138 2135 2139 static void datao_run(struct Scsi_Host *shpnt) 2136 2140 { 2137 - struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC); 2141 + struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); 2138 2142 unsigned long the_time; 2139 2143 int data_count; 2140 2144 2141 2145 /* until phase changes or all data sent */ 2142 - while (TESTLO(DMASTAT, 
INTSTAT) && scsi_pointer->this_residual > 0) { 2146 + while (TESTLO(DMASTAT, INTSTAT) && acp->this_residual > 0) { 2143 2147 data_count = 128; 2144 - if (data_count > scsi_pointer->this_residual) 2145 - data_count = scsi_pointer->this_residual; 2148 + if (data_count > acp->this_residual) 2149 + data_count = acp->this_residual; 2146 2150 2147 2151 if(TESTLO(DMASTAT, DFIFOEMP)) { 2148 2152 scmd_printk(KERN_ERR, CURRENT_SC, ··· 2153 2157 2154 2158 if(data_count & 1) { 2155 2159 SETPORT(DMACNTRL0,WRITE_READ|ENDMA|_8BIT); 2156 - SETPORT(DATAPORT, *scsi_pointer->ptr++); 2157 - scsi_pointer->this_residual--; 2160 + SETPORT(DATAPORT, *acp->ptr++); 2161 + acp->this_residual--; 2158 2162 CMD_INC_RESID(CURRENT_SC, -1); 2159 2163 SETPORT(DMACNTRL0,WRITE_READ|ENDMA); 2160 2164 } 2161 2165 2162 2166 if(data_count > 1) { 2163 2167 data_count >>= 1; 2164 - outsw(DATAPORT, scsi_pointer->ptr, data_count); 2165 - scsi_pointer->ptr += 2 * data_count; 2166 - scsi_pointer->this_residual -= 2 * data_count; 2168 + outsw(DATAPORT, acp->ptr, data_count); 2169 + acp->ptr += 2 * data_count; 2170 + acp->this_residual -= 2 * data_count; 2167 2171 CMD_INC_RESID(CURRENT_SC, -2 * data_count); 2168 2172 } 2169 2173 2170 - if (scsi_pointer->this_residual == 0 && 2171 - !sg_is_last(scsi_pointer->buffer)) { 2174 + if (acp->this_residual == 0 && !sg_is_last(acp->buffer)) { 2172 2175 /* advance to next buffer */ 2173 - scsi_pointer->buffer = sg_next(scsi_pointer->buffer); 2174 - scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer); 2175 - scsi_pointer->this_residual = scsi_pointer->buffer->length; 2176 + acp->buffer = sg_next(acp->buffer); 2177 + acp->ptr = SG_ADDRESS(acp->buffer); 2178 + acp->this_residual = acp->buffer->length; 2176 2179 } 2177 2180 2178 2181 the_time=jiffies + 100*HZ; ··· 2187 2192 2188 2193 static void datao_end(struct Scsi_Host *shpnt) 2189 2194 { 2190 - struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC); 2195 + struct aha152x_cmd_priv *acp = 
aha152x_priv(CURRENT_SC); 2191 2196 2192 2197 if(TESTLO(DMASTAT, DFIFOEMP)) { 2193 2198 u32 datao_cnt = GETSTCNT(); ··· 2206 2211 sg = sg_next(sg); 2207 2212 } 2208 2213 2209 - scsi_pointer->buffer = sg; 2210 - scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer) + done; 2211 - scsi_pointer->this_residual = scsi_pointer->buffer->length - 2212 - done; 2214 + acp->buffer = sg; 2215 + acp->ptr = SG_ADDRESS(acp->buffer) + done; 2216 + acp->this_residual = acp->buffer->length - done; 2213 2217 } 2214 2218 2215 2219 SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT); ··· 2223 2229 */ 2224 2230 static int update_state(struct Scsi_Host *shpnt) 2225 2231 { 2226 - struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC); 2227 2232 int dataphase=0; 2228 2233 unsigned int stat0 = GETPORT(SSTAT0); 2229 2234 unsigned int stat1 = GETPORT(SSTAT1); ··· 2237 2244 } else if (stat0 & SELDI && PREVSTATE == busfree) { 2238 2245 STATE=seldi; 2239 2246 } else if (stat0 & SELDO && CURRENT_SC && 2240 - (scsi_pointer->phase & selecting)) { 2247 + (aha152x_priv(CURRENT_SC)->phase & selecting)) { 2241 2248 STATE=seldo; 2242 2249 } else if(stat1 & SELTO) { 2243 2250 STATE=selto; ··· 2369 2376 SETPORT(SXFRCTL0, CH1); 2370 2377 SETPORT(DMACNTRL0, 0); 2371 2378 if(CURRENT_SC) 2372 - aha152x_scsi_pointer(CURRENT_SC)->phase &= 2373 - ~spiordy; 2379 + aha152x_priv(CURRENT_SC)->phase &= ~spiordy; 2374 2380 } 2375 2381 2376 2382 /* ··· 2391 2399 SETPORT(DMACNTRL0, 0); 2392 2400 SETPORT(SXFRCTL0, CH1|SPIOEN); 2393 2401 if(CURRENT_SC) 2394 - aha152x_scsi_pointer(CURRENT_SC)->phase |= 2395 - spiordy; 2402 + aha152x_priv(CURRENT_SC)->phase |= spiordy; 2396 2403 } 2397 2404 2398 2405 /* ··· 2481 2490 */ 2482 2491 static void show_command(struct scsi_cmnd *ptr) 2483 2492 { 2484 - const int phase = aha152x_scsi_pointer(ptr)->phase; 2493 + const int phase = aha152x_priv(ptr)->phase; 2485 2494 2486 2495 scsi_print_command(ptr); 2487 2496 scmd_printk(KERN_DEBUG, ptr, ··· 2529 2538 2530 2539 static void 
get_command(struct seq_file *m, struct scsi_cmnd * ptr) 2531 2540 { 2532 - struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(ptr); 2533 - const int phase = scsi_pointer->phase; 2541 + struct aha152x_cmd_priv *acp = aha152x_priv(ptr); 2542 + const int phase = acp->phase; 2534 2543 int i; 2535 2544 2536 2545 seq_printf(m, "%p: target=%d; lun=%d; cmnd=( ", ··· 2540 2549 seq_printf(m, "0x%02x ", ptr->cmnd[i]); 2541 2550 2542 2551 seq_printf(m, "); resid=%d; residual=%d; buffers=%d; phase |", 2543 - scsi_get_resid(ptr), scsi_pointer->this_residual, 2544 - sg_nents(scsi_pointer->buffer) - 1); 2552 + scsi_get_resid(ptr), acp->this_residual, 2553 + sg_nents(acp->buffer) - 1); 2545 2554 2546 2555 if (phase & not_issued) 2547 2556 seq_puts(m, "not issued|");
-2
drivers/scsi/aic7xxx/aic79xx_osm.h
··· 420 420 421 421 /* config registers for header type 0 devices */ 422 422 #define PCIR_MAPS 0x10 423 - #define PCIR_SUBVEND_0 0x2c 424 - #define PCIR_SUBDEV_0 0x2e 425 423 426 424 /****************************** PCI-X definitions *****************************/ 427 425 #define PCIXR_COMMAND 0x96
+3 -3
drivers/scsi/aic7xxx/aic79xx_pci.c
··· 260 260 261 261 vendor = ahd_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2); 262 262 device = ahd_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2); 263 - subvendor = ahd_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2); 264 - subdevice = ahd_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2); 263 + subvendor = ahd_pci_read_config(pci, PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2); 264 + subdevice = ahd_pci_read_config(pci, PCI_SUBSYSTEM_ID, /*bytes*/2); 265 265 full_id = ahd_compose_id(device, 266 266 vendor, 267 267 subdevice, ··· 298 298 * Record if this is an HP board. 299 299 */ 300 300 subvendor = ahd_pci_read_config(ahd->dev_softc, 301 - PCIR_SUBVEND_0, /*bytes*/2); 301 + PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2); 302 302 if (subvendor == SUBID_HP) 303 303 ahd->flags |= AHD_HP_BOARD; 304 304
-2
drivers/scsi/aic7xxx/aic7xxx_osm.h
··· 433 433 434 434 /* config registers for header type 0 devices */ 435 435 #define PCIR_MAPS 0x10 436 - #define PCIR_SUBVEND_0 0x2c 437 - #define PCIR_SUBDEV_0 0x2e 438 436 439 437 typedef enum 440 438 {
+2 -2
drivers/scsi/aic7xxx/aic7xxx_pci.c
··· 673 673 674 674 vendor = ahc_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2); 675 675 device = ahc_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2); 676 - subvendor = ahc_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2); 677 - subdevice = ahc_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2); 676 + subvendor = ahc_pci_read_config(pci, PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2); 677 + subdevice = ahc_pci_read_config(pci, PCI_SUBSYSTEM_ID, /*bytes*/2); 678 678 full_id = ahc_compose_id(device, vendor, subdevice, subvendor); 679 679 680 680 /*
+3 -3
drivers/scsi/bnx2fc/bnx2fc_hwi.c
··· 1169 1169 ofld_kcqe->fcoe_conn_context_id); 1170 1170 interface = tgt->port->priv; 1171 1171 if (hba != interface->hba) { 1172 - printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n"); 1172 + printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mismatch\n"); 1173 1173 goto ofld_cmpl_err; 1174 1174 } 1175 1175 /* ··· 1226 1226 * and enable 1227 1227 */ 1228 1228 if (tgt->context_id != context_id) { 1229 - printk(KERN_ERR PFX "context id mis-match\n"); 1229 + printk(KERN_ERR PFX "context id mismatch\n"); 1230 1230 return; 1231 1231 } 1232 1232 interface = tgt->port->priv; 1233 1233 if (hba != interface->hba) { 1234 - printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n"); 1234 + printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mismatch\n"); 1235 1235 goto enbl_cmpl_err; 1236 1236 } 1237 1237 if (!ofld_kcqe->completion_status)
+2 -2
drivers/scsi/bnx2i/bnx2i_hwi.c
··· 2398 2398 } 2399 2399 2400 2400 if (hba != ep->hba) { 2401 - printk(KERN_ALERT "conn destroy- error hba mis-match\n"); 2401 + printk(KERN_ALERT "conn destroy- error hba mismatch\n"); 2402 2402 return; 2403 2403 } 2404 2404 ··· 2432 2432 } 2433 2433 2434 2434 if (hba != ep->hba) { 2435 - printk(KERN_ALERT "ofld_cmpl: error hba mis-match\n"); 2435 + printk(KERN_ALERT "ofld_cmpl: error hba mismatch\n"); 2436 2436 return; 2437 2437 } 2438 2438
-1
drivers/scsi/hisi_sas/hisi_sas_main.c
··· 442 442 case SAS_PROTOCOL_INTERNAL_ABORT: 443 443 hisi_sas_task_prep_abort(hisi_hba, slot); 444 444 break; 445 - fallthrough; 446 445 default: 447 446 return; 448 447 }
+1 -1
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
··· 36 36 37 37 #define IBMVSCSIS_VERSION "v0.2" 38 38 39 - #define INITIAL_SRP_LIMIT 800 39 + #define INITIAL_SRP_LIMIT 1024 40 40 #define DEFAULT_MAX_SECTORS 256 41 41 #define MAX_TXU 1024 * 1024 42 42
+3 -3
drivers/scsi/isci/host.c
··· 413 413 dev_warn(&ihost->pdev->dev, 414 414 "%s: SCIC Controller 0x%p received " 415 415 "event 0x%x for io request object " 416 - "that doesnt exist.\n", 416 + "that doesn't exist.\n", 417 417 __func__, 418 418 ihost, 419 419 ent); ··· 428 428 dev_warn(&ihost->pdev->dev, 429 429 "%s: SCIC Controller 0x%p received " 430 430 "event 0x%x for remote device object " 431 - "that doesnt exist.\n", 431 + "that doesn't exist.\n", 432 432 __func__, 433 433 ihost, 434 434 ent); ··· 462 462 } else 463 463 dev_err(&ihost->pdev->dev, 464 464 "%s: SCIC Controller 0x%p received event 0x%x " 465 - "for remote device object 0x%0x that doesnt " 465 + "for remote device object 0x%0x that doesn't " 466 466 "exist.\n", 467 467 __func__, 468 468 ihost,
-1
drivers/scsi/libiscsi.c
··· 3045 3045 if (!cls_conn) 3046 3046 return NULL; 3047 3047 conn = cls_conn->dd_data; 3048 - memset(conn, 0, sizeof(*conn) + dd_size); 3049 3048 3050 3049 conn->dd_data = cls_conn->dd_data + sizeof(*conn); 3051 3050 conn->session = session;
+5 -2
drivers/scsi/lpfc/lpfc.h
··· 897 897 NHT_MODE, 898 898 }; 899 899 900 + enum lpfc_hba_bit_flags { 901 + FABRIC_COMANDS_BLOCKED, 902 + HBA_PCI_ERR, 903 + }; 904 + 900 905 struct lpfc_hba { 901 906 /* SCSI interface function jump table entries */ 902 907 struct lpfc_io_buf * (*lpfc_get_scsi_buf) ··· 1048 1043 * Firmware supports Forced Link Speed 1049 1044 * capability 1050 1045 */ 1051 - #define HBA_PCI_ERR 0x80000 /* The PCI slot is offline */ 1052 1046 #define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */ 1053 1047 #define HBA_SHORT_CMF 0x200000 /* shorter CMF timer routine */ 1054 1048 #define HBA_CGN_DAY_WRAP 0x400000 /* HBA Congestion info day wraps */ ··· 1354 1350 atomic_t fabric_iocb_count; 1355 1351 struct timer_list fabric_block_timer; 1356 1352 unsigned long bit_flags; 1357 - #define FABRIC_COMANDS_BLOCKED 0 1358 1353 atomic_t num_rsrc_err; 1359 1354 atomic_t num_cmd_success; 1360 1355 unsigned long last_rsrc_error_time;
+3
drivers/scsi/lpfc/lpfc_crtn.h
··· 670 670 uint32_t hash, uint8_t *buf); 671 671 void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport); 672 672 int lpfc_issue_els_qfpa(struct lpfc_vport *vport); 673 + 674 + void lpfc_sli_rpi_release(struct lpfc_vport *vport, 675 + struct lpfc_nodelist *ndlp);
+97 -23
drivers/scsi/lpfc/lpfc_hbadisc.c
··· 109 109 110 110 ndlp = rdata->pnode; 111 111 if (!rdata->pnode) { 112 - pr_err("**** %s: NULL ndlp on rport x%px SID x%x\n", 113 - __func__, rport, rport->scsi_target_id); 112 + pr_info("**** %s: NULL ndlp on rport x%px SID x%x\n", 113 + __func__, rport, rport->scsi_target_id); 114 114 return -EINVAL; 115 115 } 116 116 ··· 169 169 170 170 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 171 171 "3181 dev_loss_callbk x%06x, rport x%px flg x%x " 172 - "load_flag x%x refcnt %d\n", 172 + "load_flag x%x refcnt %d state %d xpt x%x\n", 173 173 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag, 174 - vport->load_flag, kref_read(&ndlp->kref)); 174 + vport->load_flag, kref_read(&ndlp->kref), 175 + ndlp->nlp_state, ndlp->fc4_xpt_flags); 175 176 176 177 /* Don't schedule a worker thread event if the vport is going down. 177 178 * The teardown process cleans up the node via lpfc_drop_node. ··· 182 181 ndlp->rport = NULL; 183 182 184 183 ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD; 184 + /* clear the NLP_XPT_REGD if the node is not registered 185 + * with nvme-fc 186 + */ 187 + if (ndlp->fc4_xpt_flags == NLP_XPT_REGD) 188 + ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD; 185 189 186 190 /* Remove the node reference from remote_port_add now. 187 191 * The driver will not call remote_port_delete. 
··· 231 225 ndlp->rport = NULL; 232 226 spin_unlock_irqrestore(&ndlp->lock, iflags); 233 227 234 - /* We need to hold the node by incrementing the reference 235 - * count until this queued work is done 236 - */ 237 - evtp->evt_arg1 = lpfc_nlp_get(ndlp); 228 + if (phba->worker_thread) { 229 + /* We need to hold the node by incrementing the reference 230 + * count until this queued work is done 231 + */ 232 + evtp->evt_arg1 = lpfc_nlp_get(ndlp); 238 233 239 - spin_lock_irqsave(&phba->hbalock, iflags); 240 - if (evtp->evt_arg1) { 241 - evtp->evt = LPFC_EVT_DEV_LOSS; 242 - list_add_tail(&evtp->evt_listp, &phba->work_list); 243 - lpfc_worker_wake_up(phba); 234 + spin_lock_irqsave(&phba->hbalock, iflags); 235 + if (evtp->evt_arg1) { 236 + evtp->evt = LPFC_EVT_DEV_LOSS; 237 + list_add_tail(&evtp->evt_listp, &phba->work_list); 238 + lpfc_worker_wake_up(phba); 239 + } 240 + spin_unlock_irqrestore(&phba->hbalock, iflags); 241 + } else { 242 + lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 243 + "3188 worker thread is stopped %s x%06x, " 244 + " rport x%px flg x%x load_flag x%x refcnt " 245 + "%d\n", __func__, ndlp->nlp_DID, 246 + ndlp->rport, ndlp->nlp_flag, 247 + vport->load_flag, kref_read(&ndlp->kref)); 248 + if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) { 249 + spin_lock_irqsave(&ndlp->lock, iflags); 250 + /* Node is in dev loss. No further transaction. 
*/ 251 + ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; 252 + spin_unlock_irqrestore(&ndlp->lock, iflags); 253 + lpfc_disc_state_machine(vport, ndlp, NULL, 254 + NLP_EVT_DEVICE_RM); 255 + } 256 + 244 257 } 245 - spin_unlock_irqrestore(&phba->hbalock, iflags); 246 258 247 259 return; 248 260 } ··· 527 503 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 528 504 "0203 Devloss timeout on " 529 505 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " 530 - "NPort x%06x Data: x%x x%x x%x\n", 506 + "NPort x%06x Data: x%x x%x x%x refcnt %d\n", 531 507 *name, *(name+1), *(name+2), *(name+3), 532 508 *(name+4), *(name+5), *(name+6), *(name+7), 533 509 ndlp->nlp_DID, ndlp->nlp_flag, 534 - ndlp->nlp_state, ndlp->nlp_rpi); 510 + ndlp->nlp_state, ndlp->nlp_rpi, 511 + kref_read(&ndlp->kref)); 535 512 } else { 536 513 lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT, 537 514 "0204 Devloss timeout on " ··· 780 755 int free_evt; 781 756 int fcf_inuse; 782 757 uint32_t nlp_did; 758 + bool hba_pci_err; 783 759 784 760 spin_lock_irq(&phba->hbalock); 785 761 while (!list_empty(&phba->work_list)) { 786 762 list_remove_head((&phba->work_list), evtp, typeof(*evtp), 787 763 evt_listp); 788 764 spin_unlock_irq(&phba->hbalock); 765 + hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags); 789 766 free_evt = 1; 790 767 switch (evtp->evt) { 791 768 case LPFC_EVT_ELS_RETRY: 792 769 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1); 793 - lpfc_els_retry_delay_handler(ndlp); 794 - free_evt = 0; /* evt is part of ndlp */ 770 + if (!hba_pci_err) { 771 + lpfc_els_retry_delay_handler(ndlp); 772 + free_evt = 0; /* evt is part of ndlp */ 773 + } 795 774 /* decrement the node reference count held 796 775 * for this queued work 797 776 */ ··· 817 788 break; 818 789 case LPFC_EVT_RECOVER_PORT: 819 790 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); 820 - lpfc_sli_abts_recover_port(ndlp->vport, ndlp); 821 - free_evt = 0; 791 + if (!hba_pci_err) { 792 + lpfc_sli_abts_recover_port(ndlp->vport, ndlp); 793 + free_evt = 0; 
794 + } 822 795 /* decrement the node reference count held for 823 796 * this queued work 824 797 */ ··· 890 859 struct lpfc_vport **vports; 891 860 struct lpfc_vport *vport; 892 861 int i; 862 + bool hba_pci_err; 893 863 864 + hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags); 894 865 spin_lock_irq(&phba->hbalock); 895 866 ha_copy = phba->work_ha; 896 867 phba->work_ha = 0; 897 868 spin_unlock_irq(&phba->hbalock); 869 + if (hba_pci_err) 870 + ha_copy = 0; 898 871 899 872 /* First, try to post the next mailbox command to SLI4 device */ 900 - if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) 873 + if (phba->pci_dev_grp == LPFC_PCI_DEV_OC && !hba_pci_err) 901 874 lpfc_sli4_post_async_mbox(phba); 902 875 903 876 if (ha_copy & HA_ERATT) { ··· 921 886 lpfc_handle_latt(phba); 922 887 923 888 /* Handle VMID Events */ 924 - if (lpfc_is_vmid_enabled(phba)) { 889 + if (lpfc_is_vmid_enabled(phba) && !hba_pci_err) { 925 890 if (phba->pport->work_port_events & 926 891 WORKER_CHECK_VMID_ISSUE_QFPA) { 927 892 lpfc_check_vmid_qfpa_issue(phba); ··· 971 936 work_port_events = vport->work_port_events; 972 937 vport->work_port_events &= ~work_port_events; 973 938 spin_unlock_irq(&vport->work_port_lock); 939 + if (hba_pci_err) 940 + continue; 974 941 if (work_port_events & WORKER_DISC_TMO) 975 942 lpfc_disc_timeout_handler(vport); 976 943 if (work_port_events & WORKER_ELS_TMO) ··· 1210 1173 struct lpfc_vport **vports; 1211 1174 LPFC_MBOXQ_t *mb; 1212 1175 int i; 1176 + int offline; 1213 1177 1214 1178 if (phba->link_state == LPFC_LINK_DOWN) 1215 1179 return 0; 1216 1180 1217 1181 /* Block all SCSI stack I/Os */ 1218 1182 lpfc_scsi_dev_block(phba); 1183 + offline = pci_channel_offline(phba->pcidev); 1219 1184 1220 1185 phba->defer_flogi_acc_flag = false; 1221 1186 ··· 1258 1219 lpfc_destroy_vport_work_array(phba, vports); 1259 1220 1260 1221 /* Clean up any SLI3 firmware default rpi's */ 1261 - if (phba->sli_rev > LPFC_SLI_REV3) 1222 + if (phba->sli_rev > LPFC_SLI_REV3 || offline) 1262 1223 
goto skip_unreg_did; 1263 1224 1264 1225 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); ··· 4751 4712 spin_lock_irqsave(&ndlp->lock, iflags); 4752 4713 if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) { 4753 4714 spin_unlock_irqrestore(&ndlp->lock, iflags); 4715 + lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 4716 + "0999 %s Not regd: ndlp x%px rport x%px DID " 4717 + "x%x FLG x%x XPT x%x\n", 4718 + __func__, ndlp, ndlp->rport, ndlp->nlp_DID, 4719 + ndlp->nlp_flag, ndlp->fc4_xpt_flags); 4754 4720 return; 4755 4721 } 4756 4722 ··· 4766 4722 ndlp->fc4_xpt_flags & SCSI_XPT_REGD) { 4767 4723 vport->phba->nport_event_cnt++; 4768 4724 lpfc_unregister_remote_port(ndlp); 4725 + } else if (!ndlp->rport) { 4726 + lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 4727 + "1999 %s NDLP in devloss x%px DID x%x FLG x%x" 4728 + " XPT x%x refcnt %d\n", 4729 + __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag, 4730 + ndlp->fc4_xpt_flags, 4731 + kref_read(&ndlp->kref)); 4769 4732 } 4770 4733 4771 4734 if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) { ··· 5422 5371 ndlp->nlp_flag &= ~NLP_UNREG_INP; 5423 5372 mempool_free(mbox, phba->mbox_mem_pool); 5424 5373 acc_plogi = 1; 5374 + lpfc_nlp_put(ndlp); 5425 5375 } 5426 5376 } else { 5427 5377 lpfc_printf_vlog(vport, KERN_INFO, ··· 6149 6097 } 6150 6098 } 6151 6099 6100 + /* 6101 + * lpfc_notify_xport_npr - notifies xport of node disappearance 6102 + * @vport: Pointer to Virtual Port object. 6103 + * 6104 + * Transitions all ndlps to NPR state. When lpfc_nlp_set_state 6105 + * calls lpfc_nlp_state_cleanup, the ndlp->rport is unregistered 6106 + * and transport notified that the node is gone. 
6107 + * Return Code: 6108 + * none 6109 + */ 6110 + static void 6111 + lpfc_notify_xport_npr(struct lpfc_vport *vport) 6112 + { 6113 + struct lpfc_nodelist *ndlp, *next_ndlp; 6114 + 6115 + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, 6116 + nlp_listp) { 6117 + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 6118 + } 6119 + } 6152 6120 void 6153 6121 lpfc_cleanup_discovery_resources(struct lpfc_vport *vport) 6154 6122 { 6155 6123 lpfc_els_flush_rscn(vport); 6156 6124 lpfc_els_flush_cmd(vport); 6157 6125 lpfc_disc_flush_list(vport); 6126 + if (pci_channel_offline(vport->phba->pcidev)) 6127 + lpfc_notify_xport_npr(vport); 6158 6128 } 6159 6129 6160 6130 /*****************************************************************************/
+63 -25
drivers/scsi/lpfc/lpfc_init.c
··· 95 95 static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int); 96 96 static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *); 97 97 static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *); 98 + static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba); 98 99 99 100 static struct scsi_transport_template *lpfc_transport_template = NULL; 100 101 static struct scsi_transport_template *lpfc_vport_transport_template = NULL; ··· 1643 1642 { 1644 1643 spin_lock_irq(&phba->hbalock); 1645 1644 if (phba->link_state == LPFC_HBA_ERROR && 1646 - phba->hba_flag & HBA_PCI_ERR) { 1645 + test_bit(HBA_PCI_ERR, &phba->bit_flags)) { 1647 1646 spin_unlock_irq(&phba->hbalock); 1648 1647 return; 1649 1648 } ··· 1986 1985 if (pci_channel_offline(phba->pcidev)) { 1987 1986 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1988 1987 "3166 pci channel is offline\n"); 1988 + lpfc_sli_flush_io_rings(phba); 1989 1989 return; 1990 1990 } 1991 1991 ··· 2975 2973 NLP_EVT_DEVICE_RM); 2976 2974 } 2977 2975 2976 + /* This is a special case flush to return all 2977 + * IOs before entering this loop. There are 2978 + * two points in the code where a flush is 2979 + * avoided if the FC_UNLOADING flag is set. 2980 + * one is in the multipool destroy, 2981 + * (this prevents a crash) and the other is 2982 + * in the nvme abort handler, ( also prevents 2983 + * a crash). Both of these exceptions are 2984 + * cases where the slot is still accessible. 2985 + * The flush here is only when the pci slot 2986 + * is offline. 2987 + */ 2988 + if (vport->load_flag & FC_UNLOADING && 2989 + pci_channel_offline(phba->pcidev)) 2990 + lpfc_sli_flush_io_rings(vport->phba); 2991 + 2978 2992 /* At this point, ALL ndlp's should be gone 2979 2993 * because of the previous NLP_EVT_DEVICE_RM. 2980 2994 * Lets wait for this to happen, if needed. 
··· 3003 2985 list_for_each_entry_safe(ndlp, next_ndlp, 3004 2986 &vport->fc_nodes, nlp_listp) { 3005 2987 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 3006 - LOG_TRACE_EVENT, 2988 + LOG_DISCOVERY, 3007 2989 "0282 did:x%x ndlp:x%px " 3008 2990 "refcnt:%d xflags x%x nflag x%x\n", 3009 2991 ndlp->nlp_DID, (void *)ndlp, ··· 3700 3682 struct lpfc_vport **vports; 3701 3683 struct Scsi_Host *shost; 3702 3684 int i; 3703 - int offline = 0; 3685 + int offline; 3686 + bool hba_pci_err; 3704 3687 3705 3688 if (vport->fc_flag & FC_OFFLINE_MODE) 3706 3689 return; ··· 3711 3692 lpfc_linkdown(phba); 3712 3693 3713 3694 offline = pci_channel_offline(phba->pcidev); 3695 + hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags); 3714 3696 3715 3697 /* Issue an unreg_login to all nodes on all vports */ 3716 3698 vports = lpfc_create_vport_work_array(phba); ··· 3735 3715 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 3736 3716 spin_unlock_irq(&ndlp->lock); 3737 3717 3738 - if (offline) { 3718 + if (offline || hba_pci_err) { 3739 3719 spin_lock_irq(&ndlp->lock); 3740 3720 ndlp->nlp_flag &= ~(NLP_UNREG_INP | 3741 3721 NLP_RPI_REGISTERED); 3742 3722 spin_unlock_irq(&ndlp->lock); 3723 + if (phba->sli_rev == LPFC_SLI_REV4) 3724 + lpfc_sli_rpi_release(vports[i], 3725 + ndlp); 3743 3726 } else { 3744 3727 lpfc_unreg_rpi(vports[i], ndlp); 3745 3728 } ··· 13377 13354 /* Abort all iocbs associated with the hba */ 13378 13355 lpfc_sli_hba_iocb_abort(phba); 13379 13356 13380 - /* Wait for completion of device XRI exchange busy */ 13381 - lpfc_sli4_xri_exchange_busy_wait(phba); 13357 + if (!pci_channel_offline(phba->pcidev)) 13358 + /* Wait for completion of device XRI exchange busy */ 13359 + lpfc_sli4_xri_exchange_busy_wait(phba); 13382 13360 13383 13361 /* per-phba callback de-registration for hotplug event */ 13384 13362 if (phba->pport) ··· 13398 13374 /* Disable FW logging to host memory */ 13399 13375 lpfc_ras_stop_fwlog(phba); 13400 13376 13401 - /* Unset the queues shared with the hardware then release all 
13402 - * allocated resources. 13403 - */ 13404 - lpfc_sli4_queue_unset(phba); 13405 - lpfc_sli4_queue_destroy(phba); 13406 - 13407 13377 /* Reset SLI4 HBA FCoE function */ 13408 13378 lpfc_pci_function_reset(phba); 13379 + 13380 + /* release all queue allocated resources. */ 13381 + lpfc_sli4_queue_destroy(phba); 13409 13382 13410 13383 /* Free RAS DMA memory */ 13411 13384 if (phba->ras_fwlog.ras_enabled) ··· 14283 14262 "2711 PCI channel permanent disable for failure\n"); 14284 14263 /* Block all SCSI devices' I/Os on the host */ 14285 14264 lpfc_scsi_dev_block(phba); 14265 + lpfc_sli4_prep_dev_for_reset(phba); 14286 14266 14287 14267 /* stop all timers */ 14288 14268 lpfc_stop_hba_timers(phba); ··· 15079 15057 static void 15080 15058 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 15081 15059 { 15082 - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15083 - "2826 PCI channel disable preparing for reset\n"); 15060 + int offline = pci_channel_offline(phba->pcidev); 15061 + 15062 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15063 + "2826 PCI channel disable preparing for reset offline" 15064 + " %d\n", offline); 15084 15065 15085 15066 /* Block any management I/Os to the device */ 15086 15067 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT); 15087 15068 15088 - /* Block all SCSI devices' I/Os on the host */ 15089 - lpfc_scsi_dev_block(phba); 15090 15069 15070 + /* HBA_PCI_ERR was set in io_error_detect */ 15071 + lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 15091 15072 /* Flush all driver's outstanding I/Os as we are to reset */ 15092 15073 lpfc_sli_flush_io_rings(phba); 15074 + lpfc_offline(phba); 15093 15075 15094 15076 /* stop all timers */ 15095 15077 lpfc_stop_hba_timers(phba); 15096 15078 15079 + lpfc_sli4_queue_destroy(phba); 15097 15080 /* Disable interrupt and pci device */ 15098 15081 lpfc_sli4_disable_intr(phba); 15099 - lpfc_sli4_queue_destroy(phba); 15100 15082 pci_disable_device(phba->pcidev); 15101 15083 } 15102 15084 ··· 15149 15123 { 15150 15124 
struct Scsi_Host *shost = pci_get_drvdata(pdev); 15151 15125 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15126 + bool hba_pci_err; 15152 15127 15153 15128 switch (state) { 15154 15129 case pci_channel_io_normal: ··· 15157 15130 lpfc_sli4_prep_dev_for_recover(phba); 15158 15131 return PCI_ERS_RESULT_CAN_RECOVER; 15159 15132 case pci_channel_io_frozen: 15160 - phba->hba_flag |= HBA_PCI_ERR; 15133 + hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags); 15161 15134 /* Fatal error, prepare for slot reset */ 15162 - lpfc_sli4_prep_dev_for_reset(phba); 15135 + if (!hba_pci_err) 15136 + lpfc_sli4_prep_dev_for_reset(phba); 15137 + else 15138 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15139 + "2832 Already handling PCI error " 15140 + "state: x%x\n", state); 15163 15141 return PCI_ERS_RESULT_NEED_RESET; 15164 15142 case pci_channel_io_perm_failure: 15165 - phba->hba_flag |= HBA_PCI_ERR; 15143 + set_bit(HBA_PCI_ERR, &phba->bit_flags); 15166 15144 /* Permanent failure, prepare for device down */ 15167 15145 lpfc_sli4_prep_dev_for_perm_failure(phba); 15168 15146 return PCI_ERS_RESULT_DISCONNECT; 15169 15147 default: 15170 - phba->hba_flag |= HBA_PCI_ERR; 15148 + hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags); 15149 + if (!hba_pci_err) 15150 + lpfc_sli4_prep_dev_for_reset(phba); 15171 15151 /* Unknown state, prepare and request slot reset */ 15172 15152 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15173 15153 "2825 Unknown PCI error state: x%x\n", state); ··· 15208 15174 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15209 15175 struct lpfc_sli *psli = &phba->sli; 15210 15176 uint32_t intr_mode; 15177 + bool hba_pci_err; 15211 15178 15212 15179 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 15213 15180 if (pci_enable_device_mem(pdev)) { 15214 15181 printk(KERN_ERR "lpfc: Cannot re-enable " 15215 - "PCI device after reset.\n"); 15182 + "PCI device after reset.\n"); 15216 15183 
return PCI_ERS_RESULT_DISCONNECT; 15217 15184 } 15218 15185 15219 15186 pci_restore_state(pdev); 15220 15187 15221 - phba->hba_flag &= ~HBA_PCI_ERR; 15188 + hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags); 15189 + if (!hba_pci_err) 15190 + dev_info(&pdev->dev, 15191 + "hba_pci_err was not set, recovering slot reset.\n"); 15222 15192 /* 15223 15193 * As the new kernel behavior of pci_restore_state() API call clears 15224 15194 * device saved_state flag, need to save the restored state again. ··· 15236 15198 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 15237 15199 spin_unlock_irq(&phba->hbalock); 15238 15200 15201 + /* Init cpu_map array */ 15202 + lpfc_cpu_map_array_init(phba); 15239 15203 /* Configure and enable interrupt */ 15240 15204 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 15241 15205 if (intr_mode == LPFC_INTR_ERROR) { ··· 15279 15239 */ 15280 15240 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { 15281 15241 /* Perform device reset */ 15282 - lpfc_offline_prep(phba, LPFC_MBX_WAIT); 15283 - lpfc_offline(phba); 15284 15242 lpfc_sli_brdrestart(phba); 15285 15243 /* Bring the device back online */ 15286 15244 lpfc_online(phba);
+18 -9
drivers/scsi/lpfc/lpfc_nvme.c
··· 93 93 94 94 lport = (struct lpfc_nvme_lport *)pnvme_lport->private; 95 95 vport = lport->vport; 96 + 97 + if (!vport || vport->load_flag & FC_UNLOADING || 98 + vport->phba->hba_flag & HBA_IOQ_FLUSH) 99 + return -ENODEV; 100 + 96 101 qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL); 97 102 if (qhandle == NULL) 98 103 return -ENOMEM; ··· 272 267 return -EINVAL; 273 268 274 269 remoteport = lpfc_rport->remoteport; 275 - if (!vport->localport) 270 + if (!vport->localport || 271 + vport->phba->hba_flag & HBA_IOQ_FLUSH) 276 272 return -EINVAL; 277 273 278 274 lport = vport->localport->private; ··· 565 559 ndlp->nlp_DID, ntype, nstate); 566 560 return -ENODEV; 567 561 } 562 + if (vport->phba->hba_flag & HBA_IOQ_FLUSH) 563 + return -ENODEV; 568 564 569 565 if (!vport->phba->sli4_hba.nvmels_wq) 570 566 return -ENOMEM; ··· 670 662 return -EINVAL; 671 663 672 664 vport = lport->vport; 673 - if (vport->load_flag & FC_UNLOADING) 665 + if (vport->load_flag & FC_UNLOADING || 666 + vport->phba->hba_flag & HBA_IOQ_FLUSH) 674 667 return -ENODEV; 675 668 676 669 atomic_inc(&lport->fc4NvmeLsRequests); ··· 1525 1516 1526 1517 phba = vport->phba; 1527 1518 1528 - if (unlikely(vport->load_flag & FC_UNLOADING)) { 1519 + if ((unlikely(vport->load_flag & FC_UNLOADING)) || 1520 + phba->hba_flag & HBA_IOQ_FLUSH) { 1529 1521 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1530 1522 "6124 Fail IO, Driver unload\n"); 1531 1523 atomic_inc(&lport->xmt_fcp_err); ··· 2179 2169 abts_nvme = 0; 2180 2170 for (i = 0; i < phba->cfg_hdw_queue; i++) { 2181 2171 qp = &phba->sli4_hba.hdwq[i]; 2182 - if (!vport || !vport->localport || 2183 - !qp || !qp->io_wq) 2172 + if (!vport->localport || !qp || !qp->io_wq) 2184 2173 return; 2185 2174 2186 2175 pring = qp->io_wq->pring; ··· 2189 2180 abts_scsi += qp->abts_scsi_io_bufs; 2190 2181 abts_nvme += qp->abts_nvme_io_bufs; 2191 2182 } 2192 - if (!vport || !vport->localport || 2193 - vport->phba->hba_flag & HBA_PCI_ERR) 2183 + if 
(!vport->localport || 2184 + test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) || 2185 + vport->load_flag & FC_UNLOADING) 2194 2186 return; 2195 2187 2196 2188 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, ··· 2551 2541 * return values is ignored. The upcall is a courtesy to the 2552 2542 * transport. 2553 2543 */ 2554 - if (vport->load_flag & FC_UNLOADING || 2555 - unlikely(vport->phba->hba_flag & HBA_PCI_ERR)) 2544 + if (vport->load_flag & FC_UNLOADING) 2556 2545 (void)nvme_fc_set_remoteport_devloss(remoteport, 0); 2557 2546 2558 2547 ret = nvme_fc_unregister_remoteport(remoteport);
+7 -5
drivers/scsi/lpfc/lpfc_scsi.c
··· 5929 5929 } 5930 5930 5931 5931 lpfc_cmd->waitq = &waitq; 5932 - if (phba->sli_rev == LPFC_SLI_REV4) 5932 + if (phba->sli_rev == LPFC_SLI_REV4) { 5933 5933 spin_unlock(&pring_s4->ring_lock); 5934 - else 5934 + ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb, 5935 + lpfc_sli_abort_fcp_cmpl); 5936 + } else { 5935 5937 pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; 5936 - 5937 - ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb, 5938 - lpfc_sli_abort_fcp_cmpl); 5938 + ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb, 5939 + lpfc_sli_abort_fcp_cmpl); 5940 + } 5939 5941 5940 5942 /* Make sure HBA is alive */ 5941 5943 lpfc_issue_hb_tmo(phba);
+53 -22
drivers/scsi/lpfc/lpfc_sli.c
··· 2828 2828 ndlp->nlp_flag &= ~NLP_UNREG_INP; 2829 2829 } 2830 2830 2831 + void 2832 + lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 2833 + { 2834 + __lpfc_sli_rpi_release(vport, ndlp); 2835 + } 2836 + 2831 2837 /** 2832 2838 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler 2833 2839 * @phba: Pointer to HBA context object. ··· 3721 3715 unsigned long iflag; 3722 3716 u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag; 3723 3717 3718 + if (phba->sli_rev == LPFC_SLI_REV4) 3719 + spin_lock_irqsave(&pring->ring_lock, iflag); 3720 + else 3721 + spin_lock_irqsave(&phba->hbalock, iflag); 3724 3722 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 3723 + if (phba->sli_rev == LPFC_SLI_REV4) 3724 + spin_unlock_irqrestore(&pring->ring_lock, iflag); 3725 + else 3726 + spin_unlock_irqrestore(&phba->hbalock, iflag); 3725 3727 3726 3728 ulp_command = get_job_cmnd(phba, saveq); 3727 3729 ulp_status = get_job_ulpstatus(phba, saveq); ··· 4066 4052 break; 4067 4053 } 4068 4054 4069 - spin_unlock_irqrestore(&phba->hbalock, iflag); 4070 4055 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 4071 4056 &rspiocbq); 4072 - spin_lock_irqsave(&phba->hbalock, iflag); 4073 4057 if (unlikely(!cmdiocbq)) 4074 4058 break; 4075 4059 if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) ··· 4548 4536 void 4549 4537 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 4550 4538 { 4551 - LIST_HEAD(completions); 4539 + LIST_HEAD(tx_completions); 4540 + LIST_HEAD(txcmplq_completions); 4552 4541 struct lpfc_iocbq *iocb, *next_iocb; 4542 + int offline; 4553 4543 4554 4544 if (pring->ringno == LPFC_ELS_RING) { 4555 4545 lpfc_fabric_abort_hba(phba); 4556 4546 } 4547 + offline = pci_channel_offline(phba->pcidev); 4557 4548 4558 4549 /* Error everything on txq and txcmplq 4559 4550 * First do the txq. 
4560 4551 */ 4561 4552 if (phba->sli_rev >= LPFC_SLI_REV4) { 4562 4553 spin_lock_irq(&pring->ring_lock); 4563 - list_splice_init(&pring->txq, &completions); 4554 + list_splice_init(&pring->txq, &tx_completions); 4564 4555 pring->txq_cnt = 0; 4565 - spin_unlock_irq(&pring->ring_lock); 4566 4556 4567 - spin_lock_irq(&phba->hbalock); 4568 - /* Next issue ABTS for everything on the txcmplq */ 4569 - list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 4570 - lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL); 4571 - spin_unlock_irq(&phba->hbalock); 4557 + if (offline) { 4558 + list_splice_init(&pring->txcmplq, 4559 + &txcmplq_completions); 4560 + } else { 4561 + /* Next issue ABTS for everything on the txcmplq */ 4562 + list_for_each_entry_safe(iocb, next_iocb, 4563 + &pring->txcmplq, list) 4564 + lpfc_sli_issue_abort_iotag(phba, pring, 4565 + iocb, NULL); 4566 + } 4567 + spin_unlock_irq(&pring->ring_lock); 4572 4568 } else { 4573 4569 spin_lock_irq(&phba->hbalock); 4574 - list_splice_init(&pring->txq, &completions); 4570 + list_splice_init(&pring->txq, &tx_completions); 4575 4571 pring->txq_cnt = 0; 4576 4572 4577 - /* Next issue ABTS for everything on the txcmplq */ 4578 - list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 4579 - lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL); 4573 + if (offline) { 4574 + list_splice_init(&pring->txcmplq, &txcmplq_completions); 4575 + } else { 4576 + /* Next issue ABTS for everything on the txcmplq */ 4577 + list_for_each_entry_safe(iocb, next_iocb, 4578 + &pring->txcmplq, list) 4579 + lpfc_sli_issue_abort_iotag(phba, pring, 4580 + iocb, NULL); 4581 + } 4580 4582 spin_unlock_irq(&phba->hbalock); 4581 4583 } 4582 - /* Make sure HBA is alive */ 4583 - lpfc_issue_hb_tmo(phba); 4584 4584 4585 + if (offline) { 4586 + /* Cancel all the IOCBs from the completions list */ 4587 + lpfc_sli_cancel_iocbs(phba, &txcmplq_completions, 4588 + IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 4589 + } else { 4590 + /* Make sure 
HBA is alive */ 4591 + lpfc_issue_hb_tmo(phba); 4592 + } 4585 4593 /* Cancel all the IOCBs from the completions list */ 4586 - lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 4594 + lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT, 4587 4595 IOERR_SLI_ABORTED); 4588 4596 } 4589 4597 ··· 4656 4624 struct lpfc_iocbq *piocb, *next_iocb; 4657 4625 4658 4626 spin_lock_irq(&phba->hbalock); 4659 - if (phba->hba_flag & HBA_IOQ_FLUSH || 4660 - !phba->sli4_hba.hdwq) { 4661 - spin_unlock_irq(&phba->hbalock); 4662 - return; 4663 - } 4664 4627 /* Indicate the I/O queues are flushed */ 4665 4628 phba->hba_flag |= HBA_IOQ_FLUSH; 4666 4629 spin_unlock_irq(&phba->hbalock); ··· 11023 10996 struct lpfc_queue *eq; 11024 10997 unsigned long iflags; 11025 10998 int rc; 10999 + 11000 + /* If the PCI channel is in offline state, do not post iocbs. */ 11001 + if (unlikely(pci_channel_offline(phba->pcidev))) 11002 + return IOCB_ERROR; 11026 11003 11027 11004 if (phba->sli_rev == LPFC_SLI_REV4) { 11028 11005 lpfc_sli_prep_wqe(phba, piocb);
+1 -1
drivers/scsi/lpfc/lpfc_version.h
··· 20 20 * included with this package. * 21 21 *******************************************************************/ 22 22 23 - #define LPFC_DRIVER_VERSION "14.2.0.0" 23 + #define LPFC_DRIVER_VERSION "14.2.0.1" 24 24 #define LPFC_DRIVER_NAME "lpfc" 25 25 26 26 /* Used for SLI 2/3 */
+3
drivers/scsi/megaraid/megaraid_sas.h
··· 2560 2560 #define MEGASAS_IS_LOGICAL(sdev) \ 2561 2561 ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1) 2562 2562 2563 + #define MEGASAS_IS_LUN_VALID(sdev) \ 2564 + (((sdev)->lun == 0) ? 1 : 0) 2565 + 2563 2566 #define MEGASAS_DEV_INDEX(scp) \ 2564 2567 (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \ 2565 2568 scp->device->id)
+7
drivers/scsi/megaraid/megaraid_sas_base.c
··· 2126 2126 goto scan_target; 2127 2127 } 2128 2128 return -ENXIO; 2129 + } else if (!MEGASAS_IS_LUN_VALID(sdev)) { 2130 + sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__); 2131 + return -ENXIO; 2129 2132 } 2130 2133 2131 2134 scan_target: ··· 2159 2156 instance = megasas_lookup_instance(sdev->host->host_no); 2160 2157 2161 2158 if (MEGASAS_IS_LOGICAL(sdev)) { 2159 + if (!MEGASAS_IS_LUN_VALID(sdev)) { 2160 + sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__); 2161 + return; 2162 + } 2162 2163 ld_tgt_id = MEGASAS_TARGET_ID(sdev); 2163 2164 instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED; 2164 2165 if (megasas_dbg_lvl & LD_PD_DEBUG)
+1 -2
drivers/scsi/mpt3sas/mpt3sas_base.c
··· 5716 5716 /** 5717 5717 * mpt3sas_check_same_4gb_region - checks whether all reply queues in a set are 5718 5718 * having same upper 32bits in their base memory address. 5719 - * @reply_pool_start_address: Base address of a reply queue set 5719 + * @start_address: Base address of a reply queue set 5720 5720 * @pool_sz: Size of single Reply Descriptor Post Queues pool size 5721 5721 * 5722 5722 * Return: 1 if reply queues in a set have a same upper 32bits in their base 5723 5723 * memory address, else 0. 5724 5724 */ 5725 - 5726 5725 static int 5727 5726 mpt3sas_check_same_4gb_region(dma_addr_t start_address, u32 pool_sz) 5728 5727 {
+6 -3
drivers/scsi/mpt3sas/mpt3sas_config.c
··· 394 394 retry_count++; 395 395 if (ioc->config_cmds.smid == smid) 396 396 mpt3sas_base_free_smid(ioc, smid); 397 - if ((ioc->shost_recovery) || (ioc->config_cmds.status & 398 - MPT3_CMD_RESET) || ioc->pci_error_recovery) 397 + if (ioc->config_cmds.status & MPT3_CMD_RESET) 399 398 goto retry_config; 400 - issue_host_reset = 1; 399 + if (ioc->shost_recovery || ioc->pci_error_recovery) { 400 + issue_host_reset = 0; 401 + r = -EFAULT; 402 + } else 403 + issue_host_reset = 1; 401 404 goto free_mem; 402 405 } 403 406
+4 -1
drivers/scsi/mpt3sas/mpt3sas_scsih.c
··· 11035 11035 { 11036 11036 struct _sas_port *mpt3sas_port, *next; 11037 11037 unsigned long flags; 11038 + int port_id; 11038 11039 11039 11040 /* remove sibling ports attached to this expander */ 11040 11041 list_for_each_entry_safe(mpt3sas_port, next, ··· 11056 11055 mpt3sas_port->hba_port); 11057 11056 } 11058 11057 11058 + port_id = sas_expander->port->port_id; 11059 + 11059 11060 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address, 11060 11061 sas_expander->sas_address_parent, sas_expander->port); 11061 11062 ··· 11065 11062 "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n", 11066 11063 sas_expander->handle, (unsigned long long) 11067 11064 sas_expander->sas_address, 11068 - sas_expander->port->port_id); 11065 + port_id); 11069 11066 11070 11067 spin_lock_irqsave(&ioc->sas_node_lock, flags); 11071 11068 list_del(&sas_expander->list);
+1
drivers/scsi/mvsas/mv_init.c
··· 647 647 { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 }, 648 648 { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 }, 649 649 { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 }, 650 + { PCI_VDEVICE(TTI, 0x2640), chip_6440 }, 650 651 { PCI_VDEVICE(TTI, 0x2710), chip_9480 }, 651 652 { PCI_VDEVICE(TTI, 0x2720), chip_9480 }, 652 653 { PCI_VDEVICE(TTI, 0x2721), chip_9480 },
+25 -27
drivers/scsi/pcmcia/sym53c500_cs.c
··· 192 192 int fast_pio; 193 193 }; 194 194 195 - static struct scsi_pointer *sym53c500_scsi_pointer(struct scsi_cmnd *cmd) 196 - { 197 - return scsi_cmd_priv(cmd); 198 - } 195 + struct sym53c500_cmd_priv { 196 + int status; 197 + int message; 198 + int phase; 199 + }; 199 200 200 201 enum Phase { 201 202 idle, ··· 357 356 struct sym53c500_data *data = 358 357 (struct sym53c500_data *)dev->hostdata; 359 358 struct scsi_cmnd *curSC = data->current_SC; 360 - struct scsi_pointer *scsi_pointer = sym53c500_scsi_pointer(curSC); 359 + struct sym53c500_cmd_priv *scp = scsi_cmd_priv(curSC); 361 360 int fast_pio = data->fast_pio; 362 361 363 362 spin_lock_irqsave(dev->host_lock, flags); ··· 404 403 405 404 if (int_reg & 0x20) { /* Disconnect */ 406 405 DEB(printk("SYM53C500: disconnect intr received\n")); 407 - if (scsi_pointer->phase != message_in) { /* Unexpected disconnect */ 406 + if (scp->phase != message_in) { /* Unexpected disconnect */ 408 407 curSC->result = DID_NO_CONNECT << 16; 409 408 } else { /* Command complete, return status and message */ 410 - curSC->result = (scsi_pointer->Status & 0xff) | 411 - ((scsi_pointer->Message & 0xff) << 8) | 412 - (DID_OK << 16); 409 + curSC->result = (scp->status & 0xff) | 410 + ((scp->message & 0xff) << 8) | (DID_OK << 16); 413 411 } 414 412 goto idle_out; 415 413 } ··· 419 419 struct scatterlist *sg; 420 420 int i; 421 421 422 - scsi_pointer->phase = data_out; 422 + scp->phase = data_out; 423 423 VDEB(printk("SYM53C500: Data-Out phase\n")); 424 424 outb(FLUSH_FIFO, port_base + CMD_REG); 425 425 LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */ ··· 438 438 struct scatterlist *sg; 439 439 int i; 440 440 441 - scsi_pointer->phase = data_in; 441 + scp->phase = data_in; 442 442 VDEB(printk("SYM53C500: Data-In phase\n")); 443 443 outb(FLUSH_FIFO, port_base + CMD_REG); 444 444 LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */ ··· 453 453 break; 454 454 455 455 case 0x02: /* COMMAND */ 456 - 
scsi_pointer->phase = command_ph; 456 + scp->phase = command_ph; 457 457 printk("SYM53C500: Warning: Unknown interrupt occurred in command phase!\n"); 458 458 break; 459 459 460 460 case 0x03: /* STATUS */ 461 - scsi_pointer->phase = status_ph; 461 + scp->phase = status_ph; 462 462 VDEB(printk("SYM53C500: Status phase\n")); 463 463 outb(FLUSH_FIFO, port_base + CMD_REG); 464 464 outb(INIT_CMD_COMPLETE, port_base + CMD_REG); ··· 471 471 472 472 case 0x06: /* MESSAGE-OUT */ 473 473 DEB(printk("SYM53C500: Message-Out phase\n")); 474 - scsi_pointer->phase = message_out; 474 + scp->phase = message_out; 475 475 outb(SET_ATN, port_base + CMD_REG); /* Reject the message */ 476 476 outb(MSG_ACCEPT, port_base + CMD_REG); 477 477 break; 478 478 479 479 case 0x07: /* MESSAGE-IN */ 480 480 VDEB(printk("SYM53C500: Message-In phase\n")); 481 - scsi_pointer->phase = message_in; 481 + scp->phase = message_in; 482 482 483 - scsi_pointer->Status = inb(port_base + SCSI_FIFO); 484 - scsi_pointer->Message = inb(port_base + SCSI_FIFO); 483 + scp->status = inb(port_base + SCSI_FIFO); 484 + scp->message = inb(port_base + SCSI_FIFO); 485 485 486 486 VDEB(printk("SCSI FIFO size=%d\n", inb(port_base + FIFO_FLAGS) & 0x1f)); 487 - DEB(printk("Status = %02x Message = %02x\n", 488 - scsi_pointer->Status, scsi_pointer->Message)); 487 + DEB(printk("Status = %02x Message = %02x\n", scp->status, scp->message)); 489 488 490 - if (scsi_pointer->Message == SAVE_POINTERS || 491 - scsi_pointer->Message == DISCONNECT) { 489 + if (scp->message == SAVE_POINTERS || scp->message == DISCONNECT) { 492 490 outb(SET_ATN, port_base + CMD_REG); /* Reject message */ 493 491 DEB(printk("Discarding SAVE_POINTERS message\n")); 494 492 } ··· 498 500 return IRQ_HANDLED; 499 501 500 502 idle_out: 501 - scsi_pointer->phase = idle; 503 + scp->phase = idle; 502 504 scsi_done(curSC); 503 505 goto out; 504 506 } ··· 546 548 547 549 static int SYM53C500_queue_lck(struct scsi_cmnd *SCpnt) 548 550 { 549 - struct scsi_pointer 
*scsi_pointer = sym53c500_scsi_pointer(SCpnt); 551 + struct sym53c500_cmd_priv *scp = scsi_cmd_priv(SCpnt); 550 552 int i; 551 553 int port_base = SCpnt->device->host->io_port; 552 554 struct sym53c500_data *data = ··· 563 565 VDEB(printk("\n")); 564 566 565 567 data->current_SC = SCpnt; 566 - scsi_pointer->phase = command_ph; 567 - scsi_pointer->Status = 0; 568 - scsi_pointer->Message = 0; 568 + scp->phase = command_ph; 569 + scp->status = 0; 570 + scp->message = 0; 569 571 570 572 /* We are locked here already by the mid layer */ 571 573 REG0(port_base); ··· 680 682 .this_id = 7, 681 683 .sg_tablesize = 32, 682 684 .shost_groups = SYM53C500_shost_groups, 683 - .cmd_size = sizeof(struct scsi_pointer), 685 + .cmd_size = sizeof(struct sym53c500_cmd_priv), 684 686 }; 685 687 686 688 static int SYM53C500_config_check(struct pcmcia_device *p_dev, void *priv_data)
-491
drivers/scsi/pmcraid.c
··· 3182 3182 } 3183 3183 3184 3184 /** 3185 - * pmcraid_free_sglist - Frees an allocated SG buffer list 3186 - * @sglist: scatter/gather list pointer 3187 - * 3188 - * Free a DMA'able memory previously allocated with pmcraid_alloc_sglist 3189 - * 3190 - * Return value: 3191 - * none 3192 - */ 3193 - static void pmcraid_free_sglist(struct pmcraid_sglist *sglist) 3194 - { 3195 - sgl_free_order(sglist->scatterlist, sglist->order); 3196 - kfree(sglist); 3197 - } 3198 - 3199 - /** 3200 - * pmcraid_alloc_sglist - Allocates memory for a SG list 3201 - * @buflen: buffer length 3202 - * 3203 - * Allocates a DMA'able buffer in chunks and assembles a scatter/gather 3204 - * list. 3205 - * 3206 - * Return value 3207 - * pointer to sglist / NULL on failure 3208 - */ 3209 - static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen) 3210 - { 3211 - struct pmcraid_sglist *sglist; 3212 - int sg_size; 3213 - int order; 3214 - 3215 - sg_size = buflen / (PMCRAID_MAX_IOADLS - 1); 3216 - order = (sg_size > 0) ? 
get_order(sg_size) : 0; 3217 - 3218 - /* Allocate a scatter/gather list for the DMA */ 3219 - sglist = kzalloc(sizeof(struct pmcraid_sglist), GFP_KERNEL); 3220 - if (sglist == NULL) 3221 - return NULL; 3222 - 3223 - sglist->order = order; 3224 - sgl_alloc_order(buflen, order, false, GFP_KERNEL | __GFP_ZERO, 3225 - &sglist->num_sg); 3226 - 3227 - return sglist; 3228 - } 3229 - 3230 - /** 3231 - * pmcraid_copy_sglist - Copy user buffer to kernel buffer's SG list 3232 - * @sglist: scatter/gather list pointer 3233 - * @buffer: buffer pointer 3234 - * @len: buffer length 3235 - * @direction: data transfer direction 3236 - * 3237 - * Copy a user buffer into a buffer allocated by pmcraid_alloc_sglist 3238 - * 3239 - * Return value: 3240 - * 0 on success / other on failure 3241 - */ 3242 - static int pmcraid_copy_sglist( 3243 - struct pmcraid_sglist *sglist, 3244 - void __user *buffer, 3245 - u32 len, 3246 - int direction 3247 - ) 3248 - { 3249 - struct scatterlist *sg; 3250 - void *kaddr; 3251 - int bsize_elem; 3252 - int i; 3253 - int rc = 0; 3254 - 3255 - /* Determine the actual number of bytes per element */ 3256 - bsize_elem = PAGE_SIZE * (1 << sglist->order); 3257 - 3258 - sg = sglist->scatterlist; 3259 - 3260 - for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg), buffer += bsize_elem) { 3261 - struct page *page = sg_page(sg); 3262 - 3263 - kaddr = kmap(page); 3264 - if (direction == DMA_TO_DEVICE) 3265 - rc = copy_from_user(kaddr, buffer, bsize_elem); 3266 - else 3267 - rc = copy_to_user(buffer, kaddr, bsize_elem); 3268 - 3269 - kunmap(page); 3270 - 3271 - if (rc) { 3272 - pmcraid_err("failed to copy user data into sg list\n"); 3273 - return -EFAULT; 3274 - } 3275 - 3276 - sg->length = bsize_elem; 3277 - } 3278 - 3279 - if (len % bsize_elem) { 3280 - struct page *page = sg_page(sg); 3281 - 3282 - kaddr = kmap(page); 3283 - 3284 - if (direction == DMA_TO_DEVICE) 3285 - rc = copy_from_user(kaddr, buffer, len % bsize_elem); 3286 - else 3287 - rc = 
copy_to_user(buffer, kaddr, len % bsize_elem); 3288 - 3289 - kunmap(page); 3290 - 3291 - sg->length = len % bsize_elem; 3292 - } 3293 - 3294 - if (rc) { 3295 - pmcraid_err("failed to copy user data into sg list\n"); 3296 - rc = -EFAULT; 3297 - } 3298 - 3299 - return rc; 3300 - } 3301 - 3302 - /** 3303 3185 * pmcraid_queuecommand_lck - Queue a mid-layer request 3304 3186 * @scsi_cmd: scsi command struct 3305 3187 * ··· 3336 3454 return rc; 3337 3455 } 3338 3456 3339 - 3340 - /** 3341 - * pmcraid_build_passthrough_ioadls - builds SG elements for passthrough 3342 - * commands sent over IOCTL interface 3343 - * 3344 - * @cmd : pointer to struct pmcraid_cmd 3345 - * @buflen : length of the request buffer 3346 - * @direction : data transfer direction 3347 - * 3348 - * Return value 3349 - * 0 on success, non-zero error code on failure 3350 - */ 3351 - static int pmcraid_build_passthrough_ioadls( 3352 - struct pmcraid_cmd *cmd, 3353 - int buflen, 3354 - int direction 3355 - ) 3356 - { 3357 - struct pmcraid_sglist *sglist = NULL; 3358 - struct scatterlist *sg = NULL; 3359 - struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; 3360 - struct pmcraid_ioadl_desc *ioadl; 3361 - int i; 3362 - 3363 - sglist = pmcraid_alloc_sglist(buflen); 3364 - 3365 - if (!sglist) { 3366 - pmcraid_err("can't allocate memory for passthrough SGls\n"); 3367 - return -ENOMEM; 3368 - } 3369 - 3370 - sglist->num_dma_sg = dma_map_sg(&cmd->drv_inst->pdev->dev, 3371 - sglist->scatterlist, 3372 - sglist->num_sg, direction); 3373 - 3374 - if (!sglist->num_dma_sg || sglist->num_dma_sg > PMCRAID_MAX_IOADLS) { 3375 - dev_err(&cmd->drv_inst->pdev->dev, 3376 - "Failed to map passthrough buffer!\n"); 3377 - pmcraid_free_sglist(sglist); 3378 - return -EIO; 3379 - } 3380 - 3381 - cmd->sglist = sglist; 3382 - ioarcb->request_flags0 |= NO_LINK_DESCS; 3383 - 3384 - ioadl = pmcraid_init_ioadls(cmd, sglist->num_dma_sg); 3385 - 3386 - /* Initialize IOADL descriptor addresses */ 3387 - for_each_sg(sglist->scatterlist, 
sg, sglist->num_dma_sg, i) { 3388 - ioadl[i].data_len = cpu_to_le32(sg_dma_len(sg)); 3389 - ioadl[i].address = cpu_to_le64(sg_dma_address(sg)); 3390 - ioadl[i].flags = 0; 3391 - } 3392 - 3393 - /* setup the last descriptor */ 3394 - ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC; 3395 - 3396 - return 0; 3397 - } 3398 - 3399 - 3400 - /** 3401 - * pmcraid_release_passthrough_ioadls - release passthrough ioadls 3402 - * 3403 - * @cmd: pointer to struct pmcraid_cmd for which ioadls were allocated 3404 - * @buflen: size of the request buffer 3405 - * @direction: data transfer direction 3406 - * 3407 - * Return value 3408 - * 0 on success, non-zero error code on failure 3409 - */ 3410 - static void pmcraid_release_passthrough_ioadls( 3411 - struct pmcraid_cmd *cmd, 3412 - int buflen, 3413 - int direction 3414 - ) 3415 - { 3416 - struct pmcraid_sglist *sglist = cmd->sglist; 3417 - 3418 - if (buflen > 0) { 3419 - dma_unmap_sg(&cmd->drv_inst->pdev->dev, 3420 - sglist->scatterlist, 3421 - sglist->num_sg, 3422 - direction); 3423 - pmcraid_free_sglist(sglist); 3424 - cmd->sglist = NULL; 3425 - } 3426 - } 3427 - 3428 - /** 3429 - * pmcraid_ioctl_passthrough - handling passthrough IOCTL commands 3430 - * 3431 - * @pinstance: pointer to adapter instance structure 3432 - * @ioctl_cmd: ioctl code 3433 - * @buflen: unused 3434 - * @arg: pointer to pmcraid_passthrough_buffer user buffer 3435 - * 3436 - * Return value 3437 - * 0 on success, non-zero error code on failure 3438 - */ 3439 - static long pmcraid_ioctl_passthrough( 3440 - struct pmcraid_instance *pinstance, 3441 - unsigned int ioctl_cmd, 3442 - unsigned int buflen, 3443 - void __user *arg 3444 - ) 3445 - { 3446 - struct pmcraid_passthrough_ioctl_buffer *buffer; 3447 - struct pmcraid_ioarcb *ioarcb; 3448 - struct pmcraid_cmd *cmd; 3449 - struct pmcraid_cmd *cancel_cmd; 3450 - void __user *request_buffer; 3451 - unsigned long request_offset; 3452 - unsigned long lock_flags; 3453 - void __user *ioasa; 3454 - u32 ioasc; 3455 - int 
request_size; 3456 - int buffer_size; 3457 - u8 direction; 3458 - int rc = 0; 3459 - 3460 - /* If IOA reset is in progress, wait 10 secs for reset to complete */ 3461 - if (pinstance->ioa_reset_in_progress) { 3462 - rc = wait_event_interruptible_timeout( 3463 - pinstance->reset_wait_q, 3464 - !pinstance->ioa_reset_in_progress, 3465 - msecs_to_jiffies(10000)); 3466 - 3467 - if (!rc) 3468 - return -ETIMEDOUT; 3469 - else if (rc < 0) 3470 - return -ERESTARTSYS; 3471 - } 3472 - 3473 - /* If adapter is not in operational state, return error */ 3474 - if (pinstance->ioa_state != IOA_STATE_OPERATIONAL) { 3475 - pmcraid_err("IOA is not operational\n"); 3476 - return -ENOTTY; 3477 - } 3478 - 3479 - buffer_size = sizeof(struct pmcraid_passthrough_ioctl_buffer); 3480 - buffer = kmalloc(buffer_size, GFP_KERNEL); 3481 - 3482 - if (!buffer) { 3483 - pmcraid_err("no memory for passthrough buffer\n"); 3484 - return -ENOMEM; 3485 - } 3486 - 3487 - request_offset = 3488 - offsetof(struct pmcraid_passthrough_ioctl_buffer, request_buffer); 3489 - 3490 - request_buffer = arg + request_offset; 3491 - 3492 - rc = copy_from_user(buffer, arg, 3493 - sizeof(struct pmcraid_passthrough_ioctl_buffer)); 3494 - 3495 - ioasa = arg + offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa); 3496 - 3497 - if (rc) { 3498 - pmcraid_err("ioctl: can't copy passthrough buffer\n"); 3499 - rc = -EFAULT; 3500 - goto out_free_buffer; 3501 - } 3502 - 3503 - request_size = le32_to_cpu(buffer->ioarcb.data_transfer_length); 3504 - 3505 - if (buffer->ioarcb.request_flags0 & TRANSFER_DIR_WRITE) { 3506 - direction = DMA_TO_DEVICE; 3507 - } else { 3508 - direction = DMA_FROM_DEVICE; 3509 - } 3510 - 3511 - if (request_size < 0) { 3512 - rc = -EINVAL; 3513 - goto out_free_buffer; 3514 - } 3515 - 3516 - /* check if we have any additional command parameters */ 3517 - if (le16_to_cpu(buffer->ioarcb.add_cmd_param_length) 3518 - > PMCRAID_ADD_CMD_PARAM_LEN) { 3519 - rc = -EINVAL; 3520 - goto out_free_buffer; 3521 - } 3522 
- 3523 - cmd = pmcraid_get_free_cmd(pinstance); 3524 - 3525 - if (!cmd) { 3526 - pmcraid_err("free command block is not available\n"); 3527 - rc = -ENOMEM; 3528 - goto out_free_buffer; 3529 - } 3530 - 3531 - cmd->scsi_cmd = NULL; 3532 - ioarcb = &(cmd->ioa_cb->ioarcb); 3533 - 3534 - /* Copy the user-provided IOARCB stuff field by field */ 3535 - ioarcb->resource_handle = buffer->ioarcb.resource_handle; 3536 - ioarcb->data_transfer_length = buffer->ioarcb.data_transfer_length; 3537 - ioarcb->cmd_timeout = buffer->ioarcb.cmd_timeout; 3538 - ioarcb->request_type = buffer->ioarcb.request_type; 3539 - ioarcb->request_flags0 = buffer->ioarcb.request_flags0; 3540 - ioarcb->request_flags1 = buffer->ioarcb.request_flags1; 3541 - memcpy(ioarcb->cdb, buffer->ioarcb.cdb, PMCRAID_MAX_CDB_LEN); 3542 - 3543 - if (buffer->ioarcb.add_cmd_param_length) { 3544 - ioarcb->add_cmd_param_length = 3545 - buffer->ioarcb.add_cmd_param_length; 3546 - ioarcb->add_cmd_param_offset = 3547 - buffer->ioarcb.add_cmd_param_offset; 3548 - memcpy(ioarcb->add_data.u.add_cmd_params, 3549 - buffer->ioarcb.add_data.u.add_cmd_params, 3550 - le16_to_cpu(buffer->ioarcb.add_cmd_param_length)); 3551 - } 3552 - 3553 - /* set hrrq number where the IOA should respond to. Note that all cmds 3554 - * generated internally uses hrrq_id 0, exception to this is the cmd 3555 - * block of scsi_cmd which is re-used (e.g. 
cancel/abort), which uses 3556 - * hrrq_id assigned here in queuecommand 3557 - */ 3558 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) % 3559 - pinstance->num_hrrq; 3560 - 3561 - if (request_size) { 3562 - rc = pmcraid_build_passthrough_ioadls(cmd, 3563 - request_size, 3564 - direction); 3565 - if (rc) { 3566 - pmcraid_err("couldn't build passthrough ioadls\n"); 3567 - goto out_free_cmd; 3568 - } 3569 - } 3570 - 3571 - /* If data is being written into the device, copy the data from user 3572 - * buffers 3573 - */ 3574 - if (direction == DMA_TO_DEVICE && request_size > 0) { 3575 - rc = pmcraid_copy_sglist(cmd->sglist, 3576 - request_buffer, 3577 - request_size, 3578 - direction); 3579 - if (rc) { 3580 - pmcraid_err("failed to copy user buffer\n"); 3581 - goto out_free_sglist; 3582 - } 3583 - } 3584 - 3585 - /* passthrough ioctl is a blocking command so, put the user to sleep 3586 - * until timeout. Note that a timeout value of 0 means, do timeout. 3587 - */ 3588 - cmd->cmd_done = pmcraid_internal_done; 3589 - init_completion(&cmd->wait_for_completion); 3590 - cmd->completion_req = 1; 3591 - 3592 - pmcraid_info("command(%d) (CDB[0] = %x) for %x\n", 3593 - le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2, 3594 - cmd->ioa_cb->ioarcb.cdb[0], 3595 - le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle)); 3596 - 3597 - spin_lock_irqsave(pinstance->host->host_lock, lock_flags); 3598 - _pmcraid_fire_command(cmd); 3599 - spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); 3600 - 3601 - /* NOTE ! Remove the below line once abort_task is implemented 3602 - * in firmware. 
This line disables ioctl command timeout handling logic 3603 - * similar to IO command timeout handling, making ioctl commands to wait 3604 - * until the command completion regardless of timeout value specified in 3605 - * ioarcb 3606 - */ 3607 - buffer->ioarcb.cmd_timeout = 0; 3608 - 3609 - /* If command timeout is specified put caller to wait till that time, 3610 - * otherwise it would be blocking wait. If command gets timed out, it 3611 - * will be aborted. 3612 - */ 3613 - if (buffer->ioarcb.cmd_timeout == 0) { 3614 - wait_for_completion(&cmd->wait_for_completion); 3615 - } else if (!wait_for_completion_timeout( 3616 - &cmd->wait_for_completion, 3617 - msecs_to_jiffies(le16_to_cpu(buffer->ioarcb.cmd_timeout) * 1000))) { 3618 - 3619 - pmcraid_info("aborting cmd %d (CDB[0] = %x) due to timeout\n", 3620 - le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2, 3621 - cmd->ioa_cb->ioarcb.cdb[0]); 3622 - 3623 - spin_lock_irqsave(pinstance->host->host_lock, lock_flags); 3624 - cancel_cmd = pmcraid_abort_cmd(cmd); 3625 - spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); 3626 - 3627 - if (cancel_cmd) { 3628 - wait_for_completion(&cancel_cmd->wait_for_completion); 3629 - ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc); 3630 - pmcraid_return_cmd(cancel_cmd); 3631 - 3632 - /* if abort task couldn't find the command i.e it got 3633 - * completed prior to aborting, return good completion. 
3634 - * if command got aborted successfully or there was IOA 3635 - * reset due to abort task itself getting timedout then 3636 - * return -ETIMEDOUT 3637 - */ 3638 - if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET || 3639 - PMCRAID_IOASC_SENSE_KEY(ioasc) == 0x00) { 3640 - if (ioasc != PMCRAID_IOASC_GC_IOARCB_NOTFOUND) 3641 - rc = -ETIMEDOUT; 3642 - goto out_handle_response; 3643 - } 3644 - } 3645 - 3646 - /* no command block for abort task or abort task failed to abort 3647 - * the IOARCB, then wait for 150 more seconds and initiate reset 3648 - * sequence after timeout 3649 - */ 3650 - if (!wait_for_completion_timeout( 3651 - &cmd->wait_for_completion, 3652 - msecs_to_jiffies(150 * 1000))) { 3653 - pmcraid_reset_bringup(cmd->drv_inst); 3654 - rc = -ETIMEDOUT; 3655 - } 3656 - } 3657 - 3658 - out_handle_response: 3659 - /* copy entire IOASA buffer and return IOCTL success. 3660 - * If copying IOASA to user-buffer fails, return 3661 - * EFAULT 3662 - */ 3663 - if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa, 3664 - sizeof(struct pmcraid_ioasa))) { 3665 - pmcraid_err("failed to copy ioasa buffer to user\n"); 3666 - rc = -EFAULT; 3667 - } 3668 - 3669 - /* If the data transfer was from device, copy the data onto user 3670 - * buffers 3671 - */ 3672 - else if (direction == DMA_FROM_DEVICE && request_size > 0) { 3673 - rc = pmcraid_copy_sglist(cmd->sglist, 3674 - request_buffer, 3675 - request_size, 3676 - direction); 3677 - if (rc) { 3678 - pmcraid_err("failed to copy user buffer\n"); 3679 - rc = -EFAULT; 3680 - } 3681 - } 3682 - 3683 - out_free_sglist: 3684 - pmcraid_release_passthrough_ioadls(cmd, request_size, direction); 3685 - 3686 - out_free_cmd: 3687 - pmcraid_return_cmd(cmd); 3688 - 3689 - out_free_buffer: 3690 - kfree(buffer); 3691 - 3692 - return rc; 3693 - } 3694 - 3695 - 3696 - 3697 - 3698 3457 /** 3699 3458 * pmcraid_ioctl_driver - ioctl handler for commands handled by driver itself 3700 3459 * ··· 3444 3921 } 3445 3922 3446 3923 switch (_IOC_TYPE(cmd)) { 3447 - 3448 
- case PMCRAID_PASSTHROUGH_IOCTL: 3449 - /* If ioctl code is to download microcode, we need to block 3450 - * mid-layer requests. 3451 - */ 3452 - if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE) 3453 - scsi_block_requests(pinstance->host); 3454 - 3455 - retval = pmcraid_ioctl_passthrough(pinstance, cmd, 3456 - hdr->buffer_length, argp); 3457 - 3458 - if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE) 3459 - scsi_unblock_requests(pinstance->host); 3460 - break; 3461 3924 3462 3925 case PMCRAID_DRIVER_IOCTL: 3463 3926 arg += sizeof(struct pmcraid_ioctl_header);
-33
drivers/scsi/pmcraid.h
··· 1023 1023 #define PMCRAID_IOCTL_SIGNATURE "PMCRAID" 1024 1024 1025 1025 /* 1026 - * pmcraid_passthrough_ioctl_buffer - structure given as argument to 1027 - * passthrough(or firmware handled) IOCTL commands. Note that ioarcb requires 1028 - * 32-byte alignment so, it is necessary to pack this structure to avoid any 1029 - * holes between ioctl_header and passthrough buffer 1030 - * 1031 - * .ioactl_header : ioctl header 1032 - * .ioarcb : filled-up ioarcb buffer, driver always reads this buffer 1033 - * .ioasa : buffer for ioasa, driver fills this with IOASA from firmware 1034 - * .request_buffer: The I/O buffer (flat), driver reads/writes to this based on 1035 - * the transfer directions passed in ioarcb.flags0. Contents 1036 - * of this buffer are valid only when ioarcb.data_transfer_len 1037 - * is not zero. 1038 - */ 1039 - struct pmcraid_passthrough_ioctl_buffer { 1040 - struct pmcraid_ioctl_header ioctl_header; 1041 - struct pmcraid_ioarcb ioarcb; 1042 - struct pmcraid_ioasa ioasa; 1043 - u8 request_buffer[]; 1044 - } __attribute__ ((packed, aligned(PMCRAID_IOARCB_ALIGNMENT))); 1045 - 1046 - /* 1047 1026 * keys to differentiate between driver handled IOCTLs and passthrough 1048 1027 * IOCTLs passed to IOA. driver determines the ioctl type using macro 1049 1028 * _IOC_TYPE 1050 1029 */ 1051 1030 #define PMCRAID_DRIVER_IOCTL 'D' 1052 - #define PMCRAID_PASSTHROUGH_IOCTL 'F' 1053 1031 1054 1032 #define DRV_IOCTL(n, size) \ 1055 1033 _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size)) 1056 - 1057 - #define FMW_IOCTL(n, size) \ 1058 - _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL, (n), (size)) 1059 1034 1060 1035 /* 1061 1036 * _ARGSIZE: macro that gives size of the argument type passed to an IOCTL cmd. 
··· 1043 1068 1044 1069 #define PMCRAID_IOCTL_RESET_ADAPTER \ 1045 1070 DRV_IOCTL(5, sizeof(struct pmcraid_ioctl_header)) 1046 - 1047 - /* passthrough/firmware handled commands */ 1048 - #define PMCRAID_IOCTL_PASSTHROUGH_COMMAND \ 1049 - FMW_IOCTL(1, sizeof(struct pmcraid_passthrough_ioctl_buffer)) 1050 - 1051 - #define PMCRAID_IOCTL_DOWNLOAD_MICROCODE \ 1052 - FMW_IOCTL(2, sizeof(struct pmcraid_passthrough_ioctl_buffer)) 1053 - 1054 1071 1055 1072 #endif /* _PMCRAID_H */
+5 -3
drivers/scsi/scsi_debug.c
··· 7519 7519 struct sdebug_defer *sd_dp; 7520 7520 7521 7521 sqp = sdebug_q_arr + queue_num; 7522 - qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue); 7523 - if (qc_idx >= sdebug_max_queue) 7524 - return 0; 7525 7522 7526 7523 spin_lock_irqsave(&sqp->qc_lock, iflags); 7524 + 7525 + qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue); 7526 + if (qc_idx >= sdebug_max_queue) 7527 + goto unlock; 7527 7528 7528 7529 for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) { 7529 7530 if (first) { ··· 7590 7589 break; 7591 7590 } 7592 7591 7592 + unlock: 7593 7593 spin_unlock_irqrestore(&sqp->qc_lock, iflags); 7594 7594 7595 7595 if (num_entries > 0)
+1 -1
drivers/scsi/scsi_logging.c
··· 30 30 { 31 31 struct request *rq = scsi_cmd_to_rq((struct scsi_cmnd *)scmd); 32 32 33 - if (!rq->q->disk) 33 + if (!rq->q || !rq->q->disk) 34 34 return NULL; 35 35 return rq->q->disk->disk_name; 36 36 }
+5
drivers/scsi/scsi_scan.c
··· 223 223 int ret; 224 224 struct sbitmap sb_backup; 225 225 226 + depth = min_t(unsigned int, depth, scsi_device_max_queue_depth(sdev)); 227 + 226 228 /* 227 229 * realloc if new shift is calculated, which is caused by setting 228 230 * up one new default queue depth after calling ->slave_configure ··· 247 245 scsi_device_max_queue_depth(sdev), 248 246 new_shift, GFP_KERNEL, 249 247 sdev->request_queue->node, false, true); 248 + if (!ret) 249 + sbitmap_resize(&sdev->budget_map, depth); 250 + 250 251 if (need_free) { 251 252 if (ret) 252 253 sdev->budget_map = sb_backup;
-4
drivers/scsi/scsi_sysfs.c
··· 1384 1384 if (IS_ENABLED(CONFIG_BLK_DEV_BSG)) { 1385 1385 sdev->bsg_dev = scsi_bsg_register_queue(sdev); 1386 1386 if (IS_ERR(sdev->bsg_dev)) { 1387 - /* 1388 - * We're treating error on bsg register as non-fatal, so 1389 - * pretend nothing went wrong. 1390 - */ 1391 1387 error = PTR_ERR(sdev->bsg_dev); 1392 1388 sdev_printk(KERN_INFO, sdev, 1393 1389 "Failed to register bsg queue, errno=%d\n",
+2 -1
drivers/scsi/sd.c
··· 3216 3216 sd_read_block_limits(sdkp); 3217 3217 sd_read_block_characteristics(sdkp); 3218 3218 sd_zbc_read_zones(sdkp, buffer); 3219 + sd_read_cpr(sdkp); 3219 3220 } 3220 3221 3221 3222 sd_print_capacity(sdkp, old_capacity); ··· 3226 3225 sd_read_app_tag_own(sdkp, buffer); 3227 3226 sd_read_write_same(sdkp, buffer); 3228 3227 sd_read_security(sdkp, buffer); 3229 - sd_read_cpr(sdkp); 3230 3228 } 3231 3229 3232 3230 /* ··· 3475 3475 error = device_add_disk(dev, gd, NULL); 3476 3476 if (error) { 3477 3477 put_device(&sdkp->disk_dev); 3478 + blk_cleanup_disk(gd); 3478 3479 goto out; 3479 3480 } 3480 3481
+1 -1
drivers/scsi/sr.c
··· 535 535 536 536 scsi_autopm_get_device(sdev); 537 537 538 - if (ret != CDROMCLOSETRAY && ret != CDROMEJECT) { 538 + if (cmd != CDROMCLOSETRAY && cmd != CDROMEJECT) { 539 539 ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg); 540 540 if (ret != -ENOSYS) 541 541 goto put;
-15
drivers/scsi/ufs/ufs-qcom.c
··· 957 957 .deassert = ufs_qcom_reset_deassert, 958 958 }; 959 959 960 - #define ANDROID_BOOT_DEV_MAX 30 961 - static char android_boot_dev[ANDROID_BOOT_DEV_MAX]; 962 - 963 - #ifndef MODULE 964 - static int __init get_android_boot_dev(char *str) 965 - { 966 - strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX); 967 - return 1; 968 - } 969 - __setup("androidboot.bootdevice=", get_android_boot_dev); 970 - #endif 971 - 972 960 /** 973 961 * ufs_qcom_init - bind phy with controller 974 962 * @hba: host controller instance ··· 975 987 struct ufs_qcom_host *host; 976 988 struct resource *res; 977 989 struct ufs_clk_info *clki; 978 - 979 - if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev))) 980 - return -ENODEV; 981 990 982 991 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); 983 992 if (!host) {
+17
drivers/scsi/ufs/ufshcd-pci.c
··· 428 428 return ufs_intel_common_init(hba); 429 429 } 430 430 431 + static int ufs_intel_mtl_init(struct ufs_hba *hba) 432 + { 433 + hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN; 434 + return ufs_intel_common_init(hba); 435 + } 436 + 431 437 static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = { 432 438 .name = "intel-pci", 433 439 .init = ufs_intel_common_init, ··· 466 460 .name = "intel-pci", 467 461 .init = ufs_intel_adl_init, 468 462 .exit = ufs_intel_common_exit, 463 + .link_startup_notify = ufs_intel_link_startup_notify, 464 + .resume = ufs_intel_resume, 465 + .device_reset = ufs_intel_device_reset, 466 + }; 467 + 468 + static struct ufs_hba_variant_ops ufs_intel_mtl_hba_vops = { 469 + .name = "intel-pci", 470 + .init = ufs_intel_mtl_init, 471 + .exit = ufs_intel_common_exit, 472 + .hce_enable_notify = ufs_intel_hce_enable_notify, 469 473 .link_startup_notify = ufs_intel_link_startup_notify, 470 474 .resume = ufs_intel_resume, 471 475 .device_reset = ufs_intel_device_reset, ··· 595 579 { PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops }, 596 580 { PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops }, 597 581 { PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops }, 582 + { PCI_VDEVICE(INTEL, 0x7E47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, 598 583 { } /* terminate list */ 599 584 }; 600 585
-2
drivers/scsi/ufs/ufshcd.h
··· 820 820 enum ufs_pm_level rpm_lvl; 821 821 /* Desired UFS power management level during system PM */ 822 822 enum ufs_pm_level spm_lvl; 823 - struct device_attribute rpm_lvl_attr; 824 - struct device_attribute spm_lvl_attr; 825 823 int pm_op_in_progress; 826 824 827 825 /* Auto-Hibernate Idle Timer register value */
+5 -6
drivers/scsi/ufs/ufshpb.c
··· 867 867 struct ufshpb_region *rgn, *victim_rgn = NULL; 868 868 869 869 list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) { 870 - if (!rgn) { 871 - dev_err(&hpb->sdev_ufs_lu->sdev_dev, 872 - "%s: no region allocated\n", 873 - __func__); 874 - return NULL; 875 - } 876 870 if (ufshpb_check_srgns_issue_state(hpb, rgn)) 877 871 continue; 878 872 ··· 881 887 victim_rgn = rgn; 882 888 break; 883 889 } 890 + 891 + if (!victim_rgn) 892 + dev_err(&hpb->sdev_ufs_lu->sdev_dev, 893 + "%s: no region allocated\n", 894 + __func__); 884 895 885 896 return victim_rgn; 886 897 }
+4 -4
drivers/scsi/virtio_scsi.c
··· 988 988 .remove = virtscsi_remove, 989 989 }; 990 990 991 - static int __init init(void) 991 + static int __init virtio_scsi_init(void) 992 992 { 993 993 int ret = -ENOMEM; 994 994 ··· 1020 1020 return ret; 1021 1021 } 1022 1022 1023 - static void __exit fini(void) 1023 + static void __exit virtio_scsi_fini(void) 1024 1024 { 1025 1025 unregister_virtio_driver(&virtio_scsi_driver); 1026 1026 mempool_destroy(virtscsi_cmd_pool); 1027 1027 kmem_cache_destroy(virtscsi_cmd_cache); 1028 1028 } 1029 - module_init(init); 1030 - module_exit(fini); 1029 + module_init(virtio_scsi_init); 1030 + module_exit(virtio_scsi_fini); 1031 1031 1032 1032 MODULE_DEVICE_TABLE(virtio, id_table); 1033 1033 MODULE_DESCRIPTION("Virtio SCSI HBA driver");
+2
drivers/scsi/zorro7xx.c
··· 159 159 scsi_remove_host(host); 160 160 161 161 NCR_700_release(host); 162 + if (host->base > 0x01000000) 163 + iounmap(hostdata->base); 162 164 kfree(hostdata); 163 165 free_irq(host->irq, host); 164 166 zorro_release_device(z);
+2 -1
drivers/target/target_core_user.c
··· 1821 1821 mutex_lock(&udev->cmdr_lock); 1822 1822 page = xa_load(&udev->data_pages, dpi); 1823 1823 if (likely(page)) { 1824 + get_page(page); 1824 1825 mutex_unlock(&udev->cmdr_lock); 1825 1826 return page; 1826 1827 } ··· 1878 1877 /* For the vmalloc()ed cmd area pages */ 1879 1878 addr = (void *)(unsigned long)info->mem[mi].addr + offset; 1880 1879 page = vmalloc_to_page(addr); 1880 + get_page(page); 1881 1881 } else { 1882 1882 uint32_t dpi; 1883 1883 ··· 1889 1887 return VM_FAULT_SIGBUS; 1890 1888 } 1891 1889 1892 - get_page(page); 1893 1890 vmf->page = page; 1894 1891 return 0; 1895 1892 }