Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
"Thirty-three fixes, I'm afraid.

Essentially the build-up from the last couple of weeks while I've been
dealing with Linux Plumbers conference infrastructure issues. It's
mostly the usual assortment of spelling fixes and minor corrections.

The only core-relevant changes are to the sd driver, to reduce the
spin-up message spew and fix a small memory leak on the freeing path"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (33 commits)
scsi: ses: Retry failed Send/Receive Diagnostic commands
scsi: target: Fix spelling mistake "CONFLIFT" -> "CONFLICT"
scsi: lpfc: Fix gcc -Wstringop-overread warning, again
scsi: lpfc: Use correct scnprintf() limit
scsi: lpfc: Fix sprintf() overflow in lpfc_display_fpin_wwpn()
scsi: core: Remove 'current_tag'
scsi: acornscsi: Remove tagged queuing vestiges
scsi: fas216: Kill scmd->tag
scsi: qla2xxx: Restore initiator in dual mode
scsi: ufs: core: Unbreak the reset handler
scsi: sd_zbc: Support disks with more than 2**32 logical blocks
scsi: ufs: core: Revert "scsi: ufs: Synchronize SCSI and UFS error handling"
scsi: bsg: Fix device unregistration
scsi: sd: Make sd_spinup_disk() less noisy
scsi: ufs: ufs-pci: Fix Intel LKF link stability
scsi: mpt3sas: Clean up some inconsistent indenting
scsi: megaraid: Clean up some inconsistent indenting
scsi: sr: Fix spelling mistake "does'nt" -> "doesn't"
scsi: Remove SCSI CDROM MAINTAINERS entry
scsi: megaraid: Fix Coccinelle warning
...

+288 -300
-7
MAINTAINERS
··· 16650 S: Supported 16651 F: drivers/char/pcmcia/scr24x_cs.c 16652 16653 - SCSI CDROM DRIVER 16654 - M: Jens Axboe <axboe@kernel.dk> 16655 - L: linux-scsi@vger.kernel.org 16656 - S: Maintained 16657 - W: http://www.kernel.dk 16658 - F: drivers/scsi/sr* 16659 - 16660 SCSI RDMA PROTOCOL (SRP) INITIATOR 16661 M: Bart Van Assche <bvanassche@acm.org> 16662 L: linux-rdma@vger.kernel.org
··· 16650 S: Supported 16651 F: drivers/char/pcmcia/scr24x_cs.c 16652 16653 SCSI RDMA PROTOCOL (SRP) INITIATOR 16654 M: Bart Van Assche <bvanassche@acm.org> 16655 L: linux-rdma@vger.kernel.org
+15 -8
block/bsg.c
··· 165 .llseek = default_llseek, 166 }; 167 168 void bsg_unregister_queue(struct bsg_device *bd) 169 { 170 if (bd->queue->kobj.sd) 171 sysfs_remove_link(&bd->queue->kobj, "bsg"); 172 cdev_device_del(&bd->cdev, &bd->device); 173 - ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt)); 174 - kfree(bd); 175 } 176 EXPORT_SYMBOL_GPL(bsg_unregister_queue); 177 ··· 200 if (ret < 0) { 201 if (ret == -ENOSPC) 202 dev_err(parent, "bsg: too many bsg devices\n"); 203 - goto out_kfree; 204 } 205 bd->device.devt = MKDEV(bsg_major, ret); 206 bd->device.class = bsg_class; 207 bd->device.parent = parent; 208 dev_set_name(&bd->device, "%s", name); 209 device_initialize(&bd->device); 210 ··· 214 bd->cdev.owner = THIS_MODULE; 215 ret = cdev_device_add(&bd->cdev, &bd->device); 216 if (ret) 217 - goto out_ida_remove; 218 219 if (q->kobj.sd) { 220 ret = sysfs_create_link(&q->kobj, &bd->device.kobj, "bsg"); ··· 226 227 out_device_del: 228 cdev_device_del(&bd->cdev, &bd->device); 229 - out_ida_remove: 230 - ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt)); 231 - out_kfree: 232 - kfree(bd); 233 return ERR_PTR(ret); 234 } 235 EXPORT_SYMBOL_GPL(bsg_register_queue);
··· 165 .llseek = default_llseek, 166 }; 167 168 + static void bsg_device_release(struct device *dev) 169 + { 170 + struct bsg_device *bd = container_of(dev, struct bsg_device, device); 171 + 172 + ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt)); 173 + kfree(bd); 174 + } 175 + 176 void bsg_unregister_queue(struct bsg_device *bd) 177 { 178 if (bd->queue->kobj.sd) 179 sysfs_remove_link(&bd->queue->kobj, "bsg"); 180 cdev_device_del(&bd->cdev, &bd->device); 181 + put_device(&bd->device); 182 } 183 EXPORT_SYMBOL_GPL(bsg_unregister_queue); 184 ··· 193 if (ret < 0) { 194 if (ret == -ENOSPC) 195 dev_err(parent, "bsg: too many bsg devices\n"); 196 + kfree(bd); 197 + return ERR_PTR(ret); 198 } 199 bd->device.devt = MKDEV(bsg_major, ret); 200 bd->device.class = bsg_class; 201 bd->device.parent = parent; 202 + bd->device.release = bsg_device_release; 203 dev_set_name(&bd->device, "%s", name); 204 device_initialize(&bd->device); 205 ··· 205 bd->cdev.owner = THIS_MODULE; 206 ret = cdev_device_add(&bd->cdev, &bd->device); 207 if (ret) 208 + goto out_put_device; 209 210 if (q->kobj.sd) { 211 ret = sysfs_create_link(&q->kobj, &bd->device.kobj, "bsg"); ··· 217 218 out_device_del: 219 cdev_device_del(&bd->cdev, &bd->device); 220 + out_put_device: 221 + put_device(&bd->device); 222 return ERR_PTR(ret); 223 } 224 EXPORT_SYMBOL_GPL(bsg_register_queue);
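
The bsg fix above is an instance of a general driver-model rule: once device_initialize() has been called, the embedding structure may only be freed from the device's ->release callback, reached via put_device(), never with a direct kfree(). A minimal sketch of that pattern, with hypothetical names rather than the actual bsg types:

    #include <linux/device.h>
    #include <linux/slab.h>

    struct my_dev {
            struct device device;
            /* driver-private state would live here */
    };

    static void my_dev_release(struct device *dev)
    {
            struct my_dev *md = container_of(dev, struct my_dev, device);

            kfree(md);      /* last reference gone: now freeing is safe */
    }

    static struct my_dev *my_dev_create(void)
    {
            struct my_dev *md = kzalloc(sizeof(*md), GFP_KERNEL);

            if (!md)
                    return NULL;
            md->device.release = my_dev_release;
            device_initialize(&md->device);
            /* from here on, unwind only via put_device(&md->device) */
            return md;
    }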
-11
drivers/scsi/arm/Kconfig
··· 10 This enables support for the Acorn SCSI card (aka30). If you have an 11 Acorn system with one of these, say Y. If unsure, say N. 12 13 - config SCSI_ACORNSCSI_TAGGED_QUEUE 14 - bool "Support SCSI 2 Tagged queueing" 15 - depends on SCSI_ACORNSCSI_3 16 - help 17 - Say Y here to enable tagged queuing support on the Acorn SCSI card. 18 - 19 - This is a feature of SCSI-2 which improves performance: the host 20 - adapter can send several SCSI commands to a device's queue even if 21 - previous commands haven't finished yet. Some SCSI devices don't 22 - implement this properly, so the safe answer is N. 23 - 24 config SCSI_ACORNSCSI_SYNC 25 bool "Support SCSI 2 Synchronous Transfers" 26 depends on SCSI_ACORNSCSI_3
··· 10 This enables support for the Acorn SCSI card (aka30). If you have an 11 Acorn system with one of these, say Y. If unsure, say N. 12 13 config SCSI_ACORNSCSI_SYNC 14 bool "Support SCSI 2 Synchronous Transfers" 15 depends on SCSI_ACORNSCSI_3
+22 -81
drivers/scsi/arm/acornscsi.c
··· 52 * You can tell if you have a device that supports tagged queueing my 53 * cating (eg) /proc/scsi/acornscsi/0 and see if the SCSI revision is reported 54 * as '2 TAG'. 55 - * 56 - * Also note that CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE is normally set in the config 57 - * scripts, but disabled here. Once debugged, remove the #undef, otherwise to debug, 58 - * comment out the undef. 59 */ 60 - #undef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE 61 /* 62 * SCSI-II Synchronous transfer support. 63 * ··· 167 unsigned int result); 168 static int acornscsi_reconnect_finish(AS_Host *host); 169 static void acornscsi_dma_cleanup(AS_Host *host); 170 - static void acornscsi_abortcmd(AS_Host *host, unsigned char tag); 171 172 /* ==================================================================================== 173 * Miscellaneous ··· 737 #endif 738 739 if (from_queue) { 740 - #ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE 741 - /* 742 - * tagged queueing - allocate a new tag to this command 743 - */ 744 - if (SCpnt->device->simple_tags) { 745 - SCpnt->device->current_tag += 1; 746 - if (SCpnt->device->current_tag == 0) 747 - SCpnt->device->current_tag = 1; 748 - SCpnt->tag = SCpnt->device->current_tag; 749 - } else 750 - #endif 751 set_bit(SCpnt->device->id * 8 + 752 (u8)(SCpnt->device->lun & 0x07), host->busyluns); 753 ··· 1177 * the device recognises the attention. 1178 */ 1179 if (dmac_read(host, DMAC_STATUS) & STATUS_RQ0) { 1180 - acornscsi_abortcmd(host, host->SCpnt->tag); 1181 1182 dmac_write(host, DMAC_TXCNTLO, 0); 1183 dmac_write(host, DMAC_TXCNTHI, 0); ··· 1545 acornscsi_sbic_issuecmd(host, CMND_ASSERTATN); 1546 1547 switch (host->scsi.last_message) { 1548 - #ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE 1549 - case HEAD_OF_QUEUE_TAG: 1550 - case ORDERED_QUEUE_TAG: 1551 - case SIMPLE_QUEUE_TAG: 1552 - /* 1553 - * ANSI standard says: (Section SCSI-2 Rev. 10c Sect 5.6.17) 1554 - * If a target does not implement tagged queuing and a queue tag 1555 - * message is received, it shall respond with a MESSAGE REJECT 1556 - * message and accept the I/O process as if it were untagged. 
1557 - */ 1558 - printk(KERN_NOTICE "scsi%d.%c: disabling tagged queueing\n", 1559 - host->host->host_no, acornscsi_target(host)); 1560 - host->SCpnt->device->simple_tags = 0; 1561 - set_bit(host->SCpnt->device->id * 8 + 1562 - (u8)(host->SCpnt->device->lun & 0x7), host->busyluns); 1563 - break; 1564 - #endif 1565 case EXTENDED_MESSAGE | (EXTENDED_SDTR << 8): 1566 /* 1567 * Target can't handle synchronous transfers ··· 1655 #if 0 1656 /* does the device need the current command aborted */ 1657 if (cmd_aborted) { 1658 - acornscsi_abortcmd(host->SCpnt->tag); 1659 return; 1660 } 1661 #endif 1662 1663 - #ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE 1664 - if (host->SCpnt->tag) { 1665 - unsigned int tag_type; 1666 - 1667 - if (host->SCpnt->cmnd[0] == REQUEST_SENSE || 1668 - host->SCpnt->cmnd[0] == TEST_UNIT_READY || 1669 - host->SCpnt->cmnd[0] == INQUIRY) 1670 - tag_type = HEAD_OF_QUEUE_TAG; 1671 - else 1672 - tag_type = SIMPLE_QUEUE_TAG; 1673 - msgqueue_addmsg(&host->scsi.msgs, 2, tag_type, host->SCpnt->tag); 1674 - } 1675 - #endif 1676 1677 #ifdef CONFIG_SCSI_ACORNSCSI_SYNC 1678 if (host->device[host->SCpnt->device->id].sync_state == SYNC_NEGOCIATE) { ··· 1753 "to reconnect with\n", 1754 host->host->host_no, '0' + target); 1755 acornscsi_dumplog(host, target); 1756 - acornscsi_abortcmd(host, 0); 1757 if (host->SCpnt) { 1758 queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt); 1759 host->SCpnt = NULL; ··· 1776 host->scsi.disconnectable = 0; 1777 if (host->SCpnt->device->id == host->scsi.reconnected.target && 1778 host->SCpnt->device->lun == host->scsi.reconnected.lun && 1779 - host->SCpnt->tag == host->scsi.reconnected.tag) { 1780 #if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON)) 1781 DBG(host->SCpnt, printk("scsi%d.%c: reconnected", 1782 host->host->host_no, acornscsi_target(host))); ··· 1803 } 1804 1805 if (!host->SCpnt) 1806 - acornscsi_abortcmd(host, host->scsi.reconnected.tag); 1807 else { 1808 /* 1809 * Restore data pointer from SAVED pointers. ··· 1844 * Function: void acornscsi_abortcmd(AS_host *host, unsigned char tag) 1845 * Purpose : abort a currently executing command 1846 * Params : host - host with connected command to abort 1847 - * tag - tag to abort 1848 */ 1849 static 1850 - void acornscsi_abortcmd(AS_Host *host, unsigned char tag) 1851 { 1852 host->scsi.phase = PHASE_ABORTED; 1853 sbic_arm_write(host, SBIC_CMND, CMND_ASSERTATN); 1854 1855 msgqueue_flush(&host->scsi.msgs); 1856 - #ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE 1857 - if (tag) 1858 - msgqueue_addmsg(&host->scsi.msgs, 2, ABORT_TAG, tag); 1859 - else 1860 - #endif 1861 - msgqueue_addmsg(&host->scsi.msgs, 1, ABORT); 1862 } 1863 1864 /* ========================================================================================== ··· 1942 printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTING, SSR %02X?\n", 1943 host->host->host_no, acornscsi_target(host), ssr); 1944 acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); 1945 - acornscsi_abortcmd(host, host->SCpnt->tag); 1946 } 1947 return INTR_PROCESSING; 1948 ··· 1978 printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTED, SSR %02X?\n", 1979 host->host->host_no, acornscsi_target(host), ssr); 1980 acornscsi_dumplog(host, host->SCpnt ? 
host->SCpnt->device->id : 8); 1981 - acornscsi_abortcmd(host, host->SCpnt->tag); 1982 } 1983 return INTR_PROCESSING; 1984 ··· 2024 case 0x18: /* -> PHASE_DATAOUT */ 2025 /* COMMAND -> DATA OUT */ 2026 if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len) 2027 - acornscsi_abortcmd(host, host->SCpnt->tag); 2028 acornscsi_dma_setup(host, DMA_OUT); 2029 if (!acornscsi_starttransfer(host)) 2030 - acornscsi_abortcmd(host, host->SCpnt->tag); 2031 host->scsi.phase = PHASE_DATAOUT; 2032 return INTR_IDLE; 2033 2034 case 0x19: /* -> PHASE_DATAIN */ 2035 /* COMMAND -> DATA IN */ 2036 if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len) 2037 - acornscsi_abortcmd(host, host->SCpnt->tag); 2038 acornscsi_dma_setup(host, DMA_IN); 2039 if (!acornscsi_starttransfer(host)) 2040 - acornscsi_abortcmd(host, host->SCpnt->tag); 2041 host->scsi.phase = PHASE_DATAIN; 2042 return INTR_IDLE; 2043 ··· 2105 /* MESSAGE IN -> DATA OUT */ 2106 acornscsi_dma_setup(host, DMA_OUT); 2107 if (!acornscsi_starttransfer(host)) 2108 - acornscsi_abortcmd(host, host->SCpnt->tag); 2109 host->scsi.phase = PHASE_DATAOUT; 2110 return INTR_IDLE; 2111 ··· 2114 /* MESSAGE IN -> DATA IN */ 2115 acornscsi_dma_setup(host, DMA_IN); 2116 if (!acornscsi_starttransfer(host)) 2117 - acornscsi_abortcmd(host, host->SCpnt->tag); 2118 host->scsi.phase = PHASE_DATAIN; 2119 return INTR_IDLE; 2120 ··· 2155 switch (ssr) { 2156 case 0x19: /* -> PHASE_DATAIN */ 2157 case 0x89: /* -> PHASE_DATAIN */ 2158 - acornscsi_abortcmd(host, host->SCpnt->tag); 2159 return INTR_IDLE; 2160 2161 case 0x1b: /* -> PHASE_STATUSIN */ ··· 2204 switch (ssr) { 2205 case 0x18: /* -> PHASE_DATAOUT */ 2206 case 0x88: /* -> PHASE_DATAOUT */ 2207 - acornscsi_abortcmd(host, host->SCpnt->tag); 2208 return INTR_IDLE; 2209 2210 case 0x1b: /* -> PHASE_STATUSIN */ ··· 2431 SCpnt->scsi_done = done; 2432 SCpnt->host_scribble = NULL; 2433 SCpnt->result = 0; 2434 - SCpnt->tag = 0; 2435 SCpnt->SCp.phase = (int)acornscsi_datadirection(SCpnt->cmnd[0]); 2436 SCpnt->SCp.sent_command = 0; 2437 SCpnt->SCp.scsi_xferred = 0; ··· 2529 break; 2530 2531 default: 2532 - acornscsi_abortcmd(host, host->SCpnt->tag); 2533 res = res_snooze; 2534 } 2535 local_irq_restore(flags); ··· 2695 #ifdef CONFIG_SCSI_ACORNSCSI_SYNC 2696 " SYNC" 2697 #endif 2698 - #ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE 2699 - " TAG" 2700 - #endif 2701 #if (DEBUG & DEBUG_NO_WRITE) 2702 " NOWRITE (" __stringify(NO_WRITE) ")" 2703 #endif ··· 2714 seq_printf(m, "AcornSCSI driver v%d.%d.%d" 2715 #ifdef CONFIG_SCSI_ACORNSCSI_SYNC 2716 " SYNC" 2717 - #endif 2718 - #ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE 2719 - " TAG" 2720 #endif 2721 #if (DEBUG & DEBUG_NO_WRITE) 2722 " NOWRITE (" __stringify(NO_WRITE) ")" ··· 2769 seq_printf(m, "Device/Lun TaggedQ Sync\n"); 2770 seq_printf(m, " %d/%llu ", scd->id, scd->lun); 2771 if (scd->tagged_supported) 2772 - seq_printf(m, "%3sabled(%3d) ", 2773 - scd->simple_tags ? "en" : "dis", 2774 - scd->current_tag); 2775 else 2776 seq_printf(m, "unsupported "); 2777
··· 52 * You can tell if you have a device that supports tagged queueing my 53 * cating (eg) /proc/scsi/acornscsi/0 and see if the SCSI revision is reported 54 * as '2 TAG'. 55 */ 56 + 57 /* 58 * SCSI-II Synchronous transfer support. 59 * ··· 171 unsigned int result); 172 static int acornscsi_reconnect_finish(AS_Host *host); 173 static void acornscsi_dma_cleanup(AS_Host *host); 174 + static void acornscsi_abortcmd(AS_Host *host); 175 176 /* ==================================================================================== 177 * Miscellaneous ··· 741 #endif 742 743 if (from_queue) { 744 set_bit(SCpnt->device->id * 8 + 745 (u8)(SCpnt->device->lun & 0x07), host->busyluns); 746 ··· 1192 * the device recognises the attention. 1193 */ 1194 if (dmac_read(host, DMAC_STATUS) & STATUS_RQ0) { 1195 + acornscsi_abortcmd(host); 1196 1197 dmac_write(host, DMAC_TXCNTLO, 0); 1198 dmac_write(host, DMAC_TXCNTHI, 0); ··· 1560 acornscsi_sbic_issuecmd(host, CMND_ASSERTATN); 1561 1562 switch (host->scsi.last_message) { 1563 case EXTENDED_MESSAGE | (EXTENDED_SDTR << 8): 1564 /* 1565 * Target can't handle synchronous transfers ··· 1687 #if 0 1688 /* does the device need the current command aborted */ 1689 if (cmd_aborted) { 1690 + acornscsi_abortcmd(host); 1691 return; 1692 } 1693 #endif 1694 1695 1696 #ifdef CONFIG_SCSI_ACORNSCSI_SYNC 1697 if (host->device[host->SCpnt->device->id].sync_state == SYNC_NEGOCIATE) { ··· 1798 "to reconnect with\n", 1799 host->host->host_no, '0' + target); 1800 acornscsi_dumplog(host, target); 1801 + acornscsi_abortcmd(host); 1802 if (host->SCpnt) { 1803 queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt); 1804 host->SCpnt = NULL; ··· 1821 host->scsi.disconnectable = 0; 1822 if (host->SCpnt->device->id == host->scsi.reconnected.target && 1823 host->SCpnt->device->lun == host->scsi.reconnected.lun && 1824 + scsi_cmd_to_tag(host->SCpnt) == host->scsi.reconnected.tag) { 1825 #if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON)) 1826 DBG(host->SCpnt, printk("scsi%d.%c: reconnected", 1827 host->host->host_no, acornscsi_target(host))); ··· 1848 } 1849 1850 if (!host->SCpnt) 1851 + acornscsi_abortcmd(host); 1852 else { 1853 /* 1854 * Restore data pointer from SAVED pointers. ··· 1889 * Function: void acornscsi_abortcmd(AS_host *host, unsigned char tag) 1890 * Purpose : abort a currently executing command 1891 * Params : host - host with connected command to abort 1892 */ 1893 static 1894 + void acornscsi_abortcmd(AS_Host *host) 1895 { 1896 host->scsi.phase = PHASE_ABORTED; 1897 sbic_arm_write(host, SBIC_CMND, CMND_ASSERTATN); 1898 1899 msgqueue_flush(&host->scsi.msgs); 1900 + msgqueue_addmsg(&host->scsi.msgs, 1, ABORT); 1901 } 1902 1903 /* ========================================================================================== ··· 1993 printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTING, SSR %02X?\n", 1994 host->host->host_no, acornscsi_target(host), ssr); 1995 acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); 1996 + acornscsi_abortcmd(host); 1997 } 1998 return INTR_PROCESSING; 1999 ··· 2029 printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTED, SSR %02X?\n", 2030 host->host->host_no, acornscsi_target(host), ssr); 2031 acornscsi_dumplog(host, host->SCpnt ? 
host->SCpnt->device->id : 8); 2032 + acornscsi_abortcmd(host); 2033 } 2034 return INTR_PROCESSING; 2035 ··· 2075 case 0x18: /* -> PHASE_DATAOUT */ 2076 /* COMMAND -> DATA OUT */ 2077 if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len) 2078 + acornscsi_abortcmd(host); 2079 acornscsi_dma_setup(host, DMA_OUT); 2080 if (!acornscsi_starttransfer(host)) 2081 + acornscsi_abortcmd(host); 2082 host->scsi.phase = PHASE_DATAOUT; 2083 return INTR_IDLE; 2084 2085 case 0x19: /* -> PHASE_DATAIN */ 2086 /* COMMAND -> DATA IN */ 2087 if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len) 2088 + acornscsi_abortcmd(host); 2089 acornscsi_dma_setup(host, DMA_IN); 2090 if (!acornscsi_starttransfer(host)) 2091 + acornscsi_abortcmd(host); 2092 host->scsi.phase = PHASE_DATAIN; 2093 return INTR_IDLE; 2094 ··· 2156 /* MESSAGE IN -> DATA OUT */ 2157 acornscsi_dma_setup(host, DMA_OUT); 2158 if (!acornscsi_starttransfer(host)) 2159 + acornscsi_abortcmd(host); 2160 host->scsi.phase = PHASE_DATAOUT; 2161 return INTR_IDLE; 2162 ··· 2165 /* MESSAGE IN -> DATA IN */ 2166 acornscsi_dma_setup(host, DMA_IN); 2167 if (!acornscsi_starttransfer(host)) 2168 + acornscsi_abortcmd(host); 2169 host->scsi.phase = PHASE_DATAIN; 2170 return INTR_IDLE; 2171 ··· 2206 switch (ssr) { 2207 case 0x19: /* -> PHASE_DATAIN */ 2208 case 0x89: /* -> PHASE_DATAIN */ 2209 + acornscsi_abortcmd(host); 2210 return INTR_IDLE; 2211 2212 case 0x1b: /* -> PHASE_STATUSIN */ ··· 2255 switch (ssr) { 2256 case 0x18: /* -> PHASE_DATAOUT */ 2257 case 0x88: /* -> PHASE_DATAOUT */ 2258 + acornscsi_abortcmd(host); 2259 return INTR_IDLE; 2260 2261 case 0x1b: /* -> PHASE_STATUSIN */ ··· 2482 SCpnt->scsi_done = done; 2483 SCpnt->host_scribble = NULL; 2484 SCpnt->result = 0; 2485 SCpnt->SCp.phase = (int)acornscsi_datadirection(SCpnt->cmnd[0]); 2486 SCpnt->SCp.sent_command = 0; 2487 SCpnt->SCp.scsi_xferred = 0; ··· 2581 break; 2582 2583 default: 2584 + acornscsi_abortcmd(host); 2585 res = res_snooze; 2586 } 2587 local_irq_restore(flags); ··· 2747 #ifdef CONFIG_SCSI_ACORNSCSI_SYNC 2748 " SYNC" 2749 #endif 2750 #if (DEBUG & DEBUG_NO_WRITE) 2751 " NOWRITE (" __stringify(NO_WRITE) ")" 2752 #endif ··· 2769 seq_printf(m, "AcornSCSI driver v%d.%d.%d" 2770 #ifdef CONFIG_SCSI_ACORNSCSI_SYNC 2771 " SYNC" 2772 #endif 2773 #if (DEBUG & DEBUG_NO_WRITE) 2774 " NOWRITE (" __stringify(NO_WRITE) ")" ··· 2827 seq_printf(m, "Device/Lun TaggedQ Sync\n"); 2828 seq_printf(m, " %d/%llu ", scd->id, scd->lun); 2829 if (scd->tagged_supported) 2830 + seq_printf(m, "%3sabled ", 2831 + scd->simple_tags ? "en" : "dis"); 2832 else 2833 seq_printf(m, "unsupported "); 2834
+8 -23
drivers/scsi/arm/fas216.c
··· 77 * I was thinking that this was a good chip until I found this restriction ;( 78 */ 79 #define SCSI2_SYNC 80 - #undef SCSI2_TAG 81 82 #undef DEBUG_CONNECT 83 #undef DEBUG_MESSAGES ··· 989 info->scsi.disconnectable = 0; 990 if (info->SCpnt->device->id == target && 991 info->SCpnt->device->lun == lun && 992 - info->SCpnt->tag == tag) { 993 fas216_log(info, LOG_CONNECT, "reconnected previously executing command"); 994 } else { 995 queue_add_cmd_tail(&info->queues.disconnected, info->SCpnt); ··· 1790 /* 1791 * add tag message if required 1792 */ 1793 - if (SCpnt->tag) 1794 - msgqueue_addmsg(&info->scsi.msgs, 2, SIMPLE_QUEUE_TAG, SCpnt->tag); 1795 1796 do { 1797 #ifdef SCSI2_SYNC ··· 1815 1816 static void fas216_allocate_tag(FAS216_Info *info, struct scsi_cmnd *SCpnt) 1817 { 1818 - #ifdef SCSI2_TAG 1819 - /* 1820 - * tagged queuing - allocate a new tag to this command 1821 - */ 1822 - if (SCpnt->device->simple_tags && SCpnt->cmnd[0] != REQUEST_SENSE && 1823 - SCpnt->cmnd[0] != INQUIRY) { 1824 - SCpnt->device->current_tag += 1; 1825 - if (SCpnt->device->current_tag == 0) 1826 - SCpnt->device->current_tag = 1; 1827 - SCpnt->tag = SCpnt->device->current_tag; 1828 - } else 1829 - #endif 1830 - set_bit(SCpnt->device->id * 8 + 1831 - (u8)(SCpnt->device->lun & 0x7), info->busyluns); 1832 1833 info->stats.removes += 1; 1834 switch (SCpnt->cmnd[0]) { ··· 2105 init_SCp(SCpnt); 2106 SCpnt->SCp.Message = 0; 2107 SCpnt->SCp.Status = 0; 2108 - SCpnt->tag = 0; 2109 SCpnt->host_scribble = (void *)fas216_rq_sns_done; 2110 2111 /* ··· 2210 init_SCp(SCpnt); 2211 2212 info->stats.queues += 1; 2213 - SCpnt->tag = 0; 2214 2215 spin_lock(&info->host_lock); 2216 ··· 2989 dev = &info->device[scd->id]; 2990 seq_printf(m, " %d/%llu ", scd->id, scd->lun); 2991 if (scd->tagged_supported) 2992 - seq_printf(m, "%3sabled(%3d) ", 2993 - scd->simple_tags ? "en" : "dis", 2994 - scd->current_tag); 2995 else 2996 seq_puts(m, "unsupported "); 2997
··· 77 * I was thinking that this was a good chip until I found this restriction ;( 78 */ 79 #define SCSI2_SYNC 80 81 #undef DEBUG_CONNECT 82 #undef DEBUG_MESSAGES ··· 990 info->scsi.disconnectable = 0; 991 if (info->SCpnt->device->id == target && 992 info->SCpnt->device->lun == lun && 993 + scsi_cmd_to_rq(info->SCpnt)->tag == tag) { 994 fas216_log(info, LOG_CONNECT, "reconnected previously executing command"); 995 } else { 996 queue_add_cmd_tail(&info->queues.disconnected, info->SCpnt); ··· 1791 /* 1792 * add tag message if required 1793 */ 1794 + if (SCpnt->device->simple_tags) 1795 + msgqueue_addmsg(&info->scsi.msgs, 2, SIMPLE_QUEUE_TAG, 1796 + scsi_cmd_to_rq(SCpnt)->tag); 1797 1798 do { 1799 #ifdef SCSI2_SYNC ··· 1815 1816 static void fas216_allocate_tag(FAS216_Info *info, struct scsi_cmnd *SCpnt) 1817 { 1818 + set_bit(SCpnt->device->id * 8 + 1819 + (u8)(SCpnt->device->lun & 0x7), info->busyluns); 1820 1821 info->stats.removes += 1; 1822 switch (SCpnt->cmnd[0]) { ··· 2117 init_SCp(SCpnt); 2118 SCpnt->SCp.Message = 0; 2119 SCpnt->SCp.Status = 0; 2120 SCpnt->host_scribble = (void *)fas216_rq_sns_done; 2121 2122 /* ··· 2223 init_SCp(SCpnt); 2224 2225 info->stats.queues += 1; 2226 2227 spin_lock(&info->host_lock); 2228 ··· 3003 dev = &info->device[scd->id]; 3004 seq_printf(m, " %d/%llu ", scd->id, scd->lun); 3005 if (scd->tagged_supported) 3006 + seq_printf(m, "%3sabled ", 3007 + scd->simple_tags ? "en" : "dis"); 3008 else 3009 seq_puts(m, "unsupported "); 3010
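
The acornscsi and fas216 changes share one theme: scmd->tag and sdev->current_tag are gone, so drivers read the tag the block layer already assigned to the request. A short sketch of the accessor, assuming only a struct scsi_cmnd pointer:

    #include <scsi/scsi_cmnd.h>

    /* The per-command tag now lives in the request, not in scsi_cmnd. */
    static int my_cmd_tag(struct scsi_cmnd *scmd)
    {
            return scsi_cmd_to_rq(scmd)->tag;
    }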
+1 -1
drivers/scsi/arm/queue.c
··· 214 list_for_each(l, &queue->head) { 215 QE_t *q = list_entry(l, QE_t, list); 216 if (q->SCpnt->device->id == target && q->SCpnt->device->lun == lun && 217 - q->SCpnt->tag == tag) { 218 SCpnt = __queue_remove(queue, l); 219 break; 220 }
··· 214 list_for_each(l, &queue->head) { 215 QE_t *q = list_entry(l, QE_t, list); 216 if (q->SCpnt->device->id == target && q->SCpnt->device->lun == lun && 217 + scsi_cmd_to_rq(q->SCpnt)->tag == tag) { 218 SCpnt = __queue_remove(queue, l); 219 break; 220 }
+2 -2
drivers/scsi/elx/efct/efct_lio.c
··· 880 struct efct *efct = lio_vport->efct; 881 unsigned long flags = 0; 882 883 - spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags); 884 - 885 if (lio_vport->fc_vport) 886 fc_vport_terminate(lio_vport->fc_vport); 887 888 list_for_each_entry_safe(vport, next_vport, &efct->tgt_efct.vport_list, 889 list_entry) {
··· 880 struct efct *efct = lio_vport->efct; 881 unsigned long flags = 0; 882 883 if (lio_vport->fc_vport) 884 fc_vport_terminate(lio_vport->fc_vport); 885 + 886 + spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags); 887 888 list_for_each_entry_safe(vport, next_vport, &efct->tgt_efct.vport_list, 889 list_entry) {
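
The efct_lio reordering exists because fc_vport_terminate() may sleep, and a sleeping call must not run inside a spin_lock_irqsave() section. The general shape of the fix, with hypothetical my_* names standing in for the driver's own:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct my_obj {
            struct list_head entry;
    };

    static DEFINE_SPINLOCK(my_lock);

    static void my_teardown(struct my_obj *obj,
                            void (*sleeping_terminate)(struct my_obj *))
    {
            unsigned long flags;

            /* Sleeping work first: it must not run with my_lock held
             * and interrupts disabled. */
            sleeping_terminate(obj);

            spin_lock_irqsave(&my_lock, flags);
            list_del(&obj->entry);
            spin_unlock_irqrestore(&my_lock, flags);
    }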
+3 -4
drivers/scsi/elx/libefc/efc_device.c
··· 928 break; 929 930 case EFC_EVT_NPORT_TOPOLOGY_NOTIFY: { 931 - enum efc_nport_topology topology = 932 - (enum efc_nport_topology)arg; 933 934 WARN_ON(node->nport->domain->attached); 935 936 WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI); 937 938 node_printf(node, "topology notification, topology=%d\n", 939 - topology); 940 941 /* At the time the PLOGI was received, the topology was unknown, 942 * so we didn't know which node would perform the domain attach: 943 * 1. The node from which the PLOGI was sent (p2p) or 944 * 2. The node to which the FLOGI was sent (fabric). 945 */ 946 - if (topology == EFC_NPORT_TOPO_P2P) { 947 /* if this is p2p, need to attach to the domain using 948 * the d_id from the PLOGI received 949 */
··· 928 break; 929 930 case EFC_EVT_NPORT_TOPOLOGY_NOTIFY: { 931 + enum efc_nport_topology *topology = arg; 932 933 WARN_ON(node->nport->domain->attached); 934 935 WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI); 936 937 node_printf(node, "topology notification, topology=%d\n", 938 + *topology); 939 940 /* At the time the PLOGI was received, the topology was unknown, 941 * so we didn't know which node would perform the domain attach: 942 * 1. The node from which the PLOGI was sent (p2p) or 943 * 2. The node to which the FLOGI was sent (fabric). 944 */ 945 + if (*topology == EFC_NPORT_TOPO_P2P) { 946 /* if this is p2p, need to attach to the domain using 947 * the d_id from the PLOGI received 948 */
+1 -2
drivers/scsi/elx/libefc/efc_fabric.c
··· 107 efc_fabric_notify_topology(struct efc_node *node) 108 { 109 struct efc_node *tmp_node; 110 - enum efc_nport_topology topology = node->nport->topology; 111 unsigned long index; 112 113 /* ··· 117 if (tmp_node != node) { 118 efc_node_post_event(tmp_node, 119 EFC_EVT_NPORT_TOPOLOGY_NOTIFY, 120 - (void *)topology); 121 } 122 } 123 }
··· 107 efc_fabric_notify_topology(struct efc_node *node) 108 { 109 struct efc_node *tmp_node; 110 unsigned long index; 111 112 /* ··· 118 if (tmp_node != node) { 119 efc_node_post_event(tmp_node, 120 EFC_EVT_NPORT_TOPOLOGY_NOTIFY, 121 + &node->nport->topology); 122 } 123 } 124 }
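
Together, the two libefc hunks stop smuggling an enum value through a void * cast and instead pass the address of a long-lived field, which the receiver dereferences. A small standalone C illustration of the same idea (names here are invented, not libefc's):

    #include <stdio.h>

    enum topo { TOPO_UNKNOWN, TOPO_P2P, TOPO_FABRIC };

    /* The event callback takes an opaque argument, as the node event
     * code above does. */
    static void notify(void *arg)
    {
            enum topo *t = arg;     /* dereference; no value-in-pointer cast */

            printf("topology=%d\n", *t);
    }

    int main(void)
    {
            enum topo t = TOPO_P2P;

            notify(&t);             /* pass the address, not (void *)t */
            return 0;
    }

The pointed-to object must outlive the callback; the kernel fix satisfies that by passing &node->nport->topology, which persists for the node's lifetime.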
+4 -6
drivers/scsi/lpfc/lpfc_attr.c
··· 285 "6312 Catching potential buffer " 286 "overflow > PAGE_SIZE = %lu bytes\n", 287 PAGE_SIZE); 288 - strscpy(buf + PAGE_SIZE - 1 - 289 - strnlen(LPFC_INFO_MORE_STR, PAGE_SIZE - 1), 290 - LPFC_INFO_MORE_STR, 291 - strnlen(LPFC_INFO_MORE_STR, PAGE_SIZE - 1) 292 - + 1); 293 } 294 return len; 295 } ··· 6201 len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n", 6202 phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt); 6203 6204 - len += scnprintf(buf + len, PAGE_SIZE, "Cfg: %d SCSI: %d NVME: %d\n", 6205 phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt, 6206 phba->cfg_nvme_seg_cnt); 6207 return len;
··· 285 "6312 Catching potential buffer " 286 "overflow > PAGE_SIZE = %lu bytes\n", 287 PAGE_SIZE); 288 + strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR), 289 + LPFC_INFO_MORE_STR, sizeof(LPFC_INFO_MORE_STR) + 1); 290 } 291 return len; 292 } ··· 6204 len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n", 6205 phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt); 6206 6207 + len += scnprintf(buf + len, PAGE_SIZE - len, 6208 + "Cfg: %d SCSI: %d NVME: %d\n", 6209 phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt, 6210 phba->cfg_nvme_seg_cnt); 6211 return len;
+5 -5
drivers/scsi/lpfc/lpfc_els.c
··· 4015 be32_to_cpu(pcgd->desc_tag), 4016 be32_to_cpu(pcgd->desc_len), 4017 be32_to_cpu(pcgd->xmt_signal_capability), 4018 - be32_to_cpu(pcgd->xmt_signal_frequency.count), 4019 - be32_to_cpu(pcgd->xmt_signal_frequency.units), 4020 be32_to_cpu(pcgd->rcv_signal_capability), 4021 - be32_to_cpu(pcgd->rcv_signal_frequency.count), 4022 - be32_to_cpu(pcgd->rcv_signal_frequency.units)); 4023 4024 /* Compare driver and Fport capabilities and choose 4025 * least common. ··· 9387 /* Extract the next WWPN from the payload */ 9388 wwn = *wwnlist++; 9389 wwpn = be64_to_cpu(wwn); 9390 - len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ, 9391 " %016llx", wwpn); 9392 9393 /* Log a message if we are on the last WWPN
··· 4015 be32_to_cpu(pcgd->desc_tag), 4016 be32_to_cpu(pcgd->desc_len), 4017 be32_to_cpu(pcgd->xmt_signal_capability), 4018 + be16_to_cpu(pcgd->xmt_signal_frequency.count), 4019 + be16_to_cpu(pcgd->xmt_signal_frequency.units), 4020 be32_to_cpu(pcgd->rcv_signal_capability), 4021 + be16_to_cpu(pcgd->rcv_signal_frequency.count), 4022 + be16_to_cpu(pcgd->rcv_signal_frequency.units)); 4023 4024 /* Compare driver and Fport capabilities and choose 4025 * least common. ··· 9387 /* Extract the next WWPN from the payload */ 9388 wwn = *wwnlist++; 9389 wwpn = be64_to_cpu(wwn); 9390 + len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len, 9391 " %016llx", wwpn); 9392 9393 /* Log a message if we are on the last WWPN
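
Both lpfc printf fixes correct the same append idiom: when building a string piece by piece, each scnprintf() must be given the space remaining, not the whole buffer size, or later writes can run past the end. A standalone sketch of the idiom using userspace snprintf():

    #include <stdio.h>

    int main(void)
    {
            char buf[64];
            size_t len = 0;

            /* Each append gets only the space that is still free. */
            len += snprintf(buf + len, sizeof(buf) - len, "SGL sz: %d\n", 4096);
            len += snprintf(buf + len, sizeof(buf) - len, "Cfg: %d\n", 64);
            fputs(buf, stdout);
            return 0;
    }

One caveat for real code: unlike the kernel's scnprintf(), snprintf() returns the would-be length, so on truncation len can exceed the buffer size and needs clamping.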
+1 -1
drivers/scsi/lpfc/lpfc_hw4.h
··· 1167 #define lpfc_mbx_rd_object_rlen_MASK 0x00FFFFFF 1168 #define lpfc_mbx_rd_object_rlen_WORD word0 1169 uint32_t rd_object_offset; 1170 - uint32_t rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW]; 1171 #define LPFC_OBJ_NAME_SZ 104 /* 26 x sizeof(uint32_t) is 104. */ 1172 uint32_t rd_object_cnt; 1173 struct lpfc_mbx_host_buf rd_object_hbuf[4];
··· 1167 #define lpfc_mbx_rd_object_rlen_MASK 0x00FFFFFF 1168 #define lpfc_mbx_rd_object_rlen_WORD word0 1169 uint32_t rd_object_offset; 1170 + __le32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW]; 1171 #define LPFC_OBJ_NAME_SZ 104 /* 26 x sizeof(uint32_t) is 104. */ 1172 uint32_t rd_object_cnt; 1173 struct lpfc_mbx_host_buf rd_object_hbuf[4];
+10 -10
drivers/scsi/lpfc/lpfc_init.c
··· 5518 if (phba->cgn_fpin_frequency && 5519 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { 5520 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; 5521 - cp->cgn_stat_npm = cpu_to_le32(value); 5522 } 5523 value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, 5524 LPFC_CGN_CRC32_SEED); ··· 5547 uint32_t mbps; 5548 uint32_t dvalue, wvalue, lvalue, avalue; 5549 uint64_t latsum; 5550 - uint16_t *ptr; 5551 - uint32_t *lptr; 5552 - uint16_t *mptr; 5553 5554 /* Make sure we have a congestion info buffer */ 5555 if (!phba->cgn_i) ··· 5570 if (phba->cgn_fpin_frequency && 5571 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { 5572 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; 5573 - cp->cgn_stat_npm = cpu_to_le32(value); 5574 } 5575 5576 /* Read and clear the latency counters for this minute */ ··· 5753 dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]); 5754 wvalue += le32_to_cpu(cp->cgn_warn_hr[i]); 5755 lvalue += le32_to_cpu(cp->cgn_latency_hr[i]); 5756 - mbps += le32_to_cpu(cp->cgn_bw_hr[i]); 5757 avalue += le32_to_cpu(cp->cgn_alarm_hr[i]); 5758 } 5759 if (lvalue) /* Avg of latency averages */ ··· 8277 return 0; 8278 8279 out_free_hba_hdwq_info: 8280 - free_percpu(phba->sli4_hba.c_stat); 8281 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 8282 out_free_hba_idle_stat: 8283 - kfree(phba->sli4_hba.idle_stat); 8284 #endif 8285 out_free_hba_eq_info: 8286 free_percpu(phba->sli4_hba.eq_info); 8287 out_free_hba_cpu_map: ··· 13411 13412 /* last used Index initialized to 0xff already */ 13413 13414 - cp->cgn_warn_freq = LPFC_FPIN_INIT_FREQ; 13415 - cp->cgn_alarm_freq = LPFC_FPIN_INIT_FREQ; 13416 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); 13417 cp->cgn_info_crc = cpu_to_le32(crc); 13418
··· 5518 if (phba->cgn_fpin_frequency && 5519 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { 5520 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; 5521 + cp->cgn_stat_npm = value; 5522 } 5523 value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, 5524 LPFC_CGN_CRC32_SEED); ··· 5547 uint32_t mbps; 5548 uint32_t dvalue, wvalue, lvalue, avalue; 5549 uint64_t latsum; 5550 + __le16 *ptr; 5551 + __le32 *lptr; 5552 + __le16 *mptr; 5553 5554 /* Make sure we have a congestion info buffer */ 5555 if (!phba->cgn_i) ··· 5570 if (phba->cgn_fpin_frequency && 5571 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { 5572 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; 5573 + cp->cgn_stat_npm = value; 5574 } 5575 5576 /* Read and clear the latency counters for this minute */ ··· 5753 dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]); 5754 wvalue += le32_to_cpu(cp->cgn_warn_hr[i]); 5755 lvalue += le32_to_cpu(cp->cgn_latency_hr[i]); 5756 + mbps += le16_to_cpu(cp->cgn_bw_hr[i]); 5757 avalue += le32_to_cpu(cp->cgn_alarm_hr[i]); 5758 } 5759 if (lvalue) /* Avg of latency averages */ ··· 8277 return 0; 8278 8279 out_free_hba_hdwq_info: 8280 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 8281 + free_percpu(phba->sli4_hba.c_stat); 8282 out_free_hba_idle_stat: 8283 #endif 8284 + kfree(phba->sli4_hba.idle_stat); 8285 out_free_hba_eq_info: 8286 free_percpu(phba->sli4_hba.eq_info); 8287 out_free_hba_cpu_map: ··· 13411 13412 /* last used Index initialized to 0xff already */ 13413 13414 + cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ); 13415 + cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ); 13416 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); 13417 cp->cgn_info_crc = cpu_to_le32(crc); 13418
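
The lpfc_init.c hunks are endianness repairs: every accessor must match the declared width and byte order of the field it touches, so a __le16 field is read with le16_to_cpu() and written with cpu_to_le16(), and native-endian fields get no conversion at all. A compact kernel-style sketch with an invented struct:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct my_stats {               /* hypothetical layout */
            __le16 bw_hr[24];
            __le32 latency_hr[24];
    };

    static u32 my_sum_bw(const struct my_stats *cp)
    {
            u32 total = 0;
            int i;

            for (i = 0; i < 24; i++)
                    total += le16_to_cpu(cp->bw_hr[i]); /* width matches */
            return total;
    }

Mismatched-width conversions such as le32_to_cpu() on a __le16 value byte-swap at the wrong width and corrupt the result on big-endian hosts, which is what sparse flags and what the hunks above remove.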
-2
drivers/scsi/lpfc/lpfc_nvme.c
··· 1489 struct lpfc_nvme_qhandle *lpfc_queue_info; 1490 struct lpfc_nvme_fcpreq_priv *freqpriv; 1491 struct nvme_common_command *sqe; 1492 - #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1493 uint64_t start = 0; 1494 - #endif 1495 1496 /* Validate pointers. LLDD fault handling with transport does 1497 * have timing races.
··· 1489 struct lpfc_nvme_qhandle *lpfc_queue_info; 1490 struct lpfc_nvme_fcpreq_priv *freqpriv; 1491 struct nvme_common_command *sqe; 1492 uint64_t start = 0; 1493 1494 /* Validate pointers. LLDD fault handling with transport does 1495 * have timing races.
+2 -7
drivers/scsi/lpfc/lpfc_scsi.c
··· 1495 lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1496 uint8_t *txop, uint8_t *rxop) 1497 { 1498 - uint8_t ret = 0; 1499 1500 if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) { 1501 switch (scsi_get_prot_op(sc)) { ··· 1547 } 1548 } 1549 1550 - return ret; 1551 } 1552 #endif 1553 ··· 5577 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 5578 int err, idx; 5579 u8 *uuid = NULL; 5580 - #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 5581 - uint64_t start = 0L; 5582 5583 - if (phba->ktime_on) 5584 - start = ktime_get_ns(); 5585 - #endif 5586 start = ktime_get_ns(); 5587 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 5588
··· 1495 lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1496 uint8_t *txop, uint8_t *rxop) 1497 { 1498 1499 if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) { 1500 switch (scsi_get_prot_op(sc)) { ··· 1548 } 1549 } 1550 1551 + return 0; 1552 } 1553 #endif 1554 ··· 5578 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 5579 int err, idx; 5580 u8 *uuid = NULL; 5581 + uint64_t start; 5582 5583 start = ktime_get_ns(); 5584 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 5585
+3 -2
drivers/scsi/lpfc/lpfc_sli.c
··· 22090 uint32_t shdr_status, shdr_add_status; 22091 union lpfc_sli4_cfg_shdr *shdr; 22092 struct lpfc_dmabuf *pcmd; 22093 22094 /* sanity check on queue memory */ 22095 if (!datap) ··· 22114 22115 memset((void *)read_object->u.request.rd_object_name, 0, 22116 LPFC_OBJ_NAME_SZ); 22117 - sprintf((uint8_t *)read_object->u.request.rd_object_name, rdobject); 22118 for (j = 0; j < strlen(rdobject); j++) 22119 read_object->u.request.rd_object_name[j] = 22120 - cpu_to_le32(read_object->u.request.rd_object_name[j]); 22121 22122 pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL); 22123 if (pcmd)
··· 22090 uint32_t shdr_status, shdr_add_status; 22091 union lpfc_sli4_cfg_shdr *shdr; 22092 struct lpfc_dmabuf *pcmd; 22093 + u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0}; 22094 22095 /* sanity check on queue memory */ 22096 if (!datap) ··· 22113 22114 memset((void *)read_object->u.request.rd_object_name, 0, 22115 LPFC_OBJ_NAME_SZ); 22116 + scnprintf((char *)rd_object_name, sizeof(rd_object_name), rdobject); 22117 for (j = 0; j < strlen(rdobject); j++) 22118 read_object->u.request.rd_object_name[j] = 22119 + cpu_to_le32(rd_object_name[j]); 22120 22121 pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL); 22122 if (pcmd)
+3 -4
drivers/scsi/megaraid/megaraid_sas_base.c
··· 1916 raid = MR_LdRaidGet(ld, local_map_ptr); 1917 1918 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) 1919 - blk_queue_update_dma_alignment(sdev->request_queue, 0x7); 1920 1921 mr_device_priv_data->is_tm_capable = 1922 raid->capability.tmCapable; ··· 8033 8034 if (instance->adapter_type != MFI_SERIES) { 8035 megasas_release_fusion(instance); 8036 - pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 8037 (sizeof(struct MR_PD_CFG_SEQ) * 8038 (MAX_PHYSICAL_DEVICES - 1)); 8039 for (i = 0; i < 2 ; i++) { ··· 8773 8774 if (event_type & SCAN_VD_CHANNEL) { 8775 if (!instance->requestorId || 8776 - (instance->requestorId && 8777 - megasas_get_ld_vf_affiliation(instance, 0))) { 8778 dcmd_ret = megasas_ld_list_query(instance, 8779 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); 8780 if (dcmd_ret != DCMD_SUCCESS)
··· 1916 raid = MR_LdRaidGet(ld, local_map_ptr); 1917 1918 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) 1919 + blk_queue_update_dma_alignment(sdev->request_queue, 0x7); 1920 1921 mr_device_priv_data->is_tm_capable = 1922 raid->capability.tmCapable; ··· 8033 8034 if (instance->adapter_type != MFI_SERIES) { 8035 megasas_release_fusion(instance); 8036 + pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 8037 (sizeof(struct MR_PD_CFG_SEQ) * 8038 (MAX_PHYSICAL_DEVICES - 1)); 8039 for (i = 0; i < 2 ; i++) { ··· 8773 8774 if (event_type & SCAN_VD_CHANNEL) { 8775 if (!instance->requestorId || 8776 + megasas_get_ld_vf_affiliation(instance, 0)) { 8777 dcmd_ret = megasas_ld_list_query(instance, 8778 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); 8779 if (dcmd_ret != DCMD_SUCCESS)
+3 -1
drivers/scsi/mpt3sas/mpt3sas_base.c
··· 1582 * wait for current poll to complete. 1583 */ 1584 for (qid = 0; qid < iopoll_q_count; qid++) { 1585 - while (atomic_read(&ioc->io_uring_poll_queues[qid].busy)) 1586 udelay(500); 1587 } 1588 } 1589
··· 1582 * wait for current poll to complete. 1583 */ 1584 for (qid = 0; qid < iopoll_q_count; qid++) { 1585 + while (atomic_read(&ioc->io_uring_poll_queues[qid].busy)) { 1586 + cpu_relax(); 1587 udelay(500); 1588 + } 1589 } 1590 } 1591
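
The mpt3sas busy-wait gains cpu_relax(), the standard hint inside spin loops that lets the CPU lower power or yield pipeline resources while polling a flag another context will clear. The loop shape, as a hedged sketch:

    #include <linux/atomic.h>
    #include <linux/delay.h>

    /* cpu_relax() itself comes from the arch headers. */
    static void my_wait_for_poller(atomic_t *busy)
    {
            while (atomic_read(busy)) {
                    cpu_relax();    /* spinning politely */
                    udelay(500);
            }
    }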
+1 -1
drivers/scsi/mpt3sas/mpt3sas_ctl.c
··· 2178 mpt3sas_check_cmd_timeout(ioc, 2179 ioc->ctl_cmds.status, mpi_request, 2180 sizeof(Mpi2DiagReleaseRequest_t)/4, reset_needed); 2181 - *issue_reset = reset_needed; 2182 rc = -EFAULT; 2183 goto out; 2184 }
··· 2178 mpt3sas_check_cmd_timeout(ioc, 2179 ioc->ctl_cmds.status, mpi_request, 2180 sizeof(Mpi2DiagReleaseRequest_t)/4, reset_needed); 2181 + *issue_reset = reset_needed; 2182 rc = -EFAULT; 2183 goto out; 2184 }
+1 -2
drivers/scsi/mpt3sas/mpt3sas_scsih.c
··· 10749 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 10750 _scsih_pcie_topology_change_event(ioc, fw_event); 10751 ioc->current_event = NULL; 10752 - return; 10753 - break; 10754 } 10755 out: 10756 fw_event_work_put(fw_event);
··· 10749 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 10750 _scsih_pcie_topology_change_event(ioc, fw_event); 10751 ioc->current_event = NULL; 10752 + return; 10753 } 10754 out: 10755 fw_event_work_put(fw_event);
-23
drivers/scsi/ncr53c8xx.c
··· 1939 static void ncr_put_start_queue(struct ncb *np, struct ccb *cp); 1940 1941 static void insert_into_waiting_list(struct ncb *np, struct scsi_cmnd *cmd); 1942 - static struct scsi_cmnd *retrieve_from_waiting_list(int to_remove, struct ncb *np, struct scsi_cmnd *cmd); 1943 static void process_waiting_list(struct ncb *np, int sts); 1944 1945 - #define remove_from_waiting_list(np, cmd) \ 1946 - retrieve_from_waiting_list(1, (np), (cmd)) 1947 #define requeue_waiting_list(np) process_waiting_list((np), DID_OK) 1948 #define reset_waiting_list(np) process_waiting_list((np), DID_RESET) 1949 ··· 7992 wcmd = (struct scsi_cmnd *) wcmd->next_wcmd; 7993 wcmd->next_wcmd = (char *) cmd; 7994 } 7995 - } 7996 - 7997 - static struct scsi_cmnd *retrieve_from_waiting_list(int to_remove, struct ncb *np, struct scsi_cmnd *cmd) 7998 - { 7999 - struct scsi_cmnd **pcmd = &np->waiting_list; 8000 - 8001 - while (*pcmd) { 8002 - if (cmd == *pcmd) { 8003 - if (to_remove) { 8004 - *pcmd = (struct scsi_cmnd *) cmd->next_wcmd; 8005 - cmd->next_wcmd = NULL; 8006 - } 8007 - #ifdef DEBUG_WAITING_LIST 8008 - printk("%s: cmd %lx retrieved from waiting list\n", ncr_name(np), (u_long) cmd); 8009 - #endif 8010 - return cmd; 8011 - } 8012 - pcmd = (struct scsi_cmnd **) &(*pcmd)->next_wcmd; 8013 - } 8014 - return NULL; 8015 } 8016 8017 static void process_waiting_list(struct ncb *np, int sts)
··· 1939 static void ncr_put_start_queue(struct ncb *np, struct ccb *cp); 1940 1941 static void insert_into_waiting_list(struct ncb *np, struct scsi_cmnd *cmd); 1942 static void process_waiting_list(struct ncb *np, int sts); 1943 1944 #define requeue_waiting_list(np) process_waiting_list((np), DID_OK) 1945 #define reset_waiting_list(np) process_waiting_list((np), DID_RESET) 1946 ··· 7995 wcmd = (struct scsi_cmnd *) wcmd->next_wcmd; 7996 wcmd->next_wcmd = (char *) cmd; 7997 } 7998 } 7999 8000 static void process_waiting_list(struct ncb *np, int sts)
+2 -1
drivers/scsi/qla2xxx/qla_init.c
··· 7169 return 0; 7170 break; 7171 case QLA2XXX_INI_MODE_DUAL: 7172 - if (!qla_dual_mode_enabled(vha)) 7173 return 0; 7174 break; 7175 case QLA2XXX_INI_MODE_ENABLED:
··· 7169 return 0; 7170 break; 7171 case QLA2XXX_INI_MODE_DUAL: 7172 + if (!qla_dual_mode_enabled(vha) && 7173 + !qla_ini_mode_enabled(vha)) 7174 return 0; 7175 break; 7176 case QLA2XXX_INI_MODE_ENABLED:
+4 -4
drivers/scsi/scsi_transport_iscsi.c
··· 441 struct iscsi_transport *t = iface->transport; 442 int param = -1; 443 444 - if (attr == &dev_attr_iface_enabled.attr) 445 - param = ISCSI_NET_PARAM_IFACE_ENABLE; 446 - else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr) 447 param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO; 448 else if (attr == &dev_attr_iface_header_digest.attr) 449 param = ISCSI_IFACE_PARAM_HDRDGST_EN; ··· 481 if (param != -1) 482 return t->attr_is_visible(ISCSI_IFACE_PARAM, param); 483 484 - if (attr == &dev_attr_iface_vlan_id.attr) 485 param = ISCSI_NET_PARAM_VLAN_ID; 486 else if (attr == &dev_attr_iface_vlan_priority.attr) 487 param = ISCSI_NET_PARAM_VLAN_PRIORITY;
··· 441 struct iscsi_transport *t = iface->transport; 442 int param = -1; 443 444 + if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr) 445 param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO; 446 else if (attr == &dev_attr_iface_header_digest.attr) 447 param = ISCSI_IFACE_PARAM_HDRDGST_EN; ··· 483 if (param != -1) 484 return t->attr_is_visible(ISCSI_IFACE_PARAM, param); 485 486 + if (attr == &dev_attr_iface_enabled.attr) 487 + param = ISCSI_NET_PARAM_IFACE_ENABLE; 488 + else if (attr == &dev_attr_iface_vlan_id.attr) 489 param = ISCSI_NET_PARAM_VLAN_ID; 490 else if (attr == &dev_attr_iface_vlan_priority.attr) 491 param = ISCSI_NET_PARAM_VLAN_PRIORITY;
+9 -5
drivers/scsi/sd.c
··· 2124 retries = 0; 2125 2126 do { 2127 cmd[0] = TEST_UNIT_READY; 2128 memset((void *) &cmd[1], 0, 9); 2129 ··· 2140 * with any more polling. 2141 */ 2142 if (media_not_present(sdkp, &sshdr)) { 2143 - sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n"); 2144 return; 2145 } 2146 ··· 3404 } 3405 3406 device_initialize(&sdkp->dev); 3407 - sdkp->dev.parent = dev; 3408 sdkp->dev.class = &sd_disk_class; 3409 dev_set_name(&sdkp->dev, "%s", dev_name(dev)); 3410 3411 error = device_add(&sdkp->dev); 3412 - if (error) 3413 - goto out_free_index; 3414 3415 - get_device(dev); 3416 dev_set_drvdata(dev, sdkp); 3417 3418 gd->major = sd_major((index & 0xf0) >> 4);
··· 2124 retries = 0; 2125 2126 do { 2127 + bool media_was_present = sdkp->media_present; 2128 + 2129 cmd[0] = TEST_UNIT_READY; 2130 memset((void *) &cmd[1], 0, 9); 2131 ··· 2138 * with any more polling. 2139 */ 2140 if (media_not_present(sdkp, &sshdr)) { 2141 + if (media_was_present) 2142 + sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n"); 2143 return; 2144 } 2145 ··· 3401 } 3402 3403 device_initialize(&sdkp->dev); 3404 + sdkp->dev.parent = get_device(dev); 3405 sdkp->dev.class = &sd_disk_class; 3406 dev_set_name(&sdkp->dev, "%s", dev_name(dev)); 3407 3408 error = device_add(&sdkp->dev); 3409 + if (error) { 3410 + put_device(&sdkp->dev); 3411 + goto out; 3412 + } 3413 3414 dev_set_drvdata(dev, sdkp); 3415 3416 gd->major = sd_major((index & 0xf0) >> 4);
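
The sd.c unregistration fix pairs with the bsg one above: after device_initialize(), error unwinding must go through put_device() so the ->release callback cleans up, and the parent is pinned with get_device() at assignment time. A sketch under the assumption (true for sd) that the release callback also drops the parent reference:

    #include <linux/device.h>

    struct my_dev {
            struct device device;   /* has a ->release set elsewhere */
    };

    static int my_add(struct device *parent, struct my_dev *md)
    {
            int error;

            md->device.parent = get_device(parent); /* pin the parent */
            device_initialize(&md->device);

            error = device_add(&md->device);
            if (error) {
                    /* No kfree() here: drop the reference and let
                     * ->release free md and put the parent. */
                    put_device(&md->device);
                    return error;
            }
            return 0;
    }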
+4 -4
drivers/scsi/sd_zbc.c
··· 154 155 /* 156 * Report zone buffer size should be at most 64B times the number of 157 - * zones requested plus the 64B reply header, but should be at least 158 - * SECTOR_SIZE for ATA devices. 159 * Make sure that this size does not exceed the hardware capabilities. 160 * Furthermore, since the report zone command cannot be split, make 161 * sure that the allocated buffer can always be mapped by limiting the ··· 174 *buflen = bufsize; 175 return buf; 176 } 177 - bufsize >>= 1; 178 } 179 180 return NULL; ··· 280 { 281 struct scsi_disk *sdkp; 282 unsigned long flags; 283 - unsigned int zno; 284 int ret; 285 286 sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work);
··· 154 155 /* 156 * Report zone buffer size should be at most 64B times the number of 157 + * zones requested plus the 64B reply header, but should be aligned 158 + * to SECTOR_SIZE for ATA devices. 159 * Make sure that this size does not exceed the hardware capabilities. 160 * Furthermore, since the report zone command cannot be split, make 161 * sure that the allocated buffer can always be mapped by limiting the ··· 174 *buflen = bufsize; 175 return buf; 176 } 177 + bufsize = rounddown(bufsize >> 1, SECTOR_SIZE); 178 } 179 180 return NULL; ··· 280 { 281 struct scsi_disk *sdkp; 282 unsigned long flags; 283 + sector_t zno; 284 int ret; 285 286 sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work);
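
The sd_zbc sizing change keeps the shrink-on-allocation-failure loop aligned: halving the buffer could produce a size that is not a multiple of SECTOR_SIZE, which ATA report-zones buffers require, so each halving is rounded back down. The arithmetic, as a standalone demo:

    #include <stdio.h>

    #define SECTOR_SIZE 512
    /* equivalent to the kernel's rounddown() for unsigned values */
    #define rounddown(x, y) (((x) / (y)) * (y))

    int main(void)
    {
            size_t bufsize = 130560;        /* example start, not the driver's */

            while (bufsize >= SECTOR_SIZE) {
                    printf("try %zu bytes\n", bufsize);
                    bufsize = rounddown(bufsize >> 1, SECTOR_SIZE);
            }
            return 0;
    }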
+18 -4
drivers/scsi/ses.c
··· 87 0 88 }; 89 unsigned char recv_page_code; 90 91 - ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, 92 - NULL, SES_TIMEOUT, SES_RETRIES, NULL); 93 if (unlikely(ret)) 94 return ret; 95 ··· 128 bufflen & 0xff, 129 0 130 }; 131 132 - result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen, 133 - NULL, SES_TIMEOUT, SES_RETRIES, NULL); 134 if (result) 135 sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n", 136 result);
··· 87 0 88 }; 89 unsigned char recv_page_code; 90 + unsigned int retries = SES_RETRIES; 91 + struct scsi_sense_hdr sshdr; 92 93 + do { 94 + ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, 95 + &sshdr, SES_TIMEOUT, 1, NULL); 96 + } while (ret > 0 && --retries && scsi_sense_valid(&sshdr) && 97 + (sshdr.sense_key == NOT_READY || 98 + (sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29))); 99 + 100 if (unlikely(ret)) 101 return ret; 102 ··· 121 bufflen & 0xff, 122 0 123 }; 124 + struct scsi_sense_hdr sshdr; 125 + unsigned int retries = SES_RETRIES; 126 127 + do { 128 + result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen, 129 + &sshdr, SES_TIMEOUT, 1, NULL); 130 + } while (result > 0 && --retries && scsi_sense_valid(&sshdr) && 131 + (sshdr.sense_key == NOT_READY || 132 + (sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29))); 133 + 134 if (result) 135 sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n", 136 result);
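
The ses retry loops above treat exactly two sense conditions as transient: NOT READY, and UNIT ATTENTION with ASC 0x29 (power on, reset, or bus device reset occurred). The predicate, condensed into a hypothetical helper:

    #include <scsi/scsi_common.h>
    #include <scsi/scsi_proto.h>

    static bool ses_should_retry(int result, struct scsi_sense_hdr *sshdr)
    {
            return result > 0 && scsi_sense_valid(sshdr) &&
                   (sshdr->sense_key == NOT_READY ||
                    (sshdr->sense_key == UNIT_ATTENTION &&
                     sshdr->asc == 0x29));
    }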
+1 -1
drivers/scsi/sr_ioctl.c
··· 523 return rc; 524 cd->readcd_known = 0; 525 sr_printk(KERN_INFO, cd, 526 - "CDROM does'nt support READ CD (0xbe) command\n"); 527 /* fall & retry the other way */ 528 } 529 /* ... if this fails, we switch the blocksize using MODE SELECT */
··· 523 return rc; 524 cd->readcd_known = 0; 525 sr_printk(KERN_INFO, cd, 526 + "CDROM doesn't support READ CD (0xbe) command\n"); 527 /* fall & retry the other way */ 528 } 529 /* ... if this fails, we switch the blocksize using MODE SELECT */
+1
drivers/scsi/st.c
··· 3823 case CDROM_SEND_PACKET: 3824 if (!capable(CAP_SYS_RAWIO)) 3825 return -EPERM; 3826 default: 3827 break; 3828 }
··· 3823 case CDROM_SEND_PACKET: 3824 if (!capable(CAP_SYS_RAWIO)) 3825 return -EPERM; 3826 + break; 3827 default: 3828 break; 3829 }
+78
drivers/scsi/ufs/ufshcd-pci.c
··· 128 return err; 129 } 130 131 #define INTEL_ACTIVELTR 0x804 132 #define INTEL_IDLELTR 0x808 133 ··· 426 struct ufs_host *ufs_host; 427 int err; 428 429 hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8; 430 hba->caps |= UFSHCD_CAP_CRYPTO; 431 err = ufs_intel_common_init(hba); ··· 457 .exit = ufs_intel_common_exit, 458 .hce_enable_notify = ufs_intel_hce_enable_notify, 459 .link_startup_notify = ufs_intel_link_startup_notify, 460 .resume = ufs_intel_resume, 461 .device_reset = ufs_intel_device_reset, 462 };
··· 128 return err; 129 } 130 131 + static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes) 132 + { 133 + struct ufs_pa_layer_attr pwr_info = hba->pwr_info; 134 + int ret; 135 + 136 + pwr_info.lane_rx = lanes; 137 + pwr_info.lane_tx = lanes; 138 + ret = ufshcd_config_pwr_mode(hba, &pwr_info); 139 + if (ret) 140 + dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n", 141 + __func__, lanes, ret); 142 + return ret; 143 + } 144 + 145 + static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba, 146 + enum ufs_notify_change_status status, 147 + struct ufs_pa_layer_attr *dev_max_params, 148 + struct ufs_pa_layer_attr *dev_req_params) 149 + { 150 + int err = 0; 151 + 152 + switch (status) { 153 + case PRE_CHANGE: 154 + if (ufshcd_is_hs_mode(dev_max_params) && 155 + (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2)) 156 + ufs_intel_set_lanes(hba, 2); 157 + memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params)); 158 + break; 159 + case POST_CHANGE: 160 + if (ufshcd_is_hs_mode(dev_req_params)) { 161 + u32 peer_granularity; 162 + 163 + usleep_range(1000, 1250); 164 + err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), 165 + &peer_granularity); 166 + } 167 + break; 168 + default: 169 + break; 170 + } 171 + 172 + return err; 173 + } 174 + 175 + static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba) 176 + { 177 + u32 granularity, peer_granularity; 178 + u32 pa_tactivate, peer_pa_tactivate; 179 + int ret; 180 + 181 + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity); 182 + if (ret) 183 + goto out; 184 + 185 + ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &peer_granularity); 186 + if (ret) 187 + goto out; 188 + 189 + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate); 190 + if (ret) 191 + goto out; 192 + 193 + ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &peer_pa_tactivate); 194 + if (ret) 195 + goto out; 196 + 197 + if (granularity == peer_granularity) { 198 + u32 new_peer_pa_tactivate = pa_tactivate + 2; 199 + 200 + ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), new_peer_pa_tactivate); 201 + } 202 + out: 203 + return ret; 204 + } 205 + 206 #define INTEL_ACTIVELTR 0x804 207 #define INTEL_IDLELTR 0x808 208 ··· 351 struct ufs_host *ufs_host; 352 int err; 353 354 + hba->nop_out_timeout = 200; 355 hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8; 356 hba->caps |= UFSHCD_CAP_CRYPTO; 357 err = ufs_intel_common_init(hba); ··· 381 .exit = ufs_intel_common_exit, 382 .hce_enable_notify = ufs_intel_hce_enable_notify, 383 .link_startup_notify = ufs_intel_link_startup_notify, 384 + .pwr_change_notify = ufs_intel_lkf_pwr_change_notify, 385 + .apply_dev_quirks = ufs_intel_lkf_apply_dev_quirks, 386 .resume = ufs_intel_resume, 387 .device_reset = ufs_intel_device_reset, 388 };
+57 -59
drivers/scsi/ufs/ufshcd.c
··· 17 #include <linux/blk-pm.h> 18 #include <linux/blkdev.h> 19 #include <scsi/scsi_driver.h> 20 - #include <scsi/scsi_transport.h> 21 - #include "../scsi_transport_api.h" 22 #include "ufshcd.h" 23 #include "ufs_quirks.h" 24 #include "unipro.h" ··· 235 static irqreturn_t ufshcd_intr(int irq, void *__hba); 236 static int ufshcd_change_power_mode(struct ufs_hba *hba, 237 struct ufs_pa_layer_attr *pwr_mode); 238 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on); 239 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on); 240 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, ··· 2758 out: 2759 up_read(&hba->clk_scaling_lock); 2760 2761 - if (ufs_trigger_eh()) 2762 - scsi_schedule_eh(hba->host); 2763 2764 return err; 2765 } ··· 3923 } 3924 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr); 3925 3926 - static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba) 3927 - { 3928 - lockdep_assert_held(hba->host->host_lock); 3929 - 3930 - return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) || 3931 - (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)); 3932 - } 3933 - 3934 - static void ufshcd_schedule_eh(struct ufs_hba *hba) 3935 - { 3936 - bool schedule_eh = false; 3937 - unsigned long flags; 3938 - 3939 - spin_lock_irqsave(hba->host->host_lock, flags); 3940 - /* handle fatal errors only when link is not in error state */ 3941 - if (hba->ufshcd_state != UFSHCD_STATE_ERROR) { 3942 - if (hba->force_reset || ufshcd_is_link_broken(hba) || 3943 - ufshcd_is_saved_err_fatal(hba)) 3944 - hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL; 3945 - else 3946 - hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL; 3947 - schedule_eh = true; 3948 - } 3949 - spin_unlock_irqrestore(hba->host->host_lock, flags); 3950 - 3951 - if (schedule_eh) 3952 - scsi_schedule_eh(hba->host); 3953 - } 3954 - 3955 /** 3956 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power 3957 * state) and waits for it to take effect. 
··· 3943 { 3944 DECLARE_COMPLETION_ONSTACK(uic_async_done); 3945 unsigned long flags; 3946 - bool schedule_eh = false; 3947 u8 status; 3948 int ret; 3949 bool reenable_intr = false; ··· 4012 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL); 4013 if (ret) { 4014 ufshcd_set_link_broken(hba); 4015 - schedule_eh = true; 4016 } 4017 - 4018 out_unlock: 4019 spin_unlock_irqrestore(hba->host->host_lock, flags); 4020 - 4021 - if (schedule_eh) 4022 - ufshcd_schedule_eh(hba); 4023 mutex_unlock(&hba->uic_cmd_mutex); 4024 4025 return ret; ··· 4746 mutex_lock(&hba->dev_cmd.lock); 4747 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) { 4748 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, 4749 - NOP_OUT_TIMEOUT); 4750 4751 if (!err || err == -ETIMEDOUT) 4752 break; ··· 5881 return err_handling; 5882 } 5883 5884 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow) 5885 { 5886 down_write(&hba->clk_scaling_lock); ··· 6035 6036 /** 6037 * ufshcd_err_handler - handle UFS errors that require s/w attention 6038 - * @host: SCSI host pointer 6039 */ 6040 - static void ufshcd_err_handler(struct Scsi_Host *host) 6041 { 6042 - struct ufs_hba *hba = shost_priv(host); 6043 unsigned long flags; 6044 bool err_xfer = false; 6045 bool err_tm = false; ··· 6047 int tag; 6048 bool needs_reset = false, needs_restore = false; 6049 6050 down(&hba->host_sem); 6051 spin_lock_irqsave(hba->host->host_lock, flags); 6052 - hba->host->host_eh_scheduled = 0; 6053 if (ufshcd_err_handling_should_stop(hba)) { 6054 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) 6055 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; ··· 6363 "host_regs: "); 6364 ufshcd_print_pwr_info(hba); 6365 } 6366 retval |= IRQ_HANDLED; 6367 } 6368 /* ··· 6375 hba->errors = 0; 6376 hba->uic_error = 0; 6377 spin_unlock(hba->host->host_lock); 6378 - 6379 - if (queue_eh_work) 6380 - ufshcd_schedule_eh(hba); 6381 - 6382 return retval; 6383 } 6384 ··· 6865 err = ufshcd_clear_cmd(hba, pos); 6866 if (err) 6867 break; 6868 - __ufshcd_transfer_req_compl(hba, pos, /*retry_requests=*/true); 6869 } 6870 } 6871 ··· 7037 * will be to send LU reset which, again, is a spec violation. 7038 * To avoid these unnecessary/illegal steps, first we clean up 7039 * the lrb taken by this cmd and re-set it in outstanding_reqs, 7040 - * then queue the error handler and bail. 
7041 */ 7042 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) { 7043 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun); 7044 7045 spin_lock_irqsave(host->host_lock, flags); 7046 hba->force_reset = true; 7047 spin_unlock_irqrestore(host->host_lock, flags); 7048 - 7049 - ufshcd_schedule_eh(hba); 7050 - 7051 goto release; 7052 } 7053 ··· 7178 7179 spin_lock_irqsave(hba->host->host_lock, flags); 7180 hba->force_reset = true; 7181 dev_err(hba->dev, "%s: reset in progress - 1\n", __func__); 7182 spin_unlock_irqrestore(hba->host->host_lock, flags); 7183 7184 - ufshcd_err_handler(hba->host); 7185 7186 spin_lock_irqsave(hba->host->host_lock, flags); 7187 if (hba->ufshcd_state == UFSHCD_STATE_ERROR) ··· 8592 if (hba->is_powered) { 8593 ufshcd_exit_clk_scaling(hba); 8594 ufshcd_exit_clk_gating(hba); 8595 ufs_debugfs_hba_exit(hba); 8596 ufshcd_variant_hba_exit(hba); 8597 ufshcd_setup_vreg(hba, false); ··· 9438 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32)); 9439 } 9440 9441 - static struct scsi_transport_template ufshcd_transport_template = { 9442 - .eh_strategy_handler = ufshcd_err_handler, 9443 - }; 9444 - 9445 /** 9446 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA) 9447 * @dev: pointer to device handle ··· 9464 err = -ENOMEM; 9465 goto out_error; 9466 } 9467 - host->transportt = &ufshcd_transport_template; 9468 hba = shost_priv(host); 9469 hba->host = host; 9470 hba->dev = dev; 9471 hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL; 9472 INIT_LIST_HEAD(&hba->clk_list_head); 9473 spin_lock_init(&hba->outstanding_lock); 9474 ··· 9503 int err; 9504 struct Scsi_Host *host = hba->host; 9505 struct device *dev = hba->dev; 9506 9507 if (!mmio_base) { 9508 dev_err(hba->dev, ··· 9557 9558 hba->max_pwr_info.is_valid = false; 9559 9560 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); 9561 9562 sema_init(&hba->host_sem, 1);
··· 17 #include <linux/blk-pm.h> 18 #include <linux/blkdev.h> 19 #include <scsi/scsi_driver.h> 20 #include "ufshcd.h" 21 #include "ufs_quirks.h" 22 #include "unipro.h" ··· 237 static irqreturn_t ufshcd_intr(int irq, void *__hba); 238 static int ufshcd_change_power_mode(struct ufs_hba *hba, 239 struct ufs_pa_layer_attr *pwr_mode); 240 + static void ufshcd_schedule_eh_work(struct ufs_hba *hba); 241 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on); 242 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on); 243 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, ··· 2759 out: 2760 up_read(&hba->clk_scaling_lock); 2761 2762 + if (ufs_trigger_eh()) { 2763 + unsigned long flags; 2764 + 2765 + spin_lock_irqsave(hba->host->host_lock, flags); 2766 + ufshcd_schedule_eh_work(hba); 2767 + spin_unlock_irqrestore(hba->host->host_lock, flags); 2768 + } 2769 2770 return err; 2771 } ··· 3919 } 3920 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr); 3921 3922 /** 3923 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power 3924 * state) and waits for it to take effect. ··· 3968 { 3969 DECLARE_COMPLETION_ONSTACK(uic_async_done); 3970 unsigned long flags; 3971 u8 status; 3972 int ret; 3973 bool reenable_intr = false; ··· 4038 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL); 4039 if (ret) { 4040 ufshcd_set_link_broken(hba); 4041 + ufshcd_schedule_eh_work(hba); 4042 } 4043 out_unlock: 4044 spin_unlock_irqrestore(hba->host->host_lock, flags); 4045 mutex_unlock(&hba->uic_cmd_mutex); 4046 4047 return ret; ··· 4776 mutex_lock(&hba->dev_cmd.lock); 4777 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) { 4778 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, 4779 + hba->nop_out_timeout); 4780 4781 if (!err || err == -ETIMEDOUT) 4782 break; ··· 5911 return err_handling; 5912 } 5913 5914 + /* host lock must be held before calling this func */ 5915 + static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba) 5916 + { 5917 + return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) || 5918 + (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)); 5919 + } 5920 + 5921 + /* host lock must be held before calling this func */ 5922 + static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba) 5923 + { 5924 + /* handle fatal errors only when link is not in error state */ 5925 + if (hba->ufshcd_state != UFSHCD_STATE_ERROR) { 5926 + if (hba->force_reset || ufshcd_is_link_broken(hba) || 5927 + ufshcd_is_saved_err_fatal(hba)) 5928 + hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL; 5929 + else 5930 + hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL; 5931 + queue_work(hba->eh_wq, &hba->eh_work); 5932 + } 5933 + } 5934 + 5935 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow) 5936 { 5937 down_write(&hba->clk_scaling_lock); ··· 6044 6045 /** 6046 * ufshcd_err_handler - handle UFS errors that require s/w attention 6047 + * @work: pointer to work structure 6048 */ 6049 + static void ufshcd_err_handler(struct work_struct *work) 6050 { 6051 + struct ufs_hba *hba; 6052 unsigned long flags; 6053 bool err_xfer = false; 6054 bool err_tm = false; ··· 6056 int tag; 6057 bool needs_reset = false, needs_restore = false; 6058 6059 + hba = container_of(work, struct ufs_hba, eh_work); 6060 + 6061 down(&hba->host_sem); 6062 spin_lock_irqsave(hba->host->host_lock, flags); 6063 if (ufshcd_err_handling_should_stop(hba)) { 6064 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) 6065 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; ··· 6371 "host_regs: "); 6372 
ufshcd_print_pwr_info(hba); 6373 } 6374 + ufshcd_schedule_eh_work(hba); 6375 retval |= IRQ_HANDLED; 6376 } 6377 /* ··· 6382 hba->errors = 0; 6383 hba->uic_error = 0; 6384 spin_unlock(hba->host->host_lock); 6385 return retval; 6386 } 6387 ··· 6876 err = ufshcd_clear_cmd(hba, pos); 6877 if (err) 6878 break; 6879 + __ufshcd_transfer_req_compl(hba, 1U << pos, false); 6880 } 6881 } 6882 ··· 7048 * will be to send LU reset which, again, is a spec violation. 7049 * To avoid these unnecessary/illegal steps, first we clean up 7050 * the lrb taken by this cmd and re-set it in outstanding_reqs, 7051 + * then queue the eh_work and bail. 7052 */ 7053 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) { 7054 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun); 7055 7056 spin_lock_irqsave(host->host_lock, flags); 7057 hba->force_reset = true; 7058 + ufshcd_schedule_eh_work(hba); 7059 spin_unlock_irqrestore(host->host_lock, flags); 7060 goto release; 7061 } 7062 ··· 7191 7192 spin_lock_irqsave(hba->host->host_lock, flags); 7193 hba->force_reset = true; 7194 + ufshcd_schedule_eh_work(hba); 7195 dev_err(hba->dev, "%s: reset in progress - 1\n", __func__); 7196 spin_unlock_irqrestore(hba->host->host_lock, flags); 7197 7198 + flush_work(&hba->eh_work); 7199 7200 spin_lock_irqsave(hba->host->host_lock, flags); 7201 if (hba->ufshcd_state == UFSHCD_STATE_ERROR) ··· 8604 if (hba->is_powered) { 8605 ufshcd_exit_clk_scaling(hba); 8606 ufshcd_exit_clk_gating(hba); 8607 + if (hba->eh_wq) 8608 + destroy_workqueue(hba->eh_wq); 8609 ufs_debugfs_hba_exit(hba); 8610 ufshcd_variant_hba_exit(hba); 8611 ufshcd_setup_vreg(hba, false); ··· 9448 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32)); 9449 } 9450 9451 /** 9452 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA) 9453 * @dev: pointer to device handle ··· 9478 err = -ENOMEM; 9479 goto out_error; 9480 } 9481 hba = shost_priv(host); 9482 hba->host = host; 9483 hba->dev = dev; 9484 hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL; 9485 + hba->nop_out_timeout = NOP_OUT_TIMEOUT; 9486 INIT_LIST_HEAD(&hba->clk_list_head); 9487 spin_lock_init(&hba->outstanding_lock); 9488 ··· 9517 int err; 9518 struct Scsi_Host *host = hba->host; 9519 struct device *dev = hba->dev; 9520 + char eh_wq_name[sizeof("ufs_eh_wq_00")]; 9521 9522 if (!mmio_base) { 9523 dev_err(hba->dev, ··· 9570 9571 hba->max_pwr_info.is_valid = false; 9572 9573 + /* Initialize work queues */ 9574 + snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d", 9575 + hba->host->host_no); 9576 + hba->eh_wq = create_singlethread_workqueue(eh_wq_name); 9577 + if (!hba->eh_wq) { 9578 + dev_err(hba->dev, "%s: failed to create eh workqueue\n", 9579 + __func__); 9580 + err = -ENOMEM; 9581 + goto out_disable; 9582 + } 9583 + INIT_WORK(&hba->eh_work, ufshcd_err_handler); 9584 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); 9585 9586 sema_init(&hba->host_sem, 1);
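The ufshcd.c revert above restores the driver-private error-handling machinery: errors are flagged under the host lock, ufshcd_schedule_eh_work() queues eh_work on a dedicated single-threaded workqueue, the handler recovers its hba with container_of(), and the reset path waits with flush_work(). Below is a minimal sketch of that pattern, not the UFS driver's code; every demo_* name is illustrative.

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/errno.h>

struct demo_hba {
	spinlock_t lock;			/* protects the error state below */
	bool force_reset;
	struct workqueue_struct *eh_wq;		/* dedicated queue for error handling */
	struct work_struct eh_work;		/* embedded: no allocation on the error path */
};

static void demo_err_handler(struct work_struct *work)
{
	/* recover the owning structure from the embedded work item */
	struct demo_hba *hba = container_of(work, struct demo_hba, eh_work);
	unsigned long flags;

	spin_lock_irqsave(&hba->lock, flags);
	hba->force_reset = false;
	spin_unlock_irqrestore(&hba->lock, flags);
	/* sleepable recovery work runs here, in process context */
}

/* caller must hold hba->lock, mirroring the rule documented above */
static void demo_schedule_eh_work(struct demo_hba *hba)
{
	queue_work(hba->eh_wq, &hba->eh_work);
}

static int demo_reset(struct demo_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(&hba->lock, flags);
	hba->force_reset = true;
	demo_schedule_eh_work(hba);
	spin_unlock_irqrestore(&hba->lock, flags);

	flush_work(&hba->eh_work);	/* wait for the handler to finish */
	return 0;
}

static int demo_init(struct demo_hba *hba)
{
	spin_lock_init(&hba->lock);
	hba->eh_wq = create_singlethread_workqueue("demo_eh_wq");
	if (!hba->eh_wq)
		return -ENOMEM;
	INIT_WORK(&hba->eh_work, demo_err_handler);
	return 0;
}

A single-threaded queue keeps at most one recovery pass in flight, and flushing it from the reset path gives the same "wait for eh_work" semantics as the flush_work(&hba->eh_work) call in the hunk above.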
+5
drivers/scsi/ufs/ufshcd.h
··· 741 * @is_powered: flag to check if HBA is powered 742 * @shutting_down: flag to check if shutdown has been invoked 743 * @host_sem: semaphore used to serialize concurrent contexts 744 * @eeh_work: Worker to handle exception events 745 * @errors: HBA errors 746 * @uic_error: UFS interconnect layer error status ··· 845 struct semaphore host_sem; 846 847 /* Work Queues */ 848 struct work_struct eeh_work; 849 850 /* HBA Errors */ ··· 862 /* Device management request data */ 863 struct ufs_dev_cmd dev_cmd; 864 ktime_t last_dme_cmd_tstamp; 865 866 /* Keeps information of the UFS device connected to this host */ 867 struct ufs_dev_info dev_info;
··· 741 * @is_powered: flag to check if HBA is powered 742 * @shutting_down: flag to check if shutdown has been invoked 743 * @host_sem: semaphore used to serialize concurrent contexts 744 + * @eh_wq: Workqueue that eh_work works on 745 + * @eh_work: Worker to handle UFS errors that require s/w attention 746 * @eeh_work: Worker to handle exception events 747 * @errors: HBA errors 748 * @uic_error: UFS interconnect layer error status ··· 843 struct semaphore host_sem; 844 845 /* Work Queues */ 846 + struct workqueue_struct *eh_wq; 847 + struct work_struct eh_work; 848 struct work_struct eeh_work; 849 850 /* HBA Errors */ ··· 858 /* Device management request data */ 859 struct ufs_dev_cmd dev_cmd; 860 ktime_t last_dme_cmd_tstamp; 861 + int nop_out_timeout; 862 863 /* Keeps information of the UFS device connected to this host */ 864 struct ufs_dev_info dev_info;
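As the ufshcd.h hunk shows, kernel-doc for a structure pairs every new member with a matching @name: line in the leading comment block. A minimal sketch of that convention follows; the names and layout are illustrative, not the real ufs_hba definition.

#include <linux/workqueue.h>

/**
 * struct demo_hba - per-host error-handling state (illustrative)
 * @eh_wq: workqueue that @eh_work runs on
 * @eh_work: worker handling errors that need process context
 * @nop_out_timeout: NOP OUT response timeout, in milliseconds
 */
struct demo_hba {
	struct workqueue_struct *eh_wq;
	struct work_struct eh_work;
	int nop_out_timeout;
};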
+3 -5
drivers/scsi/ufs/ufshpb.c
··· 333 } 334 335 static void 336 - ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshpb_lu *hpb, 337 - struct ufshcd_lrb *lrbp, u32 lpn, __be64 ppn, 338 - u8 transfer_len, int read_id) 339 { 340 unsigned char *cdb = lrbp->cmd->cmnd; 341 __be64 ppn_tmp = ppn; ··· 702 } 703 } 704 705 - ufshpb_set_hpb_read_to_upiu(hba, hpb, lrbp, lpn, ppn, transfer_len, 706 - read_id); 707 708 hpb->stats.hit_cnt++; 709 return 0;
··· 333 } 334 335 static void 336 + ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, 337 + __be64 ppn, u8 transfer_len, int read_id) 338 { 339 unsigned char *cdb = lrbp->cmd->cmnd; 340 __be64 ppn_tmp = ppn; ··· 703 } 704 } 705 706 + ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len, read_id); 707 708 hpb->stats.hit_cnt++; 709 return 0;
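The ufshpb.c hunk is a pure signature trim: the helper never read @hpb or @lpn, so both the prototype and its one call site shrink to exactly the state the body consumes. A generic sketch of the same cleanup follows; the demo_* names and the CDB byte offsets are made up for illustration and are not the HPB READ format.

#include <linux/types.h>
#include <linux/string.h>

struct demo_cmd {
	unsigned char cdb[16];
};

/*
 * Earlier revisions also took a per-LU pointer and an LPN that the body
 * never referenced; the trimmed signature lists only what is used.
 */
static void demo_set_read_cdb(struct demo_cmd *cmd, __be64 ppn, u8 transfer_len)
{
	__be64 ppn_tmp = ppn;	/* keep a local copy, as the hunk above does */

	memcpy(&cmd->cdb[6], &ppn_tmp, sizeof(ppn_tmp));	/* illustrative offset */
	cmd->cdb[14] = transfer_len;
}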
+20 -12
drivers/target/target_core_configfs.c
··· 1110 { 1111 struct se_dev_attrib *da = to_attrib(item); 1112 struct se_device *dev = da->da_dev; 1113 - bool flag; 1114 int ret; 1115 1116 if (!(dev->transport->transport_flags_changeable & 1117 TRANSPORT_FLAG_PASSTHROUGH_ALUA)) { 1118 pr_err("dev[%p]: Unable to change SE Device alua_support:" 1119 " alua_support has fixed value\n", dev); 1120 - return -EINVAL; 1121 } 1122 - 1123 - ret = strtobool(page, &flag); 1124 - if (ret < 0) 1125 - return ret; 1126 1127 if (flag) 1128 dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_ALUA; ··· 1149 { 1150 struct se_dev_attrib *da = to_attrib(item); 1151 struct se_device *dev = da->da_dev; 1152 - bool flag; 1153 int ret; 1154 1155 if (!(dev->transport->transport_flags_changeable & 1156 TRANSPORT_FLAG_PASSTHROUGH_PGR)) { 1157 pr_err("dev[%p]: Unable to change SE Device pgr_support:" 1158 " pgr_support has fixed value\n", dev); 1159 - return -EINVAL; 1160 } 1161 - 1162 - ret = strtobool(page, &flag); 1163 - if (ret < 0) 1164 - return ret; 1165 1166 if (flag) 1167 dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_PGR;
··· 1110 { 1111 struct se_dev_attrib *da = to_attrib(item); 1112 struct se_device *dev = da->da_dev; 1113 + bool flag, oldflag; 1114 int ret; 1115 + 1116 + ret = strtobool(page, &flag); 1117 + if (ret < 0) 1118 + return ret; 1119 + 1120 + oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA); 1121 + if (flag == oldflag) 1122 + return count; 1123 1124 if (!(dev->transport->transport_flags_changeable & 1125 TRANSPORT_FLAG_PASSTHROUGH_ALUA)) { 1126 pr_err("dev[%p]: Unable to change SE Device alua_support:" 1127 " alua_support has fixed value\n", dev); 1128 + return -ENOSYS; 1129 } 1130 1131 if (flag) 1132 dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_ALUA; ··· 1145 { 1146 struct se_dev_attrib *da = to_attrib(item); 1147 struct se_device *dev = da->da_dev; 1148 + bool flag, oldflag; 1149 int ret; 1150 + 1151 + ret = strtobool(page, &flag); 1152 + if (ret < 0) 1153 + return ret; 1154 + 1155 + oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR); 1156 + if (flag == oldflag) 1157 + return count; 1158 1159 if (!(dev->transport->transport_flags_changeable & 1160 TRANSPORT_FLAG_PASSTHROUGH_PGR)) { 1161 pr_err("dev[%p]: Unable to change SE Device pgr_support:" 1162 " pgr_support has fixed value\n", dev); 1163 + return -ENOSYS; 1164 } 1165 1166 if (flag) 1167 dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_PGR;
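The target_core_configfs.c change reorders the store handlers: parse the user string first, treat a write of the current value as a successful no-op, and only then fail a real change with -ENOSYS when the transport's flags are fixed. A condensed sketch of that flow under hypothetical demo_* names:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

/*
 * @cur: current state of the boolean attribute
 * @changeable: whether this transport allows the attribute to change
 */
static ssize_t demo_bool_store(bool *cur, bool changeable,
			       const char *page, size_t count)
{
	bool flag;
	int ret;

	ret = strtobool(page, &flag);	/* accepts 0/1, y/n, on/off */
	if (ret < 0)
		return ret;

	if (flag == *cur)	/* re-writing the current value always succeeds */
		return count;

	if (!changeable)	/* only a real change can hit the fixed-value error */
		return -ENOSYS;

	*cur = flag;
	return count;
}

Returning -ENOSYS instead of -EINVAL for a fixed attribute lets userspace tell "this device cannot change the setting" apart from "the input was malformed".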
+1 -1
drivers/target/target_core_pr.c
··· 269 spin_lock(&dev->dev_reservation_lock); 270 if (dev->reservation_holder && 271 dev->reservation_holder->se_node_acl != sess->se_node_acl) { 272 - pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n", 273 tpg->se_tpg_tfo->fabric_name); 274 pr_err("Original reserver LUN: %llu %s\n", 275 cmd->se_lun->unpacked_lun,
··· 269 spin_lock(&dev->dev_reservation_lock); 270 if (dev->reservation_holder && 271 dev->reservation_holder->se_node_acl != sess->se_node_acl) { 272 + pr_err("SCSI-2 RESERVATION CONFLICT for %s fabric\n", 273 tpg->se_tpg_tfo->fabric_name); 274 pr_err("Original reserver LUN: %llu %s\n", 275 cmd->se_lun->unpacked_lun,
-1
include/scsi/scsi_device.h
··· 146 struct scsi_vpd __rcu *vpd_pg83; 147 struct scsi_vpd __rcu *vpd_pg80; 148 struct scsi_vpd __rcu *vpd_pg89; 149 - unsigned char current_tag; /* current tag */ 150 struct scsi_target *sdev_target; 151 152 blist_flags_t sdev_bflags; /* black/white flags as also found in
··· 146 struct scsi_vpd __rcu *vpd_pg83; 147 struct scsi_vpd __rcu *vpd_pg80; 148 struct scsi_vpd __rcu *vpd_pg89; 149 struct scsi_target *sdev_target; 150 151 blist_flags_t sdev_bflags; /* black/white flags as also found in