Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
"Thirty-three fixes, I'm afraid.

Essentially the build-up from the last couple of weeks while I've been
dealing with Linux Plumbers Conference infrastructure issues. It's
mostly the usual assortment of spelling fixes and minor corrections.

The only core-relevant changes are to the sd driver, to reduce the
spin-up message spew and fix a small memory leak on the freeing path"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (33 commits)
scsi: ses: Retry failed Send/Receive Diagnostic commands
scsi: target: Fix spelling mistake "CONFLIFT" -> "CONFLICT"
scsi: lpfc: Fix gcc -Wstringop-overread warning, again
scsi: lpfc: Use correct scnprintf() limit
scsi: lpfc: Fix sprintf() overflow in lpfc_display_fpin_wwpn()
scsi: core: Remove 'current_tag'
scsi: acornscsi: Remove tagged queuing vestiges
scsi: fas216: Kill scmd->tag
scsi: qla2xxx: Restore initiator in dual mode
scsi: ufs: core: Unbreak the reset handler
scsi: sd_zbc: Support disks with more than 2**32 logical blocks
scsi: ufs: core: Revert "scsi: ufs: Synchronize SCSI and UFS error handling"
scsi: bsg: Fix device unregistration
scsi: sd: Make sd_spinup_disk() less noisy
scsi: ufs: ufs-pci: Fix Intel LKF link stability
scsi: mpt3sas: Clean up some inconsistent indenting
scsi: megaraid: Clean up some inconsistent indenting
scsi: sr: Fix spelling mistake "does'nt" -> "doesn't"
scsi: Remove SCSI CDROM MAINTAINERS entry
scsi: megaraid: Fix Coccinelle warning
...

+288 -300
-7
MAINTAINERS
···
 S:	Supported
 F:	drivers/char/pcmcia/scr24x_cs.c
 
-SCSI CDROM DRIVER
-M:	Jens Axboe <axboe@kernel.dk>
-L:	linux-scsi@vger.kernel.org
-S:	Maintained
-W:	http://www.kernel.dk
-F:	drivers/scsi/sr*
-
 SCSI RDMA PROTOCOL (SRP) INITIATOR
 M:	Bart Van Assche <bvanassche@acm.org>
 L:	linux-rdma@vger.kernel.org
+15 -8
block/bsg.c
···
 	.llseek		= default_llseek,
 };
 
+static void bsg_device_release(struct device *dev)
+{
+	struct bsg_device *bd = container_of(dev, struct bsg_device, device);
+
+	ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
+	kfree(bd);
+}
+
 void bsg_unregister_queue(struct bsg_device *bd)
 {
 	if (bd->queue->kobj.sd)
 		sysfs_remove_link(&bd->queue->kobj, "bsg");
 	cdev_device_del(&bd->cdev, &bd->device);
-	ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
-	kfree(bd);
+	put_device(&bd->device);
 }
 EXPORT_SYMBOL_GPL(bsg_unregister_queue);
···
 	if (ret < 0) {
 		if (ret == -ENOSPC)
 			dev_err(parent, "bsg: too many bsg devices\n");
-		goto out_kfree;
+		kfree(bd);
+		return ERR_PTR(ret);
 	}
 	bd->device.devt = MKDEV(bsg_major, ret);
 	bd->device.class = bsg_class;
 	bd->device.parent = parent;
+	bd->device.release = bsg_device_release;
 	dev_set_name(&bd->device, "%s", name);
 	device_initialize(&bd->device);
···
 	bd->cdev.owner = THIS_MODULE;
 	ret = cdev_device_add(&bd->cdev, &bd->device);
 	if (ret)
-		goto out_ida_remove;
+		goto out_put_device;
 
 	if (q->kobj.sd) {
 		ret = sysfs_create_link(&q->kobj, &bd->device.kobj, "bsg");
···
 
 out_device_del:
 	cdev_device_del(&bd->cdev, &bd->device);
-out_ida_remove:
-	ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
-out_kfree:
-	kfree(bd);
+out_put_device:
+	put_device(&bd->device);
 	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(bsg_register_queue);
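For readers unfamiliar with the pattern behind this bsg fix: once a struct device has been initialized, its memory must be freed from a release callback invoked by the final put_device(), not directly in the unregister path, because other code may still hold references. A minimal user-space sketch of the same refcount-plus-release idea (the struct names and refcount field here are stand-ins, not kernel API):

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

/* container_of as used in the kernel: recover the outer struct */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device {			/* stand-in for struct device */
	int refcount;
	void (*release)(struct device *dev);
};

struct bsg_device {		/* stand-in for struct bsg_device */
	int minor;
	struct device device;	/* embedded, like the kernel's */
};

static void put_device(struct device *dev)
{
	if (--dev->refcount == 0 && dev->release)
		dev->release(dev);	/* free only on the last reference */
}

static void bsg_device_release(struct device *dev)
{
	struct bsg_device *bd = container_of(dev, struct bsg_device, device);

	printf("releasing minor %d\n", bd->minor);
	free(bd);
}

int main(void)
{
	struct bsg_device *bd = malloc(sizeof(*bd));

	bd->minor = 3;
	bd->device.refcount = 2;	/* registration + an open handle */
	bd->device.release = bsg_device_release;

	put_device(&bd->device);	/* unregister: bd must NOT be freed yet */
	put_device(&bd->device);	/* last user: release fires, bd is freed */
	return 0;
}

Freeing in unregister, as the old code did, would leave the open handle pointing at freed memory; that is the device-unregistration bug the commit fixes.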
-11
drivers/scsi/arm/Kconfig
···
 	  This enables support for the Acorn SCSI card (aka30). If you have an
 	  Acorn system with one of these, say Y. If unsure, say N.
 
-config SCSI_ACORNSCSI_TAGGED_QUEUE
-	bool "Support SCSI 2 Tagged queueing"
-	depends on SCSI_ACORNSCSI_3
-	help
-	  Say Y here to enable tagged queuing support on the Acorn SCSI card.
-
-	  This is a feature of SCSI-2 which improves performance: the host
-	  adapter can send several SCSI commands to a device's queue even if
-	  previous commands haven't finished yet. Some SCSI devices don't
-	  implement this properly, so the safe answer is N.
-
 config SCSI_ACORNSCSI_SYNC
 	bool "Support SCSI 2 Synchronous Transfers"
 	depends on SCSI_ACORNSCSI_3
+22 -81
drivers/scsi/arm/acornscsi.c
···
  * You can tell if you have a device that supports tagged queueing my
  * cating (eg) /proc/scsi/acornscsi/0 and see if the SCSI revision is reported
  * as '2 TAG'.
- *
- * Also note that CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE is normally set in the config
- * scripts, but disabled here. Once debugged, remove the #undef, otherwise to debug,
- * comment out the undef.
  */
-#undef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
+
 /*
  * SCSI-II Synchronous transfer support.
  *
···
 		unsigned int result);
 static int acornscsi_reconnect_finish(AS_Host *host);
 static void acornscsi_dma_cleanup(AS_Host *host);
-static void acornscsi_abortcmd(AS_Host *host, unsigned char tag);
+static void acornscsi_abortcmd(AS_Host *host);
 
 /* ====================================================================================
  * Miscellaneous
···
 #endif
 
 	if (from_queue) {
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
-		/*
-		 * tagged queueing - allocate a new tag to this command
-		 */
-		if (SCpnt->device->simple_tags) {
-			SCpnt->device->current_tag += 1;
-			if (SCpnt->device->current_tag == 0)
-				SCpnt->device->current_tag = 1;
-			SCpnt->tag = SCpnt->device->current_tag;
-		} else
-#endif
 		set_bit(SCpnt->device->id * 8 +
 			(u8)(SCpnt->device->lun & 0x07), host->busyluns);
···
 	 * the device recognises the attention.
 	 */
 	if (dmac_read(host, DMAC_STATUS) & STATUS_RQ0) {
-		acornscsi_abortcmd(host, host->SCpnt->tag);
+		acornscsi_abortcmd(host);
 
 		dmac_write(host, DMAC_TXCNTLO, 0);
 		dmac_write(host, DMAC_TXCNTHI, 0);
···
 	acornscsi_sbic_issuecmd(host, CMND_ASSERTATN);
 
 	switch (host->scsi.last_message) {
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
-	case HEAD_OF_QUEUE_TAG:
-	case ORDERED_QUEUE_TAG:
-	case SIMPLE_QUEUE_TAG:
-		/*
-		 * ANSI standard says: (Section SCSI-2 Rev. 10c Sect 5.6.17)
-		 * If a target does not implement tagged queuing and a queue tag
-		 * message is received, it shall respond with a MESSAGE REJECT
-		 * message and accept the I/O process as if it were untagged.
-		 */
-		printk(KERN_NOTICE "scsi%d.%c: disabling tagged queueing\n",
-			host->host->host_no, acornscsi_target(host));
-		host->SCpnt->device->simple_tags = 0;
-		set_bit(host->SCpnt->device->id * 8 +
-			(u8)(host->SCpnt->device->lun & 0x7), host->busyluns);
-		break;
-#endif
 	case EXTENDED_MESSAGE | (EXTENDED_SDTR << 8):
 		/*
 		 * Target can't handle synchronous transfers
···
 #if 0
 	/* does the device need the current command aborted */
 	if (cmd_aborted) {
-		acornscsi_abortcmd(host->SCpnt->tag);
+		acornscsi_abortcmd(host);
 		return;
 	}
 #endif
 
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
-	if (host->SCpnt->tag) {
-		unsigned int tag_type;
-
-		if (host->SCpnt->cmnd[0] == REQUEST_SENSE ||
-		    host->SCpnt->cmnd[0] == TEST_UNIT_READY ||
-		    host->SCpnt->cmnd[0] == INQUIRY)
-			tag_type = HEAD_OF_QUEUE_TAG;
-		else
-			tag_type = SIMPLE_QUEUE_TAG;
-		msgqueue_addmsg(&host->scsi.msgs, 2, tag_type, host->SCpnt->tag);
-	}
-#endif
 
 #ifdef CONFIG_SCSI_ACORNSCSI_SYNC
 	if (host->device[host->SCpnt->device->id].sync_state == SYNC_NEGOCIATE) {
···
 		       "to reconnect with\n",
 			host->host->host_no, '0' + target);
 		acornscsi_dumplog(host, target);
-		acornscsi_abortcmd(host, 0);
+		acornscsi_abortcmd(host);
 		if (host->SCpnt) {
 			queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt);
 			host->SCpnt = NULL;
···
 		host->scsi.disconnectable = 0;
 		if (host->SCpnt->device->id == host->scsi.reconnected.target &&
 		    host->SCpnt->device->lun == host->scsi.reconnected.lun &&
-		    host->SCpnt->tag == host->scsi.reconnected.tag) {
+		    scsi_cmd_to_rq(host->SCpnt)->tag == host->scsi.reconnected.tag) {
 #if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON))
 		DBG(host->SCpnt, printk("scsi%d.%c: reconnected",
 			host->host->host_no, acornscsi_target(host)));
···
 	}
 
 	if (!host->SCpnt)
-		acornscsi_abortcmd(host, host->scsi.reconnected.tag);
+		acornscsi_abortcmd(host);
 	else {
 		/*
 		 * Restore data pointer from SAVED pointers.
···
  * Function: void acornscsi_abortcmd(AS_host *host, unsigned char tag)
  * Purpose : abort a currently executing command
  * Params  : host - host with connected command to abort
- *	     tag  - tag to abort
  */
 static
-void acornscsi_abortcmd(AS_Host *host, unsigned char tag)
+void acornscsi_abortcmd(AS_Host *host)
 {
 	host->scsi.phase = PHASE_ABORTED;
 	sbic_arm_write(host, SBIC_CMND, CMND_ASSERTATN);
 
 	msgqueue_flush(&host->scsi.msgs);
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
-	if (tag)
-		msgqueue_addmsg(&host->scsi.msgs, 2, ABORT_TAG, tag);
-	else
-#endif
-		msgqueue_addmsg(&host->scsi.msgs, 1, ABORT);
+	msgqueue_addmsg(&host->scsi.msgs, 1, ABORT);
 }
 
 /* ==========================================================================================
···
 		printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTING, SSR %02X?\n",
 			host->host->host_no, acornscsi_target(host), ssr);
 		acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
-		acornscsi_abortcmd(host, host->SCpnt->tag);
+		acornscsi_abortcmd(host);
 	}
 	return INTR_PROCESSING;
···
 		printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTED, SSR %02X?\n",
 			host->host->host_no, acornscsi_target(host), ssr);
 		acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
-		acornscsi_abortcmd(host, host->SCpnt->tag);
+		acornscsi_abortcmd(host);
 	}
 	return INTR_PROCESSING;
···
 	case 0x18:			/* -> PHASE_DATAOUT */
 	/* COMMAND -> DATA OUT */
 	if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len)
-		acornscsi_abortcmd(host, host->SCpnt->tag);
+		acornscsi_abortcmd(host);
 	acornscsi_dma_setup(host, DMA_OUT);
 	if (!acornscsi_starttransfer(host))
-		acornscsi_abortcmd(host, host->SCpnt->tag);
+		acornscsi_abortcmd(host);
 	host->scsi.phase = PHASE_DATAOUT;
 	return INTR_IDLE;
 
 	case 0x19:			/* -> PHASE_DATAIN */
 	/* COMMAND -> DATA IN */
 	if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len)
-		acornscsi_abortcmd(host, host->SCpnt->tag);
+		acornscsi_abortcmd(host);
 	acornscsi_dma_setup(host, DMA_IN);
 	if (!acornscsi_starttransfer(host))
-		acornscsi_abortcmd(host, host->SCpnt->tag);
+		acornscsi_abortcmd(host);
 	host->scsi.phase = PHASE_DATAIN;
 	return INTR_IDLE;
···
 	/* MESSAGE IN -> DATA OUT */
 	acornscsi_dma_setup(host, DMA_OUT);
 	if (!acornscsi_starttransfer(host))
-		acornscsi_abortcmd(host, host->SCpnt->tag);
+		acornscsi_abortcmd(host);
 	host->scsi.phase = PHASE_DATAOUT;
 	return INTR_IDLE;
···
 	/* MESSAGE IN -> DATA IN */
 	acornscsi_dma_setup(host, DMA_IN);
 	if (!acornscsi_starttransfer(host))
-		acornscsi_abortcmd(host, host->SCpnt->tag);
+		acornscsi_abortcmd(host);
 	host->scsi.phase = PHASE_DATAIN;
 	return INTR_IDLE;
···
 	switch (ssr) {
 	case 0x19:			/* -> PHASE_DATAIN */
 	case 0x89:			/* -> PHASE_DATAIN */
-		acornscsi_abortcmd(host, host->SCpnt->tag);
+		acornscsi_abortcmd(host);
 		return INTR_IDLE;
 
 	case 0x1b:			/* -> PHASE_STATUSIN */
···
 	switch (ssr) {
 	case 0x18:			/* -> PHASE_DATAOUT */
 	case 0x88:			/* -> PHASE_DATAOUT */
-		acornscsi_abortcmd(host, host->SCpnt->tag);
+		acornscsi_abortcmd(host);
 		return INTR_IDLE;
 
 	case 0x1b:			/* -> PHASE_STATUSIN */
···
 	SCpnt->scsi_done = done;
 	SCpnt->host_scribble = NULL;
 	SCpnt->result = 0;
-	SCpnt->tag = 0;
 	SCpnt->SCp.phase = (int)acornscsi_datadirection(SCpnt->cmnd[0]);
 	SCpnt->SCp.sent_command = 0;
 	SCpnt->SCp.scsi_xferred = 0;
···
 		break;
 
 	default:
-		acornscsi_abortcmd(host, host->SCpnt->tag);
+		acornscsi_abortcmd(host);
 		res = res_snooze;
 	}
 	local_irq_restore(flags);
···
 #ifdef CONFIG_SCSI_ACORNSCSI_SYNC
     " SYNC"
 #endif
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
-    " TAG"
-#endif
 #if (DEBUG & DEBUG_NO_WRITE)
     " NOWRITE (" __stringify(NO_WRITE) ")"
 #endif
···
     seq_printf(m, "AcornSCSI driver v%d.%d.%d"
 #ifdef CONFIG_SCSI_ACORNSCSI_SYNC
     " SYNC"
 #endif
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
-    " TAG"
-#endif
 #if (DEBUG & DEBUG_NO_WRITE)
     " NOWRITE (" __stringify(NO_WRITE) ")"
 #endif
···
 	seq_printf(m, "Device/Lun TaggedQ Sync\n");
 	seq_printf(m, "     %d/%llu   ", scd->id, scd->lun);
 	if (scd->tagged_supported)
-		seq_printf(m, "%3sabled(%3d) ",
-			   scd->simple_tags ? "en" : "dis",
-			   scd->current_tag);
+		seq_printf(m, "%3sabled ",
+			   scd->simple_tags ? "en" : "dis");
 	else
 		seq_printf(m, "unsupported ");
+8 -23
drivers/scsi/arm/fas216.c
···
  * I was thinking that this was a good chip until I found this restriction ;(
  */
 #define SCSI2_SYNC
-#undef SCSI2_TAG
 
 #undef DEBUG_CONNECT
 #undef DEBUG_MESSAGES
···
 	info->scsi.disconnectable = 0;
 	if (info->SCpnt->device->id == target &&
 	    info->SCpnt->device->lun == lun &&
-	    info->SCpnt->tag == tag) {
+	    scsi_cmd_to_rq(info->SCpnt)->tag == tag) {
 		fas216_log(info, LOG_CONNECT, "reconnected previously executing command");
 	} else {
 		queue_add_cmd_tail(&info->queues.disconnected, info->SCpnt);
···
 	/*
 	 * add tag message if required
 	 */
-	if (SCpnt->tag)
-		msgqueue_addmsg(&info->scsi.msgs, 2, SIMPLE_QUEUE_TAG, SCpnt->tag);
+	if (SCpnt->device->simple_tags)
+		msgqueue_addmsg(&info->scsi.msgs, 2, SIMPLE_QUEUE_TAG,
+				scsi_cmd_to_rq(SCpnt)->tag);
 
 	do {
 #ifdef SCSI2_SYNC
···
 
 static void fas216_allocate_tag(FAS216_Info *info, struct scsi_cmnd *SCpnt)
 {
-#ifdef SCSI2_TAG
-	/*
-	 * tagged queuing - allocate a new tag to this command
-	 */
-	if (SCpnt->device->simple_tags && SCpnt->cmnd[0] != REQUEST_SENSE &&
-	    SCpnt->cmnd[0] != INQUIRY) {
-		SCpnt->device->current_tag += 1;
-		if (SCpnt->device->current_tag == 0)
-			SCpnt->device->current_tag = 1;
-		SCpnt->tag = SCpnt->device->current_tag;
-	} else
-#endif
-		set_bit(SCpnt->device->id * 8 +
-			(u8)(SCpnt->device->lun & 0x7), info->busyluns);
+	set_bit(SCpnt->device->id * 8 +
+		(u8)(SCpnt->device->lun & 0x7), info->busyluns);
 
 	info->stats.removes += 1;
 	switch (SCpnt->cmnd[0]) {
···
 	init_SCp(SCpnt);
 	SCpnt->SCp.Message = 0;
 	SCpnt->SCp.Status = 0;
-	SCpnt->tag = 0;
 	SCpnt->host_scribble = (void *)fas216_rq_sns_done;
 
 	/*
···
 	init_SCp(SCpnt);
 
 	info->stats.queues += 1;
-	SCpnt->tag = 0;
 
 	spin_lock(&info->host_lock);
···
 	dev = &info->device[scd->id];
 	seq_printf(m, "     %d/%llu   ", scd->id, scd->lun);
 	if (scd->tagged_supported)
-		seq_printf(m, "%3sabled(%3d) ",
-			   scd->simple_tags ? "en" : "dis",
-			   scd->current_tag);
+		seq_printf(m, "%3sabled ",
+			   scd->simple_tags ? "en" : "dis");
 	else
 		seq_puts(m, "unsupported ");
+1 -1
drivers/scsi/arm/queue.c
···
 	list_for_each(l, &queue->head) {
 		QE_t *q = list_entry(l, QE_t, list);
 		if (q->SCpnt->device->id == target && q->SCpnt->device->lun == lun &&
-		    q->SCpnt->tag == tag) {
+		    scsi_cmd_to_rq(q->SCpnt)->tag == tag) {
 			SCpnt = __queue_remove(queue, l);
 			break;
 		}
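The common thread in the acornscsi, fas216, and queue.c changes above: the drivers stop inventing tags with a per-device current_tag counter and instead reuse the tag the block layer already assigned to the request backing each SCSI command. The helper they switch to, scsi_cmd_to_rq(), is essentially (as of this series, in include/scsi/scsi_cmnd.h):

/* A SCSI command is allocated as the PDU of a blk-mq request, so the
 * owning request (and its per-queue-unique ->tag) can be recovered
 * from the command itself. */
static inline struct request *scsi_cmd_to_rq(struct scsi_cmnd *scmd)
{
	return blk_mq_rq_from_pdu(scmd);
}

Since blk-mq guarantees tag uniqueness per queue, the hand-rolled wrap-around counter (and the scmd->tag field it fed) becomes dead weight, which is what the "Kill scmd->tag" and "Remove 'current_tag'" commits delete.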
+2 -2
drivers/scsi/elx/efct/efct_lio.c
···
 	struct efct *efct = lio_vport->efct;
 	unsigned long flags = 0;
 
-	spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
-
 	if (lio_vport->fc_vport)
 		fc_vport_terminate(lio_vport->fc_vport);
+
+	spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
 
 	list_for_each_entry_safe(vport, next_vport, &efct->tgt_efct.vport_list,
 				 list_entry) {
+3 -4
drivers/scsi/elx/libefc/efc_device.c
···
 		break;
 
 	case EFC_EVT_NPORT_TOPOLOGY_NOTIFY: {
-		enum efc_nport_topology topology =
-					(enum efc_nport_topology)arg;
+		enum efc_nport_topology *topology = arg;
 
 		WARN_ON(node->nport->domain->attached);
 
 		WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI);
 
 		node_printf(node, "topology notification, topology=%d\n",
-			    topology);
+			    *topology);
 
 		/* At the time the PLOGI was received, the topology was unknown,
 		 * so we didn't know which node would perform the domain attach:
 		 * 1. The node from which the PLOGI was sent (p2p) or
 		 * 2. The node to which the FLOGI was sent (fabric).
 		 */
-		if (topology == EFC_NPORT_TOPO_P2P) {
+		if (*topology == EFC_NPORT_TOPO_P2P) {
 			/* if this is p2p, need to attach to the domain using
 			 * the d_id from the PLOGI received
 			 */
+1 -2
drivers/scsi/elx/libefc/efc_fabric.c
···
 efc_fabric_notify_topology(struct efc_node *node)
 {
 	struct efc_node *tmp_node;
-	enum efc_nport_topology topology = node->nport->topology;
 	unsigned long index;
 
 	/*
···
 		if (tmp_node != node) {
 			efc_node_post_event(tmp_node,
 					    EFC_EVT_NPORT_TOPOLOGY_NOTIFY,
-					    (void *)topology);
+					    &node->nport->topology);
 		}
 	}
 }
+4 -6
drivers/scsi/lpfc/lpfc_attr.c
···
 				"6312 Catching potential buffer "
 				"overflow > PAGE_SIZE = %lu bytes\n",
 				PAGE_SIZE);
-		strscpy(buf + PAGE_SIZE - 1 -
-			strnlen(LPFC_INFO_MORE_STR, PAGE_SIZE - 1),
-			LPFC_INFO_MORE_STR,
-			strnlen(LPFC_INFO_MORE_STR, PAGE_SIZE - 1)
-			+ 1);
+		strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR),
+			LPFC_INFO_MORE_STR, sizeof(LPFC_INFO_MORE_STR) + 1);
 	}
 	return len;
 }
···
 	len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d  total SGEs: %d\n",
 		       phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
 
-	len += scnprintf(buf + len, PAGE_SIZE, "Cfg: %d  SCSI: %d  NVME: %d\n",
+	len += scnprintf(buf + len, PAGE_SIZE - len,
+			 "Cfg: %d  SCSI: %d  NVME: %d\n",
 		       phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
 		       phba->cfg_nvme_seg_cnt);
 	return len;
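The second hunk is the classic append-formatting bug: when writing at buf + len, the size argument must shrink by len, otherwise the call is allowed to write up to the full buffer size past the offset, running off the end of the page. A runnable user-space sketch of the correct pattern, using snprintf where the kernel uses scnprintf (the buffer size is a stand-in for PAGE_SIZE):

#include <stdio.h>

#define BUF_SIZE 64	/* stand-in for PAGE_SIZE */

int main(void)
{
	char buf[BUF_SIZE];
	int len;

	len = snprintf(buf, BUF_SIZE, "SGL sz: %d  total SGEs: %d\n", 4096, 256);

	/* Correct: the space left is BUF_SIZE - len, not BUF_SIZE.
	 * Passing BUF_SIZE here would permit a write of up to BUF_SIZE
	 * bytes starting at buf + len, i.e. past the buffer end. */
	len += snprintf(buf + len, BUF_SIZE - len, "Cfg: %d\n", 64);

	fputs(buf, stdout);
	return 0;
}

One caveat when porting this pattern to user space: snprintf returns the length that would have been written, so on truncation len can exceed the buffer; the kernel's scnprintf caps its return value at the bytes actually written, which is why chained accumulation like the above is safe there.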
+5 -5
drivers/scsi/lpfc/lpfc_els.c
···
 			be32_to_cpu(pcgd->desc_tag),
 			be32_to_cpu(pcgd->desc_len),
 			be32_to_cpu(pcgd->xmt_signal_capability),
-			be32_to_cpu(pcgd->xmt_signal_frequency.count),
-			be32_to_cpu(pcgd->xmt_signal_frequency.units),
+			be16_to_cpu(pcgd->xmt_signal_frequency.count),
+			be16_to_cpu(pcgd->xmt_signal_frequency.units),
 			be32_to_cpu(pcgd->rcv_signal_capability),
-			be32_to_cpu(pcgd->rcv_signal_frequency.count),
-			be32_to_cpu(pcgd->rcv_signal_frequency.units));
+			be16_to_cpu(pcgd->rcv_signal_frequency.count),
+			be16_to_cpu(pcgd->rcv_signal_frequency.units));
 
 	/* Compare driver and Fport capabilities and choose
 	 * least common.
···
 		/* Extract the next WWPN from the payload */
 		wwn = *wwnlist++;
 		wwpn = be64_to_cpu(wwn);
-		len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ,
+		len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len,
 				 " %016llx", wwpn);
 
 		/* Log a message if we are on the last WWPN
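The frequency fields in the first hunk are 16-bit big-endian quantities; converting them with the 32-bit helper reads two extra bytes and byte-swaps the wrong width. A small user-space illustration of that failure mode, using compiler builtins in place of the kernel's be16_to_cpu/be32_to_cpu (assumes a little-endian host):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	/* A 16-bit count of 100 followed by a 16-bit units of 1,
	 * laid out big-endian as they arrive on the wire. */
	uint8_t wire[4] = { 0x00, 0x64, 0x00, 0x01 };

	uint16_t count16;
	uint32_t bad32;

	memcpy(&count16, wire, sizeof(count16));
	memcpy(&bad32, wire, sizeof(bad32));

	/* Right width: swap 2 bytes -> 100 */
	printf("be16: %u\n", __builtin_bswap16(count16));

	/* Wrong width: swaps 4 bytes and folds the neighbouring field
	 * into the result -> 0x00640001 = 6553601, not 100. */
	printf("be32: %u\n", __builtin_bswap32(bad32));
	return 0;
}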
+1 -1
drivers/scsi/lpfc/lpfc_hw4.h
···
 #define lpfc_mbx_rd_object_rlen_MASK	0x00FFFFFF
 #define lpfc_mbx_rd_object_rlen_WORD	word0
 	uint32_t rd_object_offset;
-	uint32_t rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW];
+	__le32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW];
 #define LPFC_OBJ_NAME_SZ 104	/* 26 x sizeof(uint32_t) is 104. */
 	uint32_t rd_object_cnt;
 	struct lpfc_mbx_host_buf rd_object_hbuf[4];
+10 -10
drivers/scsi/lpfc/lpfc_init.c
···
 	if (phba->cgn_fpin_frequency &&
 	    phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
 		value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
-		cp->cgn_stat_npm = cpu_to_le32(value);
+		cp->cgn_stat_npm = value;
 	}
 	value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
 				    LPFC_CGN_CRC32_SEED);
···
 	uint32_t mbps;
 	uint32_t dvalue, wvalue, lvalue, avalue;
 	uint64_t latsum;
-	uint16_t *ptr;
-	uint32_t *lptr;
-	uint16_t *mptr;
+	__le16 *ptr;
+	__le32 *lptr;
+	__le16 *mptr;
 
 	/* Make sure we have a congestion info buffer */
 	if (!phba->cgn_i)
···
 	if (phba->cgn_fpin_frequency &&
 	    phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
 		value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
-		cp->cgn_stat_npm = cpu_to_le32(value);
+		cp->cgn_stat_npm = value;
 	}
 
 	/* Read and clear the latency counters for this minute */
···
 		dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]);
 		wvalue += le32_to_cpu(cp->cgn_warn_hr[i]);
 		lvalue += le32_to_cpu(cp->cgn_latency_hr[i]);
-		mbps += le32_to_cpu(cp->cgn_bw_hr[i]);
+		mbps += le16_to_cpu(cp->cgn_bw_hr[i]);
 		avalue += le32_to_cpu(cp->cgn_alarm_hr[i]);
 	}
 	if (lvalue)		/* Avg of latency averages */
···
 	return 0;
 
 out_free_hba_hdwq_info:
-	free_percpu(phba->sli4_hba.c_stat);
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	free_percpu(phba->sli4_hba.c_stat);
 out_free_hba_idle_stat:
-	kfree(phba->sli4_hba.idle_stat);
 #endif
+	kfree(phba->sli4_hba.idle_stat);
 out_free_hba_eq_info:
 	free_percpu(phba->sli4_hba.eq_info);
 out_free_hba_cpu_map:
···
 
 	/* last used Index initialized to 0xff already */
 
-	cp->cgn_warn_freq = LPFC_FPIN_INIT_FREQ;
-	cp->cgn_alarm_freq = LPFC_FPIN_INIT_FREQ;
+	cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
+	cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
 	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
 	cp->cgn_info_crc = cpu_to_le32(crc);
-2
drivers/scsi/lpfc/lpfc_nvme.c
···
 	struct lpfc_nvme_qhandle *lpfc_queue_info;
 	struct lpfc_nvme_fcpreq_priv *freqpriv;
 	struct nvme_common_command *sqe;
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	uint64_t start = 0;
-#endif
 
 	/* Validate pointers. LLDD fault handling with transport does
 	 * have timing races.
+2 -7
drivers/scsi/lpfc/lpfc_scsi.c
···
 lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 		    uint8_t *txop, uint8_t *rxop)
 {
-	uint8_t ret = 0;
 
 	if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
 		switch (scsi_get_prot_op(sc)) {
···
 		}
 	}
 
-	return ret;
+	return 0;
 }
 #endif
···
 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
 	int err, idx;
 	u8 *uuid = NULL;
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-	uint64_t start = 0L;
+	uint64_t start;
 
-	if (phba->ktime_on)
-		start = ktime_get_ns();
-#endif
 	start = ktime_get_ns();
 	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+3 -2
drivers/scsi/lpfc/lpfc_sli.c
···
 	uint32_t shdr_status, shdr_add_status;
 	union lpfc_sli4_cfg_shdr *shdr;
 	struct lpfc_dmabuf *pcmd;
+	u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};
 
 	/* sanity check on queue memory */
 	if (!datap)
···
 
 	memset((void *)read_object->u.request.rd_object_name, 0,
 	       LPFC_OBJ_NAME_SZ);
-	sprintf((uint8_t *)read_object->u.request.rd_object_name, rdobject);
+	scnprintf((char *)rd_object_name, sizeof(rd_object_name), rdobject);
 	for (j = 0; j < strlen(rdobject); j++)
 		read_object->u.request.rd_object_name[j] =
-			cpu_to_le32(read_object->u.request.rd_object_name[j]);
+			cpu_to_le32(rd_object_name[j]);
 
 	pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
 	if (pcmd)
+3 -4
drivers/scsi/megaraid/megaraid_sas_base.c
···
 		raid = MR_LdRaidGet(ld, local_map_ptr);
 
 		if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
-		blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
+			blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
 
 		mr_device_priv_data->is_tm_capable =
 			raid->capability.tmCapable;
···
 
 	if (instance->adapter_type != MFI_SERIES) {
 		megasas_release_fusion(instance);
-			pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
+		pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
 				(sizeof(struct MR_PD_CFG_SEQ) *
 					(MAX_PHYSICAL_DEVICES - 1));
 		for (i = 0; i < 2 ; i++) {
···
 
 	if (event_type & SCAN_VD_CHANNEL) {
 		if (!instance->requestorId ||
-		    (instance->requestorId &&
-		     megasas_get_ld_vf_affiliation(instance, 0))) {
+		    megasas_get_ld_vf_affiliation(instance, 0)) {
 			dcmd_ret = megasas_ld_list_query(instance,
 					MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
 			if (dcmd_ret != DCMD_SUCCESS)
+3 -1
drivers/scsi/mpt3sas/mpt3sas_base.c
···
 	 * wait for current poll to complete.
 	 */
 	for (qid = 0; qid < iopoll_q_count; qid++) {
-		while (atomic_read(&ioc->io_uring_poll_queues[qid].busy))
+		while (atomic_read(&ioc->io_uring_poll_queues[qid].busy)) {
+			cpu_relax();
 			udelay(500);
+		}
 	}
 }
+1 -1
drivers/scsi/mpt3sas/mpt3sas_ctl.c
···
 		mpt3sas_check_cmd_timeout(ioc,
 		    ioc->ctl_cmds.status, mpi_request,
 		    sizeof(Mpi2DiagReleaseRequest_t)/4, reset_needed);
-		 *issue_reset = reset_needed;
+		*issue_reset = reset_needed;
 		rc = -EFAULT;
 		goto out;
 	}
+1 -2
drivers/scsi/mpt3sas/mpt3sas_scsih.c
···
 	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
 		_scsih_pcie_topology_change_event(ioc, fw_event);
 		ioc->current_event = NULL;
-		return;
-		break;
+		return;
 	}
 out:
 	fw_event_work_put(fw_event);
-23
drivers/scsi/ncr53c8xx.c
···
 static void ncr_put_start_queue(struct ncb *np, struct ccb *cp);
 
 static void insert_into_waiting_list(struct ncb *np, struct scsi_cmnd *cmd);
-static struct scsi_cmnd *retrieve_from_waiting_list(int to_remove, struct ncb *np, struct scsi_cmnd *cmd);
 static void process_waiting_list(struct ncb *np, int sts);
 
-#define remove_from_waiting_list(np, cmd) \
-		retrieve_from_waiting_list(1, (np), (cmd))
 #define requeue_waiting_list(np) process_waiting_list((np), DID_OK)
 #define reset_waiting_list(np) process_waiting_list((np), DID_RESET)
···
 			wcmd = (struct scsi_cmnd *) wcmd->next_wcmd;
 		wcmd->next_wcmd = (char *) cmd;
 	}
-}
-
-static struct scsi_cmnd *retrieve_from_waiting_list(int to_remove, struct ncb *np, struct scsi_cmnd *cmd)
-{
-	struct scsi_cmnd **pcmd = &np->waiting_list;
-
-	while (*pcmd) {
-		if (cmd == *pcmd) {
-			if (to_remove) {
-				*pcmd = (struct scsi_cmnd *) cmd->next_wcmd;
-				cmd->next_wcmd = NULL;
-			}
-#ifdef DEBUG_WAITING_LIST
-	printk("%s: cmd %lx retrieved from waiting list\n", ncr_name(np), (u_long) cmd);
-#endif
-			return cmd;
-		}
-		pcmd = (struct scsi_cmnd **) &(*pcmd)->next_wcmd;
-	}
-	return NULL;
 }
 
 static void process_waiting_list(struct ncb *np, int sts)
+2 -1
drivers/scsi/qla2xxx/qla_init.c
···
 			return 0;
 		break;
 	case QLA2XXX_INI_MODE_DUAL:
-		if (!qla_dual_mode_enabled(vha))
+		if (!qla_dual_mode_enabled(vha) &&
+		    !qla_ini_mode_enabled(vha))
 			return 0;
 		break;
 	case QLA2XXX_INI_MODE_ENABLED:
+4 -4
drivers/scsi/scsi_transport_iscsi.c
···
 	struct iscsi_transport *t = iface->transport;
 	int param = -1;
 
-	if (attr == &dev_attr_iface_enabled.attr)
-		param = ISCSI_NET_PARAM_IFACE_ENABLE;
-	else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
+	if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
 		param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO;
 	else if (attr == &dev_attr_iface_header_digest.attr)
 		param = ISCSI_IFACE_PARAM_HDRDGST_EN;
···
 	if (param != -1)
 		return t->attr_is_visible(ISCSI_IFACE_PARAM, param);
 
-	if (attr == &dev_attr_iface_vlan_id.attr)
+	if (attr == &dev_attr_iface_enabled.attr)
+		param = ISCSI_NET_PARAM_IFACE_ENABLE;
+	else if (attr == &dev_attr_iface_vlan_id.attr)
 		param = ISCSI_NET_PARAM_VLAN_ID;
 	else if (attr == &dev_attr_iface_vlan_priority.attr)
 		param = ISCSI_NET_PARAM_VLAN_PRIORITY;
+9 -5
drivers/scsi/sd.c
···
 	retries = 0;
 
 	do {
+		bool media_was_present = sdkp->media_present;
+
 		cmd[0] = TEST_UNIT_READY;
 		memset((void *) &cmd[1], 0, 9);
···
 		 * with any more polling.
 		 */
 		if (media_not_present(sdkp, &sshdr)) {
-			sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n");
+			if (media_was_present)
+				sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n");
 			return;
 		}
···
 	}
 
 	device_initialize(&sdkp->dev);
-	sdkp->dev.parent = dev;
+	sdkp->dev.parent = get_device(dev);
 	sdkp->dev.class = &sd_disk_class;
 	dev_set_name(&sdkp->dev, "%s", dev_name(dev));
 
 	error = device_add(&sdkp->dev);
-	if (error)
-		goto out_free_index;
+	if (error) {
+		put_device(&sdkp->dev);
+		goto out;
+	}
 
-	get_device(dev);
 	dev_set_drvdata(dev, sdkp);
 
 	gd->major = sd_major((index & 0xf0) >> 4);
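The first two hunks implement the "less noisy" change from the pull summary: the "Media removed" notice is now edge-triggered, printed only on the transition from present to absent rather than on every poll. A tiny runnable sketch of the idea (poll_media_present() is a hypothetical stand-in for the TEST UNIT READY round trip):

#include <stdio.h>
#include <stdbool.h>

/* Simulated per-poll media state: present for two polls, then removed. */
static bool poll_media_present(int iteration)
{
	return iteration < 2;
}

int main(void)
{
	bool media_present = true;	/* last known state */

	for (int i = 0; i < 5; i++) {
		bool media_was_present = media_present;

		media_present = poll_media_present(i);
		/* Log only on the present -> absent edge, so a drive
		 * polled every couple of seconds does not repeat the
		 * same notice on each poll. */
		if (!media_present && media_was_present)
			printf("Media removed, stopped polling\n");
	}
	return 0;
}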
+4 -4
drivers/scsi/sd_zbc.c
···
 
 	/*
 	 * Report zone buffer size should be at most 64B times the number of
-	 * zones requested plus the 64B reply header, but should be at least
-	 * SECTOR_SIZE for ATA devices.
+	 * zones requested plus the 64B reply header, but should be aligned
+	 * to SECTOR_SIZE for ATA devices.
 	 * Make sure that this size does not exceed the hardware capabilities.
 	 * Furthermore, since the report zone command cannot be split, make
 	 * sure that the allocated buffer can always be mapped by limiting the
···
 			*buflen = bufsize;
 			return buf;
 		}
-		bufsize >>= 1;
+		bufsize = rounddown(bufsize >> 1, SECTOR_SIZE);
 	}
 
 	return NULL;
···
 {
 	struct scsi_disk *sdkp;
 	unsigned long flags;
-	unsigned int zno;
+	sector_t zno;
 	int ret;
 
 	sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work);
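Two distinct fixes here: the allocation-retry loop now halves the buffer while keeping it SECTOR_SIZE-aligned (a bare bufsize >>= 1 can yield a size that is not a multiple of 512 once the size is an odd number of sectors, which ATA devices cannot accept), and the zone number gets a 64-bit sector_t so disks with more than 2**32 logical blocks compute the right zone. A plain-C sketch of the rounddown() arithmetic (the macro below mirrors the kernel's remainder-dropping definition for this use):

#include <stdio.h>

#define SECTOR_SIZE 512u

/* Same arithmetic as the kernel's rounddown(x, y) here:
 * drop the remainder of x / y. */
#define rounddown(x, y)  (((x) / (y)) * (y))

int main(void)
{
	unsigned int bufsize = 33280;	/* 65 sectors: halving alone breaks alignment */

	while (bufsize >= SECTOR_SIZE) {
		printf("try %u (aligned: %s)\n", bufsize,
		       bufsize % SECTOR_SIZE ? "no" : "yes");
		/* Halve, then re-align down to a sector multiple, as the
		 * sd_zbc fix does. A bare >> 1 would try 16640, which is
		 * 32.5 sectors and not 512-aligned. */
		bufsize = rounddown(bufsize >> 1, SECTOR_SIZE);
	}
	return 0;
}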
+18 -4
drivers/scsi/ses.c
···
 		0
 	};
 	unsigned char recv_page_code;
+	unsigned int retries = SES_RETRIES;
+	struct scsi_sense_hdr sshdr;
 
-	ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
-			       NULL, SES_TIMEOUT, SES_RETRIES, NULL);
+	do {
+		ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
+				       &sshdr, SES_TIMEOUT, 1, NULL);
+	} while (ret > 0 && --retries && scsi_sense_valid(&sshdr) &&
+		 (sshdr.sense_key == NOT_READY ||
+		  (sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29)));
+
 	if (unlikely(ret))
 		return ret;
···
 		bufflen & 0xff,
 		0
 	};
+	struct scsi_sense_hdr sshdr;
+	unsigned int retries = SES_RETRIES;
 
-	result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen,
-				  NULL, SES_TIMEOUT, SES_RETRIES, NULL);
+	do {
+		result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen,
+					  &sshdr, SES_TIMEOUT, 1, NULL);
+	} while (result > 0 && --retries && scsi_sense_valid(&sshdr) &&
+		 (sshdr.sense_key == NOT_READY ||
+		  (sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29)));
+
 	if (result)
 		sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n",
 			    result);
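The ses change replaces a single blanket-retried submission with an explicit loop that retries only on transient sense conditions: NOT READY (sense key 0x02), or UNIT ATTENTION (0x06) with ASC 0x29, the power-on/reset notification. A minimal user-space skeleton of that loop shape; issue_cmd() and its behaviour are stand-ins, not a real SCSI stack:

#include <stdio.h>

#define RETRIES		5
#define NOT_READY	0x02	/* SCSI sense key values */
#define UNIT_ATTENTION	0x06

struct sense { int valid, key, asc; };

/* Hypothetical command submission: fails twice with UNIT ATTENTION
 * (ASC 0x29, power-on/reset), then succeeds. */
static int issue_cmd(struct sense *s)
{
	static int calls;

	if (calls++ < 2) {
		*s = (struct sense){ .valid = 1,
				     .key = UNIT_ATTENTION, .asc = 0x29 };
		return 1;		/* > 0: check sense, maybe retry */
	}
	*s = (struct sense){ 0 };
	return 0;			/* success */
}

int main(void)
{
	struct sense sshdr;
	unsigned int retries = RETRIES;
	int ret;

	do {
		ret = issue_cmd(&sshdr);
	} while (ret > 0 && --retries && sshdr.valid &&
		 (sshdr.key == NOT_READY ||
		  (sshdr.key == UNIT_ATTENTION && sshdr.asc == 0x29)));

	printf("ret=%d after %u attempts\n", ret, RETRIES - retries + 1);
	return 0;
}

The point of moving the retry decision out of scsi_execute_req() (note the retry argument dropping from SES_RETRIES to 1) is that the caller can inspect the sense data and retry only the conditions that are actually transient.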
+1 -1
drivers/scsi/sr_ioctl.c
···
 			return rc;
 		cd->readcd_known = 0;
 		sr_printk(KERN_INFO, cd,
-			  "CDROM does'nt support READ CD (0xbe) command\n");
+			  "CDROM doesn't support READ CD (0xbe) command\n");
 		/* fall & retry the other way */
 	}
 	/* ... if this fails, we switch the blocksize using MODE SELECT */
+1
drivers/scsi/st.c
···
 	case CDROM_SEND_PACKET:
 		if (!capable(CAP_SYS_RAWIO))
 			return -EPERM;
+		break;
 	default:
 		break;
 	}
+78
drivers/scsi/ufs/ufshcd-pci.c
···
 	return err;
 }
 
+static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
+{
+	struct ufs_pa_layer_attr pwr_info = hba->pwr_info;
+	int ret;
+
+	pwr_info.lane_rx = lanes;
+	pwr_info.lane_tx = lanes;
+	ret = ufshcd_config_pwr_mode(hba, &pwr_info);
+	if (ret)
+		dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n",
+			__func__, lanes, ret);
+	return ret;
+}
+
+static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
+				enum ufs_notify_change_status status,
+				struct ufs_pa_layer_attr *dev_max_params,
+				struct ufs_pa_layer_attr *dev_req_params)
+{
+	int err = 0;
+
+	switch (status) {
+	case PRE_CHANGE:
+		if (ufshcd_is_hs_mode(dev_max_params) &&
+		    (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2))
+			ufs_intel_set_lanes(hba, 2);
+		memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params));
+		break;
+	case POST_CHANGE:
+		if (ufshcd_is_hs_mode(dev_req_params)) {
+			u32 peer_granularity;
+
+			usleep_range(1000, 1250);
+			err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+						  &peer_granularity);
+		}
+		break;
+	default:
+		break;
+	}
+
+	return err;
+}
+
+static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba)
+{
+	u32 granularity, peer_granularity;
+	u32 pa_tactivate, peer_pa_tactivate;
+	int ret;
+
+	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &peer_granularity);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &peer_pa_tactivate);
+	if (ret)
+		goto out;
+
+	if (granularity == peer_granularity) {
+		u32 new_peer_pa_tactivate = pa_tactivate + 2;
+
+		ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), new_peer_pa_tactivate);
+	}
+out:
+	return ret;
+}
+
 #define INTEL_ACTIVELTR		0x804
 #define INTEL_IDLELTR		0x808
···
 	struct ufs_host *ufs_host;
 	int err;
 
+	hba->nop_out_timeout = 200;
 	hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
 	hba->caps |= UFSHCD_CAP_CRYPTO;
 	err = ufs_intel_common_init(hba);
···
 	.exit			= ufs_intel_common_exit,
 	.hce_enable_notify	= ufs_intel_hce_enable_notify,
 	.link_startup_notify	= ufs_intel_link_startup_notify,
+	.pwr_change_notify	= ufs_intel_lkf_pwr_change_notify,
+	.apply_dev_quirks	= ufs_intel_lkf_apply_dev_quirks,
 	.resume			= ufs_intel_resume,
 	.device_reset		= ufs_intel_device_reset,
 };
+57 -59
drivers/scsi/ufs/ufshcd.c
···
 #include <linux/blk-pm.h>
 #include <linux/blkdev.h>
 #include <scsi/scsi_driver.h>
-#include <scsi/scsi_transport.h>
-#include "../scsi_transport_api.h"
 #include "ufshcd.h"
 #include "ufs_quirks.h"
 #include "unipro.h"
···
 static irqreturn_t ufshcd_intr(int irq, void *__hba);
 static int ufshcd_change_power_mode(struct ufs_hba *hba,
 			     struct ufs_pa_layer_attr *pwr_mode);
+static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
···
 out:
 	up_read(&hba->clk_scaling_lock);
 
-	if (ufs_trigger_eh())
-		scsi_schedule_eh(hba->host);
+	if (ufs_trigger_eh()) {
+		unsigned long flags;
+
+		spin_lock_irqsave(hba->host->host_lock, flags);
+		ufshcd_schedule_eh_work(hba);
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+	}
 
 	return err;
 }
···
 }
 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
 
-static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
-{
-	lockdep_assert_held(hba->host->host_lock);
-
-	return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
-	       (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
-}
-
-static void ufshcd_schedule_eh(struct ufs_hba *hba)
-{
-	bool schedule_eh = false;
-	unsigned long flags;
-
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	/* handle fatal errors only when link is not in error state */
-	if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
-		if (hba->force_reset || ufshcd_is_link_broken(hba) ||
-		    ufshcd_is_saved_err_fatal(hba))
-			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
-		else
-			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
-		schedule_eh = true;
-	}
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-
-	if (schedule_eh)
-		scsi_schedule_eh(hba->host);
-}
-
 /**
  * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
  * state) and waits for it to take effect.
···
 {
 	DECLARE_COMPLETION_ONSTACK(uic_async_done);
 	unsigned long flags;
-	bool schedule_eh = false;
 	u8 status;
 	int ret;
 	bool reenable_intr = false;
···
 		ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
 	if (ret) {
 		ufshcd_set_link_broken(hba);
-		schedule_eh = true;
+		ufshcd_schedule_eh_work(hba);
 	}
-
 out_unlock:
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
-
-	if (schedule_eh)
-		ufshcd_schedule_eh(hba);
 	mutex_unlock(&hba->uic_cmd_mutex);
 
 	return ret;
···
 	mutex_lock(&hba->dev_cmd.lock);
 	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
 		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
-					  NOP_OUT_TIMEOUT);
+					  hba->nop_out_timeout);
 
 		if (!err || err == -ETIMEDOUT)
 			break;
···
 	return err_handling;
 }
 
+/* host lock must be held before calling this func */
+static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
+{
+	return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
+	       (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
+}
+
+/* host lock must be held before calling this func */
+static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
+{
+	/* handle fatal errors only when link is not in error state */
+	if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
+		if (hba->force_reset || ufshcd_is_link_broken(hba) ||
+		    ufshcd_is_saved_err_fatal(hba))
+			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
+		else
+			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
+		queue_work(hba->eh_wq, &hba->eh_work);
+	}
+}
+
 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
 {
 	down_write(&hba->clk_scaling_lock);
···
 
 /**
  * ufshcd_err_handler - handle UFS errors that require s/w attention
- * @host: SCSI host pointer
+ * @work: pointer to work structure
  */
-static void ufshcd_err_handler(struct Scsi_Host *host)
+static void ufshcd_err_handler(struct work_struct *work)
 {
-	struct ufs_hba *hba = shost_priv(host);
+	struct ufs_hba *hba;
 	unsigned long flags;
 	bool err_xfer = false;
 	bool err_tm = false;
···
 	int tag;
 	bool needs_reset = false, needs_restore = false;
 
+	hba = container_of(work, struct ufs_hba, eh_work);
+
 	down(&hba->host_sem);
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	hba->host->host_eh_scheduled = 0;
 	if (ufshcd_err_handling_should_stop(hba)) {
 		if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
 			hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
···
 				"host_regs: ");
 			ufshcd_print_pwr_info(hba);
 		}
+		ufshcd_schedule_eh_work(hba);
 		retval |= IRQ_HANDLED;
 	}
 	/*
···
 	hba->errors = 0;
 	hba->uic_error = 0;
 	spin_unlock(hba->host->host_lock);
-
-	if (queue_eh_work)
-		ufshcd_schedule_eh(hba);
-
 	return retval;
 }
···
 			err = ufshcd_clear_cmd(hba, pos);
 			if (err)
 				break;
-			__ufshcd_transfer_req_compl(hba, pos, /*retry_requests=*/true);
+			__ufshcd_transfer_req_compl(hba, 1U << pos, false);
 		}
 	}
···
 	 * will be to send LU reset which, again, is a spec violation.
 	 * To avoid these unnecessary/illegal steps, first we clean up
 	 * the lrb taken by this cmd and re-set it in outstanding_reqs,
-	 * then queue the error handler and bail.
+	 * then queue the eh_work and bail.
 	 */
 	if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
 		ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
 
 		spin_lock_irqsave(host->host_lock, flags);
 		hba->force_reset = true;
+		ufshcd_schedule_eh_work(hba);
 		spin_unlock_irqrestore(host->host_lock, flags);
-
-		ufshcd_schedule_eh(hba);
-
 		goto release;
 	}
···
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	hba->force_reset = true;
+	ufshcd_schedule_eh_work(hba);
 	dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-	ufshcd_err_handler(hba->host);
+	flush_work(&hba->eh_work);
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
···
 	if (hba->is_powered) {
 		ufshcd_exit_clk_scaling(hba);
 		ufshcd_exit_clk_gating(hba);
+		if (hba->eh_wq)
+			destroy_workqueue(hba->eh_wq);
 		ufs_debugfs_hba_exit(hba);
 		ufshcd_variant_hba_exit(hba);
 		ufshcd_setup_vreg(hba, false);
···
 	return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
 }
 
-static struct scsi_transport_template ufshcd_transport_template = {
-	.eh_strategy_handler = ufshcd_err_handler,
-};
-
 /**
  * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
  * @dev: pointer to device handle
···
 		err = -ENOMEM;
 		goto out_error;
 	}
-	host->transportt = &ufshcd_transport_template;
 	hba = shost_priv(host);
 	hba->host = host;
 	hba->dev = dev;
 	hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
+	hba->nop_out_timeout = NOP_OUT_TIMEOUT;
 	INIT_LIST_HEAD(&hba->clk_list_head);
 	spin_lock_init(&hba->outstanding_lock);
···
 	int err;
 	struct Scsi_Host *host = hba->host;
 	struct device *dev = hba->dev;
+	char eh_wq_name[sizeof("ufs_eh_wq_00")];
 
 	if (!mmio_base) {
 		dev_err(hba->dev,
···
 
 	hba->max_pwr_info.is_valid = false;
 
+	/* Initialize work queues */
+	snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
+		 hba->host->host_no);
+	hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
+	if (!hba->eh_wq) {
+		dev_err(hba->dev, "%s: failed to create eh workqueue\n",
+			__func__);
+		err = -ENOMEM;
+		goto out_disable;
+	}
+	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
 	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
 
 	sema_init(&hba->host_sem, 1);
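The revert above moves UFS error handling off the SCSI EH thread and back onto a driver-private single-threaded workqueue. The wiring is the standard kernel pattern; a stripped-down, module-style sketch of it (the demo_* names are hypothetical, only the workqueue API calls are real):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct work_struct demo_work;

static void demo_handler(struct work_struct *work)
{
	pr_info("handling deferred error work\n");
}

static int __init demo_init(void)
{
	/* One ordered worker thread, so the handler never runs
	 * concurrently with itself (mirrors ufs_eh_wq_%d above). */
	demo_wq = create_singlethread_workqueue("demo_eh_wq");
	if (!demo_wq)
		return -ENOMEM;
	INIT_WORK(&demo_work, demo_handler);
	queue_work(demo_wq, &demo_work);  /* re-queuing while pending is a no-op */
	return 0;
}

static void __exit demo_exit(void)
{
	flush_work(&demo_work);		/* wait for a running handler, like the reset path */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");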
+5
drivers/scsi/ufs/ufshcd.h
···
  * @is_powered: flag to check if HBA is powered
  * @shutting_down: flag to check if shutdown has been invoked
  * @host_sem: semaphore used to serialize concurrent contexts
+ * @eh_wq: Workqueue that eh_work works on
+ * @eh_work: Worker to handle UFS errors that require s/w attention
  * @eeh_work: Worker to handle exception events
  * @errors: HBA errors
  * @uic_error: UFS interconnect layer error status
···
 	struct semaphore host_sem;
 
 	/* Work Queues */
+	struct workqueue_struct *eh_wq;
+	struct work_struct eh_work;
 	struct work_struct eeh_work;
 
 	/* HBA Errors */
···
 	/* Device management request data */
 	struct ufs_dev_cmd dev_cmd;
 	ktime_t last_dme_cmd_tstamp;
+	int nop_out_timeout;
 
 	/* Keeps information of the UFS device connected to this host */
 	struct ufs_dev_info dev_info;
+3 -5
drivers/scsi/ufs/ufshpb.c
···
 }
 
 static void
-ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshpb_lu *hpb,
-			    struct ufshcd_lrb *lrbp, u32 lpn, __be64 ppn,
-			    u8 transfer_len, int read_id)
+ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
+			    __be64 ppn, u8 transfer_len, int read_id)
 {
 	unsigned char *cdb = lrbp->cmd->cmnd;
 	__be64 ppn_tmp = ppn;
···
 		}
 	}
 
-	ufshpb_set_hpb_read_to_upiu(hba, hpb, lrbp, lpn, ppn, transfer_len,
-				    read_id);
+	ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len, read_id);
 
 	hpb->stats.hit_cnt++;
 	return 0;
+20 -12
drivers/target/target_core_configfs.c
···
 {
 	struct se_dev_attrib *da = to_attrib(item);
 	struct se_device *dev = da->da_dev;
-	bool flag;
+	bool flag, oldflag;
 	int ret;
+
+	ret = strtobool(page, &flag);
+	if (ret < 0)
+		return ret;
+
+	oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA);
+	if (flag == oldflag)
+		return count;
 
 	if (!(dev->transport->transport_flags_changeable &
 	      TRANSPORT_FLAG_PASSTHROUGH_ALUA)) {
 		pr_err("dev[%p]: Unable to change SE Device alua_support:"
 			" alua_support has fixed value\n", dev);
-		return -EINVAL;
+		return -ENOSYS;
 	}
-
-	ret = strtobool(page, &flag);
-	if (ret < 0)
-		return ret;
 
 	if (flag)
 		dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_ALUA;
···
 {
 	struct se_dev_attrib *da = to_attrib(item);
 	struct se_device *dev = da->da_dev;
-	bool flag;
+	bool flag, oldflag;
 	int ret;
+
+	ret = strtobool(page, &flag);
+	if (ret < 0)
+		return ret;
+
+	oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR);
+	if (flag == oldflag)
+		return count;
 
 	if (!(dev->transport->transport_flags_changeable &
 	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
 		pr_err("dev[%p]: Unable to change SE Device pgr_support:"
 			" pgr_support has fixed value\n", dev);
-		return -EINVAL;
+		return -ENOSYS;
 	}
-
-	ret = strtobool(page, &flag);
-	if (ret < 0)
-		return ret;
 
 	if (flag)
 		dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_PGR;
+1 -1
drivers/target/target_core_pr.c
···
 	spin_lock(&dev->dev_reservation_lock);
 	if (dev->reservation_holder &&
 	    dev->reservation_holder->se_node_acl != sess->se_node_acl) {
-		pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
+		pr_err("SCSI-2 RESERVATION CONFLICT for %s fabric\n",
 			tpg->se_tpg_tfo->fabric_name);
 		pr_err("Original reserver LUN: %llu %s\n",
 			cmd->se_lun->unpacked_lun,
-1
include/scsi/scsi_device.h
···
 	struct scsi_vpd __rcu *vpd_pg83;
 	struct scsi_vpd __rcu *vpd_pg80;
 	struct scsi_vpd __rcu *vpd_pg89;
-	unsigned char current_tag;	/* current tag */
 	struct scsi_target *sdev_target;
 
 	blist_flags_t sdev_bflags; /* black/white flags as also found in