Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull more SCSI updates from James Bottomley:
"The only new stuff which missed the first pull request is an update to
the UFS driver.

The rest is an assortment of bug fixes and minor tweaks which appeared
recently (some are fixes for recent code and some are stuff spotted
recently by the checkers or the new gcc-6 compiler [most of Arnd's
stuff])"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (32 commits)
scsi_common: do not clobber fixed sense information
scsi: ufs: select CONFIG_NLS
scsi: fc: use get/put_unaligned64 for wwn access
fnic: move printk()s outside of the critical code section.
qla2xxx: avoid maybe_uninitialized warning
megaraid_sas: add missing curly braces in ioctl handler
lpfc: fix misleading indentation
scsi_transport_sas: add 'scsi_target_id' sysfs attribute
scsi_dh_alua: uninitialized variable in alua_check_vpd()
scsi: ufs-qcom: add printouts of testbus debug registers
scsi: ufs-qcom: enable/disable the device ref clock
scsi: ufs-qcom: set PA_Local_TX_LCC_Enable before link startup
scsi: ufs: add device quirk delay before putting UFS rails in LPM
scsi: ufs: fix leakage during link off state
scsi: ufs: tune UniPro parameters to optimize hibern8 exit time
scsi: ufs: handle non spec compliant bkops behaviour by device
scsi: ufs: add retry for query descriptors
scsi: ufs: add error recovery after DL NAC error
scsi: ufs: make error handling bit faster
scsi: ufs: disable vccq if it's not needed by UFS device
...

+3
Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
···
 	defined or a value in the array is "0" then it is assumed
 	that the frequency is set by the parent clock or a
 	fixed rate clock source.
+- lanes-per-direction	: number of lanes available per direction - either 1 or 2.
+			  Note that it is assumed that the same number of lanes is used in
+			  both directions at once. If not specified, the default is 2 lanes
+			  per direction.

 Note: If above properties are not defined it can be assumed that the supply
 regulators or clocks are always on.
+1 -1
drivers/scsi/device_handler/scsi_dh_alua.c
···
 {
 	int rel_port = -1, group_id;
 	struct alua_port_group *pg, *old_pg = NULL;
-	bool pg_updated;
+	bool pg_updated = false;
 	unsigned long flags;

 	group_id = scsi_vpd_tpg_id(sdev, &rel_port);
+6 -7
drivers/scsi/fnic/fnic_scsi.c
···
 	case FCPIO_INVALID_PARAM:	/* some parameter in request invalid */
 	case FCPIO_REQ_NOT_SUPPORTED:	/* request type is not supported */
 	default:
-		shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
-			     fnic_fcpio_status_to_str(hdr_status));
 		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
 		break;
 	}
+
+	/* Break link with the SCSI command */
+	CMD_SP(sc) = NULL;
+	CMD_FLAGS(sc) |= FNIC_IO_DONE;
+
+	spin_unlock_irqrestore(io_lock, flags);

 	if (hdr_status != FCPIO_SUCCESS) {
 		atomic64_inc(&fnic_stats->io_stats.io_failures);
 		shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
 			     fnic_fcpio_status_to_str(hdr_status));
 	}
-	/* Break link with the SCSI command */
-	CMD_SP(sc) = NULL;
-	CMD_FLAGS(sc) |= FNIC_IO_DONE;
-
-	spin_unlock_irqrestore(io_lock, flags);

 	fnic_release_ioreq_buf(fnic, io_req, sc);
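
The fnic hunk above shortens the time io_lock is held with interrupts disabled: the command is unlinked and the lock is dropped before anything is printed, since console output is slow and only lengthens the critical section. A minimal sketch of that pattern, with hypothetical names (struct my_dev and its fields are made up for illustration, not fnic's actual structures):

        #include <linux/spinlock.h>
        #include <linux/device.h>

        struct my_dev {                         /* hypothetical per-device state */
                spinlock_t lock;
                unsigned int inflight;
                struct device *dev;
        };

        static void complete_request(struct my_dev *md, int hdr_status)
        {
                unsigned long flags;

                spin_lock_irqsave(&md->lock, flags);
                md->inflight--;                 /* touch shared state under the lock only */
                spin_unlock_irqrestore(&md->lock, flags);

                /* Logging happens after the lock is released. */
                if (hdr_status)
                        dev_err(md->dev, "completion failed, hdr status = %d\n",
                                hdr_status);
        }
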
+3 -2
drivers/scsi/lpfc/lpfc_init.c
···
 	}

 	vports = lpfc_create_vport_work_array(phba);
-	if (vports != NULL)
+	if (vports != NULL) {
 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 			struct Scsi_Host *shost;
 			shost = lpfc_shost_from_vport(vports[i]);
···
 			}
 			spin_unlock_irq(shost->host_lock);
 		}
-	lpfc_destroy_vport_work_array(phba, vports);
+	}
+	lpfc_destroy_vport_work_array(phba, vports);

 	lpfc_unblock_mgmt_io(phba);
 	return 0;
+1 -1
drivers/scsi/megaraid/megaraid_sas.h
···
 	u8 UnevenSpanSupport;

 	u8 supportmax256vd;
-	u8 allow_fw_scan;
+	u8 pd_list_not_supported;
 	u16 fw_supported_vd_count;
 	u16 fw_supported_pd_count;
+12 -5
drivers/scsi/megaraid/megaraid_sas_base.c
···
 	struct megasas_instance *instance;

 	instance = megasas_lookup_instance(sdev->host->host_no);
-	if (instance->allow_fw_scan) {
+	if (instance->pd_list_not_supported) {
 		if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
 		    sdev->type == TYPE_DISK) {
 			pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
···
 		pd_index =
 			(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
 			sdev->id;
-		if ((instance->allow_fw_scan || instance->pd_list[pd_index].driveState ==
+		if ((instance->pd_list_not_supported ||
+			instance->pd_list[pd_index].driveState ==
 			MR_PD_STATE_SYSTEM)) {
 			goto scan_target;
 		}
···

 	switch (ret) {
 	case DCMD_FAILED:
-		megaraid_sas_kill_hba(instance);
+		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
+			"failed/not supported by firmware\n");
+
+		if (instance->ctrl_context)
+			megaraid_sas_kill_hba(instance);
+		else
+			instance->pd_list_not_supported = 1;
 		break;
 	case DCMD_TIMEOUT:

···
 	case PCI_DEVICE_ID_DELL_PERC5:
 	default:
 		instance->instancet = &megasas_instance_template_xscale;
-		instance->allow_fw_scan = 1;
 		break;
 	}

···
 	}

 	for (i = 0; i < ioc->sge_count; i++) {
-		if (kbuff_arr[i])
+		if (kbuff_arr[i]) {
 			dma_free_coherent(&instance->pdev->dev,
 					  le32_to_cpu(kern_sge32[i].length),
 					  kbuff_arr[i],
 					  le32_to_cpu(kern_sge32[i].phys_addr));
 			kbuff_arr[i] = NULL;
+		}
 	}

 	megasas_return_cmd(instance, cmd);
+9 -7
drivers/scsi/qla2xxx/qla_target.c
···
 		else
 			vha->req->cnt = vha->req->length -
 				(vha->req->ring_index - cnt);
+
+		if (unlikely(vha->req->cnt < (req_cnt + 2))) {
+			ql_dbg(ql_dbg_io, vha, 0x305a,
+			    "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n",
+			    vha->vp_idx, vha->req->ring_index,
+			    vha->req->cnt, req_cnt, cnt, cnt_in,
+			    vha->req->length);
+			return -EAGAIN;
+		}
 	}

-	if (unlikely(vha->req->cnt < (req_cnt + 2))) {
-		ql_dbg(ql_dbg_io, vha, 0x305a,
-		    "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n",
-		    vha->vp_idx, vha->req->ring_index,
-		    vha->req->cnt, req_cnt, cnt, cnt_in, vha->req->length);
-		return -EAGAIN;
-	}
 	vha->req->cnt -= req_cnt;

 	return 0;
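
The qla2xxx change silences a gcc-6 -Wmaybe-uninitialized warning: cnt and cnt_in are only read from the hardware on the slow path, so the check that logs them is moved inside the branch that initializes them. A reduced illustration of the general shape of the warning and the restructuring (struct ring and ring_read_hw_out() are hypothetical, not the qla2xxx code):

        struct ring { int free; };                      /* hypothetical ring state */
        int ring_read_hw_out(struct ring *r);           /* hypothetical register read */

        /* Before: 'cnt' is written only on the slow path but used afterwards,
         * so gcc-6 warns it may be used uninitialized. */
        static int reserve_warns(struct ring *r, int need)
        {
                int cnt;

                if (r->free < need)
                        cnt = ring_read_hw_out(r);      /* recompute free space */

                if (cnt < need)                         /* 'cnt' may be uninitialized here */
                        return -EAGAIN;
                return 0;
        }

        /* After: the check lives inside the branch that initializes 'cnt'. */
        static int reserve_fixed(struct ring *r, int need)
        {
                if (r->free < need) {
                        int cnt = ring_read_hw_out(r);

                        if (cnt < need)
                                return -EAGAIN;
                }
                return 0;
        }
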
+10 -2
drivers/scsi/scsi_common.c
···
 		ucp[3] = 0;
 		put_unaligned_be64(info, &ucp[4]);
 	} else if ((buf[0] & 0x7f) == 0x70) {
-		buf[0] |= 0x80;
-		put_unaligned_be64(info, &buf[3]);
+		/*
+		 * Only set the 'VALID' bit if we can represent the value
+		 * correctly; otherwise just fill out the lower bytes and
+		 * clear the 'VALID' flag.
+		 */
+		if (info <= 0xffffffffUL)
+			buf[0] |= 0x80;
+		else
+			buf[0] &= 0x7f;
+		put_unaligned_be32((u32)info, &buf[3]);
 	}

 	return 0;
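
This fix matters because fixed-format sense data (response code 0x70) only has a 4-byte INFORMATION field, while descriptor format (0x72) carries a full 8 bytes; advertising VALID for a truncated value would mislead the initiator, and the old code also overwrote bytes beyond the field. A self-contained sketch of the same rule for a fixed-format buffer (fill_fixed_sense_info is a made-up name for illustration, not the kernel helper):

        #include <stdint.h>

        /* Illustrative only: mirror the rule from the hunk above. */
        static void fill_fixed_sense_info(uint8_t *buf, uint64_t info)
        {
                uint32_t lo = (uint32_t)info;

                if (info <= 0xffffffffULL)
                        buf[0] |= 0x80;         /* VALID: the value fits in 4 bytes */
                else
                        buf[0] &= 0x7f;         /* truncated: leave VALID clear */

                /* The INFORMATION field occupies bytes 3..6, big endian. */
                buf[3] = lo >> 24;
                buf[4] = (lo >> 16) & 0xff;
                buf[5] = (lo >> 8) & 0xff;
                buf[6] = lo & 0xff;
        }
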
+1 -1
drivers/scsi/scsi_sas_internal.h
···
 #define SAS_HOST_ATTRS		0
 #define SAS_PHY_ATTRS		17
 #define SAS_PORT_ATTRS		1
-#define SAS_RPORT_ATTRS		7
+#define SAS_RPORT_ATTRS		8
 #define SAS_END_DEV_ATTRS	5
 #define SAS_EXPANDER_ATTRS	7
+1 -1
drivers/scsi/scsi_sysfs.c
···
 	if (attr == &dev_attr_vpd_pg80 && !sdev->vpd_pg80)
 		return 0;

-	if (attr == &dev_attr_vpd_pg83 && sdev->vpd_pg83)
+	if (attr == &dev_attr_vpd_pg83 && !sdev->vpd_pg83)
 		return 0;

 	return S_IRUGO;
+2
drivers/scsi/scsi_transport_sas.c
···
 sas_rphy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n",
 		unsigned long long);
 sas_rphy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);
+sas_rphy_simple_attr(scsi_target_id, scsi_target_id, "%d\n", u32);

 /* only need 8 bytes of data plus header (4 or 8) */
 #define BUF_SIZE 64
···
 	SETUP_RPORT_ATTRIBUTE(rphy_device_type);
 	SETUP_RPORT_ATTRIBUTE(rphy_sas_address);
 	SETUP_RPORT_ATTRIBUTE(rphy_phy_identifier);
+	SETUP_RPORT_ATTRIBUTE(rphy_scsi_target_id);
 	SETUP_OPTIONAL_RPORT_ATTRIBUTE(rphy_enclosure_identifier,
 				       get_enclosure_identifier);
 	SETUP_OPTIONAL_RPORT_ATTRIBUTE(rphy_bay_identifier,
+1
drivers/scsi/ufs/Kconfig
···
 	depends on SCSI && SCSI_DMA
 	select PM_DEVFREQ
 	select DEVFREQ_GOV_SIMPLE_ONDEMAND
+	select NLS
 	---help---
 	This selects the support for UFS devices in Linux, say Y and make
 	sure that you know the name of your UFS host adapter (the card
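
The new "select NLS" is needed because the UFS core (see the ufshcd.c hunks further down) now converts the device's UTF-16 string descriptors to UTF-8 with utf16s_to_utf8s(), which is provided by the NLS code. A minimal sketch of that conversion, assuming a big-endian UTF-16 source as the UFS string descriptors use (the wrapper name utf16_to_ascii is made up for illustration):

        #include <linux/nls.h>
        #include <linux/string.h>

        /* Sketch: convert 'u16_len' bytes of big-endian UTF-16 into 'out'. */
        static int utf16_to_ascii(const u8 *u16_buf, int u16_len,
                                  u8 *out, int out_len)
        {
                memset(out, 0, out_len);
                /* second argument is the number of 16-bit code units */
                return utf16s_to_utf8s((const wchar_t *)u16_buf, u16_len / 2,
                                       UTF16_BIG_ENDIAN, out, out_len - 1);
        }
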
+131 -24
drivers/scsi/ufs/ufs-qcom.c
··· 1 1 /* 2 - * Copyright (c) 2013-2015, Linux Foundation. All rights reserved. 2 + * Copyright (c) 2013-2016, Linux Foundation. All rights reserved. 3 3 * 4 4 * This program is free software; you can redistribute it and/or modify 5 5 * it under the terms of the GNU General Public License version 2 and ··· 16 16 #include <linux/of.h> 17 17 #include <linux/platform_device.h> 18 18 #include <linux/phy/phy.h> 19 - 20 19 #include <linux/phy/phy-qcom-ufs.h> 20 + 21 21 #include "ufshcd.h" 22 22 #include "ufshcd-pltfrm.h" 23 23 #include "unipro.h" ··· 56 56 len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE, 57 57 16, 4, (void __force *)hba->mmio_base + offset, 58 58 len * 4, false); 59 + } 60 + 61 + static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len, 62 + char *prefix, void *priv) 63 + { 64 + ufs_qcom_dump_regs(hba, offset, len, prefix); 59 65 } 60 66 61 67 static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes) ··· 112 106 if (!host->is_lane_clks_enabled) 113 107 return; 114 108 115 - clk_disable_unprepare(host->tx_l1_sync_clk); 109 + if (host->hba->lanes_per_direction > 1) 110 + clk_disable_unprepare(host->tx_l1_sync_clk); 116 111 clk_disable_unprepare(host->tx_l0_sync_clk); 117 - clk_disable_unprepare(host->rx_l1_sync_clk); 112 + if (host->hba->lanes_per_direction > 1) 113 + clk_disable_unprepare(host->rx_l1_sync_clk); 118 114 clk_disable_unprepare(host->rx_l0_sync_clk); 119 115 120 116 host->is_lane_clks_enabled = false; ··· 140 132 if (err) 141 133 goto disable_rx_l0; 142 134 143 - err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk", 144 - host->rx_l1_sync_clk); 145 - if (err) 146 - goto disable_tx_l0; 135 + if (host->hba->lanes_per_direction > 1) { 136 + err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk", 137 + host->rx_l1_sync_clk); 138 + if (err) 139 + goto disable_tx_l0; 147 140 148 - err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk", 149 - host->tx_l1_sync_clk); 150 - if (err) 151 - goto disable_rx_l1; 141 + err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk", 142 + host->tx_l1_sync_clk); 143 + if (err) 144 + goto disable_rx_l1; 145 + } 152 146 153 147 host->is_lane_clks_enabled = true; 154 148 goto out; 155 149 156 150 disable_rx_l1: 157 - clk_disable_unprepare(host->rx_l1_sync_clk); 151 + if (host->hba->lanes_per_direction > 1) 152 + clk_disable_unprepare(host->rx_l1_sync_clk); 158 153 disable_tx_l0: 159 154 clk_disable_unprepare(host->tx_l0_sync_clk); 160 155 disable_rx_l0: ··· 181 170 if (err) 182 171 goto out; 183 172 184 - err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk", 185 - &host->rx_l1_sync_clk); 186 - if (err) 187 - goto out; 173 + /* In case of single lane per direction, don't read lane1 clocks */ 174 + if (host->hba->lanes_per_direction > 1) { 175 + err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk", 176 + &host->rx_l1_sync_clk); 177 + if (err) 178 + goto out; 188 179 189 - err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk", 190 - &host->tx_l1_sync_clk); 191 - 180 + err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk", 181 + &host->tx_l1_sync_clk); 182 + } 192 183 out: 193 184 return err; 194 185 } ··· 280 267 ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B); 281 268 282 269 if (ret) { 283 - dev_err(hba->dev, 284 - "%s: ufs_qcom_phy_calibrate_phy()failed, ret = %d\n", 285 - __func__, ret); 270 + dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n", 271 + __func__, ret); 286 272 goto out; 287 273 } 288 274 ··· 530 518 */ 531 519 err = 
ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 532 520 150); 521 + 522 + /* 523 + * Some UFS devices (and may be host) have issues if LCC is 524 + * enabled. So we are setting PA_Local_TX_LCC_Enable to 0 525 + * before link startup which will make sure that both host 526 + * and device TX LCC are disabled once link startup is 527 + * completed. 528 + */ 529 + if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41) 530 + err = ufshcd_dme_set(hba, 531 + UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 532 + 0); 533 533 534 534 break; 535 535 case POST_CHANGE: ··· 986 962 goto out; 987 963 } 988 964 965 + /* enable the device ref clock before changing to HS mode */ 966 + if (!ufshcd_is_hs_mode(&hba->pwr_info) && 967 + ufshcd_is_hs_mode(dev_req_params)) 968 + ufs_qcom_dev_ref_clk_ctrl(host, true); 989 969 break; 990 970 case POST_CHANGE: 991 971 if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx, ··· 1017 989 memcpy(&host->dev_req_params, 1018 990 dev_req_params, sizeof(*dev_req_params)); 1019 991 ufs_qcom_update_bus_bw_vote(host); 992 + 993 + /* disable the device ref clock if entered PWM mode */ 994 + if (ufshcd_is_hs_mode(&hba->pwr_info) && 995 + !ufshcd_is_hs_mode(dev_req_params)) 996 + ufs_qcom_dev_ref_clk_ctrl(host, false); 1020 997 break; 1021 998 default: 1022 999 ret = -EINVAL; ··· 1123 1090 ufs_qcom_phy_disable_iface_clk(host->generic_phy); 1124 1091 goto out; 1125 1092 } 1093 + /* enable the device ref clock for HS mode*/ 1094 + if (ufshcd_is_hs_mode(&hba->pwr_info)) 1095 + ufs_qcom_dev_ref_clk_ctrl(host, true); 1126 1096 vote = host->bus_vote.saved_vote; 1127 1097 if (vote == host->bus_vote.min_bw_vote) 1128 1098 ufs_qcom_update_bus_bw_vote(host); ··· 1403 1367 return err; 1404 1368 } 1405 1369 1370 + static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, 1371 + void *priv, void (*print_fn)(struct ufs_hba *hba, 1372 + int offset, int num_regs, char *str, void *priv)) 1373 + { 1374 + u32 reg; 1375 + struct ufs_qcom_host *host; 1376 + 1377 + if (unlikely(!hba)) { 1378 + pr_err("%s: hba is NULL\n", __func__); 1379 + return; 1380 + } 1381 + if (unlikely(!print_fn)) { 1382 + dev_err(hba->dev, "%s: print_fn is NULL\n", __func__); 1383 + return; 1384 + } 1385 + 1386 + host = ufshcd_get_variant(hba); 1387 + if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN)) 1388 + return; 1389 + 1390 + reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC); 1391 + print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv); 1392 + 1393 + reg = ufshcd_readl(hba, REG_UFS_CFG1); 1394 + reg |= UFS_BIT(17); 1395 + ufshcd_writel(hba, reg, REG_UFS_CFG1); 1396 + 1397 + reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM); 1398 + print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv); 1399 + 1400 + reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM); 1401 + print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv); 1402 + 1403 + reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM); 1404 + print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv); 1405 + 1406 + ufshcd_writel(hba, (reg & ~UFS_BIT(17)), REG_UFS_CFG1); 1407 + 1408 + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM); 1409 + print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv); 1410 + 1411 + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM); 1412 + print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv); 1413 + 1414 + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC); 1415 + print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv); 1416 + 1417 + reg = 
ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC); 1418 + print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv); 1419 + 1420 + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC); 1421 + print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv); 1422 + 1423 + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT); 1424 + print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv); 1425 + 1426 + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT); 1427 + print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv); 1428 + } 1429 + 1430 + static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host) 1431 + { 1432 + if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) 1433 + ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1); 1434 + else 1435 + ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1); 1436 + } 1437 + 1406 1438 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host) 1407 1439 { 1408 1440 /* provide a legal default configuration */ ··· 1579 1475 ufshcd_rmwl(host->hba, mask, 1580 1476 (u32)host->testbus.select_minor << offset, 1581 1477 reg); 1478 + ufs_qcom_enable_test_bus(host); 1582 1479 ufshcd_release(host->hba); 1583 1480 pm_runtime_put_sync(host->hba->dev); 1584 1481 ··· 1596 1491 ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16, 1597 1492 "HCI Vendor Specific Registers "); 1598 1493 1494 + ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper); 1599 1495 ufs_qcom_testbus_read(hba); 1600 1496 } 1497 + 1601 1498 /** 1602 1499 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations 1603 1500 * ··· 1644 1537 * ufs_qcom_remove - set driver_data of the device to NULL 1645 1538 * @pdev: pointer to platform device handle 1646 1539 * 1647 - * Always return 0 1540 + * Always returns 0 1648 1541 */ 1649 1542 static int ufs_qcom_remove(struct platform_device *pdev) 1650 1543 {
+9
drivers/scsi/ufs/ufs-qcom.h
···
 	struct ufs_qcom_testbus testbus;
 };

+static inline u32
+ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg)
+{
+	if (host->hw_ver.major <= 0x02)
+		return UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(reg);
+
+	return UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(reg);
+};
+
 #define ufs_qcom_is_link_off(hba)	ufshcd_is_link_off(hba)
 #define ufs_qcom_is_link_active(hba)	ufshcd_is_link_active(hba)
 #define ufs_qcom_is_link_hibern8(hba)	ufshcd_is_link_hibern8(hba)
+33
drivers/scsi/ufs/ufs.h
··· 43 43 #define GENERAL_UPIU_REQUEST_SIZE 32 44 44 #define QUERY_DESC_MAX_SIZE 255 45 45 #define QUERY_DESC_MIN_SIZE 2 46 + #define QUERY_DESC_HDR_SIZE 2 46 47 #define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \ 47 48 (sizeof(struct utp_upiu_header))) 48 49 ··· 194 193 UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18, 195 194 UNIT_DESC_PARAM_CTX_CAPABILITIES = 0x20, 196 195 UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22, 196 + }; 197 + 198 + /* Device descriptor parameters offsets in bytes*/ 199 + enum device_desc_param { 200 + DEVICE_DESC_PARAM_LEN = 0x0, 201 + DEVICE_DESC_PARAM_TYPE = 0x1, 202 + DEVICE_DESC_PARAM_DEVICE_TYPE = 0x2, 203 + DEVICE_DESC_PARAM_DEVICE_CLASS = 0x3, 204 + DEVICE_DESC_PARAM_DEVICE_SUB_CLASS = 0x4, 205 + DEVICE_DESC_PARAM_PRTCL = 0x5, 206 + DEVICE_DESC_PARAM_NUM_LU = 0x6, 207 + DEVICE_DESC_PARAM_NUM_WLU = 0x7, 208 + DEVICE_DESC_PARAM_BOOT_ENBL = 0x8, 209 + DEVICE_DESC_PARAM_DESC_ACCSS_ENBL = 0x9, 210 + DEVICE_DESC_PARAM_INIT_PWR_MODE = 0xA, 211 + DEVICE_DESC_PARAM_HIGH_PR_LUN = 0xB, 212 + DEVICE_DESC_PARAM_SEC_RMV_TYPE = 0xC, 213 + DEVICE_DESC_PARAM_SEC_LU = 0xD, 214 + DEVICE_DESC_PARAM_BKOP_TERM_LT = 0xE, 215 + DEVICE_DESC_PARAM_ACTVE_ICC_LVL = 0xF, 216 + DEVICE_DESC_PARAM_SPEC_VER = 0x10, 217 + DEVICE_DESC_PARAM_MANF_DATE = 0x12, 218 + DEVICE_DESC_PARAM_MANF_NAME = 0x14, 219 + DEVICE_DESC_PARAM_PRDCT_NAME = 0x15, 220 + DEVICE_DESC_PARAM_SN = 0x16, 221 + DEVICE_DESC_PARAM_OEM_ID = 0x17, 222 + DEVICE_DESC_PARAM_MANF_ID = 0x18, 223 + DEVICE_DESC_PARAM_UD_OFFSET = 0x1A, 224 + DEVICE_DESC_PARAM_UD_LEN = 0x1B, 225 + DEVICE_DESC_PARAM_RTT_CAP = 0x1C, 226 + DEVICE_DESC_PARAM_FRQ_RTC = 0x1D, 197 227 }; 198 228 199 229 /* ··· 501 469 struct regulator *reg; 502 470 const char *name; 503 471 bool enabled; 472 + bool unused; 504 473 int min_uV; 505 474 int max_uV; 506 475 int min_uA;
+151
drivers/scsi/ufs/ufs_quirks.h
··· 1 + /* 2 + * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 and 6 + * only version 2 as published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + */ 14 + 15 + #ifndef _UFS_QUIRKS_H_ 16 + #define _UFS_QUIRKS_H_ 17 + 18 + /* return true if s1 is a prefix of s2 */ 19 + #define STR_PRFX_EQUAL(s1, s2) !strncmp(s1, s2, strlen(s1)) 20 + 21 + #define UFS_ANY_VENDOR 0xFFFF 22 + #define UFS_ANY_MODEL "ANY_MODEL" 23 + 24 + #define MAX_MODEL_LEN 16 25 + 26 + #define UFS_VENDOR_TOSHIBA 0x198 27 + #define UFS_VENDOR_SAMSUNG 0x1CE 28 + 29 + /** 30 + * ufs_device_info - ufs device details 31 + * @wmanufacturerid: card details 32 + * @model: card model 33 + */ 34 + struct ufs_device_info { 35 + u16 wmanufacturerid; 36 + char model[MAX_MODEL_LEN + 1]; 37 + }; 38 + 39 + /** 40 + * ufs_dev_fix - ufs device quirk info 41 + * @card: ufs card details 42 + * @quirk: device quirk 43 + */ 44 + struct ufs_dev_fix { 45 + struct ufs_device_info card; 46 + unsigned int quirk; 47 + }; 48 + 49 + #define END_FIX { { 0 }, 0 } 50 + 51 + /* add specific device quirk */ 52 + #define UFS_FIX(_vendor, _model, _quirk) \ 53 + { \ 54 + .card.wmanufacturerid = (_vendor),\ 55 + .card.model = (_model), \ 56 + .quirk = (_quirk), \ 57 + } 58 + 59 + /* 60 + * If UFS device is having issue in processing LCC (Line Control 61 + * Command) coming from UFS host controller then enable this quirk. 62 + * When this quirk is enabled, host controller driver should disable 63 + * the LCC transmission on UFS host controller (by clearing 64 + * TX_LCC_ENABLE attribute of host to 0). 65 + */ 66 + #define UFS_DEVICE_QUIRK_BROKEN_LCC (1 << 0) 67 + 68 + /* 69 + * Some UFS devices don't need VCCQ rail for device operations. Enabling this 70 + * quirk for such devices will make sure that VCCQ rail is not voted. 71 + */ 72 + #define UFS_DEVICE_NO_VCCQ (1 << 1) 73 + 74 + /* 75 + * Some vendor's UFS device sends back to back NACs for the DL data frames 76 + * causing the host controller to raise the DFES error status. Sometimes 77 + * such UFS devices send back to back NAC without waiting for new 78 + * retransmitted DL frame from the host and in such cases it might be possible 79 + * the Host UniPro goes into bad state without raising the DFES error 80 + * interrupt. If this happens then all the pending commands would timeout 81 + * only after respective SW command (which is generally too large). 82 + * 83 + * We can workaround such device behaviour like this: 84 + * - As soon as SW sees the DL NAC error, it should schedule the error handler 85 + * - Error handler would sleep for 50ms to see if there are any fatal errors 86 + * raised by UFS controller. 87 + * - If there are fatal errors then SW does normal error recovery. 88 + * - If there are no fatal errors then SW sends the NOP command to device 89 + * to check if link is alive. 90 + * - If NOP command times out, SW does normal error recovery 91 + * - If NOP command succeed, skip the error handling. 
92 + * 93 + * If DL NAC error is seen multiple times with some vendor's UFS devices then 94 + * enable this quirk to initiate quick error recovery and also silence related 95 + * error logs to reduce spamming of kernel logs. 96 + */ 97 + #define UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS (1 << 2) 98 + 99 + /* 100 + * Some UFS devices may not work properly after resume if the link was kept 101 + * in off state during suspend. Enabling this quirk will not allow the 102 + * link to be kept in off state during suspend. 103 + */ 104 + #define UFS_DEVICE_QUIRK_NO_LINK_OFF (1 << 3) 105 + 106 + /* 107 + * Few Toshiba UFS device models advertise RX_MIN_ACTIVATETIME_CAPABILITY as 108 + * 600us which may not be enough for reliable hibern8 exit hardware sequence 109 + * from UFS device. 110 + * To workaround this issue, host should set its PA_TACTIVATE time to 1ms even 111 + * if device advertises RX_MIN_ACTIVATETIME_CAPABILITY less than 1ms. 112 + */ 113 + #define UFS_DEVICE_QUIRK_PA_TACTIVATE (1 << 4) 114 + 115 + /* 116 + * Some UFS memory devices may have really low read/write throughput in 117 + * FAST AUTO mode, enable this quirk to make sure that FAST AUTO mode is 118 + * never enabled for such devices. 119 + */ 120 + #define UFS_DEVICE_NO_FASTAUTO (1 << 5) 121 + 122 + /* 123 + * It seems some UFS devices may keep drawing more than sleep current 124 + * (atleast for 500us) from UFS rails (especially from VCCQ rail). 125 + * To avoid this situation, add 2ms delay before putting these UFS 126 + * rails in LPM mode. 127 + */ 128 + #define UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM (1 << 6) 129 + 130 + struct ufs_hba; 131 + void ufs_advertise_fixup_device(struct ufs_hba *hba); 132 + 133 + static struct ufs_dev_fix ufs_fixups[] = { 134 + /* UFS cards deviations table */ 135 + UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, 136 + UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM), 137 + UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ), 138 + UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, 139 + UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS), 140 + UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, 141 + UFS_DEVICE_NO_FASTAUTO), 142 + UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL, 143 + UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM), 144 + UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG", 145 + UFS_DEVICE_QUIRK_PA_TACTIVATE), 146 + UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG", 147 + UFS_DEVICE_QUIRK_PA_TACTIVATE), 148 + 149 + END_FIX 150 + }; 151 + #endif /* UFS_QUIRKS_H_ */
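
The fixup table above is matched against wManufacturerID and the Product Name string read from the device descriptor; ufs_advertise_fixup_device() in the ufshcd.c hunk below ORs the matching bits into hba->dev_quirks, and the rest of the driver then keys off those bits. A hedged sketch of how a quirk bit is consumed at runtime, here the 2 ms pre-LPM delay described above (the helper name is illustrative, not the exact mainline function):

        #include <linux/delay.h>

        /* Sketch: honour UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM before putting the
         * UFS rails into low power mode (illustrative helper, not mainline). */
        static void ufs_example_pre_lpm_delay(struct ufs_hba *hba)
        {
                if (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
                        usleep_range(2000, 2100);       /* quirk asks for ~2 ms */
        }
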
+19
drivers/scsi/ufs/ufshcd-pltfrm.c
···
 #include "ufshcd.h"
 #include "ufshcd-pltfrm.h"

+#define UFSHCD_DEFAULT_LANES_PER_DIRECTION	2
+
 static int ufshcd_parse_clock_info(struct ufs_hba *hba)
 {
 	int ret = 0;
···
 }
 EXPORT_SYMBOL_GPL(ufshcd_pltfrm_shutdown);

+static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
+{
+	struct device *dev = hba->dev;
+	int ret;
+
+	ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
+		&hba->lanes_per_direction);
+	if (ret) {
+		dev_dbg(hba->dev,
+			"%s: failed to read lanes-per-direction, ret=%d\n",
+			__func__, ret);
+		hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
+	}
+}
+
 /**
  * ufshcd_pltfrm_init - probe routine of the driver
  * @pdev: pointer to Platform device handle
···

 	pm_runtime_set_active(&pdev->dev);
 	pm_runtime_enable(&pdev->dev);
+
+	ufshcd_init_lanes_per_dir(hba);

 	err = ufshcd_init(hba, mmio_base, irq);
 	if (err) {
+724 -94
drivers/scsi/ufs/ufshcd.c
··· 39 39 40 40 #include <linux/async.h> 41 41 #include <linux/devfreq.h> 42 - 42 + #include <linux/nls.h> 43 + #include <linux/of.h> 43 44 #include "ufshcd.h" 45 + #include "ufs_quirks.h" 44 46 #include "unipro.h" 45 47 46 48 #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ ··· 133 131 /* UFSHCD UIC layer error flags */ 134 132 enum { 135 133 UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */ 136 - UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */ 137 - UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */ 138 - UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */ 134 + UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */ 135 + UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */ 136 + UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */ 137 + UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */ 138 + UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */ 139 139 }; 140 140 141 141 /* Interrupt configuration options */ ··· 197 193 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, 198 194 bool skip_ref_clk); 199 195 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on); 196 + static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused); 200 197 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba); 201 198 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba); 202 199 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba); ··· 236 231 } 237 232 } 238 233 234 + /* replace non-printable or non-ASCII characters with spaces */ 235 + static inline void ufshcd_remove_non_printable(char *val) 236 + { 237 + if (!val) 238 + return; 239 + 240 + if (*val < 0x20 || *val > 0x7e) 241 + *val = ' '; 242 + } 243 + 239 244 /* 240 245 * ufshcd_wait_for_register - wait for register value to change 241 246 * @hba - per-adapter interface ··· 254 239 * @val - wait condition 255 240 * @interval_us - polling interval in microsecs 256 241 * @timeout_ms - timeout in millisecs 242 + * @can_sleep - perform sleep or just spin 257 243 * 258 244 * Returns -ETIMEDOUT on error, zero on success 259 245 */ 260 - static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, 261 - u32 val, unsigned long interval_us, unsigned long timeout_ms) 246 + int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, 247 + u32 val, unsigned long interval_us, 248 + unsigned long timeout_ms, bool can_sleep) 262 249 { 263 250 int err = 0; 264 251 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); ··· 269 252 val = val & mask; 270 253 271 254 while ((ufshcd_readl(hba, reg) & mask) != val) { 272 - /* wakeup within 50us of expiry */ 273 - usleep_range(interval_us, interval_us + 50); 274 - 255 + if (can_sleep) 256 + usleep_range(interval_us, interval_us + 50); 257 + else 258 + udelay(interval_us); 275 259 if (time_after(jiffies, timeout)) { 276 260 if ((ufshcd_readl(hba, reg) & mask) != val) 277 261 err = -ETIMEDOUT; ··· 568 550 static inline int ufshcd_is_hba_active(struct ufs_hba *hba) 569 551 { 570 552 return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 
0 : 1; 553 + } 554 + 555 + u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba) 556 + { 557 + /* HCI version 1.0 and 1.1 supports UniPro 1.41 */ 558 + if ((hba->ufs_version == UFSHCI_VERSION_10) || 559 + (hba->ufs_version == UFSHCI_VERSION_11)) 560 + return UFS_UNIPRO_VER_1_41; 561 + else 562 + return UFS_UNIPRO_VER_1_6; 563 + } 564 + EXPORT_SYMBOL(ufshcd_get_local_unipro_ver); 565 + 566 + static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba) 567 + { 568 + /* 569 + * If both host and device support UniPro ver1.6 or later, PA layer 570 + * parameters tuning happens during link startup itself. 571 + * 572 + * We can manually tune PA layer parameters if either host or device 573 + * doesn't support UniPro ver 1.6 or later. But to keep manual tuning 574 + * logic simple, we will only do manual tuning if local unipro version 575 + * doesn't support ver1.6 or later. 576 + */ 577 + if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6) 578 + return true; 579 + else 580 + return false; 571 581 } 572 582 573 583 static void ufshcd_ungate_work(struct work_struct *work) ··· 1504 1458 */ 1505 1459 err = ufshcd_wait_for_register(hba, 1506 1460 REG_UTP_TRANSFER_REQ_DOOR_BELL, 1507 - mask, ~mask, 1000, 1000); 1461 + mask, ~mask, 1000, 1000, true); 1508 1462 1509 1463 return err; 1510 1464 } ··· 1903 1857 return ret; 1904 1858 } 1905 1859 1906 - /** 1907 - * ufshcd_query_descriptor - API function for sending descriptor requests 1908 - * hba: per-adapter instance 1909 - * opcode: attribute opcode 1910 - * idn: attribute idn to access 1911 - * index: index field 1912 - * selector: selector field 1913 - * desc_buf: the buffer that contains the descriptor 1914 - * buf_len: length parameter passed to the device 1915 - * 1916 - * Returns 0 for success, non-zero in case of failure. 1917 - * The buf_len parameter will contain, on return, the length parameter 1918 - * received on the response. 1919 - */ 1920 - static int ufshcd_query_descriptor(struct ufs_hba *hba, 1860 + static int __ufshcd_query_descriptor(struct ufs_hba *hba, 1921 1861 enum query_opcode opcode, enum desc_idn idn, u8 index, 1922 1862 u8 selector, u8 *desc_buf, int *buf_len) 1923 1863 { ··· 1968 1936 } 1969 1937 1970 1938 /** 1939 + * ufshcd_query_descriptor_retry - API function for sending descriptor 1940 + * requests 1941 + * hba: per-adapter instance 1942 + * opcode: attribute opcode 1943 + * idn: attribute idn to access 1944 + * index: index field 1945 + * selector: selector field 1946 + * desc_buf: the buffer that contains the descriptor 1947 + * buf_len: length parameter passed to the device 1948 + * 1949 + * Returns 0 for success, non-zero in case of failure. 1950 + * The buf_len parameter will contain, on return, the length parameter 1951 + * received on the response. 
1952 + */ 1953 + int ufshcd_query_descriptor_retry(struct ufs_hba *hba, 1954 + enum query_opcode opcode, enum desc_idn idn, u8 index, 1955 + u8 selector, u8 *desc_buf, int *buf_len) 1956 + { 1957 + int err; 1958 + int retries; 1959 + 1960 + for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { 1961 + err = __ufshcd_query_descriptor(hba, opcode, idn, index, 1962 + selector, desc_buf, buf_len); 1963 + if (!err || err == -EINVAL) 1964 + break; 1965 + } 1966 + 1967 + return err; 1968 + } 1969 + EXPORT_SYMBOL(ufshcd_query_descriptor_retry); 1970 + 1971 + /** 1971 1972 * ufshcd_read_desc_param - read the specified descriptor parameter 1972 1973 * @hba: Pointer to adapter instance 1973 1974 * @desc_id: descriptor idn value ··· 2042 1977 return -ENOMEM; 2043 1978 } 2044 1979 2045 - ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC, 2046 - desc_id, desc_index, 0, desc_buf, 2047 - &buff_len); 1980 + ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, 1981 + desc_id, desc_index, 0, desc_buf, 1982 + &buff_len); 2048 1983 2049 1984 if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) || 2050 1985 (desc_buf[QUERY_DESC_LENGTH_OFFSET] != ··· 2081 2016 { 2082 2017 return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size); 2083 2018 } 2019 + 2020 + int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size) 2021 + { 2022 + return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size); 2023 + } 2024 + EXPORT_SYMBOL(ufshcd_read_device_desc); 2025 + 2026 + /** 2027 + * ufshcd_read_string_desc - read string descriptor 2028 + * @hba: pointer to adapter instance 2029 + * @desc_index: descriptor index 2030 + * @buf: pointer to buffer where descriptor would be read 2031 + * @size: size of buf 2032 + * @ascii: if true convert from unicode to ascii characters 2033 + * 2034 + * Return 0 in case of success, non-zero otherwise 2035 + */ 2036 + int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf, 2037 + u32 size, bool ascii) 2038 + { 2039 + int err = 0; 2040 + 2041 + err = ufshcd_read_desc(hba, 2042 + QUERY_DESC_IDN_STRING, desc_index, buf, size); 2043 + 2044 + if (err) { 2045 + dev_err(hba->dev, "%s: reading String Desc failed after %d retries. 
err = %d\n", 2046 + __func__, QUERY_REQ_RETRIES, err); 2047 + goto out; 2048 + } 2049 + 2050 + if (ascii) { 2051 + int desc_len; 2052 + int ascii_len; 2053 + int i; 2054 + char *buff_ascii; 2055 + 2056 + desc_len = buf[0]; 2057 + /* remove header and divide by 2 to move from UTF16 to UTF8 */ 2058 + ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1; 2059 + if (size < ascii_len + QUERY_DESC_HDR_SIZE) { 2060 + dev_err(hba->dev, "%s: buffer allocated size is too small\n", 2061 + __func__); 2062 + err = -ENOMEM; 2063 + goto out; 2064 + } 2065 + 2066 + buff_ascii = kmalloc(ascii_len, GFP_KERNEL); 2067 + if (!buff_ascii) { 2068 + err = -ENOMEM; 2069 + goto out_free_buff; 2070 + } 2071 + 2072 + /* 2073 + * the descriptor contains string in UTF16 format 2074 + * we need to convert to utf-8 so it can be displayed 2075 + */ 2076 + utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE], 2077 + desc_len - QUERY_DESC_HDR_SIZE, 2078 + UTF16_BIG_ENDIAN, buff_ascii, ascii_len); 2079 + 2080 + /* replace non-printable or non-ASCII characters with spaces */ 2081 + for (i = 0; i < ascii_len; i++) 2082 + ufshcd_remove_non_printable(&buff_ascii[i]); 2083 + 2084 + memset(buf + QUERY_DESC_HDR_SIZE, 0, 2085 + size - QUERY_DESC_HDR_SIZE); 2086 + memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len); 2087 + buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE; 2088 + out_free_buff: 2089 + kfree(buff_ascii); 2090 + } 2091 + out: 2092 + return err; 2093 + } 2094 + EXPORT_SYMBOL(ufshcd_read_string_desc); 2084 2095 2085 2096 /** 2086 2097 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter ··· 2955 2814 } 2956 2815 2957 2816 /** 2817 + * ufshcd_hba_stop - Send controller to reset state 2818 + * @hba: per adapter instance 2819 + * @can_sleep: perform sleep or just spin 2820 + */ 2821 + static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep) 2822 + { 2823 + int err; 2824 + 2825 + ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); 2826 + err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, 2827 + CONTROLLER_ENABLE, CONTROLLER_DISABLE, 2828 + 10, 1, can_sleep); 2829 + if (err) 2830 + dev_err(hba->dev, "%s: Controller disable failed\n", __func__); 2831 + } 2832 + 2833 + /** 2958 2834 * ufshcd_hba_enable - initialize the controller 2959 2835 * @hba: per adapter instance 2960 2836 * ··· 2991 2833 * development and testing of this driver. msleep can be changed to 2992 2834 * mdelay and retry count can be reduced based on the controller. 2993 2835 */ 2994 - if (!ufshcd_is_hba_active(hba)) { 2995 - 2836 + if (!ufshcd_is_hba_active(hba)) 2996 2837 /* change controller state to "reset state" */ 2997 - ufshcd_hba_stop(hba); 2998 - 2999 - /* 3000 - * This delay is based on the testing done with UFS host 3001 - * controller FPGA. The delay can be changed based on the 3002 - * host controller used. 
3003 - */ 3004 - msleep(5); 3005 - } 2838 + ufshcd_hba_stop(hba, true); 3006 2839 3007 2840 /* UniPro link is disabled at this point */ 3008 2841 ufshcd_set_link_off(hba); ··· 3514 3365 } 3515 3366 3516 3367 /** 3517 - * ufshcd_transfer_req_compl - handle SCSI and query command completion 3368 + * __ufshcd_transfer_req_compl - handle SCSI and query command completion 3518 3369 * @hba: per adapter instance 3370 + * @completed_reqs: requests to complete 3519 3371 */ 3520 - static void ufshcd_transfer_req_compl(struct ufs_hba *hba) 3372 + static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, 3373 + unsigned long completed_reqs) 3521 3374 { 3522 3375 struct ufshcd_lrb *lrbp; 3523 3376 struct scsi_cmnd *cmd; 3524 - unsigned long completed_reqs; 3525 - u32 tr_doorbell; 3526 3377 int result; 3527 3378 int index; 3528 - 3529 - /* Resetting interrupt aggregation counters first and reading the 3530 - * DOOR_BELL afterward allows us to handle all the completed requests. 3531 - * In order to prevent other interrupts starvation the DB is read once 3532 - * after reset. The down side of this solution is the possibility of 3533 - * false interrupt if device completes another request after resetting 3534 - * aggregation and before reading the DB. 3535 - */ 3536 - if (ufshcd_is_intr_aggr_allowed(hba)) 3537 - ufshcd_reset_intr_aggr(hba); 3538 - 3539 - tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); 3540 - completed_reqs = tr_doorbell ^ hba->outstanding_reqs; 3541 3379 3542 3380 for_each_set_bit(index, &completed_reqs, hba->nutrs) { 3543 3381 lrbp = &hba->lrb[index]; ··· 3552 3416 3553 3417 /* we might have free'd some tags above */ 3554 3418 wake_up(&hba->dev_cmd.tag_wq); 3419 + } 3420 + 3421 + /** 3422 + * ufshcd_transfer_req_compl - handle SCSI and query command completion 3423 + * @hba: per adapter instance 3424 + */ 3425 + static void ufshcd_transfer_req_compl(struct ufs_hba *hba) 3426 + { 3427 + unsigned long completed_reqs; 3428 + u32 tr_doorbell; 3429 + 3430 + /* Resetting interrupt aggregation counters first and reading the 3431 + * DOOR_BELL afterward allows us to handle all the completed requests. 3432 + * In order to prevent other interrupts starvation the DB is read once 3433 + * after reset. The down side of this solution is the possibility of 3434 + * false interrupt if device completes another request after resetting 3435 + * aggregation and before reading the DB. 
3436 + */ 3437 + if (ufshcd_is_intr_aggr_allowed(hba)) 3438 + ufshcd_reset_intr_aggr(hba); 3439 + 3440 + tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); 3441 + completed_reqs = tr_doorbell ^ hba->outstanding_reqs; 3442 + 3443 + __ufshcd_transfer_req_compl(hba, completed_reqs); 3555 3444 } 3556 3445 3557 3446 /** ··· 3791 3630 */ 3792 3631 static int ufshcd_urgent_bkops(struct ufs_hba *hba) 3793 3632 { 3794 - return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT); 3633 + return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl); 3795 3634 } 3796 3635 3797 3636 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status) 3798 3637 { 3799 3638 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 3800 3639 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status); 3640 + } 3641 + 3642 + static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba) 3643 + { 3644 + int err; 3645 + u32 curr_status = 0; 3646 + 3647 + if (hba->is_urgent_bkops_lvl_checked) 3648 + goto enable_auto_bkops; 3649 + 3650 + err = ufshcd_get_bkops_status(hba, &curr_status); 3651 + if (err) { 3652 + dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", 3653 + __func__, err); 3654 + goto out; 3655 + } 3656 + 3657 + /* 3658 + * We are seeing that some devices are raising the urgent bkops 3659 + * exception events even when BKOPS status doesn't indicate performace 3660 + * impacted or critical. Handle these device by determining their urgent 3661 + * bkops status at runtime. 3662 + */ 3663 + if (curr_status < BKOPS_STATUS_PERF_IMPACT) { 3664 + dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n", 3665 + __func__, curr_status); 3666 + /* update the current status as the urgent bkops level */ 3667 + hba->urgent_bkops_lvl = curr_status; 3668 + hba->is_urgent_bkops_lvl_checked = true; 3669 + } 3670 + 3671 + enable_auto_bkops: 3672 + err = ufshcd_enable_auto_bkops(hba); 3673 + out: 3674 + if (err < 0) 3675 + dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", 3676 + __func__, err); 3801 3677 } 3802 3678 3803 3679 /** ··· 3860 3662 } 3861 3663 3862 3664 status &= hba->ee_ctrl_mask; 3863 - if (status & MASK_EE_URGENT_BKOPS) { 3864 - err = ufshcd_urgent_bkops(hba); 3865 - if (err < 0) 3866 - dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", 3867 - __func__, err); 3868 - } 3665 + 3666 + if (status & MASK_EE_URGENT_BKOPS) 3667 + ufshcd_bkops_exception_event_handler(hba); 3668 + 3869 3669 out: 3870 3670 pm_runtime_put_sync(hba->dev); 3871 3671 return; 3672 + } 3673 + 3674 + /* Complete requests that have door-bell cleared */ 3675 + static void ufshcd_complete_requests(struct ufs_hba *hba) 3676 + { 3677 + ufshcd_transfer_req_compl(hba); 3678 + ufshcd_tmc_handler(hba); 3679 + } 3680 + 3681 + /** 3682 + * ufshcd_quirk_dl_nac_errors - This function checks if error handling is 3683 + * to recover from the DL NAC errors or not. 3684 + * @hba: per-adapter instance 3685 + * 3686 + * Returns true if error handling is required, false otherwise 3687 + */ 3688 + static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba) 3689 + { 3690 + unsigned long flags; 3691 + bool err_handling = true; 3692 + 3693 + spin_lock_irqsave(hba->host->host_lock, flags); 3694 + /* 3695 + * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only workaround the 3696 + * device fatal error and/or DL NAC & REPLAY timeout errors. 
3697 + */ 3698 + if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR)) 3699 + goto out; 3700 + 3701 + if ((hba->saved_err & DEVICE_FATAL_ERROR) || 3702 + ((hba->saved_err & UIC_ERROR) && 3703 + (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))) 3704 + goto out; 3705 + 3706 + if ((hba->saved_err & UIC_ERROR) && 3707 + (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) { 3708 + int err; 3709 + /* 3710 + * wait for 50ms to see if we can get any other errors or not. 3711 + */ 3712 + spin_unlock_irqrestore(hba->host->host_lock, flags); 3713 + msleep(50); 3714 + spin_lock_irqsave(hba->host->host_lock, flags); 3715 + 3716 + /* 3717 + * now check if we have got any other severe errors other than 3718 + * DL NAC error? 3719 + */ 3720 + if ((hba->saved_err & INT_FATAL_ERRORS) || 3721 + ((hba->saved_err & UIC_ERROR) && 3722 + (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))) 3723 + goto out; 3724 + 3725 + /* 3726 + * As DL NAC is the only error received so far, send out NOP 3727 + * command to confirm if link is still active or not. 3728 + * - If we don't get any response then do error recovery. 3729 + * - If we get response then clear the DL NAC error bit. 3730 + */ 3731 + 3732 + spin_unlock_irqrestore(hba->host->host_lock, flags); 3733 + err = ufshcd_verify_dev_init(hba); 3734 + spin_lock_irqsave(hba->host->host_lock, flags); 3735 + 3736 + if (err) 3737 + goto out; 3738 + 3739 + /* Link seems to be alive hence ignore the DL NAC errors */ 3740 + if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR) 3741 + hba->saved_err &= ~UIC_ERROR; 3742 + /* clear NAC error */ 3743 + hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; 3744 + if (!hba->saved_uic_err) { 3745 + err_handling = false; 3746 + goto out; 3747 + } 3748 + } 3749 + out: 3750 + spin_unlock_irqrestore(hba->host->host_lock, flags); 3751 + return err_handling; 3872 3752 } 3873 3753 3874 3754 /** ··· 3961 3685 u32 err_tm = 0; 3962 3686 int err = 0; 3963 3687 int tag; 3688 + bool needs_reset = false; 3964 3689 3965 3690 hba = container_of(work, struct ufs_hba, eh_work); 3966 3691 ··· 3969 3692 ufshcd_hold(hba, false); 3970 3693 3971 3694 spin_lock_irqsave(hba->host->host_lock, flags); 3972 - if (hba->ufshcd_state == UFSHCD_STATE_RESET) { 3973 - spin_unlock_irqrestore(hba->host->host_lock, flags); 3695 + if (hba->ufshcd_state == UFSHCD_STATE_RESET) 3974 3696 goto out; 3975 - } 3976 3697 3977 3698 hba->ufshcd_state = UFSHCD_STATE_RESET; 3978 3699 ufshcd_set_eh_in_progress(hba); 3979 3700 3980 3701 /* Complete requests that have door-bell cleared by h/w */ 3981 - ufshcd_transfer_req_compl(hba); 3982 - ufshcd_tmc_handler(hba); 3983 - spin_unlock_irqrestore(hba->host->host_lock, flags); 3702 + ufshcd_complete_requests(hba); 3984 3703 3704 + if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { 3705 + bool ret; 3706 + 3707 + spin_unlock_irqrestore(hba->host->host_lock, flags); 3708 + /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */ 3709 + ret = ufshcd_quirk_dl_nac_errors(hba); 3710 + spin_lock_irqsave(hba->host->host_lock, flags); 3711 + if (!ret) 3712 + goto skip_err_handling; 3713 + } 3714 + if ((hba->saved_err & INT_FATAL_ERRORS) || 3715 + ((hba->saved_err & UIC_ERROR) && 3716 + (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR | 3717 + UFSHCD_UIC_DL_NAC_RECEIVED_ERROR | 3718 + UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) 3719 + needs_reset = true; 3720 + 3721 + /* 3722 + * if host reset is required then skip clearing the pending 3723 + * transfers forcefully because they will 
automatically get 3724 + * cleared after link startup. 3725 + */ 3726 + if (needs_reset) 3727 + goto skip_pending_xfer_clear; 3728 + 3729 + /* release lock as clear command might sleep */ 3730 + spin_unlock_irqrestore(hba->host->host_lock, flags); 3985 3731 /* Clear pending transfer requests */ 3986 - for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) 3987 - if (ufshcd_clear_cmd(hba, tag)) 3988 - err_xfer |= 1 << tag; 3732 + for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) { 3733 + if (ufshcd_clear_cmd(hba, tag)) { 3734 + err_xfer = true; 3735 + goto lock_skip_pending_xfer_clear; 3736 + } 3737 + } 3989 3738 3990 3739 /* Clear pending task management requests */ 3991 - for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) 3992 - if (ufshcd_clear_tm_cmd(hba, tag)) 3993 - err_tm |= 1 << tag; 3740 + for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) { 3741 + if (ufshcd_clear_tm_cmd(hba, tag)) { 3742 + err_tm = true; 3743 + goto lock_skip_pending_xfer_clear; 3744 + } 3745 + } 3746 + 3747 + lock_skip_pending_xfer_clear: 3748 + spin_lock_irqsave(hba->host->host_lock, flags); 3994 3749 3995 3750 /* Complete the requests that are cleared by s/w */ 3996 - spin_lock_irqsave(hba->host->host_lock, flags); 3997 - ufshcd_transfer_req_compl(hba); 3998 - ufshcd_tmc_handler(hba); 3999 - spin_unlock_irqrestore(hba->host->host_lock, flags); 3751 + ufshcd_complete_requests(hba); 4000 3752 3753 + if (err_xfer || err_tm) 3754 + needs_reset = true; 3755 + 3756 + skip_pending_xfer_clear: 4001 3757 /* Fatal errors need reset */ 4002 - if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) || 4003 - ((hba->saved_err & UIC_ERROR) && 4004 - (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) { 3758 + if (needs_reset) { 3759 + unsigned long max_doorbells = (1UL << hba->nutrs) - 1; 3760 + 3761 + /* 3762 + * ufshcd_reset_and_restore() does the link reinitialization 3763 + * which will need atleast one empty doorbell slot to send the 3764 + * device management commands (NOP and query commands). 3765 + * If there is no slot empty at this moment then free up last 3766 + * slot forcefully. 
3767 + */ 3768 + if (hba->outstanding_reqs == max_doorbells) 3769 + __ufshcd_transfer_req_compl(hba, 3770 + (1UL << (hba->nutrs - 1))); 3771 + 3772 + spin_unlock_irqrestore(hba->host->host_lock, flags); 4005 3773 err = ufshcd_reset_and_restore(hba); 3774 + spin_lock_irqsave(hba->host->host_lock, flags); 4006 3775 if (err) { 4007 3776 dev_err(hba->dev, "%s: reset and restore failed\n", 4008 3777 __func__); ··· 4062 3739 hba->saved_err = 0; 4063 3740 hba->saved_uic_err = 0; 4064 3741 } 3742 + 3743 + skip_err_handling: 3744 + if (!needs_reset) { 3745 + hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; 3746 + if (hba->saved_err || hba->saved_uic_err) 3747 + dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x", 3748 + __func__, hba->saved_err, hba->saved_uic_err); 3749 + } 3750 + 4065 3751 ufshcd_clear_eh_in_progress(hba); 4066 3752 4067 3753 out: 3754 + spin_unlock_irqrestore(hba->host->host_lock, flags); 4068 3755 scsi_unblock_requests(hba->host); 4069 3756 ufshcd_release(hba); 4070 3757 pm_runtime_put_sync(hba->dev); ··· 4092 3759 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); 4093 3760 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) 4094 3761 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; 3762 + else if (hba->dev_quirks & 3763 + UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { 3764 + if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED) 3765 + hba->uic_error |= 3766 + UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; 3767 + else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT) 3768 + hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR; 3769 + } 4095 3770 4096 3771 /* UIC NL/TL/DME errors needs software retry */ 4097 3772 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER); ··· 4137 3796 } 4138 3797 4139 3798 if (queue_eh_work) { 3799 + /* 3800 + * update the transfer error masks to sticky bits, let's do this 3801 + * irrespective of current ufshcd_state. 3802 + */ 3803 + hba->saved_err |= hba->errors; 3804 + hba->saved_uic_err |= hba->uic_error; 3805 + 4140 3806 /* handle fatal errors only when link is functional */ 4141 3807 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) { 4142 3808 /* block commands from scsi mid-layer */ 4143 3809 scsi_block_requests(hba->host); 4144 - 4145 - /* transfer error masks to sticky bits */ 4146 - hba->saved_err |= hba->errors; 4147 - hba->saved_uic_err |= hba->uic_error; 4148 3810 4149 3811 hba->ufshcd_state = UFSHCD_STATE_ERROR; 4150 3812 schedule_work(&hba->eh_work); ··· 4241 3897 /* poll for max. 1 sec to clear door bell register by h/w */ 4242 3898 err = ufshcd_wait_for_register(hba, 4243 3899 REG_UTP_TASK_REQ_DOOR_BELL, 4244 - mask, 0, 1000, 1000); 3900 + mask, 0, 1000, 1000, true); 4245 3901 out: 4246 3902 return err; 4247 3903 } ··· 4523 4179 4524 4180 /* Reset the host controller */ 4525 4181 spin_lock_irqsave(hba->host->host_lock, flags); 4526 - ufshcd_hba_stop(hba); 4182 + ufshcd_hba_stop(hba, false); 4527 4183 spin_unlock_irqrestore(hba->host->host_lock, flags); 4528 4184 4529 4185 err = ufshcd_hba_enable(hba); ··· 4810 4466 return ret; 4811 4467 } 4812 4468 4469 + static int ufs_get_device_info(struct ufs_hba *hba, 4470 + struct ufs_device_info *card_data) 4471 + { 4472 + int err; 4473 + u8 model_index; 4474 + u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1] = {0}; 4475 + u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE]; 4476 + 4477 + err = ufshcd_read_device_desc(hba, desc_buf, 4478 + QUERY_DESC_DEVICE_MAX_SIZE); 4479 + if (err) { 4480 + dev_err(hba->dev, "%s: Failed reading Device Desc. 
err = %d\n", 4481 + __func__, err); 4482 + goto out; 4483 + } 4484 + 4485 + /* 4486 + * getting vendor (manufacturerID) and Bank Index in big endian 4487 + * format 4488 + */ 4489 + card_data->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | 4490 + desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1]; 4491 + 4492 + model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; 4493 + 4494 + err = ufshcd_read_string_desc(hba, model_index, str_desc_buf, 4495 + QUERY_DESC_STRING_MAX_SIZE, ASCII_STD); 4496 + if (err) { 4497 + dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n", 4498 + __func__, err); 4499 + goto out; 4500 + } 4501 + 4502 + str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0'; 4503 + strlcpy(card_data->model, (str_desc_buf + QUERY_DESC_HDR_SIZE), 4504 + min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET], 4505 + MAX_MODEL_LEN)); 4506 + 4507 + /* Null terminate the model string */ 4508 + card_data->model[MAX_MODEL_LEN] = '\0'; 4509 + 4510 + out: 4511 + return err; 4512 + } 4513 + 4514 + void ufs_advertise_fixup_device(struct ufs_hba *hba) 4515 + { 4516 + int err; 4517 + struct ufs_dev_fix *f; 4518 + struct ufs_device_info card_data; 4519 + 4520 + card_data.wmanufacturerid = 0; 4521 + 4522 + err = ufs_get_device_info(hba, &card_data); 4523 + if (err) { 4524 + dev_err(hba->dev, "%s: Failed getting device info. err = %d\n", 4525 + __func__, err); 4526 + return; 4527 + } 4528 + 4529 + for (f = ufs_fixups; f->quirk; f++) { 4530 + if (((f->card.wmanufacturerid == card_data.wmanufacturerid) || 4531 + (f->card.wmanufacturerid == UFS_ANY_VENDOR)) && 4532 + (STR_PRFX_EQUAL(f->card.model, card_data.model) || 4533 + !strcmp(f->card.model, UFS_ANY_MODEL))) 4534 + hba->dev_quirks |= f->quirk; 4535 + } 4536 + } 4537 + 4538 + /** 4539 + * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro 4540 + * @hba: per-adapter instance 4541 + * 4542 + * PA_TActivate parameter can be tuned manually if UniPro version is less than 4543 + * 1.61. PA_TActivate needs to be greater than or equal to peerM-PHY's 4544 + * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce 4545 + * the hibern8 exit latency. 4546 + * 4547 + * Returns zero on success, non-zero error value on failure. 4548 + */ 4549 + static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba) 4550 + { 4551 + int ret = 0; 4552 + u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate; 4553 + 4554 + ret = ufshcd_dme_peer_get(hba, 4555 + UIC_ARG_MIB_SEL( 4556 + RX_MIN_ACTIVATETIME_CAPABILITY, 4557 + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), 4558 + &peer_rx_min_activatetime); 4559 + if (ret) 4560 + goto out; 4561 + 4562 + /* make sure proper unit conversion is applied */ 4563 + tuned_pa_tactivate = 4564 + ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US) 4565 + / PA_TACTIVATE_TIME_UNIT_US); 4566 + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 4567 + tuned_pa_tactivate); 4568 + 4569 + out: 4570 + return ret; 4571 + } 4572 + 4573 + /** 4574 + * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro 4575 + * @hba: per-adapter instance 4576 + * 4577 + * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than 4578 + * 1.61. PA_Hibern8Time needs to be maximum of local M-PHY's 4579 + * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY. 4580 + * This optimal value can help reduce the hibern8 exit latency. 4581 + * 4582 + * Returns zero on success, non-zero error value on failure. 
4583 + */ 4584 + static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba) 4585 + { 4586 + int ret = 0; 4587 + u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0; 4588 + u32 max_hibern8_time, tuned_pa_hibern8time; 4589 + 4590 + ret = ufshcd_dme_get(hba, 4591 + UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY, 4592 + UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), 4593 + &local_tx_hibern8_time_cap); 4594 + if (ret) 4595 + goto out; 4596 + 4597 + ret = ufshcd_dme_peer_get(hba, 4598 + UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY, 4599 + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), 4600 + &peer_rx_hibern8_time_cap); 4601 + if (ret) 4602 + goto out; 4603 + 4604 + max_hibern8_time = max(local_tx_hibern8_time_cap, 4605 + peer_rx_hibern8_time_cap); 4606 + /* make sure proper unit conversion is applied */ 4607 + tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US) 4608 + / PA_HIBERN8_TIME_UNIT_US); 4609 + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 4610 + tuned_pa_hibern8time); 4611 + out: 4612 + return ret; 4613 + } 4614 + 4615 + static void ufshcd_tune_unipro_params(struct ufs_hba *hba) 4616 + { 4617 + if (ufshcd_is_unipro_pa_params_tuning_req(hba)) { 4618 + ufshcd_tune_pa_tactivate(hba); 4619 + ufshcd_tune_pa_hibern8time(hba); 4620 + } 4621 + 4622 + if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE) 4623 + /* set 1ms timeout for PA_TACTIVATE */ 4624 + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10); 4625 + } 4626 + 4813 4627 /** 4814 4628 * ufshcd_probe_hba - probe hba to detect device and initialize 4815 4629 * @hba: per-adapter instance ··· 4984 4482 4985 4483 ufshcd_init_pwr_info(hba); 4986 4484 4485 + /* set the default level for urgent bkops */ 4486 + hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT; 4487 + hba->is_urgent_bkops_lvl_checked = false; 4488 + 4987 4489 /* UniPro link is active now */ 4988 4490 ufshcd_set_link_active(hba); 4989 4491 ··· 4996 4490 goto out; 4997 4491 4998 4492 ret = ufshcd_complete_dev_init(hba); 4493 + if (ret) 4494 + goto out; 4495 + 4496 + ufs_advertise_fixup_device(hba); 4497 + ufshcd_tune_unipro_params(hba); 4498 + 4499 + ret = ufshcd_set_vccq_rail_unused(hba, 4500 + (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false); 4999 4501 if (ret) 5000 4502 goto out; 5001 4503 ··· 5081 4567 ufshcd_probe_hba(hba); 5082 4568 } 5083 4569 4570 + static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd) 4571 + { 4572 + unsigned long flags; 4573 + struct Scsi_Host *host; 4574 + struct ufs_hba *hba; 4575 + int index; 4576 + bool found = false; 4577 + 4578 + if (!scmd || !scmd->device || !scmd->device->host) 4579 + return BLK_EH_NOT_HANDLED; 4580 + 4581 + host = scmd->device->host; 4582 + hba = shost_priv(host); 4583 + if (!hba) 4584 + return BLK_EH_NOT_HANDLED; 4585 + 4586 + spin_lock_irqsave(host->host_lock, flags); 4587 + 4588 + for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) { 4589 + if (hba->lrb[index].cmd == scmd) { 4590 + found = true; 4591 + break; 4592 + } 4593 + } 4594 + 4595 + spin_unlock_irqrestore(host->host_lock, flags); 4596 + 4597 + /* 4598 + * Bypass SCSI error handling and reset the block layer timer if this 4599 + * SCSI command was not actually dispatched to UFS driver, otherwise 4600 + * let SCSI layer handle the error as usual. 4601 + */ 4602 + return found ? 
BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER; 4603 + } 4604 + 5084 4605 static struct scsi_host_template ufshcd_driver_template = { 5085 4606 .module = THIS_MODULE, 5086 4607 .name = UFSHCD, ··· 5128 4579 .eh_abort_handler = ufshcd_abort, 5129 4580 .eh_device_reset_handler = ufshcd_eh_device_reset_handler, 5130 4581 .eh_host_reset_handler = ufshcd_eh_host_reset_handler, 4582 + .eh_timed_out = ufshcd_eh_timed_out, 5131 4583 .this_id = -1, 5132 4584 .sg_tablesize = SG_ALL, 5133 4585 .cmd_per_lun = UFSHCD_CMD_PER_LUN, ··· 5157 4607 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, 5158 4608 struct ufs_vreg *vreg) 5159 4609 { 5160 - return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA); 4610 + if (!vreg) 4611 + return 0; 4612 + else if (vreg->unused) 4613 + return 0; 4614 + else 4615 + return ufshcd_config_vreg_load(hba->dev, vreg, 4616 + UFS_VREG_LPM_LOAD_UA); 5161 4617 } 5162 4618 5163 4619 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, 5164 4620 struct ufs_vreg *vreg) 5165 4621 { 5166 - return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); 4622 + if (!vreg) 4623 + return 0; 4624 + else if (vreg->unused) 4625 + return 0; 4626 + else 4627 + return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); 5167 4628 } 5168 4629 5169 4630 static int ufshcd_config_vreg(struct device *dev, ··· 5209 4648 { 5210 4649 int ret = 0; 5211 4650 5212 - if (!vreg || vreg->enabled) 4651 + if (!vreg) 4652 + goto out; 4653 + else if (vreg->enabled || vreg->unused) 5213 4654 goto out; 5214 4655 5215 4656 ret = ufshcd_config_vreg(dev, vreg, true); ··· 5231 4668 { 5232 4669 int ret = 0; 5233 4670 5234 - if (!vreg || !vreg->enabled) 4671 + if (!vreg) 4672 + goto out; 4673 + else if (!vreg->enabled || vreg->unused) 5235 4674 goto out; 5236 4675 5237 4676 ret = regulator_disable(vreg->reg); ··· 5337 4772 return ufshcd_get_vreg(hba->dev, info->vdd_hba); 5338 4773 5339 4774 return 0; 4775 + } 4776 + 4777 + static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused) 4778 + { 4779 + int ret = 0; 4780 + struct ufs_vreg_info *info = &hba->vreg_info; 4781 + 4782 + if (!info) 4783 + goto out; 4784 + else if (!info->vccq) 4785 + goto out; 4786 + 4787 + if (unused) { 4788 + /* shut off the rail here */ 4789 + ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false); 4790 + /* 4791 + * Mark this rail as no longer used, so it doesn't get enabled 4792 + * later by mistake 4793 + */ 4794 + if (!ret) 4795 + info->vccq->unused = true; 4796 + } else { 4797 + /* 4798 + * rail should have been already enabled hence just make sure 4799 + * that unused flag is cleared. 4800 + */ 4801 + info->vccq->unused = false; 4802 + } 4803 + out: 4804 + return ret; 5340 4805 } 5341 4806 5342 4807 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, ··· 5688 5093 (!check_for_bkops || (check_for_bkops && 5689 5094 !hba->auto_bkops_enabled))) { 5690 5095 /* 5096 + * Let's make sure that link is in low power mode, we are doing 5097 + * this currently by putting the link in Hibern8. Otherway to 5098 + * put the link in low power mode is to send the DME end point 5099 + * to device and then send the DME reset command to local 5100 + * unipro. But putting the link in hibern8 is much faster. 
5101 + */
5102 + ret = ufshcd_uic_hibern8_enter(hba);
5103 + if (ret)
5104 + goto out;
5105 + /*
5691 5106 * Change controller state to "reset state" which
5692 5107 * should also put the link in off/reset state
5693 5108 */
5694 - ufshcd_hba_stop(hba);
5109 + ufshcd_hba_stop(hba, true);
5695 5110 /*
5696 5111 * TODO: Check if we need any delay to make sure that
5697 5112 * controller is reset
··· 5715 5110
5716 5111 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
5717 5112 {
5113 + /*
5114 + * It seems some UFS devices may keep drawing more than sleep current
5115 + * (at least for 500us) from UFS rails (especially from VCCQ rail).
5116 + * To avoid this situation, add 2ms delay before putting these UFS
5117 + * rails in LPM mode.
5118 + */
5119 + if (!ufshcd_is_link_active(hba) &&
5120 + hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
5121 + usleep_range(2000, 2100);
5122 +
5718 5123 /*
5719 5124 * If UFS device is either in UFS_Sleep turn off VCC rail to save some
5720 5125 * power.
··· 6187 5572 scsi_remove_host(hba->host);
6188 5573 /* disable interrupts */
6189 5574 ufshcd_disable_intr(hba, hba->intr_mask);
6190 - ufshcd_hba_stop(hba);
5575 + ufshcd_hba_stop(hba, true);
6191 5576
6192 5577 scsi_host_put(hba->host);
··· 6451 5836 init_waitqueue_head(&hba->dev_cmd.tag_wq);
6452 5837
6453 5838 ufshcd_init_clk_gating(hba);
5839 +
5840 + /*
5841 + * In order to avoid any spurious interrupt immediately after
5842 + * registering UFS controller interrupt handler, clear any pending UFS
5843 + * interrupt status and disable all the UFS interrupts.
5844 + */
5845 + ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
5846 + REG_INTERRUPT_STATUS);
5847 + ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
5848 + /*
5849 + * Make sure that UFS interrupts are disabled and any pending interrupt
5850 + * status is cleared before registering UFS interrupt handler.
5851 + */
5852 + mb();
5853 +
6454 5854 /* IRQ registration */
6455 5855 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
6456 5856 if (err) {
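The ufshcd.c changes above hang most of the new behaviour (DL NAC recovery, the bkops workaround, the pre-LPM delay, the unused-VCCQ handling) off per-device quirk bits that ufs_advertise_fixup_device() collects into hba->dev_quirks by matching the manufacturer ID and model string from the device descriptor against a fixup table. A minimal stand-alone sketch of that matching rule follows; the table entries, quirk values and the UFS_ANY_* definitions are illustrative stand-ins, not the real contents of ufs_quirks.h.

/* Stand-alone sketch of the dev_quirks lookup; build with "gcc -o quirks quirks.c". */
#include <stdio.h>
#include <string.h>

#define UFS_ANY_VENDOR 0xFFFF          /* assumed wildcard values */
#define UFS_ANY_MODEL  "ANY_MODEL"

struct ufs_dev_fix {
	unsigned short wmanufacturerid;
	const char *model;
	unsigned int quirk;
};

/* Hypothetical fixup table; the real one lives in ufs_quirks.h. */
static const struct ufs_dev_fix ufs_fixups[] = {
	{ 0x1234,         "EXAMPLE-A",   0x01 },   /* one specific part */
	{ UFS_ANY_VENDOR, UFS_ANY_MODEL, 0x02 },   /* applies to every device */
	{ 0, NULL, 0 },                            /* terminator */
};

static unsigned int lookup_dev_quirks(unsigned short vendor, const char *model)
{
	const struct ufs_dev_fix *f;
	unsigned int quirks = 0;

	for (f = ufs_fixups; f->quirk; f++) {
		int vendor_match = (f->wmanufacturerid == vendor) ||
				   (f->wmanufacturerid == UFS_ANY_VENDOR);
		/* model match is treated as a prefix comparison, as STR_PRFX_EQUAL() does */
		int model_match = !strcmp(f->model, UFS_ANY_MODEL) ||
				  !strncmp(f->model, model, strlen(f->model));

		if (vendor_match && model_match)
			quirks |= f->quirk;	/* matching entries accumulate */
	}
	return quirks;
}

int main(void)
{
	/* "EXAMPLE-A1" matches both hypothetical entries, so both bits are set. */
	printf("dev_quirks = 0x%x\n", lookup_dev_quirks(0x1234, "EXAMPLE-A1"));
	return 0;
}

An entry therefore applies when its vendor field equals the device's wManufacturerID or is the wildcard, and its model is the wildcard or a prefix of the reported model string; every matching entry contributes its quirk bits.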
+31 -9
drivers/scsi/ufs/ufshcd.h
··· 54 54 #include <linux/clk.h> 55 55 #include <linux/completion.h> 56 56 #include <linux/regulator/consumer.h> 57 + #include "unipro.h" 57 58 58 59 #include <asm/irq.h> 59 60 #include <asm/byteorder.h> ··· 384 383 * @clk_list_head: UFS host controller clocks list node head 385 384 * @pwr_info: holds current power mode 386 385 * @max_pwr_info: keeps the device max valid pwm 386 + * @urgent_bkops_lvl: keeps track of urgent bkops level for device 387 + * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for 388 + * device is known or not. 387 389 */ 388 390 struct ufs_hba { 389 391 void __iomem *mmio_base; ··· 474 470 475 471 unsigned int quirks; /* Deviations from standard UFSHCI spec. */ 476 472 473 + /* Device deviations from standard UFS device spec. */ 474 + unsigned int dev_quirks; 475 + 477 476 wait_queue_head_t tm_wq; 478 477 wait_queue_head_t tm_tag_wq; 479 478 unsigned long tm_condition; ··· 516 509 517 510 bool wlun_dev_clr_ua; 518 511 512 + /* Number of lanes available (1 or 2) for Rx/Tx */ 513 + u32 lanes_per_direction; 519 514 struct ufs_pa_layer_attr pwr_info; 520 515 struct ufs_pwr_mode_info max_pwr_info; 521 516 ··· 542 533 struct devfreq *devfreq; 543 534 struct ufs_clk_scaling clk_scaling; 544 535 bool is_sys_suspended; 536 + 537 + enum bkops_status urgent_bkops_lvl; 538 + bool is_urgent_bkops_lvl_checked; 545 539 }; 546 540 547 541 /* Returns true if clocks can be gated. Otherwise false */ ··· 600 588 void ufshcd_dealloc_host(struct ufs_hba *); 601 589 int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int); 602 590 void ufshcd_remove(struct ufs_hba *); 603 - 604 - /** 605 - * ufshcd_hba_stop - Send controller to reset state 606 - * @hba: per adapter instance 607 - */ 608 - static inline void ufshcd_hba_stop(struct ufs_hba *hba) 609 - { 610 - ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); 611 - } 591 + int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, 592 + u32 val, unsigned long interval_us, 593 + unsigned long timeout_ms, bool can_sleep); 612 594 613 595 static inline void check_upiu_size(void) 614 596 { ··· 688 682 return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER); 689 683 } 690 684 685 + int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size); 686 + 687 + static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info) 688 + { 689 + return (pwr_info->pwr_rx == FAST_MODE || 690 + pwr_info->pwr_rx == FASTAUTO_MODE) && 691 + (pwr_info->pwr_tx == FAST_MODE || 692 + pwr_info->pwr_tx == FASTAUTO_MODE); 693 + } 694 + 695 + #define ASCII_STD true 696 + 697 + int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf, 698 + u32 size, bool ascii); 699 + 691 700 /* Expose Query-Request API */ 692 701 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, 693 702 enum flag_idn idn, bool *flag_res); 694 703 int ufshcd_hold(struct ufs_hba *hba, bool async); 695 704 void ufshcd_release(struct ufs_hba *hba); 705 + u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba); 696 706 697 707 /* Wrapper functions for safely calling variant operations */ 698 708 static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
+4
drivers/scsi/ufs/ufshci.h
··· 92 92 UFSHCI_VERSION_10 = 0x00010000, /* 1.0 */ 93 93 UFSHCI_VERSION_11 = 0x00010100, /* 1.1 */ 94 94 UFSHCI_VERSION_20 = 0x00000200, /* 2.0 */ 95 + UFSHCI_VERSION_21 = 0x00000210, /* 2.1 */ 95 96 }; 96 97 97 98 /* ··· 171 170 #define UIC_DATA_LINK_LAYER_ERROR UFS_BIT(31) 172 171 #define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF 173 172 #define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000 173 + #define UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED 0x0001 174 + #define UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT 0x0002 174 175 175 176 /* UECN - Host UIC Error Code Network Layer 40h */ 176 177 #define UIC_NETWORK_LAYER_ERROR UFS_BIT(31) ··· 212 209 213 210 /* GenSelectorIndex calculation macros for M-PHY attributes */ 214 211 #define UIC_ARG_MPHY_TX_GEN_SEL_INDEX(lane) (lane) 212 + #define UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane) (PA_MAXDATALANES + (lane)) 215 213 216 214 #define UIC_ARG_MIB_SEL(attr, sel) ((((attr) & 0xFFFF) << 16) |\ 217 215 ((sel) & 0xFFFF))
+22
drivers/scsi/ufs/unipro.h
··· 15 15 /* 16 16 * M-TX Configuration Attributes 17 17 */ 18 + #define TX_HIBERN8TIME_CAPABILITY 0x000F 18 19 #define TX_MODE 0x0021 19 20 #define TX_HSRATE_SERIES 0x0022 20 21 #define TX_HSGEAR 0x0023 ··· 49 48 #define RX_ENTER_HIBERN8 0x00A7 50 49 #define RX_BYPASS_8B10B_ENABLE 0x00A8 51 50 #define RX_TERMINATION_FORCE_ENABLE 0x0089 51 + #define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F 52 + #define RX_HIBERN8TIME_CAPABILITY 0x0092 52 53 53 54 #define is_mphy_tx_attr(attr) (attr < RX_MODE) 55 + #define RX_MIN_ACTIVATETIME_UNIT_US 100 56 + #define HIBERN8TIME_UNIT_US 100 54 57 /* 55 58 * PHY Adpater attributes 56 59 */ ··· 75 70 #define PA_MAXRXSPEEDFAST 0x1541 76 71 #define PA_MAXRXSPEEDSLOW 0x1542 77 72 #define PA_TXLINKSTARTUPHS 0x1544 73 + #define PA_LOCAL_TX_LCC_ENABLE 0x155E 78 74 #define PA_TXSPEEDFAST 0x1565 79 75 #define PA_TXSPEEDSLOW 0x1566 80 76 #define PA_REMOTEVERINFO 0x15A0 ··· 116 110 #define PA_STALLNOCONFIGTIME 0x15A3 117 111 #define PA_SAVECONFIGTIME 0x15A4 118 112 113 + #define PA_TACTIVATE_TIME_UNIT_US 10 114 + #define PA_HIBERN8_TIME_UNIT_US 100 115 + 116 + /* PHY Adapter Protocol Constants */ 117 + #define PA_MAXDATALANES 4 118 + 119 119 /* PA power modes */ 120 120 enum { 121 121 FAST_MODE = 1, ··· 153 141 UFS_HS_G1, /* HS Gear 1 (default for reset) */ 154 142 UFS_HS_G2, /* HS Gear 2 */ 155 143 UFS_HS_G3, /* HS Gear 3 */ 144 + }; 145 + 146 + enum ufs_unipro_ver { 147 + UFS_UNIPRO_VER_RESERVED = 0, 148 + UFS_UNIPRO_VER_1_40 = 1, /* UniPro version 1.40 */ 149 + UFS_UNIPRO_VER_1_41 = 2, /* UniPro version 1.41 */ 150 + UFS_UNIPRO_VER_1_6 = 3, /* UniPro version 1.6 */ 151 + UFS_UNIPRO_VER_MAX = 4, /* UniPro unsupported version */ 152 + /* UniPro version field mask in PA_LOCALVERINFO */ 153 + UFS_UNIPRO_VER_MASK = 0xF, 156 154 }; 157 155 158 156 /*
+3 -12
include/scsi/scsi_transport_fc.h
··· 28 28 #define SCSI_TRANSPORT_FC_H 29 29 30 30 #include <linux/sched.h> 31 + #include <asm/unaligned.h> 31 32 #include <scsi/scsi.h> 32 33 #include <scsi/scsi_netlink.h> 33 34 ··· 798 797 799 798 static inline u64 wwn_to_u64(u8 *wwn) 800 799 { 801 - return (u64)wwn[0] << 56 | (u64)wwn[1] << 48 | 802 - (u64)wwn[2] << 40 | (u64)wwn[3] << 32 | 803 - (u64)wwn[4] << 24 | (u64)wwn[5] << 16 | 804 - (u64)wwn[6] << 8 | (u64)wwn[7]; 800 + return get_unaligned_be64(wwn); 805 801 } 806 802 807 803 static inline void u64_to_wwn(u64 inm, u8 *wwn) 808 804 { 809 - wwn[0] = (inm >> 56) & 0xff; 810 - wwn[1] = (inm >> 48) & 0xff; 811 - wwn[2] = (inm >> 40) & 0xff; 812 - wwn[3] = (inm >> 32) & 0xff; 813 - wwn[4] = (inm >> 24) & 0xff; 814 - wwn[5] = (inm >> 16) & 0xff; 815 - wwn[6] = (inm >> 8) & 0xff; 816 - wwn[7] = inm & 0xff; 805 + put_unaligned_be64(inm, wwn); 817 806 } 818 807 819 808 /**