Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mmc-v5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc

Pull MMC updates from Ulf Hansson:
"MMC core:
- Add support for host software queue for (e)MMC/SD
- Throttle polling rate for CMD6
- Update CMD13 busy condition check for CMD6 commands
- Improve busy detect polling for erase/trim/discard/HPI
- Fixup support for HW busy detection for HPI commands
- Re-work and improve support for eMMC sanitize commands

MMC host:
- mmci:
* Add support for sdmmc variant revision 2.0
- mmci_sdmmc:
* Improve support for busyend detection
* Fixup support for signal voltage switch
* Add support for tuning with delay block
- mtk-sd:
* Fix another SDIO irq issue
- sdhci:
* Disable native card detect when a GPIO-based card detect exists
* Add option to defer request completion
- sdhci_am654:
* Add support to set a tap value per speed mode
- sdhci-esdhc-imx:
* Add support for i.MX8MM based variant
* Fixup support for standard tuning on i.MX8 usdhc
* Optimize for strobe/clock dll settings
* Fixup support for system and runtime suspend/resume
- sdhci-iproc:
* Update regulator/bus-voltage management for bcm2711
- sdhci-msm:
* Prevent clock gating with PWRSAVE_DLL on broken variants
* Fix management of CQE during SDHCI reset
- sdhci-of-arasan:
* Add support for auto tuning on ZynqMP based platforms
- sdhci-omap:
* Add support for system suspend/resume
- sdhci-sprd:
* Add support for HW busy detection
* Enable support for host software queue
- sdhci-tegra:
* Add support for HW busy detection
- tmio/renesas_sdhi:
* Enforce retune after runtime suspend
- renesas_sdhi:
* Use manual tap correction for HS400 on some variants
* Add support for manual correction of tap values for tunings"

* tag 'mmc-v5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc: (86 commits)
mmc: cavium-octeon: remove nonsense variable coercion
mmc: mediatek: fix SDIO irq issue
mmc: mmci_sdmmc: Fix clear busyd0end irq flag
dt-bindings: mmc: Fix node name in an example
mmc: core: Re-work the code for eMMC sanitize
mmc: sdhci: use FIELD_GET for preset value bit masks
mmc: sdhci-of-at91: Display clock changes for debug purpose only
mmc: sdhci: iproc: Add custom set_power() callback for bcm2711
mmc: sdhci: am654: Use sdhci_set_power_and_voltage()
mmc: sdhci: at91: Use sdhci_set_power_and_voltage()
mmc: sdhci: milbeaut: Use sdhci_set_power_and_voltage()
mmc: sdhci: arasan: Use sdhci_set_power_and_voltage()
mmc: sdhci: Introduce sdhci_set_power_and_bus_voltage()
mmc: vub300: Use scnprintf() for avoiding potential buffer overflow
dt-bindings: mmc: synopsys-dw-mshc: fix clock-freq-min-max in example
sdhci: tegra: Enable MMC_CAP_WAIT_WHILE_BUSY host capability
sdhci: tegra: Implement Tegra specific set_timeout callback
mmc: sdhci-omap: Add Support for Suspend/Resume
mmc: renesas_sdhi: simplify execute_tuning
mmc: renesas_sdhi: Use BITS_PER_LONG helper
...

+1826 -571
+5
Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
··· 43 43 This property allows user to change the tuning step to more than one delay 44 44 cells which is useful for some special boards or cards when the default 45 45 tuning step can't find the proper delay window within limited tuning retries. 46 + - fsl,strobe-dll-delay-target: Specify the strobe dll control slave delay target. 47 + This delay target programming host controller loopback read clock, and this 48 + property allows user to change the delay target for the strobe input read clock. 49 + If not use this property, driver default set the delay target to value 7. 50 + Only eMMC HS400 mode need to take care of this property. 46 51 47 52 Examples: 48 53
+1 -1
Documentation/devicetree/bindings/mmc/mmc-controller.yaml
··· 351 351 352 352 examples: 353 353 - | 354 - sdhci@ab000000 { 354 + mmc@ab000000 { 355 355 compatible = "sdhci"; 356 356 reg = <0xab000000 0x200>; 357 357 interrupts = <23>;
+2
Documentation/devicetree/bindings/mmc/mmci.txt
··· 28 28 - st,sig-pin-fbclk : feedback clock signal pin used. 29 29 30 30 specific for sdmmc variant: 31 + - reg : a second base register may be defined if a delay 32 + block is present and used for tuning. 31 33 - st,sig-dir : signal direction polarity used for cmd, dat0 dat123. 32 34 - st,neg-edge : data & command phase relation, generated on 33 35 sd clock falling edge.
+19 -2
Documentation/devicetree/bindings/mmc/sdhci-am654.txt
··· 18 18 - clocks: Handles to the clock inputs. 19 19 - clock-names: Tuple including "clk_xin" and "clk_ahb" 20 20 - interrupts: Interrupt specifiers 21 - - ti,otap-del-sel: Output Tap Delay select 21 + Output tap delay for each speed mode: 22 + - ti,otap-del-sel-legacy 23 + - ti,otap-del-sel-mmc-hs 24 + - ti,otap-del-sel-sd-hs 25 + - ti,otap-del-sel-sdr12 26 + - ti,otap-del-sel-sdr25 27 + - ti,otap-del-sel-sdr50 28 + - ti,otap-del-sel-sdr104 29 + - ti,otap-del-sel-ddr50 30 + - ti,otap-del-sel-ddr52 31 + - ti,otap-del-sel-hs200 32 + - ti,otap-del-sel-hs400 33 + These bindings must be provided otherwise the driver will disable the 34 + corresponding speed mode (i.e. all nodes must provide at least -legacy) 22 35 23 36 Optional Properties (Required for ti,am654-sdhci-5.1 and ti,j721e-sdhci-8bit): 24 37 - ti,trm-icp: DLL trim select ··· 51 38 interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>; 52 39 sdhci-caps-mask = <0x80000007 0x0>; 53 40 mmc-ddr-1_8v; 54 - ti,otap-del-sel = <0x2>; 41 + ti,otap-del-sel-legacy = <0x0>; 42 + ti,otap-del-sel-mmc-hs = <0x0>; 43 + ti,otap-del-sel-ddr52 = <0x5>; 44 + ti,otap-del-sel-hs200 = <0x5>; 45 + ti,otap-del-sel-hs400 = <0x0>; 55 46 ti,trm-icp = <0x8>; 56 47 };
+7 -1
Documentation/devicetree/bindings/mmc/sdhci-msm.txt
··· 26 26 27 27 - reg: Base address and length of the register in the following order: 28 28 - Host controller register map (required) 29 - - SD Core register map (required for msm-v4 and below) 29 + - SD Core register map (required for controllers earlier than msm-v5) 30 + - CQE register map (Optional, CQE support is present on SDHC instance meant 31 + for eMMC and version v4.2 and above) 32 + - reg-names: When CQE register map is supplied, below reg-names are required 33 + - "hc" for Host controller register map 34 + - "core" for SD core register map 35 + - "cqhci" for CQE register map 30 36 - interrupts: Should contain an interrupt-specifiers for the interrupts: 31 37 - Host controller interrupt (required) 32 38 - pinctrl-names: Should contain only one value - "default".
+1 -1
Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.yaml
··· 62 62 cap-mmc-highspeed; 63 63 cap-sd-highspeed; 64 64 card-detect-delay = <200>; 65 - clock-freq-min-max = <400000 200000000>; 65 + max-frequency = <200000000>; 66 66 clock-frequency = <400000000>; 67 67 data-addr = <0x200>; 68 68 fifo-depth = <0x80>;
+2
drivers/firmware/xilinx/zynqmp.c
··· 512 512 static inline int zynqmp_is_valid_ioctl(u32 ioctl_id) 513 513 { 514 514 switch (ioctl_id) { 515 + case IOCTL_SD_DLL_RESET: 516 + case IOCTL_SET_SD_TAPDELAY: 515 517 case IOCTL_SET_PLL_FRAC_MODE: 516 518 case IOCTL_GET_PLL_FRAC_MODE: 517 519 case IOCTL_SET_PLL_FRAC_DATA:
+67 -58
drivers/mmc/core/block.c
··· 70 70 * ample. 71 71 */ 72 72 #define MMC_BLK_TIMEOUT_MS (10 * 1000) 73 - #define MMC_SANITIZE_REQ_TIMEOUT 240000 74 73 #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) 75 74 #define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8) 76 75 ··· 167 168 168 169 static inline int mmc_blk_part_switch(struct mmc_card *card, 169 170 unsigned int part_type); 171 + static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, 172 + struct mmc_card *card, 173 + int disable_multi, 174 + struct mmc_queue *mq); 175 + static void mmc_blk_hsq_req_done(struct mmc_request *mrq); 170 176 171 177 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) 172 178 { ··· 412 408 return 0; 413 409 } 414 410 415 - static int ioctl_do_sanitize(struct mmc_card *card) 416 - { 417 - int err; 418 - 419 - if (!mmc_can_sanitize(card)) { 420 - pr_warn("%s: %s - SANITIZE is not supported\n", 421 - mmc_hostname(card->host), __func__); 422 - err = -EOPNOTSUPP; 423 - goto out; 424 - } 425 - 426 - pr_debug("%s: %s - SANITIZE IN PROGRESS...\n", 427 - mmc_hostname(card->host), __func__); 428 - 429 - err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 430 - EXT_CSD_SANITIZE_START, 1, 431 - MMC_SANITIZE_REQ_TIMEOUT); 432 - 433 - if (err) 434 - pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n", 435 - mmc_hostname(card->host), __func__, err); 436 - 437 - pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host), 438 - __func__); 439 - out: 440 - return err; 441 - } 442 - 443 - static inline bool mmc_blk_in_tran_state(u32 status) 444 - { 445 - /* 446 - * Some cards mishandle the status bits, so make sure to check both the 447 - * busy indication and the card state. 
448 - */ 449 - return status & R1_READY_FOR_DATA && 450 - (R1_CURRENT_STATE(status) == R1_STATE_TRAN); 451 - } 452 - 453 411 static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms, 454 412 u32 *resp_errs) 455 413 { ··· 443 477 __func__, status); 444 478 return -ETIMEDOUT; 445 479 } 446 - 447 - /* 448 - * Some cards mishandle the status bits, 449 - * so make sure to check both the busy 450 - * indication and the card state. 451 - */ 452 - } while (!mmc_blk_in_tran_state(status)); 480 + } while (!mmc_ready_for_data(status)); 453 481 454 482 return err; 455 483 } ··· 540 580 } 541 581 542 582 if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) && 543 - (cmd.opcode == MMC_SWITCH)) { 544 - err = ioctl_do_sanitize(card); 545 - 546 - if (err) 547 - pr_err("%s: ioctl_do_sanitize() failed. err = %d", 548 - __func__, err); 549 - 550 - return err; 551 - } 583 + (cmd.opcode == MMC_SWITCH)) 584 + return mmc_sanitize(card); 552 585 553 586 mmc_wait_for_req(card->host, &mrq); 554 587 ··· 1485 1532 return mmc_blk_cqe_start_req(mq->card->host, mrq); 1486 1533 } 1487 1534 1535 + static int mmc_blk_hsq_issue_rw_rq(struct mmc_queue *mq, struct request *req) 1536 + { 1537 + struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 1538 + struct mmc_host *host = mq->card->host; 1539 + int err; 1540 + 1541 + mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq); 1542 + mqrq->brq.mrq.done = mmc_blk_hsq_req_done; 1543 + mmc_pre_req(host, &mqrq->brq.mrq); 1544 + 1545 + err = mmc_cqe_start_req(host, &mqrq->brq.mrq); 1546 + if (err) 1547 + mmc_post_req(host, &mqrq->brq.mrq, err); 1548 + 1549 + return err; 1550 + } 1551 + 1488 1552 static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req) 1489 1553 { 1490 1554 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 1555 + struct mmc_host *host = mq->card->host; 1556 + 1557 + if (host->hsq_enabled) 1558 + return mmc_blk_hsq_issue_rw_rq(mq, req); 1491 1559 1492 1560 mmc_blk_data_prep(mq, mqrq, 0, NULL, 
NULL); 1493 1561 ··· 1640 1666 goto error_exit; 1641 1667 1642 1668 if (!mmc_host_is_spi(host) && 1643 - !mmc_blk_in_tran_state(status)) { 1669 + !mmc_ready_for_data(status)) { 1644 1670 err = mmc_blk_fix_state(card, req); 1645 1671 if (err) 1646 1672 goto error_exit; ··· 1700 1726 return brq->cmd.resp[0] & CMD_ERRORS || 1701 1727 brq->stop.resp[0] & stop_err_bits || 1702 1728 status & stop_err_bits || 1703 - (rq_data_dir(req) == WRITE && !mmc_blk_in_tran_state(status)); 1729 + (rq_data_dir(req) == WRITE && !mmc_ready_for_data(status)); 1704 1730 } 1705 1731 1706 1732 static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq) ··· 1762 1788 1763 1789 /* Try to get back to "tran" state */ 1764 1790 if (!mmc_host_is_spi(mq->card->host) && 1765 - (err || !mmc_blk_in_tran_state(status))) 1791 + (err || !mmc_ready_for_data(status))) 1766 1792 err = mmc_blk_fix_state(mq->card, req); 1767 1793 1768 1794 /* ··· 1892 1918 { 1893 1919 if (mmc_blk_urgent_bkops_needed(mq, mqrq)) 1894 1920 mmc_run_bkops(mq->card); 1921 + } 1922 + 1923 + static void mmc_blk_hsq_req_done(struct mmc_request *mrq) 1924 + { 1925 + struct mmc_queue_req *mqrq = 1926 + container_of(mrq, struct mmc_queue_req, brq.mrq); 1927 + struct request *req = mmc_queue_req_to_req(mqrq); 1928 + struct request_queue *q = req->q; 1929 + struct mmc_queue *mq = q->queuedata; 1930 + struct mmc_host *host = mq->card->host; 1931 + unsigned long flags; 1932 + 1933 + if (mmc_blk_rq_error(&mqrq->brq) || 1934 + mmc_blk_urgent_bkops_needed(mq, mqrq)) { 1935 + spin_lock_irqsave(&mq->lock, flags); 1936 + mq->recovery_needed = true; 1937 + mq->recovery_req = req; 1938 + spin_unlock_irqrestore(&mq->lock, flags); 1939 + 1940 + host->cqe_ops->cqe_recovery_start(host); 1941 + 1942 + schedule_work(&mq->recovery_work); 1943 + return; 1944 + } 1945 + 1946 + mmc_blk_rw_reset_success(mq, req); 1947 + 1948 + /* 1949 + * Block layer timeouts race with completions which means the normal 1950 + * completion path cannot be used during 
recovery. 1951 + */ 1952 + if (mq->in_recovery) 1953 + mmc_blk_cqe_complete_rq(mq, req); 1954 + else 1955 + blk_mq_complete_request(req); 1895 1956 } 1896 1957 1897 1958 void mmc_blk_mq_complete(struct request *req)
+2 -52
drivers/mmc/core/core.c
··· 403 403 404 404 cmd = mrq->cmd; 405 405 406 - /* 407 - * If host has timed out waiting for the sanitize 408 - * to complete, card might be still in programming state 409 - * so let's try to bring the card out of programming 410 - * state. 411 - */ 412 - if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) { 413 - if (!mmc_interrupt_hpi(host->card)) { 414 - pr_warn("%s: %s: Interrupted sanitize\n", 415 - mmc_hostname(host), __func__); 416 - cmd->error = 0; 417 - break; 418 - } else { 419 - pr_err("%s: %s: Failed to interrupt sanitize\n", 420 - mmc_hostname(host), __func__); 421 - } 422 - } 423 406 if (!cmd->error || !cmd->retries || 424 407 mmc_card_removed(host->card)) 425 408 break; ··· 1641 1658 struct mmc_command cmd = {}; 1642 1659 unsigned int qty = 0, busy_timeout = 0; 1643 1660 bool use_r1b_resp = false; 1644 - unsigned long timeout; 1645 - int loop_udelay=64, udelay_max=32768; 1646 1661 int err; 1647 1662 1648 1663 mmc_retune_hold(card->host); ··· 1744 1763 if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) 1745 1764 goto out; 1746 1765 1747 - timeout = jiffies + msecs_to_jiffies(busy_timeout); 1748 - do { 1749 - memset(&cmd, 0, sizeof(struct mmc_command)); 1750 - cmd.opcode = MMC_SEND_STATUS; 1751 - cmd.arg = card->rca << 16; 1752 - cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 1753 - /* Do not retry else we can't see errors */ 1754 - err = mmc_wait_for_cmd(card->host, &cmd, 0); 1755 - if (err || R1_STATUS(cmd.resp[0])) { 1756 - pr_err("error %d requesting status %#x\n", 1757 - err, cmd.resp[0]); 1758 - err = -EIO; 1759 - goto out; 1760 - } 1761 - 1762 - /* Timeout if the device never becomes ready for data and 1763 - * never leaves the program state. 1764 - */ 1765 - if (time_after(jiffies, timeout)) { 1766 - pr_err("%s: Card stuck in programming state! 
%s\n", 1767 - mmc_hostname(card->host), __func__); 1768 - err = -EIO; 1769 - goto out; 1770 - } 1771 - if ((cmd.resp[0] & R1_READY_FOR_DATA) && 1772 - R1_CURRENT_STATE(cmd.resp[0]) != R1_STATE_PRG) 1773 - break; 1774 - 1775 - usleep_range(loop_udelay, loop_udelay*2); 1776 - if (loop_udelay < udelay_max) 1777 - loop_udelay *= 2; 1778 - } while (1); 1766 + /* Let's poll to find out when the erase operation completes. */ 1767 + err = mmc_poll_for_busy(card, busy_timeout, MMC_BUSY_ERASE); 1779 1768 1780 1769 out: 1781 1770 mmc_retune_release(card->host); ··· 1908 1957 return 1; 1909 1958 return 0; 1910 1959 } 1911 - EXPORT_SYMBOL(mmc_can_sanitize); 1912 1960 1913 1961 int mmc_can_secure_erase_trim(struct mmc_card *card) 1914 1962 {
+30 -26
drivers/mmc/core/mmc.c
··· 1055 1055 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1056 1056 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, 1057 1057 card->ext_csd.generic_cmd6_time, MMC_TIMING_MMC_HS, 1058 - true, true, true); 1058 + true, true); 1059 1059 if (err) 1060 1060 pr_warn("%s: switch to high-speed failed, err:%d\n", 1061 1061 mmc_hostname(card->host), err); ··· 1087 1087 ext_csd_bits, 1088 1088 card->ext_csd.generic_cmd6_time, 1089 1089 MMC_TIMING_MMC_DDR52, 1090 - true, true, true); 1090 + true, true); 1091 1091 if (err) { 1092 1092 pr_err("%s: switch to bus width %d ddr failed\n", 1093 1093 mmc_hostname(host), 1 << bus_width); ··· 1155 1155 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1156 1156 EXT_CSD_HS_TIMING, val, 1157 1157 card->ext_csd.generic_cmd6_time, 0, 1158 - true, false, true); 1158 + false, true); 1159 1159 if (err) { 1160 1160 pr_err("%s: switch to high-speed from hs200 failed, err:%d\n", 1161 1161 mmc_hostname(host), err); ··· 1173 1173 max_dtr = card->ext_csd.hs_max_dtr; 1174 1174 mmc_set_clock(host, max_dtr); 1175 1175 1176 - err = mmc_switch_status(card); 1176 + err = mmc_switch_status(card, true); 1177 1177 if (err) 1178 1178 goto out_err; 1179 1179 ··· 1197 1197 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1198 1198 EXT_CSD_HS_TIMING, val, 1199 1199 card->ext_csd.generic_cmd6_time, 0, 1200 - true, false, true); 1200 + false, true); 1201 1201 if (err) { 1202 1202 pr_err("%s: switch to hs400 failed, err:%d\n", 1203 1203 mmc_hostname(host), err); ··· 1211 1211 if (host->ops->hs400_complete) 1212 1212 host->ops->hs400_complete(host); 1213 1213 1214 - err = mmc_switch_status(card); 1214 + err = mmc_switch_status(card, true); 1215 1215 if (err) 1216 1216 goto out_err; 1217 1217 ··· 1243 1243 val = EXT_CSD_TIMING_HS; 1244 1244 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, 1245 1245 val, card->ext_csd.generic_cmd6_time, 0, 1246 - true, false, true); 1246 + false, true); 1247 1247 if (err) 1248 1248 goto out_err; 1249 1249 1250 1250 
mmc_set_timing(host, MMC_TIMING_MMC_DDR52); 1251 1251 1252 - err = mmc_switch_status(card); 1252 + err = mmc_switch_status(card, true); 1253 1253 if (err) 1254 1254 goto out_err; 1255 1255 1256 1256 /* Switch HS DDR to HS */ 1257 1257 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH, 1258 1258 EXT_CSD_BUS_WIDTH_8, card->ext_csd.generic_cmd6_time, 1259 - 0, true, false, true); 1259 + 0, false, true); 1260 1260 if (err) 1261 1261 goto out_err; 1262 1262 ··· 1265 1265 if (host->ops->hs400_downgrade) 1266 1266 host->ops->hs400_downgrade(host); 1267 1267 1268 - err = mmc_switch_status(card); 1268 + err = mmc_switch_status(card, true); 1269 1269 if (err) 1270 1270 goto out_err; 1271 1271 ··· 1274 1274 card->drive_strength << EXT_CSD_DRV_STR_SHIFT; 1275 1275 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, 1276 1276 val, card->ext_csd.generic_cmd6_time, 0, 1277 - true, false, true); 1277 + false, true); 1278 1278 if (err) 1279 1279 goto out_err; 1280 1280 ··· 1285 1285 * failed. If there really is a problem, we would expect tuning will 1286 1286 * fail and the result ends up the same. 
1287 1287 */ 1288 - err = __mmc_switch_status(card, false); 1288 + err = mmc_switch_status(card, false); 1289 1289 if (err) 1290 1290 goto out_err; 1291 1291 ··· 1358 1358 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1359 1359 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, 1360 1360 card->ext_csd.generic_cmd6_time, 0, 1361 - true, false, true); 1361 + false, true); 1362 1362 if (err) { 1363 1363 pr_err("%s: switch to hs for hs400es failed, err:%d\n", 1364 1364 mmc_hostname(host), err); ··· 1366 1366 } 1367 1367 1368 1368 mmc_set_timing(host, MMC_TIMING_MMC_HS); 1369 - err = mmc_switch_status(card); 1369 + err = mmc_switch_status(card, true); 1370 1370 if (err) 1371 1371 goto out_err; 1372 1372 ··· 1392 1392 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1393 1393 EXT_CSD_HS_TIMING, val, 1394 1394 card->ext_csd.generic_cmd6_time, 0, 1395 - true, false, true); 1395 + false, true); 1396 1396 if (err) { 1397 1397 pr_err("%s: switch to hs400es failed, err:%d\n", 1398 1398 mmc_hostname(host), err); ··· 1407 1407 if (host->ops->hs400_enhanced_strobe) 1408 1408 host->ops->hs400_enhanced_strobe(host, &host->ios); 1409 1409 1410 - err = mmc_switch_status(card); 1410 + err = mmc_switch_status(card, true); 1411 1411 if (err) 1412 1412 goto out_err; 1413 1413 ··· 1457 1457 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1458 1458 EXT_CSD_HS_TIMING, val, 1459 1459 card->ext_csd.generic_cmd6_time, 0, 1460 - true, false, true); 1460 + false, true); 1461 1461 if (err) 1462 1462 goto err; 1463 1463 old_timing = host->ios.timing; ··· 1468 1468 * switch failed. If there really is a problem, we would expect 1469 1469 * tuning will fail and the result ends up the same. 
1470 1470 */ 1471 - err = __mmc_switch_status(card, false); 1471 + err = mmc_switch_status(card, false); 1472 1472 1473 1473 /* 1474 1474 * mmc_select_timing() assumes timing has not changed if ··· 1851 1851 */ 1852 1852 card->reenable_cmdq = card->ext_csd.cmdq_en; 1853 1853 1854 - if (card->ext_csd.cmdq_en && !host->cqe_enabled) { 1854 + if (host->cqe_ops && !host->cqe_enabled) { 1855 1855 err = host->cqe_ops->cqe_enable(host, card); 1856 - if (err) { 1857 - pr_err("%s: Failed to enable CQE, error %d\n", 1858 - mmc_hostname(host), err); 1859 - } else { 1856 + if (!err) { 1860 1857 host->cqe_enabled = true; 1861 - pr_info("%s: Command Queue Engine enabled\n", 1862 - mmc_hostname(host)); 1858 + 1859 + if (card->ext_csd.cmdq_en) { 1860 + pr_info("%s: Command Queue Engine enabled\n", 1861 + mmc_hostname(host)); 1862 + } else { 1863 + host->hsq_enabled = true; 1864 + pr_info("%s: Host Software Queue enabled\n", 1865 + mmc_hostname(host)); 1866 + } 1863 1867 } 1864 1868 } 1865 1869 ··· 1962 1958 1963 1959 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1964 1960 EXT_CSD_POWER_OFF_NOTIFICATION, 1965 - notify_type, timeout, 0, true, false, false); 1961 + notify_type, timeout, 0, false, false); 1966 1962 if (err) 1967 1963 pr_err("%s: Power Off Notification timed out, %u\n", 1968 1964 mmc_hostname(card->host), timeout);
+130 -66
drivers/mmc/core/mmc_ops.c
··· 19 19 #include "host.h" 20 20 #include "mmc_ops.h" 21 21 22 - #define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10min*/ 23 22 #define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */ 24 23 #define MMC_CACHE_FLUSH_TIMEOUT_MS (30 * 1000) /* 30s */ 24 + #define MMC_SANITIZE_TIMEOUT_MS (240 * 1000) /* 240s */ 25 25 26 26 static const u8 tuning_blk_pattern_4bit[] = { 27 27 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc, ··· 431 431 } 432 432 433 433 /* Caller must hold re-tuning */ 434 - int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal) 434 + int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal) 435 435 { 436 436 u32 status; 437 437 int err; ··· 445 445 return mmc_switch_status_error(card->host, status); 446 446 } 447 447 448 - int mmc_switch_status(struct mmc_card *card) 448 + static int mmc_busy_status(struct mmc_card *card, bool retry_crc_err, 449 + enum mmc_busy_cmd busy_cmd, bool *busy) 449 450 { 450 - return __mmc_switch_status(card, true); 451 + struct mmc_host *host = card->host; 452 + u32 status = 0; 453 + int err; 454 + 455 + if (host->ops->card_busy) { 456 + *busy = host->ops->card_busy(host); 457 + return 0; 458 + } 459 + 460 + err = mmc_send_status(card, &status); 461 + if (retry_crc_err && err == -EILSEQ) { 462 + *busy = true; 463 + return 0; 464 + } 465 + if (err) 466 + return err; 467 + 468 + switch (busy_cmd) { 469 + case MMC_BUSY_CMD6: 470 + err = mmc_switch_status_error(card->host, status); 471 + break; 472 + case MMC_BUSY_ERASE: 473 + err = R1_STATUS(status) ? 
-EIO : 0; 474 + break; 475 + case MMC_BUSY_HPI: 476 + break; 477 + default: 478 + err = -EINVAL; 479 + } 480 + 481 + if (err) 482 + return err; 483 + 484 + *busy = !mmc_ready_for_data(status); 485 + return 0; 451 486 } 452 487 453 - static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms, 454 - bool send_status, bool retry_crc_err) 488 + static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms, 489 + bool send_status, bool retry_crc_err, 490 + enum mmc_busy_cmd busy_cmd) 455 491 { 456 492 struct mmc_host *host = card->host; 457 493 int err; 458 494 unsigned long timeout; 459 - u32 status = 0; 495 + unsigned int udelay = 32, udelay_max = 32768; 460 496 bool expired = false; 461 497 bool busy = false; 462 498 ··· 514 478 */ 515 479 expired = time_after(jiffies, timeout); 516 480 517 - if (host->ops->card_busy) { 518 - busy = host->ops->card_busy(host); 519 - } else { 520 - err = mmc_send_status(card, &status); 521 - if (retry_crc_err && err == -EILSEQ) { 522 - busy = true; 523 - } else if (err) { 524 - return err; 525 - } else { 526 - err = mmc_switch_status_error(host, status); 527 - if (err) 528 - return err; 529 - busy = R1_CURRENT_STATE(status) == R1_STATE_PRG; 530 - } 531 - } 481 + err = mmc_busy_status(card, retry_crc_err, busy_cmd, &busy); 482 + if (err) 483 + return err; 532 484 533 485 /* Timeout if the device still remains busy. */ 534 486 if (expired && busy) { ··· 524 500 mmc_hostname(host), __func__); 525 501 return -ETIMEDOUT; 526 502 } 503 + 504 + /* Throttle the polling rate to avoid hogging the CPU. 
*/ 505 + if (busy) { 506 + usleep_range(udelay, udelay * 2); 507 + if (udelay < udelay_max) 508 + udelay *= 2; 509 + } 527 510 } while (busy); 528 511 529 512 return 0; 513 + } 514 + 515 + int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms, 516 + enum mmc_busy_cmd busy_cmd) 517 + { 518 + return __mmc_poll_for_busy(card, timeout_ms, true, false, busy_cmd); 530 519 } 531 520 532 521 /** ··· 551 514 * @timeout_ms: timeout (ms) for operation performed by register write, 552 515 * timeout of zero implies maximum possible timeout 553 516 * @timing: new timing to change to 554 - * @use_busy_signal: use the busy signal as response type 555 517 * @send_status: send status cmd to poll for busy 556 518 * @retry_crc_err: retry when CRC errors when polling with CMD13 for busy 557 519 * ··· 558 522 */ 559 523 int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, 560 524 unsigned int timeout_ms, unsigned char timing, 561 - bool use_busy_signal, bool send_status, bool retry_crc_err) 525 + bool send_status, bool retry_crc_err) 562 526 { 563 527 struct mmc_host *host = card->host; 564 528 int err; 565 529 struct mmc_command cmd = {}; 566 - bool use_r1b_resp = use_busy_signal; 530 + bool use_r1b_resp = true; 567 531 unsigned char old_timing = host->ios.timing; 568 532 569 533 mmc_retune_hold(host); ··· 598 562 cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1; 599 563 } 600 564 601 - if (index == EXT_CSD_SANITIZE_START) 602 - cmd.sanitize_busy = true; 603 - 604 565 err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); 605 566 if (err) 606 - goto out; 607 - 608 - /* No need to check card status in case of unblocking command */ 609 - if (!use_busy_signal) 610 567 goto out; 611 568 612 569 /*If SPI or used HW busy detection above, then we don't need to poll. */ ··· 608 579 goto out_tim; 609 580 610 581 /* Let's try to poll to find out when the command is completed. 
*/ 611 - err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err); 582 + err = __mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err, 583 + MMC_BUSY_CMD6); 612 584 if (err) 613 585 goto out; 614 586 ··· 619 589 mmc_set_timing(host, timing); 620 590 621 591 if (send_status) { 622 - err = mmc_switch_status(card); 592 + err = mmc_switch_status(card, true); 623 593 if (err && timing) 624 594 mmc_set_timing(host, old_timing); 625 595 } ··· 633 603 unsigned int timeout_ms) 634 604 { 635 605 return __mmc_switch(card, set, index, value, timeout_ms, 0, 636 - true, true, false); 606 + true, false); 637 607 } 638 608 EXPORT_SYMBOL_GPL(mmc_switch); 639 609 ··· 829 799 return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width); 830 800 } 831 801 832 - static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status) 802 + static int mmc_send_hpi_cmd(struct mmc_card *card) 833 803 { 804 + unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time; 805 + struct mmc_host *host = card->host; 806 + bool use_r1b_resp = true; 834 807 struct mmc_command cmd = {}; 835 - unsigned int opcode; 836 808 int err; 837 809 838 - opcode = card->ext_csd.hpi_cmd; 839 - if (opcode == MMC_STOP_TRANSMISSION) 840 - cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; 841 - else if (opcode == MMC_SEND_STATUS) 842 - cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 843 - 844 - cmd.opcode = opcode; 810 + cmd.opcode = card->ext_csd.hpi_cmd; 845 811 cmd.arg = card->rca << 16 | 1; 846 812 847 - err = mmc_wait_for_cmd(card->host, &cmd, 0); 813 + /* 814 + * Make sure the host's max_busy_timeout fit the needed timeout for HPI. 815 + * In case it doesn't, let's instruct the host to avoid HW busy 816 + * detection, by using a R1 response instead of R1B. 
817 + */ 818 + if (host->max_busy_timeout && busy_timeout_ms > host->max_busy_timeout) 819 + use_r1b_resp = false; 820 + 821 + if (cmd.opcode == MMC_STOP_TRANSMISSION && use_r1b_resp) { 822 + cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; 823 + cmd.busy_timeout = busy_timeout_ms; 824 + } else { 825 + cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 826 + use_r1b_resp = false; 827 + } 828 + 829 + err = mmc_wait_for_cmd(host, &cmd, 0); 848 830 if (err) { 849 - pr_warn("%s: error %d interrupting operation. " 850 - "HPI command response %#x\n", mmc_hostname(card->host), 851 - err, cmd.resp[0]); 831 + pr_warn("%s: HPI error %d. Command response %#x\n", 832 + mmc_hostname(host), err, cmd.resp[0]); 852 833 return err; 853 834 } 854 - if (status) 855 - *status = cmd.resp[0]; 856 835 857 - return 0; 836 + /* No need to poll when using HW busy detection. */ 837 + if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp) 838 + return 0; 839 + 840 + /* Let's poll to find out when the HPI request completes. */ 841 + return mmc_poll_for_busy(card, busy_timeout_ms, MMC_BUSY_HPI); 858 842 } 859 843 860 844 /** ··· 882 838 { 883 839 int err; 884 840 u32 status; 885 - unsigned long prg_wait; 886 841 887 842 if (!card->ext_csd.hpi_en) { 888 843 pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host)); ··· 914 871 goto out; 915 872 } 916 873 917 - err = mmc_send_hpi_cmd(card, &status); 918 - if (err) 919 - goto out; 920 - 921 - prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time); 922 - do { 923 - err = mmc_send_status(card, &status); 924 - 925 - if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN) 926 - break; 927 - if (time_after(jiffies, prg_wait)) 928 - err = -ETIMEDOUT; 929 - } while (!err); 930 - 874 + err = mmc_send_hpi_cmd(card); 931 875 out: 932 876 return err; 933 877 } ··· 1030 1000 return mmc_cmdq_switch(card, false); 1031 1001 } 1032 1002 EXPORT_SYMBOL_GPL(mmc_cmdq_disable); 1003 + 1004 + int mmc_sanitize(struct mmc_card *card) 1005 + { 1006 + struct mmc_host *host = 
card->host; 1007 + int err; 1008 + 1009 + if (!mmc_can_sanitize(card)) { 1010 + pr_warn("%s: Sanitize not supported\n", mmc_hostname(host)); 1011 + return -EOPNOTSUPP; 1012 + } 1013 + 1014 + pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host)); 1015 + 1016 + mmc_retune_hold(host); 1017 + 1018 + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START, 1019 + 1, MMC_SANITIZE_TIMEOUT_MS); 1020 + if (err) 1021 + pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err); 1022 + 1023 + /* 1024 + * If the sanitize operation timed out, the card is probably still busy 1025 + * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort 1026 + * it with a HPI command to get back into R1_STATE_TRAN. 1027 + */ 1028 + if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card)) 1029 + pr_warn("%s: Sanitize aborted\n", mmc_hostname(host)); 1030 + 1031 + mmc_retune_release(host); 1032 + 1033 + pr_debug("%s: Sanitize completed\n", mmc_hostname(host)); 1034 + return err; 1035 + } 1036 + EXPORT_SYMBOL_GPL(mmc_sanitize);
+11 -4
drivers/mmc/core/mmc_ops.h
··· 10 10 11 11 #include <linux/types.h> 12 12 13 + enum mmc_busy_cmd { 14 + MMC_BUSY_CMD6, 15 + MMC_BUSY_ERASE, 16 + MMC_BUSY_HPI, 17 + }; 18 + 13 19 struct mmc_host; 14 20 struct mmc_card; 15 21 ··· 32 26 int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp); 33 27 int mmc_spi_set_crc(struct mmc_host *host, int use_crc); 34 28 int mmc_bus_test(struct mmc_card *card, u8 bus_width); 35 - int mmc_interrupt_hpi(struct mmc_card *card); 36 29 int mmc_can_ext_csd(struct mmc_card *card); 37 30 int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd); 38 - int mmc_switch_status(struct mmc_card *card); 39 - int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal); 31 + int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal); 32 + int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms, 33 + enum mmc_busy_cmd busy_cmd); 40 34 int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, 41 35 unsigned int timeout_ms, unsigned char timing, 42 - bool use_busy_signal, bool send_status, bool retry_crc_err); 36 + bool send_status, bool retry_crc_err); 43 37 int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, 44 38 unsigned int timeout_ms); 45 39 void mmc_run_bkops(struct mmc_card *card); 46 40 int mmc_flush_cache(struct mmc_card *card); 47 41 int mmc_cmdq_enable(struct mmc_card *card); 48 42 int mmc_cmdq_disable(struct mmc_card *card); 43 + int mmc_sanitize(struct mmc_card *card); 49 44 50 45 #endif 51 46
+40 -12
drivers/mmc/core/mmc_test.c
··· 71 71 * @sg_len: length of currently mapped scatterlist @sg 72 72 * @mem: allocated memory 73 73 * @sg: scatterlist 74 + * @sg_areq: scatterlist for non-blocking request 74 75 */ 75 76 struct mmc_test_area { 76 77 unsigned long max_sz; ··· 83 82 unsigned int sg_len; 84 83 struct mmc_test_mem *mem; 85 84 struct scatterlist *sg; 85 + struct scatterlist *sg_areq; 86 86 }; 87 87 88 88 /** ··· 838 836 } 839 837 840 838 static int mmc_test_nonblock_transfer(struct mmc_test_card *test, 841 - struct scatterlist *sg, unsigned sg_len, 842 - unsigned dev_addr, unsigned blocks, 843 - unsigned blksz, int write, int count) 839 + unsigned int dev_addr, int write, 840 + int count) 844 841 { 845 842 struct mmc_test_req *rq1, *rq2; 846 843 struct mmc_request *mrq, *prev_mrq; 847 844 int i; 848 845 int ret = RESULT_OK; 846 + struct mmc_test_area *t = &test->area; 847 + struct scatterlist *sg = t->sg; 848 + struct scatterlist *sg_areq = t->sg_areq; 849 849 850 850 rq1 = mmc_test_req_alloc(); 851 851 rq2 = mmc_test_req_alloc(); ··· 861 857 862 858 for (i = 0; i < count; i++) { 863 859 mmc_test_req_reset(container_of(mrq, struct mmc_test_req, mrq)); 864 - mmc_test_prepare_mrq(test, mrq, sg, sg_len, dev_addr, blocks, 865 - blksz, write); 860 + mmc_test_prepare_mrq(test, mrq, sg, t->sg_len, dev_addr, 861 + t->blocks, 512, write); 866 862 ret = mmc_test_start_areq(test, mrq, prev_mrq); 867 863 if (ret) 868 864 goto err; ··· 871 867 prev_mrq = &rq2->mrq; 872 868 873 869 swap(mrq, prev_mrq); 874 - dev_addr += blocks; 870 + swap(sg, sg_areq); 871 + dev_addr += t->blocks; 875 872 } 876 873 877 874 ret = mmc_test_start_areq(test, NULL, prev_mrq); ··· 1401 1396 * Map sz bytes so that it can be transferred. 
1402 1397 */ 1403 1398 static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz, 1404 - int max_scatter, int min_sg_len) 1399 + int max_scatter, int min_sg_len, bool nonblock) 1405 1400 { 1406 1401 struct mmc_test_area *t = &test->area; 1407 1402 int err; 1403 + unsigned int sg_len = 0; 1408 1404 1409 1405 t->blocks = sz >> 9; 1410 1406 ··· 1417 1411 err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs, 1418 1412 t->max_seg_sz, &t->sg_len, min_sg_len); 1419 1413 } 1414 + 1415 + if (err || !nonblock) 1416 + goto err; 1417 + 1418 + if (max_scatter) { 1419 + err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg_areq, 1420 + t->max_segs, t->max_seg_sz, 1421 + &sg_len); 1422 + } else { 1423 + err = mmc_test_map_sg(t->mem, sz, t->sg_areq, 1, t->max_segs, 1424 + t->max_seg_sz, &sg_len, min_sg_len); 1425 + } 1426 + if (!err && sg_len != t->sg_len) 1427 + err = -EINVAL; 1428 + 1429 + err: 1420 1430 if (err) 1421 1431 pr_info("%s: Failed to map sg list\n", 1422 1432 mmc_hostname(test->card->host)); ··· 1462 1440 struct timespec64 ts1, ts2; 1463 1441 int ret = 0; 1464 1442 int i; 1465 - struct mmc_test_area *t = &test->area; 1466 1443 1467 1444 /* 1468 1445 * In the case of a maximally scattered transfer, the maximum transfer ··· 1479 1458 sz = max_tfr; 1480 1459 } 1481 1460 1482 - ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len); 1461 + ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len, nonblock); 1483 1462 if (ret) 1484 1463 return ret; 1485 1464 1486 1465 if (timed) 1487 1466 ktime_get_ts64(&ts1); 1488 1467 if (nonblock) 1489 - ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len, 1490 - dev_addr, t->blocks, 512, write, count); 1468 + ret = mmc_test_nonblock_transfer(test, dev_addr, write, count); 1491 1469 else 1492 1470 for (i = 0; i < count && ret == 0; i++) { 1493 1471 ret = mmc_test_area_transfer(test, dev_addr, write); ··· 1545 1525 struct mmc_test_area *t = &test->area; 1546 1526 1547 1527 kfree(t->sg); 1528 + 
kfree(t->sg_areq); 1548 1529 mmc_test_free_mem(t->mem); 1549 1530 1550 1531 return 0; ··· 1601 1580 1602 1581 t->sg = kmalloc_array(t->max_segs, sizeof(*t->sg), GFP_KERNEL); 1603 1582 if (!t->sg) { 1583 + ret = -ENOMEM; 1584 + goto out_free; 1585 + } 1586 + 1587 + t->sg_areq = kmalloc_array(t->max_segs, sizeof(*t->sg_areq), 1588 + GFP_KERNEL); 1589 + if (!t->sg_areq) { 1604 1590 ret = -ENOMEM; 1605 1591 goto out_free; 1606 1592 } ··· 2496 2468 if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR)) 2497 2469 return RESULT_UNSUP_HOST; 2498 2470 2499 - ret = mmc_test_area_map(test, sz, 0, 0); 2471 + ret = mmc_test_area_map(test, sz, 0, 0, use_areq); 2500 2472 if (ret) 2501 2473 return ret; 2502 2474
+18 -4
drivers/mmc/core/queue.c
··· 62 62 { 63 63 struct mmc_host *host = mq->card->host; 64 64 65 - if (mq->use_cqe) 65 + if (mq->use_cqe && !host->hsq_enabled) 66 66 return mmc_cqe_issue_type(host, req); 67 67 68 68 if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE) ··· 124 124 { 125 125 struct request_queue *q = req->q; 126 126 struct mmc_queue *mq = q->queuedata; 127 + struct mmc_card *card = mq->card; 128 + struct mmc_host *host = card->host; 127 129 unsigned long flags; 128 130 int ret; 129 131 130 132 spin_lock_irqsave(&mq->lock, flags); 131 133 132 - if (mq->recovery_needed || !mq->use_cqe) 134 + if (mq->recovery_needed || !mq->use_cqe || host->hsq_enabled) 133 135 ret = BLK_EH_RESET_TIMER; 134 136 else 135 137 ret = mmc_cqe_timed_out(req); ··· 146 144 struct mmc_queue *mq = container_of(work, struct mmc_queue, 147 145 recovery_work); 148 146 struct request_queue *q = mq->queue; 147 + struct mmc_host *host = mq->card->host; 149 148 150 149 mmc_get_card(mq->card, &mq->ctx); 151 150 152 151 mq->in_recovery = true; 153 152 154 - if (mq->use_cqe) 153 + if (mq->use_cqe && !host->hsq_enabled) 155 154 mmc_blk_cqe_recovery(mq); 156 155 else 157 156 mmc_blk_mq_recovery(mq); ··· 162 159 spin_lock_irq(&mq->lock); 163 160 mq->recovery_needed = false; 164 161 spin_unlock_irq(&mq->lock); 162 + 163 + if (host->hsq_enabled) 164 + host->cqe_ops->cqe_recovery_finish(host); 165 165 166 166 mmc_put_card(mq->card, &mq->ctx); 167 167 ··· 285 279 } 286 280 break; 287 281 case MMC_ISSUE_ASYNC: 282 + /* 283 + * For MMC host software queue, we only allow 2 requests in 284 + * flight to avoid a long latency. 285 + */ 286 + if (host->hsq_enabled && mq->in_flight[issue_type] > 2) { 287 + spin_unlock_irq(&mq->lock); 288 + return BLK_STS_RESOURCE; 289 + } 288 290 break; 289 291 default: 290 292 /* ··· 444 430 * The queue depth for CQE must match the hardware because the request 445 431 * tag is used to index the hardware queue. 
446 432 */ 447 - if (mq->use_cqe) 433 + if (mq->use_cqe && !host->hsq_enabled) 448 434 mq->tag_set.queue_depth = 449 435 min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth); 450 436 else
+10
drivers/mmc/core/sd.c
··· 1082 1082 } 1083 1083 } 1084 1084 1085 + if (host->cqe_ops && !host->cqe_enabled) { 1086 + err = host->cqe_ops->cqe_enable(host, card); 1087 + if (!err) { 1088 + host->cqe_enabled = true; 1089 + host->hsq_enabled = true; 1090 + pr_info("%s: Host Software Queue enabled\n", 1091 + mmc_hostname(host)); 1092 + } 1093 + } 1094 + 1085 1095 if (host->caps2 & MMC_CAP2_AVOID_3_3V && 1086 1096 host->ios.signal_voltage == MMC_SIGNAL_VOLTAGE_330) { 1087 1097 pr_err("%s: Host failed to negotiate down from 3.3V\n",
+8 -7
drivers/mmc/core/sdio_irq.c
··· 276 276 277 277 card->sdio_single_irq = NULL; 278 278 if ((card->host->caps & MMC_CAP_SDIO_IRQ) && 279 - card->host->sdio_irqs == 1) 279 + card->host->sdio_irqs == 1) { 280 280 for (i = 0; i < card->sdio_funcs; i++) { 281 - func = card->sdio_func[i]; 282 - if (func && func->irq_handler) { 283 - card->sdio_single_irq = func; 284 - break; 285 - } 286 - } 281 + func = card->sdio_func[i]; 282 + if (func && func->irq_handler) { 283 + card->sdio_single_irq = func; 284 + break; 285 + } 286 + } 287 + } 287 288 } 288 289 289 290 /**
+12
drivers/mmc/host/Kconfig
··· 645 645 depends on ARCH_SPRD 646 646 depends on MMC_SDHCI_PLTFM 647 647 select MMC_SDHCI_IO_ACCESSORS 648 + select MMC_HSQ 648 649 help 649 650 This selects the SDIO Host Controller in Spreadtrum 650 651 SoCs, this driver supports R11(IP version: R11P0). ··· 947 946 This controller supports eMMC devices with command queue support. 948 947 949 948 If you have a controller with this interface, say Y or M here. 949 + 950 + If unsure, say N. 951 + 952 + config MMC_HSQ 953 + tristate "MMC Host Software Queue support" 954 + help 955 + This selects the MMC Host Software Queue support. This may increase 956 + performance, if the host controller and its driver supports it. 957 + 958 + If you have a controller/driver supporting this interface, say Y or M 959 + here. 950 960 951 961 If unsure, say N. 952 962
+1
drivers/mmc/host/Makefile
··· 100 100 obj-$(CONFIG_MMC_SDHCI_OMAP) += sdhci-omap.o 101 101 obj-$(CONFIG_MMC_SDHCI_SPRD) += sdhci-sprd.o 102 102 obj-$(CONFIG_MMC_CQHCI) += cqhci.o 103 + obj-$(CONFIG_MMC_HSQ) += mmc_hsq.o 103 104 104 105 ifeq ($(CONFIG_CB710_DEBUG),y) 105 106 CFLAGS-cb710-mmc += -DDEBUG
+2 -2
drivers/mmc/host/cavium-octeon.c
··· 207 207 base = devm_platform_ioremap_resource(pdev, 0); 208 208 if (IS_ERR(base)) 209 209 return PTR_ERR(base); 210 - host->base = (void __iomem *)base; 210 + host->base = base; 211 211 host->reg_off = 0; 212 212 213 213 base = devm_platform_ioremap_resource(pdev, 1); 214 214 if (IS_ERR(base)) 215 215 return PTR_ERR(base); 216 - host->dma_base = (void __iomem *)base; 216 + host->dma_base = base; 217 217 /* 218 218 * To keep the register addresses shared we intentionaly use 219 219 * a negative offset here, first register used on Octeon therefore
+11 -5
drivers/mmc/host/cqhci.c
··· 298 298 cq_host->activated = false; 299 299 } 300 300 301 - int cqhci_suspend(struct mmc_host *mmc) 301 + int cqhci_deactivate(struct mmc_host *mmc) 302 302 { 303 303 struct cqhci_host *cq_host = mmc->cqe_private; 304 304 305 - if (cq_host->enabled) 305 + if (cq_host->enabled && cq_host->activated) 306 306 __cqhci_disable(cq_host); 307 307 308 308 return 0; 309 309 } 310 - EXPORT_SYMBOL(cqhci_suspend); 310 + EXPORT_SYMBOL(cqhci_deactivate); 311 311 312 312 int cqhci_resume(struct mmc_host *mmc) 313 313 { ··· 321 321 struct cqhci_host *cq_host = mmc->cqe_private; 322 322 int err; 323 323 324 + if (!card->ext_csd.cmdq_en) 325 + return -EINVAL; 326 + 324 327 if (cq_host->enabled) 325 328 return 0; 326 329 327 330 cq_host->rca = card->rca; 328 331 329 332 err = cqhci_host_alloc_tdl(cq_host); 330 - if (err) 333 + if (err) { 334 + pr_err("%s: Failed to enable CQE, error %d\n", 335 + mmc_hostname(mmc), err); 331 336 return err; 337 + } 332 338 333 339 __cqhci_enable(cq_host); 334 340 ··· 1077 1071 1078 1072 /* check and setup CMDQ interface */ 1079 1073 cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM, 1080 - "cqhci_mem"); 1074 + "cqhci"); 1081 1075 if (!cqhci_memres) { 1082 1076 dev_dbg(&pdev->dev, "CMDQ not supported\n"); 1083 1077 return ERR_PTR(-EINVAL);
+5 -1
drivers/mmc/host/cqhci.h
··· 230 230 int data_error); 231 231 int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc, bool dma64); 232 232 struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev); 233 - int cqhci_suspend(struct mmc_host *mmc); 233 + int cqhci_deactivate(struct mmc_host *mmc); 234 + static inline int cqhci_suspend(struct mmc_host *mmc) 235 + { 236 + return cqhci_deactivate(mmc); 237 + } 234 238 int cqhci_resume(struct mmc_host *mmc); 235 239 236 240 #endif
+348
drivers/mmc/host/mmc_hsq.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * 4 + * MMC software queue support based on command queue interfaces 5 + * 6 + * Copyright (C) 2019 Linaro, Inc. 7 + * Author: Baolin Wang <baolin.wang@linaro.org> 8 + */ 9 + 10 + #include <linux/mmc/card.h> 11 + #include <linux/mmc/host.h> 12 + #include <linux/module.h> 13 + 14 + #include "mmc_hsq.h" 15 + 16 + #define HSQ_NUM_SLOTS 64 17 + #define HSQ_INVALID_TAG HSQ_NUM_SLOTS 18 + 19 + static void mmc_hsq_pump_requests(struct mmc_hsq *hsq) 20 + { 21 + struct mmc_host *mmc = hsq->mmc; 22 + struct hsq_slot *slot; 23 + unsigned long flags; 24 + 25 + spin_lock_irqsave(&hsq->lock, flags); 26 + 27 + /* Make sure we are not already running a request now */ 28 + if (hsq->mrq) { 29 + spin_unlock_irqrestore(&hsq->lock, flags); 30 + return; 31 + } 32 + 33 + /* Make sure there are remain requests need to pump */ 34 + if (!hsq->qcnt || !hsq->enabled) { 35 + spin_unlock_irqrestore(&hsq->lock, flags); 36 + return; 37 + } 38 + 39 + slot = &hsq->slot[hsq->next_tag]; 40 + hsq->mrq = slot->mrq; 41 + hsq->qcnt--; 42 + 43 + spin_unlock_irqrestore(&hsq->lock, flags); 44 + 45 + mmc->ops->request(mmc, hsq->mrq); 46 + } 47 + 48 + static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains) 49 + { 50 + struct hsq_slot *slot; 51 + int tag; 52 + 53 + /* 54 + * If there are no remain requests in software queue, then set a invalid 55 + * tag. 56 + */ 57 + if (!remains) { 58 + hsq->next_tag = HSQ_INVALID_TAG; 59 + return; 60 + } 61 + 62 + /* 63 + * Increasing the next tag and check if the corresponding request is 64 + * available, if yes, then we found a candidate request. 65 + */ 66 + if (++hsq->next_tag != HSQ_INVALID_TAG) { 67 + slot = &hsq->slot[hsq->next_tag]; 68 + if (slot->mrq) 69 + return; 70 + } 71 + 72 + /* Othersie we should iterate all slots to find a available tag. 
*/ 73 + for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) { 74 + slot = &hsq->slot[tag]; 75 + if (slot->mrq) 76 + break; 77 + } 78 + 79 + if (tag == HSQ_NUM_SLOTS) 80 + tag = HSQ_INVALID_TAG; 81 + 82 + hsq->next_tag = tag; 83 + } 84 + 85 + static void mmc_hsq_post_request(struct mmc_hsq *hsq) 86 + { 87 + unsigned long flags; 88 + int remains; 89 + 90 + spin_lock_irqsave(&hsq->lock, flags); 91 + 92 + remains = hsq->qcnt; 93 + hsq->mrq = NULL; 94 + 95 + /* Update the next available tag to be queued. */ 96 + mmc_hsq_update_next_tag(hsq, remains); 97 + 98 + if (hsq->waiting_for_idle && !remains) { 99 + hsq->waiting_for_idle = false; 100 + wake_up(&hsq->wait_queue); 101 + } 102 + 103 + /* Do not pump new request in recovery mode. */ 104 + if (hsq->recovery_halt) { 105 + spin_unlock_irqrestore(&hsq->lock, flags); 106 + return; 107 + } 108 + 109 + spin_unlock_irqrestore(&hsq->lock, flags); 110 + 111 + /* 112 + * Try to pump new request to host controller as fast as possible, 113 + * after completing previous request. 114 + */ 115 + if (remains > 0) 116 + mmc_hsq_pump_requests(hsq); 117 + } 118 + 119 + /** 120 + * mmc_hsq_finalize_request - finalize one request if the request is done 121 + * @mmc: the host controller 122 + * @mrq: the request need to be finalized 123 + * 124 + * Return true if we finalized the corresponding request in software queue, 125 + * otherwise return false. 126 + */ 127 + bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq) 128 + { 129 + struct mmc_hsq *hsq = mmc->cqe_private; 130 + unsigned long flags; 131 + 132 + spin_lock_irqsave(&hsq->lock, flags); 133 + 134 + if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) { 135 + spin_unlock_irqrestore(&hsq->lock, flags); 136 + return false; 137 + } 138 + 139 + /* 140 + * Clear current completed slot request to make a room for new request. 
141 + */ 142 + hsq->slot[hsq->next_tag].mrq = NULL; 143 + 144 + spin_unlock_irqrestore(&hsq->lock, flags); 145 + 146 + mmc_cqe_request_done(mmc, hsq->mrq); 147 + 148 + mmc_hsq_post_request(hsq); 149 + 150 + return true; 151 + } 152 + EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request); 153 + 154 + static void mmc_hsq_recovery_start(struct mmc_host *mmc) 155 + { 156 + struct mmc_hsq *hsq = mmc->cqe_private; 157 + unsigned long flags; 158 + 159 + spin_lock_irqsave(&hsq->lock, flags); 160 + 161 + hsq->recovery_halt = true; 162 + 163 + spin_unlock_irqrestore(&hsq->lock, flags); 164 + } 165 + 166 + static void mmc_hsq_recovery_finish(struct mmc_host *mmc) 167 + { 168 + struct mmc_hsq *hsq = mmc->cqe_private; 169 + int remains; 170 + 171 + spin_lock_irq(&hsq->lock); 172 + 173 + hsq->recovery_halt = false; 174 + remains = hsq->qcnt; 175 + 176 + spin_unlock_irq(&hsq->lock); 177 + 178 + /* 179 + * Try to pump new request if there are request pending in software 180 + * queue after finishing recovery. 181 + */ 182 + if (remains > 0) 183 + mmc_hsq_pump_requests(hsq); 184 + } 185 + 186 + static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq) 187 + { 188 + struct mmc_hsq *hsq = mmc->cqe_private; 189 + int tag = mrq->tag; 190 + 191 + spin_lock_irq(&hsq->lock); 192 + 193 + if (!hsq->enabled) { 194 + spin_unlock_irq(&hsq->lock); 195 + return -ESHUTDOWN; 196 + } 197 + 198 + /* Do not queue any new requests in recovery mode. */ 199 + if (hsq->recovery_halt) { 200 + spin_unlock_irq(&hsq->lock); 201 + return -EBUSY; 202 + } 203 + 204 + hsq->slot[tag].mrq = mrq; 205 + 206 + /* 207 + * Set the next tag as current request tag if no available 208 + * next tag. 
209 + */ 210 + if (hsq->next_tag == HSQ_INVALID_TAG) 211 + hsq->next_tag = tag; 212 + 213 + hsq->qcnt++; 214 + 215 + spin_unlock_irq(&hsq->lock); 216 + 217 + mmc_hsq_pump_requests(hsq); 218 + 219 + return 0; 220 + } 221 + 222 + static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq) 223 + { 224 + if (mmc->ops->post_req) 225 + mmc->ops->post_req(mmc, mrq, 0); 226 + } 227 + 228 + static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret) 229 + { 230 + bool is_idle; 231 + 232 + spin_lock_irq(&hsq->lock); 233 + 234 + is_idle = (!hsq->mrq && !hsq->qcnt) || 235 + hsq->recovery_halt; 236 + 237 + *ret = hsq->recovery_halt ? -EBUSY : 0; 238 + hsq->waiting_for_idle = !is_idle; 239 + 240 + spin_unlock_irq(&hsq->lock); 241 + 242 + return is_idle; 243 + } 244 + 245 + static int mmc_hsq_wait_for_idle(struct mmc_host *mmc) 246 + { 247 + struct mmc_hsq *hsq = mmc->cqe_private; 248 + int ret; 249 + 250 + wait_event(hsq->wait_queue, 251 + mmc_hsq_queue_is_idle(hsq, &ret)); 252 + 253 + return ret; 254 + } 255 + 256 + static void mmc_hsq_disable(struct mmc_host *mmc) 257 + { 258 + struct mmc_hsq *hsq = mmc->cqe_private; 259 + u32 timeout = 500; 260 + int ret; 261 + 262 + spin_lock_irq(&hsq->lock); 263 + 264 + if (!hsq->enabled) { 265 + spin_unlock_irq(&hsq->lock); 266 + return; 267 + } 268 + 269 + spin_unlock_irq(&hsq->lock); 270 + 271 + ret = wait_event_timeout(hsq->wait_queue, 272 + mmc_hsq_queue_is_idle(hsq, &ret), 273 + msecs_to_jiffies(timeout)); 274 + if (ret == 0) { 275 + pr_warn("could not stop mmc software queue\n"); 276 + return; 277 + } 278 + 279 + spin_lock_irq(&hsq->lock); 280 + 281 + hsq->enabled = false; 282 + 283 + spin_unlock_irq(&hsq->lock); 284 + } 285 + 286 + static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card) 287 + { 288 + struct mmc_hsq *hsq = mmc->cqe_private; 289 + 290 + spin_lock_irq(&hsq->lock); 291 + 292 + if (hsq->enabled) { 293 + spin_unlock_irq(&hsq->lock); 294 + return -EBUSY; 295 + } 296 + 297 + hsq->enabled 
= true; 298 + 299 + spin_unlock_irq(&hsq->lock); 300 + 301 + return 0; 302 + } 303 + 304 + static const struct mmc_cqe_ops mmc_hsq_ops = { 305 + .cqe_enable = mmc_hsq_enable, 306 + .cqe_disable = mmc_hsq_disable, 307 + .cqe_request = mmc_hsq_request, 308 + .cqe_post_req = mmc_hsq_post_req, 309 + .cqe_wait_for_idle = mmc_hsq_wait_for_idle, 310 + .cqe_recovery_start = mmc_hsq_recovery_start, 311 + .cqe_recovery_finish = mmc_hsq_recovery_finish, 312 + }; 313 + 314 + int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc) 315 + { 316 + hsq->num_slots = HSQ_NUM_SLOTS; 317 + hsq->next_tag = HSQ_INVALID_TAG; 318 + 319 + hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots, 320 + sizeof(struct hsq_slot), GFP_KERNEL); 321 + if (!hsq->slot) 322 + return -ENOMEM; 323 + 324 + hsq->mmc = mmc; 325 + hsq->mmc->cqe_private = hsq; 326 + mmc->cqe_ops = &mmc_hsq_ops; 327 + 328 + spin_lock_init(&hsq->lock); 329 + init_waitqueue_head(&hsq->wait_queue); 330 + 331 + return 0; 332 + } 333 + EXPORT_SYMBOL_GPL(mmc_hsq_init); 334 + 335 + void mmc_hsq_suspend(struct mmc_host *mmc) 336 + { 337 + mmc_hsq_disable(mmc); 338 + } 339 + EXPORT_SYMBOL_GPL(mmc_hsq_suspend); 340 + 341 + int mmc_hsq_resume(struct mmc_host *mmc) 342 + { 343 + return mmc_hsq_enable(mmc, NULL); 344 + } 345 + EXPORT_SYMBOL_GPL(mmc_hsq_resume); 346 + 347 + MODULE_DESCRIPTION("MMC Host Software Queue support"); 348 + MODULE_LICENSE("GPL v2");
+30
drivers/mmc/host/mmc_hsq.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef LINUX_MMC_HSQ_H 3 + #define LINUX_MMC_HSQ_H 4 + 5 + struct hsq_slot { 6 + struct mmc_request *mrq; 7 + }; 8 + 9 + struct mmc_hsq { 10 + struct mmc_host *mmc; 11 + struct mmc_request *mrq; 12 + wait_queue_head_t wait_queue; 13 + struct hsq_slot *slot; 14 + spinlock_t lock; 15 + 16 + int next_tag; 17 + int num_slots; 18 + int qcnt; 19 + 20 + bool enabled; 21 + bool waiting_for_idle; 22 + bool recovery_halt; 23 + }; 24 + 25 + int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc); 26 + void mmc_hsq_suspend(struct mmc_host *mmc); 27 + int mmc_hsq_resume(struct mmc_host *mmc); 28 + bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq); 29 + 30 + #endif
+41 -2
drivers/mmc/host/mmci.c
··· 22 22 #include <linux/mmc/pm.h> 23 23 #include <linux/mmc/host.h> 24 24 #include <linux/mmc/card.h> 25 + #include <linux/mmc/sd.h> 25 26 #include <linux/mmc/slot-gpio.h> 26 27 #include <linux/amba/bus.h> 27 28 #include <linux/clk.h> ··· 268 267 .datactrl_blocksz = 14, 269 268 .datactrl_any_blocksz = true, 270 269 .stm32_idmabsize_mask = GENMASK(12, 5), 270 + .busy_timeout = true, 271 + .busy_detect = true, 272 + .busy_detect_flag = MCI_STM32_BUSYD0, 273 + .busy_detect_mask = MCI_STM32_BUSYD0ENDMASK, 274 + .init = sdmmc_variant_init, 275 + }; 276 + 277 + static struct variant_data variant_stm32_sdmmcv2 = { 278 + .fifosize = 16 * 4, 279 + .fifohalfsize = 8 * 4, 280 + .f_max = 208000000, 281 + .stm32_clkdiv = true, 282 + .cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE, 283 + .cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC, 284 + .cmdreg_srsp_crc = MCI_CPSM_STM32_SRSP_CRC, 285 + .cmdreg_srsp = MCI_CPSM_STM32_SRSP, 286 + .cmdreg_stop = MCI_CPSM_STM32_CMDSTOP, 287 + .data_cmd_enable = MCI_CPSM_STM32_CMDTRANS, 288 + .irq_pio_mask = MCI_IRQ_PIO_STM32_MASK, 289 + .datactrl_first = true, 290 + .datacnt_useless = true, 291 + .datalength_bits = 25, 292 + .datactrl_blocksz = 14, 293 + .datactrl_any_blocksz = true, 294 + .stm32_idmabsize_mask = GENMASK(16, 5), 295 + .dma_lli = true, 271 296 .busy_timeout = true, 272 297 .busy_detect = true, 273 298 .busy_detect_flag = MCI_STM32_BUSYD0, ··· 1244 1217 writel_relaxed(clks, host->base + MMCIDATATIMER); 1245 1218 } 1246 1219 1220 + if (host->ops->pre_sig_volt_switch && cmd->opcode == SD_SWITCH_VOLTAGE) 1221 + host->ops->pre_sig_volt_switch(host); 1222 + 1247 1223 if (/*interrupt*/0) 1248 1224 c |= MCI_CPSM_INTERRUPT; 1249 1225 ··· 1860 1830 1861 1831 static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios) 1862 1832 { 1833 + struct mmci_host *host = mmc_priv(mmc); 1863 1834 int ret = 0; 1864 1835 1865 1836 if (!IS_ERR(mmc->supply.vqmmc)) { ··· 1879 1848 1100000, 1300000); 1880 1849 break; 1881 1850 } 1851 + 1852 + if 
(!ret && host->ops && host->ops->post_sig_volt_switch) 1853 + ret = host->ops->post_sig_volt_switch(host, ios); 1882 1854 1883 1855 if (ret) 1884 1856 dev_warn(mmc_dev(mmc), "Voltage switch failed\n"); ··· 1967 1933 1968 1934 host = mmc_priv(mmc); 1969 1935 host->mmc = mmc; 1936 + host->mmc_ops = &mmci_ops; 1937 + mmc->ops = &mmci_ops; 1970 1938 1971 1939 /* 1972 1940 * Some variant (STM32) doesn't have opendrain bit, nevertheless ··· 2107 2071 host->stop_abort.opcode = MMC_STOP_TRANSMISSION; 2108 2072 host->stop_abort.arg = 0; 2109 2073 host->stop_abort.flags = MMC_RSP_R1B | MMC_CMD_AC; 2110 - 2111 - mmc->ops = &mmci_ops; 2112 2074 2113 2075 /* We support these PM capabilities. */ 2114 2076 mmc->pm_caps |= MMC_PM_KEEP_POWER; ··· 2368 2334 .id = 0x10153180, 2369 2335 .mask = 0xf0ffffff, 2370 2336 .data = &variant_stm32_sdmmc, 2337 + }, 2338 + { 2339 + .id = 0x00253180, 2340 + .mask = 0xf0ffffff, 2341 + .data = &variant_stm32_sdmmcv2, 2371 2342 }, 2372 2343 /* Qualcomm variants */ 2373 2344 {
+8
drivers/mmc/host/mmci.h
··· 165 165 /* Extended status bits for the STM32 variants */ 166 166 #define MCI_STM32_BUSYD0 BIT(20) 167 167 #define MCI_STM32_BUSYD0END BIT(21) 168 + #define MCI_STM32_VSWEND BIT(25) 168 169 169 170 #define MMCICLEAR 0x038 170 171 #define MCI_CMDCRCFAILCLR (1 << 0) ··· 183 182 #define MCI_ST_SDIOITC (1 << 22) 184 183 #define MCI_ST_CEATAENDC (1 << 23) 185 184 #define MCI_ST_BUSYENDC (1 << 24) 185 + /* Extended clear bits for the STM32 variants */ 186 + #define MCI_STM32_VSWENDC BIT(25) 187 + #define MCI_STM32_CKSTOPC BIT(26) 186 188 187 189 #define MMCIMASK0 0x03c 188 190 #define MCI_CMDCRCFAILMASK (1 << 0) ··· 381 377 void (*set_clkreg)(struct mmci_host *host, unsigned int desired); 382 378 void (*set_pwrreg)(struct mmci_host *host, unsigned int pwr); 383 379 bool (*busy_complete)(struct mmci_host *host, u32 status, u32 err_msk); 380 + void (*pre_sig_volt_switch)(struct mmci_host *host); 381 + int (*post_sig_volt_switch)(struct mmci_host *host, struct mmc_ios *ios); 384 382 }; 385 383 386 384 struct mmci_host { ··· 413 407 u32 mask1_reg; 414 408 u8 vqmmc_enabled:1; 415 409 struct mmci_platform_data *plat; 410 + struct mmc_host_ops *mmc_ops; 416 411 struct mmci_host_ops *ops; 417 412 struct variant_data *variant; 413 + void *variant_priv; 418 414 struct pinctrl *pinctrl; 419 415 struct pinctrl_state *pins_opendrain; 420 416
+200 -8
drivers/mmc/host/mmci_stm32_sdmmc.c
··· 3 3 * Copyright (C) STMicroelectronics 2018 - All Rights Reserved 4 4 * Author: Ludovic.barre@st.com for STMicroelectronics. 5 5 */ 6 + #include <linux/bitfield.h> 6 7 #include <linux/delay.h> 7 8 #include <linux/dma-mapping.h> 9 + #include <linux/iopoll.h> 8 10 #include <linux/mmc/host.h> 9 11 #include <linux/mmc/card.h> 12 + #include <linux/of_address.h> 10 13 #include <linux/reset.h> 11 14 #include <linux/scatterlist.h> 12 15 #include "mmci.h" ··· 17 14 #define SDMMC_LLI_BUF_LEN PAGE_SIZE 18 15 #define SDMMC_IDMA_BURST BIT(MMCI_STM32_IDMABNDT_SHIFT) 19 16 17 + #define DLYB_CR 0x0 18 + #define DLYB_CR_DEN BIT(0) 19 + #define DLYB_CR_SEN BIT(1) 20 + 21 + #define DLYB_CFGR 0x4 22 + #define DLYB_CFGR_SEL_MASK GENMASK(3, 0) 23 + #define DLYB_CFGR_UNIT_MASK GENMASK(14, 8) 24 + #define DLYB_CFGR_LNG_MASK GENMASK(27, 16) 25 + #define DLYB_CFGR_LNGF BIT(31) 26 + 27 + #define DLYB_NB_DELAY 11 28 + #define DLYB_CFGR_SEL_MAX (DLYB_NB_DELAY + 1) 29 + #define DLYB_CFGR_UNIT_MAX 127 30 + 31 + #define DLYB_LNG_TIMEOUT_US 1000 32 + #define SDMMC_VSWEND_TIMEOUT_US 10000 33 + 20 34 struct sdmmc_lli_desc { 21 35 u32 idmalar; 22 36 u32 idmabase; 23 37 u32 idmasize; 24 38 }; 25 39 26 - struct sdmmc_priv { 40 + struct sdmmc_idma { 27 41 dma_addr_t sg_dma; 28 42 void *sg_cpu; 43 + }; 44 + 45 + struct sdmmc_dlyb { 46 + void __iomem *base; 47 + u32 unit; 48 + u32 max; 29 49 }; 30 50 31 51 static int sdmmc_idma_validate_data(struct mmci_host *host, ··· 62 36 * excepted the last element which has no constraint on idmasize 63 37 */ 64 38 for_each_sg(data->sg, sg, data->sg_len - 1, i) { 65 - if (!IS_ALIGNED(sg_dma_address(data->sg), sizeof(u32)) || 66 - !IS_ALIGNED(sg_dma_len(data->sg), SDMMC_IDMA_BURST)) { 39 + if (!IS_ALIGNED(data->sg->offset, sizeof(u32)) || 40 + !IS_ALIGNED(data->sg->length, SDMMC_IDMA_BURST)) { 67 41 dev_err(mmc_dev(host->mmc), 68 42 "unaligned scatterlist: ofst:%x length:%d\n", 69 43 data->sg->offset, data->sg->length); ··· 71 45 } 72 46 } 73 47 74 - if 
(!IS_ALIGNED(sg_dma_address(data->sg), sizeof(u32))) { 48 + if (!IS_ALIGNED(data->sg->offset, sizeof(u32))) { 75 49 dev_err(mmc_dev(host->mmc), 76 50 "unaligned last scatterlist: ofst:%x length:%d\n", 77 51 data->sg->offset, data->sg->length); ··· 118 92 119 93 static int sdmmc_idma_setup(struct mmci_host *host) 120 94 { 121 - struct sdmmc_priv *idma; 95 + struct sdmmc_idma *idma; 122 96 123 97 idma = devm_kzalloc(mmc_dev(host->mmc), sizeof(*idma), GFP_KERNEL); 124 98 if (!idma) ··· 149 123 static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl) 150 124 151 125 { 152 - struct sdmmc_priv *idma = host->dma_priv; 126 + struct sdmmc_idma *idma = host->dma_priv; 153 127 struct sdmmc_lli_desc *desc = (struct sdmmc_lli_desc *)idma->sg_cpu; 154 128 struct mmc_data *data = host->data; 155 129 struct scatterlist *sg; ··· 252 226 mmci_write_clkreg(host, clk); 253 227 } 254 228 229 + static void sdmmc_dlyb_input_ck(struct sdmmc_dlyb *dlyb) 230 + { 231 + if (!dlyb || !dlyb->base) 232 + return; 233 + 234 + /* Output clock = Input clock */ 235 + writel_relaxed(0, dlyb->base + DLYB_CR); 236 + } 237 + 255 238 static void mmci_sdmmc_set_pwrreg(struct mmci_host *host, unsigned int pwr) 256 239 { 257 240 struct mmc_ios ios = host->mmc->ios; 241 + struct sdmmc_dlyb *dlyb = host->variant_priv; 258 242 243 + /* adds OF options */ 259 244 pwr = host->pwr_reg_add; 245 + 246 + sdmmc_dlyb_input_ck(dlyb); 260 247 261 248 if (ios.power_mode == MMC_POWER_OFF) { 262 249 /* Only a reset could power-off sdmmc */ ··· 292 253 */ 293 254 writel(MCI_IRQENABLE | host->variant->start_err, 294 255 host->base + MMCIMASK0); 256 + 257 + /* preserves voltage switch bits */ 258 + pwr |= host->pwr_reg & (MCI_STM32_VSWITCHEN | 259 + MCI_STM32_VSWITCH); 295 260 296 261 /* 297 262 * After a power-cycle state, we must set the SDMMC in ··· 358 315 if (host->busy_status) { 359 316 writel_relaxed(mask & ~host->variant->busy_detect_mask, 360 317 base + MMCIMASK0); 361 - 
writel_relaxed(host->variant->busy_detect_mask, 362 - base + MMCICLEAR); 363 318 host->busy_status = 0; 364 319 } 365 320 321 + writel_relaxed(host->variant->busy_detect_mask, base + MMCICLEAR); 322 + 366 323 return true; 324 + } 325 + 326 + static void sdmmc_dlyb_set_cfgr(struct sdmmc_dlyb *dlyb, 327 + int unit, int phase, bool sampler) 328 + { 329 + u32 cfgr; 330 + 331 + writel_relaxed(DLYB_CR_SEN | DLYB_CR_DEN, dlyb->base + DLYB_CR); 332 + 333 + cfgr = FIELD_PREP(DLYB_CFGR_UNIT_MASK, unit) | 334 + FIELD_PREP(DLYB_CFGR_SEL_MASK, phase); 335 + writel_relaxed(cfgr, dlyb->base + DLYB_CFGR); 336 + 337 + if (!sampler) 338 + writel_relaxed(DLYB_CR_DEN, dlyb->base + DLYB_CR); 339 + } 340 + 341 + static int sdmmc_dlyb_lng_tuning(struct mmci_host *host) 342 + { 343 + struct sdmmc_dlyb *dlyb = host->variant_priv; 344 + u32 cfgr; 345 + int i, lng, ret; 346 + 347 + for (i = 0; i <= DLYB_CFGR_UNIT_MAX; i++) { 348 + sdmmc_dlyb_set_cfgr(dlyb, i, DLYB_CFGR_SEL_MAX, true); 349 + 350 + ret = readl_relaxed_poll_timeout(dlyb->base + DLYB_CFGR, cfgr, 351 + (cfgr & DLYB_CFGR_LNGF), 352 + 1, DLYB_LNG_TIMEOUT_US); 353 + if (ret) { 354 + dev_warn(mmc_dev(host->mmc), 355 + "delay line cfg timeout unit:%d cfgr:%d\n", 356 + i, cfgr); 357 + continue; 358 + } 359 + 360 + lng = FIELD_GET(DLYB_CFGR_LNG_MASK, cfgr); 361 + if (lng < BIT(DLYB_NB_DELAY) && lng > 0) 362 + break; 363 + } 364 + 365 + if (i > DLYB_CFGR_UNIT_MAX) 366 + return -EINVAL; 367 + 368 + dlyb->unit = i; 369 + dlyb->max = __fls(lng); 370 + 371 + return 0; 372 + } 373 + 374 + static int sdmmc_dlyb_phase_tuning(struct mmci_host *host, u32 opcode) 375 + { 376 + struct sdmmc_dlyb *dlyb = host->variant_priv; 377 + int cur_len = 0, max_len = 0, end_of_len = 0; 378 + int phase; 379 + 380 + for (phase = 0; phase <= dlyb->max; phase++) { 381 + sdmmc_dlyb_set_cfgr(dlyb, dlyb->unit, phase, false); 382 + 383 + if (mmc_send_tuning(host->mmc, opcode, NULL)) { 384 + cur_len = 0; 385 + } else { 386 + cur_len++; 387 + if (cur_len > max_len) { 
388 + max_len = cur_len; 389 + end_of_len = phase; 390 + } 391 + } 392 + } 393 + 394 + if (!max_len) { 395 + dev_err(mmc_dev(host->mmc), "no tuning point found\n"); 396 + return -EINVAL; 397 + } 398 + 399 + phase = end_of_len - max_len / 2; 400 + sdmmc_dlyb_set_cfgr(dlyb, dlyb->unit, phase, false); 401 + 402 + dev_dbg(mmc_dev(host->mmc), "unit:%d max_dly:%d phase:%d\n", 403 + dlyb->unit, dlyb->max, phase); 404 + 405 + return 0; 406 + } 407 + 408 + static int sdmmc_execute_tuning(struct mmc_host *mmc, u32 opcode) 409 + { 410 + struct mmci_host *host = mmc_priv(mmc); 411 + struct sdmmc_dlyb *dlyb = host->variant_priv; 412 + 413 + if (!dlyb || !dlyb->base) 414 + return -EINVAL; 415 + 416 + if (sdmmc_dlyb_lng_tuning(host)) 417 + return -EINVAL; 418 + 419 + return sdmmc_dlyb_phase_tuning(host, opcode); 420 + } 421 + 422 + static void sdmmc_pre_sig_volt_vswitch(struct mmci_host *host) 423 + { 424 + /* clear the voltage switch completion flag */ 425 + writel_relaxed(MCI_STM32_VSWENDC, host->base + MMCICLEAR); 426 + /* enable Voltage switch procedure */ 427 + mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCHEN); 428 + } 429 + 430 + static int sdmmc_post_sig_volt_switch(struct mmci_host *host, 431 + struct mmc_ios *ios) 432 + { 433 + unsigned long flags; 434 + u32 status; 435 + int ret = 0; 436 + 437 + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) { 438 + spin_lock_irqsave(&host->lock, flags); 439 + mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCH); 440 + spin_unlock_irqrestore(&host->lock, flags); 441 + 442 + /* wait voltage switch completion while 10ms */ 443 + ret = readl_relaxed_poll_timeout(host->base + MMCISTATUS, 444 + status, 445 + (status & MCI_STM32_VSWEND), 446 + 10, SDMMC_VSWEND_TIMEOUT_US); 447 + 448 + writel_relaxed(MCI_STM32_VSWENDC | MCI_STM32_CKSTOPC, 449 + host->base + MMCICLEAR); 450 + mmci_write_pwrreg(host, host->pwr_reg & 451 + ~(MCI_STM32_VSWITCHEN | MCI_STM32_VSWITCH)); 452 + } 453 + 454 + return ret; 367 455 } 368 456 369 457 
static struct mmci_host_ops sdmmc_variant_ops = { ··· 508 334 .set_clkreg = mmci_sdmmc_set_clkreg, 509 335 .set_pwrreg = mmci_sdmmc_set_pwrreg, 510 336 .busy_complete = sdmmc_busy_complete, 337 + .pre_sig_volt_switch = sdmmc_pre_sig_volt_vswitch, 338 + .post_sig_volt_switch = sdmmc_post_sig_volt_switch, 511 339 }; 512 340 513 341 void sdmmc_variant_init(struct mmci_host *host) 514 342 { 343 + struct device_node *np = host->mmc->parent->of_node; 344 + void __iomem *base_dlyb; 345 + struct sdmmc_dlyb *dlyb; 346 + 515 347 host->ops = &sdmmc_variant_ops; 348 + 349 + base_dlyb = devm_of_iomap(mmc_dev(host->mmc), np, 1, NULL); 350 + if (IS_ERR(base_dlyb)) 351 + return; 352 + 353 + dlyb = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dlyb), GFP_KERNEL); 354 + if (!dlyb) 355 + return; 356 + 357 + dlyb->base = base_dlyb; 358 + host->variant_priv = dlyb; 359 + host->mmc_ops->execute_tuning = sdmmc_execute_tuning; 516 360 }
+41
drivers/mmc/host/mtk-sd.c
··· 128 128 #define MSDC_PS_CDSTS (0x1 << 1) /* R */ 129 129 #define MSDC_PS_CDDEBOUNCE (0xf << 12) /* RW */ 130 130 #define MSDC_PS_DAT (0xff << 16) /* R */ 131 + #define MSDC_PS_DATA1 (0x1 << 17) /* R */ 131 132 #define MSDC_PS_CMD (0x1 << 24) /* R */ 132 133 #define MSDC_PS_WP (0x1 << 31) /* R */ 133 134 ··· 362 361 363 362 struct mtk_mmc_compatible { 364 363 u8 clk_div_bits; 364 + bool recheck_sdio_irq; 365 365 bool hs400_tune; /* only used for MT8173 */ 366 366 u32 pad_tune_reg; 367 367 bool async_fifo; ··· 438 436 439 437 static const struct mtk_mmc_compatible mt8135_compat = { 440 438 .clk_div_bits = 8, 439 + .recheck_sdio_irq = false, 441 440 .hs400_tune = false, 442 441 .pad_tune_reg = MSDC_PAD_TUNE, 443 442 .async_fifo = false, ··· 451 448 452 449 static const struct mtk_mmc_compatible mt8173_compat = { 453 450 .clk_div_bits = 8, 451 + .recheck_sdio_irq = true, 454 452 .hs400_tune = true, 455 453 .pad_tune_reg = MSDC_PAD_TUNE, 456 454 .async_fifo = false, ··· 464 460 465 461 static const struct mtk_mmc_compatible mt8183_compat = { 466 462 .clk_div_bits = 12, 463 + .recheck_sdio_irq = false, 467 464 .hs400_tune = false, 468 465 .pad_tune_reg = MSDC_PAD_TUNE0, 469 466 .async_fifo = true, ··· 477 472 478 473 static const struct mtk_mmc_compatible mt2701_compat = { 479 474 .clk_div_bits = 12, 475 + .recheck_sdio_irq = false, 480 476 .hs400_tune = false, 481 477 .pad_tune_reg = MSDC_PAD_TUNE0, 482 478 .async_fifo = true, ··· 490 484 491 485 static const struct mtk_mmc_compatible mt2712_compat = { 492 486 .clk_div_bits = 12, 487 + .recheck_sdio_irq = false, 493 488 .hs400_tune = false, 494 489 .pad_tune_reg = MSDC_PAD_TUNE0, 495 490 .async_fifo = true, ··· 503 496 504 497 static const struct mtk_mmc_compatible mt7622_compat = { 505 498 .clk_div_bits = 12, 499 + .recheck_sdio_irq = false, 506 500 .hs400_tune = false, 507 501 .pad_tune_reg = MSDC_PAD_TUNE0, 508 502 .async_fifo = true, ··· 516 508 517 509 static const struct mtk_mmc_compatible mt8516_compat = { 
518 510 .clk_div_bits = 12, 511 + .recheck_sdio_irq = false, 519 512 .hs400_tune = false, 520 513 .pad_tune_reg = MSDC_PAD_TUNE0, 521 514 .async_fifo = true, ··· 527 518 528 519 static const struct mtk_mmc_compatible mt7620_compat = { 529 520 .clk_div_bits = 8, 521 + .recheck_sdio_irq = false, 530 522 .hs400_tune = false, 531 523 .pad_tune_reg = MSDC_PAD_TUNE, 532 524 .async_fifo = false, ··· 601 591 602 592 static void msdc_cmd_next(struct msdc_host *host, 603 593 struct mmc_request *mrq, struct mmc_command *cmd); 594 + static void __msdc_enable_sdio_irq(struct msdc_host *host, int enb); 604 595 605 596 static const u32 cmd_ints_mask = MSDC_INTEN_CMDRDY | MSDC_INTEN_RSPCRCERR | 606 597 MSDC_INTEN_CMDTMO | MSDC_INTEN_ACMDRDY | ··· 1018 1007 return cmd->error; 1019 1008 } 1020 1009 1010 + /** 1011 + * msdc_recheck_sdio_irq - recheck whether the SDIO irq is lost 1012 + * 1013 + * Host controller may lost interrupt in some special case. 1014 + * Add SDIO irq recheck mechanism to make sure all interrupts 1015 + * can be processed immediately 1016 + * 1017 + */ 1018 + static void msdc_recheck_sdio_irq(struct msdc_host *host) 1019 + { 1020 + u32 reg_int, reg_inten, reg_ps; 1021 + 1022 + if (host->mmc->caps & MMC_CAP_SDIO_IRQ) { 1023 + reg_inten = readl(host->base + MSDC_INTEN); 1024 + if (reg_inten & MSDC_INTEN_SDIOIRQ) { 1025 + reg_int = readl(host->base + MSDC_INT); 1026 + reg_ps = readl(host->base + MSDC_PS); 1027 + if (!(reg_int & MSDC_INT_SDIOIRQ || 1028 + reg_ps & MSDC_PS_DATA1)) { 1029 + __msdc_enable_sdio_irq(host, 0); 1030 + sdio_signal_irq(host->mmc); 1031 + } 1032 + } 1033 + } 1034 + } 1035 + 1021 1036 static void msdc_track_cmd_data(struct msdc_host *host, 1022 1037 struct mmc_command *cmd, struct mmc_data *data) 1023 1038 { ··· 1072 1035 if (host->error) 1073 1036 msdc_reset_hw(host); 1074 1037 mmc_request_done(host->mmc, mrq); 1038 + if (host->dev_comp->recheck_sdio_irq) 1039 + msdc_recheck_sdio_irq(host); 1075 1040 } 1076 1041 1077 1042 /* returns true if 
command is fully handled; returns false otherwise */ ··· 1432 1393 if (enb) { 1433 1394 sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ); 1434 1395 sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE); 1396 + if (host->dev_comp->recheck_sdio_irq) 1397 + msdc_recheck_sdio_irq(host); 1435 1398 } else { 1436 1399 sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ); 1437 1400 sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
+6
drivers/mmc/host/renesas_sdhi.h
··· 57 57 void __iomem *scc_ctl; 58 58 u32 scc_tappos; 59 59 u32 scc_tappos_hs400; 60 + bool doing_tune; 61 + 62 + /* Tuning values: 1 for success, 0 for failure */ 63 + DECLARE_BITMAP(taps, BITS_PER_LONG); 64 + unsigned int tap_num; 65 + unsigned long tap_set; 60 66 }; 61 67 62 68 #define host_to_priv(host) \
+135 -48
drivers/mmc/host/renesas_sdhi_core.c
··· 250 250 #define SH_MOBILE_SDHI_SCC_CKSEL 0x006 251 251 #define SH_MOBILE_SDHI_SCC_RVSCNTL 0x008 252 252 #define SH_MOBILE_SDHI_SCC_RVSREQ 0x00A 253 + #define SH_MOBILE_SDHI_SCC_SMPCMP 0x00C 253 254 #define SH_MOBILE_SDHI_SCC_TMPPORT2 0x00E 254 255 255 - /* Definitions for values the SH_MOBILE_SDHI_SCC_DTCNTL register */ 256 256 #define SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN BIT(0) 257 257 #define SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT 16 258 258 #define SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_MASK 0xff 259 259 260 - /* Definitions for values the SH_MOBILE_SDHI_SCC_CKSEL register */ 261 260 #define SH_MOBILE_SDHI_SCC_CKSEL_DTSEL BIT(0) 262 - /* Definitions for values the SH_MOBILE_SDHI_SCC_RVSCNTL register */ 261 + 263 262 #define SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN BIT(0) 264 - /* Definitions for values the SH_MOBILE_SDHI_SCC_RVSREQ register */ 263 + 264 + #define SH_MOBILE_SDHI_SCC_RVSREQ_REQTAPDOWN BIT(0) 265 + #define SH_MOBILE_SDHI_SCC_RVSREQ_REQTAPUP BIT(1) 265 266 #define SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR BIT(2) 266 - /* Definitions for values the SH_MOBILE_SDHI_SCC_TMPPORT2 register */ 267 + 268 + #define SH_MOBILE_SDHI_SCC_SMPCMP_CMD_REQDOWN BIT(8) 269 + #define SH_MOBILE_SDHI_SCC_SMPCMP_CMD_REQUP BIT(24) 270 + #define SH_MOBILE_SDHI_SCC_SMPCMP_CMD_ERR (BIT(8) | BIT(24)) 271 + 267 272 #define SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL BIT(4) 268 273 #define SH_MOBILE_SDHI_SCC_TMPPORT2_HS400EN BIT(31) 269 274 ··· 321 316 SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_MASK; 322 317 } 323 318 324 - static void renesas_sdhi_prepare_tuning(struct tmio_mmc_host *host, 325 - unsigned long tap) 319 + static void renesas_sdhi_hs400_complete(struct mmc_host *mmc) 326 320 { 327 - struct renesas_sdhi *priv = host_to_priv(host); 328 - 329 - /* Set sampling clock position */ 330 - sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, tap); 331 - } 332 - 333 - static void renesas_sdhi_hs400_complete(struct tmio_mmc_host *host) 334 - { 321 + struct tmio_mmc_host *host = mmc_priv(mmc); 335 322 struct 
renesas_sdhi *priv = host_to_priv(host); 336 323 337 324 sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN & ··· 335 338 336 339 sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF, 337 340 priv->scc_tappos_hs400); 341 + 342 + /* Gen3 can't do automatic tap correction with HS400, so disable it */ 343 + if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN3_SDMMC) 344 + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL, 345 + ~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN & 346 + sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL)); 338 347 339 348 sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2, 340 349 (SH_MOBILE_SDHI_SCC_TMPPORT2_HS400EN | ··· 355 352 356 353 if (priv->quirks && priv->quirks->hs400_4taps) 357 354 sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, 358 - host->tap_set / 2); 355 + priv->tap_set / 2); 359 356 360 357 sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL, 361 358 SH_MOBILE_SDHI_SCC_CKSEL_DTSEL | ··· 377 374 SH_MOBILE_SDHI_SCC_CKSEL)); 378 375 } 379 376 380 - static void renesas_sdhi_disable_scc(struct tmio_mmc_host *host) 377 + static void renesas_sdhi_disable_scc(struct mmc_host *mmc) 381 378 { 379 + struct tmio_mmc_host *host = mmc_priv(mmc); 382 380 struct renesas_sdhi *priv = host_to_priv(host); 383 381 384 382 renesas_sdhi_reset_scc(host, priv); ··· 414 410 sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); 415 411 } 416 412 417 - static void renesas_sdhi_prepare_hs400_tuning(struct tmio_mmc_host *host) 413 + static int renesas_sdhi_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios) 418 414 { 415 + struct tmio_mmc_host *host = mmc_priv(mmc); 416 + 419 417 renesas_sdhi_reset_hs400_mode(host, host_to_priv(host)); 418 + return 0; 420 419 } 421 420 422 421 #define SH_MOBILE_SDHI_MAX_TAP 3 ··· 433 426 unsigned long ntap; /* temporary counter of tuning success */ 434 427 unsigned long i; 435 428 429 + priv->doing_tune = false; 430 + 436 431 /* Clear SCC_RVSREQ */ 437 432 sd_scc_write32(host, priv, 
SH_MOBILE_SDHI_SCC_RVSREQ, 0); 438 433 ··· 443 434 * result requiring the tap to be good in both runs before 444 435 * considering it for tuning selection. 445 436 */ 446 - for (i = 0; i < host->tap_num * 2; i++) { 447 - int offset = host->tap_num * (i < host->tap_num ? 1 : -1); 437 + for (i = 0; i < priv->tap_num * 2; i++) { 438 + int offset = priv->tap_num * (i < priv->tap_num ? 1 : -1); 448 439 449 - if (!test_bit(i, host->taps)) 450 - clear_bit(i + offset, host->taps); 440 + if (!test_bit(i, priv->taps)) 441 + clear_bit(i + offset, priv->taps); 451 442 } 452 443 453 444 /* ··· 459 450 ntap = 0; 460 451 tap_start = 0; 461 452 tap_end = 0; 462 - for (i = 0; i < host->tap_num * 2; i++) { 463 - if (test_bit(i, host->taps)) { 453 + for (i = 0; i < priv->tap_num * 2; i++) { 454 + if (test_bit(i, priv->taps)) { 464 455 ntap++; 465 456 } else { 466 457 if (ntap > tap_cnt) { ··· 479 470 } 480 471 481 472 if (tap_cnt >= SH_MOBILE_SDHI_MAX_TAP) 482 - host->tap_set = (tap_start + tap_end) / 2 % host->tap_num; 473 + priv->tap_set = (tap_start + tap_end) / 2 % priv->tap_num; 483 474 else 484 475 return -EIO; 485 476 486 477 /* Set SCC */ 487 - sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, host->tap_set); 478 + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, priv->tap_set); 488 479 489 480 /* Enable auto re-tuning */ 490 481 sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL, ··· 492 483 sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL)); 493 484 494 485 return 0; 486 + } 487 + 488 + static int renesas_sdhi_execute_tuning(struct tmio_mmc_host *host, u32 opcode) 489 + { 490 + struct renesas_sdhi *priv = host_to_priv(host); 491 + int i; 492 + 493 + priv->tap_num = renesas_sdhi_init_tuning(host); 494 + if (!priv->tap_num) 495 + return 0; /* Tuning is not supported */ 496 + 497 + if (priv->tap_num * 2 >= sizeof(priv->taps) * BITS_PER_BYTE) { 498 + dev_err(&host->pdev->dev, 499 + "Too many taps, please update 'taps' in tmio_mmc_host!\n"); 500 + return -EINVAL; 
501 + } 502 + 503 + priv->doing_tune = true; 504 + bitmap_zero(priv->taps, priv->tap_num * 2); 505 + 506 + /* Issue CMD19 twice for each tap */ 507 + for (i = 0; i < 2 * priv->tap_num; i++) { 508 + /* Set sampling clock position */ 509 + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, i % priv->tap_num); 510 + 511 + if (mmc_send_tuning(host->mmc, opcode, NULL) == 0) 512 + set_bit(i, priv->taps); 513 + } 514 + 515 + return renesas_sdhi_select_tuning(host); 516 + } 517 + 518 + static bool renesas_sdhi_manual_correction(struct tmio_mmc_host *host, bool use_4tap) 519 + { 520 + struct renesas_sdhi *priv = host_to_priv(host); 521 + unsigned long new_tap = priv->tap_set; 522 + u32 val; 523 + 524 + val = sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ); 525 + if (!val) 526 + return false; 527 + 528 + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0); 529 + 530 + /* Change TAP position according to correction status */ 531 + if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN3_SDMMC && 532 + host->mmc->ios.timing == MMC_TIMING_MMC_HS400) { 533 + /* 534 + * With HS400, the DAT signal is based on DS, not CLK. 535 + * Therefore, use only CMD status. 
536 + */ 537 + u32 smpcmp = sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_SMPCMP) & 538 + SH_MOBILE_SDHI_SCC_SMPCMP_CMD_ERR; 539 + if (!smpcmp) 540 + return false; /* no error in CMD signal */ 541 + else if (smpcmp == SH_MOBILE_SDHI_SCC_SMPCMP_CMD_REQUP) 542 + new_tap++; 543 + else if (smpcmp == SH_MOBILE_SDHI_SCC_SMPCMP_CMD_REQDOWN) 544 + new_tap--; 545 + else 546 + return true; /* need retune */ 547 + } else { 548 + if (val & SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR) 549 + return true; /* need retune */ 550 + else if (val & SH_MOBILE_SDHI_SCC_RVSREQ_REQTAPUP) 551 + new_tap++; 552 + else if (val & SH_MOBILE_SDHI_SCC_RVSREQ_REQTAPDOWN) 553 + new_tap--; 554 + else 555 + return false; 556 + } 557 + 558 + priv->tap_set = (new_tap % priv->tap_num); 559 + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, 560 + priv->tap_set / (use_4tap ? 2 : 1)); 561 + 562 + return false; 563 + } 564 + 565 + static bool renesas_sdhi_auto_correction(struct tmio_mmc_host *host) 566 + { 567 + struct renesas_sdhi *priv = host_to_priv(host); 568 + 569 + /* Check SCC error */ 570 + if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ) & 571 + SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR) { 572 + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0); 573 + return true; 574 + } 575 + 576 + return false; 495 577 } 496 578 497 579 static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host) ··· 599 499 !(host->mmc->ios.timing == MMC_TIMING_MMC_HS400 && !use_4tap)) 600 500 return false; 601 501 602 - if (mmc_doing_retune(host->mmc)) 502 + if (mmc_doing_retune(host->mmc) || priv->doing_tune) 603 503 return false; 604 504 605 - /* Check SCC error */ 606 505 if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL) & 607 - SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN && 608 - sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ) & 609 - SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR) { 610 - /* Clear SCC error */ 611 - sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0); 612 - return true; 613 - } 506 + 
SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN) 507 + return renesas_sdhi_auto_correction(host); 614 508 615 - return false; 509 + return renesas_sdhi_manual_correction(host, use_4tap); 616 510 } 617 511 618 512 static void renesas_sdhi_hw_reset(struct tmio_mmc_host *host) ··· 620 526 621 527 sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN | 622 528 sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); 623 - 624 - sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL, 625 - ~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN & 626 - sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL)); 627 529 628 530 sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL, 629 531 ~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN & ··· 901 811 if (!hit) 902 812 dev_warn(&host->pdev->dev, "Unknown clock rate for tuning\n"); 903 813 904 - host->init_tuning = renesas_sdhi_init_tuning; 905 - host->prepare_tuning = renesas_sdhi_prepare_tuning; 906 - host->select_tuning = renesas_sdhi_select_tuning; 907 - host->check_scc_error = renesas_sdhi_check_scc_error; 908 - host->prepare_hs400_tuning = 909 - renesas_sdhi_prepare_hs400_tuning; 910 - host->hs400_downgrade = renesas_sdhi_disable_scc; 911 - host->hs400_complete = renesas_sdhi_hs400_complete; 814 + host->execute_tuning = renesas_sdhi_execute_tuning; 815 + host->check_retune = renesas_sdhi_check_scc_error; 816 + host->ops.prepare_hs400_tuning = renesas_sdhi_prepare_hs400_tuning; 817 + host->ops.hs400_downgrade = renesas_sdhi_disable_scc; 818 + host->ops.hs400_complete = renesas_sdhi_hs400_complete; 912 819 } 913 820 914 821 num_irqs = platform_irq_count(pdev);
+1 -1
drivers/mmc/host/sdhci-acpi.c
··· 75 75 bool use_runtime_pm; 76 76 bool is_intel; 77 77 bool reset_signal_volt_on_suspend; 78 - unsigned long private[0] ____cacheline_aligned; 78 + unsigned long private[] ____cacheline_aligned; 79 79 }; 80 80 81 81 enum {
+1 -1
drivers/mmc/host/sdhci-cadence.c
··· 68 68 void __iomem *hrs_addr; 69 69 bool enhanced_strobe; 70 70 unsigned int nr_phy_params; 71 - struct sdhci_cdns_phy_param phy_params[0]; 71 + struct sdhci_cdns_phy_param phy_params[]; 72 72 }; 73 73 74 74 struct sdhci_cdns_phy_cfg {
+146 -21
drivers/mmc/host/sdhci-esdhc-imx.c
··· 9 9 */ 10 10 11 11 #include <linux/io.h> 12 + #include <linux/iopoll.h> 12 13 #include <linux/delay.h> 13 14 #include <linux/err.h> 14 15 #include <linux/clk.h> ··· 74 73 #define ESDHC_STROBE_DLL_CTRL 0x70 75 74 #define ESDHC_STROBE_DLL_CTRL_ENABLE (1 << 0) 76 75 #define ESDHC_STROBE_DLL_CTRL_RESET (1 << 1) 76 + #define ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_DEFAULT 0x7 77 77 #define ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT 3 78 78 #define ESDHC_STROBE_DLL_CTRL_SLV_UPDATE_INT_DEFAULT (4 << 20) 79 79 ··· 162 160 #define ESDHC_FLAG_CQHCI BIT(12) 163 161 /* need request pmqos during low power */ 164 162 #define ESDHC_FLAG_PMQOS BIT(13) 163 + /* The IP state got lost in low power mode */ 164 + #define ESDHC_FLAG_STATE_LOST_IN_LPMODE BIT(14) 165 + /* The IP lost clock rate in PM_RUNTIME */ 166 + #define ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME BIT(15) 167 + /* 168 + * The IP do not support the ACMD23 feature completely when use ADMA mode. 169 + * In ADMA mode, it only use the 16 bit block count of the register 0x4 170 + * (BLOCK_ATT) as the CMD23's argument for ACMD23 mode, which means it will 171 + * ignore the upper 16 bit of the CMD23's argument. This will block the reliable 172 + * write operation in RPMB, because RPMB reliable write need to set the bit31 173 + * of the CMD23's argument. 174 + * imx6qpdl/imx6sx/imx6sl/imx7d has this limitation only for ADMA mode, SDMA 175 + * do not has this limitation. so when these SoC use ADMA mode, it need to 176 + * disable the ACMD23 feature. 
177 + */ 178 + #define ESDHC_FLAG_BROKEN_AUTO_CMD23 BIT(16) 165 179 166 180 struct esdhc_soc_data { 167 181 u32 flags; ··· 200 182 }; 201 183 202 184 static const struct esdhc_soc_data usdhc_imx6q_data = { 203 - .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING, 185 + .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING 186 + | ESDHC_FLAG_BROKEN_AUTO_CMD23, 204 187 }; 205 188 206 189 static const struct esdhc_soc_data usdhc_imx6sl_data = { 207 190 .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING 208 191 | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_ERR004536 209 - | ESDHC_FLAG_HS200, 192 + | ESDHC_FLAG_HS200 193 + | ESDHC_FLAG_BROKEN_AUTO_CMD23, 194 + }; 195 + 196 + static const struct esdhc_soc_data usdhc_imx6sll_data = { 197 + .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING 198 + | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200 199 + | ESDHC_FLAG_STATE_LOST_IN_LPMODE, 210 200 }; 211 201 212 202 static const struct esdhc_soc_data usdhc_imx6sx_data = { 213 203 .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING 214 - | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200, 204 + | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200 205 + | ESDHC_FLAG_STATE_LOST_IN_LPMODE 206 + | ESDHC_FLAG_BROKEN_AUTO_CMD23, 215 207 }; 216 208 217 209 static const struct esdhc_soc_data usdhc_imx6ull_data = { 218 210 .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING 219 211 | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200 220 - | ESDHC_FLAG_ERR010450, 212 + | ESDHC_FLAG_ERR010450 213 + | ESDHC_FLAG_STATE_LOST_IN_LPMODE, 221 214 }; 222 215 223 216 static const struct esdhc_soc_data usdhc_imx7d_data = { 224 217 .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING 225 218 | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200 226 - | ESDHC_FLAG_HS400, 219 + | ESDHC_FLAG_HS400 220 + | ESDHC_FLAG_STATE_LOST_IN_LPMODE 221 + | ESDHC_FLAG_BROKEN_AUTO_CMD23, 227 222 }; 228 223 229 224 static struct esdhc_soc_data usdhc_imx7ulp_data = { 230 225 .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING 231 226 | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200 232 - | 
ESDHC_FLAG_PMQOS | ESDHC_FLAG_HS400, 227 + | ESDHC_FLAG_PMQOS | ESDHC_FLAG_HS400 228 + | ESDHC_FLAG_STATE_LOST_IN_LPMODE, 233 229 }; 234 230 235 231 static struct esdhc_soc_data usdhc_imx8qxp_data = { 236 232 .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING 237 233 | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200 238 234 | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES 239 - | ESDHC_FLAG_CQHCI, 235 + | ESDHC_FLAG_CQHCI 236 + | ESDHC_FLAG_STATE_LOST_IN_LPMODE 237 + | ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME, 238 + }; 239 + 240 + static struct esdhc_soc_data usdhc_imx8mm_data = { 241 + .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING 242 + | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200 243 + | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES 244 + | ESDHC_FLAG_CQHCI 245 + | ESDHC_FLAG_STATE_LOST_IN_LPMODE, 240 246 }; 241 247 242 248 struct pltfm_imx_data { ··· 306 264 { .compatible = "fsl,imx53-esdhc", .data = &esdhc_imx53_data, }, 307 265 { .compatible = "fsl,imx6sx-usdhc", .data = &usdhc_imx6sx_data, }, 308 266 { .compatible = "fsl,imx6sl-usdhc", .data = &usdhc_imx6sl_data, }, 267 + { .compatible = "fsl,imx6sll-usdhc", .data = &usdhc_imx6sll_data, }, 309 268 { .compatible = "fsl,imx6q-usdhc", .data = &usdhc_imx6q_data, }, 310 269 { .compatible = "fsl,imx6ull-usdhc", .data = &usdhc_imx6ull_data, }, 311 270 { .compatible = "fsl,imx7d-usdhc", .data = &usdhc_imx7d_data, }, 312 271 { .compatible = "fsl,imx7ulp-usdhc", .data = &usdhc_imx7ulp_data, }, 313 272 { .compatible = "fsl,imx8qxp-usdhc", .data = &usdhc_imx8qxp_data, }, 273 + { .compatible = "fsl,imx8mm-usdhc", .data = &usdhc_imx8mm_data, }, 314 274 { /* sentinel */ } 315 275 }; 316 276 MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids); ··· 343 299 u32 shift = (reg & 0x3) * 8; 344 300 345 301 writel(((readl(base) & ~(mask << shift)) | (val << shift)), base); 302 + } 303 + 304 + static inline void esdhc_wait_for_card_clock_gate_off(struct sdhci_host *host) 305 + { 306 + u32 present_state; 307 + int ret; 308 + 309 + ret = readl_poll_timeout(host->ioaddr 
+ ESDHC_PRSSTAT, present_state, 310 + (present_state & ESDHC_CLOCK_GATE_OFF), 2, 100); 311 + if (ret == -ETIMEDOUT) 312 + dev_warn(mmc_dev(host->mmc), "%s: card clock still not gate off in 100us!.\n", __func__); 346 313 } 347 314 348 315 static u32 esdhc_readl_le(struct sdhci_host *host, int reg) ··· 569 514 else 570 515 new_val &= ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON; 571 516 writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC); 517 + if (!(new_val & ESDHC_VENDOR_SPEC_FRC_SDCLK_ON)) 518 + esdhc_wait_for_card_clock_gate_off(host); 572 519 return; 573 520 case SDHCI_HOST_CONTROL2: 574 521 new_val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); ··· 639 582 * For DMA access restore the levels to default value. 640 583 */ 641 584 m = readl(host->ioaddr + ESDHC_WTMK_LVL); 642 - if (val & SDHCI_TRNS_DMA) 585 + if (val & SDHCI_TRNS_DMA) { 643 586 wml = ESDHC_WTMK_LVL_WML_VAL_DEF; 644 - else 587 + } else { 588 + u8 ctrl; 645 589 wml = ESDHC_WTMK_LVL_WML_VAL_MAX; 590 + 591 + /* 592 + * Since already disable DMA mode, so also need 593 + * to clear the DMASEL. Otherwise, for standard 594 + * tuning, when send tuning command, usdhc will 595 + * still prefetch the ADMA script from wrong 596 + * DMA address, then we will see IOMMU report 597 + * some error which show lack of TLB mapping. 598 + */ 599 + ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 600 + ctrl &= ~SDHCI_CTRL_DMA_MASK; 601 + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 602 + } 646 603 m &= ~(ESDHC_WTMK_LVL_RD_WML_MASK | 647 604 ESDHC_WTMK_LVL_WR_WML_MASK); 648 605 m |= (wml << ESDHC_WTMK_LVL_RD_WML_SHIFT) | ··· 813 742 int ddr_pre_div = imx_data->is_ddr ? 
2 : 1; 814 743 int pre_div = 1; 815 744 int div = 1; 745 + int ret; 816 746 u32 temp, val; 817 747 818 748 if (esdhc_is_usdhc(imx_data)) { 819 749 val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); 820 750 writel(val & ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON, 821 751 host->ioaddr + ESDHC_VENDOR_SPEC); 752 + esdhc_wait_for_card_clock_gate_off(host); 822 753 } 823 754 824 755 if (clock == 0) { ··· 875 802 | (pre_div << ESDHC_PREDIV_SHIFT)); 876 803 sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); 877 804 805 + /* need to wait the bit 3 of the PRSSTAT to be set, make sure card clock is stable */ 806 + ret = readl_poll_timeout(host->ioaddr + ESDHC_PRSSTAT, temp, 807 + (temp & ESDHC_CLOCK_STABLE), 2, 100); 808 + if (ret == -ETIMEDOUT) 809 + dev_warn(mmc_dev(host->mmc), "card clock still not stable in 100us!.\n"); 810 + 878 811 if (esdhc_is_usdhc(imx_data)) { 879 812 val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); 880 813 writel(val | ESDHC_VENDOR_SPEC_FRC_SDCLK_ON, 881 814 host->ioaddr + ESDHC_VENDOR_SPEC); 882 815 } 883 816 884 - mdelay(1); 885 817 } 886 818 887 819 static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host) ··· 1061 983 */ 1062 984 static void esdhc_set_strobe_dll(struct sdhci_host *host) 1063 985 { 986 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 987 + struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); 988 + u32 strobe_delay; 1064 989 u32 v; 990 + int ret; 1065 991 1066 992 /* disable clock before enabling strobe dll */ 1067 993 writel(readl(host->ioaddr + ESDHC_VENDOR_SPEC) & 1068 994 ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON, 1069 995 host->ioaddr + ESDHC_VENDOR_SPEC); 996 + esdhc_wait_for_card_clock_gate_off(host); 1070 997 1071 998 /* force a reset on strobe dll */ 1072 999 writel(ESDHC_STROBE_DLL_CTRL_RESET, ··· 1083 1000 * enable strobe dll ctrl and adjust the delay target 1084 1001 * for the uSDHC loopback read clock 1085 1002 */ 1003 + if (imx_data->boarddata.strobe_dll_delay_target) 1004 + strobe_delay = 
imx_data->boarddata.strobe_dll_delay_target; 1005 + else 1006 + strobe_delay = ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_DEFAULT; 1086 1007 v = ESDHC_STROBE_DLL_CTRL_ENABLE | 1087 1008 ESDHC_STROBE_DLL_CTRL_SLV_UPDATE_INT_DEFAULT | 1088 - (7 << ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT); 1009 + (strobe_delay << ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT); 1089 1010 writel(v, host->ioaddr + ESDHC_STROBE_DLL_CTRL); 1090 - /* wait 5us to make sure strobe dll status register stable */ 1091 - udelay(5); 1092 - v = readl(host->ioaddr + ESDHC_STROBE_DLL_STATUS); 1093 - if (!(v & ESDHC_STROBE_DLL_STS_REF_LOCK)) 1011 + 1012 + /* wait max 50us to get the REF/SLV lock */ 1013 + ret = readl_poll_timeout(host->ioaddr + ESDHC_STROBE_DLL_STATUS, v, 1014 + ((v & ESDHC_STROBE_DLL_STS_REF_LOCK) && (v & ESDHC_STROBE_DLL_STS_SLV_LOCK)), 1, 50); 1015 + if (ret == -ETIMEDOUT) 1094 1016 dev_warn(mmc_dev(host->mmc), 1095 - "warning! HS400 strobe DLL status REF not lock!\n"); 1096 - if (!(v & ESDHC_STROBE_DLL_STS_SLV_LOCK)) 1097 - dev_warn(mmc_dev(host->mmc), 1098 - "warning! HS400 strobe DLL status SLV not lock!\n"); 1017 + "warning! HS400 strobe DLL status REF/SLV not lock in 50us, STROBE DLL status is %x!\n", v); 1099 1018 } 1100 1019 1101 1020 static void esdhc_reset_tuning(struct sdhci_host *host) ··· 1247 1162 { 1248 1163 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1249 1164 struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); 1165 + struct cqhci_host *cq_host = host->mmc->cqe_private; 1250 1166 int tmp; 1251 1167 1252 1168 if (esdhc_is_usdhc(imx_data)) { ··· 1323 1237 tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL); 1324 1238 tmp &= ~ESDHC_STD_TUNING_EN; 1325 1239 writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL); 1240 + } 1241 + 1242 + /* 1243 + * On i.MX8MM, we are running Dual Linux OS, with 1st Linux using SD Card 1244 + * as rootfs storage, 2nd Linux using eMMC as rootfs storage. We let the 1245 + * the 1st linux configure power/clock for the 2nd Linux. 
1246 + * 1247 + * When the 2nd Linux is booting into rootfs stage, we let the 1st Linux 1248 + * to destroy the 2nd linux, then restart the 2nd linux, we met SDHCI dump. 1249 + * After we clear the pending interrupt and halt CQCTL, issue gone. 1250 + */ 1251 + if (cq_host) { 1252 + tmp = cqhci_readl(cq_host, CQHCI_IS); 1253 + cqhci_writel(cq_host, tmp, CQHCI_IS); 1254 + cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL); 1326 1255 } 1327 1256 } 1328 1257 } ··· 1429 1328 of_property_read_u32(np, "fsl,tuning-start-tap", 1430 1329 &boarddata->tuning_start_tap); 1431 1330 1331 + of_property_read_u32(np, "fsl,strobe-dll-delay-target", 1332 + &boarddata->strobe_dll_delay_target); 1432 1333 if (of_find_property(np, "no-1-8-v", NULL)) 1433 1334 host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; 1434 1335 ··· 1590 1487 imx_data->pinctrl = devm_pinctrl_get(&pdev->dev); 1591 1488 if (IS_ERR(imx_data->pinctrl)) { 1592 1489 err = PTR_ERR(imx_data->pinctrl); 1593 - goto disable_ahb_clk; 1490 + dev_warn(mmc_dev(host->mmc), "could not get pinctrl\n"); 1594 1491 } 1595 1492 1596 1493 if (esdhc_is_usdhc(imx_data)) { ··· 1620 1517 1621 1518 if (imx_data->socdata->flags & ESDHC_FLAG_HS400) 1622 1519 host->quirks2 |= SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400; 1520 + 1521 + if (imx_data->socdata->flags & ESDHC_FLAG_BROKEN_AUTO_CMD23) 1522 + host->quirks2 |= SDHCI_QUIRK2_ACMD23_BROKEN; 1623 1523 1624 1524 if (imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) { 1625 1525 host->mmc->caps2 |= MMC_CAP2_HS400_ES; ··· 1711 1605 static int sdhci_esdhc_suspend(struct device *dev) 1712 1606 { 1713 1607 struct sdhci_host *host = dev_get_drvdata(dev); 1608 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1609 + struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); 1714 1610 int ret; 1715 1611 1716 1612 if (host->mmc->caps2 & MMC_CAP2_CQE) { ··· 1721 1613 return ret; 1722 1614 } 1723 1615 1616 + if ((imx_data->socdata->flags & ESDHC_FLAG_STATE_LOST_IN_LPMODE) && 1617 + (host->tuning_mode != 
SDHCI_TUNING_MODE_1)) { 1618 + mmc_retune_timer_stop(host->mmc); 1619 + mmc_retune_needed(host->mmc); 1620 + } 1621 + 1724 1622 if (host->tuning_mode != SDHCI_TUNING_MODE_3) 1725 1623 mmc_retune_needed(host->mmc); 1726 1624 1727 - return sdhci_suspend_host(host); 1625 + ret = sdhci_suspend_host(host); 1626 + if (!ret) 1627 + return pinctrl_pm_select_sleep_state(dev); 1628 + 1629 + return ret; 1728 1630 } 1729 1631 1730 1632 static int sdhci_esdhc_resume(struct device *dev) 1731 1633 { 1732 1634 struct sdhci_host *host = dev_get_drvdata(dev); 1733 1635 int ret; 1636 + 1637 + ret = pinctrl_pm_select_default_state(dev); 1638 + if (ret) 1639 + return ret; 1734 1640 1735 1641 /* re-initialize hw state in case it's lost in low power mode */ 1736 1642 sdhci_esdhc_imx_hwinit(host); ··· 1802 1680 1803 1681 if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS) 1804 1682 cpu_latency_qos_add_request(&imx_data->pm_qos_req, 0); 1683 + 1684 + if (imx_data->socdata->flags & ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME) 1685 + clk_set_rate(imx_data->clk_per, pltfm_host->clock); 1805 1686 1806 1687 err = clk_prepare_enable(imx_data->clk_ahb); 1807 1688 if (err)
+1
drivers/mmc/host/sdhci-esdhc.h
··· 31 31 32 32 /* Present State Register */ 33 33 #define ESDHC_PRSSTAT 0x24 34 + #define ESDHC_CLOCK_GATE_OFF 0x00000080 34 35 #define ESDHC_CLOCK_STABLE 0x00000008 35 36 36 37 /* Protocol Control Register */
+16 -1
drivers/mmc/host/sdhci-iproc.c
··· 261 261 .mmc_caps = 0x00000000, 262 262 }; 263 263 264 + static const struct sdhci_ops sdhci_iproc_bcm2711_ops = { 265 + .read_l = sdhci_iproc_readl, 266 + .read_w = sdhci_iproc_readw, 267 + .read_b = sdhci_iproc_readb, 268 + .write_l = sdhci_iproc_writel, 269 + .write_w = sdhci_iproc_writew, 270 + .write_b = sdhci_iproc_writeb, 271 + .set_clock = sdhci_set_clock, 272 + .set_power = sdhci_set_power_and_bus_voltage, 273 + .get_max_clock = sdhci_iproc_get_max_clock, 274 + .set_bus_width = sdhci_set_bus_width, 275 + .reset = sdhci_reset, 276 + .set_uhs_signaling = sdhci_set_uhs_signaling, 277 + }; 278 + 264 279 static const struct sdhci_pltfm_data sdhci_bcm2711_pltfm_data = { 265 280 .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, 266 - .ops = &sdhci_iproc_32only_ops, 281 + .ops = &sdhci_iproc_bcm2711_ops, 267 282 }; 268 283 269 284 static const struct sdhci_iproc_data bcm2711_data = {
+1 -12
drivers/mmc/host/sdhci-milbeaut.c
··· 121 121 } 122 122 } 123 123 124 - static void sdhci_milbeaut_set_power(struct sdhci_host *host, 125 - unsigned char mode, unsigned short vdd) 126 - { 127 - if (!IS_ERR(host->mmc->supply.vmmc)) { 128 - struct mmc_host *mmc = host->mmc; 129 - 130 - mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 131 - } 132 - sdhci_set_power_noreg(host, mode, vdd); 133 - } 134 - 135 124 static const struct sdhci_ops sdhci_milbeaut_ops = { 136 125 .voltage_switch = sdhci_milbeaut_soft_voltage_switch, 137 126 .get_min_clock = sdhci_milbeaut_get_min_clock, ··· 128 139 .set_clock = sdhci_set_clock, 129 140 .set_bus_width = sdhci_set_bus_width, 130 141 .set_uhs_signaling = sdhci_set_uhs_signaling, 131 - .set_power = sdhci_milbeaut_set_power, 142 + .set_power = sdhci_set_power_and_bus_voltage, 132 143 }; 133 144 134 145 static void sdhci_milbeaut_bridge_reset(struct sdhci_host *host,
+23 -4
drivers/mmc/host/sdhci-msm.c
··· 977 977 goto out; 978 978 } 979 979 980 - config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec3); 981 - config |= CORE_PWRSAVE_DLL; 982 - writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec3); 980 + /* 981 + * Set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3. 982 + * When MCLK is gated OFF, it is not gated for less than 0.5us 983 + * and MCLK must be switched on for at-least 1us before DATA 984 + * starts coming. Controllers with 14lpp and later tech DLL cannot 985 + * guarantee above requirement. So PWRSAVE_DLL should not be 986 + * turned on for host controllers using this DLL. 987 + */ 988 + if (!msm_host->use_14lpp_dll_reset) { 989 + config = readl_relaxed(host->ioaddr + 990 + msm_offset->core_vendor_spec3); 991 + config |= CORE_PWRSAVE_DLL; 992 + writel_relaxed(config, host->ioaddr + 993 + msm_offset->core_vendor_spec3); 994 + } 983 995 984 996 /* 985 997 * Drain writebuffer to ensure above DLL calibration ··· 1823 1811 pr_debug("%s: supported caps: 0x%08x\n", mmc_hostname(mmc), caps); 1824 1812 } 1825 1813 1814 + static void sdhci_msm_reset(struct sdhci_host *host, u8 mask) 1815 + { 1816 + if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL)) 1817 + cqhci_deactivate(host->mmc); 1818 + sdhci_reset(host, mask); 1819 + } 1820 + 1826 1821 static const struct sdhci_msm_variant_ops mci_var_ops = { 1827 1822 .msm_readl_relaxed = sdhci_msm_mci_variant_readl_relaxed, 1828 1823 .msm_writel_relaxed = sdhci_msm_mci_variant_writel_relaxed, ··· 1868 1849 MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match); 1869 1850 1870 1851 static const struct sdhci_ops sdhci_msm_ops = { 1871 - .reset = sdhci_reset, 1852 + .reset = sdhci_msm_reset, 1872 1853 .set_clock = sdhci_msm_set_clock, 1873 1854 .get_min_clock = sdhci_msm_get_min_clock, 1874 1855 .get_max_clock = sdhci_msm_get_max_clock,
+60 -14
drivers/mmc/host/sdhci-of-arasan.c
··· 325 325 return -EINVAL; 326 326 } 327 327 328 - static void sdhci_arasan_set_power(struct sdhci_host *host, unsigned char mode, 329 - unsigned short vdd) 330 - { 331 - if (!IS_ERR(host->mmc->supply.vmmc)) { 332 - struct mmc_host *mmc = host->mmc; 333 - 334 - mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 335 - } 336 - sdhci_set_power_noreg(host, mode, vdd); 337 - } 338 - 339 328 static const struct sdhci_ops sdhci_arasan_ops = { 340 329 .set_clock = sdhci_arasan_set_clock, 341 330 .get_max_clock = sdhci_pltfm_clk_get_max_clock, ··· 332 343 .set_bus_width = sdhci_set_bus_width, 333 344 .reset = sdhci_arasan_reset, 334 345 .set_uhs_signaling = sdhci_set_uhs_signaling, 335 - .set_power = sdhci_arasan_set_power, 346 + .set_power = sdhci_set_power_and_bus_voltage, 336 347 }; 337 348 338 349 static const struct sdhci_pltfm_data sdhci_arasan_pdata = { ··· 345 356 346 357 static struct sdhci_arasan_of_data sdhci_arasan_data = { 347 358 .pdata = &sdhci_arasan_pdata, 359 + }; 360 + 361 + static const struct sdhci_pltfm_data sdhci_arasan_zynqmp_pdata = { 362 + .ops = &sdhci_arasan_ops, 363 + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | 364 + SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN | 365 + SDHCI_QUIRK2_STOP_WITH_TC, 366 + }; 367 + 368 + static struct sdhci_arasan_of_data sdhci_arasan_zynqmp_data = { 369 + .pdata = &sdhci_arasan_zynqmp_pdata, 348 370 }; 349 371 350 372 static u32 sdhci_arasan_cqhci_irq(struct sdhci_host *host, u32 intmask) ··· 403 403 .set_bus_width = sdhci_set_bus_width, 404 404 .reset = sdhci_arasan_reset, 405 405 .set_uhs_signaling = sdhci_set_uhs_signaling, 406 - .set_power = sdhci_arasan_set_power, 406 + .set_power = sdhci_set_power_and_bus_voltage, 407 407 .irq = sdhci_arasan_cqhci_irq, 408 408 }; 409 409 ··· 553 553 }, 554 554 { 555 555 .compatible = "xlnx,zynqmp-8.9a", 556 - .data = &sdhci_arasan_data, 556 + .data = &sdhci_arasan_zynqmp_data, 557 557 }, 558 558 { /* sentinel */ } 559 559 }; ··· 756 756 .recalc_rate = 
sdhci_arasan_sampleclk_recalc_rate, 757 757 .set_phase = sdhci_zynqmp_sampleclk_set_phase, 758 758 }; 759 + 760 + static void arasan_zynqmp_dll_reset(struct sdhci_host *host, u32 deviceid) 761 + { 762 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 763 + struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host); 764 + struct sdhci_arasan_zynqmp_clk_data *zynqmp_clk_data = 765 + sdhci_arasan->clk_data.clk_of_data; 766 + const struct zynqmp_eemi_ops *eemi_ops = zynqmp_clk_data->eemi_ops; 767 + u16 clk; 768 + 769 + clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 770 + clk &= ~(SDHCI_CLOCK_CARD_EN | SDHCI_CLOCK_INT_EN); 771 + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 772 + 773 + /* Issue DLL Reset */ 774 + eemi_ops->ioctl(deviceid, IOCTL_SD_DLL_RESET, 775 + PM_DLL_RESET_PULSE, 0, NULL); 776 + 777 + clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 778 + 779 + sdhci_enable_clk(host, clk); 780 + } 781 + 782 + static int arasan_zynqmp_execute_tuning(struct mmc_host *mmc, u32 opcode) 783 + { 784 + struct sdhci_host *host = mmc_priv(mmc); 785 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 786 + struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host); 787 + struct clk_hw *hw = &sdhci_arasan->clk_data.sdcardclk_hw; 788 + const char *clk_name = clk_hw_get_name(hw); 789 + u32 device_id = !strcmp(clk_name, "clk_out_sd0") ? 
NODE_SD_0 : 790 + NODE_SD_1; 791 + int err; 792 + 793 + arasan_zynqmp_dll_reset(host, device_id); 794 + 795 + err = sdhci_execute_tuning(mmc, opcode); 796 + if (err) 797 + return err; 798 + 799 + arasan_zynqmp_dll_reset(host, device_id); 800 + 801 + return 0; 802 + } 759 803 760 804 /** 761 805 * sdhci_arasan_update_clockmultiplier - Set corecfg_clockmultiplier ··· 1291 1247 1292 1248 zynqmp_clk_data->eemi_ops = eemi_ops; 1293 1249 sdhci_arasan->clk_data.clk_of_data = zynqmp_clk_data; 1250 + host->mmc_host_ops.execute_tuning = 1251 + arasan_zynqmp_execute_tuning; 1294 1252 } 1295 1253 1296 1254 arasan_dt_parse_clk_phases(&pdev->dev, &sdhci_arasan->clk_data);
+3 -19
drivers/mmc/host/sdhci-of-at91.c
··· 101 101 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 102 102 } 103 103 104 - /* 105 - * In this specific implementation of the SDHCI controller, the power register 106 - * needs to have a valid voltage set even when the power supply is managed by 107 - * an external regulator. 108 - */ 109 - static void sdhci_at91_set_power(struct sdhci_host *host, unsigned char mode, 110 - unsigned short vdd) 111 - { 112 - if (!IS_ERR(host->mmc->supply.vmmc)) { 113 - struct mmc_host *mmc = host->mmc; 114 - 115 - mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 116 - } 117 - sdhci_set_power_noreg(host, mode, vdd); 118 - } 119 - 120 104 static void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, 121 105 unsigned int timing) 122 106 { ··· 130 146 .set_bus_width = sdhci_set_bus_width, 131 147 .reset = sdhci_at91_reset, 132 148 .set_uhs_signaling = sdhci_at91_set_uhs_signaling, 133 - .set_power = sdhci_at91_set_power, 149 + .set_power = sdhci_set_power_and_bus_voltage, 134 150 }; 135 151 136 152 static const struct sdhci_pltfm_data sdhci_sama5d2_pdata = { ··· 189 205 /* Set capabilities in ro mode. */ 190 206 writel(0, host->ioaddr + SDMMC_CACR); 191 207 192 - dev_info(dev, "update clk mul to %u as gck rate is %u Hz and clk base is %u Hz\n", 193 - clk_mul, gck_rate, clk_base_rate); 208 + dev_dbg(dev, "update clk mul to %u as gck rate is %u Hz and clk base is %u Hz\n", 209 + clk_mul, gck_rate, clk_base_rate); 194 210 195 211 /* 196 212 * We have to set preset values because it depends on the clk_mul
+57
drivers/mmc/host/sdhci-omap.c
··· 108 108 struct pinctrl *pinctrl; 109 109 struct pinctrl_state **pinctrl_state; 110 110 bool is_tuning; 111 + /* Omap specific context save */ 112 + u32 con; 113 + u32 hctl; 114 + u32 sysctl; 115 + u32 capa; 111 116 }; 112 117 113 118 static void sdhci_omap_start_clock(struct sdhci_omap_host *omap_host); ··· 1240 1235 1241 1236 return 0; 1242 1237 } 1238 + #ifdef CONFIG_PM_SLEEP 1239 + static void sdhci_omap_context_save(struct sdhci_omap_host *omap_host) 1240 + { 1241 + omap_host->con = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON); 1242 + omap_host->hctl = sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL); 1243 + omap_host->capa = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA); 1244 + } 1245 + 1246 + static void sdhci_omap_context_restore(struct sdhci_omap_host *omap_host) 1247 + { 1248 + sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, omap_host->con); 1249 + sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, omap_host->hctl); 1250 + sdhci_omap_writel(omap_host, SDHCI_OMAP_CAPA, omap_host->capa); 1251 + } 1252 + 1253 + static int __maybe_unused sdhci_omap_suspend(struct device *dev) 1254 + { 1255 + struct sdhci_host *host = dev_get_drvdata(dev); 1256 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1257 + struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); 1258 + 1259 + sdhci_suspend_host(host); 1260 + 1261 + sdhci_omap_context_save(omap_host); 1262 + 1263 + pinctrl_pm_select_idle_state(dev); 1264 + 1265 + pm_runtime_force_suspend(dev); 1266 + 1267 + return 0; 1268 + } 1269 + 1270 + static int __maybe_unused sdhci_omap_resume(struct device *dev) 1271 + { 1272 + struct sdhci_host *host = dev_get_drvdata(dev); 1273 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1274 + struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); 1275 + 1276 + pm_runtime_force_resume(dev); 1277 + 1278 + pinctrl_pm_select_default_state(dev); 1279 + 1280 + sdhci_omap_context_restore(omap_host); 1281 + 1282 + sdhci_resume_host(host); 1283 + 1284 + return 0; 1285 
+ } 1286 + #endif 1287 + static SIMPLE_DEV_PM_OPS(sdhci_omap_dev_pm_ops, sdhci_omap_suspend, 1288 + sdhci_omap_resume); 1243 1289 1244 1290 static struct platform_driver sdhci_omap_driver = { 1245 1291 .probe = sdhci_omap_probe, 1246 1292 .remove = sdhci_omap_remove, 1247 1293 .driver = { 1248 1294 .name = "sdhci-omap", 1295 + .pm = &sdhci_omap_dev_pm_ops, 1249 1296 .of_match_table = omap_sdhci_match, 1250 1297 }, 1251 1298 };
+1 -1
drivers/mmc/host/sdhci-pci.h
··· 163 163 bool cd_override_level; 164 164 165 165 void (*hw_reset)(struct sdhci_host *host); 166 - unsigned long private[0] ____cacheline_aligned; 166 + unsigned long private[] ____cacheline_aligned; 167 167 }; 168 168 169 169 struct sdhci_pci_chip {
+1 -1
drivers/mmc/host/sdhci-pltfm.h
··· 25 25 unsigned int clock; 26 26 u16 xfer_mode_shadow; 27 27 28 - unsigned long private[0] ____cacheline_aligned; 28 + unsigned long private[] ____cacheline_aligned; 29 29 }; 30 30 31 31 #ifdef CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
+29 -1
drivers/mmc/host/sdhci-sprd.c
··· 19 19 #include <linux/slab.h> 20 20 21 21 #include "sdhci-pltfm.h" 22 + #include "mmc_hsq.h" 22 23 23 24 /* SDHCI_ARGUMENT2 register high 16bit */ 24 25 #define SDHCI_SPRD_ARG2_STUFF GENMASK(31, 16) ··· 380 379 return 0; 381 380 } 382 381 382 + static void sdhci_sprd_request_done(struct sdhci_host *host, 383 + struct mmc_request *mrq) 384 + { 385 + /* Validate if the request was from software queue firstly. */ 386 + if (mmc_hsq_finalize_request(host->mmc, mrq)) 387 + return; 388 + 389 + mmc_request_done(host->mmc, mrq); 390 + } 391 + 383 392 static struct sdhci_ops sdhci_sprd_ops = { 384 393 .read_l = sdhci_sprd_readl, 385 394 .write_l = sdhci_sprd_writel, ··· 403 392 .hw_reset = sdhci_sprd_hw_reset, 404 393 .get_max_timeout_count = sdhci_sprd_get_max_timeout_count, 405 394 .get_ro = sdhci_sprd_get_ro, 395 + .request_done = sdhci_sprd_request_done, 406 396 }; 407 397 408 398 static void sdhci_sprd_request(struct mmc_host *mmc, struct mmc_request *mrq) ··· 533 521 { 534 522 struct sdhci_host *host; 535 523 struct sdhci_sprd_host *sprd_host; 524 + struct mmc_hsq *hsq; 536 525 struct clk *clk; 537 526 int ret = 0; 538 527 ··· 556 543 sdhci_sprd_voltage_switch; 557 544 558 545 host->mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED | 559 - MMC_CAP_ERASE | MMC_CAP_CMD23; 546 + MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY; 560 547 ret = mmc_of_parse(host->mmc); 561 548 if (ret) 562 549 goto pltfm_free; ··· 644 631 645 632 sprd_host->flags = host->flags; 646 633 634 + hsq = devm_kzalloc(&pdev->dev, sizeof(*hsq), GFP_KERNEL); 635 + if (!hsq) { 636 + ret = -ENOMEM; 637 + goto err_cleanup_host; 638 + } 639 + 640 + ret = mmc_hsq_init(hsq, host->mmc); 641 + if (ret) 642 + goto err_cleanup_host; 643 + 644 + host->always_defer_done = true; 645 + 647 646 ret = __sdhci_add_host(host); 648 647 if (ret) 649 648 goto err_cleanup_host; ··· 714 689 struct sdhci_host *host = dev_get_drvdata(dev); 715 690 struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host); 716 
691 692 + mmc_hsq_suspend(host->mmc); 717 693 sdhci_runtime_suspend_host(host); 718 694 719 695 clk_disable_unprepare(sprd_host->clk_sdio); ··· 743 717 goto clk_disable; 744 718 745 719 sdhci_runtime_resume_host(host, 1); 720 + mmc_hsq_resume(host->mmc); 721 + 746 722 return 0; 747 723 748 724 clk_disable:
+33 -2
drivers/mmc/host/sdhci-tegra.c
··· 45 45 #define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT 8 46 46 47 47 #define SDHCI_TEGRA_VENDOR_MISC_CTRL 0x120 48 + #define SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT BIT(0) 48 49 #define SDHCI_MISC_CTRL_ENABLE_SDR104 0x8 49 50 #define SDHCI_MISC_CTRL_ENABLE_SDR50 0x10 50 51 #define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 0x20 ··· 1228 1227 return 0; 1229 1228 } 1230 1229 1230 + static void tegra_sdhci_set_timeout(struct sdhci_host *host, 1231 + struct mmc_command *cmd) 1232 + { 1233 + u32 val; 1234 + 1235 + /* 1236 + * HW busy detection timeout is based on programmed data timeout 1237 + * counter and maximum supported timeout is 11s which may not be 1238 + * enough for long operations like cache flush, sleep awake, erase. 1239 + * 1240 + * ERASE_TIMEOUT_LIMIT bit of VENDOR_MISC_CTRL register allows 1241 + * host controller to wait for busy state until the card is busy 1242 + * without HW timeout. 1243 + * 1244 + * So, use infinite busy wait mode for operations that may take 1245 + * more than maximum HW busy timeout of 11s otherwise use finite 1246 + * busy wait mode. 
1247 + */ 1248 + val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL); 1249 + if (cmd && cmd->busy_timeout >= 11 * HZ) 1250 + val |= SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT; 1251 + else 1252 + val &= ~SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT; 1253 + sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_MISC_CTRL); 1254 + 1255 + __sdhci_set_timeout(host, cmd); 1256 + } 1257 + 1231 1258 static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = { 1232 1259 .write_l = tegra_cqhci_writel, 1233 1260 .enable = sdhci_tegra_cqe_enable, ··· 1395 1366 .set_uhs_signaling = tegra_sdhci_set_uhs_signaling, 1396 1367 .voltage_switch = tegra_sdhci_voltage_switch, 1397 1368 .get_max_clock = tegra_sdhci_get_max_clock, 1369 + .set_timeout = tegra_sdhci_set_timeout, 1398 1370 }; 1399 1371 1400 1372 static const struct sdhci_pltfm_data sdhci_tegra210_pdata = { ··· 1433 1403 .voltage_switch = tegra_sdhci_voltage_switch, 1434 1404 .get_max_clock = tegra_sdhci_get_max_clock, 1435 1405 .irq = sdhci_tegra_cqhci_irq, 1406 + .set_timeout = tegra_sdhci_set_timeout, 1436 1407 }; 1437 1408 1438 1409 static const struct sdhci_pltfm_data sdhci_tegra186_pdata = { ··· 1583 1552 if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50) 1584 1553 host->mmc->caps |= MMC_CAP_1_8V_DDR; 1585 1554 1586 - /* R1B responses is required to properly manage HW busy detection. */ 1587 - host->mmc->caps |= MMC_CAP_NEED_RSP_BUSY; 1555 + /* HW busy detection is supported, but R1B responses are required. */ 1556 + host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY; 1588 1557 1589 1558 tegra_sdhci_parse_dt(host); 1590 1559
+36 -9
drivers/mmc/host/sdhci.c
··· 9 9 * - JMicron (hardware and technical support) 10 10 */ 11 11 12 + #include <linux/bitfield.h> 12 13 #include <linux/delay.h> 13 14 #include <linux/dmaengine.h> 14 15 #include <linux/ktime.h> ··· 154 153 u32 present; 155 154 156 155 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || 157 - !mmc_card_is_removable(host->mmc)) 156 + !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc)) 158 157 return; 159 158 160 159 if (enable) { ··· 1767 1766 1768 1767 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 1769 1768 pre_val = sdhci_get_preset_value(host); 1770 - div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK) 1771 - >> SDHCI_PRESET_SDCLK_FREQ_SHIFT; 1769 + div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val); 1772 1770 if (host->clk_mul && 1773 - (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) { 1771 + (pre_val & SDHCI_PRESET_CLKGEN_SEL)) { 1774 1772 clk = SDHCI_PROG_CLOCK_MODE; 1775 1773 real_div = div + 1; 1776 1774 clk_mul = host->clk_mul; ··· 2010 2010 } 2011 2011 EXPORT_SYMBOL_GPL(sdhci_set_power); 2012 2012 2013 + /* 2014 + * Some controllers need to configure a valid bus voltage on their power 2015 + * register regardless of whether an external regulator is taking care of power 2016 + * supply. This helper function takes care of it if set as the controller's 2017 + * sdhci_ops.set_power callback. 
2018 + */ 2019 + void sdhci_set_power_and_bus_voltage(struct sdhci_host *host, 2020 + unsigned char mode, 2021 + unsigned short vdd) 2022 + { 2023 + if (!IS_ERR(host->mmc->supply.vmmc)) { 2024 + struct mmc_host *mmc = host->mmc; 2025 + 2026 + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 2027 + } 2028 + sdhci_set_power_noreg(host, mode, vdd); 2029 + } 2030 + EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage); 2031 + 2013 2032 /*****************************************************************************\ 2014 2033 * * 2015 2034 * MMC callbacks * ··· 2246 2227 2247 2228 sdhci_enable_preset_value(host, true); 2248 2229 preset = sdhci_get_preset_value(host); 2249 - ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK) 2250 - >> SDHCI_PRESET_DRV_SHIFT; 2230 + ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK, 2231 + preset); 2251 2232 } 2252 2233 2253 2234 /* Re-enable SD Clock */ ··· 2963 2944 2964 2945 spin_unlock_irqrestore(&host->lock, flags); 2965 2946 2966 - mmc_request_done(host->mmc, mrq); 2947 + if (host->ops->request_done) 2948 + host->ops->request_done(host, mrq); 2949 + else 2950 + mmc_request_done(host->mmc, mrq); 2967 2951 2968 2952 return false; 2969 2953 } ··· 3269 3247 { 3270 3248 struct mmc_data *data = mrq->data; 3271 3249 3272 - return host->pending_reset || 3250 + return host->pending_reset || host->always_defer_done || 3273 3251 ((host->flags & SDHCI_REQ_USE_DMA) && data && 3274 3252 data->host_cookie == COOKIE_MAPPED); 3275 3253 } ··· 3394 3372 3395 3373 /* Process mrqs ready for immediate completion */ 3396 3374 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3397 - if (mrqs_done[i]) 3375 + if (!mrqs_done[i]) 3376 + continue; 3377 + 3378 + if (host->ops->request_done) 3379 + host->ops->request_done(host, mrqs_done[i]); 3380 + else 3398 3381 mmc_request_done(host->mmc, mrqs_done[i]); 3399 3382 } 3400 3383
+11 -7
drivers/mmc/host/sdhci.h
··· 9 9 #ifndef __SDHCI_HW_H 10 10 #define __SDHCI_HW_H 11 11 12 + #include <linux/bits.h> 12 13 #include <linux/scatterlist.h> 13 14 #include <linux/compiler.h> 14 15 #include <linux/types.h> ··· 268 267 #define SDHCI_PRESET_FOR_SDR104 0x6C 269 268 #define SDHCI_PRESET_FOR_DDR50 0x6E 270 269 #define SDHCI_PRESET_FOR_HS400 0x74 /* Non-standard */ 271 - #define SDHCI_PRESET_DRV_MASK 0xC000 272 - #define SDHCI_PRESET_DRV_SHIFT 14 273 - #define SDHCI_PRESET_CLKGEN_SEL_MASK 0x400 274 - #define SDHCI_PRESET_CLKGEN_SEL_SHIFT 10 275 - #define SDHCI_PRESET_SDCLK_FREQ_MASK 0x3FF 276 - #define SDHCI_PRESET_SDCLK_FREQ_SHIFT 0 270 + #define SDHCI_PRESET_DRV_MASK GENMASK(15, 14) 271 + #define SDHCI_PRESET_CLKGEN_SEL BIT(10) 272 + #define SDHCI_PRESET_SDCLK_FREQ_MASK GENMASK(9, 0) 277 273 278 274 #define SDHCI_SLOT_INT_STATUS 0xFC 279 275 ··· 535 537 bool irq_wake_enabled; /* IRQ wakeup is enabled */ 536 538 bool v4_mode; /* Host Version 4 Enable */ 537 539 bool use_external_dma; /* Host selects to use external DMA */ 540 + bool always_defer_done; /* Always defer to complete requests */ 538 541 539 542 struct mmc_request *mrqs_done[SDHCI_MAX_MRQS]; /* Requests done */ 540 543 struct mmc_command *cmd; /* Current command */ ··· 612 613 613 614 u64 data_timeout; 614 615 615 - unsigned long private[0] ____cacheline_aligned; 616 + unsigned long private[] ____cacheline_aligned; 616 617 }; 617 618 618 619 struct sdhci_ops { ··· 653 654 void (*voltage_switch)(struct sdhci_host *host); 654 655 void (*adma_write_desc)(struct sdhci_host *host, void **desc, 655 656 dma_addr_t addr, int len, unsigned int cmd); 657 + void (*request_done)(struct sdhci_host *host, 658 + struct mmc_request *mrq); 656 659 }; 657 660 658 661 #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS ··· 770 769 void sdhci_enable_clk(struct sdhci_host *host, u16 clk); 771 770 void sdhci_set_power(struct sdhci_host *host, unsigned char mode, 772 771 unsigned short vdd); 772 + void sdhci_set_power_and_bus_voltage(struct sdhci_host *host, 
773 + unsigned char mode, 774 + unsigned short vdd); 773 775 void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode, 774 776 unsigned short vdd); 775 777 void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq);
+174 -90
drivers/mmc/host/sdhci_am654.c
··· 81 81 82 82 struct sdhci_am654_data { 83 83 struct regmap *base; 84 - int otap_del_sel; 84 + bool legacy_otapdly; 85 + int otap_del_sel[11]; 85 86 int trm_icp; 86 87 int drv_strength; 87 88 bool dll_on; ··· 99 98 #define DLL_PRESENT (1 << 3) 100 99 }; 101 100 102 - static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock) 101 + struct timing_data { 102 + const char *binding; 103 + u32 capability; 104 + }; 105 + 106 + static const struct timing_data td[] = { 107 + [MMC_TIMING_LEGACY] = {"ti,otap-del-sel-legacy", 0}, 108 + [MMC_TIMING_MMC_HS] = {"ti,otap-del-sel-mmc-hs", MMC_CAP_MMC_HIGHSPEED}, 109 + [MMC_TIMING_SD_HS] = {"ti,otap-del-sel-sd-hs", MMC_CAP_SD_HIGHSPEED}, 110 + [MMC_TIMING_UHS_SDR12] = {"ti,otap-del-sel-sdr12", MMC_CAP_UHS_SDR12}, 111 + [MMC_TIMING_UHS_SDR25] = {"ti,otap-del-sel-sdr25", MMC_CAP_UHS_SDR25}, 112 + [MMC_TIMING_UHS_SDR50] = {"ti,otap-del-sel-sdr50", MMC_CAP_UHS_SDR50}, 113 + [MMC_TIMING_UHS_SDR104] = {"ti,otap-del-sel-sdr104", 114 + MMC_CAP_UHS_SDR104}, 115 + [MMC_TIMING_UHS_DDR50] = {"ti,otap-del-sel-ddr50", MMC_CAP_UHS_DDR50}, 116 + [MMC_TIMING_MMC_DDR52] = {"ti,otap-del-sel-ddr52", MMC_CAP_DDR}, 117 + [MMC_TIMING_MMC_HS200] = {"ti,otap-del-sel-hs200", MMC_CAP2_HS200}, 118 + [MMC_TIMING_MMC_HS400] = {"ti,otap-del-sel-hs400", MMC_CAP2_HS400}, 119 + }; 120 + 121 + static void sdhci_am654_setup_dll(struct sdhci_host *host, unsigned int clock) 103 122 { 104 123 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 105 124 struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host); 106 125 int sel50, sel100, freqsel; 107 126 u32 mask, val; 108 127 int ret; 128 + 129 + if (sdhci_am654->flags & FREQSEL_2_BIT) { 130 + switch (clock) { 131 + case 200000000: 132 + sel50 = 0; 133 + sel100 = 0; 134 + break; 135 + case 100000000: 136 + sel50 = 0; 137 + sel100 = 1; 138 + break; 139 + default: 140 + sel50 = 1; 141 + sel100 = 0; 142 + } 143 + 144 + /* Configure PHY DLL frequency */ 145 + mask = SEL50_MASK | 
SEL100_MASK; 146 + val = (sel50 << SEL50_SHIFT) | (sel100 << SEL100_SHIFT); 147 + regmap_update_bits(sdhci_am654->base, PHY_CTRL5, mask, val); 148 + 149 + } else { 150 + switch (clock) { 151 + case 200000000: 152 + freqsel = 0x0; 153 + break; 154 + default: 155 + freqsel = 0x4; 156 + } 157 + 158 + regmap_update_bits(sdhci_am654->base, PHY_CTRL5, FREQSEL_MASK, 159 + freqsel << FREQSEL_SHIFT); 160 + } 161 + /* Configure DLL TRIM */ 162 + mask = DLL_TRIM_ICP_MASK; 163 + val = sdhci_am654->trm_icp << DLL_TRIM_ICP_SHIFT; 164 + 165 + /* Configure DLL driver strength */ 166 + mask |= DR_TY_MASK; 167 + val |= sdhci_am654->drv_strength << DR_TY_SHIFT; 168 + regmap_update_bits(sdhci_am654->base, PHY_CTRL1, mask, val); 169 + 170 + /* Enable DLL */ 171 + regmap_update_bits(sdhci_am654->base, PHY_CTRL1, ENDLL_MASK, 172 + 0x1 << ENDLL_SHIFT); 173 + /* 174 + * Poll for DLL ready. Use a one second timeout. 175 + * Works in all experiments done so far 176 + */ 177 + ret = regmap_read_poll_timeout(sdhci_am654->base, PHY_STAT1, val, 178 + val & DLLRDY_MASK, 1000, 1000000); 179 + if (ret) { 180 + dev_err(mmc_dev(host->mmc), "DLL failed to relock\n"); 181 + return; 182 + } 183 + 184 + sdhci_am654->dll_on = true; 185 + } 186 + 187 + static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock) 188 + { 189 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 190 + struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host); 191 + unsigned char timing = host->mmc->ios.timing; 192 + u32 otap_del_sel; 193 + u32 otap_del_ena; 194 + u32 mask, val; 109 195 110 196 if (sdhci_am654->dll_on) { 111 197 regmap_update_bits(sdhci_am654->base, PHY_CTRL1, ENDLL_MASK, 0); ··· 204 116 205 117 if (clock > CLOCK_TOO_SLOW_HZ) { 206 118 /* Setup DLL Output TAP delay */ 119 + if (sdhci_am654->legacy_otapdly) 120 + otap_del_sel = sdhci_am654->otap_del_sel[0]; 121 + else 122 + otap_del_sel = sdhci_am654->otap_del_sel[timing]; 123 + 124 + otap_del_ena = (timing > 
MMC_TIMING_UHS_SDR25) ? 1 : 0; 125 + 207 126 mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK; 208 - val = (1 << OTAPDLYENA_SHIFT) | 209 - (sdhci_am654->otap_del_sel << OTAPDLYSEL_SHIFT); 210 - regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val); 127 + val = (otap_del_ena << OTAPDLYENA_SHIFT) | 128 + (otap_del_sel << OTAPDLYSEL_SHIFT); 129 + 211 130 /* Write to STRBSEL for HS400 speed mode */ 212 - if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400) { 131 + if (timing == MMC_TIMING_MMC_HS400) { 213 132 if (sdhci_am654->flags & STRBSEL_4_BIT) 214 - mask = STRBSEL_4BIT_MASK; 133 + mask |= STRBSEL_4BIT_MASK; 215 134 else 216 - mask = STRBSEL_8BIT_MASK; 135 + mask |= STRBSEL_8BIT_MASK; 217 136 218 - regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, 219 - sdhci_am654->strb_sel << 220 - STRBSEL_SHIFT); 137 + val |= sdhci_am654->strb_sel << STRBSEL_SHIFT; 221 138 } 222 139 223 - if (sdhci_am654->flags & FREQSEL_2_BIT) { 224 - switch (clock) { 225 - case 200000000: 226 - sel50 = 0; 227 - sel100 = 0; 228 - break; 229 - case 100000000: 230 - sel50 = 0; 231 - sel100 = 1; 232 - break; 233 - default: 234 - sel50 = 1; 235 - sel100 = 0; 236 - } 140 + regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val); 237 141 238 - /* Configure PHY DLL frequency */ 239 - mask = SEL50_MASK | SEL100_MASK; 240 - val = (sel50 << SEL50_SHIFT) | (sel100 << SEL100_SHIFT); 241 - regmap_update_bits(sdhci_am654->base, PHY_CTRL5, mask, 242 - val); 243 - } else { 244 - switch (clock) { 245 - case 200000000: 246 - freqsel = 0x0; 247 - break; 248 - default: 249 - freqsel = 0x4; 250 - } 251 - 252 - regmap_update_bits(sdhci_am654->base, PHY_CTRL5, 253 - FREQSEL_MASK, 254 - freqsel << FREQSEL_SHIFT); 255 - } 256 - 257 - /* Configure DLL TRIM */ 258 - mask = DLL_TRIM_ICP_MASK; 259 - val = sdhci_am654->trm_icp << DLL_TRIM_ICP_SHIFT; 260 - 261 - /* Configure DLL driver strength */ 262 - mask |= DR_TY_MASK; 263 - val |= sdhci_am654->drv_strength << DR_TY_SHIFT; 264 - regmap_update_bits(sdhci_am654->base, 
PHY_CTRL1, mask, val); 265 - /* Enable DLL */ 266 - regmap_update_bits(sdhci_am654->base, PHY_CTRL1, ENDLL_MASK, 267 - 0x1 << ENDLL_SHIFT); 268 - /* 269 - * Poll for DLL ready. Use a one second timeout. 270 - * Works in all experiments done so far 271 - */ 272 - ret = regmap_read_poll_timeout(sdhci_am654->base, PHY_STAT1, 273 - val, val & DLLRDY_MASK, 1000, 274 - 1000000); 275 - if (ret) { 276 - dev_err(mmc_dev(host->mmc), "DLL failed to relock\n"); 277 - return; 278 - } 279 - 280 - sdhci_am654->dll_on = true; 142 + if (timing > MMC_TIMING_UHS_SDR25) 143 + sdhci_am654_setup_dll(host, clock); 281 144 } 282 145 } 283 146 ··· 237 198 { 238 199 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 239 200 struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host); 240 - int val, mask; 201 + unsigned char timing = host->mmc->ios.timing; 202 + u32 otap_del_sel; 203 + u32 mask, val; 204 + 205 + /* Setup DLL Output TAP delay */ 206 + if (sdhci_am654->legacy_otapdly) 207 + otap_del_sel = sdhci_am654->otap_del_sel[0]; 208 + else 209 + otap_del_sel = sdhci_am654->otap_del_sel[timing]; 241 210 242 211 mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK; 243 - val = (1 << OTAPDLYENA_SHIFT) | 244 - (sdhci_am654->otap_del_sel << OTAPDLYSEL_SHIFT); 212 + val = (0x1 << OTAPDLYENA_SHIFT) | 213 + (otap_del_sel << OTAPDLYSEL_SHIFT); 245 214 regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val); 246 215 247 216 sdhci_set_clock(host, clock); 248 - } 249 - 250 - static void sdhci_am654_set_power(struct sdhci_host *host, unsigned char mode, 251 - unsigned short vdd) 252 - { 253 - if (!IS_ERR(host->mmc->supply.vmmc)) { 254 - struct mmc_host *mmc = host->mmc; 255 - 256 - mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 257 - } 258 - sdhci_set_power_noreg(host, mode, vdd); 259 217 } 260 218 261 219 static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg) ··· 310 274 .get_timeout_clock = sdhci_pltfm_clk_get_max_clock, 311 275 .set_uhs_signaling = 
sdhci_set_uhs_signaling, 312 276 .set_bus_width = sdhci_set_bus_width, 313 - .set_power = sdhci_am654_set_power, 277 + .set_power = sdhci_set_power_and_bus_voltage, 314 278 .set_clock = sdhci_am654_set_clock, 315 279 .write_b = sdhci_am654_write_b, 316 280 .irq = sdhci_am654_cqhci_irq, ··· 333 297 .get_timeout_clock = sdhci_pltfm_clk_get_max_clock, 334 298 .set_uhs_signaling = sdhci_set_uhs_signaling, 335 299 .set_bus_width = sdhci_set_bus_width, 336 - .set_power = sdhci_am654_set_power, 300 + .set_power = sdhci_set_power_and_bus_voltage, 337 301 .set_clock = sdhci_am654_set_clock, 338 302 .write_b = sdhci_am654_write_b, 339 303 .irq = sdhci_am654_cqhci_irq, ··· 356 320 .get_timeout_clock = sdhci_pltfm_clk_get_max_clock, 357 321 .set_uhs_signaling = sdhci_set_uhs_signaling, 358 322 .set_bus_width = sdhci_set_bus_width, 359 - .set_power = sdhci_am654_set_power, 323 + .set_power = sdhci_set_power_and_bus_voltage, 360 324 .set_clock = sdhci_j721e_4bit_set_clock, 361 325 .write_b = sdhci_am654_write_b, 362 326 .irq = sdhci_am654_cqhci_irq, ··· 407 371 return ret; 408 372 } 409 373 374 + static int sdhci_am654_get_otap_delay(struct sdhci_host *host, 375 + struct sdhci_am654_data *sdhci_am654) 376 + { 377 + struct device *dev = mmc_dev(host->mmc); 378 + int i; 379 + int ret; 380 + 381 + ret = device_property_read_u32(dev, td[MMC_TIMING_LEGACY].binding, 382 + &sdhci_am654->otap_del_sel[MMC_TIMING_LEGACY]); 383 + if (ret) { 384 + /* 385 + * ti,otap-del-sel-legacy is mandatory, look for old binding 386 + * if not found. 
387 + */ 388 + ret = device_property_read_u32(dev, "ti,otap-del-sel", 389 + &sdhci_am654->otap_del_sel[0]); 390 + if (ret) { 391 + dev_err(dev, "Couldn't find otap-del-sel\n"); 392 + 393 + return ret; 394 + } 395 + 396 + dev_info(dev, "Using legacy binding ti,otap-del-sel\n"); 397 + sdhci_am654->legacy_otapdly = true; 398 + 399 + return 0; 400 + } 401 + 402 + for (i = MMC_TIMING_MMC_HS; i <= MMC_TIMING_MMC_HS400; i++) { 403 + 404 + ret = device_property_read_u32(dev, td[i].binding, 405 + &sdhci_am654->otap_del_sel[i]); 406 + if (ret) { 407 + dev_dbg(dev, "Couldn't find %s\n", 408 + td[i].binding); 409 + /* 410 + * Remove the corresponding capability 411 + * if an otap-del-sel value is not found 412 + */ 413 + if (i <= MMC_TIMING_MMC_DDR52) 414 + host->mmc->caps &= ~td[i].capability; 415 + else 416 + host->mmc->caps2 &= ~td[i].capability; 417 + } 418 + } 419 + 420 + return 0; 421 + } 422 + 410 423 static int sdhci_am654_init(struct sdhci_host *host) 411 424 { 412 425 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ··· 504 419 if (ret) 505 420 goto err_cleanup_host; 506 421 422 + ret = sdhci_am654_get_otap_delay(host, sdhci_am654); 423 + if (ret) 424 + goto err_cleanup_host; 425 + 507 426 ret = __sdhci_add_host(host); 508 427 if (ret) 509 428 goto err_cleanup_host; ··· 525 436 struct device *dev = &pdev->dev; 526 437 int drv_strength; 527 438 int ret; 528 - 529 - ret = device_property_read_u32(dev, "ti,otap-del-sel", 530 - &sdhci_am654->otap_del_sel); 531 - if (ret) 532 - return ret; 533 439 534 440 if (sdhci_am654->flags & DLL_PRESENT) { 535 441 ret = device_property_read_u32(dev, "ti,trm-icp",
+2 -9
drivers/mmc/host/tmio_mmc.h
··· 176 176 int (*write16_hook)(struct tmio_mmc_host *host, int addr); 177 177 void (*reset)(struct tmio_mmc_host *host); 178 178 void (*hw_reset)(struct tmio_mmc_host *host); 179 - void (*prepare_tuning)(struct tmio_mmc_host *host, unsigned long tap); 180 - bool (*check_scc_error)(struct tmio_mmc_host *host); 179 + bool (*check_retune)(struct tmio_mmc_host *host); 181 180 182 181 /* 183 182 * Mandatory callback for tuning to occur which is optional for SDR50 184 183 * and mandatory for SDR104. 185 184 */ 186 - unsigned int (*init_tuning)(struct tmio_mmc_host *host); 187 - int (*select_tuning)(struct tmio_mmc_host *host); 188 - 189 - /* Tuning values: 1 for success, 0 for failure */ 190 - DECLARE_BITMAP(taps, BITS_PER_BYTE * sizeof(long)); 191 - unsigned int tap_num; 192 - unsigned long tap_set; 185 + int (*execute_tuning)(struct tmio_mmc_host *host, u32 opcode); 193 186 194 187 void (*prepare_hs400_tuning)(struct tmio_mmc_host *host); 195 188 void (*hs400_downgrade)(struct tmio_mmc_host *host);
+8 -69
drivers/mmc/host/tmio_mmc_core.c
··· 718 718 static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode) 719 719 { 720 720 struct tmio_mmc_host *host = mmc_priv(mmc); 721 - int i, ret = 0; 721 + int ret; 722 722 723 - if (!host->init_tuning || !host->select_tuning) 724 - /* Tuning is not supported */ 725 - goto out; 723 + if (!host->execute_tuning) 724 + return 0; 726 725 727 - host->tap_num = host->init_tuning(host); 728 - if (!host->tap_num) 729 - /* Tuning is not supported */ 730 - goto out; 726 + ret = host->execute_tuning(host, opcode); 731 727 732 - if (host->tap_num * 2 >= sizeof(host->taps) * BITS_PER_BYTE) { 733 - dev_warn_once(&host->pdev->dev, 734 - "Too many taps, skipping tuning. Please consider updating size of taps field of tmio_mmc_host\n"); 735 - goto out; 736 - } 737 - 738 - bitmap_zero(host->taps, host->tap_num * 2); 739 - 740 - /* Issue CMD19 twice for each tap */ 741 - for (i = 0; i < 2 * host->tap_num; i++) { 742 - if (host->prepare_tuning) 743 - host->prepare_tuning(host, i % host->tap_num); 744 - 745 - ret = mmc_send_tuning(mmc, opcode, NULL); 746 - if (ret == 0) 747 - set_bit(i, host->taps); 748 - } 749 - 750 - ret = host->select_tuning(host); 751 - 752 - out: 753 728 if (ret < 0) { 754 729 dev_warn(&host->pdev->dev, "Tuning procedure failed\n"); 755 730 tmio_mmc_hw_reset(mmc); ··· 818 843 if (mrq->cmd->error || (mrq->data && mrq->data->error)) 819 844 tmio_mmc_abort_dma(host); 820 845 821 - /* SCC error means retune, but executed command was still successful */ 822 - if (host->check_scc_error && host->check_scc_error(host)) 846 + /* Error means retune, but executed command was still successful */ 847 + if (host->check_retune && host->check_retune(host)) 823 848 mmc_retune_needed(host->mmc); 824 849 825 850 /* If SET_BLOCK_COUNT, continue with main command */ ··· 997 1022 return blk_size; 998 1023 } 999 1024 1000 - static int tmio_mmc_prepare_hs400_tuning(struct mmc_host *mmc, 1001 - struct mmc_ios *ios) 1002 - { 1003 - struct tmio_mmc_host *host = mmc_priv(mmc); 
1004 - 1005 - if (host->prepare_hs400_tuning) 1006 - host->prepare_hs400_tuning(host); 1007 - 1008 - return 0; 1009 - } 1010 - 1011 - static void tmio_mmc_hs400_downgrade(struct mmc_host *mmc) 1012 - { 1013 - struct tmio_mmc_host *host = mmc_priv(mmc); 1014 - 1015 - if (host->hs400_downgrade) 1016 - host->hs400_downgrade(host); 1017 - } 1018 - 1019 - static void tmio_mmc_hs400_complete(struct mmc_host *mmc) 1020 - { 1021 - struct tmio_mmc_host *host = mmc_priv(mmc); 1022 - 1023 - if (host->hs400_complete) 1024 - host->hs400_complete(host); 1025 - } 1026 - 1027 - static const struct mmc_host_ops tmio_mmc_ops = { 1025 + static struct mmc_host_ops tmio_mmc_ops = { 1028 1026 .request = tmio_mmc_request, 1029 1027 .set_ios = tmio_mmc_set_ios, 1030 1028 .get_ro = tmio_mmc_get_ro, ··· 1006 1058 .multi_io_quirk = tmio_multi_io_quirk, 1007 1059 .hw_reset = tmio_mmc_hw_reset, 1008 1060 .execute_tuning = tmio_mmc_execute_tuning, 1009 - .prepare_hs400_tuning = tmio_mmc_prepare_hs400_tuning, 1010 - .hs400_downgrade = tmio_mmc_hs400_downgrade, 1011 - .hs400_complete = tmio_mmc_hs400_complete, 1012 1061 }; 1013 1062 1014 1063 static int tmio_mmc_init_ocr(struct tmio_mmc_host *host) ··· 1270 1325 } 1271 1326 EXPORT_SYMBOL_GPL(tmio_mmc_host_runtime_suspend); 1272 1327 1273 - static bool tmio_mmc_can_retune(struct tmio_mmc_host *host) 1274 - { 1275 - return host->tap_num && mmc_can_retune(host->mmc); 1276 - } 1277 - 1278 1328 int tmio_mmc_host_runtime_resume(struct device *dev) 1279 1329 { 1280 1330 struct tmio_mmc_host *host = dev_get_drvdata(dev); ··· 1286 1346 1287 1347 tmio_mmc_enable_dma(host, true); 1288 1348 1289 - if (tmio_mmc_can_retune(host) && host->select_tuning(host)) 1290 - dev_warn(&host->pdev->dev, "Tuning selection failed\n"); 1349 + mmc_retune_needed(host->mmc); 1291 1350 1292 1351 return 0; 1293 1352 }
+2 -2
drivers/mmc/host/vub300.c
··· 95 95 u8 port_number; 96 96 u8 command_type; 97 97 u8 command_index; 98 - u8 command_response[0]; 98 + u8 command_response[]; 99 99 } __packed; 100 100 101 101 struct sd_status_header { ··· 1363 1363 int retval; 1364 1364 for (n = 0; n < sdio_funcs; n++) { 1365 1365 struct sdio_func *sf = card->sdio_func[n]; 1366 - l += snprintf(vub300->vub_name + l, 1366 + l += scnprintf(vub300->vub_name + l, 1367 1367 sizeof(vub300->vub_name) - l, "_%04X%04X", 1368 1368 sf->vendor, sf->device); 1369 1369 }
+8 -1
include/linux/firmware/xlnx-zynqmp.h
··· 100 100 }; 101 101 102 102 enum pm_ioctl_id { 103 - IOCTL_SET_SD_TAPDELAY = 7, 103 + IOCTL_SD_DLL_RESET = 6, 104 + IOCTL_SET_SD_TAPDELAY, 104 105 IOCTL_SET_PLL_FRAC_MODE, 105 106 IOCTL_GET_PLL_FRAC_MODE, 106 107 IOCTL_SET_PLL_FRAC_DATA, ··· 270 269 enum tap_delay_type { 271 270 PM_TAPDELAY_INPUT = 0, 272 271 PM_TAPDELAY_OUTPUT, 272 + }; 273 + 274 + enum dll_reset_type { 275 + PM_DLL_RESET_ASSERT, 276 + PM_DLL_RESET_RELEASE, 277 + PM_DLL_RESET_PULSE, 273 278 }; 274 279 275 280 /**
-3
include/linux/mmc/core.h
··· 107 107 */ 108 108 109 109 unsigned int busy_timeout; /* busy detect timeout in ms */ 110 - /* Set this flag only for blocking sanitize request */ 111 - bool sanitize_busy; 112 - 113 110 struct mmc_data *data; /* data segment associated with cmd */ 114 111 struct mmc_request *mrq; /* associated request */ 115 112 };
+6 -1
include/linux/mmc/host.h
··· 322 322 #define MMC_CAP_3_3V_DDR (1 << 11) /* Host supports eMMC DDR 3.3V */ 323 323 #define MMC_CAP_1_8V_DDR (1 << 12) /* Host supports eMMC DDR 1.8V */ 324 324 #define MMC_CAP_1_2V_DDR (1 << 13) /* Host supports eMMC DDR 1.2V */ 325 + #define MMC_CAP_DDR (MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR | \ 326 + MMC_CAP_1_2V_DDR) 325 327 #define MMC_CAP_POWER_OFF_CARD (1 << 14) /* Can power off after boot */ 326 328 #define MMC_CAP_BUS_WIDTH_TEST (1 << 15) /* CMD14/CMD19 bus width ok */ 327 329 #define MMC_CAP_UHS_SDR12 (1 << 16) /* Host supports UHS SDR12 mode */ ··· 465 463 bool cqe_enabled; 466 464 bool cqe_on; 467 465 468 - unsigned long private[0] ____cacheline_aligned; 466 + /* Host Software Queue support */ 467 + bool hsq_enabled; 468 + 469 + unsigned long private[] ____cacheline_aligned; 469 470 }; 470 471 471 472 struct device_node;
+10
include/linux/mmc/mmc.h
··· 161 161 #define R1_STATE_PRG 7 162 162 #define R1_STATE_DIS 8 163 163 164 + static inline bool mmc_ready_for_data(u32 status) 165 + { 166 + /* 167 + * Some cards mishandle the status bits, so make sure to check both the 168 + * busy indication and the card state. 169 + */ 170 + return status & R1_READY_FOR_DATA && 171 + R1_CURRENT_STATE(status) == R1_STATE_TRAN; 172 + } 173 + 164 174 /* 165 175 * MMC/SD in SPI mode reports R1 status always, and R2 for SEND_STATUS 166 176 * R1 is the low order byte; R2 is the next highest byte, when present.
+1 -1
include/linux/mmc/sdio_func.h
··· 25 25 struct sdio_func_tuple *next; 26 26 unsigned char code; 27 27 unsigned char size; 28 - unsigned char data[0]; 28 + unsigned char data[]; 29 29 }; 30 30 31 31 /*
+1
include/linux/platform_data/mmc-esdhc-imx.h
··· 37 37 unsigned int delay_line; 38 38 unsigned int tuning_step; /* The delay cell steps in tuning procedure */ 39 39 unsigned int tuning_start_tap; /* The start delay cell point in tuning procedure */ 40 + unsigned int strobe_dll_delay_target; /* The delay cell for strobe pad (read clock) */ 40 41 }; 41 42 #endif /* __ASM_ARCH_IMX_ESDHC_H */
+1 -1
include/uapi/linux/mmc/ioctl.h
··· 57 57 */ 58 58 struct mmc_ioc_multi_cmd { 59 59 __u64 num_of_cmds; 60 - struct mmc_ioc_cmd cmds[0]; 60 + struct mmc_ioc_cmd cmds[]; 61 61 }; 62 62 63 63 #define MMC_IOC_CMD _IOWR(MMC_BLOCK_MAJOR, 0, struct mmc_ioc_cmd)