Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mmc-v6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc

Pull MMC updates from Ulf Hansson:
"MMC core:
- Don't force a retune before eMMC RPMB switch
- Add optional HS400 tuning in HS400es initialization
- Add a sysfs node for write-protect-group-size
- Add re-tuning test to the mmc-test module
- Use mrq.sbc to support close-ended ioctl requests

MMC host:
- mmci: Add support for SDIO in-band irqs for the stm32 variant
- mmc_spi: Remove broken support for custom DMA mapped buffers
- mtk-sd: Improve and extend the support for tunings
- renesas_sdhi: Document support for the RZ/Five variant
- sdhci_am654: Drop support for the ti,otap-del-sel DT property
- sdhci-brcmstb: Add support for the brcm 74165b0 variant
- sdhci-msm: Add compatibles for IPQ4019 and IPQ8074
- sdhci-of-dwcmshc: Add support for the T-Head TH1520 variant
- sdhci-xenon: Add support for the Marvell ac5 variant"

* tag 'mmc-v6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc: (27 commits)
mmc: xenon: Add ac5 support via bounce buffer
dt-bindings: mmc: add Marvell ac5
mmc: sdhci-brcmstb: add new sdhci reset sequence for brcm 74165b0
dt-bindings: mmc: brcm,sdhci-brcmstb: Add support for 74165b0
mmc: core: Do not force a retune before RPMB switch
mmc: core: Add HS400 tuning in HS400es initialization
mmc: sdhci_omap: Fix TI SoC dependencies
mmc: sdhci_am654: Fix TI SoC dependencies
mmc: core: Add wp_grp_size sysfs node
mmc: mmc_test: Add re-tuning test
mmc: mmc_spi: remove custom DMA mapped buffers
dt-bindings: mmc: sdhci-msm: document dedicated IPQ4019 and IPQ8074
dt-bindings: mmc: synopsys-dw-mshc: add iommus for Intel SocFPGA
mmc: mtk-sd: Extend number of tuning steps
dt-bindings: mmc: mtk-sd: add tuning steps related property
mmc: sdhci-omap: don't misuse kernel-doc marker
mmc: mtk-sd: Increase the verbosity of msdc_track_cmd_data
mmc: core: Use mrq.sbc in close-ended ffu
mmc: sdhci_am654: Drop lookup for deprecated ti,otap-del-sel
mmc: sdhci-of-dwcmshc: Use logical OR instead of bitwise OR in dwcmshc_probe()
...

+771 -319
+4 -4
Documentation/devicetree/bindings/mmc/arasan,sdhci.yaml
··· 226 226 interrupt-parent = <&gic>; 227 227 interrupts = <0 48 4>; 228 228 reg = <0xff160000 0x1000>; 229 - clocks = <&clk200>, <&clk200>; 230 - clock-names = "clk_xin", "clk_ahb"; 229 + clocks = <&clk200>, <&clk200>, <&clk1200>; 230 + clock-names = "clk_xin", "clk_ahb", "gate"; 231 231 clock-output-names = "clk_out_sd0", "clk_in_sd0"; 232 232 #clock-cells = <1>; 233 233 clk-phase-sd-hs = <63>, <72>; ··· 239 239 interrupt-parent = <&gic>; 240 240 interrupts = <0 126 4>; 241 241 reg = <0xf1040000 0x10000>; 242 - clocks = <&clk200>, <&clk200>; 243 - clock-names = "clk_xin", "clk_ahb"; 242 + clocks = <&clk200>, <&clk200>, <&clk1200>; 243 + clock-names = "clk_xin", "clk_ahb", "gate"; 244 244 clock-output-names = "clk_out_sd0", "clk_in_sd0"; 245 245 #clock-cells = <1>; 246 246 clk-phase-sd-hs = <132>, <60>;
+1 -3
Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.yaml
··· 20 20 - const: brcm,sdhci-brcmstb 21 21 - items: 22 22 - enum: 23 + - brcm,bcm74165b0-sdhci 23 24 - brcm,bcm7445-sdhci 24 - - const: brcm,sdhci-brcmstb 25 - - items: 26 - - enum: 27 25 - brcm,bcm7425-sdhci 28 26 - const: brcm,sdhci-brcmstb 29 27
+3 -1
Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.yaml
··· 27 27 - marvell,armada-ap806-sdhci 28 28 29 29 - items: 30 - - const: marvell,armada-ap807-sdhci 30 + - enum: 31 + - marvell,armada-ap807-sdhci 32 + - marvell,ac5-sdhci 31 33 - const: marvell,armada-ap806-sdhci 32 34 33 35 - items:
+9
Documentation/devicetree/bindings/mmc/mtk-sd.yaml
··· 145 145 minimum: 0 146 146 maximum: 7 147 147 148 + mediatek,tuning-step: 149 + $ref: /schemas/types.yaml#/definitions/uint32 150 + description: 151 + Some SoCs need extend tuning step for better delay value to avoid CRC issue. 152 + If not present, default tuning step is 32. For eMMC and SD, this can yield 153 + satisfactory calibration results in most cases. 154 + enum: [32, 64] 155 + default: 32 156 + 148 157 resets: 149 158 maxItems: 1 150 159
+1 -1
Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml
··· 56 56 - renesas,sdhi-r8a77980 # R-Car V3H 57 57 - renesas,sdhi-r8a77990 # R-Car E3 58 58 - renesas,sdhi-r8a77995 # R-Car D3 59 - - renesas,sdhi-r9a07g043 # RZ/G2UL 59 + - renesas,sdhi-r9a07g043 # RZ/G2UL and RZ/Five 60 60 - renesas,sdhi-r9a07g044 # RZ/G2{L,LC} 61 61 - renesas,sdhi-r9a07g054 # RZ/V2L 62 62 - renesas,sdhi-r9a08g045 # RZ/G3S
+2
Documentation/devicetree/bindings/mmc/sdhci-msm.yaml
··· 22 22 - items: 23 23 - enum: 24 24 - qcom,apq8084-sdhci 25 + - qcom,ipq4019-sdhci 26 + - qcom,ipq8074-sdhci 25 27 - qcom,msm8226-sdhci 26 28 - qcom,msm8953-sdhci 27 29 - qcom,msm8974-sdhci
+1
Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml
··· 19 19 - rockchip,rk3568-dwcmshc 20 20 - rockchip,rk3588-dwcmshc 21 21 - snps,dwcmshc-sdhci 22 + - thead,th1520-dwcmshc 22 23 23 24 reg: 24 25 maxItems: 1
+4
Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.yaml
··· 35 35 - const: biu 36 36 - const: ciu 37 37 38 + iommus: 39 + maxItems: 1 40 + 38 41 altr,sysmgr-syscon: 39 42 $ref: /schemas/types.yaml#/definitions/phandle-array 40 43 items: ··· 65 62 altr,sysmgr-syscon: true 66 63 else: 67 64 properties: 65 + iommus: false 68 66 altr,sysmgr-syscon: false 69 67 70 68 required:
+43 -3
drivers/mmc/core/block.c
··· 400 400 struct mmc_ioc_cmd ic; 401 401 unsigned char *buf; 402 402 u64 buf_bytes; 403 + unsigned int flags; 404 + #define MMC_BLK_IOC_DROP BIT(0) /* drop this mrq */ 405 + #define MMC_BLK_IOC_SBC BIT(1) /* use mrq.sbc */ 406 + 403 407 struct mmc_rpmb_data *rpmb; 404 408 }; 405 409 ··· 469 465 } 470 466 471 467 static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, 472 - struct mmc_blk_ioc_data *idata) 468 + struct mmc_blk_ioc_data **idatas, int i) 473 469 { 474 470 struct mmc_command cmd = {}, sbc = {}; 475 471 struct mmc_data data = {}; ··· 479 475 unsigned int busy_timeout_ms; 480 476 int err; 481 477 unsigned int target_part; 478 + struct mmc_blk_ioc_data *idata = idatas[i]; 479 + struct mmc_blk_ioc_data *prev_idata = NULL; 482 480 483 481 if (!card || !md || !idata) 484 482 return -EINVAL; 483 + 484 + if (idata->flags & MMC_BLK_IOC_DROP) 485 + return 0; 486 + 487 + if (idata->flags & MMC_BLK_IOC_SBC) 488 + prev_idata = idatas[i - 1]; 485 489 486 490 /* 487 491 * The RPMB accesses comes in from the character device, so we ··· 544 532 return err; 545 533 } 546 534 547 - if (idata->rpmb) { 535 + if (idata->rpmb || prev_idata) { 548 536 sbc.opcode = MMC_SET_BLOCK_COUNT; 549 537 /* 550 538 * We don't do any blockcount validation because the max size ··· 552 540 * 'Reliable Write' bit here. 
553 541 */ 554 542 sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31)); 543 + if (prev_idata) 544 + sbc.arg = prev_idata->ic.arg; 555 545 sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; 556 546 mrq.sbc = &sbc; 557 547 } ··· 570 556 571 557 mmc_wait_for_req(card->host, &mrq); 572 558 memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp)); 559 + 560 + if (prev_idata) { 561 + memcpy(&prev_idata->ic.response, sbc.resp, sizeof(sbc.resp)); 562 + if (sbc.error) { 563 + dev_err(mmc_dev(card->host), "%s: sbc error %d\n", 564 + __func__, sbc.error); 565 + return sbc.error; 566 + } 567 + } 573 568 574 569 if (cmd.error) { 575 570 dev_err(mmc_dev(card->host), "%s: cmd error %d\n", ··· 1057 1034 md->reset_done &= ~type; 1058 1035 } 1059 1036 1037 + static void mmc_blk_check_sbc(struct mmc_queue_req *mq_rq) 1038 + { 1039 + struct mmc_blk_ioc_data **idata = mq_rq->drv_op_data; 1040 + int i; 1041 + 1042 + for (i = 1; i < mq_rq->ioc_count; i++) { 1043 + if (idata[i - 1]->ic.opcode == MMC_SET_BLOCK_COUNT && 1044 + mmc_op_multi(idata[i]->ic.opcode)) { 1045 + idata[i - 1]->flags |= MMC_BLK_IOC_DROP; 1046 + idata[i]->flags |= MMC_BLK_IOC_SBC; 1047 + } 1048 + } 1049 + } 1050 + 1060 1051 /* 1061 1052 * The non-block commands come back from the block layer after it queued it and 1062 1053 * processed it with all other requests and then they get issued in this ··· 1098 1061 if (ret) 1099 1062 break; 1100 1063 } 1064 + 1065 + mmc_blk_check_sbc(mq_rq); 1066 + 1101 1067 fallthrough; 1102 1068 case MMC_DRV_OP_IOCTL_RPMB: 1103 1069 idata = mq_rq->drv_op_data; 1104 1070 for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) { 1105 - ret = __mmc_blk_ioctl_cmd(card, md, idata[i]); 1071 + ret = __mmc_blk_ioctl_cmd(card, md, idata, i); 1106 1072 if (ret) 1107 1073 break; 1108 1074 }
+1 -2
drivers/mmc/core/host.c
··· 119 119 120 120 /* 121 121 * Pause re-tuning for a small set of operations. The pause begins after the 122 - * next command and after first doing re-tuning. 122 + * next command. 123 123 */ 124 124 void mmc_retune_pause(struct mmc_host *host) 125 125 { 126 126 if (!host->retune_paused) { 127 127 host->retune_paused = 1; 128 - mmc_retune_needed(host); 129 128 mmc_retune_hold(host); 130 129 } 131 130 }
+22 -8
drivers/mmc/core/mmc.c
··· 136 136 mmc_init_erase(card); 137 137 } 138 138 139 + 140 + static void mmc_set_wp_grp_size(struct mmc_card *card) 141 + { 142 + if (card->ext_csd.erase_group_def & 1) 143 + card->wp_grp_size = card->ext_csd.hc_erase_size * 144 + card->ext_csd.raw_hc_erase_gap_size; 145 + else 146 + card->wp_grp_size = card->csd.erase_size * 147 + (card->csd.wp_grp_size + 1); 148 + } 149 + 139 150 /* 140 151 * Given a 128-bit response, decode to our card CSD structure. 141 152 */ ··· 197 186 b = UNSTUFF_BITS(resp, 37, 5); 198 187 csd->erase_size = (a + 1) * (b + 1); 199 188 csd->erase_size <<= csd->write_blkbits - 9; 189 + csd->wp_grp_size = UNSTUFF_BITS(resp, 32, 5); 200 190 } 201 191 202 192 return 0; ··· 625 613 } else { 626 614 card->ext_csd.data_tag_unit_size = 0; 627 615 } 628 - 629 - card->ext_csd.max_packed_writes = 630 - ext_csd[EXT_CSD_MAX_PACKED_WRITES]; 631 - card->ext_csd.max_packed_reads = 632 - ext_csd[EXT_CSD_MAX_PACKED_READS]; 633 616 } else { 634 617 card->ext_csd.data_sector_size = 512; 635 618 } ··· 797 790 MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year); 798 791 MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9); 799 792 MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9); 793 + MMC_DEV_ATTR(wp_grp_size, "%u\n", card->wp_grp_size << 9); 800 794 MMC_DEV_ATTR(ffu_capable, "%d\n", card->ext_csd.ffu_capable); 801 795 MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev); 802 796 MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid); ··· 858 850 &dev_attr_date.attr, 859 851 &dev_attr_erase_size.attr, 860 852 &dev_attr_preferred_erase_size.attr, 853 + &dev_attr_wp_grp_size.attr, 861 854 &dev_attr_fwrev.attr, 862 855 &dev_attr_ffu_capable.attr, 863 856 &dev_attr_hwrev.attr, ··· 1773 1764 mmc_set_erase_size(card); 1774 1765 } 1775 1766 } 1776 - 1767 + mmc_set_wp_grp_size(card); 1777 1768 /* 1778 1769 * Ensure eMMC user default partition is enabled 1779 1770 */ ··· 1831 1822 1832 1823 if (err) 1833 1824 goto free_card; 1834 - 1835 - } 
else if (!mmc_card_hs400es(card)) { 1825 + } else if (mmc_card_hs400es(card)) { 1826 + if (host->ops->execute_hs400_tuning) { 1827 + err = host->ops->execute_hs400_tuning(host, card); 1828 + if (err) 1829 + goto free_card; 1830 + } 1831 + } else { 1836 1832 /* Select the desired bus width optionally */ 1837 1833 err = mmc_select_bus_width(card); 1838 1834 if (err > 0 && mmc_card_hs(card)) {
+27 -6
drivers/mmc/core/mmc_test.c
··· 1904 1904 } 1905 1905 1906 1906 static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print, 1907 - unsigned long sz) 1907 + unsigned long sz, int secs, int force_retuning) 1908 1908 { 1909 1909 unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea; 1910 1910 unsigned int ssz; ··· 1921 1921 for (cnt = 0; cnt < UINT_MAX; cnt++) { 1922 1922 ktime_get_ts64(&ts2); 1923 1923 ts = timespec64_sub(ts2, ts1); 1924 - if (ts.tv_sec >= 10) 1924 + if (ts.tv_sec >= secs) 1925 1925 break; 1926 1926 ea = mmc_test_rnd_num(range1); 1927 1927 if (ea == last_ea) ··· 1929 1929 last_ea = ea; 1930 1930 dev_addr = rnd_addr + test->card->pref_erase * ea + 1931 1931 ssz * mmc_test_rnd_num(range2); 1932 + if (force_retuning) 1933 + mmc_retune_needed(test->card->host); 1932 1934 ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0); 1933 1935 if (ret) 1934 1936 return ret; ··· 1955 1953 */ 1956 1954 if (write) { 1957 1955 next = rnd_next; 1958 - ret = mmc_test_rnd_perf(test, write, 0, sz); 1956 + ret = mmc_test_rnd_perf(test, write, 0, sz, 10, 0); 1959 1957 if (ret) 1960 1958 return ret; 1961 1959 rnd_next = next; 1962 1960 } 1963 - ret = mmc_test_rnd_perf(test, write, 1, sz); 1961 + ret = mmc_test_rnd_perf(test, write, 1, sz, 10, 0); 1964 1962 if (ret) 1965 1963 return ret; 1966 1964 } 1967 1965 sz = t->max_tfr; 1968 1966 if (write) { 1969 1967 next = rnd_next; 1970 - ret = mmc_test_rnd_perf(test, write, 0, sz); 1968 + ret = mmc_test_rnd_perf(test, write, 0, sz, 10, 0); 1971 1969 if (ret) 1972 1970 return ret; 1973 1971 rnd_next = next; 1974 1972 } 1975 - return mmc_test_rnd_perf(test, write, 1, sz); 1973 + return mmc_test_rnd_perf(test, write, 1, sz, 10, 0); 1974 + } 1975 + 1976 + static int mmc_test_retuning(struct mmc_test_card *test) 1977 + { 1978 + if (!mmc_can_retune(test->card->host)) { 1979 + pr_info("%s: No retuning - test skipped\n", 1980 + mmc_hostname(test->card->host)); 1981 + return RESULT_UNSUP_HOST; 1982 + } 1983 + 1984 + return 
mmc_test_rnd_perf(test, 0, 0, 8192, 30, 1); 1976 1985 } 1977 1986 1978 1987 /* ··· 2934 2921 .run = mmc_test_cmds_during_write_cmd23_nonblock, 2935 2922 .cleanup = mmc_test_area_cleanup, 2936 2923 }, 2924 + 2925 + { 2926 + .name = "Re-tuning reliability", 2927 + .prepare = mmc_test_area_prepare, 2928 + .run = mmc_test_retuning, 2929 + .cleanup = mmc_test_area_cleanup, 2930 + }, 2931 + 2937 2932 }; 2938 2933 2939 2934 static DEFINE_MUTEX(mmc_test_lock);
+6 -4
drivers/mmc/host/Kconfig
··· 1026 1026 1027 1027 config MMC_SDHCI_OMAP 1028 1028 tristate "TI SDHCI Controller Support" 1029 + depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST 1029 1030 depends on MMC_SDHCI_PLTFM && OF 1030 1031 select THERMAL 1031 1032 imply TI_SOC_THERMAL 1032 1033 select MMC_SDHCI_EXTERNAL_DMA if DMA_ENGINE 1033 1034 help 1034 1035 This selects the Secure Digital Host Controller Interface (SDHCI) 1035 - support present in TI's DRA7 SOCs. The controller supports 1036 - SD/MMC/SDIO devices. 1036 + support present in TI's Keystone/OMAP2+/DRA7 SOCs. The controller 1037 + supports SD/MMC/SDIO devices. 1037 1038 1038 1039 If you have a controller with this interface, say Y or M here. 1039 1040 ··· 1042 1041 1043 1042 config MMC_SDHCI_AM654 1044 1043 tristate "Support for the SDHCI Controller in TI's AM654 SOCs" 1044 + depends on ARCH_K3 || COMPILE_TEST 1045 1045 depends on MMC_SDHCI_PLTFM && OF 1046 1046 select MMC_SDHCI_IO_ACCESSORS 1047 1047 select MMC_CQHCI 1048 1048 select REGMAP_MMIO 1049 1049 help 1050 1050 This selects the Secure Digital Host Controller Interface (SDHCI) 1051 - support present in TI's AM654 SOCs. The controller supports 1052 - SD/MMC/SDIO devices. 1051 + support present in TI's AM65x/AM64x/AM62x/J721E SOCs. The controller 1052 + supports SD/MMC/SDIO devices. 1053 1053 1054 1054 If you have a controller with this interface, say Y or M here. 1055 1055
+5 -181
drivers/mmc/host/mmc_spi.c
··· 15 15 #include <linux/slab.h> 16 16 #include <linux/module.h> 17 17 #include <linux/bio.h> 18 - #include <linux/dma-mapping.h> 18 + #include <linux/dma-direction.h> 19 19 #include <linux/crc7.h> 20 20 #include <linux/crc-itu-t.h> 21 21 #include <linux/scatterlist.h> ··· 119 119 struct spi_transfer status; 120 120 struct spi_message readback; 121 121 122 - /* underlying DMA-aware controller, or null */ 123 - struct device *dma_dev; 124 - 125 122 /* buffer used for commands and for message "overhead" */ 126 123 struct scratch *data; 127 - dma_addr_t data_dma; 128 124 129 125 /* Specs say to write ones most of the time, even when the card 130 126 * has no need to read its input data; and many cards won't care. 131 127 * This is our source of those ones. 132 128 */ 133 129 void *ones; 134 - dma_addr_t ones_dma; 135 130 }; 136 131 137 132 ··· 142 147 return spi_setup(host->spi); 143 148 } 144 149 145 - static int 146 - mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len) 150 + static int mmc_spi_readbytes(struct mmc_spi_host *host, unsigned int len) 147 151 { 148 - int status; 149 - 150 152 if (len > sizeof(*host->data)) { 151 153 WARN_ON(1); 152 154 return -EIO; ··· 151 159 152 160 host->status.len = len; 153 161 154 - if (host->dma_dev) 155 - dma_sync_single_for_device(host->dma_dev, 156 - host->data_dma, sizeof(*host->data), 157 - DMA_FROM_DEVICE); 158 - 159 - status = spi_sync_locked(host->spi, &host->readback); 160 - 161 - if (host->dma_dev) 162 - dma_sync_single_for_cpu(host->dma_dev, 163 - host->data_dma, sizeof(*host->data), 164 - DMA_FROM_DEVICE); 165 - 166 - return status; 162 + return spi_sync_locked(host->spi, &host->readback); 167 163 } 168 164 169 165 static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout, ··· 486 506 t = &host->t; 487 507 memset(t, 0, sizeof(*t)); 488 508 t->tx_buf = t->rx_buf = data->status; 489 - t->tx_dma = t->rx_dma = host->data_dma; 490 509 t->len = cp - data->status; 491 510 t->cs_change = 1; 492 511 
spi_message_add_tail(t, &host->m); 493 512 494 - if (host->dma_dev) { 495 - host->m.is_dma_mapped = 1; 496 - dma_sync_single_for_device(host->dma_dev, 497 - host->data_dma, sizeof(*host->data), 498 - DMA_BIDIRECTIONAL); 499 - } 500 513 status = spi_sync_locked(host->spi, &host->m); 501 - 502 - if (host->dma_dev) 503 - dma_sync_single_for_cpu(host->dma_dev, 504 - host->data_dma, sizeof(*host->data), 505 - DMA_BIDIRECTIONAL); 506 514 if (status < 0) { 507 515 dev_dbg(&host->spi->dev, " ... write returned %d\n", status); 508 516 cmd->error = status; ··· 508 540 * We always provide TX data for data and CRC. The MMC/SD protocol 509 541 * requires us to write ones; but Linux defaults to writing zeroes; 510 542 * so we explicitly initialize it to all ones on RX paths. 511 - * 512 - * We also handle DMA mapping, so the underlying SPI controller does 513 - * not need to (re)do it for each message. 514 543 */ 515 544 static void 516 545 mmc_spi_setup_data_message( ··· 517 552 { 518 553 struct spi_transfer *t; 519 554 struct scratch *scratch = host->data; 520 - dma_addr_t dma = host->data_dma; 521 555 522 556 spi_message_init(&host->m); 523 - if (dma) 524 - host->m.is_dma_mapped = 1; 525 557 526 558 /* for reads, readblock() skips 0xff bytes before finding 527 559 * the token; for writes, this transfer issues that token. 
··· 532 570 else 533 571 scratch->data_token = SPI_TOKEN_SINGLE; 534 572 t->tx_buf = &scratch->data_token; 535 - if (dma) 536 - t->tx_dma = dma + offsetof(struct scratch, data_token); 537 573 spi_message_add_tail(t, &host->m); 538 574 } 539 575 ··· 541 581 t = &host->t; 542 582 memset(t, 0, sizeof(*t)); 543 583 t->tx_buf = host->ones; 544 - t->tx_dma = host->ones_dma; 545 584 /* length and actual buffer info are written later */ 546 585 spi_message_add_tail(t, &host->m); 547 586 ··· 550 591 if (direction == DMA_TO_DEVICE) { 551 592 /* the actual CRC may get written later */ 552 593 t->tx_buf = &scratch->crc_val; 553 - if (dma) 554 - t->tx_dma = dma + offsetof(struct scratch, crc_val); 555 594 } else { 556 595 t->tx_buf = host->ones; 557 - t->tx_dma = host->ones_dma; 558 596 t->rx_buf = &scratch->crc_val; 559 - if (dma) 560 - t->rx_dma = dma + offsetof(struct scratch, crc_val); 561 597 } 562 598 spi_message_add_tail(t, &host->m); 563 599 ··· 575 621 memset(t, 0, sizeof(*t)); 576 622 t->len = (direction == DMA_TO_DEVICE) ? sizeof(scratch->status) : 1; 577 623 t->tx_buf = host->ones; 578 - t->tx_dma = host->ones_dma; 579 624 t->rx_buf = scratch->status; 580 - if (dma) 581 - t->rx_dma = dma + offsetof(struct scratch, status); 582 625 t->cs_change = 1; 583 626 spi_message_add_tail(t, &host->m); 584 627 } ··· 604 653 605 654 if (host->mmc->use_spi_crc) 606 655 scratch->crc_val = cpu_to_be16(crc_itu_t(0, t->tx_buf, t->len)); 607 - if (host->dma_dev) 608 - dma_sync_single_for_device(host->dma_dev, 609 - host->data_dma, sizeof(*scratch), 610 - DMA_BIDIRECTIONAL); 611 656 612 657 status = spi_sync_locked(spi, &host->m); 613 - 614 658 if (status != 0) { 615 659 dev_dbg(&spi->dev, "write error (%d)\n", status); 616 660 return status; 617 661 } 618 - 619 - if (host->dma_dev) 620 - dma_sync_single_for_cpu(host->dma_dev, 621 - host->data_dma, sizeof(*scratch), 622 - DMA_BIDIRECTIONAL); 623 662 624 663 /* 625 664 * Get the transmission data-response reply. 
It must follow ··· 659 718 } 660 719 661 720 t->tx_buf += t->len; 662 - if (host->dma_dev) 663 - t->tx_dma += t->len; 664 721 665 722 /* Return when not busy. If we didn't collect that status yet, 666 723 * we'll need some more I/O. ··· 722 783 } 723 784 leftover = status << 1; 724 785 725 - if (host->dma_dev) { 726 - dma_sync_single_for_device(host->dma_dev, 727 - host->data_dma, sizeof(*scratch), 728 - DMA_BIDIRECTIONAL); 729 - dma_sync_single_for_device(host->dma_dev, 730 - t->rx_dma, t->len, 731 - DMA_FROM_DEVICE); 732 - } 733 - 734 786 status = spi_sync_locked(spi, &host->m); 735 787 if (status < 0) { 736 788 dev_dbg(&spi->dev, "read error %d\n", status); 737 789 return status; 738 - } 739 - 740 - if (host->dma_dev) { 741 - dma_sync_single_for_cpu(host->dma_dev, 742 - host->data_dma, sizeof(*scratch), 743 - DMA_BIDIRECTIONAL); 744 - dma_sync_single_for_cpu(host->dma_dev, 745 - t->rx_dma, t->len, 746 - DMA_FROM_DEVICE); 747 790 } 748 791 749 792 if (bitshift) { ··· 762 841 } 763 842 764 843 t->rx_buf += t->len; 765 - if (host->dma_dev) 766 - t->rx_dma += t->len; 767 844 768 845 return 0; 769 846 } ··· 776 857 struct mmc_data *data, u32 blk_size) 777 858 { 778 859 struct spi_device *spi = host->spi; 779 - struct device *dma_dev = host->dma_dev; 780 860 struct spi_transfer *t; 781 861 enum dma_data_direction direction = mmc_get_dma_dir(data); 782 862 struct scatterlist *sg; ··· 802 884 */ 803 885 for_each_sg(data->sg, sg, data->sg_len, n_sg) { 804 886 int status = 0; 805 - dma_addr_t dma_addr = 0; 806 887 void *kmap_addr; 807 888 unsigned length = sg->length; 808 - enum dma_data_direction dir = direction; 809 - 810 - /* set up dma mapping for controller drivers that might 811 - * use DMA ... though they may fall back to PIO 812 - */ 813 - if (dma_dev) { 814 - /* never invalidate whole *shared* pages ... 
*/ 815 - if ((sg->offset != 0 || length != PAGE_SIZE) 816 - && dir == DMA_FROM_DEVICE) 817 - dir = DMA_BIDIRECTIONAL; 818 - 819 - dma_addr = dma_map_page(dma_dev, sg_page(sg), 0, 820 - PAGE_SIZE, dir); 821 - if (dma_mapping_error(dma_dev, dma_addr)) { 822 - data->error = -EFAULT; 823 - break; 824 - } 825 - if (direction == DMA_TO_DEVICE) 826 - t->tx_dma = dma_addr + sg->offset; 827 - else 828 - t->rx_dma = dma_addr + sg->offset; 829 - } 830 889 831 890 /* allow pio too; we don't allow highmem */ 832 891 kmap_addr = kmap(sg_page(sg)); ··· 836 941 if (direction == DMA_FROM_DEVICE) 837 942 flush_dcache_page(sg_page(sg)); 838 943 kunmap(sg_page(sg)); 839 - if (dma_dev) 840 - dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir); 841 944 842 945 if (status < 0) { 843 946 data->error = status; ··· 870 977 scratch->status[0] = SPI_TOKEN_STOP_TRAN; 871 978 872 979 host->early_status.tx_buf = host->early_status.rx_buf; 873 - host->early_status.tx_dma = host->early_status.rx_dma; 874 980 host->early_status.len = statlen; 875 981 876 - if (host->dma_dev) 877 - dma_sync_single_for_device(host->dma_dev, 878 - host->data_dma, sizeof(*scratch), 879 - DMA_BIDIRECTIONAL); 880 - 881 982 tmp = spi_sync_locked(spi, &host->m); 882 - 883 - if (host->dma_dev) 884 - dma_sync_single_for_cpu(host->dma_dev, 885 - host->data_dma, sizeof(*scratch), 886 - DMA_BIDIRECTIONAL); 887 - 888 983 if (tmp < 0) { 889 984 if (!data->error) 890 985 data->error = tmp; ··· 1146 1265 return IRQ_HANDLED; 1147 1266 } 1148 1267 1149 - #ifdef CONFIG_HAS_DMA 1150 - static int mmc_spi_dma_alloc(struct mmc_spi_host *host) 1151 - { 1152 - struct spi_device *spi = host->spi; 1153 - struct device *dev; 1154 - 1155 - if (!spi->master->dev.parent->dma_mask) 1156 - return 0; 1157 - 1158 - dev = spi->master->dev.parent; 1159 - 1160 - host->ones_dma = dma_map_single(dev, host->ones, MMC_SPI_BLOCKSIZE, 1161 - DMA_TO_DEVICE); 1162 - if (dma_mapping_error(dev, host->ones_dma)) 1163 - return -ENOMEM; 1164 - 1165 - host->data_dma = 
dma_map_single(dev, host->data, sizeof(*host->data), 1166 - DMA_BIDIRECTIONAL); 1167 - if (dma_mapping_error(dev, host->data_dma)) { 1168 - dma_unmap_single(dev, host->ones_dma, MMC_SPI_BLOCKSIZE, 1169 - DMA_TO_DEVICE); 1170 - return -ENOMEM; 1171 - } 1172 - 1173 - dma_sync_single_for_cpu(dev, host->data_dma, sizeof(*host->data), 1174 - DMA_BIDIRECTIONAL); 1175 - 1176 - host->dma_dev = dev; 1177 - return 0; 1178 - } 1179 - 1180 - static void mmc_spi_dma_free(struct mmc_spi_host *host) 1181 - { 1182 - if (!host->dma_dev) 1183 - return; 1184 - 1185 - dma_unmap_single(host->dma_dev, host->ones_dma, MMC_SPI_BLOCKSIZE, 1186 - DMA_TO_DEVICE); 1187 - dma_unmap_single(host->dma_dev, host->data_dma, sizeof(*host->data), 1188 - DMA_BIDIRECTIONAL); 1189 - } 1190 - #else 1191 - static inline int mmc_spi_dma_alloc(struct mmc_spi_host *host) { return 0; } 1192 - static inline void mmc_spi_dma_free(struct mmc_spi_host *host) {} 1193 - #endif 1194 - 1195 1268 static int mmc_spi_probe(struct spi_device *spi) 1196 1269 { 1197 1270 void *ones; ··· 1237 1402 host->powerup_msecs = 250; 1238 1403 } 1239 1404 1240 - /* preallocate dma buffers */ 1405 + /* Preallocate buffers */ 1241 1406 host->data = kmalloc(sizeof(*host->data), GFP_KERNEL); 1242 1407 if (!host->data) 1243 1408 goto fail_nobuf1; 1244 1409 1245 - status = mmc_spi_dma_alloc(host); 1246 - if (status) 1247 - goto fail_dma; 1248 - 1249 1410 /* setup message for status/busy readback */ 1250 1411 spi_message_init(&host->readback); 1251 - host->readback.is_dma_mapped = (host->dma_dev != NULL); 1252 1412 1253 1413 spi_message_add_tail(&host->status, &host->readback); 1254 1414 host->status.tx_buf = host->ones; 1255 - host->status.tx_dma = host->ones_dma; 1256 1415 host->status.rx_buf = &host->data->status; 1257 - host->status.rx_dma = host->data_dma + offsetof(struct scratch, status); 1258 1416 host->status.cs_change = 1; 1259 1417 1260 1418 /* register card detect irq */ ··· 1292 1464 if (!status) 1293 1465 has_ro = true; 1294 
1466 1295 - dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n", 1467 + dev_info(&spi->dev, "SD/MMC host %s%s%s%s\n", 1296 1468 dev_name(&mmc->class_dev), 1297 - host->dma_dev ? "" : ", no DMA", 1298 1469 has_ro ? "" : ", no WP", 1299 1470 (host->pdata && host->pdata->setpower) 1300 1471 ? "" : ", no poweroff", ··· 1304 1477 fail_gpiod_request: 1305 1478 mmc_remove_host(mmc); 1306 1479 fail_glue_init: 1307 - mmc_spi_dma_free(host); 1308 - fail_dma: 1309 1480 kfree(host->data); 1310 1481 fail_nobuf1: 1311 1482 mmc_spi_put_pdata(spi); ··· 1325 1500 1326 1501 mmc_remove_host(mmc); 1327 1502 1328 - mmc_spi_dma_free(host); 1329 1503 kfree(host->data); 1330 1504 kfree(host->ones); 1331 1505
+67 -2
drivers/mmc/host/mmci.c
··· 273 273 .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, 274 274 .stm32_idmabsize_mask = GENMASK(12, 5), 275 275 .stm32_idmabsize_align = BIT(5), 276 + .supports_sdio_irq = true, 276 277 .busy_timeout = true, 277 278 .busy_detect = true, 278 279 .busy_detect_flag = MCI_STM32_BUSYD0, ··· 301 300 .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, 302 301 .stm32_idmabsize_mask = GENMASK(16, 5), 303 302 .stm32_idmabsize_align = BIT(5), 303 + .supports_sdio_irq = true, 304 304 .dma_lli = true, 305 305 .busy_timeout = true, 306 306 .busy_detect = true, ··· 330 328 .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, 331 329 .stm32_idmabsize_mask = GENMASK(16, 6), 332 330 .stm32_idmabsize_align = BIT(6), 331 + .supports_sdio_irq = true, 333 332 .dma_lli = true, 334 333 .busy_timeout = true, 335 334 .busy_detect = true, ··· 424 421 */ 425 422 static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl) 426 423 { 427 - /* Keep busy mode in DPSM if enabled */ 428 - datactrl |= host->datactrl_reg & host->variant->busy_dpsm_flag; 424 + /* Keep busy mode in DPSM and SDIO mask if enabled */ 425 + datactrl |= host->datactrl_reg & (host->variant->busy_dpsm_flag | 426 + host->variant->datactrl_mask_sdio); 429 427 430 428 if (host->datactrl_reg != datactrl) { 431 429 host->datactrl_reg = datactrl; ··· 1766 1762 return IRQ_HANDLED; 1767 1763 } 1768 1764 1765 + static void mmci_write_sdio_irq_bit(struct mmci_host *host, int enable) 1766 + { 1767 + void __iomem *base = host->base; 1768 + u32 mask = readl_relaxed(base + MMCIMASK0); 1769 + 1770 + if (enable) 1771 + writel_relaxed(mask | MCI_ST_SDIOITMASK, base + MMCIMASK0); 1772 + else 1773 + writel_relaxed(mask & ~MCI_ST_SDIOITMASK, base + MMCIMASK0); 1774 + } 1775 + 1776 + static void mmci_signal_sdio_irq(struct mmci_host *host, u32 status) 1777 + { 1778 + if (status & MCI_ST_SDIOIT) { 1779 + mmci_write_sdio_irq_bit(host, 0); 1780 + sdio_signal_irq(host->mmc); 1781 + } 1782 + } 1783 + 1769 1784 /* 1770 1785 * Handle completion of command and data 
transfers. 1771 1786 */ ··· 1828 1805 mmci_cmd_irq(host, host->cmd, status); 1829 1806 mmci_data_irq(host, host->data, status); 1830 1807 } 1808 + 1809 + if (host->variant->supports_sdio_irq) 1810 + mmci_signal_sdio_irq(host, status); 1831 1811 1832 1812 /* 1833 1813 * Busy detection has been handled by mmci_cmd_irq() above. ··· 2066 2040 dev_warn(mmc_dev(mmc), "Voltage switch failed\n"); 2067 2041 2068 2042 return ret; 2043 + } 2044 + 2045 + static void mmci_enable_sdio_irq(struct mmc_host *mmc, int enable) 2046 + { 2047 + struct mmci_host *host = mmc_priv(mmc); 2048 + unsigned long flags; 2049 + 2050 + if (enable) 2051 + /* Keep the SDIO mode bit if SDIO irqs are enabled */ 2052 + pm_runtime_get_sync(mmc_dev(mmc)); 2053 + 2054 + spin_lock_irqsave(&host->lock, flags); 2055 + mmci_write_sdio_irq_bit(host, enable); 2056 + spin_unlock_irqrestore(&host->lock, flags); 2057 + 2058 + if (!enable) { 2059 + pm_runtime_mark_last_busy(mmc_dev(mmc)); 2060 + pm_runtime_put_autosuspend(mmc_dev(mmc)); 2061 + } 2062 + } 2063 + 2064 + static void mmci_ack_sdio_irq(struct mmc_host *mmc) 2065 + { 2066 + struct mmci_host *host = mmc_priv(mmc); 2067 + unsigned long flags; 2068 + 2069 + spin_lock_irqsave(&host->lock, flags); 2070 + mmci_write_sdio_irq_bit(host, 1); 2071 + spin_unlock_irqrestore(&host->lock, flags); 2069 2072 } 2070 2073 2071 2074 static struct mmc_host_ops mmci_ops = { ··· 2370 2315 mmci_write_datactrlreg(host, 2371 2316 host->variant->busy_dpsm_flag); 2372 2317 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY; 2318 + } 2319 + 2320 + if (variant->supports_sdio_irq && host->mmc->caps & MMC_CAP_SDIO_IRQ) { 2321 + mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; 2322 + 2323 + mmci_ops.enable_sdio_irq = mmci_enable_sdio_irq; 2324 + mmci_ops.ack_sdio_irq = mmci_ack_sdio_irq; 2325 + 2326 + mmci_write_datactrlreg(host, 2327 + host->variant->datactrl_mask_sdio); 2373 2328 } 2374 2329 2375 2330 /* Variants with mandatory busy timeout in HW needs R1B responses. */
+2
drivers/mmc/host/mmci.h
··· 331 331 * register. 332 332 * @opendrain: bitmask identifying the OPENDRAIN bit inside MMCIPOWER register 333 333 * @dma_lli: true if variant has dma link list feature. 334 + * @supports_sdio_irq: allow SD I/O card to interrupt the host 334 335 * @stm32_idmabsize_mask: stm32 sdmmc idma buffer size. 335 336 * @dma_flow_controller: use peripheral as flow controller for DMA. 336 337 */ ··· 378 377 u32 start_err; 379 378 u32 opendrain; 380 379 u8 dma_lli:1; 380 + bool supports_sdio_irq; 381 381 u32 stm32_idmabsize_mask; 382 382 u32 stm32_idmabsize_align; 383 383 bool dma_flow_controller;
+115 -51
drivers/mmc/host/mtk-sd.c
··· 252 252 253 253 #define MSDC_PAD_TUNE_DATWRDLY GENMASK(4, 0) /* RW */ 254 254 #define MSDC_PAD_TUNE_DATRRDLY GENMASK(12, 8) /* RW */ 255 + #define MSDC_PAD_TUNE_DATRRDLY2 GENMASK(12, 8) /* RW */ 255 256 #define MSDC_PAD_TUNE_CMDRDLY GENMASK(20, 16) /* RW */ 257 + #define MSDC_PAD_TUNE_CMDRDLY2 GENMASK(20, 16) /* RW */ 256 258 #define MSDC_PAD_TUNE_CMDRRDLY GENMASK(26, 22) /* RW */ 257 259 #define MSDC_PAD_TUNE_CLKTDLY GENMASK(31, 27) /* RW */ 258 260 #define MSDC_PAD_TUNE_RXDLYSEL BIT(15) /* RW */ 259 261 #define MSDC_PAD_TUNE_RD_SEL BIT(13) /* RW */ 260 262 #define MSDC_PAD_TUNE_CMD_SEL BIT(21) /* RW */ 263 + #define MSDC_PAD_TUNE_RD2_SEL BIT(13) /* RW */ 264 + #define MSDC_PAD_TUNE_CMD2_SEL BIT(21) /* RW */ 261 265 262 266 #define PAD_DS_TUNE_DLY_SEL BIT(0) /* RW */ 263 267 #define PAD_DS_TUNE_DLY1 GENMASK(6, 2) /* RW */ ··· 329 325 330 326 #define DEFAULT_DEBOUNCE (8) /* 8 cycles CD debounce */ 331 327 332 - #define PAD_DELAY_MAX 32 /* PAD delay cells */ 328 + #define TUNING_REG2_FIXED_OFFEST 4 329 + #define PAD_DELAY_HALF 32 /* PAD delay cells */ 330 + #define PAD_DELAY_FULL 64 333 331 /*--------------------------------------------------------------------------*/ 334 332 /* Descriptor Structure */ 335 333 /*--------------------------------------------------------------------------*/ ··· 467 461 u32 hs400_ds_dly3; 468 462 u32 hs200_cmd_int_delay; /* cmd internal delay for HS200/SDR104 */ 469 463 u32 hs400_cmd_int_delay; /* cmd internal delay for HS400 */ 464 + u32 tuning_step; 470 465 bool hs400_cmd_resp_sel_rising; 471 466 /* cmd response sample selection for HS400 */ 472 467 bool hs400_mode; /* current eMMC will run at hs400 mode */ ··· 1156 1149 1157 1150 static void msdc_track_cmd_data(struct msdc_host *host, struct mmc_command *cmd) 1158 1151 { 1159 - if (host->error) 1160 - dev_dbg(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n", 1161 - __func__, cmd->opcode, cmd->arg, host->error); 1152 + if (host->error && 1153 + ((!mmc_op_tuning(cmd->opcode) 
&& !host->hs400_tuning) || 1154 + cmd->error == -ETIMEDOUT)) 1155 + dev_warn(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n", 1156 + __func__, cmd->opcode, cmd->arg, host->error); 1162 1157 } 1163 1158 1164 1159 static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq) ··· 1624 1615 } 1625 1616 1626 1617 if (cmd_err || dat_err) { 1627 - dev_err(host->dev, "cmd_err = %d, dat_err =%d, intsts = 0x%x", 1618 + dev_err(host->dev, "cmd_err = %d, dat_err = %d, intsts = 0x%x", 1628 1619 cmd_err, dat_err, intsts); 1629 1620 } 1630 1621 ··· 1789 1780 DATA_K_VALUE_SEL); 1790 1781 sdr_set_bits(host->top_base + EMMC_TOP_CMD, 1791 1782 PAD_CMD_RD_RXDLY_SEL); 1783 + if (host->tuning_step > PAD_DELAY_HALF) { 1784 + sdr_set_bits(host->top_base + EMMC_TOP_CONTROL, 1785 + PAD_DAT_RD_RXDLY2_SEL); 1786 + sdr_set_bits(host->top_base + EMMC_TOP_CMD, 1787 + PAD_CMD_RD_RXDLY2_SEL); 1788 + } 1792 1789 } else { 1793 1790 sdr_set_bits(host->base + tune_reg, 1794 1791 MSDC_PAD_TUNE_RD_SEL | 1795 1792 MSDC_PAD_TUNE_CMD_SEL); 1793 + if (host->tuning_step > PAD_DELAY_HALF) 1794 + sdr_set_bits(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST, 1795 + MSDC_PAD_TUNE_RD2_SEL | 1796 + MSDC_PAD_TUNE_CMD2_SEL); 1796 1797 } 1797 1798 } else { 1798 1799 /* choose clock tune */ ··· 1944 1925 msdc_set_mclk(host, ios->timing, ios->clock); 1945 1926 } 1946 1927 1947 - static u32 test_delay_bit(u32 delay, u32 bit) 1928 + static u64 test_delay_bit(u64 delay, u32 bit) 1948 1929 { 1949 - bit %= PAD_DELAY_MAX; 1950 - return delay & BIT(bit); 1930 + bit %= PAD_DELAY_FULL; 1931 + return delay & BIT_ULL(bit); 1951 1932 } 1952 1933 1953 - static int get_delay_len(u32 delay, u32 start_bit) 1934 + static int get_delay_len(u64 delay, u32 start_bit) 1954 1935 { 1955 1936 int i; 1956 1937 1957 - for (i = 0; i < (PAD_DELAY_MAX - start_bit); i++) { 1938 + for (i = 0; i < (PAD_DELAY_FULL - start_bit); i++) { 1958 1939 if (test_delay_bit(delay, start_bit + i) == 0) 1959 1940 return i; 1960 1941 } 1961 
- return PAD_DELAY_MAX - start_bit; 1942 + return PAD_DELAY_FULL - start_bit; 1962 1943 } 1963 1944 1964 - static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay) 1945 + static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u64 delay) 1965 1946 { 1966 1947 int start = 0, len = 0; 1967 1948 int start_final = 0, len_final = 0; ··· 1969 1950 struct msdc_delay_phase delay_phase = { 0, }; 1970 1951 1971 1952 if (delay == 0) { 1972 - dev_err(host->dev, "phase error: [map:%x]\n", delay); 1953 + dev_err(host->dev, "phase error: [map:%016llx]\n", delay); 1973 1954 delay_phase.final_phase = final_phase; 1974 1955 return delay_phase; 1975 1956 } 1976 1957 1977 - while (start < PAD_DELAY_MAX) { 1958 + while (start < PAD_DELAY_FULL) { 1978 1959 len = get_delay_len(delay, start); 1979 1960 if (len_final < len) { 1980 1961 start_final = start; 1981 1962 len_final = len; 1982 1963 } 1983 1964 start += len ? len : 1; 1984 - if (len >= 12 && start_final < 4) 1965 + if (!upper_32_bits(delay) && len >= 12 && start_final < 4) 1985 1966 break; 1986 1967 } 1987 1968 1988 1969 /* The rule is that to find the smallest delay cell */ 1989 1970 if (start_final == 0) 1990 - final_phase = (start_final + len_final / 3) % PAD_DELAY_MAX; 1971 + final_phase = (start_final + len_final / 3) % PAD_DELAY_FULL; 1991 1972 else 1992 - final_phase = (start_final + len_final / 2) % PAD_DELAY_MAX; 1993 - dev_dbg(host->dev, "phase: [map:%x] [maxlen:%d] [final:%d]\n", 1973 + final_phase = (start_final + len_final / 2) % PAD_DELAY_FULL; 1974 + dev_dbg(host->dev, "phase: [map:%016llx] [maxlen:%d] [final:%d]\n", 1994 1975 delay, len_final, final_phase); 1995 1976 1996 1977 delay_phase.maxlen = len_final; ··· 2003 1984 { 2004 1985 u32 tune_reg = host->dev_comp->pad_tune_reg; 2005 1986 2006 - if (host->top_base) 2007 - sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY, 2008 - value); 2009 - else 2010 - sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY, 2011 
- value); 1987 + if (host->top_base) { 1988 + if (value < PAD_DELAY_HALF) { 1989 + sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY, value); 1990 + sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY2, 0); 1991 + } else { 1992 + sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY, 1993 + PAD_DELAY_HALF - 1); 1994 + sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY2, 1995 + value - PAD_DELAY_HALF); 1996 + } 1997 + } else { 1998 + if (value < PAD_DELAY_HALF) { 1999 + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY, value); 2000 + sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST, 2001 + MSDC_PAD_TUNE_CMDRDLY2, 0); 2002 + } else { 2003 + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY, 2004 + PAD_DELAY_HALF - 1); 2005 + sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST, 2006 + MSDC_PAD_TUNE_CMDRDLY2, value - PAD_DELAY_HALF); 2007 + } 2008 + } 2012 2009 } 2013 2010 2014 2011 static inline void msdc_set_data_delay(struct msdc_host *host, u32 value) 2015 2012 { 2016 2013 u32 tune_reg = host->dev_comp->pad_tune_reg; 2017 2014 2018 - if (host->top_base) 2019 - sdr_set_field(host->top_base + EMMC_TOP_CONTROL, 2020 - PAD_DAT_RD_RXDLY, value); 2021 - else 2022 - sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY, 2023 - value); 2015 + if (host->top_base) { 2016 + if (value < PAD_DELAY_HALF) { 2017 + sdr_set_field(host->top_base + EMMC_TOP_CONTROL, 2018 + PAD_DAT_RD_RXDLY, value); 2019 + sdr_set_field(host->top_base + EMMC_TOP_CONTROL, 2020 + PAD_DAT_RD_RXDLY2, 0); 2021 + } else { 2022 + sdr_set_field(host->top_base + EMMC_TOP_CONTROL, 2023 + PAD_DAT_RD_RXDLY, PAD_DELAY_HALF - 1); 2024 + sdr_set_field(host->top_base + EMMC_TOP_CONTROL, 2025 + PAD_DAT_RD_RXDLY2, value - PAD_DELAY_HALF); 2026 + } 2027 + } else { 2028 + if (value < PAD_DELAY_HALF) { 2029 + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY, value); 2030 + sdr_set_field(host->base + tune_reg + 
TUNING_REG2_FIXED_OFFEST, 2031 + MSDC_PAD_TUNE_DATRRDLY2, 0); 2032 + } else { 2033 + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY, 2034 + PAD_DELAY_HALF - 1); 2035 + sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST, 2036 + MSDC_PAD_TUNE_DATRRDLY2, value - PAD_DELAY_HALF); 2037 + } 2038 + } 2024 2039 } 2025 2040 2026 2041 static int msdc_tune_response(struct mmc_host *mmc, u32 opcode) 2027 2042 { 2028 2043 struct msdc_host *host = mmc_priv(mmc); 2029 - u32 rise_delay = 0, fall_delay = 0; 2044 + u64 rise_delay = 0, fall_delay = 0; 2030 2045 struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,}; 2031 2046 struct msdc_delay_phase internal_delay_phase; 2032 2047 u8 final_delay, final_maxlen; ··· 2076 2023 host->hs200_cmd_int_delay); 2077 2024 2078 2025 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 2079 - for (i = 0 ; i < PAD_DELAY_MAX; i++) { 2026 + for (i = 0; i < host->tuning_step; i++) { 2080 2027 msdc_set_cmd_delay(host, i); 2081 2028 /* 2082 2029 * Using the same parameters, it may sometimes pass the test, ··· 2086 2033 for (j = 0; j < 3; j++) { 2087 2034 mmc_send_tuning(mmc, opcode, &cmd_err); 2088 2035 if (!cmd_err) { 2089 - rise_delay |= BIT(i); 2036 + rise_delay |= BIT_ULL(i); 2090 2037 } else { 2091 - rise_delay &= ~BIT(i); 2038 + rise_delay &= ~BIT_ULL(i); 2092 2039 break; 2093 2040 } 2094 2041 } ··· 2100 2047 goto skip_fall; 2101 2048 2102 2049 sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 2103 - for (i = 0; i < PAD_DELAY_MAX; i++) { 2050 + for (i = 0; i < host->tuning_step; i++) { 2104 2051 msdc_set_cmd_delay(host, i); 2105 2052 /* 2106 2053 * Using the same parameters, it may sometimes pass the test, ··· 2110 2057 for (j = 0; j < 3; j++) { 2111 2058 mmc_send_tuning(mmc, opcode, &cmd_err); 2112 2059 if (!cmd_err) { 2113 - fall_delay |= BIT(i); 2060 + fall_delay |= BIT_ULL(i); 2114 2061 } else { 2115 - fall_delay &= ~BIT(i); 2062 + fall_delay &= ~BIT_ULL(i); 2116 2063 break; 2117 2064 } 2118 2065 
} ··· 2135 2082 if (host->dev_comp->async_fifo || host->hs200_cmd_int_delay) 2136 2083 goto skip_internal; 2137 2084 2138 - for (i = 0; i < PAD_DELAY_MAX; i++) { 2085 + for (i = 0; i < host->tuning_step; i++) { 2139 2086 sdr_set_field(host->base + tune_reg, 2140 2087 MSDC_PAD_TUNE_CMDRRDLY, i); 2141 2088 mmc_send_tuning(mmc, opcode, &cmd_err); 2142 2089 if (!cmd_err) 2143 - internal_delay |= BIT(i); 2090 + internal_delay |= BIT_ULL(i); 2144 2091 } 2145 2092 dev_dbg(host->dev, "Final internal delay: 0x%x\n", internal_delay); 2146 2093 internal_delay_phase = get_best_delay(host, internal_delay); ··· 2174 2121 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 2175 2122 else 2176 2123 sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 2177 - for (i = 0 ; i < PAD_DELAY_MAX; i++) { 2124 + 2125 + for (i = 0; i < PAD_DELAY_HALF; i++) { 2178 2126 sdr_set_field(host->base + PAD_CMD_TUNE, 2179 2127 PAD_CMD_TUNE_RX_DLY3, i); 2180 2128 /* ··· 2205 2151 static int msdc_tune_data(struct mmc_host *mmc, u32 opcode) 2206 2152 { 2207 2153 struct msdc_host *host = mmc_priv(mmc); 2208 - u32 rise_delay = 0, fall_delay = 0; 2154 + u64 rise_delay = 0, fall_delay = 0; 2209 2155 struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,}; 2210 2156 u8 final_delay, final_maxlen; 2211 2157 int i, ret; ··· 2214 2160 host->latch_ck); 2215 2161 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL); 2216 2162 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL); 2217 - for (i = 0 ; i < PAD_DELAY_MAX; i++) { 2163 + for (i = 0; i < host->tuning_step; i++) { 2218 2164 msdc_set_data_delay(host, i); 2219 2165 ret = mmc_send_tuning(mmc, opcode, NULL); 2220 2166 if (!ret) 2221 - rise_delay |= BIT(i); 2167 + rise_delay |= BIT_ULL(i); 2222 2168 } 2223 2169 final_rise_delay = get_best_delay(host, rise_delay); 2224 2170 /* if rising edge has enough margin, then do not scan falling edge */ ··· 2228 2174 2229 2175 sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL); 2230 2176 
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL); 2231 - for (i = 0; i < PAD_DELAY_MAX; i++) { 2177 + for (i = 0; i < host->tuning_step; i++) { 2232 2178 msdc_set_data_delay(host, i); 2233 2179 ret = mmc_send_tuning(mmc, opcode, NULL); 2234 2180 if (!ret) 2235 - fall_delay |= BIT(i); 2181 + fall_delay |= BIT_ULL(i); 2236 2182 } 2237 2183 final_fall_delay = get_best_delay(host, fall_delay); 2238 2184 ··· 2260 2206 static int msdc_tune_together(struct mmc_host *mmc, u32 opcode) 2261 2207 { 2262 2208 struct msdc_host *host = mmc_priv(mmc); 2263 - u32 rise_delay = 0, fall_delay = 0; 2209 + u64 rise_delay = 0, fall_delay = 0; 2264 2210 struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,}; 2265 2211 u8 final_delay, final_maxlen; 2266 2212 int i, ret; ··· 2271 2217 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 2272 2218 sdr_clr_bits(host->base + MSDC_IOCON, 2273 2219 MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL); 2274 - for (i = 0 ; i < PAD_DELAY_MAX; i++) { 2220 + for (i = 0; i < host->tuning_step; i++) { 2275 2221 msdc_set_cmd_delay(host, i); 2276 2222 msdc_set_data_delay(host, i); 2277 2223 ret = mmc_send_tuning(mmc, opcode, NULL); 2278 2224 if (!ret) 2279 - rise_delay |= BIT(i); 2225 + rise_delay |= BIT_ULL(i); 2280 2226 } 2281 2227 final_rise_delay = get_best_delay(host, rise_delay); 2282 2228 /* if rising edge has enough margin, then do not scan falling edge */ ··· 2287 2233 sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 2288 2234 sdr_set_bits(host->base + MSDC_IOCON, 2289 2235 MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL); 2290 - for (i = 0; i < PAD_DELAY_MAX; i++) { 2236 + for (i = 0; i < host->tuning_step; i++) { 2291 2237 msdc_set_cmd_delay(host, i); 2292 2238 msdc_set_data_delay(host, i); 2293 2239 ret = mmc_send_tuning(mmc, opcode, NULL); 2294 2240 if (!ret) 2295 - fall_delay |= BIT(i); 2241 + fall_delay |= BIT_ULL(i); 2296 2242 } 2297 2243 final_fall_delay = get_best_delay(host, fall_delay); 2298 2244 ··· 2400 2346 } 2401 2347 2402 2348 
host->hs400_tuning = true; 2403 - for (i = 0; i < PAD_DELAY_MAX; i++) { 2349 + for (i = 0; i < PAD_DELAY_HALF; i++) { 2404 2350 if (host->top_base) 2405 2351 sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE, 2406 2352 PAD_DS_DLY1, i); ··· 2634 2580 static void msdc_of_property_parse(struct platform_device *pdev, 2635 2581 struct msdc_host *host) 2636 2582 { 2583 + struct mmc_host *mmc = mmc_from_priv(host); 2584 + 2637 2585 of_property_read_u32(pdev->dev.of_node, "mediatek,latch-ck", 2638 2586 &host->latch_ck); 2639 2587 ··· 2656 2600 host->hs400_cmd_resp_sel_rising = true; 2657 2601 else 2658 2602 host->hs400_cmd_resp_sel_rising = false; 2603 + 2604 + if (of_property_read_u32(pdev->dev.of_node, "mediatek,tuning-step", 2605 + &host->tuning_step)) { 2606 + if (mmc->caps2 & MMC_CAP2_NO_MMC) 2607 + host->tuning_step = PAD_DELAY_FULL; 2608 + else 2609 + host->tuning_step = PAD_DELAY_HALF; 2610 + } 2659 2611 2660 2612 if (of_property_read_bool(pdev->dev.of_node, 2661 2613 "supports-cqe"))
+64 -5
drivers/mmc/host/sdhci-brcmstb.c
··· 6 6 */ 7 7 8 8 #include <linux/io.h> 9 + #include <linux/iopoll.h> 9 10 #include <linux/mmc/host.h> 10 11 #include <linux/module.h> 11 12 #include <linux/of.h> ··· 45 44 46 45 static inline void enable_clock_gating(struct sdhci_host *host) 47 46 { 47 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 48 + struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host); 48 49 u32 reg; 50 + 51 + if (!(priv->flags & BRCMSTB_PRIV_FLAGS_GATE_CLOCK)) 52 + return; 49 53 50 54 reg = sdhci_readl(host, SDHCI_VENDOR); 51 55 reg |= SDHCI_VENDOR_GATE_SDCLK_EN; ··· 59 53 60 54 static void brcmstb_reset(struct sdhci_host *host, u8 mask) 61 55 { 62 - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 63 - struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host); 64 - 65 56 sdhci_and_cqhci_reset(host, mask); 66 57 67 58 /* Reset will clear this, so re-enable it */ 68 - if (priv->flags & BRCMSTB_PRIV_FLAGS_GATE_CLOCK) 69 - enable_clock_gating(host); 59 + enable_clock_gating(host); 60 + } 61 + 62 + static void brcmstb_sdhci_reset_cmd_data(struct sdhci_host *host, u8 mask) 63 + { 64 + u32 new_mask = (mask & (SDHCI_RESET_CMD | SDHCI_RESET_DATA)) << 24; 65 + int ret; 66 + u32 reg; 67 + 68 + /* 69 + * SDHCI_CLOCK_CONTROL register CARD_EN and CLOCK_INT_EN bits shall 70 + * be set along with SOFTWARE_RESET register RESET_CMD or RESET_DATA 71 + * bits, hence access SDHCI_CLOCK_CONTROL register as 32-bit register 72 + */ 73 + new_mask |= SDHCI_CLOCK_CARD_EN | SDHCI_CLOCK_INT_EN; 74 + reg = sdhci_readl(host, SDHCI_CLOCK_CONTROL); 75 + sdhci_writel(host, reg | new_mask, SDHCI_CLOCK_CONTROL); 76 + 77 + reg = sdhci_readb(host, SDHCI_SOFTWARE_RESET); 78 + 79 + ret = read_poll_timeout_atomic(sdhci_readb, reg, !(reg & mask), 80 + 10, 10000, false, 81 + host, SDHCI_SOFTWARE_RESET); 82 + 83 + if (ret) { 84 + pr_err("%s: Reset 0x%x never completed.\n", 85 + mmc_hostname(host->mmc), (int)mask); 86 + sdhci_err_stats_inc(host, CTRL_TIMEOUT); 87 + sdhci_dumpregs(host); 88 + } 89 + } 
90 + 91 + static void brcmstb_reset_74165b0(struct sdhci_host *host, u8 mask) 92 + { 93 + /* take care of RESET_ALL as usual */ 94 + if (mask & SDHCI_RESET_ALL) 95 + sdhci_and_cqhci_reset(host, SDHCI_RESET_ALL); 96 + 97 + /* cmd and/or data treated differently on this core */ 98 + if (mask & (SDHCI_RESET_CMD | SDHCI_RESET_DATA)) 99 + brcmstb_sdhci_reset_cmd_data(host, mask); 100 + 101 + /* Reset will clear this, so re-enable it */ 102 + enable_clock_gating(host); 70 103 } 71 104 72 105 static void sdhci_brcmstb_hs400es(struct mmc_host *mmc, struct mmc_ios *ios) ··· 207 162 .set_uhs_signaling = sdhci_brcmstb_set_uhs_signaling, 208 163 }; 209 164 165 + static struct sdhci_ops sdhci_brcmstb_ops_74165b0 = { 166 + .set_clock = sdhci_brcmstb_set_clock, 167 + .set_bus_width = sdhci_set_bus_width, 168 + .reset = brcmstb_reset_74165b0, 169 + .set_uhs_signaling = sdhci_brcmstb_set_uhs_signaling, 170 + }; 171 + 210 172 static struct brcmstb_match_priv match_priv_7425 = { 211 173 .flags = BRCMSTB_MATCH_FLAGS_NO_64BIT | 212 174 BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT, ··· 231 179 .ops = &sdhci_brcmstb_ops_7216, 232 180 }; 233 181 182 + static struct brcmstb_match_priv match_priv_74165b0 = { 183 + .flags = BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE, 184 + .hs400es = sdhci_brcmstb_hs400es, 185 + .ops = &sdhci_brcmstb_ops_74165b0, 186 + }; 187 + 234 188 static const struct of_device_id __maybe_unused sdhci_brcm_of_match[] = { 235 189 { .compatible = "brcm,bcm7425-sdhci", .data = &match_priv_7425 }, 236 190 { .compatible = "brcm,bcm7445-sdhci", .data = &match_priv_7445 }, 237 191 { .compatible = "brcm,bcm7216-sdhci", .data = &match_priv_7216 }, 192 + { .compatible = "brcm,bcm74165b0-sdhci", .data = &match_priv_74165b0 }, 238 193 {}, 239 194 }; 240 195
+349
drivers/mmc/host/sdhci-of-dwcmshc.c
··· 8 8 */ 9 9 10 10 #include <linux/acpi.h> 11 + #include <linux/bitfield.h> 11 12 #include <linux/clk.h> 12 13 #include <linux/dma-mapping.h> 13 14 #include <linux/iopoll.h> ··· 36 35 #define DWCMSHC_CARD_IS_EMMC BIT(0) 37 36 #define DWCMSHC_ENHANCED_STROBE BIT(8) 38 37 #define DWCMSHC_EMMC_ATCTRL 0x40 38 + /* Tuning and auto-tuning fields in AT_CTRL_R control register */ 39 + #define AT_CTRL_AT_EN BIT(0) /* autotuning is enabled */ 40 + #define AT_CTRL_CI_SEL BIT(1) /* interval to drive center phase select */ 41 + #define AT_CTRL_SWIN_TH_EN BIT(2) /* sampling window threshold enable */ 42 + #define AT_CTRL_RPT_TUNE_ERR BIT(3) /* enable reporting framing errors */ 43 + #define AT_CTRL_SW_TUNE_EN BIT(4) /* enable software managed tuning */ 44 + #define AT_CTRL_WIN_EDGE_SEL_MASK GENMASK(11, 8) /* bits [11:8] */ 45 + #define AT_CTRL_WIN_EDGE_SEL 0xf /* sampling window edge select */ 46 + #define AT_CTRL_TUNE_CLK_STOP_EN BIT(16) /* clocks stopped during phase code change */ 47 + #define AT_CTRL_PRE_CHANGE_DLY_MASK GENMASK(18, 17) /* bits [18:17] */ 48 + #define AT_CTRL_PRE_CHANGE_DLY 0x1 /* 2-cycle latency */ 49 + #define AT_CTRL_POST_CHANGE_DLY_MASK GENMASK(20, 19) /* bits [20:19] */ 50 + #define AT_CTRL_POST_CHANGE_DLY 0x3 /* 4-cycle latency */ 51 + #define AT_CTRL_SWIN_TH_VAL_MASK GENMASK(31, 24) /* bits [31:24] */ 52 + #define AT_CTRL_SWIN_TH_VAL 0x9 /* sampling window threshold */ 39 53 40 54 /* Rockchip specific Registers */ 41 55 #define DWCMSHC_EMMC_DLL_CTRL 0x800 ··· 88 72 (((x) & DWCMSHC_EMMC_DLL_TIMEOUT) == 0)) 89 73 #define RK35xx_MAX_CLKS 3 90 74 75 + /* PHY register area pointer */ 76 + #define DWC_MSHC_PTR_PHY_R 0x300 77 + 78 + /* PHY general configuration */ 79 + #define PHY_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x00) 80 + #define PHY_CNFG_RSTN_DEASSERT 0x1 /* Deassert PHY reset */ 81 + #define PHY_CNFG_PAD_SP_MASK GENMASK(19, 16) /* bits [19:16] */ 82 + #define PHY_CNFG_PAD_SP 0x0c /* PMOS TX drive strength */ 83 + #define PHY_CNFG_PAD_SN_MASK GENMASK(23, 20) 
/* bits [23:20] */ 84 + #define PHY_CNFG_PAD_SN 0x0c /* NMOS TX drive strength */ 85 + 86 + /* PHY command/response pad settings */ 87 + #define PHY_CMDPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x04) 88 + 89 + /* PHY data pad settings */ 90 + #define PHY_DATAPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x06) 91 + 92 + /* PHY clock pad settings */ 93 + #define PHY_CLKPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x08) 94 + 95 + /* PHY strobe pad settings */ 96 + #define PHY_STBPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x0a) 97 + 98 + /* PHY reset pad settings */ 99 + #define PHY_RSTNPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x0c) 100 + 101 + /* Bitfields are common for all pad settings */ 102 + #define PHY_PAD_RXSEL_1V8 0x1 /* Receiver type select for 1.8V */ 103 + #define PHY_PAD_RXSEL_3V3 0x2 /* Receiver type select for 3.3V */ 104 + 105 + #define PHY_PAD_WEAKPULL_MASK GENMASK(4, 3) /* bits [4:3] */ 106 + #define PHY_PAD_WEAKPULL_PULLUP 0x1 /* Weak pull up enabled */ 107 + #define PHY_PAD_WEAKPULL_PULLDOWN 0x2 /* Weak pull down enabled */ 108 + 109 + #define PHY_PAD_TXSLEW_CTRL_P_MASK GENMASK(8, 5) /* bits [8:5] */ 110 + #define PHY_PAD_TXSLEW_CTRL_P 0x3 /* Slew control for P-Type pad TX */ 111 + #define PHY_PAD_TXSLEW_CTRL_N_MASK GENMASK(12, 9) /* bits [12:9] */ 112 + #define PHY_PAD_TXSLEW_CTRL_N 0x3 /* Slew control for N-Type pad TX */ 113 + 114 + /* PHY CLK delay line settings */ 115 + #define PHY_SDCLKDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x1d) 116 + #define PHY_SDCLKDL_CNFG_UPDATE BIT(4) /* set before writing to SDCLKDL_DC */ 117 + 118 + /* PHY CLK delay line delay code */ 119 + #define PHY_SDCLKDL_DC_R (DWC_MSHC_PTR_PHY_R + 0x1e) 120 + #define PHY_SDCLKDL_DC_INITIAL 0x40 /* initial delay code */ 121 + #define PHY_SDCLKDL_DC_DEFAULT 0x32 /* default delay code */ 122 + #define PHY_SDCLKDL_DC_HS400 0x18 /* delay code for HS400 mode */ 123 + 124 + /* PHY drift_cclk_rx delay line configuration setting */ 125 + #define PHY_ATDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x21) 126 + #define PHY_ATDL_CNFG_INPSEL_MASK GENMASK(3, 2) /* 
bits [3:2] */ 127 + #define PHY_ATDL_CNFG_INPSEL 0x3 /* delay line input source */ 128 + 129 + /* PHY DLL control settings */ 130 + #define PHY_DLL_CTRL_R (DWC_MSHC_PTR_PHY_R + 0x24) 131 + #define PHY_DLL_CTRL_DISABLE 0x0 /* PHY DLL is enabled */ 132 + #define PHY_DLL_CTRL_ENABLE 0x1 /* PHY DLL is disabled */ 133 + 134 + /* PHY DLL configuration register 1 */ 135 + #define PHY_DLL_CNFG1_R (DWC_MSHC_PTR_PHY_R + 0x25) 136 + #define PHY_DLL_CNFG1_SLVDLY_MASK GENMASK(5, 4) /* bits [5:4] */ 137 + #define PHY_DLL_CNFG1_SLVDLY 0x2 /* DLL slave update delay input */ 138 + #define PHY_DLL_CNFG1_WAITCYCLE 0x5 /* DLL wait cycle input */ 139 + 140 + /* PHY DLL configuration register 2 */ 141 + #define PHY_DLL_CNFG2_R (DWC_MSHC_PTR_PHY_R + 0x26) 142 + #define PHY_DLL_CNFG2_JUMPSTEP 0xa /* DLL jump step input */ 143 + 144 + /* PHY DLL master and slave delay line configuration settings */ 145 + #define PHY_DLLDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x28) 146 + #define PHY_DLLDL_CNFG_SLV_INPSEL_MASK GENMASK(6, 5) /* bits [6:5] */ 147 + #define PHY_DLLDL_CNFG_SLV_INPSEL 0x3 /* clock source select for slave DL */ 148 + 149 + #define FLAG_IO_FIXED_1V8 BIT(0) 150 + 91 151 #define BOUNDARY_OK(addr, len) \ 92 152 ((addr | (SZ_128M - 1)) == ((addr + len - 1) | (SZ_128M - 1))) 93 153 ··· 184 92 struct clk *bus_clk; 185 93 int vendor_specific_area1; /* P_VENDOR_SPECIFIC_AREA reg */ 186 94 void *priv; /* pointer to SoC private stuff */ 95 + u16 delay_line; 96 + u16 flags; 187 97 }; 188 98 189 99 /* ··· 251 157 sdhci_request(mmc, mrq); 252 158 } 253 159 160 + static void dwcmshc_phy_1_8v_init(struct sdhci_host *host) 161 + { 162 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 163 + struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); 164 + u32 val; 165 + 166 + /* deassert phy reset & set tx drive strength */ 167 + val = PHY_CNFG_RSTN_DEASSERT; 168 + val |= FIELD_PREP(PHY_CNFG_PAD_SP_MASK, PHY_CNFG_PAD_SP); 169 + val |= FIELD_PREP(PHY_CNFG_PAD_SN_MASK, PHY_CNFG_PAD_SN); 170 + 
sdhci_writel(host, val, PHY_CNFG_R); 171 + 172 + /* disable delay line */ 173 + sdhci_writeb(host, PHY_SDCLKDL_CNFG_UPDATE, PHY_SDCLKDL_CNFG_R); 174 + 175 + /* set delay line */ 176 + sdhci_writeb(host, priv->delay_line, PHY_SDCLKDL_DC_R); 177 + sdhci_writeb(host, PHY_DLL_CNFG2_JUMPSTEP, PHY_DLL_CNFG2_R); 178 + 179 + /* enable delay lane */ 180 + val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R); 181 + val &= ~(PHY_SDCLKDL_CNFG_UPDATE); 182 + sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R); 183 + 184 + /* configure phy pads */ 185 + val = PHY_PAD_RXSEL_1V8; 186 + val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLUP); 187 + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P); 188 + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N); 189 + sdhci_writew(host, val, PHY_CMDPAD_CNFG_R); 190 + sdhci_writew(host, val, PHY_DATAPAD_CNFG_R); 191 + sdhci_writew(host, val, PHY_RSTNPAD_CNFG_R); 192 + 193 + val = FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P); 194 + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N); 195 + sdhci_writew(host, val, PHY_CLKPAD_CNFG_R); 196 + 197 + val = PHY_PAD_RXSEL_1V8; 198 + val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLDOWN); 199 + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P); 200 + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N); 201 + sdhci_writew(host, val, PHY_STBPAD_CNFG_R); 202 + 203 + /* enable data strobe mode */ 204 + sdhci_writeb(host, FIELD_PREP(PHY_DLLDL_CNFG_SLV_INPSEL_MASK, PHY_DLLDL_CNFG_SLV_INPSEL), 205 + PHY_DLLDL_CNFG_R); 206 + 207 + /* enable phy dll */ 208 + sdhci_writeb(host, PHY_DLL_CTRL_ENABLE, PHY_DLL_CTRL_R); 209 + } 210 + 211 + static void dwcmshc_phy_3_3v_init(struct sdhci_host *host) 212 + { 213 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 214 + struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); 215 + u32 val; 216 + 217 + /* deassert phy reset & set tx drive strength */ 
218 + val = PHY_CNFG_RSTN_DEASSERT; 219 + val |= FIELD_PREP(PHY_CNFG_PAD_SP_MASK, PHY_CNFG_PAD_SP); 220 + val |= FIELD_PREP(PHY_CNFG_PAD_SN_MASK, PHY_CNFG_PAD_SN); 221 + sdhci_writel(host, val, PHY_CNFG_R); 222 + 223 + /* disable delay line */ 224 + sdhci_writeb(host, PHY_SDCLKDL_CNFG_UPDATE, PHY_SDCLKDL_CNFG_R); 225 + 226 + /* set delay line */ 227 + sdhci_writeb(host, priv->delay_line, PHY_SDCLKDL_DC_R); 228 + sdhci_writeb(host, PHY_DLL_CNFG2_JUMPSTEP, PHY_DLL_CNFG2_R); 229 + 230 + /* enable delay lane */ 231 + val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R); 232 + val &= ~(PHY_SDCLKDL_CNFG_UPDATE); 233 + sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R); 234 + 235 + /* configure phy pads */ 236 + val = PHY_PAD_RXSEL_3V3; 237 + val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLUP); 238 + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P); 239 + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N); 240 + sdhci_writew(host, val, PHY_CMDPAD_CNFG_R); 241 + sdhci_writew(host, val, PHY_DATAPAD_CNFG_R); 242 + sdhci_writew(host, val, PHY_RSTNPAD_CNFG_R); 243 + 244 + val = FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P); 245 + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N); 246 + sdhci_writew(host, val, PHY_CLKPAD_CNFG_R); 247 + 248 + val = PHY_PAD_RXSEL_3V3; 249 + val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLDOWN); 250 + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P); 251 + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N); 252 + sdhci_writew(host, val, PHY_STBPAD_CNFG_R); 253 + 254 + /* enable phy dll */ 255 + sdhci_writeb(host, PHY_DLL_CTRL_ENABLE, PHY_DLL_CTRL_R); 256 + } 257 + 258 + static void th1520_sdhci_set_phy(struct sdhci_host *host) 259 + { 260 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 261 + struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); 262 + u32 emmc_caps = MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO; 263 + u16 
emmc_ctrl; 264 + 265 + /* Before power on, set PHY configs */ 266 + if (priv->flags & FLAG_IO_FIXED_1V8) 267 + dwcmshc_phy_1_8v_init(host); 268 + else 269 + dwcmshc_phy_3_3v_init(host); 270 + 271 + if ((host->mmc->caps2 & emmc_caps) == emmc_caps) { 272 + emmc_ctrl = sdhci_readw(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL); 273 + emmc_ctrl |= DWCMSHC_CARD_IS_EMMC; 274 + sdhci_writew(host, emmc_ctrl, priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL); 275 + } 276 + 277 + sdhci_writeb(host, FIELD_PREP(PHY_DLL_CNFG1_SLVDLY_MASK, PHY_DLL_CNFG1_SLVDLY) | 278 + PHY_DLL_CNFG1_WAITCYCLE, PHY_DLL_CNFG1_R); 279 + } 280 + 254 281 static void dwcmshc_set_uhs_signaling(struct sdhci_host *host, 255 282 unsigned int timing) 256 283 { ··· 404 189 ctrl_2 |= DWCMSHC_CTRL_HS400; 405 190 } 406 191 192 + if (priv->flags & FLAG_IO_FIXED_1V8) 193 + ctrl_2 |= SDHCI_CTRL_VDD_180; 407 194 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 195 + } 196 + 197 + static void th1520_set_uhs_signaling(struct sdhci_host *host, 198 + unsigned int timing) 199 + { 200 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 201 + struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); 202 + 203 + dwcmshc_set_uhs_signaling(host, timing); 204 + if (timing == MMC_TIMING_MMC_HS400) 205 + priv->delay_line = PHY_SDCLKDL_DC_HS400; 206 + else 207 + sdhci_writeb(host, 0, PHY_DLLDL_CNFG_R); 208 + th1520_sdhci_set_phy(host); 408 209 } 409 210 410 211 static void dwcmshc_hs400_enhanced_strobe(struct mmc_host *mmc, ··· 569 338 sdhci_reset(host, mask); 570 339 } 571 340 341 + static int th1520_execute_tuning(struct sdhci_host *host, u32 opcode) 342 + { 343 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 344 + struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); 345 + u32 val = 0; 346 + 347 + if (host->flags & SDHCI_HS400_TUNING) 348 + return 0; 349 + 350 + sdhci_writeb(host, FIELD_PREP(PHY_ATDL_CNFG_INPSEL_MASK, PHY_ATDL_CNFG_INPSEL), 351 + PHY_ATDL_CNFG_R); 352 + val = 
sdhci_readl(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL); 353 + 354 + /* 355 + * configure tuning settings: 356 + * - center phase select code driven in block gap interval 357 + * - disable reporting of framing errors 358 + * - disable software managed tuning 359 + * - disable user selection of sampling window edges, 360 + * instead tuning calculated edges are used 361 + */ 362 + val &= ~(AT_CTRL_CI_SEL | AT_CTRL_RPT_TUNE_ERR | AT_CTRL_SW_TUNE_EN | 363 + FIELD_PREP(AT_CTRL_WIN_EDGE_SEL_MASK, AT_CTRL_WIN_EDGE_SEL)); 364 + 365 + /* 366 + * configure tuning settings: 367 + * - enable auto-tuning 368 + * - enable sampling window threshold 369 + * - stop clocks during phase code change 370 + * - set max latency in cycles between tx and rx clocks 371 + * - set max latency in cycles to switch output phase 372 + * - set max sampling window threshold value 373 + */ 374 + val |= AT_CTRL_AT_EN | AT_CTRL_SWIN_TH_EN | AT_CTRL_TUNE_CLK_STOP_EN; 375 + val |= FIELD_PREP(AT_CTRL_PRE_CHANGE_DLY_MASK, AT_CTRL_PRE_CHANGE_DLY); 376 + val |= FIELD_PREP(AT_CTRL_POST_CHANGE_DLY_MASK, AT_CTRL_POST_CHANGE_DLY); 377 + val |= FIELD_PREP(AT_CTRL_SWIN_TH_VAL_MASK, AT_CTRL_SWIN_TH_VAL); 378 + 379 + sdhci_writel(host, val, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL); 380 + val = sdhci_readl(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL); 381 + 382 + /* perform tuning */ 383 + sdhci_start_tuning(host); 384 + host->tuning_err = __sdhci_execute_tuning(host, opcode); 385 + if (host->tuning_err) { 386 + /* disable auto-tuning upon tuning error */ 387 + val &= ~AT_CTRL_AT_EN; 388 + sdhci_writel(host, val, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL); 389 + dev_err(mmc_dev(host->mmc), "tuning failed: %d\n", host->tuning_err); 390 + return -EIO; 391 + } 392 + sdhci_end_tuning(host); 393 + 394 + return 0; 395 + } 396 + 397 + static void th1520_sdhci_reset(struct sdhci_host *host, u8 mask) 398 + { 399 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 400 + struct 
dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); 401 + u16 ctrl_2; 402 + 403 + sdhci_reset(host, mask); 404 + 405 + if (priv->flags & FLAG_IO_FIXED_1V8) { 406 + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 407 + if (!(ctrl_2 & SDHCI_CTRL_VDD_180)) { 408 + ctrl_2 |= SDHCI_CTRL_VDD_180; 409 + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 410 + } 411 + } 412 + } 413 + 572 414 static const struct sdhci_ops sdhci_dwcmshc_ops = { 573 415 .set_clock = sdhci_set_clock, 574 416 .set_bus_width = sdhci_set_bus_width, ··· 658 354 .get_max_clock = rk35xx_get_max_clock, 659 355 .reset = rk35xx_sdhci_reset, 660 356 .adma_write_desc = dwcmshc_adma_write_desc, 357 + }; 358 + 359 + static const struct sdhci_ops sdhci_dwcmshc_th1520_ops = { 360 + .set_clock = sdhci_set_clock, 361 + .set_bus_width = sdhci_set_bus_width, 362 + .set_uhs_signaling = th1520_set_uhs_signaling, 363 + .get_max_clock = dwcmshc_get_max_clock, 364 + .reset = th1520_sdhci_reset, 365 + .adma_write_desc = dwcmshc_adma_write_desc, 366 + .voltage_switch = dwcmshc_phy_1_8v_init, 367 + .platform_execute_tuning = &th1520_execute_tuning, 661 368 }; 662 369 663 370 static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = { ··· 692 377 SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, 693 378 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | 694 379 SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN, 380 + }; 381 + 382 + static const struct sdhci_pltfm_data sdhci_dwcmshc_th1520_pdata = { 383 + .ops = &sdhci_dwcmshc_th1520_ops, 384 + .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, 385 + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, 695 386 }; 696 387 697 388 static int dwcmshc_rk35xx_init(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv) ··· 767 446 { 768 447 .compatible = "snps,dwcmshc-sdhci", 769 448 .data = &sdhci_dwcmshc_pdata, 449 + }, 450 + { 451 + .compatible = "thead,th1520-dwcmshc", 452 + .data = &sdhci_dwcmshc_th1520_pdata, 770 453 }, 771 454 {}, 772 455 }; ··· 865 540 err = dwcmshc_rk35xx_init(host, priv); 866 541 if (err) 867 542 goto 
err_clk; 543 + } 544 + 545 + if (pltfm_data == &sdhci_dwcmshc_th1520_pdata) { 546 + priv->delay_line = PHY_SDCLKDL_DC_DEFAULT; 547 + 548 + if (device_property_read_bool(dev, "mmc-ddr-1_8v") || 549 + device_property_read_bool(dev, "mmc-hs200-1_8v") || 550 + device_property_read_bool(dev, "mmc-hs400-1_8v")) 551 + priv->flags |= FLAG_IO_FIXED_1V8; 552 + else 553 + priv->flags &= ~FLAG_IO_FIXED_1V8; 554 + 555 + /* 556 + * start_signal_voltage_switch() will try 3.3V first 557 + * then 1.8V. Use SDHCI_SIGNALING_180 rather than 558 + * SDHCI_SIGNALING_330 to avoid setting voltage to 3.3V 559 + * in sdhci_start_signal_voltage_switch(). 560 + */ 561 + if (priv->flags & FLAG_IO_FIXED_1V8) { 562 + host->flags &= ~SDHCI_SIGNALING_330; 563 + host->flags |= SDHCI_SIGNALING_180; 564 + } 565 + 566 + sdhci_enable_v4_mode(host); 868 567 } 869 568 870 569 #ifdef CONFIG_ACPI
+1 -1
drivers/mmc/host/sdhci-omap.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 - /** 2 + /* 3 3 * SDHCI Controller driver for TI's OMAP SoCs 4 4 * 5 5 * Copyright (C) 2017 Texas Instruments
+31
drivers/mmc/host/sdhci-xenon.c
··· 18 18 #include <linux/of.h> 19 19 #include <linux/pm.h> 20 20 #include <linux/pm_runtime.h> 21 + #include <linux/mm.h> 22 + #include <linux/dma-mapping.h> 21 23 22 24 #include "sdhci-pltfm.h" 23 25 #include "sdhci-xenon.h" ··· 424 422 struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); 425 423 u32 sdhc_id, nr_sdhc; 426 424 u32 tuning_count; 425 + struct sysinfo si; 427 426 428 427 /* Disable HS200 on Armada AP806 */ 429 428 if (priv->hw_version == XENON_AP806) ··· 452 449 } 453 450 } 454 451 priv->tuning_count = tuning_count; 452 + 453 + /* 454 + * AC5/X/IM HW has only 31-bits passed in the crossbar switch. 455 + * If we have more than 2GB of memory, this means we might pass 456 + * memory pointers which are above 2GB and which cannot be properly 457 + * represented. In this case, disable ADMA, 64-bit DMA and allow only SDMA. 458 + * This effectively will enable bounce buffer quirk in the 459 + * generic SDHCI driver, which will make sure DMA is only done 460 + * from supported memory regions: 461 + */ 462 + if (priv->hw_version == XENON_AC5) { 463 + si_meminfo(&si); 464 + if (si.totalram * si.mem_unit > SZ_2G) { 465 + host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; 466 + host->quirks2 |= SDHCI_QUIRK2_BROKEN_64_BIT_DMA; 467 + } 468 + } 455 469 456 470 return xenon_phy_parse_params(dev, host); 457 471 } ··· 582 562 goto remove_sdhc; 583 563 584 564 pm_runtime_put_autosuspend(&pdev->dev); 565 + /* 566 + * If we previously detected AC5 with over 2GB of memory, 567 + * then we disable ADMA and 64-bit DMA. 568 + * This means generic SDHCI driver has set the DMA mask to 569 + * 32-bit. 
Since DDR starts at 0x2_0000_0000, we must use 570 + * 34-bit DMA mask to access this DDR memory: 571 + */ 572 + if (priv->hw_version == XENON_AC5 && 573 + host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA) 574 + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(34)); 585 575 586 576 return 0; 587 577 ··· 710 680 { .compatible = "marvell,armada-ap807-sdhci", .data = (void *)XENON_AP807}, 711 681 { .compatible = "marvell,armada-cp110-sdhci", .data = (void *)XENON_CP110}, 712 682 { .compatible = "marvell,armada-3700-sdhci", .data = (void *)XENON_A3700}, 683 + { .compatible = "marvell,ac5-sdhci", .data = (void *)XENON_AC5}, 713 684 {} 714 685 }; 715 686 MODULE_DEVICE_TABLE(of, sdhci_xenon_dt_ids);
+2 -1
drivers/mmc/host/sdhci-xenon.h
··· 57 57 XENON_A3700, 58 58 XENON_AP806, 59 59 XENON_AP807, 60 - XENON_CP110 60 + XENON_CP110, 61 + XENON_AC5 61 62 }; 62 63 63 64 struct xenon_priv {
+2 -1
drivers/mmc/host/sdhci.c
··· 2841 2841 } 2842 2842 EXPORT_SYMBOL_GPL(sdhci_send_tuning); 2843 2843 2844 - static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) 2844 + int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) 2845 2845 { 2846 2846 int i; 2847 2847 ··· 2879 2879 sdhci_reset_tuning(host); 2880 2880 return -EAGAIN; 2881 2881 } 2882 + EXPORT_SYMBOL_GPL(__sdhci_execute_tuning); 2882 2883 2883 2884 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) 2884 2885 {
+1
drivers/mmc/host/sdhci.h
··· 793 793 void sdhci_reset(struct sdhci_host *host, u8 mask); 794 794 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing); 795 795 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode); 796 + int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode); 796 797 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios); 797 798 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, 798 799 struct mmc_ios *ios);
+6 -31
drivers/mmc/host/sdhci_am654.c
··· 141 141 142 142 struct sdhci_am654_data { 143 143 struct regmap *base; 144 - bool legacy_otapdly; 145 144 int otap_del_sel[ARRAY_SIZE(td)]; 146 145 int itap_del_sel[ARRAY_SIZE(td)]; 147 146 int clkbuf_sel; ··· 271 272 sdhci_set_clock(host, clock); 272 273 273 274 /* Setup DLL Output TAP delay */ 274 - if (sdhci_am654->legacy_otapdly) 275 - otap_del_sel = sdhci_am654->otap_del_sel[0]; 276 - else 277 - otap_del_sel = sdhci_am654->otap_del_sel[timing]; 278 - 275 + otap_del_sel = sdhci_am654->otap_del_sel[timing]; 279 276 otap_del_ena = (timing > MMC_TIMING_UHS_SDR25) ? 1 : 0; 280 277 281 278 mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK; ··· 309 314 u32 mask, val; 310 315 311 316 /* Setup DLL Output TAP delay */ 312 - if (sdhci_am654->legacy_otapdly) 313 - otap_del_sel = sdhci_am654->otap_del_sel[0]; 314 - else 315 - otap_del_sel = sdhci_am654->otap_del_sel[timing]; 317 + otap_del_sel = sdhci_am654->otap_del_sel[timing]; 316 318 317 319 mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK; 318 320 val = (0x1 << OTAPDLYENA_SHIFT) | ··· 569 577 int i; 570 578 int ret; 571 579 572 - ret = device_property_read_u32(dev, td[MMC_TIMING_LEGACY].otap_binding, 573 - &sdhci_am654->otap_del_sel[MMC_TIMING_LEGACY]); 574 - if (ret) { 575 - /* 576 - * ti,otap-del-sel-legacy is mandatory, look for old binding 577 - * if not found. 
578 - */ 579 - ret = device_property_read_u32(dev, "ti,otap-del-sel", 580 - &sdhci_am654->otap_del_sel[0]); 581 - if (ret) { 582 - dev_err(dev, "Couldn't find otap-del-sel\n"); 583 - 584 - return ret; 585 - } 586 - 587 - dev_info(dev, "Using legacy binding ti,otap-del-sel\n"); 588 - sdhci_am654->legacy_otapdly = true; 589 - 590 - return 0; 591 - } 592 - 593 580 for (i = MMC_TIMING_LEGACY; i <= MMC_TIMING_MMC_HS400; i++) { 594 581 595 582 ret = device_property_read_u32(dev, td[i].otap_binding, 596 583 &sdhci_am654->otap_del_sel[i]); 597 584 if (ret) { 585 + if (i == MMC_TIMING_LEGACY) { 586 + dev_err(dev, "Couldn't find mandatory ti,otap-del-sel-legacy\n"); 587 + return ret; 588 + } 598 589 dev_dbg(dev, "Couldn't find %s\n", 599 590 td[i].otap_binding); 600 591 /*
+2 -3
include/linux/mmc/card.h
··· 32 32 unsigned int r2w_factor; 33 33 unsigned int max_dtr; 34 34 unsigned int erase_size; /* In sectors */ 35 + unsigned int wp_grp_size; 35 36 unsigned int read_blkbits; 36 37 unsigned int write_blkbits; 37 38 unsigned int capacity; ··· 53 52 u8 part_config; 54 53 u8 cache_ctrl; 55 54 u8 rst_n_function; 56 - u8 max_packed_writes; 57 - u8 max_packed_reads; 58 - u8 packed_event_en; 59 55 unsigned int part_time; /* Units: ms */ 60 56 unsigned int sa_timeout; /* Units: 100ns */ 61 57 unsigned int generic_cmd6_time; /* Units: 10ms */ ··· 304 306 unsigned int eg_boundary; /* don't cross erase-group boundaries */ 305 307 unsigned int erase_arg; /* erase / trim / discard */ 306 308 u8 erased_byte; /* value of erased bytes */ 309 + unsigned int wp_grp_size; /* write group size in sectors */ 307 310 308 311 u32 raw_cid[4]; /* raw card CID */ 309 312 u32 raw_csd[4]; /* raw card CSD */
-1
include/linux/mmc/core.h
··· 27 27 u32 opcode; 28 28 u32 arg; 29 29 #define MMC_CMD23_ARG_REL_WR (1 << 31) 30 - #define MMC_CMD23_ARG_PACKED ((0 << 31) | (1 << 30)) 31 30 #define MMC_CMD23_ARG_TAG_REQ (1 << 29) 32 31 u32 resp[4]; 33 32 unsigned int flags; /* expected response type */
-10
include/linux/mmc/mmc.h
··· 257 257 #define EXT_CSD_FLUSH_CACHE 32 /* W */ 258 258 #define EXT_CSD_CACHE_CTRL 33 /* R/W */ 259 259 #define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */ 260 - #define EXT_CSD_PACKED_FAILURE_INDEX 35 /* RO */ 261 - #define EXT_CSD_PACKED_CMD_STATUS 36 /* RO */ 262 260 #define EXT_CSD_EXP_EVENTS_STATUS 54 /* RO, 2 bytes */ 263 261 #define EXT_CSD_EXP_EVENTS_CTRL 56 /* R/W, 2 bytes */ 264 262 #define EXT_CSD_DATA_SECTOR_SIZE 61 /* R */ ··· 319 321 #define EXT_CSD_SUPPORTED_MODE 493 /* RO */ 320 322 #define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */ 321 323 #define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */ 322 - #define EXT_CSD_MAX_PACKED_WRITES 500 /* RO */ 323 - #define EXT_CSD_MAX_PACKED_READS 501 /* RO */ 324 324 #define EXT_CSD_BKOPS_SUPPORT 502 /* RO */ 325 325 #define EXT_CSD_HPI_FEATURES 503 /* RO */ 326 326 ··· 398 402 #define EXT_CSD_PWR_CL_8BIT_SHIFT 4 399 403 #define EXT_CSD_PWR_CL_4BIT_SHIFT 0 400 404 401 - #define EXT_CSD_PACKED_EVENT_EN BIT(3) 402 - 403 405 /* 404 406 * EXCEPTION_EVENT_STATUS field 405 407 */ 406 408 #define EXT_CSD_URGENT_BKOPS BIT(0) 407 409 #define EXT_CSD_DYNCAP_NEEDED BIT(1) 408 410 #define EXT_CSD_SYSPOOL_EXHAUSTED BIT(2) 409 - #define EXT_CSD_PACKED_FAILURE BIT(3) 410 - 411 - #define EXT_CSD_PACKED_GENERIC_ERROR BIT(0) 412 - #define EXT_CSD_PACKED_INDEXED_ERROR BIT(1) 413 411 414 412 /* 415 413 * BKOPS status level