Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mmc: sdhci-of-dwcmshc: Implement SDHCI CQE support

To enable CQE support, set the 'supports-cqe' property in the devicetree
node of the appropriate mmc controller.

Signed-off-by: Sergey Khimich <serghox@gmail.com>
Acked-by: Adrian Hunter <adrian.hunter@intel.com>
Link: https://lore.kernel.org/r/20240319115932.4108904-3-serghox@gmail.com
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>

Authored by Sergey Khimich; committed by Ulf Hansson.
53ab7f7f 52bf134f

+190 -2
+1
drivers/mmc/host/Kconfig
··· 233 233 depends on MMC_SDHCI_PLTFM 234 234 depends on OF 235 235 depends on COMMON_CLK 236 + select MMC_CQHCI 236 237 help 237 238 This selects Synopsys DesignWare Cores Mobile Storage Controller 238 239 support.
+189 -2
drivers/mmc/host/sdhci-of-dwcmshc.c
··· 21 21 #include <linux/sizes.h> 22 22 23 23 #include "sdhci-pltfm.h" 24 + #include "cqhci.h" 24 25 25 26 #define SDHCI_DWCMSHC_ARG2_STUFF GENMASK(31, 16) 26 27 ··· 52 51 #define AT_CTRL_POST_CHANGE_DLY 0x3 /* 4-cycle latency */ 53 52 #define AT_CTRL_SWIN_TH_VAL_MASK GENMASK(31, 24) /* bits [31:24] */ 54 53 #define AT_CTRL_SWIN_TH_VAL 0x9 /* sampling window threshold */ 54 + 55 + /* DWC IP vendor area 2 pointer */ 56 + #define DWCMSHC_P_VENDOR_AREA2 0xea 55 57 56 58 /* Sophgo CV18XX specific Registers */ 57 59 #define CV18XX_SDHCI_MSHC_CTRL 0x00 ··· 185 181 #define BOUNDARY_OK(addr, len) \ 186 182 ((addr | (SZ_128M - 1)) == ((addr + len - 1) | (SZ_128M - 1))) 187 183 184 + #define DWCMSHC_SDHCI_CQE_TRNS_MODE (SDHCI_TRNS_MULTI | \ 185 + SDHCI_TRNS_BLK_CNT_EN | \ 186 + SDHCI_TRNS_DMA) 187 + 188 188 enum dwcmshc_rk_type { 189 189 DWCMSHC_RK3568, 190 190 DWCMSHC_RK3588, ··· 204 196 205 197 struct dwcmshc_priv { 206 198 struct clk *bus_clk; 207 - int vendor_specific_area1; /* P_VENDOR_SPECIFIC_AREA reg */ 199 + int vendor_specific_area1; /* P_VENDOR_SPECIFIC_AREA1 reg */ 200 + int vendor_specific_area2; /* P_VENDOR_SPECIFIC_AREA2 reg */ 201 + 208 202 void *priv; /* pointer to SoC private stuff */ 209 203 u16 delay_line; 210 204 u16 flags; ··· 465 455 sdhci_writel(host, vendor, reg); 466 456 } 467 457 458 + static int dwcmshc_execute_tuning(struct mmc_host *mmc, u32 opcode) 459 + { 460 + int err = sdhci_execute_tuning(mmc, opcode); 461 + struct sdhci_host *host = mmc_priv(mmc); 462 + 463 + if (err) 464 + return err; 465 + 466 + /* 467 + * Tuning can leave the IP in an active state (Buffer Read Enable bit 468 + * set) which prevents the entry to low power states (i.e. S0i3). Data 469 + * reset will clear it. 
470 + */ 471 + sdhci_reset(host, SDHCI_RESET_DATA); 472 + 473 + return 0; 474 + } 475 + 476 + static u32 dwcmshc_cqe_irq_handler(struct sdhci_host *host, u32 intmask) 477 + { 478 + int cmd_error = 0; 479 + int data_error = 0; 480 + 481 + if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) 482 + return intmask; 483 + 484 + cqhci_irq(host->mmc, intmask, cmd_error, data_error); 485 + 486 + return 0; 487 + } 488 + 489 + static void dwcmshc_sdhci_cqe_enable(struct mmc_host *mmc) 490 + { 491 + struct sdhci_host *host = mmc_priv(mmc); 492 + u8 ctrl; 493 + 494 + sdhci_writew(host, DWCMSHC_SDHCI_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE); 495 + 496 + sdhci_cqe_enable(mmc); 497 + 498 + /* 499 + * The "DesignWare Cores Mobile Storage Host Controller 500 + * DWC_mshc / DWC_mshc_lite Databook" says: 501 + * when Host Version 4 Enable" is 1 in Host Control 2 register, 502 + * SDHCI_CTRL_ADMA32 bit means ADMA2 is selected. 503 + * Selection of 32-bit/64-bit System Addressing: 504 + * either 32-bit or 64-bit system addressing is selected by 505 + * 64-bit Addressing bit in Host Control 2 register. 506 + * 507 + * On the other hand the "DesignWare Cores Mobile Storage Host 508 + * Controller DWC_mshc / DWC_mshc_lite User Guide" says, that we have to 509 + * set DMA_SEL to ADMA2 _only_ mode in the Host Control 2 register. 
510 + */ 511 + ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 512 + ctrl &= ~SDHCI_CTRL_DMA_MASK; 513 + ctrl |= SDHCI_CTRL_ADMA32; 514 + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 515 + } 516 + 517 + static void dwcmshc_set_tran_desc(struct cqhci_host *cq_host, u8 **desc, 518 + dma_addr_t addr, int len, bool end, bool dma64) 519 + { 520 + int tmplen, offset; 521 + 522 + if (likely(!len || BOUNDARY_OK(addr, len))) { 523 + cqhci_set_tran_desc(*desc, addr, len, end, dma64); 524 + return; 525 + } 526 + 527 + offset = addr & (SZ_128M - 1); 528 + tmplen = SZ_128M - offset; 529 + cqhci_set_tran_desc(*desc, addr, tmplen, false, dma64); 530 + 531 + addr += tmplen; 532 + len -= tmplen; 533 + *desc += cq_host->trans_desc_len; 534 + cqhci_set_tran_desc(*desc, addr, len, end, dma64); 535 + } 536 + 537 + static void dwcmshc_cqhci_dumpregs(struct mmc_host *mmc) 538 + { 539 + sdhci_dumpregs(mmc_priv(mmc)); 540 + } 541 + 468 542 static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock) 469 543 { 470 544 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ··· 786 692 .get_max_clock = dwcmshc_get_max_clock, 787 693 .reset = sdhci_reset, 788 694 .adma_write_desc = dwcmshc_adma_write_desc, 695 + .irq = dwcmshc_cqe_irq_handler, 789 696 }; 790 697 791 698 static const struct sdhci_ops sdhci_dwcmshc_rk35xx_ops = { ··· 852 757 .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, 853 758 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, 854 759 }; 760 + 761 + static const struct cqhci_host_ops dwcmshc_cqhci_ops = { 762 + .enable = dwcmshc_sdhci_cqe_enable, 763 + .disable = sdhci_cqe_disable, 764 + .dumpregs = dwcmshc_cqhci_dumpregs, 765 + .set_tran_desc = dwcmshc_set_tran_desc, 766 + }; 767 + 768 + static void dwcmshc_cqhci_init(struct sdhci_host *host, struct platform_device *pdev) 769 + { 770 + struct cqhci_host *cq_host; 771 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 772 + struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); 773 + bool dma64 = false; 
774 + u16 clk; 775 + int err; 776 + 777 + host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; 778 + cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL); 779 + if (!cq_host) { 780 + dev_err(mmc_dev(host->mmc), "Unable to setup CQE: not enough memory\n"); 781 + goto dsbl_cqe_caps; 782 + } 783 + 784 + /* 785 + * For dwcmshc host controller we have to enable internal clock 786 + * before access to some registers from Vendor Specific Area 2. 787 + */ 788 + clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 789 + clk |= SDHCI_CLOCK_INT_EN; 790 + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 791 + clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 792 + if (!(clk & SDHCI_CLOCK_INT_EN)) { 793 + dev_err(mmc_dev(host->mmc), "Unable to setup CQE: internal clock enable error\n"); 794 + goto free_cq_host; 795 + } 796 + 797 + cq_host->mmio = host->ioaddr + priv->vendor_specific_area2; 798 + cq_host->ops = &dwcmshc_cqhci_ops; 799 + 800 + /* Enable using of 128-bit task descriptors */ 801 + dma64 = host->flags & SDHCI_USE_64_BIT_DMA; 802 + if (dma64) { 803 + dev_dbg(mmc_dev(host->mmc), "128-bit task descriptors\n"); 804 + cq_host->caps |= CQHCI_TASK_DESC_SZ_128; 805 + } 806 + err = cqhci_init(cq_host, host->mmc, dma64); 807 + if (err) { 808 + dev_err(mmc_dev(host->mmc), "Unable to setup CQE: error %d\n", err); 809 + goto int_clock_disable; 810 + } 811 + 812 + dev_dbg(mmc_dev(host->mmc), "CQE init done\n"); 813 + 814 + return; 815 + 816 + int_clock_disable: 817 + clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 818 + clk &= ~SDHCI_CLOCK_INT_EN; 819 + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 820 + 821 + free_cq_host: 822 + devm_kfree(&pdev->dev, cq_host); 823 + 824 + dsbl_cqe_caps: 825 + host->mmc->caps2 &= ~(MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD); 826 + } 855 827 856 828 static int dwcmshc_rk35xx_init(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv) 857 829 { ··· 1024 862 struct rk35xx_priv *rk_priv = NULL; 1025 863 const struct sdhci_pltfm_data *pltfm_data; 1026 864 int 
err; 1027 - u32 extra; 865 + u32 extra, caps; 1028 866 1029 867 pltfm_data = device_get_match_data(&pdev->dev); 1030 868 if (!pltfm_data) { ··· 1075 913 1076 914 host->mmc_host_ops.request = dwcmshc_request; 1077 915 host->mmc_host_ops.hs400_enhanced_strobe = dwcmshc_hs400_enhanced_strobe; 916 + host->mmc_host_ops.execute_tuning = dwcmshc_execute_tuning; 1078 917 1079 918 if (pltfm_data == &sdhci_dwcmshc_rk35xx_pdata) { 1080 919 rk_priv = devm_kzalloc(&pdev->dev, sizeof(struct rk35xx_priv), GFP_KERNEL); ··· 1125 962 sdhci_enable_v4_mode(host); 1126 963 #endif 1127 964 965 + caps = sdhci_readl(host, SDHCI_CAPABILITIES); 966 + if (caps & SDHCI_CAN_64BIT_V4) 967 + sdhci_enable_v4_mode(host); 968 + 1128 969 host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY; 1129 970 1130 971 pm_runtime_get_noresume(dev); ··· 1138 971 err = sdhci_setup_host(host); 1139 972 if (err) 1140 973 goto err_rpm; 974 + 975 + /* Setup Command Queue Engine if enabled */ 976 + if (device_property_read_bool(&pdev->dev, "supports-cqe")) { 977 + priv->vendor_specific_area2 = 978 + sdhci_readw(host, DWCMSHC_P_VENDOR_AREA2); 979 + 980 + dwcmshc_cqhci_init(host, pdev); 981 + } 1141 982 1142 983 if (rk_priv) 1143 984 dwcmshc_rk35xx_postinit(host, priv); ··· 1219 1044 1220 1045 pm_runtime_resume(dev); 1221 1046 1047 + if (host->mmc->caps2 & MMC_CAP2_CQE) { 1048 + ret = cqhci_suspend(host->mmc); 1049 + if (ret) 1050 + return ret; 1051 + } 1052 + 1222 1053 ret = sdhci_suspend_host(host); 1223 1054 if (ret) 1224 1055 return ret; ··· 1268 1087 ret = sdhci_resume_host(host); 1269 1088 if (ret) 1270 1089 goto disable_rockchip_clks; 1090 + 1091 + if (host->mmc->caps2 & MMC_CAP2_CQE) { 1092 + ret = cqhci_resume(host->mmc); 1093 + if (ret) 1094 + goto disable_rockchip_clks; 1095 + } 1271 1096 1272 1097 return 0; 1273 1098