Linux kernel mirror (for testing): https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mmc-updates-for-3.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc

Pull MMC update from Chris Ball:
"MMC highlights for 3.16:

Core:
- support HS400 mode of eMMC 5.0, via DT bindings mmc-hs400-1_{2,8}v
- if card init at 3.3v doesn't work, try 1.8v and 1.2v too

Drivers:
- moxart: New driver for MOXA ART SoCs
- rtsx_usb_sdmmc: New driver for Realtek USB card readers
- sdhci: Large rework around IRQ/regulator handling, remove card_tasklet
- sdhci-pci-o2micro: Add SeaBird SeaEagle SD3 support
- sunxi: New driver for Allwinner sunxi SoCs
- usdhi6rol0: New driver for Renesas SD/SDIO controller"

* tag 'mmc-updates-for-3.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc: (95 commits)
mmc: sdhci-s3c: use mmc_of_parse and remove the card_tasklet
mmc: add a driver for the Renesas usdhi6rol0 SD/SDIO host controller
mmc: sdhci-of-esdhc: Fixup compile error
mmc: tegra: fix reporting of base clock frequency
mmc: tegra: disable UHS modes
mmc: sdhci-dove: use mmc_of_parse() and remove card_tasklet CD handler
MAINTAINERS: mmc: Add path to git tree
mmc: dove: fix missing MACH_DOVE dependency
mmc: sdhci: SD tuning is broken for some controllers
mmc: sdhci-esdhc-imx: fix mmc ddr mode regression issue
mmc: sdhci-pci-o2micro: Add SeaBird SeaEagle SD3 support
mmc: omap_hsmmc: split omap-dma header file
mmc: omap_hsmmc: fix cmd23 multiblock read/write
mmc: omap_hsmmc: use devm_ioremap_resource
mmc: omap_hsmmc: use devm_request_threaded_irq
mmc: omap_hsmmc: use devm_request_irq
mmc: omap_hsmmc: use devm_clk_get
mmc: sunxi: Add driver for SD/MMC hosts found on Allwinner sunxi SoCs
mmc: wmt-sdmmc: Use GFP_KERNEL instead of hard-coded value
mmc: omap: Use DIV_ROUND_UP instead of open coded
...

+4110 -1495
+2
Documentation/devicetree/bindings/mmc/mmc.txt
··· 38 38 - mmc-highspeed-ddr-1_2v: eMMC high-speed DDR mode(1.2V I/O) is supported 39 39 - mmc-hs200-1_8v: eMMC HS200 mode(1.8V I/O) is supported 40 40 - mmc-hs200-1_2v: eMMC HS200 mode(1.2V I/O) is supported 41 + - mmc-hs400-1_8v: eMMC HS400 mode(1.8V I/O) is supported 42 + - mmc-hs400-1_2v: eMMC HS400 mode(1.2V I/O) is supported 41 43 42 44 *NOTE* on CD and WP polarity. To use common for all SD/MMC host controllers line 43 45 polarity properties, we have to fix the meaning of the "normal" and "inverted"
+30
Documentation/devicetree/bindings/mmc/moxa,moxart-mmc.txt
··· 1 + MOXA ART MMC Host Controller Interface 2 + 3 + Inherits from mmc binding[1]. 4 + 5 + [1] Documentation/devicetree/bindings/mmc/mmc.txt 6 + 7 + Required properties: 8 + 9 + - compatible : Must be "moxa,moxart-mmc" or "faraday,ftsdc010" 10 + - reg : Should contain registers location and length 11 + - interrupts : Should contain the interrupt number 12 + - clocks : Should contain phandle for the clock feeding the MMC controller 13 + 14 + Optional properties: 15 + 16 + - dmas : Should contain two DMA channels, line request number must be 5 for 17 + both channels 18 + - dma-names : Must be "tx", "rx" 19 + 20 + Example: 21 + 22 + mmc: mmc@98e00000 { 23 + compatible = "moxa,moxart-mmc"; 24 + reg = <0x98e00000 0x5C>; 25 + interrupts = <5 0>; 26 + clocks = <&clk_apb>; 27 + dmas = <&dma 5>, 28 + <&dma 5>; 29 + dma-names = "tx", "rx"; 30 + };
-5
Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
··· 69 69 70 70 * supports-highspeed: Enables support for high speed cards (up to 50MHz) 71 71 72 - * caps2-mmc-hs200-1_8v: Supports mmc HS200 SDR 1.8V mode 73 - 74 - * caps2-mmc-hs200-1_2v: Supports mmc HS200 SDR 1.2V mode 75 - 76 72 * broken-cd: as documented in mmc core bindings. 77 73 78 74 * vmmc-supply: The phandle to the regulator to use for vmmc. If this is ··· 99 103 clock-freq-min-max = <400000 200000000>; 100 104 num-slots = <1>; 101 105 supports-highspeed; 102 - caps2-mmc-hs200-1_8v; 103 106 broken-cd; 104 107 fifo-depth = <0x80>; 105 108 card-detect-delay = <200>;
+33
Documentation/devicetree/bindings/mmc/usdhi6rol0.txt
··· 1 + * Renesas usdhi6rol0 SD/SDIO host controller 2 + 3 + Required properties: 4 + 5 + - compatible: must be 6 + "renesas,usdhi6rol0" 7 + - interrupts: 3 interrupts, named "card detect", "data" and "SDIO" must be 8 + specified 9 + - clocks: a clock binding for the IMCLK input 10 + 11 + Optional properties: 12 + 13 + - vmmc-supply: a phandle of a regulator, supplying Vcc to the card 14 + - vqmmc-supply: a phandle of a regulator, supplying VccQ to the card 15 + 16 + Additionally any standard mmc bindings from mmc.txt can be used. 17 + 18 + Example: 19 + 20 + sd0: sd@ab000000 { 21 + compatible = "renesas,usdhi6rol0"; 22 + reg = <0xab000000 0x200>; 23 + interrupts = <0 23 0x4 24 + 0 24 0x4 25 + 0 25 0x4>; 26 + interrupt-names = "card detect", "data", "SDIO"; 27 + bus-width = <4>; 28 + max-frequency = <50000000>; 29 + cap-power-off-card; 30 + clocks = <&imclk>; 31 + vmmc-supply = <&vcc_sd0>; 32 + vqmmc-supply = <&vccq_sd0>; 33 + };
+2 -1
MAINTAINERS
··· 5974 5974 M: Ulf Hansson <ulf.hansson@linaro.org> 5975 5975 L: linux-mmc@vger.kernel.org 5976 5976 T: git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git 5977 + T: git git://git.linaro.org/people/ulf.hansson/mmc.git 5977 5978 S: Maintained 5978 5979 F: drivers/mmc/ 5979 5980 F: include/linux/mmc/ ··· 9104 9103 F: include/uapi/linux/toshiba.h 9105 9104 9106 9105 TMIO MMC DRIVER 9107 - M: Ian Molton <ian@mnementh.co.uk> 9106 + M: Ian Molton <ian.molton@codethink.co.uk> 9108 9107 L: linux-mmc@vger.kernel.org 9109 9108 S: Maintained 9110 9109 F: drivers/mmc/host/tmio_mmc*
+5 -4
drivers/mmc/core/bus.c
··· 341 341 if (mmc_host_is_spi(card->host)) { 342 342 pr_info("%s: new %s%s%s card on SPI\n", 343 343 mmc_hostname(card->host), 344 - mmc_card_highspeed(card) ? "high speed " : "", 345 - mmc_card_ddr_mode(card) ? "DDR " : "", 344 + mmc_card_hs(card) ? "high speed " : "", 345 + mmc_card_ddr52(card) ? "DDR " : "", 346 346 type); 347 347 } else { 348 348 pr_info("%s: new %s%s%s%s%s card at address %04x\n", 349 349 mmc_hostname(card->host), 350 350 mmc_card_uhs(card) ? "ultra high speed " : 351 - (mmc_card_highspeed(card) ? "high speed " : ""), 351 + (mmc_card_hs(card) ? "high speed " : ""), 352 + mmc_card_hs400(card) ? "HS400 " : 352 353 (mmc_card_hs200(card) ? "HS200 " : ""), 353 - mmc_card_ddr_mode(card) ? "DDR " : "", 354 + mmc_card_ddr52(card) ? "DDR " : "", 354 355 uhs_bus_speed_mode, type, card->rca); 355 356 } 356 357
+36 -16
drivers/mmc/core/core.c
··· 800 800 data->timeout_ns = limit_us * 1000; 801 801 data->timeout_clks = 0; 802 802 } 803 + 804 + /* assign limit value if invalid */ 805 + if (timeout_us == 0) 806 + data->timeout_ns = limit_us * 1000; 803 807 } 804 808 805 809 /* ··· 1314 1310 } 1315 1311 EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr); 1316 1312 1313 + #endif /* CONFIG_REGULATOR */ 1314 + 1317 1315 int mmc_regulator_get_supply(struct mmc_host *mmc) 1318 1316 { 1319 1317 struct device *dev = mmc_dev(mmc); 1320 - struct regulator *supply; 1321 1318 int ret; 1322 1319 1323 - supply = devm_regulator_get(dev, "vmmc"); 1324 - mmc->supply.vmmc = supply; 1320 + mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc"); 1325 1321 mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc"); 1326 1322 1327 - if (IS_ERR(supply)) 1328 - return PTR_ERR(supply); 1323 + if (IS_ERR(mmc->supply.vmmc)) { 1324 + if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER) 1325 + return -EPROBE_DEFER; 1326 + dev_info(dev, "No vmmc regulator found\n"); 1327 + } else { 1328 + ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc); 1329 + if (ret > 0) 1330 + mmc->ocr_avail = ret; 1331 + else 1332 + dev_warn(dev, "Failed getting OCR mask: %d\n", ret); 1333 + } 1329 1334 1330 - ret = mmc_regulator_get_ocrmask(supply); 1331 - if (ret > 0) 1332 - mmc->ocr_avail = ret; 1333 - else 1334 - dev_warn(mmc_dev(mmc), "Failed getting OCR mask: %d\n", ret); 1335 + if (IS_ERR(mmc->supply.vqmmc)) { 1336 + if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER) 1337 + return -EPROBE_DEFER; 1338 + dev_info(dev, "No vqmmc regulator found\n"); 1339 + } 1335 1340 1336 1341 return 0; 1337 1342 } 1338 1343 EXPORT_SYMBOL_GPL(mmc_regulator_get_supply); 1339 - 1340 - #endif /* CONFIG_REGULATOR */ 1341 1344 1342 1345 /* 1343 1346 * Mask off any voltages we don't support and select ··· 1544 1533 host->ios.timing = MMC_TIMING_LEGACY; 1545 1534 mmc_set_ios(host); 1546 1535 1547 - /* Set signal voltage to 3.3V */ 1548 - __mmc_set_signal_voltage(host, 
MMC_SIGNAL_VOLTAGE_330); 1536 + /* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */ 1537 + if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0) 1538 + dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n"); 1539 + else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180) == 0) 1540 + dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n"); 1541 + else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120) == 0) 1542 + dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n"); 1549 1543 1550 1544 /* 1551 1545 * This delay should be sufficient to allow the power supply ··· 2199 2183 { 2200 2184 struct mmc_command cmd = {0}; 2201 2185 2202 - if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card)) 2186 + if (mmc_card_blockaddr(card) || mmc_card_ddr52(card)) 2203 2187 return 0; 2204 2188 2205 2189 cmd.opcode = MMC_SET_BLOCKLEN; ··· 2279 2263 } 2280 2264 } 2281 2265 2282 - host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR); 2283 2266 if (mmc_host_is_spi(host)) { 2284 2267 host->ios.chip_select = MMC_CS_HIGH; 2285 2268 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; ··· 2417 2402 struct mmc_host *host = 2418 2403 container_of(work, struct mmc_host, detect.work); 2419 2404 int i; 2405 + 2406 + if (host->trigger_card_event && host->ops->card_event) { 2407 + host->ops->card_event(host); 2408 + host->trigger_card_event = false; 2409 + } 2420 2410 2421 2411 if (host->rescan_disable) 2422 2412 return;
+7 -1
drivers/mmc/core/debugfs.c
··· 135 135 case MMC_TIMING_UHS_DDR50: 136 136 str = "sd uhs DDR50"; 137 137 break; 138 + case MMC_TIMING_MMC_DDR52: 139 + str = "mmc DDR52"; 140 + break; 138 141 case MMC_TIMING_MMC_HS200: 139 - str = "mmc high-speed SDR200"; 142 + str = "mmc HS200"; 143 + break; 144 + case MMC_TIMING_MMC_HS400: 145 + str = "mmc HS400"; 140 146 break; 141 147 default: 142 148 str = "invalid";
+4
drivers/mmc/core/host.c
··· 447 447 host->caps2 |= MMC_CAP2_HS200_1_8V_SDR; 448 448 if (of_find_property(np, "mmc-hs200-1_2v", &len)) 449 449 host->caps2 |= MMC_CAP2_HS200_1_2V_SDR; 450 + if (of_find_property(np, "mmc-hs400-1_8v", &len)) 451 + host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR; 452 + if (of_find_property(np, "mmc-hs400-1_2v", &len)) 453 + host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR; 450 454 451 455 return 0; 452 456
+409 -281
drivers/mmc/core/mmc.c
··· 240 240 static void mmc_select_card_type(struct mmc_card *card) 241 241 { 242 242 struct mmc_host *host = card->host; 243 - u8 card_type = card->ext_csd.raw_card_type & EXT_CSD_CARD_TYPE_MASK; 243 + u8 card_type = card->ext_csd.raw_card_type; 244 244 u32 caps = host->caps, caps2 = host->caps2; 245 - unsigned int hs_max_dtr = 0; 246 - 247 - if (card_type & EXT_CSD_CARD_TYPE_26) 248 - hs_max_dtr = MMC_HIGH_26_MAX_DTR; 245 + unsigned int hs_max_dtr = 0, hs200_max_dtr = 0; 246 + unsigned int avail_type = 0; 249 247 250 248 if (caps & MMC_CAP_MMC_HIGHSPEED && 251 - card_type & EXT_CSD_CARD_TYPE_52) 249 + card_type & EXT_CSD_CARD_TYPE_HS_26) { 250 + hs_max_dtr = MMC_HIGH_26_MAX_DTR; 251 + avail_type |= EXT_CSD_CARD_TYPE_HS_26; 252 + } 253 + 254 + if (caps & MMC_CAP_MMC_HIGHSPEED && 255 + card_type & EXT_CSD_CARD_TYPE_HS_52) { 252 256 hs_max_dtr = MMC_HIGH_52_MAX_DTR; 257 + avail_type |= EXT_CSD_CARD_TYPE_HS_52; 258 + } 253 259 254 - if ((caps & MMC_CAP_1_8V_DDR && 255 - card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) || 256 - (caps & MMC_CAP_1_2V_DDR && 257 - card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)) 260 + if (caps & MMC_CAP_1_8V_DDR && 261 + card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) { 258 262 hs_max_dtr = MMC_HIGH_DDR_MAX_DTR; 263 + avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V; 264 + } 259 265 260 - if ((caps2 & MMC_CAP2_HS200_1_8V_SDR && 261 - card_type & EXT_CSD_CARD_TYPE_SDR_1_8V) || 262 - (caps2 & MMC_CAP2_HS200_1_2V_SDR && 263 - card_type & EXT_CSD_CARD_TYPE_SDR_1_2V)) 264 - hs_max_dtr = MMC_HS200_MAX_DTR; 266 + if (caps & MMC_CAP_1_2V_DDR && 267 + card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) { 268 + hs_max_dtr = MMC_HIGH_DDR_MAX_DTR; 269 + avail_type |= EXT_CSD_CARD_TYPE_DDR_1_2V; 270 + } 271 + 272 + if (caps2 & MMC_CAP2_HS200_1_8V_SDR && 273 + card_type & EXT_CSD_CARD_TYPE_HS200_1_8V) { 274 + hs200_max_dtr = MMC_HS200_MAX_DTR; 275 + avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V; 276 + } 277 + 278 + if (caps2 & MMC_CAP2_HS200_1_2V_SDR && 279 + card_type & 
EXT_CSD_CARD_TYPE_HS200_1_2V) { 280 + hs200_max_dtr = MMC_HS200_MAX_DTR; 281 + avail_type |= EXT_CSD_CARD_TYPE_HS200_1_2V; 282 + } 283 + 284 + if (caps2 & MMC_CAP2_HS400_1_8V && 285 + card_type & EXT_CSD_CARD_TYPE_HS400_1_8V) { 286 + hs200_max_dtr = MMC_HS200_MAX_DTR; 287 + avail_type |= EXT_CSD_CARD_TYPE_HS400_1_8V; 288 + } 289 + 290 + if (caps2 & MMC_CAP2_HS400_1_2V && 291 + card_type & EXT_CSD_CARD_TYPE_HS400_1_2V) { 292 + hs200_max_dtr = MMC_HS200_MAX_DTR; 293 + avail_type |= EXT_CSD_CARD_TYPE_HS400_1_2V; 294 + } 265 295 266 296 card->ext_csd.hs_max_dtr = hs_max_dtr; 267 - card->ext_csd.card_type = card_type; 297 + card->ext_csd.hs200_max_dtr = hs200_max_dtr; 298 + card->mmc_avail_type = avail_type; 268 299 } 269 300 270 301 /* ··· 511 480 ext_csd[EXT_CSD_PWR_CL_DDR_52_195]; 512 481 card->ext_csd.raw_pwr_cl_ddr_52_360 = 513 482 ext_csd[EXT_CSD_PWR_CL_DDR_52_360]; 483 + card->ext_csd.raw_pwr_cl_ddr_200_360 = 484 + ext_csd[EXT_CSD_PWR_CL_DDR_200_360]; 514 485 } 515 486 516 487 if (card->ext_csd.rev >= 5) { ··· 679 646 (card->ext_csd.raw_pwr_cl_ddr_52_195 == 680 647 bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_195]) && 681 648 (card->ext_csd.raw_pwr_cl_ddr_52_360 == 682 - bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_360])); 649 + bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_360]) && 650 + (card->ext_csd.raw_pwr_cl_ddr_200_360 == 651 + bw_ext_csd[EXT_CSD_PWR_CL_DDR_200_360])); 652 + 683 653 if (err) 684 654 err = -EINVAL; 685 655 ··· 730 694 &dev_attr_rel_sectors.attr, 731 695 NULL, 732 696 }; 733 - 734 - static struct attribute_group mmc_std_attr_group = { 735 - .attrs = mmc_std_attrs, 736 - }; 737 - 738 - static const struct attribute_group *mmc_attr_groups[] = { 739 - &mmc_std_attr_group, 740 - NULL, 741 - }; 697 + ATTRIBUTE_GROUPS(mmc_std); 742 698 743 699 static struct device_type mmc_type = { 744 - .groups = mmc_attr_groups, 700 + .groups = mmc_std_groups, 745 701 }; 746 702 747 703 /* ··· 742 714 * extended CSD register, select it by executing the 743 715 * mmc_switch command. 
744 716 */ 745 - static int mmc_select_powerclass(struct mmc_card *card, 746 - unsigned int bus_width) 717 + static int __mmc_select_powerclass(struct mmc_card *card, 718 + unsigned int bus_width) 747 719 { 748 - int err = 0; 720 + struct mmc_host *host = card->host; 721 + struct mmc_ext_csd *ext_csd = &card->ext_csd; 749 722 unsigned int pwrclass_val = 0; 750 - struct mmc_host *host; 751 - 752 - BUG_ON(!card); 753 - 754 - host = card->host; 755 - BUG_ON(!host); 723 + int err = 0; 756 724 757 725 /* Power class selection is supported for versions >= 4.0 */ 758 726 if (card->csd.mmca_vsn < CSD_SPEC_VER_4) ··· 760 736 761 737 switch (1 << host->ios.vdd) { 762 738 case MMC_VDD_165_195: 763 - if (host->ios.clock <= 26000000) 764 - pwrclass_val = card->ext_csd.raw_pwr_cl_26_195; 765 - else if (host->ios.clock <= 52000000) 739 + if (host->ios.clock <= MMC_HIGH_26_MAX_DTR) 740 + pwrclass_val = ext_csd->raw_pwr_cl_26_195; 741 + else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR) 766 742 pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ? 767 - card->ext_csd.raw_pwr_cl_52_195 : 768 - card->ext_csd.raw_pwr_cl_ddr_52_195; 769 - else if (host->ios.clock <= 200000000) 770 - pwrclass_val = card->ext_csd.raw_pwr_cl_200_195; 743 + ext_csd->raw_pwr_cl_52_195 : 744 + ext_csd->raw_pwr_cl_ddr_52_195; 745 + else if (host->ios.clock <= MMC_HS200_MAX_DTR) 746 + pwrclass_val = ext_csd->raw_pwr_cl_200_195; 771 747 break; 772 748 case MMC_VDD_27_28: 773 749 case MMC_VDD_28_29: ··· 778 754 case MMC_VDD_33_34: 779 755 case MMC_VDD_34_35: 780 756 case MMC_VDD_35_36: 781 - if (host->ios.clock <= 26000000) 782 - pwrclass_val = card->ext_csd.raw_pwr_cl_26_360; 783 - else if (host->ios.clock <= 52000000) 757 + if (host->ios.clock <= MMC_HIGH_26_MAX_DTR) 758 + pwrclass_val = ext_csd->raw_pwr_cl_26_360; 759 + else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR) 784 760 pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ? 
785 - card->ext_csd.raw_pwr_cl_52_360 : 786 - card->ext_csd.raw_pwr_cl_ddr_52_360; 787 - else if (host->ios.clock <= 200000000) 788 - pwrclass_val = card->ext_csd.raw_pwr_cl_200_360; 761 + ext_csd->raw_pwr_cl_52_360 : 762 + ext_csd->raw_pwr_cl_ddr_52_360; 763 + else if (host->ios.clock <= MMC_HS200_MAX_DTR) 764 + pwrclass_val = (bus_width == EXT_CSD_DDR_BUS_WIDTH_8) ? 765 + ext_csd->raw_pwr_cl_ddr_200_360 : 766 + ext_csd->raw_pwr_cl_200_360; 789 767 break; 790 768 default: 791 769 pr_warning("%s: Voltage range not supported " ··· 813 787 return err; 814 788 } 815 789 816 - /* 817 - * Selects the desired buswidth and switch to the HS200 mode 818 - * if bus width set without error 819 - */ 820 - static int mmc_select_hs200(struct mmc_card *card) 790 + static int mmc_select_powerclass(struct mmc_card *card) 821 791 { 822 - int idx, err = -EINVAL; 823 - struct mmc_host *host; 792 + struct mmc_host *host = card->host; 793 + u32 bus_width, ext_csd_bits; 794 + int err, ddr; 795 + 796 + /* Power class selection is supported for versions >= 4.0 */ 797 + if (card->csd.mmca_vsn < CSD_SPEC_VER_4) 798 + return 0; 799 + 800 + bus_width = host->ios.bus_width; 801 + /* Power class values are defined only for 4/8 bit bus */ 802 + if (bus_width == MMC_BUS_WIDTH_1) 803 + return 0; 804 + 805 + ddr = card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52; 806 + if (ddr) 807 + ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ? 808 + EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4; 809 + else 810 + ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ? 811 + EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4; 812 + 813 + err = __mmc_select_powerclass(card, ext_csd_bits); 814 + if (err) 815 + pr_warn("%s: power class selection to bus width %d ddr %d failed\n", 816 + mmc_hostname(host), 1 << bus_width, ddr); 817 + 818 + return err; 819 + } 820 + 821 + /* 822 + * Set the bus speed for the selected speed mode. 
823 + */ 824 + static void mmc_set_bus_speed(struct mmc_card *card) 825 + { 826 + unsigned int max_dtr = (unsigned int)-1; 827 + 828 + if ((mmc_card_hs200(card) || mmc_card_hs400(card)) && 829 + max_dtr > card->ext_csd.hs200_max_dtr) 830 + max_dtr = card->ext_csd.hs200_max_dtr; 831 + else if (mmc_card_hs(card) && max_dtr > card->ext_csd.hs_max_dtr) 832 + max_dtr = card->ext_csd.hs_max_dtr; 833 + else if (max_dtr > card->csd.max_dtr) 834 + max_dtr = card->csd.max_dtr; 835 + 836 + mmc_set_clock(card->host, max_dtr); 837 + } 838 + 839 + /* 840 + * Select the bus width amoung 4-bit and 8-bit(SDR). 841 + * If the bus width is changed successfully, return the selected width value. 842 + * Zero is returned instead of error value if the wide width is not supported. 843 + */ 844 + static int mmc_select_bus_width(struct mmc_card *card) 845 + { 824 846 static unsigned ext_csd_bits[] = { 825 - EXT_CSD_BUS_WIDTH_4, 826 847 EXT_CSD_BUS_WIDTH_8, 848 + EXT_CSD_BUS_WIDTH_4, 827 849 }; 828 850 static unsigned bus_widths[] = { 829 - MMC_BUS_WIDTH_4, 830 851 MMC_BUS_WIDTH_8, 852 + MMC_BUS_WIDTH_4, 831 853 }; 854 + struct mmc_host *host = card->host; 855 + unsigned idx, bus_width = 0; 856 + int err = 0; 832 857 833 - BUG_ON(!card); 858 + if ((card->csd.mmca_vsn < CSD_SPEC_VER_4) && 859 + !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) 860 + return 0; 834 861 835 - host = card->host; 836 - 837 - if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V && 838 - host->caps2 & MMC_CAP2_HS200_1_2V_SDR) 839 - err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120); 840 - 841 - if (err && card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_8V && 842 - host->caps2 & MMC_CAP2_HS200_1_8V_SDR) 843 - err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180); 844 - 845 - /* If fails try again during next card power cycle */ 846 - if (err) 847 - goto err; 848 - 849 - idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 1 : 0; 862 + idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 
0 : 1; 850 863 851 864 /* 852 865 * Unlike SD, MMC cards dont have a configuration register to notify ··· 893 828 * the supported bus width or compare the ext csd values of current 894 829 * bus width and ext csd values of 1 bit mode read earlier. 895 830 */ 896 - for (; idx >= 0; idx--) { 897 - 831 + for (; idx < ARRAY_SIZE(bus_widths); idx++) { 898 832 /* 899 833 * Host is capable of 8bit transfer, then switch 900 834 * the device to work in 8bit transfer mode. If the ··· 908 844 if (err) 909 845 continue; 910 846 911 - mmc_set_bus_width(card->host, bus_widths[idx]); 847 + bus_width = bus_widths[idx]; 848 + mmc_set_bus_width(host, bus_width); 912 849 850 + /* 851 + * If controller can't handle bus width test, 852 + * compare ext_csd previously read in 1 bit mode 853 + * against ext_csd at new bus width 854 + */ 913 855 if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST)) 914 - err = mmc_compare_ext_csds(card, bus_widths[idx]); 856 + err = mmc_compare_ext_csds(card, bus_width); 915 857 else 916 - err = mmc_bus_test(card, bus_widths[idx]); 917 - if (!err) 858 + err = mmc_bus_test(card, bus_width); 859 + 860 + if (!err) { 861 + err = bus_width; 918 862 break; 863 + } else { 864 + pr_warn("%s: switch to bus width %d failed\n", 865 + mmc_hostname(host), ext_csd_bits[idx]); 866 + } 919 867 } 920 868 921 - /* switch to HS200 mode if bus width set successfully */ 869 + return err; 870 + } 871 + 872 + /* 873 + * Switch to the high-speed mode 874 + */ 875 + static int mmc_select_hs(struct mmc_card *card) 876 + { 877 + int err; 878 + 879 + err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 880 + EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, 881 + card->ext_csd.generic_cmd6_time, 882 + true, true, true); 922 883 if (!err) 884 + mmc_set_timing(card->host, MMC_TIMING_MMC_HS); 885 + 886 + return err; 887 + } 888 + 889 + /* 890 + * Activate wide bus and DDR if supported. 
891 + */ 892 + static int mmc_select_hs_ddr(struct mmc_card *card) 893 + { 894 + struct mmc_host *host = card->host; 895 + u32 bus_width, ext_csd_bits; 896 + int err = 0; 897 + 898 + if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52)) 899 + return 0; 900 + 901 + bus_width = host->ios.bus_width; 902 + if (bus_width == MMC_BUS_WIDTH_1) 903 + return 0; 904 + 905 + ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ? 906 + EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4; 907 + 908 + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 909 + EXT_CSD_BUS_WIDTH, 910 + ext_csd_bits, 911 + card->ext_csd.generic_cmd6_time); 912 + if (err) { 913 + pr_warn("%s: switch to bus width %d ddr failed\n", 914 + mmc_hostname(host), 1 << bus_width); 915 + return err; 916 + } 917 + 918 + /* 919 + * eMMC cards can support 3.3V to 1.2V i/o (vccq) 920 + * signaling. 921 + * 922 + * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq. 923 + * 924 + * 1.8V vccq at 3.3V core voltage (vcc) is not required 925 + * in the JEDEC spec for DDR. 926 + * 927 + * Do not force change in vccq since we are obviously 928 + * working and no change to vccq is needed. 929 + * 930 + * WARNING: eMMC rules are NOT the same as SD DDR 931 + */ 932 + if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_2V) { 933 + err = __mmc_set_signal_voltage(host, 934 + MMC_SIGNAL_VOLTAGE_120); 935 + if (err) 936 + return err; 937 + } 938 + 939 + mmc_set_timing(host, MMC_TIMING_MMC_DDR52); 940 + 941 + return err; 942 + } 943 + 944 + static int mmc_select_hs400(struct mmc_card *card) 945 + { 946 + struct mmc_host *host = card->host; 947 + int err = 0; 948 + 949 + /* 950 + * HS400 mode requires 8-bit bus width 951 + */ 952 + if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 && 953 + host->ios.bus_width == MMC_BUS_WIDTH_8)) 954 + return 0; 955 + 956 + /* 957 + * Before switching to dual data rate operation for HS400, 958 + * it is required to convert from HS200 mode to HS mode. 
959 + */ 960 + mmc_set_timing(card->host, MMC_TIMING_MMC_HS); 961 + mmc_set_bus_speed(card); 962 + 963 + err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 964 + EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, 965 + card->ext_csd.generic_cmd6_time, 966 + true, true, true); 967 + if (err) { 968 + pr_warn("%s: switch to high-speed from hs200 failed, err:%d\n", 969 + mmc_hostname(host), err); 970 + return err; 971 + } 972 + 973 + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 974 + EXT_CSD_BUS_WIDTH, 975 + EXT_CSD_DDR_BUS_WIDTH_8, 976 + card->ext_csd.generic_cmd6_time); 977 + if (err) { 978 + pr_warn("%s: switch to bus width for hs400 failed, err:%d\n", 979 + mmc_hostname(host), err); 980 + return err; 981 + } 982 + 983 + err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 984 + EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS400, 985 + card->ext_csd.generic_cmd6_time, 986 + true, true, true); 987 + if (err) { 988 + pr_warn("%s: switch to hs400 failed, err:%d\n", 989 + mmc_hostname(host), err); 990 + return err; 991 + } 992 + 993 + mmc_set_timing(host, MMC_TIMING_MMC_HS400); 994 + mmc_set_bus_speed(card); 995 + 996 + return 0; 997 + } 998 + 999 + /* 1000 + * For device supporting HS200 mode, the following sequence 1001 + * should be done before executing the tuning process. 1002 + * 1. set the desired bus width(4-bit or 8-bit, 1-bit is not supported) 1003 + * 2. switch to HS200 mode 1004 + * 3. 
set the clock to > 52Mhz and <=200MHz 1005 + */ 1006 + static int mmc_select_hs200(struct mmc_card *card) 1007 + { 1008 + struct mmc_host *host = card->host; 1009 + int err = -EINVAL; 1010 + 1011 + if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V) 1012 + err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120); 1013 + 1014 + if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V) 1015 + err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180); 1016 + 1017 + /* If fails try again during next card power cycle */ 1018 + if (err) 1019 + goto err; 1020 + 1021 + /* 1022 + * Set the bus width(4 or 8) with host's support and 1023 + * switch to HS200 mode if bus width is set successfully. 1024 + */ 1025 + err = mmc_select_bus_width(card); 1026 + if (!IS_ERR_VALUE(err)) { 923 1027 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 924 - EXT_CSD_HS_TIMING, 2, 925 - card->ext_csd.generic_cmd6_time, 926 - true, true, true); 1028 + EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS200, 1029 + card->ext_csd.generic_cmd6_time, 1030 + true, true, true); 1031 + if (!err) 1032 + mmc_set_timing(host, MMC_TIMING_MMC_HS200); 1033 + } 927 1034 err: 1035 + return err; 1036 + } 1037 + 1038 + /* 1039 + * Activate High Speed or HS200 mode if supported. 1040 + */ 1041 + static int mmc_select_timing(struct mmc_card *card) 1042 + { 1043 + int err = 0; 1044 + 1045 + if ((card->csd.mmca_vsn < CSD_SPEC_VER_4 && 1046 + card->ext_csd.hs_max_dtr == 0)) 1047 + goto bus_speed; 1048 + 1049 + if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200) 1050 + err = mmc_select_hs200(card); 1051 + else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS) 1052 + err = mmc_select_hs(card); 1053 + 1054 + if (err && err != -EBADMSG) 1055 + return err; 1056 + 1057 + if (err) { 1058 + pr_warn("%s: switch to %s failed\n", 1059 + mmc_card_hs(card) ? "high-speed" : 1060 + (mmc_card_hs200(card) ? 
"hs200" : ""), 1061 + mmc_hostname(card->host)); 1062 + err = 0; 1063 + } 1064 + 1065 + bus_speed: 1066 + /* 1067 + * Set the bus speed to the selected bus timing. 1068 + * If timing is not selected, backward compatible is the default. 1069 + */ 1070 + mmc_set_bus_speed(card); 1071 + return err; 1072 + } 1073 + 1074 + /* 1075 + * Execute tuning sequence to seek the proper bus operating 1076 + * conditions for HS200 and HS400, which sends CMD21 to the device. 1077 + */ 1078 + static int mmc_hs200_tuning(struct mmc_card *card) 1079 + { 1080 + struct mmc_host *host = card->host; 1081 + int err = 0; 1082 + 1083 + /* 1084 + * Timing should be adjusted to the HS400 target 1085 + * operation frequency for tuning process 1086 + */ 1087 + if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 && 1088 + host->ios.bus_width == MMC_BUS_WIDTH_8) 1089 + if (host->ops->prepare_hs400_tuning) 1090 + host->ops->prepare_hs400_tuning(host, &host->ios); 1091 + 1092 + if (host->ops->execute_tuning) { 1093 + mmc_host_clk_hold(host); 1094 + err = host->ops->execute_tuning(host, 1095 + MMC_SEND_TUNING_BLOCK_HS200); 1096 + mmc_host_clk_release(host); 1097 + 1098 + if (err) 1099 + pr_warn("%s: tuning execution failed\n", 1100 + mmc_hostname(host)); 1101 + } 1102 + 928 1103 return err; 929 1104 } 930 1105 ··· 1177 874 struct mmc_card *oldcard) 1178 875 { 1179 876 struct mmc_card *card; 1180 - int err, ddr = 0; 877 + int err; 1181 878 u32 cid[4]; 1182 - unsigned int max_dtr; 1183 879 u32 rocr; 1184 880 u8 *ext_csd = NULL; 1185 881 ··· 1370 1068 } 1371 1069 1372 1070 /* 1373 - * Activate high speed (if supported) 1071 + * Select timing interface 1374 1072 */ 1375 - if (card->ext_csd.hs_max_dtr != 0) { 1376 - err = 0; 1377 - if (card->ext_csd.hs_max_dtr > 52000000 && 1378 - host->caps2 & MMC_CAP2_HS200) 1379 - err = mmc_select_hs200(card); 1380 - else if (host->caps & MMC_CAP_MMC_HIGHSPEED) 1381 - err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1382 - EXT_CSD_HS_TIMING, 1, 1383 - 
card->ext_csd.generic_cmd6_time, 1384 - true, true, true); 1073 + err = mmc_select_timing(card); 1074 + if (err) 1075 + goto free_card; 1385 1076 1386 - if (err && err != -EBADMSG) 1387 - goto free_card; 1388 - 1389 - if (err) { 1390 - pr_warning("%s: switch to highspeed failed\n", 1391 - mmc_hostname(card->host)); 1392 - err = 0; 1393 - } else { 1394 - if (card->ext_csd.hs_max_dtr > 52000000 && 1395 - host->caps2 & MMC_CAP2_HS200) { 1396 - mmc_card_set_hs200(card); 1397 - mmc_set_timing(card->host, 1398 - MMC_TIMING_MMC_HS200); 1399 - } else { 1400 - mmc_card_set_highspeed(card); 1401 - mmc_set_timing(card->host, MMC_TIMING_MMC_HS); 1402 - } 1403 - } 1404 - } 1405 - 1406 - /* 1407 - * Compute bus speed. 1408 - */ 1409 - max_dtr = (unsigned int)-1; 1410 - 1411 - if (mmc_card_highspeed(card) || mmc_card_hs200(card)) { 1412 - if (max_dtr > card->ext_csd.hs_max_dtr) 1413 - max_dtr = card->ext_csd.hs_max_dtr; 1414 - if (mmc_card_highspeed(card) && (max_dtr > 52000000)) 1415 - max_dtr = 52000000; 1416 - } else if (max_dtr > card->csd.max_dtr) { 1417 - max_dtr = card->csd.max_dtr; 1418 - } 1419 - 1420 - mmc_set_clock(host, max_dtr); 1421 - 1422 - /* 1423 - * Indicate DDR mode (if supported). 1424 - */ 1425 - if (mmc_card_highspeed(card)) { 1426 - if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) 1427 - && (host->caps & MMC_CAP_1_8V_DDR)) 1428 - ddr = MMC_1_8V_DDR_MODE; 1429 - else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) 1430 - && (host->caps & MMC_CAP_1_2V_DDR)) 1431 - ddr = MMC_1_2V_DDR_MODE; 1432 - } 1433 - 1434 - /* 1435 - * Indicate HS200 SDR mode (if supported). 1436 - */ 1437 1077 if (mmc_card_hs200(card)) { 1438 - u32 ext_csd_bits; 1439 - u32 bus_width = card->host->ios.bus_width; 1440 - 1441 - /* 1442 - * For devices supporting HS200 mode, the bus width has 1443 - * to be set before executing the tuning function. If 1444 - * set before tuning, then device will respond with CRC 1445 - * errors for responses on CMD line. 
So for HS200 the 1446 - * sequence will be 1447 - * 1. set bus width 4bit / 8 bit (1 bit not supported) 1448 - * 2. switch to HS200 mode 1449 - * 3. set the clock to > 52Mhz <=200MHz and 1450 - * 4. execute tuning for HS200 1451 - */ 1452 - if ((host->caps2 & MMC_CAP2_HS200) && 1453 - card->host->ops->execute_tuning) { 1454 - mmc_host_clk_hold(card->host); 1455 - err = card->host->ops->execute_tuning(card->host, 1456 - MMC_SEND_TUNING_BLOCK_HS200); 1457 - mmc_host_clk_release(card->host); 1458 - } 1459 - if (err) { 1460 - pr_warning("%s: tuning execution failed\n", 1461 - mmc_hostname(card->host)); 1462 - goto err; 1463 - } 1464 - 1465 - ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ? 1466 - EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4; 1467 - err = mmc_select_powerclass(card, ext_csd_bits); 1078 + err = mmc_hs200_tuning(card); 1468 1079 if (err) 1469 - pr_warning("%s: power class selection to bus width %d" 1470 - " failed\n", mmc_hostname(card->host), 1471 - 1 << bus_width); 1080 + goto err; 1081 + 1082 + err = mmc_select_hs400(card); 1083 + if (err) 1084 + goto err; 1085 + } else if (mmc_card_hs(card)) { 1086 + /* Select the desired bus width optionally */ 1087 + err = mmc_select_bus_width(card); 1088 + if (!IS_ERR_VALUE(err)) { 1089 + err = mmc_select_hs_ddr(card); 1090 + if (err) 1091 + goto err; 1092 + } 1472 1093 } 1473 1094 1474 1095 /* 1475 - * Activate wide bus and DDR (if supported). 
1096 + * Choose the power class with selected bus interface 1476 1097 */ 1477 - if (!mmc_card_hs200(card) && 1478 - (card->csd.mmca_vsn >= CSD_SPEC_VER_4) && 1479 - (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) { 1480 - static unsigned ext_csd_bits[][2] = { 1481 - { EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 }, 1482 - { EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 }, 1483 - { EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 }, 1484 - }; 1485 - static unsigned bus_widths[] = { 1486 - MMC_BUS_WIDTH_8, 1487 - MMC_BUS_WIDTH_4, 1488 - MMC_BUS_WIDTH_1 1489 - }; 1490 - unsigned idx, bus_width = 0; 1491 - 1492 - if (host->caps & MMC_CAP_8_BIT_DATA) 1493 - idx = 0; 1494 - else 1495 - idx = 1; 1496 - for (; idx < ARRAY_SIZE(bus_widths); idx++) { 1497 - bus_width = bus_widths[idx]; 1498 - if (bus_width == MMC_BUS_WIDTH_1) 1499 - ddr = 0; /* no DDR for 1-bit width */ 1500 - err = mmc_select_powerclass(card, ext_csd_bits[idx][0]); 1501 - if (err) 1502 - pr_warning("%s: power class selection to " 1503 - "bus width %d failed\n", 1504 - mmc_hostname(card->host), 1505 - 1 << bus_width); 1506 - 1507 - err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1508 - EXT_CSD_BUS_WIDTH, 1509 - ext_csd_bits[idx][0], 1510 - card->ext_csd.generic_cmd6_time); 1511 - if (!err) { 1512 - mmc_set_bus_width(card->host, bus_width); 1513 - 1514 - /* 1515 - * If controller can't handle bus width test, 1516 - * compare ext_csd previously read in 1 bit mode 1517 - * against ext_csd at new bus width 1518 - */ 1519 - if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST)) 1520 - err = mmc_compare_ext_csds(card, 1521 - bus_width); 1522 - else 1523 - err = mmc_bus_test(card, bus_width); 1524 - if (!err) 1525 - break; 1526 - } 1527 - } 1528 - 1529 - if (!err && ddr) { 1530 - err = mmc_select_powerclass(card, ext_csd_bits[idx][1]); 1531 - if (err) 1532 - pr_warning("%s: power class selection to " 1533 - "bus width %d ddr %d failed\n", 1534 - mmc_hostname(card->host), 1535 - 1 << bus_width, ddr); 1536 - 1537 - err = 
mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1538 - EXT_CSD_BUS_WIDTH, 1539 - ext_csd_bits[idx][1], 1540 - card->ext_csd.generic_cmd6_time); 1541 - } 1542 - if (err) { 1543 - pr_warning("%s: switch to bus width %d ddr %d " 1544 - "failed\n", mmc_hostname(card->host), 1545 - 1 << bus_width, ddr); 1546 - goto free_card; 1547 - } else if (ddr) { 1548 - /* 1549 - * eMMC cards can support 3.3V to 1.2V i/o (vccq) 1550 - * signaling. 1551 - * 1552 - * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq. 1553 - * 1554 - * 1.8V vccq at 3.3V core voltage (vcc) is not required 1555 - * in the JEDEC spec for DDR. 1556 - * 1557 - * Do not force change in vccq since we are obviously 1558 - * working and no change to vccq is needed. 1559 - * 1560 - * WARNING: eMMC rules are NOT the same as SD DDR 1561 - */ 1562 - if (ddr == MMC_1_2V_DDR_MODE) { 1563 - err = __mmc_set_signal_voltage(host, 1564 - MMC_SIGNAL_VOLTAGE_120); 1565 - if (err) 1566 - goto err; 1567 - } 1568 - mmc_card_set_ddr_mode(card); 1569 - mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50); 1570 - mmc_set_bus_width(card->host, bus_width); 1571 - } 1572 - } 1098 + mmc_select_powerclass(card); 1573 1099 1574 1100 /* 1575 1101 * Enable HPI feature (if supported) ··· 1637 1507 err = mmc_sleep(host); 1638 1508 else if (!mmc_host_is_spi(host)) 1639 1509 err = mmc_deselect_cards(host); 1640 - host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200); 1641 1510 1642 1511 if (!err) { 1643 1512 mmc_power_off(host); ··· 1766 1637 { 1767 1638 int ret; 1768 1639 1769 - host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200); 1770 1640 mmc_claim_host(host); 1771 1641 ret = mmc_init_card(host, host->card->ocr, host->card); 1772 1642 mmc_release_host(host);
+5 -23
drivers/mmc/core/sd.c
··· 707 707 &dev_attr_serial.attr, 708 708 NULL, 709 709 }; 710 - 711 - static struct attribute_group sd_std_attr_group = { 712 - .attrs = sd_std_attrs, 713 - }; 714 - 715 - static const struct attribute_group *sd_attr_groups[] = { 716 - &sd_std_attr_group, 717 - NULL, 718 - }; 710 + ATTRIBUTE_GROUPS(sd_std); 719 711 720 712 struct device_type sd_type = { 721 - .groups = sd_attr_groups, 713 + .groups = sd_std_groups, 722 714 }; 723 715 724 716 /* ··· 887 895 { 888 896 unsigned max_dtr = (unsigned int)-1; 889 897 890 - if (mmc_card_highspeed(card)) { 898 + if (mmc_card_hs(card)) { 891 899 if (max_dtr > card->sw_caps.hs_max_dtr) 892 900 max_dtr = card->sw_caps.hs_max_dtr; 893 901 } else if (max_dtr > card->csd.max_dtr) { ··· 895 903 } 896 904 897 905 return max_dtr; 898 - } 899 - 900 - void mmc_sd_go_highspeed(struct mmc_card *card) 901 - { 902 - mmc_card_set_highspeed(card); 903 - mmc_set_timing(card->host, MMC_TIMING_SD_HS); 904 906 } 905 907 906 908 /* ··· 971 985 err = mmc_sd_init_uhs_card(card); 972 986 if (err) 973 987 goto free_card; 974 - 975 - /* Card is an ultra-high-speed card */ 976 - mmc_card_set_uhs(card); 977 988 } else { 978 989 /* 979 990 * Attempt to change to high-speed (if supported) 980 991 */ 981 992 err = mmc_sd_switch_hs(card); 982 993 if (err > 0) 983 - mmc_sd_go_highspeed(card); 994 + mmc_set_timing(card->host, MMC_TIMING_SD_HS); 984 995 else if (err) 985 996 goto free_card; 986 997 ··· 1072 1089 1073 1090 if (!mmc_host_is_spi(host)) 1074 1091 err = mmc_deselect_cards(host); 1075 - host->card->state &= ~MMC_STATE_HIGHSPEED; 1092 + 1076 1093 if (!err) { 1077 1094 mmc_power_off(host); 1078 1095 mmc_card_set_suspended(host->card); ··· 1181 1198 { 1182 1199 int ret; 1183 1200 1184 - host->card->state &= ~MMC_STATE_HIGHSPEED; 1185 1201 mmc_claim_host(host); 1186 1202 ret = mmc_sd_init_card(host, host->card->ocr, host->card); 1187 1203 mmc_release_host(host);
-1
drivers/mmc/core/sd.h
··· 12 12 bool reinit); 13 13 unsigned mmc_sd_get_max_clock(struct mmc_card *card); 14 14 int mmc_sd_switch_hs(struct mmc_card *card); 15 - void mmc_sd_go_highspeed(struct mmc_card *card); 16 15 17 16 #endif
+6 -47
drivers/mmc/core/sdio.c
··· 363 363 { 364 364 unsigned max_dtr; 365 365 366 - if (mmc_card_highspeed(card)) { 366 + if (mmc_card_hs(card)) { 367 367 /* 368 368 * The SDIO specification doesn't mention how 369 369 * the CIS transfer speed register relates to ··· 733 733 mmc_set_clock(host, card->cis.max_dtr); 734 734 735 735 if (card->cccr.high_speed) { 736 - mmc_card_set_highspeed(card); 737 736 mmc_set_timing(card->host, MMC_TIMING_SD_HS); 738 737 } 739 738 ··· 791 792 err = mmc_sdio_init_uhs_card(card); 792 793 if (err) 793 794 goto remove; 794 - 795 - /* Card is an ultra-high-speed card */ 796 - mmc_card_set_uhs(card); 797 795 } else { 798 796 /* 799 797 * Switch to high-speed (if supported). 800 798 */ 801 799 err = sdio_enable_hs(card); 802 800 if (err > 0) 803 - mmc_sd_go_highspeed(card); 801 + mmc_set_timing(card->host, MMC_TIMING_SD_HS); 804 802 else if (err) 805 803 goto remove; 806 804 ··· 939 943 */ 940 944 static int mmc_sdio_suspend(struct mmc_host *host) 941 945 { 942 - int i, err = 0; 943 - 944 - for (i = 0; i < host->card->sdio_funcs; i++) { 945 - struct sdio_func *func = host->card->sdio_func[i]; 946 - if (func && sdio_func_present(func) && func->dev.driver) { 947 - const struct dev_pm_ops *pmops = func->dev.driver->pm; 948 - err = pmops->suspend(&func->dev); 949 - if (err) 950 - break; 951 - } 952 - } 953 - while (err && --i >= 0) { 954 - struct sdio_func *func = host->card->sdio_func[i]; 955 - if (func && sdio_func_present(func) && func->dev.driver) { 956 - const struct dev_pm_ops *pmops = func->dev.driver->pm; 957 - pmops->resume(&func->dev); 958 - } 959 - } 960 - 961 - if (!err && mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) { 946 + if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) { 962 947 mmc_claim_host(host); 963 948 sdio_disable_wide(host->card); 964 949 mmc_release_host(host); 965 950 } 966 951 967 - if (!err && !mmc_card_keep_power(host)) 952 + if (!mmc_card_keep_power(host)) 968 953 mmc_power_off(host); 969 954 970 - return err; 955 
+ return 0; 971 956 } 972 957 973 958 static int mmc_sdio_resume(struct mmc_host *host) 974 959 { 975 - int i, err = 0; 960 + int err = 0; 976 961 977 962 BUG_ON(!host); 978 963 BUG_ON(!host->card); ··· 995 1018 if (!err && host->sdio_irqs) 996 1019 wake_up_process(host->sdio_irq_thread); 997 1020 mmc_release_host(host); 998 - 999 - /* 1000 - * If the card looked to be the same as before suspending, then 1001 - * we proceed to resume all card functions. If one of them returns 1002 - * an error then we simply return that error to the core and the 1003 - * card will be redetected as new. It is the responsibility of 1004 - * the function driver to perform further tests with the extra 1005 - * knowledge it has of the card to confirm the card is indeed the 1006 - * same as before suspending (same MAC address for network cards, 1007 - * etc.) and return an error otherwise. 1008 - */ 1009 - for (i = 0; !err && i < host->card->sdio_funcs; i++) { 1010 - struct sdio_func *func = host->card->sdio_func[i]; 1011 - if (func && sdio_func_present(func) && func->dev.driver) { 1012 - const struct dev_pm_ops *pmops = func->dev.driver->pm; 1013 - err = pmops->resume(&func->dev); 1014 - } 1015 - } 1016 1021 1017 1022 host->pm_flags &= ~MMC_PM_KEEP_POWER; 1018 1023 return err;
+1 -13
drivers/mmc/core/sdio_bus.c
··· 197 197 198 198 #ifdef CONFIG_PM 199 199 200 - #ifdef CONFIG_PM_SLEEP 201 - static int pm_no_operation(struct device *dev) 202 - { 203 - /* 204 - * Prevent the PM core from calling SDIO device drivers' suspend 205 - * callback routines, which it is not supposed to do, by using this 206 - * empty function as the bus type suspend callaback for SDIO. 207 - */ 208 - return 0; 209 - } 210 - #endif 211 - 212 200 static const struct dev_pm_ops sdio_bus_pm_ops = { 213 - SET_SYSTEM_SLEEP_PM_OPS(pm_no_operation, pm_no_operation) 201 + SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend, pm_generic_resume) 214 202 SET_RUNTIME_PM_OPS( 215 203 pm_generic_runtime_suspend, 216 204 pm_generic_runtime_resume,
+31 -10
drivers/mmc/core/sdio_irq.c
··· 90 90 return ret; 91 91 } 92 92 93 + void sdio_run_irqs(struct mmc_host *host) 94 + { 95 + mmc_claim_host(host); 96 + host->sdio_irq_pending = true; 97 + process_sdio_pending_irqs(host); 98 + mmc_release_host(host); 99 + } 100 + EXPORT_SYMBOL_GPL(sdio_run_irqs); 101 + 93 102 static int sdio_irq_thread(void *_host) 94 103 { 95 104 struct mmc_host *host = _host; ··· 198 189 WARN_ON(!host->claimed); 199 190 200 191 if (!host->sdio_irqs++) { 201 - atomic_set(&host->sdio_irq_thread_abort, 0); 202 - host->sdio_irq_thread = 203 - kthread_run(sdio_irq_thread, host, "ksdioirqd/%s", 204 - mmc_hostname(host)); 205 - if (IS_ERR(host->sdio_irq_thread)) { 206 - int err = PTR_ERR(host->sdio_irq_thread); 207 - host->sdio_irqs--; 208 - return err; 192 + if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) { 193 + atomic_set(&host->sdio_irq_thread_abort, 0); 194 + host->sdio_irq_thread = 195 + kthread_run(sdio_irq_thread, host, 196 + "ksdioirqd/%s", mmc_hostname(host)); 197 + if (IS_ERR(host->sdio_irq_thread)) { 198 + int err = PTR_ERR(host->sdio_irq_thread); 199 + host->sdio_irqs--; 200 + return err; 201 + } 202 + } else { 203 + mmc_host_clk_hold(host); 204 + host->ops->enable_sdio_irq(host, 1); 205 + mmc_host_clk_release(host); 209 206 } 210 207 } 211 208 ··· 226 211 BUG_ON(host->sdio_irqs < 1); 227 212 228 213 if (!--host->sdio_irqs) { 229 - atomic_set(&host->sdio_irq_thread_abort, 1); 230 - kthread_stop(host->sdio_irq_thread); 214 + if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) { 215 + atomic_set(&host->sdio_irq_thread_abort, 1); 216 + kthread_stop(host->sdio_irq_thread); 217 + } else { 218 + mmc_host_clk_hold(host); 219 + host->ops->enable_sdio_irq(host, 0); 220 + mmc_host_clk_release(host); 221 + } 231 222 } 232 223 233 224 return 0;
+1 -3
drivers/mmc/core/slot-gpio.c
··· 32 32 /* Schedule a card detection after a debounce timeout */ 33 33 struct mmc_host *host = dev_id; 34 34 35 - if (host->ops->card_event) 36 - host->ops->card_event(host); 37 - 35 + host->trigger_card_event = true; 38 36 mmc_detect_change(host, msecs_to_jiffies(200)); 39 37 40 38 return IRQ_HANDLED;
+16 -1
drivers/mmc/host/Kconfig
··· 168 168 169 169 config MMC_SDHCI_DOVE 170 170 tristate "SDHCI support on Marvell's Dove SoC" 171 - depends on ARCH_DOVE 171 + depends on ARCH_DOVE || MACH_DOVE 172 172 depends on MMC_SDHCI_PLTFM 173 173 select MMC_SDHCI_IO_ACCESSORS 174 174 help ··· 282 282 platform with SD or MMC devices, say Y or M here. 283 283 284 284 If unsure, say N. 285 + 286 + config MMC_MOXART 287 + tristate "MOXART SD/MMC Host Controller support" 288 + depends on ARCH_MOXART && MMC 289 + help 290 + This selects support for the MOXART SD/MMC Host Controller. 291 + MOXA provides one multi-functional card reader which can 292 + be found on some embedded hardware such as UC-7112-LX. 293 + If you have a controller with this interface, say Y here. 285 294 286 295 config MMC_OMAP 287 296 tristate "TI OMAP Multimedia Card Interface support" ··· 696 687 697 688 To compile this driver as a module, choose M here: the 698 689 module will be called wmt-sdmmc. 690 + 691 + config MMC_USDHI6ROL0 692 + tristate "Renesas USDHI6ROL0 SD/SDIO Host Controller support" 693 + help 694 + This selects support for the Renesas USDHI6ROL0 SD/SDIO 695 + Host Controller 699 696 700 697 config MMC_REALTEK_PCI 701 698 tristate "Realtek PCI-E SD/MMC Card Interface Driver"
+2
drivers/mmc/host/Makefile
··· 50 50 obj-$(CONFIG_MMC_VUB300) += vub300.o 51 51 obj-$(CONFIG_MMC_USHC) += ushc.o 52 52 obj-$(CONFIG_MMC_WMT) += wmt-sdmmc.o 53 + obj-$(CONFIG_MMC_MOXART) += moxart-mmc.o 53 54 obj-$(CONFIG_MMC_SUNXI) += sunxi-mmc.o 55 + obj-$(CONFIG_MMC_USDHI6ROL0) += usdhi6rol0.o 54 56 55 57 obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o 56 58 obj-$(CONFIG_MMC_REALTEK_USB) += rtsx_usb_sdmmc.o
+3 -10
drivers/mmc/host/atmel-mci.c
··· 820 820 821 821 atmci_pdc_cleanup(host); 822 822 823 - /* 824 - * If the card was removed, data will be NULL. No point trying 825 - * to send the stop command or waiting for NBUSY in this case. 826 - */ 827 - if (host->data) { 828 - dev_dbg(&host->pdev->dev, 829 - "(%s) set pending xfer complete\n", __func__); 830 - atmci_set_pending(host, EVENT_XFER_COMPLETE); 831 - tasklet_schedule(&host->tasklet); 832 - } 823 + dev_dbg(&host->pdev->dev, "(%s) set pending xfer complete\n", __func__); 824 + atmci_set_pending(host, EVENT_XFER_COMPLETE); 825 + tasklet_schedule(&host->tasklet); 833 826 } 834 827 835 828 static void atmci_dma_cleanup(struct atmel_mci *host)
+3 -4
drivers/mmc/host/dw_mmc-exynos.c
··· 187 187 unsigned long actual; 188 188 u8 div = priv->ciu_div + 1; 189 189 190 - if (ios->timing == MMC_TIMING_UHS_DDR50) { 190 + if (ios->timing == MMC_TIMING_MMC_DDR52) { 191 191 mci_writel(host, CLKSEL, priv->ddr_timing); 192 192 /* Should be double rate for DDR mode */ 193 193 if (ios->bus_width == MMC_BUS_WIDTH_8) ··· 386 386 387 387 /* Common capabilities of Exynos4/Exynos5 SoC */ 388 388 static unsigned long exynos_dwmmc_caps[4] = { 389 - MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR | 390 - MMC_CAP_8_BIT_DATA | MMC_CAP_CMD23, 389 + MMC_CAP_1_8V_DDR | MMC_CAP_8_BIT_DATA | MMC_CAP_CMD23, 391 390 MMC_CAP_CMD23, 392 391 MMC_CAP_CMD23, 393 392 MMC_CAP_CMD23, ··· 425 426 return dw_mci_pltfm_register(pdev, drv_data); 426 427 } 427 428 428 - const struct dev_pm_ops dw_mci_exynos_pmops = { 429 + static const struct dev_pm_ops dw_mci_exynos_pmops = { 429 430 SET_SYSTEM_SLEEP_PM_OPS(dw_mci_exynos_suspend, dw_mci_exynos_resume) 430 431 .resume_noirq = dw_mci_exynos_resume_noirq, 431 432 .thaw_noirq = dw_mci_exynos_resume_noirq,
+24 -156
drivers/mmc/host/dw_mmc.c
··· 235 235 } 236 236 #endif /* defined(CONFIG_DEBUG_FS) */ 237 237 238 - static void dw_mci_set_timeout(struct dw_mci *host) 239 - { 240 - /* timeout (maximum) */ 241 - mci_writel(host, TMOUT, 0xffffffff); 242 - } 243 - 244 238 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) 245 239 { 246 240 struct mmc_data *data; ··· 251 257 (cmd->opcode == SD_IO_RW_DIRECT && 252 258 ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT)) 253 259 cmdr |= SDMMC_CMD_STOP; 254 - else 255 - if (cmd->opcode != MMC_SEND_STATUS && cmd->data) 256 - cmdr |= SDMMC_CMD_PRV_DAT_WAIT; 260 + else if (cmd->opcode != MMC_SEND_STATUS && cmd->data) 261 + cmdr |= SDMMC_CMD_PRV_DAT_WAIT; 257 262 258 263 if (cmd->flags & MMC_RSP_PRESENT) { 259 264 /* We expect a response, so set this bit */ ··· 843 850 u32 cmdflags; 844 851 845 852 mrq = slot->mrq; 846 - if (host->pdata->select_slot) 847 - host->pdata->select_slot(slot->id); 848 853 849 854 host->cur_slot = slot; 850 855 host->mrq = mrq; ··· 855 864 856 865 data = cmd->data; 857 866 if (data) { 858 - dw_mci_set_timeout(host); 867 + mci_writel(host, TMOUT, 0xFFFFFFFF); 859 868 mci_writel(host, BYTCNT, data->blksz*data->blocks); 860 869 mci_writel(host, BLKSIZ, data->blksz); 861 870 } ··· 953 962 regs = mci_readl(slot->host, UHS_REG); 954 963 955 964 /* DDR mode set */ 956 - if (ios->timing == MMC_TIMING_UHS_DDR50) 965 + if (ios->timing == MMC_TIMING_MMC_DDR52) 957 966 regs |= ((0x1 << slot->id) << 16); 958 967 else 959 968 regs &= ~((0x1 << slot->id) << 16); ··· 976 985 switch (ios->power_mode) { 977 986 case MMC_POWER_UP: 978 987 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags); 979 - /* Power up slot */ 980 - if (slot->host->pdata->setpower) 981 - slot->host->pdata->setpower(slot->id, mmc->ocr_avail); 982 988 regs = mci_readl(slot->host, PWREN); 983 989 regs |= (1 << slot->id); 984 990 mci_writel(slot->host, PWREN, regs); 985 991 break; 986 992 case MMC_POWER_OFF: 987 - /* Power down slot */ 988 - if 
(slot->host->pdata->setpower) 989 - slot->host->pdata->setpower(slot->id, 0); 990 993 regs = mci_readl(slot->host, PWREN); 991 994 regs &= ~(1 << slot->id); 992 995 mci_writel(slot->host, PWREN, regs); ··· 994 1009 { 995 1010 int read_only; 996 1011 struct dw_mci_slot *slot = mmc_priv(mmc); 997 - struct dw_mci_board *brd = slot->host->pdata; 1012 + int gpio_ro = mmc_gpio_get_ro(mmc); 998 1013 999 1014 /* Use platform get_ro function, else try on board write protect */ 1000 1015 if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT) 1001 1016 read_only = 0; 1002 - else if (brd->get_ro) 1003 - read_only = brd->get_ro(slot->id); 1004 - else if (gpio_is_valid(slot->wp_gpio)) 1005 - read_only = gpio_get_value(slot->wp_gpio); 1017 + else if (!IS_ERR_VALUE(gpio_ro)) 1018 + read_only = gpio_ro; 1006 1019 else 1007 1020 read_only = 1008 1021 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0; ··· 1022 1039 /* Use platform get_cd function, else try onboard card detect */ 1023 1040 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) 1024 1041 present = 1; 1025 - else if (brd->get_cd) 1026 - present = !brd->get_cd(slot->id); 1027 1042 else if (!IS_ERR_VALUE(gpio_cd)) 1028 1043 present = gpio_cd; 1029 1044 else ··· 1229 1248 data->error = -EIO; 1230 1249 } 1231 1250 1232 - dev_err(host->dev, "data error, status 0x%08x\n", status); 1251 + dev_dbg(host->dev, "data error, status 0x%08x\n", status); 1233 1252 1234 1253 /* 1235 1254 * After an error, there may be data lingering ··· 2026 2045 2027 2046 return quirks; 2028 2047 } 2029 - 2030 - /* find out bus-width for a given slot */ 2031 - static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot) 2032 - { 2033 - struct device_node *np = dw_mci_of_find_slot_node(dev, slot); 2034 - u32 bus_wd = 1; 2035 - 2036 - if (!np) 2037 - return 1; 2038 - 2039 - if (of_property_read_u32(np, "bus-width", &bus_wd)) 2040 - dev_err(dev, "bus-width property not found, assuming width" 2041 - " as 1\n"); 2042 - return bus_wd; 2043 - } 2044 - 
2045 - /* find the write protect gpio for a given slot; or -1 if none specified */ 2046 - static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot) 2047 - { 2048 - struct device_node *np = dw_mci_of_find_slot_node(dev, slot); 2049 - int gpio; 2050 - 2051 - if (!np) 2052 - return -EINVAL; 2053 - 2054 - gpio = of_get_named_gpio(np, "wp-gpios", 0); 2055 - 2056 - /* Having a missing entry is valid; return silently */ 2057 - if (!gpio_is_valid(gpio)) 2058 - return -EINVAL; 2059 - 2060 - if (devm_gpio_request(dev, gpio, "dw-mci-wp")) { 2061 - dev_warn(dev, "gpio [%d] request failed\n", gpio); 2062 - return -EINVAL; 2063 - } 2064 - 2065 - return gpio; 2066 - } 2067 - 2068 - /* find the cd gpio for a given slot */ 2069 - static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot, 2070 - struct mmc_host *mmc) 2071 - { 2072 - struct device_node *np = dw_mci_of_find_slot_node(dev, slot); 2073 - int gpio; 2074 - 2075 - if (!np) 2076 - return; 2077 - 2078 - gpio = of_get_named_gpio(np, "cd-gpios", 0); 2079 - 2080 - /* Having a missing entry is valid; return silently */ 2081 - if (!gpio_is_valid(gpio)) 2082 - return; 2083 - 2084 - if (mmc_gpio_request_cd(mmc, gpio, 0)) 2085 - dev_warn(dev, "gpio [%d] request failed\n", gpio); 2086 - } 2087 2048 #else /* CONFIG_OF */ 2088 2049 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot) 2089 2050 { 2090 2051 return 0; 2091 2052 } 2092 - static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot) 2093 - { 2094 - return 1; 2095 - } 2096 2053 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot) 2097 2054 { 2098 2055 return NULL; 2099 - } 2100 - static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot) 2101 - { 2102 - return -EINVAL; 2103 - } 2104 - static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot, 2105 - struct mmc_host *mmc) 2106 - { 2107 - return; 2108 2056 } 2109 2057 #endif /* CONFIG_OF */ 2110 2058 ··· 2044 2134 const struct dw_mci_drv_data *drv_data = host->drv_data; 
2045 2135 int ctrl_id, ret; 2046 2136 u32 freq[2]; 2047 - u8 bus_width; 2048 2137 2049 2138 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev); 2050 2139 if (!mmc) ··· 2067 2158 mmc->f_max = freq[1]; 2068 2159 } 2069 2160 2070 - if (host->pdata->get_ocr) 2071 - mmc->ocr_avail = host->pdata->get_ocr(id); 2072 - else 2073 - mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 2074 - 2075 - /* 2076 - * Start with slot power disabled, it will be enabled when a card 2077 - * is detected. 2078 - */ 2079 - if (host->pdata->setpower) 2080 - host->pdata->setpower(id, 0); 2161 + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 2081 2162 2082 2163 if (host->pdata->caps) 2083 2164 mmc->caps = host->pdata->caps; ··· 2088 2189 if (host->pdata->caps2) 2089 2190 mmc->caps2 = host->pdata->caps2; 2090 2191 2091 - if (host->pdata->get_bus_wd) 2092 - bus_width = host->pdata->get_bus_wd(slot->id); 2093 - else if (host->dev->of_node) 2094 - bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id); 2095 - else 2096 - bus_width = 1; 2097 - 2098 - switch (bus_width) { 2099 - case 8: 2100 - mmc->caps |= MMC_CAP_8_BIT_DATA; 2101 - case 4: 2102 - mmc->caps |= MMC_CAP_4_BIT_DATA; 2103 - } 2192 + mmc_of_parse(mmc); 2104 2193 2105 2194 if (host->pdata->blk_settings) { 2106 2195 mmc->max_segs = host->pdata->blk_settings->max_segs; ··· 2113 2226 #endif /* CONFIG_MMC_DW_IDMAC */ 2114 2227 } 2115 2228 2116 - slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id); 2117 - dw_mci_of_get_cd_gpio(host->dev, slot->id, mmc); 2229 + if (dw_mci_get_cd(mmc)) 2230 + set_bit(DW_MMC_CARD_PRESENT, &slot->flags); 2231 + else 2232 + clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); 2118 2233 2119 2234 ret = mmc_add_host(mmc); 2120 2235 if (ret) ··· 2138 2249 2139 2250 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id) 2140 2251 { 2141 - /* Shutdown detect IRQ */ 2142 - if (slot->host->pdata->exit) 2143 - slot->host->pdata->exit(id); 2144 - 2145 2252 /* Debugfs stuff is cleaned up by mmc core 
*/ 2146 2253 mmc_remove_host(slot->mmc); 2147 2254 slot->host->slot[id] = NULL; ··· 2284 2399 return ERR_PTR(ret); 2285 2400 } 2286 2401 2287 - if (of_find_property(np, "keep-power-in-suspend", NULL)) 2288 - pdata->pm_caps |= MMC_PM_KEEP_POWER; 2289 - 2290 - if (of_find_property(np, "enable-sdio-wakeup", NULL)) 2291 - pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ; 2292 - 2293 2402 if (of_find_property(np, "supports-highspeed", NULL)) 2294 2403 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 2295 - 2296 - if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL)) 2297 - pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR; 2298 - 2299 - if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL)) 2300 - pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR; 2301 - 2302 - if (of_get_property(np, "cd-inverted", NULL)) 2303 - pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; 2304 2404 2305 2405 return pdata; 2306 2406 } ··· 2312 2442 } 2313 2443 } 2314 2444 2315 - if (!host->pdata->select_slot && host->pdata->num_slots > 1) { 2445 + if (host->pdata->num_slots > 1) { 2316 2446 dev_err(host->dev, 2317 - "Platform data must supply select_slot function\n"); 2447 + "Platform data must supply num_slots.\n"); 2318 2448 return -ENODEV; 2319 2449 } 2320 2450 ··· 2344 2474 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz); 2345 2475 if (ret) 2346 2476 dev_warn(host->dev, 2347 - "Unable to set bus rate to %ul\n", 2477 + "Unable to set bus rate to %uHz\n", 2348 2478 host->pdata->bus_hz); 2349 2479 } 2350 2480 host->bus_hz = clk_get_rate(host->ciu_clk); 2481 + } 2482 + 2483 + if (!host->bus_hz) { 2484 + dev_err(host->dev, 2485 + "Platform data must supply bus speed\n"); 2486 + ret = -ENODEV; 2487 + goto err_clk_ciu; 2351 2488 } 2352 2489 2353 2490 if (drv_data && drv_data->init) { ··· 2391 2514 "regulator_enable fail: %d\n", ret); 2392 2515 goto err_clk_ciu; 2393 2516 } 2394 - } 2395 - 2396 - if (!host->bus_hz) { 2397 - dev_err(host->dev, 2398 - "Platform data must supply bus speed\n"); 2399 - ret = -ENODEV; 2400 
- goto err_regulator; 2401 2517 } 2402 2518 2403 2519 host->quirks = host->pdata->quirks; ··· 2536 2666 err_dmaunmap: 2537 2667 if (host->use_dma && host->dma_ops->exit) 2538 2668 host->dma_ops->exit(host); 2539 - 2540 - err_regulator: 2541 2669 if (host->vmmc) 2542 2670 regulator_disable(host->vmmc); 2543 2671
-2
drivers/mmc/host/dw_mmc.h
··· 195 195 * @mmc: The mmc_host representing this slot. 196 196 * @host: The MMC controller this slot is using. 197 197 * @quirks: Slot-level quirks (DW_MCI_SLOT_QUIRK_XXX) 198 - * @wp_gpio: If gpio_is_valid() we'll use this to read write protect. 199 198 * @ctype: Card type for this slot. 200 199 * @mrq: mmc_request currently being processed or waiting to be 201 200 * processed, or NULL when the slot is idle. ··· 213 214 struct dw_mci *host; 214 215 215 216 int quirks; 216 - int wp_gpio; 217 217 218 218 u32 ctype; 219 219
+7 -4
drivers/mmc/host/jz4740_mmc.c
··· 515 515 516 516 jz4740_mmc_send_command(host, req->stop); 517 517 518 - timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_PRG_DONE); 519 - if (timeout) { 520 - host->state = JZ4740_MMC_STATE_DONE; 521 - break; 518 + if (mmc_resp_type(req->stop) & MMC_RSP_BUSY) { 519 + timeout = jz4740_mmc_poll_irq(host, 520 + JZ_MMC_IRQ_PRG_DONE); 521 + if (timeout) { 522 + host->state = JZ4740_MMC_STATE_DONE; 523 + break; 524 + } 522 525 } 523 526 case JZ4740_MMC_STATE_DONE: 524 527 break;
+4 -2
drivers/mmc/host/mmci.c
··· 301 301 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) 302 302 clk |= MCI_ST_8BIT_BUS; 303 303 304 - if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50) 304 + if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 || 305 + host->mmc->ios.timing == MMC_TIMING_MMC_DDR52) 305 306 clk |= MCI_ST_UX500_NEG_EDGE; 306 307 307 308 mmci_write_clkreg(host, clk); ··· 765 764 mmci_write_clkreg(host, clk); 766 765 } 767 766 768 - if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50) 767 + if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 || 768 + host->mmc->ios.timing == MMC_TIMING_MMC_DDR52) 769 769 datactrl |= MCI_ST_DPSM_DDRMODE; 770 770 771 771 /*
+730
drivers/mmc/host/moxart-mmc.c
··· 1 + /* 2 + * MOXA ART MMC host driver. 3 + * 4 + * Copyright (C) 2014 Jonas Jensen 5 + * 6 + * Jonas Jensen <jonas.jensen@gmail.com> 7 + * 8 + * Based on code from 9 + * Moxa Technologies Co., Ltd. <www.moxa.com> 10 + * 11 + * This file is licensed under the terms of the GNU General Public 12 + * License version 2. This program is licensed "as is" without any 13 + * warranty of any kind, whether express or implied. 14 + */ 15 + 16 + #include <linux/version.h> 17 + #include <linux/module.h> 18 + #include <linux/init.h> 19 + #include <linux/platform_device.h> 20 + #include <linux/delay.h> 21 + #include <linux/interrupt.h> 22 + #include <linux/blkdev.h> 23 + #include <linux/dma-mapping.h> 24 + #include <linux/dmaengine.h> 25 + #include <linux/mmc/host.h> 26 + #include <linux/mmc/sd.h> 27 + #include <linux/sched.h> 28 + #include <linux/io.h> 29 + #include <linux/of_address.h> 30 + #include <linux/of_irq.h> 31 + #include <linux/clk.h> 32 + #include <linux/bitops.h> 33 + #include <linux/of_dma.h> 34 + #include <linux/spinlock.h> 35 + 36 + #define REG_COMMAND 0 37 + #define REG_ARGUMENT 4 38 + #define REG_RESPONSE0 8 39 + #define REG_RESPONSE1 12 40 + #define REG_RESPONSE2 16 41 + #define REG_RESPONSE3 20 42 + #define REG_RESPONSE_COMMAND 24 43 + #define REG_DATA_CONTROL 28 44 + #define REG_DATA_TIMER 32 45 + #define REG_DATA_LENGTH 36 46 + #define REG_STATUS 40 47 + #define REG_CLEAR 44 48 + #define REG_INTERRUPT_MASK 48 49 + #define REG_POWER_CONTROL 52 50 + #define REG_CLOCK_CONTROL 56 51 + #define REG_BUS_WIDTH 60 52 + #define REG_DATA_WINDOW 64 53 + #define REG_FEATURE 68 54 + #define REG_REVISION 72 55 + 56 + /* REG_COMMAND */ 57 + #define CMD_SDC_RESET BIT(10) 58 + #define CMD_EN BIT(9) 59 + #define CMD_APP_CMD BIT(8) 60 + #define CMD_LONG_RSP BIT(7) 61 + #define CMD_NEED_RSP BIT(6) 62 + #define CMD_IDX_MASK 0x3f 63 + 64 + /* REG_RESPONSE_COMMAND */ 65 + #define RSP_CMD_APP BIT(6) 66 + #define RSP_CMD_IDX_MASK 0x3f 67 + 68 + /* REG_DATA_CONTROL */ 69 + #define 
DCR_DATA_FIFO_RESET BIT(8) 70 + #define DCR_DATA_THRES BIT(7) 71 + #define DCR_DATA_EN BIT(6) 72 + #define DCR_DMA_EN BIT(5) 73 + #define DCR_DATA_WRITE BIT(4) 74 + #define DCR_BLK_SIZE 0x0f 75 + 76 + /* REG_DATA_LENGTH */ 77 + #define DATA_LEN_MASK 0xffffff 78 + 79 + /* REG_STATUS */ 80 + #define WRITE_PROT BIT(12) 81 + #define CARD_DETECT BIT(11) 82 + /* 1-10 below can be sent to either registers, interrupt or clear. */ 83 + #define CARD_CHANGE BIT(10) 84 + #define FIFO_ORUN BIT(9) 85 + #define FIFO_URUN BIT(8) 86 + #define DATA_END BIT(7) 87 + #define CMD_SENT BIT(6) 88 + #define DATA_CRC_OK BIT(5) 89 + #define RSP_CRC_OK BIT(4) 90 + #define DATA_TIMEOUT BIT(3) 91 + #define RSP_TIMEOUT BIT(2) 92 + #define DATA_CRC_FAIL BIT(1) 93 + #define RSP_CRC_FAIL BIT(0) 94 + 95 + #define MASK_RSP (RSP_TIMEOUT | RSP_CRC_FAIL | \ 96 + RSP_CRC_OK | CARD_DETECT | CMD_SENT) 97 + 98 + #define MASK_DATA (DATA_CRC_OK | DATA_END | \ 99 + DATA_CRC_FAIL | DATA_TIMEOUT) 100 + 101 + #define MASK_INTR_PIO (FIFO_URUN | FIFO_ORUN | CARD_CHANGE) 102 + 103 + /* REG_POWER_CONTROL */ 104 + #define SD_POWER_ON BIT(4) 105 + #define SD_POWER_MASK 0x0f 106 + 107 + /* REG_CLOCK_CONTROL */ 108 + #define CLK_HISPD BIT(9) 109 + #define CLK_OFF BIT(8) 110 + #define CLK_SD BIT(7) 111 + #define CLK_DIV_MASK 0x7f 112 + 113 + /* REG_BUS_WIDTH */ 114 + #define BUS_WIDTH_8 BIT(2) 115 + #define BUS_WIDTH_4 BIT(1) 116 + #define BUS_WIDTH_1 BIT(0) 117 + 118 + #define MMC_VDD_360 23 119 + #define MIN_POWER (MMC_VDD_360 - SD_POWER_MASK) 120 + #define MAX_RETRIES 500000 121 + 122 + struct moxart_host { 123 + spinlock_t lock; 124 + 125 + void __iomem *base; 126 + 127 + phys_addr_t reg_phys; 128 + 129 + struct dma_chan *dma_chan_tx; 130 + struct dma_chan *dma_chan_rx; 131 + struct dma_async_tx_descriptor *tx_desc; 132 + struct mmc_host *mmc; 133 + struct mmc_request *mrq; 134 + struct scatterlist *cur_sg; 135 + struct completion dma_complete; 136 + struct completion pio_complete; 137 + 138 + u32 num_sg; 139 + u32 
data_remain; 140 + u32 data_len; 141 + u32 fifo_width; 142 + u32 timeout; 143 + u32 rate; 144 + 145 + long sysclk; 146 + 147 + bool have_dma; 148 + bool is_removed; 149 + }; 150 + 151 + static inline void moxart_init_sg(struct moxart_host *host, 152 + struct mmc_data *data) 153 + { 154 + host->cur_sg = data->sg; 155 + host->num_sg = data->sg_len; 156 + host->data_remain = host->cur_sg->length; 157 + 158 + if (host->data_remain > host->data_len) 159 + host->data_remain = host->data_len; 160 + } 161 + 162 + static inline int moxart_next_sg(struct moxart_host *host) 163 + { 164 + int remain; 165 + struct mmc_data *data = host->mrq->cmd->data; 166 + 167 + host->cur_sg++; 168 + host->num_sg--; 169 + 170 + if (host->num_sg > 0) { 171 + host->data_remain = host->cur_sg->length; 172 + remain = host->data_len - data->bytes_xfered; 173 + if (remain > 0 && remain < host->data_remain) 174 + host->data_remain = remain; 175 + } 176 + 177 + return host->num_sg; 178 + } 179 + 180 + static int moxart_wait_for_status(struct moxart_host *host, 181 + u32 mask, u32 *status) 182 + { 183 + int ret = -ETIMEDOUT; 184 + u32 i; 185 + 186 + for (i = 0; i < MAX_RETRIES; i++) { 187 + *status = readl(host->base + REG_STATUS); 188 + if (!(*status & mask)) { 189 + udelay(5); 190 + continue; 191 + } 192 + writel(*status & mask, host->base + REG_CLEAR); 193 + ret = 0; 194 + break; 195 + } 196 + 197 + if (ret) 198 + dev_err(mmc_dev(host->mmc), "timed out waiting for status\n"); 199 + 200 + return ret; 201 + } 202 + 203 + 204 + static void moxart_send_command(struct moxart_host *host, 205 + struct mmc_command *cmd) 206 + { 207 + u32 status, cmdctrl; 208 + 209 + writel(RSP_TIMEOUT | RSP_CRC_OK | 210 + RSP_CRC_FAIL | CMD_SENT, host->base + REG_CLEAR); 211 + writel(cmd->arg, host->base + REG_ARGUMENT); 212 + 213 + cmdctrl = cmd->opcode & CMD_IDX_MASK; 214 + if (cmdctrl == SD_APP_SET_BUS_WIDTH || cmdctrl == SD_APP_OP_COND || 215 + cmdctrl == SD_APP_SEND_SCR || cmdctrl == SD_APP_SD_STATUS || 216 + cmdctrl 
== SD_APP_SEND_NUM_WR_BLKS) 217 + cmdctrl |= CMD_APP_CMD; 218 + 219 + if (cmd->flags & MMC_RSP_PRESENT) 220 + cmdctrl |= CMD_NEED_RSP; 221 + 222 + if (cmd->flags & MMC_RSP_136) 223 + cmdctrl |= CMD_LONG_RSP; 224 + 225 + writel(cmdctrl | CMD_EN, host->base + REG_COMMAND); 226 + 227 + if (moxart_wait_for_status(host, MASK_RSP, &status) == -ETIMEDOUT) 228 + cmd->error = -ETIMEDOUT; 229 + 230 + if (status & RSP_TIMEOUT) { 231 + cmd->error = -ETIMEDOUT; 232 + return; 233 + } 234 + if (status & RSP_CRC_FAIL) { 235 + cmd->error = -EIO; 236 + return; 237 + } 238 + if (status & RSP_CRC_OK) { 239 + if (cmd->flags & MMC_RSP_136) { 240 + cmd->resp[3] = readl(host->base + REG_RESPONSE0); 241 + cmd->resp[2] = readl(host->base + REG_RESPONSE1); 242 + cmd->resp[1] = readl(host->base + REG_RESPONSE2); 243 + cmd->resp[0] = readl(host->base + REG_RESPONSE3); 244 + } else { 245 + cmd->resp[0] = readl(host->base + REG_RESPONSE0); 246 + } 247 + } 248 + } 249 + 250 + static void moxart_dma_complete(void *param) 251 + { 252 + struct moxart_host *host = param; 253 + 254 + complete(&host->dma_complete); 255 + } 256 + 257 + static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host) 258 + { 259 + u32 len, dir_data, dir_slave; 260 + unsigned long dma_time; 261 + struct dma_async_tx_descriptor *desc = NULL; 262 + struct dma_chan *dma_chan; 263 + 264 + if (host->data_len == data->bytes_xfered) 265 + return; 266 + 267 + if (data->flags & MMC_DATA_WRITE) { 268 + dma_chan = host->dma_chan_tx; 269 + dir_data = DMA_TO_DEVICE; 270 + dir_slave = DMA_MEM_TO_DEV; 271 + } else { 272 + dma_chan = host->dma_chan_rx; 273 + dir_data = DMA_FROM_DEVICE; 274 + dir_slave = DMA_DEV_TO_MEM; 275 + } 276 + 277 + len = dma_map_sg(dma_chan->device->dev, data->sg, 278 + data->sg_len, dir_data); 279 + 280 + if (len > 0) { 281 + desc = dmaengine_prep_slave_sg(dma_chan, data->sg, 282 + len, dir_slave, 283 + DMA_PREP_INTERRUPT | 284 + DMA_CTRL_ACK); 285 + } else { 286 + dev_err(mmc_dev(host->mmc), 
"dma_map_sg returned zero length\n"); 287 + } 288 + 289 + if (desc) { 290 + host->tx_desc = desc; 291 + desc->callback = moxart_dma_complete; 292 + desc->callback_param = host; 293 + dmaengine_submit(desc); 294 + dma_async_issue_pending(dma_chan); 295 + } 296 + 297 + data->bytes_xfered += host->data_remain; 298 + 299 + dma_time = wait_for_completion_interruptible_timeout( 300 + &host->dma_complete, host->timeout); 301 + 302 + dma_unmap_sg(dma_chan->device->dev, 303 + data->sg, data->sg_len, 304 + dir_data); 305 + } 306 + 307 + 308 + static void moxart_transfer_pio(struct moxart_host *host) 309 + { 310 + struct mmc_data *data = host->mrq->cmd->data; 311 + u32 *sgp, len = 0, remain, status; 312 + 313 + if (host->data_len == data->bytes_xfered) 314 + return; 315 + 316 + sgp = sg_virt(host->cur_sg); 317 + remain = host->data_remain; 318 + 319 + if (data->flags & MMC_DATA_WRITE) { 320 + while (remain > 0) { 321 + if (moxart_wait_for_status(host, FIFO_URUN, &status) 322 + == -ETIMEDOUT) { 323 + data->error = -ETIMEDOUT; 324 + complete(&host->pio_complete); 325 + return; 326 + } 327 + for (len = 0; len < remain && len < host->fifo_width;) { 328 + iowrite32(*sgp, host->base + REG_DATA_WINDOW); 329 + sgp++; 330 + len += 4; 331 + } 332 + remain -= len; 333 + } 334 + 335 + } else { 336 + while (remain > 0) { 337 + if (moxart_wait_for_status(host, FIFO_ORUN, &status) 338 + == -ETIMEDOUT) { 339 + data->error = -ETIMEDOUT; 340 + complete(&host->pio_complete); 341 + return; 342 + } 343 + for (len = 0; len < remain && len < host->fifo_width;) { 344 + /* SCR data must be read in big endian. 
*/ 345 + if (data->mrq->cmd->opcode == SD_APP_SEND_SCR) 346 + *sgp = ioread32be(host->base + 347 + REG_DATA_WINDOW); 348 + else 349 + *sgp = ioread32(host->base + 350 + REG_DATA_WINDOW); 351 + sgp++; 352 + len += 4; 353 + } 354 + remain -= len; 355 + } 356 + } 357 + 358 + data->bytes_xfered += host->data_remain - remain; 359 + host->data_remain = remain; 360 + 361 + if (host->data_len != data->bytes_xfered) 362 + moxart_next_sg(host); 363 + else 364 + complete(&host->pio_complete); 365 + } 366 + 367 + static void moxart_prepare_data(struct moxart_host *host) 368 + { 369 + struct mmc_data *data = host->mrq->cmd->data; 370 + u32 datactrl; 371 + int blksz_bits; 372 + 373 + if (!data) 374 + return; 375 + 376 + host->data_len = data->blocks * data->blksz; 377 + blksz_bits = ffs(data->blksz) - 1; 378 + BUG_ON(1 << blksz_bits != data->blksz); 379 + 380 + moxart_init_sg(host, data); 381 + 382 + datactrl = DCR_DATA_EN | (blksz_bits & DCR_BLK_SIZE); 383 + 384 + if (data->flags & MMC_DATA_WRITE) 385 + datactrl |= DCR_DATA_WRITE; 386 + 387 + if ((host->data_len > host->fifo_width) && host->have_dma) 388 + datactrl |= DCR_DMA_EN; 389 + 390 + writel(DCR_DATA_FIFO_RESET, host->base + REG_DATA_CONTROL); 391 + writel(MASK_DATA | FIFO_URUN | FIFO_ORUN, host->base + REG_CLEAR); 392 + writel(host->rate, host->base + REG_DATA_TIMER); 393 + writel(host->data_len, host->base + REG_DATA_LENGTH); 394 + writel(datactrl, host->base + REG_DATA_CONTROL); 395 + } 396 + 397 + static void moxart_request(struct mmc_host *mmc, struct mmc_request *mrq) 398 + { 399 + struct moxart_host *host = mmc_priv(mmc); 400 + unsigned long pio_time, flags; 401 + u32 status; 402 + 403 + spin_lock_irqsave(&host->lock, flags); 404 + 405 + init_completion(&host->dma_complete); 406 + init_completion(&host->pio_complete); 407 + 408 + host->mrq = mrq; 409 + 410 + if (readl(host->base + REG_STATUS) & CARD_DETECT) { 411 + mrq->cmd->error = -ETIMEDOUT; 412 + goto request_done; 413 + } 414 + 415 + 
moxart_prepare_data(host); 416 + moxart_send_command(host, host->mrq->cmd); 417 + 418 + if (mrq->cmd->data) { 419 + if ((host->data_len > host->fifo_width) && host->have_dma) { 420 + 421 + writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK); 422 + 423 + spin_unlock_irqrestore(&host->lock, flags); 424 + 425 + moxart_transfer_dma(mrq->cmd->data, host); 426 + 427 + spin_lock_irqsave(&host->lock, flags); 428 + } else { 429 + 430 + writel(MASK_INTR_PIO, host->base + REG_INTERRUPT_MASK); 431 + 432 + spin_unlock_irqrestore(&host->lock, flags); 433 + 434 + /* PIO transfers start from interrupt. */ 435 + pio_time = wait_for_completion_interruptible_timeout( 436 + &host->pio_complete, host->timeout); 437 + 438 + spin_lock_irqsave(&host->lock, flags); 439 + } 440 + 441 + if (host->is_removed) { 442 + dev_err(mmc_dev(host->mmc), "card removed\n"); 443 + mrq->cmd->error = -ETIMEDOUT; 444 + goto request_done; 445 + } 446 + 447 + if (moxart_wait_for_status(host, MASK_DATA, &status) 448 + == -ETIMEDOUT) { 449 + mrq->cmd->data->error = -ETIMEDOUT; 450 + goto request_done; 451 + } 452 + 453 + if (status & DATA_CRC_FAIL) 454 + mrq->cmd->data->error = -ETIMEDOUT; 455 + 456 + if (mrq->cmd->data->stop) 457 + moxart_send_command(host, mrq->cmd->data->stop); 458 + } 459 + 460 + request_done: 461 + spin_unlock_irqrestore(&host->lock, flags); 462 + mmc_request_done(host->mmc, mrq); 463 + } 464 + 465 + static irqreturn_t moxart_irq(int irq, void *devid) 466 + { 467 + struct moxart_host *host = (struct moxart_host *)devid; 468 + u32 status; 469 + unsigned long flags; 470 + 471 + spin_lock_irqsave(&host->lock, flags); 472 + 473 + status = readl(host->base + REG_STATUS); 474 + if (status & CARD_CHANGE) { 475 + host->is_removed = status & CARD_DETECT; 476 + if (host->is_removed && host->have_dma) { 477 + dmaengine_terminate_all(host->dma_chan_tx); 478 + dmaengine_terminate_all(host->dma_chan_rx); 479 + } 480 + host->mrq = NULL; 481 + writel(MASK_INTR_PIO, host->base + REG_CLEAR); 482 + 
writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK); 483 + mmc_detect_change(host->mmc, 0); 484 + } 485 + if (status & (FIFO_ORUN | FIFO_URUN) && host->mrq) 486 + moxart_transfer_pio(host); 487 + 488 + spin_unlock_irqrestore(&host->lock, flags); 489 + 490 + return IRQ_HANDLED; 491 + } 492 + 493 + static void moxart_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 494 + { 495 + struct moxart_host *host = mmc_priv(mmc); 496 + unsigned long flags; 497 + u8 power, div; 498 + u32 ctrl; 499 + 500 + spin_lock_irqsave(&host->lock, flags); 501 + 502 + if (ios->clock) { 503 + for (div = 0; div < CLK_DIV_MASK; ++div) { 504 + if (ios->clock >= host->sysclk / (2 * (div + 1))) 505 + break; 506 + } 507 + ctrl = CLK_SD | div; 508 + host->rate = host->sysclk / (2 * (div + 1)); 509 + if (host->rate > host->sysclk) 510 + ctrl |= CLK_HISPD; 511 + writel(ctrl, host->base + REG_CLOCK_CONTROL); 512 + } 513 + 514 + if (ios->power_mode == MMC_POWER_OFF) { 515 + writel(readl(host->base + REG_POWER_CONTROL) & ~SD_POWER_ON, 516 + host->base + REG_POWER_CONTROL); 517 + } else { 518 + if (ios->vdd < MIN_POWER) 519 + power = 0; 520 + else 521 + power = ios->vdd - MIN_POWER; 522 + 523 + writel(SD_POWER_ON | (u32) power, 524 + host->base + REG_POWER_CONTROL); 525 + } 526 + 527 + switch (ios->bus_width) { 528 + case MMC_BUS_WIDTH_4: 529 + writel(BUS_WIDTH_4, host->base + REG_BUS_WIDTH); 530 + break; 531 + case MMC_BUS_WIDTH_8: 532 + writel(BUS_WIDTH_8, host->base + REG_BUS_WIDTH); 533 + break; 534 + default: 535 + writel(BUS_WIDTH_1, host->base + REG_BUS_WIDTH); 536 + break; 537 + } 538 + 539 + spin_unlock_irqrestore(&host->lock, flags); 540 + } 541 + 542 + 543 + static int moxart_get_ro(struct mmc_host *mmc) 544 + { 545 + struct moxart_host *host = mmc_priv(mmc); 546 + 547 + return !!(readl(host->base + REG_STATUS) & WRITE_PROT); 548 + } 549 + 550 + static struct mmc_host_ops moxart_ops = { 551 + .request = moxart_request, 552 + .set_ios = moxart_set_ios, 553 + .get_ro = moxart_get_ro, 554 + }; 
555 + 556 + static int moxart_probe(struct platform_device *pdev) 557 + { 558 + struct device *dev = &pdev->dev; 559 + struct device_node *node = dev->of_node; 560 + struct resource res_mmc; 561 + struct mmc_host *mmc; 562 + struct moxart_host *host = NULL; 563 + struct dma_slave_config cfg; 564 + struct clk *clk; 565 + void __iomem *reg_mmc; 566 + dma_cap_mask_t mask; 567 + int irq, ret; 568 + u32 i; 569 + 570 + mmc = mmc_alloc_host(sizeof(struct moxart_host), dev); 571 + if (!mmc) { 572 + dev_err(dev, "mmc_alloc_host failed\n"); 573 + ret = -ENOMEM; 574 + goto out; 575 + } 576 + 577 + ret = of_address_to_resource(node, 0, &res_mmc); 578 + if (ret) { 579 + dev_err(dev, "of_address_to_resource failed\n"); 580 + goto out; 581 + } 582 + 583 + irq = irq_of_parse_and_map(node, 0); 584 + if (irq <= 0) { 585 + dev_err(dev, "irq_of_parse_and_map failed\n"); 586 + ret = -EINVAL; 587 + goto out; 588 + } 589 + 590 + clk = of_clk_get(node, 0); 591 + if (IS_ERR(clk)) { 592 + dev_err(dev, "of_clk_get failed\n"); 593 + ret = PTR_ERR(clk); 594 + goto out; 595 + } 596 + 597 + reg_mmc = devm_ioremap_resource(dev, &res_mmc); 598 + if (IS_ERR(reg_mmc)) { 599 + ret = PTR_ERR(reg_mmc); 600 + goto out; 601 + } 602 + 603 + mmc_of_parse(mmc); 604 + 605 + dma_cap_zero(mask); 606 + dma_cap_set(DMA_SLAVE, mask); 607 + 608 + host = mmc_priv(mmc); 609 + host->mmc = mmc; 610 + host->base = reg_mmc; 611 + host->reg_phys = res_mmc.start; 612 + host->timeout = msecs_to_jiffies(1000); 613 + host->sysclk = clk_get_rate(clk); 614 + host->fifo_width = readl(host->base + REG_FEATURE) << 2; 615 + host->dma_chan_tx = of_dma_request_slave_channel(node, "tx"); 616 + host->dma_chan_rx = of_dma_request_slave_channel(node, "rx"); 617 + 618 + spin_lock_init(&host->lock); 619 + 620 + mmc->ops = &moxart_ops; 621 + mmc->f_max = DIV_ROUND_CLOSEST(host->sysclk, 2); 622 + mmc->f_min = DIV_ROUND_CLOSEST(host->sysclk, CLK_DIV_MASK * 2); 623 + mmc->ocr_avail = 0xffff00; /* Support 2.0v - 3.6v power. 
*/ 624 + 625 + if (IS_ERR(host->dma_chan_tx) || IS_ERR(host->dma_chan_rx)) { 626 + dev_dbg(dev, "PIO mode transfer enabled\n"); 627 + host->have_dma = false; 628 + } else { 629 + dev_dbg(dev, "DMA channels found (%p,%p)\n", 630 + host->dma_chan_tx, host->dma_chan_rx); 631 + host->have_dma = true; 632 + 633 + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 634 + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 635 + 636 + cfg.direction = DMA_MEM_TO_DEV; 637 + cfg.src_addr = 0; 638 + cfg.dst_addr = host->reg_phys + REG_DATA_WINDOW; 639 + dmaengine_slave_config(host->dma_chan_tx, &cfg); 640 + 641 + cfg.direction = DMA_DEV_TO_MEM; 642 + cfg.src_addr = host->reg_phys + REG_DATA_WINDOW; 643 + cfg.dst_addr = 0; 644 + dmaengine_slave_config(host->dma_chan_rx, &cfg); 645 + } 646 + 647 + switch ((readl(host->base + REG_BUS_WIDTH) >> 3) & 3) { 648 + case 1: 649 + mmc->caps |= MMC_CAP_4_BIT_DATA; 650 + break; 651 + case 2: 652 + mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA; 653 + break; 654 + default: 655 + break; 656 + } 657 + 658 + writel(0, host->base + REG_INTERRUPT_MASK); 659 + 660 + writel(CMD_SDC_RESET, host->base + REG_COMMAND); 661 + for (i = 0; i < MAX_RETRIES; i++) { 662 + if (!(readl(host->base + REG_COMMAND) & CMD_SDC_RESET)) 663 + break; 664 + udelay(5); 665 + } 666 + 667 + ret = devm_request_irq(dev, irq, moxart_irq, 0, "moxart-mmc", host); 668 + if (ret) 669 + goto out; 670 + 671 + dev_set_drvdata(dev, mmc); 672 + mmc_add_host(mmc); 673 + 674 + dev_dbg(dev, "IRQ=%d, FIFO is %d bytes\n", irq, host->fifo_width); 675 + 676 + return 0; 677 + 678 + out: 679 + if (mmc) 680 + mmc_free_host(mmc); 681 + return ret; 682 + } 683 + 684 + static int moxart_remove(struct platform_device *pdev) 685 + { 686 + struct mmc_host *mmc = dev_get_drvdata(&pdev->dev); 687 + struct moxart_host *host = mmc_priv(mmc); 688 + 689 + dev_set_drvdata(&pdev->dev, NULL); 690 + 691 + if (mmc) { 692 + if (!IS_ERR(host->dma_chan_tx)) 693 + dma_release_channel(host->dma_chan_tx); 694 + if 
(!IS_ERR(host->dma_chan_rx)) 695 + dma_release_channel(host->dma_chan_rx); 696 + mmc_remove_host(mmc); 697 + mmc_free_host(mmc); 698 + 699 + writel(0, host->base + REG_INTERRUPT_MASK); 700 + writel(0, host->base + REG_POWER_CONTROL); 701 + writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF, 702 + host->base + REG_CLOCK_CONTROL); 703 + } 704 + 705 + kfree(host); 706 + 707 + return 0; 708 + } 709 + 710 + static const struct of_device_id moxart_mmc_match[] = { 711 + { .compatible = "moxa,moxart-mmc" }, 712 + { .compatible = "faraday,ftsdc010" }, 713 + { } 714 + }; 715 + 716 + static struct platform_driver moxart_mmc_driver = { 717 + .probe = moxart_probe, 718 + .remove = moxart_remove, 719 + .driver = { 720 + .name = "mmc-moxart", 721 + .owner = THIS_MODULE, 722 + .of_match_table = moxart_mmc_match, 723 + }, 724 + }; 725 + module_platform_driver(moxart_mmc_driver); 726 + 727 + MODULE_ALIAS("platform:mmc-moxart"); 728 + MODULE_DESCRIPTION("MOXA ART MMC driver"); 729 + MODULE_LICENSE("GPL v2"); 730 + MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+17 -3
drivers/mmc/host/mvsdio.c
··· 354 354 intr_status, mvsd_read(MVSD_NOR_INTR_EN), 355 355 mvsd_read(MVSD_HW_STATE)); 356 356 357 + /* 358 + * It looks like, SDIO IP can issue one late, spurious irq 359 + * although all irqs should be disabled. To work around this, 360 + * bail out early, if we didn't expect any irqs to occur. 361 + */ 362 + if (!mvsd_read(MVSD_NOR_INTR_EN) && !mvsd_read(MVSD_ERR_INTR_EN)) { 363 + dev_dbg(host->dev, "spurious irq detected intr 0x%04x intr_en 0x%04x erri 0x%04x erri_en 0x%04x\n", 364 + mvsd_read(MVSD_NOR_INTR_STATUS), 365 + mvsd_read(MVSD_NOR_INTR_EN), 366 + mvsd_read(MVSD_ERR_INTR_STATUS), 367 + mvsd_read(MVSD_ERR_INTR_EN)); 368 + return IRQ_HANDLED; 369 + } 370 + 357 371 spin_lock(&host->lock); 358 372 359 373 /* PIO handling, if needed. Messy business... */ ··· 815 801 goto out; 816 802 817 803 if (!(mmc->caps & MMC_CAP_NEEDS_POLL)) 818 - dev_notice(&pdev->dev, "using GPIO for card detection\n"); 804 + dev_dbg(&pdev->dev, "using GPIO for card detection\n"); 819 805 else 820 - dev_notice(&pdev->dev, 821 - "lacking card detect (fall back to polling)\n"); 806 + dev_dbg(&pdev->dev, "lacking card detect (fall back to polling)\n"); 807 + 822 808 return 0; 823 809 824 810 out:
+49 -91
drivers/mmc/host/mxcmmc.c
··· 124 124 125 125 struct mxcmci_host { 126 126 struct mmc_host *mmc; 127 - struct resource *res; 128 127 void __iomem *base; 129 - int irq; 128 + dma_addr_t phys_base; 130 129 int detect_irq; 131 130 struct dma_chan *dma; 132 131 struct dma_async_tx_descriptor *desc; ··· 152 153 153 154 struct work_struct datawork; 154 155 spinlock_t lock; 155 - 156 - struct regulator *vcc; 157 156 158 157 int burstlen; 159 158 int dmareq; ··· 238 241 239 242 static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios); 240 243 241 - static inline void mxcmci_init_ocr(struct mxcmci_host *host) 244 + static void mxcmci_set_power(struct mxcmci_host *host, unsigned int vdd) 242 245 { 243 - host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc"); 244 - 245 - if (IS_ERR(host->vcc)) { 246 - host->vcc = NULL; 247 - } else { 248 - host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc); 249 - if (host->pdata && host->pdata->ocr_avail) 250 - dev_warn(mmc_dev(host->mmc), 251 - "pdata->ocr_avail will not be used\n"); 252 - } 253 - 254 - if (host->vcc == NULL) { 255 - /* fall-back to platform data */ 256 - if (host->pdata && host->pdata->ocr_avail) 257 - host->mmc->ocr_avail = host->pdata->ocr_avail; 258 - else 259 - host->mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 260 - } 261 - } 262 - 263 - static inline void mxcmci_set_power(struct mxcmci_host *host, 264 - unsigned char power_mode, 265 - unsigned int vdd) 266 - { 267 - if (host->vcc) { 268 - if (power_mode == MMC_POWER_UP) 269 - mmc_regulator_set_ocr(host->mmc, host->vcc, vdd); 270 - else if (power_mode == MMC_POWER_OFF) 271 - mmc_regulator_set_ocr(host->mmc, host->vcc, 0); 246 + if (!IS_ERR(host->mmc->supply.vmmc)) { 247 + if (host->power_mode == MMC_POWER_UP) 248 + mmc_regulator_set_ocr(host->mmc, 249 + host->mmc->supply.vmmc, vdd); 250 + else if (host->power_mode == MMC_POWER_OFF) 251 + mmc_regulator_set_ocr(host->mmc, 252 + host->mmc->supply.vmmc, 0); 272 253 } 273 254 274 255 if (host->pdata && 
host->pdata->setpower) ··· 274 299 275 300 mxcmci_writew(host, 0xff, MMC_REG_RES_TO); 276 301 } 277 - static int mxcmci_setup_dma(struct mmc_host *mmc); 278 302 279 303 #if IS_ENABLED(CONFIG_PPC_MPC512x) 280 304 static inline void buffer_swap32(u32 *buf, int len) ··· 842 868 struct mxcmci_host *host = mmc_priv(mmc); 843 869 struct dma_slave_config *config = &host->dma_slave_config; 844 870 845 - config->dst_addr = host->res->start + MMC_REG_BUFFER_ACCESS; 846 - config->src_addr = host->res->start + MMC_REG_BUFFER_ACCESS; 871 + config->dst_addr = host->phys_base + MMC_REG_BUFFER_ACCESS; 872 + config->src_addr = host->phys_base + MMC_REG_BUFFER_ACCESS; 847 873 config->dst_addr_width = 4; 848 874 config->src_addr_width = 4; 849 875 config->dst_maxburst = host->burstlen; ··· 885 911 host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4; 886 912 887 913 if (host->power_mode != ios->power_mode) { 888 - mxcmci_set_power(host, ios->power_mode, ios->vdd); 889 914 host->power_mode = ios->power_mode; 915 + mxcmci_set_power(host, ios->vdd); 890 916 891 917 if (ios->power_mode == MMC_POWER_ON) 892 918 host->cmdat |= CMD_DAT_CONT_INIT; ··· 1014 1040 static int mxcmci_probe(struct platform_device *pdev) 1015 1041 { 1016 1042 struct mmc_host *mmc; 1017 - struct mxcmci_host *host = NULL; 1018 - struct resource *iores, *r; 1043 + struct mxcmci_host *host; 1044 + struct resource *res; 1019 1045 int ret = 0, irq; 1020 1046 bool dat3_card_detect = false; 1021 1047 dma_cap_mask_t mask; ··· 1026 1052 1027 1053 of_id = of_match_device(mxcmci_of_match, &pdev->dev); 1028 1054 1029 - iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1055 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1030 1056 irq = platform_get_irq(pdev, 0); 1031 - if (!iores || irq < 0) 1057 + if (irq < 0) 1032 1058 return -EINVAL; 1033 1059 1034 - r = request_mem_region(iores->start, resource_size(iores), pdev->name); 1035 - if (!r) 1036 - return -EBUSY; 1060 + mmc = mmc_alloc_host(sizeof(*host), &pdev->dev); 1061 + if 
(!mmc) 1062 + return -ENOMEM; 1037 1063 1038 - mmc = mmc_alloc_host(sizeof(struct mxcmci_host), &pdev->dev); 1039 - if (!mmc) { 1040 - ret = -ENOMEM; 1041 - goto out_release_mem; 1064 + host = mmc_priv(mmc); 1065 + 1066 + host->base = devm_ioremap_resource(&pdev->dev, res); 1067 + if (IS_ERR(host->base)) { 1068 + ret = PTR_ERR(host->base); 1069 + goto out_free; 1042 1070 } 1071 + 1072 + host->phys_base = res->start; 1043 1073 1044 1074 ret = mmc_of_parse(mmc); 1045 1075 if (ret) ··· 1061 1083 mmc->max_blk_count = 65535; 1062 1084 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1063 1085 mmc->max_seg_size = mmc->max_req_size; 1064 - 1065 - host = mmc_priv(mmc); 1066 - host->base = ioremap(r->start, resource_size(r)); 1067 - if (!host->base) { 1068 - ret = -ENOMEM; 1069 - goto out_free; 1070 - } 1071 1086 1072 1087 if (of_id) { 1073 1088 const struct platform_device_id *id_entry = of_id->data; ··· 1083 1112 && !of_property_read_bool(pdev->dev.of_node, "cd-gpios")) 1084 1113 dat3_card_detect = true; 1085 1114 1086 - mxcmci_init_ocr(host); 1115 + ret = mmc_regulator_get_supply(mmc); 1116 + if (ret) { 1117 + if (pdata && ret != -EPROBE_DEFER) 1118 + mmc->ocr_avail = pdata->ocr_avail ? 
: 1119 + MMC_VDD_32_33 | MMC_VDD_33_34; 1120 + else 1121 + goto out_free; 1122 + } 1087 1123 1088 1124 if (dat3_card_detect) 1089 1125 host->default_irq_mask = ··· 1098 1120 else 1099 1121 host->default_irq_mask = 0; 1100 1122 1101 - host->res = r; 1102 - host->irq = irq; 1103 - 1104 1123 host->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); 1105 1124 if (IS_ERR(host->clk_ipg)) { 1106 1125 ret = PTR_ERR(host->clk_ipg); 1107 - goto out_iounmap; 1126 + goto out_free; 1108 1127 } 1109 1128 1110 1129 host->clk_per = devm_clk_get(&pdev->dev, "per"); 1111 1130 if (IS_ERR(host->clk_per)) { 1112 1131 ret = PTR_ERR(host->clk_per); 1113 - goto out_iounmap; 1132 + goto out_free; 1114 1133 } 1115 1134 1116 1135 clk_prepare_enable(host->clk_per); ··· 1134 1159 if (!host->pdata) { 1135 1160 host->dma = dma_request_slave_channel(&pdev->dev, "rx-tx"); 1136 1161 } else { 1137 - r = platform_get_resource(pdev, IORESOURCE_DMA, 0); 1138 - if (r) { 1139 - host->dmareq = r->start; 1162 + res = platform_get_resource(pdev, IORESOURCE_DMA, 0); 1163 + if (res) { 1164 + host->dmareq = res->start; 1140 1165 host->dma_data.peripheral_type = IMX_DMATYPE_SDHC; 1141 1166 host->dma_data.priority = DMA_PRIO_LOW; 1142 1167 host->dma_data.dma_request = host->dmareq; ··· 1153 1178 1154 1179 INIT_WORK(&host->datawork, mxcmci_datawork); 1155 1180 1156 - ret = request_irq(host->irq, mxcmci_irq, 0, DRIVER_NAME, host); 1181 + ret = devm_request_irq(&pdev->dev, irq, mxcmci_irq, 0, 1182 + dev_name(&pdev->dev), host); 1157 1183 if (ret) 1158 1184 goto out_free_dma; 1159 1185 ··· 1164 1188 ret = host->pdata->init(&pdev->dev, mxcmci_detect_irq, 1165 1189 host->mmc); 1166 1190 if (ret) 1167 - goto out_free_irq; 1191 + goto out_free_dma; 1168 1192 } 1169 1193 1170 1194 init_timer(&host->watchdog); ··· 1175 1199 1176 1200 return 0; 1177 1201 1178 - out_free_irq: 1179 - free_irq(host->irq, host); 1180 1202 out_free_dma: 1181 1203 if (host->dma) 1182 1204 dma_release_channel(host->dma); 1205 + 1183 1206 out_clk_put: 1184 
1207 clk_disable_unprepare(host->clk_per); 1185 1208 clk_disable_unprepare(host->clk_ipg); 1186 - out_iounmap: 1187 - iounmap(host->base); 1209 + 1188 1210 out_free: 1189 1211 mmc_free_host(mmc); 1190 - out_release_mem: 1191 - release_mem_region(iores->start, resource_size(iores)); 1212 + 1192 1213 return ret; 1193 1214 } 1194 1215 ··· 1196 1223 1197 1224 mmc_remove_host(mmc); 1198 1225 1199 - if (host->vcc) 1200 - regulator_put(host->vcc); 1201 - 1202 1226 if (host->pdata && host->pdata->exit) 1203 1227 host->pdata->exit(&pdev->dev, mmc); 1204 - 1205 - free_irq(host->irq, host); 1206 - iounmap(host->base); 1207 1228 1208 1229 if (host->dma) 1209 1230 dma_release_channel(host->dma); ··· 1205 1238 clk_disable_unprepare(host->clk_per); 1206 1239 clk_disable_unprepare(host->clk_ipg); 1207 1240 1208 - release_mem_region(host->res->start, resource_size(host->res)); 1209 - 1210 1241 mmc_free_host(mmc); 1211 1242 1212 1243 return 0; 1213 1244 } 1214 1245 1215 - #ifdef CONFIG_PM 1216 - static int mxcmci_suspend(struct device *dev) 1246 + static int __maybe_unused mxcmci_suspend(struct device *dev) 1217 1247 { 1218 1248 struct mmc_host *mmc = dev_get_drvdata(dev); 1219 1249 struct mxcmci_host *host = mmc_priv(mmc); ··· 1220 1256 return 0; 1221 1257 } 1222 1258 1223 - static int mxcmci_resume(struct device *dev) 1259 + static int __maybe_unused mxcmci_resume(struct device *dev) 1224 1260 { 1225 1261 struct mmc_host *mmc = dev_get_drvdata(dev); 1226 1262 struct mxcmci_host *host = mmc_priv(mmc); ··· 1230 1266 return 0; 1231 1267 } 1232 1268 1233 - static const struct dev_pm_ops mxcmci_pm_ops = { 1234 - .suspend = mxcmci_suspend, 1235 - .resume = mxcmci_resume, 1236 - }; 1237 - #endif 1269 + static SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume); 1238 1270 1239 1271 static struct platform_driver mxcmci_driver = { 1240 1272 .probe = mxcmci_probe, ··· 1239 1279 .driver = { 1240 1280 .name = DRIVER_NAME, 1241 1281 .owner = THIS_MODULE, 1242 - #ifdef CONFIG_PM 1243 
1282 .pm = &mxcmci_pm_ops, 1244 - #endif 1245 1283 .of_match_table = mxcmci_of_match, 1246 1284 } 1247 1285 };
+7
drivers/mmc/host/mxs-mmc.c
··· 70 70 unsigned char bus_width; 71 71 spinlock_t lock; 72 72 int sdio_irq_en; 73 + bool broken_cd; 73 74 }; 74 75 75 76 static int mxs_mmc_get_cd(struct mmc_host *mmc) ··· 78 77 struct mxs_mmc_host *host = mmc_priv(mmc); 79 78 struct mxs_ssp *ssp = &host->ssp; 80 79 int present, ret; 80 + 81 + if (host->broken_cd) 82 + return -ENOSYS; 81 83 82 84 ret = mmc_gpio_get_cd(mmc); 83 85 if (ret >= 0) ··· 572 568 { 573 569 const struct of_device_id *of_id = 574 570 of_match_device(mxs_mmc_dt_ids, &pdev->dev); 571 + struct device_node *np = pdev->dev.of_node; 575 572 struct mxs_mmc_host *host; 576 573 struct mmc_host *mmc; 577 574 struct resource *iores; ··· 638 633 mmc->ops = &mxs_mmc_ops; 639 634 mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED | 640 635 MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL; 636 + 637 + host->broken_cd = of_property_read_bool(np, "broken-cd"); 641 638 642 639 mmc->f_min = 400000; 643 640 mmc->f_max = 288000000;
+5 -5
drivers/mmc/host/omap.c
··· 177 177 unsigned long tick_ns; 178 178 179 179 if (slot != NULL && slot->host->fclk_enabled && slot->fclk_freq > 0) { 180 - tick_ns = (1000000000 + slot->fclk_freq - 1) / slot->fclk_freq; 180 + tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, slot->fclk_freq); 181 181 ndelay(8 * tick_ns); 182 182 } 183 183 } ··· 435 435 struct mmc_data *data = host->stop_data; 436 436 unsigned long tick_ns; 437 437 438 - tick_ns = (1000000000 + slot->fclk_freq - 1)/slot->fclk_freq; 438 + tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, slot->fclk_freq); 439 439 ndelay(8*tick_ns); 440 440 441 441 mmc_omap_start_command(host, data->stop); ··· 477 477 u16 stat = 0; 478 478 479 479 /* Sending abort takes 80 clocks. Have some extra and round up */ 480 - timeout = (120*1000000 + slot->fclk_freq - 1)/slot->fclk_freq; 480 + timeout = DIV_ROUND_UP(120 * USEC_PER_SEC, slot->fclk_freq); 481 481 restarts = 0; 482 482 while (restarts < maxloops) { 483 483 OMAP_MMC_WRITE(host, STAT, 0xFFFF); ··· 677 677 if (n > host->buffer_bytes_left) 678 678 n = host->buffer_bytes_left; 679 679 680 - nwords = n / 2; 681 - nwords += n & 1; /* handle odd number of bytes to transfer */ 680 + /* Round up to handle odd number of bytes to transfer */ 681 + nwords = DIV_ROUND_UP(n, 2); 682 682 683 683 host->buffer_bytes_left -= n; 684 684 host->total_bytes_left -= n;
+20 -41
drivers/mmc/host/omap_hsmmc.c
··· 31 31 #include <linux/of.h> 32 32 #include <linux/of_gpio.h> 33 33 #include <linux/of_device.h> 34 - #include <linux/omap-dma.h> 34 + #include <linux/omap-dmaengine.h> 35 35 #include <linux/mmc/host.h> 36 36 #include <linux/mmc/core.h> 37 37 #include <linux/mmc/mmc.h> ··· 582 582 * - MMC/SD clock coming out of controller > 25MHz 583 583 */ 584 584 if ((mmc_slot(host).features & HSMMC_HAS_HSPE_SUPPORT) && 585 - (ios->timing != MMC_TIMING_UHS_DDR50) && 585 + (ios->timing != MMC_TIMING_MMC_DDR52) && 586 586 ((OMAP_HSMMC_READ(host->base, CAPA) & HSS) == HSS)) { 587 587 regval = OMAP_HSMMC_READ(host->base, HCTL); 588 588 if (clkdiv && (clk_get_rate(host->fclk)/clkdiv) > 25000000) ··· 602 602 u32 con; 603 603 604 604 con = OMAP_HSMMC_READ(host->base, CON); 605 - if (ios->timing == MMC_TIMING_UHS_DDR50) 605 + if (ios->timing == MMC_TIMING_MMC_DDR52) 606 606 con |= DDR; /* configure in DDR mode */ 607 607 else 608 608 con &= ~DDR; ··· 920 920 static void 921 921 omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd) 922 922 { 923 - host->cmd = NULL; 924 - 925 923 if (host->mrq->sbc && (host->cmd == host->mrq->sbc) && 926 924 !host->mrq->sbc->error && !(host->flags & AUTO_CMD23)) { 925 + host->cmd = NULL; 927 926 omap_hsmmc_start_dma_transfer(host); 928 927 omap_hsmmc_start_command(host, host->mrq->cmd, 929 928 host->mrq->data); 930 929 return; 931 930 } 931 + 932 + host->cmd = NULL; 932 933 933 934 if (cmd->flags & MMC_RSP_PRESENT) { 934 935 if (cmd->flags & MMC_RSP_136) { ··· 1852 1851 unsigned tx_req, rx_req; 1853 1852 struct pinctrl *pinctrl; 1854 1853 const struct omap_mmc_of_data *data; 1854 + void __iomem *base; 1855 1855 1856 1856 match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev); 1857 1857 if (match) { ··· 1883 1881 if (res == NULL || irq < 0) 1884 1882 return -ENXIO; 1885 1883 1886 - res = request_mem_region(res->start, resource_size(res), pdev->name); 1887 - if (res == NULL) 1888 - return -EBUSY; 1884 + base = 
devm_ioremap_resource(&pdev->dev, res); 1885 + if (IS_ERR(base)) 1886 + return PTR_ERR(base); 1889 1887 1890 1888 ret = omap_hsmmc_gpio_init(pdata); 1891 1889 if (ret) ··· 1906 1904 host->irq = irq; 1907 1905 host->slot_id = 0; 1908 1906 host->mapbase = res->start + pdata->reg_offset; 1909 - host->base = ioremap(host->mapbase, SZ_4K); 1907 + host->base = base + pdata->reg_offset; 1910 1908 host->power_mode = MMC_POWER_OFF; 1911 1909 host->next_data.cookie = 1; 1912 1910 host->pbias_enabled = 0; ··· 1924 1922 1925 1923 spin_lock_init(&host->irq_lock); 1926 1924 1927 - host->fclk = clk_get(&pdev->dev, "fck"); 1925 + host->fclk = devm_clk_get(&pdev->dev, "fck"); 1928 1926 if (IS_ERR(host->fclk)) { 1929 1927 ret = PTR_ERR(host->fclk); 1930 1928 host->fclk = NULL; ··· 1943 1941 1944 1942 omap_hsmmc_context_save(host); 1945 1943 1946 - host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck"); 1944 + host->dbclk = devm_clk_get(&pdev->dev, "mmchsdb_fck"); 1947 1945 /* 1948 1946 * MMC can still work without debounce clock. 
1949 1947 */ ··· 1951 1949 host->dbclk = NULL; 1952 1950 } else if (clk_prepare_enable(host->dbclk) != 0) { 1953 1951 dev_warn(mmc_dev(host->mmc), "Failed to enable debounce clk\n"); 1954 - clk_put(host->dbclk); 1955 1952 host->dbclk = NULL; 1956 1953 } 1957 1954 ··· 2019 2018 } 2020 2019 2021 2020 /* Request IRQ for MMC operations */ 2022 - ret = request_irq(host->irq, omap_hsmmc_irq, 0, 2021 + ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0, 2023 2022 mmc_hostname(mmc), host); 2024 2023 if (ret) { 2025 2024 dev_err(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n"); ··· 2030 2029 if (pdata->init(&pdev->dev) != 0) { 2031 2030 dev_err(mmc_dev(host->mmc), 2032 2031 "Unable to configure MMC IRQs\n"); 2033 - goto err_irq_cd_init; 2032 + goto err_irq; 2034 2033 } 2035 2034 } 2036 2035 ··· 2045 2044 2046 2045 /* Request IRQ for card detect */ 2047 2046 if ((mmc_slot(host).card_detect_irq)) { 2048 - ret = request_threaded_irq(mmc_slot(host).card_detect_irq, 2049 - NULL, 2050 - omap_hsmmc_detect, 2047 + ret = devm_request_threaded_irq(&pdev->dev, 2048 + mmc_slot(host).card_detect_irq, 2049 + NULL, omap_hsmmc_detect, 2051 2050 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 2052 2051 mmc_hostname(mmc), host); 2053 2052 if (ret) { ··· 2090 2089 2091 2090 err_slot_name: 2092 2091 mmc_remove_host(mmc); 2093 - free_irq(mmc_slot(host).card_detect_irq, host); 2094 2092 err_irq_cd: 2095 2093 if (host->use_reg) 2096 2094 omap_hsmmc_reg_put(host); 2097 2095 err_reg: 2098 2096 if (host->pdata->cleanup) 2099 2097 host->pdata->cleanup(&pdev->dev); 2100 - err_irq_cd_init: 2101 - free_irq(host->irq, host); 2102 2098 err_irq: 2103 2099 if (host->tx_chan) 2104 2100 dma_release_channel(host->tx_chan); ··· 2103 2105 dma_release_channel(host->rx_chan); 2104 2106 pm_runtime_put_sync(host->dev); 2105 2107 pm_runtime_disable(host->dev); 2106 - clk_put(host->fclk); 2107 - if (host->dbclk) { 2108 + if (host->dbclk) 2108 2109 clk_disable_unprepare(host->dbclk); 2109 - 
clk_put(host->dbclk); 2110 - } 2111 2110 err1: 2112 - iounmap(host->base); 2113 2111 mmc_free_host(mmc); 2114 2112 err_alloc: 2115 2113 omap_hsmmc_gpio_free(pdata); 2116 2114 err: 2117 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2118 - if (res) 2119 - release_mem_region(res->start, resource_size(res)); 2120 2115 return ret; 2121 2116 } 2122 2117 2123 2118 static int omap_hsmmc_remove(struct platform_device *pdev) 2124 2119 { 2125 2120 struct omap_hsmmc_host *host = platform_get_drvdata(pdev); 2126 - struct resource *res; 2127 2121 2128 2122 pm_runtime_get_sync(host->dev); 2129 2123 mmc_remove_host(host->mmc); ··· 2123 2133 omap_hsmmc_reg_put(host); 2124 2134 if (host->pdata->cleanup) 2125 2135 host->pdata->cleanup(&pdev->dev); 2126 - free_irq(host->irq, host); 2127 - if (mmc_slot(host).card_detect_irq) 2128 - free_irq(mmc_slot(host).card_detect_irq, host); 2129 2136 2130 2137 if (host->tx_chan) 2131 2138 dma_release_channel(host->tx_chan); ··· 2131 2144 2132 2145 pm_runtime_put_sync(host->dev); 2133 2146 pm_runtime_disable(host->dev); 2134 - clk_put(host->fclk); 2135 - if (host->dbclk) { 2147 + if (host->dbclk) 2136 2148 clk_disable_unprepare(host->dbclk); 2137 - clk_put(host->dbclk); 2138 - } 2139 2149 2140 2150 omap_hsmmc_gpio_free(host->pdata); 2141 - iounmap(host->base); 2142 2151 mmc_free_host(host->mmc); 2143 - 2144 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2145 - if (res) 2146 - release_mem_region(res->start, resource_size(res)); 2147 2152 2148 2153 return 0; 2149 2154 }
+5
drivers/mmc/host/rtsx_pci_sdmmc.c
··· 236 236 case MMC_RSP_R1: 237 237 rsp_type = SD_RSP_TYPE_R1; 238 238 break; 239 + case MMC_RSP_R1 & ~MMC_RSP_CRC: 240 + rsp_type = SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7; 241 + break; 239 242 case MMC_RSP_R1B: 240 243 rsp_type = SD_RSP_TYPE_R1b; 241 244 break; ··· 819 816 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, 0); 820 817 break; 821 818 819 + case MMC_TIMING_MMC_DDR52: 822 820 case MMC_TIMING_UHS_DDR50: 823 821 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1, 824 822 0x0C | SD_ASYNC_FIFO_NOT_RST, ··· 900 896 host->vpclk = true; 901 897 host->double_clk = false; 902 898 break; 899 + case MMC_TIMING_MMC_DDR52: 903 900 case MMC_TIMING_UHS_DDR50: 904 901 case MMC_TIMING_UHS_SDR25: 905 902 host->ssc_depth = RTSX_SSC_DEPTH_1M;
+3 -2
drivers/mmc/host/rtsx_usb_sdmmc.c
··· 34 34 #include <linux/mfd/rtsx_usb.h> 35 35 #include <asm/unaligned.h> 36 36 37 - #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) 37 + #if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \ 38 + defined(CONFIG_MMC_REALTEK_USB_MODULE)) 38 39 #include <linux/leds.h> 39 40 #include <linux/workqueue.h> 40 41 #define RTSX_USB_USE_LEDS_CLASS ··· 60 59 61 60 unsigned char power_mode; 62 61 63 - #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) 62 + #ifdef RTSX_USB_USE_LEDS_CLASS 64 63 struct led_classdev led; 65 64 char led_name[32]; 66 65 struct work_struct led_work;
+8
drivers/mmc/host/sdhci-acpi.c
··· 102 102 } 103 103 104 104 static const struct sdhci_ops sdhci_acpi_ops_dflt = { 105 + .set_clock = sdhci_set_clock, 105 106 .enable_dma = sdhci_acpi_enable_dma, 107 + .set_bus_width = sdhci_set_bus_width, 108 + .reset = sdhci_reset, 109 + .set_uhs_signaling = sdhci_set_uhs_signaling, 106 110 }; 107 111 108 112 static const struct sdhci_ops sdhci_acpi_ops_int = { 113 + .set_clock = sdhci_set_clock, 109 114 .enable_dma = sdhci_acpi_enable_dma, 115 + .set_bus_width = sdhci_set_bus_width, 116 + .reset = sdhci_reset, 117 + .set_uhs_signaling = sdhci_set_uhs_signaling, 110 118 .hw_reset = sdhci_acpi_int_hw_reset, 111 119 }; 112 120
+4
drivers/mmc/host/sdhci-bcm-kona.c
··· 206 206 } 207 207 208 208 static struct sdhci_ops sdhci_bcm_kona_ops = { 209 + .set_clock = sdhci_set_clock, 209 210 .get_max_clock = sdhci_bcm_kona_get_max_clk, 210 211 .get_timeout_clock = sdhci_bcm_kona_get_timeout_clock, 211 212 .platform_send_init_74_clocks = sdhci_bcm_kona_init_74_clocks, 213 + .set_bus_width = sdhci_set_bus_width, 214 + .reset = sdhci_reset, 215 + .set_uhs_signaling = sdhci_set_uhs_signaling, 212 216 .card_event = sdhci_bcm_kona_card_event, 213 217 }; 214 218
+4
drivers/mmc/host/sdhci-bcm2835.c
··· 131 131 .read_l = bcm2835_sdhci_readl, 132 132 .read_w = bcm2835_sdhci_readw, 133 133 .read_b = bcm2835_sdhci_readb, 134 + .set_clock = sdhci_set_clock, 134 135 .get_max_clock = sdhci_pltfm_clk_get_max_clock, 135 136 .get_min_clock = bcm2835_sdhci_get_min_clock, 137 + .set_bus_width = sdhci_set_bus_width, 138 + .reset = sdhci_reset, 139 + .set_uhs_signaling = sdhci_set_uhs_signaling, 136 140 }; 137 141 138 142 static const struct sdhci_pltfm_data bcm2835_sdhci_pdata = {
+6 -7
drivers/mmc/host/sdhci-cns3xxx.c
··· 30 30 u16 clk; 31 31 unsigned long timeout; 32 32 33 - if (clock == host->clock) 34 - return; 33 + host->mmc->actual_clock = 0; 35 34 36 35 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); 37 36 38 37 if (clock == 0) 39 - goto out; 38 + return; 40 39 41 40 while (host->max_clk / div > clock) { 42 41 /* ··· 74 75 75 76 clk |= SDHCI_CLOCK_CARD_EN; 76 77 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 77 - out: 78 - host->clock = clock; 79 78 } 80 79 81 80 static const struct sdhci_ops sdhci_cns3xxx_ops = { 82 81 .get_max_clock = sdhci_cns3xxx_get_max_clk, 83 82 .set_clock = sdhci_cns3xxx_set_clock, 83 + .set_bus_width = sdhci_set_bus_width, 84 + .reset = sdhci_reset, 85 + .set_uhs_signaling = sdhci_set_uhs_signaling, 84 86 }; 85 87 86 88 static const struct sdhci_pltfm_data sdhci_cns3xxx_pdata = { ··· 90 90 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | 91 91 SDHCI_QUIRK_INVERTED_WRITE_PROTECT | 92 92 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | 93 - SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | 94 - SDHCI_QUIRK_NONSTANDARD_CLOCK, 93 + SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, 95 94 }; 96 95 97 96 static int sdhci_cns3xxx_probe(struct platform_device *pdev)
+9 -69
drivers/mmc/host/sdhci-dove.c
··· 21 21 22 22 #include <linux/clk.h> 23 23 #include <linux/err.h> 24 - #include <linux/gpio.h> 25 24 #include <linux/io.h> 26 25 #include <linux/mmc/host.h> 27 26 #include <linux/module.h> 28 27 #include <linux/of.h> 29 - #include <linux/of_gpio.h> 30 28 31 29 #include "sdhci-pltfm.h" 32 30 33 31 struct sdhci_dove_priv { 34 32 struct clk *clk; 35 - int gpio_cd; 36 33 }; 37 - 38 - static irqreturn_t sdhci_dove_carddetect_irq(int irq, void *data) 39 - { 40 - struct sdhci_host *host = data; 41 - 42 - tasklet_schedule(&host->card_tasklet); 43 - return IRQ_HANDLED; 44 - } 45 34 46 35 static u16 sdhci_dove_readw(struct sdhci_host *host, int reg) 47 36 { ··· 49 60 50 61 static u32 sdhci_dove_readl(struct sdhci_host *host, int reg) 51 62 { 52 - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 53 - struct sdhci_dove_priv *priv = pltfm_host->priv; 54 63 u32 ret; 55 64 56 65 ret = readl(host->ioaddr + reg); ··· 58 71 /* Mask the support for 3.0V */ 59 72 ret &= ~SDHCI_CAN_VDD_300; 60 73 break; 61 - case SDHCI_PRESENT_STATE: 62 - if (gpio_is_valid(priv->gpio_cd)) { 63 - if (gpio_get_value(priv->gpio_cd) == 0) 64 - ret |= SDHCI_CARD_PRESENT; 65 - else 66 - ret &= ~SDHCI_CARD_PRESENT; 67 - } 68 - break; 69 74 } 70 75 return ret; 71 76 } ··· 65 86 static const struct sdhci_ops sdhci_dove_ops = { 66 87 .read_w = sdhci_dove_readw, 67 88 .read_l = sdhci_dove_readl, 89 + .set_clock = sdhci_set_clock, 90 + .set_bus_width = sdhci_set_bus_width, 91 + .reset = sdhci_reset, 92 + .set_uhs_signaling = sdhci_set_uhs_signaling, 68 93 }; 69 94 70 95 static const struct sdhci_pltfm_data sdhci_dove_pdata = { ··· 96 113 97 114 priv->clk = devm_clk_get(&pdev->dev, NULL); 98 115 99 - if (pdev->dev.of_node) { 100 - priv->gpio_cd = of_get_named_gpio(pdev->dev.of_node, 101 - "cd-gpios", 0); 102 - } else { 103 - priv->gpio_cd = -EINVAL; 104 - } 105 - 106 - if (gpio_is_valid(priv->gpio_cd)) { 107 - ret = gpio_request(priv->gpio_cd, "sdhci-cd"); 108 - if (ret) { 109 - dev_err(&pdev->dev, "card 
detect gpio request failed: %d\n", 110 - ret); 111 - return ret; 112 - } 113 - gpio_direction_input(priv->gpio_cd); 114 - } 115 - 116 116 host = sdhci_pltfm_init(pdev, &sdhci_dove_pdata, 0); 117 - if (IS_ERR(host)) { 118 - ret = PTR_ERR(host); 119 - goto err_sdhci_pltfm_init; 120 - } 117 + if (IS_ERR(host)) 118 + return PTR_ERR(host); 121 119 122 120 pltfm_host = sdhci_priv(host); 123 121 pltfm_host->priv = priv; ··· 106 142 if (!IS_ERR(priv->clk)) 107 143 clk_prepare_enable(priv->clk); 108 144 109 - sdhci_get_of_property(pdev); 145 + ret = mmc_of_parse(host->mmc); 146 + if (ret) 147 + goto err_sdhci_add; 110 148 111 149 ret = sdhci_add_host(host); 112 150 if (ret) 113 151 goto err_sdhci_add; 114 152 115 - /* 116 - * We must request the IRQ after sdhci_add_host(), as the tasklet only 117 - * gets setup in sdhci_add_host() and we oops. 118 - */ 119 - if (gpio_is_valid(priv->gpio_cd)) { 120 - ret = request_irq(gpio_to_irq(priv->gpio_cd), 121 - sdhci_dove_carddetect_irq, 122 - IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, 123 - mmc_hostname(host->mmc), host); 124 - if (ret) { 125 - dev_err(&pdev->dev, "card detect irq request failed: %d\n", 126 - ret); 127 - goto err_request_irq; 128 - } 129 - } 130 - 131 153 return 0; 132 154 133 - err_request_irq: 134 - sdhci_remove_host(host, 0); 135 155 err_sdhci_add: 136 156 if (!IS_ERR(priv->clk)) 137 157 clk_disable_unprepare(priv->clk); 138 158 sdhci_pltfm_free(pdev); 139 - err_sdhci_pltfm_init: 140 - if (gpio_is_valid(priv->gpio_cd)) 141 - gpio_free(priv->gpio_cd); 142 159 return ret; 143 160 } 144 161 ··· 130 185 struct sdhci_dove_priv *priv = pltfm_host->priv; 131 186 132 187 sdhci_pltfm_unregister(pdev); 133 - 134 - if (gpio_is_valid(priv->gpio_cd)) { 135 - free_irq(gpio_to_irq(priv->gpio_cd), host); 136 - gpio_free(priv->gpio_cd); 137 - } 138 188 139 189 if (!IS_ERR(priv->clk)) 140 190 clk_disable_unprepare(priv->clk);
+45 -38
drivers/mmc/host/sdhci-esdhc-imx.c
··· 160 160 MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */ 161 161 WAIT_FOR_INT, /* sent CMD12, waiting for response INT */ 162 162 } multiblock_status; 163 - u32 uhs_mode; 164 163 u32 is_ddr; 165 164 }; 166 165 ··· 381 382 if (val & ESDHC_MIX_CTRL_SMPCLK_SEL) 382 383 ret |= SDHCI_CTRL_TUNED_CLK; 383 384 384 - ret |= (imx_data->uhs_mode & SDHCI_CTRL_UHS_MASK); 385 385 ret &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; 386 386 387 387 return ret; ··· 427 429 else 428 430 new_val &= ~ESDHC_VENDOR_SPEC_VSELECT; 429 431 writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC); 430 - imx_data->uhs_mode = val & SDHCI_CTRL_UHS_MASK; 431 432 if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) { 432 433 new_val = readl(host->ioaddr + ESDHC_MIX_CTRL); 433 434 if (val & SDHCI_CTRL_TUNED_CLK) ··· 597 600 u32 temp, val; 598 601 599 602 if (clock == 0) { 603 + host->mmc->actual_clock = 0; 604 + 600 605 if (esdhc_is_usdhc(imx_data)) { 601 606 val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); 602 607 writel(val & ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON, 603 608 host->ioaddr + ESDHC_VENDOR_SPEC); 604 609 } 605 - goto out; 610 + return; 606 611 } 607 612 608 613 if (esdhc_is_usdhc(imx_data) && !imx_data->is_ddr) ··· 644 645 } 645 646 646 647 mdelay(1); 647 - out: 648 - host->clock = clock; 649 648 } 650 649 651 650 static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host) ··· 665 668 return -ENOSYS; 666 669 } 667 670 668 - static int esdhc_pltfm_bus_width(struct sdhci_host *host, int width) 671 + static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) 669 672 { 670 673 u32 ctrl; 671 674 ··· 683 686 684 687 esdhc_clrset_le(host, ESDHC_CTRL_BUSWIDTH_MASK, ctrl, 685 688 SDHCI_HOST_CONTROL); 686 - 687 - return 0; 688 689 } 689 690 690 691 static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val) ··· 692 697 /* FIXME: delay a bit for card to be ready for next tuning due to errors */ 693 698 mdelay(1); 694 699 700 + /* This is balanced by the runtime put in 
sdhci_tasklet_finish */ 695 701 pm_runtime_get_sync(host->mmc->parent); 696 702 reg = readl(host->ioaddr + ESDHC_MIX_CTRL); 697 703 reg |= ESDHC_MIX_CTRL_EXE_TUNE | ESDHC_MIX_CTRL_SMPCLK_SEL | ··· 709 713 complete(&mrq->completion); 710 714 } 711 715 712 - static int esdhc_send_tuning_cmd(struct sdhci_host *host, u32 opcode) 716 + static int esdhc_send_tuning_cmd(struct sdhci_host *host, u32 opcode, 717 + struct scatterlist *sg) 713 718 { 714 719 struct mmc_command cmd = {0}; 715 720 struct mmc_request mrq = {NULL}; 716 721 struct mmc_data data = {0}; 717 - struct scatterlist sg; 718 - char tuning_pattern[ESDHC_TUNING_BLOCK_PATTERN_LEN]; 719 722 720 723 cmd.opcode = opcode; 721 724 cmd.arg = 0; ··· 723 728 data.blksz = ESDHC_TUNING_BLOCK_PATTERN_LEN; 724 729 data.blocks = 1; 725 730 data.flags = MMC_DATA_READ; 726 - data.sg = &sg; 731 + data.sg = sg; 727 732 data.sg_len = 1; 728 - 729 - sg_init_one(&sg, tuning_pattern, sizeof(tuning_pattern)); 730 733 731 734 mrq.cmd = &cmd; 732 735 mrq.cmd->mrq = &mrq; ··· 735 742 mrq.done = esdhc_request_done; 736 743 init_completion(&(mrq.completion)); 737 744 738 - disable_irq(host->irq); 739 - spin_lock(&host->lock); 745 + spin_lock_irq(&host->lock); 740 746 host->mrq = &mrq; 741 747 742 748 sdhci_send_command(host, mrq.cmd); 743 749 744 - spin_unlock(&host->lock); 745 - enable_irq(host->irq); 750 + spin_unlock_irq(&host->lock); 746 751 747 752 wait_for_completion(&mrq.completion); 748 753 ··· 763 772 764 773 static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode) 765 774 { 775 + struct scatterlist sg; 776 + char *tuning_pattern; 766 777 int min, max, avg, ret; 778 + 779 + tuning_pattern = kmalloc(ESDHC_TUNING_BLOCK_PATTERN_LEN, GFP_KERNEL); 780 + if (!tuning_pattern) 781 + return -ENOMEM; 782 + 783 + sg_init_one(&sg, tuning_pattern, ESDHC_TUNING_BLOCK_PATTERN_LEN); 767 784 768 785 /* find the mininum delay first which can pass tuning */ 769 786 min = ESDHC_TUNE_CTRL_MIN; 770 787 while (min < 
ESDHC_TUNE_CTRL_MAX) { 771 788 esdhc_prepare_tuning(host, min); 772 - if (!esdhc_send_tuning_cmd(host, opcode)) 789 + if (!esdhc_send_tuning_cmd(host, opcode, &sg)) 773 790 break; 774 791 min += ESDHC_TUNE_CTRL_STEP; 775 792 } ··· 786 787 max = min + ESDHC_TUNE_CTRL_STEP; 787 788 while (max < ESDHC_TUNE_CTRL_MAX) { 788 789 esdhc_prepare_tuning(host, max); 789 - if (esdhc_send_tuning_cmd(host, opcode)) { 790 + if (esdhc_send_tuning_cmd(host, opcode, &sg)) { 790 791 max -= ESDHC_TUNE_CTRL_STEP; 791 792 break; 792 793 } ··· 796 797 /* use average delay to get the best timing */ 797 798 avg = (min + max) / 2; 798 799 esdhc_prepare_tuning(host, avg); 799 - ret = esdhc_send_tuning_cmd(host, opcode); 800 + ret = esdhc_send_tuning_cmd(host, opcode, &sg); 800 801 esdhc_post_tuning(host); 802 + 803 + kfree(tuning_pattern); 801 804 802 805 dev_dbg(mmc_dev(host->mmc), "tunning %s at 0x%x ret %d\n", 803 806 ret ? "failed" : "passed", avg, ret); ··· 838 837 return pinctrl_select_state(imx_data->pinctrl, pinctrl); 839 838 } 840 839 841 - static int esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs) 840 + static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing) 842 841 { 843 842 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 844 843 struct pltfm_imx_data *imx_data = pltfm_host->priv; 845 844 struct esdhc_platform_data *boarddata = &imx_data->boarddata; 846 845 847 - switch (uhs) { 846 + switch (timing) { 848 847 case MMC_TIMING_UHS_SDR12: 849 - imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR12; 850 - break; 851 848 case MMC_TIMING_UHS_SDR25: 852 - imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR25; 853 - break; 854 849 case MMC_TIMING_UHS_SDR50: 855 - imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR50; 856 - break; 857 850 case MMC_TIMING_UHS_SDR104: 858 851 case MMC_TIMING_MMC_HS200: 859 - imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR104; 860 852 break; 861 853 case MMC_TIMING_UHS_DDR50: 862 - imx_data->uhs_mode = SDHCI_CTRL_UHS_DDR50; 854 + case 
MMC_TIMING_MMC_DDR52: 863 855 writel(readl(host->ioaddr + ESDHC_MIX_CTRL) | 864 856 ESDHC_MIX_CTRL_DDREN, 865 857 host->ioaddr + ESDHC_MIX_CTRL); ··· 869 875 break; 870 876 } 871 877 872 - return esdhc_change_pinstate(host, uhs); 878 + esdhc_change_pinstate(host, timing); 879 + } 880 + 881 + static void esdhc_reset(struct sdhci_host *host, u8 mask) 882 + { 883 + sdhci_reset(host, mask); 884 + 885 + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 886 + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 873 887 } 874 888 875 889 static struct sdhci_ops sdhci_esdhc_ops = { ··· 890 888 .get_max_clock = esdhc_pltfm_get_max_clock, 891 889 .get_min_clock = esdhc_pltfm_get_min_clock, 892 890 .get_ro = esdhc_pltfm_get_ro, 893 - .platform_bus_width = esdhc_pltfm_bus_width, 891 + .set_bus_width = esdhc_pltfm_set_bus_width, 894 892 .set_uhs_signaling = esdhc_set_uhs_signaling, 893 + .reset = esdhc_reset, 895 894 }; 896 895 897 896 static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { ··· 1173 1170 1174 1171 ret = sdhci_runtime_suspend_host(host); 1175 1172 1176 - clk_disable_unprepare(imx_data->clk_per); 1177 - clk_disable_unprepare(imx_data->clk_ipg); 1173 + if (!sdhci_sdio_irq_enabled(host)) { 1174 + clk_disable_unprepare(imx_data->clk_per); 1175 + clk_disable_unprepare(imx_data->clk_ipg); 1176 + } 1178 1177 clk_disable_unprepare(imx_data->clk_ahb); 1179 1178 1180 1179 return ret; ··· 1188 1183 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1189 1184 struct pltfm_imx_data *imx_data = pltfm_host->priv; 1190 1185 1191 - clk_prepare_enable(imx_data->clk_per); 1192 - clk_prepare_enable(imx_data->clk_ipg); 1186 + if (!sdhci_sdio_irq_enabled(host)) { 1187 + clk_prepare_enable(imx_data->clk_per); 1188 + clk_prepare_enable(imx_data->clk_ipg); 1189 + } 1193 1190 clk_prepare_enable(imx_data->clk_ahb); 1194 1191 1195 1192 return sdhci_runtime_resume_host(host);
+1 -3
drivers/mmc/host/sdhci-esdhc.h
··· 20 20 21 21 #define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \ 22 22 SDHCI_QUIRK_NO_BUSY_IRQ | \ 23 - SDHCI_QUIRK_NONSTANDARD_CLOCK | \ 24 23 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \ 25 - SDHCI_QUIRK_PIO_NEEDS_DELAY | \ 26 - SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) 24 + SDHCI_QUIRK_PIO_NEEDS_DELAY) 27 25 28 26 #define ESDHC_SYSTEM_CONTROL 0x2c 29 27 #define ESDHC_CLOCK_MASK 0x0000fff0
+4
drivers/mmc/host/sdhci-of-arasan.c
··· 52 52 } 53 53 54 54 static struct sdhci_ops sdhci_arasan_ops = { 55 + .set_clock = sdhci_set_clock, 55 56 .get_max_clock = sdhci_pltfm_clk_get_max_clock, 56 57 .get_timeout_clock = sdhci_arasan_get_timeout_clock, 58 + .set_bus_width = sdhci_set_bus_width, 59 + .reset = sdhci_reset, 60 + .set_uhs_signaling = sdhci_set_uhs_signaling, 57 61 }; 58 62 59 63 static struct sdhci_pltfm_data sdhci_arasan_pdata = {
+43 -27
drivers/mmc/host/sdhci-of-esdhc.c
··· 199 199 200 200 static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) 201 201 { 202 - 203 202 int pre_div = 2; 204 203 int div = 1; 205 204 u32 temp; 206 205 206 + host->mmc->actual_clock = 0; 207 + 207 208 if (clock == 0) 208 - goto out; 209 + return; 209 210 210 211 /* Workaround to reduce the clock frequency for p1010 esdhc */ 211 212 if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) { ··· 239 238 | (pre_div << ESDHC_PREDIV_SHIFT)); 240 239 sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); 241 240 mdelay(1); 242 - out: 243 - host->clock = clock; 244 241 } 245 - 246 - #ifdef CONFIG_PM 247 - static u32 esdhc_proctl; 248 - static void esdhc_of_suspend(struct sdhci_host *host) 249 - { 250 - esdhc_proctl = sdhci_be32bs_readl(host, SDHCI_HOST_CONTROL); 251 - } 252 - 253 - static void esdhc_of_resume(struct sdhci_host *host) 254 - { 255 - esdhc_of_enable_dma(host); 256 - sdhci_be32bs_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL); 257 - } 258 - #endif 259 242 260 243 static void esdhc_of_platform_init(struct sdhci_host *host) 261 244 { ··· 254 269 host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ; 255 270 } 256 271 257 - static int esdhc_pltfm_bus_width(struct sdhci_host *host, int width) 272 + static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) 258 273 { 259 274 u32 ctrl; 260 275 ··· 274 289 275 290 clrsetbits_be32(host->ioaddr + SDHCI_HOST_CONTROL, 276 291 ESDHC_CTRL_BUSWIDTH_MASK, ctrl); 277 - 278 - return 0; 279 292 } 280 293 281 294 static const struct sdhci_ops sdhci_esdhc_ops = { ··· 288 305 .get_max_clock = esdhc_of_get_max_clock, 289 306 .get_min_clock = esdhc_of_get_min_clock, 290 307 .platform_init = esdhc_of_platform_init, 291 - #ifdef CONFIG_PM 292 - .platform_suspend = esdhc_of_suspend, 293 - .platform_resume = esdhc_of_resume, 294 - #endif 295 308 .adma_workaround = esdhci_of_adma_workaround, 296 - .platform_bus_width = esdhc_pltfm_bus_width, 309 + .set_bus_width = esdhc_pltfm_set_bus_width, 310 + .reset = 
sdhci_reset, 311 + .set_uhs_signaling = sdhci_set_uhs_signaling, 297 312 }; 313 + 314 + #ifdef CONFIG_PM 315 + 316 + static u32 esdhc_proctl; 317 + static int esdhc_of_suspend(struct device *dev) 318 + { 319 + struct sdhci_host *host = dev_get_drvdata(dev); 320 + 321 + esdhc_proctl = sdhci_be32bs_readl(host, SDHCI_HOST_CONTROL); 322 + 323 + return sdhci_suspend_host(host); 324 + } 325 + 326 + static int esdhc_of_resume(struct device *dev) 327 + { 328 + struct sdhci_host *host = dev_get_drvdata(dev); 329 + int ret = sdhci_resume_host(host); 330 + 331 + if (ret == 0) { 332 + /* Isn't this already done by sdhci_resume_host() ? --rmk */ 333 + esdhc_of_enable_dma(host); 334 + sdhci_be32bs_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL); 335 + } 336 + 337 + return ret; 338 + } 339 + 340 + static const struct dev_pm_ops esdhc_pmops = { 341 + .suspend = esdhc_of_suspend, 342 + .resume = esdhc_of_resume, 343 + }; 344 + #define ESDHC_PMOPS (&esdhc_pmops) 345 + #else 346 + #define ESDHC_PMOPS NULL 347 + #endif 298 348 299 349 static const struct sdhci_pltfm_data sdhci_esdhc_pdata = { 300 350 /* ··· 390 374 .name = "sdhci-esdhc", 391 375 .owner = THIS_MODULE, 392 376 .of_match_table = sdhci_esdhc_of_match, 393 - .pm = SDHCI_PLTFM_PMOPS, 377 + .pm = ESDHC_PMOPS, 394 378 }, 395 379 .probe = sdhci_esdhc_probe, 396 380 .remove = sdhci_esdhc_remove,
+4
drivers/mmc/host/sdhci-of-hlwd.c
··· 58 58 .write_l = sdhci_hlwd_writel, 59 59 .write_w = sdhci_hlwd_writew, 60 60 .write_b = sdhci_hlwd_writeb, 61 + .set_clock = sdhci_set_clock, 62 + .set_bus_width = sdhci_set_bus_width, 63 + .reset = sdhci_reset, 64 + .set_uhs_signaling = sdhci_set_uhs_signaling, 61 65 }; 62 66 63 67 static const struct sdhci_pltfm_data sdhci_hlwd_pdata = {
+77 -1
drivers/mmc/host/sdhci-pci-o2micro.c
··· 21 21 #include "sdhci-pci.h" 22 22 #include "sdhci-pci-o2micro.h" 23 23 24 + static void o2_pci_set_baseclk(struct sdhci_pci_chip *chip, u32 value) 25 + { 26 + u32 scratch_32; 27 + pci_read_config_dword(chip->pdev, 28 + O2_SD_PLL_SETTING, &scratch_32); 29 + 30 + scratch_32 &= 0x0000FFFF; 31 + scratch_32 |= value; 32 + 33 + pci_write_config_dword(chip->pdev, 34 + O2_SD_PLL_SETTING, scratch_32); 35 + } 36 + 37 + static void o2_pci_led_enable(struct sdhci_pci_chip *chip) 38 + { 39 + int ret; 40 + u32 scratch_32; 41 + 42 + /* Set led of SD host function enable */ 43 + ret = pci_read_config_dword(chip->pdev, 44 + O2_SD_FUNC_REG0, &scratch_32); 45 + if (ret) 46 + return; 47 + 48 + scratch_32 &= ~O2_SD_FREG0_LEDOFF; 49 + pci_write_config_dword(chip->pdev, 50 + O2_SD_FUNC_REG0, scratch_32); 51 + 52 + ret = pci_read_config_dword(chip->pdev, 53 + O2_SD_TEST_REG, &scratch_32); 54 + if (ret) 55 + return; 56 + 57 + scratch_32 |= O2_SD_LED_ENABLE; 58 + pci_write_config_dword(chip->pdev, 59 + O2_SD_TEST_REG, scratch_32); 60 + 61 + } 62 + 24 63 void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip) 25 64 { 26 65 u32 scratch_32; ··· 255 216 scratch &= 0x7f; 256 217 pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); 257 218 219 + /* DevId=8520 subId= 0x11 or 0x12 Type Chip support */ 220 + if (chip->pdev->device == PCI_DEVICE_ID_O2_FUJIN2) { 221 + ret = pci_read_config_dword(chip->pdev, 222 + O2_SD_FUNC_REG0, 223 + &scratch_32); 224 + scratch_32 = ((scratch_32 & 0xFF000000) >> 24); 225 + 226 + /* Check Whether subId is 0x11 or 0x12 */ 227 + if ((scratch_32 == 0x11) || (scratch_32 == 0x12)) { 228 + scratch_32 = 0x2c280000; 229 + 230 + /* Set Base Clock to 208MZ */ 231 + o2_pci_set_baseclk(chip, scratch_32); 232 + ret = pci_read_config_dword(chip->pdev, 233 + O2_SD_FUNC_REG4, 234 + &scratch_32); 235 + 236 + /* Enable Base Clk setting change */ 237 + scratch_32 |= O2_SD_FREG4_ENABLE_CLK_SET; 238 + pci_write_config_dword(chip->pdev, 239 + O2_SD_FUNC_REG4, 240 + 
scratch_32); 241 + 242 + /* Set Tuning Window to 4 */ 243 + pci_write_config_byte(chip->pdev, 244 + O2_SD_TUNING_CTRL, 0x44); 245 + 246 + break; 247 + } 248 + } 249 + 250 + /* Enable 8520 led function */ 251 + o2_pci_led_enable(chip); 252 + 258 253 /* Set timeout CLK */ 259 254 ret = pci_read_config_dword(chip->pdev, 260 255 O2_SD_CLK_SETTING, &scratch_32); ··· 349 276 pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); 350 277 351 278 ret = pci_read_config_dword(chip->pdev, 352 - O2_SD_FUNC_REG0, &scratch_32); 279 + O2_SD_PLL_SETTING, &scratch_32); 353 280 354 281 if ((scratch_32 & 0xff000000) == 0x01000000) { 355 282 scratch_32 &= 0x0000FFFF; ··· 372 299 O2_SD_FUNC_REG4, scratch_32); 373 300 } 374 301 302 + /* Set Tuning Windows to 5 */ 303 + pci_write_config_byte(chip->pdev, 304 + O2_SD_TUNING_CTRL, 0x55); 375 305 /* Lock WP */ 376 306 ret = pci_read_config_byte(chip->pdev, 377 307 O2_SD_LOCK_WP, &scratch);
+3
drivers/mmc/host/sdhci-pci-o2micro.h
··· 57 57 #define O2_SD_UHS2_L1_CTRL 0x35C 58 58 #define O2_SD_FUNC_REG3 0x3E0 59 59 #define O2_SD_FUNC_REG4 0x3E4 60 + #define O2_SD_LED_ENABLE BIT(6) 61 + #define O2_SD_FREG0_LEDOFF BIT(13) 62 + #define O2_SD_FREG4_ENABLE_CLK_SET BIT(22) 60 63 61 64 #define O2_SD_VENDOR_SETTING 0x110 62 65 #define O2_SD_VENDOR_SETTING2 0x1C8
+5 -4
drivers/mmc/host/sdhci-pci.c
··· 1031 1031 return 0; 1032 1032 } 1033 1033 1034 - static int sdhci_pci_bus_width(struct sdhci_host *host, int width) 1034 + static void sdhci_pci_set_bus_width(struct sdhci_host *host, int width) 1035 1035 { 1036 1036 u8 ctrl; 1037 1037 ··· 1052 1052 } 1053 1053 1054 1054 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 1055 - 1056 - return 0; 1057 1055 } 1058 1056 1059 1057 static void sdhci_pci_gpio_hw_reset(struct sdhci_host *host) ··· 1078 1080 } 1079 1081 1080 1082 static const struct sdhci_ops sdhci_pci_ops = { 1083 + .set_clock = sdhci_set_clock, 1081 1084 .enable_dma = sdhci_pci_enable_dma, 1082 - .platform_bus_width = sdhci_pci_bus_width, 1085 + .set_bus_width = sdhci_pci_set_bus_width, 1086 + .reset = sdhci_reset, 1087 + .set_uhs_signaling = sdhci_set_uhs_signaling, 1083 1088 .hw_reset = sdhci_pci_hw_reset, 1084 1089 }; 1085 1090
+4
drivers/mmc/host/sdhci-pltfm.c
··· 45 45 EXPORT_SYMBOL_GPL(sdhci_pltfm_clk_get_max_clock); 46 46 47 47 static const struct sdhci_ops sdhci_pltfm_ops = { 48 + .set_clock = sdhci_set_clock, 49 + .set_bus_width = sdhci_set_bus_width, 50 + .reset = sdhci_reset, 51 + .set_uhs_signaling = sdhci_set_uhs_signaling, 48 52 }; 49 53 50 54 #ifdef CONFIG_OF
+8 -6
drivers/mmc/host/sdhci-pxav2.c
··· 51 51 #define MMC_CARD 0x1000 52 52 #define MMC_WIDTH 0x0100 53 53 54 - static void pxav2_set_private_registers(struct sdhci_host *host, u8 mask) 54 + static void pxav2_reset(struct sdhci_host *host, u8 mask) 55 55 { 56 56 struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc)); 57 57 struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; 58 + 59 + sdhci_reset(host, mask); 58 60 59 61 if (mask == SDHCI_RESET_ALL) { 60 62 u16 tmp = 0; ··· 90 88 } 91 89 } 92 90 93 - static int pxav2_mmc_set_width(struct sdhci_host *host, int width) 91 + static void pxav2_mmc_set_bus_width(struct sdhci_host *host, int width) 94 92 { 95 93 u8 ctrl; 96 94 u16 tmp; ··· 109 107 } 110 108 writew(tmp, host->ioaddr + SD_CE_ATA_2); 111 109 writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL); 112 - 113 - return 0; 114 110 } 115 111 116 112 static const struct sdhci_ops pxav2_sdhci_ops = { 113 + .set_clock = sdhci_set_clock, 117 114 .get_max_clock = sdhci_pltfm_clk_get_max_clock, 118 - .platform_reset_exit = pxav2_set_private_registers, 119 - .platform_bus_width = pxav2_mmc_set_width, 115 + .set_bus_width = pxav2_mmc_set_bus_width, 116 + .reset = pxav2_reset, 117 + .set_uhs_signaling = sdhci_set_uhs_signaling, 120 118 }; 121 119 122 120 #ifdef CONFIG_OF
+8 -5
drivers/mmc/host/sdhci-pxav3.c
··· 112 112 return 0; 113 113 } 114 114 115 - static void pxav3_set_private_registers(struct sdhci_host *host, u8 mask) 115 + static void pxav3_reset(struct sdhci_host *host, u8 mask) 116 116 { 117 117 struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc)); 118 118 struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; 119 + 120 + sdhci_reset(host, mask); 119 121 120 122 if (mask == SDHCI_RESET_ALL) { 121 123 /* ··· 186 184 pxa->power_mode = power_mode; 187 185 } 188 186 189 - static int pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs) 187 + static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs) 190 188 { 191 189 u16 ctrl_2; 192 190 ··· 220 218 dev_dbg(mmc_dev(host->mmc), 221 219 "%s uhs = %d, ctrl_2 = %04X\n", 222 220 __func__, uhs, ctrl_2); 223 - 224 - return 0; 225 221 } 226 222 227 223 static const struct sdhci_ops pxav3_sdhci_ops = { 228 - .platform_reset_exit = pxav3_set_private_registers, 224 + .set_clock = sdhci_set_clock, 229 225 .set_uhs_signaling = pxav3_set_uhs_signaling, 230 226 .platform_send_init_74_clocks = pxav3_gen_init_74_clocks, 231 227 .get_max_clock = sdhci_pltfm_clk_get_max_clock, 228 + .set_bus_width = sdhci_set_bus_width, 229 + .reset = pxav3_reset, 230 + .set_uhs_signaling = sdhci_set_uhs_signaling, 232 231 }; 233 232 234 233 static struct sdhci_pltfm_data sdhci_pxav3_pdata = {
+27 -111
drivers/mmc/host/sdhci-s3c.c
··· 33 33 34 34 #define MAX_BUS_CLK (4) 35 35 36 - /* Number of gpio's used is max data bus width + command and clock lines */ 37 - #define NUM_GPIOS(x) (x + 2) 38 - 39 36 /** 40 37 * struct sdhci_s3c - S3C SDHCI instance 41 38 * @host: The SDHCI host created ··· 55 58 struct clk *clk_io; 56 59 struct clk *clk_bus[MAX_BUS_CLK]; 57 60 unsigned long clk_rates[MAX_BUS_CLK]; 61 + 62 + bool no_divider; 58 63 }; 59 64 60 65 /** ··· 69 70 */ 70 71 struct sdhci_s3c_drv_data { 71 72 unsigned int sdhci_quirks; 73 + bool no_divider; 72 74 }; 73 75 74 76 static inline struct sdhci_s3c *to_s3c(struct sdhci_host *host) ··· 119 119 * If controller uses a non-standard clock division, find the best clock 120 120 * speed possible with selected clock source and skip the division. 121 121 */ 122 - if (ourhost->host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) { 122 + if (ourhost->no_divider) { 123 123 rate = clk_round_rate(clksrc, wanted); 124 124 return wanted - rate; 125 125 } ··· 161 161 int src; 162 162 u32 ctrl; 163 163 164 + host->mmc->actual_clock = 0; 165 + 164 166 /* don't bother if the clock is going off. 
*/ 165 - if (clock == 0) 167 + if (clock == 0) { 168 + sdhci_set_clock(host, clock); 166 169 return; 170 + } 167 171 168 172 for (src = 0; src < MAX_BUS_CLK; src++) { 169 173 delta = sdhci_s3c_consider_clock(ourhost, src, clock); ··· 219 215 if (clock < 25 * 1000000) 220 216 ctrl |= (S3C_SDHCI_CTRL3_FCSEL3 | S3C_SDHCI_CTRL3_FCSEL2); 221 217 writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL3); 218 + 219 + sdhci_set_clock(host, clock); 222 220 } 223 221 224 222 /** ··· 301 295 unsigned long timeout; 302 296 u16 clk = 0; 303 297 298 + host->mmc->actual_clock = 0; 299 + 304 300 /* If the clock is going off, set to 0 at clock control register */ 305 301 if (clock == 0) { 306 302 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); 307 - host->clock = clock; 308 303 return; 309 304 } 310 305 311 306 sdhci_s3c_set_clock(host, clock); 312 307 313 308 clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock); 314 - 315 - host->clock = clock; 316 309 317 310 clk = SDHCI_CLOCK_INT_EN; 318 311 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); ··· 334 329 } 335 330 336 331 /** 337 - * sdhci_s3c_platform_bus_width - support 8bit buswidth 332 + * sdhci_s3c_set_bus_width - support 8bit buswidth 338 333 * @host: The SDHCI host being queried 339 334 * @width: MMC_BUS_WIDTH_ macro for the bus width being requested 340 335 * 341 336 * We have 8-bit width support but is not a v3 controller. 342 337 * So we add platform_bus_width() and support 8bit width. 
343 338 */ 344 - static int sdhci_s3c_platform_bus_width(struct sdhci_host *host, int width) 339 + static void sdhci_s3c_set_bus_width(struct sdhci_host *host, int width) 345 340 { 346 341 u8 ctrl; 347 342 ··· 363 358 } 364 359 365 360 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 366 - 367 - return 0; 368 361 } 369 362 370 363 static struct sdhci_ops sdhci_s3c_ops = { 371 364 .get_max_clock = sdhci_s3c_get_max_clk, 372 365 .set_clock = sdhci_s3c_set_clock, 373 366 .get_min_clock = sdhci_s3c_get_min_clock, 374 - .platform_bus_width = sdhci_s3c_platform_bus_width, 367 + .set_bus_width = sdhci_s3c_set_bus_width, 368 + .reset = sdhci_reset, 369 + .set_uhs_signaling = sdhci_set_uhs_signaling, 375 370 }; 376 - 377 - static void sdhci_s3c_notify_change(struct platform_device *dev, int state) 378 - { 379 - struct sdhci_host *host = platform_get_drvdata(dev); 380 - #ifdef CONFIG_PM_RUNTIME 381 - struct sdhci_s3c *sc = sdhci_priv(host); 382 - #endif 383 - unsigned long flags; 384 - 385 - if (host) { 386 - spin_lock_irqsave(&host->lock, flags); 387 - if (state) { 388 - dev_dbg(&dev->dev, "card inserted.\n"); 389 - #ifdef CONFIG_PM_RUNTIME 390 - clk_prepare_enable(sc->clk_io); 391 - #endif 392 - host->flags &= ~SDHCI_DEVICE_DEAD; 393 - host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; 394 - } else { 395 - dev_dbg(&dev->dev, "card removed.\n"); 396 - host->flags |= SDHCI_DEVICE_DEAD; 397 - host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; 398 - #ifdef CONFIG_PM_RUNTIME 399 - clk_disable_unprepare(sc->clk_io); 400 - #endif 401 - } 402 - tasklet_schedule(&host->card_tasklet); 403 - spin_unlock_irqrestore(&host->lock, flags); 404 - } 405 - } 406 - 407 - static irqreturn_t sdhci_s3c_gpio_card_detect_thread(int irq, void *dev_id) 408 - { 409 - struct sdhci_s3c *sc = dev_id; 410 - int status = gpio_get_value(sc->ext_cd_gpio); 411 - if (sc->pdata->ext_cd_gpio_invert) 412 - status = !status; 413 - sdhci_s3c_notify_change(sc->pdev, status); 414 - return IRQ_HANDLED; 415 - } 416 - 
417 - static void sdhci_s3c_setup_card_detect_gpio(struct sdhci_s3c *sc) 418 - { 419 - struct s3c_sdhci_platdata *pdata = sc->pdata; 420 - struct device *dev = &sc->pdev->dev; 421 - 422 - if (devm_gpio_request(dev, pdata->ext_cd_gpio, "SDHCI EXT CD") == 0) { 423 - sc->ext_cd_gpio = pdata->ext_cd_gpio; 424 - sc->ext_cd_irq = gpio_to_irq(pdata->ext_cd_gpio); 425 - if (sc->ext_cd_irq && 426 - request_threaded_irq(sc->ext_cd_irq, NULL, 427 - sdhci_s3c_gpio_card_detect_thread, 428 - IRQF_TRIGGER_RISING | 429 - IRQF_TRIGGER_FALLING | 430 - IRQF_ONESHOT, 431 - dev_name(dev), sc) == 0) { 432 - int status = gpio_get_value(sc->ext_cd_gpio); 433 - if (pdata->ext_cd_gpio_invert) 434 - status = !status; 435 - sdhci_s3c_notify_change(sc->pdev, status); 436 - } else { 437 - dev_warn(dev, "cannot request irq for card detect\n"); 438 - sc->ext_cd_irq = 0; 439 - } 440 - } else { 441 - dev_err(dev, "cannot request gpio for card detect\n"); 442 - } 443 - } 444 371 445 372 #ifdef CONFIG_OF 446 373 static int sdhci_s3c_parse_dt(struct device *dev, 447 374 struct sdhci_host *host, struct s3c_sdhci_platdata *pdata) 448 375 { 449 376 struct device_node *node = dev->of_node; 450 - struct sdhci_s3c *ourhost = to_s3c(host); 451 377 u32 max_width; 452 - int gpio; 453 378 454 379 /* if the bus-width property is not specified, assume width as 1 */ 455 380 if (of_property_read_u32(node, "bus-width", &max_width)) ··· 397 462 return 0; 398 463 } 399 464 400 - gpio = of_get_named_gpio(node, "cd-gpios", 0); 401 - if (gpio_is_valid(gpio)) { 402 - pdata->cd_type = S3C_SDHCI_CD_GPIO; 403 - pdata->ext_cd_gpio = gpio; 404 - ourhost->ext_cd_gpio = -1; 405 - if (of_get_property(node, "cd-inverted", NULL)) 406 - pdata->ext_cd_gpio_invert = 1; 465 + if (of_get_named_gpio(node, "cd-gpios", 0)) 407 466 return 0; 408 - } else if (gpio != -ENOENT) { 409 - dev_err(dev, "invalid card detect gpio specified\n"); 410 - return -EINVAL; 411 - } 412 467 413 468 /* assuming internal card detect that will be configured by 
pinctrl */ 414 469 pdata->cd_type = S3C_SDHCI_CD_INTERNAL; ··· 531 606 /* Setup quirks for the controller */ 532 607 host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC; 533 608 host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT; 534 - if (drv_data) 609 + if (drv_data) { 535 610 host->quirks |= drv_data->sdhci_quirks; 611 + sc->no_divider = drv_data->no_divider; 612 + } 536 613 537 614 #ifndef CONFIG_MMC_SDHCI_S3C_DMA 538 615 ··· 583 656 * If controller does not have internal clock divider, 584 657 * we can use overriding functions instead of default. 585 658 */ 586 - if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) { 659 + if (sc->no_divider) { 587 660 sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock; 588 661 sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock; 589 662 sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock; ··· 601 674 pm_runtime_use_autosuspend(&pdev->dev); 602 675 pm_suspend_ignore_children(&pdev->dev, 1); 603 676 677 + mmc_of_parse(host->mmc); 678 + 604 679 ret = sdhci_add_host(host); 605 680 if (ret) { 606 681 dev_err(dev, "sdhci_add_host() failed\n"); ··· 610 681 pm_runtime_get_noresume(&pdev->dev); 611 682 goto err_req_regs; 612 683 } 613 - 614 - /* The following two methods of card detection might call 615 - sdhci_s3c_notify_change() immediately, so they can be called 616 - only after sdhci_add_host(). Setup errors are ignored. 
*/ 617 - if (pdata->cd_type == S3C_SDHCI_CD_EXTERNAL && pdata->ext_cd_init) 618 - pdata->ext_cd_init(&sdhci_s3c_notify_change); 619 - if (pdata->cd_type == S3C_SDHCI_CD_GPIO && 620 - gpio_is_valid(pdata->ext_cd_gpio)) 621 - sdhci_s3c_setup_card_detect_gpio(sc); 622 684 623 685 #ifdef CONFIG_PM_RUNTIME 624 686 if (pdata->cd_type != S3C_SDHCI_CD_INTERNAL) ··· 631 711 { 632 712 struct sdhci_host *host = platform_get_drvdata(pdev); 633 713 struct sdhci_s3c *sc = sdhci_priv(host); 634 - struct s3c_sdhci_platdata *pdata = sc->pdata; 635 - 636 - if (pdata->cd_type == S3C_SDHCI_CD_EXTERNAL && pdata->ext_cd_cleanup) 637 - pdata->ext_cd_cleanup(&sdhci_s3c_notify_change); 638 714 639 715 if (sc->ext_cd_irq) 640 716 free_irq(sc->ext_cd_irq, sc); 641 717 642 718 #ifdef CONFIG_PM_RUNTIME 643 - if (pdata->cd_type != S3C_SDHCI_CD_INTERNAL) 719 + if (sc->pdata->cd_type != S3C_SDHCI_CD_INTERNAL) 644 720 clk_prepare_enable(sc->clk_io); 645 721 #endif 646 722 sdhci_remove_host(host, 1); ··· 713 797 714 798 #if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS4212) 715 799 static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = { 716 - .sdhci_quirks = SDHCI_QUIRK_NONSTANDARD_CLOCK, 800 + .no_divider = true, 717 801 }; 718 802 #define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)&exynos4_sdhci_drv_data) 719 803 #else
+4
drivers/mmc/host/sdhci-sirf.c
··· 28 28 } 29 29 30 30 static struct sdhci_ops sdhci_sirf_ops = { 31 + .set_clock = sdhci_set_clock, 31 32 .get_max_clock = sdhci_sirf_get_max_clk, 33 + .set_bus_width = sdhci_set_bus_width, 34 + .reset = sdhci_reset, 35 + .set_uhs_signaling = sdhci_set_uhs_signaling, 32 36 }; 33 37 34 38 static struct sdhci_pltfm_data sdhci_sirf_pdata = {
+4 -1
drivers/mmc/host/sdhci-spear.c
··· 38 38 39 39 /* sdhci ops */ 40 40 static const struct sdhci_ops sdhci_pltfm_ops = { 41 - /* Nothing to do for now. */ 41 + .set_clock = sdhci_set_clock, 42 + .set_bus_width = sdhci_set_bus_width, 43 + .reset = sdhci_reset, 44 + .set_uhs_signaling = sdhci_set_uhs_signaling, 42 45 }; 43 46 44 47 #ifdef CONFIG_OF
+38 -29
drivers/mmc/host/sdhci-tegra.c
··· 32 32 33 33 /* Tegra SDHOST controller vendor register definitions */ 34 34 #define SDHCI_TEGRA_VENDOR_MISC_CTRL 0x120 35 + #define SDHCI_MISC_CTRL_ENABLE_SDR104 0x8 36 + #define SDHCI_MISC_CTRL_ENABLE_SDR50 0x10 35 37 #define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 0x20 38 + #define SDHCI_MISC_CTRL_ENABLE_DDR50 0x200 36 39 37 40 #define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0) 38 41 #define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1) 39 42 #define NVQUIRK_ENABLE_SDHCI_SPEC_300 BIT(2) 43 + #define NVQUIRK_DISABLE_SDR50 BIT(3) 44 + #define NVQUIRK_DISABLE_SDR104 BIT(4) 45 + #define NVQUIRK_DISABLE_DDR50 BIT(5) 40 46 41 47 struct sdhci_tegra_soc_data { 42 48 const struct sdhci_pltfm_data *pdata; ··· 53 47 const struct sdhci_tegra_soc_data *soc_data; 54 48 int power_gpio; 55 49 }; 56 - 57 - static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg) 58 - { 59 - u32 val; 60 - 61 - if (unlikely(reg == SDHCI_PRESENT_STATE)) { 62 - /* Use wp_gpio here instead? */ 63 - val = readl(host->ioaddr + reg); 64 - return val | SDHCI_WRITE_PROTECT; 65 - } 66 - 67 - return readl(host->ioaddr + reg); 68 - } 69 50 70 51 static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg) 71 52 { ··· 101 108 return mmc_gpio_get_ro(host->mmc); 102 109 } 103 110 104 - static void tegra_sdhci_reset_exit(struct sdhci_host *host, u8 mask) 111 + static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask) 105 112 { 106 113 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 107 114 struct sdhci_tegra *tegra_host = pltfm_host->priv; 108 115 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; 116 + u32 misc_ctrl; 117 + 118 + sdhci_reset(host, mask); 109 119 110 120 if (!(mask & SDHCI_RESET_ALL)) 111 121 return; 112 122 123 + misc_ctrl = sdhci_readw(host, SDHCI_TEGRA_VENDOR_MISC_CTRL); 113 124 /* Erratum: Enable SDHCI spec v3.00 support */ 114 - if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300) { 115 - u32 misc_ctrl; 116 - 117 - misc_ctrl = sdhci_readb(host, 
SDHCI_TEGRA_VENDOR_MISC_CTRL); 125 + if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300) 118 126 misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300; 119 - sdhci_writeb(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL); 120 - } 127 + /* Don't advertise UHS modes which aren't supported yet */ 128 + if (soc_data->nvquirks & NVQUIRK_DISABLE_SDR50) 129 + misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR50; 130 + if (soc_data->nvquirks & NVQUIRK_DISABLE_DDR50) 131 + misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_DDR50; 132 + if (soc_data->nvquirks & NVQUIRK_DISABLE_SDR104) 133 + misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR104; 134 + sdhci_writew(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL); 121 135 } 122 136 123 - static int tegra_sdhci_buswidth(struct sdhci_host *host, int bus_width) 137 + static void tegra_sdhci_set_bus_width(struct sdhci_host *host, int bus_width) 124 138 { 125 139 u32 ctrl; 126 140 ··· 144 144 ctrl &= ~SDHCI_CTRL_4BITBUS; 145 145 } 146 146 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 147 - return 0; 148 147 } 149 148 150 149 static const struct sdhci_ops tegra_sdhci_ops = { 151 150 .get_ro = tegra_sdhci_get_ro, 152 - .read_l = tegra_sdhci_readl, 153 151 .read_w = tegra_sdhci_readw, 154 152 .write_l = tegra_sdhci_writel, 155 - .platform_bus_width = tegra_sdhci_buswidth, 156 - .platform_reset_exit = tegra_sdhci_reset_exit, 153 + .set_clock = sdhci_set_clock, 154 + .set_bus_width = tegra_sdhci_set_bus_width, 155 + .reset = tegra_sdhci_reset, 156 + .set_uhs_signaling = sdhci_set_uhs_signaling, 157 + .get_max_clock = sdhci_pltfm_clk_get_max_clock, 157 158 }; 158 159 159 160 static const struct sdhci_pltfm_data sdhci_tegra20_pdata = { 160 161 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | 161 162 SDHCI_QUIRK_SINGLE_POWER_WRITE | 162 163 SDHCI_QUIRK_NO_HISPD_BIT | 163 - SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC, 164 + SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | 165 + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, 164 166 .ops = &tegra_sdhci_ops, 165 167 }; 166 168 ··· 177 175 
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | 178 176 SDHCI_QUIRK_SINGLE_POWER_WRITE | 179 177 SDHCI_QUIRK_NO_HISPD_BIT | 180 - SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC, 178 + SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | 179 + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, 181 180 .ops = &tegra_sdhci_ops, 182 181 }; 183 182 184 183 static struct sdhci_tegra_soc_data soc_data_tegra30 = { 185 184 .pdata = &sdhci_tegra30_pdata, 186 - .nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300, 185 + .nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 | 186 + NVQUIRK_DISABLE_SDR50 | 187 + NVQUIRK_DISABLE_SDR104, 187 188 }; 188 189 189 190 static const struct sdhci_pltfm_data sdhci_tegra114_pdata = { ··· 194 189 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | 195 190 SDHCI_QUIRK_SINGLE_POWER_WRITE | 196 191 SDHCI_QUIRK_NO_HISPD_BIT | 197 - SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC, 192 + SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | 193 + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, 198 194 .ops = &tegra_sdhci_ops, 199 195 }; 200 196 201 197 static struct sdhci_tegra_soc_data soc_data_tegra114 = { 202 198 .pdata = &sdhci_tegra114_pdata, 199 + .nvquirks = NVQUIRK_DISABLE_SDR50 | 200 + NVQUIRK_DISABLE_DDR50 | 201 + NVQUIRK_DISABLE_SDR104, 203 202 }; 204 203 205 204 static const struct of_device_id sdhci_tegra_dt_match[] = {
+365 -378
drivers/mmc/host/sdhci.c
··· 44 44 45 45 #define MAX_TUNING_LOOP 40 46 46 47 + #define ADMA_SIZE ((128 * 2 + 1) * 4) 48 + 47 49 static unsigned int debug_quirks = 0; 48 50 static unsigned int debug_quirks2; 49 51 ··· 133 131 * * 134 132 \*****************************************************************************/ 135 133 136 - static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set) 137 - { 138 - u32 ier; 139 - 140 - ier = sdhci_readl(host, SDHCI_INT_ENABLE); 141 - ier &= ~clear; 142 - ier |= set; 143 - sdhci_writel(host, ier, SDHCI_INT_ENABLE); 144 - sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE); 145 - } 146 - 147 - static void sdhci_unmask_irqs(struct sdhci_host *host, u32 irqs) 148 - { 149 - sdhci_clear_set_irqs(host, 0, irqs); 150 - } 151 - 152 - static void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs) 153 - { 154 - sdhci_clear_set_irqs(host, irqs, 0); 155 - } 156 - 157 134 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable) 158 135 { 159 - u32 present, irqs; 136 + u32 present; 160 137 161 138 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || 162 139 (host->mmc->caps & MMC_CAP_NONREMOVABLE)) 163 140 return; 164 141 165 - present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 166 - SDHCI_CARD_PRESENT; 167 - irqs = present ? SDHCI_INT_CARD_REMOVE : SDHCI_INT_CARD_INSERT; 142 + if (enable) { 143 + present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 144 + SDHCI_CARD_PRESENT; 168 145 169 - if (enable) 170 - sdhci_unmask_irqs(host, irqs); 171 - else 172 - sdhci_mask_irqs(host, irqs); 146 + host->ier |= present ? 
SDHCI_INT_CARD_REMOVE : 147 + SDHCI_INT_CARD_INSERT; 148 + } else { 149 + host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); 150 + } 151 + 152 + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 153 + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 173 154 } 174 155 175 156 static void sdhci_enable_card_detection(struct sdhci_host *host) ··· 165 180 sdhci_set_card_detection(host, false); 166 181 } 167 182 168 - static void sdhci_reset(struct sdhci_host *host, u8 mask) 183 + void sdhci_reset(struct sdhci_host *host, u8 mask) 169 184 { 170 185 unsigned long timeout; 171 - u32 uninitialized_var(ier); 172 - 173 - if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { 174 - if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & 175 - SDHCI_CARD_PRESENT)) 176 - return; 177 - } 178 - 179 - if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) 180 - ier = sdhci_readl(host, SDHCI_INT_ENABLE); 181 - 182 - if (host->ops->platform_reset_enter) 183 - host->ops->platform_reset_enter(host, mask); 184 186 185 187 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET); 186 188 ··· 192 220 timeout--; 193 221 mdelay(1); 194 222 } 223 + } 224 + EXPORT_SYMBOL_GPL(sdhci_reset); 195 225 196 - if (host->ops->platform_reset_exit) 197 - host->ops->platform_reset_exit(host, mask); 226 + static void sdhci_do_reset(struct sdhci_host *host, u8 mask) 227 + { 228 + if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { 229 + if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & 230 + SDHCI_CARD_PRESENT)) 231 + return; 232 + } 198 233 199 - if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) 200 - sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier); 234 + host->ops->reset(host, mask); 201 235 202 - if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 203 - if ((host->ops->enable_dma) && (mask & SDHCI_RESET_ALL)) 204 - host->ops->enable_dma(host); 236 + if (mask & SDHCI_RESET_ALL) { 237 + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 238 + if (host->ops->enable_dma) 239 + 
host->ops->enable_dma(host); 240 + } 241 + 242 + /* Resetting the controller clears many */ 243 + host->preset_enabled = false; 205 244 } 206 245 } 207 246 ··· 221 238 static void sdhci_init(struct sdhci_host *host, int soft) 222 239 { 223 240 if (soft) 224 - sdhci_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA); 241 + sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA); 225 242 else 226 - sdhci_reset(host, SDHCI_RESET_ALL); 243 + sdhci_do_reset(host, SDHCI_RESET_ALL); 227 244 228 - sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, 229 - SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | 230 - SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX | 231 - SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT | 232 - SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE); 245 + host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | 246 + SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | 247 + SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC | 248 + SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END | 249 + SDHCI_INT_RESPONSE; 250 + 251 + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 252 + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 233 253 234 254 if (soft) { 235 255 /* force clock reconfiguration */ ··· 488 502 else 489 503 direction = DMA_TO_DEVICE; 490 504 491 - /* 492 - * The ADMA descriptor table is mapped further down as we 493 - * need to fill it with data first. 494 - */ 495 - 496 505 host->align_addr = dma_map_single(mmc_dev(host->mmc), 497 506 host->align_buffer, 128 * 4, direction); 498 507 if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr)) ··· 548 567 * If this triggers then we have a calculation bug 549 568 * somewhere. 
:/ 550 569 */ 551 - WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4); 570 + WARN_ON((desc - host->adma_desc) > ADMA_SIZE); 552 571 } 553 572 554 573 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) { ··· 576 595 host->align_addr, 128 * 4, direction); 577 596 } 578 597 579 - host->adma_addr = dma_map_single(mmc_dev(host->mmc), 580 - host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE); 581 - if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr)) 582 - goto unmap_entries; 583 - BUG_ON(host->adma_addr & 0x3); 584 - 585 598 return 0; 586 599 587 - unmap_entries: 588 - dma_unmap_sg(mmc_dev(host->mmc), data->sg, 589 - data->sg_len, direction); 590 600 unmap_align: 591 601 dma_unmap_single(mmc_dev(host->mmc), host->align_addr, 592 602 128 * 4, direction); ··· 595 623 u8 *align; 596 624 char *buffer; 597 625 unsigned long flags; 626 + bool has_unaligned; 598 627 599 628 if (data->flags & MMC_DATA_READ) 600 629 direction = DMA_FROM_DEVICE; 601 630 else 602 631 direction = DMA_TO_DEVICE; 603 632 604 - dma_unmap_single(mmc_dev(host->mmc), host->adma_addr, 605 - (128 * 2 + 1) * 4, DMA_TO_DEVICE); 606 - 607 633 dma_unmap_single(mmc_dev(host->mmc), host->align_addr, 608 634 128 * 4, direction); 609 635 610 - if (data->flags & MMC_DATA_READ) { 636 + /* Do a quick scan of the SG list for any unaligned mappings */ 637 + has_unaligned = false; 638 + for_each_sg(data->sg, sg, host->sg_count, i) 639 + if (sg_dma_address(sg) & 3) { 640 + has_unaligned = true; 641 + break; 642 + } 643 + 644 + if (has_unaligned && data->flags & MMC_DATA_READ) { 611 645 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg, 612 646 data->sg_len, direction); 613 647 ··· 699 721 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR; 700 722 701 723 if (host->flags & SDHCI_REQ_USE_DMA) 702 - sdhci_clear_set_irqs(host, pio_irqs, dma_irqs); 724 + host->ier = (host->ier & ~pio_irqs) | dma_irqs; 703 725 else 704 - sdhci_clear_set_irqs(host, dma_irqs, pio_irqs); 726 + host->ier = (host->ier & ~dma_irqs) 
| pio_irqs; 727 + 728 + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 729 + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 705 730 } 706 731 707 732 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) ··· 957 976 * upon error conditions. 958 977 */ 959 978 if (data->error) { 960 - sdhci_reset(host, SDHCI_RESET_CMD); 961 - sdhci_reset(host, SDHCI_RESET_DATA); 979 + sdhci_do_reset(host, SDHCI_RESET_CMD); 980 + sdhci_do_reset(host, SDHCI_RESET_DATA); 962 981 } 963 982 964 983 sdhci_send_command(host, data->stop); ··· 1088 1107 1089 1108 static u16 sdhci_get_preset_value(struct sdhci_host *host) 1090 1109 { 1091 - u16 ctrl, preset = 0; 1110 + u16 preset = 0; 1092 1111 1093 - ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1094 - 1095 - switch (ctrl & SDHCI_CTRL_UHS_MASK) { 1096 - case SDHCI_CTRL_UHS_SDR12: 1112 + switch (host->timing) { 1113 + case MMC_TIMING_UHS_SDR12: 1097 1114 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); 1098 1115 break; 1099 - case SDHCI_CTRL_UHS_SDR25: 1116 + case MMC_TIMING_UHS_SDR25: 1100 1117 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25); 1101 1118 break; 1102 - case SDHCI_CTRL_UHS_SDR50: 1119 + case MMC_TIMING_UHS_SDR50: 1103 1120 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50); 1104 1121 break; 1105 - case SDHCI_CTRL_UHS_SDR104: 1122 + case MMC_TIMING_UHS_SDR104: 1123 + case MMC_TIMING_MMC_HS200: 1106 1124 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104); 1107 1125 break; 1108 - case SDHCI_CTRL_UHS_DDR50: 1126 + case MMC_TIMING_UHS_DDR50: 1109 1127 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50); 1110 1128 break; 1111 1129 default: ··· 1116 1136 return preset; 1117 1137 } 1118 1138 1119 - static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) 1139 + void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) 1120 1140 { 1121 1141 int div = 0; /* Initialized for compiler warning */ 1122 1142 int real_div = div, clk_mul = 1; 1123 1143 u16 clk = 0; 1124 1144 
unsigned long timeout; 1125 1145 1126 - if (clock && clock == host->clock) 1127 - return; 1128 - 1129 1146 host->mmc->actual_clock = 0; 1130 - 1131 - if (host->ops->set_clock) { 1132 - host->ops->set_clock(host, clock); 1133 - if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) 1134 - return; 1135 - } 1136 1147 1137 1148 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); 1138 1149 1139 1150 if (clock == 0) 1140 - goto out; 1151 + return; 1141 1152 1142 1153 if (host->version >= SDHCI_SPEC_300) { 1143 - if (sdhci_readw(host, SDHCI_HOST_CONTROL2) & 1144 - SDHCI_CTRL_PRESET_VAL_ENABLE) { 1154 + if (host->preset_enabled) { 1145 1155 u16 pre_val; 1146 1156 1147 1157 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); ··· 1217 1247 1218 1248 clk |= SDHCI_CLOCK_CARD_EN; 1219 1249 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 1220 - 1221 - out: 1222 - host->clock = clock; 1223 1250 } 1251 + EXPORT_SYMBOL_GPL(sdhci_set_clock); 1224 1252 1225 - static inline void sdhci_update_clock(struct sdhci_host *host) 1226 - { 1227 - unsigned int clock; 1228 - 1229 - clock = host->clock; 1230 - host->clock = 0; 1231 - sdhci_set_clock(host, clock); 1232 - } 1233 - 1234 - static int sdhci_set_power(struct sdhci_host *host, unsigned short power) 1253 + static void sdhci_set_power(struct sdhci_host *host, unsigned char mode, 1254 + unsigned short vdd) 1235 1255 { 1236 1256 u8 pwr = 0; 1237 1257 1238 - if (power != (unsigned short)-1) { 1239 - switch (1 << power) { 1258 + if (mode != MMC_POWER_OFF) { 1259 + switch (1 << vdd) { 1240 1260 case MMC_VDD_165_195: 1241 1261 pwr = SDHCI_POWER_180; 1242 1262 break; ··· 1244 1284 } 1245 1285 1246 1286 if (host->pwr == pwr) 1247 - return -1; 1287 + return; 1248 1288 1249 1289 host->pwr = pwr; 1250 1290 ··· 1252 1292 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 1253 1293 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 1254 1294 sdhci_runtime_pm_bus_off(host); 1255 - return 0; 1256 - } 1295 + vdd = 0; 1296 + } else { 1297 + /* 1298 + * Spec says that we should 
clear the power reg before setting 1299 + * a new value. Some controllers don't seem to like this though. 1300 + */ 1301 + if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) 1302 + sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 1257 1303 1258 - /* 1259 - * Spec says that we should clear the power reg before setting 1260 - * a new value. Some controllers don't seem to like this though. 1261 - */ 1262 - if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) 1263 - sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 1304 + /* 1305 + * At least the Marvell CaFe chip gets confused if we set the 1306 + * voltage and set turn on power at the same time, so set the 1307 + * voltage first. 1308 + */ 1309 + if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) 1310 + sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 1264 1311 1265 - /* 1266 - * At least the Marvell CaFe chip gets confused if we set the voltage 1267 - * and set turn on power at the same time, so set the voltage first. 1268 - */ 1269 - if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) 1312 + pwr |= SDHCI_POWER_ON; 1313 + 1270 1314 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 1271 1315 1272 - pwr |= SDHCI_POWER_ON; 1316 + if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 1317 + sdhci_runtime_pm_bus_on(host); 1273 1318 1274 - sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 1319 + /* 1320 + * Some controllers need an extra 10ms delay of 10ms before 1321 + * they can apply clock after applying power 1322 + */ 1323 + if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) 1324 + mdelay(10); 1325 + } 1275 1326 1276 - if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 1277 - sdhci_runtime_pm_bus_on(host); 1278 - 1279 - /* 1280 - * Some controllers need an extra 10ms delay of 10ms before they 1281 - * can apply clock after applying power 1282 - */ 1283 - if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) 1284 - mdelay(10); 1285 - 1286 - return power; 1327 + if (host->vmmc) { 1328 + spin_unlock_irq(&host->lock); 1329 + 
mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd); 1330 + spin_lock_irq(&host->lock); 1331 + } 1287 1332 } 1288 1333 1289 1334 /*****************************************************************************\ ··· 1392 1427 spin_unlock_irqrestore(&host->lock, flags); 1393 1428 } 1394 1429 1430 + void sdhci_set_bus_width(struct sdhci_host *host, int width) 1431 + { 1432 + u8 ctrl; 1433 + 1434 + ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 1435 + if (width == MMC_BUS_WIDTH_8) { 1436 + ctrl &= ~SDHCI_CTRL_4BITBUS; 1437 + if (host->version >= SDHCI_SPEC_300) 1438 + ctrl |= SDHCI_CTRL_8BITBUS; 1439 + } else { 1440 + if (host->version >= SDHCI_SPEC_300) 1441 + ctrl &= ~SDHCI_CTRL_8BITBUS; 1442 + if (width == MMC_BUS_WIDTH_4) 1443 + ctrl |= SDHCI_CTRL_4BITBUS; 1444 + else 1445 + ctrl &= ~SDHCI_CTRL_4BITBUS; 1446 + } 1447 + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 1448 + } 1449 + EXPORT_SYMBOL_GPL(sdhci_set_bus_width); 1450 + 1451 + void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing) 1452 + { 1453 + u16 ctrl_2; 1454 + 1455 + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1456 + /* Select Bus Speed Mode for host */ 1457 + ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; 1458 + if ((timing == MMC_TIMING_MMC_HS200) || 1459 + (timing == MMC_TIMING_UHS_SDR104)) 1460 + ctrl_2 |= SDHCI_CTRL_UHS_SDR104; 1461 + else if (timing == MMC_TIMING_UHS_SDR12) 1462 + ctrl_2 |= SDHCI_CTRL_UHS_SDR12; 1463 + else if (timing == MMC_TIMING_UHS_SDR25) 1464 + ctrl_2 |= SDHCI_CTRL_UHS_SDR25; 1465 + else if (timing == MMC_TIMING_UHS_SDR50) 1466 + ctrl_2 |= SDHCI_CTRL_UHS_SDR50; 1467 + else if ((timing == MMC_TIMING_UHS_DDR50) || 1468 + (timing == MMC_TIMING_MMC_DDR52)) 1469 + ctrl_2 |= SDHCI_CTRL_UHS_DDR50; 1470 + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 1471 + } 1472 + EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling); 1473 + 1395 1474 static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) 1396 1475 { 1397 1476 unsigned long flags; 1398 - int vdd_bit = -1; 1399 1477 u8 
ctrl; 1400 1478 1401 1479 spin_lock_irqsave(&host->lock, flags); ··· 1464 1456 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) 1465 1457 sdhci_enable_preset_value(host, false); 1466 1458 1467 - sdhci_set_clock(host, ios->clock); 1468 - 1469 - if (ios->power_mode == MMC_POWER_OFF) 1470 - vdd_bit = sdhci_set_power(host, -1); 1471 - else 1472 - vdd_bit = sdhci_set_power(host, ios->vdd); 1473 - 1474 - if (host->vmmc && vdd_bit != -1) { 1475 - spin_unlock_irqrestore(&host->lock, flags); 1476 - mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd_bit); 1477 - spin_lock_irqsave(&host->lock, flags); 1459 + if (!ios->clock || ios->clock != host->clock) { 1460 + host->ops->set_clock(host, ios->clock); 1461 + host->clock = ios->clock; 1478 1462 } 1463 + 1464 + sdhci_set_power(host, ios->power_mode, ios->vdd); 1479 1465 1480 1466 if (host->ops->platform_send_init_74_clocks) 1481 1467 host->ops->platform_send_init_74_clocks(host, ios->power_mode); 1482 1468 1483 - /* 1484 - * If your platform has 8-bit width support but is not a v3 controller, 1485 - * or if it requires special setup code, you should implement that in 1486 - * platform_bus_width(). 
1487 - */ 1488 - if (host->ops->platform_bus_width) { 1489 - host->ops->platform_bus_width(host, ios->bus_width); 1490 - } else { 1491 - ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 1492 - if (ios->bus_width == MMC_BUS_WIDTH_8) { 1493 - ctrl &= ~SDHCI_CTRL_4BITBUS; 1494 - if (host->version >= SDHCI_SPEC_300) 1495 - ctrl |= SDHCI_CTRL_8BITBUS; 1496 - } else { 1497 - if (host->version >= SDHCI_SPEC_300) 1498 - ctrl &= ~SDHCI_CTRL_8BITBUS; 1499 - if (ios->bus_width == MMC_BUS_WIDTH_4) 1500 - ctrl |= SDHCI_CTRL_4BITBUS; 1501 - else 1502 - ctrl &= ~SDHCI_CTRL_4BITBUS; 1503 - } 1504 - sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 1505 - } 1469 + host->ops->set_bus_width(host, ios->bus_width); 1506 1470 1507 1471 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 1508 1472 ··· 1490 1510 1491 1511 /* In case of UHS-I modes, set High Speed Enable */ 1492 1512 if ((ios->timing == MMC_TIMING_MMC_HS200) || 1513 + (ios->timing == MMC_TIMING_MMC_DDR52) || 1493 1514 (ios->timing == MMC_TIMING_UHS_SDR50) || 1494 1515 (ios->timing == MMC_TIMING_UHS_SDR104) || 1495 1516 (ios->timing == MMC_TIMING_UHS_DDR50) || 1496 1517 (ios->timing == MMC_TIMING_UHS_SDR25)) 1497 1518 ctrl |= SDHCI_CTRL_HISPD; 1498 1519 1499 - ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1500 - if (!(ctrl_2 & SDHCI_CTRL_PRESET_VAL_ENABLE)) { 1520 + if (!host->preset_enabled) { 1501 1521 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 1502 1522 /* 1503 1523 * We only need to set Driver Strength if the 1504 1524 * preset value enable is not set. 
1505 1525 */ 1526 + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1506 1527 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK; 1507 1528 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A) 1508 1529 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A; ··· 1527 1546 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 1528 1547 1529 1548 /* Re-enable SD Clock */ 1530 - sdhci_update_clock(host); 1549 + host->ops->set_clock(host, host->clock); 1531 1550 } 1532 1551 1533 1552 ··· 1536 1555 clk &= ~SDHCI_CLOCK_CARD_EN; 1537 1556 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 1538 1557 1539 - if (host->ops->set_uhs_signaling) 1540 - host->ops->set_uhs_signaling(host, ios->timing); 1541 - else { 1542 - ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1543 - /* Select Bus Speed Mode for host */ 1544 - ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; 1545 - if ((ios->timing == MMC_TIMING_MMC_HS200) || 1546 - (ios->timing == MMC_TIMING_UHS_SDR104)) 1547 - ctrl_2 |= SDHCI_CTRL_UHS_SDR104; 1548 - else if (ios->timing == MMC_TIMING_UHS_SDR12) 1549 - ctrl_2 |= SDHCI_CTRL_UHS_SDR12; 1550 - else if (ios->timing == MMC_TIMING_UHS_SDR25) 1551 - ctrl_2 |= SDHCI_CTRL_UHS_SDR25; 1552 - else if (ios->timing == MMC_TIMING_UHS_SDR50) 1553 - ctrl_2 |= SDHCI_CTRL_UHS_SDR50; 1554 - else if (ios->timing == MMC_TIMING_UHS_DDR50) 1555 - ctrl_2 |= SDHCI_CTRL_UHS_DDR50; 1556 - sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 1557 - } 1558 + host->ops->set_uhs_signaling(host, ios->timing); 1559 + host->timing = ios->timing; 1558 1560 1559 1561 if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) && 1560 1562 ((ios->timing == MMC_TIMING_UHS_SDR12) || ··· 1554 1590 } 1555 1591 1556 1592 /* Re-enable SD Clock */ 1557 - sdhci_update_clock(host); 1593 + host->ops->set_clock(host, host->clock); 1558 1594 } else 1559 1595 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 1560 1596 ··· 1564 1600 * it on each ios seems to solve the problem. 
1565 1601 */ 1566 1602 if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS) 1567 - sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 1603 + sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 1568 1604 1569 1605 mmiowb(); 1570 1606 spin_unlock_irqrestore(&host->lock, flags); ··· 1673 1709 1674 1710 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable) 1675 1711 { 1676 - if (host->flags & SDHCI_DEVICE_DEAD) 1677 - goto out; 1712 + if (!(host->flags & SDHCI_DEVICE_DEAD)) { 1713 + if (enable) 1714 + host->ier |= SDHCI_INT_CARD_INT; 1715 + else 1716 + host->ier &= ~SDHCI_INT_CARD_INT; 1678 1717 1679 - if (enable) 1680 - host->flags |= SDHCI_SDIO_IRQ_ENABLED; 1681 - else 1682 - host->flags &= ~SDHCI_SDIO_IRQ_ENABLED; 1683 - 1684 - /* SDIO IRQ will be enabled as appropriate in runtime resume */ 1685 - if (host->runtime_suspended) 1686 - goto out; 1687 - 1688 - if (enable) 1689 - sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT); 1690 - else 1691 - sdhci_mask_irqs(host, SDHCI_INT_CARD_INT); 1692 - out: 1693 - mmiowb(); 1718 + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 1719 + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 1720 + mmiowb(); 1721 + } 1694 1722 } 1695 1723 1696 1724 static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) ··· 1690 1734 struct sdhci_host *host = mmc_priv(mmc); 1691 1735 unsigned long flags; 1692 1736 1737 + sdhci_runtime_pm_get(host); 1738 + 1693 1739 spin_lock_irqsave(&host->lock, flags); 1740 + if (enable) 1741 + host->flags |= SDHCI_SDIO_IRQ_ENABLED; 1742 + else 1743 + host->flags &= ~SDHCI_SDIO_IRQ_ENABLED; 1744 + 1694 1745 sdhci_enable_sdio_irq_nolock(host, enable); 1695 1746 spin_unlock_irqrestore(&host->lock, flags); 1747 + 1748 + sdhci_runtime_pm_put(host); 1696 1749 } 1697 1750 1698 1751 static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host, ··· 1820 1855 1821 1856 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) 1822 1857 { 1823 - struct sdhci_host 
*host; 1858 + struct sdhci_host *host = mmc_priv(mmc); 1824 1859 u16 ctrl; 1825 - u32 ier; 1826 1860 int tuning_loop_counter = MAX_TUNING_LOOP; 1827 - unsigned long timeout; 1828 1861 int err = 0; 1829 - bool requires_tuning_nonuhs = false; 1830 1862 unsigned long flags; 1831 - 1832 - host = mmc_priv(mmc); 1833 1863 1834 1864 sdhci_runtime_pm_get(host); 1835 1865 spin_lock_irqsave(&host->lock, flags); 1836 - 1837 - ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1838 1866 1839 1867 /* 1840 1868 * The Host Controller needs tuning only in case of SDR104 mode ··· 1836 1878 * If the Host Controller supports the HS200 mode then the 1837 1879 * tuning function has to be executed. 1838 1880 */ 1839 - if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) && 1840 - (host->flags & SDHCI_SDR50_NEEDS_TUNING || 1841 - host->flags & SDHCI_SDR104_NEEDS_TUNING)) 1842 - requires_tuning_nonuhs = true; 1881 + switch (host->timing) { 1882 + case MMC_TIMING_MMC_HS200: 1883 + case MMC_TIMING_UHS_SDR104: 1884 + break; 1843 1885 1844 - if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) || 1845 - requires_tuning_nonuhs) 1846 - ctrl |= SDHCI_CTRL_EXEC_TUNING; 1847 - else { 1886 + case MMC_TIMING_UHS_SDR50: 1887 + if (host->flags & SDHCI_SDR50_NEEDS_TUNING || 1888 + host->flags & SDHCI_SDR104_NEEDS_TUNING) 1889 + break; 1890 + /* FALLTHROUGH */ 1891 + 1892 + default: 1848 1893 spin_unlock_irqrestore(&host->lock, flags); 1849 1894 sdhci_runtime_pm_put(host); 1850 1895 return 0; ··· 1860 1899 return err; 1861 1900 } 1862 1901 1902 + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1903 + ctrl |= SDHCI_CTRL_EXEC_TUNING; 1863 1904 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 1864 1905 1865 1906 /* ··· 1874 1911 * to make sure we don't hit a controller bug, we _only_ 1875 1912 * enable Buffer Read Ready interrupt here. 
1876 1913 */ 1877 - ier = sdhci_readl(host, SDHCI_INT_ENABLE); 1878 - sdhci_clear_set_irqs(host, ier, SDHCI_INT_DATA_AVAIL); 1914 + sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE); 1915 + sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE); 1879 1916 1880 1917 /* 1881 1918 * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number 1882 1919 * of loops reaches 40 times or a timeout of 150ms occurs. 1883 1920 */ 1884 - timeout = 150; 1885 1921 do { 1886 1922 struct mmc_command cmd = {0}; 1887 1923 struct mmc_request mrq = {NULL}; 1888 - 1889 - if (!tuning_loop_counter && !timeout) 1890 - break; 1891 1924 1892 1925 cmd.opcode = opcode; 1893 1926 cmd.arg = 0; ··· 1891 1932 cmd.retries = 0; 1892 1933 cmd.data = NULL; 1893 1934 cmd.error = 0; 1935 + 1936 + if (tuning_loop_counter-- == 0) 1937 + break; 1894 1938 1895 1939 mrq.cmd = &cmd; 1896 1940 host->mrq = &mrq; ··· 1952 1990 host->tuning_done = 0; 1953 1991 1954 1992 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1955 - tuning_loop_counter--; 1956 - timeout--; 1957 - mdelay(1); 1993 + 1994 + /* eMMC spec does not require a delay between tuning cycles */ 1995 + if (opcode == MMC_SEND_TUNING_BLOCK) 1996 + mdelay(1); 1958 1997 } while (ctrl & SDHCI_CTRL_EXEC_TUNING); 1959 1998 1960 1999 /* 1961 2000 * The Host Driver has exhausted the maximum number of loops allowed, 1962 2001 * so use fixed sampling frequency. 
1963 2002 */ 1964 - if (!tuning_loop_counter || !timeout) { 2003 + if (tuning_loop_counter < 0) { 1965 2004 ctrl &= ~SDHCI_CTRL_TUNED_CLK; 1966 2005 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2006 + } 2007 + if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) { 2008 + pr_info(DRIVER_NAME ": Tuning procedure" 2009 + " failed, falling back to fixed sampling" 2010 + " clock\n"); 1967 2011 err = -EIO; 1968 - } else { 1969 - if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) { 1970 - pr_info(DRIVER_NAME ": Tuning procedure" 1971 - " failed, falling back to fixed sampling" 1972 - " clock\n"); 1973 - err = -EIO; 1974 - } 1975 2012 } 1976 2013 1977 2014 out: ··· 2005 2044 if (err && (host->flags & SDHCI_USING_RETUNING_TIMER)) 2006 2045 err = 0; 2007 2046 2008 - sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier); 2047 + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2048 + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2009 2049 spin_unlock_irqrestore(&host->lock, flags); 2010 2050 sdhci_runtime_pm_put(host); 2011 2051 ··· 2016 2054 2017 2055 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable) 2018 2056 { 2019 - u16 ctrl; 2020 - 2021 2057 /* Host Controller v3.00 defines preset value registers */ 2022 2058 if (host->version < SDHCI_SPEC_300) 2023 2059 return; 2024 - 2025 - ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2026 2060 2027 2061 /* 2028 2062 * We only enable or disable Preset Value if they are not already 2029 2063 * enabled or disabled respectively. Otherwise, we bail out. 
2030 2064 */ 2031 - if (enable && !(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) { 2032 - ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; 2065 + if (host->preset_enabled != enable) { 2066 + u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2067 + 2068 + if (enable) 2069 + ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; 2070 + else 2071 + ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; 2072 + 2033 2073 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2034 - host->flags |= SDHCI_PV_ENABLED; 2035 - } else if (!enable && (ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) { 2036 - ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; 2037 - sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2038 - host->flags &= ~SDHCI_PV_ENABLED; 2074 + 2075 + if (enable) 2076 + host->flags |= SDHCI_PV_ENABLED; 2077 + else 2078 + host->flags &= ~SDHCI_PV_ENABLED; 2079 + 2080 + host->preset_enabled = enable; 2039 2081 } 2040 2082 } 2041 2083 ··· 2061 2095 pr_err("%s: Resetting controller.\n", 2062 2096 mmc_hostname(host->mmc)); 2063 2097 2064 - sdhci_reset(host, SDHCI_RESET_CMD); 2065 - sdhci_reset(host, SDHCI_RESET_DATA); 2098 + sdhci_do_reset(host, SDHCI_RESET_CMD); 2099 + sdhci_do_reset(host, SDHCI_RESET_DATA); 2066 2100 2067 2101 host->mrq->cmd->error = -ENOMEDIUM; 2068 2102 tasklet_schedule(&host->finish_tasklet); ··· 2089 2123 * Tasklets * 2090 2124 * * 2091 2125 \*****************************************************************************/ 2092 - 2093 - static void sdhci_tasklet_card(unsigned long param) 2094 - { 2095 - struct sdhci_host *host = (struct sdhci_host*)param; 2096 - 2097 - sdhci_card_event(host->mmc); 2098 - 2099 - mmc_detect_change(host->mmc, msecs_to_jiffies(200)); 2100 - } 2101 2126 2102 2127 static void sdhci_tasklet_finish(unsigned long param) 2103 2128 { ··· 2126 2169 /* Some controllers need this kick or reset won't work here */ 2127 2170 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) 2128 2171 /* This is to force an update */ 2129 - sdhci_update_clock(host); 2172 + host->ops->set_clock(host, host->clock); 2130 2173 2131 
2174 /* Spec says we should do both at the same time, but Ricoh 2132 2175 controllers do not like that. */ 2133 - sdhci_reset(host, SDHCI_RESET_CMD); 2134 - sdhci_reset(host, SDHCI_RESET_DATA); 2176 + sdhci_do_reset(host, SDHCI_RESET_CMD); 2177 + sdhci_do_reset(host, SDHCI_RESET_DATA); 2135 2178 } 2136 2179 2137 2180 host->mrq = NULL; ··· 2381 2424 2382 2425 static irqreturn_t sdhci_irq(int irq, void *dev_id) 2383 2426 { 2384 - irqreturn_t result; 2427 + irqreturn_t result = IRQ_NONE; 2385 2428 struct sdhci_host *host = dev_id; 2386 - u32 intmask, unexpected = 0; 2387 - int cardint = 0, max_loops = 16; 2429 + u32 intmask, mask, unexpected = 0; 2430 + int max_loops = 16; 2388 2431 2389 2432 spin_lock(&host->lock); 2390 2433 2391 - if (host->runtime_suspended) { 2434 + if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) { 2392 2435 spin_unlock(&host->lock); 2393 2436 return IRQ_NONE; 2394 2437 } 2395 2438 2396 2439 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 2397 - 2398 2440 if (!intmask || intmask == 0xffffffff) { 2399 2441 result = IRQ_NONE; 2400 2442 goto out; 2401 2443 } 2402 2444 2403 - again: 2404 - DBG("*** %s got interrupt: 0x%08x\n", 2405 - mmc_hostname(host->mmc), intmask); 2445 + do { 2446 + /* Clear selected interrupts. */ 2447 + mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 2448 + SDHCI_INT_BUS_POWER); 2449 + sdhci_writel(host, mask, SDHCI_INT_STATUS); 2406 2450 2407 - if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 2408 - u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 2409 - SDHCI_CARD_PRESENT; 2451 + DBG("*** %s got interrupt: 0x%08x\n", 2452 + mmc_hostname(host->mmc), intmask); 2410 2453 2411 - /* 2412 - * There is a observation on i.mx esdhc. INSERT bit will be 2413 - * immediately set again when it gets cleared, if a card is 2414 - * inserted. We have to mask the irq to prevent interrupt 2415 - * storm which will freeze the system. And the REMOVE gets 2416 - * the same situation. 
2417 - * 2418 - * More testing are needed here to ensure it works for other 2419 - * platforms though. 2420 - */ 2421 - sdhci_mask_irqs(host, present ? SDHCI_INT_CARD_INSERT : 2422 - SDHCI_INT_CARD_REMOVE); 2423 - sdhci_unmask_irqs(host, present ? SDHCI_INT_CARD_REMOVE : 2424 - SDHCI_INT_CARD_INSERT); 2454 + if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 2455 + u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 2456 + SDHCI_CARD_PRESENT; 2425 2457 2426 - sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | 2427 - SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); 2428 - intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE); 2429 - tasklet_schedule(&host->card_tasklet); 2430 - } 2458 + /* 2459 + * There is a observation on i.mx esdhc. INSERT 2460 + * bit will be immediately set again when it gets 2461 + * cleared, if a card is inserted. We have to mask 2462 + * the irq to prevent interrupt storm which will 2463 + * freeze the system. And the REMOVE gets the 2464 + * same situation. 2465 + * 2466 + * More testing are needed here to ensure it works 2467 + * for other platforms though. 2468 + */ 2469 + host->ier &= ~(SDHCI_INT_CARD_INSERT | 2470 + SDHCI_INT_CARD_REMOVE); 2471 + host->ier |= present ? 
SDHCI_INT_CARD_REMOVE : 2472 + SDHCI_INT_CARD_INSERT; 2473 + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2474 + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2431 2475 2432 - if (intmask & SDHCI_INT_CMD_MASK) { 2433 - sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK, 2434 - SDHCI_INT_STATUS); 2435 - sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK); 2436 - } 2476 + sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | 2477 + SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); 2437 2478 2438 - if (intmask & SDHCI_INT_DATA_MASK) { 2439 - sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK, 2440 - SDHCI_INT_STATUS); 2441 - sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); 2442 - } 2479 + host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT | 2480 + SDHCI_INT_CARD_REMOVE); 2481 + result = IRQ_WAKE_THREAD; 2482 + } 2443 2483 2444 - intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK); 2484 + if (intmask & SDHCI_INT_CMD_MASK) 2485 + sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK); 2445 2486 2446 - intmask &= ~SDHCI_INT_ERROR; 2487 + if (intmask & SDHCI_INT_DATA_MASK) 2488 + sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); 2447 2489 2448 - if (intmask & SDHCI_INT_BUS_POWER) { 2449 - pr_err("%s: Card is consuming too much power!\n", 2450 - mmc_hostname(host->mmc)); 2451 - sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS); 2452 - } 2490 + if (intmask & SDHCI_INT_BUS_POWER) 2491 + pr_err("%s: Card is consuming too much power!\n", 2492 + mmc_hostname(host->mmc)); 2453 2493 2454 - intmask &= ~SDHCI_INT_BUS_POWER; 2494 + if (intmask & SDHCI_INT_CARD_INT) { 2495 + sdhci_enable_sdio_irq_nolock(host, false); 2496 + host->thread_isr |= SDHCI_INT_CARD_INT; 2497 + result = IRQ_WAKE_THREAD; 2498 + } 2455 2499 2456 - if (intmask & SDHCI_INT_CARD_INT) 2457 - cardint = 1; 2500 + intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE | 2501 + SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 2502 + SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER | 2503 + SDHCI_INT_CARD_INT); 2458 2504 
2459 - intmask &= ~SDHCI_INT_CARD_INT; 2505 + if (intmask) { 2506 + unexpected |= intmask; 2507 + sdhci_writel(host, intmask, SDHCI_INT_STATUS); 2508 + } 2460 2509 2461 - if (intmask) { 2462 - unexpected |= intmask; 2463 - sdhci_writel(host, intmask, SDHCI_INT_STATUS); 2464 - } 2510 + if (result == IRQ_NONE) 2511 + result = IRQ_HANDLED; 2465 2512 2466 - result = IRQ_HANDLED; 2467 - 2468 - intmask = sdhci_readl(host, SDHCI_INT_STATUS); 2469 - 2470 - /* 2471 - * If we know we'll call the driver to signal SDIO IRQ, disregard 2472 - * further indications of Card Interrupt in the status to avoid a 2473 - * needless loop. 2474 - */ 2475 - if (cardint) 2476 - intmask &= ~SDHCI_INT_CARD_INT; 2477 - if (intmask && --max_loops) 2478 - goto again; 2513 + intmask = sdhci_readl(host, SDHCI_INT_STATUS); 2514 + } while (intmask && --max_loops); 2479 2515 out: 2480 2516 spin_unlock(&host->lock); 2481 2517 ··· 2477 2527 mmc_hostname(host->mmc), unexpected); 2478 2528 sdhci_dumpregs(host); 2479 2529 } 2480 - /* 2481 - * We have to delay this as it calls back into the driver. 
2482 - */ 2483 - if (cardint) 2484 - mmc_signal_sdio_irq(host->mmc); 2485 2530 2486 2531 return result; 2532 + } 2533 + 2534 + static irqreturn_t sdhci_thread_irq(int irq, void *dev_id) 2535 + { 2536 + struct sdhci_host *host = dev_id; 2537 + unsigned long flags; 2538 + u32 isr; 2539 + 2540 + spin_lock_irqsave(&host->lock, flags); 2541 + isr = host->thread_isr; 2542 + host->thread_isr = 0; 2543 + spin_unlock_irqrestore(&host->lock, flags); 2544 + 2545 + if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 2546 + sdhci_card_event(host->mmc); 2547 + mmc_detect_change(host->mmc, msecs_to_jiffies(200)); 2548 + } 2549 + 2550 + if (isr & SDHCI_INT_CARD_INT) { 2551 + sdio_run_irqs(host->mmc); 2552 + 2553 + spin_lock_irqsave(&host->lock, flags); 2554 + if (host->flags & SDHCI_SDIO_IRQ_ENABLED) 2555 + sdhci_enable_sdio_irq_nolock(host, true); 2556 + spin_unlock_irqrestore(&host->lock, flags); 2557 + } 2558 + 2559 + return isr ? IRQ_HANDLED : IRQ_NONE; 2487 2560 } 2488 2561 2489 2562 /*****************************************************************************\ ··· 2545 2572 2546 2573 int sdhci_suspend_host(struct sdhci_host *host) 2547 2574 { 2548 - if (host->ops->platform_suspend) 2549 - host->ops->platform_suspend(host); 2550 - 2551 2575 sdhci_disable_card_detection(host); 2552 2576 2553 2577 /* Disable tuning since we are suspending */ ··· 2554 2584 } 2555 2585 2556 2586 if (!device_may_wakeup(mmc_dev(host->mmc))) { 2557 - sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK); 2587 + host->ier = 0; 2588 + sdhci_writel(host, 0, SDHCI_INT_ENABLE); 2589 + sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 2558 2590 free_irq(host->irq, host); 2559 2591 } else { 2560 2592 sdhci_enable_irq_wakeups(host); ··· 2577 2605 } 2578 2606 2579 2607 if (!device_may_wakeup(mmc_dev(host->mmc))) { 2580 - ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, 2581 - mmc_hostname(host->mmc), host); 2608 + ret = request_threaded_irq(host->irq, sdhci_irq, 2609 + sdhci_thread_irq, IRQF_SHARED, 2610 + 
mmc_hostname(host->mmc), host); 2582 2611 if (ret) 2583 2612 return ret; 2584 2613 } else { ··· 2600 2627 } 2601 2628 2602 2629 sdhci_enable_card_detection(host); 2603 - 2604 - if (host->ops->platform_resume) 2605 - host->ops->platform_resume(host); 2606 2630 2607 2631 /* Set the re-tuning expiration flag */ 2608 2632 if (host->flags & SDHCI_USING_RETUNING_TIMER) ··· 2652 2682 } 2653 2683 2654 2684 spin_lock_irqsave(&host->lock, flags); 2655 - sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK); 2685 + host->ier &= SDHCI_INT_CARD_INT; 2686 + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2687 + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2656 2688 spin_unlock_irqrestore(&host->lock, flags); 2657 2689 2658 - synchronize_irq(host->irq); 2690 + synchronize_hardirq(host->irq); 2659 2691 2660 2692 spin_lock_irqsave(&host->lock, flags); 2661 2693 host->runtime_suspended = true; ··· 2701 2729 host->runtime_suspended = false; 2702 2730 2703 2731 /* Enable SDIO IRQ */ 2704 - if ((host->flags & SDHCI_SDIO_IRQ_ENABLED)) 2732 + if (host->flags & SDHCI_SDIO_IRQ_ENABLED) 2705 2733 sdhci_enable_sdio_irq_nolock(host, true); 2706 2734 2707 2735 /* Enable Card Detection */ ··· 2760 2788 if (debug_quirks2) 2761 2789 host->quirks2 = debug_quirks2; 2762 2790 2763 - sdhci_reset(host, SDHCI_RESET_ALL); 2791 + sdhci_do_reset(host, SDHCI_RESET_ALL); 2764 2792 2765 2793 host->version = sdhci_readw(host, SDHCI_HOST_VERSION); 2766 2794 host->version = (host->version & SDHCI_SPEC_VER_MASK) ··· 2820 2848 * (128) and potentially one alignment transfer for 2821 2849 * each of those entries. 
2822 2850 */ 2823 - host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL); 2851 + host->adma_desc = dma_alloc_coherent(mmc_dev(host->mmc), 2852 + ADMA_SIZE, &host->adma_addr, 2853 + GFP_KERNEL); 2824 2854 host->align_buffer = kmalloc(128 * 4, GFP_KERNEL); 2825 2855 if (!host->adma_desc || !host->align_buffer) { 2826 - kfree(host->adma_desc); 2856 + dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE, 2857 + host->adma_desc, host->adma_addr); 2827 2858 kfree(host->align_buffer); 2828 2859 pr_warning("%s: Unable to allocate ADMA " 2829 2860 "buffers. Falling back to standard DMA.\n", 2830 2861 mmc_hostname(mmc)); 2831 2862 host->flags &= ~SDHCI_USE_ADMA; 2863 + host->adma_desc = NULL; 2864 + host->align_buffer = NULL; 2865 + } else if (host->adma_addr & 3) { 2866 + pr_warning("%s: unable to allocate aligned ADMA descriptor\n", 2867 + mmc_hostname(mmc)); 2868 + host->flags &= ~SDHCI_USE_ADMA; 2869 + dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE, 2870 + host->adma_desc, host->adma_addr); 2871 + kfree(host->align_buffer); 2872 + host->adma_desc = NULL; 2873 + host->align_buffer = NULL; 2832 2874 } 2833 2875 } 2834 2876 ··· 2927 2941 mmc->max_busy_timeout = (1 << 27) / host->timeout_clk; 2928 2942 2929 2943 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23; 2944 + mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; 2930 2945 2931 2946 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) 2932 2947 host->flags |= SDHCI_AUTO_CMD12; ··· 3199 3212 /* 3200 3213 * Init tasklets. 
3201 3214 */ 3202 - tasklet_init(&host->card_tasklet, 3203 - sdhci_tasklet_card, (unsigned long)host); 3204 3215 tasklet_init(&host->finish_tasklet, 3205 3216 sdhci_tasklet_finish, (unsigned long)host); 3206 3217 ··· 3215 3230 3216 3231 sdhci_init(host, 0); 3217 3232 3218 - ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, 3219 - mmc_hostname(mmc), host); 3233 + ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq, 3234 + IRQF_SHARED, mmc_hostname(mmc), host); 3220 3235 if (ret) { 3221 3236 pr_err("%s: Failed to request IRQ %d: %d\n", 3222 3237 mmc_hostname(mmc), host->irq, ret); ··· 3258 3273 3259 3274 #ifdef SDHCI_USE_LEDS_CLASS 3260 3275 reset: 3261 - sdhci_reset(host, SDHCI_RESET_ALL); 3262 - sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK); 3276 + sdhci_do_reset(host, SDHCI_RESET_ALL); 3277 + sdhci_writel(host, 0, SDHCI_INT_ENABLE); 3278 + sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 3263 3279 free_irq(host->irq, host); 3264 3280 #endif 3265 3281 untasklet: 3266 - tasklet_kill(&host->card_tasklet); 3267 3282 tasklet_kill(&host->finish_tasklet); 3268 3283 3269 3284 return ret; ··· 3300 3315 #endif 3301 3316 3302 3317 if (!dead) 3303 - sdhci_reset(host, SDHCI_RESET_ALL); 3318 + sdhci_do_reset(host, SDHCI_RESET_ALL); 3304 3319 3305 - sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK); 3320 + sdhci_writel(host, 0, SDHCI_INT_ENABLE); 3321 + sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 3306 3322 free_irq(host->irq, host); 3307 3323 3308 3324 del_timer_sync(&host->timer); 3309 3325 3310 - tasklet_kill(&host->card_tasklet); 3311 3326 tasklet_kill(&host->finish_tasklet); 3312 3327 3313 3328 if (host->vmmc) { ··· 3320 3335 regulator_put(host->vqmmc); 3321 3336 } 3322 3337 3323 - kfree(host->adma_desc); 3338 + if (host->adma_desc) 3339 + dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE, 3340 + host->adma_desc, host->adma_addr); 3324 3341 kfree(host->align_buffer); 3325 3342 3326 3343 host->adma_desc = NULL;
+13 -7
drivers/mmc/host/sdhci.h
··· 281 281 unsigned int (*get_max_clock)(struct sdhci_host *host); 282 282 unsigned int (*get_min_clock)(struct sdhci_host *host); 283 283 unsigned int (*get_timeout_clock)(struct sdhci_host *host); 284 - int (*platform_bus_width)(struct sdhci_host *host, 285 - int width); 284 + void (*set_bus_width)(struct sdhci_host *host, int width); 286 285 void (*platform_send_init_74_clocks)(struct sdhci_host *host, 287 286 u8 power_mode); 288 287 unsigned int (*get_ro)(struct sdhci_host *host); 289 - void (*platform_reset_enter)(struct sdhci_host *host, u8 mask); 290 - void (*platform_reset_exit)(struct sdhci_host *host, u8 mask); 288 + void (*reset)(struct sdhci_host *host, u8 mask); 291 289 int (*platform_execute_tuning)(struct sdhci_host *host, u32 opcode); 292 - int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs); 290 + void (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs); 293 291 void (*hw_reset)(struct sdhci_host *host); 294 - void (*platform_suspend)(struct sdhci_host *host); 295 - void (*platform_resume)(struct sdhci_host *host); 296 292 void (*adma_workaround)(struct sdhci_host *host, u32 intmask); 297 293 void (*platform_init)(struct sdhci_host *host); 298 294 void (*card_event)(struct sdhci_host *host); ··· 392 396 extern void sdhci_remove_host(struct sdhci_host *host, int dead); 393 397 extern void sdhci_send_command(struct sdhci_host *host, 394 398 struct mmc_command *cmd); 399 + 400 + static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host) 401 + { 402 + return !!(host->flags & SDHCI_SDIO_IRQ_ENABLED); 403 + } 404 + 405 + void sdhci_set_clock(struct sdhci_host *host, unsigned int clock); 406 + void sdhci_set_bus_width(struct sdhci_host *host, int width); 407 + void sdhci_reset(struct sdhci_host *host, u8 mask); 408 + void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing); 395 409 396 410 #ifdef CONFIG_PM 397 411 extern int sdhci_suspend_host(struct sdhci_host *host);
+5 -4
drivers/mmc/host/sh_mmcif.c
··· 803 803 break; 804 804 } 805 805 switch (host->timing) { 806 - case MMC_TIMING_UHS_DDR50: 806 + case MMC_TIMING_MMC_DDR52: 807 807 /* 808 808 * MMC core will only set this timing, if the host 809 - * advertises the MMC_CAP_UHS_DDR50 capability. MMCIF 810 - * implementations with this capability, e.g. sh73a0, 811 - * will have to set it in their platform data. 809 + * advertises the MMC_CAP_1_8V_DDR/MMC_CAP_1_2V_DDR 810 + * capability. MMCIF implementations with this 811 + * capability, e.g. sh73a0, will have to set it 812 + * in their platform data. 812 813 */ 813 814 tmp |= CMD_SET_DARS; 814 815 break;
+1847
drivers/mmc/host/usdhi6rol0.c
··· 1 + /* 2 + * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd. 3 + * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de> 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of version 2 of the GNU General Public License as 7 + * published by the Free Software Foundation. 8 + */ 9 + 10 + #include <linux/clk.h> 11 + #include <linux/delay.h> 12 + #include <linux/device.h> 13 + #include <linux/dma-mapping.h> 14 + #include <linux/dmaengine.h> 15 + #include <linux/highmem.h> 16 + #include <linux/interrupt.h> 17 + #include <linux/io.h> 18 + #include <linux/log2.h> 19 + #include <linux/mmc/host.h> 20 + #include <linux/mmc/mmc.h> 21 + #include <linux/mmc/sd.h> 22 + #include <linux/mmc/sdio.h> 23 + #include <linux/module.h> 24 + #include <linux/pagemap.h> 25 + #include <linux/platform_device.h> 26 + #include <linux/scatterlist.h> 27 + #include <linux/string.h> 28 + #include <linux/time.h> 29 + #include <linux/virtio.h> 30 + #include <linux/workqueue.h> 31 + 32 + #define USDHI6_SD_CMD 0x0000 33 + #define USDHI6_SD_PORT_SEL 0x0004 34 + #define USDHI6_SD_ARG 0x0008 35 + #define USDHI6_SD_STOP 0x0010 36 + #define USDHI6_SD_SECCNT 0x0014 37 + #define USDHI6_SD_RSP10 0x0018 38 + #define USDHI6_SD_RSP32 0x0020 39 + #define USDHI6_SD_RSP54 0x0028 40 + #define USDHI6_SD_RSP76 0x0030 41 + #define USDHI6_SD_INFO1 0x0038 42 + #define USDHI6_SD_INFO2 0x003c 43 + #define USDHI6_SD_INFO1_MASK 0x0040 44 + #define USDHI6_SD_INFO2_MASK 0x0044 45 + #define USDHI6_SD_CLK_CTRL 0x0048 46 + #define USDHI6_SD_SIZE 0x004c 47 + #define USDHI6_SD_OPTION 0x0050 48 + #define USDHI6_SD_ERR_STS1 0x0058 49 + #define USDHI6_SD_ERR_STS2 0x005c 50 + #define USDHI6_SD_BUF0 0x0060 51 + #define USDHI6_SDIO_MODE 0x0068 52 + #define USDHI6_SDIO_INFO1 0x006c 53 + #define USDHI6_SDIO_INFO1_MASK 0x0070 54 + #define USDHI6_CC_EXT_MODE 0x01b0 55 + #define USDHI6_SOFT_RST 0x01c0 56 + #define USDHI6_VERSION 0x01c4 57 + #define USDHI6_HOST_MODE 0x01c8 58 + 
#define USDHI6_SDIF_MODE 0x01cc 59 + 60 + #define USDHI6_SD_CMD_APP 0x0040 61 + #define USDHI6_SD_CMD_MODE_RSP_AUTO 0x0000 62 + #define USDHI6_SD_CMD_MODE_RSP_NONE 0x0300 63 + #define USDHI6_SD_CMD_MODE_RSP_R1 0x0400 /* Also R5, R6, R7 */ 64 + #define USDHI6_SD_CMD_MODE_RSP_R1B 0x0500 /* R1b */ 65 + #define USDHI6_SD_CMD_MODE_RSP_R2 0x0600 66 + #define USDHI6_SD_CMD_MODE_RSP_R3 0x0700 /* Also R4 */ 67 + #define USDHI6_SD_CMD_DATA 0x0800 68 + #define USDHI6_SD_CMD_READ 0x1000 69 + #define USDHI6_SD_CMD_MULTI 0x2000 70 + #define USDHI6_SD_CMD_CMD12_AUTO_OFF 0x4000 71 + 72 + #define USDHI6_CC_EXT_MODE_SDRW BIT(1) 73 + 74 + #define USDHI6_SD_INFO1_RSP_END BIT(0) 75 + #define USDHI6_SD_INFO1_ACCESS_END BIT(2) 76 + #define USDHI6_SD_INFO1_CARD_OUT BIT(3) 77 + #define USDHI6_SD_INFO1_CARD_IN BIT(4) 78 + #define USDHI6_SD_INFO1_CD BIT(5) 79 + #define USDHI6_SD_INFO1_WP BIT(7) 80 + #define USDHI6_SD_INFO1_D3_CARD_OUT BIT(8) 81 + #define USDHI6_SD_INFO1_D3_CARD_IN BIT(9) 82 + 83 + #define USDHI6_SD_INFO2_CMD_ERR BIT(0) 84 + #define USDHI6_SD_INFO2_CRC_ERR BIT(1) 85 + #define USDHI6_SD_INFO2_END_ERR BIT(2) 86 + #define USDHI6_SD_INFO2_TOUT BIT(3) 87 + #define USDHI6_SD_INFO2_IWA_ERR BIT(4) 88 + #define USDHI6_SD_INFO2_IRA_ERR BIT(5) 89 + #define USDHI6_SD_INFO2_RSP_TOUT BIT(6) 90 + #define USDHI6_SD_INFO2_SDDAT0 BIT(7) 91 + #define USDHI6_SD_INFO2_BRE BIT(8) 92 + #define USDHI6_SD_INFO2_BWE BIT(9) 93 + #define USDHI6_SD_INFO2_SCLKDIVEN BIT(13) 94 + #define USDHI6_SD_INFO2_CBSY BIT(14) 95 + #define USDHI6_SD_INFO2_ILA BIT(15) 96 + 97 + #define USDHI6_SD_INFO1_CARD_INSERT (USDHI6_SD_INFO1_CARD_IN | USDHI6_SD_INFO1_D3_CARD_IN) 98 + #define USDHI6_SD_INFO1_CARD_EJECT (USDHI6_SD_INFO1_CARD_OUT | USDHI6_SD_INFO1_D3_CARD_OUT) 99 + #define USDHI6_SD_INFO1_CARD (USDHI6_SD_INFO1_CARD_INSERT | USDHI6_SD_INFO1_CARD_EJECT) 100 + #define USDHI6_SD_INFO1_CARD_CD (USDHI6_SD_INFO1_CARD_IN | USDHI6_SD_INFO1_CARD_OUT) 101 + 102 + #define USDHI6_SD_INFO2_ERR (USDHI6_SD_INFO2_CMD_ERR | \ 103 + 
USDHI6_SD_INFO2_CRC_ERR | USDHI6_SD_INFO2_END_ERR | \ 104 + USDHI6_SD_INFO2_TOUT | USDHI6_SD_INFO2_IWA_ERR | \ 105 + USDHI6_SD_INFO2_IRA_ERR | USDHI6_SD_INFO2_RSP_TOUT | \ 106 + USDHI6_SD_INFO2_ILA) 107 + 108 + #define USDHI6_SD_INFO1_IRQ (USDHI6_SD_INFO1_RSP_END | USDHI6_SD_INFO1_ACCESS_END | \ 109 + USDHI6_SD_INFO1_CARD) 110 + 111 + #define USDHI6_SD_INFO2_IRQ (USDHI6_SD_INFO2_ERR | USDHI6_SD_INFO2_BRE | \ 112 + USDHI6_SD_INFO2_BWE | 0x0800 | USDHI6_SD_INFO2_ILA) 113 + 114 + #define USDHI6_SD_CLK_CTRL_SCLKEN BIT(8) 115 + 116 + #define USDHI6_SD_STOP_STP BIT(0) 117 + #define USDHI6_SD_STOP_SEC BIT(8) 118 + 119 + #define USDHI6_SDIO_INFO1_IOIRQ BIT(0) 120 + #define USDHI6_SDIO_INFO1_EXPUB52 BIT(14) 121 + #define USDHI6_SDIO_INFO1_EXWT BIT(15) 122 + 123 + #define USDHI6_SD_ERR_STS1_CRC_NO_ERROR BIT(13) 124 + 125 + #define USDHI6_SOFT_RST_RESERVED (BIT(1) | BIT(2)) 126 + #define USDHI6_SOFT_RST_RESET BIT(0) 127 + 128 + #define USDHI6_SD_OPTION_TIMEOUT_SHIFT 4 129 + #define USDHI6_SD_OPTION_TIMEOUT_MASK (0xf << USDHI6_SD_OPTION_TIMEOUT_SHIFT) 130 + #define USDHI6_SD_OPTION_WIDTH_1 BIT(15) 131 + 132 + #define USDHI6_SD_PORT_SEL_PORTS_SHIFT 8 133 + 134 + #define USDHI6_SD_CLK_CTRL_DIV_MASK 0xff 135 + 136 + #define USDHI6_SDIO_INFO1_IRQ (USDHI6_SDIO_INFO1_IOIRQ | 3 | \ 137 + USDHI6_SDIO_INFO1_EXPUB52 | USDHI6_SDIO_INFO1_EXWT) 138 + 139 + #define USDHI6_MIN_DMA 64 140 + 141 + enum usdhi6_wait_for { 142 + USDHI6_WAIT_FOR_REQUEST, 143 + USDHI6_WAIT_FOR_CMD, 144 + USDHI6_WAIT_FOR_MREAD, 145 + USDHI6_WAIT_FOR_MWRITE, 146 + USDHI6_WAIT_FOR_READ, 147 + USDHI6_WAIT_FOR_WRITE, 148 + USDHI6_WAIT_FOR_DATA_END, 149 + USDHI6_WAIT_FOR_STOP, 150 + USDHI6_WAIT_FOR_DMA, 151 + }; 152 + 153 + struct usdhi6_page { 154 + struct page *page; 155 + void *mapped; /* mapped page */ 156 + }; 157 + 158 + struct usdhi6_host { 159 + struct mmc_host *mmc; 160 + struct mmc_request *mrq; 161 + void __iomem *base; 162 + struct clk *clk; 163 + 164 + /* SG memory handling */ 165 + 166 + /* Common for 
multiple and single block requests */ 167 + struct usdhi6_page pg; /* current page from an SG */ 168 + void *blk_page; /* either a mapped page, or the bounce buffer */ 169 + size_t offset; /* offset within a page, including sg->offset */ 170 + 171 + /* Blocks, crossing a page boundary */ 172 + size_t head_len; 173 + struct usdhi6_page head_pg; 174 + 175 + /* A bounce buffer for unaligned blocks or blocks, crossing a page boundary */ 176 + struct scatterlist bounce_sg; 177 + u8 bounce_buf[512]; 178 + 179 + /* Multiple block requests only */ 180 + struct scatterlist *sg; /* current SG segment */ 181 + int page_idx; /* page index within an SG segment */ 182 + 183 + enum usdhi6_wait_for wait; 184 + u32 status_mask; 185 + u32 status2_mask; 186 + u32 sdio_mask; 187 + u32 io_error; 188 + u32 irq_status; 189 + unsigned long imclk; 190 + unsigned long rate; 191 + bool app_cmd; 192 + 193 + /* Timeout handling */ 194 + struct delayed_work timeout_work; 195 + unsigned long timeout; 196 + 197 + /* DMA support */ 198 + struct dma_chan *chan_rx; 199 + struct dma_chan *chan_tx; 200 + bool dma_active; 201 + }; 202 + 203 + /* I/O primitives */ 204 + 205 + static void usdhi6_write(struct usdhi6_host *host, u32 reg, u32 data) 206 + { 207 + iowrite32(data, host->base + reg); 208 + dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__, 209 + host->base, reg, data); 210 + } 211 + 212 + static void usdhi6_write16(struct usdhi6_host *host, u32 reg, u16 data) 213 + { 214 + iowrite16(data, host->base + reg); 215 + dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__, 216 + host->base, reg, data); 217 + } 218 + 219 + static u32 usdhi6_read(struct usdhi6_host *host, u32 reg) 220 + { 221 + u32 data = ioread32(host->base + reg); 222 + dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__, 223 + host->base, reg, data); 224 + return data; 225 + } 226 + 227 + static u16 usdhi6_read16(struct usdhi6_host *host, u32 reg) 228 + { 229 + u16 data = ioread16(host->base 
+ reg); 230 + dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__, 231 + host->base, reg, data); 232 + return data; 233 + } 234 + 235 + static void usdhi6_irq_enable(struct usdhi6_host *host, u32 info1, u32 info2) 236 + { 237 + host->status_mask = USDHI6_SD_INFO1_IRQ & ~info1; 238 + host->status2_mask = USDHI6_SD_INFO2_IRQ & ~info2; 239 + usdhi6_write(host, USDHI6_SD_INFO1_MASK, host->status_mask); 240 + usdhi6_write(host, USDHI6_SD_INFO2_MASK, host->status2_mask); 241 + } 242 + 243 + static void usdhi6_wait_for_resp(struct usdhi6_host *host) 244 + { 245 + usdhi6_irq_enable(host, USDHI6_SD_INFO1_RSP_END | 246 + USDHI6_SD_INFO1_ACCESS_END | USDHI6_SD_INFO1_CARD_CD, 247 + USDHI6_SD_INFO2_ERR); 248 + } 249 + 250 + static void usdhi6_wait_for_brwe(struct usdhi6_host *host, bool read) 251 + { 252 + usdhi6_irq_enable(host, USDHI6_SD_INFO1_ACCESS_END | 253 + USDHI6_SD_INFO1_CARD_CD, USDHI6_SD_INFO2_ERR | 254 + (read ? USDHI6_SD_INFO2_BRE : USDHI6_SD_INFO2_BWE)); 255 + } 256 + 257 + static void usdhi6_only_cd(struct usdhi6_host *host) 258 + { 259 + /* Mask all except card hotplug */ 260 + usdhi6_irq_enable(host, USDHI6_SD_INFO1_CARD_CD, 0); 261 + } 262 + 263 + static void usdhi6_mask_all(struct usdhi6_host *host) 264 + { 265 + usdhi6_irq_enable(host, 0, 0); 266 + } 267 + 268 + static int usdhi6_error_code(struct usdhi6_host *host) 269 + { 270 + u32 err; 271 + 272 + usdhi6_write(host, USDHI6_SD_STOP, USDHI6_SD_STOP_STP); 273 + 274 + if (host->io_error & 275 + (USDHI6_SD_INFO2_RSP_TOUT | USDHI6_SD_INFO2_TOUT)) { 276 + u32 rsp54 = usdhi6_read(host, USDHI6_SD_RSP54); 277 + int opc = host->mrq ? 
host->mrq->cmd->opcode : -1; 278 + 279 + err = usdhi6_read(host, USDHI6_SD_ERR_STS2); 280 + /* Response timeout is often normal, don't spam the log */ 281 + if (host->wait == USDHI6_WAIT_FOR_CMD) 282 + dev_dbg(mmc_dev(host->mmc), 283 + "T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n", 284 + err, rsp54, host->wait, opc); 285 + else 286 + dev_warn(mmc_dev(host->mmc), 287 + "T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n", 288 + err, rsp54, host->wait, opc); 289 + return -ETIMEDOUT; 290 + } 291 + 292 + err = usdhi6_read(host, USDHI6_SD_ERR_STS1); 293 + if (err != USDHI6_SD_ERR_STS1_CRC_NO_ERROR) 294 + dev_warn(mmc_dev(host->mmc), "Err sts 0x%x, state %u, CMD%d\n", 295 + err, host->wait, host->mrq ? host->mrq->cmd->opcode : -1); 296 + if (host->io_error & USDHI6_SD_INFO2_ILA) 297 + return -EILSEQ; 298 + 299 + return -EIO; 300 + } 301 + 302 + /* Scatter-Gather management */ 303 + 304 + /* 305 + * In PIO mode we have to map each page separately, using kmap(). That way 306 + * adjacent pages are mapped to non-adjacent virtual addresses. That's why we 307 + * have to use a bounce buffer for blocks, crossing page boundaries. Such blocks 308 + * have been observed with an SDIO WiFi card (b43 driver). 
309 + */ 310 + static void usdhi6_blk_bounce(struct usdhi6_host *host, 311 + struct scatterlist *sg) 312 + { 313 + struct mmc_data *data = host->mrq->data; 314 + size_t blk_head = host->head_len; 315 + 316 + dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u of %u SG: %ux%u @ 0x%x\n", 317 + __func__, host->mrq->cmd->opcode, data->sg_len, 318 + data->blksz, data->blocks, sg->offset); 319 + 320 + host->head_pg.page = host->pg.page; 321 + host->head_pg.mapped = host->pg.mapped; 322 + host->pg.page = nth_page(host->pg.page, 1); 323 + host->pg.mapped = kmap(host->pg.page); 324 + 325 + host->blk_page = host->bounce_buf; 326 + host->offset = 0; 327 + 328 + if (data->flags & MMC_DATA_READ) 329 + return; 330 + 331 + memcpy(host->bounce_buf, host->head_pg.mapped + PAGE_SIZE - blk_head, 332 + blk_head); 333 + memcpy(host->bounce_buf + blk_head, host->pg.mapped, 334 + data->blksz - blk_head); 335 + } 336 + 337 + /* Only called for multiple block IO */ 338 + static void usdhi6_sg_prep(struct usdhi6_host *host) 339 + { 340 + struct mmc_request *mrq = host->mrq; 341 + struct mmc_data *data = mrq->data; 342 + 343 + usdhi6_write(host, USDHI6_SD_SECCNT, data->blocks); 344 + 345 + host->sg = data->sg; 346 + /* TODO: if we always map, this is redundant */ 347 + host->offset = host->sg->offset; 348 + } 349 + 350 + /* Map the first page in an SG segment: common for multiple and single block IO */ 351 + static void *usdhi6_sg_map(struct usdhi6_host *host) 352 + { 353 + struct mmc_data *data = host->mrq->data; 354 + struct scatterlist *sg = data->sg_len > 1 ? 
host->sg : data->sg; 355 + size_t head = PAGE_SIZE - sg->offset; 356 + size_t blk_head = head % data->blksz; 357 + 358 + WARN(host->pg.page, "%p not properly unmapped!\n", host->pg.page); 359 + if (WARN(sg_dma_len(sg) % data->blksz, 360 + "SG size %zd isn't a multiple of block size %zd\n", 361 + sg_dma_len(sg), data->blksz)) 362 + return NULL; 363 + 364 + host->pg.page = sg_page(sg); 365 + host->pg.mapped = kmap(host->pg.page); 366 + host->offset = sg->offset; 367 + 368 + /* 369 + * Block size must be a power of 2 for multi-block transfers, 370 + * therefore blk_head is equal for all pages in this SG 371 + */ 372 + host->head_len = blk_head; 373 + 374 + if (head < data->blksz) 375 + /* 376 + * The first block in the SG crosses a page boundary. 377 + * Max blksz = 512, so blocks can only span 2 pages 378 + */ 379 + usdhi6_blk_bounce(host, sg); 380 + else 381 + host->blk_page = host->pg.mapped; 382 + 383 + dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p + %u for CMD%u @ 0x%p\n", 384 + host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped, 385 + sg->offset, host->mrq->cmd->opcode, host->mrq); 386 + 387 + return host->blk_page + host->offset; 388 + } 389 + 390 + /* Unmap the current page: common for multiple and single block IO */ 391 + static void usdhi6_sg_unmap(struct usdhi6_host *host, bool force) 392 + { 393 + struct mmc_data *data = host->mrq->data; 394 + struct page *page = host->head_pg.page; 395 + 396 + if (page) { 397 + /* Previous block was cross-page boundary */ 398 + struct scatterlist *sg = data->sg_len > 1 ? 
399 + host->sg : data->sg; 400 + size_t blk_head = host->head_len; 401 + 402 + if (!data->error && data->flags & MMC_DATA_READ) { 403 + memcpy(host->head_pg.mapped + PAGE_SIZE - blk_head, 404 + host->bounce_buf, blk_head); 405 + memcpy(host->pg.mapped, host->bounce_buf + blk_head, 406 + data->blksz - blk_head); 407 + } 408 + 409 + flush_dcache_page(page); 410 + kunmap(page); 411 + 412 + host->head_pg.page = NULL; 413 + 414 + if (!force && sg_dma_len(sg) + sg->offset > 415 + (host->page_idx << PAGE_SHIFT) + data->blksz - blk_head) 416 + /* More blocks in this SG, don't unmap the next page */ 417 + return; 418 + } 419 + 420 + page = host->pg.page; 421 + if (!page) 422 + return; 423 + 424 + flush_dcache_page(page); 425 + kunmap(page); 426 + 427 + host->pg.page = NULL; 428 + } 429 + 430 + /* Called from MMC_WRITE_MULTIPLE_BLOCK or MMC_READ_MULTIPLE_BLOCK */ 431 + static void usdhi6_sg_advance(struct usdhi6_host *host) 432 + { 433 + struct mmc_data *data = host->mrq->data; 434 + size_t done, total; 435 + 436 + /* New offset: set at the end of the previous block */ 437 + if (host->head_pg.page) { 438 + /* Finished a cross-page block, jump to the new page */ 439 + host->page_idx++; 440 + host->offset = data->blksz - host->head_len; 441 + host->blk_page = host->pg.mapped; 442 + usdhi6_sg_unmap(host, false); 443 + } else { 444 + host->offset += data->blksz; 445 + /* The completed block didn't cross a page boundary */ 446 + if (host->offset == PAGE_SIZE) { 447 + /* If required, we'll map the page below */ 448 + host->offset = 0; 449 + host->page_idx++; 450 + } 451 + } 452 + 453 + /* 454 + * Now host->blk_page + host->offset point at the end of our last block 455 + * and host->page_idx is the index of the page, in which our new block 456 + * is located, if any 457 + */ 458 + 459 + done = (host->page_idx << PAGE_SHIFT) + host->offset; 460 + total = host->sg->offset + sg_dma_len(host->sg); 461 + 462 + dev_dbg(mmc_dev(host->mmc), "%s(): %zu of %zu @ %u\n", __func__, 463 + done, 
total, host->offset); 464 + 465 + if (done < total && host->offset) { 466 + /* More blocks in this page */ 467 + if (host->offset + data->blksz > PAGE_SIZE) 468 + /* We approached at a block, that spans 2 pages */ 469 + usdhi6_blk_bounce(host, host->sg); 470 + 471 + return; 472 + } 473 + 474 + /* Finished current page or an SG segment */ 475 + usdhi6_sg_unmap(host, false); 476 + 477 + if (done == total) { 478 + /* 479 + * End of an SG segment or the complete SG: jump to the next 480 + * segment, we'll map it later in usdhi6_blk_read() or 481 + * usdhi6_blk_write() 482 + */ 483 + struct scatterlist *next = sg_next(host->sg); 484 + 485 + host->page_idx = 0; 486 + 487 + if (!next) 488 + host->wait = USDHI6_WAIT_FOR_DATA_END; 489 + host->sg = next; 490 + 491 + if (WARN(next && sg_dma_len(next) % data->blksz, 492 + "SG size %zd isn't a multiple of block size %zd\n", 493 + sg_dma_len(next), data->blksz)) 494 + data->error = -EINVAL; 495 + 496 + return; 497 + } 498 + 499 + /* We cannot get here after crossing a page border */ 500 + 501 + /* Next page in the same SG */ 502 + host->pg.page = nth_page(sg_page(host->sg), host->page_idx); 503 + host->pg.mapped = kmap(host->pg.page); 504 + host->blk_page = host->pg.mapped; 505 + 506 + dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p for CMD%u @ 0x%p\n", 507 + host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped, 508 + host->mrq->cmd->opcode, host->mrq); 509 + } 510 + 511 + /* DMA handling */ 512 + 513 + static void usdhi6_dma_release(struct usdhi6_host *host) 514 + { 515 + host->dma_active = false; 516 + if (host->chan_tx) { 517 + struct dma_chan *chan = host->chan_tx; 518 + host->chan_tx = NULL; 519 + dma_release_channel(chan); 520 + } 521 + if (host->chan_rx) { 522 + struct dma_chan *chan = host->chan_rx; 523 + host->chan_rx = NULL; 524 + dma_release_channel(chan); 525 + } 526 + } 527 + 528 + static void usdhi6_dma_stop_unmap(struct usdhi6_host *host) 529 + { 530 + struct mmc_data *data = host->mrq->data; 531 + 532 + 
if (!host->dma_active) 533 + return; 534 + 535 + usdhi6_write(host, USDHI6_CC_EXT_MODE, 0); 536 + host->dma_active = false; 537 + 538 + if (data->flags & MMC_DATA_READ) 539 + dma_unmap_sg(host->chan_rx->device->dev, data->sg, 540 + data->sg_len, DMA_FROM_DEVICE); 541 + else 542 + dma_unmap_sg(host->chan_tx->device->dev, data->sg, 543 + data->sg_len, DMA_TO_DEVICE); 544 + } 545 + 546 + static void usdhi6_dma_complete(void *arg) 547 + { 548 + struct usdhi6_host *host = arg; 549 + struct mmc_request *mrq = host->mrq; 550 + 551 + if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion for %p!\n", 552 + dev_name(mmc_dev(host->mmc)), mrq)) 553 + return; 554 + 555 + dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u DMA completed\n", __func__, 556 + mrq->cmd->opcode); 557 + 558 + usdhi6_dma_stop_unmap(host); 559 + usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ); 560 + } 561 + 562 + static int usdhi6_dma_setup(struct usdhi6_host *host, struct dma_chan *chan, 563 + enum dma_transfer_direction dir) 564 + { 565 + struct mmc_data *data = host->mrq->data; 566 + struct scatterlist *sg = data->sg; 567 + struct dma_async_tx_descriptor *desc = NULL; 568 + dma_cookie_t cookie = -EINVAL; 569 + enum dma_data_direction data_dir; 570 + int ret; 571 + 572 + switch (dir) { 573 + case DMA_MEM_TO_DEV: 574 + data_dir = DMA_TO_DEVICE; 575 + break; 576 + case DMA_DEV_TO_MEM: 577 + data_dir = DMA_FROM_DEVICE; 578 + break; 579 + default: 580 + return -EINVAL; 581 + } 582 + 583 + ret = dma_map_sg(chan->device->dev, sg, data->sg_len, data_dir); 584 + if (ret > 0) { 585 + host->dma_active = true; 586 + desc = dmaengine_prep_slave_sg(chan, sg, ret, dir, 587 + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 588 + } 589 + 590 + if (desc) { 591 + desc->callback = usdhi6_dma_complete; 592 + desc->callback_param = host; 593 + cookie = dmaengine_submit(desc); 594 + } 595 + 596 + dev_dbg(mmc_dev(host->mmc), "%s(): mapped %d -> %d, cookie %d @ %p\n", 597 + __func__, data->sg_len, ret, cookie, desc); 598 + 599 
+ if (cookie < 0) { 600 + /* DMA failed, fall back to PIO */ 601 + if (ret >= 0) 602 + ret = cookie; 603 + usdhi6_dma_release(host); 604 + dev_warn(mmc_dev(host->mmc), 605 + "DMA failed: %d, falling back to PIO\n", ret); 606 + } 607 + 608 + return cookie; 609 + } 610 + 611 + static int usdhi6_dma_start(struct usdhi6_host *host) 612 + { 613 + if (!host->chan_rx || !host->chan_tx) 614 + return -ENODEV; 615 + 616 + if (host->mrq->data->flags & MMC_DATA_READ) 617 + return usdhi6_dma_setup(host, host->chan_rx, DMA_DEV_TO_MEM); 618 + 619 + return usdhi6_dma_setup(host, host->chan_tx, DMA_MEM_TO_DEV); 620 + } 621 + 622 + static void usdhi6_dma_kill(struct usdhi6_host *host) 623 + { 624 + struct mmc_data *data = host->mrq->data; 625 + 626 + dev_dbg(mmc_dev(host->mmc), "%s(): SG of %u: %ux%u\n", 627 + __func__, data->sg_len, data->blocks, data->blksz); 628 + /* Abort DMA */ 629 + if (data->flags & MMC_DATA_READ) 630 + dmaengine_terminate_all(host->chan_rx); 631 + else 632 + dmaengine_terminate_all(host->chan_tx); 633 + } 634 + 635 + static void usdhi6_dma_check_error(struct usdhi6_host *host) 636 + { 637 + struct mmc_data *data = host->mrq->data; 638 + 639 + dev_dbg(mmc_dev(host->mmc), "%s(): IO error %d, status 0x%x\n", 640 + __func__, host->io_error, usdhi6_read(host, USDHI6_SD_INFO1)); 641 + 642 + if (host->io_error) { 643 + data->error = usdhi6_error_code(host); 644 + data->bytes_xfered = 0; 645 + usdhi6_dma_kill(host); 646 + usdhi6_dma_release(host); 647 + dev_warn(mmc_dev(host->mmc), 648 + "DMA failed: %d, falling back to PIO\n", data->error); 649 + return; 650 + } 651 + 652 + /* 653 + * The datasheet tells us to check a response from the card, whereas 654 + * responses only come after the command phase, not after the data 655 + * phase. Let's check anyway. 
656 + */ 657 + if (host->irq_status & USDHI6_SD_INFO1_RSP_END) 658 + dev_warn(mmc_dev(host->mmc), "Unexpected response received!\n"); 659 + } 660 + 661 + static void usdhi6_dma_kick(struct usdhi6_host *host) 662 + { 663 + if (host->mrq->data->flags & MMC_DATA_READ) 664 + dma_async_issue_pending(host->chan_rx); 665 + else 666 + dma_async_issue_pending(host->chan_tx); 667 + } 668 + 669 + static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start) 670 + { 671 + struct dma_slave_config cfg = { 672 + .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 673 + .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 674 + }; 675 + int ret; 676 + 677 + host->chan_tx = dma_request_slave_channel(mmc_dev(host->mmc), "tx"); 678 + dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__, 679 + host->chan_tx); 680 + 681 + if (!host->chan_tx) 682 + return; 683 + 684 + cfg.direction = DMA_MEM_TO_DEV; 685 + cfg.dst_addr = start + USDHI6_SD_BUF0; 686 + cfg.dst_maxburst = 128; /* 128 words * 4 bytes = 512 bytes */ 687 + cfg.src_addr = 0; 688 + ret = dmaengine_slave_config(host->chan_tx, &cfg); 689 + if (ret < 0) 690 + goto e_release_tx; 691 + 692 + host->chan_rx = dma_request_slave_channel(mmc_dev(host->mmc), "rx"); 693 + dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__, 694 + host->chan_rx); 695 + 696 + if (!host->chan_rx) 697 + goto e_release_tx; 698 + 699 + cfg.direction = DMA_DEV_TO_MEM; 700 + cfg.src_addr = cfg.dst_addr; 701 + cfg.src_maxburst = 128; /* 128 words * 4 bytes = 512 bytes */ 702 + cfg.dst_addr = 0; 703 + ret = dmaengine_slave_config(host->chan_rx, &cfg); 704 + if (ret < 0) 705 + goto e_release_rx; 706 + 707 + return; 708 + 709 + e_release_rx: 710 + dma_release_channel(host->chan_rx); 711 + host->chan_rx = NULL; 712 + e_release_tx: 713 + dma_release_channel(host->chan_tx); 714 + host->chan_tx = NULL; 715 + } 716 + 717 + /* API helpers */ 718 + 719 + static void usdhi6_clk_set(struct usdhi6_host *host, struct mmc_ios *ios) 720 + { 721 + unsigned 
long rate = ios->clock; 722 + u32 val; 723 + unsigned int i; 724 + 725 + for (i = 1000; i; i--) { 726 + if (usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_SCLKDIVEN) 727 + break; 728 + usleep_range(10, 100); 729 + } 730 + 731 + if (!i) { 732 + dev_err(mmc_dev(host->mmc), "SD bus busy, clock set aborted\n"); 733 + return; 734 + } 735 + 736 + val = usdhi6_read(host, USDHI6_SD_CLK_CTRL) & ~USDHI6_SD_CLK_CTRL_DIV_MASK; 737 + 738 + if (rate) { 739 + unsigned long new_rate; 740 + 741 + if (host->imclk <= rate) { 742 + if (ios->timing != MMC_TIMING_UHS_DDR50) { 743 + /* Cannot have 1-to-1 clock in DDR mode */ 744 + new_rate = host->imclk; 745 + val |= 0xff; 746 + } else { 747 + new_rate = host->imclk / 2; 748 + } 749 + } else { 750 + unsigned long div = 751 + roundup_pow_of_two(DIV_ROUND_UP(host->imclk, rate)); 752 + val |= div >> 2; 753 + new_rate = host->imclk / div; 754 + } 755 + 756 + if (host->rate == new_rate) 757 + return; 758 + 759 + host->rate = new_rate; 760 + 761 + dev_dbg(mmc_dev(host->mmc), "target %lu, div %u, set %lu\n", 762 + rate, (val & 0xff) << 2, new_rate); 763 + } 764 + 765 + /* 766 + * if old or new rate is equal to input rate, have to switch the clock 767 + * off before changing and on after 768 + */ 769 + if (host->imclk == rate || host->imclk == host->rate || !rate) 770 + usdhi6_write(host, USDHI6_SD_CLK_CTRL, 771 + val & ~USDHI6_SD_CLK_CTRL_SCLKEN); 772 + 773 + if (!rate) { 774 + host->rate = 0; 775 + return; 776 + } 777 + 778 + usdhi6_write(host, USDHI6_SD_CLK_CTRL, val); 779 + 780 + if (host->imclk == rate || host->imclk == host->rate || 781 + !(val & USDHI6_SD_CLK_CTRL_SCLKEN)) 782 + usdhi6_write(host, USDHI6_SD_CLK_CTRL, 783 + val | USDHI6_SD_CLK_CTRL_SCLKEN); 784 + } 785 + 786 + static void usdhi6_set_power(struct usdhi6_host *host, struct mmc_ios *ios) 787 + { 788 + struct mmc_host *mmc = host->mmc; 789 + 790 + if (!IS_ERR(mmc->supply.vmmc)) 791 + /* Errors ignored... 
*/ 792 + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 793 + ios->power_mode ? ios->vdd : 0); 794 + } 795 + 796 + static int usdhi6_reset(struct usdhi6_host *host) 797 + { 798 + int i; 799 + 800 + usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED); 801 + cpu_relax(); 802 + usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED | USDHI6_SOFT_RST_RESET); 803 + for (i = 1000; i; i--) 804 + if (usdhi6_read(host, USDHI6_SOFT_RST) & USDHI6_SOFT_RST_RESET) 805 + break; 806 + 807 + return i ? 0 : -ETIMEDOUT; 808 + } 809 + 810 + static void usdhi6_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 811 + { 812 + struct usdhi6_host *host = mmc_priv(mmc); 813 + u32 option, mode; 814 + int ret; 815 + 816 + dev_dbg(mmc_dev(mmc), "%uHz, OCR: %u, power %u, bus-width %u, timing %u\n", 817 + ios->clock, ios->vdd, ios->power_mode, ios->bus_width, ios->timing); 818 + 819 + switch (ios->power_mode) { 820 + case MMC_POWER_OFF: 821 + usdhi6_set_power(host, ios); 822 + usdhi6_only_cd(host); 823 + break; 824 + case MMC_POWER_UP: 825 + /* 826 + * We only also touch USDHI6_SD_OPTION from .request(), which 827 + * cannot race with MMC_POWER_UP 828 + */ 829 + ret = usdhi6_reset(host); 830 + if (ret < 0) { 831 + dev_err(mmc_dev(mmc), "Cannot reset the interface!\n"); 832 + } else { 833 + usdhi6_set_power(host, ios); 834 + usdhi6_only_cd(host); 835 + } 836 + break; 837 + case MMC_POWER_ON: 838 + option = usdhi6_read(host, USDHI6_SD_OPTION); 839 + /* 840 + * The eMMC standard only allows 4 or 8 bits in the DDR mode, 841 + * the same probably holds for SD cards. We check here anyway, 842 + * since the datasheet explicitly requires 4 bits for DDR. 
843 + */ 844 + if (ios->bus_width == MMC_BUS_WIDTH_1) { 845 + if (ios->timing == MMC_TIMING_UHS_DDR50) 846 + dev_err(mmc_dev(mmc), 847 + "4 bits are required for DDR\n"); 848 + option |= USDHI6_SD_OPTION_WIDTH_1; 849 + mode = 0; 850 + } else { 851 + option &= ~USDHI6_SD_OPTION_WIDTH_1; 852 + mode = ios->timing == MMC_TIMING_UHS_DDR50; 853 + } 854 + usdhi6_write(host, USDHI6_SD_OPTION, option); 855 + usdhi6_write(host, USDHI6_SDIF_MODE, mode); 856 + break; 857 + } 858 + 859 + if (host->rate != ios->clock) 860 + usdhi6_clk_set(host, ios); 861 + } 862 + 863 + /* This is data timeout. Response timeout is fixed to 640 clock cycles */ 864 + static void usdhi6_timeout_set(struct usdhi6_host *host) 865 + { 866 + struct mmc_request *mrq = host->mrq; 867 + u32 val; 868 + unsigned long ticks; 869 + 870 + if (!mrq->data) 871 + ticks = host->rate / 1000 * mrq->cmd->busy_timeout; 872 + else 873 + ticks = host->rate / 1000000 * (mrq->data->timeout_ns / 1000) + 874 + mrq->data->timeout_clks; 875 + 876 + if (!ticks || ticks > 1 << 27) 877 + /* Max timeout */ 878 + val = 14; 879 + else if (ticks < 1 << 13) 880 + /* Min timeout */ 881 + val = 0; 882 + else 883 + val = order_base_2(ticks) - 13; 884 + 885 + dev_dbg(mmc_dev(host->mmc), "Set %s timeout %lu ticks @ %lu Hz\n", 886 + mrq->data ? "data" : "cmd", ticks, host->rate); 887 + 888 + /* Timeout Counter mask: 0xf0 */ 889 + usdhi6_write(host, USDHI6_SD_OPTION, (val << USDHI6_SD_OPTION_TIMEOUT_SHIFT) | 890 + (usdhi6_read(host, USDHI6_SD_OPTION) & ~USDHI6_SD_OPTION_TIMEOUT_MASK)); 891 + } 892 + 893 + static void usdhi6_request_done(struct usdhi6_host *host) 894 + { 895 + struct mmc_request *mrq = host->mrq; 896 + struct mmc_data *data = mrq->data; 897 + 898 + if (WARN(host->pg.page || host->head_pg.page, 899 + "Page %p or %p not unmapped: wait %u, CMD%d(%c) @ +0x%x %ux%u in SG%u!\n", 900 + host->pg.page, host->head_pg.page, host->wait, mrq->cmd->opcode, 901 + data ? (data->flags & MMC_DATA_READ ? 'R' : 'W') : '-', 902 + data ? 
host->offset : 0, data ? data->blocks : 0, 903 + data ? data->blksz : 0, data ? data->sg_len : 0)) 904 + usdhi6_sg_unmap(host, true); 905 + 906 + if (mrq->cmd->error || 907 + (data && data->error) || 908 + (mrq->stop && mrq->stop->error)) 909 + dev_dbg(mmc_dev(host->mmc), "%s(CMD%d: %ux%u): err %d %d %d\n", 910 + __func__, mrq->cmd->opcode, data ? data->blocks : 0, 911 + data ? data->blksz : 0, 912 + mrq->cmd->error, 913 + data ? data->error : 1, 914 + mrq->stop ? mrq->stop->error : 1); 915 + 916 + /* Disable DMA */ 917 + usdhi6_write(host, USDHI6_CC_EXT_MODE, 0); 918 + host->wait = USDHI6_WAIT_FOR_REQUEST; 919 + host->mrq = NULL; 920 + 921 + mmc_request_done(host->mmc, mrq); 922 + } 923 + 924 + static int usdhi6_cmd_flags(struct usdhi6_host *host) 925 + { 926 + struct mmc_request *mrq = host->mrq; 927 + struct mmc_command *cmd = mrq->cmd; 928 + u16 opc = cmd->opcode; 929 + 930 + if (host->app_cmd) { 931 + host->app_cmd = false; 932 + opc |= USDHI6_SD_CMD_APP; 933 + } 934 + 935 + if (mrq->data) { 936 + opc |= USDHI6_SD_CMD_DATA; 937 + 938 + if (mrq->data->flags & MMC_DATA_READ) 939 + opc |= USDHI6_SD_CMD_READ; 940 + 941 + if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK || 942 + cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || 943 + (cmd->opcode == SD_IO_RW_EXTENDED && 944 + mrq->data->blocks > 1)) { 945 + opc |= USDHI6_SD_CMD_MULTI; 946 + if (!mrq->stop) 947 + opc |= USDHI6_SD_CMD_CMD12_AUTO_OFF; 948 + } 949 + 950 + switch (mmc_resp_type(cmd)) { 951 + case MMC_RSP_NONE: 952 + opc |= USDHI6_SD_CMD_MODE_RSP_NONE; 953 + break; 954 + case MMC_RSP_R1: 955 + opc |= USDHI6_SD_CMD_MODE_RSP_R1; 956 + break; 957 + case MMC_RSP_R1B: 958 + opc |= USDHI6_SD_CMD_MODE_RSP_R1B; 959 + break; 960 + case MMC_RSP_R2: 961 + opc |= USDHI6_SD_CMD_MODE_RSP_R2; 962 + break; 963 + case MMC_RSP_R3: 964 + opc |= USDHI6_SD_CMD_MODE_RSP_R3; 965 + break; 966 + default: 967 + dev_warn(mmc_dev(host->mmc), 968 + "Unknown response type %d\n", 969 + mmc_resp_type(cmd)); 970 + return -EINVAL; 971 + } 972 + } 973 
+ 974 + return opc; 975 + } 976 + 977 + static int usdhi6_rq_start(struct usdhi6_host *host) 978 + { 979 + struct mmc_request *mrq = host->mrq; 980 + struct mmc_command *cmd = mrq->cmd; 981 + struct mmc_data *data = mrq->data; 982 + int opc = usdhi6_cmd_flags(host); 983 + int i; 984 + 985 + if (opc < 0) 986 + return opc; 987 + 988 + for (i = 1000; i; i--) { 989 + if (!(usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_CBSY)) 990 + break; 991 + usleep_range(10, 100); 992 + } 993 + 994 + if (!i) { 995 + dev_dbg(mmc_dev(host->mmc), "Command active, request aborted\n"); 996 + return -EAGAIN; 997 + } 998 + 999 + if (data) { 1000 + bool use_dma; 1001 + int ret = 0; 1002 + 1003 + host->page_idx = 0; 1004 + 1005 + if (cmd->opcode == SD_IO_RW_EXTENDED && data->blocks > 1) { 1006 + switch (data->blksz) { 1007 + case 512: 1008 + break; 1009 + case 32: 1010 + case 64: 1011 + case 128: 1012 + case 256: 1013 + if (mrq->stop) 1014 + ret = -EINVAL; 1015 + break; 1016 + default: 1017 + ret = -EINVAL; 1018 + } 1019 + } else if ((cmd->opcode == MMC_READ_MULTIPLE_BLOCK || 1020 + cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) && 1021 + data->blksz != 512) { 1022 + ret = -EINVAL; 1023 + } 1024 + 1025 + if (ret < 0) { 1026 + dev_warn(mmc_dev(host->mmc), "%s(): %u blocks of %u bytes\n", 1027 + __func__, data->blocks, data->blksz); 1028 + return -EINVAL; 1029 + } 1030 + 1031 + if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK || 1032 + cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || 1033 + (cmd->opcode == SD_IO_RW_EXTENDED && 1034 + data->blocks > 1)) 1035 + usdhi6_sg_prep(host); 1036 + 1037 + usdhi6_write(host, USDHI6_SD_SIZE, data->blksz); 1038 + 1039 + if ((data->blksz >= USDHI6_MIN_DMA || 1040 + data->blocks > 1) && 1041 + (data->blksz % 4 || 1042 + data->sg->offset % 4)) 1043 + dev_dbg(mmc_dev(host->mmc), 1044 + "Bad SG of %u: %ux%u @ %u\n", data->sg_len, 1045 + data->blksz, data->blocks, data->sg->offset); 1046 + 1047 + /* Enable DMA for USDHI6_MIN_DMA bytes or more */ 1048 + use_dma = data->blksz 
>= USDHI6_MIN_DMA && 1049 + !(data->blksz % 4) && 1050 + usdhi6_dma_start(host) >= DMA_MIN_COOKIE; 1051 + 1052 + if (use_dma) 1053 + usdhi6_write(host, USDHI6_CC_EXT_MODE, USDHI6_CC_EXT_MODE_SDRW); 1054 + 1055 + dev_dbg(mmc_dev(host->mmc), 1056 + "%s(): request opcode %u, %u blocks of %u bytes in %u segments, %s %s @+0x%x%s\n", 1057 + __func__, cmd->opcode, data->blocks, data->blksz, 1058 + data->sg_len, use_dma ? "DMA" : "PIO", 1059 + data->flags & MMC_DATA_READ ? "read" : "write", 1060 + data->sg->offset, mrq->stop ? " + stop" : ""); 1061 + } else { 1062 + dev_dbg(mmc_dev(host->mmc), "%s(): request opcode %u\n", 1063 + __func__, cmd->opcode); 1064 + } 1065 + 1066 + /* We have to get a command completion interrupt with DMA too */ 1067 + usdhi6_wait_for_resp(host); 1068 + 1069 + host->wait = USDHI6_WAIT_FOR_CMD; 1070 + schedule_delayed_work(&host->timeout_work, host->timeout); 1071 + 1072 + /* SEC bit is required to enable block counting by the core */ 1073 + usdhi6_write(host, USDHI6_SD_STOP, 1074 + data && data->blocks > 1 ? 
USDHI6_SD_STOP_SEC : 0); 1075 + usdhi6_write(host, USDHI6_SD_ARG, cmd->arg); 1076 + 1077 + /* Kick command execution */ 1078 + usdhi6_write(host, USDHI6_SD_CMD, opc); 1079 + 1080 + return 0; 1081 + } 1082 + 1083 + static void usdhi6_request(struct mmc_host *mmc, struct mmc_request *mrq) 1084 + { 1085 + struct usdhi6_host *host = mmc_priv(mmc); 1086 + int ret; 1087 + 1088 + cancel_delayed_work_sync(&host->timeout_work); 1089 + 1090 + host->mrq = mrq; 1091 + host->sg = NULL; 1092 + 1093 + usdhi6_timeout_set(host); 1094 + ret = usdhi6_rq_start(host); 1095 + if (ret < 0) { 1096 + mrq->cmd->error = ret; 1097 + usdhi6_request_done(host); 1098 + } 1099 + } 1100 + 1101 + static int usdhi6_get_cd(struct mmc_host *mmc) 1102 + { 1103 + struct usdhi6_host *host = mmc_priv(mmc); 1104 + /* Read is atomic, no need to lock */ 1105 + u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_CD; 1106 + 1107 + /* 1108 + * level status.CD CD_ACTIVE_HIGH card present 1109 + * 1 0 0 0 1110 + * 1 0 1 1 1111 + * 0 1 0 1 1112 + * 0 1 1 0 1113 + */ 1114 + return !status ^ !(mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH); 1115 + } 1116 + 1117 + static int usdhi6_get_ro(struct mmc_host *mmc) 1118 + { 1119 + struct usdhi6_host *host = mmc_priv(mmc); 1120 + /* No locking as above */ 1121 + u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_WP; 1122 + 1123 + /* 1124 + * level status.WP RO_ACTIVE_HIGH card read-only 1125 + * 1 0 0 0 1126 + * 1 0 1 1 1127 + * 0 1 0 1 1128 + * 0 1 1 0 1129 + */ 1130 + return !status ^ !(mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH); 1131 + } 1132 + 1133 + static void usdhi6_enable_sdio_irq(struct mmc_host *mmc, int enable) 1134 + { 1135 + struct usdhi6_host *host = mmc_priv(mmc); 1136 + 1137 + dev_dbg(mmc_dev(mmc), "%s(): %sable\n", __func__, enable ? 
"en" : "dis"); 1138 + 1139 + if (enable) { 1140 + host->sdio_mask = USDHI6_SDIO_INFO1_IRQ & ~USDHI6_SDIO_INFO1_IOIRQ; 1141 + usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, host->sdio_mask); 1142 + usdhi6_write(host, USDHI6_SDIO_MODE, 1); 1143 + } else { 1144 + usdhi6_write(host, USDHI6_SDIO_MODE, 0); 1145 + usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, USDHI6_SDIO_INFO1_IRQ); 1146 + host->sdio_mask = USDHI6_SDIO_INFO1_IRQ; 1147 + } 1148 + } 1149 + 1150 + static struct mmc_host_ops usdhi6_ops = { 1151 + .request = usdhi6_request, 1152 + .set_ios = usdhi6_set_ios, 1153 + .get_cd = usdhi6_get_cd, 1154 + .get_ro = usdhi6_get_ro, 1155 + .enable_sdio_irq = usdhi6_enable_sdio_irq, 1156 + }; 1157 + 1158 + /* State machine handlers */ 1159 + 1160 + static void usdhi6_resp_cmd12(struct usdhi6_host *host) 1161 + { 1162 + struct mmc_command *cmd = host->mrq->stop; 1163 + cmd->resp[0] = usdhi6_read(host, USDHI6_SD_RSP10); 1164 + } 1165 + 1166 + static void usdhi6_resp_read(struct usdhi6_host *host) 1167 + { 1168 + struct mmc_command *cmd = host->mrq->cmd; 1169 + u32 *rsp = cmd->resp, tmp = 0; 1170 + int i; 1171 + 1172 + /* 1173 + * RSP10 39-8 1174 + * RSP32 71-40 1175 + * RSP54 103-72 1176 + * RSP76 127-104 1177 + * R2-type response: 1178 + * resp[0] = r[127..96] 1179 + * resp[1] = r[95..64] 1180 + * resp[2] = r[63..32] 1181 + * resp[3] = r[31..0] 1182 + * Other responses: 1183 + * resp[0] = r[39..8] 1184 + */ 1185 + 1186 + if (mmc_resp_type(cmd) == MMC_RSP_NONE) 1187 + return; 1188 + 1189 + if (!(host->irq_status & USDHI6_SD_INFO1_RSP_END)) { 1190 + dev_err(mmc_dev(host->mmc), 1191 + "CMD%d: response expected but is missing!\n", cmd->opcode); 1192 + return; 1193 + } 1194 + 1195 + if (mmc_resp_type(cmd) & MMC_RSP_136) 1196 + for (i = 0; i < 4; i++) { 1197 + if (i) 1198 + rsp[3 - i] = tmp >> 24; 1199 + tmp = usdhi6_read(host, USDHI6_SD_RSP10 + i * 8); 1200 + rsp[3 - i] |= tmp << 8; 1201 + } 1202 + else if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK || 1203 + cmd->opcode == 
MMC_WRITE_MULTIPLE_BLOCK) 1204 + /* Read RSP54 to avoid conflict with auto CMD12 */ 1205 + rsp[0] = usdhi6_read(host, USDHI6_SD_RSP54); 1206 + else 1207 + rsp[0] = usdhi6_read(host, USDHI6_SD_RSP10); 1208 + 1209 + dev_dbg(mmc_dev(host->mmc), "Response 0x%x\n", rsp[0]); 1210 + } 1211 + 1212 + static int usdhi6_blk_read(struct usdhi6_host *host) 1213 + { 1214 + struct mmc_data *data = host->mrq->data; 1215 + u32 *p; 1216 + int i, rest; 1217 + 1218 + if (host->io_error) { 1219 + data->error = usdhi6_error_code(host); 1220 + goto error; 1221 + } 1222 + 1223 + if (host->pg.page) { 1224 + p = host->blk_page + host->offset; 1225 + } else { 1226 + p = usdhi6_sg_map(host); 1227 + if (!p) { 1228 + data->error = -ENOMEM; 1229 + goto error; 1230 + } 1231 + } 1232 + 1233 + for (i = 0; i < data->blksz / 4; i++, p++) 1234 + *p = usdhi6_read(host, USDHI6_SD_BUF0); 1235 + 1236 + rest = data->blksz % 4; 1237 + for (i = 0; i < (rest + 1) / 2; i++) { 1238 + u16 d = usdhi6_read16(host, USDHI6_SD_BUF0); 1239 + ((u8 *)p)[2 * i] = ((u8 *)&d)[0]; 1240 + if (rest > 1 && !i) 1241 + ((u8 *)p)[2 * i + 1] = ((u8 *)&d)[1]; 1242 + } 1243 + 1244 + return 0; 1245 + 1246 + error: 1247 + dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error); 1248 + host->wait = USDHI6_WAIT_FOR_REQUEST; 1249 + return data->error; 1250 + } 1251 + 1252 + static int usdhi6_blk_write(struct usdhi6_host *host) 1253 + { 1254 + struct mmc_data *data = host->mrq->data; 1255 + u32 *p; 1256 + int i, rest; 1257 + 1258 + if (host->io_error) { 1259 + data->error = usdhi6_error_code(host); 1260 + goto error; 1261 + } 1262 + 1263 + if (host->pg.page) { 1264 + p = host->blk_page + host->offset; 1265 + } else { 1266 + p = usdhi6_sg_map(host); 1267 + if (!p) { 1268 + data->error = -ENOMEM; 1269 + goto error; 1270 + } 1271 + } 1272 + 1273 + for (i = 0; i < data->blksz / 4; i++, p++) 1274 + usdhi6_write(host, USDHI6_SD_BUF0, *p); 1275 + 1276 + rest = data->blksz % 4; 1277 + for (i = 0; i < (rest + 1) / 2; i++) { 1278 + u16 d; 
1279 + ((u8 *)&d)[0] = ((u8 *)p)[2 * i]; 1280 + if (rest > 1 && !i) 1281 + ((u8 *)&d)[1] = ((u8 *)p)[2 * i + 1]; 1282 + else 1283 + ((u8 *)&d)[1] = 0; 1284 + usdhi6_write16(host, USDHI6_SD_BUF0, d); 1285 + } 1286 + 1287 + return 0; 1288 + 1289 + error: 1290 + dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error); 1291 + host->wait = USDHI6_WAIT_FOR_REQUEST; 1292 + return data->error; 1293 + } 1294 + 1295 + static int usdhi6_stop_cmd(struct usdhi6_host *host) 1296 + { 1297 + struct mmc_request *mrq = host->mrq; 1298 + 1299 + switch (mrq->cmd->opcode) { 1300 + case MMC_READ_MULTIPLE_BLOCK: 1301 + case MMC_WRITE_MULTIPLE_BLOCK: 1302 + if (mrq->stop->opcode == MMC_STOP_TRANSMISSION) { 1303 + host->wait = USDHI6_WAIT_FOR_STOP; 1304 + return 0; 1305 + } 1306 + /* Unsupported STOP command */ 1307 + default: 1308 + dev_err(mmc_dev(host->mmc), 1309 + "unsupported stop CMD%d for CMD%d\n", 1310 + mrq->stop->opcode, mrq->cmd->opcode); 1311 + mrq->stop->error = -EOPNOTSUPP; 1312 + } 1313 + 1314 + return -EOPNOTSUPP; 1315 + } 1316 + 1317 + static bool usdhi6_end_cmd(struct usdhi6_host *host) 1318 + { 1319 + struct mmc_request *mrq = host->mrq; 1320 + struct mmc_command *cmd = mrq->cmd; 1321 + 1322 + if (host->io_error) { 1323 + cmd->error = usdhi6_error_code(host); 1324 + return false; 1325 + } 1326 + 1327 + usdhi6_resp_read(host); 1328 + 1329 + if (!mrq->data) 1330 + return false; 1331 + 1332 + if (host->dma_active) { 1333 + usdhi6_dma_kick(host); 1334 + if (!mrq->stop) 1335 + host->wait = USDHI6_WAIT_FOR_DMA; 1336 + else if (usdhi6_stop_cmd(host) < 0) 1337 + return false; 1338 + } else if (mrq->data->flags & MMC_DATA_READ) { 1339 + if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK || 1340 + (cmd->opcode == SD_IO_RW_EXTENDED && 1341 + mrq->data->blocks > 1)) 1342 + host->wait = USDHI6_WAIT_FOR_MREAD; 1343 + else 1344 + host->wait = USDHI6_WAIT_FOR_READ; 1345 + } else { 1346 + if (cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || 1347 + (cmd->opcode == SD_IO_RW_EXTENDED && 1348 + 
mrq->data->blocks > 1)) 1349 + host->wait = USDHI6_WAIT_FOR_MWRITE; 1350 + else 1351 + host->wait = USDHI6_WAIT_FOR_WRITE; 1352 + } 1353 + 1354 + return true; 1355 + } 1356 + 1357 + static bool usdhi6_read_block(struct usdhi6_host *host) 1358 + { 1359 + /* ACCESS_END IRQ is already unmasked */ 1360 + int ret = usdhi6_blk_read(host); 1361 + 1362 + /* 1363 + * Have to force unmapping both pages: the single block could have been 1364 + * cross-page, in which case for single-block IO host->page_idx == 0. 1365 + * So, if we don't force, the second page won't be unmapped. 1366 + */ 1367 + usdhi6_sg_unmap(host, true); 1368 + 1369 + if (ret < 0) 1370 + return false; 1371 + 1372 + host->wait = USDHI6_WAIT_FOR_DATA_END; 1373 + return true; 1374 + } 1375 + 1376 + static bool usdhi6_mread_block(struct usdhi6_host *host) 1377 + { 1378 + int ret = usdhi6_blk_read(host); 1379 + 1380 + if (ret < 0) 1381 + return false; 1382 + 1383 + usdhi6_sg_advance(host); 1384 + 1385 + return !host->mrq->data->error && 1386 + (host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop); 1387 + } 1388 + 1389 + static bool usdhi6_write_block(struct usdhi6_host *host) 1390 + { 1391 + int ret = usdhi6_blk_write(host); 1392 + 1393 + /* See comment in usdhi6_read_block() */ 1394 + usdhi6_sg_unmap(host, true); 1395 + 1396 + if (ret < 0) 1397 + return false; 1398 + 1399 + host->wait = USDHI6_WAIT_FOR_DATA_END; 1400 + return true; 1401 + } 1402 + 1403 + static bool usdhi6_mwrite_block(struct usdhi6_host *host) 1404 + { 1405 + int ret = usdhi6_blk_write(host); 1406 + 1407 + if (ret < 0) 1408 + return false; 1409 + 1410 + usdhi6_sg_advance(host); 1411 + 1412 + return !host->mrq->data->error && 1413 + (host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop); 1414 + } 1415 + 1416 + /* Interrupt & timeout handlers */ 1417 + 1418 + static irqreturn_t usdhi6_sd_bh(int irq, void *dev_id) 1419 + { 1420 + struct usdhi6_host *host = dev_id; 1421 + struct mmc_request *mrq; 1422 + struct mmc_command *cmd; 1423 + 
struct mmc_data *data; 1424 + bool io_wait = false; 1425 + 1426 + cancel_delayed_work_sync(&host->timeout_work); 1427 + 1428 + mrq = host->mrq; 1429 + if (!mrq) 1430 + return IRQ_HANDLED; 1431 + 1432 + cmd = mrq->cmd; 1433 + data = mrq->data; 1434 + 1435 + switch (host->wait) { 1436 + case USDHI6_WAIT_FOR_REQUEST: 1437 + /* We're too late, the timeout has already kicked in */ 1438 + return IRQ_HANDLED; 1439 + case USDHI6_WAIT_FOR_CMD: 1440 + /* Wait for data? */ 1441 + io_wait = usdhi6_end_cmd(host); 1442 + break; 1443 + case USDHI6_WAIT_FOR_MREAD: 1444 + /* Wait for more data? */ 1445 + io_wait = usdhi6_mread_block(host); 1446 + break; 1447 + case USDHI6_WAIT_FOR_READ: 1448 + /* Wait for data end? */ 1449 + io_wait = usdhi6_read_block(host); 1450 + break; 1451 + case USDHI6_WAIT_FOR_MWRITE: 1452 + /* Wait data to write? */ 1453 + io_wait = usdhi6_mwrite_block(host); 1454 + break; 1455 + case USDHI6_WAIT_FOR_WRITE: 1456 + /* Wait for data end? */ 1457 + io_wait = usdhi6_write_block(host); 1458 + break; 1459 + case USDHI6_WAIT_FOR_DMA: 1460 + usdhi6_dma_check_error(host); 1461 + break; 1462 + case USDHI6_WAIT_FOR_STOP: 1463 + usdhi6_write(host, USDHI6_SD_STOP, 0); 1464 + if (host->io_error) { 1465 + int ret = usdhi6_error_code(host); 1466 + if (mrq->stop) 1467 + mrq->stop->error = ret; 1468 + else 1469 + mrq->data->error = ret; 1470 + dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__, ret); 1471 + break; 1472 + } 1473 + usdhi6_resp_cmd12(host); 1474 + mrq->stop->error = 0; 1475 + break; 1476 + case USDHI6_WAIT_FOR_DATA_END: 1477 + if (host->io_error) { 1478 + mrq->data->error = usdhi6_error_code(host); 1479 + dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__, 1480 + mrq->data->error); 1481 + } 1482 + break; 1483 + default: 1484 + cmd->error = -EFAULT; 1485 + dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait); 1486 + usdhi6_request_done(host); 1487 + return IRQ_HANDLED; 1488 + } 1489 + 1490 + if (io_wait) { 1491 + 
schedule_delayed_work(&host->timeout_work, host->timeout); 1492 + /* Wait for more data or ACCESS_END */ 1493 + if (!host->dma_active) 1494 + usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ); 1495 + return IRQ_HANDLED; 1496 + } 1497 + 1498 + if (!cmd->error) { 1499 + if (data) { 1500 + if (!data->error) { 1501 + if (host->wait != USDHI6_WAIT_FOR_STOP && 1502 + host->mrq->stop && 1503 + !host->mrq->stop->error && 1504 + !usdhi6_stop_cmd(host)) { 1505 + /* Sending STOP */ 1506 + usdhi6_wait_for_resp(host); 1507 + 1508 + schedule_delayed_work(&host->timeout_work, 1509 + host->timeout); 1510 + 1511 + return IRQ_HANDLED; 1512 + } 1513 + 1514 + data->bytes_xfered = data->blocks * data->blksz; 1515 + } else { 1516 + /* Data error: might need to unmap the last page */ 1517 + dev_warn(mmc_dev(host->mmc), "%s(): data error %d\n", 1518 + __func__, data->error); 1519 + usdhi6_sg_unmap(host, true); 1520 + } 1521 + } else if (cmd->opcode == MMC_APP_CMD) { 1522 + host->app_cmd = true; 1523 + } 1524 + } 1525 + 1526 + usdhi6_request_done(host); 1527 + 1528 + return IRQ_HANDLED; 1529 + } 1530 + 1531 + static irqreturn_t usdhi6_sd(int irq, void *dev_id) 1532 + { 1533 + struct usdhi6_host *host = dev_id; 1534 + u16 status, status2, error; 1535 + 1536 + status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask & 1537 + ~USDHI6_SD_INFO1_CARD; 1538 + status2 = usdhi6_read(host, USDHI6_SD_INFO2) & ~host->status2_mask; 1539 + 1540 + usdhi6_only_cd(host); 1541 + 1542 + dev_dbg(mmc_dev(host->mmc), 1543 + "IRQ status = 0x%08x, status2 = 0x%08x\n", status, status2); 1544 + 1545 + if (!status && !status2) 1546 + return IRQ_NONE; 1547 + 1548 + error = status2 & USDHI6_SD_INFO2_ERR; 1549 + 1550 + /* Ack / clear interrupts */ 1551 + if (USDHI6_SD_INFO1_IRQ & status) 1552 + usdhi6_write(host, USDHI6_SD_INFO1, 1553 + 0xffff & ~(USDHI6_SD_INFO1_IRQ & status)); 1554 + 1555 + if (USDHI6_SD_INFO2_IRQ & status2) { 1556 + if (error) 1557 + /* In error cases BWE and BRE aren't cleared 
automatically */ 1558 + status2 |= USDHI6_SD_INFO2_BWE | USDHI6_SD_INFO2_BRE; 1559 + 1560 + usdhi6_write(host, USDHI6_SD_INFO2, 1561 + 0xffff & ~(USDHI6_SD_INFO2_IRQ & status2)); 1562 + } 1563 + 1564 + host->io_error = error; 1565 + host->irq_status = status; 1566 + 1567 + if (error) { 1568 + /* Don't pollute the log with unsupported command timeouts */ 1569 + if (host->wait != USDHI6_WAIT_FOR_CMD || 1570 + error != USDHI6_SD_INFO2_RSP_TOUT) 1571 + dev_warn(mmc_dev(host->mmc), 1572 + "%s(): INFO2 error bits 0x%08x\n", 1573 + __func__, error); 1574 + else 1575 + dev_dbg(mmc_dev(host->mmc), 1576 + "%s(): INFO2 error bits 0x%08x\n", 1577 + __func__, error); 1578 + } 1579 + 1580 + return IRQ_WAKE_THREAD; 1581 + } 1582 + 1583 + static irqreturn_t usdhi6_sdio(int irq, void *dev_id) 1584 + { 1585 + struct usdhi6_host *host = dev_id; 1586 + u32 status = usdhi6_read(host, USDHI6_SDIO_INFO1) & ~host->sdio_mask; 1587 + 1588 + dev_dbg(mmc_dev(host->mmc), "%s(): status 0x%x\n", __func__, status); 1589 + 1590 + if (!status) 1591 + return IRQ_NONE; 1592 + 1593 + usdhi6_write(host, USDHI6_SDIO_INFO1, ~status); 1594 + 1595 + mmc_signal_sdio_irq(host->mmc); 1596 + 1597 + return IRQ_HANDLED; 1598 + } 1599 + 1600 + static irqreturn_t usdhi6_cd(int irq, void *dev_id) 1601 + { 1602 + struct usdhi6_host *host = dev_id; 1603 + struct mmc_host *mmc = host->mmc; 1604 + u16 status; 1605 + 1606 + /* We're only interested in hotplug events here */ 1607 + status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask & 1608 + USDHI6_SD_INFO1_CARD; 1609 + 1610 + if (!status) 1611 + return IRQ_NONE; 1612 + 1613 + /* Ack */ 1614 + usdhi6_write(host, USDHI6_SD_INFO1, !status); 1615 + 1616 + if (!work_pending(&mmc->detect.work) && 1617 + (((status & USDHI6_SD_INFO1_CARD_INSERT) && 1618 + !mmc->card) || 1619 + ((status & USDHI6_SD_INFO1_CARD_EJECT) && 1620 + mmc->card))) 1621 + mmc_detect_change(mmc, msecs_to_jiffies(100)); 1622 + 1623 + return IRQ_HANDLED; 1624 + } 1625 + 1626 + /* 1627 + * 
Actually this should not be needed, if the built-in timeout works reliably in 1628 + * the both PIO cases and DMA never fails. But if DMA does fail, a timeout 1629 + * handler might be the only way to catch the error. 1630 + */ 1631 + static void usdhi6_timeout_work(struct work_struct *work) 1632 + { 1633 + struct delayed_work *d = container_of(work, struct delayed_work, work); 1634 + struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work); 1635 + struct mmc_request *mrq = host->mrq; 1636 + struct mmc_data *data = mrq ? mrq->data : NULL; 1637 + 1638 + dev_warn(mmc_dev(host->mmc), 1639 + "%s timeout wait %u CMD%d: IRQ 0x%08x:0x%08x, last IRQ 0x%08x\n", 1640 + host->dma_active ? "DMA" : "PIO", 1641 + host->wait, mrq ? mrq->cmd->opcode : -1, 1642 + usdhi6_read(host, USDHI6_SD_INFO1), 1643 + usdhi6_read(host, USDHI6_SD_INFO2), host->irq_status); 1644 + 1645 + if (host->dma_active) { 1646 + usdhi6_dma_kill(host); 1647 + usdhi6_dma_stop_unmap(host); 1648 + } 1649 + 1650 + switch (host->wait) { 1651 + default: 1652 + dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait); 1653 + /* mrq can be NULL in this actually impossible case */ 1654 + case USDHI6_WAIT_FOR_CMD: 1655 + usdhi6_error_code(host); 1656 + if (mrq) 1657 + mrq->cmd->error = -ETIMEDOUT; 1658 + break; 1659 + case USDHI6_WAIT_FOR_STOP: 1660 + usdhi6_error_code(host); 1661 + mrq->stop->error = -ETIMEDOUT; 1662 + break; 1663 + case USDHI6_WAIT_FOR_DMA: 1664 + case USDHI6_WAIT_FOR_MREAD: 1665 + case USDHI6_WAIT_FOR_MWRITE: 1666 + case USDHI6_WAIT_FOR_READ: 1667 + case USDHI6_WAIT_FOR_WRITE: 1668 + dev_dbg(mmc_dev(host->mmc), 1669 + "%c: page #%u @ +0x%x %ux%u in SG%u. Current SG %u bytes @ %u\n", 1670 + data->flags & MMC_DATA_READ ? 
'R' : 'W', host->page_idx, 1671 + host->offset, data->blocks, data->blksz, data->sg_len, 1672 + sg_dma_len(host->sg), host->sg->offset); 1673 + usdhi6_sg_unmap(host, true); 1674 + /* 1675 + * If USDHI6_WAIT_FOR_DATA_END times out, we have already unmapped 1676 + * the page 1677 + */ 1678 + case USDHI6_WAIT_FOR_DATA_END: 1679 + usdhi6_error_code(host); 1680 + data->error = -ETIMEDOUT; 1681 + } 1682 + 1683 + if (mrq) 1684 + usdhi6_request_done(host); 1685 + } 1686 + 1687 + /* Probe / release */ 1688 + 1689 + static const struct of_device_id usdhi6_of_match[] = { 1690 + {.compatible = "renesas,usdhi6rol0"}, 1691 + {} 1692 + }; 1693 + MODULE_DEVICE_TABLE(of, usdhi6_of_match); 1694 + 1695 + static int usdhi6_probe(struct platform_device *pdev) 1696 + { 1697 + struct device *dev = &pdev->dev; 1698 + struct mmc_host *mmc; 1699 + struct usdhi6_host *host; 1700 + struct resource *res; 1701 + int irq_cd, irq_sd, irq_sdio; 1702 + u32 version; 1703 + int ret; 1704 + 1705 + if (!dev->of_node) 1706 + return -ENODEV; 1707 + 1708 + irq_cd = platform_get_irq_byname(pdev, "card detect"); 1709 + irq_sd = platform_get_irq_byname(pdev, "data"); 1710 + irq_sdio = platform_get_irq_byname(pdev, "SDIO"); 1711 + if (irq_sd < 0 || irq_sdio < 0) 1712 + return -ENODEV; 1713 + 1714 + mmc = mmc_alloc_host(sizeof(struct usdhi6_host), dev); 1715 + if (!mmc) 1716 + return -ENOMEM; 1717 + 1718 + ret = mmc_of_parse(mmc); 1719 + if (ret < 0) 1720 + goto e_free_mmc; 1721 + 1722 + mmc_regulator_get_supply(mmc); 1723 + 1724 + host = mmc_priv(mmc); 1725 + host->mmc = mmc; 1726 + host->wait = USDHI6_WAIT_FOR_REQUEST; 1727 + host->timeout = msecs_to_jiffies(4000); 1728 + 1729 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1730 + host->base = devm_ioremap_resource(dev, res); 1731 + if (IS_ERR(host->base)) { 1732 + ret = PTR_ERR(host->base); 1733 + goto e_free_mmc; 1734 + } 1735 + 1736 + host->clk = devm_clk_get(dev, NULL); 1737 + if (IS_ERR(host->clk)) 1738 + goto e_free_mmc; 1739 + 1740 + 
host->imclk = clk_get_rate(host->clk); 1741 + 1742 + ret = clk_prepare_enable(host->clk); 1743 + if (ret < 0) 1744 + goto e_free_mmc; 1745 + 1746 + version = usdhi6_read(host, USDHI6_VERSION); 1747 + if ((version & 0xfff) != 0xa0d) { 1748 + dev_err(dev, "Version not recognized %x\n", version); 1749 + goto e_clk_off; 1750 + } 1751 + 1752 + dev_info(dev, "A USDHI6ROL0 SD host detected with %d ports\n", 1753 + usdhi6_read(host, USDHI6_SD_PORT_SEL) >> USDHI6_SD_PORT_SEL_PORTS_SHIFT); 1754 + 1755 + usdhi6_mask_all(host); 1756 + 1757 + if (irq_cd >= 0) { 1758 + ret = devm_request_irq(dev, irq_cd, usdhi6_cd, 0, 1759 + dev_name(dev), host); 1760 + if (ret < 0) 1761 + goto e_clk_off; 1762 + } else { 1763 + mmc->caps |= MMC_CAP_NEEDS_POLL; 1764 + } 1765 + 1766 + ret = devm_request_threaded_irq(dev, irq_sd, usdhi6_sd, usdhi6_sd_bh, 0, 1767 + dev_name(dev), host); 1768 + if (ret < 0) 1769 + goto e_clk_off; 1770 + 1771 + ret = devm_request_irq(dev, irq_sdio, usdhi6_sdio, 0, 1772 + dev_name(dev), host); 1773 + if (ret < 0) 1774 + goto e_clk_off; 1775 + 1776 + INIT_DELAYED_WORK(&host->timeout_work, usdhi6_timeout_work); 1777 + 1778 + usdhi6_dma_request(host, res->start); 1779 + 1780 + mmc->ops = &usdhi6_ops; 1781 + mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED | 1782 + MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50 | MMC_CAP_SDIO_IRQ; 1783 + /* Set .max_segs to some random number. Feel free to adjust. */ 1784 + mmc->max_segs = 32; 1785 + mmc->max_blk_size = 512; 1786 + mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs; 1787 + mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size; 1788 + /* 1789 + * Setting .max_seg_size to 1 page would simplify our page-mapping code, 1790 + * But OTOH, having large segments makes DMA more efficient. 
We could 1791 + * check, whether we managed to get DMA and fall back to 1 page 1792 + * segments, but if we do manage to obtain DMA and then it fails at 1793 + * run-time and we fall back to PIO, we will continue getting large 1794 + * segments. So, we wouldn't be able to get rid of the code anyway. 1795 + */ 1796 + mmc->max_seg_size = mmc->max_req_size; 1797 + if (!mmc->f_max) 1798 + mmc->f_max = host->imclk; 1799 + mmc->f_min = host->imclk / 512; 1800 + 1801 + platform_set_drvdata(pdev, host); 1802 + 1803 + ret = mmc_add_host(mmc); 1804 + if (ret < 0) 1805 + goto e_clk_off; 1806 + 1807 + return 0; 1808 + 1809 + e_clk_off: 1810 + clk_disable_unprepare(host->clk); 1811 + e_free_mmc: 1812 + mmc_free_host(mmc); 1813 + 1814 + return ret; 1815 + } 1816 + 1817 + static int usdhi6_remove(struct platform_device *pdev) 1818 + { 1819 + struct usdhi6_host *host = platform_get_drvdata(pdev); 1820 + 1821 + mmc_remove_host(host->mmc); 1822 + 1823 + usdhi6_mask_all(host); 1824 + cancel_delayed_work_sync(&host->timeout_work); 1825 + usdhi6_dma_release(host); 1826 + clk_disable_unprepare(host->clk); 1827 + mmc_free_host(host->mmc); 1828 + 1829 + return 0; 1830 + } 1831 + 1832 + static struct platform_driver usdhi6_driver = { 1833 + .probe = usdhi6_probe, 1834 + .remove = usdhi6_remove, 1835 + .driver = { 1836 + .name = "usdhi6rol0", 1837 + .owner = THIS_MODULE, 1838 + .of_match_table = usdhi6_of_match, 1839 + }, 1840 + }; 1841 + 1842 + module_platform_driver(usdhi6_driver); 1843 + 1844 + MODULE_DESCRIPTION("Renesas usdhi6rol0 SD/SDIO host driver"); 1845 + MODULE_LICENSE("GPL v2"); 1846 + MODULE_ALIAS("platform:usdhi6rol0"); 1847 + MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+1 -1
drivers/mmc/host/wmt-sdmmc.c
··· 840 840 priv->dma_desc_buffer = dma_alloc_coherent(&pdev->dev, 841 841 mmc->max_blk_count * 16, 842 842 &priv->dma_desc_device_addr, 843 - 208); 843 + GFP_KERNEL); 844 844 if (!priv->dma_desc_buffer) { 845 845 dev_err(&pdev->dev, "DMA alloc fail\n"); 846 846 ret = -EPERM;
+1 -3
drivers/net/wireless/rsi/rsi_91x_sdio.c
··· 285 285 if (err) { 286 286 rsi_dbg(ERR_ZONE, "%s: CCCR speed reg read failed: %d\n", 287 287 __func__, err); 288 - card->state &= ~MMC_STATE_HIGHSPEED; 289 288 } else { 290 289 err = rsi_cmd52writebyte(card, 291 290 SDIO_CCCR_SPEED, ··· 295 296 __func__, err); 296 297 return; 297 298 } 298 - mmc_card_set_highspeed(card); 299 299 host->ios.timing = MMC_TIMING_SD_HS; 300 300 host->ops->set_ios(host, &host->ios); 301 301 } 302 302 } 303 303 304 304 /* Set clock */ 305 - if (mmc_card_highspeed(card)) 305 + if (mmc_card_hs(card)) 306 306 clock = 50000000; 307 307 else 308 308 clock = card->cis.max_dtr;
+10 -19
include/linux/mmc/card.h
··· 63 63 unsigned int power_off_longtime; /* Units: ms */ 64 64 u8 power_off_notification; /* state */ 65 65 unsigned int hs_max_dtr; 66 + unsigned int hs200_max_dtr; 66 67 #define MMC_HIGH_26_MAX_DTR 26000000 67 68 #define MMC_HIGH_52_MAX_DTR 52000000 68 69 #define MMC_HIGH_DDR_MAX_DTR 52000000 69 70 #define MMC_HS200_MAX_DTR 200000000 70 71 unsigned int sectors; 71 - unsigned int card_type; 72 72 unsigned int hc_erase_size; /* In sectors */ 73 73 unsigned int hc_erase_timeout; /* In milliseconds */ 74 74 unsigned int sec_trim_mult; /* Secure trim multiplier */ ··· 110 110 u8 raw_pwr_cl_200_360; /* 237 */ 111 111 u8 raw_pwr_cl_ddr_52_195; /* 238 */ 112 112 u8 raw_pwr_cl_ddr_52_360; /* 239 */ 113 + u8 raw_pwr_cl_ddr_200_360; /* 253 */ 113 114 u8 raw_bkops_status; /* 246 */ 114 115 u8 raw_sectors[4]; /* 212 - 4 bytes */ 115 116 ··· 195 194 }; 196 195 197 196 struct mmc_host; 197 + struct mmc_ios; 198 198 struct sdio_func; 199 199 struct sdio_func_tuple; 200 200 ··· 252 250 unsigned int state; /* (our) card state */ 253 251 #define MMC_STATE_PRESENT (1<<0) /* present in sysfs */ 254 252 #define MMC_STATE_READONLY (1<<1) /* card is read-only */ 255 - #define MMC_STATE_HIGHSPEED (1<<2) /* card is in high speed mode */ 256 - #define MMC_STATE_BLOCKADDR (1<<3) /* card uses block-addressing */ 257 - #define MMC_STATE_HIGHSPEED_DDR (1<<4) /* card is in high speed mode */ 258 - #define MMC_STATE_ULTRAHIGHSPEED (1<<5) /* card is in ultra high speed mode */ 259 - #define MMC_CARD_SDXC (1<<6) /* card is SDXC */ 260 - #define MMC_CARD_REMOVED (1<<7) /* card has been removed */ 261 - #define MMC_STATE_HIGHSPEED_200 (1<<8) /* card is in HS200 mode */ 262 - #define MMC_STATE_DOING_BKOPS (1<<10) /* card is doing BKOPS */ 263 - #define MMC_STATE_SUSPENDED (1<<11) /* card is suspended */ 253 + #define MMC_STATE_BLOCKADDR (1<<2) /* card uses block-addressing */ 254 + #define MMC_CARD_SDXC (1<<3) /* card is SDXC */ 255 + #define MMC_CARD_REMOVED (1<<4) /* card has been removed */ 256 
+ #define MMC_STATE_DOING_BKOPS (1<<5) /* card is doing BKOPS */ 257 + #define MMC_STATE_SUSPENDED (1<<6) /* card is suspended */ 264 258 unsigned int quirks; /* card quirks */ 265 259 #define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */ 266 260 #define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */ ··· 299 301 struct sdio_func_tuple *tuples; /* unknown common tuples */ 300 302 301 303 unsigned int sd_bus_speed; /* Bus Speed Mode set for the card */ 304 + unsigned int mmc_avail_type; /* supported device type by both host and card */ 302 305 303 306 struct dentry *debugfs_root; 304 307 struct mmc_part part[MMC_NUM_PHY_PARTITION]; /* physical partitions */ ··· 352 353 #define CID_OEMID_ANY ((unsigned short) -1) 353 354 #define CID_NAME_ANY (NULL) 354 355 355 - #define END_FIXUP { 0 } 356 + #define END_FIXUP { NULL } 356 357 357 358 #define _FIXUP_EXT(_name, _manfid, _oemid, _rev_start, _rev_end, \ 358 359 _cis_vendor, _cis_device, \ ··· 417 418 418 419 #define mmc_card_present(c) ((c)->state & MMC_STATE_PRESENT) 419 420 #define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY) 420 - #define mmc_card_highspeed(c) ((c)->state & MMC_STATE_HIGHSPEED) 421 - #define mmc_card_hs200(c) ((c)->state & MMC_STATE_HIGHSPEED_200) 422 421 #define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR) 423 - #define mmc_card_ddr_mode(c) ((c)->state & MMC_STATE_HIGHSPEED_DDR) 424 - #define mmc_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED) 425 422 #define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC) 426 423 #define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED)) 427 424 #define mmc_card_doing_bkops(c) ((c)->state & MMC_STATE_DOING_BKOPS) ··· 425 430 426 431 #define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT) 427 432 #define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY) 428 - #define mmc_card_set_highspeed(c) ((c)->state |= MMC_STATE_HIGHSPEED) 429 - #define 
mmc_card_set_hs200(c) ((c)->state |= MMC_STATE_HIGHSPEED_200) 430 433 #define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR) 431 - #define mmc_card_set_ddr_mode(c) ((c)->state |= MMC_STATE_HIGHSPEED_DDR) 432 - #define mmc_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED) 433 434 #define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC) 434 435 #define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED) 435 436 #define mmc_card_set_doing_bkops(c) ((c)->state |= MMC_STATE_DOING_BKOPS)
-14
include/linux/mmc/dw_mmc.h
··· 248 248 /* delay in mS before detecting cards after interrupt */ 249 249 u32 detect_delay_ms; 250 250 251 - int (*init)(u32 slot_id, irq_handler_t , void *); 252 - int (*get_ro)(u32 slot_id); 253 - int (*get_cd)(u32 slot_id); 254 - int (*get_ocr)(u32 slot_id); 255 - int (*get_bus_wd)(u32 slot_id); 256 - /* 257 - * Enable power to selected slot and set voltage to desired level. 258 - * Voltage levels are specified using MMC_VDD_xxx defines defined 259 - * in linux/mmc/host.h file. 260 - */ 261 - void (*setpower)(u32 slot_id, u32 volt); 262 - void (*exit)(u32 slot_id); 263 - void (*select_slot)(u32 slot_id); 264 - 265 251 struct dw_mci_dma_ops *dma_ops; 266 252 struct dma_pdata *data; 267 253 struct block_settings *blk_settings;
+46 -13
include/linux/mmc/host.h
··· 17 17 #include <linux/fault-inject.h> 18 18 19 19 #include <linux/mmc/core.h> 20 + #include <linux/mmc/card.h> 20 21 #include <linux/mmc/pm.h> 21 22 22 23 struct mmc_ios { ··· 59 58 #define MMC_TIMING_UHS_SDR50 5 60 59 #define MMC_TIMING_UHS_SDR104 6 61 60 #define MMC_TIMING_UHS_DDR50 7 62 - #define MMC_TIMING_MMC_HS200 8 63 - 64 - #define MMC_SDR_MODE 0 65 - #define MMC_1_2V_DDR_MODE 1 66 - #define MMC_1_8V_DDR_MODE 2 67 - #define MMC_1_2V_SDR_MODE 3 68 - #define MMC_1_8V_SDR_MODE 4 61 + #define MMC_TIMING_MMC_DDR52 8 62 + #define MMC_TIMING_MMC_HS200 9 63 + #define MMC_TIMING_MMC_HS400 10 69 64 70 65 unsigned char signal_voltage; /* signalling voltage (1.8V or 3.3V) */ 71 66 ··· 133 136 134 137 /* The tuning command opcode value is different for SD and eMMC cards */ 135 138 int (*execute_tuning)(struct mmc_host *host, u32 opcode); 139 + 140 + /* Prepare HS400 target operating frequency depending host driver */ 141 + int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios); 136 142 int (*select_drive_strength)(unsigned int max_dtr, int host_drv, int card_drv); 137 143 void (*hw_reset)(struct mmc_host *host); 138 144 void (*card_event)(struct mmc_host *host); ··· 278 278 #define MMC_CAP2_PACKED_CMD (MMC_CAP2_PACKED_RD | \ 279 279 MMC_CAP2_PACKED_WR) 280 280 #define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */ 281 + #define MMC_CAP2_HS400_1_8V (1 << 15) /* Can support HS400 1.8V */ 282 + #define MMC_CAP2_HS400_1_2V (1 << 16) /* Can support HS400 1.2V */ 283 + #define MMC_CAP2_HS400 (MMC_CAP2_HS400_1_8V | \ 284 + MMC_CAP2_HS400_1_2V) 285 + #define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17) 281 286 282 287 mmc_pm_flag_t pm_caps; /* supported pm features */ 283 288 ··· 322 317 323 318 int rescan_disable; /* disable card detection */ 324 319 int rescan_entered; /* used with nonremovable devices */ 320 + 321 + bool trigger_card_event; /* card_event necessary */ 325 322 326 323 struct mmc_card *card; /* device attached to this host 
*/ 327 324 ··· 398 391 wake_up_process(host->sdio_irq_thread); 399 392 } 400 393 394 + void sdio_run_irqs(struct mmc_host *host); 395 + 401 396 #ifdef CONFIG_REGULATOR 402 397 int mmc_regulator_get_ocrmask(struct regulator *supply); 403 398 int mmc_regulator_set_ocr(struct mmc_host *mmc, 404 399 struct regulator *supply, 405 400 unsigned short vdd_bit); 406 - int mmc_regulator_get_supply(struct mmc_host *mmc); 407 401 #else 408 402 static inline int mmc_regulator_get_ocrmask(struct regulator *supply) 409 403 { ··· 417 409 { 418 410 return 0; 419 411 } 420 - 421 - static inline int mmc_regulator_get_supply(struct mmc_host *mmc) 422 - { 423 - return 0; 424 - } 425 412 #endif 413 + 414 + int mmc_regulator_get_supply(struct mmc_host *mmc); 426 415 427 416 int mmc_pm_notify(struct notifier_block *notify_block, unsigned long, void *); 428 417 ··· 480 475 return host->ios.clock; 481 476 } 482 477 #endif 478 + 479 + static inline int mmc_card_hs(struct mmc_card *card) 480 + { 481 + return card->host->ios.timing == MMC_TIMING_SD_HS || 482 + card->host->ios.timing == MMC_TIMING_MMC_HS; 483 + } 484 + 485 + static inline int mmc_card_uhs(struct mmc_card *card) 486 + { 487 + return card->host->ios.timing >= MMC_TIMING_UHS_SDR12 && 488 + card->host->ios.timing <= MMC_TIMING_UHS_DDR50; 489 + } 490 + 491 + static inline bool mmc_card_hs200(struct mmc_card *card) 492 + { 493 + return card->host->ios.timing == MMC_TIMING_MMC_HS200; 494 + } 495 + 496 + static inline bool mmc_card_ddr52(struct mmc_card *card) 497 + { 498 + return card->host->ios.timing == MMC_TIMING_MMC_DDR52; 499 + } 500 + 501 + static inline bool mmc_card_hs400(struct mmc_card *card) 502 + { 503 + return card->host->ios.timing == MMC_TIMING_MMC_HS400; 504 + } 505 + 483 506 #endif /* LINUX_MMC_HOST_H */
+18 -5
include/linux/mmc/mmc.h
··· 325 325 #define EXT_CSD_POWER_OFF_LONG_TIME 247 /* RO */ 326 326 #define EXT_CSD_GENERIC_CMD6_TIME 248 /* RO */ 327 327 #define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */ 328 + #define EXT_CSD_PWR_CL_DDR_200_360 253 /* RO */ 328 329 #define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */ 329 330 #define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */ 330 331 #define EXT_CSD_MAX_PACKED_WRITES 500 /* RO */ ··· 355 354 #define EXT_CSD_CMD_SET_SECURE (1<<1) 356 355 #define EXT_CSD_CMD_SET_CPSECURE (1<<2) 357 356 358 - #define EXT_CSD_CARD_TYPE_26 (1<<0) /* Card can run at 26MHz */ 359 - #define EXT_CSD_CARD_TYPE_52 (1<<1) /* Card can run at 52MHz */ 360 - #define EXT_CSD_CARD_TYPE_MASK 0x3F /* Mask out reserved bits */ 357 + #define EXT_CSD_CARD_TYPE_HS_26 (1<<0) /* Card can run at 26MHz */ 358 + #define EXT_CSD_CARD_TYPE_HS_52 (1<<1) /* Card can run at 52MHz */ 359 + #define EXT_CSD_CARD_TYPE_HS (EXT_CSD_CARD_TYPE_HS_26 | \ 360 + EXT_CSD_CARD_TYPE_HS_52) 361 361 #define EXT_CSD_CARD_TYPE_DDR_1_8V (1<<2) /* Card can run at 52MHz */ 362 362 /* DDR mode @1.8V or 3V I/O */ 363 363 #define EXT_CSD_CARD_TYPE_DDR_1_2V (1<<3) /* Card can run at 52MHz */ 364 364 /* DDR mode @1.2V I/O */ 365 365 #define EXT_CSD_CARD_TYPE_DDR_52 (EXT_CSD_CARD_TYPE_DDR_1_8V \ 366 366 | EXT_CSD_CARD_TYPE_DDR_1_2V) 367 - #define EXT_CSD_CARD_TYPE_SDR_1_8V (1<<4) /* Card can run at 200MHz */ 368 - #define EXT_CSD_CARD_TYPE_SDR_1_2V (1<<5) /* Card can run at 200MHz */ 367 + #define EXT_CSD_CARD_TYPE_HS200_1_8V (1<<4) /* Card can run at 200MHz */ 368 + #define EXT_CSD_CARD_TYPE_HS200_1_2V (1<<5) /* Card can run at 200MHz */ 369 369 /* SDR mode @1.2V I/O */ 370 + #define EXT_CSD_CARD_TYPE_HS200 (EXT_CSD_CARD_TYPE_HS200_1_8V | \ 371 + EXT_CSD_CARD_TYPE_HS200_1_2V) 372 + #define EXT_CSD_CARD_TYPE_HS400_1_8V (1<<6) /* Card can run at 200MHz DDR, 1.8V */ 373 + #define EXT_CSD_CARD_TYPE_HS400_1_2V (1<<7) /* Card can run at 200MHz DDR, 1.2V */ 374 + #define EXT_CSD_CARD_TYPE_HS400 (EXT_CSD_CARD_TYPE_HS400_1_8V | \ 375 + 
EXT_CSD_CARD_TYPE_HS400_1_2V) 370 376 371 377 #define EXT_CSD_BUS_WIDTH_1 0 /* Card is in 1 bit mode */ 372 378 #define EXT_CSD_BUS_WIDTH_4 1 /* Card is in 4 bit mode */ 373 379 #define EXT_CSD_BUS_WIDTH_8 2 /* Card is in 8 bit mode */ 374 380 #define EXT_CSD_DDR_BUS_WIDTH_4 5 /* Card is in 4 bit DDR mode */ 375 381 #define EXT_CSD_DDR_BUS_WIDTH_8 6 /* Card is in 8 bit DDR mode */ 382 + 383 + #define EXT_CSD_TIMING_BC 0 /* Backwards compatility */ 384 + #define EXT_CSD_TIMING_HS 1 /* High speed */ 385 + #define EXT_CSD_TIMING_HS200 2 /* HS200 */ 386 + #define EXT_CSD_TIMING_HS400 3 /* HS400 */ 376 387 377 388 #define EXT_CSD_SEC_ER_EN BIT(0) 378 389 #define EXT_CSD_SEC_BD_BLK_EN BIT(2)
+9 -6
include/linux/mmc/sdhci.h
··· 57 57 #define SDHCI_QUIRK_BROKEN_CARD_DETECTION (1<<15) 58 58 /* Controller reports inverted write-protect state */ 59 59 #define SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1<<16) 60 - /* Controller has nonstandard clock management */ 61 - #define SDHCI_QUIRK_NONSTANDARD_CLOCK (1<<17) 62 60 /* Controller does not like fast PIO transfers */ 63 61 #define SDHCI_QUIRK_PIO_NEEDS_DELAY (1<<18) 64 - /* Controller losing signal/interrupt enable states after reset */ 65 - #define SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET (1<<19) 66 62 /* Controller has to be forced to use block size of 2048 bytes */ 67 63 #define SDHCI_QUIRK_FORCE_BLK_SZ_2048 (1<<20) 68 64 /* Controller cannot do multi-block transfers */ ··· 143 147 144 148 bool runtime_suspended; /* Host is runtime suspended */ 145 149 bool bus_on; /* Bus power prevents runtime suspend */ 150 + bool preset_enabled; /* Preset is enabled */ 146 151 147 152 struct mmc_request *mrq; /* Current request */ 148 153 struct mmc_command *cmd; /* Current command */ ··· 161 164 dma_addr_t adma_addr; /* Mapped ADMA descr. table */ 162 165 dma_addr_t align_addr; /* Mapped bounce buffer */ 163 166 164 - struct tasklet_struct card_tasklet; /* Tasklet structures */ 165 - struct tasklet_struct finish_tasklet; 167 + struct tasklet_struct finish_tasklet; /* Tasklet structures */ 166 168 167 169 struct timer_list timer; /* Timer for timeouts */ 168 170 ··· 172 176 unsigned int ocr_avail_sd; 173 177 unsigned int ocr_avail_mmc; 174 178 u32 ocr_mask; /* available voltages */ 179 + 180 + unsigned timing; /* Current timing */ 181 + 182 + u32 thread_isr; 183 + 184 + /* cached registers */ 185 + u32 ier; 175 186 176 187 wait_queue_head_t buf_ready_int; /* Waitqueue for Buffer Read Ready interrupt */ 177 188 unsigned int tuning_done; /* Condition flag set when CMD19 succeeds */
+1 -18
include/linux/omap-dma.h
··· 1 - /* 2 - * OMAP DMA Engine support 3 - * 4 - * This program is free software; you can redistribute it and/or modify 5 - * it under the terms of the GNU General Public License version 2 as 6 - * published by the Free Software Foundation. 7 - */ 8 1 #ifndef __LINUX_OMAP_DMA_H 9 2 #define __LINUX_OMAP_DMA_H 10 - 11 - struct dma_chan; 12 - 13 - #if defined(CONFIG_DMA_OMAP) || (defined(CONFIG_DMA_OMAP_MODULE) && defined(MODULE)) 14 - bool omap_dma_filter_fn(struct dma_chan *, void *); 15 - #else 16 - static inline bool omap_dma_filter_fn(struct dma_chan *c, void *d) 17 - { 18 - return false; 19 - } 20 - #endif 3 + #include <linux/omap-dmaengine.h> 21 4 22 5 /* 23 6 * Legacy OMAP DMA handling defines and functions
+21
include/linux/omap-dmaengine.h
··· 1 + /* 2 + * OMAP DMA Engine support 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + */ 8 + #ifndef __LINUX_OMAP_DMAENGINE_H 9 + #define __LINUX_OMAP_DMAENGINE_H 10 + 11 + struct dma_chan; 12 + 13 + #if defined(CONFIG_DMA_OMAP) || (defined(CONFIG_DMA_OMAP_MODULE) && defined(MODULE)) 14 + bool omap_dma_filter_fn(struct dma_chan *, void *); 15 + #else 16 + static inline bool omap_dma_filter_fn(struct dma_chan *c, void *d) 17 + { 18 + return false; 19 + } 20 + #endif 21 + #endif /* __LINUX_OMAP_DMAENGINE_H */