Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mmc-v4.3' of git://git.linaro.org/people/ulf.hansson/mmc

Pull MMC updates from Ulf Hansson:
"MMC core:
- Fix a race condition in the request handling
- Skip trim commands for some buggy Kingston eMMCs
- An optimization and a correction for erase groups
- Set CMD23 quirk for some Sandisk cards

MMC host:
- sdhci: Give GPIO CD higher precedence and don't poll when it's used
- sdhci: Fix DMA memory leakage
- sdhci: Some updates for clock management
- sdhci-of-at91: introduce driver for the Atmel SDMMC
- sdhci-of-arasan: Add support for sdhci-5.1
- sdhci-esdhc-imx: Add support for imx7d which also supports HS400
- sdhci: A collection of fixes and improvements for various sdhci hosts
- omap_hsmmc: Modernization of the regulator code
- dw_mmc: A couple of fixes for DMA and PIO mode
- usdhi6rol0: A few fixes and support probe deferral for regulators
- pxamci: Convert to use dmaengine
- sh_mmcif: Fix the suspend process in a short term solution
- tmio: Adjust timeout for commands
- sunxi: Fix timeout while gating/ungating clock"

* tag 'mmc-v4.3' of git://git.linaro.org/people/ulf.hansson/mmc: (67 commits)
mmc: android-goldfish: remove incorrect __iomem annotation
mmc: core: fix race condition in mmc_wait_data_done
mmc: host: omap_hsmmc: remove CONFIG_REGULATOR check
mmc: host: omap_hsmmc: use ios->vdd for setting vmmc voltage
mmc: host: omap_hsmmc: use regulator_is_enabled to find pbias status
mmc: host: omap_hsmmc: enable/disable vmmc_aux regulator based on previous state
mmc: host: omap_hsmmc: don't use ->set_power to set initial regulator state
mmc: host: omap_hsmmc: avoid pbias regulator enable on power off
mmc: host: omap_hsmmc: add separate function to set pbias
mmc: host: omap_hsmmc: add separate functions for enable/disable supply
mmc: host: omap_hsmmc: return error if any of the regulator APIs fail
mmc: host: omap_hsmmc: remove unnecessary pbias set_voltage
mmc: host: omap_hsmmc: use mmc_host's vmmc and vqmmc
mmc: host: omap_hsmmc: use the ocrmask provided by the vmmc regulator
mmc: host: omap_hsmmc: cleanup omap_hsmmc_reg_get()
mmc: host: omap_hsmmc: return on fatal errors from omap_hsmmc_reg_get
mmc: host: omap_hsmmc: use devm_regulator_get_optional() for vmmc
mmc: sdhci-of-at91: fix platform_no_drv_owner.cocci warnings
mmc: sh_mmcif: Fix suspend process
mmc: usdhi6rol0: fix error return code
...

+1085 -424
+1 -1
Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
··· 9 9 10 10 Required Properties: 11 11 - compatible: Compatibility string. Must be 'arasan,sdhci-8.9a' or 12 - 'arasan,sdhci-4.9a' 12 + 'arasan,sdhci-4.9a' or 'arasan,sdhci-5.1' 13 13 - reg: From mmc bindings: Register location and length. 14 14 - clocks: From clock bindings: Handles to clock inputs. 15 15 - clock-names: From clock bindings: Tuple including "clk_xin" and "clk_ahb"
+6
Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
··· 15 15 "fsl,imx6q-usdhc" 16 16 "fsl,imx6sl-usdhc" 17 17 "fsl,imx6sx-usdhc" 18 + "fsl,imx7d-usdhc" 18 19 19 20 Optional properties: 20 21 - fsl,wp-controller : Indicate to use controller internal write protection ··· 28 27 transparent level shifters on the outputs of the controller. Two cells are 29 28 required, first cell specifies minimum slot voltage (mV), second cell 30 29 specifies maximum slot voltage (mV). Several ranges could be specified. 30 + - fsl,tuning-step: Specify the increasing delay cell steps in tuning procedure. 31 + The uSDHC use one delay cell as default increasing step to do tuning process. 32 + This property allows user to change the tuning step to more than one delay 33 + cells which is useful for some special boards or cards when the default 34 + tuning step can't find the proper delay window within limited tuning retries. 31 35 32 36 Examples: 33 37
+21
Documentation/devicetree/bindings/mmc/sdhci-atmel.txt
··· 1 + * Atmel SDHCI controller 2 + 3 + This file documents the differences between the core properties in 4 + Documentation/devicetree/bindings/mmc/mmc.txt and the properties used by the 5 + sdhci-of-at91 driver. 6 + 7 + Required properties: 8 + - compatible: Must be "atmel,sama5d2-sdhci". 9 + - clocks: Phandlers to the clocks. 10 + - clock-names: Must be "hclock", "multclk", "baseclk"; 11 + 12 + 13 + Example: 14 + 15 + sdmmc0: sdio-host@a0000000 { 16 + compatible = "atmel,sama5d2-sdhci"; 17 + reg = <0xa0000000 0x300>; 18 + interrupts = <31 IRQ_TYPE_LEVEL_HIGH 0>; 19 + clocks = <&sdmmc0_hclk>, <&sdmmc0_gclk>, <&main>; 20 + clock-names = "hclock", "multclk", "baseclk"; 21 + };
+1 -1
Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
··· 102 102 pinctrl-1 = <&mmc1_idle>; 103 103 pinctrl-2 = <&mmc1_sleep>; 104 104 ... 105 - interrupts-extended = <&intc 64 &gpio2 28 0>; 105 + interrupts-extended = <&intc 64 &gpio2 28 GPIO_ACTIVE_LOW>; 106 106 }; 107 107 108 108 mmc1_idle : pinmux_cirq_pin {
+6
MAINTAINERS
··· 1905 1905 S: Supported 1906 1906 F: drivers/mtd/nand/atmel_nand* 1907 1907 1908 + ATMEL SDMMC DRIVER 1909 + M: Ludovic Desroches <ludovic.desroches@atmel.com> 1910 + L: linux-mmc@vger.kernel.org 1911 + S: Supported 1912 + F: drivers/mmc/host/sdhci-of-at91.c 1913 + 1908 1914 ATMEL SPI DRIVER 1909 1915 M: Nicolas Ferre <nicolas.ferre@atmel.com> 1910 1916 S: Supported
+14
drivers/mmc/card/block.c
··· 47 47 #include "queue.h" 48 48 49 49 MODULE_ALIAS("mmc:block"); 50 50 #ifdef MODULE_PARAM_PREFIX 51 51 #undef MODULE_PARAM_PREFIX 52 52 #endif 53 53 #define MODULE_PARAM_PREFIX "mmcblk." 54 54 55 55 #define INAND_CMD38_ARG_EXT_CSD 113 56 56 #define INAND_CMD38_ARG_ERASE 0x00 ··· 2389 2386 #define CID_MANFID_TOSHIBA 0x11 2390 2387 #define CID_MANFID_MICRON 0x13 2391 2388 #define CID_MANFID_SAMSUNG 0x15 2389 + #define CID_MANFID_KINGSTON 0x70 2392 2390 2393 2391 static const struct mmc_fixup blk_fixups[] = 2394 2392 { ··· 2412 2408 * 2413 2409 * N.B. This doesn't affect SD cards. 2414 2410 */ 2411 + MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc, 2412 + MMC_QUIRK_BLK_NO_CMD23), 2413 + MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc, 2414 + MMC_QUIRK_BLK_NO_CMD23), 2415 2415 MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, 2416 2416 MMC_QUIRK_BLK_NO_CMD23), 2417 2417 MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, ··· 2451 2443 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), 2452 2444 MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, 2453 2445 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), 2446 + 2447 + /* 2448 + * On Some Kingston eMMCs, performing trim can result in 2449 + * unrecoverable data conrruption occasionally due to a firmware bug. 2450 + */ 2451 + MMC_FIXUP("V10008", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc, 2452 + MMC_QUIRK_TRIM_BROKEN), 2453 + MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc, 2454 + MMC_QUIRK_TRIM_BROKEN), 2454 2455 2455 2456 END_FIXUP 2456 2457 };
+39 -7
drivers/mmc/core/core.c
··· 358 358 */ 359 359 static void mmc_wait_data_done(struct mmc_request *mrq) 360 360 { 361 - mrq->host->context_info.is_done_rcv = true; 362 - wake_up_interruptible(&mrq->host->context_info.wait); 361 + struct mmc_context_info *context_info = &mrq->host->context_info; 362 + 363 + context_info->is_done_rcv = true; 364 + wake_up_interruptible(&context_info->wait); 363 365 } 364 366 365 367 static void mmc_wait_done(struct mmc_request *mrq) ··· 2170 2168 unsigned int arg) 2171 2169 { 2172 2170 unsigned int rem, to = from + nr; 2171 + int err; 2173 2172 2174 2173 if (!(card->host->caps & MMC_CAP_ERASE) || 2175 2174 !(card->csd.cmdclass & CCC_ERASE)) ··· 2221 2218 /* 'from' and 'to' are inclusive */ 2222 2219 to -= 1; 2223 2220 2221 + /* 2222 + * Special case where only one erase-group fits in the timeout budget: 2223 + * If the region crosses an erase-group boundary on this particular 2224 + * case, we will be trimming more than one erase-group which, does not 2225 + * fit in the timeout budget of the controller, so we need to split it 2226 + * and call mmc_do_erase() twice if necessary. This special case is 2227 + * identified by the card->eg_boundary flag. 
2228 + */ 2229 + rem = card->erase_size - (from % card->erase_size); 2230 + if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) { 2231 + err = mmc_do_erase(card, from, from + rem - 1, arg); 2232 + from += rem; 2233 + if ((err) || (to <= from)) 2234 + return err; 2235 + } 2236 + 2224 2237 return mmc_do_erase(card, from, to, arg); 2225 2238 } 2226 2239 EXPORT_SYMBOL(mmc_erase); ··· 2252 2233 2253 2234 int mmc_can_trim(struct mmc_card *card) 2254 2235 { 2255 - if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) 2236 + if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) && 2237 + (!(card->quirks & MMC_QUIRK_TRIM_BROKEN))) 2256 2238 return 1; 2257 2239 return 0; 2258 2240 } ··· 2333 2313 if (!qty) 2334 2314 return 0; 2335 2315 2316 + /* 2317 + * When specifying a sector range to trim, chances are we might cross 2318 + * an erase-group boundary even if the amount of sectors is less than 2319 + * one erase-group. 2320 + * If we can only fit one erase-group in the controller timeout budget, 2321 + * we have to care that erase-group boundaries are not crossed by a 2322 + * single trim operation. We flag that special case with "eg_boundary". 2323 + * In all other cases we can just decrement qty and pretend that we 2324 + * always touch (qty + 1) erase-groups as a simple optimization. 2325 + */ 2336 2326 if (qty == 1) 2337 - return 1; 2327 + card->eg_boundary = 1; 2328 + else 2329 + qty--; 2338 2330 2339 2331 /* Convert qty to sectors */ 2340 2332 if (card->erase_shift) 2341 - max_discard = --qty << card->erase_shift; 2333 + max_discard = qty << card->erase_shift; 2342 2334 else if (mmc_card_sd(card)) 2343 - max_discard = qty; 2335 + max_discard = qty + 1; 2344 2336 else 2345 - max_discard = --qty * card->erase_size; 2337 + max_discard = qty * card->erase_size; 2346 2338 2347 2339 return max_discard; 2348 2340 }
+21 -21
drivers/mmc/core/host.c
··· 398 398 { 399 399 struct device_node *np; 400 400 u32 bus_width; 401 - int len, ret; 401 + int ret; 402 402 bool cd_cap_invert, cd_gpio_invert = false; 403 403 bool ro_cap_invert, ro_gpio_invert = false; 404 404 ··· 445 445 */ 446 446 447 447 /* Parse Card Detection */ 448 - if (of_find_property(np, "non-removable", &len)) { 448 + if (of_property_read_bool(np, "non-removable")) { 449 449 host->caps |= MMC_CAP_NONREMOVABLE; 450 450 } else { 451 451 cd_cap_invert = of_property_read_bool(np, "cd-inverted"); 452 452 453 - if (of_find_property(np, "broken-cd", &len)) 453 + if (of_property_read_bool(np, "broken-cd")) 454 454 host->caps |= MMC_CAP_NEEDS_POLL; 455 455 456 456 ret = mmc_gpiod_request_cd(host, "cd", 0, true, ··· 491 491 if (ro_cap_invert ^ ro_gpio_invert) 492 492 host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; 493 493 494 - if (of_find_property(np, "cap-sd-highspeed", &len)) 494 + if (of_property_read_bool(np, "cap-sd-highspeed")) 495 495 host->caps |= MMC_CAP_SD_HIGHSPEED; 496 - if (of_find_property(np, "cap-mmc-highspeed", &len)) 496 + if (of_property_read_bool(np, "cap-mmc-highspeed")) 497 497 host->caps |= MMC_CAP_MMC_HIGHSPEED; 498 - if (of_find_property(np, "sd-uhs-sdr12", &len)) 498 + if (of_property_read_bool(np, "sd-uhs-sdr12")) 499 499 host->caps |= MMC_CAP_UHS_SDR12; 500 - if (of_find_property(np, "sd-uhs-sdr25", &len)) 500 + if (of_property_read_bool(np, "sd-uhs-sdr25")) 501 501 host->caps |= MMC_CAP_UHS_SDR25; 502 - if (of_find_property(np, "sd-uhs-sdr50", &len)) 502 + if (of_property_read_bool(np, "sd-uhs-sdr50")) 503 503 host->caps |= MMC_CAP_UHS_SDR50; 504 - if (of_find_property(np, "sd-uhs-sdr104", &len)) 504 + if (of_property_read_bool(np, "sd-uhs-sdr104")) 505 505 host->caps |= MMC_CAP_UHS_SDR104; 506 - if (of_find_property(np, "sd-uhs-ddr50", &len)) 506 + if (of_property_read_bool(np, "sd-uhs-ddr50")) 507 507 host->caps |= MMC_CAP_UHS_DDR50; 508 - if (of_find_property(np, "cap-power-off-card", &len)) 508 + if (of_property_read_bool(np, 
"cap-power-off-card")) 509 509 host->caps |= MMC_CAP_POWER_OFF_CARD; 510 - if (of_find_property(np, "cap-sdio-irq", &len)) 510 + if (of_property_read_bool(np, "cap-sdio-irq")) 511 511 host->caps |= MMC_CAP_SDIO_IRQ; 512 - if (of_find_property(np, "full-pwr-cycle", &len)) 512 + if (of_property_read_bool(np, "full-pwr-cycle")) 513 513 host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE; 514 - if (of_find_property(np, "keep-power-in-suspend", &len)) 514 + if (of_property_read_bool(np, "keep-power-in-suspend")) 515 515 host->pm_caps |= MMC_PM_KEEP_POWER; 516 - if (of_find_property(np, "enable-sdio-wakeup", &len)) 516 + if (of_property_read_bool(np, "enable-sdio-wakeup")) 517 517 host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ; 518 - if (of_find_property(np, "mmc-ddr-1_8v", &len)) 518 + if (of_property_read_bool(np, "mmc-ddr-1_8v")) 519 519 host->caps |= MMC_CAP_1_8V_DDR; 520 - if (of_find_property(np, "mmc-ddr-1_2v", &len)) 520 + if (of_property_read_bool(np, "mmc-ddr-1_2v")) 521 521 host->caps |= MMC_CAP_1_2V_DDR; 522 - if (of_find_property(np, "mmc-hs200-1_8v", &len)) 522 + if (of_property_read_bool(np, "mmc-hs200-1_8v")) 523 523 host->caps2 |= MMC_CAP2_HS200_1_8V_SDR; 524 - if (of_find_property(np, "mmc-hs200-1_2v", &len)) 524 + if (of_property_read_bool(np, "mmc-hs200-1_2v")) 525 525 host->caps2 |= MMC_CAP2_HS200_1_2V_SDR; 526 - if (of_find_property(np, "mmc-hs400-1_8v", &len)) 526 + if (of_property_read_bool(np, "mmc-hs400-1_8v")) 527 527 host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR; 528 - if (of_find_property(np, "mmc-hs400-1_2v", &len)) 528 + if (of_property_read_bool(np, "mmc-hs400-1_2v")) 529 529 host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR; 530 530 531 531 host->dsr_req = !of_property_read_u32(np, "dsr", &host->dsr);
+8
drivers/mmc/host/Kconfig
··· 129 129 130 130 If unsure, say N. 131 131 132 + config MMC_SDHCI_OF_AT91 133 + tristate "SDHCI OF support for the Atmel SDMMC controller" 134 + depends on MMC_SDHCI_PLTFM 135 + depends on OF 136 + select MMC_SDHCI_IO_ACCESSORS 137 + help 138 + This selects the Atmel SDMMC driver 139 + 132 140 config MMC_SDHCI_OF_ESDHC 133 141 tristate "SDHCI OF support for the Freescale eSDHC controller" 134 142 depends on MMC_SDHCI_PLTFM
+1
drivers/mmc/host/Makefile
··· 67 67 obj-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o 68 68 obj-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o 69 69 obj-$(CONFIG_MMC_SDHCI_OF_ARASAN) += sdhci-of-arasan.o 70 + obj-$(CONFIG_MMC_SDHCI_OF_AT91) += sdhci-of-at91.o 70 71 obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o 71 72 obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o 72 73 obj-$(CONFIG_MMC_SDHCI_BCM_KONA) += sdhci-bcm-kona.o
+1 -1
drivers/mmc/host/android-goldfish.c
··· 118 118 struct mmc_host *mmc; 119 119 struct device *dev; 120 120 unsigned char id; /* 16xx chips have 2 MMC blocks */ 121 - void __iomem *virt_base; 121 + void *virt_base; 122 122 unsigned int phys_base; 123 123 int irq; 124 124 unsigned char bus_mode;
-1
drivers/mmc/host/atmel-mci.c
··· 29 29 #include <linux/slab.h> 30 30 #include <linux/stat.h> 31 31 #include <linux/types.h> 32 - #include <linux/platform_data/atmel.h> 33 32 #include <linux/platform_data/mmc-atmel-mci.h> 34 33 35 34 #include <linux/mmc/host.h>
+3
drivers/mmc/host/dw_mmc-rockchip.c
··· 73 73 /* It is slot 8 on Rockchip SoCs */ 74 74 host->sdio_id0 = 8; 75 75 76 + /* It needs this quirk on all Rockchip SoCs */ 77 + host->pdata->quirks |= DW_MCI_QUIRK_BROKEN_DTO; 78 + 76 79 return 0; 77 80 } 78 81
+197 -87
drivers/mmc/host/dw_mmc.c
··· 99 99 100 100 __le32 des3; /* buffer 2 physical address */ 101 101 }; 102 + 103 + /* Each descriptor can transfer up to 4KB of data in chained mode */ 104 + #define DW_MCI_DESC_DATA_LENGTH 0x1000 102 105 #endif /* CONFIG_MMC_DW_IDMAC */ 103 106 104 107 static bool dw_mci_reset(struct dw_mci *host); ··· 238 235 struct dw_mci *host = slot->host; 239 236 const struct dw_mci_drv_data *drv_data = slot->host->drv_data; 240 237 u32 cmdr; 241 - cmd->error = -EINPROGRESS; 242 238 239 + cmd->error = -EINPROGRESS; 243 240 cmdr = cmd->opcode; 244 241 245 242 if (cmd->opcode == MMC_STOP_TRANSMISSION || ··· 374 371 cmd->arg, cmd_flags); 375 372 376 373 mci_writel(host, CMDARG, cmd->arg); 377 - wmb(); 374 + wmb(); /* drain writebuffer */ 378 375 dw_mci_wait_while_busy(host, cmd_flags); 379 376 380 377 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START); ··· 383 380 static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data) 384 381 { 385 382 struct mmc_command *stop = data->stop ? 
data->stop : &host->stop_abort; 383 + 386 384 dw_mci_start_command(host, stop, host->stop_cmdr); 387 385 } 388 386 ··· 466 462 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data, 467 463 unsigned int sg_len) 468 464 { 465 + unsigned int desc_len; 469 466 int i; 470 - if (host->dma_64bit_address == 1) { 471 - struct idmac_desc_64addr *desc = host->sg_cpu; 472 467 473 - for (i = 0; i < sg_len; i++, desc++) { 468 + if (host->dma_64bit_address == 1) { 469 + struct idmac_desc_64addr *desc_first, *desc_last, *desc; 470 + 471 + desc_first = desc_last = desc = host->sg_cpu; 472 + 473 + for (i = 0; i < sg_len; i++) { 474 474 unsigned int length = sg_dma_len(&data->sg[i]); 475 + 475 476 u64 mem_addr = sg_dma_address(&data->sg[i]); 476 477 477 - /* 478 - * Set the OWN bit and disable interrupts for this 479 - * descriptor 480 - */ 481 - desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | 482 - IDMAC_DES0_CH; 483 - /* Buffer length */ 484 - IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, length); 478 + for ( ; length ; desc++) { 479 + desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ? 
480 + length : DW_MCI_DESC_DATA_LENGTH; 485 481 486 - /* Physical address to DMA to/from */ 487 - desc->des4 = mem_addr & 0xffffffff; 488 - desc->des5 = mem_addr >> 32; 482 + length -= desc_len; 483 + 484 + /* 485 + * Set the OWN bit and disable interrupts 486 + * for this descriptor 487 + */ 488 + desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | 489 + IDMAC_DES0_CH; 490 + 491 + /* Buffer length */ 492 + IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len); 493 + 494 + /* Physical address to DMA to/from */ 495 + desc->des4 = mem_addr & 0xffffffff; 496 + desc->des5 = mem_addr >> 32; 497 + 498 + /* Update physical address for the next desc */ 499 + mem_addr += desc_len; 500 + 501 + /* Save pointer to the last descriptor */ 502 + desc_last = desc; 503 + } 489 504 } 490 505 491 506 /* Set first descriptor */ 492 - desc = host->sg_cpu; 493 - desc->des0 |= IDMAC_DES0_FD; 507 + desc_first->des0 |= IDMAC_DES0_FD; 494 508 495 509 /* Set last descriptor */ 496 - desc = host->sg_cpu + (i - 1) * 497 - sizeof(struct idmac_desc_64addr); 498 - desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC); 499 - desc->des0 |= IDMAC_DES0_LD; 510 + desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC); 511 + desc_last->des0 |= IDMAC_DES0_LD; 500 512 501 513 } else { 502 - struct idmac_desc *desc = host->sg_cpu; 514 + struct idmac_desc *desc_first, *desc_last, *desc; 503 515 504 - for (i = 0; i < sg_len; i++, desc++) { 516 + desc_first = desc_last = desc = host->sg_cpu; 517 + 518 + for (i = 0; i < sg_len; i++) { 505 519 unsigned int length = sg_dma_len(&data->sg[i]); 520 + 506 521 u32 mem_addr = sg_dma_address(&data->sg[i]); 507 522 508 - /* 509 - * Set the OWN bit and disable interrupts for this 510 - * descriptor 511 - */ 512 - desc->des0 = cpu_to_le32(IDMAC_DES0_OWN | 513 - IDMAC_DES0_DIC | IDMAC_DES0_CH); 514 - /* Buffer length */ 515 - IDMAC_SET_BUFFER1_SIZE(desc, length); 523 + for ( ; length ; desc++) { 524 + desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ? 
525 + length : DW_MCI_DESC_DATA_LENGTH; 516 526 517 - /* Physical address to DMA to/from */ 518 - desc->des2 = cpu_to_le32(mem_addr); 527 + length -= desc_len; 528 + 529 + /* 530 + * Set the OWN bit and disable interrupts 531 + * for this descriptor 532 + */ 533 + desc->des0 = cpu_to_le32(IDMAC_DES0_OWN | 534 + IDMAC_DES0_DIC | 535 + IDMAC_DES0_CH); 536 + 537 + /* Buffer length */ 538 + IDMAC_SET_BUFFER1_SIZE(desc, desc_len); 539 + 540 + /* Physical address to DMA to/from */ 541 + desc->des2 = cpu_to_le32(mem_addr); 542 + 543 + /* Update physical address for the next desc */ 544 + mem_addr += desc_len; 545 + 546 + /* Save pointer to the last descriptor */ 547 + desc_last = desc; 548 + } 519 549 } 520 550 521 551 /* Set first descriptor */ 522 - desc = host->sg_cpu; 523 - desc->des0 |= cpu_to_le32(IDMAC_DES0_FD); 552 + desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD); 524 553 525 554 /* Set last descriptor */ 526 - desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc); 527 - desc->des0 &= cpu_to_le32(~(IDMAC_DES0_CH | IDMAC_DES0_DIC)); 528 - desc->des0 |= cpu_to_le32(IDMAC_DES0_LD); 555 + desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH | 556 + IDMAC_DES0_DIC)); 557 + desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD); 529 558 } 530 559 531 - wmb(); 560 + wmb(); /* drain writebuffer */ 532 561 } 533 562 534 563 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len) ··· 579 542 temp |= SDMMC_CTRL_USE_IDMAC; 580 543 mci_writel(host, CTRL, temp); 581 544 545 + /* drain writebuffer */ 582 546 wmb(); 583 547 584 548 /* Enable the IDMAC */ ··· 627 589 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc); 628 590 629 591 /* Forward link the descriptor list */ 630 - for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) { 592 + for (i = 0, p = host->sg_cpu; 593 + i < host->ring_size - 1; 594 + i++, p++) { 631 595 p->des3 = cpu_to_le32(host->sg_dma + 632 596 (sizeof(struct idmac_desc) * (i + 1))); 633 597 p->des1 = 0; ··· 758 718 u32 fifo_width 
= 1 << host->data_shift; 759 719 u32 blksz_depth = blksz / fifo_width, fifoth_val; 760 720 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers; 761 - int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1; 721 + int idx = ARRAY_SIZE(mszs) - 1; 762 722 763 723 tx_wmark = (host->fifo_depth) / 2; 764 724 tx_wmark_invers = host->fifo_depth - tx_wmark; ··· 883 843 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data) 884 844 { 885 845 unsigned long irqflags; 846 + int flags = SG_MITER_ATOMIC; 886 847 u32 temp; 887 848 888 849 data->error = -EINPROGRESS; ··· 900 859 } 901 860 902 861 if (dw_mci_submit_data_dma(host, data)) { 903 - int flags = SG_MITER_ATOMIC; 904 862 if (host->data->flags & MMC_DATA_READ) 905 863 flags |= SG_MITER_TO_SG; 906 864 else ··· 946 906 unsigned int cmd_status = 0; 947 907 948 908 mci_writel(host, CMDARG, arg); 949 - wmb(); 909 + wmb(); /* drain writebuffer */ 950 910 dw_mci_wait_while_busy(host, cmd); 951 911 mci_writel(host, CMD, SDMMC_CMD_START | cmd); 952 912 ··· 1059 1019 1060 1020 if (data) { 1061 1021 dw_mci_submit_data(host, data); 1062 - wmb(); 1022 + wmb(); /* drain writebuffer */ 1063 1023 } 1064 1024 1065 1025 dw_mci_start_command(host, cmd, cmdflags); ··· 1424 1384 struct dw_mci_slot *slot = mmc_priv(mmc); 1425 1385 struct dw_mci *host = slot->host; 1426 1386 const struct dw_mci_drv_data *drv_data = host->drv_data; 1427 - int err = -ENOSYS; 1387 + int err = -EINVAL; 1428 1388 1429 1389 if (drv_data && drv_data->execute_tuning) 1430 1390 err = drv_data->execute_tuning(slot); 1431 1391 return err; 1432 1392 } 1433 1393 1434 - static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios) 1394 + static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc, 1395 + struct mmc_ios *ios) 1435 1396 { 1436 1397 struct dw_mci_slot *slot = mmc_priv(mmc); 1437 1398 struct dw_mci *host = slot->host; ··· 1574 1533 return data->error; 1575 1534 } 1576 1535 1536 + static void dw_mci_set_drto(struct dw_mci *host) 
1537 + { 1538 + unsigned int drto_clks; 1539 + unsigned int drto_ms; 1540 + 1541 + drto_clks = mci_readl(host, TMOUT) >> 8; 1542 + drto_ms = DIV_ROUND_UP(drto_clks, host->bus_hz / 1000); 1543 + 1544 + /* add a bit spare time */ 1545 + drto_ms += 10; 1546 + 1547 + mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms)); 1548 + } 1549 + 1577 1550 static void dw_mci_tasklet_func(unsigned long priv) 1578 1551 { 1579 1552 struct dw_mci *host = (struct dw_mci *)priv; ··· 1665 1610 } 1666 1611 1667 1612 if (!test_and_clear_bit(EVENT_XFER_COMPLETE, 1668 - &host->pending_events)) 1613 + &host->pending_events)) { 1614 + /* 1615 + * If all data-related interrupts don't come 1616 + * within the given time in reading data state. 1617 + */ 1618 + if ((host->quirks & DW_MCI_QUIRK_BROKEN_DTO) && 1619 + (host->dir_status == DW_MCI_RECV_STATUS)) 1620 + dw_mci_set_drto(host); 1669 1621 break; 1622 + } 1670 1623 1671 1624 set_bit(EVENT_XFER_COMPLETE, &host->completed_events); 1672 1625 ··· 1707 1644 1708 1645 case STATE_DATA_BUSY: 1709 1646 if (!test_and_clear_bit(EVENT_DATA_COMPLETE, 1710 - &host->pending_events)) 1647 + &host->pending_events)) { 1648 + /* 1649 + * If data error interrupt comes but data over 1650 + * interrupt doesn't come within the given time. 1651 + * in reading data state. 
1652 + */ 1653 + if ((host->quirks & DW_MCI_QUIRK_BROKEN_DTO) && 1654 + (host->dir_status == DW_MCI_RECV_STATUS)) 1655 + dw_mci_set_drto(host); 1711 1656 break; 1657 + } 1712 1658 1713 1659 host->data = NULL; 1714 1660 set_bit(EVENT_DATA_COMPLETE, &host->completed_events); ··· 1815 1743 /* pull first bytes from part_buf, only use during pull */ 1816 1744 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt) 1817 1745 { 1818 - cnt = min(cnt, (int)host->part_buf_count); 1746 + cnt = min_t(int, cnt, host->part_buf_count); 1819 1747 if (cnt) { 1820 1748 memcpy(buf, (void *)&host->part_buf + host->part_buf_start, 1821 1749 cnt); ··· 1841 1769 /* try and push anything in the part_buf */ 1842 1770 if (unlikely(host->part_buf_count)) { 1843 1771 int len = dw_mci_push_part_bytes(host, buf, cnt); 1772 + 1844 1773 buf += len; 1845 1774 cnt -= len; 1846 1775 if (host->part_buf_count == 2) { ··· 1868 1795 #endif 1869 1796 { 1870 1797 u16 *pdata = buf; 1798 + 1871 1799 for (; cnt >= 2; cnt -= 2) 1872 1800 mci_fifo_writew(host->fifo_reg, *pdata++); 1873 1801 buf = pdata; ··· 1893 1819 int len = min(cnt & -2, (int)sizeof(aligned_buf)); 1894 1820 int items = len >> 1; 1895 1821 int i; 1822 + 1896 1823 for (i = 0; i < items; ++i) 1897 1824 aligned_buf[i] = mci_fifo_readw(host->fifo_reg); 1898 1825 /* memcpy from aligned buffer into output buffer */ ··· 1905 1830 #endif 1906 1831 { 1907 1832 u16 *pdata = buf; 1833 + 1908 1834 for (; cnt >= 2; cnt -= 2) 1909 1835 *pdata++ = mci_fifo_readw(host->fifo_reg); 1910 1836 buf = pdata; ··· 1924 1848 /* try and push anything in the part_buf */ 1925 1849 if (unlikely(host->part_buf_count)) { 1926 1850 int len = dw_mci_push_part_bytes(host, buf, cnt); 1851 + 1927 1852 buf += len; 1928 1853 cnt -= len; 1929 1854 if (host->part_buf_count == 4) { ··· 1951 1874 #endif 1952 1875 { 1953 1876 u32 *pdata = buf; 1877 + 1954 1878 for (; cnt >= 4; cnt -= 4) 1955 1879 mci_fifo_writel(host->fifo_reg, *pdata++); 1956 1880 buf = pdata; ··· 
1976 1898 int len = min(cnt & -4, (int)sizeof(aligned_buf)); 1977 1899 int items = len >> 2; 1978 1900 int i; 1901 + 1979 1902 for (i = 0; i < items; ++i) 1980 1903 aligned_buf[i] = mci_fifo_readl(host->fifo_reg); 1981 1904 /* memcpy from aligned buffer into output buffer */ ··· 1988 1909 #endif 1989 1910 { 1990 1911 u32 *pdata = buf; 1912 + 1991 1913 for (; cnt >= 4; cnt -= 4) 1992 1914 *pdata++ = mci_fifo_readl(host->fifo_reg); 1993 1915 buf = pdata; ··· 2007 1927 /* try and push anything in the part_buf */ 2008 1928 if (unlikely(host->part_buf_count)) { 2009 1929 int len = dw_mci_push_part_bytes(host, buf, cnt); 1930 + 2010 1931 buf += len; 2011 1932 cnt -= len; 2012 1933 ··· 2035 1954 #endif 2036 1955 { 2037 1956 u64 *pdata = buf; 1957 + 2038 1958 for (; cnt >= 8; cnt -= 8) 2039 1959 mci_fifo_writeq(host->fifo_reg, *pdata++); 2040 1960 buf = pdata; ··· 2060 1978 int len = min(cnt & -8, (int)sizeof(aligned_buf)); 2061 1979 int items = len >> 3; 2062 1980 int i; 1981 + 2063 1982 for (i = 0; i < items; ++i) 2064 1983 aligned_buf[i] = mci_fifo_readq(host->fifo_reg); 2065 1984 ··· 2073 1990 #endif 2074 1991 { 2075 1992 u64 *pdata = buf; 1993 + 2076 1994 for (; cnt >= 8; cnt -= 8) 2077 1995 *pdata++ = mci_fifo_readq(host->fifo_reg); 2078 1996 buf = pdata; ··· 2149 2065 done: 2150 2066 sg_miter_stop(sg_miter); 2151 2067 host->sg = NULL; 2152 - smp_wmb(); 2068 + smp_wmb(); /* drain writebuffer */ 2153 2069 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 2154 2070 } 2155 2071 ··· 2203 2119 done: 2204 2120 sg_miter_stop(sg_miter); 2205 2121 host->sg = NULL; 2206 - smp_wmb(); 2122 + smp_wmb(); /* drain writebuffer */ 2207 2123 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 2208 2124 } 2209 2125 ··· 2212 2128 if (!host->cmd_status) 2213 2129 host->cmd_status = status; 2214 2130 2215 - smp_wmb(); 2131 + smp_wmb(); /* drain writebuffer */ 2216 2132 2217 2133 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 2218 2134 tasklet_schedule(&host->tasklet); ··· 2276 
2192 if (pending & DW_MCI_CMD_ERROR_FLAGS) { 2277 2193 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); 2278 2194 host->cmd_status = pending; 2279 - smp_wmb(); 2195 + smp_wmb(); /* drain writebuffer */ 2280 2196 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 2281 2197 } 2282 2198 ··· 2284 2200 /* if there is an error report DATA_ERROR */ 2285 2201 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); 2286 2202 host->data_status = pending; 2287 - smp_wmb(); 2203 + smp_wmb(); /* drain writebuffer */ 2288 2204 set_bit(EVENT_DATA_ERROR, &host->pending_events); 2289 2205 tasklet_schedule(&host->tasklet); 2290 2206 } 2291 2207 2292 2208 if (pending & SDMMC_INT_DATA_OVER) { 2209 + if (host->quirks & DW_MCI_QUIRK_BROKEN_DTO) 2210 + del_timer(&host->dto_timer); 2211 + 2293 2212 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); 2294 2213 if (!host->data_status) 2295 2214 host->data_status = pending; 2296 - smp_wmb(); 2215 + smp_wmb(); /* drain writebuffer */ 2297 2216 if (host->dir_status == DW_MCI_RECV_STATUS) { 2298 2217 if (host->sg != NULL) 2299 2218 dw_mci_read_data_pio(host, true); ··· 2470 2383 if (ret) 2471 2384 goto err_host_allocated; 2472 2385 2473 - if (host->pdata->blk_settings) { 2474 - mmc->max_segs = host->pdata->blk_settings->max_segs; 2475 - mmc->max_blk_size = host->pdata->blk_settings->max_blk_size; 2476 - mmc->max_blk_count = host->pdata->blk_settings->max_blk_count; 2477 - mmc->max_req_size = host->pdata->blk_settings->max_req_size; 2478 - mmc->max_seg_size = host->pdata->blk_settings->max_seg_size; 2479 - } else { 2480 - /* Useful defaults if platform data is unset. */ 2481 - #ifdef CONFIG_MMC_DW_IDMAC 2386 + /* Useful defaults if platform data is unset. 
*/ 2387 + if (host->use_dma) { 2482 2388 mmc->max_segs = host->ring_size; 2483 2389 mmc->max_blk_size = 65536; 2484 2390 mmc->max_seg_size = 0x1000; 2485 2391 mmc->max_req_size = mmc->max_seg_size * host->ring_size; 2486 2392 mmc->max_blk_count = mmc->max_req_size / 512; 2487 - #else 2393 + } else { 2488 2394 mmc->max_segs = 64; 2489 2395 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */ 2490 2396 mmc->max_blk_count = 512; 2491 - mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 2397 + mmc->max_req_size = mmc->max_blk_size * 2398 + mmc->max_blk_count; 2492 2399 mmc->max_seg_size = mmc->max_req_size; 2493 - #endif /* CONFIG_MMC_DW_IDMAC */ 2494 2400 } 2495 2401 2496 2402 if (dw_mci_get_cd(mmc)) ··· 2553 2473 if (host->dma_ops->init && host->dma_ops->start && 2554 2474 host->dma_ops->stop && host->dma_ops->cleanup) { 2555 2475 if (host->dma_ops->init(host)) { 2556 - dev_err(host->dev, "%s: Unable to initialize " 2557 - "DMA Controller.\n", __func__); 2476 + dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n", 2477 + __func__); 2558 2478 goto no_dma; 2559 2479 } 2560 2480 } else { ··· 2568 2488 no_dma: 2569 2489 dev_info(host->dev, "Using PIO mode.\n"); 2570 2490 host->use_dma = 0; 2571 - return; 2572 2491 } 2573 2492 2574 2493 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset) ··· 2621 2542 if (host->use_dma) { 2622 2543 unsigned long timeout = jiffies + msecs_to_jiffies(500); 2623 2544 u32 status; 2545 + 2624 2546 do { 2625 2547 status = mci_readl(host, STATUS); 2626 2548 if (!(status & SDMMC_STATUS_DMA_REQ)) ··· 2631 2551 2632 2552 if (status & SDMMC_STATUS_DMA_REQ) { 2633 2553 dev_err(host->dev, 2634 - "%s: Timeout waiting for dma_req to " 2635 - "clear during reset\n", __func__); 2554 + "%s: Timeout waiting for dma_req to clear during reset\n", 2555 + __func__); 2636 2556 goto ciu_out; 2637 2557 } 2638 2558 ··· 2643 2563 } else { 2644 2564 /* if the controller reset bit did clear, then set clock regs */ 2645 2565 if 
(!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) { 2646 - dev_err(host->dev, "%s: fifo/dma reset bits didn't " 2647 - "clear but ciu was reset, doing clock update\n", 2566 + dev_err(host->dev, 2567 + "%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n", 2648 2568 __func__); 2649 2569 goto ciu_out; 2650 2570 } ··· 2678 2598 tasklet_schedule(&host->tasklet); 2679 2599 } 2680 2600 2601 + static void dw_mci_dto_timer(unsigned long arg) 2602 + { 2603 + struct dw_mci *host = (struct dw_mci *)arg; 2604 + 2605 + switch (host->state) { 2606 + case STATE_SENDING_DATA: 2607 + case STATE_DATA_BUSY: 2608 + /* 2609 + * If DTO interrupt does NOT come in sending data state, 2610 + * we should notify the driver to terminate current transfer 2611 + * and report a data timeout to the core. 2612 + */ 2613 + host->data_status = SDMMC_INT_DRTO; 2614 + set_bit(EVENT_DATA_ERROR, &host->pending_events); 2615 + set_bit(EVENT_DATA_COMPLETE, &host->pending_events); 2616 + tasklet_schedule(&host->tasklet); 2617 + break; 2618 + default: 2619 + break; 2620 + } 2621 + } 2622 + 2681 2623 #ifdef CONFIG_OF 2682 2624 static struct dw_mci_of_quirks { 2683 2625 char *quirk; ··· 2727 2625 /* find out number of slots supported */ 2728 2626 if (of_property_read_u32(dev->of_node, "num-slots", 2729 2627 &pdata->num_slots)) { 2730 - dev_info(dev, "num-slots property not found, " 2731 - "assuming 1 slot is available\n"); 2628 + dev_info(dev, 2629 + "num-slots property not found, assuming 1 slot is available\n"); 2732 2630 pdata->num_slots = 1; 2733 2631 } 2734 2632 ··· 2738 2636 pdata->quirks |= of_quirks[idx].id; 2739 2637 2740 2638 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth)) 2741 - dev_info(dev, "fifo-depth property not found, using " 2742 - "value of FIFOTH register as default\n"); 2639 + dev_info(dev, 2640 + "fifo-depth property not found, using value of FIFOTH register as default\n"); 2743 2641 2744 2642 of_property_read_u32(np, "card-detect-delay", 
&pdata->detect_delay_ms); 2745 2643 ··· 2752 2650 return ERR_PTR(ret); 2753 2651 } 2754 2652 2755 - if (of_find_property(np, "supports-highspeed", NULL)) 2653 + if (of_find_property(np, "supports-highspeed", NULL)) { 2654 + dev_info(dev, "supports-highspeed property is deprecated.\n"); 2756 2655 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 2656 + } 2757 2657 2758 2658 return pdata; 2759 2659 } ··· 2810 2706 } 2811 2707 } 2812 2708 2813 - if (host->pdata->num_slots > 1) { 2709 + if (host->pdata->num_slots < 1) { 2814 2710 dev_err(host->dev, 2815 2711 "Platform data must supply num_slots.\n"); 2816 2712 return -ENODEV; ··· 2877 2773 dw_mci_cmd11_timer, (unsigned long)host); 2878 2774 2879 2775 host->quirks = host->pdata->quirks; 2776 + 2777 + if (host->quirks & DW_MCI_QUIRK_BROKEN_DTO) 2778 + setup_timer(&host->dto_timer, 2779 + dw_mci_dto_timer, (unsigned long)host); 2880 2780 2881 2781 spin_lock_init(&host->lock); 2882 2782 spin_lock_init(&host->irq_lock); ··· 2982 2874 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 2983 2875 SDMMC_INT_TXDR | SDMMC_INT_RXDR | 2984 2876 DW_MCI_ERROR_FLAGS); 2985 - mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */ 2877 + /* Enable mci interrupt */ 2878 + mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); 2986 2879 2987 - dev_info(host->dev, "DW MMC controller at irq %d, " 2988 - "%d bit host data width, " 2989 - "%u deep fifo\n", 2880 + dev_info(host->dev, 2881 + "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n", 2990 2882 host->irq, width, fifo_size); 2991 2883 2992 2884 /* We need at least one slot to succeed */ ··· 3001 2893 if (init_slots) { 3002 2894 dev_info(host->dev, "%d slots initialized\n", init_slots); 3003 2895 } else { 3004 - dev_dbg(host->dev, "attempted to initialize %d slots, " 3005 - "but failed on all\n", host->num_slots); 2896 + dev_dbg(host->dev, 2897 + "attempted to initialize %d slots, but failed on all\n", 2898 + host->num_slots); 3006 
2899 goto err_dmaunmap; 3007 2900 } 3008 2901 ··· 3101 2992 3102 2993 for (i = 0; i < host->num_slots; i++) { 3103 2994 struct dw_mci_slot *slot = host->slot[i]; 2995 + 3104 2996 if (!slot) 3105 2997 continue; 3106 2998 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
+6 -3
drivers/mmc/host/omap.c
··· 948 948 { 949 949 struct mmc_data *data = req->data; 950 950 int i, use_dma = 1, block_size; 951 + struct scatterlist *sg; 951 952 unsigned sg_len; 952 953 953 954 host->data = data; ··· 973 972 sg_len = (data->blocks == 1) ? 1 : data->sg_len; 974 973 975 974 /* Only do DMA for entire blocks */ 976 - for (i = 0; i < sg_len; i++) { 977 - if ((data->sg[i].length % block_size) != 0) { 975 + for_each_sg(data->sg, sg, sg_len, i) { 976 + if ((sg->length % block_size) != 0) { 978 977 use_dma = 0; 979 978 break; 980 979 } ··· 1420 1419 host->reg_shift = (mmc_omap7xx() ? 1 : 2); 1421 1420 1422 1421 host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0); 1423 - if (!host->mmc_omap_wq) 1422 + if (!host->mmc_omap_wq) { 1423 + ret = -ENOMEM; 1424 1424 goto err_plat_cleanup; 1425 + } 1425 1426 1426 1427 for (i = 0; i < pdata->nr_slots; i++) { 1427 1428 ret = mmc_omap_new_slot(host, i);
+224 -131
drivers/mmc/host/omap_hsmmc.c
··· 181 181 struct mmc_data *data; 182 182 struct clk *fclk; 183 183 struct clk *dbclk; 184 - /* 185 - * vcc == configured supply 186 - * vcc_aux == optional 187 - * - MMC1, supply for DAT4..DAT7 188 - * - MMC2/MMC2, external level shifter voltage supply, for 189 - * chip (SDIO, eMMC, etc) or transceiver (MMC2 only) 190 - */ 191 - struct regulator *vcc; 192 - struct regulator *vcc_aux; 193 184 struct regulator *pbias; 194 - bool pbias_enabled; 195 185 void __iomem *base; 186 + int vqmmc_enabled; 196 187 resource_size_t mapbase; 197 188 spinlock_t irq_lock; /* Prevent races with irq handler */ 198 189 unsigned int dma_len; ··· 204 213 int context_loss; 205 214 int protect_card; 206 215 int reqs_blocked; 207 - int use_reg; 208 216 int req_in_progress; 209 217 unsigned long clk_rate; 210 218 unsigned int flags; ··· 244 254 return mmc_gpio_get_cd(host->mmc); 245 255 } 246 256 247 - #ifdef CONFIG_REGULATOR 257 + static int omap_hsmmc_enable_supply(struct mmc_host *mmc) 258 + { 259 + int ret; 260 + struct omap_hsmmc_host *host = mmc_priv(mmc); 261 + struct mmc_ios *ios = &mmc->ios; 262 + 263 + if (mmc->supply.vmmc) { 264 + ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); 265 + if (ret) 266 + return ret; 267 + } 268 + 269 + /* Enable interface voltage rail, if needed */ 270 + if (mmc->supply.vqmmc && !host->vqmmc_enabled) { 271 + ret = regulator_enable(mmc->supply.vqmmc); 272 + if (ret) { 273 + dev_err(mmc_dev(mmc), "vmmc_aux reg enable failed\n"); 274 + goto err_vqmmc; 275 + } 276 + host->vqmmc_enabled = 1; 277 + } 278 + 279 + return 0; 280 + 281 + err_vqmmc: 282 + if (mmc->supply.vmmc) 283 + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 284 + 285 + return ret; 286 + } 287 + 288 + static int omap_hsmmc_disable_supply(struct mmc_host *mmc) 289 + { 290 + int ret; 291 + int status; 292 + struct omap_hsmmc_host *host = mmc_priv(mmc); 293 + 294 + if (mmc->supply.vqmmc && host->vqmmc_enabled) { 295 + ret = regulator_disable(mmc->supply.vqmmc); 296 + if (ret) { 
297 + dev_err(mmc_dev(mmc), "vmmc_aux reg disable failed\n"); 298 + return ret; 299 + } 300 + host->vqmmc_enabled = 0; 301 + } 302 + 303 + if (mmc->supply.vmmc) { 304 + ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 305 + if (ret) 306 + goto err_set_ocr; 307 + } 308 + 309 + return 0; 310 + 311 + err_set_ocr: 312 + if (mmc->supply.vqmmc) { 313 + status = regulator_enable(mmc->supply.vqmmc); 314 + if (status) 315 + dev_err(mmc_dev(mmc), "vmmc_aux re-enable failed\n"); 316 + } 317 + 318 + return ret; 319 + } 320 + 321 + static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on, 322 + int vdd) 323 + { 324 + int ret; 325 + 326 + if (!host->pbias) 327 + return 0; 328 + 329 + if (power_on) { 330 + if (vdd <= VDD_165_195) 331 + ret = regulator_set_voltage(host->pbias, VDD_1V8, 332 + VDD_1V8); 333 + else 334 + ret = regulator_set_voltage(host->pbias, VDD_3V0, 335 + VDD_3V0); 336 + if (ret < 0) { 337 + dev_err(host->dev, "pbias set voltage fail\n"); 338 + return ret; 339 + } 340 + 341 + if (!regulator_is_enabled(host->pbias)) { 342 + ret = regulator_enable(host->pbias); 343 + if (ret) { 344 + dev_err(host->dev, "pbias reg enable fail\n"); 345 + return ret; 346 + } 347 + } 348 + } else { 349 + if (regulator_is_enabled(host->pbias)) { 350 + ret = regulator_disable(host->pbias); 351 + if (ret) { 352 + dev_err(host->dev, "pbias reg disable fail\n"); 353 + return ret; 354 + } 355 + } 356 + } 357 + 358 + return 0; 359 + } 248 360 249 361 static int omap_hsmmc_set_power(struct device *dev, int power_on, int vdd) 250 362 { 251 363 struct omap_hsmmc_host *host = 252 364 platform_get_drvdata(to_platform_device(dev)); 365 + struct mmc_host *mmc = host->mmc; 253 366 int ret = 0; 367 + 368 + if (mmc_pdata(host)->set_power) 369 + return mmc_pdata(host)->set_power(dev, power_on, vdd); 254 370 255 371 /* 256 372 * If we don't see a Vcc regulator, assume it's a fixed 257 373 * voltage always-on regulator. 
258 374 */ 259 - if (!host->vcc) 375 + if (!mmc->supply.vmmc) 260 376 return 0; 261 377 262 378 if (mmc_pdata(host)->before_set_reg) 263 379 mmc_pdata(host)->before_set_reg(dev, power_on, vdd); 264 380 265 - if (host->pbias) { 266 - if (host->pbias_enabled == 1) { 267 - ret = regulator_disable(host->pbias); 268 - if (!ret) 269 - host->pbias_enabled = 0; 270 - } 271 - regulator_set_voltage(host->pbias, VDD_3V0, VDD_3V0); 272 - } 381 + ret = omap_hsmmc_set_pbias(host, false, 0); 382 + if (ret) 383 + return ret; 273 384 274 385 /* 275 386 * Assume Vcc regulator is used only to power the card ... OMAP ··· 386 295 * chips/cards need an interface voltage rail too. 387 296 */ 388 297 if (power_on) { 389 - if (host->vcc) 390 - ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd); 391 - /* Enable interface voltage rail, if needed */ 392 - if (ret == 0 && host->vcc_aux) { 393 - ret = regulator_enable(host->vcc_aux); 394 - if (ret < 0 && host->vcc) 395 - ret = mmc_regulator_set_ocr(host->mmc, 396 - host->vcc, 0); 397 - } 298 + ret = omap_hsmmc_enable_supply(mmc); 299 + if (ret) 300 + return ret; 301 + 302 + ret = omap_hsmmc_set_pbias(host, true, vdd); 303 + if (ret) 304 + goto err_set_voltage; 398 305 } else { 399 - /* Shut down the rail */ 400 - if (host->vcc_aux) 401 - ret = regulator_disable(host->vcc_aux); 402 - if (host->vcc) { 403 - /* Then proceed to shut down the local regulator */ 404 - ret = mmc_regulator_set_ocr(host->mmc, 405 - host->vcc, 0); 406 - } 407 - } 408 - 409 - if (host->pbias) { 410 - if (vdd <= VDD_165_195) 411 - ret = regulator_set_voltage(host->pbias, VDD_1V8, 412 - VDD_1V8); 413 - else 414 - ret = regulator_set_voltage(host->pbias, VDD_3V0, 415 - VDD_3V0); 416 - if (ret < 0) 417 - goto error_set_power; 418 - 419 - if (host->pbias_enabled == 0) { 420 - ret = regulator_enable(host->pbias); 421 - if (!ret) 422 - host->pbias_enabled = 1; 423 - } 306 + ret = omap_hsmmc_disable_supply(mmc); 307 + if (ret) 308 + return ret; 424 309 } 425 310 426 311 if 
(mmc_pdata(host)->after_set_reg) 427 312 mmc_pdata(host)->after_set_reg(dev, power_on, vdd); 428 313 429 - error_set_power: 314 + return 0; 315 + 316 + err_set_voltage: 317 + omap_hsmmc_disable_supply(mmc); 318 + 430 319 return ret; 320 + } 321 + 322 + static int omap_hsmmc_disable_boot_regulator(struct regulator *reg) 323 + { 324 + int ret; 325 + 326 + if (!reg) 327 + return 0; 328 + 329 + if (regulator_is_enabled(reg)) { 330 + ret = regulator_enable(reg); 331 + if (ret) 332 + return ret; 333 + 334 + ret = regulator_disable(reg); 335 + if (ret) 336 + return ret; 337 + } 338 + 339 + return 0; 340 + } 341 + 342 + static int omap_hsmmc_disable_boot_regulators(struct omap_hsmmc_host *host) 343 + { 344 + struct mmc_host *mmc = host->mmc; 345 + int ret; 346 + 347 + /* 348 + * disable regulators enabled during boot and get the usecount 349 + * right so that regulators can be enabled/disabled by checking 350 + * the return value of regulator_is_enabled 351 + */ 352 + ret = omap_hsmmc_disable_boot_regulator(mmc->supply.vmmc); 353 + if (ret) { 354 + dev_err(host->dev, "fail to disable boot enabled vmmc reg\n"); 355 + return ret; 356 + } 357 + 358 + ret = omap_hsmmc_disable_boot_regulator(mmc->supply.vqmmc); 359 + if (ret) { 360 + dev_err(host->dev, 361 + "fail to disable boot enabled vmmc_aux reg\n"); 362 + return ret; 363 + } 364 + 365 + ret = omap_hsmmc_disable_boot_regulator(host->pbias); 366 + if (ret) { 367 + dev_err(host->dev, 368 + "failed to disable boot enabled pbias reg\n"); 369 + return ret; 370 + } 371 + 372 + return 0; 431 373 } 432 374 433 375 static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) 434 376 { 435 - struct regulator *reg; 436 377 int ocr_value = 0; 378 + int ret; 379 + struct mmc_host *mmc = host->mmc; 437 380 438 - reg = devm_regulator_get(host->dev, "vmmc"); 439 - if (IS_ERR(reg)) { 440 - dev_err(host->dev, "unable to get vmmc regulator %ld\n", 441 - PTR_ERR(reg)); 442 - return PTR_ERR(reg); 381 + if (mmc_pdata(host)->set_power) 382 + 
return 0; 383 + 384 + mmc->supply.vmmc = devm_regulator_get_optional(host->dev, "vmmc"); 385 + if (IS_ERR(mmc->supply.vmmc)) { 386 + ret = PTR_ERR(mmc->supply.vmmc); 387 + if (ret != -ENODEV) 388 + return ret; 389 + dev_dbg(host->dev, "unable to get vmmc regulator %ld\n", 390 + PTR_ERR(mmc->supply.vmmc)); 391 + mmc->supply.vmmc = NULL; 443 392 } else { 444 - host->vcc = reg; 445 - ocr_value = mmc_regulator_get_ocrmask(reg); 446 - if (!mmc_pdata(host)->ocr_mask) { 393 + ocr_value = mmc_regulator_get_ocrmask(mmc->supply.vmmc); 394 + if (ocr_value > 0) 447 395 mmc_pdata(host)->ocr_mask = ocr_value; 448 - } else { 449 - if (!(mmc_pdata(host)->ocr_mask & ocr_value)) { 450 - dev_err(host->dev, "ocrmask %x is not supported\n", 451 - mmc_pdata(host)->ocr_mask); 452 - mmc_pdata(host)->ocr_mask = 0; 453 - return -EINVAL; 454 - } 455 - } 456 396 } 457 - mmc_pdata(host)->set_power = omap_hsmmc_set_power; 458 397 459 398 /* Allow an aux regulator */ 460 - reg = devm_regulator_get_optional(host->dev, "vmmc_aux"); 461 - host->vcc_aux = IS_ERR(reg) ? NULL : reg; 399 + mmc->supply.vqmmc = devm_regulator_get_optional(host->dev, "vmmc_aux"); 400 + if (IS_ERR(mmc->supply.vqmmc)) { 401 + ret = PTR_ERR(mmc->supply.vqmmc); 402 + if (ret != -ENODEV) 403 + return ret; 404 + dev_dbg(host->dev, "unable to get vmmc_aux regulator %ld\n", 405 + PTR_ERR(mmc->supply.vqmmc)); 406 + mmc->supply.vqmmc = NULL; 407 + } 462 408 463 - reg = devm_regulator_get_optional(host->dev, "pbias"); 464 - host->pbias = IS_ERR(reg) ? 
NULL : reg; 409 + host->pbias = devm_regulator_get_optional(host->dev, "pbias"); 410 + if (IS_ERR(host->pbias)) { 411 + ret = PTR_ERR(host->pbias); 412 + if (ret != -ENODEV) 413 + return ret; 414 + dev_dbg(host->dev, "unable to get pbias regulator %ld\n", 415 + PTR_ERR(host->pbias)); 416 + host->pbias = NULL; 417 + } 465 418 466 419 /* For eMMC do not power off when not in sleep state */ 467 420 if (mmc_pdata(host)->no_regulator_off_init) 468 421 return 0; 469 - /* 470 - * To disable boot_on regulator, enable regulator 471 - * to increase usecount and then disable it. 472 - */ 473 - if ((host->vcc && regulator_is_enabled(host->vcc) > 0) || 474 - (host->vcc_aux && regulator_is_enabled(host->vcc_aux))) { 475 - int vdd = ffs(mmc_pdata(host)->ocr_mask) - 1; 476 422 477 - mmc_pdata(host)->set_power(host->dev, 1, vdd); 478 - mmc_pdata(host)->set_power(host->dev, 0, 0); 479 - } 423 + ret = omap_hsmmc_disable_boot_regulators(host); 424 + if (ret) 425 + return ret; 480 426 481 427 return 0; 482 428 } 483 - 484 - static void omap_hsmmc_reg_put(struct omap_hsmmc_host *host) 485 - { 486 - mmc_pdata(host)->set_power = NULL; 487 - } 488 - 489 - static inline int omap_hsmmc_have_reg(void) 490 - { 491 - return 1; 492 - } 493 - 494 - #else 495 - 496 - static inline int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) 497 - { 498 - return -EINVAL; 499 - } 500 - 501 - static inline void omap_hsmmc_reg_put(struct omap_hsmmc_host *host) 502 - { 503 - } 504 - 505 - static inline int omap_hsmmc_have_reg(void) 506 - { 507 - return 0; 508 - } 509 - 510 - #endif 511 429 512 430 static irqreturn_t omap_hsmmc_cover_irq(int irq, void *dev_id); 513 431 ··· 1249 1149 clk_disable_unprepare(host->dbclk); 1250 1150 1251 1151 /* Turn the power off */ 1252 - ret = mmc_pdata(host)->set_power(host->dev, 0, 0); 1152 + ret = omap_hsmmc_set_power(host->dev, 0, 0); 1253 1153 1254 1154 /* Turn the power ON with given VDD 1.8 or 3.0v */ 1255 1155 if (!ret) 1256 - ret = mmc_pdata(host)->set_power(host->dev, 
1, vdd); 1156 + ret = omap_hsmmc_set_power(host->dev, 1, vdd); 1257 1157 pm_runtime_get_sync(host->dev); 1258 1158 if (host->dbclk) 1259 1159 clk_prepare_enable(host->dbclk); ··· 1652 1552 if (ios->power_mode != host->power_mode) { 1653 1553 switch (ios->power_mode) { 1654 1554 case MMC_POWER_OFF: 1655 - mmc_pdata(host)->set_power(host->dev, 0, 0); 1555 + omap_hsmmc_set_power(host->dev, 0, 0); 1656 1556 break; 1657 1557 case MMC_POWER_UP: 1658 - mmc_pdata(host)->set_power(host->dev, 1, ios->vdd); 1558 + omap_hsmmc_set_power(host->dev, 1, ios->vdd); 1659 1559 break; 1660 1560 case MMC_POWER_ON: 1661 1561 do_send_init_stream = 1; ··· 2053 1953 host->base = base + pdata->reg_offset; 2054 1954 host->power_mode = MMC_POWER_OFF; 2055 1955 host->next_data.cookie = 1; 2056 - host->pbias_enabled = 0; 1956 + host->vqmmc_enabled = 0; 2057 1957 2058 1958 ret = omap_hsmmc_gpio_init(mmc, host, pdata); 2059 1959 if (ret) ··· 2178 2078 goto err_irq; 2179 2079 } 2180 2080 2181 - if (omap_hsmmc_have_reg() && !mmc_pdata(host)->set_power) { 2182 - ret = omap_hsmmc_reg_get(host); 2183 - if (ret) 2184 - goto err_irq; 2185 - host->use_reg = 1; 2186 - } 2081 + ret = omap_hsmmc_reg_get(host); 2082 + if (ret) 2083 + goto err_irq; 2187 2084 2188 2085 mmc->ocr_avail = mmc_pdata(host)->ocr_mask; 2189 2086 ··· 2222 2125 2223 2126 err_slot_name: 2224 2127 mmc_remove_host(mmc); 2225 - if (host->use_reg) 2226 - omap_hsmmc_reg_put(host); 2227 2128 err_irq: 2228 2129 device_init_wakeup(&pdev->dev, false); 2229 2130 if (host->tx_chan) ··· 2245 2150 2246 2151 pm_runtime_get_sync(host->dev); 2247 2152 mmc_remove_host(host->mmc); 2248 - if (host->use_reg) 2249 - omap_hsmmc_reg_put(host); 2250 2153 2251 2154 if (host->tx_chan) 2252 2155 dma_release_channel(host->tx_chan);
+115 -87
drivers/mmc/host/pxamci.c
··· 22 22 #include <linux/platform_device.h> 23 23 #include <linux/delay.h> 24 24 #include <linux/interrupt.h> 25 + #include <linux/dmaengine.h> 25 26 #include <linux/dma-mapping.h> 27 + #include <linux/dma/pxa-dma.h> 26 28 #include <linux/clk.h> 27 29 #include <linux/err.h> 28 30 #include <linux/mmc/host.h> ··· 39 37 #include <asm/sizes.h> 40 38 41 39 #include <mach/hardware.h> 42 - #include <mach/dma.h> 43 40 #include <linux/platform_data/mmc-pxamci.h> 44 41 45 42 #include "pxamci.h" ··· 59 58 struct clk *clk; 60 59 unsigned long clkrate; 61 60 int irq; 62 - int dma; 63 61 unsigned int clkrt; 64 62 unsigned int cmdat; 65 63 unsigned int imask; ··· 69 69 struct mmc_command *cmd; 70 70 struct mmc_data *data; 71 71 72 + struct dma_chan *dma_chan_rx; 73 + struct dma_chan *dma_chan_tx; 74 + dma_cookie_t dma_cookie; 72 75 dma_addr_t sg_dma; 73 - struct pxa_dma_desc *sg_cpu; 74 76 unsigned int dma_len; 75 77 76 78 unsigned int dma_dir; ··· 175 173 spin_unlock_irqrestore(&host->lock, flags); 176 174 } 177 175 176 + static void pxamci_dma_irq(void *param); 177 + 178 178 static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data) 179 179 { 180 + struct dma_async_tx_descriptor *tx; 181 + enum dma_data_direction direction; 182 + struct dma_slave_config config; 183 + struct dma_chan *chan; 180 184 unsigned int nob = data->blocks; 181 185 unsigned long long clks; 182 186 unsigned int timeout; 183 - bool dalgn = 0; 184 - u32 dcmd; 185 - int i; 187 + int ret; 186 188 187 189 host->data = data; 188 190 ··· 201 195 timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt); 202 196 writel((timeout + 255) / 256, host->base + MMC_RDTO); 203 197 198 + memset(&config, 0, sizeof(config)); 199 + config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 200 + config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 201 + config.src_addr = host->res->start + MMC_RXFIFO; 202 + config.dst_addr = host->res->start + MMC_TXFIFO; 203 + config.src_maxburst = 32; 204 + 
config.dst_maxburst = 32; 205 + 204 206 if (data->flags & MMC_DATA_READ) { 205 207 host->dma_dir = DMA_FROM_DEVICE; 206 - dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC; 207 - DRCMR(host->dma_drcmrtx) = 0; 208 - DRCMR(host->dma_drcmrrx) = host->dma | DRCMR_MAPVLD; 208 + direction = DMA_DEV_TO_MEM; 209 + chan = host->dma_chan_rx; 209 210 } else { 210 211 host->dma_dir = DMA_TO_DEVICE; 211 - dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG; 212 - DRCMR(host->dma_drcmrrx) = 0; 213 - DRCMR(host->dma_drcmrtx) = host->dma | DRCMR_MAPVLD; 212 + direction = DMA_MEM_TO_DEV; 213 + chan = host->dma_chan_tx; 214 214 } 215 215 216 - dcmd |= DCMD_BURST32 | DCMD_WIDTH1; 216 + config.direction = direction; 217 217 218 - host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 218 + ret = dmaengine_slave_config(chan, &config); 219 + if (ret < 0) { 220 + dev_err(mmc_dev(host->mmc), "dma slave config failed\n"); 221 + return; 222 + } 223 + 224 + host->dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len, 219 225 host->dma_dir); 220 226 221 - for (i = 0; i < host->dma_len; i++) { 222 - unsigned int length = sg_dma_len(&data->sg[i]); 223 - host->sg_cpu[i].dcmd = dcmd | length; 224 - if (length & 31 && !(data->flags & MMC_DATA_READ)) 225 - host->sg_cpu[i].dcmd |= DCMD_ENDIRQEN; 226 - /* Not aligned to 8-byte boundary? 
*/ 227 - if (sg_dma_address(&data->sg[i]) & 0x7) 228 - dalgn = 1; 229 - if (data->flags & MMC_DATA_READ) { 230 - host->sg_cpu[i].dsadr = host->res->start + MMC_RXFIFO; 231 - host->sg_cpu[i].dtadr = sg_dma_address(&data->sg[i]); 232 - } else { 233 - host->sg_cpu[i].dsadr = sg_dma_address(&data->sg[i]); 234 - host->sg_cpu[i].dtadr = host->res->start + MMC_TXFIFO; 235 - } 236 - host->sg_cpu[i].ddadr = host->sg_dma + (i + 1) * 237 - sizeof(struct pxa_dma_desc); 227 + tx = dmaengine_prep_slave_sg(chan, data->sg, host->dma_len, direction, 228 + DMA_PREP_INTERRUPT); 229 + if (!tx) { 230 + dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n"); 231 + return; 238 232 } 239 - host->sg_cpu[host->dma_len - 1].ddadr = DDADR_STOP; 240 - wmb(); 241 233 242 - /* 243 - * The PXA27x DMA controller encounters overhead when working with 244 - * unaligned (to 8-byte boundaries) data, so switch on byte alignment 245 - * mode only if we have unaligned data. 246 - */ 247 - if (dalgn) 248 - DALGN |= (1 << host->dma); 249 - else 250 - DALGN &= ~(1 << host->dma); 251 - DDADR(host->dma) = host->sg_dma; 234 + if (!(data->flags & MMC_DATA_READ)) { 235 + tx->callback = pxamci_dma_irq; 236 + tx->callback_param = host; 237 + } 238 + 239 + host->dma_cookie = dmaengine_submit(tx); 252 240 253 241 /* 254 242 * workaround for erratum #91: ··· 251 251 * before starting DMA. 
252 252 */ 253 253 if (!cpu_is_pxa27x() || data->flags & MMC_DATA_READ) 254 - DCSR(host->dma) = DCSR_RUN; 254 + dma_async_issue_pending(chan); 255 255 } 256 256 257 257 static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat) ··· 343 343 * enable DMA late 344 344 */ 345 345 if (cpu_is_pxa27x() && host->data->flags & MMC_DATA_WRITE) 346 - DCSR(host->dma) = DCSR_RUN; 346 + dma_async_issue_pending(host->dma_chan_tx); 347 347 } else { 348 348 pxamci_finish_request(host, host->mrq); 349 349 } ··· 354 354 static int pxamci_data_done(struct pxamci_host *host, unsigned int stat) 355 355 { 356 356 struct mmc_data *data = host->data; 357 + struct dma_chan *chan; 357 358 358 359 if (!data) 359 360 return 0; 360 361 361 - DCSR(host->dma) = 0; 362 - dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 363 - host->dma_dir); 362 + if (data->flags & MMC_DATA_READ) 363 + chan = host->dma_chan_rx; 364 + else 365 + chan = host->dma_chan_tx; 366 + dma_unmap_sg(chan->device->dev, 367 + data->sg, data->sg_len, host->dma_dir); 364 368 365 369 if (stat & STAT_READ_TIME_OUT) 366 370 data->error = -ETIMEDOUT; ··· 556 552 .enable_sdio_irq = pxamci_enable_sdio_irq, 557 553 }; 558 554 559 - static void pxamci_dma_irq(int dma, void *devid) 555 + static void pxamci_dma_irq(void *param) 560 556 { 561 - struct pxamci_host *host = devid; 562 - int dcsr = DCSR(dma); 563 - DCSR(dma) = dcsr & ~DCSR_STOPIRQEN; 557 + struct pxamci_host *host = param; 558 + struct dma_tx_state state; 559 + enum dma_status status; 560 + struct dma_chan *chan; 561 + unsigned long flags; 564 562 565 - if (dcsr & DCSR_ENDINTR) { 563 + spin_lock_irqsave(&host->lock, flags); 564 + 565 + if (!host->data) 566 + goto out_unlock; 567 + 568 + if (host->data->flags & MMC_DATA_READ) 569 + chan = host->dma_chan_rx; 570 + else 571 + chan = host->dma_chan_tx; 572 + 573 + status = dmaengine_tx_status(chan, host->dma_cookie, &state); 574 + 575 + if (likely(status == DMA_COMPLETE)) { 566 576 
writel(BUF_PART_FULL, host->base + MMC_PRTBUF); 567 577 } else { 568 - pr_err("%s: DMA error on channel %d (DCSR=%#x)\n", 569 - mmc_hostname(host->mmc), dma, dcsr); 578 + pr_err("%s: DMA error on %s channel\n", mmc_hostname(host->mmc), 579 + host->data->flags & MMC_DATA_READ ? "rx" : "tx"); 570 580 host->data->error = -EIO; 571 581 pxamci_data_done(host, 0); 572 582 } 583 + 584 + out_unlock: 585 + spin_unlock_irqrestore(&host->lock, flags); 573 586 } 574 587 575 588 static irqreturn_t pxamci_detect_irq(int irq, void *devid) ··· 646 625 struct mmc_host *mmc; 647 626 struct pxamci_host *host = NULL; 648 627 struct resource *r, *dmarx, *dmatx; 628 + struct pxad_param param_rx, param_tx; 649 629 int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1; 630 + dma_cap_mask_t mask; 650 631 651 632 ret = pxamci_of_init(pdev); 652 633 if (ret) ··· 694 671 695 672 host = mmc_priv(mmc); 696 673 host->mmc = mmc; 697 - host->dma = -1; 698 674 host->pdata = pdev->dev.platform_data; 699 675 host->clkrt = CLKRT_OFF; 700 676 ··· 724 702 MMC_CAP_SD_HIGHSPEED; 725 703 } 726 704 727 - host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL); 728 - if (!host->sg_cpu) { 729 - ret = -ENOMEM; 730 - goto out; 731 - } 732 - 733 705 spin_lock_init(&host->lock); 734 706 host->res = r; 735 707 host->irq = irq; ··· 744 728 writel(64, host->base + MMC_RESTO); 745 729 writel(host->imask, host->base + MMC_I_MASK); 746 730 747 - host->dma = pxa_request_dma(DRIVER_NAME, DMA_PRIO_LOW, 748 - pxamci_dma_irq, host); 749 - if (host->dma < 0) { 750 - ret = -EBUSY; 751 - goto out; 752 - } 753 - 754 731 ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host); 755 732 if (ret) 756 733 goto out; 757 734 758 735 platform_set_drvdata(pdev, mmc); 759 736 760 - dmarx = platform_get_resource(pdev, IORESOURCE_DMA, 0); 761 - if (!dmarx) { 762 - ret = -ENXIO; 763 - goto out; 737 + if (!pdev->dev.of_node) { 738 + dmarx = platform_get_resource(pdev, IORESOURCE_DMA, 0); 739 + dmatx = 
platform_get_resource(pdev, IORESOURCE_DMA, 1); 740 + if (!dmarx || !dmatx) { 741 + ret = -ENXIO; 742 + goto out; 743 + } 744 + param_rx.prio = PXAD_PRIO_LOWEST; 745 + param_rx.drcmr = dmarx->start; 746 + param_tx.prio = PXAD_PRIO_LOWEST; 747 + param_tx.drcmr = dmatx->start; 764 748 } 765 - host->dma_drcmrrx = dmarx->start; 766 749 767 - dmatx = platform_get_resource(pdev, IORESOURCE_DMA, 1); 768 - if (!dmatx) { 769 - ret = -ENXIO; 750 + dma_cap_zero(mask); 751 + dma_cap_set(DMA_SLAVE, mask); 752 + 753 + host->dma_chan_rx = 754 + dma_request_slave_channel_compat(mask, pxad_filter_fn, 755 + &param_rx, &pdev->dev, "rx"); 756 + if (host->dma_chan_rx == NULL) { 757 + dev_err(&pdev->dev, "unable to request rx dma channel\n"); 758 + ret = -ENODEV; 770 759 goto out; 771 760 } 772 - host->dma_drcmrtx = dmatx->start; 761 + 762 + host->dma_chan_tx = 763 + dma_request_slave_channel_compat(mask, pxad_filter_fn, 764 + &param_tx, &pdev->dev, "tx"); 765 + if (host->dma_chan_tx == NULL) { 766 + dev_err(&pdev->dev, "unable to request tx dma channel\n"); 767 + ret = -ENODEV; 768 + goto out; 769 + } 773 770 774 771 if (host->pdata) { 775 772 gpio_cd = host->pdata->gpio_card_detect; ··· 843 814 gpio_free(gpio_power); 844 815 out: 845 816 if (host) { 846 - if (host->dma >= 0) 847 - pxa_free_dma(host->dma); 817 + if (host->dma_chan_rx) 818 + dma_release_channel(host->dma_chan_rx); 819 + if (host->dma_chan_tx) 820 + dma_release_channel(host->dma_chan_tx); 848 821 if (host->base) 849 822 iounmap(host->base); 850 - if (host->sg_cpu) 851 - dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); 852 823 if (host->clk) 853 824 clk_put(host->clk); 854 825 } ··· 892 863 END_CMD_RES|PRG_DONE|DATA_TRAN_DONE, 893 864 host->base + MMC_I_MASK); 894 865 895 - DRCMR(host->dma_drcmrrx) = 0; 896 - DRCMR(host->dma_drcmrtx) = 0; 897 - 898 866 free_irq(host->irq, host); 899 - pxa_free_dma(host->dma); 867 + dmaengine_terminate_all(host->dma_chan_rx); 868 + 
dmaengine_terminate_all(host->dma_chan_tx); 869 + dma_release_channel(host->dma_chan_rx); 870 + dma_release_channel(host->dma_chan_tx); 900 871 iounmap(host->base); 901 - dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); 902 872 903 873 clk_put(host->clk); 904 874
+110 -4
drivers/mmc/host/sdhci-esdhc-imx.c
··· 32 32 #include "sdhci-esdhc.h" 33 33 34 34 #define ESDHC_CTRL_D3CD 0x08 35 + #define ESDHC_BURST_LEN_EN_INCR (1 << 27) 35 36 /* VENDOR SPEC register */ 36 37 #define ESDHC_VENDOR_SPEC 0xc0 37 38 #define ESDHC_VENDOR_SPEC_SDIO_QUIRK (1 << 1) ··· 45 44 #define ESDHC_MIX_CTRL_EXE_TUNE (1 << 22) 46 45 #define ESDHC_MIX_CTRL_SMPCLK_SEL (1 << 23) 47 46 #define ESDHC_MIX_CTRL_FBCLK_SEL (1 << 25) 47 + #define ESDHC_MIX_CTRL_HS400_EN (1 << 26) 48 48 /* Bits 3 and 6 are not SDHCI standard definitions */ 49 49 #define ESDHC_MIX_CTRL_SDHCI_MASK 0xb7 50 50 /* Tuning bits */ ··· 62 60 #define ESDHC_TUNE_CTRL_MIN 0 63 61 #define ESDHC_TUNE_CTRL_MAX ((1 << 7) - 1) 64 62 63 + /* strobe dll register */ 64 + #define ESDHC_STROBE_DLL_CTRL 0x70 65 + #define ESDHC_STROBE_DLL_CTRL_ENABLE (1 << 0) 66 + #define ESDHC_STROBE_DLL_CTRL_RESET (1 << 1) 67 + #define ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT 3 68 + 69 + #define ESDHC_STROBE_DLL_STATUS 0x74 70 + #define ESDHC_STROBE_DLL_STS_REF_LOCK (1 << 1) 71 + #define ESDHC_STROBE_DLL_STS_SLV_LOCK 0x1 72 + 65 73 #define ESDHC_TUNING_CTRL 0xcc 66 74 #define ESDHC_STD_TUNING_EN (1 << 24) 67 75 /* NOTE: the minimum valid tuning start tap for mx6sl is 1 */ 68 76 #define ESDHC_TUNING_START_TAP 0x1 77 + #define ESDHC_TUNING_STEP_SHIFT 16 69 78 70 79 /* pinctrl state */ 71 80 #define ESDHC_PINCTRL_STATE_100MHZ "state_100mhz" ··· 133 120 #define ESDHC_FLAG_ERR004536 BIT(7) 134 121 /* The IP supports HS200 mode */ 135 122 #define ESDHC_FLAG_HS200 BIT(8) 123 + /* The IP supports HS400 mode */ 124 + #define ESDHC_FLAG_HS400 BIT(9) 125 + 126 + /* A higher clock ferquency than this rate requires strobell dll control */ 127 + #define ESDHC_STROBE_DLL_CLK_FREQ 100000000 136 128 137 129 struct esdhc_soc_data { 138 130 u32 flags; ··· 172 154 static struct esdhc_soc_data usdhc_imx6sx_data = { 173 155 .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING 174 156 | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200, 157 + }; 158 + 159 + static struct esdhc_soc_data 
usdhc_imx7d_data = { 160 + .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING 161 + | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200 162 + | ESDHC_FLAG_HS400, 175 163 }; 176 164 177 165 struct pltfm_imx_data { ··· 223 199 { .compatible = "fsl,imx6sx-usdhc", .data = &usdhc_imx6sx_data, }, 224 200 { .compatible = "fsl,imx6sl-usdhc", .data = &usdhc_imx6sl_data, }, 225 201 { .compatible = "fsl,imx6q-usdhc", .data = &usdhc_imx6q_data, }, 202 + { .compatible = "fsl,imx7d-usdhc", .data = &usdhc_imx7d_data, }, 226 203 { /* sentinel */ } 227 204 }; 228 205 MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids); ··· 299 274 val = SDHCI_SUPPORT_DDR50 | SDHCI_SUPPORT_SDR104 300 275 | SDHCI_SUPPORT_SDR50 301 276 | SDHCI_USE_SDR50_TUNING; 277 + 278 + if (imx_data->socdata->flags & ESDHC_FLAG_HS400) 279 + val |= SDHCI_SUPPORT_HS400; 302 280 } 303 281 } 304 282 ··· 476 448 } else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) { 477 449 u32 v = readl(host->ioaddr + SDHCI_ACMD12_ERR); 478 450 u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL); 451 + u32 tuning_ctrl; 479 452 if (val & SDHCI_CTRL_TUNED_CLK) { 480 453 v |= ESDHC_MIX_CTRL_SMPCLK_SEL; 481 454 } else { ··· 487 458 if (val & SDHCI_CTRL_EXEC_TUNING) { 488 459 v |= ESDHC_MIX_CTRL_EXE_TUNE; 489 460 m |= ESDHC_MIX_CTRL_FBCLK_SEL; 461 + tuning_ctrl = readl(host->ioaddr + ESDHC_TUNING_CTRL); 462 + tuning_ctrl |= ESDHC_STD_TUNING_EN | ESDHC_TUNING_START_TAP; 463 + if (imx_data->boarddata.tuning_step) 464 + tuning_ctrl |= imx_data->boarddata.tuning_step << ESDHC_TUNING_STEP_SHIFT; 465 + writel(tuning_ctrl, host->ioaddr + ESDHC_TUNING_CTRL); 490 466 } else { 491 467 v &= ~ESDHC_MIX_CTRL_EXE_TUNE; 492 468 } ··· 808 774 break; 809 775 case MMC_TIMING_UHS_SDR104: 810 776 case MMC_TIMING_MMC_HS200: 777 + case MMC_TIMING_MMC_HS400: 811 778 pinctrl = imx_data->pins_200mhz; 812 779 break; 813 780 default: ··· 819 784 return pinctrl_select_state(imx_data->pinctrl, pinctrl); 820 785 } 821 786 787 + /* 788 + * For HS400 eMMC, there is a data_strobe line, this 
signal is generated 789 + * by the device and used for data output and CRC status response output 790 + * in HS400 mode. The frequency of this signal follows the frequency of 791 + * CLK generated by host. Host receive the data which is aligned to the 792 + * edge of data_strobe line. Due to the time delay between CLK line and 793 + * data_strobe line, if the delay time is larger than one clock cycle, 794 + * then CLK and data_strobe line will misaligned, read error shows up. 795 + * So when the CLK is higher than 100MHz, each clock cycle is short enough, 796 + * host should config the delay target. 797 + */ 798 + static void esdhc_set_strobe_dll(struct sdhci_host *host) 799 + { 800 + u32 v; 801 + 802 + if (host->mmc->actual_clock > ESDHC_STROBE_DLL_CLK_FREQ) { 803 + /* force a reset on strobe dll */ 804 + writel(ESDHC_STROBE_DLL_CTRL_RESET, 805 + host->ioaddr + ESDHC_STROBE_DLL_CTRL); 806 + /* 807 + * enable strobe dll ctrl and adjust the delay target 808 + * for the uSDHC loopback read clock 809 + */ 810 + v = ESDHC_STROBE_DLL_CTRL_ENABLE | 811 + (7 << ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT); 812 + writel(v, host->ioaddr + ESDHC_STROBE_DLL_CTRL); 813 + /* wait 1us to make sure strobe dll status register stable */ 814 + udelay(1); 815 + v = readl(host->ioaddr + ESDHC_STROBE_DLL_STATUS); 816 + if (!(v & ESDHC_STROBE_DLL_STS_REF_LOCK)) 817 + dev_warn(mmc_dev(host->mmc), 818 + "warning! HS400 strobe DLL status REF not lock!\n"); 819 + if (!(v & ESDHC_STROBE_DLL_STS_SLV_LOCK)) 820 + dev_warn(mmc_dev(host->mmc), 821 + "warning! 
HS400 strobe DLL status SLV not lock!\n"); 822 + } 823 + } 824 + 822 825 static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing) 823 826 { 827 + u32 m; 824 828 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 825 829 struct pltfm_imx_data *imx_data = pltfm_host->priv; 826 830 struct esdhc_platform_data *boarddata = &imx_data->boarddata; 831 + 832 + /* disable ddr mode and disable HS400 mode */ 833 + m = readl(host->ioaddr + ESDHC_MIX_CTRL); 834 + m &= ~(ESDHC_MIX_CTRL_DDREN | ESDHC_MIX_CTRL_HS400_EN); 835 + imx_data->is_ddr = 0; 827 836 828 837 switch (timing) { 829 838 case MMC_TIMING_UHS_SDR12: ··· 875 796 case MMC_TIMING_UHS_SDR50: 876 797 case MMC_TIMING_UHS_SDR104: 877 798 case MMC_TIMING_MMC_HS200: 799 + writel(m, host->ioaddr + ESDHC_MIX_CTRL); 878 800 break; 879 801 case MMC_TIMING_UHS_DDR50: 880 802 case MMC_TIMING_MMC_DDR52: 881 - writel(readl(host->ioaddr + ESDHC_MIX_CTRL) | 882 - ESDHC_MIX_CTRL_DDREN, 883 - host->ioaddr + ESDHC_MIX_CTRL); 803 + m |= ESDHC_MIX_CTRL_DDREN; 804 + writel(m, host->ioaddr + ESDHC_MIX_CTRL); 884 805 imx_data->is_ddr = 1; 885 806 if (boarddata->delay_line) { 886 807 u32 v; ··· 891 812 v <<= 1; 892 813 writel(v, host->ioaddr + ESDHC_DLL_CTRL); 893 814 } 815 + break; 816 + case MMC_TIMING_MMC_HS400: 817 + m |= ESDHC_MIX_CTRL_DDREN | ESDHC_MIX_CTRL_HS400_EN; 818 + writel(m, host->ioaddr + ESDHC_MIX_CTRL); 819 + imx_data->is_ddr = 1; 820 + esdhc_set_strobe_dll(host); 894 821 break; 895 822 } 896 823 ··· 970 885 boarddata->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0); 971 886 if (gpio_is_valid(boarddata->wp_gpio)) 972 887 boarddata->wp_type = ESDHC_WP_GPIO; 888 + 889 + of_property_read_u32(np, "fsl,tuning-step", &boarddata->tuning_step); 973 890 974 891 if (of_find_property(np, "no-1-8-v", NULL)) 975 892 boarddata->support_vsel = false; ··· 1160 1073 * to something insane. Change it back here. 
1161 1074 */ 1162 1075 if (esdhc_is_usdhc(imx_data)) { 1163 - writel(0x08100810, host->ioaddr + ESDHC_WTMK_LVL); 1076 + writel(0x10401040, host->ioaddr + ESDHC_WTMK_LVL); 1077 + 1164 1078 host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN; 1165 1079 host->mmc->caps |= MMC_CAP_1_8V_DDR; 1080 + 1081 + /* 1082 + * ROM code will change the bit burst_length_enable setting 1083 + * to zero if this usdhc is choosed to boot system. Change 1084 + * it back here, otherwise it will impact the performance a 1085 + * lot. This bit is used to enable/disable the burst length 1086 + * for the external AHB2AXI bridge, it's usefully especially 1087 + * for INCR transfer because without burst length indicator, 1088 + * the AHB2AXI bridge does not know the burst length in 1089 + * advance. And without burst length indicator, AHB INCR 1090 + * transfer can only be converted to singles on the AXI side. 1091 + */ 1092 + writel(readl(host->ioaddr + SDHCI_HOST_CONTROL) 1093 + | ESDHC_BURST_LEN_EN_INCR, 1094 + host->ioaddr + SDHCI_HOST_CONTROL); 1166 1095 1167 1096 if (!(imx_data->socdata->flags & ESDHC_FLAG_HS200)) 1168 1097 host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200; ··· 1202 1099 1203 1100 if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536) 1204 1101 host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; 1102 + 1103 + if (imx_data->socdata->flags & ESDHC_FLAG_HS400) 1104 + host->quirks2 |= SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400; 1205 1105 1206 1106 if (of_id) 1207 1107 err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
+2 -1
drivers/mmc/host/sdhci-esdhc.h
··· 21 21 #define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \ 22 22 SDHCI_QUIRK_NO_BUSY_IRQ | \ 23 23 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \ 24 - SDHCI_QUIRK_PIO_NEEDS_DELAY) 24 + SDHCI_QUIRK_PIO_NEEDS_DELAY | \ 25 + SDHCI_QUIRK_NO_HISPD_BIT) 25 26 26 27 #define ESDHC_SYSTEM_CONTROL 0x2c 27 28 #define ESDHC_CLOCK_MASK 0x0000fff0
+5
drivers/mmc/host/sdhci-msm.c
··· 489 489 goto pclk_disable; 490 490 } 491 491 492 + /* Vote for maximum clock rate for maximum performance */ 493 + ret = clk_set_rate(msm_host->clk, INT_MAX); 494 + if (ret) 495 + dev_warn(&pdev->dev, "core clock boost failed\n"); 496 + 492 497 ret = clk_prepare_enable(msm_host->clk); 493 498 if (ret) 494 499 goto pclk_disable;
+4
drivers/mmc/host/sdhci-of-arasan.c
··· 63 63 64 64 static struct sdhci_pltfm_data sdhci_arasan_pdata = { 65 65 .ops = &sdhci_arasan_ops, 66 + .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, 67 + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | 68 + SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN, 66 69 }; 67 70 68 71 #ifdef CONFIG_PM_SLEEP ··· 217 214 218 215 static const struct of_device_id sdhci_arasan_of_match[] = { 219 216 { .compatible = "arasan,sdhci-8.9a" }, 217 + { .compatible = "arasan,sdhci-5.1" }, 220 218 { .compatible = "arasan,sdhci-4.9a" }, 221 219 { } 222 220 };
+191
drivers/mmc/host/sdhci-of-at91.c
··· 1 + /* 2 + * Atmel SDMMC controller driver. 3 + * 4 + * Copyright (C) 2015 Atmel, 5 + * 2015 Ludovic Desroches <ludovic.desroches@atmel.com> 6 + * 7 + * This software is licensed under the terms of the GNU General Public 8 + * License version 2, as published by the Free Software Foundation, and 9 + * may be copied, distributed, and modified under those terms. 10 + * 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + */ 16 + 17 + #include <linux/clk.h> 18 + #include <linux/err.h> 19 + #include <linux/io.h> 20 + #include <linux/mmc/host.h> 21 + #include <linux/module.h> 22 + #include <linux/of.h> 23 + #include <linux/of_device.h> 24 + 25 + #include "sdhci-pltfm.h" 26 + 27 + #define SDMMC_CACR 0x230 28 + #define SDMMC_CACR_CAPWREN BIT(0) 29 + #define SDMMC_CACR_KEY (0x46 << 8) 30 + 31 + struct sdhci_at91_priv { 32 + struct clk *hclock; 33 + struct clk *gck; 34 + struct clk *mainck; 35 + }; 36 + 37 + static const struct sdhci_ops sdhci_at91_sama5d2_ops = { 38 + .set_clock = sdhci_set_clock, 39 + .set_bus_width = sdhci_set_bus_width, 40 + .reset = sdhci_reset, 41 + .set_uhs_signaling = sdhci_set_uhs_signaling, 42 + }; 43 + 44 + static const struct sdhci_pltfm_data soc_data_sama5d2 = { 45 + .ops = &sdhci_at91_sama5d2_ops, 46 + }; 47 + 48 + static const struct of_device_id sdhci_at91_dt_match[] = { 49 + { .compatible = "atmel,sama5d2-sdhci", .data = &soc_data_sama5d2 }, 50 + {} 51 + }; 52 + 53 + static int sdhci_at91_probe(struct platform_device *pdev) 54 + { 55 + const struct of_device_id *match; 56 + const struct sdhci_pltfm_data *soc_data; 57 + struct sdhci_host *host; 58 + struct sdhci_pltfm_host *pltfm_host; 59 + struct sdhci_at91_priv *priv; 60 + unsigned int caps0, caps1; 61 + unsigned int clk_base, clk_mul; 62 + unsigned int gck_rate, real_gck_rate; 
63 + int ret; 64 + 65 + match = of_match_device(sdhci_at91_dt_match, &pdev->dev); 66 + if (!match) 67 + return -EINVAL; 68 + soc_data = match->data; 69 + 70 + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); 71 + if (!priv) { 72 + dev_err(&pdev->dev, "unable to allocate private data\n"); 73 + return -ENOMEM; 74 + } 75 + 76 + priv->mainck = devm_clk_get(&pdev->dev, "baseclk"); 77 + if (IS_ERR(priv->mainck)) { 78 + dev_err(&pdev->dev, "failed to get baseclk\n"); 79 + return PTR_ERR(priv->mainck); 80 + } 81 + 82 + priv->hclock = devm_clk_get(&pdev->dev, "hclock"); 83 + if (IS_ERR(priv->hclock)) { 84 + dev_err(&pdev->dev, "failed to get hclock\n"); 85 + return PTR_ERR(priv->hclock); 86 + } 87 + 88 + priv->gck = devm_clk_get(&pdev->dev, "multclk"); 89 + if (IS_ERR(priv->gck)) { 90 + dev_err(&pdev->dev, "failed to get multclk\n"); 91 + return PTR_ERR(priv->gck); 92 + } 93 + 94 + host = sdhci_pltfm_init(pdev, soc_data, 0); 95 + if (IS_ERR(host)) 96 + return PTR_ERR(host); 97 + 98 + /* 99 + * The mult clock is provided by as a generated clock by the PMC 100 + * controller. In order to set the rate of gck, we have to get the 101 + * base clock rate and the clock mult from capabilities. 102 + */ 103 + clk_prepare_enable(priv->hclock); 104 + caps0 = readl(host->ioaddr + SDHCI_CAPABILITIES); 105 + caps1 = readl(host->ioaddr + SDHCI_CAPABILITIES_1); 106 + clk_base = (caps0 & SDHCI_CLOCK_V3_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; 107 + clk_mul = (caps1 & SDHCI_CLOCK_MUL_MASK) >> SDHCI_CLOCK_MUL_SHIFT; 108 + gck_rate = clk_base * 1000000 * (clk_mul + 1); 109 + ret = clk_set_rate(priv->gck, gck_rate); 110 + if (ret < 0) { 111 + dev_err(&pdev->dev, "failed to set gck"); 112 + goto hclock_disable_unprepare; 113 + return -EINVAL; 114 + } 115 + /* 116 + * We need to check if we have the requested rate for gck because in 117 + * some cases this rate could be not supported. If it happens, the rate 118 + * is the closest one gck can provide. 
We have to update the value 119 + * of clk mul. 120 + */ 121 + real_gck_rate = clk_get_rate(priv->gck); 122 + if (real_gck_rate != gck_rate) { 123 + clk_mul = real_gck_rate / (clk_base * 1000000) - 1; 124 + caps1 &= (~SDHCI_CLOCK_MUL_MASK); 125 + caps1 |= ((clk_mul << SDHCI_CLOCK_MUL_SHIFT) & SDHCI_CLOCK_MUL_MASK); 126 + /* Set capabilities in r/w mode. */ 127 + writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN, host->ioaddr + SDMMC_CACR); 128 + writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1); 129 + /* Set capabilities in ro mode. */ 130 + writel(0, host->ioaddr + SDMMC_CACR); 131 + dev_info(&pdev->dev, "update clk mul to %u as gck rate is %u Hz\n", 132 + clk_mul, real_gck_rate); 133 + } 134 + 135 + clk_prepare_enable(priv->mainck); 136 + clk_prepare_enable(priv->gck); 137 + 138 + pltfm_host = sdhci_priv(host); 139 + pltfm_host->priv = priv; 140 + 141 + ret = mmc_of_parse(host->mmc); 142 + if (ret) 143 + goto clocks_disable_unprepare; 144 + 145 + sdhci_get_of_property(pdev); 146 + 147 + ret = sdhci_add_host(host); 148 + if (ret) 149 + goto clocks_disable_unprepare; 150 + 151 + return 0; 152 + 153 + clocks_disable_unprepare: 154 + clk_disable_unprepare(priv->gck); 155 + clk_disable_unprepare(priv->mainck); 156 + hclock_disable_unprepare: 157 + clk_disable_unprepare(priv->hclock); 158 + sdhci_pltfm_free(pdev); 159 + return ret; 160 + } 161 + 162 + static int sdhci_at91_remove(struct platform_device *pdev) 163 + { 164 + struct sdhci_host *host = platform_get_drvdata(pdev); 165 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 166 + struct sdhci_at91_priv *priv = pltfm_host->priv; 167 + 168 + sdhci_pltfm_unregister(pdev); 169 + 170 + clk_disable_unprepare(priv->gck); 171 + clk_disable_unprepare(priv->hclock); 172 + clk_disable_unprepare(priv->mainck); 173 + 174 + return 0; 175 + } 176 + 177 + static struct platform_driver sdhci_at91_driver = { 178 + .driver = { 179 + .name = "sdhci-at91", 180 + .of_match_table = sdhci_at91_dt_match, 181 + .pm = SDHCI_PLTFM_PMOPS, 
182 + }, 183 + .probe = sdhci_at91_probe, 184 + .remove = sdhci_at91_remove, 185 + }; 186 + 187 + module_platform_driver(sdhci_at91_driver); 188 + 189 + MODULE_DESCRIPTION("SDHCI driver for at91"); 190 + MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>"); 191 + MODULE_LICENSE("GPL v2");
+6
drivers/mmc/host/sdhci-of-esdhc.c
··· 208 208 if (clock == 0) 209 209 return; 210 210 211 + /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */ 212 + temp = esdhc_readw(host, SDHCI_HOST_VERSION); 213 + temp = (temp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; 214 + if (temp < VENDOR_V_23) 215 + pre_div = 2; 216 + 211 217 /* Workaround to reduce the clock frequency for p1010 esdhc */ 212 218 if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) { 213 219 if (clock > 20000000)
+1
drivers/mmc/host/sdhci-pci.c
··· 618 618 static const struct sdhci_pci_fixes sdhci_o2 = { 619 619 .probe = sdhci_pci_o2_probe, 620 620 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, 621 + .quirks2 = SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD, 621 622 .probe_slot = sdhci_pci_o2_probe_slot, 622 623 .resume = sdhci_pci_o2_resume, 623 624 };
+2 -2
drivers/mmc/host/sdhci-sirf.c
··· 161 161 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | 162 162 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | 163 163 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | 164 - SDHCI_QUIRK_INVERTED_WRITE_PROTECT | 165 - SDHCI_QUIRK_DELAY_AFTER_POWER, 164 + SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS, 165 + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, 166 166 }; 167 167 168 168 static int sdhci_sirf_probe(struct platform_device *pdev)
+64 -59
drivers/mmc/host/sdhci.c
··· 54 54 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode); 55 55 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable); 56 56 static int sdhci_pre_dma_transfer(struct sdhci_host *host, 57 - struct mmc_data *data, 58 - struct sdhci_host_next *next); 57 + struct mmc_data *data); 59 58 static int sdhci_do_get_cd(struct sdhci_host *host); 60 59 61 60 #ifdef CONFIG_PM ··· 206 207 static void sdhci_do_reset(struct sdhci_host *host, u8 mask) 207 208 { 208 209 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { 209 - if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & 210 - SDHCI_CARD_PRESENT)) 210 + if (!sdhci_do_get_cd(host)) 211 211 return; 212 212 } 213 213 ··· 494 496 goto fail; 495 497 BUG_ON(host->align_addr & host->align_mask); 496 498 497 - host->sg_count = sdhci_pre_dma_transfer(host, data, NULL); 499 + host->sg_count = sdhci_pre_dma_transfer(host, data); 498 500 if (host->sg_count < 0) 499 501 goto unmap_align; 500 502 ··· 633 635 } 634 636 } 635 637 636 - if (!data->host_cookie) 638 + if (data->host_cookie == COOKIE_MAPPED) { 637 639 dma_unmap_sg(mmc_dev(host->mmc), data->sg, 638 640 data->sg_len, direction); 641 + data->host_cookie = COOKIE_UNMAPPED; 642 + } 639 643 } 640 644 641 645 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd) ··· 833 833 } else { 834 834 int sg_cnt; 835 835 836 - sg_cnt = sdhci_pre_dma_transfer(host, data, NULL); 836 + sg_cnt = sdhci_pre_dma_transfer(host, data); 837 837 if (sg_cnt <= 0) { 838 838 /* 839 839 * This only happens when someone fed ··· 949 949 if (host->flags & SDHCI_USE_ADMA) 950 950 sdhci_adma_table_post(host, data); 951 951 else { 952 - if (!data->host_cookie) 952 + if (data->host_cookie == COOKIE_MAPPED) { 953 953 dma_unmap_sg(mmc_dev(host->mmc), 954 954 data->sg, data->sg_len, 955 955 (data->flags & MMC_DATA_READ) ? 
956 956 DMA_FROM_DEVICE : DMA_TO_DEVICE); 957 + data->host_cookie = COOKIE_UNMAPPED; 958 + } 957 959 } 958 960 } 959 961 ··· 1134 1132 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104); 1135 1133 break; 1136 1134 case MMC_TIMING_UHS_DDR50: 1135 + case MMC_TIMING_MMC_DDR52: 1137 1136 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50); 1138 1137 break; 1139 1138 case MMC_TIMING_MMC_HS400: ··· 1155 1152 int real_div = div, clk_mul = 1; 1156 1153 u16 clk = 0; 1157 1154 unsigned long timeout; 1155 + bool switch_base_clk = false; 1158 1156 1159 1157 host->mmc->actual_clock = 0; 1160 1158 ··· 1193 1189 <= clock) 1194 1190 break; 1195 1191 } 1196 - /* 1197 - * Set Programmable Clock Mode in the Clock 1198 - * Control register. 1199 - */ 1200 - clk = SDHCI_PROG_CLOCK_MODE; 1201 - real_div = div; 1202 - clk_mul = host->clk_mul; 1203 - div--; 1204 - } else { 1192 + if ((host->max_clk * host->clk_mul / div) <= clock) { 1193 + /* 1194 + * Set Programmable Clock Mode in the Clock 1195 + * Control register. 1196 + */ 1197 + clk = SDHCI_PROG_CLOCK_MODE; 1198 + real_div = div; 1199 + clk_mul = host->clk_mul; 1200 + div--; 1201 + } else { 1202 + /* 1203 + * Divisor can be too small to reach clock 1204 + * speed requirement. Then use the base clock. 1205 + */ 1206 + switch_base_clk = true; 1207 + } 1208 + } 1209 + 1210 + if (!host->clk_mul || switch_base_clk) { 1205 1211 /* Version 3.00 divisors must be a multiple of 2. */ 1206 1212 if (host->max_clk <= clock) 1207 1213 div = 1; ··· 1224 1210 } 1225 1211 real_div = div; 1226 1212 div >>= 1; 1213 + if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN) 1214 + && !div && host->max_clk <= 25000000) 1215 + div = 1; 1227 1216 } 1228 1217 } else { 1229 1218 /* Version 2.00 divisors must be a power of 2. 
*/ ··· 1576 1559 (ios->timing == MMC_TIMING_UHS_SDR25) || 1577 1560 (ios->timing == MMC_TIMING_UHS_SDR50) || 1578 1561 (ios->timing == MMC_TIMING_UHS_SDR104) || 1579 - (ios->timing == MMC_TIMING_UHS_DDR50))) { 1562 + (ios->timing == MMC_TIMING_UHS_DDR50) || 1563 + (ios->timing == MMC_TIMING_MMC_DDR52))) { 1580 1564 u16 preset; 1581 1565 1582 1566 sdhci_enable_preset_value(host, true); ··· 1619 1601 if (host->flags & SDHCI_DEVICE_DEAD) 1620 1602 return 0; 1621 1603 1622 - /* If polling/nonremovable, assume that the card is always present. */ 1623 - if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || 1624 - (host->mmc->caps & MMC_CAP_NONREMOVABLE)) 1604 + /* If nonremovable, assume that the card is always present. */ 1605 + if (host->mmc->caps & MMC_CAP_NONREMOVABLE) 1625 1606 return 1; 1626 1607 1627 - /* Try slot gpio detect */ 1608 + /* 1609 + * Try slot gpio detect, if defined it take precedence 1610 + * over build in controller functionality 1611 + */ 1628 1612 if (!IS_ERR_VALUE(gpio_cd)) 1629 1613 return !!gpio_cd; 1614 + 1615 + /* If polling, assume that the card is always present. */ 1616 + if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) 1617 + return 1; 1630 1618 1631 1619 /* Host native card detect */ 1632 1620 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); ··· 2121 2097 struct mmc_data *data = mrq->data; 2122 2098 2123 2099 if (host->flags & SDHCI_REQ_USE_DMA) { 2124 - if (data->host_cookie) 2100 + if (data->host_cookie == COOKIE_GIVEN || 2101 + data->host_cookie == COOKIE_MAPPED) 2125 2102 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 2126 2103 data->flags & MMC_DATA_WRITE ? 
2127 2104 DMA_TO_DEVICE : DMA_FROM_DEVICE); 2128 - mrq->data->host_cookie = 0; 2105 + data->host_cookie = COOKIE_UNMAPPED; 2129 2106 } 2130 2107 } 2131 2108 2132 2109 static int sdhci_pre_dma_transfer(struct sdhci_host *host, 2133 - struct mmc_data *data, 2134 - struct sdhci_host_next *next) 2110 + struct mmc_data *data) 2135 2111 { 2136 2112 int sg_count; 2137 2113 2138 - if (!next && data->host_cookie && 2139 - data->host_cookie != host->next_data.cookie) { 2140 - pr_debug(DRIVER_NAME "[%s] invalid cookie: %d, next-cookie %d\n", 2141 - __func__, data->host_cookie, host->next_data.cookie); 2142 - data->host_cookie = 0; 2114 + if (data->host_cookie == COOKIE_MAPPED) { 2115 + data->host_cookie = COOKIE_GIVEN; 2116 + return data->sg_count; 2143 2117 } 2144 2118 2145 - /* Check if next job is already prepared */ 2146 - if (next || 2147 - (!next && data->host_cookie != host->next_data.cookie)) { 2148 - sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, 2149 - data->sg_len, 2150 - data->flags & MMC_DATA_WRITE ? 2151 - DMA_TO_DEVICE : DMA_FROM_DEVICE); 2119 + WARN_ON(data->host_cookie == COOKIE_GIVEN); 2152 2120 2153 - } else { 2154 - sg_count = host->next_data.sg_count; 2155 - host->next_data.sg_count = 0; 2156 - } 2157 - 2121 + sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 2122 + data->flags & MMC_DATA_WRITE ? 2123 + DMA_TO_DEVICE : DMA_FROM_DEVICE); 2158 2124 2159 2125 if (sg_count == 0) 2160 - return -EINVAL; 2126 + return -ENOSPC; 2161 2127 2162 - if (next) { 2163 - next->sg_count = sg_count; 2164 - data->host_cookie = ++next->cookie < 0 ? 
1 : next->cookie; 2165 - } else 2166 - host->sg_count = sg_count; 2128 + data->sg_count = sg_count; 2129 + data->host_cookie = COOKIE_MAPPED; 2167 2130 2168 2131 return sg_count; 2169 2132 } ··· 2160 2149 { 2161 2150 struct sdhci_host *host = mmc_priv(mmc); 2162 2151 2163 - if (mrq->data->host_cookie) { 2164 - mrq->data->host_cookie = 0; 2165 - return; 2166 - } 2152 + mrq->data->host_cookie = COOKIE_UNMAPPED; 2167 2153 2168 2154 if (host->flags & SDHCI_REQ_USE_DMA) 2169 - if (sdhci_pre_dma_transfer(host, 2170 - mrq->data, 2171 - &host->next_data) < 0) 2172 - mrq->data->host_cookie = 0; 2155 + sdhci_pre_dma_transfer(host, mrq->data); 2173 2156 } 2174 2157 2175 2158 static void sdhci_card_event(struct mmc_host *mmc) ··· 3035 3030 host->max_clk = host->ops->get_max_clock(host); 3036 3031 } 3037 3032 3038 - host->next_data.cookie = 1; 3039 3033 /* 3040 3034 * In case of Host Controller v3.00, find out whether clock 3041 3035 * multiplier is supported. ··· 3130 3126 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 3131 3127 3132 3128 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 3133 - !(mmc->caps & MMC_CAP_NONREMOVABLE)) 3129 + !(mmc->caps & MMC_CAP_NONREMOVABLE) && 3130 + IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc))) 3134 3131 mmc->caps |= MMC_CAP_NEEDS_POLL; 3135 3132 3136 3133 /* If there are external regulators, get them */
+6 -4
drivers/mmc/host/sdhci.h
··· 309 309 */ 310 310 #define SDHCI_MAX_SEGS 128 311 311 312 - struct sdhci_host_next { 313 - unsigned int sg_count; 314 - s32 cookie; 312 + enum sdhci_cookie { 313 + COOKIE_UNMAPPED, 314 + COOKIE_MAPPED, 315 + COOKIE_GIVEN, 315 316 }; 316 317 317 318 struct sdhci_host { ··· 410 409 #define SDHCI_QUIRK2_SUPPORT_SINGLE (1<<13) 411 410 /* Controller broken with using ACMD23 */ 412 411 #define SDHCI_QUIRK2_ACMD23_BROKEN (1<<14) 412 + /* Broken Clock divider zero in controller */ 413 + #define SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN (1<<15) 413 414 414 415 int irq; /* Device IRQ */ 415 416 void __iomem *ioaddr; /* Mapped address */ ··· 506 503 unsigned int tuning_mode; /* Re-tuning mode supported by host */ 507 504 #define SDHCI_TUNING_MODE_1 0 508 505 509 - struct sdhci_host_next next_data; 510 506 unsigned long private[0] ____cacheline_aligned; 511 507 }; 512 508
+2
drivers/mmc/host/sh_mmcif.c
··· 1632 1632 { 1633 1633 struct sh_mmcif_host *host = dev_get_drvdata(dev); 1634 1634 1635 + pm_runtime_get_sync(dev); 1635 1636 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); 1637 + pm_runtime_put(dev); 1636 1638 1637 1639 return 0; 1638 1640 }
+1 -1
drivers/mmc/host/sunxi-mmc.c
··· 595 595 596 596 static int sunxi_mmc_oclk_onoff(struct sunxi_mmc_host *host, u32 oclk_en) 597 597 { 598 - unsigned long expire = jiffies + msecs_to_jiffies(250); 598 + unsigned long expire = jiffies + msecs_to_jiffies(750); 599 599 u32 rval; 600 600 601 601 rval = mmc_readl(host, REG_CLKCR);
+4 -2
drivers/mmc/host/tmio_mmc_pio.c
··· 83 83 return --host->sg_len; 84 84 } 85 85 86 + #define CMDREQ_TIMEOUT 5000 87 + 86 88 #ifdef CONFIG_MMC_DEBUG 87 89 88 90 #define STATUS_TO_TEXT(a, status, i) \ ··· 232 230 */ 233 231 if (IS_ERR_OR_NULL(mrq) 234 232 || time_is_after_jiffies(host->last_req_ts + 235 - msecs_to_jiffies(2000))) { 233 + msecs_to_jiffies(CMDREQ_TIMEOUT))) { 236 234 spin_unlock_irqrestore(&host->lock, flags); 237 235 return; 238 236 } ··· 820 818 ret = tmio_mmc_start_command(host, mrq->cmd); 821 819 if (!ret) { 822 820 schedule_delayed_work(&host->delayed_reset_work, 823 - msecs_to_jiffies(2000)); 821 + msecs_to_jiffies(CMDREQ_TIMEOUT)); 824 822 return; 825 823 } 826 824
+10 -5
drivers/mmc/host/usdhi6rol0.c
··· 1611 1611 return IRQ_NONE; 1612 1612 1613 1613 /* Ack */ 1614 - usdhi6_write(host, USDHI6_SD_INFO1, !status); 1614 + usdhi6_write(host, USDHI6_SD_INFO1, ~status); 1615 1615 1616 1616 if (!work_pending(&mmc->detect.work) && 1617 1617 (((status & USDHI6_SD_INFO1_CARD_INSERT) && ··· 1634 1634 struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work); 1635 1635 struct mmc_request *mrq = host->mrq; 1636 1636 struct mmc_data *data = mrq ? mrq->data : NULL; 1637 + struct scatterlist *sg = host->sg ?: data->sg; 1637 1638 1638 1639 dev_warn(mmc_dev(host->mmc), 1639 1640 "%s timeout wait %u CMD%d: IRQ 0x%08x:0x%08x, last IRQ 0x%08x\n", ··· 1670 1669 "%c: page #%u @ +0x%zx %ux%u in SG%u. Current SG %u bytes @ %u\n", 1671 1670 data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx, 1672 1671 host->offset, data->blocks, data->blksz, data->sg_len, 1673 - sg_dma_len(host->sg), host->sg->offset); 1672 + sg_dma_len(sg), sg->offset); 1674 1673 usdhi6_sg_unmap(host, true); 1675 1674 /* 1676 1675 * If USDHI6_WAIT_FOR_DATA_END times out, we have already unmapped ··· 1716 1715 if (!mmc) 1717 1716 return -ENOMEM; 1718 1717 1718 + ret = mmc_regulator_get_supply(mmc); 1719 + if (ret == -EPROBE_DEFER) 1720 + goto e_free_mmc; 1721 + 1719 1722 ret = mmc_of_parse(mmc); 1720 1723 if (ret < 0) 1721 1724 goto e_free_mmc; 1722 - 1723 - mmc_regulator_get_supply(mmc); 1724 1725 1725 1726 host = mmc_priv(mmc); 1726 1727 host->mmc = mmc; ··· 1737 1734 } 1738 1735 1739 1736 host->clk = devm_clk_get(dev, NULL); 1740 - if (IS_ERR(host->clk)) 1737 + if (IS_ERR(host->clk)) { 1738 + ret = PTR_ERR(host->clk); 1741 1739 goto e_free_mmc; 1740 + } 1742 1741 1743 1742 host->imclk = clk_get_rate(host->clk); 1744 1743
+3
include/linux/mmc/card.h
··· 279 279 #define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */ 280 280 #define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */ 281 281 #define MMC_QUIRK_BROKEN_IRQ_POLLING (1<<11) /* Polling SDIO_CCCR_INTx could create a fake interrupt */ 282 + #define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */ 283 + 282 284 283 285 unsigned int erase_size; /* erase size in sectors */ 284 286 unsigned int erase_shift; /* if erase unit is power 2 */ 285 287 unsigned int pref_erase; /* in sectors */ 288 + unsigned int eg_boundary; /* don't cross erase-group boundaries */ 286 289 u8 erased_byte; /* value of erased bytes */ 287 290 288 291 u32 raw_cid[4]; /* raw card CID */
+4 -5
include/linux/mmc/dw_mmc.h
··· 98 98 * @irq_flags: The flags to be passed to request_irq. 99 99 * @irq: The irq value to be passed to request_irq. 100 100 * @sdio_id0: Number of slot0 in the SDIO interrupt registers. 101 + * @dto_timer: Timer for broken data transfer over scheme. 101 102 * 102 103 * Locking 103 104 * ======= ··· 154 153 dma_addr_t sg_dma; 155 154 void *sg_cpu; 156 155 const struct dw_mci_dma_ops *dma_ops; 157 - #ifdef CONFIG_MMC_DW_IDMAC 158 156 unsigned int ring_size; 159 - #else 160 - struct dw_mci_dma_data *dma_data; 161 - #endif 162 157 u32 cmd_status; 163 158 u32 data_status; 164 159 u32 stop_cmdr; ··· 201 204 int sdio_id0; 202 205 203 206 struct timer_list cmd11_timer; 207 + struct timer_list dto_timer; 204 208 }; 205 209 206 210 /* DMA ops for Internal/External DMAC interface */ ··· 224 226 #define DW_MCI_QUIRK_HIGHSPEED BIT(2) 225 227 /* Unreliable card detection */ 226 228 #define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(3) 229 + /* Timer for broken data transfer over scheme */ 230 + #define DW_MCI_QUIRK_BROKEN_DTO BIT(4) 227 231 228 232 struct dma_pdata; 229 233 ··· 259 259 260 260 struct dw_mci_dma_ops *dma_ops; 261 261 struct dma_pdata *data; 262 - struct block_settings *blk_settings; 263 262 }; 264 263 265 264 #endif /* LINUX_MMC_DW_MMC_H */
+2 -1
include/linux/mmc/host.h
··· 412 412 { 413 413 host->ops->enable_sdio_irq(host, 0); 414 414 host->sdio_irq_pending = true; 415 - wake_up_process(host->sdio_irq_thread); 415 + if (host->sdio_irq_thread) 416 + wake_up_process(host->sdio_irq_thread); 416 417 } 417 418 418 419 void sdio_run_irqs(struct mmc_host *host);
+1
include/linux/platform_data/mmc-esdhc-imx.h
··· 45 45 int max_bus_width; 46 46 bool support_vsel; 47 47 unsigned int delay_line; 48 + unsigned int tuning_step; /* The delay cell steps in tuning procedure */ 48 49 }; 49 50 #endif /* __ASM_ARCH_IMX_ESDHC_H */