Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mmc-merge-for-3.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc

Pull MMC updates from Chris Ball:

Core:
* Support for MMC 4.5 Data Tag feature -- we tag REQ_META, so devices
that support Data Tag will provide increased throughput for metadata.
* Faster detection of card removal on I/O errors.

Drivers:
* dw_mmc now supports eMMC Power Off Notify, has PCI support, and
implements pre_req and post_req for asynchronous requests.
* omap_hsmmc now supports device tree.
* esdhc now has power management support.
* sdhci-tegra now supports Tegra30 devices.
* sdhci-spear now supports hibernation.
* tmio_mmc now supports using a GPIO for card detection.
* Intel PCH now supports 8-bit bus transfers.

* tag 'mmc-merge-for-3.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc: (53 commits)
mmc: sh_mmcif: simplify bitmask macros
mmc: sh_mobile_sdhi: support modular mmc-core with non-standard hotplug
mmc: sh_mobile_sdhi: add a callback for board specific init code
mmc: tmio: cosmetic: prettify the tmio_mmc_set_ios() function
mmc: sh_mobile_sdhi: do not manage PM clocks manually
mmc: tmio_mmc: remove unused sdio_irq_enabled flag
mmc: tmio_mmc: power status flag doesn't have to be exposed in platform data
mmc: sh_mobile_sdhi: pass card hotplug GPIO number to TMIO MMC
mmc: tmio_mmc: support the generic MMC GPIO card hotplug helper
mmc: tmio: calculate the native hotplug condition only once
mmc: simplify mmc_cd_gpio_request() by removing two parameters
mmc: sdhci-pci: allow 8-bit bus width for Intel PCH
mmc: sdhci: check interrupt flags in ISR again
mmc: sdhci-pci: Add MSI support
mmc: core: warn when card doesn't support HPI
mmc: davinci: Poll status for small size transfers
mmc: davinci: Eliminate spurious interrupts
mmc: omap_hsmmc: Avoid a regulator voltage change with dt
mmc: omap_hsmmc: Convert hsmmc driver to use device tree
mmc: sdhci-pci: add SDHCI_QUIRK2_HOST_OFF_CARD_ON for Medfield SDIO
...

+1162 -723
+33
Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
··· 1 + * TI Highspeed MMC host controller for OMAP 2 + 3 + The Highspeed MMC Host Controller on TI OMAP family 4 + provides an interface for MMC, SD, and SDIO types of memory cards. 5 + 6 + Required properties: 7 + - compatible: 8 + Should be "ti,omap2-hsmmc", for OMAP2 controllers 9 + Should be "ti,omap3-hsmmc", for OMAP3 controllers 10 + Should be "ti,omap4-hsmmc", for OMAP4 controllers 11 + - ti,hwmods: Must be "mmc<n>", n is controller instance starting 1 12 + - reg : should contain hsmmc registers location and length 13 + 14 + Optional properties: 15 + ti,dual-volt: boolean, supports dual voltage cards 16 + <supply-name>-supply: phandle to the regulator device tree node 17 + "supply-name" examples are "vmmc", "vmmc_aux" etc 18 + ti,bus-width: Number of data lines, default assumed is 1 if the property is missing. 19 + cd-gpios: GPIOs for card detection 20 + wp-gpios: GPIOs for write protection 21 + ti,non-removable: non-removable slot (like eMMC) 22 + ti,needs-special-reset: Requires a special softreset sequence 23 + 24 + Example: 25 + mmc1: mmc@0x4809c000 { 26 + compatible = "ti,omap4-hsmmc"; 27 + reg = <0x4809c000 0x400>; 28 + ti,hwmods = "mmc1"; 29 + ti,dual-volt; 30 + ti,bus-width = <4>; 31 + vmmc-supply = <&vmmc>; /* phandle to regulator node */ 32 + ti,non-removable; 33 + };
+2 -3
arch/arm/mach-exynos/mach-nuri.c
··· 111 111 .max_width = 8, 112 112 .host_caps = (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA | 113 113 MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | 114 - MMC_CAP_DISABLE | MMC_CAP_ERASE), 114 + MMC_CAP_ERASE), 115 115 .cd_type = S3C_SDHCI_CD_PERMANENT, 116 116 .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL, 117 117 }; ··· 150 150 static struct s3c_sdhci_platdata nuri_hsmmc2_data __initdata = { 151 151 .max_width = 4, 152 152 .host_caps = MMC_CAP_4_BIT_DATA | 153 - MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | 154 - MMC_CAP_DISABLE, 153 + MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED, 155 154 .ext_cd_gpio = EXYNOS4_GPX3(3), /* XEINT_27 */ 156 155 .ext_cd_gpio_invert = 1, 157 156 .cd_type = S3C_SDHCI_CD_GPIO,
+3 -6
arch/arm/mach-exynos/mach-universal_c210.c
··· 745 745 static struct s3c_sdhci_platdata universal_hsmmc0_data __initdata = { 746 746 .max_width = 8, 747 747 .host_caps = (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA | 748 - MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | 749 - MMC_CAP_DISABLE), 748 + MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED), 750 749 .cd_type = S3C_SDHCI_CD_PERMANENT, 751 750 .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL, 752 751 }; ··· 783 784 static struct s3c_sdhci_platdata universal_hsmmc2_data __initdata = { 784 785 .max_width = 4, 785 786 .host_caps = MMC_CAP_4_BIT_DATA | 786 - MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | 787 - MMC_CAP_DISABLE, 787 + MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED, 788 788 .ext_cd_gpio = EXYNOS4_GPX3(4), /* XEINT_28 */ 789 789 .ext_cd_gpio_invert = 1, 790 790 .cd_type = S3C_SDHCI_CD_GPIO, ··· 794 796 static struct s3c_sdhci_platdata universal_hsmmc3_data __initdata = { 795 797 .max_width = 4, 796 798 .host_caps = MMC_CAP_4_BIT_DATA | 797 - MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | 798 - MMC_CAP_DISABLE, 799 + MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED, 799 800 .cd_type = S3C_SDHCI_CD_EXTERNAL, 800 801 }; 801 802
+1
arch/arm/mach-omap2/hsmmc.c
··· 316 316 mmc->slots[0].pm_caps = c->pm_caps; 317 317 mmc->slots[0].internal_clock = !c->ext_clock; 318 318 mmc->dma_mask = 0xffffffff; 319 + mmc->max_freq = c->max_freq; 319 320 if (cpu_is_omap44xx()) 320 321 mmc->reg_offset = OMAP4_MMC_REG_OFFSET; 321 322 else
+2
arch/arm/mach-omap2/hsmmc.h
··· 27 27 char *name; /* or NULL for default */ 28 28 struct platform_device *pdev; /* mmc controller instance */ 29 29 int ocr_mask; /* temporary HACK */ 30 + int max_freq; /* maximum clock, if constrained by external 31 + * circuitry, or 0 for default */ 30 32 /* Remux (pad configuration) when powering on/off */ 31 33 void (*remux)(struct device *dev, int slot, int power_on); 32 34 /* init some special card */
-2
arch/arm/plat-omap/include/plat/mmc.h
··· 137 137 int (*set_power)(struct device *dev, int slot, 138 138 int power_on, int vdd); 139 139 int (*get_ro)(struct device *dev, int slot); 140 - int (*set_sleep)(struct device *dev, int slot, int sleep, 141 - int vdd, int cardsleep); 142 140 void (*remux)(struct device *dev, int slot, int power_on); 143 141 /* Call back before enabling / disabling regulators */ 144 142 void (*before_set_reg)(struct device *dev, int slot,
+16 -5
drivers/mmc/card/block.c
··· 1079 1079 struct mmc_blk_request *brq = &mqrq->brq; 1080 1080 struct request *req = mqrq->req; 1081 1081 struct mmc_blk_data *md = mq->data; 1082 + bool do_data_tag; 1082 1083 1083 1084 /* 1084 1085 * Reliable writes are used to implement Forced Unit Access and ··· 1156 1155 mmc_apply_rel_rw(brq, card, req); 1157 1156 1158 1157 /* 1158 + * Data tag is used only during writing meta data to speed 1159 + * up write and any subsequent read of this meta data 1160 + */ 1161 + do_data_tag = (card->ext_csd.data_tag_unit_size) && 1162 + (req->cmd_flags & REQ_META) && 1163 + (rq_data_dir(req) == WRITE) && 1164 + ((brq->data.blocks * brq->data.blksz) >= 1165 + card->ext_csd.data_tag_unit_size); 1166 + 1167 + /* 1159 1168 * Pre-defined multi-block transfers are preferable to 1160 1169 * open ended-ones (and necessary for reliable writes). 1161 1170 * However, it is not sufficient to just send CMD23, ··· 1183 1172 * We'll avoid using CMD23-bounded multiblock writes for 1184 1173 * these, while retaining features like reliable writes. 1185 1174 */ 1186 - 1187 - if ((md->flags & MMC_BLK_CMD23) && 1188 - mmc_op_multi(brq->cmd.opcode) && 1189 - (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) { 1175 + if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) && 1176 + (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) || 1177 + do_data_tag)) { 1190 1178 brq->sbc.opcode = MMC_SET_BLOCK_COUNT; 1191 1179 brq->sbc.arg = brq->data.blocks | 1192 - (do_rel_wr ? (1 << 31) : 0); 1180 + (do_rel_wr ? (1 << 31) : 0) | 1181 + (do_data_tag ? (1 << 29) : 0); 1193 1182 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; 1194 1183 brq->mrq.sbc = &brq->sbc; 1195 1184 }
+9 -4
drivers/mmc/core/cd-gpio.c
··· 28 28 return IRQ_HANDLED; 29 29 } 30 30 31 - int mmc_cd_gpio_request(struct mmc_host *host, unsigned int gpio, 32 - unsigned int irq, unsigned long flags) 31 + int mmc_cd_gpio_request(struct mmc_host *host, unsigned int gpio) 33 32 { 34 33 size_t len = strlen(dev_name(host->parent)) + 4; 35 - struct mmc_cd_gpio *cd = kmalloc(sizeof(*cd) + len, GFP_KERNEL); 34 + struct mmc_cd_gpio *cd; 35 + int irq = gpio_to_irq(gpio); 36 36 int ret; 37 37 38 + if (irq < 0) 39 + return irq; 40 + 41 + cd = kmalloc(sizeof(*cd) + len, GFP_KERNEL); 38 42 if (!cd) 39 43 return -ENOMEM; 40 44 ··· 49 45 goto egpioreq; 50 46 51 47 ret = request_threaded_irq(irq, NULL, mmc_cd_gpio_irqt, 52 - flags, cd->label, host); 48 + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, 49 + cd->label, host); 53 50 if (ret < 0) 54 51 goto eirqreq; 55 52
+57 -195
drivers/mmc/core/core.c
··· 188 188 struct scatterlist *sg; 189 189 #endif 190 190 191 + if (mrq->sbc) { 192 + pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n", 193 + mmc_hostname(host), mrq->sbc->opcode, 194 + mrq->sbc->arg, mrq->sbc->flags); 195 + } 196 + 191 197 pr_debug("%s: starting CMD%u arg %08x flags %08x\n", 192 198 mmc_hostname(host), mrq->cmd->opcode, 193 199 mrq->cmd->arg, mrq->cmd->flags); ··· 249 243 complete(&mrq->completion); 250 244 } 251 245 252 - static void __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq) 246 + static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq) 253 247 { 254 248 init_completion(&mrq->completion); 255 249 mrq->done = mmc_wait_done; 256 250 if (mmc_card_removed(host->card)) { 257 251 mrq->cmd->error = -ENOMEDIUM; 258 252 complete(&mrq->completion); 259 - return; 253 + return -ENOMEDIUM; 260 254 } 261 255 mmc_start_request(host, mrq); 256 + return 0; 262 257 } 263 258 264 259 static void mmc_wait_for_req_done(struct mmc_host *host, ··· 343 336 struct mmc_async_req *areq, int *error) 344 337 { 345 338 int err = 0; 339 + int start_err = 0; 346 340 struct mmc_async_req *data = host->areq; 347 341 348 342 /* Prepare a new request */ ··· 353 345 if (host->areq) { 354 346 mmc_wait_for_req_done(host, host->areq->mrq); 355 347 err = host->areq->err_check(host->card, host->areq); 356 - if (err) { 357 - /* post process the completed failed request */ 358 - mmc_post_req(host, host->areq->mrq, 0); 359 - if (areq) 360 - /* 361 - * Cancel the new prepared request, because 362 - * it can't run until the failed 363 - * request has been properly handled. 
364 - */ 365 - mmc_post_req(host, areq->mrq, -EINVAL); 366 - 367 - host->areq = NULL; 368 - goto out; 369 - } 370 348 } 371 349 372 - if (areq) 373 - __mmc_start_req(host, areq->mrq); 350 + if (!err && areq) 351 + start_err = __mmc_start_req(host, areq->mrq); 374 352 375 353 if (host->areq) 376 354 mmc_post_req(host, host->areq->mrq, 0); 377 355 378 - host->areq = areq; 379 - out: 356 + /* Cancel a prepared request if it was not started. */ 357 + if ((err || start_err) && areq) 358 + mmc_post_req(host, areq->mrq, -EINVAL); 359 + 360 + if (err) 361 + host->areq = NULL; 362 + else 363 + host->areq = areq; 364 + 380 365 if (error) 381 366 *error = err; 382 367 return data; ··· 600 599 EXPORT_SYMBOL(mmc_align_data_size); 601 600 602 601 /** 603 - * mmc_host_enable - enable a host. 604 - * @host: mmc host to enable 605 - * 606 - * Hosts that support power saving can use the 'enable' and 'disable' 607 - * methods to exit and enter power saving states. For more information 608 - * see comments for struct mmc_host_ops. 
609 - */ 610 - int mmc_host_enable(struct mmc_host *host) 611 - { 612 - if (!(host->caps & MMC_CAP_DISABLE)) 613 - return 0; 614 - 615 - if (host->en_dis_recurs) 616 - return 0; 617 - 618 - if (host->nesting_cnt++) 619 - return 0; 620 - 621 - cancel_delayed_work_sync(&host->disable); 622 - 623 - if (host->enabled) 624 - return 0; 625 - 626 - if (host->ops->enable) { 627 - int err; 628 - 629 - host->en_dis_recurs = 1; 630 - mmc_host_clk_hold(host); 631 - err = host->ops->enable(host); 632 - mmc_host_clk_release(host); 633 - host->en_dis_recurs = 0; 634 - 635 - if (err) { 636 - pr_debug("%s: enable error %d\n", 637 - mmc_hostname(host), err); 638 - return err; 639 - } 640 - } 641 - host->enabled = 1; 642 - return 0; 643 - } 644 - EXPORT_SYMBOL(mmc_host_enable); 645 - 646 - static int mmc_host_do_disable(struct mmc_host *host, int lazy) 647 - { 648 - if (host->ops->disable) { 649 - int err; 650 - 651 - host->en_dis_recurs = 1; 652 - mmc_host_clk_hold(host); 653 - err = host->ops->disable(host, lazy); 654 - mmc_host_clk_release(host); 655 - host->en_dis_recurs = 0; 656 - 657 - if (err < 0) { 658 - pr_debug("%s: disable error %d\n", 659 - mmc_hostname(host), err); 660 - return err; 661 - } 662 - if (err > 0) { 663 - unsigned long delay = msecs_to_jiffies(err); 664 - 665 - mmc_schedule_delayed_work(&host->disable, delay); 666 - } 667 - } 668 - host->enabled = 0; 669 - return 0; 670 - } 671 - 672 - /** 673 - * mmc_host_disable - disable a host. 674 - * @host: mmc host to disable 675 - * 676 - * Hosts that support power saving can use the 'enable' and 'disable' 677 - * methods to exit and enter power saving states. For more information 678 - * see comments for struct mmc_host_ops. 
679 - */ 680 - int mmc_host_disable(struct mmc_host *host) 681 - { 682 - int err; 683 - 684 - if (!(host->caps & MMC_CAP_DISABLE)) 685 - return 0; 686 - 687 - if (host->en_dis_recurs) 688 - return 0; 689 - 690 - if (--host->nesting_cnt) 691 - return 0; 692 - 693 - if (!host->enabled) 694 - return 0; 695 - 696 - err = mmc_host_do_disable(host, 0); 697 - return err; 698 - } 699 - EXPORT_SYMBOL(mmc_host_disable); 700 - 701 - /** 702 602 * __mmc_claim_host - exclusively claim a host 703 603 * @host: mmc host to claim 704 604 * @abort: whether or not the operation should be aborted ··· 637 735 wake_up(&host->wq); 638 736 spin_unlock_irqrestore(&host->lock, flags); 639 737 remove_wait_queue(&host->wq, &wait); 640 - if (!stop) 641 - mmc_host_enable(host); 738 + if (host->ops->enable && !stop && host->claim_cnt == 1) 739 + host->ops->enable(host); 642 740 return stop; 643 741 } 644 742 ··· 663 761 claimed_host = 1; 664 762 } 665 763 spin_unlock_irqrestore(&host->lock, flags); 764 + if (host->ops->enable && claimed_host && host->claim_cnt == 1) 765 + host->ops->enable(host); 666 766 return claimed_host; 667 767 } 668 768 EXPORT_SYMBOL(mmc_try_claim_host); 669 769 670 770 /** 671 - * mmc_do_release_host - release a claimed host 771 + * mmc_release_host - release a host 672 772 * @host: mmc host to release 673 773 * 674 - * If you successfully claimed a host, this function will 675 - * release it again. 774 + * Release a MMC host, allowing others to claim the host 775 + * for their operations. 
676 776 */ 677 - void mmc_do_release_host(struct mmc_host *host) 777 + void mmc_release_host(struct mmc_host *host) 678 778 { 679 779 unsigned long flags; 780 + 781 + WARN_ON(!host->claimed); 782 + 783 + if (host->ops->disable && host->claim_cnt == 1) 784 + host->ops->disable(host); 680 785 681 786 spin_lock_irqsave(&host->lock, flags); 682 787 if (--host->claim_cnt) { ··· 696 787 wake_up(&host->wq); 697 788 } 698 789 } 699 - EXPORT_SYMBOL(mmc_do_release_host); 700 - 701 - void mmc_host_deeper_disable(struct work_struct *work) 702 - { 703 - struct mmc_host *host = 704 - container_of(work, struct mmc_host, disable.work); 705 - 706 - /* If the host is claimed then we do not want to disable it anymore */ 707 - if (!mmc_try_claim_host(host)) 708 - return; 709 - mmc_host_do_disable(host, 1); 710 - mmc_do_release_host(host); 711 - } 712 - 713 - /** 714 - * mmc_host_lazy_disable - lazily disable a host. 715 - * @host: mmc host to disable 716 - * 717 - * Hosts that support power saving can use the 'enable' and 'disable' 718 - * methods to exit and enter power saving states. For more information 719 - * see comments for struct mmc_host_ops. 720 - */ 721 - int mmc_host_lazy_disable(struct mmc_host *host) 722 - { 723 - if (!(host->caps & MMC_CAP_DISABLE)) 724 - return 0; 725 - 726 - if (host->en_dis_recurs) 727 - return 0; 728 - 729 - if (--host->nesting_cnt) 730 - return 0; 731 - 732 - if (!host->enabled) 733 - return 0; 734 - 735 - if (host->disable_delay) { 736 - mmc_schedule_delayed_work(&host->disable, 737 - msecs_to_jiffies(host->disable_delay)); 738 - return 0; 739 - } else 740 - return mmc_host_do_disable(host, 1); 741 - } 742 - EXPORT_SYMBOL(mmc_host_lazy_disable); 743 - 744 - /** 745 - * mmc_release_host - release a host 746 - * @host: mmc host to release 747 - * 748 - * Release a MMC host, allowing others to claim the host 749 - * for their operations. 
750 - */ 751 - void mmc_release_host(struct mmc_host *host) 752 - { 753 - WARN_ON(!host->claimed); 754 - 755 - mmc_host_lazy_disable(host); 756 - 757 - mmc_do_release_host(host); 758 - } 759 - 760 790 EXPORT_SYMBOL(mmc_release_host); 761 791 762 792 /* ··· 1963 2115 int mmc_detect_card_removed(struct mmc_host *host) 1964 2116 { 1965 2117 struct mmc_card *card = host->card; 2118 + int ret; 1966 2119 1967 2120 WARN_ON(!host->claimed); 2121 + 2122 + if (!card) 2123 + return 1; 2124 + 2125 + ret = mmc_card_removed(card); 1968 2126 /* 1969 2127 * The card will be considered unchanged unless we have been asked to 1970 2128 * detect a change or host requires polling to provide card detection. 1971 2129 */ 1972 - if (card && !host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL)) 1973 - return mmc_card_removed(card); 2130 + if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL) && 2131 + !(host->caps2 & MMC_CAP2_DETECT_ON_ERR)) 2132 + return ret; 1974 2133 1975 2134 host->detect_change = 0; 2135 + if (!ret) { 2136 + ret = _mmc_detect_card_removed(host); 2137 + if (ret && (host->caps2 & MMC_CAP2_DETECT_ON_ERR)) { 2138 + /* 2139 + * Schedule a detect work as soon as possible to let a 2140 + * rescan handle the card removal. 
2141 + */ 2142 + cancel_delayed_work(&host->detect); 2143 + mmc_detect_change(host, 0); 2144 + } 2145 + } 1976 2146 1977 - return _mmc_detect_card_removed(host); 2147 + return ret; 1978 2148 } 1979 2149 EXPORT_SYMBOL(mmc_detect_card_removed); 1980 2150 ··· 2069 2203 spin_unlock_irqrestore(&host->lock, flags); 2070 2204 #endif 2071 2205 2072 - if (host->caps & MMC_CAP_DISABLE) 2073 - cancel_delayed_work(&host->disable); 2074 2206 cancel_delayed_work_sync(&host->detect); 2075 2207 mmc_flush_scheduled_work(); 2076 2208 ··· 2263 2399 { 2264 2400 int err = 0; 2265 2401 2266 - if (host->caps & MMC_CAP_DISABLE) 2267 - cancel_delayed_work(&host->disable); 2268 2402 cancel_delayed_work(&host->detect); 2269 2403 mmc_flush_scheduled_work(); 2270 2404 if (mmc_try_claim_host(host)) { 2271 2405 err = mmc_cache_ctrl(host, 0); 2272 - mmc_do_release_host(host); 2406 + mmc_release_host(host); 2273 2407 } else { 2274 2408 err = -EBUSY; 2275 2409 } ··· 2288 2426 if (host->bus_ops->suspend) { 2289 2427 err = host->bus_ops->suspend(host); 2290 2428 } 2291 - mmc_do_release_host(host); 2429 + mmc_release_host(host); 2292 2430 2293 2431 if (err == -ENOSYS || !host->bus_ops->resume) { 2294 2432 /*
-1
drivers/mmc/core/host.c
··· 330 330 spin_lock_init(&host->lock); 331 331 init_waitqueue_head(&host->wq); 332 332 INIT_DELAYED_WORK(&host->detect, mmc_rescan); 333 - INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable); 334 333 #ifdef CONFIG_PM 335 334 host->pm_notify.notifier_call = mmc_pm_notify; 336 335 #endif
-1
drivers/mmc/core/host.h
··· 14 14 15 15 int mmc_register_host_class(void); 16 16 void mmc_unregister_host_class(void); 17 - void mmc_host_deeper_disable(struct work_struct *work); 18 17 19 18 #endif 20 19
+37 -18
drivers/mmc/core/mmc.c
··· 519 519 ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 | 520 520 ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 | 521 521 ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24; 522 + 523 + if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1) 524 + card->ext_csd.data_sector_size = 4096; 525 + else 526 + card->ext_csd.data_sector_size = 512; 527 + 528 + if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) && 529 + (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) { 530 + card->ext_csd.data_tag_unit_size = 531 + ((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) * 532 + (card->ext_csd.data_sector_size); 533 + } else { 534 + card->ext_csd.data_tag_unit_size = 0; 535 + } 522 536 } 523 537 524 538 out: ··· 952 938 * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF 953 939 * bit. This bit will be lost every time after a reset or power off. 954 940 */ 955 - if (card->ext_csd.enhanced_area_en) { 941 + if (card->ext_csd.enhanced_area_en || 942 + (card->ext_csd.rev >= 3 && (host->caps2 & MMC_CAP2_HC_ERASE_SZ))) { 956 943 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 957 944 EXT_CSD_ERASE_GROUP_DEF, 1, 958 945 card->ext_csd.generic_cmd6_time); ··· 1048 1033 } 1049 1034 1050 1035 /* 1051 - * Enable HPI feature (if supported) 1052 - */ 1053 - if (card->ext_csd.hpi) { 1054 - err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1055 - EXT_CSD_HPI_MGMT, 1, 0); 1056 - if (err && err != -EBADMSG) 1057 - goto free_card; 1058 - if (err) { 1059 - pr_warning("%s: Enabling HPI failed\n", 1060 - mmc_hostname(card->host)); 1061 - err = 0; 1062 - } else 1063 - card->ext_csd.hpi_en = 1; 1064 - } 1065 - 1066 - /* 1067 1036 * Compute bus speed. 1068 1037 */ 1069 1038 max_dtr = (unsigned int)-1; ··· 1096 1097 * 4. 
execute tuning for HS200 1097 1098 */ 1098 1099 if ((host->caps2 & MMC_CAP2_HS200) && 1099 - card->host->ops->execute_tuning) 1100 + card->host->ops->execute_tuning) { 1101 + mmc_host_clk_hold(card->host); 1100 1102 err = card->host->ops->execute_tuning(card->host, 1101 1103 MMC_SEND_TUNING_BLOCK_HS200); 1104 + mmc_host_clk_release(card->host); 1105 + } 1102 1106 if (err) { 1103 1107 pr_warning("%s: tuning execution failed\n", 1104 1108 mmc_hostname(card->host)); ··· 1218 1216 mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50); 1219 1217 mmc_set_bus_width(card->host, bus_width); 1220 1218 } 1219 + } 1220 + 1221 + /* 1222 + * Enable HPI feature (if supported) 1223 + */ 1224 + if (card->ext_csd.hpi) { 1225 + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1226 + EXT_CSD_HPI_MGMT, 1, 1227 + card->ext_csd.generic_cmd6_time); 1228 + if (err && err != -EBADMSG) 1229 + goto free_card; 1230 + if (err) { 1231 + pr_warning("%s: Enabling HPI failed\n", 1232 + mmc_hostname(card->host)); 1233 + err = 0; 1234 + } else 1235 + card->ext_csd.hpi_en = 1; 1221 1236 } 1222 1237 1223 1238 /*
+8 -4
drivers/mmc/core/mmc_ops.c
··· 553 553 { 554 554 struct mmc_command cmd = {0}; 555 555 unsigned int opcode; 556 - unsigned int flags; 557 556 int err; 557 + 558 + if (!card->ext_csd.hpi) { 559 + pr_warning("%s: Card didn't support HPI command\n", 560 + mmc_hostname(card->host)); 561 + return -EINVAL; 562 + } 558 563 559 564 opcode = card->ext_csd.hpi_cmd; 560 565 if (opcode == MMC_STOP_TRANSMISSION) 561 - flags = MMC_RSP_R1 | MMC_CMD_AC; 566 + cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; 562 567 else if (opcode == MMC_SEND_STATUS) 563 - flags = MMC_RSP_R1 | MMC_CMD_AC; 568 + cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 564 569 565 570 cmd.opcode = opcode; 566 571 cmd.arg = card->rca << 16 | 1; 567 - cmd.flags = flags; 568 572 cmd.cmd_timeout_ms = card->ext_csd.out_of_int_time; 569 573 570 574 err = mmc_wait_for_cmd(card->host, &cmd, 0);
+25
drivers/mmc/host/Kconfig
··· 533 533 Designware Mobile Storage IP block. This disables the external DMA 534 534 interface. 535 535 536 + config MMC_DW_PLTFM 537 + tristate "Synopsys Designware MCI Support as platform device" 538 + depends on MMC_DW 539 + default y 540 + help 541 + This selects the common helper functions support for Host Controller 542 + Interface based platform driver. Please select this option if the IP 543 + is present as a platform device. This is the common interface for the 544 + Synopsys Designware IP. 545 + 546 + If you have a controller with this interface, say Y or M here. 547 + 548 + If unsure, say Y. 549 + 550 + config MMC_DW_PCI 551 + tristate "Synopsys Designware MCI support on PCI bus" 552 + depends on MMC_DW && PCI 553 + help 554 + This selects the PCI bus for the Synopsys Designware Mobile Storage IP. 555 + Select this option if the IP is present on PCI platform. 556 + 557 + If you have a controller with this interface, say Y or M here. 558 + 559 + If unsure, say N. 560 + 536 561 config MMC_SH_MMCIF 537 562 tristate "SuperH Internal MMCIF support" 538 563 depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE)
+2
drivers/mmc/host/Makefile
··· 39 39 obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o 40 40 obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o 41 41 obj-$(CONFIG_MMC_DW) += dw_mmc.o 42 + obj-$(CONFIG_MMC_DW_PLTFM) += dw_mmc-pltfm.o 43 + obj-$(CONFIG_MMC_DW_PCI) += dw_mmc-pci.o 42 44 obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o 43 45 obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o 44 46 obj-$(CONFIG_MMC_VUB300) += vub300.o
+1 -1
drivers/mmc/host/atmel-mci.c
··· 1975 1975 return false; 1976 1976 } else { 1977 1977 dev_info(&host->pdev->dev, 1978 - "Using %s for DMA transfers\n", 1978 + "using %s for DMA transfers\n", 1979 1979 dma_chan_name(host->dma.chan)); 1980 1980 return true; 1981 1981 }
+55 -11
drivers/mmc/host/davinci_mmc.c
··· 160 160 MODULE_PARM_DESC(rw_threshold, 161 161 "Read/Write threshold. Default = 32"); 162 162 163 + static unsigned poll_threshold = 128; 164 + module_param(poll_threshold, uint, S_IRUGO); 165 + MODULE_PARM_DESC(poll_threshold, 166 + "Polling transaction size threshold. Default = 128"); 167 + 168 + static unsigned poll_loopcount = 32; 169 + module_param(poll_loopcount, uint, S_IRUGO); 170 + MODULE_PARM_DESC(poll_loopcount, 171 + "Maximum polling loop count. Default = 32"); 172 + 163 173 static unsigned __initdata use_dma = 1; 164 174 module_param(use_dma, uint, 0); 165 175 MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1"); ··· 203 193 bool use_dma; 204 194 bool do_dma; 205 195 bool sdio_int; 196 + bool active_request; 206 197 207 198 /* Scatterlist DMA uses one or more parameter RAM entries: 208 199 * the main one (associated with rxdma or txdma) plus zero or ··· 230 219 #endif 231 220 }; 232 221 222 + static irqreturn_t mmc_davinci_irq(int irq, void *dev_id); 233 223 234 224 /* PIO only */ 235 225 static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host) ··· 388 376 389 377 writel(cmd->arg, host->base + DAVINCI_MMCARGHL); 390 378 writel(cmd_reg, host->base + DAVINCI_MMCCMD); 391 - writel(im_val, host->base + DAVINCI_MMCIM); 379 + 380 + host->active_request = true; 381 + 382 + if (!host->do_dma && host->bytes_left <= poll_threshold) { 383 + u32 count = poll_loopcount; 384 + 385 + while (host->active_request && count--) { 386 + mmc_davinci_irq(0, host); 387 + cpu_relax(); 388 + } 389 + } 390 + 391 + if (host->active_request) 392 + writel(im_val, host->base + DAVINCI_MMCIM); 392 393 } 393 394 394 395 /*----------------------------------------------------------------------*/ ··· 940 915 if (!data->stop || (host->cmd && host->cmd->error)) { 941 916 mmc_request_done(host->mmc, data->mrq); 942 917 writel(0, host->base + DAVINCI_MMCIM); 918 + host->active_request = false; 943 919 } else 944 920 mmc_davinci_start_command(host, data->stop); 945 
921 } ··· 968 942 cmd->mrq->cmd->retries = 0; 969 943 mmc_request_done(host->mmc, cmd->mrq); 970 944 writel(0, host->base + DAVINCI_MMCIM); 945 + host->active_request = false; 971 946 } 972 947 } 973 948 ··· 1036 1009 * by read. So, it is not unbouned loop even in the case of 1037 1010 * non-dma. 1038 1011 */ 1039 - while (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) { 1040 - davinci_fifo_data_trans(host, rw_threshold); 1041 - status = readl(host->base + DAVINCI_MMCST0); 1042 - if (!status) 1043 - break; 1044 - qstatus |= status; 1012 + if (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) { 1013 + unsigned long im_val; 1014 + 1015 + /* 1016 + * If interrupts fire during the following loop, they will be 1017 + * handled by the handler, but the PIC will still buffer these. 1018 + * As a result, the handler will be called again to serve these 1019 + * needlessly. In order to avoid these spurious interrupts, 1020 + * keep interrupts masked during the loop. 1021 + */ 1022 + im_val = readl(host->base + DAVINCI_MMCIM); 1023 + writel(0, host->base + DAVINCI_MMCIM); 1024 + 1025 + do { 1026 + davinci_fifo_data_trans(host, rw_threshold); 1027 + status = readl(host->base + DAVINCI_MMCST0); 1028 + qstatus |= status; 1029 + } while (host->bytes_left && 1030 + (status & (MMCST0_DXRDY | MMCST0_DRRDY))); 1031 + 1032 + /* 1033 + * If an interrupt is pending, it is assumed it will fire when 1034 + * it is unmasked. This assumption is also taken when the MMCIM 1035 + * is first set. Otherwise, writing to MMCIM after reading the 1036 + * status is race-prone. 
1037 + */ 1038 + writel(im_val, host->base + DAVINCI_MMCIM); 1045 1039 } 1046 1040 1047 1041 if (qstatus & MMCST0_DATDNE) { ··· 1466 1418 struct mmc_davinci_host *host = platform_get_drvdata(pdev); 1467 1419 int ret; 1468 1420 1469 - mmc_host_enable(host->mmc); 1470 1421 ret = mmc_suspend_host(host->mmc); 1471 1422 if (!ret) { 1472 1423 writel(0, host->base + DAVINCI_MMCIM); 1473 1424 mmc_davinci_reset_ctrl(host, 1); 1474 - mmc_host_disable(host->mmc); 1475 1425 clk_disable(host->clk); 1476 1426 host->suspended = 1; 1477 1427 } else { 1478 1428 host->suspended = 0; 1479 - mmc_host_disable(host->mmc); 1480 1429 } 1481 1430 1482 1431 return ret; ··· 1489 1444 return 0; 1490 1445 1491 1446 clk_enable(host->clk); 1492 - mmc_host_enable(host->mmc); 1493 1447 1494 1448 mmc_davinci_reset_ctrl(host, 0); 1495 1449 ret = mmc_resume_host(host->mmc);
+158
drivers/mmc/host/dw_mmc-pci.c
··· 1 + /* 2 + * Synopsys DesignWare Multimedia Card PCI Interface driver 3 + * 4 + * Copyright (C) 2012 Vayavya Labs Pvt. Ltd. 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License as published by 8 + * the Free Software Foundation; either version 2 of the License, or 9 + * (at your option) any later version. 10 + */ 11 + 12 + #include <linux/interrupt.h> 13 + #include <linux/module.h> 14 + #include <linux/io.h> 15 + #include <linux/irq.h> 16 + #include <linux/pci.h> 17 + #include <linux/slab.h> 18 + #include <linux/mmc/host.h> 19 + #include <linux/mmc/mmc.h> 20 + #include <linux/mmc/dw_mmc.h> 21 + #include "dw_mmc.h" 22 + 23 + #define PCI_BAR_NO 2 24 + #define COMPLETE_BAR 0 25 + #define SYNOPSYS_DW_MCI_VENDOR_ID 0x700 26 + #define SYNOPSYS_DW_MCI_DEVICE_ID 0x1107 27 + /* Defining the Capabilities */ 28 + #define DW_MCI_CAPABILITIES (MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED |\ 29 + MMC_CAP_SD_HIGHSPEED | MMC_CAP_8_BIT_DATA |\ 30 + MMC_CAP_SDIO_IRQ) 31 + 32 + static struct dw_mci_board pci_board_data = { 33 + .num_slots = 1, 34 + .caps = DW_MCI_CAPABILITIES, 35 + .bus_hz = 33 * 1000 * 1000, 36 + .detect_delay_ms = 200, 37 + .fifo_depth = 32, 38 + }; 39 + 40 + static int __devinit dw_mci_pci_probe(struct pci_dev *pdev, 41 + const struct pci_device_id *entries) 42 + { 43 + struct dw_mci *host; 44 + int ret; 45 + 46 + ret = pci_enable_device(pdev); 47 + if (ret) 48 + return ret; 49 + if (pci_request_regions(pdev, "dw_mmc_pci")) { 50 + ret = -ENODEV; 51 + goto err_disable_dev; 52 + } 53 + 54 + host = kzalloc(sizeof(struct dw_mci), GFP_KERNEL); 55 + if (!host) { 56 + ret = -ENOMEM; 57 + goto err_release; 58 + } 59 + 60 + host->irq = pdev->irq; 61 + host->irq_flags = IRQF_SHARED; 62 + host->dev = pdev->dev; 63 + host->pdata = &pci_board_data; 64 + 65 + host->regs = pci_iomap(pdev, PCI_BAR_NO, COMPLETE_BAR); 66 + if (!host->regs) { 67 + ret = -EIO; 68 + goto err_unmap; 69 + } 70 + 71 
+ pci_set_drvdata(pdev, host); 72 + ret = dw_mci_probe(host); 73 + if (ret) 74 + goto err_probe_failed; 75 + return ret; 76 + 77 + err_probe_failed: 78 + pci_iounmap(pdev, host->regs); 79 + err_unmap: 80 + kfree(host); 81 + err_release: 82 + pci_release_regions(pdev); 83 + err_disable_dev: 84 + pci_disable_device(pdev); 85 + return ret; 86 + } 87 + 88 + static void __devexit dw_mci_pci_remove(struct pci_dev *pdev) 89 + { 90 + struct dw_mci *host = pci_get_drvdata(pdev); 91 + 92 + dw_mci_remove(host); 93 + pci_set_drvdata(pdev, NULL); 94 + pci_release_regions(pdev); 95 + pci_iounmap(pdev, host->regs); 96 + kfree(host); 97 + pci_disable_device(pdev); 98 + } 99 + 100 + #ifdef CONFIG_PM_SLEEP 101 + static int dw_mci_pci_suspend(struct device *dev) 102 + { 103 + int ret; 104 + struct pci_dev *pdev = to_pci_dev(dev); 105 + struct dw_mci *host = pci_get_drvdata(pdev); 106 + 107 + ret = dw_mci_suspend(host); 108 + return ret; 109 + } 110 + 111 + static int dw_mci_pci_resume(struct device *dev) 112 + { 113 + int ret; 114 + struct pci_dev *pdev = to_pci_dev(dev); 115 + struct dw_mci *host = pci_get_drvdata(pdev); 116 + 117 + ret = dw_mci_resume(host); 118 + return ret; 119 + } 120 + #else 121 + #define dw_mci_pci_suspend NULL 122 + #define dw_mci_pci_resume NULL 123 + #endif /* CONFIG_PM_SLEEP */ 124 + 125 + static SIMPLE_DEV_PM_OPS(dw_mci_pci_pmops, dw_mci_pci_suspend, dw_mci_pci_resume); 126 + 127 + static DEFINE_PCI_DEVICE_TABLE(dw_mci_pci_id) = { 128 + { PCI_DEVICE(SYNOPSYS_DW_MCI_VENDOR_ID, SYNOPSYS_DW_MCI_DEVICE_ID) }, 129 + {} 130 + }; 131 + MODULE_DEVICE_TABLE(pci, dw_mci_pci_id); 132 + 133 + static struct pci_driver dw_mci_pci_driver = { 134 + .name = "dw_mmc_pci", 135 + .id_table = dw_mci_pci_id, 136 + .probe = dw_mci_pci_probe, 137 + .remove = dw_mci_pci_remove, 138 + .driver = { 139 + .pm = &dw_mci_pci_pmops 140 + }, 141 + }; 142 + 143 + static int __init dw_mci_init(void) 144 + { 145 + return pci_register_driver(&dw_mci_pci_driver); 146 + } 147 + 148 + static 
void __exit dw_mci_exit(void) 149 + { 150 + pci_unregister_driver(&dw_mci_pci_driver); 151 + } 152 + 153 + module_init(dw_mci_init); 154 + module_exit(dw_mci_exit); 155 + 156 + MODULE_DESCRIPTION("DW Multimedia Card PCI Interface driver"); 157 + MODULE_AUTHOR("Shashidhar Hiremath <shashidharh@vayavyalabs.com>"); 158 + MODULE_LICENSE("GPL v2");
+134
drivers/mmc/host/dw_mmc-pltfm.c
··· 1 + /* 2 + * Synopsys DesignWare Multimedia Card Interface driver 3 + * 4 + * Copyright (C) 2009 NXP Semiconductors 5 + * Copyright (C) 2009, 2010 Imagination Technologies Ltd. 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License as published by 9 + * the Free Software Foundation; either version 2 of the License, or 10 + * (at your option) any later version. 11 + */ 12 + 13 + #include <linux/interrupt.h> 14 + #include <linux/module.h> 15 + #include <linux/io.h> 16 + #include <linux/irq.h> 17 + #include <linux/platform_device.h> 18 + #include <linux/slab.h> 19 + #include <linux/mmc/host.h> 20 + #include <linux/mmc/mmc.h> 21 + #include <linux/mmc/dw_mmc.h> 22 + #include "dw_mmc.h" 23 + 24 + static int dw_mci_pltfm_probe(struct platform_device *pdev) 25 + { 26 + struct dw_mci *host; 27 + struct resource *regs; 28 + int ret; 29 + 30 + host = kzalloc(sizeof(struct dw_mci), GFP_KERNEL); 31 + if (!host) 32 + return -ENOMEM; 33 + 34 + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 35 + if (!regs) { 36 + ret = -ENXIO; 37 + goto err_free; 38 + } 39 + 40 + host->irq = platform_get_irq(pdev, 0); 41 + if (host->irq < 0) { 42 + ret = host->irq; 43 + goto err_free; 44 + } 45 + 46 + host->dev = pdev->dev; 47 + host->irq_flags = 0; 48 + host->pdata = pdev->dev.platform_data; 49 + ret = -ENOMEM; 50 + host->regs = ioremap(regs->start, resource_size(regs)); 51 + if (!host->regs) 52 + goto err_free; 53 + platform_set_drvdata(pdev, host); 54 + ret = dw_mci_probe(host); 55 + if (ret) 56 + goto err_out; 57 + return ret; 58 + err_out: 59 + iounmap(host->regs); 60 + err_free: 61 + kfree(host); 62 + return ret; 63 + } 64 + 65 + static int __exit dw_mci_pltfm_remove(struct platform_device *pdev) 66 + { 67 + struct dw_mci *host = platform_get_drvdata(pdev); 68 + 69 + platform_set_drvdata(pdev, NULL); 70 + dw_mci_remove(host); 71 + iounmap(host->regs); 72 + kfree(host); 73 + return 0; 74 + } 75 + 
76 + #ifdef CONFIG_PM_SLEEP 77 + /* 78 + * TODO: we should probably disable the clock to the card in the suspend path. 79 + */ 80 + static int dw_mci_pltfm_suspend(struct device *dev) 81 + { 82 + int ret; 83 + struct dw_mci *host = dev_get_drvdata(dev); 84 + 85 + ret = dw_mci_suspend(host); 86 + if (ret) 87 + return ret; 88 + 89 + return 0; 90 + } 91 + 92 + static int dw_mci_pltfm_resume(struct device *dev) 93 + { 94 + int ret; 95 + struct dw_mci *host = dev_get_drvdata(dev); 96 + 97 + ret = dw_mci_resume(host); 98 + if (ret) 99 + return ret; 100 + 101 + return 0; 102 + } 103 + #else 104 + #define dw_mci_pltfm_suspend NULL 105 + #define dw_mci_pltfm_resume NULL 106 + #endif /* CONFIG_PM_SLEEP */ 107 + 108 + static SIMPLE_DEV_PM_OPS(dw_mci_pltfm_pmops, dw_mci_pltfm_suspend, dw_mci_pltfm_resume); 109 + 110 + static struct platform_driver dw_mci_pltfm_driver = { 111 + .remove = __exit_p(dw_mci_pltfm_remove), 112 + .driver = { 113 + .name = "dw_mmc", 114 + .pm = &dw_mci_pltfm_pmops, 115 + }, 116 + }; 117 + 118 + static int __init dw_mci_init(void) 119 + { 120 + return platform_driver_probe(&dw_mci_pltfm_driver, dw_mci_pltfm_probe); 121 + } 122 + 123 + static void __exit dw_mci_exit(void) 124 + { 125 + platform_driver_unregister(&dw_mci_pltfm_driver); 126 + } 127 + 128 + module_init(dw_mci_init); 129 + module_exit(dw_mci_exit); 130 + 131 + MODULE_DESCRIPTION("DW Multimedia Card Interface driver"); 132 + MODULE_AUTHOR("NXP Semiconductor VietNam"); 133 + MODULE_AUTHOR("Imagination Technologies Ltd"); 134 + MODULE_LICENSE("GPL v2");
+153 -129
drivers/mmc/host/dw_mmc.c
··· 268 268 struct mmc_command *cmd, u32 cmd_flags) 269 269 { 270 270 host->cmd = cmd; 271 - dev_vdbg(&host->pdev->dev, 271 + dev_vdbg(&host->dev, 272 272 "start command: ARGR=0x%08x CMDR=0x%08x\n", 273 273 cmd->arg, cmd_flags); 274 274 ··· 295 295 } 296 296 } 297 297 298 + static int dw_mci_get_dma_dir(struct mmc_data *data) 299 + { 300 + if (data->flags & MMC_DATA_WRITE) 301 + return DMA_TO_DEVICE; 302 + else 303 + return DMA_FROM_DEVICE; 304 + } 305 + 298 306 #ifdef CONFIG_MMC_DW_IDMAC 299 307 static void dw_mci_dma_cleanup(struct dw_mci *host) 300 308 { 301 309 struct mmc_data *data = host->data; 302 310 303 311 if (data) 304 - dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len, 305 - ((data->flags & MMC_DATA_WRITE) 306 - ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); 312 + if (!data->host_cookie) 313 + dma_unmap_sg(&host->dev, 314 + data->sg, 315 + data->sg_len, 316 + dw_mci_get_dma_dir(data)); 307 317 } 308 318 309 319 static void dw_mci_idmac_stop_dma(struct dw_mci *host) ··· 336 326 { 337 327 struct mmc_data *data = host->data; 338 328 339 - dev_vdbg(&host->pdev->dev, "DMA complete\n"); 329 + dev_vdbg(&host->dev, "DMA complete\n"); 340 330 341 331 host->dma_ops->cleanup(host); 342 332 ··· 438 428 }; 439 429 #endif /* CONFIG_MMC_DW_IDMAC */ 440 430 441 - static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) 431 + static int dw_mci_pre_dma_transfer(struct dw_mci *host, 432 + struct mmc_data *data, 433 + bool next) 442 434 { 443 435 struct scatterlist *sg; 444 - unsigned int i, direction, sg_len; 445 - u32 temp; 436 + unsigned int i, sg_len; 446 437 447 - host->using_dma = 0; 448 - 449 - /* If we don't have a channel, we can't do DMA */ 450 - if (!host->use_dma) 451 - return -ENODEV; 438 + if (!next && data->host_cookie) 439 + return data->host_cookie; 452 440 453 441 /* 454 442 * We don't do DMA on "complex" transfers, i.e. 
with ··· 455 447 */ 456 448 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD) 457 449 return -EINVAL; 450 + 458 451 if (data->blksz & 3) 459 452 return -EINVAL; 460 453 ··· 464 455 return -EINVAL; 465 456 } 466 457 458 + sg_len = dma_map_sg(&host->dev, 459 + data->sg, 460 + data->sg_len, 461 + dw_mci_get_dma_dir(data)); 462 + if (sg_len == 0) 463 + return -EINVAL; 464 + 465 + if (next) 466 + data->host_cookie = sg_len; 467 + 468 + return sg_len; 469 + } 470 + 471 + static void dw_mci_pre_req(struct mmc_host *mmc, 472 + struct mmc_request *mrq, 473 + bool is_first_req) 474 + { 475 + struct dw_mci_slot *slot = mmc_priv(mmc); 476 + struct mmc_data *data = mrq->data; 477 + 478 + if (!slot->host->use_dma || !data) 479 + return; 480 + 481 + if (data->host_cookie) { 482 + data->host_cookie = 0; 483 + return; 484 + } 485 + 486 + if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0) 487 + data->host_cookie = 0; 488 + } 489 + 490 + static void dw_mci_post_req(struct mmc_host *mmc, 491 + struct mmc_request *mrq, 492 + int err) 493 + { 494 + struct dw_mci_slot *slot = mmc_priv(mmc); 495 + struct mmc_data *data = mrq->data; 496 + 497 + if (!slot->host->use_dma || !data) 498 + return; 499 + 500 + if (data->host_cookie) 501 + dma_unmap_sg(&slot->host->dev, 502 + data->sg, 503 + data->sg_len, 504 + dw_mci_get_dma_dir(data)); 505 + data->host_cookie = 0; 506 + } 507 + 508 + static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) 509 + { 510 + int sg_len; 511 + u32 temp; 512 + 513 + host->using_dma = 0; 514 + 515 + /* If we don't have a channel, we can't do DMA */ 516 + if (!host->use_dma) 517 + return -ENODEV; 518 + 519 + sg_len = dw_mci_pre_dma_transfer(host, data, 0); 520 + if (sg_len < 0) 521 + return sg_len; 522 + 467 523 host->using_dma = 1; 468 524 469 - if (data->flags & MMC_DATA_READ) 470 - direction = DMA_FROM_DEVICE; 471 - else 472 - direction = DMA_TO_DEVICE; 473 - 474 - sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, 475 - 
direction); 476 - 477 - dev_vdbg(&host->pdev->dev, 525 + dev_vdbg(&host->dev, 478 526 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n", 479 527 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma, 480 528 sg_len); ··· 645 579 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); 646 580 647 581 /* enable clock */ 648 - mci_writel(host, CLKENA, SDMMC_CLKEN_ENABLE | 649 - SDMMC_CLKEN_LOW_PWR); 582 + mci_writel(host, CLKENA, ((SDMMC_CLKEN_ENABLE | 583 + SDMMC_CLKEN_LOW_PWR) << slot->id)); 650 584 651 585 /* inform CIU */ 652 586 mci_send_cmd(slot, ··· 866 800 867 801 static const struct mmc_host_ops dw_mci_ops = { 868 802 .request = dw_mci_request, 803 + .pre_req = dw_mci_pre_req, 804 + .post_req = dw_mci_post_req, 869 805 .set_ios = dw_mci_set_ios, 870 806 .get_ro = dw_mci_get_ro, 871 807 .get_cd = dw_mci_get_cd, ··· 889 821 slot = list_entry(host->queue.next, 890 822 struct dw_mci_slot, queue_node); 891 823 list_del(&slot->queue_node); 892 - dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n", 824 + dev_vdbg(&host->dev, "list not empty: %s is next\n", 893 825 mmc_hostname(slot->mmc)); 894 826 host->state = STATE_SENDING_CMD; 895 827 dw_mci_start_request(host, slot); 896 828 } else { 897 - dev_vdbg(&host->pdev->dev, "list empty\n"); 829 + dev_vdbg(&host->dev, "list empty\n"); 898 830 host->state = STATE_IDLE; 899 831 } 900 832 ··· 1033 965 data->bytes_xfered = 0; 1034 966 data->error = -ETIMEDOUT; 1035 967 } else { 1036 - dev_err(&host->pdev->dev, 968 + dev_err(&host->dev, 1037 969 "data FIFO error " 1038 970 "(status=%08x)\n", 1039 971 status); ··· 1750 1682 struct mmc_host *mmc; 1751 1683 struct dw_mci_slot *slot; 1752 1684 1753 - mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), &host->pdev->dev); 1685 + mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), &host->dev); 1754 1686 if (!mmc) 1755 1687 return -ENOMEM; 1756 1688 ··· 1788 1720 if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED) 1789 1721 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 1790 1722 1791 
- #ifdef CONFIG_MMC_DW_IDMAC 1792 - mmc->max_segs = host->ring_size; 1793 - mmc->max_blk_size = 65536; 1794 - mmc->max_blk_count = host->ring_size; 1795 - mmc->max_seg_size = 0x1000; 1796 - mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count; 1797 - #else 1723 + if (mmc->caps2 & MMC_CAP2_POWEROFF_NOTIFY) 1724 + mmc->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT; 1725 + else 1726 + mmc->power_notify_type = MMC_HOST_PW_NOTIFY_NONE; 1727 + 1798 1728 if (host->pdata->blk_settings) { 1799 1729 mmc->max_segs = host->pdata->blk_settings->max_segs; 1800 1730 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size; ··· 1801 1735 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size; 1802 1736 } else { 1803 1737 /* Useful defaults if platform data is unset. */ 1738 + #ifdef CONFIG_MMC_DW_IDMAC 1739 + mmc->max_segs = host->ring_size; 1740 + mmc->max_blk_size = 65536; 1741 + mmc->max_blk_count = host->ring_size; 1742 + mmc->max_seg_size = 0x1000; 1743 + mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count; 1744 + #else 1804 1745 mmc->max_segs = 64; 1805 1746 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */ 1806 1747 mmc->max_blk_count = 512; 1807 1748 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1808 1749 mmc->max_seg_size = mmc->max_req_size; 1809 - } 1810 1750 #endif /* CONFIG_MMC_DW_IDMAC */ 1751 + } 1811 1752 1812 1753 host->vmmc = regulator_get(mmc_dev(mmc), "vmmc"); 1813 1754 if (IS_ERR(host->vmmc)) { ··· 1862 1789 static void dw_mci_init_dma(struct dw_mci *host) 1863 1790 { 1864 1791 /* Alloc memory for sg translation */ 1865 - host->sg_cpu = dma_alloc_coherent(&host->pdev->dev, PAGE_SIZE, 1792 + host->sg_cpu = dma_alloc_coherent(&host->dev, PAGE_SIZE, 1866 1793 &host->sg_dma, GFP_KERNEL); 1867 1794 if (!host->sg_cpu) { 1868 - dev_err(&host->pdev->dev, "%s: could not alloc DMA memory\n", 1795 + dev_err(&host->dev, "%s: could not alloc DMA memory\n", 1869 1796 __func__); 1870 1797 goto no_dma; 1871 1798 } ··· 1873 1800 /* Determine 
which DMA interface to use */ 1874 1801 #ifdef CONFIG_MMC_DW_IDMAC 1875 1802 host->dma_ops = &dw_mci_idmac_ops; 1876 - dev_info(&host->pdev->dev, "Using internal DMA controller.\n"); 1803 + dev_info(&host->dev, "Using internal DMA controller.\n"); 1877 1804 #endif 1878 1805 1879 1806 if (!host->dma_ops) ··· 1881 1808 1882 1809 if (host->dma_ops->init) { 1883 1810 if (host->dma_ops->init(host)) { 1884 - dev_err(&host->pdev->dev, "%s: Unable to initialize " 1811 + dev_err(&host->dev, "%s: Unable to initialize " 1885 1812 "DMA Controller.\n", __func__); 1886 1813 goto no_dma; 1887 1814 } 1888 1815 } else { 1889 - dev_err(&host->pdev->dev, "DMA initialization not found.\n"); 1816 + dev_err(&host->dev, "DMA initialization not found.\n"); 1890 1817 goto no_dma; 1891 1818 } 1892 1819 ··· 1894 1821 return; 1895 1822 1896 1823 no_dma: 1897 - dev_info(&host->pdev->dev, "Using PIO mode.\n"); 1824 + dev_info(&host->dev, "Using PIO mode.\n"); 1898 1825 host->use_dma = 0; 1899 1826 return; 1900 1827 } ··· 1920 1847 return false; 1921 1848 } 1922 1849 1923 - static int dw_mci_probe(struct platform_device *pdev) 1850 + int dw_mci_probe(struct dw_mci *host) 1924 1851 { 1925 - struct dw_mci *host; 1926 - struct resource *regs; 1927 - struct dw_mci_board *pdata; 1928 - int irq, ret, i, width; 1852 + int width, i, ret = 0; 1929 1853 u32 fifo_size; 1930 1854 1931 - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1932 - if (!regs) 1933 - return -ENXIO; 1934 - 1935 - irq = platform_get_irq(pdev, 0); 1936 - if (irq < 0) 1937 - return irq; 1938 - 1939 - host = kzalloc(sizeof(struct dw_mci), GFP_KERNEL); 1940 - if (!host) 1941 - return -ENOMEM; 1942 - 1943 - host->pdev = pdev; 1944 - host->pdata = pdata = pdev->dev.platform_data; 1945 - if (!pdata || !pdata->init) { 1946 - dev_err(&pdev->dev, 1855 + if (!host->pdata || !host->pdata->init) { 1856 + dev_err(&host->dev, 1947 1857 "Platform data must supply init function\n"); 1948 - ret = -ENODEV; 1949 - goto err_freehost; 1858 + return 
-ENODEV; 1950 1859 } 1951 1860 1952 - if (!pdata->select_slot && pdata->num_slots > 1) { 1953 - dev_err(&pdev->dev, 1861 + if (!host->pdata->select_slot && host->pdata->num_slots > 1) { 1862 + dev_err(&host->dev, 1954 1863 "Platform data must supply select_slot function\n"); 1955 - ret = -ENODEV; 1956 - goto err_freehost; 1864 + return -ENODEV; 1957 1865 } 1958 1866 1959 - if (!pdata->bus_hz) { 1960 - dev_err(&pdev->dev, 1867 + if (!host->pdata->bus_hz) { 1868 + dev_err(&host->dev, 1961 1869 "Platform data must supply bus speed\n"); 1962 - ret = -ENODEV; 1963 - goto err_freehost; 1870 + return -ENODEV; 1964 1871 } 1965 1872 1966 - host->bus_hz = pdata->bus_hz; 1967 - host->quirks = pdata->quirks; 1873 + host->bus_hz = host->pdata->bus_hz; 1874 + host->quirks = host->pdata->quirks; 1968 1875 1969 1876 spin_lock_init(&host->lock); 1970 1877 INIT_LIST_HEAD(&host->queue); 1971 1878 1972 - ret = -ENOMEM; 1973 - host->regs = ioremap(regs->start, resource_size(regs)); 1974 - if (!host->regs) 1975 - goto err_freehost; 1976 1879 1977 - host->dma_ops = pdata->dma_ops; 1880 + host->dma_ops = host->pdata->dma_ops; 1978 1881 dw_mci_init_dma(host); 1979 1882 1980 1883 /* ··· 1980 1931 } 1981 1932 1982 1933 /* Reset all blocks */ 1983 - if (!mci_wait_reset(&pdev->dev, host)) { 1934 + if (!mci_wait_reset(&host->dev, host)) { 1984 1935 ret = -ENODEV; 1985 1936 goto err_dmaunmap; 1986 1937 } ··· 2023 1974 if (!dw_mci_card_workqueue) 2024 1975 goto err_dmaunmap; 2025 1976 INIT_WORK(&host->card_work, dw_mci_work_routine_card); 2026 - 2027 - ret = request_irq(irq, dw_mci_interrupt, 0, "dw-mci", host); 1977 + ret = request_irq(host->irq, dw_mci_interrupt, host->irq_flags, "dw-mci", host); 2028 1978 if (ret) 2029 1979 goto err_workqueue; 2030 - 2031 - platform_set_drvdata(pdev, host); 2032 1980 2033 1981 if (host->pdata->num_slots) 2034 1982 host->num_slots = host->pdata->num_slots; ··· 2046 2000 * Need to check the version-id and set data-offset for DATA register. 
2047 2001 */ 2048 2002 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID)); 2049 - dev_info(&pdev->dev, "Version ID is %04x\n", host->verid); 2003 + dev_info(&host->dev, "Version ID is %04x\n", host->verid); 2050 2004 2051 2005 if (host->verid < DW_MMC_240A) 2052 2006 host->data_offset = DATA_OFFSET; ··· 2063 2017 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD); 2064 2018 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */ 2065 2019 2066 - dev_info(&pdev->dev, "DW MMC controller at irq %d, " 2020 + dev_info(&host->dev, "DW MMC controller at irq %d, " 2067 2021 "%d bit host data width, " 2068 2022 "%u deep fifo\n", 2069 - irq, width, fifo_size); 2023 + host->irq, width, fifo_size); 2070 2024 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) 2071 - dev_info(&pdev->dev, "Internal DMAC interrupt fix enabled.\n"); 2025 + dev_info(&host->dev, "Internal DMAC interrupt fix enabled.\n"); 2072 2026 2073 2027 return 0; 2074 2028 ··· 2079 2033 dw_mci_cleanup_slot(host->slot[i], i); 2080 2034 i--; 2081 2035 } 2082 - free_irq(irq, host); 2036 + free_irq(host->irq, host); 2083 2037 2084 2038 err_workqueue: 2085 2039 destroy_workqueue(dw_mci_card_workqueue); ··· 2087 2041 err_dmaunmap: 2088 2042 if (host->use_dma && host->dma_ops->exit) 2089 2043 host->dma_ops->exit(host); 2090 - dma_free_coherent(&host->pdev->dev, PAGE_SIZE, 2044 + dma_free_coherent(&host->dev, PAGE_SIZE, 2091 2045 host->sg_cpu, host->sg_dma); 2092 - iounmap(host->regs); 2093 2046 2094 2047 if (host->vmmc) { 2095 2048 regulator_disable(host->vmmc); 2096 2049 regulator_put(host->vmmc); 2097 2050 } 2098 - 2099 - 2100 - err_freehost: 2101 - kfree(host); 2102 2051 return ret; 2103 2052 } 2053 + EXPORT_SYMBOL(dw_mci_probe); 2104 2054 2105 - static int __exit dw_mci_remove(struct platform_device *pdev) 2055 + void dw_mci_remove(struct dw_mci *host) 2106 2056 { 2107 - struct dw_mci *host = platform_get_drvdata(pdev); 2108 2057 int i; 2109 2058 2110 2059 mci_writel(host, RINTSTS, 0xFFFFFFFF); 2111 2060 
mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ 2112 2061 2113 - platform_set_drvdata(pdev, NULL); 2114 - 2115 2062 for (i = 0; i < host->num_slots; i++) { 2116 - dev_dbg(&pdev->dev, "remove slot %d\n", i); 2063 + dev_dbg(&host->dev, "remove slot %d\n", i); 2117 2064 if (host->slot[i]) 2118 2065 dw_mci_cleanup_slot(host->slot[i], i); 2119 2066 } ··· 2115 2076 mci_writel(host, CLKENA, 0); 2116 2077 mci_writel(host, CLKSRC, 0); 2117 2078 2118 - free_irq(platform_get_irq(pdev, 0), host); 2079 + free_irq(host->irq, host); 2119 2080 destroy_workqueue(dw_mci_card_workqueue); 2120 - dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); 2081 + dma_free_coherent(&host->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); 2121 2082 2122 2083 if (host->use_dma && host->dma_ops->exit) 2123 2084 host->dma_ops->exit(host); ··· 2127 2088 regulator_put(host->vmmc); 2128 2089 } 2129 2090 2130 - iounmap(host->regs); 2131 - 2132 - kfree(host); 2133 - return 0; 2134 2091 } 2092 + EXPORT_SYMBOL(dw_mci_remove); 2093 + 2094 + 2135 2095 2136 2096 #ifdef CONFIG_PM_SLEEP 2137 2097 /* 2138 2098 * TODO: we should probably disable the clock to the card in the suspend path. 
2139 2099 */ 2140 - static int dw_mci_suspend(struct device *dev) 2100 + int dw_mci_suspend(struct dw_mci *host) 2141 2101 { 2142 - int i, ret; 2143 - struct dw_mci *host = dev_get_drvdata(dev); 2102 + int i, ret = 0; 2144 2103 2145 2104 for (i = 0; i < host->num_slots; i++) { 2146 2105 struct dw_mci_slot *slot = host->slot[i]; ··· 2160 2123 2161 2124 return 0; 2162 2125 } 2126 + EXPORT_SYMBOL(dw_mci_suspend); 2163 2127 2164 - static int dw_mci_resume(struct device *dev) 2128 + int dw_mci_resume(struct dw_mci *host) 2165 2129 { 2166 2130 int i, ret; 2167 - struct dw_mci *host = dev_get_drvdata(dev); 2168 2131 2169 2132 if (host->vmmc) 2170 2133 regulator_enable(host->vmmc); ··· 2172 2135 if (host->dma_ops->init) 2173 2136 host->dma_ops->init(host); 2174 2137 2175 - if (!mci_wait_reset(dev, host)) { 2138 + if (!mci_wait_reset(&host->dev, host)) { 2176 2139 ret = -ENODEV; 2177 2140 return ret; 2178 2141 } ··· 2194 2157 if (ret < 0) 2195 2158 return ret; 2196 2159 } 2197 - 2198 2160 return 0; 2199 2161 } 2200 - #else 2201 - #define dw_mci_suspend NULL 2202 - #define dw_mci_resume NULL 2162 + EXPORT_SYMBOL(dw_mci_resume); 2203 2163 #endif /* CONFIG_PM_SLEEP */ 2204 - 2205 - static SIMPLE_DEV_PM_OPS(dw_mci_pmops, dw_mci_suspend, dw_mci_resume); 2206 - 2207 - static struct platform_driver dw_mci_driver = { 2208 - .remove = __exit_p(dw_mci_remove), 2209 - .driver = { 2210 - .name = "dw_mmc", 2211 - .pm = &dw_mci_pmops, 2212 - }, 2213 - }; 2214 2164 2215 2165 static int __init dw_mci_init(void) 2216 2166 { 2217 - return platform_driver_probe(&dw_mci_driver, dw_mci_probe); 2167 + printk(KERN_INFO "Synopsys Designware Multimedia Card Interface Driver"); 2168 + return 0; 2218 2169 } 2219 2170 2220 2171 static void __exit dw_mci_exit(void) 2221 2172 { 2222 - platform_driver_unregister(&dw_mci_driver); 2223 2173 } 2224 2174 2225 2175 module_init(dw_mci_init);
+7
drivers/mmc/host/dw_mmc.h
··· 175 175 (*(volatile u64 __force *)((dev)->regs + SDMMC_##reg) = (value)) 176 176 #endif 177 177 178 + extern int dw_mci_probe(struct dw_mci *host); 179 + extern void dw_mci_remove(struct dw_mci *host); 180 + #ifdef CONFIG_PM 181 + extern int dw_mci_suspend(struct dw_mci *host); 182 + extern int dw_mci_resume(struct dw_mci *host); 183 + #endif 184 + 178 185 #endif /* _DW_MMC_H_ */
+116 -177
drivers/mmc/host/omap_hsmmc.c
··· 26 26 #include <linux/platform_device.h> 27 27 #include <linux/timer.h> 28 28 #include <linux/clk.h> 29 + #include <linux/of.h> 30 + #include <linux/of_gpio.h> 31 + #include <linux/of_device.h> 29 32 #include <linux/mmc/host.h> 30 33 #include <linux/mmc/core.h> 31 34 #include <linux/mmc/mmc.h> ··· 109 106 #define SOFTRESET (1 << 1) 110 107 #define RESETDONE (1 << 0) 111 108 112 - /* 113 - * FIXME: Most likely all the data using these _DEVID defines should come 114 - * from the platform_data, or implemented in controller and slot specific 115 - * functions. 116 - */ 117 - #define OMAP_MMC1_DEVID 0 118 - #define OMAP_MMC2_DEVID 1 119 - #define OMAP_MMC3_DEVID 2 120 - #define OMAP_MMC4_DEVID 3 121 - #define OMAP_MMC5_DEVID 4 122 - 123 109 #define MMC_AUTOSUSPEND_DELAY 100 124 110 #define MMC_TIMEOUT_MS 20 125 111 #define OMAP_MMC_MIN_CLOCK 400000 ··· 156 164 void __iomem *base; 157 165 resource_size_t mapbase; 158 166 spinlock_t irq_lock; /* Prevent races with irq handler */ 159 - unsigned int id; 160 167 unsigned int dma_len; 161 168 unsigned int dma_sg_idx; 162 169 unsigned char bus_mode; ··· 170 179 int got_dbclk; 171 180 int response_busy; 172 181 int context_loss; 173 - int dpm_state; 174 182 int vdd; 175 183 int protect_card; 176 184 int reqs_blocked; ··· 231 241 232 242 #ifdef CONFIG_REGULATOR 233 243 234 - static int omap_hsmmc_1_set_power(struct device *dev, int slot, int power_on, 235 - int vdd) 236 - { 237 - struct omap_hsmmc_host *host = 238 - platform_get_drvdata(to_platform_device(dev)); 239 - int ret; 240 - 241 - if (mmc_slot(host).before_set_reg) 242 - mmc_slot(host).before_set_reg(dev, slot, power_on, vdd); 243 - 244 - if (power_on) 245 - ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd); 246 - else 247 - ret = mmc_regulator_set_ocr(host->mmc, host->vcc, 0); 248 - 249 - if (mmc_slot(host).after_set_reg) 250 - mmc_slot(host).after_set_reg(dev, slot, power_on, vdd); 251 - 252 - return ret; 253 - } 254 - 255 - static int 
omap_hsmmc_235_set_power(struct device *dev, int slot, int power_on, 244 + static int omap_hsmmc_set_power(struct device *dev, int slot, int power_on, 256 245 int vdd) 257 246 { 258 247 struct omap_hsmmc_host *host = ··· 243 274 * voltage always-on regulator. 244 275 */ 245 276 if (!host->vcc) 277 + return 0; 278 + /* 279 + * With DT, never turn OFF the regulator. This is because 280 + * the pbias cell programming support is still missing when 281 + * booting with Device tree 282 + */ 283 + if (of_have_populated_dt() && !vdd) 246 284 return 0; 247 285 248 286 if (mmc_slot(host).before_set_reg) ··· 294 318 return ret; 295 319 } 296 320 297 - static int omap_hsmmc_4_set_power(struct device *dev, int slot, int power_on, 298 - int vdd) 299 - { 300 - return 0; 301 - } 302 - 303 - static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep, 304 - int vdd, int cardsleep) 305 - { 306 - struct omap_hsmmc_host *host = 307 - platform_get_drvdata(to_platform_device(dev)); 308 - int mode = sleep ? REGULATOR_MODE_STANDBY : REGULATOR_MODE_NORMAL; 309 - 310 - return regulator_set_mode(host->vcc, mode); 311 - } 312 - 313 - static int omap_hsmmc_235_set_sleep(struct device *dev, int slot, int sleep, 314 - int vdd, int cardsleep) 315 - { 316 - struct omap_hsmmc_host *host = 317 - platform_get_drvdata(to_platform_device(dev)); 318 - int err, mode; 319 - 320 - /* 321 - * If we don't see a Vcc regulator, assume it's a fixed 322 - * voltage always-on regulator. 323 - */ 324 - if (!host->vcc) 325 - return 0; 326 - 327 - mode = sleep ? 
REGULATOR_MODE_STANDBY : REGULATOR_MODE_NORMAL; 328 - 329 - if (!host->vcc_aux) 330 - return regulator_set_mode(host->vcc, mode); 331 - 332 - if (cardsleep) { 333 - /* VCC can be turned off if card is asleep */ 334 - if (sleep) 335 - err = mmc_regulator_set_ocr(host->mmc, host->vcc, 0); 336 - else 337 - err = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd); 338 - } else 339 - err = regulator_set_mode(host->vcc, mode); 340 - if (err) 341 - return err; 342 - 343 - if (!mmc_slot(host).vcc_aux_disable_is_sleep) 344 - return regulator_set_mode(host->vcc_aux, mode); 345 - 346 - if (sleep) 347 - return regulator_disable(host->vcc_aux); 348 - else 349 - return regulator_enable(host->vcc_aux); 350 - } 351 - 352 - static int omap_hsmmc_4_set_sleep(struct device *dev, int slot, int sleep, 353 - int vdd, int cardsleep) 354 - { 355 - return 0; 356 - } 357 - 358 321 static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) 359 322 { 360 323 struct regulator *reg; 361 - int ret = 0; 362 324 int ocr_value = 0; 363 325 364 - switch (host->id) { 365 - case OMAP_MMC1_DEVID: 366 - /* On-chip level shifting via PBIAS0/PBIAS1 */ 367 - mmc_slot(host).set_power = omap_hsmmc_1_set_power; 368 - mmc_slot(host).set_sleep = omap_hsmmc_1_set_sleep; 369 - break; 370 - case OMAP_MMC2_DEVID: 371 - case OMAP_MMC3_DEVID: 372 - case OMAP_MMC5_DEVID: 373 - /* Off-chip level shifting, or none */ 374 - mmc_slot(host).set_power = omap_hsmmc_235_set_power; 375 - mmc_slot(host).set_sleep = omap_hsmmc_235_set_sleep; 376 - break; 377 - case OMAP_MMC4_DEVID: 378 - mmc_slot(host).set_power = omap_hsmmc_4_set_power; 379 - mmc_slot(host).set_sleep = omap_hsmmc_4_set_sleep; 380 - default: 381 - pr_err("MMC%d configuration not supported!\n", host->id); 382 - return -EINVAL; 383 - } 326 + mmc_slot(host).set_power = omap_hsmmc_set_power; 384 327 385 328 reg = regulator_get(host->dev, "vmmc"); 386 329 if (IS_ERR(reg)) { 387 330 dev_dbg(host->dev, "vmmc regulator missing\n"); 388 - /* 389 - * HACK: until fixed.c 
regulator is usable, 390 - * we don't require a main regulator 391 - * for MMC2 or MMC3 392 - */ 393 - if (host->id == OMAP_MMC1_DEVID) { 394 - ret = PTR_ERR(reg); 395 - goto err; 396 - } 397 331 } else { 398 332 host->vcc = reg; 399 333 ocr_value = mmc_regulator_get_ocrmask(reg); ··· 311 425 mmc_slot(host).ocr_mask = ocr_value; 312 426 } else { 313 427 if (!(mmc_slot(host).ocr_mask & ocr_value)) { 314 - pr_err("MMC%d ocrmask %x is not supported\n", 315 - host->id, mmc_slot(host).ocr_mask); 428 + dev_err(host->dev, "ocrmask %x is not supported\n", 429 + mmc_slot(host).ocr_mask); 316 430 mmc_slot(host).ocr_mask = 0; 317 431 return -EINVAL; 318 432 } ··· 345 459 } 346 460 347 461 return 0; 348 - 349 - err: 350 - mmc_slot(host).set_power = NULL; 351 - mmc_slot(host).set_sleep = NULL; 352 - return ret; 353 462 } 354 463 355 464 static void omap_hsmmc_reg_put(struct omap_hsmmc_host *host) ··· 352 471 regulator_put(host->vcc); 353 472 regulator_put(host->vcc_aux); 354 473 mmc_slot(host).set_power = NULL; 355 - mmc_slot(host).set_sleep = NULL; 356 474 } 357 475 358 476 static inline int omap_hsmmc_have_reg(void) ··· 590 710 OMAP_HSMMC_WRITE(host->base, SYSCONFIG, 591 711 OMAP_HSMMC_READ(host->base, SYSCONFIG) | AUTOIDLE); 592 712 593 - if (host->id == OMAP_MMC1_DEVID) { 713 + if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) { 594 714 if (host->power_mode != MMC_POWER_OFF && 595 715 (1 << ios->vdd) <= MMC_VDD_23_24) 596 716 hctl = SDVS18; ··· 1141 1261 host->reqs_blocked = 0; 1142 1262 if (mmc_slot(host).get_cover_state(host->dev, host->slot_id)) { 1143 1263 if (host->protect_card) { 1144 - pr_info("%s: cover is closed, " 1264 + dev_info(host->dev, "%s: cover is closed, " 1145 1265 "card is now accessible\n", 1146 1266 mmc_hostname(host->mmc)); 1147 1267 host->protect_card = 0; 1148 1268 } 1149 1269 } else { 1150 1270 if (!host->protect_card) { 1151 - pr_info("%s: cover is open, " 1271 + dev_info(host->dev, "%s: cover is open, " 1152 1272 "card is now 
inaccessible\n", 1153 1273 mmc_hostname(host->mmc)); 1154 1274 host->protect_card = 1; ··· 1285 1405 1286 1406 if (!next && data->host_cookie && 1287 1407 data->host_cookie != host->next_data.cookie) { 1288 - pr_warning("[%s] invalid cookie: data->host_cookie %d" 1408 + dev_warn(host->dev, "[%s] invalid cookie: data->host_cookie %d" 1289 1409 " host->next_data.cookie %d\n", 1290 1410 __func__, data->host_cookie, host->next_data.cookie); 1291 1411 data->host_cookie = 0; ··· 1543 1663 * of external transceiver; but they all handle 1.8V. 1544 1664 */ 1545 1665 if ((OMAP_HSMMC_READ(host->base, HCTL) & SDVSDET) && 1546 - (ios->vdd == DUAL_VOLT_OCR_BIT)) { 1666 + (ios->vdd == DUAL_VOLT_OCR_BIT) && 1667 + /* 1668 + * With pbias cell programming missing, this 1669 + * can't be allowed when booting with device 1670 + * tree. 1671 + */ 1672 + (!of_have_populated_dt())) { 1547 1673 /* 1548 1674 * The mmc_select_voltage fn of the core does 1549 1675 * not seem to set the power_mode to ··· 1634 1748 return 0; 1635 1749 } 1636 1750 1637 - static int omap_hsmmc_disable_fclk(struct mmc_host *mmc, int lazy) 1751 + static int omap_hsmmc_disable_fclk(struct mmc_host *mmc) 1638 1752 { 1639 1753 struct omap_hsmmc_host *host = mmc_priv(mmc); 1640 1754 ··· 1668 1782 if (host->pdata->get_context_loss_count) 1669 1783 context_loss = host->pdata->get_context_loss_count(host->dev); 1670 1784 1671 - seq_printf(s, "mmc%d:\n" 1672 - " enabled:\t%d\n" 1673 - " dpm_state:\t%d\n" 1674 - " nesting_cnt:\t%d\n" 1675 - " ctx_loss:\t%d:%d\n" 1676 - "\nregs:\n", 1677 - mmc->index, mmc->enabled ? 
1 : 0, 1678 - host->dpm_state, mmc->nesting_cnt, 1679 - host->context_loss, context_loss); 1785 + seq_printf(s, "mmc%d:\n ctx_loss:\t%d:%d\n\nregs:\n", 1786 + mmc->index, host->context_loss, context_loss); 1680 1787 1681 1788 if (host->suspended) { 1682 1789 seq_printf(s, "host suspended, can't read registers\n"); ··· 1726 1847 1727 1848 #endif 1728 1849 1850 + #ifdef CONFIG_OF 1851 + static u16 omap4_reg_offset = 0x100; 1852 + 1853 + static const struct of_device_id omap_mmc_of_match[] = { 1854 + { 1855 + .compatible = "ti,omap2-hsmmc", 1856 + }, 1857 + { 1858 + .compatible = "ti,omap3-hsmmc", 1859 + }, 1860 + { 1861 + .compatible = "ti,omap4-hsmmc", 1862 + .data = &omap4_reg_offset, 1863 + }, 1864 + {}, 1865 + } 1866 + MODULE_DEVICE_TABLE(of, omap_mmc_of_match); 1867 + 1868 + static struct omap_mmc_platform_data *of_get_hsmmc_pdata(struct device *dev) 1869 + { 1870 + struct omap_mmc_platform_data *pdata; 1871 + struct device_node *np = dev->of_node; 1872 + u32 bus_width; 1873 + 1874 + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); 1875 + if (!pdata) 1876 + return NULL; /* out of memory */ 1877 + 1878 + if (of_find_property(np, "ti,dual-volt", NULL)) 1879 + pdata->controller_flags |= OMAP_HSMMC_SUPPORTS_DUAL_VOLT; 1880 + 1881 + /* This driver only supports 1 slot */ 1882 + pdata->nr_slots = 1; 1883 + pdata->slots[0].switch_pin = of_get_named_gpio(np, "cd-gpios", 0); 1884 + pdata->slots[0].gpio_wp = of_get_named_gpio(np, "wp-gpios", 0); 1885 + 1886 + if (of_find_property(np, "ti,non-removable", NULL)) { 1887 + pdata->slots[0].nonremovable = true; 1888 + pdata->slots[0].no_regulator_off_init = true; 1889 + } 1890 + of_property_read_u32(np, "ti,bus-width", &bus_width); 1891 + if (bus_width == 4) 1892 + pdata->slots[0].caps |= MMC_CAP_4_BIT_DATA; 1893 + else if (bus_width == 8) 1894 + pdata->slots[0].caps |= MMC_CAP_8_BIT_DATA; 1895 + 1896 + if (of_find_property(np, "ti,needs-special-reset", NULL)) 1897 + pdata->slots[0].features |= HSMMC_HAS_UPDATED_RESET; 
1898 + 1899 + return pdata; 1900 + } 1901 + #else 1902 + static inline struct omap_mmc_platform_data 1903 + *of_get_hsmmc_pdata(struct device *dev) 1904 + { 1905 + return NULL; 1906 + } 1907 + #endif 1908 + 1729 1909 static int __init omap_hsmmc_probe(struct platform_device *pdev) 1730 1910 { 1731 1911 struct omap_mmc_platform_data *pdata = pdev->dev.platform_data; ··· 1792 1854 struct omap_hsmmc_host *host = NULL; 1793 1855 struct resource *res; 1794 1856 int ret, irq; 1857 + const struct of_device_id *match; 1858 + 1859 + match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev); 1860 + if (match) { 1861 + pdata = of_get_hsmmc_pdata(&pdev->dev); 1862 + if (match->data) { 1863 + u16 *offsetp = match->data; 1864 + pdata->reg_offset = *offsetp; 1865 + } 1866 + } 1795 1867 1796 1868 if (pdata == NULL) { 1797 1869 dev_err(&pdev->dev, "Platform Data is missing\n"); ··· 1842 1894 host->dev->dma_mask = &pdata->dma_mask; 1843 1895 host->dma_ch = -1; 1844 1896 host->irq = irq; 1845 - host->id = pdev->id; 1846 1897 host->slot_id = 0; 1847 1898 host->mapbase = res->start; 1848 1899 host->base = ioremap(host->mapbase, SZ_4K); ··· 1859 1912 if (mmc_slot(host).vcc_aux_disable_is_sleep) 1860 1913 mmc_slot(host).no_off = 1; 1861 1914 1862 - mmc->f_min = OMAP_MMC_MIN_CLOCK; 1863 - mmc->f_max = OMAP_MMC_MAX_CLOCK; 1915 + mmc->f_min = OMAP_MMC_MIN_CLOCK; 1916 + 1917 + if (pdata->max_freq > 0) 1918 + mmc->f_max = pdata->max_freq; 1919 + else 1920 + mmc->f_max = OMAP_MMC_MAX_CLOCK; 1864 1921 1865 1922 spin_lock_init(&host->irq_lock); 1866 1923 ··· 1877 1926 1878 1927 omap_hsmmc_context_save(host); 1879 1928 1880 - mmc->caps |= MMC_CAP_DISABLE; 1881 1929 if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) { 1882 1930 dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n"); 1883 1931 mmc->caps2 |= MMC_CAP2_NO_MULTI_READ; ··· 1927 1977 1928 1978 omap_hsmmc_conf_bus_power(host); 1929 1979 1930 - /* 
Select DMA lines */ 1931 - switch (host->id) { 1932 - case OMAP_MMC1_DEVID: 1933 - host->dma_line_tx = OMAP24XX_DMA_MMC1_TX; 1934 - host->dma_line_rx = OMAP24XX_DMA_MMC1_RX; 1935 - break; 1936 - case OMAP_MMC2_DEVID: 1937 - host->dma_line_tx = OMAP24XX_DMA_MMC2_TX; 1938 - host->dma_line_rx = OMAP24XX_DMA_MMC2_RX; 1939 - break; 1940 - case OMAP_MMC3_DEVID: 1941 - host->dma_line_tx = OMAP34XX_DMA_MMC3_TX; 1942 - host->dma_line_rx = OMAP34XX_DMA_MMC3_RX; 1943 - break; 1944 - case OMAP_MMC4_DEVID: 1945 - host->dma_line_tx = OMAP44XX_DMA_MMC4_TX; 1946 - host->dma_line_rx = OMAP44XX_DMA_MMC4_RX; 1947 - break; 1948 - case OMAP_MMC5_DEVID: 1949 - host->dma_line_tx = OMAP44XX_DMA_MMC5_TX; 1950 - host->dma_line_rx = OMAP44XX_DMA_MMC5_RX; 1951 - break; 1952 - default: 1953 - dev_err(mmc_dev(host->mmc), "Invalid MMC id\n"); 1980 + res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx"); 1981 + if (!res) { 1982 + dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n"); 1954 1983 goto err_irq; 1955 1984 } 1985 + host->dma_line_tx = res->start; 1986 + 1987 + res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx"); 1988 + if (!res) { 1989 + dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n"); 1990 + goto err_irq; 1991 + } 1992 + host->dma_line_rx = res->start; 1956 1993 1957 1994 /* Request IRQ for MMC operations */ 1958 1995 ret = request_irq(host->irq, omap_hsmmc_irq, 0, ··· 2020 2083 err_irq: 2021 2084 pm_runtime_mark_last_busy(host->dev); 2022 2085 pm_runtime_put_autosuspend(host->dev); 2086 + pm_runtime_disable(host->dev); 2023 2087 clk_put(host->fclk); 2024 2088 if (host->got_dbclk) { 2025 2089 clk_disable(host->dbclk); ··· 2207 2269 .name = DRIVER_NAME, 2208 2270 .owner = THIS_MODULE, 2209 2271 .pm = &omap_hsmmc_dev_pm_ops, 2272 + .of_match_table = of_match_ptr(omap_mmc_of_match), 2210 2273 }, 2211 2274 }; 2212 2275
+35 -2
drivers/mmc/host/sdhci-of-esdhc.c
··· 1 1 /* 2 2 * Freescale eSDHC controller driver. 3 3 * 4 - * Copyright (c) 2007, 2010 Freescale Semiconductor, Inc. 4 + * Copyright (c) 2007, 2010, 2012 Freescale Semiconductor, Inc. 5 5 * Copyright (c) 2009 MontaVista Software, Inc. 6 6 * 7 7 * Authors: Xiaobo Xie <X.Xie@freescale.com> ··· 14 14 */ 15 15 16 16 #include <linux/io.h> 17 + #include <linux/of.h> 17 18 #include <linux/delay.h> 18 19 #include <linux/module.h> 19 20 #include <linux/mmc/host.h> ··· 115 114 return pltfm_host->clock / 256 / 16; 116 115 } 117 116 117 + static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) 118 + { 119 + /* Workaround to reduce the clock frequency for p1010 esdhc */ 120 + if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) { 121 + if (clock > 20000000) 122 + clock -= 5000000; 123 + if (clock > 40000000) 124 + clock -= 5000000; 125 + } 126 + 127 + /* Set the clock */ 128 + esdhc_set_clock(host, clock); 129 + } 130 + 131 + #ifdef CONFIG_PM 132 + static u32 esdhc_proctl; 133 + static void esdhc_of_suspend(struct sdhci_host *host) 134 + { 135 + esdhc_proctl = sdhci_be32bs_readl(host, SDHCI_HOST_CONTROL); 136 + } 137 + 138 + static void esdhc_of_resume(struct sdhci_host *host) 139 + { 140 + esdhc_of_enable_dma(host); 141 + sdhci_be32bs_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL); 142 + } 143 + #endif 144 + 118 145 static struct sdhci_ops sdhci_esdhc_ops = { 119 146 .read_l = sdhci_be32bs_readl, 120 147 .read_w = esdhc_readw, ··· 150 121 .write_l = sdhci_be32bs_writel, 151 122 .write_w = esdhc_writew, 152 123 .write_b = esdhc_writeb, 153 - .set_clock = esdhc_set_clock, 124 + .set_clock = esdhc_of_set_clock, 154 125 .enable_dma = esdhc_of_enable_dma, 155 126 .get_max_clock = esdhc_of_get_max_clock, 156 127 .get_min_clock = esdhc_of_get_min_clock, 128 + #ifdef CONFIG_PM 129 + .platform_suspend = esdhc_of_suspend, 130 + .platform_resume = esdhc_of_resume, 131 + #endif 157 132 }; 158 133 159 134 static struct sdhci_pltfm_data sdhci_esdhc_pdata = {
+46 -1
drivers/mmc/host/sdhci-pci.c
··· 29 29 #include "sdhci.h" 30 30 31 31 /* 32 + * PCI device IDs 33 + */ 34 + #define PCI_DEVICE_ID_INTEL_PCH_SDIO0 0x8809 35 + #define PCI_DEVICE_ID_INTEL_PCH_SDIO1 0x880a 36 + 37 + /* 32 38 * PCI registers 33 39 */ 34 40 ··· 53 47 54 48 struct sdhci_pci_fixes { 55 49 unsigned int quirks; 50 + unsigned int quirks2; 56 51 bool allow_runtime_pm; 57 52 58 53 int (*probe) (struct sdhci_pci_chip *); ··· 80 73 struct pci_dev *pdev; 81 74 82 75 unsigned int quirks; 76 + unsigned int quirks2; 83 77 bool allow_runtime_pm; 84 78 const struct sdhci_pci_fixes *fixes; 85 79 ··· 180 172 return 0; 181 173 } 182 174 175 + static int pch_hc_probe_slot(struct sdhci_pci_slot *slot) 176 + { 177 + slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA; 178 + return 0; 179 + } 180 + 183 181 #ifdef CONFIG_PM_RUNTIME 184 182 185 183 static irqreturn_t sdhci_pci_sd_cd(int irq, void *dev_id) ··· 258 244 static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot) 259 245 { 260 246 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE; 261 - slot->host->mmc->caps2 = MMC_CAP2_BOOTPART_NOACC; 247 + slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC | 248 + MMC_CAP2_HC_ERASE_SZ; 262 249 return 0; 263 250 } 264 251 ··· 286 271 287 272 static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = { 288 273 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, 274 + .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON, 289 275 .allow_runtime_pm = true, 290 276 .probe_slot = mfd_sdio_probe_slot, 291 277 }; ··· 295 279 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, 296 280 .allow_runtime_pm = true, 297 281 .probe_slot = mfd_emmc_probe_slot, 282 + }; 283 + 284 + static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = { 285 + .quirks = SDHCI_QUIRK_BROKEN_ADMA, 286 + .probe_slot = pch_hc_probe_slot, 298 287 }; 299 288 300 289 /* O2Micro extra registers */ ··· 838 817 }, 839 818 840 819 { 820 + .vendor = PCI_VENDOR_ID_INTEL, 821 + .device = PCI_DEVICE_ID_INTEL_PCH_SDIO0, 822 + .subvendor = PCI_ANY_ID, 823 + .subdevice = 
PCI_ANY_ID, 824 + .driver_data = (kernel_ulong_t)&sdhci_intel_pch_sdio, 825 + }, 826 + 827 + { 828 + .vendor = PCI_VENDOR_ID_INTEL, 829 + .device = PCI_DEVICE_ID_INTEL_PCH_SDIO1, 830 + .subvendor = PCI_ANY_ID, 831 + .subdevice = PCI_ANY_ID, 832 + .driver_data = (kernel_ulong_t)&sdhci_intel_pch_sdio, 833 + }, 834 + 835 + { 841 836 .vendor = PCI_VENDOR_ID_O2, 842 837 .device = PCI_DEVICE_ID_O2_8120, 843 838 .subvendor = PCI_ANY_ID, ··· 1243 1206 host->hw_name = "PCI"; 1244 1207 host->ops = &sdhci_pci_ops; 1245 1208 host->quirks = chip->quirks; 1209 + host->quirks2 = chip->quirks2; 1246 1210 1247 1211 host->irq = pdev->irq; 1248 1212 ··· 1403 1365 chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data; 1404 1366 if (chip->fixes) { 1405 1367 chip->quirks = chip->fixes->quirks; 1368 + chip->quirks2 = chip->fixes->quirks2; 1406 1369 chip->allow_runtime_pm = chip->fixes->allow_runtime_pm; 1407 1370 } 1408 1371 chip->num_slots = slots; ··· 1417 1378 } 1418 1379 1419 1380 slots = chip->num_slots; /* Quirk may have changed this */ 1381 + 1382 + pci_enable_msi(pdev); 1420 1383 1421 1384 for (i = 0; i < slots; i++) { 1422 1385 slot = sdhci_pci_probe_slot(pdev, chip, first_bar, i); ··· 1438 1397 return 0; 1439 1398 1440 1399 free: 1400 + pci_disable_msi(pdev); 1401 + 1441 1402 pci_set_drvdata(pdev, NULL); 1442 1403 kfree(chip); 1443 1404 ··· 1461 1418 1462 1419 for (i = 0; i < chip->num_slots; i++) 1463 1420 sdhci_pci_remove_slot(chip->slots[i]); 1421 + 1422 + pci_disable_msi(pdev); 1464 1423 1465 1424 pci_set_drvdata(pdev, NULL); 1466 1425 kfree(chip);
+2 -7
drivers/mmc/host/sdhci-spear.c
··· 300 300 301 301 return sdhci_resume_host(host); 302 302 } 303 - 304 - const struct dev_pm_ops sdhci_pm_ops = { 305 - .suspend = sdhci_suspend, 306 - .resume = sdhci_resume, 307 - }; 308 303 #endif 304 + 305 + static SIMPLE_DEV_PM_OPS(sdhci_pm_ops, sdhci_suspend, sdhci_resume); 309 306 310 307 static struct platform_driver sdhci_driver = { 311 308 .driver = { 312 309 .name = "sdhci", 313 310 .owner = THIS_MODULE, 314 - #ifdef CONFIG_PM 315 311 .pm = &sdhci_pm_ops, 316 - #endif 317 312 }, 318 313 .probe = sdhci_probe, 319 314 .remove = __devexit_p(sdhci_remove),
+87 -13
drivers/mmc/host/sdhci-tegra.c
··· 19 19 #include <linux/clk.h> 20 20 #include <linux/io.h> 21 21 #include <linux/of.h> 22 + #include <linux/of_device.h> 22 23 #include <linux/of_gpio.h> 23 24 #include <linux/gpio.h> 24 25 #include <linux/mmc/card.h> ··· 31 30 #include <mach/sdhci.h> 32 31 33 32 #include "sdhci-pltfm.h" 33 + 34 + #define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0) 35 + #define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1) 36 + 37 + struct sdhci_tegra_soc_data { 38 + struct sdhci_pltfm_data *pdata; 39 + u32 nvquirks; 40 + }; 41 + 42 + struct sdhci_tegra { 43 + const struct tegra_sdhci_platform_data *plat; 44 + const struct sdhci_tegra_soc_data *soc_data; 45 + }; 34 46 35 47 static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg) 36 48 { ··· 60 46 61 47 static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg) 62 48 { 63 - if (unlikely(reg == SDHCI_HOST_VERSION)) { 49 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 50 + struct sdhci_tegra *tegra_host = pltfm_host->priv; 51 + const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; 52 + 53 + if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) && 54 + (reg == SDHCI_HOST_VERSION))) { 64 55 /* Erratum: Version register is invalid in HW. */ 65 56 return SDHCI_SPEC_200; 66 57 } ··· 75 56 76 57 static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg) 77 58 { 59 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 60 + struct sdhci_tegra *tegra_host = pltfm_host->priv; 61 + const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; 62 + 78 63 /* Seems like we're getting spurious timeout and crc errors, so 79 64 * disable signalling of them. In case of real errors software 80 65 * timers should take care of eventually detecting them. 
··· 88 65 89 66 writel(val, host->ioaddr + reg); 90 67 91 - if (unlikely(reg == SDHCI_INT_ENABLE)) { 68 + if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) && 69 + (reg == SDHCI_INT_ENABLE))) { 92 70 /* Erratum: Must enable block gap interrupt detection */ 93 71 u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL); 94 72 if (val & SDHCI_INT_CARD_INT) ··· 100 76 } 101 77 } 102 78 103 - static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci) 79 + static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host) 104 80 { 105 - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci); 106 - struct tegra_sdhci_platform_data *plat = pltfm_host->priv; 81 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 82 + struct sdhci_tegra *tegra_host = pltfm_host->priv; 83 + const struct tegra_sdhci_platform_data *plat = tegra_host->plat; 107 84 108 85 if (!gpio_is_valid(plat->wp_gpio)) 109 86 return -1; ··· 123 98 static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width) 124 99 { 125 100 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 126 - struct tegra_sdhci_platform_data *plat = pltfm_host->priv; 101 + struct sdhci_tegra *tegra_host = pltfm_host->priv; 102 + const struct tegra_sdhci_platform_data *plat = tegra_host->plat; 127 103 u32 ctrl; 128 104 129 105 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); ··· 150 124 .platform_8bit_width = tegra_sdhci_8bit, 151 125 }; 152 126 153 - static struct sdhci_pltfm_data sdhci_tegra_pdata = { 127 + #ifdef CONFIG_ARCH_TEGRA_2x_SOC 128 + static struct sdhci_pltfm_data sdhci_tegra20_pdata = { 154 129 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | 155 130 SDHCI_QUIRK_SINGLE_POWER_WRITE | 156 131 SDHCI_QUIRK_NO_HISPD_BIT | ··· 159 132 .ops = &tegra_sdhci_ops, 160 133 }; 161 134 135 + static struct sdhci_tegra_soc_data soc_data_tegra20 = { 136 + .pdata = &sdhci_tegra20_pdata, 137 + .nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 | 138 + NVQUIRK_ENABLE_BLOCK_GAP_DET, 139 + }; 140 + #endif 141 + 142 + #ifdef 
CONFIG_ARCH_TEGRA_3x_SOC 143 + static struct sdhci_pltfm_data sdhci_tegra30_pdata = { 144 + .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | 145 + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | 146 + SDHCI_QUIRK_SINGLE_POWER_WRITE | 147 + SDHCI_QUIRK_NO_HISPD_BIT | 148 + SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC, 149 + .ops = &tegra_sdhci_ops, 150 + }; 151 + 152 + static struct sdhci_tegra_soc_data soc_data_tegra30 = { 153 + .pdata = &sdhci_tegra30_pdata, 154 + }; 155 + #endif 156 + 162 157 static const struct of_device_id sdhci_tegra_dt_match[] __devinitdata = { 163 - { .compatible = "nvidia,tegra20-sdhci", }, 158 + #ifdef CONFIG_ARCH_TEGRA_3x_SOC 159 + { .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 }, 160 + #endif 161 + #ifdef CONFIG_ARCH_TEGRA_2x_SOC 162 + { .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 }, 163 + #endif 164 164 {} 165 165 }; 166 166 MODULE_DEVICE_TABLE(of, sdhci_dt_ids); ··· 218 164 219 165 static int __devinit sdhci_tegra_probe(struct platform_device *pdev) 220 166 { 167 + const struct of_device_id *match; 168 + const struct sdhci_tegra_soc_data *soc_data; 169 + struct sdhci_host *host; 221 170 struct sdhci_pltfm_host *pltfm_host; 222 171 struct tegra_sdhci_platform_data *plat; 223 - struct sdhci_host *host; 172 + struct sdhci_tegra *tegra_host; 224 173 struct clk *clk; 225 174 int rc; 226 175 227 - host = sdhci_pltfm_init(pdev, &sdhci_tegra_pdata); 176 + match = of_match_device(sdhci_tegra_dt_match, &pdev->dev); 177 + if (match) 178 + soc_data = match->data; 179 + else 180 + soc_data = &soc_data_tegra20; 181 + 182 + host = sdhci_pltfm_init(pdev, soc_data->pdata); 228 183 if (IS_ERR(host)) 229 184 return PTR_ERR(host); 230 185 ··· 250 187 goto err_no_plat; 251 188 } 252 189 253 - pltfm_host->priv = plat; 190 + tegra_host = devm_kzalloc(&pdev->dev, sizeof(*tegra_host), GFP_KERNEL); 191 + if (!tegra_host) { 192 + dev_err(mmc_dev(host->mmc), "failed to allocate tegra_host\n"); 193 + rc = -ENOMEM; 194 + goto err_no_plat; 195 + } 196 
+ 197 + tegra_host->plat = plat; 198 + tegra_host->soc_data = soc_data; 199 + 200 + pltfm_host->priv = tegra_host; 254 201 255 202 if (gpio_is_valid(plat->power_gpio)) { 256 203 rc = gpio_request(plat->power_gpio, "sdhci_power"); ··· 356 283 { 357 284 struct sdhci_host *host = platform_get_drvdata(pdev); 358 285 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 359 - struct tegra_sdhci_platform_data *plat = pltfm_host->priv; 286 + struct sdhci_tegra *tegra_host = pltfm_host->priv; 287 + const struct tegra_sdhci_platform_data *plat = tegra_host->plat; 360 288 int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); 361 289 362 290 sdhci_remove_host(host, dead); ··· 400 326 module_platform_driver(sdhci_tegra_driver); 401 327 402 328 MODULE_DESCRIPTION("SDHCI driver for Tegra"); 403 - MODULE_AUTHOR(" Google, Inc."); 329 + MODULE_AUTHOR("Google, Inc."); 404 330 MODULE_LICENSE("GPL v2");
+29 -9
drivers/mmc/host/sdhci.c
··· 2267 2267 { 2268 2268 irqreturn_t result; 2269 2269 struct sdhci_host *host = dev_id; 2270 - u32 intmask; 2271 - int cardint = 0; 2270 + u32 intmask, unexpected = 0; 2271 + int cardint = 0, max_loops = 16; 2272 2272 2273 2273 spin_lock(&host->lock); 2274 2274 ··· 2286 2286 goto out; 2287 2287 } 2288 2288 2289 + again: 2289 2290 DBG("*** %s got interrupt: 0x%08x\n", 2290 2291 mmc_hostname(host->mmc), intmask); 2291 2292 ··· 2345 2344 intmask &= ~SDHCI_INT_CARD_INT; 2346 2345 2347 2346 if (intmask) { 2348 - pr_err("%s: Unexpected interrupt 0x%08x.\n", 2349 - mmc_hostname(host->mmc), intmask); 2350 - sdhci_dumpregs(host); 2351 - 2347 + unexpected |= intmask; 2352 2348 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 2353 2349 } 2354 2350 2355 2351 result = IRQ_HANDLED; 2356 2352 2357 - mmiowb(); 2353 + intmask = sdhci_readl(host, SDHCI_INT_STATUS); 2354 + if (intmask && --max_loops) 2355 + goto again; 2358 2356 out: 2359 2357 spin_unlock(&host->lock); 2360 2358 2359 + if (unexpected) { 2360 + pr_err("%s: Unexpected interrupt 0x%08x.\n", 2361 + mmc_hostname(host->mmc), unexpected); 2362 + sdhci_dumpregs(host); 2363 + } 2361 2364 /* 2362 2365 * We have to delay this as it calls back into the driver. 
2363 2366 */ ··· 2383 2378 { 2384 2379 int ret; 2385 2380 bool has_tuning_timer; 2381 + 2382 + if (host->ops->platform_suspend) 2383 + host->ops->platform_suspend(host); 2386 2384 2387 2385 sdhci_disable_card_detection(host); 2388 2386 ··· 2431 2423 if (ret) 2432 2424 return ret; 2433 2425 2434 - sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER)); 2435 - mmiowb(); 2426 + if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) && 2427 + (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) { 2428 + /* Card keeps power but host controller does not */ 2429 + sdhci_init(host, 0); 2430 + host->pwr = 0; 2431 + host->clock = 0; 2432 + sdhci_do_set_ios(host, &host->mmc->ios); 2433 + } else { 2434 + sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER)); 2435 + mmiowb(); 2436 + } 2436 2437 2437 2438 ret = mmc_resume_host(host->mmc); 2438 2439 sdhci_enable_card_detection(host); 2440 + 2441 + if (host->ops->platform_resume) 2442 + host->ops->platform_resume(host); 2439 2443 2440 2444 /* Set the re-tuning expiration flag */ 2441 2445 if ((host->version >= SDHCI_SPEC_300) && host->tuning_count &&
+2
drivers/mmc/host/sdhci.h
··· 275 275 void (*platform_reset_exit)(struct sdhci_host *host, u8 mask); 276 276 int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs); 277 277 void (*hw_reset)(struct sdhci_host *host); 278 + void (*platform_suspend)(struct sdhci_host *host); 279 + void (*platform_resume)(struct sdhci_host *host); 278 280 }; 279 281 280 282 #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
-2
drivers/mmc/host/sh_mmcif.c
··· 746 746 case MMC_SET_WRITE_PROT: 747 747 case MMC_CLR_WRITE_PROT: 748 748 case MMC_ERASE: 749 - case MMC_GEN_CMD: 750 749 tmp |= CMD_SET_RBSY; 751 750 break; 752 751 } ··· 828 829 case MMC_SET_WRITE_PROT: 829 830 case MMC_CLR_WRITE_PROT: 830 831 case MMC_ERASE: 831 - case MMC_GEN_CMD: 832 832 mask = MASK_START_CMD | MASK_MRBSYE; 833 833 break; 834 834 default:
+24 -5
drivers/mmc/host/sh_mobile_sdhi.c
··· 90 90 return 0; 91 91 } 92 92 93 + static void sh_mobile_sdhi_cd_wakeup(const struct platform_device *pdev) 94 + { 95 + mmc_detect_change(dev_get_drvdata(&pdev->dev), msecs_to_jiffies(100)); 96 + } 97 + 98 + static const struct sh_mobile_sdhi_ops sdhi_ops = { 99 + .cd_wakeup = sh_mobile_sdhi_cd_wakeup, 100 + }; 101 + 93 102 static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) 94 103 { 95 104 struct sh_mobile_sdhi *priv; ··· 118 109 mmc_data = &priv->mmc_data; 119 110 p->pdata = mmc_data; 120 111 112 + if (p->init) { 113 + ret = p->init(pdev, &sdhi_ops); 114 + if (ret) 115 + goto einit; 116 + } 117 + 121 118 snprintf(clk_name, sizeof(clk_name), "sdhi%d", pdev->id); 122 119 priv->clk = clk_get(&pdev->dev, clk_name); 123 120 if (IS_ERR(priv->clk)) { ··· 131 116 ret = PTR_ERR(priv->clk); 132 117 goto eclkget; 133 118 } 134 - 135 - clk_enable(priv->clk); 136 119 137 120 mmc_data->hclk = clk_get_rate(priv->clk); 138 121 mmc_data->set_pwr = sh_mobile_sdhi_set_pwr; ··· 142 129 mmc_data->write16_hook = sh_mobile_sdhi_write16_hook; 143 130 mmc_data->ocr_mask = p->tmio_ocr_mask; 144 131 mmc_data->capabilities |= p->tmio_caps; 132 + mmc_data->cd_gpio = p->cd_gpio; 145 133 146 134 if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) { 147 135 priv->param_tx.slave_id = p->dma_slave_tx; ··· 225 211 226 212 dev_info(&pdev->dev, "%s base at 0x%08lx clock rate %u MHz\n", 227 213 mmc_hostname(host->mmc), (unsigned long) 228 - (platform_get_resource(pdev,IORESOURCE_MEM, 0)->start), 214 + (platform_get_resource(pdev, IORESOURCE_MEM, 0)->start), 229 215 mmc_data->hclk / 1000000); 230 216 231 217 return ret; ··· 246 232 eirq_card_detect: 247 233 tmio_mmc_host_remove(host); 248 234 eprobe: 249 - clk_disable(priv->clk); 250 235 clk_put(priv->clk); 251 236 eclkget: 237 + if (p->cleanup) 238 + p->cleanup(pdev); 239 + einit: 252 240 kfree(priv); 253 241 return ret; 254 242 } ··· 274 258 free_irq(irq, host); 275 259 } 276 260 277 - clk_disable(priv->clk); 278 261 
clk_put(priv->clk); 262 + 263 + if (p->cleanup) 264 + p->cleanup(pdev); 265 + 279 266 kfree(priv); 280 267 281 268 return 0;
+4 -5
drivers/mmc/host/tmio_mmc.h
··· 47 47 struct mmc_request *mrq; 48 48 struct mmc_data *data; 49 49 struct mmc_host *mmc; 50 - unsigned int sdio_irq_enabled; 50 + 51 + /* Controller power state */ 52 + bool power; 51 53 52 54 /* Callbacks for clock / power control */ 53 55 void (*set_pwr)(struct platform_device *host, int state); 54 56 void (*set_clk_div)(struct platform_device *host, int state); 55 - 56 - int pm_error; 57 - /* recognise system-wide suspend in runtime PM methods */ 58 - bool pm_global; 59 57 60 58 /* pio related stuff */ 61 59 struct scatterlist *sg_ptr; ··· 84 86 spinlock_t lock; /* protect host private data */ 85 87 unsigned long last_req_ts; 86 88 struct mutex ios_lock; /* protect set_ios() context */ 89 + bool native_hotplug; 87 90 }; 88 91 89 92 int tmio_mmc_host_probe(struct tmio_mmc_host **host,
+52 -56
drivers/mmc/host/tmio_mmc_pio.c
··· 34 34 #include <linux/io.h> 35 35 #include <linux/irq.h> 36 36 #include <linux/mfd/tmio.h> 37 + #include <linux/mmc/cd-gpio.h> 37 38 #include <linux/mmc/host.h> 38 39 #include <linux/mmc/tmio.h> 39 40 #include <linux/module.h> ··· 128 127 struct tmio_mmc_host *host = mmc_priv(mmc); 129 128 130 129 if (enable) { 131 - host->sdio_irq_enabled = 1; 132 130 host->sdio_irq_mask = TMIO_SDIO_MASK_ALL & 133 131 ~TMIO_SDIO_STAT_IOIRQ; 134 132 sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001); ··· 136 136 host->sdio_irq_mask = TMIO_SDIO_MASK_ALL; 137 137 sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask); 138 138 sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000); 139 - host->sdio_irq_enabled = 0; 140 139 } 141 140 } 142 141 ··· 303 304 { 304 305 struct mmc_data *data = host->data; 305 306 int c = cmd->opcode; 307 + u32 irq_mask = TMIO_MASK_CMD; 306 308 307 309 /* Command 12 is handled by hardware */ 308 310 if (cmd->opcode == 12 && !cmd->arg) { ··· 339 339 c |= TRANSFER_READ; 340 340 } 341 341 342 - tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD); 342 + if (!host->native_hotplug) 343 + irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT); 344 + tmio_mmc_enable_mmc_irqs(host, irq_mask); 343 345 344 346 /* Fire off the command */ 345 347 sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg); ··· 760 758 static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 761 759 { 762 760 struct tmio_mmc_host *host = mmc_priv(mmc); 763 - struct tmio_mmc_data *pdata = host->pdata; 761 + struct device *dev = &host->pdev->dev; 764 762 unsigned long flags; 765 763 766 764 mutex_lock(&host->ios_lock); ··· 768 766 spin_lock_irqsave(&host->lock, flags); 769 767 if (host->mrq) { 770 768 if (IS_ERR(host->mrq)) { 771 - dev_dbg(&host->pdev->dev, 769 + dev_dbg(dev, 772 770 "%s.%d: concurrent .set_ios(), clk %u, mode %u\n", 773 771 current->comm, task_pid_nr(current), 774 772 ios->clock, ios->power_mode); 775 773 host->mrq = ERR_PTR(-EINTR); 776 774 } else { 777 - 
dev_dbg(&host->pdev->dev, 775 + dev_dbg(dev, 778 776 "%s.%d: CMD%u active since %lu, now %lu!\n", 779 777 current->comm, task_pid_nr(current), 780 778 host->mrq->cmd->opcode, host->last_req_ts, jiffies); ··· 790 788 spin_unlock_irqrestore(&host->lock, flags); 791 789 792 790 /* 793 - * pdata->power == false only if COLD_CD is available, otherwise only 794 - * in short time intervals during probing or resuming 791 + * host->power toggles between false and true in both cases - either 792 + * or not the controller can be runtime-suspended during inactivity. 793 + * But if the controller has to be kept on, the runtime-pm usage_count 794 + * is kept positive, so no suspending actually takes place. 795 795 */ 796 796 if (ios->power_mode == MMC_POWER_ON && ios->clock) { 797 - if (!pdata->power) { 798 - pm_runtime_get_sync(&host->pdev->dev); 799 - pdata->power = true; 797 + if (!host->power) { 798 + pm_runtime_get_sync(dev); 799 + host->power = true; 800 800 } 801 801 tmio_mmc_set_clock(host, ios->clock); 802 802 /* power up SD bus */ ··· 809 805 } else if (ios->power_mode != MMC_POWER_UP) { 810 806 if (host->set_pwr && ios->power_mode == MMC_POWER_OFF) 811 807 host->set_pwr(host->pdev, 0); 812 - if (pdata->power) { 813 - pdata->power = false; 814 - pm_runtime_put(&host->pdev->dev); 808 + if (host->power) { 809 + host->power = false; 810 + pm_runtime_put(dev); 815 811 } 816 812 tmio_mmc_clk_stop(host); 817 813 } ··· 917 913 else 918 914 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 919 915 920 - pdata->power = false; 916 + _host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD || 917 + mmc->caps & MMC_CAP_NEEDS_POLL || 918 + mmc->caps & MMC_CAP_NONREMOVABLE); 919 + 920 + _host->power = false; 921 921 pm_runtime_enable(&pdev->dev); 922 922 ret = pm_runtime_resume(&pdev->dev); 923 923 if (ret < 0) ··· 934 926 * 3) a worker thread polls the sdhi - indicated by MMC_CAP_NEEDS_POLL 935 927 * 4) the medium is non-removable - indicated by MMC_CAP_NONREMOVABLE 936 928 * 
937 - * While we increment the rtpm counter for all scenarios when the mmc 938 - * core activates us by calling an appropriate set_ios(), we must 929 + * While we increment the runtime PM counter for all scenarios when 930 + * the mmc core activates us by calling an appropriate set_ios(), we 931 + * must additionally ensure that in case 2) the tmio mmc hardware stays 939 932 * additionally ensure that in case 2) the tmio mmc hardware stays 940 933 * powered on during runtime for the card detection to work. 941 934 */ 942 - if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD 943 - || mmc->caps & MMC_CAP_NEEDS_POLL 944 - || mmc->caps & MMC_CAP_NONREMOVABLE)) 935 + if (_host->native_hotplug) 945 936 pm_runtime_get_noresume(&pdev->dev); 946 937 947 938 tmio_mmc_clk_stop(_host); ··· 970 963 irq_mask |= TMIO_MASK_READOP; 971 964 if (!_host->chan_tx) 972 965 irq_mask |= TMIO_MASK_WRITEOP; 966 + if (!_host->native_hotplug) 967 + irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT); 973 968 974 969 tmio_mmc_enable_mmc_irqs(_host, irq_mask); 970 + 971 + if (pdata->flags & TMIO_MMC_USE_GPIO_CD) { 972 + ret = mmc_cd_gpio_request(mmc, pdata->cd_gpio); 973 + if (ret < 0) { 974 + tmio_mmc_host_remove(_host); 975 + return ret; 976 + } 977 + } 975 978 976 979 *host = _host; 977 980 ··· 1000 983 void tmio_mmc_host_remove(struct tmio_mmc_host *host) 1001 984 { 1002 985 struct platform_device *pdev = host->pdev; 986 + struct tmio_mmc_data *pdata = host->pdata; 987 + struct mmc_host *mmc = host->mmc; 1003 988 1004 - /* 1005 - * We don't have to manipulate pdata->power here: if there is a card in 1006 - * the slot, the runtime PM is active and our .runtime_resume() will not 1007 - * be run. If there is no card in the slot and the platform can suspend 1008 - * the controller, the runtime PM is suspended and pdata->power == false, 1009 - * so, our .runtime_resume() will not try to detect a card in the slot. 
1010 - */ 1011 - if (host->pdata->flags & TMIO_MMC_HAS_COLD_CD 1012 - || host->mmc->caps & MMC_CAP_NEEDS_POLL 1013 - || host->mmc->caps & MMC_CAP_NONREMOVABLE) 989 + if (pdata->flags & TMIO_MMC_USE_GPIO_CD) 990 + /* 991 + * This means we can miss a card-eject, but this is anyway 992 + * possible, because of delayed processing of hotplug events. 993 + */ 994 + mmc_cd_gpio_free(mmc); 995 + 996 + if (!host->native_hotplug) 1014 997 pm_runtime_get_sync(&pdev->dev); 1015 998 1016 999 dev_pm_qos_hide_latency_limit(&pdev->dev); 1017 1000 1018 - mmc_remove_host(host->mmc); 1001 + mmc_remove_host(mmc); 1019 1002 cancel_work_sync(&host->done); 1020 1003 cancel_delayed_work_sync(&host->delayed_reset_work); 1021 1004 tmio_mmc_release_dma(host); ··· 1024 1007 pm_runtime_disable(&pdev->dev); 1025 1008 1026 1009 iounmap(host->ctl); 1027 - mmc_free_host(host->mmc); 1010 + mmc_free_host(mmc); 1028 1011 } 1029 1012 EXPORT_SYMBOL(tmio_mmc_host_remove); 1030 1013 ··· 1038 1021 if (!ret) 1039 1022 tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL); 1040 1023 1041 - host->pm_error = pm_runtime_put_sync(dev); 1042 - 1043 1024 return ret; 1044 1025 } 1045 1026 EXPORT_SYMBOL(tmio_mmc_host_suspend); ··· 1047 1032 struct mmc_host *mmc = dev_get_drvdata(dev); 1048 1033 struct tmio_mmc_host *host = mmc_priv(mmc); 1049 1034 1035 + tmio_mmc_reset(host); 1036 + tmio_mmc_enable_dma(host, true); 1037 + 1050 1038 /* The MMC core will perform the complete set up */ 1051 - host->pdata->power = false; 1052 - 1053 - host->pm_global = true; 1054 - if (!host->pm_error) 1055 - pm_runtime_get_sync(dev); 1056 - 1057 - if (host->pm_global) { 1058 - /* Runtime PM resume callback didn't run */ 1059 - tmio_mmc_reset(host); 1060 - tmio_mmc_enable_dma(host, true); 1061 - host->pm_global = false; 1062 - } 1063 - 1064 1039 return mmc_resume_host(mmc); 1065 1040 } 1066 1041 EXPORT_SYMBOL(tmio_mmc_host_resume); ··· 1067 1062 { 1068 1063 struct mmc_host *mmc = dev_get_drvdata(dev); 1069 1064 struct tmio_mmc_host *host = 
mmc_priv(mmc); 1070 - struct tmio_mmc_data *pdata = host->pdata; 1071 1065 1072 1066 tmio_mmc_reset(host); 1073 1067 tmio_mmc_enable_dma(host, true); 1074 - 1075 - if (pdata->power) { 1076 - /* Only entered after a card-insert interrupt */ 1077 - if (!mmc->card) 1078 - tmio_mmc_set_ios(mmc, &mmc->ios); 1079 - mmc_detect_change(mmc, msecs_to_jiffies(100)); 1080 - } 1081 - host->pm_global = false; 1082 1068 1083 1069 return 0; 1084 1070 }
+19 -7
include/linux/mfd/tmio.h
··· 1 1 #ifndef MFD_TMIO_H 2 2 #define MFD_TMIO_H 3 3 4 + #include <linux/device.h> 4 5 #include <linux/fb.h> 5 6 #include <linux/io.h> 7 + #include <linux/jiffies.h> 6 8 #include <linux/platform_device.h> 7 9 #include <linux/pm_runtime.h> 8 10 ··· 66 64 #define TMIO_MMC_SDIO_IRQ (1 << 2) 67 65 /* 68 66 * Some platforms can detect card insertion events with controller powered 69 - * down, in which case they have to call tmio_mmc_cd_wakeup() to power up the 70 - * controller and report the event to the driver. 67 + * down, using a GPIO IRQ, in which case they have to fill in cd_irq, cd_gpio, 68 + * and cd_flags fields of struct tmio_mmc_data. 71 69 */ 72 70 #define TMIO_MMC_HAS_COLD_CD (1 << 3) 73 71 /* ··· 75 73 * idle before writing to some registers. 76 74 */ 77 75 #define TMIO_MMC_HAS_IDLE_WAIT (1 << 4) 76 + /* 77 + * A GPIO is used for card hotplug detection. We need an extra flag for this, 78 + * because 0 is a valid GPIO number too, and requiring users to specify 79 + * cd_gpio < 0 to disable GPIO hotplug would break backwards compatibility. 80 + */ 81 + #define TMIO_MMC_USE_GPIO_CD (1 << 5) 78 82 79 83 int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base); 80 84 int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base); ··· 105 97 u32 ocr_mask; /* available voltages */ 106 98 struct tmio_mmc_dma *dma; 107 99 struct device *dev; 108 - bool power; 100 + unsigned int cd_gpio; 109 101 void (*set_pwr)(struct platform_device *host, int state); 110 102 void (*set_clk_div)(struct platform_device *host, int state); 111 103 int (*get_cd)(struct platform_device *host); 112 104 int (*write16_hook)(struct tmio_mmc_host *host, int addr); 113 105 }; 114 106 107 + /* 108 + * This function is deprecated and will be removed soon. 
Please, convert your 109 + * platform to use drivers/mmc/core/cd-gpio.c 110 + */ 111 + #include <linux/mmc/host.h> 115 112 static inline void tmio_mmc_cd_wakeup(struct tmio_mmc_data *pdata) 116 113 { 117 - if (pdata && !pdata->power) { 118 - pdata->power = true; 119 - pm_runtime_get(pdata->dev); 120 - } 114 + if (pdata) 115 + mmc_detect_change(dev_get_drvdata(pdata->dev), 116 + msecs_to_jiffies(100)); 121 117 } 122 118 123 119 /*
+2
include/linux/mmc/card.h
··· 72 72 bool hpi_en; /* HPI enablebit */ 73 73 bool hpi; /* HPI support bit */ 74 74 unsigned int hpi_cmd; /* cmd used as HPI */ 75 + unsigned int data_sector_size; /* 512 bytes or 4KB */ 76 + unsigned int data_tag_unit_size; /* DATA TAG UNIT size */ 75 77 unsigned int boot_ro_lock; /* ro lock support */ 76 78 bool boot_ro_lockable; 77 79 u8 raw_partition_support; /* 160 */
+1 -2
include/linux/mmc/cd-gpio.h
··· 12 12 #define MMC_CD_GPIO_H 13 13 14 14 struct mmc_host; 15 - int mmc_cd_gpio_request(struct mmc_host *host, unsigned int gpio, 16 - unsigned int irq, unsigned long flags); 15 + int mmc_cd_gpio_request(struct mmc_host *host, unsigned int gpio); 17 16 void mmc_cd_gpio_free(struct mmc_host *host); 18 17 19 18 #endif
-1
include/linux/mmc/core.h
··· 175 175 176 176 extern int __mmc_claim_host(struct mmc_host *host, atomic_t *abort); 177 177 extern void mmc_release_host(struct mmc_host *host); 178 - extern void mmc_do_release_host(struct mmc_host *host); 179 178 extern int mmc_try_claim_host(struct mmc_host *host); 180 179 181 180 extern int mmc_flush_cache(struct mmc_card *);
+6 -2
include/linux/mmc/dw_mmc.h
··· 76 76 * @num_slots: Number of slots available. 77 77 * @verid: Denote Version ID. 78 78 * @data_offset: Set the offset of DATA register according to VERID. 79 - * @pdev: Platform device associated with the MMC controller. 79 + * @dev: Device associated with the MMC controller. 80 80 * @pdata: Platform data associated with the MMC controller. 81 81 * @slot: Slots sharing this MMC controller. 82 82 * @fifo_depth: depth of FIFO. ··· 87 87 * @push_data: Pointer to FIFO push function. 88 88 * @pull_data: Pointer to FIFO pull function. 89 89 * @quirks: Set of quirks that apply to specific versions of the IP. 90 + * @irq_flags: The flags to be passed to request_irq. 91 + * @irq: The irq value to be passed to request_irq. 90 92 * 91 93 * Locking 92 94 * ======= ··· 155 153 u32 fifoth_val; 156 154 u16 verid; 157 155 u16 data_offset; 158 - struct platform_device *pdev; 156 + struct device dev; 159 157 struct dw_mci_board *pdata; 160 158 struct dw_mci_slot *slot[MAX_MCI_SLOTS]; 161 159 ··· 176 174 u32 quirks; 177 175 178 176 struct regulator *vmmc; /* Power regulator */ 177 + unsigned long irq_flags; /* IRQ flags */ 178 + unsigned int irq; 179 179 }; 180 180 181 181 /* DMA ops for Internal/External DMAC interface */
+6 -42
include/linux/mmc/host.h
··· 81 81 82 82 struct mmc_host_ops { 83 83 /* 84 - * Hosts that support power saving can use the 'enable' and 'disable' 85 - * methods to exit and enter power saving states. 'enable' is called 86 - * when the host is claimed and 'disable' is called (or scheduled with 87 - * a delay) when the host is released. The 'disable' is scheduled if 88 - * the disable delay set by 'mmc_set_disable_delay()' is non-zero, 89 - * otherwise 'disable' is called immediately. 'disable' may be 90 - * scheduled repeatedly, to permit ever greater power saving at the 91 - * expense of ever greater latency to re-enable. Rescheduling is 92 - * determined by the return value of the 'disable' method. A positive 93 - * value gives the delay in milliseconds. 94 - * 95 - * In the case where a host function (like set_ios) may be called 96 - * with or without the host claimed, enabling and disabling can be 97 - * done directly and will nest correctly. Call 'mmc_host_enable()' and 98 - * 'mmc_host_lazy_disable()' for this purpose, but note that these 99 - * functions must be paired. 100 - * 101 - * Alternatively, 'mmc_host_enable()' may be paired with 102 - * 'mmc_host_disable()' which calls 'disable' immediately. In this 103 - * case the 'disable' method will be called with 'lazy' set to 0. 104 - * This is mainly useful for error paths. 105 - * 106 - * Because lazy disable may be called from a work queue, the 'disable' 107 - * method must claim the host when 'lazy' != 0, which will work 108 - * correctly because recursion is detected and handled. 84 + * 'enable' is called when the host is claimed and 'disable' is called 85 + * when the host is released. 'enable' and 'disable' are deprecated. 
109 86 */ 110 87 int (*enable)(struct mmc_host *host); 111 - int (*disable)(struct mmc_host *host, int lazy); 88 + int (*disable)(struct mmc_host *host); 112 89 /* 113 90 * It is optional for the host to implement pre_req and post_req in 114 91 * order to support double buffering of requests (prepare one ··· 196 219 #define MMC_CAP_SPI (1 << 4) /* Talks only SPI protocols */ 197 220 #define MMC_CAP_NEEDS_POLL (1 << 5) /* Needs polling for card-detection */ 198 221 #define MMC_CAP_8_BIT_DATA (1 << 6) /* Can the host do 8 bit transfers */ 199 - #define MMC_CAP_DISABLE (1 << 7) /* Can the host be disabled */ 222 + 200 223 #define MMC_CAP_NONREMOVABLE (1 << 8) /* Nonremovable e.g. eMMC */ 201 224 #define MMC_CAP_WAIT_WHILE_BUSY (1 << 9) /* Waits while card is busy */ 202 225 #define MMC_CAP_ERASE (1 << 10) /* Allow erase/trim commands */ ··· 236 259 #define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \ 237 260 MMC_CAP2_HS200_1_2V_SDR) 238 261 #define MMC_CAP2_BROKEN_VOLTAGE (1 << 7) /* Use the broken voltage */ 262 + #define MMC_CAP2_DETECT_ON_ERR (1 << 8) /* On I/O err check card removal */ 263 + #define MMC_CAP2_HC_ERASE_SZ (1 << 9) /* High-capacity erase size */ 239 264 240 265 mmc_pm_flag_t pm_caps; /* supported pm features */ 241 266 unsigned int power_notify_type; ··· 280 301 unsigned int removed:1; /* host is being removed */ 281 302 #endif 282 303 283 - /* Only used with MMC_CAP_DISABLE */ 284 - int enabled; /* host is enabled */ 285 304 int rescan_disable; /* disable card detection */ 286 - int nesting_cnt; /* "enable" nesting count */ 287 - int en_dis_recurs; /* detect recursion */ 288 - unsigned int disable_delay; /* disable delay in msecs */ 289 - struct delayed_work disable; /* disabling work */ 290 305 291 306 struct mmc_card *card; /* device attached to this host */ 292 307 ··· 380 407 int mmc_card_sleep(struct mmc_host *host); 381 408 int mmc_card_can_sleep(struct mmc_host *host); 382 409 383 - int mmc_host_enable(struct mmc_host *host); 384 - int 
mmc_host_disable(struct mmc_host *host); 385 - int mmc_host_lazy_disable(struct mmc_host *host); 386 410 int mmc_pm_notify(struct notifier_block *notify_block, unsigned long, void *); 387 - 388 - static inline void mmc_set_disable_delay(struct mmc_host *host, 389 - unsigned int disable_delay) 390 - { 391 - host->disable_delay = disable_delay; 392 - } 393 411 394 412 /* Module parameter */ 395 413 extern bool mmc_assume_removable;
+3
include/linux/mmc/mmc.h
··· 274 274 #define EXT_CSD_FLUSH_CACHE 32 /* W */ 275 275 #define EXT_CSD_CACHE_CTRL 33 /* R/W */ 276 276 #define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */ 277 + #define EXT_CSD_DATA_SECTOR_SIZE 61 /* R */ 277 278 #define EXT_CSD_GP_SIZE_MULT 143 /* R/W */ 278 279 #define EXT_CSD_PARTITION_ATTRIBUTE 156 /* R/W */ 279 280 #define EXT_CSD_PARTITION_SUPPORT 160 /* RO */ ··· 316 315 #define EXT_CSD_POWER_OFF_LONG_TIME 247 /* RO */ 317 316 #define EXT_CSD_GENERIC_CMD6_TIME 248 /* RO */ 318 317 #define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */ 318 + #define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */ 319 + #define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */ 319 320 #define EXT_CSD_HPI_FEATURES 503 /* RO */ 320 321 321 322 /*
+2
include/linux/mmc/sdhci.h
··· 90 90 91 91 unsigned int quirks2; /* More deviations from spec. */ 92 92 93 + #define SDHCI_QUIRK2_HOST_OFF_CARD_ON (1<<0) 94 + 93 95 int irq; /* Device IRQ */ 94 96 void __iomem *ioaddr; /* Mapped address */ 95 97
+9 -12
include/linux/mmc/sh_mmcif.h
··· 77 77 78 78 /* CE_CLK_CTRL */ 79 79 #define CLK_ENABLE (1 << 24) /* 1: output mmc clock */ 80 - #define CLK_CLEAR ((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16)) 81 - #define CLK_SUP_PCLK ((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16)) 82 - #define CLKDIV_4 (1<<16) /* mmc clock frequency. 83 - * n: bus clock/(2^(n+1)) */ 84 - #define CLKDIV_256 (7<<16) /* mmc clock frequency. (see above) */ 85 - #define SRSPTO_256 ((1 << 13) | (0 << 12)) /* resp timeout */ 86 - #define SRBSYTO_29 ((1 << 11) | (1 << 10) | \ 87 - (1 << 9) | (1 << 8)) /* resp busy timeout */ 88 - #define SRWDTO_29 ((1 << 7) | (1 << 6) | \ 89 - (1 << 5) | (1 << 4)) /* read/write timeout */ 90 - #define SCCSTO_29 ((1 << 3) | (1 << 2) | \ 91 - (1 << 1) | (1 << 0)) /* ccs timeout */ 80 + #define CLK_CLEAR (0xf << 16) 81 + #define CLK_SUP_PCLK (0xf << 16) 82 + #define CLKDIV_4 (1 << 16) /* mmc clock frequency. 83 + * n: bus clock/(2^(n+1)) */ 84 + #define CLKDIV_256 (7 << 16) /* mmc clock frequency. (see above) */ 85 + #define SRSPTO_256 (2 << 12) /* resp timeout */ 86 + #define SRBSYTO_29 (0xf << 8) /* resp busy timeout */ 87 + #define SRWDTO_29 (0xf << 4) /* read/write timeout */ 88 + #define SCCSTO_29 (0xf << 0) /* ccs timeout */ 92 89 93 90 /* CE_VERSION */ 94 91 #define SOFT_RST_ON (1 << 31)
+14
include/linux/mmc/sh_mobile_sdhi.h
··· 10 10 #define SH_MOBILE_SDHI_IRQ_SDCARD "sdcard" 11 11 #define SH_MOBILE_SDHI_IRQ_SDIO "sdio" 12 12 13 + /** 14 + * struct sh_mobile_sdhi_ops - SDHI driver callbacks 15 + * @cd_wakeup: trigger a card-detection run 16 + */ 17 + struct sh_mobile_sdhi_ops { 18 + void (*cd_wakeup)(const struct platform_device *pdev); 19 + }; 20 + 13 21 struct sh_mobile_sdhi_info { 14 22 int dma_slave_tx; 15 23 int dma_slave_rx; 16 24 unsigned long tmio_flags; 17 25 unsigned long tmio_caps; 18 26 u32 tmio_ocr_mask; /* available MMC voltages */ 27 + unsigned int cd_gpio; 19 28 struct tmio_mmc_data *pdata; 20 29 void (*set_pwr)(struct platform_device *pdev, int state); 21 30 int (*get_cd)(struct platform_device *pdev); 31 + 32 + /* callbacks for board specific setup code */ 33 + int (*init)(struct platform_device *pdev, 34 + const struct sh_mobile_sdhi_ops *ops); 35 + void (*cleanup)(struct platform_device *pdev); 22 36 }; 23 37 24 38 #endif /* LINUX_MMC_SH_MOBILE_SDHI_H */