Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc: (83 commits)
mmc: fix compile error when CONFIG_BLOCK is not enabled
mmc: core: Cleanup eMMC4.5 conditionals
mmc: omap_hsmmc: if multiblock reads are broken, disable them
mmc: core: add workaround for controllers with broken multiblock reads
mmc: core: Prevent too long response times for suspend
mmc: recognise SDIO cards with SDIO_CCCR_REV 3.00
mmc: sd: Handle SD3.0 cards not supporting UHS-I bus speed mode
mmc: core: support HPI send command
mmc: core: Add cache control for eMMC4.5 device
mmc: core: Modify the timeout value for writing power class
mmc: core: new discard feature support at eMMC v4.5
mmc: core: mmc sanitize feature support for v4.5
mmc: dw_mmc: modify DATA register offset
mmc: sdhci-pci: add flag for devices that can support runtime PM
mmc: omap_hsmmc: ensure pbias configuration is always done
mmc: core: Add Power Off Notify Feature eMMC 4.5
mmc: sdhci-s3c: fix potential NULL dereference
mmc: replace printk with appropriate display macro
mmc: core: Add default timeout value for CMD6
mmc: sdhci-pci: add runtime pm support
...

+3151 -1067
+27
Documentation/devicetree/bindings/mmc/nvidia-sdhci.txt
··· 1 + * NVIDIA Tegra Secure Digital Host Controller 2 + 3 + This controller on Tegra family SoCs provides an interface for MMC, SD, 4 + and SDIO types of memory cards. 5 + 6 + Required properties: 7 + - compatible : Should be "nvidia,<chip>-sdhci" 8 + - reg : Should contain SD/MMC registers location and length 9 + - interrupts : Should contain SD/MMC interrupt 10 + 11 + Optional properties: 12 + - cd-gpios : Specify GPIOs for card detection 13 + - wp-gpios : Specify GPIOs for write protection 14 + - power-gpios : Specify GPIOs for power control 15 + - support-8bit : Boolean, indicates if 8-bit mode should be used. 16 + 17 + Example: 18 + 19 + sdhci@c8000200 { 20 + compatible = "nvidia,tegra20-sdhci"; 21 + reg = <0xc8000200 0x200>; 22 + interrupts = <47>; 23 + cd-gpios = <&gpio 69 0>; /* gpio PI5 */ 24 + wp-gpios = <&gpio 57 0>; /* gpio PH1 */ 25 + power-gpios = <&gpio 155 0>; /* gpio PT3 */ 26 + support-8bit; 27 + };
+7 -1
Documentation/fault-injection/fault-injection.txt
··· 21 21 /sys/block/<device>/make-it-fail or 22 22 /sys/block/<device>/<partition>/make-it-fail. (generic_make_request()) 23 23 24 + o fail_mmc_request 25 + 26 + injects MMC data errors on devices permitted by setting 27 + debugfs entries under /sys/kernel/debug/mmc0/fail_mmc_request 28 + 24 29 Configure fault-injection capabilities behavior 25 30 ----------------------------------------------- 26 31 ··· 120 115 121 116 failslab= 122 117 fail_page_alloc= 123 - fail_make_request=<interval>,<probability>,<space>,<times> 118 + fail_make_request= 119 + mmc_core.fail_request=<interval>,<probability>,<space>,<times> 124 120 125 121 How to add new fault injection capability 126 122 -----------------------------------------
+1 -1
arch/arm/mach-at91/at91sam9260_devices.c
··· 319 319 if (!data) 320 320 return; 321 321 322 - for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { 322 + for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { 323 323 if (data->slot[i].bus_width) { 324 324 /* input/irq */ 325 325 if (data->slot[i].detect_pin) {
-24
arch/arm/mach-msm/devices-msm7x00.c
··· 176 176 .name = "cmd_irq", 177 177 }, 178 178 { 179 - .start = INT_SDC1_1, 180 - .end = INT_SDC1_1, 181 - .flags = IORESOURCE_IRQ, 182 - .name = "pio_irq", 183 - }, 184 - { 185 179 .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED, 186 180 .name = "status_irq" 187 181 }, ··· 197 203 .end = INT_SDC2_0, 198 204 .flags = IORESOURCE_IRQ, 199 205 .name = "cmd_irq", 200 - }, 201 - { 202 - .start = INT_SDC2_1, 203 - .end = INT_SDC2_1, 204 - .flags = IORESOURCE_IRQ, 205 - .name = "pio_irq", 206 206 }, 207 207 { 208 208 .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED, ··· 221 233 .flags = IORESOURCE_IRQ, 222 234 .name = "cmd_irq", 223 235 }, 224 - { 225 - .start = INT_SDC3_1, 226 - .end = INT_SDC3_1, 227 - .flags = IORESOURCE_IRQ, 228 - .name = "pio_irq", 229 - }, 230 236 { 231 237 .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED, 232 238 .name = "status_irq" ··· 243 261 .end = INT_SDC4_0, 244 262 .flags = IORESOURCE_IRQ, 245 263 .name = "cmd_irq", 246 - }, 247 - { 248 - .start = INT_SDC4_1, 249 - .end = INT_SDC4_1, 250 - .flags = IORESOURCE_IRQ, 251 - .name = "pio_irq", 252 264 }, 253 265 { 254 266 .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED,
-24
arch/arm/mach-msm/devices-qsd8x50.c
··· 140 140 .name = "cmd_irq", 141 141 }, 142 142 { 143 - .start = INT_SDC1_1, 144 - .end = INT_SDC1_1, 145 - .flags = IORESOURCE_IRQ, 146 - .name = "pio_irq", 147 - }, 148 - { 149 143 .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED, 150 144 .name = "status_irq" 151 145 }, ··· 161 167 .end = INT_SDC2_0, 162 168 .flags = IORESOURCE_IRQ, 163 169 .name = "cmd_irq", 164 - }, 165 - { 166 - .start = INT_SDC2_1, 167 - .end = INT_SDC2_1, 168 - .flags = IORESOURCE_IRQ, 169 - .name = "pio_irq", 170 170 }, 171 171 { 172 172 .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED, ··· 185 197 .flags = IORESOURCE_IRQ, 186 198 .name = "cmd_irq", 187 199 }, 188 - { 189 - .start = INT_SDC3_1, 190 - .end = INT_SDC3_1, 191 - .flags = IORESOURCE_IRQ, 192 - .name = "pio_irq", 193 - }, 194 200 { 195 201 .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED, 196 202 .name = "status_irq" ··· 207 225 .end = INT_SDC4_0, 208 226 .flags = IORESOURCE_IRQ, 209 227 .name = "cmd_irq", 210 - }, 211 - { 212 - .start = INT_SDC4_1, 213 - .end = INT_SDC4_1, 214 - .flags = IORESOURCE_IRQ, 215 - .name = "pio_irq", 216 228 }, 217 229 { 218 230 .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED,
+1 -8
arch/arm/mach-msm/include/mach/mmc.h
··· 8 8 #include <linux/mmc/card.h> 9 9 #include <linux/mmc/sdio_func.h> 10 10 11 - struct embedded_sdio_data { 12 - struct sdio_cis cis; 13 - struct sdio_cccr cccr; 14 - struct sdio_embedded_func *funcs; 15 - int num_funcs; 16 - }; 17 - 18 11 struct msm_mmc_gpio { 19 12 unsigned no; 20 13 const char *name; ··· 22 29 unsigned int ocr_mask; /* available voltages */ 23 30 u32 (*translate_vdd)(struct device *, unsigned int); 24 31 unsigned int (*status)(struct device *); 25 - struct embedded_sdio_data *embedded_sdio; 26 32 int (*register_status_notify)(void (*callback)(int card_present, void *dev_id), void *dev_id); 27 33 struct msm_mmc_gpio_data *gpio_data; 34 + void (*init_card)(struct mmc_card *card); 28 35 }; 29 36 30 37 #endif
+6
arch/arm/mach-shmobile/board-ag5evm.c
··· 355 355 .flags = IORESOURCE_MEM, 356 356 }, 357 357 [1] = { 358 + .name = SH_MOBILE_SDHI_IRQ_CARD_DETECT, 358 359 .start = gic_spi(83), 359 360 .flags = IORESOURCE_IRQ, 360 361 }, 361 362 [2] = { 363 + .name = SH_MOBILE_SDHI_IRQ_SDCARD, 362 364 .start = gic_spi(84), 363 365 .flags = IORESOURCE_IRQ, 364 366 }, 365 367 [3] = { 368 + .name = SH_MOBILE_SDHI_IRQ_SDIO, 366 369 .start = gic_spi(85), 367 370 .flags = IORESOURCE_IRQ, 368 371 }, ··· 401 398 .flags = IORESOURCE_MEM, 402 399 }, 403 400 [1] = { 401 + .name = SH_MOBILE_SDHI_IRQ_CARD_DETECT, 404 402 .start = gic_spi(87), 405 403 .flags = IORESOURCE_IRQ, 406 404 }, 407 405 [2] = { 406 + .name = SH_MOBILE_SDHI_IRQ_SDCARD, 408 407 .start = gic_spi(88), 409 408 .flags = IORESOURCE_IRQ, 410 409 }, 411 410 [3] = { 411 + .name = SH_MOBILE_SDHI_IRQ_SDIO, 412 412 .start = gic_spi(89), 413 413 .flags = IORESOURCE_IRQ, 414 414 },
+6
arch/arm/mach-shmobile/board-mackerel.c
··· 1072 1072 .flags = IORESOURCE_MEM, 1073 1073 }, 1074 1074 [1] = { 1075 + .name = SH_MOBILE_SDHI_IRQ_CARD_DETECT, 1075 1076 .start = evt2irq(0x0e80), /* SDHI1_SDHI1I0 */ 1076 1077 .flags = IORESOURCE_IRQ, 1077 1078 }, 1078 1079 [2] = { 1080 + .name = SH_MOBILE_SDHI_IRQ_SDCARD, 1079 1081 .start = evt2irq(0x0ea0), /* SDHI1_SDHI1I1 */ 1080 1082 .flags = IORESOURCE_IRQ, 1081 1083 }, 1082 1084 [3] = { 1085 + .name = SH_MOBILE_SDHI_IRQ_SDIO, 1083 1086 .start = evt2irq(0x0ec0), /* SDHI1_SDHI1I2 */ 1084 1087 .flags = IORESOURCE_IRQ, 1085 1088 }, ··· 1126 1123 .flags = IORESOURCE_MEM, 1127 1124 }, 1128 1125 [1] = { 1126 + .name = SH_MOBILE_SDHI_IRQ_CARD_DETECT, 1129 1127 .start = evt2irq(0x1200), /* SDHI2_SDHI2I0 */ 1130 1128 .flags = IORESOURCE_IRQ, 1131 1129 }, 1132 1130 [2] = { 1131 + .name = SH_MOBILE_SDHI_IRQ_SDCARD, 1133 1132 .start = evt2irq(0x1220), /* SDHI2_SDHI2I1 */ 1134 1133 .flags = IORESOURCE_IRQ, 1135 1134 }, 1136 1135 [3] = { 1136 + .name = SH_MOBILE_SDHI_IRQ_SDIO, 1137 1137 .start = evt2irq(0x1240), /* SDHI2_SDHI2I2 */ 1138 1138 .flags = IORESOURCE_IRQ, 1139 1139 },
+18 -1
arch/arm/plat-omap/include/plat/mmc.h
··· 31 31 32 32 #define OMAP_MMC_MAX_SLOTS 2 33 33 34 - #define OMAP_HSMMC_SUPPORTS_DUAL_VOLT BIT(1) 34 + /* 35 + * struct omap_mmc_dev_attr.flags possibilities 36 + * 37 + * OMAP_HSMMC_SUPPORTS_DUAL_VOLT: Some HSMMC controller instances can 38 + * operate with either 1.8Vdc or 3.0Vdc card voltages; this flag 39 + * should be set if this is the case. See for example Section 22.5.3 40 + * "MMC/SD/SDIO1 Bus Voltage Selection" of the OMAP34xx Multimedia 41 + * Device Silicon Revision 3.1.x Revision ZR (July 2011) (SWPU223R). 42 + * 43 + * OMAP_HSMMC_BROKEN_MULTIBLOCK_READ: Multiple-block read transfers 44 + * don't work correctly on some MMC controller instances on some 45 + * OMAP3 SoCs; this flag should be set if this is the case. See 46 + * for example Advisory 2.1.1.128 "MMC: Multiple Block Read 47 + * Operation Issue" in _OMAP3530/3525/3515/3503 Silicon Errata_ 48 + * Revision F (October 2010) (SPRZ278F). 49 + */ 50 + #define OMAP_HSMMC_SUPPORTS_DUAL_VOLT BIT(0) 51 + #define OMAP_HSMMC_BROKEN_MULTIBLOCK_READ BIT(1) 35 52 36 53 struct omap_mmc_dev_attr { 37 54 u8 flags;
+212 -98
drivers/mmc/card/block.c
··· 94 94 unsigned int read_only; 95 95 unsigned int part_type; 96 96 unsigned int name_idx; 97 + unsigned int reset_done; 98 + #define MMC_BLK_READ BIT(0) 99 + #define MMC_BLK_WRITE BIT(1) 100 + #define MMC_BLK_DISCARD BIT(2) 101 + #define MMC_BLK_SECDISCARD BIT(3) 97 102 98 103 /* 99 104 * Only set in main mmc_blk_data associated ··· 114 109 enum mmc_blk_status { 115 110 MMC_BLK_SUCCESS = 0, 116 111 MMC_BLK_PARTIAL, 117 - MMC_BLK_RETRY, 118 - MMC_BLK_RETRY_SINGLE, 119 - MMC_BLK_DATA_ERR, 120 112 MMC_BLK_CMD_ERR, 113 + MMC_BLK_RETRY, 121 114 MMC_BLK_ABORT, 115 + MMC_BLK_DATA_ERR, 116 + MMC_BLK_ECC_ERR, 122 117 }; 123 118 124 119 module_param(perdev_minors, int, 0444); ··· 296 291 struct mmc_card *card; 297 292 struct mmc_command cmd = {0}; 298 293 struct mmc_data data = {0}; 299 - struct mmc_request mrq = {0}; 294 + struct mmc_request mrq = {NULL}; 300 295 struct scatterlist sg; 301 296 int err; 302 297 ··· 447 442 { 448 443 int ret; 449 444 struct mmc_blk_data *main_md = mmc_get_drvdata(card); 445 + 450 446 if (main_md->part_curr == md->part_type) 451 447 return 0; 452 448 453 449 if (mmc_card_mmc(card)) { 454 - card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; 455 - card->ext_csd.part_config |= md->part_type; 450 + u8 part_config = card->ext_csd.part_config; 451 + 452 + part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; 453 + part_config |= md->part_type; 456 454 457 455 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 458 - EXT_CSD_PART_CONFIG, card->ext_csd.part_config, 456 + EXT_CSD_PART_CONFIG, part_config, 459 457 card->ext_csd.part_time); 460 458 if (ret) 461 459 return ret; 462 - } 460 + 461 + card->ext_csd.part_config = part_config; 462 + } 463 463 464 464 main_md->part_curr = md->part_type; 465 465 return 0; ··· 476 466 u32 result; 477 467 __be32 *blocks; 478 468 479 - struct mmc_request mrq = {0}; 469 + struct mmc_request mrq = {NULL}; 480 470 struct mmc_command cmd = {0}; 481 471 struct mmc_data data = {0}; 482 472 unsigned int timeout_us; ··· 626 
616 * Otherwise we don't understand what happened, so abort. 627 617 */ 628 618 static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, 629 - struct mmc_blk_request *brq) 619 + struct mmc_blk_request *brq, int *ecc_err) 630 620 { 631 621 bool prev_cmd_status_valid = true; 632 622 u32 status, stop_status = 0; ··· 651 641 if (err) 652 642 return ERR_ABORT; 653 643 644 + /* Flag ECC errors */ 645 + if ((status & R1_CARD_ECC_FAILED) || 646 + (brq->stop.resp[0] & R1_CARD_ECC_FAILED) || 647 + (brq->cmd.resp[0] & R1_CARD_ECC_FAILED)) 648 + *ecc_err = 1; 649 + 654 650 /* 655 651 * Check the current card state. If it is in some data transfer 656 652 * mode, tell it to stop (and hopefully transition back to TRAN.) ··· 674 658 */ 675 659 if (err) 676 660 return ERR_ABORT; 661 + if (stop_status & R1_CARD_ECC_FAILED) 662 + *ecc_err = 1; 677 663 } 678 664 679 665 /* Check for set block count errors */ ··· 687 669 if (brq->cmd.error) 688 670 return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error, 689 671 prev_cmd_status_valid, status); 672 + 673 + /* Data errors */ 674 + if (!brq->stop.error) 675 + return ERR_CONTINUE; 690 676 691 677 /* Now for stop errors. These aren't fatal to the transfer. 
*/ 692 678 pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n", ··· 708 686 return ERR_CONTINUE; 709 687 } 710 688 689 + static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host, 690 + int type) 691 + { 692 + int err; 693 + 694 + if (md->reset_done & type) 695 + return -EEXIST; 696 + 697 + md->reset_done |= type; 698 + err = mmc_hw_reset(host); 699 + /* Ensure we switch back to the correct partition */ 700 + if (err != -EOPNOTSUPP) { 701 + struct mmc_blk_data *main_md = mmc_get_drvdata(host->card); 702 + int part_err; 703 + 704 + main_md->part_curr = main_md->part_type; 705 + part_err = mmc_blk_part_switch(host->card, md); 706 + if (part_err) { 707 + /* 708 + * We have failed to get back into the correct 709 + * partition, so we need to abort the whole request. 710 + */ 711 + return -ENODEV; 712 + } 713 + } 714 + return err; 715 + } 716 + 717 + static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type) 718 + { 719 + md->reset_done &= ~type; 720 + } 721 + 711 722 static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) 712 723 { 713 724 struct mmc_blk_data *md = mq->data; 714 725 struct mmc_card *card = md->queue.card; 715 726 unsigned int from, nr, arg; 716 - int err = 0; 727 + int err = 0, type = MMC_BLK_DISCARD; 717 728 718 729 if (!mmc_can_erase(card)) { 719 730 err = -EOPNOTSUPP; ··· 756 701 from = blk_rq_pos(req); 757 702 nr = blk_rq_sectors(req); 758 703 759 - if (mmc_can_trim(card)) 704 + if (mmc_can_discard(card)) 705 + arg = MMC_DISCARD_ARG; 706 + else if (mmc_can_trim(card)) 760 707 arg = MMC_TRIM_ARG; 761 708 else 762 709 arg = MMC_ERASE_ARG; 763 - 710 + retry: 764 711 if (card->quirks & MMC_QUIRK_INAND_CMD38) { 765 712 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 766 713 INAND_CMD38_ARG_EXT_CSD, ··· 775 718 } 776 719 err = mmc_erase(card, from, nr, arg); 777 720 out: 721 + if (err == -EIO && !mmc_blk_reset(md, card->host, type)) 722 + goto retry; 723 + if (!err) 
724 + mmc_blk_reset_success(md, type); 778 725 spin_lock_irq(&md->lock); 779 726 __blk_end_request(req, err, blk_rq_bytes(req)); 780 727 spin_unlock_irq(&md->lock); ··· 792 731 struct mmc_blk_data *md = mq->data; 793 732 struct mmc_card *card = md->queue.card; 794 733 unsigned int from, nr, arg; 795 - int err = 0; 734 + int err = 0, type = MMC_BLK_SECDISCARD; 796 735 797 - if (!mmc_can_secure_erase_trim(card)) { 736 + if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) { 798 737 err = -EOPNOTSUPP; 738 + goto out; 739 + } 740 + 741 + /* The sanitize operation is supported at v4.5 only */ 742 + if (mmc_can_sanitize(card)) { 743 + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 744 + EXT_CSD_SANITIZE_START, 1, 0); 799 745 goto out; 800 746 } 801 747 ··· 813 745 arg = MMC_SECURE_TRIM1_ARG; 814 746 else 815 747 arg = MMC_SECURE_ERASE_ARG; 816 - 748 + retry: 817 749 if (card->quirks & MMC_QUIRK_INAND_CMD38) { 818 750 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 819 751 INAND_CMD38_ARG_EXT_CSD, ··· 837 769 err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); 838 770 } 839 771 out: 772 + if (err == -EIO && !mmc_blk_reset(md, card->host, type)) 773 + goto retry; 774 + if (!err) 775 + mmc_blk_reset_success(md, type); 840 776 spin_lock_irq(&md->lock); 841 777 __blk_end_request(req, err, blk_rq_bytes(req)); 842 778 spin_unlock_irq(&md->lock); ··· 851 779 static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) 852 780 { 853 781 struct mmc_blk_data *md = mq->data; 782 + struct mmc_card *card = md->queue.card; 783 + int ret = 0; 854 784 855 - /* 856 - * No-op, only service this because we need REQ_FUA for reliable 857 - * writes. 858 - */ 785 + ret = mmc_flush_cache(card); 786 + if (ret) 787 + ret = -EIO; 788 + 859 789 spin_lock_irq(&md->lock); 860 - __blk_end_request_all(req, 0); 790 + __blk_end_request_all(req, ret); 861 791 spin_unlock_irq(&md->lock); 862 792 863 - return 1; 793 + return ret ? 
0 : 1; 864 794 } 865 795 866 796 /* ··· 899 825 static int mmc_blk_err_check(struct mmc_card *card, 900 826 struct mmc_async_req *areq) 901 827 { 902 - enum mmc_blk_status ret = MMC_BLK_SUCCESS; 903 828 struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req, 904 829 mmc_active); 905 830 struct mmc_blk_request *brq = &mq_mrq->brq; 906 831 struct request *req = mq_mrq->req; 832 + int ecc_err = 0; 907 833 908 834 /* 909 835 * sbc.error indicates a problem with the set block count ··· 915 841 * stop.error indicates a problem with the stop command. Data 916 842 * may have been transferred, or may still be transferring. 917 843 */ 918 - if (brq->sbc.error || brq->cmd.error || brq->stop.error) { 919 - switch (mmc_blk_cmd_recovery(card, req, brq)) { 844 + if (brq->sbc.error || brq->cmd.error || brq->stop.error || 845 + brq->data.error) { 846 + switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) { 920 847 case ERR_RETRY: 921 848 return MMC_BLK_RETRY; 922 849 case ERR_ABORT: ··· 948 873 do { 949 874 int err = get_card_status(card, &status, 5); 950 875 if (err) { 951 - printk(KERN_ERR "%s: error %d requesting status\n", 876 + pr_err("%s: error %d requesting status\n", 952 877 req->rq_disk->disk_name, err); 953 878 return MMC_BLK_CMD_ERR; 954 879 } ··· 969 894 brq->cmd.resp[0], brq->stop.resp[0]); 970 895 971 896 if (rq_data_dir(req) == READ) { 972 - if (brq->data.blocks > 1) { 973 - /* Redo read one sector at a time */ 974 - pr_warning("%s: retrying using single block read\n", 975 - req->rq_disk->disk_name); 976 - return MMC_BLK_RETRY_SINGLE; 977 - } 897 + if (ecc_err) 898 + return MMC_BLK_ECC_ERR; 978 899 return MMC_BLK_DATA_ERR; 979 900 } else { 980 901 return MMC_BLK_CMD_ERR; 981 902 } 982 903 } 983 904 984 - if (ret == MMC_BLK_SUCCESS && 985 - blk_rq_bytes(req) != brq->data.bytes_xfered) 986 - ret = MMC_BLK_PARTIAL; 905 + if (!brq->data.bytes_xfered) 906 + return MMC_BLK_RETRY; 987 907 988 - return ret; 908 + if (blk_rq_bytes(req) != 
brq->data.bytes_xfered) 909 + return MMC_BLK_PARTIAL; 910 + 911 + return MMC_BLK_SUCCESS; 989 912 } 990 913 991 914 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, ··· 1030 957 if (brq->data.blocks > card->host->max_blk_count) 1031 958 brq->data.blocks = card->host->max_blk_count; 1032 959 1033 - /* 1034 - * After a read error, we redo the request one sector at a time 1035 - * in order to accurately determine which sectors can be read 1036 - * successfully. 1037 - */ 1038 - if (disable_multi && brq->data.blocks > 1) 1039 - brq->data.blocks = 1; 960 + if (brq->data.blocks > 1) { 961 + /* 962 + * After a read error, we redo the request one sector 963 + * at a time in order to accurately determine which 964 + * sectors can be read successfully. 965 + */ 966 + if (disable_multi) 967 + brq->data.blocks = 1; 968 + 969 + /* Some controllers can't do multiblock reads due to hw bugs */ 970 + if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ && 971 + rq_data_dir(req) == READ) 972 + brq->data.blocks = 1; 973 + } 1040 974 1041 975 if (brq->data.blocks > 1 || do_rel_wr) { 1042 976 /* SPI multiblock writes terminate using a special ··· 1129 1049 mmc_queue_bounce_pre(mqrq); 1130 1050 } 1131 1051 1052 + static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, 1053 + struct mmc_blk_request *brq, struct request *req, 1054 + int ret) 1055 + { 1056 + /* 1057 + * If this is an SD card and we're writing, we can first 1058 + * mark the known good sectors as ok. 1059 + * 1060 + * If the card is not SD, we can still ok written sectors 1061 + * as reported by the controller (which might be less than 1062 + * the real number of written sectors, but never more). 
1063 + */ 1064 + if (mmc_card_sd(card)) { 1065 + u32 blocks; 1066 + 1067 + blocks = mmc_sd_num_wr_blocks(card); 1068 + if (blocks != (u32)-1) { 1069 + spin_lock_irq(&md->lock); 1070 + ret = __blk_end_request(req, 0, blocks << 9); 1071 + spin_unlock_irq(&md->lock); 1072 + } 1073 + } else { 1074 + spin_lock_irq(&md->lock); 1075 + ret = __blk_end_request(req, 0, brq->data.bytes_xfered); 1076 + spin_unlock_irq(&md->lock); 1077 + } 1078 + return ret; 1079 + } 1080 + 1132 1081 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) 1133 1082 { 1134 1083 struct mmc_blk_data *md = mq->data; 1135 1084 struct mmc_card *card = md->queue.card; 1136 1085 struct mmc_blk_request *brq = &mq->mqrq_cur->brq; 1137 - int ret = 1, disable_multi = 0, retry = 0; 1086 + int ret = 1, disable_multi = 0, retry = 0, type; 1138 1087 enum mmc_blk_status status; 1139 1088 struct mmc_queue_req *mq_rq; 1140 1089 struct request *req; ··· 1185 1076 mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); 1186 1077 brq = &mq_rq->brq; 1187 1078 req = mq_rq->req; 1079 + type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; 1188 1080 mmc_queue_bounce_post(mq_rq); 1189 1081 1190 1082 switch (status) { ··· 1194 1084 /* 1195 1085 * A block was successfully transferred. 1196 1086 */ 1087 + mmc_blk_reset_success(md, type); 1197 1088 spin_lock_irq(&md->lock); 1198 1089 ret = __blk_end_request(req, 0, 1199 1090 brq->data.bytes_xfered); 1200 1091 spin_unlock_irq(&md->lock); 1092 + /* 1093 + * If the blk_end_request function returns non-zero even 1094 + * though all data has been transferred and no errors 1095 + * were returned by the host controller, it's a bug. 1096 + */ 1201 1097 if (status == MMC_BLK_SUCCESS && ret) { 1202 - /* 1203 - * The blk_end_request has returned non zero 1204 - * even though all data is transfered and no 1205 - * erros returned by host. 1206 - * If this happen it's a bug. 
1207 - */ 1208 - printk(KERN_ERR "%s BUG rq_tot %d d_xfer %d\n", 1098 + pr_err("%s BUG rq_tot %d d_xfer %d\n", 1209 1099 __func__, blk_rq_bytes(req), 1210 1100 brq->data.bytes_xfered); 1211 1101 rqc = NULL; ··· 1213 1103 } 1214 1104 break; 1215 1105 case MMC_BLK_CMD_ERR: 1216 - goto cmd_err; 1217 - case MMC_BLK_RETRY_SINGLE: 1218 - disable_multi = 1; 1219 - break; 1106 + ret = mmc_blk_cmd_err(md, card, brq, req, ret); 1107 + if (!mmc_blk_reset(md, card->host, type)) 1108 + break; 1109 + goto cmd_abort; 1220 1110 case MMC_BLK_RETRY: 1221 1111 if (retry++ < 5) 1222 1112 break; 1113 + /* Fall through */ 1223 1114 case MMC_BLK_ABORT: 1115 + if (!mmc_blk_reset(md, card->host, type)) 1116 + break; 1224 1117 goto cmd_abort; 1225 - case MMC_BLK_DATA_ERR: 1118 + case MMC_BLK_DATA_ERR: { 1119 + int err; 1120 + 1121 + err = mmc_blk_reset(md, card->host, type); 1122 + if (!err) 1123 + break; 1124 + if (err == -ENODEV) 1125 + goto cmd_abort; 1126 + /* Fall through */ 1127 + } 1128 + case MMC_BLK_ECC_ERR: 1129 + if (brq->data.blocks > 1) { 1130 + /* Redo read one sector at a time */ 1131 + pr_warning("%s: retrying using single block read\n", 1132 + req->rq_disk->disk_name); 1133 + disable_multi = 1; 1134 + break; 1135 + } 1226 1136 /* 1227 1137 * After an error, we redo I/O one sector at a 1228 1138 * time, so we only reach here after trying to ··· 1259 1129 1260 1130 if (ret) { 1261 1131 /* 1262 - * In case of a none complete request 1132 + * In case of an incomplete request 1263 1133 * prepare it again and resend. 1264 1134 */ 1265 1135 mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq); ··· 1268 1138 } while (ret); 1269 1139 1270 1140 return 1; 1271 - 1272 - cmd_err: 1273 - /* 1274 - * If this is an SD card and we're writing, we can first 1275 - * mark the known good sectors as ok.
1276 - * 1277 - * If the card is not SD, we can still ok written sectors 1278 - * as reported by the controller (which might be less than 1279 - * the real number of written sectors, but never more). 1280 - */ 1281 - if (mmc_card_sd(card)) { 1282 - u32 blocks; 1283 - 1284 - blocks = mmc_sd_num_wr_blocks(card); 1285 - if (blocks != (u32)-1) { 1286 - spin_lock_irq(&md->lock); 1287 - ret = __blk_end_request(req, 0, blocks << 9); 1288 - spin_unlock_irq(&md->lock); 1289 - } 1290 - } else { 1291 - spin_lock_irq(&md->lock); 1292 - ret = __blk_end_request(req, 0, brq->data.bytes_xfered); 1293 - spin_unlock_irq(&md->lock); 1294 - } 1295 1141 1296 1142 cmd_abort: 1297 1143 spin_lock_irq(&md->lock); ··· 1296 1190 1297 1191 ret = mmc_blk_part_switch(card, md); 1298 1192 if (ret) { 1193 + if (req) { 1194 + spin_lock_irq(&md->lock); 1195 + __blk_end_request_all(req, -EIO); 1196 + spin_unlock_irq(&md->lock); 1197 + } 1299 1198 ret = 0; 1300 1199 goto out; 1301 1200 } ··· 1485 1374 1486 1375 string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2, 1487 1376 cap_str, sizeof(cap_str)); 1488 - printk(KERN_INFO "%s: %s %s partition %u %s\n", 1377 + pr_info("%s: %s %s partition %u %s\n", 1489 1378 part_md->disk->disk_name, mmc_card_id(card), 1490 1379 mmc_card_name(card), part_md->part_type, cap_str); 1491 1380 return 0; 1492 1381 } 1493 1382 1383 + /* MMC Physical partitions consist of two boot partitions and 1384 + * up to four general purpose partitions. 1385 + For each partition enabled in EXT_CSD a block device will be allocated 1386 + to provide access to the partition.
1387 + */ 1388 + 1494 1389 static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) 1495 1390 { 1496 - int ret = 0; 1391 + int idx, ret = 0; 1497 1392 1498 1393 if (!mmc_card_mmc(card)) 1499 1394 return 0; 1500 1395 1501 - if (card->ext_csd.boot_size) { 1502 - ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT0, 1503 - card->ext_csd.boot_size >> 9, 1504 - true, 1505 - "boot0"); 1506 - if (ret) 1507 - return ret; 1508 - ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT1, 1509 - card->ext_csd.boot_size >> 9, 1510 - true, 1511 - "boot1"); 1512 - if (ret) 1513 - return ret; 1396 + for (idx = 0; idx < card->nr_parts; idx++) { 1397 + if (card->part[idx].size) { 1398 + ret = mmc_blk_alloc_part(card, md, 1399 + card->part[idx].part_cfg, 1400 + card->part[idx].size >> 9, 1401 + card->part[idx].force_ro, 1402 + card->part[idx].name); 1403 + if (ret) 1404 + return ret; 1405 + } 1514 1406 } 1515 1407 1516 1408 return ret; ··· 1529 1415 mmc_release_host(card->host); 1530 1416 1531 1417 if (err) { 1532 - printk(KERN_ERR "%s: unable to set block size to 512: %d\n", 1418 + pr_err("%s: unable to set block size to 512: %d\n", 1533 1419 md->disk->disk_name, err); 1534 1420 return -EINVAL; 1535 1421 } ··· 1631 1517 1632 1518 string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2, 1633 1519 cap_str, sizeof(cap_str)); 1634 - printk(KERN_INFO "%s: %s %s %s %s\n", 1520 + pr_info("%s: %s %s %s %s\n", 1635 1521 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), 1636 1522 cap_str, md->read_only ? "(ro)" : ""); 1637 1523
+48 -17
drivers/mmc/card/mmc_test.c
··· 22 22 #include <linux/debugfs.h> 23 23 #include <linux/uaccess.h> 24 24 #include <linux/seq_file.h> 25 + #include <linux/module.h> 25 26 26 27 #define RESULT_OK 0 27 28 #define RESULT_FAIL 1 ··· 251 250 if (!busy && mmc_test_busy(&cmd)) { 252 251 busy = 1; 253 252 if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) 254 - printk(KERN_INFO "%s: Warning: Host did not " 253 + pr_info("%s: Warning: Host did not " 255 254 "wait for busy state to end.\n", 256 255 mmc_hostname(test->card->host)); 257 256 } ··· 553 552 rate = mmc_test_rate(bytes, &ts); 554 553 iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */ 555 554 556 - printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu " 555 + pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu " 557 556 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n", 558 557 mmc_hostname(test->card->host), sectors, sectors >> 1, 559 558 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec, ··· 579 578 rate = mmc_test_rate(tot, &ts); 580 579 iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */ 581 580 582 - printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took " 581 + pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took " 583 582 "%lu.%09lu seconds (%u kB/s, %u KiB/s, " 584 583 "%u.%02u IOPS, sg_len %d)\n", 585 584 mmc_hostname(test->card->host), count, sectors, count, ··· 1409 1408 1410 1409 static int mmc_test_no_highmem(struct mmc_test_card *test) 1411 1410 { 1412 - printk(KERN_INFO "%s: Highmem not configured - test skipped\n", 1411 + pr_info("%s: Highmem not configured - test skipped\n", 1413 1412 mmc_hostname(test->card->host)); 1414 1413 return 0; 1415 1414 } ··· 1436 1435 t->max_seg_sz, &t->sg_len, min_sg_len); 1437 1436 } 1438 1437 if (err) 1439 - printk(KERN_INFO "%s: Failed to map sg list\n", 1438 + pr_info("%s: Failed to map sg list\n", 1440 1439 mmc_hostname(test->card->host)); 1441 1440 return err; 1442 1441 } ··· 2136 2135 2137 2136 return ret; 2138 2137 
err: 2139 - printk(KERN_INFO "[%s] error\n", __func__); 2138 + pr_info("[%s] error\n", __func__); 2140 2139 return ret; 2141 2140 } 2142 2141 ··· 2150 2149 2151 2150 if (rw->do_nonblock_req && 2152 2151 ((!pre_req && post_req) || (pre_req && !post_req))) { 2153 - printk(KERN_INFO "error: only one of pre/post is defined\n"); 2152 + pr_info("error: only one of pre/post is defined\n"); 2154 2153 return -EINVAL; 2155 2154 } 2156 2155 ··· 2327 2326 }; 2328 2327 2329 2328 return mmc_test_rw_multiple_sg_len(test, &test_data); 2329 + } 2330 + 2331 + /* 2332 + * eMMC hardware reset. 2333 + */ 2334 + static int mmc_test_hw_reset(struct mmc_test_card *test) 2335 + { 2336 + struct mmc_card *card = test->card; 2337 + struct mmc_host *host = card->host; 2338 + int err; 2339 + 2340 + err = mmc_hw_reset_check(host); 2341 + if (!err) 2342 + return RESULT_OK; 2343 + 2344 + if (err == -ENOSYS) 2345 + return RESULT_FAIL; 2346 + 2347 + if (err != -EOPNOTSUPP) 2348 + return err; 2349 + 2350 + if (!mmc_can_reset(card)) 2351 + return RESULT_UNSUP_CARD; 2352 + 2353 + return RESULT_UNSUP_HOST; 2330 2354 } 2331 2355 2332 2356 static const struct mmc_test_case mmc_test_cases[] = { ··· 2676 2650 .run = mmc_test_profile_sglen_r_nonblock_perf, 2677 2651 .cleanup = mmc_test_area_cleanup, 2678 2652 }, 2653 + 2654 + { 2655 + .name = "eMMC hardware reset", 2656 + .run = mmc_test_hw_reset, 2657 + }, 2679 2658 }; 2680 2659 2681 2660 static DEFINE_MUTEX(mmc_test_lock); ··· 2691 2660 { 2692 2661 int i, ret; 2693 2662 2694 - printk(KERN_INFO "%s: Starting tests of card %s...\n", 2663 + pr_info("%s: Starting tests of card %s...\n", 2695 2664 mmc_hostname(test->card->host), mmc_card_id(test->card)); 2696 2665 2697 2666 mmc_claim_host(test->card->host); ··· 2702 2671 if (testcase && ((i + 1) != testcase)) 2703 2672 continue; 2704 2673 2705 - printk(KERN_INFO "%s: Test case %d. %s...\n", 2674 + pr_info("%s: Test case %d. 
%s...\n", 2706 2675 mmc_hostname(test->card->host), i + 1, 2707 2676 mmc_test_cases[i].name); 2708 2677 2709 2678 if (mmc_test_cases[i].prepare) { 2710 2679 ret = mmc_test_cases[i].prepare(test); 2711 2680 if (ret) { 2712 - printk(KERN_INFO "%s: Result: Prepare " 2681 + pr_info("%s: Result: Prepare " 2713 2682 "stage failed! (%d)\n", 2714 2683 mmc_hostname(test->card->host), 2715 2684 ret); ··· 2739 2708 ret = mmc_test_cases[i].run(test); 2740 2709 switch (ret) { 2741 2710 case RESULT_OK: 2742 - printk(KERN_INFO "%s: Result: OK\n", 2711 + pr_info("%s: Result: OK\n", 2743 2712 mmc_hostname(test->card->host)); 2744 2713 break; 2745 2714 case RESULT_FAIL: 2746 - printk(KERN_INFO "%s: Result: FAILED\n", 2715 + pr_info("%s: Result: FAILED\n", 2747 2716 mmc_hostname(test->card->host)); 2748 2717 break; 2749 2718 case RESULT_UNSUP_HOST: 2750 - printk(KERN_INFO "%s: Result: UNSUPPORTED " 2719 + pr_info("%s: Result: UNSUPPORTED " 2751 2720 "(by host)\n", 2752 2721 mmc_hostname(test->card->host)); 2753 2722 break; 2754 2723 case RESULT_UNSUP_CARD: 2755 - printk(KERN_INFO "%s: Result: UNSUPPORTED " 2724 + pr_info("%s: Result: UNSUPPORTED " 2756 2725 "(by card)\n", 2757 2726 mmc_hostname(test->card->host)); 2758 2727 break; 2759 2728 default: 2760 - printk(KERN_INFO "%s: Result: ERROR (%d)\n", 2729 + pr_info("%s: Result: ERROR (%d)\n", 2761 2730 mmc_hostname(test->card->host), ret); 2762 2731 } 2763 2732 ··· 2768 2737 if (mmc_test_cases[i].cleanup) { 2769 2738 ret = mmc_test_cases[i].cleanup(test); 2770 2739 if (ret) { 2771 - printk(KERN_INFO "%s: Warning: Cleanup " 2740 + pr_info("%s: Warning: Cleanup " 2772 2741 "stage failed! (%d)\n", 2773 2742 mmc_hostname(test->card->host), 2774 2743 ret); ··· 2778 2747 2779 2748 mmc_release_host(test->card->host); 2780 2749 2781 - printk(KERN_INFO "%s: Tests completed.\n", 2750 + pr_info("%s: Tests completed.\n", 2782 2751 mmc_hostname(test->card->host)); 2783 2752 } 2784 2753
+4 -4
drivers/mmc/card/queue.c
··· 108 108 wake_up_process(mq->thread); 109 109 } 110 110 111 - struct scatterlist *mmc_alloc_sg(int sg_len, int *err) 111 + static struct scatterlist *mmc_alloc_sg(int sg_len, int *err) 112 112 { 113 113 struct scatterlist *sg; 114 114 ··· 140 140 /* granularity must not be greater than max. discard */ 141 141 if (card->pref_erase > max_discard) 142 142 q->limits.discard_granularity = 0; 143 - if (mmc_can_secure_erase_trim(card)) 143 + if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card)) 144 144 queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q); 145 145 } 146 146 ··· 197 197 if (bouncesz > 512) { 198 198 mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL); 199 199 if (!mqrq_cur->bounce_buf) { 200 - printk(KERN_WARNING "%s: unable to " 200 + pr_warning("%s: unable to " 201 201 "allocate bounce cur buffer\n", 202 202 mmc_card_name(card)); 203 203 } 204 204 mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL); 205 205 if (!mqrq_prev->bounce_buf) { 206 - printk(KERN_WARNING "%s: unable to " 206 + pr_warning("%s: unable to " 207 207 "allocate bounce prev buffer\n", 208 208 mmc_card_name(card)); 209 209 kfree(mqrq_cur->bounce_buf);
+5 -5
drivers/mmc/card/sdio_uart.c
··· 1082 1082 return -ENOMEM; 1083 1083 1084 1084 if (func->class == SDIO_CLASS_UART) { 1085 - printk(KERN_WARNING "%s: need info on UART class basic setup\n", 1085 + pr_warning("%s: need info on UART class basic setup\n", 1086 1086 sdio_func_id(func)); 1087 1087 kfree(port); 1088 1088 return -ENOSYS; ··· 1101 1101 break; 1102 1102 } 1103 1103 if (!tpl) { 1104 - printk(KERN_WARNING 1104 + pr_warning( 1105 1105 "%s: can't find tuple 0x91 subtuple 0 (SUBTPL_SIOREG) for GPS class\n", 1106 1106 sdio_func_id(func)); 1107 1107 kfree(port); 1108 1108 return -EINVAL; 1109 1109 } 1110 - printk(KERN_DEBUG "%s: Register ID = 0x%02x, Exp ID = 0x%02x\n", 1110 + pr_debug("%s: Register ID = 0x%02x, Exp ID = 0x%02x\n", 1111 1111 sdio_func_id(func), tpl->data[2], tpl->data[3]); 1112 1112 port->regs_offset = (tpl->data[4] << 0) | 1113 1113 (tpl->data[5] << 8) | 1114 1114 (tpl->data[6] << 16); 1115 - printk(KERN_DEBUG "%s: regs offset = 0x%x\n", 1115 + pr_debug("%s: regs offset = 0x%x\n", 1116 1116 sdio_func_id(func), port->regs_offset); 1117 1117 port->uartclk = tpl->data[7] * 115200; 1118 1118 if (port->uartclk == 0) 1119 1119 port->uartclk = 115200; 1120 - printk(KERN_DEBUG "%s: clk %d baudcode %u 4800-div %u\n", 1120 + pr_debug("%s: clk %d baudcode %u 4800-div %u\n", 1121 1121 sdio_func_id(func), port->uartclk, 1122 1122 tpl->data[7], tpl->data[8] | (tpl->data[9] << 8)); 1123 1123 } else {
+3 -3
drivers/mmc/core/bus.c
··· 295 295 } 296 296 297 297 if (mmc_host_is_spi(card->host)) { 298 - printk(KERN_INFO "%s: new %s%s%s card on SPI\n", 298 + pr_info("%s: new %s%s%s card on SPI\n", 299 299 mmc_hostname(card->host), 300 300 mmc_card_highspeed(card) ? "high speed " : "", 301 301 mmc_card_ddr_mode(card) ? "DDR " : "", ··· 334 334 335 335 if (mmc_card_present(card)) { 336 336 if (mmc_host_is_spi(card->host)) { 337 - printk(KERN_INFO "%s: SPI card removed\n", 337 + pr_info("%s: SPI card removed\n", 338 338 mmc_hostname(card->host)); 339 339 } else { 340 - printk(KERN_INFO "%s: card %04x removed\n", 340 + pr_info("%s: card %04x removed\n", 341 341 mmc_hostname(card->host), card->rca); 342 342 } 343 343 device_del(&card->dev);
+387 -39
drivers/mmc/core/core.c
··· 24 24 #include <linux/regulator/consumer.h> 25 25 #include <linux/pm_runtime.h> 26 26 #include <linux/suspend.h> 27 + #include <linux/fault-inject.h> 28 + #include <linux/random.h> 27 29 28 30 #include <linux/mmc/card.h> 29 31 #include <linux/mmc/host.h> ··· 85 83 flush_workqueue(workqueue); 86 84 } 87 85 86 + #ifdef CONFIG_FAIL_MMC_REQUEST 87 + 88 + /* 89 + * Internal function. Inject random data errors. 90 + * If mmc_data is NULL no errors are injected. 91 + */ 92 + static void mmc_should_fail_request(struct mmc_host *host, 93 + struct mmc_request *mrq) 94 + { 95 + struct mmc_command *cmd = mrq->cmd; 96 + struct mmc_data *data = mrq->data; 97 + static const int data_errors[] = { 98 + -ETIMEDOUT, 99 + -EILSEQ, 100 + -EIO, 101 + }; 102 + 103 + if (!data) 104 + return; 105 + 106 + if (cmd->error || data->error || 107 + !should_fail(&host->fail_mmc_request, data->blksz * data->blocks)) 108 + return; 109 + 110 + data->error = data_errors[random32() % ARRAY_SIZE(data_errors)]; 111 + data->bytes_xfered = (random32() % (data->bytes_xfered >> 9)) << 9; 112 + } 113 + 114 + #else /* CONFIG_FAIL_MMC_REQUEST */ 115 + 116 + static inline void mmc_should_fail_request(struct mmc_host *host, 117 + struct mmc_request *mrq) 118 + { 119 + } 120 + 121 + #endif /* CONFIG_FAIL_MMC_REQUEST */ 122 + 88 123 /** 89 124 * mmc_request_done - finish processing an MMC request 90 125 * @host: MMC host which completed request ··· 141 102 } 142 103 143 104 if (err && cmd->retries) { 144 - pr_debug("%s: req failed (CMD%u): %d, retrying...\n", 145 - mmc_hostname(host), cmd->opcode, err); 146 - 147 - cmd->retries--; 148 - cmd->error = 0; 149 - host->ops->request(host, mrq); 105 + /* 106 + * Request starter must handle retries - see 107 + * mmc_wait_for_req_done(). 
108 + */ 109 + if (mrq->done) 110 + mrq->done(mrq); 150 111 } else { 112 + mmc_should_fail_request(host, mrq); 113 + 151 114 led_trigger_event(host->led, LED_OFF); 152 115 153 116 pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n", ··· 253 212 static void mmc_wait_for_req_done(struct mmc_host *host, 254 213 struct mmc_request *mrq) 255 214 { 256 - wait_for_completion(&mrq->completion); 215 + struct mmc_command *cmd; 216 + 217 + while (1) { 218 + wait_for_completion(&mrq->completion); 219 + 220 + cmd = mrq->cmd; 221 + if (!cmd->error || !cmd->retries) 222 + break; 223 + 224 + pr_debug("%s: req failed (CMD%u): %d, retrying...\n", 225 + mmc_hostname(host), cmd->opcode, cmd->error); 226 + cmd->retries--; 227 + cmd->error = 0; 228 + host->ops->request(host, mrq); 229 + } 257 230 } 258 231 259 232 /** ··· 334 279 mmc_wait_for_req_done(host, host->areq->mrq); 335 280 err = host->areq->err_check(host->card, host->areq); 336 281 if (err) { 282 + /* post process the completed failed request */ 337 283 mmc_post_req(host, host->areq->mrq, 0); 338 284 if (areq) 285 + /* 286 + * Cancel the new prepared request, because 287 + * it can't run until the failed 288 + * request has been properly handled. 289 + */ 339 290 mmc_post_req(host, areq->mrq, -EINVAL); 340 291 341 292 host->areq = NULL; ··· 380 319 EXPORT_SYMBOL(mmc_wait_for_req); 381 320 382 321 /** 322 + * mmc_interrupt_hpi - Issue for High priority Interrupt 323 + * @card: the MMC card associated with the HPI transfer 324 + * 325 + * Issued High Priority Interrupt, and check for card status 326 + * util out-of prg-state. 
327 + */ 328 + int mmc_interrupt_hpi(struct mmc_card *card) 329 + { 330 + int err; 331 + u32 status; 332 + 333 + BUG_ON(!card); 334 + 335 + if (!card->ext_csd.hpi_en) { 336 + pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host)); 337 + return 1; 338 + } 339 + 340 + mmc_claim_host(card->host); 341 + err = mmc_send_status(card, &status); 342 + if (err) { 343 + pr_err("%s: Get card status fail\n", mmc_hostname(card->host)); 344 + goto out; 345 + } 346 + 347 + /* 348 + * If the card status is in PRG-state, we can send the HPI command. 349 + */ 350 + if (R1_CURRENT_STATE(status) == R1_STATE_PRG) { 351 + do { 352 + /* 353 + * We don't know when the HPI command will finish 354 + * processing, so we need to resend HPI until out 355 + * of prg-state, and keep checking the card status 356 + * with SEND_STATUS. If a timeout error occurs when 357 + * sending the HPI command, we are already out of 358 + * prg-state. 359 + */ 360 + err = mmc_send_hpi_cmd(card, &status); 361 + if (err) 362 + pr_debug("%s: abort HPI (%d error)\n", 363 + mmc_hostname(card->host), err); 364 + 365 + err = mmc_send_status(card, &status); 366 + if (err) 367 + break; 368 + } while (R1_CURRENT_STATE(status) == R1_STATE_PRG); 369 + } else 370 + pr_debug("%s: Left prg-state\n", mmc_hostname(card->host)); 371 + 372 + out: 373 + mmc_release_host(card->host); 374 + return err; 375 + } 376 + EXPORT_SYMBOL(mmc_interrupt_hpi); 377 + 378 + /** 383 379 * mmc_wait_for_cmd - start a command and wait for completion 384 380 * @host: MMC host to start command 385 381 * @cmd: MMC command to start ··· 448 330 */ 449 331 int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries) 450 332 { 451 - struct mmc_request mrq = {0}; 333 + struct mmc_request mrq = {NULL}; 452 334 453 335 WARN_ON(!host->claimed); 454 336 ··· 1237 1119 bit = fls(host->ocr_avail) - 1; 1238 1120 1239 1121 host->ios.vdd = bit; 1240 - if (mmc_host_is_spi(host)) { 1122 + if (mmc_host_is_spi(host)) 1241 1123 
host->ios.chip_select = MMC_CS_HIGH; 1242 - host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; 1243 - } else { 1124 + else 1244 1125 host->ios.chip_select = MMC_CS_DONTCARE; 1245 - host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; 1246 - } 1126 + host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; 1247 1127 host->ios.power_mode = MMC_POWER_UP; 1248 1128 host->ios.bus_width = MMC_BUS_WIDTH_1; 1249 1129 host->ios.timing = MMC_TIMING_LEGACY; ··· 1267 1151 mmc_host_clk_release(host); 1268 1152 } 1269 1153 1270 - static void mmc_power_off(struct mmc_host *host) 1154 + void mmc_power_off(struct mmc_host *host) 1271 1155 { 1156 + struct mmc_card *card; 1157 + unsigned int notify_type; 1158 + unsigned int timeout; 1159 + int err; 1160 + 1272 1161 mmc_host_clk_hold(host); 1273 1162 1163 + card = host->card; 1274 1164 host->ios.clock = 0; 1275 1165 host->ios.vdd = 0; 1166 + 1167 + if (card && mmc_card_mmc(card) && 1168 + (card->poweroff_notify_state == MMC_POWERED_ON)) { 1169 + 1170 + if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) { 1171 + notify_type = EXT_CSD_POWER_OFF_SHORT; 1172 + timeout = card->ext_csd.generic_cmd6_time; 1173 + card->poweroff_notify_state = MMC_POWEROFF_SHORT; 1174 + } else { 1175 + notify_type = EXT_CSD_POWER_OFF_LONG; 1176 + timeout = card->ext_csd.power_off_longtime; 1177 + card->poweroff_notify_state = MMC_POWEROFF_LONG; 1178 + } 1179 + 1180 + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1181 + EXT_CSD_POWER_OFF_NOTIFICATION, 1182 + notify_type, timeout); 1183 + 1184 + if (err && err != -EBADMSG) 1185 + pr_err("Device failed to respond within %d poweroff " 1186 + "time. 
Forcefully powering down the device\n", 1187 + timeout); 1188 + 1189 + /* Set the card state to no notification after the poweroff */ 1190 + card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION; 1191 + } 1276 1192 1277 1193 /* 1278 1194 * Reset ocr mask to be the highest possible voltage supported for ··· 1320 1172 host->ios.bus_width = MMC_BUS_WIDTH_1; 1321 1173 host->ios.timing = MMC_TIMING_LEGACY; 1322 1174 mmc_set_ios(host); 1175 + 1176 + /* 1177 + * Some configurations, such as the 802.11 SDIO card in the OLPC 1178 + * XO-1.5, require a short delay after poweroff before the card 1179 + * can be successfully turned on again. 1180 + */ 1181 + mmc_delay(1); 1323 1182 1324 1183 mmc_host_clk_release(host); 1325 1184 } ··· 1396 1241 } 1397 1242 1398 1243 /* 1399 - * Remove the current bus handler from a host. Assumes that there are 1400 - * no interesting cards left, so the bus is powered down. 1244 + * Remove the current bus handler from a host. 1401 1245 */ 1402 1246 void mmc_detach_bus(struct mmc_host *host) 1403 1247 { ··· 1412 1258 host->bus_dead = 1; 1413 1259 1414 1260 spin_unlock_irqrestore(&host->lock, flags); 1415 - 1416 - mmc_power_off(host); 1417 1261 1418 1262 mmc_bus_put(host); 1419 1263 } ··· 1630 1478 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 1631 1479 err = mmc_wait_for_cmd(card->host, &cmd, 0); 1632 1480 if (err) { 1633 - printk(KERN_ERR "mmc_erase: group start error %d, " 1481 + pr_err("mmc_erase: group start error %d, " 1634 1482 "status %#x\n", err, cmd.resp[0]); 1635 - err = -EINVAL; 1483 + err = -EIO; 1636 1484 goto out; 1637 1485 } 1638 1486 ··· 1645 1493 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 1646 1494 err = mmc_wait_for_cmd(card->host, &cmd, 0); 1647 1495 if (err) { 1648 - printk(KERN_ERR "mmc_erase: group end error %d, status %#x\n", 1496 + pr_err("mmc_erase: group end error %d, status %#x\n", 1649 1497 err, cmd.resp[0]); 1650 - err = -EINVAL; 1498 + err = -EIO; 1651 1499 goto out; 1652 1500 } 1653 1501 ··· 
1658 1506 cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty); 1659 1507 err = mmc_wait_for_cmd(card->host, &cmd, 0); 1660 1508 if (err) { 1661 - printk(KERN_ERR "mmc_erase: erase error %d, status %#x\n", 1509 + pr_err("mmc_erase: erase error %d, status %#x\n", 1662 1510 err, cmd.resp[0]); 1663 1511 err = -EIO; 1664 1512 goto out; ··· 1675 1523 /* Do not retry else we can't see errors */ 1676 1524 err = mmc_wait_for_cmd(card->host, &cmd, 0); 1677 1525 if (err || (cmd.resp[0] & 0xFDF92000)) { 1678 - printk(KERN_ERR "error %d requesting status %#x\n", 1526 + pr_err("error %d requesting status %#x\n", 1679 1527 err, cmd.resp[0]); 1680 1528 err = -EIO; 1681 1529 goto out; ··· 1766 1614 { 1767 1615 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) 1768 1616 return 1; 1617 + if (mmc_can_discard(card)) 1618 + return 1; 1769 1619 return 0; 1770 1620 } 1771 1621 EXPORT_SYMBOL(mmc_can_trim); 1622 + 1623 + int mmc_can_discard(struct mmc_card *card) 1624 + { 1625 + /* 1626 + * As there's no way to detect the discard support bit at v4.5 1627 + * use the s/w feature support filed. 
1628 + */ 1629 + if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE) 1630 + return 1; 1631 + return 0; 1632 + } 1633 + EXPORT_SYMBOL(mmc_can_discard); 1634 + 1635 + int mmc_can_sanitize(struct mmc_card *card) 1636 + { 1637 + if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE) 1638 + return 1; 1639 + return 0; 1640 + } 1641 + EXPORT_SYMBOL(mmc_can_sanitize); 1772 1642 1773 1643 int mmc_can_secure_erase_trim(struct mmc_card *card) 1774 1644 { ··· 1901 1727 } 1902 1728 EXPORT_SYMBOL(mmc_set_blocklen); 1903 1729 1730 + static void mmc_hw_reset_for_init(struct mmc_host *host) 1731 + { 1732 + if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset) 1733 + return; 1734 + mmc_host_clk_hold(host); 1735 + host->ops->hw_reset(host); 1736 + mmc_host_clk_release(host); 1737 + } 1738 + 1739 + int mmc_can_reset(struct mmc_card *card) 1740 + { 1741 + u8 rst_n_function; 1742 + 1743 + if (!mmc_card_mmc(card)) 1744 + return 0; 1745 + rst_n_function = card->ext_csd.rst_n_function; 1746 + if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED) 1747 + return 0; 1748 + return 1; 1749 + } 1750 + EXPORT_SYMBOL(mmc_can_reset); 1751 + 1752 + static int mmc_do_hw_reset(struct mmc_host *host, int check) 1753 + { 1754 + struct mmc_card *card = host->card; 1755 + 1756 + if (!host->bus_ops->power_restore) 1757 + return -EOPNOTSUPP; 1758 + 1759 + if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset) 1760 + return -EOPNOTSUPP; 1761 + 1762 + if (!card) 1763 + return -EINVAL; 1764 + 1765 + if (!mmc_can_reset(card)) 1766 + return -EOPNOTSUPP; 1767 + 1768 + mmc_host_clk_hold(host); 1769 + mmc_set_clock(host, host->f_init); 1770 + 1771 + host->ops->hw_reset(host); 1772 + 1773 + /* If the reset has happened, then a status command will fail */ 1774 + if (check) { 1775 + struct mmc_command cmd = {0}; 1776 + int err; 1777 + 1778 + cmd.opcode = MMC_SEND_STATUS; 1779 + if (!mmc_host_is_spi(card->host)) 1780 + cmd.arg = card->rca << 16; 1781 + cmd.flags = 
MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; 1782 + err = mmc_wait_for_cmd(card->host, &cmd, 0); 1783 + if (!err) { 1784 + mmc_host_clk_release(host); 1785 + return -ENOSYS; 1786 + } 1787 + } 1788 + 1789 + host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR); 1790 + if (mmc_host_is_spi(host)) { 1791 + host->ios.chip_select = MMC_CS_HIGH; 1792 + host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; 1793 + } else { 1794 + host->ios.chip_select = MMC_CS_DONTCARE; 1795 + host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; 1796 + } 1797 + host->ios.bus_width = MMC_BUS_WIDTH_1; 1798 + host->ios.timing = MMC_TIMING_LEGACY; 1799 + mmc_set_ios(host); 1800 + 1801 + mmc_host_clk_release(host); 1802 + 1803 + return host->bus_ops->power_restore(host); 1804 + } 1805 + 1806 + int mmc_hw_reset(struct mmc_host *host) 1807 + { 1808 + return mmc_do_hw_reset(host, 0); 1809 + } 1810 + EXPORT_SYMBOL(mmc_hw_reset); 1811 + 1812 + int mmc_hw_reset_check(struct mmc_host *host) 1813 + { 1814 + return mmc_do_hw_reset(host, 1); 1815 + } 1816 + EXPORT_SYMBOL(mmc_hw_reset_check); 1817 + 1904 1818 static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq) 1905 1819 { 1906 1820 host->f_init = freq; ··· 1998 1736 mmc_hostname(host), __func__, host->f_init); 1999 1737 #endif 2000 1738 mmc_power_up(host); 1739 + 1740 + /* 1741 + * Some eMMCs (with VCCQ always on) may not be reset after power up, so 1742 + * do a hardware reset if possible. 1743 + */ 1744 + mmc_hw_reset_for_init(host); 2001 1745 2002 1746 /* 2003 1747 * sdio_reset sends CMD52 to reset card. Since we do not know ··· 2113 1845 2114 1846 mmc_claim_host(host); 2115 1847 mmc_detach_bus(host); 1848 + mmc_power_off(host); 2116 1849 mmc_release_host(host); 2117 1850 mmc_bus_put(host); 2118 1851 return; ··· 2215 1946 } 2216 1947 EXPORT_SYMBOL(mmc_card_can_sleep); 2217 1948 1949 + /* 1950 + * Flush the cache to the non-volatile storage. 
1951 + */ 1952 + int mmc_flush_cache(struct mmc_card *card) 1953 + { 1954 + struct mmc_host *host = card->host; 1955 + int err = 0; 1956 + 1957 + if (!(host->caps2 & MMC_CAP2_CACHE_CTRL)) 1958 + return err; 1959 + 1960 + if (mmc_card_mmc(card) && 1961 + (card->ext_csd.cache_size > 0) && 1962 + (card->ext_csd.cache_ctrl & 1)) { 1963 + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1964 + EXT_CSD_FLUSH_CACHE, 1, 0); 1965 + if (err) 1966 + pr_err("%s: cache flush error %d\n", 1967 + mmc_hostname(card->host), err); 1968 + } 1969 + 1970 + return err; 1971 + } 1972 + EXPORT_SYMBOL(mmc_flush_cache); 1973 + 1974 + /* 1975 + * Turn the cache ON/OFF. 1976 + * Turning the cache OFF shall trigger flushing of the data 1977 + * to the non-volatile storage. 1978 + */ 1979 + int mmc_cache_ctrl(struct mmc_host *host, u8 enable) 1980 + { 1981 + struct mmc_card *card = host->card; 1982 + int err = 0; 1983 + 1984 + if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) || 1985 + mmc_card_is_removable(host)) 1986 + return err; 1987 + 1988 + if (card && mmc_card_mmc(card) && 1989 + (card->ext_csd.cache_size > 0)) { 1990 + enable = !!enable; 1991 + 1992 + if (card->ext_csd.cache_ctrl ^ enable) 1993 + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1994 + EXT_CSD_CACHE_CTRL, enable, 0); 1995 + if (err) 1996 + pr_err("%s: cache %s error %d\n", 1997 + mmc_hostname(card->host), 1998 + enable ? 
"on" : "off", 1999 + err); 2000 + else 2001 + card->ext_csd.cache_ctrl = enable; 2002 + } 2003 + 2004 + return err; 2005 + } 2006 + EXPORT_SYMBOL(mmc_cache_ctrl); 2007 + 2218 2008 #ifdef CONFIG_PM 2219 2009 2220 2010 /** ··· 2288 1960 cancel_delayed_work(&host->disable); 2289 1961 cancel_delayed_work(&host->detect); 2290 1962 mmc_flush_scheduled_work(); 1963 + err = mmc_cache_ctrl(host, 0); 1964 + if (err) 1965 + goto out; 2291 1966 2292 1967 mmc_bus_get(host); 2293 1968 if (host->bus_ops && !host->bus_dead) { 2294 - if (host->bus_ops->suspend) 2295 - err = host->bus_ops->suspend(host); 2296 - if (err == -ENOSYS || !host->bus_ops->resume) { 2297 - /* 2298 - * We simply "remove" the card in this case. 2299 - * It will be redetected on resume. 2300 - */ 2301 - if (host->bus_ops->remove) 2302 - host->bus_ops->remove(host); 2303 - mmc_claim_host(host); 2304 - mmc_detach_bus(host); 2305 - mmc_release_host(host); 2306 - host->pm_flags = 0; 2307 - err = 0; 1969 + 1970 + /* 1971 + * A long response time is not acceptable for device drivers 1972 + * when doing suspend. Prevent mmc_claim_host in the suspend 1973 + * sequence, to potentially wait "forever" by trying to 1974 + * pre-claim the host. 1975 + */ 1976 + if (mmc_try_claim_host(host)) { 1977 + if (host->bus_ops->suspend) 1978 + err = host->bus_ops->suspend(host); 1979 + if (err == -ENOSYS || !host->bus_ops->resume) { 1980 + /* 1981 + * We simply "remove" the card in this case. 1982 + * It will be redetected on resume. 
1983 + */ 1984 + if (host->bus_ops->remove) 1985 + host->bus_ops->remove(host); 1986 + mmc_claim_host(host); 1987 + mmc_detach_bus(host); 1988 + mmc_power_off(host); 1989 + mmc_release_host(host); 1990 + host->pm_flags = 0; 1991 + err = 0; 1992 + } 1993 + mmc_do_release_host(host); 1994 + } else { 1995 + err = -EBUSY; 2308 1996 } 2309 1997 } 2310 1998 mmc_bus_put(host); ··· 2328 1984 if (!err && !mmc_card_keep_power(host)) 2329 1985 mmc_power_off(host); 2330 1986 1987 + out: 2331 1988 return err; 2332 1989 } 2333 1990 ··· 2363 2018 BUG_ON(!host->bus_ops->resume); 2364 2019 err = host->bus_ops->resume(host); 2365 2020 if (err) { 2366 - printk(KERN_WARNING "%s: error %d during resume " 2021 + pr_warning("%s: error %d during resume " 2367 2022 "(card was removed?)\n", 2368 2023 mmc_hostname(host), err); 2369 2024 err = 0; ··· 2394 2049 2395 2050 spin_lock_irqsave(&host->lock, flags); 2396 2051 host->rescan_disable = 1; 2052 + host->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT; 2397 2053 spin_unlock_irqrestore(&host->lock, flags); 2398 2054 cancel_delayed_work_sync(&host->detect); 2399 2055 ··· 2407 2061 host->bus_ops->remove(host); 2408 2062 2409 2063 mmc_detach_bus(host); 2064 + mmc_power_off(host); 2410 2065 mmc_release_host(host); 2411 2066 host->pm_flags = 0; 2412 2067 break; ··· 2418 2071 2419 2072 spin_lock_irqsave(&host->lock, flags); 2420 2073 host->rescan_disable = 0; 2074 + host->power_notify_type = MMC_HOST_PW_NOTIFY_LONG; 2421 2075 spin_unlock_irqrestore(&host->lock, flags); 2422 2076 mmc_detect_change(host, 0); 2423 2077
+1
drivers/mmc/core/core.h
··· 43 43 bool cmd11); 44 44 void mmc_set_timing(struct mmc_host *host, unsigned int timing); 45 45 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type); 46 + void mmc_power_off(struct mmc_host *host); 46 47 47 48 static inline void mmc_delay(unsigned int ms) 48 49 {
+28
drivers/mmc/core/debugfs.c
··· 7 7 * it under the terms of the GNU General Public License version 2 as 8 8 * published by the Free Software Foundation. 9 9 */ 10 + #include <linux/moduleparam.h> 10 11 #include <linux/debugfs.h> 11 12 #include <linux/fs.h> 12 13 #include <linux/seq_file.h> 13 14 #include <linux/slab.h> 14 15 #include <linux/stat.h> 16 + #include <linux/fault-inject.h> 15 17 16 18 #include <linux/mmc/card.h> 17 19 #include <linux/mmc/host.h> 18 20 19 21 #include "core.h" 20 22 #include "mmc_ops.h" 23 + 24 + #ifdef CONFIG_FAIL_MMC_REQUEST 25 + 26 + static DECLARE_FAULT_ATTR(fail_default_attr); 27 + static char *fail_request; 28 + module_param(fail_request, charp, 0); 29 + 30 + #endif /* CONFIG_FAIL_MMC_REQUEST */ 21 31 22 32 /* The debugfs functions are optimized away when CONFIG_DEBUG_FS isn't set. */ 23 33 static int mmc_ios_show(struct seq_file *s, void *data) ··· 123 113 case MMC_TIMING_SD_HS: 124 114 str = "sd high-speed"; 125 115 break; 116 + case MMC_TIMING_UHS_SDR50: 117 + str = "sd uhs SDR50"; 118 + break; 119 + case MMC_TIMING_UHS_SDR104: 120 + str = "sd uhs SDR104"; 121 + break; 122 + case MMC_TIMING_UHS_DDR50: 123 + str = "sd uhs DDR50"; 124 + break; 126 125 default: 127 126 str = "invalid"; 128 127 break; ··· 205 186 #ifdef CONFIG_MMC_CLKGATE 206 187 if (!debugfs_create_u32("clk_delay", (S_IRUSR | S_IWUSR), 207 188 root, &host->clk_delay)) 189 + goto err_node; 190 + #endif 191 + #ifdef CONFIG_FAIL_MMC_REQUEST 192 + if (fail_request) 193 + setup_fault_attr(&fail_default_attr, fail_request); 194 + host->fail_mmc_request = fail_default_attr; 195 + if (IS_ERR(fault_create_debugfs_attr("fail_mmc_request", 196 + root, 197 + &host->fail_mmc_request))) 208 198 goto err_node; 209 199 #endif 210 200 return;
+11
drivers/mmc/core/host.c
··· 301 301 host->max_blk_size = 512; 302 302 host->max_blk_count = PAGE_CACHE_SIZE / 512; 303 303 304 + /* 305 + * Enable runtime power management by default. This flag was added due 306 + * to runtime power management causing disruption for some users, but 307 + * the power on/off code has been improved since then. 308 + * 309 + * We'll enable this flag by default as an experiment, and if no 310 + * problems are reported, we will follow up later and remove the flag 311 + * altogether. 312 + */ 313 + host->caps = MMC_CAP_POWER_OFF_CARD; 314 + 304 315 return host; 305 316 306 317 free:
+265 -24
drivers/mmc/core/mmc.c
··· 101 101 break; 102 102 103 103 default: 104 - printk(KERN_ERR "%s: card has unknown MMCA version %d\n", 104 + pr_err("%s: card has unknown MMCA version %d\n", 105 105 mmc_hostname(card->host), card->csd.mmca_vsn); 106 106 return -EINVAL; 107 107 } ··· 135 135 */ 136 136 csd->structure = UNSTUFF_BITS(resp, 126, 2); 137 137 if (csd->structure == 0) { 138 - printk(KERN_ERR "%s: unrecognised CSD structure version %d\n", 138 + pr_err("%s: unrecognised CSD structure version %d\n", 139 139 mmc_hostname(card->host), csd->structure); 140 140 return -EINVAL; 141 141 } ··· 195 195 */ 196 196 ext_csd = kmalloc(512, GFP_KERNEL); 197 197 if (!ext_csd) { 198 - printk(KERN_ERR "%s: could not allocate a buffer to " 198 + pr_err("%s: could not allocate a buffer to " 199 199 "receive the ext_csd.\n", mmc_hostname(card->host)); 200 200 return -ENOMEM; 201 201 } ··· 217 217 * stored in their CSD. 218 218 */ 219 219 if (card->csd.capacity == (4096 * 512)) { 220 - printk(KERN_ERR "%s: unable to read EXT_CSD " 220 + pr_err("%s: unable to read EXT_CSD " 221 221 "on a possible high capacity card. 
" 222 222 "Card will be ignored.\n", 223 223 mmc_hostname(card->host)); 224 224 } else { 225 - printk(KERN_WARNING "%s: unable to read " 225 + pr_warning("%s: unable to read " 226 226 "EXT_CSD, performance might " 227 227 "suffer.\n", 228 228 mmc_hostname(card->host)); ··· 239 239 */ 240 240 static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) 241 241 { 242 - int err = 0; 242 + int err = 0, idx; 243 + unsigned int part_size; 244 + u8 hc_erase_grp_sz = 0, hc_wp_grp_sz = 0; 243 245 244 246 BUG_ON(!card); 245 247 ··· 252 250 card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE]; 253 251 if (card->csd.structure == 3) { 254 252 if (card->ext_csd.raw_ext_csd_structure > 2) { 255 - printk(KERN_ERR "%s: unrecognised EXT_CSD structure " 253 + pr_err("%s: unrecognised EXT_CSD structure " 256 254 "version %d\n", mmc_hostname(card->host), 257 255 card->ext_csd.raw_ext_csd_structure); 258 256 err = -EINVAL; ··· 262 260 263 261 card->ext_csd.rev = ext_csd[EXT_CSD_REV]; 264 262 if (card->ext_csd.rev > 6) { 265 - printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n", 263 + pr_err("%s: unrecognised EXT_CSD revision %d\n", 266 264 mmc_hostname(card->host), card->ext_csd.rev); 267 265 err = -EINVAL; 268 266 goto out; ··· 308 306 break; 309 307 default: 310 308 /* MMC v4 spec says this cannot happen */ 311 - printk(KERN_WARNING "%s: card is mmc v4 but doesn't " 309 + pr_warning("%s: card is mmc v4 but doesn't " 312 310 "support any high-speed modes.\n", 313 311 mmc_hostname(card->host)); 314 312 } ··· 342 340 * There are two boot regions of equal size, defined in 343 341 * multiples of 128K. 
344 342 */ 345 - card->ext_csd.boot_size = ext_csd[EXT_CSD_BOOT_MULT] << 17; 343 + if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) { 344 + for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) { 345 + part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17; 346 + mmc_part_add(card, part_size, 347 + EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx, 348 + "boot%d", idx, true); 349 + } 350 + } 346 351 } 347 352 348 353 card->ext_csd.raw_hc_erase_gap_size = ··· 368 359 * card has the Enhanced area enabled. If so, export enhanced 369 360 * area offset and size to user by adding sysfs interface. 370 361 */ 362 + card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT]; 371 363 if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) && 372 364 (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) { 373 - u8 hc_erase_grp_sz = 365 + hc_erase_grp_sz = 374 366 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; 375 - u8 hc_wp_grp_sz = 367 + hc_wp_grp_sz = 376 368 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 377 369 378 370 card->ext_csd.enhanced_area_en = 1; ··· 402 392 card->ext_csd.enhanced_area_offset = -EINVAL; 403 393 card->ext_csd.enhanced_area_size = -EINVAL; 404 394 } 395 + 396 + /* 397 + * General purpose partition feature support -- 398 + * If ext_csd has the size of general purpose partitions, 399 + * set size, part_cfg, partition name in mmc_part. 
400 + */ 401 + if (ext_csd[EXT_CSD_PARTITION_SUPPORT] & 402 + EXT_CSD_PART_SUPPORT_PART_EN) { 403 + if (card->ext_csd.enhanced_area_en != 1) { 404 + hc_erase_grp_sz = 405 + ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; 406 + hc_wp_grp_sz = 407 + ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 408 + 409 + card->ext_csd.enhanced_area_en = 1; 410 + } 411 + 412 + for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) { 413 + if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] && 414 + !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] && 415 + !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]) 416 + continue; 417 + part_size = 418 + (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2] 419 + << 16) + 420 + (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] 421 + << 8) + 422 + ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3]; 423 + part_size *= (size_t)(hc_erase_grp_sz * 424 + hc_wp_grp_sz); 425 + mmc_part_add(card, part_size << 19, 426 + EXT_CSD_PART_CONFIG_ACC_GP0 + idx, 427 + "gp%d", idx, false); 428 + } 429 + } 405 430 card->ext_csd.sec_trim_mult = 406 431 ext_csd[EXT_CSD_SEC_TRIM_MULT]; 407 432 card->ext_csd.sec_erase_mult = ··· 447 402 ext_csd[EXT_CSD_TRIM_MULT]; 448 403 } 449 404 450 - if (card->ext_csd.rev >= 5) 451 - card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM]; 405 + if (card->ext_csd.rev >= 5) { 406 + /* check whether the eMMC card supports HPI */ 407 + if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) { 408 + card->ext_csd.hpi = 1; 409 + if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2) 410 + card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION; 411 + else 412 + card->ext_csd.hpi_cmd = MMC_SEND_STATUS; 413 + /* 414 + * Indicate the maximum timeout to close 415 + * a command interrupted by HPI 416 + */ 417 + card->ext_csd.out_of_int_time = 418 + ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10; 419 + } 452 420 421 + card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM]; 422 + card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION]; 423 + } 424 + 425 + card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT]; 453 426 if 
(ext_csd[EXT_CSD_ERASED_MEM_CONT]) 454 427 card->erased_byte = 0xFF; 455 428 else 456 429 card->erased_byte = 0x0; 430 + 431 + /* eMMC v4.5 or later */ 432 + if (card->ext_csd.rev >= 6) { 433 + card->ext_csd.feature_support |= MMC_DISCARD_FEATURE; 434 + 435 + card->ext_csd.generic_cmd6_time = 10 * 436 + ext_csd[EXT_CSD_GENERIC_CMD6_TIME]; 437 + card->ext_csd.power_off_longtime = 10 * 438 + ext_csd[EXT_CSD_POWER_OFF_LONG_TIME]; 439 + 440 + card->ext_csd.cache_size = 441 + ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 | 442 + ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 | 443 + ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 | 444 + ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24; 445 + } 457 446 458 447 out: 459 448 return err; ··· 609 530 }; 610 531 611 532 /* 533 + * Select the PowerClass for the current bus width 534 + * If power class is defined for 4/8 bit bus in the 535 + * extended CSD register, select it by executing the 536 + * mmc_switch command. 537 + */ 538 + static int mmc_select_powerclass(struct mmc_card *card, 539 + unsigned int bus_width, u8 *ext_csd) 540 + { 541 + int err = 0; 542 + unsigned int pwrclass_val; 543 + unsigned int index = 0; 544 + struct mmc_host *host; 545 + 546 + BUG_ON(!card); 547 + 548 + host = card->host; 549 + BUG_ON(!host); 550 + 551 + if (ext_csd == NULL) 552 + return 0; 553 + 554 + /* Power class selection is supported for versions >= 4.0 */ 555 + if (card->csd.mmca_vsn < CSD_SPEC_VER_4) 556 + return 0; 557 + 558 + /* Power class values are defined only for 4/8 bit bus */ 559 + if (bus_width == EXT_CSD_BUS_WIDTH_1) 560 + return 0; 561 + 562 + switch (1 << host->ios.vdd) { 563 + case MMC_VDD_165_195: 564 + if (host->ios.clock <= 26000000) 565 + index = EXT_CSD_PWR_CL_26_195; 566 + else if (host->ios.clock <= 52000000) 567 + index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ? 
568 + EXT_CSD_PWR_CL_52_195 : 569 + EXT_CSD_PWR_CL_DDR_52_195; 570 + else if (host->ios.clock <= 200000000) 571 + index = EXT_CSD_PWR_CL_200_195; 572 + break; 573 + case MMC_VDD_32_33: 574 + case MMC_VDD_33_34: 575 + case MMC_VDD_34_35: 576 + case MMC_VDD_35_36: 577 + if (host->ios.clock <= 26000000) 578 + index = EXT_CSD_PWR_CL_26_360; 579 + else if (host->ios.clock <= 52000000) 580 + index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ? 581 + EXT_CSD_PWR_CL_52_360 : 582 + EXT_CSD_PWR_CL_DDR_52_360; 583 + else if (host->ios.clock <= 200000000) 584 + index = EXT_CSD_PWR_CL_200_360; 585 + break; 586 + default: 587 + pr_warning("%s: Voltage range not supported " 588 + "for power class.\n", mmc_hostname(host)); 589 + return -EINVAL; 590 + } 591 + 592 + pwrclass_val = ext_csd[index]; 593 + 594 + if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8)) 595 + pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >> 596 + EXT_CSD_PWR_CL_8BIT_SHIFT; 597 + else 598 + pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >> 599 + EXT_CSD_PWR_CL_4BIT_SHIFT; 600 + 601 + /* If the power class is different from the default value */ 602 + if (pwrclass_val > 0) { 603 + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 604 + EXT_CSD_POWER_CLASS, 605 + pwrclass_val, 606 + card->ext_csd.generic_cmd6_time); 607 + } 608 + 609 + return err; 610 + } 611 + 612 + /* 612 613 * Handle the detection and initialisation of a card. 613 614 * 614 615 * In the case of a resume, "oldcard" will contain the card ··· 707 548 BUG_ON(!host); 708 549 WARN_ON(!host->claimed); 709 550 551 + /* Set correct bus mode for MMC before attempting init */ 552 + if (!mmc_host_is_spi(host)) 553 + mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN); 554 + 710 555 /* 711 556 * Since we're changing the OCR value, we seem to 712 557 * need to tell some cards to go back to the idle 713 558 * state. We wait 1ms to give cards time to 714 559 * respond. 
560 + * mmc_go_idle is needed for eMMC that are asleep 715 561 */ 716 562 mmc_go_idle(host); 717 563 ··· 832 668 */ 833 669 if (card->ext_csd.enhanced_area_en) { 834 670 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 835 - EXT_CSD_ERASE_GROUP_DEF, 1, 0); 671 + EXT_CSD_ERASE_GROUP_DEF, 1, 672 + card->ext_csd.generic_cmd6_time); 836 673 837 674 if (err && err != -EBADMSG) 838 675 goto free_card; ··· 871 706 } 872 707 873 708 /* 709 + * If the host supports the power_off_notify capability then 710 + * set the notification byte in the ext_csd register of device 711 + */ 712 + if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) && 713 + (card->poweroff_notify_state == MMC_NO_POWER_NOTIFICATION)) { 714 + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 715 + EXT_CSD_POWER_OFF_NOTIFICATION, 716 + EXT_CSD_POWER_ON, 717 + card->ext_csd.generic_cmd6_time); 718 + if (err && err != -EBADMSG) 719 + goto free_card; 720 + } 721 + 722 + if (!err) 723 + card->poweroff_notify_state = MMC_POWERED_ON; 724 + 725 + /* 874 726 * Activate high speed (if supported) 875 727 */ 876 728 if ((card->ext_csd.hs_max_dtr != 0) && 877 729 (host->caps & MMC_CAP_MMC_HIGHSPEED)) { 878 730 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 879 - EXT_CSD_HS_TIMING, 1, 0); 731 + EXT_CSD_HS_TIMING, 1, 732 + card->ext_csd.generic_cmd6_time); 880 733 if (err && err != -EBADMSG) 881 734 goto free_card; 882 735 883 736 if (err) { 884 - printk(KERN_WARNING "%s: switch to highspeed failed\n", 737 + pr_warning("%s: switch to highspeed failed\n", 885 738 mmc_hostname(card->host)); 886 739 err = 0; 887 740 } else { 888 741 mmc_card_set_highspeed(card); 889 742 mmc_set_timing(card->host, MMC_TIMING_MMC_HS); 890 743 } 744 + } 745 + 746 + /* 747 + * Enable HPI feature (if supported) 748 + */ 749 + if (card->ext_csd.hpi) { 750 + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 751 + EXT_CSD_HPI_MGMT, 1, 0); 752 + if (err && err != -EBADMSG) 753 + goto free_card; 754 + if (err) { 755 + pr_warning("%s: Enabling HPI failed\n", 756 + 
mmc_hostname(card->host)); 757 + err = 0; 758 + } else 759 + card->ext_csd.hpi_en = 1; 891 760 } 892 761 893 762 /* ··· 979 780 bus_width = bus_widths[idx]; 980 781 if (bus_width == MMC_BUS_WIDTH_1) 981 782 ddr = 0; /* no DDR for 1-bit width */ 783 + err = mmc_select_powerclass(card, ext_csd_bits[idx][0], 784 + ext_csd); 785 + if (err) 786 + pr_err("%s: power class selection to " 787 + "bus width %d failed\n", 788 + mmc_hostname(card->host), 789 + 1 << bus_width); 790 + 982 791 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 983 792 EXT_CSD_BUS_WIDTH, 984 793 ext_csd_bits[idx][0], 985 - 0); 794 + card->ext_csd.generic_cmd6_time); 986 795 if (!err) { 987 796 mmc_set_bus_width(card->host, bus_width); 988 797 ··· 1010 803 } 1011 804 1012 805 if (!err && ddr) { 806 + err = mmc_select_powerclass(card, ext_csd_bits[idx][1], 807 + ext_csd); 808 + if (err) 809 + pr_err("%s: power class selection to " 810 + "bus width %d ddr %d failed\n", 811 + mmc_hostname(card->host), 812 + 1 << bus_width, ddr); 813 + 1013 814 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1014 815 EXT_CSD_BUS_WIDTH, 1015 816 ext_csd_bits[idx][1], 1016 - 0); 817 + card->ext_csd.generic_cmd6_time); 1017 818 } 1018 819 if (err) { 1019 - printk(KERN_WARNING "%s: switch to bus width %d ddr %d " 820 + pr_warning("%s: switch to bus width %d ddr %d " 1020 821 "failed\n", mmc_hostname(card->host), 1021 822 1 << bus_width, ddr); 1022 823 goto free_card; ··· 1053 838 mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50); 1054 839 mmc_set_bus_width(card->host, bus_width); 1055 840 } 841 + } 842 + 843 + /* 844 + * If cache size is higher than 0, this indicates 845 + * the existence of cache and it can be turned on. 846 + */ 847 + if ((host->caps2 & MMC_CAP2_CACHE_CTRL) && 848 + card->ext_csd.cache_size > 0) { 849 + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 850 + EXT_CSD_CACHE_CTRL, 1, 0); 851 + if (err && err != -EBADMSG) 852 + goto free_card; 853 + 854 + /* 855 + * Only if no error, cache is turned on successfully. 
856 + */ 857 + card->ext_csd.cache_ctrl = err ? 0 : 1; 1056 858 } 1057 859 1058 860 if (!oldcard) ··· 1123 891 1124 892 mmc_claim_host(host); 1125 893 mmc_detach_bus(host); 894 + mmc_power_off(host); 1126 895 mmc_release_host(host); 1127 896 } 1128 897 } ··· 1133 900 */ 1134 901 static int mmc_suspend(struct mmc_host *host) 1135 902 { 903 + int err = 0; 904 + 1136 905 BUG_ON(!host); 1137 906 BUG_ON(!host->card); 1138 907 1139 908 mmc_claim_host(host); 1140 - if (!mmc_host_is_spi(host)) 909 + if (mmc_card_can_sleep(host)) 910 + err = mmc_card_sleep(host); 911 + else if (!mmc_host_is_spi(host)) 1141 912 mmc_deselect_cards(host); 1142 913 host->card->state &= ~MMC_STATE_HIGHSPEED; 1143 914 mmc_release_host(host); 1144 915 1145 - return 0; 916 + return err; 1146 917 } 1147 918 1148 919 /* ··· 1253 1016 BUG_ON(!host); 1254 1017 WARN_ON(!host->claimed); 1255 1018 1019 + /* Set correct bus mode for MMC before attempting attach */ 1020 + if (!mmc_host_is_spi(host)) 1021 + mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN); 1022 + 1256 1023 err = mmc_send_op_cond(host, 0, &ocr); 1257 1024 if (err) 1258 1025 return err; ··· 1279 1038 * support. 1280 1039 */ 1281 1040 if (ocr & 0x7F) { 1282 - printk(KERN_WARNING "%s: card claims to support voltages " 1041 + pr_warning("%s: card claims to support voltages " 1283 1042 "below the defined range. These will be ignored.\n", 1284 1043 mmc_hostname(host)); 1285 1044 ocr &= ~0x7F; ··· 1318 1077 err: 1319 1078 mmc_detach_bus(host); 1320 1079 1321 - printk(KERN_ERR "%s: error %d whilst initialising MMC card\n", 1080 + pr_err("%s: error %d whilst initialising MMC card\n", 1322 1081 mmc_hostname(host), err); 1323 1082 1324 1083 return err;
+35 -4
drivers/mmc/core/mmc_ops.c
··· 233 233 mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host, 234 234 u32 opcode, void *buf, unsigned len) 235 235 { 236 - struct mmc_request mrq = {0}; 236 + struct mmc_request mrq = {NULL}; 237 237 struct mmc_command cmd = {0}; 238 238 struct mmc_data data = {0}; 239 239 struct scatterlist sg; ··· 414 414 return -EBADMSG; 415 415 } else { 416 416 if (status & 0xFDFFA000) 417 - printk(KERN_WARNING "%s: unexpected status %#x after " 417 + pr_warning("%s: unexpected status %#x after " 418 418 "switch", mmc_hostname(card->host), status); 419 419 if (status & R1_SWITCH_ERROR) 420 420 return -EBADMSG; ··· 454 454 mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode, 455 455 u8 len) 456 456 { 457 - struct mmc_request mrq = {0}; 457 + struct mmc_request mrq = {NULL}; 458 458 struct mmc_command cmd = {0}; 459 459 struct mmc_data data = {0}; 460 460 struct scatterlist sg; ··· 476 476 else if (len == 4) 477 477 test_buf = testdata_4bit; 478 478 else { 479 - printk(KERN_ERR "%s: Invalid bus_width %d\n", 479 + pr_err("%s: Invalid bus_width %d\n", 480 480 mmc_hostname(host), len); 481 481 kfree(data_buf); 482 482 return -EINVAL; ··· 546 546 mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width); 547 547 err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width); 548 548 return err; 549 + } 550 + 551 + int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status) 552 + { 553 + struct mmc_command cmd = {0}; 554 + unsigned int opcode; 555 + unsigned int flags; 556 + int err; 557 + 558 + opcode = card->ext_csd.hpi_cmd; 559 + if (opcode == MMC_STOP_TRANSMISSION) 560 + flags = MMC_RSP_R1 | MMC_CMD_AC; 561 + else if (opcode == MMC_SEND_STATUS) 562 + flags = MMC_RSP_R1 | MMC_CMD_AC; 563 + 564 + cmd.opcode = opcode; 565 + cmd.arg = card->rca << 16 | 1; 566 + cmd.flags = flags; 567 + cmd.cmd_timeout_ms = card->ext_csd.out_of_int_time; 568 + 569 + err = mmc_wait_for_cmd(card->host, &cmd, 0); 570 + if (err) { 571 + pr_warn("%s: error %d interrupting 
operation. " 572 + "HPI command response %#x\n", mmc_hostname(card->host), 573 + err, cmd.resp[0]); 574 + return err; 575 + } 576 + if (status) 577 + *status = cmd.resp[0]; 578 + 579 + return 0; 549 580 }
+1
drivers/mmc/core/mmc_ops.h
··· 26 26 int mmc_spi_set_crc(struct mmc_host *host, int use_crc); 27 27 int mmc_card_sleepawake(struct mmc_host *host, int sleep); 28 28 int mmc_bus_test(struct mmc_card *card, u8 bus_width); 29 + int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status); 29 30 30 31 #endif 31 32
+11
drivers/mmc/core/quirks.c
··· 21 21 #define SDIO_DEVICE_ID_TI_WL1271 0x4076 22 22 #endif 23 23 24 + #ifndef SDIO_VENDOR_ID_STE 25 + #define SDIO_VENDOR_ID_STE 0x0020 26 + #endif 27 + 28 + #ifndef SDIO_DEVICE_ID_STE_CW1200 29 + #define SDIO_DEVICE_ID_STE_CW1200 0x2280 30 + #endif 31 + 24 32 /* 25 33 * This hook just adds a quirk for all sdio devices 26 34 */ ··· 53 45 54 46 SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271, 55 47 add_quirk, MMC_QUIRK_DISABLE_CD), 48 + 49 + SDIO_FIXUP(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200, 50 + add_quirk, MMC_QUIRK_BROKEN_BYTE_MODE_512), 56 51 57 52 END_FIXUP 58 53 };
+24 -25
drivers/mmc/core/sd.c
··· 163 163 csd->erase_size = 1; 164 164 break; 165 165 default: 166 - printk(KERN_ERR "%s: unrecognised CSD structure version %d\n", 166 + pr_err("%s: unrecognised CSD structure version %d\n", 167 167 mmc_hostname(card->host), csd_struct); 168 168 return -EINVAL; 169 169 } ··· 187 187 188 188 scr_struct = UNSTUFF_BITS(resp, 60, 4); 189 189 if (scr_struct != 0) { 190 - printk(KERN_ERR "%s: unrecognised SCR structure version %d\n", 190 + pr_err("%s: unrecognised SCR structure version %d\n", 191 191 mmc_hostname(card->host), scr_struct); 192 192 return -EINVAL; 193 193 } ··· 218 218 u32 *ssr; 219 219 220 220 if (!(card->csd.cmdclass & CCC_APP_SPEC)) { 221 - printk(KERN_WARNING "%s: card lacks mandatory SD Status " 221 + pr_warning("%s: card lacks mandatory SD Status " 222 222 "function.\n", mmc_hostname(card->host)); 223 223 return 0; 224 224 } ··· 229 229 230 230 err = mmc_app_sd_status(card, ssr); 231 231 if (err) { 232 - printk(KERN_WARNING "%s: problem reading SD Status " 232 + pr_warning("%s: problem reading SD Status " 233 233 "register.\n", mmc_hostname(card->host)); 234 234 err = 0; 235 235 goto out; ··· 253 253 card->ssr.erase_offset = eo * 1000; 254 254 } 255 255 } else { 256 - printk(KERN_WARNING "%s: SD Status: Invalid Allocation Unit " 256 + pr_warning("%s: SD Status: Invalid Allocation Unit " 257 257 "size.\n", mmc_hostname(card->host)); 258 258 } 259 259 out: ··· 273 273 return 0; 274 274 275 275 if (!(card->csd.cmdclass & CCC_SWITCH)) { 276 - printk(KERN_WARNING "%s: card lacks mandatory switch " 276 + pr_warning("%s: card lacks mandatory switch " 277 277 "function, performance might suffer.\n", 278 278 mmc_hostname(card->host)); 279 279 return 0; ··· 283 283 284 284 status = kmalloc(64, GFP_KERNEL); 285 285 if (!status) { 286 - printk(KERN_ERR "%s: could not allocate a buffer for " 286 + pr_err("%s: could not allocate a buffer for " 287 287 "switch capabilities.\n", 288 288 mmc_hostname(card->host)); 289 289 return -ENOMEM; ··· 299 299 if (err != 
-EINVAL && err != -ENOSYS && err != -EFAULT) 300 300 goto out; 301 301 302 - printk(KERN_WARNING "%s: problem reading Bus Speed modes.\n", 302 + pr_warning("%s: problem reading Bus Speed modes.\n", 303 303 mmc_hostname(card->host)); 304 304 err = 0; 305 305 306 306 goto out; 307 307 } 308 + 309 + if (status[13] & UHS_SDR50_BUS_SPEED) 310 + card->sw_caps.hs_max_dtr = 50000000; 308 311 309 312 if (card->scr.sda_spec3) { 310 313 card->sw_caps.sd3_bus_mode = status[13]; ··· 322 319 if (err != -EINVAL && err != -ENOSYS && err != -EFAULT) 323 320 goto out; 324 321 325 - printk(KERN_WARNING "%s: problem reading " 322 + pr_warning("%s: problem reading " 326 323 "Driver Strength.\n", 327 324 mmc_hostname(card->host)); 328 325 err = 0; ··· 342 339 if (err != -EINVAL && err != -ENOSYS && err != -EFAULT) 343 340 goto out; 344 341 345 - printk(KERN_WARNING "%s: problem reading " 342 + pr_warning("%s: problem reading " 346 343 "Current Limit.\n", 347 344 mmc_hostname(card->host)); 348 345 err = 0; ··· 351 348 } 352 349 353 350 card->sw_caps.sd3_curr_limit = status[7]; 354 - } else { 355 - if (status[13] & 0x02) 356 - card->sw_caps.hs_max_dtr = 50000000; 357 351 } 358 352 359 353 out: ··· 383 383 384 384 status = kmalloc(64, GFP_KERNEL); 385 385 if (!status) { 386 - printk(KERN_ERR "%s: could not allocate a buffer for " 386 + pr_err("%s: could not allocate a buffer for " 387 387 "switch capabilities.\n", mmc_hostname(card->host)); 388 388 return -ENOMEM; 389 389 } ··· 393 393 goto out; 394 394 395 395 if ((status[16] & 0xF) != 1) { 396 - printk(KERN_WARNING "%s: Problem switching card " 396 + pr_warning("%s: Problem switching card " 397 397 "into high-speed mode!\n", 398 398 mmc_hostname(card->host)); 399 399 err = 0; ··· 459 459 return err; 460 460 461 461 if ((status[15] & 0xF) != drive_strength) { 462 - printk(KERN_WARNING "%s: Problem setting drive strength!\n", 462 + pr_warning("%s: Problem setting drive strength!\n", 463 463 mmc_hostname(card->host)); 464 464 return 0; 465 
465 } ··· 538 538 return err; 539 539 540 540 if ((status[16] & 0xF) != card->sd_bus_speed) 541 - printk(KERN_WARNING "%s: Problem setting bus speed mode!\n", 541 + pr_warning("%s: Problem setting bus speed mode!\n", 542 542 mmc_hostname(card->host)); 543 543 else { 544 544 mmc_set_timing(card->host, timing); ··· 600 600 return err; 601 601 602 602 if (((status[15] >> 4) & 0x0F) != current_limit) 603 - printk(KERN_WARNING "%s: Problem setting current limit!\n", 603 + pr_warning("%s: Problem setting current limit!\n", 604 604 mmc_hostname(card->host)); 605 605 606 606 return 0; ··· 622 622 623 623 status = kmalloc(64, GFP_KERNEL); 624 624 if (!status) { 625 - printk(KERN_ERR "%s: could not allocate a buffer for " 625 + pr_err("%s: could not allocate a buffer for " 626 626 "switch capabilities.\n", mmc_hostname(card->host)); 627 627 return -ENOMEM; 628 628 } ··· 852 852 ro = host->ops->get_ro(host); 853 853 854 854 if (ro < 0) { 855 - printk(KERN_WARNING "%s: host does not " 855 + pr_warning("%s: host does not " 856 856 "support reading read-only " 857 857 "switch. assuming write-enable.\n", 858 858 mmc_hostname(host)); ··· 929 929 err = mmc_send_relative_addr(host, &card->rca); 930 930 if (err) 931 931 return err; 932 - 933 - mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL); 934 932 } 935 933 936 934 if (!oldcard) { ··· 1041 1043 1042 1044 mmc_claim_host(host); 1043 1045 mmc_detach_bus(host); 1046 + mmc_power_off(host); 1044 1047 mmc_release_host(host); 1045 1048 } 1046 1049 } ··· 1166 1167 * support. 1167 1168 */ 1168 1169 if (ocr & 0x7F) { 1169 - printk(KERN_WARNING "%s: card claims to support voltages " 1170 + pr_warning("%s: card claims to support voltages " 1170 1171 "below the defined range. 
These will be ignored.\n", 1171 1172 mmc_hostname(host)); 1172 1173 ocr &= ~0x7F; ··· 1174 1175 1175 1176 if ((ocr & MMC_VDD_165_195) && 1176 1177 !(host->ocr_avail_sd & MMC_VDD_165_195)) { 1177 - printk(KERN_WARNING "%s: SD card claims to support the " 1178 + pr_warning("%s: SD card claims to support the " 1178 1179 "incompletely defined 'low voltage range'. This " 1179 1180 "will be ignored.\n", mmc_hostname(host)); 1180 1181 ocr &= ~MMC_VDD_165_195; ··· 1213 1214 err: 1214 1215 mmc_detach_bus(host); 1215 1216 1216 - printk(KERN_ERR "%s: error %d whilst initialising SD card\n", 1217 + pr_err("%s: error %d whilst initialising SD card\n", 1217 1218 mmc_hostname(host), err); 1218 1219 1219 1220 return err;
+4 -4
drivers/mmc/core/sd_ops.c
··· 67 67 int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card, 68 68 struct mmc_command *cmd, int retries) 69 69 { 70 - struct mmc_request mrq = {0}; 70 + struct mmc_request mrq = {NULL}; 71 71 72 72 int i, err; 73 73 ··· 244 244 int mmc_app_send_scr(struct mmc_card *card, u32 *scr) 245 245 { 246 246 int err; 247 - struct mmc_request mrq = {0}; 247 + struct mmc_request mrq = {NULL}; 248 248 struct mmc_command cmd = {0}; 249 249 struct mmc_data data = {0}; 250 250 struct scatterlist sg; ··· 303 303 int mmc_sd_switch(struct mmc_card *card, int mode, int group, 304 304 u8 value, u8 *resp) 305 305 { 306 - struct mmc_request mrq = {0}; 306 + struct mmc_request mrq = {NULL}; 307 307 struct mmc_command cmd = {0}; 308 308 struct mmc_data data = {0}; 309 309 struct scatterlist sg; ··· 348 348 int mmc_app_sd_status(struct mmc_card *card, void *ssr) 349 349 { 350 350 int err; 351 - struct mmc_request mrq = {0}; 351 + struct mmc_request mrq = {NULL}; 352 352 struct mmc_command cmd = {0}; 353 353 struct mmc_data data = {0}; 354 354 struct scatterlist sg;
+5 -6
drivers/mmc/core/sdio.c
··· 111 111 112 112 cccr_vsn = data & 0x0f; 113 113 114 - if (cccr_vsn > SDIO_CCCR_REV_1_20) { 115 - printk(KERN_ERR "%s: unrecognised CCCR structure version %d\n", 114 + if (cccr_vsn > SDIO_CCCR_REV_3_00) { 115 + pr_err("%s: unrecognised CCCR structure version %d\n", 116 116 mmc_hostname(card->host), cccr_vsn); 117 117 return -EINVAL; 118 118 } ··· 408 408 */ 409 409 if (oldcard) 410 410 oldcard->rca = card->rca; 411 - 412 - mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL); 413 411 } 414 412 415 413 /* ··· 595 597 596 598 mmc_claim_host(host); 597 599 mmc_detach_bus(host); 600 + mmc_power_off(host); 598 601 mmc_release_host(host); 599 602 } 600 603 } ··· 777 778 * support. 778 779 */ 779 780 if (ocr & 0x7F) { 780 - printk(KERN_WARNING "%s: card claims to support voltages " 781 + pr_warning("%s: card claims to support voltages " 781 782 "below the defined range. These will be ignored.\n", 782 783 mmc_hostname(host)); 783 784 ocr &= ~0x7F; ··· 874 875 err: 875 876 mmc_detach_bus(host); 876 877 877 - printk(KERN_ERR "%s: error %d whilst initialising SDIO card\n", 878 + pr_err("%s: error %d whilst initialising SDIO card\n", 878 879 mmc_hostname(host), err); 879 880 880 881 return err;
+1 -1
drivers/mmc/core/sdio_bus.c
··· 173 173 drv->remove(func); 174 174 175 175 if (func->irq_handler) { 176 - printk(KERN_WARNING "WARNING: driver %s did not remove " 176 + pr_warning("WARNING: driver %s did not remove " 177 177 "its interrupt handler!\n", drv->name); 178 178 sdio_claim_host(func); 179 179 sdio_release_irq(func);
+2 -2
drivers/mmc/core/sdio_cis.c
··· 132 132 ret = -EINVAL; 133 133 } 134 134 if (ret && ret != -EILSEQ && ret != -ENOENT) { 135 - printk(KERN_ERR "%s: bad %s tuple 0x%02x (%u bytes)\n", 135 + pr_err("%s: bad %s tuple 0x%02x (%u bytes)\n", 136 136 mmc_hostname(card->host), tpl_descr, code, size); 137 137 } 138 138 } else { ··· 313 313 314 314 if (ret == -ENOENT) { 315 315 /* warn about unknown tuples */ 316 - printk(KERN_WARNING "%s: queuing unknown" 316 + pr_warning("%s: queuing unknown" 317 317 " CIS tuple 0x%02x (%u bytes)\n", 318 318 mmc_hostname(card->host), 319 319 tpl_code, tpl_link);
+3 -3
drivers/mmc/core/sdio_irq.c
··· 45 45 46 46 ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTx, 0, &pending); 47 47 if (ret) { 48 - printk(KERN_DEBUG "%s: error %d reading SDIO_CCCR_INTx\n", 48 + pr_debug("%s: error %d reading SDIO_CCCR_INTx\n", 49 49 mmc_card_id(card), ret); 50 50 return ret; 51 51 } ··· 55 55 if (pending & (1 << i)) { 56 56 func = card->sdio_func[i - 1]; 57 57 if (!func) { 58 - printk(KERN_WARNING "%s: pending IRQ for " 58 + pr_warning("%s: pending IRQ for " 59 59 "non-existent function\n", 60 60 mmc_card_id(card)); 61 61 ret = -EINVAL; ··· 63 63 func->irq_handler(func); 64 64 count++; 65 65 } else { 66 - printk(KERN_WARNING "%s: pending IRQ with no handler\n", 66 + pr_warning("%s: pending IRQ with no handler\n", 67 67 sdio_func_id(func)); 68 68 ret = -EINVAL; 69 69 }
+6 -3
drivers/mmc/core/sdio_ops.c
··· 121 121 int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn, 122 122 unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz) 123 123 { 124 - struct mmc_request mrq = {0}; 124 + struct mmc_request mrq = {NULL}; 125 125 struct mmc_command cmd = {0}; 126 126 struct mmc_data data = {0}; 127 127 struct scatterlist sg; ··· 144 144 cmd.arg |= fn << 28; 145 145 cmd.arg |= incr_addr ? 0x04000000 : 0x00000000; 146 146 cmd.arg |= addr << 9; 147 - if (blocks == 1 && blksz <= 512) 148 - cmd.arg |= (blksz == 512) ? 0 : blksz; /* byte mode */ 147 + if (blocks == 1 && blksz < 512) 148 + cmd.arg |= blksz; /* byte mode */ 149 + else if (blocks == 1 && blksz == 512 && 150 + !(mmc_card_broken_byte_mode_512(card))) 151 + cmd.arg |= 0; /* byte mode, 0==512 */ 149 152 else 150 153 cmd.arg |= 0x08000000 | blocks; /* block mode */ 151 154 cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+8 -8
drivers/mmc/host/Kconfig
··· 130 130 If unsure, say N. 131 131 132 132 config MMC_SDHCI_ESDHC_IMX 133 - tristate "SDHCI platform support for the Freescale eSDHC i.MX controller" 134 - depends on ARCH_MX25 || ARCH_MX35 || ARCH_MX5 133 + tristate "SDHCI support for the Freescale eSDHC/uSDHC i.MX controller" 134 + depends on ARCH_MXC 135 135 depends on MMC_SDHCI_PLTFM 136 136 select MMC_SDHCI_IO_ACCESSORS 137 137 help 138 - This selects the Freescale eSDHC controller support on the platform 139 - bus, found on platforms like mx35/51. 138 + This selects the Freescale eSDHC/uSDHC controller support 139 + found on i.MX25, i.MX35 i.MX5x and i.MX6x. 140 140 141 141 If you have a controller with this interface, say Y or M here. 142 142 ··· 326 326 support for SDIO devices. 327 327 328 328 config MMC_MXC 329 - tristate "Freescale i.MX2/3 Multimedia Card Interface support" 330 - depends on MACH_MX21 || MACH_MX27 || ARCH_MX31 329 + tristate "Freescale i.MX21/27/31 Multimedia Card Interface support" 330 + depends on ARCH_MXC 331 331 help 332 - This selects the Freescale i.MX2/3 Multimedia card Interface. 333 - If you have a i.MX platform with a Multimedia Card slot, 332 + This selects the Freescale i.MX21, i.MX27 and i.MX31 Multimedia card 333 + Interface. If you have a i.MX platform with a Multimedia Card slot, 334 334 say Y or M here. 335 335 336 336 If unsure, say N.
+5 -1
drivers/mmc/host/at91_mci.c
··· 869 869 static irqreturn_t at91_mmc_det_irq(int irq, void *_host) 870 870 { 871 871 struct at91mci_host *host = _host; 872 - int present = !gpio_get_value(irq_to_gpio(irq)); 872 + int present; 873 + 874 + /* entering this ISR means that we have configured det_pin: 875 + * we can use its value in board structure */ 876 + present = !gpio_get_value(host->board->det_pin); 873 877 874 878 /* 875 879 * we expect this irq on both insert and remove,
+117 -103
drivers/mmc/host/atmel-mci-regs.h
··· 17 17 #define __DRIVERS_MMC_ATMEL_MCI_H__ 18 18 19 19 /* MCI Register Definitions */ 20 - #define MCI_CR 0x0000 /* Control */ 21 - # define MCI_CR_MCIEN ( 1 << 0) /* MCI Enable */ 22 - # define MCI_CR_MCIDIS ( 1 << 1) /* MCI Disable */ 23 - # define MCI_CR_PWSEN ( 1 << 2) /* Power Save Enable */ 24 - # define MCI_CR_PWSDIS ( 1 << 3) /* Power Save Disable */ 25 - # define MCI_CR_SWRST ( 1 << 7) /* Software Reset */ 26 - #define MCI_MR 0x0004 /* Mode */ 27 - # define MCI_MR_CLKDIV(x) ((x) << 0) /* Clock Divider */ 28 - # define MCI_MR_PWSDIV(x) ((x) << 8) /* Power Saving Divider */ 29 - # define MCI_MR_RDPROOF ( 1 << 11) /* Read Proof */ 30 - # define MCI_MR_WRPROOF ( 1 << 12) /* Write Proof */ 31 - # define MCI_MR_PDCFBYTE ( 1 << 13) /* Force Byte Transfer */ 32 - # define MCI_MR_PDCPADV ( 1 << 14) /* Padding Value */ 33 - # define MCI_MR_PDCMODE ( 1 << 15) /* PDC-oriented Mode */ 34 - #define MCI_DTOR 0x0008 /* Data Timeout */ 35 - # define MCI_DTOCYC(x) ((x) << 0) /* Data Timeout Cycles */ 36 - # define MCI_DTOMUL(x) ((x) << 4) /* Data Timeout Multiplier */ 37 - #define MCI_SDCR 0x000c /* SD Card / SDIO */ 38 - # define MCI_SDCSEL_SLOT_A ( 0 << 0) /* Select SD slot A */ 39 - # define MCI_SDCSEL_SLOT_B ( 1 << 0) /* Select SD slot A */ 40 - # define MCI_SDCSEL_MASK ( 3 << 0) 41 - # define MCI_SDCBUS_1BIT ( 0 << 6) /* 1-bit data bus */ 42 - # define MCI_SDCBUS_4BIT ( 2 << 6) /* 4-bit data bus */ 43 - # define MCI_SDCBUS_8BIT ( 3 << 6) /* 8-bit data bus[2] */ 44 - # define MCI_SDCBUS_MASK ( 3 << 6) 45 - #define MCI_ARGR 0x0010 /* Command Argument */ 46 - #define MCI_CMDR 0x0014 /* Command */ 47 - # define MCI_CMDR_CMDNB(x) ((x) << 0) /* Command Opcode */ 48 - # define MCI_CMDR_RSPTYP_NONE ( 0 << 6) /* No response */ 49 - # define MCI_CMDR_RSPTYP_48BIT ( 1 << 6) /* 48-bit response */ 50 - # define MCI_CMDR_RSPTYP_136BIT ( 2 << 6) /* 136-bit response */ 51 - # define MCI_CMDR_SPCMD_INIT ( 1 << 8) /* Initialization command */ 52 - # define MCI_CMDR_SPCMD_SYNC ( 2 << 
8) /* Synchronized command */ 53 - # define MCI_CMDR_SPCMD_INT ( 4 << 8) /* Interrupt command */ 54 - # define MCI_CMDR_SPCMD_INTRESP ( 5 << 8) /* Interrupt response */ 55 - # define MCI_CMDR_OPDCMD ( 1 << 11) /* Open Drain */ 56 - # define MCI_CMDR_MAXLAT_5CYC ( 0 << 12) /* Max latency 5 cycles */ 57 - # define MCI_CMDR_MAXLAT_64CYC ( 1 << 12) /* Max latency 64 cycles */ 58 - # define MCI_CMDR_START_XFER ( 1 << 16) /* Start data transfer */ 59 - # define MCI_CMDR_STOP_XFER ( 2 << 16) /* Stop data transfer */ 60 - # define MCI_CMDR_TRDIR_WRITE ( 0 << 18) /* Write data */ 61 - # define MCI_CMDR_TRDIR_READ ( 1 << 18) /* Read data */ 62 - # define MCI_CMDR_BLOCK ( 0 << 19) /* Single-block transfer */ 63 - # define MCI_CMDR_MULTI_BLOCK ( 1 << 19) /* Multi-block transfer */ 64 - # define MCI_CMDR_STREAM ( 2 << 19) /* MMC Stream transfer */ 65 - # define MCI_CMDR_SDIO_BYTE ( 4 << 19) /* SDIO Byte transfer */ 66 - # define MCI_CMDR_SDIO_BLOCK ( 5 << 19) /* SDIO Block transfer */ 67 - # define MCI_CMDR_SDIO_SUSPEND ( 1 << 24) /* SDIO Suspend Command */ 68 - # define MCI_CMDR_SDIO_RESUME ( 2 << 24) /* SDIO Resume Command */ 69 - #define MCI_BLKR 0x0018 /* Block */ 70 - # define MCI_BCNT(x) ((x) << 0) /* Data Block Count */ 71 - # define MCI_BLKLEN(x) ((x) << 16) /* Data Block Length */ 72 - #define MCI_CSTOR 0x001c /* Completion Signal Timeout[2] */ 73 - # define MCI_CSTOCYC(x) ((x) << 0) /* CST cycles */ 74 - # define MCI_CSTOMUL(x) ((x) << 4) /* CST multiplier */ 75 - #define MCI_RSPR 0x0020 /* Response 0 */ 76 - #define MCI_RSPR1 0x0024 /* Response 1 */ 77 - #define MCI_RSPR2 0x0028 /* Response 2 */ 78 - #define MCI_RSPR3 0x002c /* Response 3 */ 79 - #define MCI_RDR 0x0030 /* Receive Data */ 80 - #define MCI_TDR 0x0034 /* Transmit Data */ 81 - #define MCI_SR 0x0040 /* Status */ 82 - #define MCI_IER 0x0044 /* Interrupt Enable */ 83 - #define MCI_IDR 0x0048 /* Interrupt Disable */ 84 - #define MCI_IMR 0x004c /* Interrupt Mask */ 85 - # define MCI_CMDRDY ( 1 << 0) /* 
Command Ready */ 86 - # define MCI_RXRDY ( 1 << 1) /* Receiver Ready */ 87 - # define MCI_TXRDY ( 1 << 2) /* Transmitter Ready */ 88 - # define MCI_BLKE ( 1 << 3) /* Data Block Ended */ 89 - # define MCI_DTIP ( 1 << 4) /* Data Transfer In Progress */ 90 - # define MCI_NOTBUSY ( 1 << 5) /* Data Not Busy */ 91 - # define MCI_SDIOIRQA ( 1 << 8) /* SDIO IRQ in slot A */ 92 - # define MCI_SDIOIRQB ( 1 << 9) /* SDIO IRQ in slot B */ 93 - # define MCI_RINDE ( 1 << 16) /* Response Index Error */ 94 - # define MCI_RDIRE ( 1 << 17) /* Response Direction Error */ 95 - # define MCI_RCRCE ( 1 << 18) /* Response CRC Error */ 96 - # define MCI_RENDE ( 1 << 19) /* Response End Bit Error */ 97 - # define MCI_RTOE ( 1 << 20) /* Response Time-Out Error */ 98 - # define MCI_DCRCE ( 1 << 21) /* Data CRC Error */ 99 - # define MCI_DTOE ( 1 << 22) /* Data Time-Out Error */ 100 - # define MCI_OVRE ( 1 << 30) /* RX Overrun Error */ 101 - # define MCI_UNRE ( 1 << 31) /* TX Underrun Error */ 102 - #define MCI_DMA 0x0050 /* DMA Configuration[2] */ 103 - # define MCI_DMA_OFFSET(x) ((x) << 0) /* DMA Write Buffer Offset */ 104 - # define MCI_DMA_CHKSIZE(x) ((x) << 4) /* DMA Channel Read and Write Chunk Size */ 105 - # define MCI_DMAEN ( 1 << 8) /* DMA Hardware Handshaking Enable */ 106 - #define MCI_CFG 0x0054 /* Configuration[2] */ 107 - # define MCI_CFG_FIFOMODE_1DATA ( 1 << 0) /* MCI Internal FIFO control mode */ 108 - # define MCI_CFG_FERRCTRL_COR ( 1 << 4) /* Flow Error flag reset control mode */ 109 - # define MCI_CFG_HSMODE ( 1 << 8) /* High Speed Mode */ 110 - # define MCI_CFG_LSYNC ( 1 << 12) /* Synchronize on the last block */ 111 - #define MCI_WPMR 0x00e4 /* Write Protection Mode[2] */ 112 - # define MCI_WP_EN ( 1 << 0) /* WP Enable */ 113 - # define MCI_WP_KEY (0x4d4349 << 8) /* WP Key */ 114 - #define MCI_WPSR 0x00e8 /* Write Protection Status[2] */ 115 - # define MCI_GET_WP_VS(x) ((x) & 0x0f) 116 - # define MCI_GET_WP_VSRC(x) (((x) >> 8) & 0xffff) 117 - #define MCI_FIFO_APERTURE 
0x0200 /* FIFO Aperture[2] */ 20 + #define ATMCI_CR 0x0000 /* Control */ 21 + # define ATMCI_CR_MCIEN ( 1 << 0) /* MCI Enable */ 22 + # define ATMCI_CR_MCIDIS ( 1 << 1) /* MCI Disable */ 23 + # define ATMCI_CR_PWSEN ( 1 << 2) /* Power Save Enable */ 24 + # define ATMCI_CR_PWSDIS ( 1 << 3) /* Power Save Disable */ 25 + # define ATMCI_CR_SWRST ( 1 << 7) /* Software Reset */ 26 + #define ATMCI_MR 0x0004 /* Mode */ 27 + # define ATMCI_MR_CLKDIV(x) ((x) << 0) /* Clock Divider */ 28 + # define ATMCI_MR_PWSDIV(x) ((x) << 8) /* Power Saving Divider */ 29 + # define ATMCI_MR_RDPROOF ( 1 << 11) /* Read Proof */ 30 + # define ATMCI_MR_WRPROOF ( 1 << 12) /* Write Proof */ 31 + # define ATMCI_MR_PDCFBYTE ( 1 << 13) /* Force Byte Transfer */ 32 + # define ATMCI_MR_PDCPADV ( 1 << 14) /* Padding Value */ 33 + # define ATMCI_MR_PDCMODE ( 1 << 15) /* PDC-oriented Mode */ 34 + #define ATMCI_DTOR 0x0008 /* Data Timeout */ 35 + # define ATMCI_DTOCYC(x) ((x) << 0) /* Data Timeout Cycles */ 36 + # define ATMCI_DTOMUL(x) ((x) << 4) /* Data Timeout Multiplier */ 37 + #define ATMCI_SDCR 0x000c /* SD Card / SDIO */ 38 + # define ATMCI_SDCSEL_SLOT_A ( 0 << 0) /* Select SD slot A */ 39 + # define ATMCI_SDCSEL_SLOT_B ( 1 << 0) /* Select SD slot A */ 40 + # define ATMCI_SDCSEL_MASK ( 3 << 0) 41 + # define ATMCI_SDCBUS_1BIT ( 0 << 6) /* 1-bit data bus */ 42 + # define ATMCI_SDCBUS_4BIT ( 2 << 6) /* 4-bit data bus */ 43 + # define ATMCI_SDCBUS_8BIT ( 3 << 6) /* 8-bit data bus[2] */ 44 + # define ATMCI_SDCBUS_MASK ( 3 << 6) 45 + #define ATMCI_ARGR 0x0010 /* Command Argument */ 46 + #define ATMCI_CMDR 0x0014 /* Command */ 47 + # define ATMCI_CMDR_CMDNB(x) ((x) << 0) /* Command Opcode */ 48 + # define ATMCI_CMDR_RSPTYP_NONE ( 0 << 6) /* No response */ 49 + # define ATMCI_CMDR_RSPTYP_48BIT ( 1 << 6) /* 48-bit response */ 50 + # define ATMCI_CMDR_RSPTYP_136BIT ( 2 << 6) /* 136-bit response */ 51 + # define ATMCI_CMDR_SPCMD_INIT ( 1 << 8) /* Initialization command */ 52 + # define ATMCI_CMDR_SPCMD_SYNC 
( 2 << 8) /* Synchronized command */ 53 + # define ATMCI_CMDR_SPCMD_INT ( 4 << 8) /* Interrupt command */ 54 + # define ATMCI_CMDR_SPCMD_INTRESP ( 5 << 8) /* Interrupt response */ 55 + # define ATMCI_CMDR_OPDCMD ( 1 << 11) /* Open Drain */ 56 + # define ATMCI_CMDR_MAXLAT_5CYC ( 0 << 12) /* Max latency 5 cycles */ 57 + # define ATMCI_CMDR_MAXLAT_64CYC ( 1 << 12) /* Max latency 64 cycles */ 58 + # define ATMCI_CMDR_START_XFER ( 1 << 16) /* Start data transfer */ 59 + # define ATMCI_CMDR_STOP_XFER ( 2 << 16) /* Stop data transfer */ 60 + # define ATMCI_CMDR_TRDIR_WRITE ( 0 << 18) /* Write data */ 61 + # define ATMCI_CMDR_TRDIR_READ ( 1 << 18) /* Read data */ 62 + # define ATMCI_CMDR_BLOCK ( 0 << 19) /* Single-block transfer */ 63 + # define ATMCI_CMDR_MULTI_BLOCK ( 1 << 19) /* Multi-block transfer */ 64 + # define ATMCI_CMDR_STREAM ( 2 << 19) /* MMC Stream transfer */ 65 + # define ATMCI_CMDR_SDIO_BYTE ( 4 << 19) /* SDIO Byte transfer */ 66 + # define ATMCI_CMDR_SDIO_BLOCK ( 5 << 19) /* SDIO Block transfer */ 67 + # define ATMCI_CMDR_SDIO_SUSPEND ( 1 << 24) /* SDIO Suspend Command */ 68 + # define ATMCI_CMDR_SDIO_RESUME ( 2 << 24) /* SDIO Resume Command */ 69 + #define ATMCI_BLKR 0x0018 /* Block */ 70 + # define ATMCI_BCNT(x) ((x) << 0) /* Data Block Count */ 71 + # define ATMCI_BLKLEN(x) ((x) << 16) /* Data Block Length */ 72 + #define ATMCI_CSTOR 0x001c /* Completion Signal Timeout[2] */ 73 + # define ATMCI_CSTOCYC(x) ((x) << 0) /* CST cycles */ 74 + # define ATMCI_CSTOMUL(x) ((x) << 4) /* CST multiplier */ 75 + #define ATMCI_RSPR 0x0020 /* Response 0 */ 76 + #define ATMCI_RSPR1 0x0024 /* Response 1 */ 77 + #define ATMCI_RSPR2 0x0028 /* Response 2 */ 78 + #define ATMCI_RSPR3 0x002c /* Response 3 */ 79 + #define ATMCI_RDR 0x0030 /* Receive Data */ 80 + #define ATMCI_TDR 0x0034 /* Transmit Data */ 81 + #define ATMCI_SR 0x0040 /* Status */ 82 + #define ATMCI_IER 0x0044 /* Interrupt Enable */ 83 + #define ATMCI_IDR 0x0048 /* Interrupt Disable */ 84 + #define ATMCI_IMR 
0x004c /* Interrupt Mask */ 85 + # define ATMCI_CMDRDY ( 1 << 0) /* Command Ready */ 86 + # define ATMCI_RXRDY ( 1 << 1) /* Receiver Ready */ 87 + # define ATMCI_TXRDY ( 1 << 2) /* Transmitter Ready */ 88 + # define ATMCI_BLKE ( 1 << 3) /* Data Block Ended */ 89 + # define ATMCI_DTIP ( 1 << 4) /* Data Transfer In Progress */ 90 + # define ATMCI_NOTBUSY ( 1 << 5) /* Data Not Busy */ 91 + # define ATMCI_ENDRX ( 1 << 6) /* End of RX Buffer */ 92 + # define ATMCI_ENDTX ( 1 << 7) /* End of TX Buffer */ 93 + # define ATMCI_SDIOIRQA ( 1 << 8) /* SDIO IRQ in slot A */ 94 + # define ATMCI_SDIOIRQB ( 1 << 9) /* SDIO IRQ in slot B */ 95 + # define ATMCI_SDIOWAIT ( 1 << 12) /* SDIO Read Wait Operation Status */ 96 + # define ATMCI_CSRCV ( 1 << 13) /* CE-ATA Completion Signal Received */ 97 + # define ATMCI_RXBUFF ( 1 << 14) /* RX Buffer Full */ 98 + # define ATMCI_TXBUFE ( 1 << 15) /* TX Buffer Empty */ 99 + # define ATMCI_RINDE ( 1 << 16) /* Response Index Error */ 100 + # define ATMCI_RDIRE ( 1 << 17) /* Response Direction Error */ 101 + # define ATMCI_RCRCE ( 1 << 18) /* Response CRC Error */ 102 + # define ATMCI_RENDE ( 1 << 19) /* Response End Bit Error */ 103 + # define ATMCI_RTOE ( 1 << 20) /* Response Time-Out Error */ 104 + # define ATMCI_DCRCE ( 1 << 21) /* Data CRC Error */ 105 + # define ATMCI_DTOE ( 1 << 22) /* Data Time-Out Error */ 106 + # define ATMCI_CSTOE ( 1 << 23) /* Completion Signal Time-out Error */ 107 + # define ATMCI_BLKOVRE ( 1 << 24) /* DMA Block Overrun Error */ 108 + # define ATMCI_DMADONE ( 1 << 25) /* DMA Transfer Done */ 109 + # define ATMCI_FIFOEMPTY ( 1 << 26) /* FIFO Empty Flag */ 110 + # define ATMCI_XFRDONE ( 1 << 27) /* Transfer Done Flag */ 111 + # define ATMCI_ACKRCV ( 1 << 28) /* Boot Operation Acknowledge Received */ 112 + # define ATMCI_ACKRCVE ( 1 << 29) /* Boot Operation Acknowledge Error */ 113 + # define ATMCI_OVRE ( 1 << 30) /* RX Overrun Error */ 114 + # define ATMCI_UNRE ( 1 << 31) /* TX Underrun Error */ 115 + #define 
ATMCI_DMA 0x0050 /* DMA Configuration[2] */ 116 + # define ATMCI_DMA_OFFSET(x) ((x) << 0) /* DMA Write Buffer Offset */ 117 + # define ATMCI_DMA_CHKSIZE(x) ((x) << 4) /* DMA Channel Read and Write Chunk Size */ 118 + # define ATMCI_DMAEN ( 1 << 8) /* DMA Hardware Handshaking Enable */ 119 + #define ATMCI_CFG 0x0054 /* Configuration[2] */ 120 + # define ATMCI_CFG_FIFOMODE_1DATA ( 1 << 0) /* MCI Internal FIFO control mode */ 121 + # define ATMCI_CFG_FERRCTRL_COR ( 1 << 4) /* Flow Error flag reset control mode */ 122 + # define ATMCI_CFG_HSMODE ( 1 << 8) /* High Speed Mode */ 123 + # define ATMCI_CFG_LSYNC ( 1 << 12) /* Synchronize on the last block */ 124 + #define ATMCI_WPMR 0x00e4 /* Write Protection Mode[2] */ 125 + # define ATMCI_WP_EN ( 1 << 0) /* WP Enable */ 126 + # define ATMCI_WP_KEY (0x4d4349 << 8) /* WP Key */ 127 + #define ATMCI_WPSR 0x00e8 /* Write Protection Status[2] */ 128 + # define ATMCI_GET_WP_VS(x) ((x) & 0x0f) 129 + # define ATMCI_GET_WP_VSRC(x) (((x) >> 8) & 0xffff) 130 + #define ATMCI_VERSION 0x00FC /* Version */ 131 + #define ATMCI_FIFO_APERTURE 0x0200 /* FIFO Aperture[2] */ 118 132 119 133 /* This is not including the FIFO Aperture on MCI2 */ 120 - #define MCI_REGS_SIZE 0x100 134 + #define ATMCI_REGS_SIZE 0x100 121 135 122 136 /* Register access macros */ 123 - #define mci_readl(port,reg) \ 124 - __raw_readl((port)->regs + MCI_##reg) 125 - #define mci_writel(port,reg,value) \ 126 - __raw_writel((value), (port)->regs + MCI_##reg) 137 + #define atmci_readl(port,reg) \ 138 + __raw_readl((port)->regs + reg) 139 + #define atmci_writel(port,reg,value) \ 140 + __raw_writel((value), (port)->regs + reg) 127 141 128 142 #endif /* __DRIVERS_MMC_ATMEL_MCI_H__ */
+615 -322
drivers/mmc/host/atmel-mci.c
··· 30 30 31 31 #include <mach/atmel-mci.h> 32 32 #include <linux/atmel-mci.h> 33 + #include <linux/atmel_pdc.h> 33 34 34 35 #include <asm/io.h> 35 36 #include <asm/unaligned.h> ··· 40 39 41 40 #include "atmel-mci-regs.h" 42 41 43 - #define ATMCI_DATA_ERROR_FLAGS (MCI_DCRCE | MCI_DTOE | MCI_OVRE | MCI_UNRE) 42 + #define ATMCI_DATA_ERROR_FLAGS (ATMCI_DCRCE | ATMCI_DTOE | ATMCI_OVRE | ATMCI_UNRE) 44 43 #define ATMCI_DMA_THRESHOLD 16 45 44 46 45 enum { ··· 59 58 STATE_DATA_ERROR, 60 59 }; 61 60 61 + enum atmci_xfer_dir { 62 + XFER_RECEIVE = 0, 63 + XFER_TRANSMIT, 64 + }; 65 + 66 + enum atmci_pdc_buf { 67 + PDC_FIRST_BUF = 0, 68 + PDC_SECOND_BUF, 69 + }; 70 + 71 + struct atmel_mci_caps { 72 + bool has_dma; 73 + bool has_pdc; 74 + bool has_cfg_reg; 75 + bool has_cstor_reg; 76 + bool has_highspeed; 77 + bool has_rwproof; 78 + }; 79 + 62 80 struct atmel_mci_dma { 63 - #ifdef CONFIG_MMC_ATMELMCI_DMA 64 81 struct dma_chan *chan; 65 82 struct dma_async_tx_descriptor *data_desc; 66 - #endif 67 83 }; 68 84 69 85 /** 70 86 * struct atmel_mci - MMC controller state shared between all slots 71 87 * @lock: Spinlock protecting the queue and associated data. 72 88 * @regs: Pointer to MMIO registers. 73 - * @sg: Scatterlist entry currently being processed by PIO code, if any. 89 + * @sg: Scatterlist entry currently being processed by PIO or PDC code. 74 90 * @pio_offset: Offset into the current scatterlist entry. 75 91 * @cur_slot: The slot which is currently using the controller. 76 92 * @mrq: The request currently being processed on @cur_slot, ··· 95 77 * @cmd: The command currently being sent to the card, or NULL. 96 78 * @data: The data currently being transferred, or NULL if no data 97 79 * transfer is in progress. 80 + * @data_size: just data->blocks * data->blksz. 98 81 * @dma: DMA client state. 99 82 * @data_chan: DMA channel being used for the current data transfer. 
100 83 * @cmd_status: Snapshot of SR taken upon completion of the current ··· 122 103 * @mck: The peripheral bus clock hooked up to the MMC controller. 123 104 * @pdev: Platform device associated with the MMC controller. 124 105 * @slot: Slots sharing this MMC controller. 106 + * @caps: MCI capabilities depending on MCI version. 107 + * @prepare_data: function to setup MCI before data transfer which 108 + * depends on MCI capabilities. 109 + * @submit_data: function to start data transfer which depends on MCI 110 + * capabilities. 111 + * @stop_transfer: function to stop data transfer which depends on MCI 112 + * capabilities. 125 113 * 126 114 * Locking 127 115 * ======= ··· 169 143 struct mmc_request *mrq; 170 144 struct mmc_command *cmd; 171 145 struct mmc_data *data; 146 + unsigned int data_size; 172 147 173 148 struct atmel_mci_dma dma; 174 149 struct dma_chan *data_chan; ··· 193 166 struct clk *mck; 194 167 struct platform_device *pdev; 195 168 196 - struct atmel_mci_slot *slot[ATMEL_MCI_MAX_NR_SLOTS]; 169 + struct atmel_mci_slot *slot[ATMCI_MAX_NR_SLOTS]; 170 + 171 + struct atmel_mci_caps caps; 172 + 173 + u32 (*prepare_data)(struct atmel_mci *host, struct mmc_data *data); 174 + void (*submit_data)(struct atmel_mci *host, struct mmc_data *data); 175 + void (*stop_transfer)(struct atmel_mci *host); 197 176 }; 198 177 199 178 /** ··· 251 218 set_bit(event, &host->completed_events) 252 219 #define atmci_set_pending(host, event) \ 253 220 set_bit(event, &host->pending_events) 254 - 255 - /* 256 - * Enable or disable features/registers based on 257 - * whether the processor supports them 258 - */ 259 - static bool mci_has_rwproof(void) 260 - { 261 - if (cpu_is_at91sam9261() || cpu_is_at91rm9200()) 262 - return false; 263 - else 264 - return true; 265 - } 266 - 267 - /* 268 - * The new MCI2 module isn't 100% compatible with the old MCI module, 269 - * and it has a few nice features which we want to use... 
270 - */ 271 - static inline bool atmci_is_mci2(void) 272 - { 273 - if (cpu_is_at91sam9g45()) 274 - return true; 275 - 276 - return false; 277 - } 278 - 279 221 280 222 /* 281 223 * The debugfs stuff below is mostly optimized away when ··· 360 352 struct atmel_mci *host = s->private; 361 353 u32 *buf; 362 354 363 - buf = kmalloc(MCI_REGS_SIZE, GFP_KERNEL); 355 + buf = kmalloc(ATMCI_REGS_SIZE, GFP_KERNEL); 364 356 if (!buf) 365 357 return -ENOMEM; 366 358 ··· 371 363 */ 372 364 spin_lock_bh(&host->lock); 373 365 clk_enable(host->mck); 374 - memcpy_fromio(buf, host->regs, MCI_REGS_SIZE); 366 + memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE); 375 367 clk_disable(host->mck); 376 368 spin_unlock_bh(&host->lock); 377 369 378 370 seq_printf(s, "MR:\t0x%08x%s%s CLKDIV=%u\n", 379 - buf[MCI_MR / 4], 380 - buf[MCI_MR / 4] & MCI_MR_RDPROOF ? " RDPROOF" : "", 381 - buf[MCI_MR / 4] & MCI_MR_WRPROOF ? " WRPROOF" : "", 382 - buf[MCI_MR / 4] & 0xff); 383 - seq_printf(s, "DTOR:\t0x%08x\n", buf[MCI_DTOR / 4]); 384 - seq_printf(s, "SDCR:\t0x%08x\n", buf[MCI_SDCR / 4]); 385 - seq_printf(s, "ARGR:\t0x%08x\n", buf[MCI_ARGR / 4]); 371 + buf[ATMCI_MR / 4], 372 + buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "", 373 + buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? 
" WRPROOF" : "", 374 + buf[ATMCI_MR / 4] & 0xff); 375 + seq_printf(s, "DTOR:\t0x%08x\n", buf[ATMCI_DTOR / 4]); 376 + seq_printf(s, "SDCR:\t0x%08x\n", buf[ATMCI_SDCR / 4]); 377 + seq_printf(s, "ARGR:\t0x%08x\n", buf[ATMCI_ARGR / 4]); 386 378 seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n", 387 - buf[MCI_BLKR / 4], 388 - buf[MCI_BLKR / 4] & 0xffff, 389 - (buf[MCI_BLKR / 4] >> 16) & 0xffff); 390 - if (atmci_is_mci2()) 391 - seq_printf(s, "CSTOR:\t0x%08x\n", buf[MCI_CSTOR / 4]); 379 + buf[ATMCI_BLKR / 4], 380 + buf[ATMCI_BLKR / 4] & 0xffff, 381 + (buf[ATMCI_BLKR / 4] >> 16) & 0xffff); 382 + if (host->caps.has_cstor_reg) 383 + seq_printf(s, "CSTOR:\t0x%08x\n", buf[ATMCI_CSTOR / 4]); 392 384 393 385 /* Don't read RSPR and RDR; it will consume the data there */ 394 386 395 - atmci_show_status_reg(s, "SR", buf[MCI_SR / 4]); 396 - atmci_show_status_reg(s, "IMR", buf[MCI_IMR / 4]); 387 + atmci_show_status_reg(s, "SR", buf[ATMCI_SR / 4]); 388 + atmci_show_status_reg(s, "IMR", buf[ATMCI_IMR / 4]); 397 389 398 - if (atmci_is_mci2()) { 390 + if (host->caps.has_dma) { 399 391 u32 val; 400 392 401 - val = buf[MCI_DMA / 4]; 393 + val = buf[ATMCI_DMA / 4]; 402 394 seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n", 403 395 val, val & 3, 404 396 ((val >> 4) & 3) ? 405 397 1 << (((val >> 4) & 3) + 1) : 1, 406 - val & MCI_DMAEN ? " DMAEN" : ""); 398 + val & ATMCI_DMAEN ? " DMAEN" : ""); 399 + } 400 + if (host->caps.has_cfg_reg) { 401 + u32 val; 407 402 408 - val = buf[MCI_CFG / 4]; 403 + val = buf[ATMCI_CFG / 4]; 409 404 seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n", 410 405 val, 411 - val & MCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "", 412 - val & MCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "", 413 - val & MCI_CFG_HSMODE ? " HSMODE" : "", 414 - val & MCI_CFG_LSYNC ? " LSYNC" : ""); 406 + val & ATMCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "", 407 + val & ATMCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "", 408 + val & ATMCI_CFG_HSMODE ? 
" HSMODE" : "", 409 + val & ATMCI_CFG_LSYNC ? " LSYNC" : ""); 415 410 } 416 411 417 412 kfree(buf); ··· 477 466 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n"); 478 467 } 479 468 480 - static inline unsigned int ns_to_clocks(struct atmel_mci *host, 469 + static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host, 481 470 unsigned int ns) 482 471 { 483 472 return (ns * (host->bus_hz / 1000000) + 999) / 1000; ··· 493 482 unsigned dtocyc; 494 483 unsigned dtomul; 495 484 496 - timeout = ns_to_clocks(host, data->timeout_ns) + data->timeout_clks; 485 + timeout = atmci_ns_to_clocks(host, data->timeout_ns) 486 + + data->timeout_clks; 497 487 498 488 for (dtomul = 0; dtomul < 8; dtomul++) { 499 489 unsigned shift = dtomul_to_shift[dtomul]; ··· 510 498 511 499 dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n", 512 500 dtocyc << dtomul_to_shift[dtomul]); 513 - mci_writel(host, DTOR, (MCI_DTOMUL(dtomul) | MCI_DTOCYC(dtocyc))); 501 + atmci_writel(host, ATMCI_DTOR, (ATMCI_DTOMUL(dtomul) | ATMCI_DTOCYC(dtocyc))); 514 502 } 515 503 516 504 /* ··· 524 512 525 513 cmd->error = -EINPROGRESS; 526 514 527 - cmdr = MCI_CMDR_CMDNB(cmd->opcode); 515 + cmdr = ATMCI_CMDR_CMDNB(cmd->opcode); 528 516 529 517 if (cmd->flags & MMC_RSP_PRESENT) { 530 518 if (cmd->flags & MMC_RSP_136) 531 - cmdr |= MCI_CMDR_RSPTYP_136BIT; 519 + cmdr |= ATMCI_CMDR_RSPTYP_136BIT; 532 520 else 533 - cmdr |= MCI_CMDR_RSPTYP_48BIT; 521 + cmdr |= ATMCI_CMDR_RSPTYP_48BIT; 534 522 } 535 523 536 524 /* ··· 538 526 * it's too difficult to determine whether this is an ACMD or 539 527 * not. Better make it 64. 
540 528 */ 541 - cmdr |= MCI_CMDR_MAXLAT_64CYC; 529 + cmdr |= ATMCI_CMDR_MAXLAT_64CYC; 542 530 543 531 if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN) 544 - cmdr |= MCI_CMDR_OPDCMD; 532 + cmdr |= ATMCI_CMDR_OPDCMD; 545 533 546 534 data = cmd->data; 547 535 if (data) { 548 - cmdr |= MCI_CMDR_START_XFER; 536 + cmdr |= ATMCI_CMDR_START_XFER; 549 537 550 538 if (cmd->opcode == SD_IO_RW_EXTENDED) { 551 - cmdr |= MCI_CMDR_SDIO_BLOCK; 539 + cmdr |= ATMCI_CMDR_SDIO_BLOCK; 552 540 } else { 553 541 if (data->flags & MMC_DATA_STREAM) 554 - cmdr |= MCI_CMDR_STREAM; 542 + cmdr |= ATMCI_CMDR_STREAM; 555 543 else if (data->blocks > 1) 556 - cmdr |= MCI_CMDR_MULTI_BLOCK; 544 + cmdr |= ATMCI_CMDR_MULTI_BLOCK; 557 545 else 558 - cmdr |= MCI_CMDR_BLOCK; 546 + cmdr |= ATMCI_CMDR_BLOCK; 559 547 } 560 548 561 549 if (data->flags & MMC_DATA_READ) 562 - cmdr |= MCI_CMDR_TRDIR_READ; 550 + cmdr |= ATMCI_CMDR_TRDIR_READ; 563 551 } 564 552 565 553 return cmdr; 566 554 } 567 555 568 - static void atmci_start_command(struct atmel_mci *host, 556 + static void atmci_send_command(struct atmel_mci *host, 569 557 struct mmc_command *cmd, u32 cmd_flags) 570 558 { 571 559 WARN_ON(host->cmd); ··· 575 563 "start command: ARGR=0x%08x CMDR=0x%08x\n", 576 564 cmd->arg, cmd_flags); 577 565 578 - mci_writel(host, ARGR, cmd->arg); 579 - mci_writel(host, CMDR, cmd_flags); 566 + atmci_writel(host, ATMCI_ARGR, cmd->arg); 567 + atmci_writel(host, ATMCI_CMDR, cmd_flags); 580 568 } 581 569 582 - static void send_stop_cmd(struct atmel_mci *host, struct mmc_data *data) 570 + static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data) 583 571 { 584 - atmci_start_command(host, data->stop, host->stop_cmdr); 585 - mci_writel(host, IER, MCI_CMDRDY); 572 + atmci_send_command(host, data->stop, host->stop_cmdr); 573 + atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY); 586 574 } 587 575 588 - #ifdef CONFIG_MMC_ATMELMCI_DMA 589 - static void atmci_dma_cleanup(struct atmel_mci *host) 576 + /* 577 + * Configure 
given PDC buffer taking care of alignement issues. 578 + * Update host->data_size and host->sg. 579 + */ 580 + static void atmci_pdc_set_single_buf(struct atmel_mci *host, 581 + enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb) 590 582 { 591 - struct mmc_data *data = host->data; 583 + u32 pointer_reg, counter_reg; 592 584 593 - if (data) 594 - dma_unmap_sg(host->dma.chan->device->dev, 595 - data->sg, data->sg_len, 596 - ((data->flags & MMC_DATA_WRITE) 597 - ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); 598 - } 599 - 600 - static void atmci_stop_dma(struct atmel_mci *host) 601 - { 602 - struct dma_chan *chan = host->data_chan; 603 - 604 - if (chan) { 605 - dmaengine_terminate_all(chan); 606 - atmci_dma_cleanup(host); 585 + if (dir == XFER_RECEIVE) { 586 + pointer_reg = ATMEL_PDC_RPR; 587 + counter_reg = ATMEL_PDC_RCR; 607 588 } else { 608 - /* Data transfer was stopped by the interrupt handler */ 609 - atmci_set_pending(host, EVENT_XFER_COMPLETE); 610 - mci_writel(host, IER, MCI_NOTBUSY); 589 + pointer_reg = ATMEL_PDC_TPR; 590 + counter_reg = ATMEL_PDC_TCR; 591 + } 592 + 593 + if (buf_nb == PDC_SECOND_BUF) { 594 + pointer_reg += ATMEL_PDC_SCND_BUF_OFF; 595 + counter_reg += ATMEL_PDC_SCND_BUF_OFF; 596 + } 597 + 598 + atmci_writel(host, pointer_reg, sg_dma_address(host->sg)); 599 + if (host->data_size <= sg_dma_len(host->sg)) { 600 + if (host->data_size & 0x3) { 601 + /* If size is different from modulo 4, transfer bytes */ 602 + atmci_writel(host, counter_reg, host->data_size); 603 + atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCFBYTE); 604 + } else { 605 + /* Else transfer 32-bits words */ 606 + atmci_writel(host, counter_reg, host->data_size / 4); 607 + } 608 + host->data_size = 0; 609 + } else { 610 + /* We assume the size of a page is 32-bits aligned */ 611 + atmci_writel(host, counter_reg, sg_dma_len(host->sg) / 4); 612 + host->data_size -= sg_dma_len(host->sg); 613 + if (host->data_size) 614 + host->sg = sg_next(host->sg); 611 615 } 612 616 } 613 617 614 - 
/* This function is called by the DMA driver from tasklet context. */ 618 + /* 619 + * Configure PDC buffer according to the data size ie configuring one or two 620 + * buffers. Don't use this function if you want to configure only the second 621 + * buffer. In this case, use atmci_pdc_set_single_buf. 622 + */ 623 + static void atmci_pdc_set_both_buf(struct atmel_mci *host, int dir) 624 + { 625 + atmci_pdc_set_single_buf(host, dir, PDC_FIRST_BUF); 626 + if (host->data_size) 627 + atmci_pdc_set_single_buf(host, dir, PDC_SECOND_BUF); 628 + } 629 + 630 + /* 631 + * Unmap sg lists, called when transfer is finished. 632 + */ 633 + static void atmci_pdc_cleanup(struct atmel_mci *host) 634 + { 635 + struct mmc_data *data = host->data; 636 + 637 + if (data) 638 + dma_unmap_sg(&host->pdev->dev, 639 + data->sg, data->sg_len, 640 + ((data->flags & MMC_DATA_WRITE) 641 + ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); 642 + } 643 + 644 + /* 645 + * Disable PDC transfers. Update pending flags to EVENT_XFER_COMPLETE after 646 + * having received ATMCI_TXBUFE or ATMCI_RXBUFF interrupt. Enable ATMCI_NOTBUSY 647 + * interrupt needed for both transfer directions. 648 + */ 649 + static void atmci_pdc_complete(struct atmel_mci *host) 650 + { 651 + atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS); 652 + atmci_pdc_cleanup(host); 653 + 654 + /* 655 + * If the card was removed, data will be NULL. No point trying 656 + * to send the stop command or waiting for NBUSY in this case. 657 + */ 658 + if (host->data) { 659 + atmci_set_pending(host, EVENT_XFER_COMPLETE); 660 + tasklet_schedule(&host->tasklet); 661 + atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); 662 + } 663 + } 664 + 665 + static void atmci_dma_cleanup(struct atmel_mci *host) 666 + { 667 + struct mmc_data *data = host->data; 668 + 669 + if (data) 670 + dma_unmap_sg(host->dma.chan->device->dev, 671 + data->sg, data->sg_len, 672 + ((data->flags & MMC_DATA_WRITE) 673 + ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE)); 674 + } 675 + 676 + /* 677 + * This function is called by the DMA driver from tasklet context. 678 + */ 615 679 static void atmci_dma_complete(void *arg) 616 680 { 617 681 struct atmel_mci *host = arg; ··· 695 607 696 608 dev_vdbg(&host->pdev->dev, "DMA complete\n"); 697 609 698 - if (atmci_is_mci2()) 610 + if (host->caps.has_dma) 699 611 /* Disable DMA hardware handshaking on MCI */ 700 - mci_writel(host, DMA, mci_readl(host, DMA) & ~MCI_DMAEN); 612 + atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN); 701 613 702 614 atmci_dma_cleanup(host); 703 615 ··· 729 641 * completion callback" rule of the dma engine 730 642 * framework. 731 643 */ 732 - mci_writel(host, IER, MCI_NOTBUSY); 644 + atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); 733 645 } 734 646 } 735 - 736 - static int 737 - atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) 738 - { 739 - struct dma_chan *chan; 740 - struct dma_async_tx_descriptor *desc; 741 - struct scatterlist *sg; 742 - unsigned int i; 743 - enum dma_data_direction direction; 744 - unsigned int sglen; 745 - 746 - /* 747 - * We don't do DMA on "complex" transfers, i.e. with 748 - * non-word-aligned buffers or lengths. Also, we don't bother 749 - * with all the DMA setup overhead for short transfers. 
750 - */ 751 - if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD) 752 - return -EINVAL; 753 - if (data->blksz & 3) 754 - return -EINVAL; 755 - 756 - for_each_sg(data->sg, sg, data->sg_len, i) { 757 - if (sg->offset & 3 || sg->length & 3) 758 - return -EINVAL; 759 - } 760 - 761 - /* If we don't have a channel, we can't do DMA */ 762 - chan = host->dma.chan; 763 - if (chan) 764 - host->data_chan = chan; 765 - 766 - if (!chan) 767 - return -ENODEV; 768 - 769 - if (atmci_is_mci2()) 770 - mci_writel(host, DMA, MCI_DMA_CHKSIZE(3) | MCI_DMAEN); 771 - 772 - if (data->flags & MMC_DATA_READ) 773 - direction = DMA_FROM_DEVICE; 774 - else 775 - direction = DMA_TO_DEVICE; 776 - 777 - sglen = dma_map_sg(chan->device->dev, data->sg, 778 - data->sg_len, direction); 779 - 780 - desc = chan->device->device_prep_slave_sg(chan, 781 - data->sg, sglen, direction, 782 - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 783 - if (!desc) 784 - goto unmap_exit; 785 - 786 - host->dma.data_desc = desc; 787 - desc->callback = atmci_dma_complete; 788 - desc->callback_param = host; 789 - 790 - return 0; 791 - unmap_exit: 792 - dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction); 793 - return -ENOMEM; 794 - } 795 - 796 - static void atmci_submit_data(struct atmel_mci *host) 797 - { 798 - struct dma_chan *chan = host->data_chan; 799 - struct dma_async_tx_descriptor *desc = host->dma.data_desc; 800 - 801 - if (chan) { 802 - dmaengine_submit(desc); 803 - dma_async_issue_pending(chan); 804 - } 805 - } 806 - 807 - #else /* CONFIG_MMC_ATMELMCI_DMA */ 808 - 809 - static int atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) 810 - { 811 - return -ENOSYS; 812 - } 813 - 814 - static void atmci_submit_data(struct atmel_mci *host) {} 815 - 816 - static void atmci_stop_dma(struct atmel_mci *host) 817 - { 818 - /* Data transfer was stopped by the interrupt handler */ 819 - atmci_set_pending(host, EVENT_XFER_COMPLETE); 820 - mci_writel(host, IER, MCI_NOTBUSY); 821 - } 822 - 823 - #endif 
/* CONFIG_MMC_ATMELMCI_DMA */ 824 647 825 648 /* 826 649 * Returns a mask of interrupt flags to be enabled after the whole ··· 743 744 744 745 data->error = -EINPROGRESS; 745 746 747 + host->sg = data->sg; 748 + host->data = data; 749 + host->data_chan = NULL; 750 + 751 + iflags = ATMCI_DATA_ERROR_FLAGS; 752 + 753 + /* 754 + * Errata: MMC data write operation with less than 12 755 + * bytes is impossible. 756 + * 757 + * Errata: MCI Transmit Data Register (TDR) FIFO 758 + * corruption when length is not multiple of 4. 759 + */ 760 + if (data->blocks * data->blksz < 12 761 + || (data->blocks * data->blksz) & 3) 762 + host->need_reset = true; 763 + 764 + host->pio_offset = 0; 765 + if (data->flags & MMC_DATA_READ) 766 + iflags |= ATMCI_RXRDY; 767 + else 768 + iflags |= ATMCI_TXRDY; 769 + 770 + return iflags; 771 + } 772 + 773 + /* 774 + * Set interrupt flags and set block length into the MCI mode register even 775 + * if this value is also accessible in the MCI block register. It seems to be 776 + * necessary before the High Speed MCI version. It also map sg and configure 777 + * PDC registers. 
778 + */ 779 + static u32 780 + atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data) 781 + { 782 + u32 iflags, tmp; 783 + unsigned int sg_len; 784 + enum dma_data_direction dir; 785 + 786 + data->error = -EINPROGRESS; 787 + 788 + host->data = data; 789 + host->sg = data->sg; 790 + iflags = ATMCI_DATA_ERROR_FLAGS; 791 + 792 + /* Enable pdc mode */ 793 + atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCMODE); 794 + 795 + if (data->flags & MMC_DATA_READ) { 796 + dir = DMA_FROM_DEVICE; 797 + iflags |= ATMCI_ENDRX | ATMCI_RXBUFF; 798 + } else { 799 + dir = DMA_TO_DEVICE; 800 + iflags |= ATMCI_ENDTX | ATMCI_TXBUFE; 801 + } 802 + 803 + /* Set BLKLEN */ 804 + tmp = atmci_readl(host, ATMCI_MR); 805 + tmp &= 0x0000ffff; 806 + tmp |= ATMCI_BLKLEN(data->blksz); 807 + atmci_writel(host, ATMCI_MR, tmp); 808 + 809 + /* Configure PDC */ 810 + host->data_size = data->blocks * data->blksz; 811 + sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir); 812 + if (host->data_size) 813 + atmci_pdc_set_both_buf(host, 814 + ((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT)); 815 + 816 + return iflags; 817 + } 818 + 819 + static u32 820 + atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) 821 + { 822 + struct dma_chan *chan; 823 + struct dma_async_tx_descriptor *desc; 824 + struct scatterlist *sg; 825 + unsigned int i; 826 + enum dma_data_direction direction; 827 + unsigned int sglen; 828 + u32 iflags; 829 + 830 + data->error = -EINPROGRESS; 831 + 746 832 WARN_ON(host->data); 747 833 host->sg = NULL; 748 834 host->data = data; 749 835 750 836 iflags = ATMCI_DATA_ERROR_FLAGS; 751 - if (atmci_prepare_data_dma(host, data)) { 752 - host->data_chan = NULL; 753 837 754 - /* 755 - * Errata: MMC data write operation with less than 12 756 - * bytes is impossible. 757 - * 758 - * Errata: MCI Transmit Data Register (TDR) FIFO 759 - * corruption when length is not multiple of 4. 
760 - */ 761 - if (data->blocks * data->blksz < 12 762 - || (data->blocks * data->blksz) & 3) 763 - host->need_reset = true; 838 + /* 839 + * We don't do DMA on "complex" transfers, i.e. with 840 + * non-word-aligned buffers or lengths. Also, we don't bother 841 + * with all the DMA setup overhead for short transfers. 842 + */ 843 + if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD) 844 + return atmci_prepare_data(host, data); 845 + if (data->blksz & 3) 846 + return atmci_prepare_data(host, data); 764 847 765 - host->sg = data->sg; 766 - host->pio_offset = 0; 767 - if (data->flags & MMC_DATA_READ) 768 - iflags |= MCI_RXRDY; 769 - else 770 - iflags |= MCI_TXRDY; 848 + for_each_sg(data->sg, sg, data->sg_len, i) { 849 + if (sg->offset & 3 || sg->length & 3) 850 + return atmci_prepare_data(host, data); 771 851 } 772 852 853 + /* If we don't have a channel, we can't do DMA */ 854 + chan = host->dma.chan; 855 + if (chan) 856 + host->data_chan = chan; 857 + 858 + if (!chan) 859 + return -ENODEV; 860 + 861 + if (host->caps.has_dma) 862 + atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(3) | ATMCI_DMAEN); 863 + 864 + if (data->flags & MMC_DATA_READ) 865 + direction = DMA_FROM_DEVICE; 866 + else 867 + direction = DMA_TO_DEVICE; 868 + 869 + sglen = dma_map_sg(chan->device->dev, data->sg, 870 + data->sg_len, direction); 871 + 872 + desc = chan->device->device_prep_slave_sg(chan, 873 + data->sg, sglen, direction, 874 + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 875 + if (!desc) 876 + goto unmap_exit; 877 + 878 + host->dma.data_desc = desc; 879 + desc->callback = atmci_dma_complete; 880 + desc->callback_param = host; 881 + 773 882 return iflags; 883 + unmap_exit: 884 + dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction); 885 + return -ENOMEM; 774 886 } 775 887 888 + static void 889 + atmci_submit_data(struct atmel_mci *host, struct mmc_data *data) 890 + { 891 + return; 892 + } 893 + 894 + /* 895 + * Start PDC according to transfer direction. 
896 + */ 897 + static void 898 + atmci_submit_data_pdc(struct atmel_mci *host, struct mmc_data *data) 899 + { 900 + if (data->flags & MMC_DATA_READ) 901 + atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN); 902 + else 903 + atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); 904 + } 905 + 906 + static void 907 + atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) 908 + { 909 + struct dma_chan *chan = host->data_chan; 910 + struct dma_async_tx_descriptor *desc = host->dma.data_desc; 911 + 912 + if (chan) { 913 + dmaengine_submit(desc); 914 + dma_async_issue_pending(chan); 915 + } 916 + } 917 + 918 + static void atmci_stop_transfer(struct atmel_mci *host) 919 + { 920 + atmci_set_pending(host, EVENT_XFER_COMPLETE); 921 + atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); 922 + } 923 + 924 + /* 925 + * Stop data transfer because error(s) occured. 926 + */ 927 + static void atmci_stop_transfer_pdc(struct atmel_mci *host) 928 + { 929 + atmci_set_pending(host, EVENT_XFER_COMPLETE); 930 + atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); 931 + } 932 + 933 + static void atmci_stop_transfer_dma(struct atmel_mci *host) 934 + { 935 + struct dma_chan *chan = host->data_chan; 936 + 937 + if (chan) { 938 + dmaengine_terminate_all(chan); 939 + atmci_dma_cleanup(host); 940 + } else { 941 + /* Data transfer was stopped by the interrupt handler */ 942 + atmci_set_pending(host, EVENT_XFER_COMPLETE); 943 + atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); 944 + } 945 + } 946 + 947 + /* 948 + * Start a request: prepare data if needed, prepare the command and activate 949 + * interrupts. 
950 + */ 776 951 static void atmci_start_request(struct atmel_mci *host, 777 952 struct atmel_mci_slot *slot) 778 953 { ··· 965 792 host->data_status = 0; 966 793 967 794 if (host->need_reset) { 968 - mci_writel(host, CR, MCI_CR_SWRST); 969 - mci_writel(host, CR, MCI_CR_MCIEN); 970 - mci_writel(host, MR, host->mode_reg); 971 - if (atmci_is_mci2()) 972 - mci_writel(host, CFG, host->cfg_reg); 795 + atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); 796 + atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN); 797 + atmci_writel(host, ATMCI_MR, host->mode_reg); 798 + if (host->caps.has_cfg_reg) 799 + atmci_writel(host, ATMCI_CFG, host->cfg_reg); 973 800 host->need_reset = false; 974 801 } 975 - mci_writel(host, SDCR, slot->sdc_reg); 802 + atmci_writel(host, ATMCI_SDCR, slot->sdc_reg); 976 803 977 - iflags = mci_readl(host, IMR); 978 - if (iflags & ~(MCI_SDIOIRQA | MCI_SDIOIRQB)) 804 + iflags = atmci_readl(host, ATMCI_IMR); 805 + if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB)) 979 806 dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n", 980 807 iflags); 981 808 982 809 if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) { 983 810 /* Send init sequence (74 clock cycles) */ 984 - mci_writel(host, CMDR, MCI_CMDR_SPCMD_INIT); 985 - while (!(mci_readl(host, SR) & MCI_CMDRDY)) 811 + atmci_writel(host, ATMCI_CMDR, ATMCI_CMDR_SPCMD_INIT); 812 + while (!(atmci_readl(host, ATMCI_SR) & ATMCI_CMDRDY)) 986 813 cpu_relax(); 987 814 } 988 815 iflags = 0; ··· 991 818 atmci_set_timeout(host, slot, data); 992 819 993 820 /* Must set block count/size before sending command */ 994 - mci_writel(host, BLKR, MCI_BCNT(data->blocks) 995 - | MCI_BLKLEN(data->blksz)); 821 + atmci_writel(host, ATMCI_BLKR, ATMCI_BCNT(data->blocks) 822 + | ATMCI_BLKLEN(data->blksz)); 996 823 dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n", 997 - MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz)); 824 + ATMCI_BCNT(data->blocks) | ATMCI_BLKLEN(data->blksz)); 998 825 999 - iflags |= atmci_prepare_data(host, 
data); 826 + iflags |= host->prepare_data(host, data); 1000 827 } 1001 828 1002 - iflags |= MCI_CMDRDY; 829 + iflags |= ATMCI_CMDRDY; 1003 830 cmd = mrq->cmd; 1004 831 cmdflags = atmci_prepare_command(slot->mmc, cmd); 1005 - atmci_start_command(host, cmd, cmdflags); 832 + atmci_send_command(host, cmd, cmdflags); 1006 833 1007 834 if (data) 1008 - atmci_submit_data(host); 835 + host->submit_data(host, data); 1009 836 1010 837 if (mrq->stop) { 1011 838 host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop); 1012 - host->stop_cmdr |= MCI_CMDR_STOP_XFER; 839 + host->stop_cmdr |= ATMCI_CMDR_STOP_XFER; 1013 840 if (!(data->flags & MMC_DATA_WRITE)) 1014 - host->stop_cmdr |= MCI_CMDR_TRDIR_READ; 841 + host->stop_cmdr |= ATMCI_CMDR_TRDIR_READ; 1015 842 if (data->flags & MMC_DATA_STREAM) 1016 - host->stop_cmdr |= MCI_CMDR_STREAM; 843 + host->stop_cmdr |= ATMCI_CMDR_STREAM; 1017 844 else 1018 - host->stop_cmdr |= MCI_CMDR_MULTI_BLOCK; 845 + host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK; 1019 846 } 1020 847 1021 848 /* ··· 1024 851 * conditions (e.g. command and data complete, but stop not 1025 852 * prepared yet.) 
1026 853 */ 1027 - mci_writel(host, IER, iflags); 854 + atmci_writel(host, ATMCI_IER, iflags); 1028 855 } 1029 856 1030 857 static void atmci_queue_request(struct atmel_mci *host, ··· 1082 909 struct atmel_mci *host = slot->host; 1083 910 unsigned int i; 1084 911 1085 - slot->sdc_reg &= ~MCI_SDCBUS_MASK; 912 + slot->sdc_reg &= ~ATMCI_SDCBUS_MASK; 1086 913 switch (ios->bus_width) { 1087 914 case MMC_BUS_WIDTH_1: 1088 - slot->sdc_reg |= MCI_SDCBUS_1BIT; 915 + slot->sdc_reg |= ATMCI_SDCBUS_1BIT; 1089 916 break; 1090 917 case MMC_BUS_WIDTH_4: 1091 - slot->sdc_reg |= MCI_SDCBUS_4BIT; 918 + slot->sdc_reg |= ATMCI_SDCBUS_4BIT; 1092 919 break; 1093 920 } 1094 921 ··· 1099 926 spin_lock_bh(&host->lock); 1100 927 if (!host->mode_reg) { 1101 928 clk_enable(host->mck); 1102 - mci_writel(host, CR, MCI_CR_SWRST); 1103 - mci_writel(host, CR, MCI_CR_MCIEN); 1104 - if (atmci_is_mci2()) 1105 - mci_writel(host, CFG, host->cfg_reg); 929 + atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); 930 + atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN); 931 + if (host->caps.has_cfg_reg) 932 + atmci_writel(host, ATMCI_CFG, host->cfg_reg); 1106 933 } 1107 934 1108 935 /* ··· 1110 937 * core ios update when finding the minimum. 1111 938 */ 1112 939 slot->clock = ios->clock; 1113 - for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { 940 + for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { 1114 941 if (host->slot[i] && host->slot[i]->clock 1115 942 && host->slot[i]->clock < clock_min) 1116 943 clock_min = host->slot[i]->clock; ··· 1125 952 clkdiv = 255; 1126 953 } 1127 954 1128 - host->mode_reg = MCI_MR_CLKDIV(clkdiv); 955 + host->mode_reg = ATMCI_MR_CLKDIV(clkdiv); 1129 956 1130 957 /* 1131 958 * WRPROOF and RDPROOF prevent overruns/underruns by 1132 959 * stopping the clock when the FIFO is full/empty. 1133 960 * This state is not expected to last for long. 
1134 961 */ 1135 - if (mci_has_rwproof()) 1136 - host->mode_reg |= (MCI_MR_WRPROOF | MCI_MR_RDPROOF); 962 + if (host->caps.has_rwproof) 963 + host->mode_reg |= (ATMCI_MR_WRPROOF | ATMCI_MR_RDPROOF); 1137 964 1138 - if (atmci_is_mci2()) { 965 + if (host->caps.has_cfg_reg) { 1139 966 /* setup High Speed mode in relation with card capacity */ 1140 967 if (ios->timing == MMC_TIMING_SD_HS) 1141 - host->cfg_reg |= MCI_CFG_HSMODE; 968 + host->cfg_reg |= ATMCI_CFG_HSMODE; 1142 969 else 1143 - host->cfg_reg &= ~MCI_CFG_HSMODE; 970 + host->cfg_reg &= ~ATMCI_CFG_HSMODE; 1144 971 } 1145 972 1146 973 if (list_empty(&host->queue)) { 1147 - mci_writel(host, MR, host->mode_reg); 1148 - if (atmci_is_mci2()) 1149 - mci_writel(host, CFG, host->cfg_reg); 974 + atmci_writel(host, ATMCI_MR, host->mode_reg); 975 + if (host->caps.has_cfg_reg) 976 + atmci_writel(host, ATMCI_CFG, host->cfg_reg); 1150 977 } else { 1151 978 host->need_clock_update = true; 1152 979 } ··· 1157 984 1158 985 spin_lock_bh(&host->lock); 1159 986 slot->clock = 0; 1160 - for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { 987 + for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { 1161 988 if (host->slot[i] && host->slot[i]->clock) { 1162 989 any_slot_active = true; 1163 990 break; 1164 991 } 1165 992 } 1166 993 if (!any_slot_active) { 1167 - mci_writel(host, CR, MCI_CR_MCIDIS); 994 + atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS); 1168 995 if (host->mode_reg) { 1169 - mci_readl(host, MR); 996 + atmci_readl(host, ATMCI_MR); 1170 997 clk_disable(host->mck); 1171 998 } 1172 999 host->mode_reg = 0; ··· 1230 1057 struct atmel_mci *host = slot->host; 1231 1058 1232 1059 if (enable) 1233 - mci_writel(host, IER, slot->sdio_irq); 1060 + atmci_writel(host, ATMCI_IER, slot->sdio_irq); 1234 1061 else 1235 - mci_writel(host, IDR, slot->sdio_irq); 1062 + atmci_writel(host, ATMCI_IDR, slot->sdio_irq); 1236 1063 } 1237 1064 1238 1065 static const struct mmc_host_ops atmci_ops = { ··· 1259 1086 * busy transferring data. 
1260 1087 */ 1261 1088 if (host->need_clock_update) { 1262 - mci_writel(host, MR, host->mode_reg); 1263 - if (atmci_is_mci2()) 1264 - mci_writel(host, CFG, host->cfg_reg); 1089 + atmci_writel(host, ATMCI_MR, host->mode_reg); 1090 + if (host->caps.has_cfg_reg) 1091 + atmci_writel(host, ATMCI_CFG, host->cfg_reg); 1265 1092 } 1266 1093 1267 1094 host->cur_slot->mrq = NULL; ··· 1290 1117 u32 status = host->cmd_status; 1291 1118 1292 1119 /* Read the response from the card (up to 16 bytes) */ 1293 - cmd->resp[0] = mci_readl(host, RSPR); 1294 - cmd->resp[1] = mci_readl(host, RSPR); 1295 - cmd->resp[2] = mci_readl(host, RSPR); 1296 - cmd->resp[3] = mci_readl(host, RSPR); 1120 + cmd->resp[0] = atmci_readl(host, ATMCI_RSPR); 1121 + cmd->resp[1] = atmci_readl(host, ATMCI_RSPR); 1122 + cmd->resp[2] = atmci_readl(host, ATMCI_RSPR); 1123 + cmd->resp[3] = atmci_readl(host, ATMCI_RSPR); 1297 1124 1298 - if (status & MCI_RTOE) 1125 + if (status & ATMCI_RTOE) 1299 1126 cmd->error = -ETIMEDOUT; 1300 - else if ((cmd->flags & MMC_RSP_CRC) && (status & MCI_RCRCE)) 1127 + else if ((cmd->flags & MMC_RSP_CRC) && (status & ATMCI_RCRCE)) 1301 1128 cmd->error = -EILSEQ; 1302 - else if (status & (MCI_RINDE | MCI_RDIRE | MCI_RENDE)) 1129 + else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE)) 1303 1130 cmd->error = -EIO; 1304 1131 else 1305 1132 cmd->error = 0; ··· 1309 1136 "command error: status=0x%08x\n", status); 1310 1137 1311 1138 if (cmd->data) { 1312 - atmci_stop_dma(host); 1139 + host->stop_transfer(host); 1313 1140 host->data = NULL; 1314 - mci_writel(host, IDR, MCI_NOTBUSY 1315 - | MCI_TXRDY | MCI_RXRDY 1141 + atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY 1142 + | ATMCI_TXRDY | ATMCI_RXRDY 1316 1143 | ATMCI_DATA_ERROR_FLAGS); 1317 1144 } 1318 1145 } ··· 1364 1191 * Reset controller to terminate any ongoing 1365 1192 * commands or data transfers. 
1366 1193 */ 1367 - mci_writel(host, CR, MCI_CR_SWRST); 1368 - mci_writel(host, CR, MCI_CR_MCIEN); 1369 - mci_writel(host, MR, host->mode_reg); 1370 - if (atmci_is_mci2()) 1371 - mci_writel(host, CFG, host->cfg_reg); 1194 + atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); 1195 + atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN); 1196 + atmci_writel(host, ATMCI_MR, host->mode_reg); 1197 + if (host->caps.has_cfg_reg) 1198 + atmci_writel(host, ATMCI_CFG, host->cfg_reg); 1372 1199 1373 1200 host->data = NULL; 1374 1201 host->cmd = NULL; ··· 1383 1210 /* fall through */ 1384 1211 case STATE_SENDING_DATA: 1385 1212 mrq->data->error = -ENOMEDIUM; 1386 - atmci_stop_dma(host); 1213 + host->stop_transfer(host); 1387 1214 break; 1388 1215 case STATE_DATA_BUSY: 1389 1216 case STATE_DATA_ERROR: ··· 1434 1261 dev_vdbg(&host->pdev->dev, 1435 1262 "tasklet: state %u pending/completed/mask %lx/%lx/%x\n", 1436 1263 state, host->pending_events, host->completed_events, 1437 - mci_readl(host, IMR)); 1264 + atmci_readl(host, ATMCI_IMR)); 1438 1265 1439 1266 do { 1440 1267 prev_state = state; ··· 1462 1289 case STATE_SENDING_DATA: 1463 1290 if (atmci_test_and_clear_pending(host, 1464 1291 EVENT_DATA_ERROR)) { 1465 - atmci_stop_dma(host); 1292 + host->stop_transfer(host); 1466 1293 if (data->stop) 1467 - send_stop_cmd(host, data); 1294 + atmci_send_stop_cmd(host, data); 1468 1295 state = STATE_DATA_ERROR; 1469 1296 break; 1470 1297 } ··· 1486 1313 atmci_set_completed(host, EVENT_DATA_COMPLETE); 1487 1314 status = host->data_status; 1488 1315 if (unlikely(status & ATMCI_DATA_ERROR_FLAGS)) { 1489 - if (status & MCI_DTOE) { 1316 + if (status & ATMCI_DTOE) { 1490 1317 dev_dbg(&host->pdev->dev, 1491 1318 "data timeout error\n"); 1492 1319 data->error = -ETIMEDOUT; 1493 - } else if (status & MCI_DCRCE) { 1320 + } else if (status & ATMCI_DCRCE) { 1494 1321 dev_dbg(&host->pdev->dev, 1495 1322 "data CRC error\n"); 1496 1323 data->error = -EILSEQ; ··· 1503 1330 } else { 1504 1331 data->bytes_xfered = 
data->blocks * data->blksz; 1505 1332 data->error = 0; 1506 - mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS); 1333 + atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS); 1507 1334 } 1508 1335 1509 1336 if (!data->stop) { ··· 1513 1340 1514 1341 prev_state = state = STATE_SENDING_STOP; 1515 1342 if (!data->error) 1516 - send_stop_cmd(host, data); 1343 + atmci_send_stop_cmd(host, data); 1517 1344 /* fall through */ 1518 1345 1519 1346 case STATE_SENDING_STOP: ··· 1553 1380 unsigned int nbytes = 0; 1554 1381 1555 1382 do { 1556 - value = mci_readl(host, RDR); 1383 + value = atmci_readl(host, ATMCI_RDR); 1557 1384 if (likely(offset + 4 <= sg->length)) { 1558 1385 put_unaligned(value, (u32 *)(buf + offset)); 1559 1386 ··· 1585 1412 nbytes += offset; 1586 1413 } 1587 1414 1588 - status = mci_readl(host, SR); 1415 + status = atmci_readl(host, ATMCI_SR); 1589 1416 if (status & ATMCI_DATA_ERROR_FLAGS) { 1590 - mci_writel(host, IDR, (MCI_NOTBUSY | MCI_RXRDY 1417 + atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_RXRDY 1591 1418 | ATMCI_DATA_ERROR_FLAGS)); 1592 1419 host->data_status = status; 1593 1420 data->bytes_xfered += nbytes; ··· 1596 1423 tasklet_schedule(&host->tasklet); 1597 1424 return; 1598 1425 } 1599 - } while (status & MCI_RXRDY); 1426 + } while (status & ATMCI_RXRDY); 1600 1427 1601 1428 host->pio_offset = offset; 1602 1429 data->bytes_xfered += nbytes; ··· 1604 1431 return; 1605 1432 1606 1433 done: 1607 - mci_writel(host, IDR, MCI_RXRDY); 1608 - mci_writel(host, IER, MCI_NOTBUSY); 1434 + atmci_writel(host, ATMCI_IDR, ATMCI_RXRDY); 1435 + atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); 1609 1436 data->bytes_xfered += nbytes; 1610 1437 smp_wmb(); 1611 1438 atmci_set_pending(host, EVENT_XFER_COMPLETE); ··· 1624 1451 do { 1625 1452 if (likely(offset + 4 <= sg->length)) { 1626 1453 value = get_unaligned((u32 *)(buf + offset)); 1627 - mci_writel(host, TDR, value); 1454 + atmci_writel(host, ATMCI_TDR, value); 1628 1455 1629 1456 offset += 4; 1630 1457 nbytes += 
4; ··· 1645 1472 1646 1473 host->sg = sg = sg_next(sg); 1647 1474 if (!sg) { 1648 - mci_writel(host, TDR, value); 1475 + atmci_writel(host, ATMCI_TDR, value); 1649 1476 goto done; 1650 1477 } 1651 1478 1652 1479 offset = 4 - remaining; 1653 1480 buf = sg_virt(sg); 1654 1481 memcpy((u8 *)&value + remaining, buf, offset); 1655 - mci_writel(host, TDR, value); 1482 + atmci_writel(host, ATMCI_TDR, value); 1656 1483 nbytes += offset; 1657 1484 } 1658 1485 1659 - status = mci_readl(host, SR); 1486 + status = atmci_readl(host, ATMCI_SR); 1660 1487 if (status & ATMCI_DATA_ERROR_FLAGS) { 1661 - mci_writel(host, IDR, (MCI_NOTBUSY | MCI_TXRDY 1488 + atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_TXRDY 1662 1489 | ATMCI_DATA_ERROR_FLAGS)); 1663 1490 host->data_status = status; 1664 1491 data->bytes_xfered += nbytes; ··· 1667 1494 tasklet_schedule(&host->tasklet); 1668 1495 return; 1669 1496 } 1670 - } while (status & MCI_TXRDY); 1497 + } while (status & ATMCI_TXRDY); 1671 1498 1672 1499 host->pio_offset = offset; 1673 1500 data->bytes_xfered += nbytes; ··· 1675 1502 return; 1676 1503 1677 1504 done: 1678 - mci_writel(host, IDR, MCI_TXRDY); 1679 - mci_writel(host, IER, MCI_NOTBUSY); 1505 + atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY); 1506 + atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); 1680 1507 data->bytes_xfered += nbytes; 1681 1508 smp_wmb(); 1682 1509 atmci_set_pending(host, EVENT_XFER_COMPLETE); ··· 1684 1511 1685 1512 static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status) 1686 1513 { 1687 - mci_writel(host, IDR, MCI_CMDRDY); 1514 + atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY); 1688 1515 1689 1516 host->cmd_status = status; 1690 1517 smp_wmb(); ··· 1696 1523 { 1697 1524 int i; 1698 1525 1699 - for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { 1526 + for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { 1700 1527 struct atmel_mci_slot *slot = host->slot[i]; 1701 1528 if (slot && (status & slot->sdio_irq)) { 1702 1529 mmc_signal_sdio_irq(slot->mmc); ··· 1712 1539 unsigned 
int pass_count = 0; 1713 1540 1714 1541 do { 1715 - status = mci_readl(host, SR); 1716 - mask = mci_readl(host, IMR); 1542 + status = atmci_readl(host, ATMCI_SR); 1543 + mask = atmci_readl(host, ATMCI_IMR); 1717 1544 pending = status & mask; 1718 1545 if (!pending) 1719 1546 break; 1720 1547 1721 1548 if (pending & ATMCI_DATA_ERROR_FLAGS) { 1722 - mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS 1723 - | MCI_RXRDY | MCI_TXRDY); 1724 - pending &= mci_readl(host, IMR); 1549 + atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS 1550 + | ATMCI_RXRDY | ATMCI_TXRDY); 1551 + pending &= atmci_readl(host, ATMCI_IMR); 1725 1552 1726 1553 host->data_status = status; 1727 1554 smp_wmb(); 1728 1555 atmci_set_pending(host, EVENT_DATA_ERROR); 1729 1556 tasklet_schedule(&host->tasklet); 1730 1557 } 1731 - if (pending & MCI_NOTBUSY) { 1732 - mci_writel(host, IDR, 1733 - ATMCI_DATA_ERROR_FLAGS | MCI_NOTBUSY); 1558 + 1559 + if (pending & ATMCI_TXBUFE) { 1560 + atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE); 1561 + atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX); 1562 + /* 1563 + * We can receive this interruption before having configured 1564 + * the second pdc buffer, so we need to reconfigure first and 1565 + * second buffers again 1566 + */ 1567 + if (host->data_size) { 1568 + atmci_pdc_set_both_buf(host, XFER_TRANSMIT); 1569 + atmci_writel(host, ATMCI_IER, ATMCI_ENDTX); 1570 + atmci_writel(host, ATMCI_IER, ATMCI_TXBUFE); 1571 + } else { 1572 + atmci_pdc_complete(host); 1573 + } 1574 + } else if (pending & ATMCI_ENDTX) { 1575 + atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX); 1576 + 1577 + if (host->data_size) { 1578 + atmci_pdc_set_single_buf(host, 1579 + XFER_TRANSMIT, PDC_SECOND_BUF); 1580 + atmci_writel(host, ATMCI_IER, ATMCI_ENDTX); 1581 + } 1582 + } 1583 + 1584 + if (pending & ATMCI_RXBUFF) { 1585 + atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF); 1586 + atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX); 1587 + /* 1588 + * We can receive this interruption before having configured 1589 + * the second 
pdc buffer, so we need to reconfigure first and 1590 + * second buffers again 1591 + */ 1592 + if (host->data_size) { 1593 + atmci_pdc_set_both_buf(host, XFER_RECEIVE); 1594 + atmci_writel(host, ATMCI_IER, ATMCI_ENDRX); 1595 + atmci_writel(host, ATMCI_IER, ATMCI_RXBUFF); 1596 + } else { 1597 + atmci_pdc_complete(host); 1598 + } 1599 + } else if (pending & ATMCI_ENDRX) { 1600 + atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX); 1601 + 1602 + if (host->data_size) { 1603 + atmci_pdc_set_single_buf(host, 1604 + XFER_RECEIVE, PDC_SECOND_BUF); 1605 + atmci_writel(host, ATMCI_IER, ATMCI_ENDRX); 1606 + } 1607 + } 1608 + 1609 + 1610 + if (pending & ATMCI_NOTBUSY) { 1611 + atmci_writel(host, ATMCI_IDR, 1612 + ATMCI_DATA_ERROR_FLAGS | ATMCI_NOTBUSY); 1734 1613 if (!host->data_status) 1735 1614 host->data_status = status; 1736 1615 smp_wmb(); 1737 1616 atmci_set_pending(host, EVENT_DATA_COMPLETE); 1738 1617 tasklet_schedule(&host->tasklet); 1739 1618 } 1740 - if (pending & MCI_RXRDY) 1619 + if (pending & ATMCI_RXRDY) 1741 1620 atmci_read_data_pio(host); 1742 - if (pending & MCI_TXRDY) 1621 + if (pending & ATMCI_TXRDY) 1743 1622 atmci_write_data_pio(host); 1744 1623 1745 - if (pending & MCI_CMDRDY) 1624 + if (pending & ATMCI_CMDRDY) 1746 1625 atmci_cmd_interrupt(host, status); 1747 1626 1748 - if (pending & (MCI_SDIOIRQA | MCI_SDIOIRQB)) 1627 + if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB)) 1749 1628 atmci_sdio_interrupt(host, status); 1750 1629 1751 1630 } while (pass_count++ < 5); ··· 1846 1621 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 1847 1622 if (sdio_irq) 1848 1623 mmc->caps |= MMC_CAP_SDIO_IRQ; 1849 - if (atmci_is_mci2()) 1624 + if (host->caps.has_highspeed) 1850 1625 mmc->caps |= MMC_CAP_SD_HIGHSPEED; 1851 1626 if (slot_data->bus_width >= 4) 1852 1627 mmc->caps |= MMC_CAP_4_BIT_DATA; ··· 1929 1704 mmc_free_host(slot->mmc); 1930 1705 } 1931 1706 1932 - #ifdef CONFIG_MMC_ATMELMCI_DMA 1933 - static bool filter(struct dma_chan *chan, void *slave) 1707 + static bool 
atmci_filter(struct dma_chan *chan, void *slave) 1934 1708 { 1935 1709 struct mci_dma_data *sl = slave; 1936 1710 ··· 1954 1730 dma_cap_mask_t mask; 1955 1731 1956 1732 setup_dma_addr(pdata->dma_slave, 1957 - host->mapbase + MCI_TDR, 1958 - host->mapbase + MCI_RDR); 1733 + host->mapbase + ATMCI_TDR, 1734 + host->mapbase + ATMCI_RDR); 1959 1735 1960 1736 /* Try to grab a DMA channel */ 1961 1737 dma_cap_zero(mask); 1962 1738 dma_cap_set(DMA_SLAVE, mask); 1963 1739 host->dma.chan = 1964 - dma_request_channel(mask, filter, pdata->dma_slave); 1740 + dma_request_channel(mask, atmci_filter, pdata->dma_slave); 1965 1741 } 1966 1742 if (!host->dma.chan) 1967 1743 dev_notice(&host->pdev->dev, "DMA not available, using PIO\n"); ··· 1970 1746 "Using %s for DMA transfers\n", 1971 1747 dma_chan_name(host->dma.chan)); 1972 1748 } 1749 + 1750 + static inline unsigned int atmci_get_version(struct atmel_mci *host) 1751 + { 1752 + return atmci_readl(host, ATMCI_VERSION) & 0x00000fff; 1753 + } 1754 + 1755 + /* 1756 + * HSMCI (High Speed MCI) module is not fully compatible with MCI module. 1757 + * HSMCI provides DMA support and a new config register but no more supports 1758 + * PDC. 
1759 + */ 1760 + static void __init atmci_get_cap(struct atmel_mci *host) 1761 + { 1762 + unsigned int version; 1763 + 1764 + version = atmci_get_version(host); 1765 + dev_info(&host->pdev->dev, 1766 + "version: 0x%x\n", version); 1767 + 1768 + host->caps.has_dma = 0; 1769 + host->caps.has_pdc = 0; 1770 + host->caps.has_cfg_reg = 0; 1771 + host->caps.has_cstor_reg = 0; 1772 + host->caps.has_highspeed = 0; 1773 + host->caps.has_rwproof = 0; 1774 + 1775 + /* keep only major version number */ 1776 + switch (version & 0xf00) { 1777 + case 0x100: 1778 + case 0x200: 1779 + host->caps.has_pdc = 1; 1780 + host->caps.has_rwproof = 1; 1781 + break; 1782 + case 0x300: 1783 + case 0x400: 1784 + case 0x500: 1785 + #ifdef CONFIG_AT_HDMAC 1786 + host->caps.has_dma = 1; 1973 1787 #else 1974 - static void atmci_configure_dma(struct atmel_mci *host) {} 1788 + host->caps.has_dma = 0; 1789 + dev_info(&host->pdev->dev, 1790 + "has dma capability but dma engine is not selected, then use pio\n"); 1975 1791 #endif 1792 + host->caps.has_cfg_reg = 1; 1793 + host->caps.has_cstor_reg = 1; 1794 + host->caps.has_highspeed = 1; 1795 + host->caps.has_rwproof = 1; 1796 + break; 1797 + default: 1798 + dev_warn(&host->pdev->dev, 1799 + "Unmanaged mci version, set minimum capabilities\n"); 1800 + break; 1801 + } 1802 + } 1976 1803 1977 1804 static int __init atmci_probe(struct platform_device *pdev) 1978 1805 { ··· 2064 1789 goto err_ioremap; 2065 1790 2066 1791 clk_enable(host->mck); 2067 - mci_writel(host, CR, MCI_CR_SWRST); 1792 + atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); 2068 1793 host->bus_hz = clk_get_rate(host->mck); 2069 1794 clk_disable(host->mck); 2070 1795 ··· 2076 1801 if (ret) 2077 1802 goto err_request_irq; 2078 1803 2079 - atmci_configure_dma(host); 1804 + /* Get MCI capabilities and set operations according to it */ 1805 + atmci_get_cap(host); 1806 + if (host->caps.has_dma) { 1807 + dev_info(&pdev->dev, "using DMA\n"); 1808 + host->prepare_data = &atmci_prepare_data_dma; 1809 + 
host->submit_data = &atmci_submit_data_dma; 1810 + host->stop_transfer = &atmci_stop_transfer_dma; 1811 + } else if (host->caps.has_pdc) { 1812 + dev_info(&pdev->dev, "using PDC\n"); 1813 + host->prepare_data = &atmci_prepare_data_pdc; 1814 + host->submit_data = &atmci_submit_data_pdc; 1815 + host->stop_transfer = &atmci_stop_transfer_pdc; 1816 + } else { 1817 + dev_info(&pdev->dev, "no DMA, no PDC\n"); 1818 + host->prepare_data = &atmci_prepare_data; 1819 + host->submit_data = &atmci_submit_data; 1820 + host->stop_transfer = &atmci_stop_transfer; 1821 + } 1822 + 1823 + if (host->caps.has_dma) 1824 + atmci_configure_dma(host); 2080 1825 2081 1826 platform_set_drvdata(pdev, host); 2082 1827 ··· 2105 1810 ret = -ENODEV; 2106 1811 if (pdata->slot[0].bus_width) { 2107 1812 ret = atmci_init_slot(host, &pdata->slot[0], 2108 - 0, MCI_SDCSEL_SLOT_A, MCI_SDIOIRQA); 1813 + 0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA); 2109 1814 if (!ret) 2110 1815 nr_slots++; 2111 1816 } 2112 1817 if (pdata->slot[1].bus_width) { 2113 1818 ret = atmci_init_slot(host, &pdata->slot[1], 2114 - 1, MCI_SDCSEL_SLOT_B, MCI_SDIOIRQB); 1819 + 1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB); 2115 1820 if (!ret) 2116 1821 nr_slots++; 2117 1822 } ··· 2128 1833 return 0; 2129 1834 2130 1835 err_init_slot: 2131 - #ifdef CONFIG_MMC_ATMELMCI_DMA 2132 1836 if (host->dma.chan) 2133 1837 dma_release_channel(host->dma.chan); 2134 - #endif 2135 1838 free_irq(irq, host); 2136 1839 err_request_irq: 2137 1840 iounmap(host->regs); ··· 2147 1854 2148 1855 platform_set_drvdata(pdev, NULL); 2149 1856 2150 - for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { 1857 + for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { 2151 1858 if (host->slot[i]) 2152 1859 atmci_cleanup_slot(host->slot[i], i); 2153 1860 } 2154 1861 2155 1862 clk_enable(host->mck); 2156 - mci_writel(host, IDR, ~0UL); 2157 - mci_writel(host, CR, MCI_CR_MCIDIS); 2158 - mci_readl(host, SR); 1863 + atmci_writel(host, ATMCI_IDR, ~0UL); 1864 + atmci_writel(host, ATMCI_CR, 
ATMCI_CR_MCIDIS); 1865 + atmci_readl(host, ATMCI_SR); 2159 1866 clk_disable(host->mck); 2160 1867 2161 1868 #ifdef CONFIG_MMC_ATMELMCI_DMA ··· 2178 1885 struct atmel_mci *host = dev_get_drvdata(dev); 2179 1886 int i; 2180 1887 2181 - for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { 1888 + for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { 2182 1889 struct atmel_mci_slot *slot = host->slot[i]; 2183 1890 int ret; 2184 1891 ··· 2209 1916 int i; 2210 1917 int ret = 0; 2211 1918 2212 - for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { 1919 + for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { 2213 1920 struct atmel_mci_slot *slot = host->slot[i]; 2214 1921 int err; 2215 1922
+5 -5
drivers/mmc/host/au1xmmc.c
··· 55 55 56 56 #ifdef DEBUG 57 57 #define DBG(fmt, idx, args...) \ 58 - printk(KERN_DEBUG "au1xmmc(%d): DEBUG: " fmt, idx, ##args) 58 + pr_debug("au1xmmc(%d): DEBUG: " fmt, idx, ##args) 59 59 #else 60 60 #define DBG(fmt, idx, args...) do {} while (0) 61 61 #endif ··· 268 268 mmccmd |= SD_CMD_RT_3; 269 269 break; 270 270 default: 271 - printk(KERN_INFO "au1xmmc: unhandled response type %02x\n", 271 + pr_info("au1xmmc: unhandled response type %02x\n", 272 272 mmc_resp_type(cmd)); 273 273 return -EINVAL; 274 274 } ··· 1031 1031 #ifdef CONFIG_SOC_AU1200 1032 1032 ret = au1xmmc_dbdma_init(host); 1033 1033 if (ret) 1034 - printk(KERN_INFO DRIVER_NAME ": DBDMA init failed; using PIO\n"); 1034 + pr_info(DRIVER_NAME ": DBDMA init failed; using PIO\n"); 1035 1035 #endif 1036 1036 1037 1037 #ifdef CONFIG_LEDS_CLASS ··· 1056 1056 1057 1057 platform_set_drvdata(pdev, host); 1058 1058 1059 - printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X" 1059 + pr_info(DRIVER_NAME ": MMC Controller %d set up at %8.8X" 1060 1060 " (mode=%s)\n", pdev->id, host->iobase, 1061 1061 host->flags & HOST_F_DMA ? "dma" : "pio"); 1062 1062 ··· 1188 1188 */ 1189 1189 memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev); 1190 1190 if (!memid) 1191 - printk(KERN_ERR "au1xmmc: cannot add memory dbdma dev\n"); 1191 + pr_err("au1xmmc: cannot add memory dbdma dev\n"); 1192 1192 #endif 1193 1193 return platform_driver_register(&au1xmmc_driver); 1194 1194 }
+78 -26
drivers/mmc/host/dw_mmc.c
··· 764 764 return present; 765 765 } 766 766 767 + static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) 768 + { 769 + struct dw_mci_slot *slot = mmc_priv(mmc); 770 + struct dw_mci *host = slot->host; 771 + u32 int_mask; 772 + 773 + /* Enable/disable Slot Specific SDIO interrupt */ 774 + int_mask = mci_readl(host, INTMASK); 775 + if (enb) { 776 + mci_writel(host, INTMASK, 777 + (int_mask | (1 << SDMMC_INT_SDIO(slot->id)))); 778 + } else { 779 + mci_writel(host, INTMASK, 780 + (int_mask & ~(1 << SDMMC_INT_SDIO(slot->id)))); 781 + } 782 + } 783 + 767 784 static const struct mmc_host_ops dw_mci_ops = { 768 - .request = dw_mci_request, 769 - .set_ios = dw_mci_set_ios, 770 - .get_ro = dw_mci_get_ro, 771 - .get_cd = dw_mci_get_cd, 785 + .request = dw_mci_request, 786 + .set_ios = dw_mci_set_ios, 787 + .get_ro = dw_mci_get_ro, 788 + .get_cd = dw_mci_get_cd, 789 + .enable_sdio_irq = dw_mci_enable_sdio_irq, 772 790 }; 773 791 774 792 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq) ··· 1043 1025 buf += len; 1044 1026 cnt -= len; 1045 1027 if (!sg_next(host->sg) || host->part_buf_count == 2) { 1046 - mci_writew(host, DATA, host->part_buf16); 1028 + mci_writew(host, DATA(host->data_offset), 1029 + host->part_buf16); 1047 1030 host->part_buf_count = 0; 1048 1031 } 1049 1032 } ··· 1061 1042 cnt -= len; 1062 1043 /* push data from aligned buffer into fifo */ 1063 1044 for (i = 0; i < items; ++i) 1064 - mci_writew(host, DATA, aligned_buf[i]); 1045 + mci_writew(host, DATA(host->data_offset), 1046 + aligned_buf[i]); 1065 1047 } 1066 1048 } else 1067 1049 #endif 1068 1050 { 1069 1051 u16 *pdata = buf; 1070 1052 for (; cnt >= 2; cnt -= 2) 1071 - mci_writew(host, DATA, *pdata++); 1053 + mci_writew(host, DATA(host->data_offset), *pdata++); 1072 1054 buf = pdata; 1073 1055 } 1074 1056 /* put anything remaining in the part_buf */ 1075 1057 if (cnt) { 1076 1058 dw_mci_set_part_bytes(host, buf, cnt); 1077 1059 if (!sg_next(host->sg)) 1078 - 
mci_writew(host, DATA, host->part_buf16); 1060 + mci_writew(host, DATA(host->data_offset), 1061 + host->part_buf16); 1079 1062 } 1080 1063 } 1081 1064 ··· 1092 1071 int items = len >> 1; 1093 1072 int i; 1094 1073 for (i = 0; i < items; ++i) 1095 - aligned_buf[i] = mci_readw(host, DATA); 1074 + aligned_buf[i] = mci_readw(host, 1075 + DATA(host->data_offset)); 1096 1076 /* memcpy from aligned buffer into output buffer */ 1097 1077 memcpy(buf, aligned_buf, len); 1098 1078 buf += len; ··· 1104 1082 { 1105 1083 u16 *pdata = buf; 1106 1084 for (; cnt >= 2; cnt -= 2) 1107 - *pdata++ = mci_readw(host, DATA); 1085 + *pdata++ = mci_readw(host, DATA(host->data_offset)); 1108 1086 buf = pdata; 1109 1087 } 1110 1088 if (cnt) { 1111 - host->part_buf16 = mci_readw(host, DATA); 1089 + host->part_buf16 = mci_readw(host, DATA(host->data_offset)); 1112 1090 dw_mci_pull_final_bytes(host, buf, cnt); 1113 1091 } 1114 1092 } ··· 1121 1099 buf += len; 1122 1100 cnt -= len; 1123 1101 if (!sg_next(host->sg) || host->part_buf_count == 4) { 1124 - mci_writel(host, DATA, host->part_buf32); 1102 + mci_writel(host, DATA(host->data_offset), 1103 + host->part_buf32); 1125 1104 host->part_buf_count = 0; 1126 1105 } 1127 1106 } ··· 1139 1116 cnt -= len; 1140 1117 /* push data from aligned buffer into fifo */ 1141 1118 for (i = 0; i < items; ++i) 1142 - mci_writel(host, DATA, aligned_buf[i]); 1119 + mci_writel(host, DATA(host->data_offset), 1120 + aligned_buf[i]); 1143 1121 } 1144 1122 } else 1145 1123 #endif 1146 1124 { 1147 1125 u32 *pdata = buf; 1148 1126 for (; cnt >= 4; cnt -= 4) 1149 - mci_writel(host, DATA, *pdata++); 1127 + mci_writel(host, DATA(host->data_offset), *pdata++); 1150 1128 buf = pdata; 1151 1129 } 1152 1130 /* put anything remaining in the part_buf */ 1153 1131 if (cnt) { 1154 1132 dw_mci_set_part_bytes(host, buf, cnt); 1155 1133 if (!sg_next(host->sg)) 1156 - mci_writel(host, DATA, host->part_buf32); 1134 + mci_writel(host, DATA(host->data_offset), 1135 + host->part_buf32); 
1157 1136 } 1158 1137 } 1159 1138 ··· 1170 1145 int items = len >> 2; 1171 1146 int i; 1172 1147 for (i = 0; i < items; ++i) 1173 - aligned_buf[i] = mci_readl(host, DATA); 1148 + aligned_buf[i] = mci_readl(host, 1149 + DATA(host->data_offset)); 1174 1150 /* memcpy from aligned buffer into output buffer */ 1175 1151 memcpy(buf, aligned_buf, len); 1176 1152 buf += len; ··· 1182 1156 { 1183 1157 u32 *pdata = buf; 1184 1158 for (; cnt >= 4; cnt -= 4) 1185 - *pdata++ = mci_readl(host, DATA); 1159 + *pdata++ = mci_readl(host, DATA(host->data_offset)); 1186 1160 buf = pdata; 1187 1161 } 1188 1162 if (cnt) { 1189 - host->part_buf32 = mci_readl(host, DATA); 1163 + host->part_buf32 = mci_readl(host, DATA(host->data_offset)); 1190 1164 dw_mci_pull_final_bytes(host, buf, cnt); 1191 1165 } 1192 1166 } ··· 1199 1173 buf += len; 1200 1174 cnt -= len; 1201 1175 if (!sg_next(host->sg) || host->part_buf_count == 8) { 1202 - mci_writew(host, DATA, host->part_buf); 1176 + mci_writew(host, DATA(host->data_offset), 1177 + host->part_buf); 1203 1178 host->part_buf_count = 0; 1204 1179 } 1205 1180 } ··· 1217 1190 cnt -= len; 1218 1191 /* push data from aligned buffer into fifo */ 1219 1192 for (i = 0; i < items; ++i) 1220 - mci_writeq(host, DATA, aligned_buf[i]); 1193 + mci_writeq(host, DATA(host->data_offset), 1194 + aligned_buf[i]); 1221 1195 } 1222 1196 } else 1223 1197 #endif 1224 1198 { 1225 1199 u64 *pdata = buf; 1226 1200 for (; cnt >= 8; cnt -= 8) 1227 - mci_writeq(host, DATA, *pdata++); 1201 + mci_writeq(host, DATA(host->data_offset), *pdata++); 1228 1202 buf = pdata; 1229 1203 } 1230 1204 /* put anything remaining in the part_buf */ 1231 1205 if (cnt) { 1232 1206 dw_mci_set_part_bytes(host, buf, cnt); 1233 1207 if (!sg_next(host->sg)) 1234 - mci_writeq(host, DATA, host->part_buf); 1208 + mci_writeq(host, DATA(host->data_offset), 1209 + host->part_buf); 1235 1210 } 1236 1211 } 1237 1212 ··· 1248 1219 int items = len >> 3; 1249 1220 int i; 1250 1221 for (i = 0; i < items; ++i) 
1251 - aligned_buf[i] = mci_readq(host, DATA); 1222 + aligned_buf[i] = mci_readq(host, 1223 + DATA(host->data_offset)); 1252 1224 /* memcpy from aligned buffer into output buffer */ 1253 1225 memcpy(buf, aligned_buf, len); 1254 1226 buf += len; ··· 1260 1230 { 1261 1231 u64 *pdata = buf; 1262 1232 for (; cnt >= 8; cnt -= 8) 1263 - *pdata++ = mci_readq(host, DATA); 1233 + *pdata++ = mci_readq(host, DATA(host->data_offset)); 1264 1234 buf = pdata; 1265 1235 } 1266 1236 if (cnt) { 1267 - host->part_buf = mci_readq(host, DATA); 1237 + host->part_buf = mci_readq(host, DATA(host->data_offset)); 1268 1238 dw_mci_pull_final_bytes(host, buf, cnt); 1269 1239 } 1270 1240 } ··· 1436 1406 struct dw_mci *host = dev_id; 1437 1407 u32 status, pending; 1438 1408 unsigned int pass_count = 0; 1409 + int i; 1439 1410 1440 1411 do { 1441 1412 status = mci_readl(host, RINTSTS); ··· 1506 1475 if (pending & SDMMC_INT_CD) { 1507 1476 mci_writel(host, RINTSTS, SDMMC_INT_CD); 1508 1477 queue_work(dw_mci_card_workqueue, &host->card_work); 1478 + } 1479 + 1480 + /* Handle SDIO Interrupts */ 1481 + for (i = 0; i < host->num_slots; i++) { 1482 + struct dw_mci_slot *slot = host->slot[i]; 1483 + if (pending & SDMMC_INT_SDIO(i)) { 1484 + mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i)); 1485 + mmc_signal_sdio_irq(slot->mmc); 1486 + } 1509 1487 } 1510 1488 1511 1489 } while (pass_count++ < 5); ··· 1713 1673 1714 1674 host->vmmc = regulator_get(mmc_dev(mmc), "vmmc"); 1715 1675 if (IS_ERR(host->vmmc)) { 1716 - printk(KERN_INFO "%s: no vmmc regulator found\n", mmc_hostname(mmc)); 1676 + pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc)); 1717 1677 host->vmmc = NULL; 1718 1678 } else 1719 1679 regulator_enable(host->vmmc); ··· 1962 1922 goto err_init_slot; 1963 1923 } 1964 1924 } 1925 + 1926 + /* 1927 + * In 2.40a spec, Data offset is changed. 1928 + * Need to check the version-id and set data-offset for DATA register. 
1929 + */ 1930 + host->verid = SDMMC_GET_VERID(mci_readl(host, VERID)); 1931 + dev_info(&pdev->dev, "Version ID is %04x\n", host->verid); 1932 + 1933 + if (host->verid < DW_MMC_240A) 1934 + host->data_offset = DATA_OFFSET; 1935 + else 1936 + host->data_offset = DATA_240A_OFFSET; 1965 1937 1966 1938 /* 1967 1939 * Enable interrupts for command done, data over, data empty, card det,
+13 -2
drivers/mmc/host/dw_mmc.h
··· 14 14 #ifndef _DW_MMC_H_ 15 15 #define _DW_MMC_H_ 16 16 17 + #define DW_MMC_240A 0x240a 18 + 17 19 #define SDMMC_CTRL 0x000 18 20 #define SDMMC_PWREN 0x004 19 21 #define SDMMC_CLKDIV 0x008 ··· 53 51 #define SDMMC_IDINTEN 0x090 54 52 #define SDMMC_DSCADDR 0x094 55 53 #define SDMMC_BUFADDR 0x098 56 - #define SDMMC_DATA 0x100 54 + #define SDMMC_DATA(x) (x) 55 + 56 + /* 57 + * Data offset is difference according to Version 58 + * Lower than 2.40a : data register offest is 0x100 59 + */ 60 + #define DATA_OFFSET 0x100 61 + #define DATA_240A_OFFSET 0x200 57 62 58 63 /* shift bit field */ 59 64 #define _SBF(f, v) ((v) << (f)) ··· 91 82 #define SDMMC_CTYPE_4BIT BIT(0) 92 83 #define SDMMC_CTYPE_1BIT 0 93 84 /* Interrupt status & mask register defines */ 94 - #define SDMMC_INT_SDIO BIT(16) 85 + #define SDMMC_INT_SDIO(n) BIT(16 + (n)) 95 86 #define SDMMC_INT_EBE BIT(15) 96 87 #define SDMMC_INT_ACD BIT(14) 97 88 #define SDMMC_INT_SBE BIT(13) ··· 139 130 #define SDMMC_IDMAC_ENABLE BIT(7) 140 131 #define SDMMC_IDMAC_FB BIT(1) 141 132 #define SDMMC_IDMAC_SWRESET BIT(0) 133 + /* Version ID register define */ 134 + #define SDMMC_GET_VERID(x) ((x) & 0xFFFF) 142 135 143 136 /* Register access macros */ 144 137 #define mci_readl(dev, reg) \
+1 -1
drivers/mmc/host/imxmmc.c
··· 942 942 int ret = 0, irq; 943 943 u16 rev_no; 944 944 945 - printk(KERN_INFO "i.MX mmc driver\n"); 945 + pr_info("i.MX mmc driver\n"); 946 946 947 947 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 948 948 irq = platform_get_irq(pdev, 0);
+1
drivers/mmc/host/mmc_spi.c
··· 27 27 #include <linux/sched.h> 28 28 #include <linux/delay.h> 29 29 #include <linux/slab.h> 30 + #include <linux/module.h> 30 31 #include <linux/bio.h> 31 32 #include <linux/dma-mapping.h> 32 33 #include <linux/crc7.h>
+2 -2
drivers/mmc/host/mmci.c
··· 466 466 struct mmci_host_next *next = &host->next_data; 467 467 468 468 if (data->host_cookie && data->host_cookie != next->cookie) { 469 - printk(KERN_WARNING "[%s] invalid cookie: data->host_cookie %d" 469 + pr_warning("[%s] invalid cookie: data->host_cookie %d" 470 470 " host->next_data.cookie %d\n", 471 471 __func__, data->host_cookie, host->next_data.cookie); 472 472 data->host_cookie = 0; ··· 531 531 if (chan) { 532 532 if (err) 533 533 dmaengine_terminate_all(chan); 534 - if (err || data->host_cookie) 534 + if (data->host_cookie) 535 535 dma_unmap_sg(mmc_dev(host->mmc), data->sg, 536 536 data->sg_len, dir); 537 537 mrq->data->host_cookie = 0;
+58 -28
drivers/mmc/host/msm_sdcc.c
··· 213 213 msmsdcc_writel(host, host->cmd_timeout, MMCIDATATIMER); 214 214 msmsdcc_writel(host, (unsigned int)host->curr.xfer_size, 215 215 MMCIDATALENGTH); 216 - msmsdcc_writel(host, host->cmd_pio_irqmask, MMCIMASK1); 216 + msmsdcc_writel(host, (msmsdcc_readl(host, MMCIMASK0) & 217 + (~MCI_IRQ_PIO)) | host->cmd_pio_irqmask, MMCIMASK0); 217 218 msmsdcc_writel(host, host->cmd_datactrl, MMCIDATACTRL); 218 219 219 220 if (host->cmd_cmd) { ··· 389 388 n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg, 390 389 host->dma.num_ents, host->dma.dir); 391 390 if (n == 0) { 392 - printk(KERN_ERR "%s: Unable to map in all sg elements\n", 391 + pr_err("%s: Unable to map in all sg elements\n", 393 392 mmc_hostname(host->mmc)); 394 393 host->dma.sg = NULL; 395 394 host->dma.num_ents = 0; ··· 475 474 *c |= MCI_CSPM_MCIABORT; 476 475 477 476 if (host->curr.cmd != NULL) { 478 - printk(KERN_ERR "%s: Overlapping command requests\n", 477 + pr_err("%s: Overlapping command requests\n", 479 478 mmc_hostname(host->mmc)); 480 479 } 481 480 host->curr.cmd = cmd; ··· 544 543 545 544 msmsdcc_writel(host, host->curr.xfer_size, MMCIDATALENGTH); 546 545 547 - msmsdcc_writel(host, pio_irqmask, MMCIMASK1); 546 + msmsdcc_writel(host, (msmsdcc_readl(host, MMCIMASK0) & 547 + (~MCI_IRQ_PIO)) | pio_irqmask, MMCIMASK0); 548 + 548 549 msmsdcc_writel(host, datactrl, MMCIDATACTRL); 549 550 550 551 if (cmd) { ··· 662 659 { 663 660 struct msmsdcc_host *host = dev_id; 664 661 uint32_t status; 662 + u32 mci_mask0; 665 663 666 664 status = msmsdcc_readl(host, MMCISTATUS); 665 + mci_mask0 = msmsdcc_readl(host, MMCIMASK0); 666 + 667 + if (((mci_mask0 & status) & MCI_IRQ_PIO) == 0) 668 + return IRQ_NONE; 667 669 668 670 do { 669 671 unsigned long flags; ··· 727 719 } while (1); 728 720 729 721 if (status & MCI_RXACTIVE && host->curr.xfer_remain < MCI_FIFOSIZE) 730 - msmsdcc_writel(host, MCI_RXDATAAVLBLMASK, MMCIMASK1); 722 + msmsdcc_writel(host, (mci_mask0 & (~MCI_IRQ_PIO)) | 723 + MCI_RXDATAAVLBLMASK, MMCIMASK0); 
731 724 732 725 if (!host->curr.xfer_remain) 733 - msmsdcc_writel(host, 0, MMCIMASK1); 726 + msmsdcc_writel(host, (mci_mask0 & (~MCI_IRQ_PIO)) | 0, 727 + MMCIMASK0); 734 728 735 729 return IRQ_HANDLED; 736 730 } ··· 864 854 do { 865 855 status = msmsdcc_readl(host, MMCISTATUS); 866 856 status &= msmsdcc_readl(host, MMCIMASK0); 857 + if ((status & (~MCI_IRQ_PIO)) == 0) 858 + break; 867 859 msmsdcc_writel(host, status, MMCICLEAR); 868 860 869 861 if (status & MCI_SDIOINTR) ··· 951 939 struct msm_mmc_gpio_data *curr; 952 940 int i, rc = 0; 953 941 954 - if (!host->plat->gpio_data && host->gpio_config_status == enable) 942 + if (!host->plat->gpio_data || host->gpio_config_status == enable) 955 943 return; 956 944 957 945 curr = host->plat->gpio_data; ··· 1064 1052 spin_unlock_irqrestore(&host->lock, flags); 1065 1053 } 1066 1054 1055 + static void msmsdcc_init_card(struct mmc_host *mmc, struct mmc_card *card) 1056 + { 1057 + struct msmsdcc_host *host = mmc_priv(mmc); 1058 + 1059 + if (host->plat->init_card) 1060 + host->plat->init_card(card); 1061 + } 1062 + 1067 1063 static const struct mmc_host_ops msmsdcc_ops = { 1068 1064 .request = msmsdcc_request, 1069 1065 .set_ios = msmsdcc_set_ios, 1070 1066 .enable_sdio_irq = msmsdcc_enable_sdio_irq, 1067 + .init_card = msmsdcc_init_card, 1071 1068 }; 1072 1069 1073 1070 static void ··· 1113 1092 { 1114 1093 struct msmsdcc_host *host = dev_id; 1115 1094 1116 - printk(KERN_DEBUG "%s: %d\n", __func__, irq); 1095 + pr_debug("%s: %d\n", __func__, irq); 1117 1096 msmsdcc_check_status((unsigned long) host); 1118 1097 return IRQ_HANDLED; 1119 1098 } ··· 1123 1102 { 1124 1103 struct msmsdcc_host *host = dev_id; 1125 1104 1126 - printk(KERN_DEBUG "%s: card_present %d\n", mmc_hostname(host->mmc), 1105 + pr_debug("%s: card_present %d\n", mmc_hostname(host->mmc), 1127 1106 card_present); 1128 1107 msmsdcc_check_status((unsigned long) host); 1129 1108 } ··· 1171 1150 struct msmsdcc_host *host; 1172 1151 struct mmc_host *mmc; 1173 1152 
struct resource *cmd_irqres = NULL; 1174 - struct resource *pio_irqres = NULL; 1175 1153 struct resource *stat_irqres = NULL; 1176 1154 struct resource *memres = NULL; 1177 1155 struct resource *dmares = NULL; ··· 1195 1175 dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0); 1196 1176 cmd_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ, 1197 1177 "cmd_irq"); 1198 - pio_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ, 1199 - "pio_irq"); 1200 1178 stat_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ, 1201 1179 "status_irq"); 1202 1180 1203 - if (!cmd_irqres || !pio_irqres || !memres) { 1181 + if (!cmd_irqres || !memres) { 1204 1182 pr_err("%s: Invalid resource\n", __func__); 1205 1183 return -ENXIO; 1206 1184 } ··· 1218 1200 host->plat = plat; 1219 1201 host->mmc = mmc; 1220 1202 host->curr.cmd = NULL; 1203 + init_timer(&host->busclk_timer); 1204 + host->busclk_timer.data = (unsigned long) host; 1205 + host->busclk_timer.function = msmsdcc_busclk_expired; 1206 + 1221 1207 1222 1208 host->cmdpoll = 1; 1223 1209 1224 1210 host->base = ioremap(memres->start, PAGE_SIZE); 1225 1211 if (!host->base) { 1226 1212 ret = -ENOMEM; 1227 - goto out; 1213 + goto host_free; 1228 1214 } 1229 1215 1230 1216 host->cmd_irqres = cmd_irqres; 1231 - host->pio_irqres = pio_irqres; 1232 1217 host->memres = memres; 1233 1218 host->dmares = dmares; 1234 1219 spin_lock_init(&host->lock); ··· 1242 1221 /* 1243 1222 * Setup DMA 1244 1223 */ 1245 - msmsdcc_init_dma(host); 1224 + if (host->dmares) { 1225 + ret = msmsdcc_init_dma(host); 1226 + if (ret) 1227 + goto ioremap_free; 1228 + } else { 1229 + host->dma.channel = -1; 1230 + } 1246 1231 1247 1232 /* Get our clocks */ 1248 1233 host->pclk = clk_get(&pdev->dev, "sdc_pclk"); 1249 1234 if (IS_ERR(host->pclk)) { 1250 1235 ret = PTR_ERR(host->pclk); 1251 - goto host_free; 1236 + goto dma_free; 1252 1237 } 1253 1238 1254 1239 host->clk = clk_get(&pdev->dev, "sdc_clk"); ··· 1263 1236 goto pclk_put; 1264 1237 } 
1265 1238 1239 + ret = clk_set_rate(host->clk, msmsdcc_fmin); 1240 + if (ret) { 1241 + pr_err("%s: Clock rate set failed (%d)\n", __func__, ret); 1242 + goto clk_put; 1243 + } 1244 + 1266 1245 /* Enable clocks */ 1267 1246 ret = msmsdcc_enable_clocks(host); 1268 1247 if (ret) 1269 1248 goto clk_put; 1270 - 1271 - ret = clk_set_rate(host->clk, msmsdcc_fmin); 1272 - if (ret) { 1273 - pr_err("%s: Clock rate set failed (%d)\n", __func__, ret); 1274 - goto clk_disable; 1275 - } 1276 1249 1277 1250 host->pclk_rate = clk_get_rate(host->pclk); 1278 1251 host->clk_rate = clk_get_rate(host->clk); ··· 1343 1316 host->eject = !host->oldstat; 1344 1317 } 1345 1318 1346 - init_timer(&host->busclk_timer); 1347 - host->busclk_timer.data = (unsigned long) host; 1348 - host->busclk_timer.function = msmsdcc_busclk_expired; 1349 - 1350 1319 ret = request_irq(cmd_irqres->start, msmsdcc_irq, IRQF_SHARED, 1351 1320 DRIVER_NAME " (cmd)", host); 1352 1321 if (ret) 1353 1322 goto stat_irq_free; 1354 1323 1355 - ret = request_irq(pio_irqres->start, msmsdcc_pio_irq, IRQF_SHARED, 1324 + ret = request_irq(cmd_irqres->start, msmsdcc_pio_irq, IRQF_SHARED, 1356 1325 DRIVER_NAME " (pio)", host); 1357 1326 if (ret) 1358 1327 goto cmd_irq_free; ··· 1391 1368 clk_put(host->clk); 1392 1369 pclk_put: 1393 1370 clk_put(host->pclk); 1371 + dma_free: 1372 + if (host->dmares) 1373 + dma_free_coherent(NULL, sizeof(struct msmsdcc_nc_dmadata), 1374 + host->dma.nc, host->dma.nc_busaddr); 1375 + ioremap_free: 1376 + tasklet_kill(&host->dma_tlet); 1377 + iounmap(host->base); 1394 1378 host_free: 1395 1379 mmc_free_host(mmc); 1396 1380 out:
+5 -1
drivers/mmc/host/msm_sdcc.h
··· 140 140 MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \ 141 141 MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATAENDMASK|MCI_PROGDONEMASK) 142 142 143 + #define MCI_IRQ_PIO \ 144 + (MCI_RXDATAAVLBLMASK | MCI_TXDATAAVLBLMASK | MCI_RXFIFOEMPTYMASK | \ 145 + MCI_TXFIFOEMPTYMASK | MCI_RXFIFOFULLMASK | MCI_TXFIFOFULLMASK | \ 146 + MCI_RXFIFOHALFFULLMASK | MCI_TXFIFOHALFEMPTYMASK | \ 147 + MCI_RXACTIVEMASK | MCI_TXACTIVEMASK) 143 148 /* 144 149 * The size of the FIFO in bytes. 145 150 */ ··· 207 202 208 203 struct msmsdcc_host { 209 204 struct resource *cmd_irqres; 210 - struct resource *pio_irqres; 211 205 struct resource *memres; 212 206 struct resource *dmares; 213 207 void __iomem *base;
+7 -7
drivers/mmc/host/mvsdio.c
··· 117 117 host->pio_size = data->blocks * data->blksz; 118 118 host->pio_ptr = sg_virt(data->sg); 119 119 if (!nodma) 120 - printk(KERN_DEBUG "%s: fallback to PIO for data " 120 + pr_debug("%s: fallback to PIO for data " 121 121 "at 0x%p size %d\n", 122 122 mmc_hostname(host->mmc), 123 123 host->pio_ptr, host->pio_size); ··· 471 471 if (mrq->data) 472 472 err_status = mvsd_finish_data(host, mrq->data, err_status); 473 473 if (err_status) { 474 - printk(KERN_ERR "%s: unhandled error status %#04x\n", 474 + pr_err("%s: unhandled error status %#04x\n", 475 475 mmc_hostname(host->mmc), err_status); 476 476 cmd->error = -ENOMSG; 477 477 } ··· 489 489 if (irq_handled) 490 490 return IRQ_HANDLED; 491 491 492 - printk(KERN_ERR "%s: unhandled interrupt status=0x%04x en=0x%04x " 492 + pr_err("%s: unhandled interrupt status=0x%04x en=0x%04x " 493 493 "pio=%d\n", mmc_hostname(host->mmc), intr_status, 494 494 host->intr_en, host->pio_size); 495 495 return IRQ_NONE; ··· 505 505 spin_lock_irqsave(&host->lock, flags); 506 506 mrq = host->mrq; 507 507 if (mrq) { 508 - printk(KERN_ERR "%s: Timeout waiting for hardware interrupt.\n", 508 + pr_err("%s: Timeout waiting for hardware interrupt.\n", 509 509 mmc_hostname(host->mmc)); 510 - printk(KERN_ERR "%s: hw_state=0x%04x, intr_status=0x%04x " 510 + pr_err("%s: hw_state=0x%04x, intr_status=0x%04x " 511 511 "intr_en=0x%04x\n", mmc_hostname(host->mmc), 512 512 mvsd_read(MVSD_HW_STATE), 513 513 mvsd_read(MVSD_NOR_INTR_STATUS), ··· 762 762 763 763 ret = request_irq(irq, mvsd_irq, 0, DRIVER_NAME, host); 764 764 if (ret) { 765 - printk(KERN_ERR "%s: cannot assign irq %d\n", DRIVER_NAME, irq); 765 + pr_err("%s: cannot assign irq %d\n", DRIVER_NAME, irq); 766 766 goto out; 767 767 } else 768 768 host->irq = irq; ··· 802 802 if (ret) 803 803 goto out; 804 804 805 - printk(KERN_NOTICE "%s: %s driver initialized, ", 805 + pr_notice("%s: %s driver initialized, ", 806 806 mmc_hostname(mmc), DRIVER_NAME); 807 807 if (host->gpio_card_detect) 808 808 
printk("using GPIO %d for card detection\n",
+1 -1
drivers/mmc/host/mxcmmc.c
··· 842 842 int ret = 0, irq; 843 843 dma_cap_mask_t mask; 844 844 845 - printk(KERN_INFO "i.MX SDHC driver\n"); 845 + pr_info("i.MX SDHC driver\n"); 846 846 847 847 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 848 848 irq = platform_get_irq(pdev, 0);
+1
drivers/mmc/host/mxs-mmc.c
··· 37 37 #include <linux/mmc/sdio.h> 38 38 #include <linux/gpio.h> 39 39 #include <linux/regulator/consumer.h> 40 + #include <linux/module.h> 40 41 41 42 #include <mach/mxs.h> 42 43 #include <mach/common.h>
+17 -15
drivers/mmc/host/omap_hsmmc.c
··· 450 450 * framework is fixed, we need a workaround like this 451 451 * (which is safe for MMC, but not in general). 452 452 */ 453 - if (regulator_is_enabled(host->vcc) > 0) { 454 - regulator_enable(host->vcc); 455 - regulator_disable(host->vcc); 456 - } 457 - if (host->vcc_aux) { 458 - if (regulator_is_enabled(reg) > 0) { 459 - regulator_enable(reg); 460 - regulator_disable(reg); 461 - } 453 + if (regulator_is_enabled(host->vcc) > 0 || 454 + (host->vcc_aux && regulator_is_enabled(host->vcc_aux))) { 455 + int vdd = ffs(mmc_slot(host).ocr_mask) - 1; 456 + 457 + mmc_slot(host).set_power(host->dev, host->slot_id, 458 + 1, vdd); 459 + mmc_slot(host).set_power(host->dev, host->slot_id, 460 + 0, 0); 462 461 } 463 462 } 464 463 ··· 1263 1264 host->reqs_blocked = 0; 1264 1265 if (mmc_slot(host).get_cover_state(host->dev, host->slot_id)) { 1265 1266 if (host->protect_card) { 1266 - printk(KERN_INFO "%s: cover is closed, " 1267 + pr_info("%s: cover is closed, " 1267 1268 "card is now accessible\n", 1268 1269 mmc_hostname(host->mmc)); 1269 1270 host->protect_card = 0; 1270 1271 } 1271 1272 } else { 1272 1273 if (!host->protect_card) { 1273 - printk(KERN_INFO "%s: cover is open, " 1274 + pr_info"%s: cover is open, " 1274 1275 "card is now inaccessible\n", 1275 1276 mmc_hostname(host->mmc)); 1276 1277 host->protect_card = 1; ··· 1421 1422 1422 1423 if (!next && data->host_cookie && 1423 1424 data->host_cookie != host->next_data.cookie) { 1424 - printk(KERN_WARNING "[%s] invalid cookie: data->host_cookie %d" 1425 + pr_warning("[%s] invalid cookie: data->host_cookie %d" 1425 1426 " host->next_data.cookie %d\n", 1426 1427 __func__, data->host_cookie, host->next_data.cookie); 1427 1428 data->host_cookie = 0; ··· 1942 1943 omap_hsmmc_context_save(host); 1943 1944 1944 1945 mmc->caps |= MMC_CAP_DISABLE; 1946 + if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) { 1947 + dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read 
performance may suffer\n"); 1948 + mmc->caps2 |= MMC_CAP2_NO_MULTI_READ; 1949 + } 1945 1950 1946 1951 pm_runtime_enable(host->dev); 1947 1952 pm_runtime_get_sync(host->dev); ··· 2018 2015 } 2019 2016 2020 2017 /* Request IRQ for MMC operations */ 2021 - ret = request_irq(host->irq, omap_hsmmc_irq, IRQF_DISABLED, 2018 + ret = request_irq(host->irq, omap_hsmmc_irq, 0, 2022 2019 mmc_hostname(mmc), host); 2023 2020 if (ret) { 2024 2021 dev_dbg(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n"); ··· 2046 2043 if ((mmc_slot(host).card_detect_irq)) { 2047 2044 ret = request_irq(mmc_slot(host).card_detect_irq, 2048 2045 omap_hsmmc_cd_handler, 2049 - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING 2050 - | IRQF_DISABLED, 2046 + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, 2051 2047 mmc_hostname(mmc), host); 2052 2048 if (ret) { 2053 2049 dev_dbg(mmc_dev(host->mmc),
+1 -1
drivers/mmc/host/pxamci.c
··· 558 558 if (dcsr & DCSR_ENDINTR) { 559 559 writel(BUF_PART_FULL, host->base + MMC_PRTBUF); 560 560 } else { 561 - printk(KERN_ERR "%s: DMA error on channel %d (DCSR=%#x)\n", 561 + pr_err("%s: DMA error on channel %d (DCSR=%#x)\n", 562 562 mmc_hostname(host->mmc), dma, dcsr); 563 563 host->data->error = -EIO; 564 564 pxamci_data_done(host, 0);
+2 -2
drivers/mmc/host/s3cmci.c
··· 247 247 { 248 248 if (host->sdio_irqen) { 249 249 if (gpio_get_value(S3C2410_GPE(8)) == 0) { 250 - printk(KERN_DEBUG "%s: signalling irq\n", __func__); 250 + pr_debug("%s: signalling irq\n", __func__); 251 251 mmc_signal_sdio_irq(host->mmc); 252 252 } 253 253 } ··· 344 344 345 345 local_irq_save(flags); 346 346 347 - //printk(KERN_DEBUG "%s: transfer %d\n", __func__, transfer); 347 + /* pr_debug("%s: transfer %d\n", __func__, transfer); */ 348 348 349 349 host->irq_disabled = transfer; 350 350
+73 -7
drivers/mmc/host/sdhci-esdhc-imx.c
··· 32 32 /* VENDOR SPEC register */ 33 33 #define SDHCI_VENDOR_SPEC 0xC0 34 34 #define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 35 + #define SDHCI_MIX_CTRL 0x48 36 + 37 + /* 38 + * There is an INT DMA ERR mis-match between eSDHC and STD SDHC SPEC: 39 + * Bit25 is used in STD SPEC, and is reserved in fsl eSDHC design, 40 + * but bit28 is used as the INT DMA ERR in fsl eSDHC design. 41 + * Define this macro DMA error INT for fsl eSDHC 42 + */ 43 + #define SDHCI_INT_VENDOR_SPEC_DMA_ERR 0x10000000 35 44 36 45 /* 37 46 * The CMDTYPE of the CMD register (offset 0xE) should be set to ··· 60 51 IMX35_ESDHC, 61 52 IMX51_ESDHC, 62 53 IMX53_ESDHC, 54 + IMX6Q_USDHC, 63 55 }; 64 56 65 57 struct pltfm_imx_data { ··· 84 74 .name = "sdhci-esdhc-imx53", 85 75 .driver_data = IMX53_ESDHC, 86 76 }, { 77 + .name = "sdhci-usdhc-imx6q", 78 + .driver_data = IMX6Q_USDHC, 79 + }, { 87 80 /* sentinel */ 88 81 } 89 82 }; ··· 97 84 { .compatible = "fsl,imx35-esdhc", .data = &imx_esdhc_devtype[IMX35_ESDHC], }, 98 85 { .compatible = "fsl,imx51-esdhc", .data = &imx_esdhc_devtype[IMX51_ESDHC], }, 99 86 { .compatible = "fsl,imx53-esdhc", .data = &imx_esdhc_devtype[IMX53_ESDHC], }, 87 + { .compatible = "fsl,imx6q-usdhc", .data = &imx_esdhc_devtype[IMX6Q_USDHC], }, 100 88 { /* sentinel */ } 101 89 }; 102 90 MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids); ··· 120 106 static inline int is_imx53_esdhc(struct pltfm_imx_data *data) 121 107 { 122 108 return data->devtype == IMX53_ESDHC; 109 + } 110 + 111 + static inline int is_imx6q_usdhc(struct pltfm_imx_data *data) 112 + { 113 + return data->devtype == IMX6Q_USDHC; 123 114 } 124 115 125 116 static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg) ··· 152 133 else 153 134 /* ... 
in all other cases assume card is present */ 154 135 val |= SDHCI_CARD_PRESENT; 136 + } 137 + 138 + if (unlikely(reg == SDHCI_CAPABILITIES)) { 139 + /* In FSL esdhc IC module, only bit20 is used to indicate the 140 + * ADMA2 capability of esdhc, but this bit is messed up on 141 + * some SOCs (e.g. on MX25, MX35 this bit is set, but they 142 + * don't actually support ADMA2). So set the BROKEN_ADMA 143 + * uirk on MX25/35 platforms. 144 + */ 145 + 146 + if (val & SDHCI_CAN_DO_ADMA1) { 147 + val &= ~SDHCI_CAN_DO_ADMA1; 148 + val |= SDHCI_CAN_DO_ADMA2; 149 + } 150 + } 151 + 152 + if (unlikely(reg == SDHCI_INT_STATUS)) { 153 + if (val & SDHCI_INT_VENDOR_SPEC_DMA_ERR) { 154 + val &= ~SDHCI_INT_VENDOR_SPEC_DMA_ERR; 155 + val |= SDHCI_INT_ADMA_ERROR; 156 + } 155 157 } 156 158 157 159 return val; ··· 219 179 writel(v, host->ioaddr + SDHCI_VENDOR_SPEC); 220 180 } 221 181 182 + if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) { 183 + if (val & SDHCI_INT_ADMA_ERROR) { 184 + val &= ~SDHCI_INT_ADMA_ERROR; 185 + val |= SDHCI_INT_VENDOR_SPEC_DMA_ERR; 186 + } 187 + } 188 + 222 189 writel(val, host->ioaddr + reg); 223 190 } 224 191 225 192 static u16 esdhc_readw_le(struct sdhci_host *host, int reg) 226 193 { 227 - if (unlikely(reg == SDHCI_HOST_VERSION)) 228 - reg ^= 2; 194 + if (unlikely(reg == SDHCI_HOST_VERSION)) { 195 + u16 val = readw(host->ioaddr + (reg ^ 2)); 196 + /* 197 + * uSDHC supports SDHCI v3.0, but it's encoded as value 198 + * 0x3 in host controller version register, which violates 199 + * SDHCI_SPEC_300 definition. Work it around here. 
200 + */ 201 + if ((val & SDHCI_SPEC_VER_MASK) == 3) 202 + return --val; 203 + } 229 204 230 205 return readw(host->ioaddr + reg); 231 206 } ··· 271 216 if ((host->cmd->opcode == MMC_STOP_TRANSMISSION) 272 217 && (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)) 273 218 val |= SDHCI_CMD_ABORTCMD; 274 - writel(val << 16 | imx_data->scratchpad, 275 - host->ioaddr + SDHCI_TRANSFER_MODE); 219 + 220 + if (is_imx6q_usdhc(imx_data)) { 221 + u32 m = readl(host->ioaddr + SDHCI_MIX_CTRL); 222 + m = imx_data->scratchpad | (m & 0xffff0000); 223 + writel(m, host->ioaddr + SDHCI_MIX_CTRL); 224 + writel(val << 16, 225 + host->ioaddr + SDHCI_TRANSFER_MODE); 226 + } else { 227 + writel(val << 16 | imx_data->scratchpad, 228 + host->ioaddr + SDHCI_TRANSFER_MODE); 229 + } 276 230 return; 277 231 case SDHCI_BLOCK_SIZE: 278 232 val &= ~SDHCI_MAKE_BLKSZ(0x7, 0); ··· 375 311 }; 376 312 377 313 static struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { 378 - .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA 314 + .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_NO_HISPD_BIT 315 + | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC 316 + | SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC 379 317 | SDHCI_QUIRK_BROKEN_CARD_DETECTION, 380 - /* ADMA has issues. Might be fixable */ 381 318 .ops = &sdhci_esdhc_ops, 382 319 }; 383 320 ··· 470 405 471 406 if (is_imx25_esdhc(imx_data) || is_imx35_esdhc(imx_data)) 472 407 /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */ 473 - host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK; 408 + host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK 409 + | SDHCI_QUIRK_BROKEN_ADMA; 474 410 475 411 if (is_imx53_esdhc(imx_data)) 476 412 imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT;
+15 -4
drivers/mmc/host/sdhci-of-esdhc.c
··· 1 1 /* 2 2 * Freescale eSDHC controller driver. 3 3 * 4 - * Copyright (c) 2007 Freescale Semiconductor, Inc. 4 + * Copyright (c) 2007, 2010 Freescale Semiconductor, Inc. 5 5 * Copyright (c) 2009 MontaVista Software, Inc. 6 6 * 7 7 * Authors: Xiaobo Xie <X.Xie@freescale.com> ··· 15 15 16 16 #include <linux/io.h> 17 17 #include <linux/delay.h> 18 + #include <linux/module.h> 18 19 #include <linux/mmc/host.h> 19 20 #include "sdhci-pltfm.h" 20 21 #include "sdhci-esdhc.h" ··· 23 22 static u16 esdhc_readw(struct sdhci_host *host, int reg) 24 23 { 25 24 u16 ret; 25 + int base = reg & ~0x3; 26 + int shift = (reg & 0x2) * 8; 26 27 27 28 if (unlikely(reg == SDHCI_HOST_VERSION)) 28 - ret = in_be16(host->ioaddr + reg); 29 + ret = in_be32(host->ioaddr + base) & 0xffff; 29 30 else 30 - ret = sdhci_be32bs_readw(host, reg); 31 + ret = (in_be32(host->ioaddr + base) >> shift) & 0xffff; 32 + return ret; 33 + } 34 + 35 + static u8 esdhc_readb(struct sdhci_host *host, int reg) 36 + { 37 + int base = reg & ~0x3; 38 + int shift = (reg & 0x3) * 8; 39 + u8 ret = (in_be32(host->ioaddr + base) >> shift) & 0xff; 31 40 return ret; 32 41 } 33 42 ··· 85 74 static struct sdhci_ops sdhci_esdhc_ops = { 86 75 .read_l = sdhci_be32bs_readl, 87 76 .read_w = esdhc_readw, 88 - .read_b = sdhci_be32bs_readb, 77 + .read_b = esdhc_readb, 89 78 .write_l = sdhci_be32bs_writel, 90 79 .write_w = esdhc_writew, 91 80 .write_b = esdhc_writeb,
+1
drivers/mmc/host/sdhci-of-hlwd.c
··· 20 20 */ 21 21 22 22 #include <linux/delay.h> 23 + #include <linux/module.h> 23 24 #include <linux/mmc/host.h> 24 25 #include "sdhci-pltfm.h" 25 26
+266 -2
drivers/mmc/host/sdhci-pci.c
··· 14 14 15 15 #include <linux/delay.h> 16 16 #include <linux/highmem.h> 17 + #include <linux/module.h> 17 18 #include <linux/pci.h> 18 19 #include <linux/dma-mapping.h> 19 20 #include <linux/slab.h> ··· 22 21 #include <linux/mmc/host.h> 23 22 #include <linux/scatterlist.h> 24 23 #include <linux/io.h> 24 + #include <linux/gpio.h> 25 + #include <linux/sfi.h> 26 + #include <linux/pm_runtime.h> 25 27 26 28 #include "sdhci.h" 27 29 ··· 47 43 48 44 struct sdhci_pci_fixes { 49 45 unsigned int quirks; 46 + bool allow_runtime_pm; 50 47 51 48 int (*probe) (struct sdhci_pci_chip *); 52 49 ··· 64 59 struct sdhci_host *host; 65 60 66 61 int pci_bar; 62 + int rst_n_gpio; 63 + int cd_gpio; 64 + int cd_irq; 67 65 }; 68 66 69 67 struct sdhci_pci_chip { 70 68 struct pci_dev *pdev; 71 69 72 70 unsigned int quirks; 71 + bool allow_runtime_pm; 73 72 const struct sdhci_pci_fixes *fixes; 74 73 75 74 int num_slots; /* Slots on controller */ ··· 172 163 return 0; 173 164 } 174 165 166 + /* Medfield eMMC hardware reset GPIOs */ 167 + static int mfd_emmc0_rst_gpio = -EINVAL; 168 + static int mfd_emmc1_rst_gpio = -EINVAL; 169 + 170 + static int mfd_emmc_gpio_parse(struct sfi_table_header *table) 171 + { 172 + struct sfi_table_simple *sb = (struct sfi_table_simple *)table; 173 + struct sfi_gpio_table_entry *entry; 174 + int i, num; 175 + 176 + num = SFI_GET_NUM_ENTRIES(sb, struct sfi_gpio_table_entry); 177 + entry = (struct sfi_gpio_table_entry *)sb->pentry; 178 + 179 + for (i = 0; i < num; i++, entry++) { 180 + if (!strncmp(entry->pin_name, "emmc0_rst", SFI_NAME_LEN)) 181 + mfd_emmc0_rst_gpio = entry->pin_no; 182 + else if (!strncmp(entry->pin_name, "emmc1_rst", SFI_NAME_LEN)) 183 + mfd_emmc1_rst_gpio = entry->pin_no; 184 + } 185 + 186 + return 0; 187 + } 188 + 189 + #ifdef CONFIG_PM_RUNTIME 190 + 191 + static irqreturn_t mfd_sd_cd(int irq, void *dev_id) 192 + { 193 + struct sdhci_pci_slot *slot = dev_id; 194 + struct sdhci_host *host = slot->host; 195 + 196 + mmc_detect_change(host->mmc, 
msecs_to_jiffies(200)); 197 + return IRQ_HANDLED; 198 + } 199 + 200 + #define MFLD_SD_CD_PIN 69 201 + 202 + static int mfd_sd_probe_slot(struct sdhci_pci_slot *slot) 203 + { 204 + int err, irq, gpio = MFLD_SD_CD_PIN; 205 + 206 + slot->cd_gpio = -EINVAL; 207 + slot->cd_irq = -EINVAL; 208 + 209 + err = gpio_request(gpio, "sd_cd"); 210 + if (err < 0) 211 + goto out; 212 + 213 + err = gpio_direction_input(gpio); 214 + if (err < 0) 215 + goto out_free; 216 + 217 + irq = gpio_to_irq(gpio); 218 + if (irq < 0) 219 + goto out_free; 220 + 221 + err = request_irq(irq, mfd_sd_cd, IRQF_TRIGGER_RISING | 222 + IRQF_TRIGGER_FALLING, "sd_cd", slot); 223 + if (err) 224 + goto out_free; 225 + 226 + slot->cd_gpio = gpio; 227 + slot->cd_irq = irq; 228 + slot->host->quirks2 |= SDHCI_QUIRK2_OWN_CARD_DETECTION; 229 + 230 + return 0; 231 + 232 + out_free: 233 + gpio_free(gpio); 234 + out: 235 + dev_warn(&slot->chip->pdev->dev, "failed to setup card detect wake up\n"); 236 + return 0; 237 + } 238 + 239 + static void mfd_sd_remove_slot(struct sdhci_pci_slot *slot, int dead) 240 + { 241 + if (slot->cd_irq >= 0) 242 + free_irq(slot->cd_irq, slot); 243 + gpio_free(slot->cd_gpio); 244 + } 245 + 246 + #else 247 + 248 + #define mfd_sd_probe_slot NULL 249 + #define mfd_sd_remove_slot NULL 250 + 251 + #endif 252 + 175 253 static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot) 176 254 { 177 - slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA; 255 + const char *name = NULL; 256 + int gpio = -EINVAL; 257 + 258 + sfi_table_parse(SFI_SIG_GPIO, NULL, NULL, mfd_emmc_gpio_parse); 259 + 260 + switch (slot->chip->pdev->device) { 261 + case PCI_DEVICE_ID_INTEL_MFD_EMMC0: 262 + gpio = mfd_emmc0_rst_gpio; 263 + name = "eMMC0_reset"; 264 + break; 265 + case PCI_DEVICE_ID_INTEL_MFD_EMMC1: 266 + gpio = mfd_emmc1_rst_gpio; 267 + name = "eMMC1_reset"; 268 + break; 269 + } 270 + 271 + if (!gpio_request(gpio, name)) { 272 + gpio_direction_output(gpio, 1); 273 + slot->rst_n_gpio = gpio; 274 + slot->host->mmc->caps |= 
MMC_CAP_HW_RESET; 275 + } 276 + 277 + slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE; 278 + 279 + slot->host->mmc->caps2 = MMC_CAP2_BOOTPART_NOACC; 280 + 178 281 return 0; 282 + } 283 + 284 + static void mfd_emmc_remove_slot(struct sdhci_pci_slot *slot, int dead) 285 + { 286 + gpio_free(slot->rst_n_gpio); 179 287 } 180 288 181 289 static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = { ··· 307 181 308 182 static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = { 309 183 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, 184 + .allow_runtime_pm = true, 185 + .probe_slot = mfd_sd_probe_slot, 186 + .remove_slot = mfd_sd_remove_slot, 310 187 }; 311 188 312 189 static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = { 313 190 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, 191 + .allow_runtime_pm = true, 314 192 }; 315 193 316 194 static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc = { 317 195 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, 196 + .allow_runtime_pm = true, 318 197 .probe_slot = mfd_emmc_probe_slot, 198 + .remove_slot = mfd_emmc_remove_slot, 319 199 }; 320 200 321 201 /* O2Micro extra registers */ ··· 964 832 return 0; 965 833 } 966 834 835 + static void sdhci_pci_hw_reset(struct sdhci_host *host) 836 + { 837 + struct sdhci_pci_slot *slot = sdhci_priv(host); 838 + int rst_n_gpio = slot->rst_n_gpio; 839 + 840 + if (!gpio_is_valid(rst_n_gpio)) 841 + return; 842 + gpio_set_value_cansleep(rst_n_gpio, 0); 843 + /* For eMMC, minimum is 1us but give it 10us for good measure */ 844 + udelay(10); 845 + gpio_set_value_cansleep(rst_n_gpio, 1); 846 + /* For eMMC, minimum is 200us but give it 300us for good measure */ 847 + usleep_range(300, 1000); 848 + } 849 + 967 850 static struct sdhci_ops sdhci_pci_ops = { 968 851 .enable_dma = sdhci_pci_enable_dma, 969 852 .platform_8bit_width = sdhci_pci_8bit_width, 853 + .hw_reset = sdhci_pci_hw_reset, 970 854 }; 971 855 972 856 
/*****************************************************************************\ ··· 1092 944 1093 945 #endif /* CONFIG_PM */ 1094 946 947 + #ifdef CONFIG_PM_RUNTIME 948 + 949 + static int sdhci_pci_runtime_suspend(struct device *dev) 950 + { 951 + struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); 952 + struct sdhci_pci_chip *chip; 953 + struct sdhci_pci_slot *slot; 954 + pm_message_t state = { .event = PM_EVENT_SUSPEND }; 955 + int i, ret; 956 + 957 + chip = pci_get_drvdata(pdev); 958 + if (!chip) 959 + return 0; 960 + 961 + for (i = 0; i < chip->num_slots; i++) { 962 + slot = chip->slots[i]; 963 + if (!slot) 964 + continue; 965 + 966 + ret = sdhci_runtime_suspend_host(slot->host); 967 + 968 + if (ret) { 969 + for (i--; i >= 0; i--) 970 + sdhci_runtime_resume_host(chip->slots[i]->host); 971 + return ret; 972 + } 973 + } 974 + 975 + if (chip->fixes && chip->fixes->suspend) { 976 + ret = chip->fixes->suspend(chip, state); 977 + if (ret) { 978 + for (i = chip->num_slots - 1; i >= 0; i--) 979 + sdhci_runtime_resume_host(chip->slots[i]->host); 980 + return ret; 981 + } 982 + } 983 + 984 + return 0; 985 + } 986 + 987 + static int sdhci_pci_runtime_resume(struct device *dev) 988 + { 989 + struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); 990 + struct sdhci_pci_chip *chip; 991 + struct sdhci_pci_slot *slot; 992 + int i, ret; 993 + 994 + chip = pci_get_drvdata(pdev); 995 + if (!chip) 996 + return 0; 997 + 998 + if (chip->fixes && chip->fixes->resume) { 999 + ret = chip->fixes->resume(chip); 1000 + if (ret) 1001 + return ret; 1002 + } 1003 + 1004 + for (i = 0; i < chip->num_slots; i++) { 1005 + slot = chip->slots[i]; 1006 + if (!slot) 1007 + continue; 1008 + 1009 + ret = sdhci_runtime_resume_host(slot->host); 1010 + if (ret) 1011 + return ret; 1012 + } 1013 + 1014 + return 0; 1015 + } 1016 + 1017 + static int sdhci_pci_runtime_idle(struct device *dev) 1018 + { 1019 + return 0; 1020 + } 1021 + 1022 + #else 1023 + 1024 + #define 
sdhci_pci_runtime_suspend NULL 1025 + #define sdhci_pci_runtime_resume NULL 1026 + #define sdhci_pci_runtime_idle NULL 1027 + 1028 + #endif 1029 + 1030 + static const struct dev_pm_ops sdhci_pci_pm_ops = { 1031 + .runtime_suspend = sdhci_pci_runtime_suspend, 1032 + .runtime_resume = sdhci_pci_runtime_resume, 1033 + .runtime_idle = sdhci_pci_runtime_idle, 1034 + }; 1035 + 1095 1036 /*****************************************************************************\ 1096 1037 * * 1097 1038 * Device probing/removal * ··· 1225 988 slot->chip = chip; 1226 989 slot->host = host; 1227 990 slot->pci_bar = bar; 991 + slot->rst_n_gpio = -EINVAL; 1228 992 1229 993 host->hw_name = "PCI"; 1230 994 host->ops = &sdhci_pci_ops; ··· 1296 1058 sdhci_free_host(slot->host); 1297 1059 } 1298 1060 1061 + static void __devinit sdhci_pci_runtime_pm_allow(struct device *dev) 1062 + { 1063 + pm_runtime_put_noidle(dev); 1064 + pm_runtime_allow(dev); 1065 + pm_runtime_set_autosuspend_delay(dev, 50); 1066 + pm_runtime_use_autosuspend(dev); 1067 + pm_suspend_ignore_children(dev, 1); 1068 + } 1069 + 1070 + static void __devexit sdhci_pci_runtime_pm_forbid(struct device *dev) 1071 + { 1072 + pm_runtime_forbid(dev); 1073 + pm_runtime_get_noresume(dev); 1074 + } 1075 + 1299 1076 static int __devinit sdhci_pci_probe(struct pci_dev *pdev, 1300 1077 const struct pci_device_id *ent) 1301 1078 { ··· 1360 1107 1361 1108 chip->pdev = pdev; 1362 1109 chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data; 1363 - if (chip->fixes) 1110 + if (chip->fixes) { 1364 1111 chip->quirks = chip->fixes->quirks; 1112 + chip->allow_runtime_pm = chip->fixes->allow_runtime_pm; 1113 + } 1365 1114 chip->num_slots = slots; 1366 1115 1367 1116 pci_set_drvdata(pdev, chip); ··· 1388 1133 chip->slots[i] = slot; 1389 1134 } 1390 1135 1136 + if (chip->allow_runtime_pm) 1137 + sdhci_pci_runtime_pm_allow(&pdev->dev); 1138 + 1391 1139 return 0; 1392 1140 1393 1141 free: ··· 1410 1152 chip = pci_get_drvdata(pdev); 1411 1153 1412 
1154 if (chip) { 1155 + if (chip->allow_runtime_pm) 1156 + sdhci_pci_runtime_pm_forbid(&pdev->dev); 1157 + 1413 1158 for (i = 0; i < chip->num_slots; i++) 1414 1159 sdhci_pci_remove_slot(chip->slots[i]); 1415 1160 ··· 1430 1169 .remove = __devexit_p(sdhci_pci_remove), 1431 1170 .suspend = sdhci_pci_suspend, 1432 1171 .resume = sdhci_pci_resume, 1172 + .driver = { 1173 + .pm = &sdhci_pci_pm_ops 1174 + }, 1433 1175 }; 1434 1176 1435 1177 /*****************************************************************************\
+1
drivers/mmc/host/sdhci-pltfm.c
··· 29 29 */ 30 30 31 31 #include <linux/err.h> 32 + #include <linux/module.h> 32 33 #include <linux/of.h> 33 34 #ifdef CONFIG_PPC 34 35 #include <asm/machdep.h>
+3 -2
drivers/mmc/host/sdhci-pxav2.c
··· 21 21 #include <linux/init.h> 22 22 #include <linux/platform_device.h> 23 23 #include <linux/clk.h> 24 + #include <linux/module.h> 24 25 #include <linux/io.h> 25 26 #include <linux/gpio.h> 26 27 #include <linux/mmc/card.h> ··· 60 59 * tune timing of read data/command when crc error happen 61 60 * no performance impact 62 61 */ 63 - if (pdata->clk_delay_sel == 1) { 62 + if (pdata && pdata->clk_delay_sel == 1) { 64 63 tmp = readw(host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP); 65 64 66 65 tmp &= ~(SDCLK_DELAY_MASK << SDCLK_DELAY_SHIFT); ··· 72 71 writew(tmp, host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP); 73 72 } 74 73 75 - if (pdata->flags & PXA_FLAG_ENABLE_CLOCK_GATING) { 74 + if (pdata && (pdata->flags & PXA_FLAG_ENABLE_CLOCK_GATING)) { 76 75 tmp = readw(host->ioaddr + SD_FIFO_PARAM); 77 76 tmp &= ~CLK_GATE_SETTING_BITS; 78 77 writew(tmp, host->ioaddr + SD_FIFO_PARAM);
+1
drivers/mmc/host/sdhci-pxav3.c
··· 27 27 #include <linux/platform_data/pxa_sdhci.h> 28 28 #include <linux/slab.h> 29 29 #include <linux/delay.h> 30 + #include <linux/module.h> 30 31 #include "sdhci.h" 31 32 #include "sdhci-pltfm.h" 32 33
+19 -11
drivers/mmc/host/sdhci-s3c.c
··· 203 203 writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2); 204 204 } 205 205 206 - /* reconfigure the hardware for new clock rate */ 206 + /* reprogram default hardware configuration */ 207 + writel(S3C64XX_SDHCI_CONTROL4_DRIVE_9mA, 208 + host->ioaddr + S3C64XX_SDHCI_CONTROL4); 207 209 208 - { 209 - struct mmc_ios ios; 210 + ctrl = readl(host->ioaddr + S3C_SDHCI_CONTROL2); 211 + ctrl |= (S3C64XX_SDHCI_CTRL2_ENSTAASYNCCLR | 212 + S3C64XX_SDHCI_CTRL2_ENCMDCNFMSK | 213 + S3C_SDHCI_CTRL2_ENFBCLKRX | 214 + S3C_SDHCI_CTRL2_DFCNT_NONE | 215 + S3C_SDHCI_CTRL2_ENCLKOUTHOLD); 216 + writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2); 210 217 211 - ios.clock = clock; 212 - 213 - if (ourhost->pdata->cfg_card) 214 - (ourhost->pdata->cfg_card)(ourhost->pdev, host->ioaddr, 215 - &ios, NULL); 216 - } 218 + /* reconfigure the controller for new clock rate */ 219 + ctrl = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0); 220 + if (clock < 25 * 1000000) 221 + ctrl |= (S3C_SDHCI_CTRL3_FCSEL3 | S3C_SDHCI_CTRL3_FCSEL2); 222 + writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL3); 217 223 } 218 224 219 225 /** ··· 567 561 568 562 err_req_regs: 569 563 for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) { 570 - clk_disable(sc->clk_bus[ptr]); 571 - clk_put(sc->clk_bus[ptr]); 564 + if (sc->clk_bus[ptr]) { 565 + clk_disable(sc->clk_bus[ptr]); 566 + clk_put(sc->clk_bus[ptr]); 567 + } 572 568 } 573 569 574 570 err_no_busclks:
+1 -2
drivers/mmc/host/sdhci-spear.c
··· 17 17 #include <linux/delay.h> 18 18 #include <linux/gpio.h> 19 19 #include <linux/highmem.h> 20 + #include <linux/module.h> 20 21 #include <linux/interrupt.h> 21 22 #include <linux/irq.h> 22 23 #include <linux/platform_device.h> ··· 178 177 sdhci->data->card_power_gpio); 179 178 goto err_pgpio_direction; 180 179 } 181 - 182 - gpio_set_value(sdhci->data->card_power_gpio, 1); 183 180 } 184 181 185 182 if (sdhci->data->card_int_gpio >= 0) {
+44 -11
drivers/mmc/host/sdhci-tegra.c
··· 17 17 #include <linux/platform_device.h> 18 18 #include <linux/clk.h> 19 19 #include <linux/io.h> 20 + #include <linux/of.h> 21 + #include <linux/of_gpio.h> 20 22 #include <linux/gpio.h> 21 23 #include <linux/mmc/card.h> 22 24 #include <linux/mmc/host.h> 25 + #include <linux/module.h> 23 26 24 27 #include <asm/gpio.h> 25 28 ··· 78 75 79 76 static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci) 80 77 { 81 - struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc)); 82 - struct tegra_sdhci_platform_data *plat; 83 - 84 - plat = pdev->dev.platform_data; 78 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci); 79 + struct tegra_sdhci_platform_data *plat = pltfm_host->priv; 85 80 86 81 if (!gpio_is_valid(plat->wp_gpio)) 87 82 return -1; ··· 97 96 98 97 static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width) 99 98 { 100 - struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc)); 101 - struct tegra_sdhci_platform_data *plat; 99 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 100 + struct tegra_sdhci_platform_data *plat = pltfm_host->priv; 102 101 u32 ctrl; 103 - 104 - plat = pdev->dev.platform_data; 105 102 106 103 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 107 104 if (plat->is_8bit && bus_width == MMC_BUS_WIDTH_8) { ··· 132 133 .ops = &tegra_sdhci_ops, 133 134 }; 134 135 136 + static const struct of_device_id sdhci_tegra_dt_match[] __devinitdata = { 137 + { .compatible = "nvidia,tegra20-sdhci", }, 138 + {} 139 + }; 140 + MODULE_DEVICE_TABLE(of, sdhci_dt_ids); 141 + 142 + static struct tegra_sdhci_platform_data * __devinit sdhci_tegra_dt_parse_pdata( 143 + struct platform_device *pdev) 144 + { 145 + struct tegra_sdhci_platform_data *plat; 146 + struct device_node *np = pdev->dev.of_node; 147 + 148 + if (!np) 149 + return NULL; 150 + 151 + plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); 152 + if (!plat) { 153 + dev_err(&pdev->dev, "Can't allocate platform data\n"); 154 + return NULL; 155 + } 
156 + 157 + plat->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0); 158 + plat->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0); 159 + plat->power_gpio = of_get_named_gpio(np, "power-gpios", 0); 160 + if (of_find_property(np, "support-8bit", NULL)) 161 + plat->is_8bit = 1; 162 + 163 + return plat; 164 + } 165 + 135 166 static int __devinit sdhci_tegra_probe(struct platform_device *pdev) 136 167 { 137 168 struct sdhci_pltfm_host *pltfm_host; ··· 178 149 179 150 plat = pdev->dev.platform_data; 180 151 152 + if (plat == NULL) 153 + plat = sdhci_tegra_dt_parse_pdata(pdev); 154 + 181 155 if (plat == NULL) { 182 156 dev_err(mmc_dev(host->mmc), "missing platform data\n"); 183 157 rc = -ENXIO; 184 158 goto err_no_plat; 185 159 } 160 + 161 + pltfm_host->priv = plat; 186 162 187 163 if (gpio_is_valid(plat->power_gpio)) { 188 164 rc = gpio_request(plat->power_gpio, "sdhci_power"); ··· 283 249 { 284 250 struct sdhci_host *host = platform_get_drvdata(pdev); 285 251 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 286 - struct tegra_sdhci_platform_data *plat; 252 + struct tegra_sdhci_platform_data *plat = pltfm_host->priv; 287 253 int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); 288 254 289 255 sdhci_remove_host(host, dead); 290 - 291 - plat = pdev->dev.platform_data; 292 256 293 257 if (gpio_is_valid(plat->wp_gpio)) { 294 258 tegra_gpio_disable(plat->wp_gpio); ··· 316 284 .driver = { 317 285 .name = "sdhci-tegra", 318 286 .owner = THIS_MODULE, 287 + .of_match_table = sdhci_tegra_dt_match, 319 288 }, 320 289 .probe = sdhci_tegra_probe, 321 290 .remove = __devexit_p(sdhci_tegra_remove),
+271 -87
drivers/mmc/host/sdhci.c
··· 16 16 #include <linux/delay.h> 17 17 #include <linux/highmem.h> 18 18 #include <linux/io.h> 19 + #include <linux/module.h> 19 20 #include <linux/dma-mapping.h> 20 21 #include <linux/slab.h> 21 22 #include <linux/scatterlist.h> 22 23 #include <linux/regulator/consumer.h> 24 + #include <linux/pm_runtime.h> 23 25 24 26 #include <linux/leds.h> 25 27 ··· 43 41 #define MAX_TUNING_LOOP 40 44 42 45 43 static unsigned int debug_quirks = 0; 44 + static unsigned int debug_quirks2; 46 45 47 46 static void sdhci_finish_data(struct sdhci_host *); 48 47 ··· 52 49 static int sdhci_execute_tuning(struct mmc_host *mmc); 53 50 static void sdhci_tuning_timer(unsigned long data); 54 51 52 + #ifdef CONFIG_PM_RUNTIME 53 + static int sdhci_runtime_pm_get(struct sdhci_host *host); 54 + static int sdhci_runtime_pm_put(struct sdhci_host *host); 55 + #else 56 + static inline int sdhci_runtime_pm_get(struct sdhci_host *host) 57 + { 58 + return 0; 59 + } 60 + static inline int sdhci_runtime_pm_put(struct sdhci_host *host) 61 + { 62 + return 0; 63 + } 64 + #endif 65 + 55 66 static void sdhci_dumpregs(struct sdhci_host *host) 56 67 { 57 - printk(KERN_DEBUG DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n", 68 + pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n", 58 69 mmc_hostname(host->mmc)); 59 70 60 - printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n", 71 + pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n", 61 72 sdhci_readl(host, SDHCI_DMA_ADDRESS), 62 73 sdhci_readw(host, SDHCI_HOST_VERSION)); 63 - printk(KERN_DEBUG DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n", 74 + pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n", 64 75 sdhci_readw(host, SDHCI_BLOCK_SIZE), 65 76 sdhci_readw(host, SDHCI_BLOCK_COUNT)); 66 - printk(KERN_DEBUG DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n", 77 + pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n", 67 78 sdhci_readl(host, SDHCI_ARGUMENT), 68 79 
sdhci_readw(host, SDHCI_TRANSFER_MODE)); 69 - printk(KERN_DEBUG DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n", 80 + pr_debug(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n", 70 81 sdhci_readl(host, SDHCI_PRESENT_STATE), 71 82 sdhci_readb(host, SDHCI_HOST_CONTROL)); 72 - printk(KERN_DEBUG DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n", 83 + pr_debug(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n", 73 84 sdhci_readb(host, SDHCI_POWER_CONTROL), 74 85 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL)); 75 - printk(KERN_DEBUG DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n", 86 + pr_debug(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n", 76 87 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL), 77 88 sdhci_readw(host, SDHCI_CLOCK_CONTROL)); 78 - printk(KERN_DEBUG DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n", 89 + pr_debug(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n", 79 90 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL), 80 91 sdhci_readl(host, SDHCI_INT_STATUS)); 81 - printk(KERN_DEBUG DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n", 92 + pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n", 82 93 sdhci_readl(host, SDHCI_INT_ENABLE), 83 94 sdhci_readl(host, SDHCI_SIGNAL_ENABLE)); 84 - printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n", 95 + pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n", 85 96 sdhci_readw(host, SDHCI_ACMD12_ERR), 86 97 sdhci_readw(host, SDHCI_SLOT_INT_STATUS)); 87 - printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n", 98 + pr_debug(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n", 88 99 sdhci_readl(host, SDHCI_CAPABILITIES), 89 100 sdhci_readl(host, SDHCI_CAPABILITIES_1)); 90 - printk(KERN_DEBUG DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n", 101 + pr_debug(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n", 91 102 sdhci_readw(host, SDHCI_COMMAND), 92 103 sdhci_readl(host, SDHCI_MAX_CURRENT)); 93 - printk(KERN_DEBUG DRIVER_NAME ": Host ctl2: 0x%08x\n", 104 + 
pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n", 94 105 sdhci_readw(host, SDHCI_HOST_CONTROL2)); 95 106 96 107 if (host->flags & SDHCI_USE_ADMA) 97 - printk(KERN_DEBUG DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", 108 + pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", 98 109 readl(host->ioaddr + SDHCI_ADMA_ERROR), 99 110 readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); 100 111 101 - printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n"); 112 + pr_debug(DRIVER_NAME ": ===========================================\n"); 102 113 } 103 114 104 115 /*****************************************************************************\ ··· 147 130 u32 present, irqs; 148 131 149 132 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) 133 + return; 134 + 135 + if (host->quirks2 & SDHCI_QUIRK2_OWN_CARD_DETECTION) 150 136 return; 151 137 152 138 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & ··· 200 180 /* hw clears the bit when it's done */ 201 181 while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) { 202 182 if (timeout == 0) { 203 - printk(KERN_ERR "%s: Reset 0x%x never completed.\n", 183 + pr_err("%s: Reset 0x%x never completed.\n", 204 184 mmc_hostname(host->mmc), (int)mask); 205 185 sdhci_dumpregs(host); 206 186 return; ··· 271 251 272 252 spin_lock_irqsave(&host->lock, flags); 273 253 254 + if (host->runtime_suspended) 255 + goto out; 256 + 274 257 if (brightness == LED_OFF) 275 258 sdhci_deactivate_led(host); 276 259 else 277 260 sdhci_activate_led(host); 278 - 261 + out: 279 262 spin_unlock_irqrestore(&host->lock, flags); 280 263 } 281 264 #endif ··· 677 654 } 678 655 679 656 if (count >= 0xF) { 680 - printk(KERN_WARNING "%s: Too large timeout requested for CMD%d!\n", 657 + pr_warning("%s: Too large timeout requested for CMD%d!\n", 681 658 mmc_hostname(host->mmc), cmd->opcode); 682 659 count = 0xE; 683 660 } ··· 972 949 973 950 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) { 974 951 if (timeout == 0) { 975 - 
printk(KERN_ERR "%s: Controller never released " 952 + pr_err("%s: Controller never released " 976 953 "inhibit bit(s).\n", mmc_hostname(host->mmc)); 977 954 sdhci_dumpregs(host); 978 955 cmd->error = -EIO; ··· 994 971 sdhci_set_transfer_mode(host, cmd); 995 972 996 973 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) { 997 - printk(KERN_ERR "%s: Unsupported response type!\n", 974 + pr_err("%s: Unsupported response type!\n", 998 975 mmc_hostname(host->mmc)); 999 976 cmd->error = -EINVAL; 1000 977 tasklet_schedule(&host->finish_tasklet); ··· 1144 1121 while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL)) 1145 1122 & SDHCI_CLOCK_INT_STABLE)) { 1146 1123 if (timeout == 0) { 1147 - printk(KERN_ERR "%s: Internal clock never " 1124 + pr_err("%s: Internal clock never " 1148 1125 "stabilised.\n", mmc_hostname(host->mmc)); 1149 1126 sdhci_dumpregs(host); 1150 1127 return; ··· 1232 1209 1233 1210 host = mmc_priv(mmc); 1234 1211 1212 + sdhci_runtime_pm_get(host); 1213 + 1235 1214 spin_lock_irqsave(&host->lock, flags); 1236 1215 1237 1216 WARN_ON(host->mrq != NULL); ··· 1294 1269 spin_unlock_irqrestore(&host->lock, flags); 1295 1270 } 1296 1271 1297 - static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 1272 + static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) 1298 1273 { 1299 - struct sdhci_host *host; 1300 1274 unsigned long flags; 1301 1275 u8 ctrl; 1302 - 1303 - host = mmc_priv(mmc); 1304 1276 1305 1277 spin_lock_irqsave(&host->lock, flags); 1306 1278 ··· 1448 1426 spin_unlock_irqrestore(&host->lock, flags); 1449 1427 } 1450 1428 1451 - static int check_ro(struct sdhci_host *host) 1429 + static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 1430 + { 1431 + struct sdhci_host *host = mmc_priv(mmc); 1432 + 1433 + sdhci_runtime_pm_get(host); 1434 + sdhci_do_set_ios(host, ios); 1435 + sdhci_runtime_pm_put(host); 1436 + } 1437 + 1438 + static int sdhci_check_ro(struct sdhci_host *host) 1452 1439 { 1453 1440 
unsigned long flags; 1454 1441 int is_readonly; ··· 1481 1450 1482 1451 #define SAMPLE_COUNT 5 1483 1452 1484 - static int sdhci_get_ro(struct mmc_host *mmc) 1453 + static int sdhci_do_get_ro(struct sdhci_host *host) 1485 1454 { 1486 - struct sdhci_host *host; 1487 1455 int i, ro_count; 1488 1456 1489 - host = mmc_priv(mmc); 1490 - 1491 1457 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT)) 1492 - return check_ro(host); 1458 + return sdhci_check_ro(host); 1493 1459 1494 1460 ro_count = 0; 1495 1461 for (i = 0; i < SAMPLE_COUNT; i++) { 1496 - if (check_ro(host)) { 1462 + if (sdhci_check_ro(host)) { 1497 1463 if (++ro_count > SAMPLE_COUNT / 2) 1498 1464 return 1; 1499 1465 } ··· 1499 1471 return 0; 1500 1472 } 1501 1473 1502 - static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) 1474 + static void sdhci_hw_reset(struct mmc_host *mmc) 1503 1475 { 1504 - struct sdhci_host *host; 1505 - unsigned long flags; 1476 + struct sdhci_host *host = mmc_priv(mmc); 1506 1477 1507 - host = mmc_priv(mmc); 1478 + if (host->ops && host->ops->hw_reset) 1479 + host->ops->hw_reset(host); 1480 + } 1508 1481 1509 - spin_lock_irqsave(&host->lock, flags); 1482 + static int sdhci_get_ro(struct mmc_host *mmc) 1483 + { 1484 + struct sdhci_host *host = mmc_priv(mmc); 1485 + int ret; 1510 1486 1487 + sdhci_runtime_pm_get(host); 1488 + ret = sdhci_do_get_ro(host); 1489 + sdhci_runtime_pm_put(host); 1490 + return ret; 1491 + } 1492 + 1493 + static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable) 1494 + { 1511 1495 if (host->flags & SDHCI_DEVICE_DEAD) 1496 + goto out; 1497 + 1498 + if (enable) 1499 + host->flags |= SDHCI_SDIO_IRQ_ENABLED; 1500 + else 1501 + host->flags &= ~SDHCI_SDIO_IRQ_ENABLED; 1502 + 1503 + /* SDIO IRQ will be enabled as appropriate in runtime resume */ 1504 + if (host->runtime_suspended) 1512 1505 goto out; 1513 1506 1514 1507 if (enable) ··· 1538 1489 sdhci_mask_irqs(host, SDHCI_INT_CARD_INT); 1539 1490 out: 1540 1491 mmiowb(); 1492 + } 
1541 1493 1494 + static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) 1495 + { 1496 + struct sdhci_host *host = mmc_priv(mmc); 1497 + unsigned long flags; 1498 + 1499 + spin_lock_irqsave(&host->lock, flags); 1500 + sdhci_enable_sdio_irq_nolock(host, enable); 1542 1501 spin_unlock_irqrestore(&host->lock, flags); 1543 1502 } 1544 1503 1545 - static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, 1546 - struct mmc_ios *ios) 1504 + static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host, 1505 + struct mmc_ios *ios) 1547 1506 { 1548 - struct sdhci_host *host; 1549 1507 u8 pwr; 1550 1508 u16 clk, ctrl; 1551 1509 u32 present_state; 1552 - 1553 - host = mmc_priv(mmc); 1554 1510 1555 1511 /* 1556 1512 * Signal Voltage Switching is only applicable for Host Controllers ··· 1582 1528 if (!(ctrl & SDHCI_CTRL_VDD_180)) 1583 1529 return 0; 1584 1530 else { 1585 - printk(KERN_INFO DRIVER_NAME ": Switching to 3.3V " 1531 + pr_info(DRIVER_NAME ": Switching to 3.3V " 1586 1532 "signalling voltage failed\n"); 1587 1533 return -EIO; 1588 1534 } ··· 1641 1587 pwr |= SDHCI_POWER_ON; 1642 1588 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 1643 1589 1644 - printk(KERN_INFO DRIVER_NAME ": Switching to 1.8V signalling " 1590 + pr_info(DRIVER_NAME ": Switching to 1.8V signalling " 1645 1591 "voltage failed, retrying with S18R set to 0\n"); 1646 1592 return -EAGAIN; 1647 1593 } else 1648 1594 /* No signal voltage switch required */ 1649 1595 return 0; 1596 + } 1597 + 1598 + static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, 1599 + struct mmc_ios *ios) 1600 + { 1601 + struct sdhci_host *host = mmc_priv(mmc); 1602 + int err; 1603 + 1604 + if (host->version < SDHCI_SPEC_300) 1605 + return 0; 1606 + sdhci_runtime_pm_get(host); 1607 + err = sdhci_do_start_signal_voltage_switch(host, ios); 1608 + sdhci_runtime_pm_put(host); 1609 + return err; 1650 1610 } 1651 1611 1652 1612 static int sdhci_execute_tuning(struct mmc_host *mmc) ··· 1674 1606 
1675 1607 host = mmc_priv(mmc); 1676 1608 1609 + sdhci_runtime_pm_get(host); 1677 1610 disable_irq(host->irq); 1678 1611 spin_lock(&host->lock); 1679 1612 ··· 1692 1623 else { 1693 1624 spin_unlock(&host->lock); 1694 1625 enable_irq(host->irq); 1626 + sdhci_runtime_pm_put(host); 1695 1627 return 0; 1696 1628 } 1697 1629 ··· 1718 1648 timeout = 150; 1719 1649 do { 1720 1650 struct mmc_command cmd = {0}; 1721 - struct mmc_request mrq = {0}; 1651 + struct mmc_request mrq = {NULL}; 1722 1652 1723 1653 if (!tuning_loop_counter && !timeout) 1724 1654 break; ··· 1764 1694 spin_lock(&host->lock); 1765 1695 1766 1696 if (!host->tuning_done) { 1767 - printk(KERN_INFO DRIVER_NAME ": Timeout waiting for " 1697 + pr_info(DRIVER_NAME ": Timeout waiting for " 1768 1698 "Buffer Read Ready interrupt during tuning " 1769 1699 "procedure, falling back to fixed sampling " 1770 1700 "clock\n"); ··· 1794 1724 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 1795 1725 } else { 1796 1726 if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) { 1797 - printk(KERN_INFO DRIVER_NAME ": Tuning procedure" 1727 + pr_info(DRIVER_NAME ": Tuning procedure" 1798 1728 " failed, falling back to fixed sampling" 1799 1729 " clock\n"); 1800 1730 err = -EIO; ··· 1836 1766 sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier); 1837 1767 spin_unlock(&host->lock); 1838 1768 enable_irq(host->irq); 1769 + sdhci_runtime_pm_put(host); 1839 1770 1840 1771 return err; 1841 1772 } 1842 1773 1843 - static void sdhci_enable_preset_value(struct mmc_host *mmc, bool enable) 1774 + static void sdhci_do_enable_preset_value(struct sdhci_host *host, bool enable) 1844 1775 { 1845 - struct sdhci_host *host; 1846 1776 u16 ctrl; 1847 1777 unsigned long flags; 1848 - 1849 - host = mmc_priv(mmc); 1850 1778 1851 1779 /* Host Controller v3.00 defines preset value registers */ 1852 1780 if (host->version < SDHCI_SPEC_300) ··· 1861 1793 if (enable && !(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) { 1862 1794 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; 1863 1795 
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 1796 + host->flags |= SDHCI_PV_ENABLED; 1864 1797 } else if (!enable && (ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) { 1865 1798 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; 1866 1799 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 1800 + host->flags &= ~SDHCI_PV_ENABLED; 1867 1801 } 1868 1802 1869 1803 spin_unlock_irqrestore(&host->lock, flags); 1804 + } 1805 + 1806 + static void sdhci_enable_preset_value(struct mmc_host *mmc, bool enable) 1807 + { 1808 + struct sdhci_host *host = mmc_priv(mmc); 1809 + 1810 + sdhci_runtime_pm_get(host); 1811 + sdhci_do_enable_preset_value(host, enable); 1812 + sdhci_runtime_pm_put(host); 1870 1813 } 1871 1814 1872 1815 static const struct mmc_host_ops sdhci_ops = { 1873 1816 .request = sdhci_request, 1874 1817 .set_ios = sdhci_set_ios, 1875 1818 .get_ro = sdhci_get_ro, 1819 + .hw_reset = sdhci_hw_reset, 1876 1820 .enable_sdio_irq = sdhci_enable_sdio_irq, 1877 1821 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, 1878 1822 .execute_tuning = sdhci_execute_tuning, ··· 1906 1826 1907 1827 spin_lock_irqsave(&host->lock, flags); 1908 1828 1909 - if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) { 1910 - if (host->mrq) { 1911 - printk(KERN_ERR "%s: Card removed during transfer!\n", 1912 - mmc_hostname(host->mmc)); 1913 - printk(KERN_ERR "%s: Resetting controller.\n", 1914 - mmc_hostname(host->mmc)); 1829 + /* Check host->mrq first in case we are runtime suspended */ 1830 + if (host->mrq && 1831 + !(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) { 1832 + pr_err("%s: Card removed during transfer!\n", 1833 + mmc_hostname(host->mmc)); 1834 + pr_err("%s: Resetting controller.\n", 1835 + mmc_hostname(host->mmc)); 1915 1836 1916 - sdhci_reset(host, SDHCI_RESET_CMD); 1917 - sdhci_reset(host, SDHCI_RESET_DATA); 1837 + sdhci_reset(host, SDHCI_RESET_CMD); 1838 + sdhci_reset(host, SDHCI_RESET_DATA); 1918 1839 1919 - host->mrq->cmd->error = -ENOMEDIUM; 1920 - 
tasklet_schedule(&host->finish_tasklet); 1921 - } 1840 + host->mrq->cmd->error = -ENOMEDIUM; 1841 + tasklet_schedule(&host->finish_tasklet); 1922 1842 } 1923 1843 1924 1844 spin_unlock_irqrestore(&host->lock, flags); ··· 1934 1854 1935 1855 host = (struct sdhci_host*)param; 1936 1856 1857 + spin_lock_irqsave(&host->lock, flags); 1858 + 1937 1859 /* 1938 1860 * If this tasklet gets rescheduled while running, it will 1939 1861 * be run again afterwards but without any active request. 1940 1862 */ 1941 - if (!host->mrq) 1863 + if (!host->mrq) { 1864 + spin_unlock_irqrestore(&host->lock, flags); 1942 1865 return; 1943 - 1944 - spin_lock_irqsave(&host->lock, flags); 1866 + } 1945 1867 1946 1868 del_timer(&host->timer); 1947 1869 ··· 1987 1905 spin_unlock_irqrestore(&host->lock, flags); 1988 1906 1989 1907 mmc_request_done(host->mmc, mrq); 1908 + sdhci_runtime_pm_put(host); 1990 1909 } 1991 1910 1992 1911 static void sdhci_timeout_timer(unsigned long data) ··· 2000 1917 spin_lock_irqsave(&host->lock, flags); 2001 1918 2002 1919 if (host->mrq) { 2003 - printk(KERN_ERR "%s: Timeout waiting for hardware " 1920 + pr_err("%s: Timeout waiting for hardware " 2004 1921 "interrupt.\n", mmc_hostname(host->mmc)); 2005 1922 sdhci_dumpregs(host); 2006 1923 ··· 2046 1963 BUG_ON(intmask == 0); 2047 1964 2048 1965 if (!host->cmd) { 2049 - printk(KERN_ERR "%s: Got command interrupt 0x%08x even " 1966 + pr_err("%s: Got command interrupt 0x%08x even " 2050 1967 "though no command operation was in progress.\n", 2051 1968 mmc_hostname(host->mmc), (unsigned)intmask); 2052 1969 sdhci_dumpregs(host); ··· 2146 2063 } 2147 2064 } 2148 2065 2149 - printk(KERN_ERR "%s: Got data interrupt 0x%08x even " 2066 + pr_err("%s: Got data interrupt 0x%08x even " 2150 2067 "though no data operation was in progress.\n", 2151 2068 mmc_hostname(host->mmc), (unsigned)intmask); 2152 2069 sdhci_dumpregs(host); ··· 2163 2080 != MMC_BUS_TEST_R) 2164 2081 host->data->error = -EILSEQ; 2165 2082 else if (intmask & 
SDHCI_INT_ADMA_ERROR) { 2166 - printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc)); 2083 + pr_err("%s: ADMA error\n", mmc_hostname(host->mmc)); 2167 2084 sdhci_show_adma_error(host); 2168 2085 host->data->error = -EIO; 2169 2086 } ··· 2219 2136 static irqreturn_t sdhci_irq(int irq, void *dev_id) 2220 2137 { 2221 2138 irqreturn_t result; 2222 - struct sdhci_host* host = dev_id; 2139 + struct sdhci_host *host = dev_id; 2223 2140 u32 intmask; 2224 2141 int cardint = 0; 2225 2142 2226 2143 spin_lock(&host->lock); 2144 + 2145 + if (host->runtime_suspended) { 2146 + spin_unlock(&host->lock); 2147 + pr_warning("%s: got irq while runtime suspended\n", 2148 + mmc_hostname(host->mmc)); 2149 + return IRQ_HANDLED; 2150 + } 2227 2151 2228 2152 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 2229 2153 ··· 2284 2194 intmask &= ~SDHCI_INT_ERROR; 2285 2195 2286 2196 if (intmask & SDHCI_INT_BUS_POWER) { 2287 - printk(KERN_ERR "%s: Card is consuming too much power!\n", 2197 + pr_err("%s: Card is consuming too much power!\n", 2288 2198 mmc_hostname(host->mmc)); 2289 2199 sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS); 2290 2200 } ··· 2297 2207 intmask &= ~SDHCI_INT_CARD_INT; 2298 2208 2299 2209 if (intmask) { 2300 - printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n", 2210 + pr_err("%s: Unexpected interrupt 0x%08x.\n", 2301 2211 mmc_hostname(host->mmc), intmask); 2302 2212 sdhci_dumpregs(host); 2303 2213 ··· 2365 2275 return ret; 2366 2276 } 2367 2277 2368 - 2369 2278 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 2370 2279 if (host->ops->enable_dma) 2371 2280 host->ops->enable_dma(host); ··· 2402 2313 EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups); 2403 2314 2404 2315 #endif /* CONFIG_PM */ 2316 + 2317 + #ifdef CONFIG_PM_RUNTIME 2318 + 2319 + static int sdhci_runtime_pm_get(struct sdhci_host *host) 2320 + { 2321 + return pm_runtime_get_sync(host->mmc->parent); 2322 + } 2323 + 2324 + static int sdhci_runtime_pm_put(struct sdhci_host *host) 2325 + { 2326 + 
pm_runtime_mark_last_busy(host->mmc->parent); 2327 + return pm_runtime_put_autosuspend(host->mmc->parent); 2328 + } 2329 + 2330 + int sdhci_runtime_suspend_host(struct sdhci_host *host) 2331 + { 2332 + unsigned long flags; 2333 + int ret = 0; 2334 + 2335 + /* Disable tuning since we are suspending */ 2336 + if (host->version >= SDHCI_SPEC_300 && 2337 + host->tuning_mode == SDHCI_TUNING_MODE_1) { 2338 + del_timer_sync(&host->tuning_timer); 2339 + host->flags &= ~SDHCI_NEEDS_RETUNING; 2340 + } 2341 + 2342 + spin_lock_irqsave(&host->lock, flags); 2343 + sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK); 2344 + spin_unlock_irqrestore(&host->lock, flags); 2345 + 2346 + synchronize_irq(host->irq); 2347 + 2348 + spin_lock_irqsave(&host->lock, flags); 2349 + host->runtime_suspended = true; 2350 + spin_unlock_irqrestore(&host->lock, flags); 2351 + 2352 + return ret; 2353 + } 2354 + EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host); 2355 + 2356 + int sdhci_runtime_resume_host(struct sdhci_host *host) 2357 + { 2358 + unsigned long flags; 2359 + int ret = 0, host_flags = host->flags; 2360 + 2361 + if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 2362 + if (host->ops->enable_dma) 2363 + host->ops->enable_dma(host); 2364 + } 2365 + 2366 + sdhci_init(host, 0); 2367 + 2368 + /* Force clock and power re-program */ 2369 + host->pwr = 0; 2370 + host->clock = 0; 2371 + sdhci_do_set_ios(host, &host->mmc->ios); 2372 + 2373 + sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios); 2374 + if (host_flags & SDHCI_PV_ENABLED) 2375 + sdhci_do_enable_preset_value(host, true); 2376 + 2377 + /* Set the re-tuning expiration flag */ 2378 + if ((host->version >= SDHCI_SPEC_300) && host->tuning_count && 2379 + (host->tuning_mode == SDHCI_TUNING_MODE_1)) 2380 + host->flags |= SDHCI_NEEDS_RETUNING; 2381 + 2382 + spin_lock_irqsave(&host->lock, flags); 2383 + 2384 + host->runtime_suspended = false; 2385 + 2386 + /* Enable SDIO IRQ */ 2387 + if ((host->flags & SDHCI_SDIO_IRQ_ENABLED)) 2388 + 
sdhci_enable_sdio_irq_nolock(host, true); 2389 + 2390 + /* Enable Card Detection */ 2391 + sdhci_enable_card_detection(host); 2392 + 2393 + spin_unlock_irqrestore(&host->lock, flags); 2394 + 2395 + return ret; 2396 + } 2397 + EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host); 2398 + 2399 + #endif 2405 2400 2406 2401 /*****************************************************************************\ 2407 2402 * * ··· 2529 2356 2530 2357 if (debug_quirks) 2531 2358 host->quirks = debug_quirks; 2359 + if (debug_quirks2) 2360 + host->quirks2 = debug_quirks2; 2532 2361 2533 2362 sdhci_reset(host, SDHCI_RESET_ALL); 2534 2363 ··· 2538 2363 host->version = (host->version & SDHCI_SPEC_VER_MASK) 2539 2364 >> SDHCI_SPEC_VER_SHIFT; 2540 2365 if (host->version > SDHCI_SPEC_300) { 2541 - printk(KERN_ERR "%s: Unknown controller version (%d). " 2366 + pr_err("%s: Unknown controller version (%d). " 2542 2367 "You may experience problems.\n", mmc_hostname(mmc), 2543 2368 host->version); 2544 2369 } ··· 2575 2400 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 2576 2401 if (host->ops->enable_dma) { 2577 2402 if (host->ops->enable_dma(host)) { 2578 - printk(KERN_WARNING "%s: No suitable DMA " 2403 + pr_warning("%s: No suitable DMA " 2579 2404 "available. Falling back to PIO.\n", 2580 2405 mmc_hostname(mmc)); 2581 2406 host->flags &= ··· 2595 2420 if (!host->adma_desc || !host->align_buffer) { 2596 2421 kfree(host->adma_desc); 2597 2422 kfree(host->align_buffer); 2598 - printk(KERN_WARNING "%s: Unable to allocate ADMA " 2423 + pr_warning("%s: Unable to allocate ADMA " 2599 2424 "buffers. 
Falling back to standard DMA.\n", 2600 2425 mmc_hostname(mmc)); 2601 2426 host->flags &= ~SDHCI_USE_ADMA; ··· 2623 2448 if (host->max_clk == 0 || host->quirks & 2624 2449 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) { 2625 2450 if (!host->ops->get_max_clock) { 2626 - printk(KERN_ERR 2627 - "%s: Hardware doesn't specify base clock " 2451 + pr_err("%s: Hardware doesn't specify base clock " 2628 2452 "frequency.\n", mmc_hostname(mmc)); 2629 2453 return -ENODEV; 2630 2454 } ··· 2669 2495 host->timeout_clk = host->ops->get_timeout_clock(host); 2670 2496 } else if (!(host->quirks & 2671 2497 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { 2672 - printk(KERN_ERR 2673 - "%s: Hardware doesn't specify timeout clock " 2498 + pr_err("%s: Hardware doesn't specify timeout clock " 2674 2499 "frequency.\n", mmc_hostname(mmc)); 2675 2500 return -ENODEV; 2676 2501 } ··· 2738 2565 mmc->caps |= MMC_CAP_DRIVER_TYPE_C; 2739 2566 if (caps[1] & SDHCI_DRIVER_TYPE_D) 2740 2567 mmc->caps |= MMC_CAP_DRIVER_TYPE_D; 2568 + 2569 + /* 2570 + * If Power Off Notify capability is enabled by the host, 2571 + * set notify to short power off notify timeout value. 
2572 + */ 2573 + if (mmc->caps2 & MMC_CAP2_POWEROFF_NOTIFY) 2574 + mmc->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT; 2575 + else 2576 + mmc->power_notify_type = MMC_HOST_PW_NOTIFY_NONE; 2741 2577 2742 2578 /* Initial value for re-tuning timer count */ 2743 2579 host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >> ··· 2837 2655 mmc->ocr_avail_mmc &= host->ocr_avail_mmc; 2838 2656 2839 2657 if (mmc->ocr_avail == 0) { 2840 - printk(KERN_ERR "%s: Hardware doesn't report any " 2658 + pr_err("%s: Hardware doesn't report any " 2841 2659 "support voltages.\n", mmc_hostname(mmc)); 2842 2660 return -ENODEV; 2843 2661 } ··· 2885 2703 mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >> 2886 2704 SDHCI_MAX_BLOCK_SHIFT; 2887 2705 if (mmc->max_blk_size >= 3) { 2888 - printk(KERN_WARNING "%s: Invalid maximum block size, " 2706 + pr_warning("%s: Invalid maximum block size, " 2889 2707 "assuming 512 bytes\n", mmc_hostname(mmc)); 2890 2708 mmc->max_blk_size = 0; 2891 2709 } ··· 2924 2742 2925 2743 host->vmmc = regulator_get(mmc_dev(mmc), "vmmc"); 2926 2744 if (IS_ERR(host->vmmc)) { 2927 - printk(KERN_INFO "%s: no vmmc regulator found\n", mmc_hostname(mmc)); 2745 + pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc)); 2928 2746 host->vmmc = NULL; 2929 2747 } else { 2930 2748 regulator_enable(host->vmmc); ··· 2953 2771 2954 2772 mmc_add_host(mmc); 2955 2773 2956 - printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s\n", 2774 + pr_info("%s: SDHCI controller on %s [%s] using %s\n", 2957 2775 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), 2958 2776 (host->flags & SDHCI_USE_ADMA) ? "ADMA" : 2959 2777 (host->flags & SDHCI_USE_SDMA) ? 
"DMA" : "PIO"); ··· 2986 2804 host->flags |= SDHCI_DEVICE_DEAD; 2987 2805 2988 2806 if (host->mrq) { 2989 - printk(KERN_ERR "%s: Controller removed during " 2807 + pr_err("%s: Controller removed during " 2990 2808 " transfer!\n", mmc_hostname(host->mmc)); 2991 2809 2992 2810 host->mrq->cmd->error = -ENOMEDIUM; ··· 3045 2863 3046 2864 static int __init sdhci_drv_init(void) 3047 2865 { 3048 - printk(KERN_INFO DRIVER_NAME 2866 + pr_info(DRIVER_NAME 3049 2867 ": Secure Digital Host Controller Interface driver\n"); 3050 - printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); 2868 + pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); 3051 2869 3052 2870 return 0; 3053 2871 } ··· 3060 2878 module_exit(sdhci_drv_exit); 3061 2879 3062 2880 module_param(debug_quirks, uint, 0444); 2881 + module_param(debug_quirks2, uint, 0444); 3063 2882 3064 2883 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); 3065 2884 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); 3066 2885 MODULE_LICENSE("GPL"); 3067 2886 3068 2887 MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); 2888 + MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");
+6 -1
drivers/mmc/host/sdhci.h
··· 273 273 void (*platform_reset_enter)(struct sdhci_host *host, u8 mask); 274 274 void (*platform_reset_exit)(struct sdhci_host *host, u8 mask); 275 275 int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs); 276 - 276 + void (*hw_reset)(struct sdhci_host *host); 277 277 }; 278 278 279 279 #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS ··· 377 377 extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state); 378 378 extern int sdhci_resume_host(struct sdhci_host *host); 379 379 extern void sdhci_enable_irq_wakeups(struct sdhci_host *host); 380 + #endif 381 + 382 + #ifdef CONFIG_PM_RUNTIME 383 + extern int sdhci_runtime_suspend_host(struct sdhci_host *host); 384 + extern int sdhci_runtime_resume_host(struct sdhci_host *host); 380 385 #endif 381 386 382 387 #endif /* __SDHCI_HW_H */
+1
drivers/mmc/host/sdricoh_cs.c
··· 26 26 */ 27 27 #include <linux/delay.h> 28 28 #include <linux/highmem.h> 29 + #include <linux/module.h> 29 30 #include <linux/pci.h> 30 31 #include <linux/ioport.h> 31 32 #include <linux/scatterlist.h>
+17 -4
drivers/mmc/host/sh_mmcif.c
··· 31 31 #include <linux/platform_device.h> 32 32 #include <linux/pm_runtime.h> 33 33 #include <linux/spinlock.h> 34 + #include <linux/module.h> 34 35 35 36 #define DRIVER_NAME "sh_mmcif" 36 37 #define DRIVER_VERSION "2010-04-28" ··· 166 165 struct mmc_host *mmc; 167 166 struct mmc_data *data; 168 167 struct platform_device *pd; 168 + struct sh_dmae_slave dma_slave_tx; 169 + struct sh_dmae_slave dma_slave_rx; 169 170 struct clk *hclk; 170 171 unsigned int clk; 171 172 int bus_width; ··· 326 323 static void sh_mmcif_request_dma(struct sh_mmcif_host *host, 327 324 struct sh_mmcif_plat_data *pdata) 328 325 { 326 + struct sh_dmae_slave *tx, *rx; 329 327 host->dma_active = false; 330 328 331 329 /* We can only either use DMA for both Tx and Rx or not use it at all */ 332 330 if (pdata->dma) { 331 + dev_warn(&host->pd->dev, 332 + "Update your platform to use embedded DMA slave IDs\n"); 333 + tx = &pdata->dma->chan_priv_tx; 334 + rx = &pdata->dma->chan_priv_rx; 335 + } else { 336 + tx = &host->dma_slave_tx; 337 + tx->slave_id = pdata->slave_id_tx; 338 + rx = &host->dma_slave_rx; 339 + rx->slave_id = pdata->slave_id_rx; 340 + } 341 + if (tx->slave_id > 0 && rx->slave_id > 0) { 333 342 dma_cap_mask_t mask; 334 343 335 344 dma_cap_zero(mask); 336 345 dma_cap_set(DMA_SLAVE, mask); 337 346 338 - host->chan_tx = dma_request_channel(mask, sh_mmcif_filter, 339 - &pdata->dma->chan_priv_tx); 347 + host->chan_tx = dma_request_channel(mask, sh_mmcif_filter, tx); 340 348 dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__, 341 349 host->chan_tx); 342 350 343 351 if (!host->chan_tx) 344 352 return; 345 353 346 - host->chan_rx = dma_request_channel(mask, sh_mmcif_filter, 347 - &pdata->dma->chan_priv_rx); 354 + host->chan_rx = dma_request_channel(mask, sh_mmcif_filter, rx); 348 355 dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__, 349 356 host->chan_rx); 350 357
+75 -26
drivers/mmc/host/sh_mobile_sdhi.c
··· 21 21 #include <linux/kernel.h> 22 22 #include <linux/clk.h> 23 23 #include <linux/slab.h> 24 + #include <linux/module.h> 24 25 #include <linux/platform_device.h> 25 26 #include <linux/mmc/host.h> 26 27 #include <linux/mmc/sh_mobile_sdhi.h> ··· 97 96 struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; 98 97 struct tmio_mmc_host *host; 99 98 char clk_name[8]; 100 - int i, irq, ret; 99 + int irq, ret, i = 0; 100 + bool multiplexed_isr = true; 101 101 102 102 priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL); 103 103 if (priv == NULL) { ··· 155 153 if (ret < 0) 156 154 goto eprobe; 157 155 158 - for (i = 0; i < 3; i++) { 159 - irq = platform_get_irq(pdev, i); 160 - if (irq < 0) { 161 - if (i) { 162 - continue; 163 - } else { 164 - ret = irq; 165 - goto eirq; 166 - } 167 - } 168 - ret = request_irq(irq, tmio_mmc_irq, 0, 156 + /* 157 + * Allow one or more specific (named) ISRs or 158 + * one or more multiplexed (un-named) ISRs. 159 + */ 160 + 161 + irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_CARD_DETECT); 162 + if (irq >= 0) { 163 + multiplexed_isr = false; 164 + ret = request_irq(irq, tmio_mmc_card_detect_irq, 0, 169 165 dev_name(&pdev->dev), host); 170 - if (ret) { 171 - while (i--) { 172 - irq = platform_get_irq(pdev, i); 173 - if (irq >= 0) 174 - free_irq(irq, host); 175 - } 176 - goto eirq; 177 - } 166 + if (ret) 167 + goto eirq_card_detect; 178 168 } 169 + 170 + irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDIO); 171 + if (irq >= 0) { 172 + multiplexed_isr = false; 173 + ret = request_irq(irq, tmio_mmc_sdio_irq, 0, 174 + dev_name(&pdev->dev), host); 175 + if (ret) 176 + goto eirq_sdio; 177 + } 178 + 179 + irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDCARD); 180 + if (irq >= 0) { 181 + multiplexed_isr = false; 182 + ret = request_irq(irq, tmio_mmc_sdcard_irq, 0, 183 + dev_name(&pdev->dev), host); 184 + if (ret) 185 + goto eirq_sdcard; 186 + } else if (!multiplexed_isr) { 187 + dev_err(&pdev->dev, 188 + "Principal 
SD-card IRQ is missing among named interrupts\n"); 189 + ret = irq; 190 + goto eirq_sdcard; 191 + } 192 + 193 + if (multiplexed_isr) { 194 + while (1) { 195 + irq = platform_get_irq(pdev, i); 196 + if (irq < 0) 197 + break; 198 + i++; 199 + ret = request_irq(irq, tmio_mmc_irq, 0, 200 + dev_name(&pdev->dev), host); 201 + if (ret) 202 + goto eirq_multiplexed; 203 + } 204 + 205 + /* There must be at least one IRQ source */ 206 + if (!i) 207 + goto eirq_multiplexed; 208 + } 209 + 179 210 dev_info(&pdev->dev, "%s base at 0x%08lx clock rate %u MHz\n", 180 211 mmc_hostname(host->mmc), (unsigned long) 181 212 (platform_get_resource(pdev,IORESOURCE_MEM, 0)->start), ··· 216 181 217 182 return ret; 218 183 219 - eirq: 184 + eirq_multiplexed: 185 + while (i--) { 186 + irq = platform_get_irq(pdev, i); 187 + free_irq(irq, host); 188 + } 189 + eirq_sdcard: 190 + irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDIO); 191 + if (irq >= 0) 192 + free_irq(irq, host); 193 + eirq_sdio: 194 + irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_CARD_DETECT); 195 + if (irq >= 0) 196 + free_irq(irq, host); 197 + eirq_card_detect: 220 198 tmio_mmc_host_remove(host); 221 199 eprobe: 222 200 clk_disable(priv->clk); ··· 245 197 struct tmio_mmc_host *host = mmc_priv(mmc); 246 198 struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data); 247 199 struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; 248 - int i, irq; 200 + int i = 0, irq; 249 201 250 202 p->pdata = NULL; 251 203 252 204 tmio_mmc_host_remove(host); 253 205 254 - for (i = 0; i < 3; i++) { 255 - irq = platform_get_irq(pdev, i); 256 - if (irq >= 0) 257 - free_irq(irq, host); 206 + while (1) { 207 + irq = platform_get_irq(pdev, i++); 208 + if (irq < 0) 209 + break; 210 + free_irq(irq, host); 258 211 } 259 212 260 213 clk_disable(priv->clk);
+9 -10
drivers/mmc/host/tifm_sd.c
··· 16 16 #include <linux/mmc/host.h> 17 17 #include <linux/highmem.h> 18 18 #include <linux/scatterlist.h> 19 + #include <linux/module.h> 19 20 #include <asm/io.h> 20 21 21 22 #define DRIVER_NAME "tifm_sd" ··· 632 631 } 633 632 634 633 if (host->req) { 635 - printk(KERN_ERR "%s : unfinished request detected\n", 634 + pr_err("%s : unfinished request detected\n", 636 635 dev_name(&sock->dev)); 637 636 mrq->cmd->error = -ETIMEDOUT; 638 637 goto err_out; ··· 672 671 r_data->flags & MMC_DATA_WRITE 673 672 ? PCI_DMA_TODEVICE 674 673 : PCI_DMA_FROMDEVICE)) { 675 - printk(KERN_ERR "%s : scatterlist map failed\n", 674 + pr_err("%s : scatterlist map failed\n", 676 675 dev_name(&sock->dev)); 677 676 mrq->cmd->error = -ENOMEM; 678 677 goto err_out; ··· 684 683 ? PCI_DMA_TODEVICE 685 684 : PCI_DMA_FROMDEVICE); 686 685 if (host->sg_len < 1) { 687 - printk(KERN_ERR "%s : scatterlist map failed\n", 686 + pr_err("%s : scatterlist map failed\n", 688 687 dev_name(&sock->dev)); 689 688 tifm_unmap_sg(sock, &host->bounce_buf, 1, 690 689 r_data->flags & MMC_DATA_WRITE ··· 748 747 host->req = NULL; 749 748 750 749 if (!mrq) { 751 - printk(KERN_ERR " %s : no request to complete?\n", 750 + pr_err(" %s : no request to complete?\n", 752 751 dev_name(&sock->dev)); 753 752 spin_unlock_irqrestore(&sock->lock, flags); 754 753 return; ··· 787 786 { 788 787 struct tifm_sd *host = (struct tifm_sd*)data; 789 788 790 - printk(KERN_ERR 791 - "%s : card failed to respond for a long period of time " 789 + pr_err("%s : card failed to respond for a long period of time " 792 790 "(%x, %x)\n", 793 791 dev_name(&host->dev->dev), host->req->cmd->opcode, host->cmd_flags); 794 792 ··· 905 905 } 906 906 907 907 if (rc) { 908 - printk(KERN_ERR "%s : controller failed to reset\n", 908 + pr_err("%s : controller failed to reset\n", 909 909 dev_name(&sock->dev)); 910 910 return -ENODEV; 911 911 } ··· 931 931 } 932 932 933 933 if (rc) { 934 - printk(KERN_ERR 935 - "%s : card not ready - probe failed on 
initialization\n", 934 + pr_err("%s : card not ready - probe failed on initialization\n", 936 935 dev_name(&sock->dev)); 937 936 return -ENODEV; 938 937 } ··· 952 953 953 954 if (!(TIFM_SOCK_STATE_OCCUPIED 954 955 & readl(sock->addr + SOCK_PRESENT_STATE))) { 955 - printk(KERN_WARNING "%s : card gone, unexpectedly\n", 956 + pr_warning("%s : card gone, unexpectedly\n", 956 957 dev_name(&sock->dev)); 957 958 return rc; 958 959 }
+2 -2
drivers/mmc/host/tmio_mmc.c
··· 88 88 if (ret) 89 89 goto cell_disable; 90 90 91 - ret = request_irq(irq, tmio_mmc_irq, IRQF_DISABLED | 92 - IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), host); 91 + ret = request_irq(irq, tmio_mmc_irq, IRQF_TRIGGER_FALLING, 92 + dev_name(&pdev->dev), host); 93 93 if (ret) 94 94 goto host_remove; 95 95
+7
drivers/mmc/host/tmio_mmc.h
··· 79 79 struct delayed_work delayed_reset_work; 80 80 struct work_struct done; 81 81 82 + /* Cache IRQ mask */ 83 + u32 sdcard_irq_mask; 84 + u32 sdio_irq_mask; 85 + 82 86 spinlock_t lock; /* protect host private data */ 83 87 unsigned long last_req_ts; 84 88 struct mutex ios_lock; /* protect set_ios() context */ ··· 97 93 void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i); 98 94 void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i); 99 95 irqreturn_t tmio_mmc_irq(int irq, void *devid); 96 + irqreturn_t tmio_mmc_sdcard_irq(int irq, void *devid); 97 + irqreturn_t tmio_mmc_card_detect_irq(int irq, void *devid); 98 + irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid); 100 99 101 100 static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg, 102 101 unsigned long *flags)
+95 -58
drivers/mmc/host/tmio_mmc_pio.c
··· 48 48 49 49 void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i) 50 50 { 51 - u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ); 52 - sd_ctrl_write32(host, CTL_IRQ_MASK, mask); 51 + host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ); 52 + sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask); 53 53 } 54 54 55 55 void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i) 56 56 { 57 - u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) | (i & TMIO_MASK_IRQ); 58 - sd_ctrl_write32(host, CTL_IRQ_MASK, mask); 57 + host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ); 58 + sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask); 59 59 } 60 60 61 61 static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i) ··· 92 92 static void pr_debug_status(u32 status) 93 93 { 94 94 int i = 0; 95 - printk(KERN_DEBUG "status: %08x = ", status); 95 + pr_debug("status: %08x = ", status); 96 96 STATUS_TO_TEXT(CARD_REMOVE, status, i); 97 97 STATUS_TO_TEXT(CARD_INSERT, status, i); 98 98 STATUS_TO_TEXT(SIGSTATE, status, i); ··· 127 127 128 128 if (enable) { 129 129 host->sdio_irq_enabled = 1; 130 + host->sdio_irq_mask = TMIO_SDIO_MASK_ALL & 131 + ~TMIO_SDIO_STAT_IOIRQ; 130 132 sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001); 131 - sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, 132 - (TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ)); 133 + sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask); 133 134 } else { 134 - sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL); 135 + host->sdio_irq_mask = TMIO_SDIO_MASK_ALL; 136 + sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask); 135 137 sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000); 136 138 host->sdio_irq_enabled = 0; 137 139 } ··· 545 543 spin_unlock(&host->lock); 546 544 } 547 545 548 - irqreturn_t tmio_mmc_irq(int irq, void *devid) 546 + static void tmio_mmc_card_irq_status(struct tmio_mmc_host *host, 547 + int *ireg, int *status) 549 548 { 550 - struct tmio_mmc_host *host = devid; 
549 + *status = sd_ctrl_read32(host, CTL_STATUS); 550 + *ireg = *status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask; 551 + 552 + pr_debug_status(*status); 553 + pr_debug_status(*ireg); 554 + } 555 + 556 + static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host, 557 + int ireg, int status) 558 + { 551 559 struct mmc_host *mmc = host->mmc; 552 - struct tmio_mmc_data *pdata = host->pdata; 553 - unsigned int ireg, irq_mask, status; 554 - unsigned int sdio_ireg, sdio_irq_mask, sdio_status; 555 - 556 - pr_debug("MMC IRQ begin\n"); 557 - 558 - status = sd_ctrl_read32(host, CTL_STATUS); 559 - irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); 560 - ireg = status & TMIO_MASK_IRQ & ~irq_mask; 561 - 562 - sdio_ireg = 0; 563 - if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) { 564 - sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS); 565 - sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK); 566 - sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask; 567 - 568 - sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL); 569 - 570 - if (sdio_ireg && !host->sdio_irq_enabled) { 571 - pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 
0x%04x 0x%04x 0x%04x\n", 572 - sdio_status, sdio_irq_mask, sdio_ireg); 573 - tmio_mmc_enable_sdio_irq(mmc, 0); 574 - goto out; 575 - } 576 - 577 - if (mmc->caps & MMC_CAP_SDIO_IRQ && 578 - sdio_ireg & TMIO_SDIO_STAT_IOIRQ) 579 - mmc_signal_sdio_irq(mmc); 580 - 581 - if (sdio_ireg) 582 - goto out; 583 - } 584 - 585 - pr_debug_status(status); 586 - pr_debug_status(ireg); 587 560 588 561 /* Card insert / remove attempts */ 589 562 if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) { ··· 568 591 ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) && 569 592 !work_pending(&mmc->detect.work)) 570 593 mmc_detect_change(host->mmc, msecs_to_jiffies(100)); 571 - goto out; 594 + return true; 572 595 } 573 596 574 - /* CRC and other errors */ 575 - /* if (ireg & TMIO_STAT_ERR_IRQ) 576 - * handled |= tmio_error_irq(host, irq, stat); 577 - */ 597 + return false; 598 + } 578 599 600 + irqreturn_t tmio_mmc_card_detect_irq(int irq, void *devid) 601 + { 602 + unsigned int ireg, status; 603 + struct tmio_mmc_host *host = devid; 604 + 605 + tmio_mmc_card_irq_status(host, &ireg, &status); 606 + __tmio_mmc_card_detect_irq(host, ireg, status); 607 + 608 + return IRQ_HANDLED; 609 + } 610 + EXPORT_SYMBOL(tmio_mmc_card_detect_irq); 611 + 612 + static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, 613 + int ireg, int status) 614 + { 579 615 /* Command completion */ 580 616 if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) { 581 617 tmio_mmc_ack_mmc_irqs(host, 582 618 TMIO_STAT_CMDRESPEND | 583 619 TMIO_STAT_CMDTIMEOUT); 584 620 tmio_mmc_cmd_irq(host, status); 585 - goto out; 621 + return true; 586 622 } 587 623 588 624 /* Data transfer */ 589 625 if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) { 590 626 tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ); 591 627 tmio_mmc_pio_irq(host); 592 - goto out; 628 + return true; 593 629 } 594 630 595 631 /* Data transfer completion */ 596 632 if (ireg & TMIO_STAT_DATAEND) { 597 633 tmio_mmc_ack_mmc_irqs(host, 
TMIO_STAT_DATAEND); 598 634 tmio_mmc_data_irq(host); 599 - goto out; 635 + return true; 600 636 } 601 637 602 - pr_warning("tmio_mmc: Spurious irq, disabling! " 603 - "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg); 604 - pr_debug_status(status); 605 - tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask); 638 + return false; 639 + } 606 640 607 - out: 641 + irqreturn_t tmio_mmc_sdcard_irq(int irq, void *devid) 642 + { 643 + unsigned int ireg, status; 644 + struct tmio_mmc_host *host = devid; 645 + 646 + tmio_mmc_card_irq_status(host, &ireg, &status); 647 + __tmio_mmc_sdcard_irq(host, ireg, status); 648 + 649 + return IRQ_HANDLED; 650 + } 651 + EXPORT_SYMBOL(tmio_mmc_sdcard_irq); 652 + 653 + irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid) 654 + { 655 + struct tmio_mmc_host *host = devid; 656 + struct mmc_host *mmc = host->mmc; 657 + struct tmio_mmc_data *pdata = host->pdata; 658 + unsigned int ireg, status; 659 + 660 + if (!(pdata->flags & TMIO_MMC_SDIO_IRQ)) 661 + return IRQ_HANDLED; 662 + 663 + status = sd_ctrl_read16(host, CTL_SDIO_STATUS); 664 + ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdcard_irq_mask; 665 + 666 + sd_ctrl_write16(host, CTL_SDIO_STATUS, status & ~TMIO_SDIO_MASK_ALL); 667 + 668 + if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ) 669 + mmc_signal_sdio_irq(mmc); 670 + 671 + return IRQ_HANDLED; 672 + } 673 + EXPORT_SYMBOL(tmio_mmc_sdio_irq); 674 + 675 + irqreturn_t tmio_mmc_irq(int irq, void *devid) 676 + { 677 + struct tmio_mmc_host *host = devid; 678 + unsigned int ireg, status; 679 + 680 + pr_debug("MMC IRQ begin\n"); 681 + 682 + tmio_mmc_card_irq_status(host, &ireg, &status); 683 + if (__tmio_mmc_card_detect_irq(host, ireg, status)) 684 + return IRQ_HANDLED; 685 + if (__tmio_mmc_sdcard_irq(host, ireg, status)) 686 + return IRQ_HANDLED; 687 + 688 + tmio_mmc_sdio_irq(irq, devid); 689 + 608 690 return IRQ_HANDLED; 609 691 } 610 692 EXPORT_SYMBOL(tmio_mmc_irq); ··· 918 882 tmio_mmc_clk_stop(_host); 919 883 
tmio_mmc_reset(_host); 920 884 885 + _host->sdcard_irq_mask = sd_ctrl_read32(_host, CTL_IRQ_MASK); 921 886 tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL); 922 887 if (pdata->flags & TMIO_MMC_SDIO_IRQ) 923 888 tmio_mmc_enable_sdio_irq(mmc, 0);
+2 -1
drivers/mmc/host/via-sdmmc.c
··· 9 9 */ 10 10 11 11 #include <linux/pci.h> 12 + #include <linux/module.h> 12 13 #include <linux/dma-mapping.h> 13 14 #include <linux/highmem.h> 14 15 #include <linux/delay.h> ··· 1192 1191 mmiowb(); 1193 1192 1194 1193 if (sdhost->mrq) { 1195 - printk(KERN_ERR "%s: Controller removed during " 1194 + pr_err("%s: Controller removed during " 1196 1195 "transfer\n", mmc_hostname(sdhost->mmc)); 1197 1196 1198 1197 /* make sure all DMA is stopped */
+11 -11
drivers/mmc/host/wbsd.c
··· 194 194 { 195 195 u8 setup; 196 196 197 - printk(KERN_ERR "%s: Resetting chip\n", mmc_hostname(host->mmc)); 197 + pr_err("%s: Resetting chip\n", mmc_hostname(host->mmc)); 198 198 199 199 /* 200 200 * Soft reset of chip (SD/MMC part). ··· 721 721 * Any leftover data? 722 722 */ 723 723 if (count) { 724 - printk(KERN_ERR "%s: Incomplete DMA transfer. " 724 + pr_err("%s: Incomplete DMA transfer. " 725 725 "%d bytes left.\n", 726 726 mmc_hostname(host->mmc), count); 727 727 ··· 803 803 804 804 default: 805 805 #ifdef CONFIG_MMC_DEBUG 806 - printk(KERN_WARNING "%s: Data command %d is not " 806 + pr_warning("%s: Data command %d is not " 807 807 "supported by this controller.\n", 808 808 mmc_hostname(host->mmc), cmd->opcode); 809 809 #endif ··· 1029 1029 host->flags &= ~WBSD_FCARD_PRESENT; 1030 1030 1031 1031 if (host->mrq) { 1032 - printk(KERN_ERR "%s: Card removed during transfer!\n", 1032 + pr_err("%s: Card removed during transfer!\n", 1033 1033 mmc_hostname(host->mmc)); 1034 1034 wbsd_reset(host); 1035 1035 ··· 1429 1429 free_dma(dma); 1430 1430 1431 1431 err: 1432 - printk(KERN_WARNING DRIVER_NAME ": Unable to allocate DMA %d. " 1432 + pr_warning(DRIVER_NAME ": Unable to allocate DMA %d. " 1433 1433 "Falling back on FIFO.\n", dma); 1434 1434 } 1435 1435 ··· 1664 1664 ret = wbsd_scan(host); 1665 1665 if (ret) { 1666 1666 if (pnp && (ret == -ENODEV)) { 1667 - printk(KERN_WARNING DRIVER_NAME 1667 + pr_warning(DRIVER_NAME 1668 1668 ": Unable to confirm device presence. You may " 1669 1669 "experience lock-ups.\n"); 1670 1670 } else { ··· 1688 1688 */ 1689 1689 if (pnp) { 1690 1690 if ((host->config != 0) && !wbsd_chip_validate(host)) { 1691 - printk(KERN_WARNING DRIVER_NAME 1691 + pr_warning(DRIVER_NAME 1692 1692 ": PnP active but chip not configured! " 1693 1693 "You probably have a buggy BIOS. 
" 1694 1694 "Configuring chip manually.\n"); ··· 1720 1720 1721 1721 mmc_add_host(mmc); 1722 1722 1723 - printk(KERN_INFO "%s: W83L51xD", mmc_hostname(mmc)); 1723 + pr_info("%s: W83L51xD", mmc_hostname(mmc)); 1724 1724 if (host->chip_id != 0) 1725 1725 printk(" id %x", (int)host->chip_id); 1726 1726 printk(" at 0x%x irq %d", (int)host->base, (int)host->irq); ··· 1909 1909 */ 1910 1910 if (host->config != 0) { 1911 1911 if (!wbsd_chip_validate(host)) { 1912 - printk(KERN_WARNING DRIVER_NAME 1912 + pr_warning(DRIVER_NAME 1913 1913 ": PnP active but chip not configured! " 1914 1914 "You probably have a buggy BIOS. " 1915 1915 "Configuring chip manually.\n"); ··· 1973 1973 { 1974 1974 int result; 1975 1975 1976 - printk(KERN_INFO DRIVER_NAME 1976 + pr_info(DRIVER_NAME 1977 1977 ": Winbond W83L51xD SD/MMC card interface driver\n"); 1978 - printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); 1978 + pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); 1979 1979 1980 1980 #ifdef CONFIG_PNP 1981 1981
+2 -2
include/linux/atmel-mci.h
··· 1 1 #ifndef __LINUX_ATMEL_MCI_H 2 2 #define __LINUX_ATMEL_MCI_H 3 3 4 - #define ATMEL_MCI_MAX_NR_SLOTS 2 4 + #define ATMCI_MAX_NR_SLOTS 2 5 5 6 6 /** 7 7 * struct mci_slot_pdata - board-specific per-slot configuration ··· 33 33 */ 34 34 struct mci_platform_data { 35 35 struct mci_dma_data *dma_slave; 36 - struct mci_slot_pdata slot[ATMEL_MCI_MAX_NR_SLOTS]; 36 + struct mci_slot_pdata slot[ATMCI_MAX_NR_SLOTS]; 37 37 }; 38 38 39 39 #endif /* __LINUX_ATMEL_MCI_H */
+2
include/linux/atmel_pdc.h
··· 33 33 34 34 #define ATMEL_PDC_PTSR 0x124 /* Transfer Status Register */ 35 35 36 + #define ATMEL_PDC_SCND_BUF_OFF 0x10 /* Offset between first and second buffer registers */ 37 + 36 38 #endif
+57 -1
include/linux/mmc/card.h
··· 50 50 u8 rel_sectors; 51 51 u8 rel_param; 52 52 u8 part_config; 53 + u8 cache_ctrl; 54 + u8 rst_n_function; 53 55 unsigned int part_time; /* Units: ms */ 54 56 unsigned int sa_timeout; /* Units: 100ns */ 57 + unsigned int generic_cmd6_time; /* Units: 10ms */ 58 + unsigned int power_off_longtime; /* Units: ms */ 55 59 unsigned int hs_max_dtr; 56 60 unsigned int sectors; 57 61 unsigned int card_type; ··· 67 63 bool enhanced_area_en; /* enable bit */ 68 64 unsigned long long enhanced_area_offset; /* Units: Byte */ 69 65 unsigned int enhanced_area_size; /* Units: KB */ 70 - unsigned int boot_size; /* in bytes */ 66 + unsigned int cache_size; /* Units: KB */ 67 + bool hpi_en; /* HPI enablebit */ 68 + bool hpi; /* HPI support bit */ 69 + unsigned int hpi_cmd; /* cmd used as HPI */ 71 70 u8 raw_partition_support; /* 160 */ 72 71 u8 raw_erased_mem_count; /* 181 */ 73 72 u8 raw_ext_csd_structure; /* 194 */ 74 73 u8 raw_card_type; /* 196 */ 74 + u8 out_of_int_time; /* 198 */ 75 75 u8 raw_s_a_timeout; /* 217 */ 76 76 u8 raw_hc_erase_gap_size; /* 221 */ 77 77 u8 raw_erase_timeout_mult; /* 223 */ ··· 85 77 u8 raw_sec_feature_support;/* 231 */ 86 78 u8 raw_trim_mult; /* 232 */ 87 79 u8 raw_sectors[4]; /* 212 - 4 bytes */ 80 + 81 + unsigned int feature_support; 82 + #define MMC_DISCARD_FEATURE BIT(0) /* CMD38 feature */ 88 83 }; 89 84 90 85 struct sd_scr { ··· 168 157 169 158 #define SDIO_MAX_FUNCS 7 170 159 160 + /* The number of MMC physical partitions. These consist of: 161 + * boot partitions (2), general purpose partitions (4) in MMC v4.4. 
162 + */ 163 + #define MMC_NUM_BOOT_PARTITION 2 164 + #define MMC_NUM_GP_PARTITION 4 165 + #define MMC_NUM_PHY_PARTITION 6 166 + #define MAX_MMC_PART_NAME_LEN 20 167 + 168 + /* 169 + * MMC Physical partitions 170 + */ 171 + struct mmc_part { 172 + unsigned int size; /* partition size (in bytes) */ 173 + unsigned int part_cfg; /* partition type */ 174 + char name[MAX_MMC_PART_NAME_LEN]; 175 + bool force_ro; /* to make boot parts RO by default */ 176 + }; 177 + 171 178 /* 172 179 * MMC device 173 180 */ ··· 217 188 #define MMC_QUIRK_DISABLE_CD (1<<5) /* disconnect CD/DAT[3] resistor */ 218 189 #define MMC_QUIRK_INAND_CMD38 (1<<6) /* iNAND devices have broken CMD38 */ 219 190 #define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */ 191 + #define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */ 192 + /* byte mode */ 193 + unsigned int poweroff_notify_state; /* eMMC4.5 notify feature */ 194 + #define MMC_NO_POWER_NOTIFICATION 0 195 + #define MMC_POWERED_ON 1 196 + #define MMC_POWEROFF_SHORT 2 197 + #define MMC_POWEROFF_LONG 3 220 198 221 199 unsigned int erase_size; /* erase size in sectors */ 222 200 unsigned int erase_shift; /* if erase unit is power 2 */ ··· 252 216 unsigned int sd_bus_speed; /* Bus Speed Mode set for the card */ 253 217 254 218 struct dentry *debugfs_root; 219 + struct mmc_part part[MMC_NUM_PHY_PARTITION]; /* physical partitions */ 220 + unsigned int nr_parts; 255 221 }; 222 + 223 + /* 224 + * This function fill contents in mmc_part. 
225 + */ 226 + static inline void mmc_part_add(struct mmc_card *card, unsigned int size, 227 + unsigned int part_cfg, char *name, int idx, bool ro) 228 + { 229 + card->part[card->nr_parts].size = size; 230 + card->part[card->nr_parts].part_cfg = part_cfg; 231 + sprintf(card->part[card->nr_parts].name, name, idx); 232 + card->part[card->nr_parts].force_ro = ro; 233 + card->nr_parts++; 234 + } 256 235 257 236 /* 258 237 * The world is not perfect and supplies us with broken mmc/sdio devices. ··· 426 375 static inline int mmc_card_nonstd_func_interface(const struct mmc_card *c) 427 376 { 428 377 return c->quirks & MMC_QUIRK_NONSTD_FUNC_IF; 378 + } 379 + 380 + static inline int mmc_card_broken_byte_mode_512(const struct mmc_card *c) 381 + { 382 + return c->quirks & MMC_QUIRK_BROKEN_BYTE_MODE_512; 429 383 } 430 384 431 385 #define mmc_card_name(c) ((c)->cid.prod_name)
+9
include/linux/mmc/core.h
··· 136 136 137 137 extern struct mmc_async_req *mmc_start_req(struct mmc_host *, 138 138 struct mmc_async_req *, int *); 139 + extern int mmc_interrupt_hpi(struct mmc_card *); 139 140 extern void mmc_wait_for_req(struct mmc_host *, struct mmc_request *); 140 141 extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int); 141 142 extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *); ··· 147 146 #define MMC_ERASE_ARG 0x00000000 148 147 #define MMC_SECURE_ERASE_ARG 0x80000000 149 148 #define MMC_TRIM_ARG 0x00000001 149 + #define MMC_DISCARD_ARG 0x00000003 150 150 #define MMC_SECURE_TRIM1_ARG 0x80000001 151 151 #define MMC_SECURE_TRIM2_ARG 0x80008000 152 152 ··· 158 156 unsigned int arg); 159 157 extern int mmc_can_erase(struct mmc_card *card); 160 158 extern int mmc_can_trim(struct mmc_card *card); 159 + extern int mmc_can_discard(struct mmc_card *card); 160 + extern int mmc_can_sanitize(struct mmc_card *card); 161 161 extern int mmc_can_secure_erase_trim(struct mmc_card *card); 162 162 extern int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from, 163 163 unsigned int nr); 164 164 extern unsigned int mmc_calc_max_discard(struct mmc_card *card); 165 165 166 166 extern int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen); 167 + extern int mmc_hw_reset(struct mmc_host *host); 168 + extern int mmc_hw_reset_check(struct mmc_host *host); 169 + extern int mmc_can_reset(struct mmc_card *card); 167 170 168 171 extern void mmc_set_data_timeout(struct mmc_data *, const struct mmc_card *); 169 172 extern unsigned int mmc_align_data_size(struct mmc_card *, unsigned int); ··· 177 170 extern void mmc_release_host(struct mmc_host *host); 178 171 extern void mmc_do_release_host(struct mmc_host *host); 179 172 extern int mmc_try_claim_host(struct mmc_host *host); 173 + 174 + extern int mmc_flush_cache(struct mmc_card *); 180 175 181 176 /** 182 177 * mmc_claim_host - exclusively claim a host
+4
include/linux/mmc/dw_mmc.h
··· 72 72 * rate and timeout calculations. 73 73 * @current_speed: Configured rate of the controller. 74 74 * @num_slots: Number of slots available. 75 + * @verid: Denote Version ID. 76 + * @data_offset: Set the offset of DATA register according to VERID. 75 77 * @pdev: Platform device associated with the MMC controller. 76 78 * @pdata: Platform data associated with the MMC controller. 77 79 * @slot: Slots sharing this MMC controller. ··· 149 147 u32 current_speed; 150 148 u32 num_slots; 151 149 u32 fifoth_val; 150 + u16 verid; 151 + u16 data_offset; 152 152 struct platform_device *pdev; 153 153 struct dw_mci_board *pdata; 154 154 struct dw_mci_slot *slot[MAX_MCI_SLOTS];
+29
include/linux/mmc/host.h
··· 12 12 13 13 #include <linux/leds.h> 14 14 #include <linux/sched.h> 15 + #include <linux/fault-inject.h> 15 16 16 17 #include <linux/mmc/core.h> 17 18 #include <linux/mmc/pm.h> ··· 109 108 * It is optional for the host to implement pre_req and post_req in 110 109 * order to support double buffering of requests (prepare one 111 110 * request while another request is active). 111 + * pre_req() must always be followed by a post_req(). 112 + * To undo a call made to pre_req(), call post_req() with 113 + * a nonzero err condition. 112 114 */ 113 115 void (*post_req)(struct mmc_host *host, struct mmc_request *req, 114 116 int err); ··· 151 147 int (*execute_tuning)(struct mmc_host *host); 152 148 void (*enable_preset_value)(struct mmc_host *host, bool enable); 153 149 int (*select_drive_strength)(unsigned int max_dtr, int host_drv, int card_drv); 150 + void (*hw_reset)(struct mmc_host *host); 154 151 }; 155 152 156 153 struct mmc_card; ··· 234 229 #define MMC_CAP_MAX_CURRENT_600 (1 << 28) /* Host max current limit is 600mA */ 235 230 #define MMC_CAP_MAX_CURRENT_800 (1 << 29) /* Host max current limit is 800mA */ 236 231 #define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. 
*/ 232 + #define MMC_CAP_HW_RESET (1 << 31) /* Hardware reset */ 233 + 234 + unsigned int caps2; /* More host capabilities */ 235 + 236 + #define MMC_CAP2_BOOTPART_NOACC (1 << 0) /* Boot partition no access */ 237 + #define MMC_CAP2_CACHE_CTRL (1 << 1) /* Allow cache control */ 238 + #define MMC_CAP2_POWEROFF_NOTIFY (1 << 2) /* Notify poweroff supported */ 239 + #define MMC_CAP2_NO_MULTI_READ (1 << 3) /* Multiblock reads don't work */ 237 240 238 241 mmc_pm_flag_t pm_caps; /* supported pm features */ 242 + unsigned int power_notify_type; 243 + #define MMC_HOST_PW_NOTIFY_NONE 0 244 + #define MMC_HOST_PW_NOTIFY_SHORT 1 245 + #define MMC_HOST_PW_NOTIFY_LONG 2 239 246 240 247 #ifdef CONFIG_MMC_CLKGATE 241 248 int clk_requests; /* internal reference counter */ ··· 319 302 320 303 struct mmc_async_req *areq; /* active async req */ 321 304 305 + #ifdef CONFIG_FAIL_MMC_REQUEST 306 + struct fault_attr fail_mmc_request; 307 + #endif 308 + 322 309 unsigned long private[0] ____cacheline_aligned; 323 310 }; 324 311 ··· 350 329 351 330 extern void mmc_detect_change(struct mmc_host *, unsigned long delay); 352 331 extern void mmc_request_done(struct mmc_host *, struct mmc_request *); 332 + 333 + extern int mmc_cache_ctrl(struct mmc_host *, u8); 353 334 354 335 static inline void mmc_signal_sdio_irq(struct mmc_host *host) 355 336 { ··· 417 394 { 418 395 return host->caps & MMC_CAP_CMD23; 419 396 } 397 + 398 + static inline int mmc_boot_partition_access(struct mmc_host *host) 399 + { 400 + return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC); 401 + } 402 + 420 403 #endif /* LINUX_MMC_HOST_H */
+37 -1
include/linux/mmc/mmc.h
··· 270 270 * EXT_CSD fields 271 271 */ 272 272 273 + #define EXT_CSD_FLUSH_CACHE 32 /* W */ 274 + #define EXT_CSD_CACHE_CTRL 33 /* R/W */ 275 + #define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */ 276 + #define EXT_CSD_GP_SIZE_MULT 143 /* R/W */ 273 277 #define EXT_CSD_PARTITION_ATTRIBUTE 156 /* R/W */ 274 278 #define EXT_CSD_PARTITION_SUPPORT 160 /* RO */ 279 + #define EXT_CSD_HPI_MGMT 161 /* R/W */ 280 + #define EXT_CSD_RST_N_FUNCTION 162 /* R/W */ 281 + #define EXT_CSD_SANITIZE_START 165 /* W */ 275 282 #define EXT_CSD_WR_REL_PARAM 166 /* RO */ 276 283 #define EXT_CSD_ERASE_GROUP_DEF 175 /* R/W */ 277 284 #define EXT_CSD_PART_CONFIG 179 /* R/W */ 278 285 #define EXT_CSD_ERASED_MEM_CONT 181 /* RO */ 279 286 #define EXT_CSD_BUS_WIDTH 183 /* R/W */ 280 287 #define EXT_CSD_HS_TIMING 185 /* R/W */ 288 + #define EXT_CSD_POWER_CLASS 187 /* R/W */ 281 289 #define EXT_CSD_REV 192 /* RO */ 282 290 #define EXT_CSD_STRUCTURE 194 /* RO */ 283 291 #define EXT_CSD_CARD_TYPE 196 /* RO */ 292 + #define EXT_CSD_OUT_OF_INTERRUPT_TIME 198 /* RO */ 284 293 #define EXT_CSD_PART_SWITCH_TIME 199 /* RO */ 294 + #define EXT_CSD_PWR_CL_52_195 200 /* RO */ 295 + #define EXT_CSD_PWR_CL_26_195 201 /* RO */ 296 + #define EXT_CSD_PWR_CL_52_360 202 /* RO */ 297 + #define EXT_CSD_PWR_CL_26_360 203 /* RO */ 285 298 #define EXT_CSD_SEC_CNT 212 /* RO, 4 bytes */ 286 299 #define EXT_CSD_S_A_TIMEOUT 217 /* RO */ 287 300 #define EXT_CSD_REL_WR_SEC_C 222 /* RO */ ··· 306 293 #define EXT_CSD_SEC_ERASE_MULT 230 /* RO */ 307 294 #define EXT_CSD_SEC_FEATURE_SUPPORT 231 /* RO */ 308 295 #define EXT_CSD_TRIM_MULT 232 /* RO */ 296 + #define EXT_CSD_PWR_CL_200_195 236 /* RO */ 297 + #define EXT_CSD_PWR_CL_200_360 237 /* RO */ 298 + #define EXT_CSD_PWR_CL_DDR_52_195 238 /* RO */ 299 + #define EXT_CSD_PWR_CL_DDR_52_360 239 /* RO */ 300 + #define EXT_CSD_POWER_OFF_LONG_TIME 247 /* RO */ 301 + #define EXT_CSD_GENERIC_CMD6_TIME 248 /* RO */ 302 + #define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */ 303 + #define 
EXT_CSD_HPI_FEATURES 503 /* RO */ 309 304 310 305 /* 311 306 * EXT_CSD field definitions ··· 323 302 324 303 #define EXT_CSD_PART_CONFIG_ACC_MASK (0x7) 325 304 #define EXT_CSD_PART_CONFIG_ACC_BOOT0 (0x1) 326 - #define EXT_CSD_PART_CONFIG_ACC_BOOT1 (0x2) 305 + #define EXT_CSD_PART_CONFIG_ACC_GP0 (0x4) 306 + 307 + #define EXT_CSD_PART_SUPPORT_PART_EN (0x1) 327 308 328 309 #define EXT_CSD_CMD_SET_NORMAL (1<<0) 329 310 #define EXT_CSD_CMD_SET_SECURE (1<<1) ··· 350 327 #define EXT_CSD_SEC_ER_EN BIT(0) 351 328 #define EXT_CSD_SEC_BD_BLK_EN BIT(2) 352 329 #define EXT_CSD_SEC_GB_CL_EN BIT(4) 330 + #define EXT_CSD_SEC_SANITIZE BIT(6) /* v4.5 only */ 353 331 332 + #define EXT_CSD_RST_N_EN_MASK 0x3 333 + #define EXT_CSD_RST_N_ENABLED 1 /* RST_n is enabled on card */ 334 + 335 + #define EXT_CSD_NO_POWER_NOTIFICATION 0 336 + #define EXT_CSD_POWER_ON 1 337 + #define EXT_CSD_POWER_OFF_SHORT 2 338 + #define EXT_CSD_POWER_OFF_LONG 3 339 + 340 + #define EXT_CSD_PWR_CL_8BIT_MASK 0xF0 /* 8 bit PWR CLS */ 341 + #define EXT_CSD_PWR_CL_4BIT_MASK 0x0F /* 8 bit PWR CLS */ 342 + #define EXT_CSD_PWR_CL_8BIT_SHIFT 4 343 + #define EXT_CSD_PWR_CL_4BIT_SHIFT 0 354 344 /* 355 345 * MMC_SWITCH access modes 356 346 */
+8
include/linux/mmc/sdhci.h
··· 88 88 /* The read-only detection via SDHCI_PRESENT_STATE register is unstable */ 89 89 #define SDHCI_QUIRK_UNSTABLE_RO_DETECT (1<<31) 90 90 91 + unsigned int quirks2; /* More deviations from spec. */ 92 + 93 + #define SDHCI_QUIRK2_OWN_CARD_DETECTION (1<<0) 94 + 91 95 int irq; /* Device IRQ */ 92 96 void __iomem *ioaddr; /* Mapped address */ 93 97 ··· 119 115 #define SDHCI_NEEDS_RETUNING (1<<5) /* Host needs retuning */ 120 116 #define SDHCI_AUTO_CMD12 (1<<6) /* Auto CMD12 support */ 121 117 #define SDHCI_AUTO_CMD23 (1<<7) /* Auto CMD23 support */ 118 + #define SDHCI_PV_ENABLED (1<<8) /* Preset value enabled */ 119 + #define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */ 122 120 123 121 unsigned int version; /* SDHCI spec. version */ 124 122 ··· 130 124 131 125 unsigned int clock; /* Current clock (MHz) */ 132 126 u8 pwr; /* Current voltage */ 127 + 128 + bool runtime_suspended; /* Host is runtime suspended */ 133 129 134 130 struct mmc_request *mrq; /* Current request */ 135 131 struct mmc_command *cmd; /* Current command */
+2
include/linux/mmc/sdio.h
··· 72 72 #define SDIO_CCCR_REV_1_00 0 /* CCCR/FBR Version 1.00 */ 73 73 #define SDIO_CCCR_REV_1_10 1 /* CCCR/FBR Version 1.10 */ 74 74 #define SDIO_CCCR_REV_1_20 2 /* CCCR/FBR Version 1.20 */ 75 + #define SDIO_CCCR_REV_3_00 3 /* CCCR/FBR Version 3.00 */ 75 76 76 77 #define SDIO_SDIO_REV_1_00 0 /* SDIO Spec Version 1.00 */ 77 78 #define SDIO_SDIO_REV_1_10 1 /* SDIO Spec Version 1.10 */ 78 79 #define SDIO_SDIO_REV_1_20 2 /* SDIO Spec Version 1.20 */ 79 80 #define SDIO_SDIO_REV_2_00 3 /* SDIO Spec Version 2.00 */ 81 + #define SDIO_SDIO_REV_3_00 4 /* SDIO Spec Version 3.00 */ 80 82 81 83 #define SDIO_CCCR_SD 0x01 82 84
+3 -1
include/linux/mmc/sh_mmcif.h
··· 41 41 void (*set_pwr)(struct platform_device *pdev, int state); 42 42 void (*down_pwr)(struct platform_device *pdev); 43 43 int (*get_cd)(struct platform_device *pdef); 44 - struct sh_mmcif_dma *dma; 44 + struct sh_mmcif_dma *dma; /* Deprecated. Instead */ 45 + unsigned int slave_id_tx; /* use embedded slave_id_[tr]x */ 46 + unsigned int slave_id_rx; 45 47 u8 sup_pclk; /* 1 :SH7757, 0: SH7724/SH7372 */ 46 48 unsigned long caps; 47 49 u32 ocr;
+4
include/linux/mmc/sh_mobile_sdhi.h
··· 6 6 struct platform_device; 7 7 struct tmio_mmc_data; 8 8 9 + #define SH_MOBILE_SDHI_IRQ_CARD_DETECT "card_detect" 10 + #define SH_MOBILE_SDHI_IRQ_SDCARD "sdcard" 11 + #define SH_MOBILE_SDHI_IRQ_SDIO "sdio" 12 + 9 13 struct sh_mobile_sdhi_info { 10 14 int dma_slave_tx; 11 15 int dma_slave_rx;
+11
lib/Kconfig.debug
··· 1070 1070 Only works with drivers that use the generic timeout handling, 1071 1071 for others it wont do anything. 1072 1072 1073 + config FAIL_MMC_REQUEST 1074 + bool "Fault-injection capability for MMC IO" 1075 + select DEBUG_FS 1076 + depends on FAULT_INJECTION && MMC 1077 + help 1078 + Provide fault-injection capability for MMC IO. 1079 + This will make the mmc core return data errors. This is 1080 + useful to test the error handling in the mmc block device 1081 + and to test how the mmc host driver handles retries from 1082 + the block device. 1083 + 1073 1084 config FAULT_INJECTION_DEBUG_FS 1074 1085 bool "Debugfs entries for fault-injection capabilities" 1075 1086 depends on FAULT_INJECTION && SYSFS && DEBUG_FS
+4 -1
lib/fault-inject.c
··· 14 14 * setup_fault_attr() is a helper function for various __setup handlers, so it 15 15 * returns 0 on error, because that is what __setup handlers do. 16 16 */ 17 - int __init setup_fault_attr(struct fault_attr *attr, char *str) 17 + int setup_fault_attr(struct fault_attr *attr, char *str) 18 18 { 19 19 unsigned long probability; 20 20 unsigned long interval; ··· 36 36 37 37 return 1; 38 38 } 39 + EXPORT_SYMBOL_GPL(setup_fault_attr); 39 40 40 41 static void fail_dump(struct fault_attr *attr) 41 42 { ··· 131 130 132 131 return true; 133 132 } 133 + EXPORT_SYMBOL_GPL(should_fail); 134 134 135 135 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 136 136 ··· 245 243 246 244 return ERR_PTR(-ENOMEM); 247 245 } 246 + EXPORT_SYMBOL_GPL(fault_create_debugfs_attr); 248 247 249 248 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */