Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mmc-merge-for-3.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc

MMC highlights for 3.3:

Core:
* Support for the HS200 high-speed eMMC mode.
* Support SDIO 3.0 Ultra High Speed cards.
* Kill pending block requests immediately if card is removed.
* Enable the eMMC feature for locking boot partitions read-only
until next power on, exposed via sysfs.

Drivers:
* Runtime PM support for Intel Medfield SDIO.
* Suspend/resume support for sdhci-spear.
* sh-mmcif now processes requests asynchronously.

* tag 'mmc-merge-for-3.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc: (58 commits)
mmc: fix a deadlock between system suspend and MMC block IO
mmc: sdhci: restore the enabled dma when do reset all
mmc: dw_mmc: miscaculated the fifo-depth with wrong bit operation
mmc: host: Adds support for eMMC 4.5 HS200 mode
mmc: core: HS200 mode support for eMMC 4.5
mmc: dw_mmc: fixed wrong bit operation for SDMMC_GET_FCNT()
mmc: core: Separate the timeout value for cache-ctrl
mmc: sdhci-spear: Fix compilation error
mmc: sdhci: Deal with failure case in sdhci_suspend_host
mmc: dw_mmc: Clear the DDR mode for non-DDR
mmc: sd: Fix SDR12 timing regression
mmc: sdhci: Fix tuning timer incorrect setting when suspending host
mmc: core: Add option to prevent eMMC sleep command
mmc: omap_hsmmc: use threaded irq handler for card-detect.
mmc: sdhci-pci: enable runtime PM for Medfield SDIO
mmc: sdhci: Always pass clock request value zero to set_clock host op
mmc: sdhci-pci: remove SDHCI_QUIRK2_OWN_CARD_DETECTION
mmc: sdhci-pci: get gpio numbers from platform data
mmc: sdhci-pci: add platform data
mmc: sdhci: prevent card detection activity for non-removable cards
...

+1989 -844
+10
Documentation/mmc/mmc-dev-attrs.txt
··· 64 64 size specified by the card. 65 65 66 66 "preferred_erase_size" is in bytes. 67 + 68 + SD/MMC/SDIO Clock Gating Attribute 69 + ================================== 70 + 71 + Read and write access is provided to following attribute. 72 + This attribute appears only if CONFIG_MMC_CLKGATE is enabled. 73 + 74 + clkgate_delay Tune the clock gating delay with desired value in milliseconds. 75 + 76 + echo <desired delay> > /sys/class/mmc_host/mmcX/clkgate_delay
+13
Documentation/mmc/mmc-dev-parts.txt
··· 25 25 To re-enable read-only access: 26 26 27 27 echo 1 > /sys/block/mmcblkXbootY/force_ro 28 + 29 + The boot partitions can also be locked read only until the next power on, 30 + with: 31 + 32 + echo 1 > /sys/block/mmcblkXbootY/ro_lock_until_next_power_on 33 + 34 + This is a feature of the card and not of the kernel. If the card does 35 + not support boot partition locking, the file will not exist. If the 36 + feature has been disabled on the card, the file will be read-only. 37 + 38 + The boot partitions can also be locked permanently, but this feature is 39 + not accessible through sysfs in order to avoid accidental or malicious 40 + bricking.
+1
arch/arm/plat-samsung/include/plat/sdhci.h
··· 63 63 struct s3c_sdhci_platdata { 64 64 unsigned int max_width; 65 65 unsigned int host_caps; 66 + unsigned int pm_caps; 66 67 enum cd_types cd_type; 67 68 enum clk_types clk_type; 68 69
+2
arch/arm/plat-samsung/platformdata.c
··· 53 53 set->cfg_gpio = pd->cfg_gpio; 54 54 if (pd->host_caps) 55 55 set->host_caps |= pd->host_caps; 56 + if (pd->pm_caps) 57 + set->pm_caps |= pd->pm_caps; 56 58 if (pd->clk_type) 57 59 set->clk_type = pd->clk_type; 58 60 }
+1 -1
drivers/Makefile
··· 97 97 obj-y += lguest/ 98 98 obj-$(CONFIG_CPU_FREQ) += cpufreq/ 99 99 obj-$(CONFIG_CPU_IDLE) += cpuidle/ 100 - obj-$(CONFIG_MMC) += mmc/ 100 + obj-y += mmc/ 101 101 obj-$(CONFIG_MEMSTICK) += memstick/ 102 102 obj-y += leds/ 103 103 obj-$(CONFIG_INFINIBAND) += infiniband/
+1 -2
drivers/mmc/Makefile
··· 6 6 7 7 obj-$(CONFIG_MMC) += core/ 8 8 obj-$(CONFIG_MMC) += card/ 9 - obj-$(CONFIG_MMC) += host/ 10 - 9 + obj-$(subst m,y,$(CONFIG_MMC)) += host/
+192 -55
drivers/mmc/card/block.c
··· 107 107 */ 108 108 unsigned int part_curr; 109 109 struct device_attribute force_ro; 110 + struct device_attribute power_ro_lock; 111 + int area_type; 110 112 }; 111 113 112 114 static DEFINE_MUTEX(open_lock); ··· 121 119 MMC_BLK_ABORT, 122 120 MMC_BLK_DATA_ERR, 123 121 MMC_BLK_ECC_ERR, 122 + MMC_BLK_NOMEDIUM, 124 123 }; 125 124 126 125 module_param(perdev_minors, int, 0444); ··· 166 163 kfree(md); 167 164 } 168 165 mutex_unlock(&open_lock); 166 + } 167 + 168 + static ssize_t power_ro_lock_show(struct device *dev, 169 + struct device_attribute *attr, char *buf) 170 + { 171 + int ret; 172 + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); 173 + struct mmc_card *card = md->queue.card; 174 + int locked = 0; 175 + 176 + if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN) 177 + locked = 2; 178 + else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN) 179 + locked = 1; 180 + 181 + ret = snprintf(buf, PAGE_SIZE, "%d\n", locked); 182 + 183 + return ret; 184 + } 185 + 186 + static ssize_t power_ro_lock_store(struct device *dev, 187 + struct device_attribute *attr, const char *buf, size_t count) 188 + { 189 + int ret; 190 + struct mmc_blk_data *md, *part_md; 191 + struct mmc_card *card; 192 + unsigned long set; 193 + 194 + if (kstrtoul(buf, 0, &set)) 195 + return -EINVAL; 196 + 197 + if (set != 1) 198 + return count; 199 + 200 + md = mmc_blk_get(dev_to_disk(dev)); 201 + card = md->queue.card; 202 + 203 + mmc_claim_host(card->host); 204 + 205 + ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, 206 + card->ext_csd.boot_ro_lock | 207 + EXT_CSD_BOOT_WP_B_PWR_WP_EN, 208 + card->ext_csd.part_time); 209 + if (ret) 210 + pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret); 211 + else 212 + card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN; 213 + 214 + mmc_release_host(card->host); 215 + 216 + if (!ret) { 217 + pr_info("%s: Locking boot partition ro until next power on\n", 218 + 
md->disk->disk_name); 219 + set_disk_ro(md->disk, 1); 220 + 221 + list_for_each_entry(part_md, &md->part, part) 222 + if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) { 223 + pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name); 224 + set_disk_ro(part_md->disk, 1); 225 + } 226 + } 227 + 228 + mmc_blk_put(md); 229 + return count; 169 230 } 170 231 171 232 static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr, ··· 333 266 goto idata_err; 334 267 } 335 268 269 + if (!idata->buf_bytes) 270 + return idata; 271 + 336 272 idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL); 337 273 if (!idata->buf) { 338 274 err = -ENOMEM; ··· 382 312 if (IS_ERR(idata)) 383 313 return PTR_ERR(idata); 384 314 385 - cmd.opcode = idata->ic.opcode; 386 - cmd.arg = idata->ic.arg; 387 - cmd.flags = idata->ic.flags; 388 - 389 - data.sg = &sg; 390 - data.sg_len = 1; 391 - data.blksz = idata->ic.blksz; 392 - data.blocks = idata->ic.blocks; 393 - 394 - sg_init_one(data.sg, idata->buf, idata->buf_bytes); 395 - 396 - if (idata->ic.write_flag) 397 - data.flags = MMC_DATA_WRITE; 398 - else 399 - data.flags = MMC_DATA_READ; 400 - 401 - mrq.cmd = &cmd; 402 - mrq.data = &data; 403 - 404 315 md = mmc_blk_get(bdev->bd_disk); 405 316 if (!md) { 406 317 err = -EINVAL; ··· 394 343 goto cmd_done; 395 344 } 396 345 346 + cmd.opcode = idata->ic.opcode; 347 + cmd.arg = idata->ic.arg; 348 + cmd.flags = idata->ic.flags; 349 + 350 + if (idata->buf_bytes) { 351 + data.sg = &sg; 352 + data.sg_len = 1; 353 + data.blksz = idata->ic.blksz; 354 + data.blocks = idata->ic.blocks; 355 + 356 + sg_init_one(data.sg, idata->buf, idata->buf_bytes); 357 + 358 + if (idata->ic.write_flag) 359 + data.flags = MMC_DATA_WRITE; 360 + else 361 + data.flags = MMC_DATA_READ; 362 + 363 + /* data.flags must already be set before doing this. */ 364 + mmc_set_data_timeout(&data, card); 365 + 366 + /* Allow overriding the timeout_ns for empirical tuning. 
*/ 367 + if (idata->ic.data_timeout_ns) 368 + data.timeout_ns = idata->ic.data_timeout_ns; 369 + 370 + if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) { 371 + /* 372 + * Pretend this is a data transfer and rely on the 373 + * host driver to compute timeout. When all host 374 + * drivers support cmd.cmd_timeout for R1B, this 375 + * can be changed to: 376 + * 377 + * mrq.data = NULL; 378 + * cmd.cmd_timeout = idata->ic.cmd_timeout_ms; 379 + */ 380 + data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000; 381 + } 382 + 383 + mrq.data = &data; 384 + } 385 + 386 + mrq.cmd = &cmd; 387 + 397 388 mmc_claim_host(card->host); 398 389 399 390 if (idata->ic.is_acmd) { 400 391 err = mmc_app_cmd(card->host, card); 401 392 if (err) 402 393 goto cmd_rel_host; 403 - } 404 - 405 - /* data.flags must already be set before doing this. */ 406 - mmc_set_data_timeout(&data, card); 407 - /* Allow overriding the timeout_ns for empirical tuning. */ 408 - if (idata->ic.data_timeout_ns) 409 - data.timeout_ns = idata->ic.data_timeout_ns; 410 - 411 - if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) { 412 - /* 413 - * Pretend this is a data transfer and rely on the host driver 414 - * to compute timeout. When all host drivers support 415 - * cmd.cmd_timeout for R1B, this can be changed to: 416 - * 417 - * mrq.data = NULL; 418 - * cmd.cmd_timeout = idata->ic.cmd_timeout_ms; 419 - */ 420 - data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000; 421 394 } 422 395 423 396 mmc_wait_for_req(card->host, &mrq); ··· 640 565 return err; 641 566 } 642 567 568 + #define ERR_NOMEDIUM 3 643 569 #define ERR_RETRY 2 644 570 #define ERR_ABORT 1 645 571 #define ERR_CONTINUE 0 ··· 708 632 u32 status, stop_status = 0; 709 633 int err, retry; 710 634 635 + if (mmc_card_removed(card)) 636 + return ERR_NOMEDIUM; 637 + 711 638 /* 712 639 * Try to get card status which indicates both the card state 713 640 * and why there was no response. 
If the first attempt fails, ··· 727 648 } 728 649 729 650 /* We couldn't get a response from the card. Give up. */ 730 - if (err) 651 + if (err) { 652 + /* Check if the card is removed */ 653 + if (mmc_detect_card_removed(card->host)) 654 + return ERR_NOMEDIUM; 731 655 return ERR_ABORT; 656 + } 732 657 733 658 /* Flag ECC errors */ 734 659 if ((status & R1_CARD_ECC_FAILED) || ··· 1005 922 return MMC_BLK_RETRY; 1006 923 case ERR_ABORT: 1007 924 return MMC_BLK_ABORT; 925 + case ERR_NOMEDIUM: 926 + return MMC_BLK_NOMEDIUM; 1008 927 case ERR_CONTINUE: 1009 928 break; 1010 929 } ··· 1340 1255 if (!ret) 1341 1256 goto start_new_req; 1342 1257 break; 1258 + case MMC_BLK_NOMEDIUM: 1259 + goto cmd_abort; 1343 1260 } 1344 1261 1345 1262 if (ret) { ··· 1358 1271 1359 1272 cmd_abort: 1360 1273 spin_lock_irq(&md->lock); 1274 + if (mmc_card_removed(card)) 1275 + req->cmd_flags |= REQ_QUIET; 1361 1276 while (ret) 1362 1277 ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); 1363 1278 spin_unlock_irq(&md->lock); ··· 1428 1339 struct device *parent, 1429 1340 sector_t size, 1430 1341 bool default_ro, 1431 - const char *subname) 1342 + const char *subname, 1343 + int area_type) 1432 1344 { 1433 1345 struct mmc_blk_data *md; 1434 1346 int devidx, ret; ··· 1454 1364 if (!subname) { 1455 1365 md->name_idx = find_first_zero_bit(name_use, max_devices); 1456 1366 __set_bit(md->name_idx, name_use); 1457 - } 1458 - else 1367 + } else 1459 1368 md->name_idx = ((struct mmc_blk_data *) 1460 1369 dev_to_disk(parent)->private_data)->name_idx; 1370 + 1371 + md->area_type = area_type; 1461 1372 1462 1373 /* 1463 1374 * Set the read-only status based on the supported commands ··· 1553 1462 size = card->csd.capacity << (card->csd.read_blkbits - 9); 1554 1463 } 1555 1464 1556 - md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL); 1465 + md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL, 1466 + MMC_BLK_DATA_AREA_MAIN); 1557 1467 return md; 1558 1468 } 1559 1469 ··· 1563 1471 
unsigned int part_type, 1564 1472 sector_t size, 1565 1473 bool default_ro, 1566 - const char *subname) 1474 + const char *subname, 1475 + int area_type) 1567 1476 { 1568 1477 char cap_str[10]; 1569 1478 struct mmc_blk_data *part_md; 1570 1479 1571 1480 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro, 1572 - subname); 1481 + subname, area_type); 1573 1482 if (IS_ERR(part_md)) 1574 1483 return PTR_ERR(part_md); 1575 1484 part_md->part_type = part_type; ··· 1603 1510 card->part[idx].part_cfg, 1604 1511 card->part[idx].size >> 9, 1605 1512 card->part[idx].force_ro, 1606 - card->part[idx].name); 1513 + card->part[idx].name, 1514 + card->part[idx].area_type); 1607 1515 if (ret) 1608 1516 return ret; 1609 1517 } ··· 1633 1539 1634 1540 static void mmc_blk_remove_req(struct mmc_blk_data *md) 1635 1541 { 1542 + struct mmc_card *card; 1543 + 1636 1544 if (md) { 1545 + card = md->queue.card; 1637 1546 if (md->disk->flags & GENHD_FL_UP) { 1638 1547 device_remove_file(disk_to_dev(md->disk), &md->force_ro); 1548 + if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && 1549 + card->ext_csd.boot_ro_lockable) 1550 + device_remove_file(disk_to_dev(md->disk), 1551 + &md->power_ro_lock); 1639 1552 1640 1553 /* Stop new requests from getting into the queue */ 1641 1554 del_gendisk(md->disk); ··· 1671 1570 static int mmc_add_disk(struct mmc_blk_data *md) 1672 1571 { 1673 1572 int ret; 1573 + struct mmc_card *card = md->queue.card; 1674 1574 1675 1575 add_disk(md->disk); 1676 1576 md->force_ro.show = force_ro_show; ··· 1681 1579 md->force_ro.attr.mode = S_IRUGO | S_IWUSR; 1682 1580 ret = device_create_file(disk_to_dev(md->disk), &md->force_ro); 1683 1581 if (ret) 1684 - del_gendisk(md->disk); 1582 + goto force_ro_fail; 1583 + 1584 + if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && 1585 + card->ext_csd.boot_ro_lockable) { 1586 + mode_t mode; 1587 + 1588 + if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS) 1589 + mode = S_IRUGO; 1590 + else 1591 + mode = 
S_IRUGO | S_IWUSR; 1592 + 1593 + md->power_ro_lock.show = power_ro_lock_show; 1594 + md->power_ro_lock.store = power_ro_lock_store; 1595 + md->power_ro_lock.attr.mode = mode; 1596 + md->power_ro_lock.attr.name = 1597 + "ro_lock_until_next_power_on"; 1598 + ret = device_create_file(disk_to_dev(md->disk), 1599 + &md->power_ro_lock); 1600 + if (ret) 1601 + goto power_ro_lock_fail; 1602 + } 1603 + return ret; 1604 + 1605 + power_ro_lock_fail: 1606 + device_remove_file(disk_to_dev(md->disk), &md->force_ro); 1607 + force_ro_fail: 1608 + del_gendisk(md->disk); 1685 1609 1686 1610 return ret; 1687 1611 } 1688 1612 1613 + #define CID_MANFID_SANDISK 0x2 1614 + #define CID_MANFID_TOSHIBA 0x11 1615 + #define CID_MANFID_MICRON 0x13 1616 + 1689 1617 static const struct mmc_fixup blk_fixups[] = 1690 1618 { 1691 - MMC_FIXUP("SEM02G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), 1692 - MMC_FIXUP("SEM04G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), 1693 - MMC_FIXUP("SEM08G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), 1694 - MMC_FIXUP("SEM16G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), 1695 - MMC_FIXUP("SEM32G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), 1619 + MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk, 1620 + MMC_QUIRK_INAND_CMD38), 1621 + MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk, 1622 + MMC_QUIRK_INAND_CMD38), 1623 + MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk, 1624 + MMC_QUIRK_INAND_CMD38), 1625 + MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk, 1626 + MMC_QUIRK_INAND_CMD38), 1627 + MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk, 1628 + MMC_QUIRK_INAND_CMD38), 1696 1629 1697 1630 /* 1698 1631 * Some MMC cards experience performance degradation with CMD23 ··· 1737 1600 * 1738 1601 * N.B. This doesn't affect SD cards. 
1739 1602 */ 1740 - MMC_FIXUP("MMC08G", 0x11, CID_OEMID_ANY, add_quirk_mmc, 1603 + MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, 1741 1604 MMC_QUIRK_BLK_NO_CMD23), 1742 - MMC_FIXUP("MMC16G", 0x11, CID_OEMID_ANY, add_quirk_mmc, 1605 + MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, 1743 1606 MMC_QUIRK_BLK_NO_CMD23), 1744 - MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc, 1607 + MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, 1745 1608 MMC_QUIRK_BLK_NO_CMD23), 1746 1609 1747 1610 /* 1748 1611 * Some Micron MMC cards needs longer data read timeout than 1749 1612 * indicated in CSD. 1750 1613 */ 1751 - MMC_FIXUP(CID_NAME_ANY, 0x13, 0x200, add_quirk_mmc, 1614 + MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, 1752 1615 MMC_QUIRK_LONG_READ_TIME), 1753 1616 1754 1617 END_FIXUP
+1
drivers/mmc/card/mmc_test.c
··· 1581 1581 1582 1582 t->max_segs = test->card->host->max_segs; 1583 1583 t->max_seg_sz = test->card->host->max_seg_size; 1584 + t->max_seg_sz -= t->max_seg_sz % 512; 1584 1585 1585 1586 t->max_tfr = t->max_sz; 1586 1587 if (t->max_tfr >> 9 > test->card->host->max_blk_count)
+5
drivers/mmc/card/queue.c
··· 29 29 */ 30 30 static int mmc_prep_request(struct request_queue *q, struct request *req) 31 31 { 32 + struct mmc_queue *mq = q->queuedata; 33 + 32 34 /* 33 35 * We only like normal block requests and discards. 34 36 */ ··· 38 36 blk_dump_rq_flags(req, "MMC bad request"); 39 37 return BLKPREP_KILL; 40 38 } 39 + 40 + if (mq && mmc_card_removed(mq->card)) 41 + return BLKPREP_KILL; 41 42 42 43 req->cmd_flags |= REQ_DONTPREP; 43 44
+1 -1
drivers/mmc/core/Makefile
··· 7 7 mmc.o mmc_ops.o sd.o sd_ops.o \ 8 8 sdio.o sdio_ops.o sdio_bus.o \ 9 9 sdio_cis.o sdio_io.o sdio_irq.o \ 10 - quirks.o 10 + quirks.o cd-gpio.o 11 11 12 12 mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
+3 -2
drivers/mmc/core/bus.c
··· 303 303 mmc_card_ddr_mode(card) ? "DDR " : "", 304 304 type); 305 305 } else { 306 - printk(KERN_INFO "%s: new %s%s%s card at address %04x\n", 306 + pr_info("%s: new %s%s%s%s card at address %04x\n", 307 307 mmc_hostname(card->host), 308 - mmc_sd_card_uhs(card) ? "ultra high speed " : 308 + mmc_card_uhs(card) ? "ultra high speed " : 309 309 (mmc_card_highspeed(card) ? "high speed " : ""), 310 + (mmc_card_hs200(card) ? "HS200 " : ""), 310 311 mmc_card_ddr_mode(card) ? "DDR " : "", 311 312 type, card->rca); 312 313 }
+74
drivers/mmc/core/cd-gpio.c
··· 1 + /* 2 + * Generic GPIO card-detect helper 3 + * 4 + * Copyright (C) 2011, Guennadi Liakhovetski <g.liakhovetski@gmx.de> 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + */ 10 + 11 + #include <linux/err.h> 12 + #include <linux/gpio.h> 13 + #include <linux/interrupt.h> 14 + #include <linux/jiffies.h> 15 + #include <linux/mmc/host.h> 16 + #include <linux/module.h> 17 + #include <linux/slab.h> 18 + 19 + struct mmc_cd_gpio { 20 + unsigned int gpio; 21 + char label[0]; 22 + }; 23 + 24 + static irqreturn_t mmc_cd_gpio_irqt(int irq, void *dev_id) 25 + { 26 + /* Schedule a card detection after a debounce timeout */ 27 + mmc_detect_change(dev_id, msecs_to_jiffies(100)); 28 + return IRQ_HANDLED; 29 + } 30 + 31 + int mmc_cd_gpio_request(struct mmc_host *host, unsigned int gpio, 32 + unsigned int irq, unsigned long flags) 33 + { 34 + size_t len = strlen(dev_name(host->parent)) + 4; 35 + struct mmc_cd_gpio *cd = kmalloc(sizeof(*cd) + len, GFP_KERNEL); 36 + int ret; 37 + 38 + if (!cd) 39 + return -ENOMEM; 40 + 41 + snprintf(cd->label, len, "%s cd", dev_name(host->parent)); 42 + 43 + ret = gpio_request_one(gpio, GPIOF_DIR_IN, cd->label); 44 + if (ret < 0) 45 + goto egpioreq; 46 + 47 + ret = request_threaded_irq(irq, NULL, mmc_cd_gpio_irqt, 48 + flags, cd->label, host); 49 + if (ret < 0) 50 + goto eirqreq; 51 + 52 + cd->gpio = gpio; 53 + host->hotplug.irq = irq; 54 + host->hotplug.handler_priv = cd; 55 + 56 + return 0; 57 + 58 + eirqreq: 59 + gpio_free(gpio); 60 + egpioreq: 61 + kfree(cd); 62 + return ret; 63 + } 64 + EXPORT_SYMBOL(mmc_cd_gpio_request); 65 + 66 + void mmc_cd_gpio_free(struct mmc_host *host) 67 + { 68 + struct mmc_cd_gpio *cd = host->hotplug.handler_priv; 69 + 70 + free_irq(host->hotplug.irq, host); 71 + gpio_free(cd->gpio); 72 + kfree(cd); 73 + } 74 + EXPORT_SYMBOL(mmc_cd_gpio_free);
+79 -16
drivers/mmc/core/core.c
··· 140 140 cmd->retries = 0; 141 141 } 142 142 143 - if (err && cmd->retries) { 143 + if (err && cmd->retries && !mmc_card_removed(host->card)) { 144 144 /* 145 145 * Request starter must handle retries - see 146 146 * mmc_wait_for_req_done(). ··· 247 247 { 248 248 init_completion(&mrq->completion); 249 249 mrq->done = mmc_wait_done; 250 + if (mmc_card_removed(host->card)) { 251 + mrq->cmd->error = -ENOMEDIUM; 252 + complete(&mrq->completion); 253 + return; 254 + } 250 255 mmc_start_request(host, mrq); 251 256 } 252 257 ··· 264 259 wait_for_completion(&mrq->completion); 265 260 266 261 cmd = mrq->cmd; 267 - if (!cmd->error || !cmd->retries) 262 + if (!cmd->error || !cmd->retries || 263 + mmc_card_removed(host->card)) 268 264 break; 269 265 270 266 pr_debug("%s: req failed (CMD%u): %d, retrying...\n", ··· 1462 1456 WARN_ON(host->removed); 1463 1457 spin_unlock_irqrestore(&host->lock, flags); 1464 1458 #endif 1465 - 1459 + host->detect_change = 1; 1466 1460 mmc_schedule_delayed_work(&host->detect, delay); 1467 1461 } 1468 1462 ··· 2055 2049 return -EIO; 2056 2050 } 2057 2051 2052 + int _mmc_detect_card_removed(struct mmc_host *host) 2053 + { 2054 + int ret; 2055 + 2056 + if ((host->caps & MMC_CAP_NONREMOVABLE) || !host->bus_ops->alive) 2057 + return 0; 2058 + 2059 + if (!host->card || mmc_card_removed(host->card)) 2060 + return 1; 2061 + 2062 + ret = host->bus_ops->alive(host); 2063 + if (ret) { 2064 + mmc_card_set_removed(host->card); 2065 + pr_debug("%s: card remove detected\n", mmc_hostname(host)); 2066 + } 2067 + 2068 + return ret; 2069 + } 2070 + 2071 + int mmc_detect_card_removed(struct mmc_host *host) 2072 + { 2073 + struct mmc_card *card = host->card; 2074 + 2075 + WARN_ON(!host->claimed); 2076 + /* 2077 + * The card will be considered unchanged unless we have been asked to 2078 + * detect a change or host requires polling to provide card detection. 
2079 + */ 2080 + if (card && !host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL)) 2081 + return mmc_card_removed(card); 2082 + 2083 + host->detect_change = 0; 2084 + 2085 + return _mmc_detect_card_removed(host); 2086 + } 2087 + EXPORT_SYMBOL(mmc_detect_card_removed); 2088 + 2058 2089 void mmc_rescan(struct work_struct *work) 2059 2090 { 2060 2091 static const unsigned freqs[] = { 400000, 300000, 200000, 100000 }; ··· 2111 2068 if (host->bus_ops && host->bus_ops->detect && !host->bus_dead 2112 2069 && !(host->caps & MMC_CAP_NONREMOVABLE)) 2113 2070 host->bus_ops->detect(host); 2071 + 2072 + host->detect_change = 0; 2114 2073 2115 2074 /* 2116 2075 * Let mmc_bus_put() free the bus/bus_ops if we've found that ··· 2175 2130 2176 2131 mmc_bus_get(host); 2177 2132 if (host->bus_ops && !host->bus_dead) { 2133 + /* Calling bus_ops->remove() with a claimed host can deadlock */ 2178 2134 if (host->bus_ops->remove) 2179 2135 host->bus_ops->remove(host); 2180 2136 ··· 2247 2201 { 2248 2202 int err = -ENOSYS; 2249 2203 2204 + if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD) 2205 + return 0; 2206 + 2250 2207 mmc_bus_get(host); 2251 2208 2252 2209 if (host->bus_ops && !host->bus_dead && host->bus_ops->awake) ··· 2264 2215 int mmc_card_sleep(struct mmc_host *host) 2265 2216 { 2266 2217 int err = -ENOSYS; 2218 + 2219 + if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD) 2220 + return 0; 2267 2221 2268 2222 mmc_bus_get(host); 2269 2223 ··· 2322 2270 int mmc_cache_ctrl(struct mmc_host *host, u8 enable) 2323 2271 { 2324 2272 struct mmc_card *card = host->card; 2273 + unsigned int timeout; 2325 2274 int err = 0; 2326 2275 2327 2276 if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) || ··· 2333 2280 (card->ext_csd.cache_size > 0)) { 2334 2281 enable = !!enable; 2335 2282 2336 - if (card->ext_csd.cache_ctrl ^ enable) 2283 + if (card->ext_csd.cache_ctrl ^ enable) { 2284 + timeout = enable ? 
card->ext_csd.generic_cmd6_time : 0; 2337 2285 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 2338 - EXT_CSD_CACHE_CTRL, enable, 0); 2339 - if (err) 2340 - pr_err("%s: cache %s error %d\n", 2341 - mmc_hostname(card->host), 2342 - enable ? "on" : "off", 2343 - err); 2344 - else 2345 - card->ext_csd.cache_ctrl = enable; 2286 + EXT_CSD_CACHE_CTRL, enable, timeout); 2287 + if (err) 2288 + pr_err("%s: cache %s error %d\n", 2289 + mmc_hostname(card->host), 2290 + enable ? "on" : "off", 2291 + err); 2292 + else 2293 + card->ext_csd.cache_ctrl = enable; 2294 + } 2346 2295 } 2347 2296 2348 2297 return err; ··· 2365 2310 cancel_delayed_work(&host->disable); 2366 2311 cancel_delayed_work(&host->detect); 2367 2312 mmc_flush_scheduled_work(); 2368 - err = mmc_cache_ctrl(host, 0); 2313 + if (mmc_try_claim_host(host)) { 2314 + err = mmc_cache_ctrl(host, 0); 2315 + mmc_do_release_host(host); 2316 + } else { 2317 + err = -EBUSY; 2318 + } 2319 + 2369 2320 if (err) 2370 2321 goto out; 2371 2322 ··· 2399 2338 if (err == -ENOSYS || !host->bus_ops->resume) { 2400 2339 /* 2401 2340 * We simply "remove" the card in this case. 2402 - * It will be redetected on resume. 2341 + * It will be redetected on resume. (Calling 2342 + * bus_ops->remove() with a claimed host can 2343 + * deadlock.) 2403 2344 */ 2404 2345 if (host->bus_ops->remove) 2405 2346 host->bus_ops->remove(host); ··· 2494 2431 if (!host->bus_ops || host->bus_ops->suspend) 2495 2432 break; 2496 2433 2497 - mmc_claim_host(host); 2498 - 2434 + /* Calling bus_ops->remove() with a claimed host can deadlock */ 2499 2435 if (host->bus_ops->remove) 2500 2436 host->bus_ops->remove(host); 2501 2437 2438 + mmc_claim_host(host); 2502 2439 mmc_detach_bus(host); 2503 2440 mmc_power_off(host); 2504 2441 mmc_release_host(host);
+3
drivers/mmc/core/core.h
··· 24 24 int (*resume)(struct mmc_host *); 25 25 int (*power_save)(struct mmc_host *); 26 26 int (*power_restore)(struct mmc_host *); 27 + int (*alive)(struct mmc_host *); 27 28 }; 28 29 29 30 void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops); ··· 59 58 void mmc_rescan(struct work_struct *work); 60 59 void mmc_start_host(struct mmc_host *host); 61 60 void mmc_stop_host(struct mmc_host *host); 61 + 62 + int _mmc_detect_card_removed(struct mmc_host *host); 62 63 63 64 int mmc_attach_mmc(struct mmc_host *host); 64 65 int mmc_attach_sd(struct mmc_host *host);
+5
drivers/mmc/core/debugfs.c
··· 57 57 const char *str; 58 58 59 59 seq_printf(s, "clock:\t\t%u Hz\n", ios->clock); 60 + if (host->actual_clock) 61 + seq_printf(s, "actual clock:\t%u Hz\n", host->actual_clock); 60 62 seq_printf(s, "vdd:\t\t%u ", ios->vdd); 61 63 if ((1 << ios->vdd) & MMC_VDD_165_195) 62 64 seq_printf(s, "(1.65 - 1.95 V)\n"); ··· 134 132 break; 135 133 case MMC_TIMING_UHS_DDR50: 136 134 str = "sd uhs DDR50"; 135 + break; 136 + case MMC_TIMING_MMC_HS200: 137 + str = "mmc high-speed SDR200"; 137 138 break; 138 139 default: 139 140 str = "invalid";
+49 -4
drivers/mmc/core/host.c
··· 54 54 static DEFINE_SPINLOCK(mmc_host_lock); 55 55 56 56 #ifdef CONFIG_MMC_CLKGATE 57 + static ssize_t clkgate_delay_show(struct device *dev, 58 + struct device_attribute *attr, char *buf) 59 + { 60 + struct mmc_host *host = cls_dev_to_mmc_host(dev); 61 + return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay); 62 + } 63 + 64 + static ssize_t clkgate_delay_store(struct device *dev, 65 + struct device_attribute *attr, const char *buf, size_t count) 66 + { 67 + struct mmc_host *host = cls_dev_to_mmc_host(dev); 68 + unsigned long flags, value; 69 + 70 + if (kstrtoul(buf, 0, &value)) 71 + return -EINVAL; 72 + 73 + spin_lock_irqsave(&host->clk_lock, flags); 74 + host->clkgate_delay = value; 75 + spin_unlock_irqrestore(&host->clk_lock, flags); 76 + return count; 77 + } 57 78 58 79 /* 59 80 * Enabling clock gating will make the core call out to the host ··· 135 114 static void mmc_host_clk_gate_work(struct work_struct *work) 136 115 { 137 116 struct mmc_host *host = container_of(work, struct mmc_host, 138 - clk_gate_work); 117 + clk_gate_work.work); 139 118 140 119 mmc_host_clk_gate_delayed(host); 141 120 } ··· 152 131 { 153 132 unsigned long flags; 154 133 134 + /* cancel any clock gating work scheduled by mmc_host_clk_release() */ 135 + cancel_delayed_work_sync(&host->clk_gate_work); 155 136 mutex_lock(&host->clk_gate_mutex); 156 137 spin_lock_irqsave(&host->clk_lock, flags); 157 138 if (host->clk_gated) { ··· 203 180 host->clk_requests--; 204 181 if (mmc_host_may_gate_card(host->card) && 205 182 !host->clk_requests) 206 - queue_work(system_nrt_wq, &host->clk_gate_work); 183 + queue_delayed_work(system_nrt_wq, &host->clk_gate_work, 184 + msecs_to_jiffies(host->clkgate_delay)); 207 185 spin_unlock_irqrestore(&host->clk_lock, flags); 208 186 } 209 187 ··· 237 213 host->clk_requests = 0; 238 214 /* Hold MCI clock for 8 cycles by default */ 239 215 host->clk_delay = 8; 216 + /* 217 + * Default clock gating delay is 200ms. 
218 + * This value can be tuned by writing into sysfs entry. 219 + */ 220 + host->clkgate_delay = 200; 240 221 host->clk_gated = false; 241 - INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work); 222 + INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work); 242 223 spin_lock_init(&host->clk_lock); 243 224 mutex_init(&host->clk_gate_mutex); 244 225 } ··· 258 229 * Wait for any outstanding gate and then make sure we're 259 230 * ungated before exiting. 260 231 */ 261 - if (cancel_work_sync(&host->clk_gate_work)) 232 + if (cancel_delayed_work_sync(&host->clk_gate_work)) 262 233 mmc_host_clk_gate_delayed(host); 263 234 if (host->clk_gated) 264 235 mmc_host_clk_hold(host); ··· 266 237 WARN_ON(host->clk_requests > 1); 267 238 } 268 239 240 + static inline void mmc_host_clk_sysfs_init(struct mmc_host *host) 241 + { 242 + host->clkgate_delay_attr.show = clkgate_delay_show; 243 + host->clkgate_delay_attr.store = clkgate_delay_store; 244 + sysfs_attr_init(&host->clkgate_delay_attr.attr); 245 + host->clkgate_delay_attr.attr.name = "clkgate_delay"; 246 + host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR; 247 + if (device_create_file(&host->class_dev, &host->clkgate_delay_attr)) 248 + pr_err("%s: Failed to create clkgate_delay sysfs entry\n", 249 + mmc_hostname(host)); 250 + } 269 251 #else 270 252 271 253 static inline void mmc_host_clk_init(struct mmc_host *host) ··· 284 244 } 285 245 286 246 static inline void mmc_host_clk_exit(struct mmc_host *host) 247 + { 248 + } 249 + 250 + static inline void mmc_host_clk_sysfs_init(struct mmc_host *host) 287 251 { 288 252 } 289 253 ··· 379 335 #ifdef CONFIG_DEBUG_FS 380 336 mmc_add_host_debugfs(host); 381 337 #endif 338 + mmc_host_clk_sysfs_init(host); 382 339 383 340 mmc_start_host(host); 384 341 register_pm_notifier(&host->pm_notify);
+188 -15
drivers/mmc/core/mmc.c
··· 286 286 } 287 287 card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE]; 288 288 switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) { 289 + case EXT_CSD_CARD_TYPE_SDR_ALL: 290 + case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_8V: 291 + case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_2V: 292 + case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_52: 293 + card->ext_csd.hs_max_dtr = 200000000; 294 + card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_200; 295 + break; 296 + case EXT_CSD_CARD_TYPE_SDR_1_2V_ALL: 297 + case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_8V: 298 + case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_2V: 299 + case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_52: 300 + card->ext_csd.hs_max_dtr = 200000000; 301 + card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_2V; 302 + break; 303 + case EXT_CSD_CARD_TYPE_SDR_1_8V_ALL: 304 + case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_8V: 305 + case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_2V: 306 + case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_52: 307 + card->ext_csd.hs_max_dtr = 200000000; 308 + card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_8V; 309 + break; 289 310 case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 | 290 311 EXT_CSD_CARD_TYPE_26: 291 312 card->ext_csd.hs_max_dtr = 52000000; ··· 369 348 part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17; 370 349 mmc_part_add(card, part_size, 371 350 EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx, 372 - "boot%d", idx, true); 351 + "boot%d", idx, true, 352 + MMC_BLK_DATA_AREA_BOOT); 373 353 } 374 354 } 375 355 } ··· 457 435 hc_wp_grp_sz); 458 436 mmc_part_add(card, part_size << 19, 459 437 EXT_CSD_PART_CONFIG_ACC_GP0 + idx, 460 - "gp%d", idx, false); 438 + "gp%d", idx, false, 439 + MMC_BLK_DATA_AREA_GP); 461 440 } 462 441 } 463 442 card->ext_csd.sec_trim_mult = ··· 469 446 ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]; 470 447 card->ext_csd.trim_timeout = 300 * 471 448 ext_csd[EXT_CSD_TRIM_MULT]; 449 + 450 + /* 451 + * Note that the call to mmc_part_add above defaults to read 452 + * only. 
If this default assumption is changed, the call must 453 + * take into account the value of boot_locked below. 454 + */ 455 + card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP]; 456 + card->ext_csd.boot_ro_lockable = true; 472 457 } 473 458 474 459 if (card->ext_csd.rev >= 5) { ··· 721 690 } 722 691 723 692 /* 693 + * Selects the desired buswidth and switch to the HS200 mode 694 + * if bus width set without error 695 + */ 696 + static int mmc_select_hs200(struct mmc_card *card) 697 + { 698 + int idx, err = 0; 699 + struct mmc_host *host; 700 + static unsigned ext_csd_bits[] = { 701 + EXT_CSD_BUS_WIDTH_4, 702 + EXT_CSD_BUS_WIDTH_8, 703 + }; 704 + static unsigned bus_widths[] = { 705 + MMC_BUS_WIDTH_4, 706 + MMC_BUS_WIDTH_8, 707 + }; 708 + 709 + BUG_ON(!card); 710 + 711 + host = card->host; 712 + 713 + if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V && 714 + host->caps2 & MMC_CAP2_HS200_1_2V_SDR) 715 + if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0)) 716 + err = mmc_set_signal_voltage(host, 717 + MMC_SIGNAL_VOLTAGE_180, 0); 718 + 719 + /* If fails try again during next card power cycle */ 720 + if (err) 721 + goto err; 722 + 723 + idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 1 : 0; 724 + 725 + /* 726 + * Unlike SD, MMC cards dont have a configuration register to notify 727 + * supported bus width. So bus test command should be run to identify 728 + * the supported bus width or compare the ext csd values of current 729 + * bus width and ext csd values of 1 bit mode read earlier. 730 + */ 731 + for (; idx >= 0; idx--) { 732 + 733 + /* 734 + * Host is capable of 8bit transfer, then switch 735 + * the device to work in 8bit transfer mode. If the 736 + * mmc switch command returns error then switch to 737 + * 4bit transfer mode. On success set the corresponding 738 + * bus width on the host. 
739 + */ 740 + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 741 + EXT_CSD_BUS_WIDTH, 742 + ext_csd_bits[idx], 743 + card->ext_csd.generic_cmd6_time); 744 + if (err) 745 + continue; 746 + 747 + mmc_set_bus_width(card->host, bus_widths[idx]); 748 + 749 + if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST)) 750 + err = mmc_compare_ext_csds(card, bus_widths[idx]); 751 + else 752 + err = mmc_bus_test(card, bus_widths[idx]); 753 + if (!err) 754 + break; 755 + } 756 + 757 + /* switch to HS200 mode if bus width set successfully */ 758 + if (!err) 759 + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 760 + EXT_CSD_HS_TIMING, 2, 0); 761 + err: 762 + return err; 763 + } 764 + 765 + /* 724 766 * Handle the detection and initialisation of a card. 725 767 * 726 768 * In the case of a resume, "oldcard" will contain the card ··· 999 895 /* 1000 896 * Activate high speed (if supported) 1001 897 */ 1002 - if ((card->ext_csd.hs_max_dtr != 0) && 1003 - (host->caps & MMC_CAP_MMC_HIGHSPEED)) { 1004 - err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1005 - EXT_CSD_HS_TIMING, 1, 1006 - card->ext_csd.generic_cmd6_time); 898 + if (card->ext_csd.hs_max_dtr != 0) { 899 + err = 0; 900 + if (card->ext_csd.hs_max_dtr > 52000000 && 901 + host->caps2 & MMC_CAP2_HS200) 902 + err = mmc_select_hs200(card); 903 + else if (host->caps & MMC_CAP_MMC_HIGHSPEED) 904 + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 905 + EXT_CSD_HS_TIMING, 1, 0); 906 + 1007 907 if (err && err != -EBADMSG) 1008 908 goto free_card; 1009 909 ··· 1016 908 mmc_hostname(card->host)); 1017 909 err = 0; 1018 910 } else { 1019 - mmc_card_set_highspeed(card); 1020 - mmc_set_timing(card->host, MMC_TIMING_MMC_HS); 911 + if (card->ext_csd.hs_max_dtr > 52000000 && 912 + host->caps2 & MMC_CAP2_HS200) { 913 + mmc_card_set_hs200(card); 914 + mmc_set_timing(card->host, 915 + MMC_TIMING_MMC_HS200); 916 + } else { 917 + mmc_card_set_highspeed(card); 918 + mmc_set_timing(card->host, MMC_TIMING_MMC_HS); 919 + } 1021 920 } 1022 921 } 1023 922 ··· 1049 934 
*/ 1050 935 max_dtr = (unsigned int)-1; 1051 936 1052 - if (mmc_card_highspeed(card)) { 937 + if (mmc_card_highspeed(card) || mmc_card_hs200(card)) { 1053 938 if (max_dtr > card->ext_csd.hs_max_dtr) 1054 939 max_dtr = card->ext_csd.hs_max_dtr; 1055 940 } else if (max_dtr > card->csd.max_dtr) { ··· 1075 960 } 1076 961 1077 962 /* 963 + * Indicate HS200 SDR mode (if supported). 964 + */ 965 + if (mmc_card_hs200(card)) { 966 + u32 ext_csd_bits; 967 + u32 bus_width = card->host->ios.bus_width; 968 + 969 + /* 970 + * For devices supporting HS200 mode, the bus width has 971 + * to be set before executing the tuning function. If 972 + * set before tuning, then device will respond with CRC 973 + * errors for responses on CMD line. So for HS200 the 974 + * sequence will be 975 + * 1. set bus width 4bit / 8 bit (1 bit not supported) 976 + * 2. switch to HS200 mode 977 + * 3. set the clock to > 52Mhz <=200MHz and 978 + * 4. execute tuning for HS200 979 + */ 980 + if ((host->caps2 & MMC_CAP2_HS200) && 981 + card->host->ops->execute_tuning) 982 + err = card->host->ops->execute_tuning(card->host, 983 + MMC_SEND_TUNING_BLOCK_HS200); 984 + if (err) { 985 + pr_warning("%s: tuning execution failed\n", 986 + mmc_hostname(card->host)); 987 + goto err; 988 + } 989 + 990 + ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ? 991 + EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4; 992 + err = mmc_select_powerclass(card, ext_csd_bits, ext_csd); 993 + if (err) { 994 + pr_err("%s: power class selection to bus width %d failed\n", 995 + mmc_hostname(card->host), 1 << bus_width); 996 + goto err; 997 + } 998 + } 999 + 1000 + /* 1078 1001 * Activate wide bus and DDR (if supported). 
1079 1002 */ 1080 - if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) && 1003 + if (!mmc_card_hs200(card) && 1004 + (card->csd.mmca_vsn >= CSD_SPEC_VER_3) && 1081 1005 (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) { 1082 1006 static unsigned ext_csd_bits[][2] = { 1083 1007 { EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 }, ··· 1202 1048 * 1203 1049 * WARNING: eMMC rules are NOT the same as SD DDR 1204 1050 */ 1205 - if (ddr == EXT_CSD_CARD_TYPE_DDR_1_2V) { 1051 + if (ddr == MMC_1_2V_DDR_MODE) { 1206 1052 err = mmc_set_signal_voltage(host, 1207 1053 MMC_SIGNAL_VOLTAGE_120, 0); 1208 1054 if (err) ··· 1221 1067 if ((host->caps2 & MMC_CAP2_CACHE_CTRL) && 1222 1068 card->ext_csd.cache_size > 0) { 1223 1069 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1224 - EXT_CSD_CACHE_CTRL, 1, 0); 1070 + EXT_CSD_CACHE_CTRL, 1, 1071 + card->ext_csd.generic_cmd6_time); 1225 1072 if (err && err != -EBADMSG) 1226 1073 goto free_card; 1227 1074 1228 1075 /* 1229 1076 * Only if no error, cache is turned on successfully. 1230 1077 */ 1231 - card->ext_csd.cache_ctrl = err ? 0 : 1; 1078 + if (err) { 1079 + pr_warning("%s: Cache is supported, " 1080 + "but failed to turn on (%d)\n", 1081 + mmc_hostname(card->host), err); 1082 + card->ext_csd.cache_ctrl = 0; 1083 + err = 0; 1084 + } else { 1085 + card->ext_csd.cache_ctrl = 1; 1086 + } 1232 1087 } 1233 1088 1234 1089 if (!oldcard) ··· 1268 1105 } 1269 1106 1270 1107 /* 1108 + * Card detection - card is alive. 1109 + */ 1110 + static int mmc_alive(struct mmc_host *host) 1111 + { 1112 + return mmc_send_status(host->card, NULL); 1113 + } 1114 + 1115 + /* 1271 1116 * Card detection callback from host. 1272 1117 */ 1273 1118 static void mmc_detect(struct mmc_host *host) ··· 1290 1119 /* 1291 1120 * Just check if our card has been removed. 
1292 1121 */ 1293 - err = mmc_send_status(host->card, NULL); 1122 + err = _mmc_detect_card_removed(host); 1294 1123 1295 1124 mmc_release_host(host); 1296 1125 ··· 1395 1224 .suspend = NULL, 1396 1225 .resume = NULL, 1397 1226 .power_restore = mmc_power_restore, 1227 + .alive = mmc_alive, 1398 1228 }; 1399 1229 1400 1230 static const struct mmc_bus_ops mmc_ops_unsafe = { ··· 1406 1234 .suspend = mmc_suspend, 1407 1235 .resume = mmc_resume, 1408 1236 .power_restore = mmc_power_restore, 1237 + .alive = mmc_alive, 1409 1238 }; 1410 1239 1411 1240 static void mmc_attach_bus_ops(struct mmc_host *host)
+16 -5
drivers/mmc/core/sd.c
··· 307 307 goto out; 308 308 } 309 309 310 - if (status[13] & UHS_SDR50_BUS_SPEED) 311 - card->sw_caps.hs_max_dtr = 50000000; 310 + if (status[13] & SD_MODE_HIGH_SPEED) 311 + card->sw_caps.hs_max_dtr = HIGH_SPEED_MAX_DTR; 312 312 313 313 if (card->scr.sda_spec3) { 314 314 card->sw_caps.sd3_bus_mode = status[13]; ··· 661 661 662 662 /* SPI mode doesn't define CMD19 */ 663 663 if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning) 664 - err = card->host->ops->execute_tuning(card->host); 664 + err = card->host->ops->execute_tuning(card->host, 665 + MMC_SEND_TUNING_BLOCK); 665 666 666 667 out: 667 668 kfree(status); ··· 961 960 goto free_card; 962 961 963 962 /* Card is an ultra-high-speed card */ 964 - mmc_sd_card_set_uhs(card); 963 + mmc_card_set_uhs(card); 965 964 966 965 /* 967 966 * Since initialization is now complete, enable preset ··· 1020 1019 } 1021 1020 1022 1021 /* 1022 + * Card detection - card is alive. 1023 + */ 1024 + static int mmc_sd_alive(struct mmc_host *host) 1025 + { 1026 + return mmc_send_status(host->card, NULL); 1027 + } 1028 + 1029 + /* 1023 1030 * Card detection callback from host. 1024 1031 */ 1025 1032 static void mmc_sd_detect(struct mmc_host *host) ··· 1042 1033 /* 1043 1034 * Just check if our card has been removed. 1044 1035 */ 1045 - err = mmc_send_status(host->card, NULL); 1036 + err = _mmc_detect_card_removed(host); 1046 1037 1047 1038 mmc_release_host(host); 1048 1039 ··· 1111 1102 .suspend = NULL, 1112 1103 .resume = NULL, 1113 1104 .power_restore = mmc_sd_power_restore, 1105 + .alive = mmc_sd_alive, 1114 1106 }; 1115 1107 1116 1108 static const struct mmc_bus_ops mmc_sd_ops_unsafe = { ··· 1120 1110 .suspend = mmc_sd_suspend, 1121 1111 .resume = mmc_sd_resume, 1122 1112 .power_restore = mmc_sd_power_restore, 1113 + .alive = mmc_sd_alive, 1123 1114 }; 1124 1115 1125 1116 static void mmc_sd_attach_bus_ops(struct mmc_host *host)
+316 -26
drivers/mmc/core/sdio.c
··· 14 14 15 15 #include <linux/mmc/host.h> 16 16 #include <linux/mmc/card.h> 17 + #include <linux/mmc/mmc.h> 17 18 #include <linux/mmc/sdio.h> 18 19 #include <linux/mmc/sdio_func.h> 19 20 #include <linux/mmc/sdio_ids.h> ··· 103 102 int ret; 104 103 int cccr_vsn; 105 104 unsigned char data; 105 + unsigned char speed; 106 106 107 107 memset(&card->cccr, 0, sizeof(struct sdio_cccr)); 108 108 ··· 142 140 } 143 141 144 142 if (cccr_vsn >= SDIO_CCCR_REV_1_20) { 145 - ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &data); 143 + ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed); 146 144 if (ret) 147 145 goto out; 148 146 149 - if (data & SDIO_SPEED_SHS) 150 - card->cccr.high_speed = 1; 147 + card->scr.sda_spec3 = 0; 148 + card->sw_caps.sd3_bus_mode = 0; 149 + card->sw_caps.sd3_drv_type = 0; 150 + if (cccr_vsn >= SDIO_CCCR_REV_3_00) { 151 + card->scr.sda_spec3 = 1; 152 + ret = mmc_io_rw_direct(card, 0, 0, 153 + SDIO_CCCR_UHS, 0, &data); 154 + if (ret) 155 + goto out; 156 + 157 + if (card->host->caps & 158 + (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | 159 + MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | 160 + MMC_CAP_UHS_DDR50)) { 161 + if (data & SDIO_UHS_DDR50) 162 + card->sw_caps.sd3_bus_mode 163 + |= SD_MODE_UHS_DDR50; 164 + 165 + if (data & SDIO_UHS_SDR50) 166 + card->sw_caps.sd3_bus_mode 167 + |= SD_MODE_UHS_SDR50; 168 + 169 + if (data & SDIO_UHS_SDR104) 170 + card->sw_caps.sd3_bus_mode 171 + |= SD_MODE_UHS_SDR104; 172 + } 173 + 174 + ret = mmc_io_rw_direct(card, 0, 0, 175 + SDIO_CCCR_DRIVE_STRENGTH, 0, &data); 176 + if (ret) 177 + goto out; 178 + 179 + if (data & SDIO_DRIVE_SDTA) 180 + card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_A; 181 + if (data & SDIO_DRIVE_SDTC) 182 + card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_C; 183 + if (data & SDIO_DRIVE_SDTD) 184 + card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_D; 185 + } 186 + 187 + /* if no uhs mode ensure we check for high speed */ 188 + if (!card->sw_caps.sd3_bus_mode) { 189 + if (speed & SDIO_SPEED_SHS) { 190 
+ card->cccr.high_speed = 1; 191 + card->sw_caps.hs_max_dtr = 50000000; 192 + } else { 193 + card->cccr.high_speed = 0; 194 + card->sw_caps.hs_max_dtr = 25000000; 195 + } 196 + } 151 197 } 152 198 153 199 out: ··· 377 327 return max_dtr; 378 328 } 379 329 330 + static unsigned char host_drive_to_sdio_drive(int host_strength) 331 + { 332 + switch (host_strength) { 333 + case MMC_SET_DRIVER_TYPE_A: 334 + return SDIO_DTSx_SET_TYPE_A; 335 + case MMC_SET_DRIVER_TYPE_B: 336 + return SDIO_DTSx_SET_TYPE_B; 337 + case MMC_SET_DRIVER_TYPE_C: 338 + return SDIO_DTSx_SET_TYPE_C; 339 + case MMC_SET_DRIVER_TYPE_D: 340 + return SDIO_DTSx_SET_TYPE_D; 341 + default: 342 + return SDIO_DTSx_SET_TYPE_B; 343 + } 344 + } 345 + 346 + static void sdio_select_driver_type(struct mmc_card *card) 347 + { 348 + int host_drv_type = SD_DRIVER_TYPE_B; 349 + int card_drv_type = SD_DRIVER_TYPE_B; 350 + int drive_strength; 351 + unsigned char card_strength; 352 + int err; 353 + 354 + /* 355 + * If the host doesn't support any of the Driver Types A,C or D, 356 + * or there is no board specific handler then default Driver 357 + * Type B is used. 
358 + */ 359 + if (!(card->host->caps & 360 + (MMC_CAP_DRIVER_TYPE_A | 361 + MMC_CAP_DRIVER_TYPE_C | 362 + MMC_CAP_DRIVER_TYPE_D))) 363 + return; 364 + 365 + if (!card->host->ops->select_drive_strength) 366 + return; 367 + 368 + if (card->host->caps & MMC_CAP_DRIVER_TYPE_A) 369 + host_drv_type |= SD_DRIVER_TYPE_A; 370 + 371 + if (card->host->caps & MMC_CAP_DRIVER_TYPE_C) 372 + host_drv_type |= SD_DRIVER_TYPE_C; 373 + 374 + if (card->host->caps & MMC_CAP_DRIVER_TYPE_D) 375 + host_drv_type |= SD_DRIVER_TYPE_D; 376 + 377 + if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A) 378 + card_drv_type |= SD_DRIVER_TYPE_A; 379 + 380 + if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C) 381 + card_drv_type |= SD_DRIVER_TYPE_C; 382 + 383 + if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_D) 384 + card_drv_type |= SD_DRIVER_TYPE_D; 385 + 386 + /* 387 + * The drive strength that the hardware can support 388 + * depends on the board design. Pass the appropriate 389 + * information and let the hardware specific code 390 + * return what is possible given the options 391 + */ 392 + drive_strength = card->host->ops->select_drive_strength( 393 + card->sw_caps.uhs_max_dtr, 394 + host_drv_type, card_drv_type); 395 + 396 + /* if error just use default for drive strength B */ 397 + err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_DRIVE_STRENGTH, 0, 398 + &card_strength); 399 + if (err) 400 + return; 401 + 402 + card_strength &= ~(SDIO_DRIVE_DTSx_MASK<<SDIO_DRIVE_DTSx_SHIFT); 403 + card_strength |= host_drive_to_sdio_drive(drive_strength); 404 + 405 + err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_DRIVE_STRENGTH, 406 + card_strength, NULL); 407 + 408 + /* if error default to drive strength B */ 409 + if (!err) 410 + mmc_set_driver_type(card->host, drive_strength); 411 + } 412 + 413 + 414 + static int sdio_set_bus_speed_mode(struct mmc_card *card) 415 + { 416 + unsigned int bus_speed, timing; 417 + int err; 418 + unsigned char speed; 419 + 420 + /* 421 + * If the host doesn't support any of the UHS-I 
modes, fallback on 422 + * default speed. 423 + */ 424 + if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | 425 + MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) 426 + return 0; 427 + 428 + bus_speed = SDIO_SPEED_SDR12; 429 + timing = MMC_TIMING_UHS_SDR12; 430 + if ((card->host->caps & MMC_CAP_UHS_SDR104) && 431 + (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) { 432 + bus_speed = SDIO_SPEED_SDR104; 433 + timing = MMC_TIMING_UHS_SDR104; 434 + card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR; 435 + } else if ((card->host->caps & MMC_CAP_UHS_DDR50) && 436 + (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) { 437 + bus_speed = SDIO_SPEED_DDR50; 438 + timing = MMC_TIMING_UHS_DDR50; 439 + card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR; 440 + } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | 441 + MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode & 442 + SD_MODE_UHS_SDR50)) { 443 + bus_speed = SDIO_SPEED_SDR50; 444 + timing = MMC_TIMING_UHS_SDR50; 445 + card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR; 446 + } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | 447 + MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) && 448 + (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) { 449 + bus_speed = SDIO_SPEED_SDR25; 450 + timing = MMC_TIMING_UHS_SDR25; 451 + card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR; 452 + } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | 453 + MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 | 454 + MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode & 455 + SD_MODE_UHS_SDR12)) { 456 + bus_speed = SDIO_SPEED_SDR12; 457 + timing = MMC_TIMING_UHS_SDR12; 458 + card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR; 459 + } 460 + 461 + err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed); 462 + if (err) 463 + return err; 464 + 465 + speed &= ~SDIO_SPEED_BSS_MASK; 466 + speed |= bus_speed; 467 + err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_SPEED, speed, NULL); 468 + if (err) 469 + return err; 470 + 471 + if (bus_speed) { 472 + 
mmc_set_timing(card->host, timing); 473 + mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr); 474 + } 475 + 476 + return 0; 477 + } 478 + 479 + /* 480 + * UHS-I specific initialization procedure 481 + */ 482 + static int mmc_sdio_init_uhs_card(struct mmc_card *card) 483 + { 484 + int err; 485 + 486 + if (!card->scr.sda_spec3) 487 + return 0; 488 + 489 + /* 490 + * Switch to wider bus (if supported). 491 + */ 492 + if (card->host->caps & MMC_CAP_4_BIT_DATA) { 493 + err = sdio_enable_4bit_bus(card); 494 + if (err > 0) { 495 + mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4); 496 + err = 0; 497 + } 498 + } 499 + 500 + /* Set the driver strength for the card */ 501 + sdio_select_driver_type(card); 502 + 503 + /* Set bus speed mode of the card */ 504 + err = sdio_set_bus_speed_mode(card); 505 + if (err) 506 + goto out; 507 + 508 + /* Initialize and start re-tuning timer */ 509 + if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning) 510 + err = card->host->ops->execute_tuning(card->host, 511 + MMC_SEND_TUNING_BLOCK); 512 + 513 + out: 514 + 515 + return err; 516 + } 517 + 380 518 /* 381 519 * Handle the detection and initialisation of a card. 382 520 * ··· 630 392 */ 631 393 if (host->ops->init_card) 632 394 host->ops->init_card(host, card); 395 + 396 + /* 397 + * If the host and card support UHS-I mode request the card 398 + * to switch to 1.8V signaling level. No 1.8v signalling if 399 + * UHS mode is not enabled to maintain compatibilty and some 400 + * systems that claim 1.8v signalling in fact do not support 401 + * it. 
402 + */ 403 + if ((ocr & R4_18V_PRESENT) && 404 + (host->caps & 405 + (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | 406 + MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | 407 + MMC_CAP_UHS_DDR50))) { 408 + err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, 409 + true); 410 + if (err) { 411 + ocr &= ~R4_18V_PRESENT; 412 + host->ocr &= ~R4_18V_PRESENT; 413 + } 414 + err = 0; 415 + } else { 416 + ocr &= ~R4_18V_PRESENT; 417 + host->ocr &= ~R4_18V_PRESENT; 418 + } 633 419 634 420 /* 635 421 * For native busses: set card RCA and quit open drain mode. ··· 754 492 if (err) 755 493 goto remove; 756 494 757 - /* 758 - * Switch to high-speed (if supported). 759 - */ 760 - err = sdio_enable_hs(card); 761 - if (err > 0) 762 - mmc_sd_go_highspeed(card); 763 - else if (err) 764 - goto remove; 495 + /* Initialization sequence for UHS-I cards */ 496 + /* Only if card supports 1.8v and UHS signaling */ 497 + if ((ocr & R4_18V_PRESENT) && card->sw_caps.sd3_bus_mode) { 498 + err = mmc_sdio_init_uhs_card(card); 499 + if (err) 500 + goto remove; 765 501 766 - /* 767 - * Change to the card's maximum speed. 768 - */ 769 - mmc_set_clock(host, mmc_sdio_get_max_clock(card)); 502 + /* Card is an ultra-high-speed card */ 503 + mmc_card_set_uhs(card); 504 + } else { 505 + /* 506 + * Switch to high-speed (if supported). 507 + */ 508 + err = sdio_enable_hs(card); 509 + if (err > 0) 510 + mmc_sd_go_highspeed(card); 511 + else if (err) 512 + goto remove; 770 513 771 - /* 772 - * Switch to wider bus (if supported). 773 - */ 774 - err = sdio_enable_4bit_bus(card); 775 - if (err > 0) 776 - mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4); 777 - else if (err) 778 - goto remove; 514 + /* 515 + * Change to the card's maximum speed. 516 + */ 517 + mmc_set_clock(host, mmc_sdio_get_max_clock(card)); 779 518 519 + /* 520 + * Switch to wider bus (if supported). 
521 + */ 522 + err = sdio_enable_4bit_bus(card); 523 + if (err > 0) 524 + mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4); 525 + else if (err) 526 + goto remove; 527 + } 780 528 finish: 781 529 if (!oldcard) 782 530 host->card = card; ··· 822 550 } 823 551 824 552 /* 553 + * Card detection - card is alive. 554 + */ 555 + static int mmc_sdio_alive(struct mmc_host *host) 556 + { 557 + return mmc_select_card(host->card); 558 + } 559 + 560 + /* 825 561 * Card detection callback from host. 826 562 */ 827 563 static void mmc_sdio_detect(struct mmc_host *host) ··· 851 571 /* 852 572 * Just check if our card has been removed. 853 573 */ 854 - err = mmc_select_card(host->card); 574 + err = _mmc_detect_card_removed(host); 855 575 856 576 mmc_release_host(host); 857 577 ··· 1029 749 .suspend = mmc_sdio_suspend, 1030 750 .resume = mmc_sdio_resume, 1031 751 .power_restore = mmc_sdio_power_restore, 752 + .alive = mmc_sdio_alive, 1032 753 }; 1033 754 1034 755 ··· 1078 797 * Detect and init the card. 1079 798 */ 1080 799 err = mmc_sdio_init_card(host, host->ocr, NULL, 0); 1081 - if (err) 1082 - goto err; 800 + if (err) { 801 + if (err == -EAGAIN) { 802 + /* 803 + * Retry initialization with S18R set to 0. 804 + */ 805 + host->ocr &= ~R4_18V_PRESENT; 806 + err = mmc_sdio_init_card(host, host->ocr, NULL, 0); 807 + } 808 + if (err) 809 + goto err; 810 + } 1083 811 card = host->card; 1084 812 1085 813 /*
+6 -2
drivers/mmc/core/sdio_io.c
··· 196 196 else 197 197 mval = min(mval, func->max_blksize); 198 198 199 + if (mmc_card_broken_byte_mode_512(func->card)) 200 + return min(mval, 511u); 201 + 199 202 return min(mval, 512u); /* maximum size for byte mode */ 200 203 } 201 204 ··· 317 314 func->card->host->max_seg_size / func->cur_blksize); 318 315 max_blocks = min(max_blocks, 511u); 319 316 320 - while (remainder > func->cur_blksize) { 317 + while (remainder >= func->cur_blksize) { 321 318 unsigned blocks; 322 319 323 320 blocks = remainder / func->cur_blksize; ··· 342 339 while (remainder > 0) { 343 340 size = min(remainder, sdio_max_byte_size(func)); 344 341 342 + /* Indicate byte mode by setting "blocks" = 0 */ 345 343 ret = mmc_io_rw_extended(func->card, write, func->num, addr, 346 - incr_addr, buf, 1, size); 344 + incr_addr, buf, 0, size); 347 345 if (ret) 348 346 return ret; 349 347
+5 -9
drivers/mmc/core/sdio_ops.c
··· 128 128 129 129 BUG_ON(!card); 130 130 BUG_ON(fn > 7); 131 - BUG_ON(blocks == 1 && blksz > 512); 132 - WARN_ON(blocks == 0); 133 131 WARN_ON(blksz == 0); 134 132 135 133 /* sanity check */ ··· 142 144 cmd.arg |= fn << 28; 143 145 cmd.arg |= incr_addr ? 0x04000000 : 0x00000000; 144 146 cmd.arg |= addr << 9; 145 - if (blocks == 1 && blksz < 512) 146 - cmd.arg |= blksz; /* byte mode */ 147 - else if (blocks == 1 && blksz == 512 && 148 - !(mmc_card_broken_byte_mode_512(card))) 149 - cmd.arg |= 0; /* byte mode, 0==512 */ 147 + if (blocks == 0) 148 + cmd.arg |= (blksz == 512) ? 0 : blksz; /* byte mode */ 150 149 else 151 150 cmd.arg |= 0x08000000 | blocks; /* block mode */ 152 151 cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC; 153 152 154 153 data.blksz = blksz; 155 - data.blocks = blocks; 154 + /* Code in host drivers/fwk assumes that "blocks" always is >=1 */ 155 + data.blocks = blocks ? blocks : 1; 156 156 data.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ; 157 157 data.sg = &sg; 158 158 data.sg_len = 1; 159 159 160 - sg_init_one(&sg, buf, blksz * blocks); 160 + sg_init_one(&sg, buf, data.blksz * data.blocks); 161 161 162 162 mmc_set_data_timeout(&data, card); 163 163
+1
drivers/mmc/host/Makefile
··· 9 9 obj-$(CONFIG_MMC_MXS) += mxs-mmc.o 10 10 obj-$(CONFIG_MMC_SDHCI) += sdhci.o 11 11 obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o 12 + obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o 12 13 obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o 13 14 obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o 14 15 obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
+4 -4
drivers/mmc/host/at91_mci.c
··· 236 236 237 237 sg = &data->sg[i]; 238 238 239 - sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; 239 + sgbuffer = kmap_atomic(sg_page(sg)) + sg->offset; 240 240 amount = min(size, sg->length); 241 241 size -= amount; 242 242 ··· 252 252 dmabuf = (unsigned *)tmpv; 253 253 } 254 254 255 - kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ); 255 + kunmap_atomic(sgbuffer); 256 256 257 257 if (size == 0) 258 258 break; ··· 302 302 303 303 sg = &data->sg[i]; 304 304 305 - sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; 305 + sgbuffer = kmap_atomic(sg_page(sg)) + sg->offset; 306 306 amount = min(size, sg->length); 307 307 size -= amount; 308 308 ··· 318 318 } 319 319 320 320 flush_kernel_dcache_page(sg_page(sg)); 321 - kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ); 321 + kunmap_atomic(sgbuffer); 322 322 data->bytes_xfered += amount; 323 323 if (size == 0) 324 324 break;
+1 -11
drivers/mmc/host/bfin_sdh.c
··· 627 627 }, 628 628 }; 629 629 630 - static int __init sdh_init(void) 631 - { 632 - return platform_driver_register(&sdh_driver); 633 - } 634 - module_init(sdh_init); 635 - 636 - static void __exit sdh_exit(void) 637 - { 638 - platform_driver_unregister(&sdh_driver); 639 - } 640 - module_exit(sdh_exit); 630 + module_platform_driver(sdh_driver); 641 631 642 632 MODULE_DESCRIPTION("Blackfin Secure Digital Host Driver"); 643 633 MODULE_AUTHOR("Cliff Cai, Roy Huang");
+1 -12
drivers/mmc/host/cb710-mmc.c
··· 780 780 #endif 781 781 }; 782 782 783 - static int __init cb710_mmc_init_module(void) 784 - { 785 - return platform_driver_register(&cb710_mmc_driver); 786 - } 787 - 788 - static void __exit cb710_mmc_cleanup_module(void) 789 - { 790 - platform_driver_unregister(&cb710_mmc_driver); 791 - } 792 - 793 - module_init(cb710_mmc_init_module); 794 - module_exit(cb710_mmc_cleanup_module); 783 + module_platform_driver(cb710_mmc_driver); 795 784 796 785 MODULE_AUTHOR("Michał Mirosław <mirq-linux@rere.qmqm.pl>"); 797 786 MODULE_DESCRIPTION("ENE CB710 memory card reader driver - MMC/SD part");
+49 -22
drivers/mmc/host/dw_mmc.c
··· 588 588 mci_writel(host, CTYPE, (slot->ctype << slot->id)); 589 589 } 590 590 591 - static void dw_mci_start_request(struct dw_mci *host, 592 - struct dw_mci_slot *slot) 591 + static void __dw_mci_start_request(struct dw_mci *host, 592 + struct dw_mci_slot *slot, 593 + struct mmc_command *cmd) 593 594 { 594 595 struct mmc_request *mrq; 595 - struct mmc_command *cmd; 596 596 struct mmc_data *data; 597 597 u32 cmdflags; 598 598 ··· 610 610 host->completed_events = 0; 611 611 host->data_status = 0; 612 612 613 - data = mrq->data; 613 + data = cmd->data; 614 614 if (data) { 615 615 dw_mci_set_timeout(host); 616 616 mci_writel(host, BYTCNT, data->blksz*data->blocks); 617 617 mci_writel(host, BLKSIZ, data->blksz); 618 618 } 619 619 620 - cmd = mrq->cmd; 621 620 cmdflags = dw_mci_prepare_command(slot->mmc, cmd); 622 621 623 622 /* this is the first command, send the initialization clock */ ··· 632 633 633 634 if (mrq->stop) 634 635 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop); 636 + } 637 + 638 + static void dw_mci_start_request(struct dw_mci *host, 639 + struct dw_mci_slot *slot) 640 + { 641 + struct mmc_request *mrq = slot->mrq; 642 + struct mmc_command *cmd; 643 + 644 + cmd = mrq->sbc ? 
mrq->sbc : mrq->cmd; 645 + __dw_mci_start_request(host, slot, cmd); 635 646 } 636 647 637 648 /* must be called with host->lock held */ ··· 707 698 break; 708 699 } 709 700 701 + regs = mci_readl(slot->host, UHS_REG); 702 + 710 703 /* DDR mode set */ 711 - if (ios->timing == MMC_TIMING_UHS_DDR50) { 712 - regs = mci_readl(slot->host, UHS_REG); 704 + if (ios->timing == MMC_TIMING_UHS_DDR50) 713 705 regs |= (0x1 << slot->id) << 16; 714 - mci_writel(slot->host, UHS_REG, regs); 715 - } 706 + else 707 + regs &= ~(0x1 << slot->id) << 16; 708 + 709 + mci_writel(slot->host, UHS_REG, regs); 716 710 717 711 if (ios->clock) { 718 712 /* ··· 901 889 cmd = host->cmd; 902 890 host->cmd = NULL; 903 891 set_bit(EVENT_CMD_COMPLETE, &host->completed_events); 904 - dw_mci_command_complete(host, host->mrq->cmd); 892 + dw_mci_command_complete(host, cmd); 893 + if (cmd == host->mrq->sbc && !cmd->error) { 894 + prev_state = state = STATE_SENDING_CMD; 895 + __dw_mci_start_request(host, host->cur_slot, 896 + host->mrq->cmd); 897 + goto unlock; 898 + } 899 + 905 900 if (!host->mrq->data || cmd->error) { 906 901 dw_mci_request_end(host, host->mrq); 907 902 goto unlock; ··· 982 963 } 983 964 984 965 if (!data->stop) { 966 + dw_mci_request_end(host, host->mrq); 967 + goto unlock; 968 + } 969 + 970 + if (host->mrq->sbc && !data->error) { 971 + data->stop->error = 0; 985 972 dw_mci_request_end(host, host->mrq); 986 973 goto unlock; 987 974 } ··· 1703 1678 1704 1679 if (host->pdata->caps) 1705 1680 mmc->caps = host->pdata->caps; 1706 - else 1707 - mmc->caps = 0; 1681 + 1682 + if (host->pdata->caps2) 1683 + mmc->caps2 = host->pdata->caps2; 1708 1684 1709 1685 if (host->pdata->get_bus_wd) 1710 1686 if (host->pdata->get_bus_wd(slot->id) >= 4) ··· 1949 1923 * should put it in the platform data. 
1950 1924 */ 1951 1925 fifo_size = mci_readl(host, FIFOTH); 1952 - fifo_size = 1 + ((fifo_size >> 16) & 0x7ff); 1926 + fifo_size = 1 + ((fifo_size >> 16) & 0xfff); 1953 1927 } else { 1954 1928 fifo_size = host->pdata->fifo_depth; 1955 1929 } ··· 2088 2062 return 0; 2089 2063 } 2090 2064 2091 - #ifdef CONFIG_PM 2065 + #ifdef CONFIG_PM_SLEEP 2092 2066 /* 2093 2067 * TODO: we should probably disable the clock to the card in the suspend path. 2094 2068 */ 2095 - static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg) 2069 + static int dw_mci_suspend(struct device *dev) 2096 2070 { 2097 2071 int i, ret; 2098 - struct dw_mci *host = platform_get_drvdata(pdev); 2072 + struct dw_mci *host = dev_get_drvdata(dev); 2099 2073 2100 2074 for (i = 0; i < host->num_slots; i++) { 2101 2075 struct dw_mci_slot *slot = host->slot[i]; ··· 2118 2092 return 0; 2119 2093 } 2120 2094 2121 - static int dw_mci_resume(struct platform_device *pdev) 2095 + static int dw_mci_resume(struct device *dev) 2122 2096 { 2123 2097 int i, ret; 2124 - struct dw_mci *host = platform_get_drvdata(pdev); 2098 + struct dw_mci *host = dev_get_drvdata(dev); 2125 2099 2126 2100 if (host->vmmc) 2127 2101 regulator_enable(host->vmmc); ··· 2129 2103 if (host->dma_ops->init) 2130 2104 host->dma_ops->init(host); 2131 2105 2132 - if (!mci_wait_reset(&pdev->dev, host)) { 2106 + if (!mci_wait_reset(dev, host)) { 2133 2107 ret = -ENODEV; 2134 2108 return ret; 2135 2109 } ··· 2157 2131 #else 2158 2132 #define dw_mci_suspend NULL 2159 2133 #define dw_mci_resume NULL 2160 - #endif /* CONFIG_PM */ 2134 + #endif /* CONFIG_PM_SLEEP */ 2135 + 2136 + static SIMPLE_DEV_PM_OPS(dw_mci_pmops, dw_mci_suspend, dw_mci_resume); 2161 2137 2162 2138 static struct platform_driver dw_mci_driver = { 2163 2139 .remove = __exit_p(dw_mci_remove), 2164 - .suspend = dw_mci_suspend, 2165 - .resume = dw_mci_resume, 2166 2140 .driver = { 2167 2141 .name = "dw_mmc", 2142 + .pm = &dw_mci_pmops, 2168 2143 }, 2169 2144 }; 2170 2145
+1 -1
drivers/mmc/host/dw_mmc.h
··· 126 126 #define SDMMC_CMD_RESP_EXP BIT(6) 127 127 #define SDMMC_CMD_INDX(n) ((n) & 0x1F) 128 128 /* Status register defines */ 129 - #define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FF) 129 + #define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FFF) 130 130 /* Internal DMAC interrupt defines */ 131 131 #define SDMMC_IDMAC_INT_AI BIT(9) 132 132 #define SDMMC_IDMAC_INT_NI BIT(8)
+1 -11
drivers/mmc/host/jz4740_mmc.c
··· 1012 1012 }, 1013 1013 }; 1014 1014 1015 - static int __init jz4740_mmc_init(void) 1016 - { 1017 - return platform_driver_register(&jz4740_mmc_driver); 1018 - } 1019 - module_init(jz4740_mmc_init); 1020 - 1021 - static void __exit jz4740_mmc_exit(void) 1022 - { 1023 - platform_driver_unregister(&jz4740_mmc_driver); 1024 - } 1025 - module_exit(jz4740_mmc_exit); 1015 + module_platform_driver(jz4740_mmc_driver); 1026 1016 1027 1017 MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver"); 1028 1018 MODULE_LICENSE("GPL");
-1
drivers/mmc/host/mmc_spi.c
··· 1525 1525 static struct spi_driver mmc_spi_driver = { 1526 1526 .driver = { 1527 1527 .name = "mmc_spi", 1528 - .bus = &spi_bus_type, 1529 1528 .owner = THIS_MODULE, 1530 1529 .of_match_table = mmc_spi_of_match_table, 1531 1530 },
+1
drivers/mmc/host/mmci.c
··· 1245 1245 if (host->vcc == NULL) 1246 1246 mmc->ocr_avail = plat->ocr_mask; 1247 1247 mmc->caps = plat->capabilities; 1248 + mmc->caps2 = plat->capabilities2; 1248 1249 1249 1250 /* 1250 1251 * We can do SGIO
+4 -15
drivers/mmc/host/msm_sdcc.c
··· 689 689 690 690 /* Map the current scatter buffer */ 691 691 local_irq_save(flags); 692 - buffer = kmap_atomic(sg_page(host->pio.sg), 693 - KM_BIO_SRC_IRQ) + host->pio.sg->offset; 692 + buffer = kmap_atomic(sg_page(host->pio.sg)) 693 + + host->pio.sg->offset; 694 694 buffer += host->pio.sg_off; 695 695 remain = host->pio.sg->length - host->pio.sg_off; 696 696 len = 0; ··· 700 700 len = msmsdcc_pio_write(host, buffer, remain, status); 701 701 702 702 /* Unmap the buffer */ 703 - kunmap_atomic(buffer, KM_BIO_SRC_IRQ); 703 + kunmap_atomic(buffer); 704 704 local_irq_restore(flags); 705 705 706 706 host->pio.sg_off += len; ··· 1480 1480 }, 1481 1481 }; 1482 1482 1483 - static int __init msmsdcc_init(void) 1484 - { 1485 - return platform_driver_register(&msmsdcc_driver); 1486 - } 1487 - 1488 - static void __exit msmsdcc_exit(void) 1489 - { 1490 - platform_driver_unregister(&msmsdcc_driver); 1491 - } 1492 - 1493 - module_init(msmsdcc_init); 1494 - module_exit(msmsdcc_exit); 1483 + module_platform_driver(msmsdcc_driver); 1495 1484 1496 1485 MODULE_DESCRIPTION("Qualcomm MSM 7X00A Multimedia Card Interface driver"); 1497 1486 MODULE_LICENSE("GPL");
+1 -12
drivers/mmc/host/mxcmmc.c
··· 1047 1047 } 1048 1048 }; 1049 1049 1050 - static int __init mxcmci_init(void) 1051 - { 1052 - return platform_driver_register(&mxcmci_driver); 1053 - } 1054 - 1055 - static void __exit mxcmci_exit(void) 1056 - { 1057 - platform_driver_unregister(&mxcmci_driver); 1058 - } 1059 - 1060 - module_init(mxcmci_init); 1061 - module_exit(mxcmci_exit); 1050 + module_platform_driver(mxcmci_driver); 1062 1051 1063 1052 MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver"); 1064 1053 MODULE_AUTHOR("Sascha Hauer, Pengutronix");
+1 -12
drivers/mmc/host/mxs-mmc.c
··· 855 855 }, 856 856 }; 857 857 858 - static int __init mxs_mmc_init(void) 859 - { 860 - return platform_driver_register(&mxs_mmc_driver); 861 - } 862 - 863 - static void __exit mxs_mmc_exit(void) 864 - { 865 - platform_driver_unregister(&mxs_mmc_driver); 866 - } 867 - 868 - module_init(mxs_mmc_init); 869 - module_exit(mxs_mmc_exit); 858 + module_platform_driver(mxs_mmc_driver); 870 859 871 860 MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral"); 872 861 MODULE_AUTHOR("Freescale Semiconductor");
+12 -31
drivers/mmc/host/omap_hsmmc.c
··· 24 24 #include <linux/delay.h> 25 25 #include <linux/dma-mapping.h> 26 26 #include <linux/platform_device.h> 27 - #include <linux/workqueue.h> 28 27 #include <linux/timer.h> 29 28 #include <linux/clk.h> 30 29 #include <linux/mmc/host.h> ··· 119 120 120 121 #define MMC_AUTOSUSPEND_DELAY 100 121 122 #define MMC_TIMEOUT_MS 20 122 - #define OMAP_MMC_MASTER_CLOCK 96000000 123 123 #define OMAP_MMC_MIN_CLOCK 400000 124 124 #define OMAP_MMC_MAX_CLOCK 52000000 125 125 #define DRIVER_NAME "omap_hsmmc" ··· 161 163 */ 162 164 struct regulator *vcc; 163 165 struct regulator *vcc_aux; 164 - struct work_struct mmc_carddetect_work; 165 166 void __iomem *base; 166 167 resource_size_t mapbase; 167 168 spinlock_t irq_lock; /* Prevent races with irq handler */ ··· 595 598 } 596 599 597 600 /* Calculate divisor for the given clock frequency */ 598 - static u16 calc_divisor(struct mmc_ios *ios) 601 + static u16 calc_divisor(struct omap_hsmmc_host *host, struct mmc_ios *ios) 599 602 { 600 603 u16 dsor = 0; 601 604 602 605 if (ios->clock) { 603 - dsor = DIV_ROUND_UP(OMAP_MMC_MASTER_CLOCK, ios->clock); 606 + dsor = DIV_ROUND_UP(clk_get_rate(host->fclk), ios->clock); 604 607 if (dsor > 250) 605 608 dsor = 250; 606 609 } ··· 620 623 621 624 regval = OMAP_HSMMC_READ(host->base, SYSCTL); 622 625 regval = regval & ~(CLKD_MASK | DTO_MASK); 623 - regval = regval | (calc_divisor(ios) << 6) | (DTO << 16); 626 + regval = regval | (calc_divisor(host, ios) << 6) | (DTO << 16); 624 627 OMAP_HSMMC_WRITE(host->base, SYSCTL, regval); 625 628 OMAP_HSMMC_WRITE(host->base, SYSCTL, 626 629 OMAP_HSMMC_READ(host->base, SYSCTL) | ICE); ··· 1277 1280 } 1278 1281 1279 1282 /* 1280 - * Work Item to notify the core about card insertion/removal 1283 + * irq handler to notify the core about card insertion/removal 1281 1284 */ 1282 - static void omap_hsmmc_detect(struct work_struct *work) 1285 + static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id) 1283 1286 { 1284 - struct omap_hsmmc_host *host = 1285 - 
container_of(work, struct omap_hsmmc_host, mmc_carddetect_work); 1287 + struct omap_hsmmc_host *host = dev_id; 1286 1288 struct omap_mmc_slot_data *slot = &mmc_slot(host); 1287 1289 int carddetect; 1288 1290 1289 1291 if (host->suspended) 1290 - return; 1292 + return IRQ_HANDLED; 1291 1293 1292 1294 sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch"); 1293 1295 ··· 1301 1305 mmc_detect_change(host->mmc, (HZ * 200) / 1000); 1302 1306 else 1303 1307 mmc_detect_change(host->mmc, (HZ * 50) / 1000); 1304 - } 1305 - 1306 - /* 1307 - * ISR for handling card insertion and removal 1308 - */ 1309 - static irqreturn_t omap_hsmmc_cd_handler(int irq, void *dev_id) 1310 - { 1311 - struct omap_hsmmc_host *host = (struct omap_hsmmc_host *)dev_id; 1312 - 1313 - if (host->suspended) 1314 - return IRQ_HANDLED; 1315 - schedule_work(&host->mmc_carddetect_work); 1316 - 1317 1308 return IRQ_HANDLED; 1318 1309 } 1319 1310 ··· 1902 1919 host->next_data.cookie = 1; 1903 1920 1904 1921 platform_set_drvdata(pdev, host); 1905 - INIT_WORK(&host->mmc_carddetect_work, omap_hsmmc_detect); 1906 1922 1907 1923 mmc->ops = &omap_hsmmc_ops; 1908 1924 ··· 2031 2049 2032 2050 /* Request IRQ for card detect */ 2033 2051 if ((mmc_slot(host).card_detect_irq)) { 2034 - ret = request_irq(mmc_slot(host).card_detect_irq, 2035 - omap_hsmmc_cd_handler, 2036 - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, 2037 - mmc_hostname(mmc), host); 2052 + ret = request_threaded_irq(mmc_slot(host).card_detect_irq, 2053 + NULL, 2054 + omap_hsmmc_detect, 2055 + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, 2056 + mmc_hostname(mmc), host); 2038 2057 if (ret) { 2039 2058 dev_dbg(mmc_dev(host->mmc), 2040 2059 "Unable to grab MMC CD IRQ\n"); ··· 2114 2131 free_irq(host->irq, host); 2115 2132 if (mmc_slot(host).card_detect_irq) 2116 2133 free_irq(mmc_slot(host).card_detect_irq, host); 2117 - flush_work_sync(&host->mmc_carddetect_work); 2118 2134 2119 2135 pm_runtime_put_sync(host->dev); 2120 2136 
pm_runtime_disable(host->dev); ··· 2160 2178 return ret; 2161 2179 } 2162 2180 } 2163 - cancel_work_sync(&host->mmc_carddetect_work); 2164 2181 ret = mmc_suspend_host(host->mmc); 2165 2182 2166 2183 if (ret) {
+1 -12
drivers/mmc/host/pxamci.c
··· 872 872 }, 873 873 }; 874 874 875 - static int __init pxamci_init(void) 876 - { 877 - return platform_driver_register(&pxamci_driver); 878 - } 879 - 880 - static void __exit pxamci_exit(void) 881 - { 882 - platform_driver_unregister(&pxamci_driver); 883 - } 884 - 885 - module_init(pxamci_init); 886 - module_exit(pxamci_exit); 875 + module_platform_driver(pxamci_driver); 887 876 888 877 MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver"); 889 878 MODULE_LICENSE("GPL");
+1 -12
drivers/mmc/host/s3cmci.c
··· 1914 1914 .shutdown = s3cmci_shutdown, 1915 1915 }; 1916 1916 1917 - static int __init s3cmci_init(void) 1918 - { 1919 - return platform_driver_register(&s3cmci_driver); 1920 - } 1921 - 1922 - static void __exit s3cmci_exit(void) 1923 - { 1924 - platform_driver_unregister(&s3cmci_driver); 1925 - } 1926 - 1927 - module_init(s3cmci_init); 1928 - module_exit(s3cmci_exit); 1917 + module_platform_driver(s3cmci_driver); 1929 1918 1930 1919 MODULE_DESCRIPTION("Samsung S3C MMC/SD Card Interface driver"); 1931 1920 MODULE_LICENSE("GPL v2");
+1 -11
drivers/mmc/host/sdhci-cns3xxx.c
··· 115 115 .remove = __devexit_p(sdhci_cns3xxx_remove), 116 116 }; 117 117 118 - static int __init sdhci_cns3xxx_init(void) 119 - { 120 - return platform_driver_register(&sdhci_cns3xxx_driver); 121 - } 122 - module_init(sdhci_cns3xxx_init); 123 - 124 - static void __exit sdhci_cns3xxx_exit(void) 125 - { 126 - platform_driver_unregister(&sdhci_cns3xxx_driver); 127 - } 128 - module_exit(sdhci_cns3xxx_exit); 118 + module_platform_driver(sdhci_cns3xxx_driver); 129 119 130 120 MODULE_DESCRIPTION("SDHCI driver for CNS3xxx"); 131 121 MODULE_AUTHOR("Scott Shu, "
+1 -11
drivers/mmc/host/sdhci-dove.c
··· 88 88 .remove = __devexit_p(sdhci_dove_remove), 89 89 }; 90 90 91 - static int __init sdhci_dove_init(void) 92 - { 93 - return platform_driver_register(&sdhci_dove_driver); 94 - } 95 - module_init(sdhci_dove_init); 96 - 97 - static void __exit sdhci_dove_exit(void) 98 - { 99 - platform_driver_unregister(&sdhci_dove_driver); 100 - } 101 - module_exit(sdhci_dove_exit); 91 + module_platform_driver(sdhci_dove_driver); 102 92 103 93 MODULE_DESCRIPTION("SDHCI driver for Dove"); 104 94 MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>, "
+1 -11
drivers/mmc/host/sdhci-esdhc-imx.c
··· 606 606 .remove = __devexit_p(sdhci_esdhc_imx_remove), 607 607 }; 608 608 609 - static int __init sdhci_esdhc_imx_init(void) 610 - { 611 - return platform_driver_register(&sdhci_esdhc_imx_driver); 612 - } 613 - module_init(sdhci_esdhc_imx_init); 614 - 615 - static void __exit sdhci_esdhc_imx_exit(void) 616 - { 617 - platform_driver_unregister(&sdhci_esdhc_imx_driver); 618 - } 619 - module_exit(sdhci_esdhc_imx_exit); 609 + module_platform_driver(sdhci_esdhc_imx_driver); 620 610 621 611 MODULE_DESCRIPTION("SDHCI driver for Freescale i.MX eSDHC"); 622 612 MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
+1 -1
drivers/mmc/host/sdhci-esdhc.h
··· 73 73 | (div << ESDHC_DIVIDER_SHIFT) 74 74 | (pre_div << ESDHC_PREDIV_SHIFT)); 75 75 sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); 76 - mdelay(100); 76 + mdelay(1); 77 77 out: 78 78 host->clock = clock; 79 79 }
+1 -11
drivers/mmc/host/sdhci-of-esdhc.c
··· 131 131 .remove = __devexit_p(sdhci_esdhc_remove), 132 132 }; 133 133 134 - static int __init sdhci_esdhc_init(void) 135 - { 136 - return platform_driver_register(&sdhci_esdhc_driver); 137 - } 138 - module_init(sdhci_esdhc_init); 139 - 140 - static void __exit sdhci_esdhc_exit(void) 141 - { 142 - platform_driver_unregister(&sdhci_esdhc_driver); 143 - } 144 - module_exit(sdhci_esdhc_exit); 134 + module_platform_driver(sdhci_esdhc_driver); 145 135 146 136 MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC"); 147 137 MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, "
+1 -11
drivers/mmc/host/sdhci-of-hlwd.c
··· 93 93 .remove = __devexit_p(sdhci_hlwd_remove), 94 94 }; 95 95 96 - static int __init sdhci_hlwd_init(void) 97 - { 98 - return platform_driver_register(&sdhci_hlwd_driver); 99 - } 100 - module_init(sdhci_hlwd_init); 101 - 102 - static void __exit sdhci_hlwd_exit(void) 103 - { 104 - platform_driver_unregister(&sdhci_hlwd_driver); 105 - } 106 - module_exit(sdhci_hlwd_exit); 96 + module_platform_driver(sdhci_hlwd_driver); 107 97 108 98 MODULE_DESCRIPTION("Nintendo Wii SDHCI OF driver"); 109 99 MODULE_AUTHOR("The GameCube Linux Team, Albert Herranz");
+5
drivers/mmc/host/sdhci-pci-data.c
··· 1 + #include <linux/module.h> 2 + #include <linux/mmc/sdhci-pci-data.h> 3 + 4 + struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev, int slotno); 5 + EXPORT_SYMBOL_GPL(sdhci_pci_get_data);
+91 -90
drivers/mmc/host/sdhci-pci.c
··· 23 23 #include <linux/scatterlist.h> 24 24 #include <linux/io.h> 25 25 #include <linux/gpio.h> 26 - #include <linux/sfi.h> 27 26 #include <linux/pm_runtime.h> 27 + #include <linux/mmc/sdhci-pci-data.h> 28 28 29 29 #include "sdhci.h" 30 30 ··· 61 61 struct sdhci_pci_slot { 62 62 struct sdhci_pci_chip *chip; 63 63 struct sdhci_host *host; 64 + struct sdhci_pci_data *data; 64 65 65 66 int pci_bar; 66 67 int rst_n_gpio; ··· 172 171 return 0; 173 172 } 174 173 175 - /* Medfield eMMC hardware reset GPIOs */ 176 - static int mfd_emmc0_rst_gpio = -EINVAL; 177 - static int mfd_emmc1_rst_gpio = -EINVAL; 178 - 179 - static int mfd_emmc_gpio_parse(struct sfi_table_header *table) 180 - { 181 - struct sfi_table_simple *sb = (struct sfi_table_simple *)table; 182 - struct sfi_gpio_table_entry *entry; 183 - int i, num; 184 - 185 - num = SFI_GET_NUM_ENTRIES(sb, struct sfi_gpio_table_entry); 186 - entry = (struct sfi_gpio_table_entry *)sb->pentry; 187 - 188 - for (i = 0; i < num; i++, entry++) { 189 - if (!strncmp(entry->pin_name, "emmc0_rst", SFI_NAME_LEN)) 190 - mfd_emmc0_rst_gpio = entry->pin_no; 191 - else if (!strncmp(entry->pin_name, "emmc1_rst", SFI_NAME_LEN)) 192 - mfd_emmc1_rst_gpio = entry->pin_no; 193 - } 194 - 195 - return 0; 196 - } 197 - 198 174 #ifdef CONFIG_PM_RUNTIME 199 175 200 - static irqreturn_t mfd_sd_cd(int irq, void *dev_id) 176 + static irqreturn_t sdhci_pci_sd_cd(int irq, void *dev_id) 201 177 { 202 178 struct sdhci_pci_slot *slot = dev_id; 203 179 struct sdhci_host *host = slot->host; ··· 183 205 return IRQ_HANDLED; 184 206 } 185 207 186 - #define MFLD_SD_CD_PIN 69 187 - 188 - static int mfd_sd_probe_slot(struct sdhci_pci_slot *slot) 208 + static void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot) 189 209 { 190 - int err, irq, gpio = MFLD_SD_CD_PIN; 210 + int err, irq, gpio = slot->cd_gpio; 191 211 192 212 slot->cd_gpio = -EINVAL; 193 213 slot->cd_irq = -EINVAL; 214 + 215 + if (!gpio_is_valid(gpio)) 216 + return; 194 217 195 218 err = 
gpio_request(gpio, "sd_cd"); 196 219 if (err < 0) ··· 205 226 if (irq < 0) 206 227 goto out_free; 207 228 208 - err = request_irq(irq, mfd_sd_cd, IRQF_TRIGGER_RISING | 229 + err = request_irq(irq, sdhci_pci_sd_cd, IRQF_TRIGGER_RISING | 209 230 IRQF_TRIGGER_FALLING, "sd_cd", slot); 210 231 if (err) 211 232 goto out_free; 212 233 213 234 slot->cd_gpio = gpio; 214 235 slot->cd_irq = irq; 215 - slot->host->quirks2 |= SDHCI_QUIRK2_OWN_CARD_DETECTION; 216 236 217 - return 0; 237 + return; 218 238 219 239 out_free: 220 240 gpio_free(gpio); 221 241 out: 222 242 dev_warn(&slot->chip->pdev->dev, "failed to setup card detect wake up\n"); 223 - return 0; 224 243 } 225 244 226 - static void mfd_sd_remove_slot(struct sdhci_pci_slot *slot, int dead) 245 + static void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot) 227 246 { 228 247 if (slot->cd_irq >= 0) 229 248 free_irq(slot->cd_irq, slot); 230 - gpio_free(slot->cd_gpio); 249 + if (gpio_is_valid(slot->cd_gpio)) 250 + gpio_free(slot->cd_gpio); 231 251 } 232 252 233 253 #else 234 254 235 - #define mfd_sd_probe_slot NULL 236 - #define mfd_sd_remove_slot NULL 255 + static inline void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot) 256 + { 257 + } 258 + 259 + static inline void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot) 260 + { 261 + } 237 262 238 263 #endif 239 264 240 265 static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot) 241 266 { 242 - const char *name = NULL; 243 - int gpio = -EINVAL; 244 - 245 - sfi_table_parse(SFI_SIG_GPIO, NULL, NULL, mfd_emmc_gpio_parse); 246 - 247 - switch (slot->chip->pdev->device) { 248 - case PCI_DEVICE_ID_INTEL_MFD_EMMC0: 249 - gpio = mfd_emmc0_rst_gpio; 250 - name = "eMMC0_reset"; 251 - break; 252 - case PCI_DEVICE_ID_INTEL_MFD_EMMC1: 253 - gpio = mfd_emmc1_rst_gpio; 254 - name = "eMMC1_reset"; 255 - break; 256 - } 257 - 258 - if (!gpio_request(gpio, name)) { 259 - gpio_direction_output(gpio, 1); 260 - slot->rst_n_gpio = gpio; 261 - slot->host->mmc->caps |= MMC_CAP_HW_RESET; 
262 - } 263 - 264 267 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE; 265 - 266 268 slot->host->mmc->caps2 = MMC_CAP2_BOOTPART_NOACC; 267 - 268 269 return 0; 269 270 } 270 271 271 - static void mfd_emmc_remove_slot(struct sdhci_pci_slot *slot, int dead) 272 + static int mfd_sdio_probe_slot(struct sdhci_pci_slot *slot) 272 273 { 273 - gpio_free(slot->rst_n_gpio); 274 + slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD; 275 + return 0; 274 276 } 275 277 276 278 static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = { ··· 267 307 static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = { 268 308 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, 269 309 .allow_runtime_pm = true, 270 - .probe_slot = mfd_sd_probe_slot, 271 - .remove_slot = mfd_sd_remove_slot, 272 310 }; 273 311 274 312 static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = { 275 313 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, 276 314 .allow_runtime_pm = true, 315 + .probe_slot = mfd_sdio_probe_slot, 277 316 }; 278 317 279 318 static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc = { 280 319 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, 281 320 .allow_runtime_pm = true, 282 321 .probe_slot = mfd_emmc_probe_slot, 283 - .remove_slot = mfd_emmc_remove_slot, 284 322 }; 285 323 286 324 /* O2Micro extra registers */ ··· 970 1012 971 1013 ret = sdhci_suspend_host(slot->host); 972 1014 973 - if (ret) { 974 - for (i--; i >= 0; i--) 975 - sdhci_resume_host(chip->slots[i]->host); 976 - return ret; 977 - } 1015 + if (ret) 1016 + goto err_pci_suspend; 978 1017 979 1018 slot_pm_flags = slot->host->mmc->pm_flags; 980 1019 if (slot_pm_flags & MMC_PM_WAKE_SDIO_IRQ) ··· 982 1027 983 1028 if (chip->fixes && chip->fixes->suspend) { 984 1029 ret = chip->fixes->suspend(chip); 985 - if (ret) { 986 - for (i = chip->num_slots - 1; i >= 0; i--) 987 - sdhci_resume_host(chip->slots[i]->host); 988 - return ret; 989 - } 1030 + if (ret) 1031 + goto err_pci_suspend; 990 1032 } 991 1033 992 1034 
pci_save_state(pdev); ··· 1000 1048 } 1001 1049 1002 1050 return 0; 1051 + 1052 + err_pci_suspend: 1053 + while (--i >= 0) 1054 + sdhci_resume_host(chip->slots[i]->host); 1055 + return ret; 1003 1056 } 1004 1057 1005 1058 static int sdhci_pci_resume(struct device *dev) ··· 1070 1113 1071 1114 ret = sdhci_runtime_suspend_host(slot->host); 1072 1115 1073 - if (ret) { 1074 - for (i--; i >= 0; i--) 1075 - sdhci_runtime_resume_host(chip->slots[i]->host); 1076 - return ret; 1077 - } 1116 + if (ret) 1117 + goto err_pci_runtime_suspend; 1078 1118 } 1079 1119 1080 1120 if (chip->fixes && chip->fixes->suspend) { 1081 1121 ret = chip->fixes->suspend(chip); 1082 - if (ret) { 1083 - for (i = chip->num_slots - 1; i >= 0; i--) 1084 - sdhci_runtime_resume_host(chip->slots[i]->host); 1085 - return ret; 1086 - } 1122 + if (ret) 1123 + goto err_pci_runtime_suspend; 1087 1124 } 1088 1125 1089 1126 return 0; 1127 + 1128 + err_pci_runtime_suspend: 1129 + while (--i >= 0) 1130 + sdhci_runtime_resume_host(chip->slots[i]->host); 1131 + return ret; 1090 1132 } 1091 1133 1092 1134 static int sdhci_pci_runtime_resume(struct device *dev) ··· 1146 1190 \*****************************************************************************/ 1147 1191 1148 1192 static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot( 1149 - struct pci_dev *pdev, struct sdhci_pci_chip *chip, int bar) 1193 + struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar, 1194 + int slotno) 1150 1195 { 1151 1196 struct sdhci_pci_slot *slot; 1152 1197 struct sdhci_host *host; 1153 - int ret; 1198 + int ret, bar = first_bar + slotno; 1154 1199 1155 1200 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { 1156 1201 dev_err(&pdev->dev, "BAR %d is not iomem. 
Aborting.\n", bar); ··· 1185 1228 slot->host = host; 1186 1229 slot->pci_bar = bar; 1187 1230 slot->rst_n_gpio = -EINVAL; 1231 + slot->cd_gpio = -EINVAL; 1232 + 1233 + /* Retrieve platform data if there is any */ 1234 + if (*sdhci_pci_get_data) 1235 + slot->data = sdhci_pci_get_data(pdev, slotno); 1236 + 1237 + if (slot->data) { 1238 + if (slot->data->setup) { 1239 + ret = slot->data->setup(slot->data); 1240 + if (ret) { 1241 + dev_err(&pdev->dev, "platform setup failed\n"); 1242 + goto free; 1243 + } 1244 + } 1245 + slot->rst_n_gpio = slot->data->rst_n_gpio; 1246 + slot->cd_gpio = slot->data->cd_gpio; 1247 + } 1188 1248 1189 1249 host->hw_name = "PCI"; 1190 1250 host->ops = &sdhci_pci_ops; ··· 1212 1238 ret = pci_request_region(pdev, bar, mmc_hostname(host->mmc)); 1213 1239 if (ret) { 1214 1240 dev_err(&pdev->dev, "cannot request region\n"); 1215 - goto free; 1241 + goto cleanup; 1216 1242 } 1217 1243 1218 1244 host->ioaddr = pci_ioremap_bar(pdev, bar); ··· 1228 1254 goto unmap; 1229 1255 } 1230 1256 1257 + if (gpio_is_valid(slot->rst_n_gpio)) { 1258 + if (!gpio_request(slot->rst_n_gpio, "eMMC_reset")) { 1259 + gpio_direction_output(slot->rst_n_gpio, 1); 1260 + slot->host->mmc->caps |= MMC_CAP_HW_RESET; 1261 + } else { 1262 + dev_warn(&pdev->dev, "failed to request rst_n_gpio\n"); 1263 + slot->rst_n_gpio = -EINVAL; 1264 + } 1265 + } 1266 + 1231 1267 host->mmc->pm_caps = MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ; 1232 1268 1233 1269 ret = sdhci_add_host(host); 1234 1270 if (ret) 1235 1271 goto remove; 1236 1272 1273 + sdhci_pci_add_own_cd(slot); 1274 + 1237 1275 return slot; 1238 1276 1239 1277 remove: 1278 + if (gpio_is_valid(slot->rst_n_gpio)) 1279 + gpio_free(slot->rst_n_gpio); 1280 + 1240 1281 if (chip->fixes && chip->fixes->remove_slot) 1241 1282 chip->fixes->remove_slot(slot, 0); 1242 1283 ··· 1260 1271 1261 1272 release: 1262 1273 pci_release_region(pdev, bar); 1274 + 1275 + cleanup: 1276 + if (slot->data && slot->data->cleanup) 1277 + 
slot->data->cleanup(slot->data); 1263 1278 1264 1279 free: 1265 1280 sdhci_free_host(host); ··· 1276 1283 int dead; 1277 1284 u32 scratch; 1278 1285 1286 + sdhci_pci_remove_own_cd(slot); 1287 + 1279 1288 dead = 0; 1280 1289 scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS); 1281 1290 if (scratch == (u32)-1) ··· 1285 1290 1286 1291 sdhci_remove_host(slot->host, dead); 1287 1292 1293 + if (gpio_is_valid(slot->rst_n_gpio)) 1294 + gpio_free(slot->rst_n_gpio); 1295 + 1288 1296 if (slot->chip->fixes && slot->chip->fixes->remove_slot) 1289 1297 slot->chip->fixes->remove_slot(slot, dead); 1298 + 1299 + if (slot->data && slot->data->cleanup) 1300 + slot->data->cleanup(slot->data); 1290 1301 1291 1302 pci_release_region(slot->chip->pdev, slot->pci_bar); 1292 1303 ··· 1380 1379 slots = chip->num_slots; /* Quirk may have changed this */ 1381 1380 1382 1381 for (i = 0; i < slots; i++) { 1383 - slot = sdhci_pci_probe_slot(pdev, chip, first_bar + i); 1382 + slot = sdhci_pci_probe_slot(pdev, chip, first_bar, i); 1384 1383 if (IS_ERR(slot)) { 1385 1384 for (i--; i >= 0; i--) 1386 1385 sdhci_pci_remove_slot(chip->slots[i]);
+1 -11
drivers/mmc/host/sdhci-pxav2.c
··· 223 223 .probe = sdhci_pxav2_probe, 224 224 .remove = __devexit_p(sdhci_pxav2_remove), 225 225 }; 226 - static int __init sdhci_pxav2_init(void) 227 - { 228 - return platform_driver_register(&sdhci_pxav2_driver); 229 - } 230 226 231 - static void __exit sdhci_pxav2_exit(void) 232 - { 233 - platform_driver_unregister(&sdhci_pxav2_driver); 234 - } 235 - 236 - module_init(sdhci_pxav2_init); 237 - module_exit(sdhci_pxav2_exit); 227 + module_platform_driver(sdhci_pxav2_driver); 238 228 239 229 MODULE_DESCRIPTION("SDHCI driver for pxav2"); 240 230 MODULE_AUTHOR("Marvell International Ltd.");
+1 -11
drivers/mmc/host/sdhci-pxav3.c
··· 269 269 .probe = sdhci_pxav3_probe, 270 270 .remove = __devexit_p(sdhci_pxav3_remove), 271 271 }; 272 - static int __init sdhci_pxav3_init(void) 273 - { 274 - return platform_driver_register(&sdhci_pxav3_driver); 275 - } 276 272 277 - static void __exit sdhci_pxav3_exit(void) 278 - { 279 - platform_driver_unregister(&sdhci_pxav3_driver); 280 - } 281 - 282 - module_init(sdhci_pxav3_init); 283 - module_exit(sdhci_pxav3_exit); 273 + module_platform_driver(sdhci_pxav3_driver); 284 274 285 275 MODULE_DESCRIPTION("SDHCI driver for pxav3"); 286 276 MODULE_AUTHOR("Marvell International Ltd.");
+5 -13
drivers/mmc/host/sdhci-s3c.c
··· 80 80 81 81 tmp &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK; 82 82 tmp |= ourhost->cur_clk << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT; 83 - writel(tmp, host->ioaddr + 0x80); 83 + writel(tmp, host->ioaddr + S3C_SDHCI_CONTROL2); 84 84 } 85 85 } 86 86 ··· 521 521 if (pdata->host_caps) 522 522 host->mmc->caps |= pdata->host_caps; 523 523 524 + if (pdata->pm_caps) 525 + host->mmc->pm_caps |= pdata->pm_caps; 526 + 524 527 host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR | 525 528 SDHCI_QUIRK_32BIT_DMA_SIZE); 526 529 ··· 657 654 }, 658 655 }; 659 656 660 - static int __init sdhci_s3c_init(void) 661 - { 662 - return platform_driver_register(&sdhci_s3c_driver); 663 - } 664 - 665 - static void __exit sdhci_s3c_exit(void) 666 - { 667 - platform_driver_unregister(&sdhci_s3c_driver); 668 - } 669 - 670 - module_init(sdhci_s3c_init); 671 - module_exit(sdhci_s3c_exit); 657 + module_platform_driver(sdhci_s3c_driver); 672 658 673 659 MODULE_DESCRIPTION("Samsung SDHCI (HSMMC) glue"); 674 660 MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
+40 -11
drivers/mmc/host/sdhci-spear.c
··· 21 21 #include <linux/interrupt.h> 22 22 #include <linux/irq.h> 23 23 #include <linux/platform_device.h> 24 + #include <linux/pm.h> 24 25 #include <linux/slab.h> 25 26 #include <linux/mmc/host.h> 26 27 #include <linux/mmc/sdhci-spear.h> ··· 272 271 return 0; 273 272 } 274 273 274 + #ifdef CONFIG_PM 275 + static int sdhci_suspend(struct device *dev) 276 + { 277 + struct sdhci_host *host = dev_get_drvdata(dev); 278 + struct spear_sdhci *sdhci = dev_get_platdata(dev); 279 + int ret; 280 + 281 + ret = sdhci_suspend_host(host); 282 + if (!ret) 283 + clk_disable(sdhci->clk); 284 + 285 + return ret; 286 + } 287 + 288 + static int sdhci_resume(struct device *dev) 289 + { 290 + struct sdhci_host *host = dev_get_drvdata(dev); 291 + struct spear_sdhci *sdhci = dev_get_platdata(dev); 292 + int ret; 293 + 294 + ret = clk_enable(sdhci->clk); 295 + if (ret) { 296 + dev_dbg(dev, "Resume: Error enabling clock\n"); 297 + return ret; 298 + } 299 + 300 + return sdhci_resume_host(host); 301 + } 302 + 303 + const struct dev_pm_ops sdhci_pm_ops = { 304 + .suspend = sdhci_suspend, 305 + .resume = sdhci_resume, 306 + }; 307 + #endif 308 + 275 309 static struct platform_driver sdhci_driver = { 276 310 .driver = { 277 311 .name = "sdhci", 278 312 .owner = THIS_MODULE, 313 + #ifdef CONFIG_PM 314 + .pm = &sdhci_pm_ops, 315 + #endif 279 316 }, 280 317 .probe = sdhci_probe, 281 318 .remove = __devexit_p(sdhci_remove), 282 319 }; 283 320 284 - static int __init sdhci_init(void) 285 - { 286 - return platform_driver_register(&sdhci_driver); 287 - } 288 - module_init(sdhci_init); 289 - 290 - static void __exit sdhci_exit(void) 291 - { 292 - platform_driver_unregister(&sdhci_driver); 293 - } 294 - module_exit(sdhci_exit); 321 + module_platform_driver(sdhci_driver); 295 322 296 323 MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver"); 297 324 MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+1 -11
drivers/mmc/host/sdhci-tegra.c
··· 324 324 .remove = __devexit_p(sdhci_tegra_remove), 325 325 }; 326 326 327 - static int __init sdhci_tegra_init(void) 328 - { 329 - return platform_driver_register(&sdhci_tegra_driver); 330 - } 331 - module_init(sdhci_tegra_init); 332 - 333 - static void __exit sdhci_tegra_exit(void) 334 - { 335 - platform_driver_unregister(&sdhci_tegra_driver); 336 - } 337 - module_exit(sdhci_tegra_exit); 327 + module_platform_driver(sdhci_tegra_driver); 338 328 339 329 MODULE_DESCRIPTION("SDHCI driver for Tegra"); 340 330 MODULE_AUTHOR(" Google, Inc.");
+99 -51
drivers/mmc/host/sdhci.c
··· 49 49 50 50 static void sdhci_send_command(struct sdhci_host *, struct mmc_command *); 51 51 static void sdhci_finish_command(struct sdhci_host *); 52 - static int sdhci_execute_tuning(struct mmc_host *mmc); 52 + static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode); 53 53 static void sdhci_tuning_timer(unsigned long data); 54 54 55 55 #ifdef CONFIG_PM_RUNTIME ··· 146 146 { 147 147 u32 present, irqs; 148 148 149 - if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) 150 - return; 151 - 152 - if (host->quirks2 & SDHCI_QUIRK2_OWN_CARD_DETECTION) 149 + if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || 150 + !mmc_card_is_removable(host->mmc)) 153 151 return; 154 152 155 153 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & ··· 212 214 213 215 if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) 214 216 sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier); 217 + 218 + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 219 + if ((host->ops->enable_dma) && (mask & SDHCI_RESET_ALL)) 220 + host->ops->enable_dma(host); 221 + } 215 222 } 216 223 217 224 static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios); ··· 426 423 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags) 427 424 { 428 425 local_irq_save(*flags); 429 - return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; 426 + return kmap_atomic(sg_page(sg)) + sg->offset; 430 427 } 431 428 432 429 static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags) 433 430 { 434 - kunmap_atomic(buffer, KM_BIO_SRC_IRQ); 431 + kunmap_atomic(buffer); 435 432 local_irq_restore(*flags); 436 433 } 437 434 ··· 1019 1016 flags |= SDHCI_CMD_INDEX; 1020 1017 1021 1018 /* CMD19 is special in that the Data Present Select should be set */ 1022 - if (cmd->data || (cmd->opcode == MMC_SEND_TUNING_BLOCK)) 1019 + if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK || 1020 + cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200) 1023 1021 flags |= SDHCI_CMD_DATA; 1024 1022 
1025 1023 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND); ··· 1070 1066 static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) 1071 1067 { 1072 1068 int div = 0; /* Initialized for compiler warning */ 1069 + int real_div = div, clk_mul = 1; 1073 1070 u16 clk = 0; 1074 1071 unsigned long timeout; 1075 1072 1076 - if (clock == host->clock) 1073 + if (clock && clock == host->clock) 1077 1074 return; 1075 + 1076 + host->mmc->actual_clock = 0; 1078 1077 1079 1078 if (host->ops->set_clock) { 1080 1079 host->ops->set_clock(host, clock); ··· 1116 1109 * Control register. 1117 1110 */ 1118 1111 clk = SDHCI_PROG_CLOCK_MODE; 1112 + real_div = div; 1113 + clk_mul = host->clk_mul; 1119 1114 div--; 1120 1115 } 1121 1116 } else { ··· 1131 1122 break; 1132 1123 } 1133 1124 } 1125 + real_div = div; 1134 1126 div >>= 1; 1135 1127 } 1136 1128 } else { ··· 1140 1130 if ((host->max_clk / div) <= clock) 1141 1131 break; 1142 1132 } 1133 + real_div = div; 1143 1134 div >>= 1; 1144 1135 } 1136 + 1137 + if (real_div) 1138 + host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div; 1145 1139 1146 1140 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT; 1147 1141 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN) ··· 1174 1160 host->clock = clock; 1175 1161 } 1176 1162 1177 - static void sdhci_set_power(struct sdhci_host *host, unsigned short power) 1163 + static int sdhci_set_power(struct sdhci_host *host, unsigned short power) 1178 1164 { 1179 1165 u8 pwr = 0; 1180 1166 ··· 1197 1183 } 1198 1184 1199 1185 if (host->pwr == pwr) 1200 - return; 1186 + return -1; 1201 1187 1202 1188 host->pwr = pwr; 1203 1189 1204 1190 if (pwr == 0) { 1205 1191 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 1206 - return; 1192 + return 0; 1207 1193 } 1208 1194 1209 1195 /* ··· 1230 1216 */ 1231 1217 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) 1232 1218 mdelay(10); 1219 + 1220 + return power; 1233 1221 } 1234 1222 1235 1223 
/*****************************************************************************\ ··· 1293 1277 if ((host->flags & SDHCI_NEEDS_RETUNING) && 1294 1278 !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) { 1295 1279 spin_unlock_irqrestore(&host->lock, flags); 1296 - sdhci_execute_tuning(mmc); 1280 + sdhci_execute_tuning(mmc, mrq->cmd->opcode); 1297 1281 spin_lock_irqsave(&host->lock, flags); 1298 1282 1299 1283 /* Restore original mmc_request structure */ ··· 1313 1297 static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) 1314 1298 { 1315 1299 unsigned long flags; 1300 + int vdd_bit = -1; 1316 1301 u8 ctrl; 1317 1302 1318 1303 spin_lock_irqsave(&host->lock, flags); 1319 1304 1320 - if (host->flags & SDHCI_DEVICE_DEAD) 1321 - goto out; 1305 + if (host->flags & SDHCI_DEVICE_DEAD) { 1306 + spin_unlock_irqrestore(&host->lock, flags); 1307 + if (host->vmmc && ios->power_mode == MMC_POWER_OFF) 1308 + mmc_regulator_set_ocr(host->mmc, host->vmmc, 0); 1309 + return; 1310 + } 1322 1311 1323 1312 /* 1324 1313 * Reset the chip on each power off. 
··· 1337 1316 sdhci_set_clock(host, ios->clock); 1338 1317 1339 1318 if (ios->power_mode == MMC_POWER_OFF) 1340 - sdhci_set_power(host, -1); 1319 + vdd_bit = sdhci_set_power(host, -1); 1341 1320 else 1342 - sdhci_set_power(host, ios->vdd); 1321 + vdd_bit = sdhci_set_power(host, ios->vdd); 1322 + 1323 + if (host->vmmc && vdd_bit != -1) { 1324 + spin_unlock_irqrestore(&host->lock, flags); 1325 + mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd_bit); 1326 + spin_lock_irqsave(&host->lock, flags); 1327 + } 1343 1328 1344 1329 if (host->ops->platform_send_init_74_clocks) 1345 1330 host->ops->platform_send_init_74_clocks(host, ios->power_mode); ··· 1388 1361 unsigned int clock; 1389 1362 1390 1363 /* In case of UHS-I modes, set High Speed Enable */ 1391 - if ((ios->timing == MMC_TIMING_UHS_SDR50) || 1364 + if ((ios->timing == MMC_TIMING_MMC_HS200) || 1365 + (ios->timing == MMC_TIMING_UHS_SDR50) || 1392 1366 (ios->timing == MMC_TIMING_UHS_SDR104) || 1393 1367 (ios->timing == MMC_TIMING_UHS_DDR50) || 1394 - (ios->timing == MMC_TIMING_UHS_SDR25) || 1395 - (ios->timing == MMC_TIMING_UHS_SDR12)) 1368 + (ios->timing == MMC_TIMING_UHS_SDR25)) 1396 1369 ctrl |= SDHCI_CTRL_HISPD; 1397 1370 1398 1371 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); ··· 1442 1415 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1443 1416 /* Select Bus Speed Mode for host */ 1444 1417 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; 1445 - if (ios->timing == MMC_TIMING_UHS_SDR12) 1418 + if (ios->timing == MMC_TIMING_MMC_HS200) 1419 + ctrl_2 |= SDHCI_CTRL_HS_SDR200; 1420 + else if (ios->timing == MMC_TIMING_UHS_SDR12) 1446 1421 ctrl_2 |= SDHCI_CTRL_UHS_SDR12; 1447 1422 else if (ios->timing == MMC_TIMING_UHS_SDR25) 1448 1423 ctrl_2 |= SDHCI_CTRL_UHS_SDR25; ··· 1472 1443 if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS) 1473 1444 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 1474 1445 1475 - out: 1476 1446 mmiowb(); 1477 1447 spin_unlock_irqrestore(&host->lock, flags); 1478 1448 } ··· 1691 1663 return err; 
1692 1664 } 1693 1665 1694 - static int sdhci_execute_tuning(struct mmc_host *mmc) 1666 + static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) 1695 1667 { 1696 1668 struct sdhci_host *host; 1697 1669 u16 ctrl; ··· 1699 1671 int tuning_loop_counter = MAX_TUNING_LOOP; 1700 1672 unsigned long timeout; 1701 1673 int err = 0; 1674 + bool requires_tuning_nonuhs = false; 1702 1675 1703 1676 host = mmc_priv(mmc); 1704 1677 ··· 1710 1681 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1711 1682 1712 1683 /* 1713 - * Host Controller needs tuning only in case of SDR104 mode 1714 - * and for SDR50 mode when Use Tuning for SDR50 is set in 1684 + * The Host Controller needs tuning only in case of SDR104 mode 1685 + * and for SDR50 mode when Use Tuning for SDR50 is set in the 1715 1686 * Capabilities register. 1687 + * If the Host Controller supports the HS200 mode then the 1688 + * tuning function has to be executed. 1716 1689 */ 1690 + if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) && 1691 + (host->flags & SDHCI_SDR50_NEEDS_TUNING || 1692 + host->flags & SDHCI_HS200_NEEDS_TUNING)) 1693 + requires_tuning_nonuhs = true; 1694 + 1717 1695 if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) || 1718 - (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) && 1719 - (host->flags & SDHCI_SDR50_NEEDS_TUNING))) 1696 + requires_tuning_nonuhs) 1720 1697 ctrl |= SDHCI_CTRL_EXEC_TUNING; 1721 1698 else { 1722 1699 spin_unlock(&host->lock); ··· 1758 1723 if (!tuning_loop_counter && !timeout) 1759 1724 break; 1760 1725 1761 - cmd.opcode = MMC_SEND_TUNING_BLOCK; 1726 + cmd.opcode = opcode; 1762 1727 cmd.arg = 0; 1763 1728 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; 1764 1729 cmd.retries = 0; ··· 1773 1738 * block to the Host Controller. So we set the block size 1774 1739 * to 64 here. 
1775 1740 */ 1776 - sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), SDHCI_BLOCK_SIZE); 1741 + if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) { 1742 + if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) 1743 + sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128), 1744 + SDHCI_BLOCK_SIZE); 1745 + else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) 1746 + sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), 1747 + SDHCI_BLOCK_SIZE); 1748 + } else { 1749 + sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), 1750 + SDHCI_BLOCK_SIZE); 1751 + } 1777 1752 1778 1753 /* 1779 1754 * The tuning block is sent by the card to the host controller. ··· 2166 2121 2167 2122 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) 2168 2123 { 2124 + u32 command; 2169 2125 BUG_ON(intmask == 0); 2170 2126 2171 2127 /* CMD19 generates _only_ Buffer Read Ready interrupt */ 2172 2128 if (intmask & SDHCI_INT_DATA_AVAIL) { 2173 - if (SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) == 2174 - MMC_SEND_TUNING_BLOCK) { 2129 + command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)); 2130 + if (command == MMC_SEND_TUNING_BLOCK || 2131 + command == MMC_SEND_TUNING_BLOCK_HS200) { 2175 2132 host->tuning_done = 1; 2176 2133 wake_up(&host->buf_ready_int); 2177 2134 return; ··· 2377 2330 int sdhci_suspend_host(struct sdhci_host *host) 2378 2331 { 2379 2332 int ret; 2333 + bool has_tuning_timer; 2380 2334 2381 2335 sdhci_disable_card_detection(host); 2382 2336 2383 2337 /* Disable tuning since we are suspending */ 2384 - if (host->version >= SDHCI_SPEC_300 && host->tuning_count && 2385 - host->tuning_mode == SDHCI_TUNING_MODE_1) { 2338 + has_tuning_timer = host->version >= SDHCI_SPEC_300 && 2339 + host->tuning_count && host->tuning_mode == SDHCI_TUNING_MODE_1; 2340 + if (has_tuning_timer) { 2341 + del_timer_sync(&host->tuning_timer); 2386 2342 host->flags &= ~SDHCI_NEEDS_RETUNING; 2387 - mod_timer(&host->tuning_timer, jiffies + 2388 - host->tuning_count * HZ); 2389 2343 } 2390 2344 2391 2345 ret = mmc_suspend_host(host->mmc); 2392 - if 
(ret) 2346 + if (ret) { 2347 + if (has_tuning_timer) { 2348 + host->flags |= SDHCI_NEEDS_RETUNING; 2349 + mod_timer(&host->tuning_timer, jiffies + 2350 + host->tuning_count * HZ); 2351 + } 2352 + 2353 + sdhci_enable_card_detection(host); 2354 + 2393 2355 return ret; 2356 + } 2394 2357 2395 2358 free_irq(host->irq, host); 2396 - 2397 - if (host->vmmc) 2398 - ret = regulator_disable(host->vmmc); 2399 2359 2400 2360 return ret; 2401 2361 } ··· 2412 2358 int sdhci_resume_host(struct sdhci_host *host) 2413 2359 { 2414 2360 int ret; 2415 - 2416 - if (host->vmmc) { 2417 - int ret = regulator_enable(host->vmmc); 2418 - if (ret) 2419 - return ret; 2420 - } 2421 2361 2422 2362 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 2423 2363 if (host->ops->enable_dma) ··· 2775 2727 if (caps[1] & SDHCI_SUPPORT_DDR50) 2776 2728 mmc->caps |= MMC_CAP_UHS_DDR50; 2777 2729 2778 - /* Does the host needs tuning for SDR50? */ 2730 + /* Does the host need tuning for SDR50? */ 2779 2731 if (caps[1] & SDHCI_USE_SDR50_TUNING) 2780 2732 host->flags |= SDHCI_SDR50_NEEDS_TUNING; 2733 + 2734 + /* Does the host need tuning for HS200? */ 2735 + if (mmc->caps2 & MMC_CAP2_HS200) 2736 + host->flags |= SDHCI_HS200_NEEDS_TUNING; 2781 2737 2782 2738 /* Driver Type(s) (A, C, D) supported by the host */ 2783 2739 if (caps[1] & SDHCI_DRIVER_TYPE_A) ··· 2978 2926 if (IS_ERR(host->vmmc)) { 2979 2927 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc)); 2980 2928 host->vmmc = NULL; 2981 - } else { 2982 - regulator_enable(host->vmmc); 2983 2929 } 2984 2930 2985 2931 sdhci_init(host, 0); ··· 3066 3016 tasklet_kill(&host->card_tasklet); 3067 3017 tasklet_kill(&host->finish_tasklet); 3068 3018 3069 - if (host->vmmc) { 3070 - regulator_disable(host->vmmc); 3019 + if (host->vmmc) 3071 3020 regulator_put(host->vmmc); 3072 - } 3073 3021 3074 3022 kfree(host->adma_desc); 3075 3023 kfree(host->align_buffer);
+1
drivers/mmc/host/sdhci.h
··· 158 158 #define SDHCI_CTRL_UHS_SDR50 0x0002 159 159 #define SDHCI_CTRL_UHS_SDR104 0x0003 160 160 #define SDHCI_CTRL_UHS_DDR50 0x0004 161 + #define SDHCI_CTRL_HS_SDR200 0x0005 /* reserved value in SDIO spec */ 161 162 #define SDHCI_CTRL_VDD_180 0x0008 162 163 #define SDHCI_CTRL_DRV_TYPE_MASK 0x0030 163 164 #define SDHCI_CTRL_DRV_TYPE_B 0x0000
+472 -244
drivers/mmc/host/sh_mmcif.c
··· 16 16 * 17 17 */ 18 18 19 + /* 20 + * The MMCIF driver is now processing MMC requests asynchronously, according 21 + * to the Linux MMC API requirement. 22 + * 23 + * The MMCIF driver processes MMC requests in up to 3 stages: command, optional 24 + * data, and optional stop. To achieve asynchronous processing each of these 25 + * stages is split into two halves: a top and a bottom half. The top half 26 + * initialises the hardware, installs a timeout handler to handle completion 27 + * timeouts, and returns. In case of the command stage this immediately returns 28 + * control to the caller, leaving all further processing to run asynchronously. 29 + * All further request processing is performed by the bottom halves. 30 + * 31 + * The bottom half further consists of a "hard" IRQ handler, an IRQ handler 32 + * thread, a DMA completion callback, if DMA is used, a timeout work, and 33 + * request- and stage-specific handler methods. 34 + * 35 + * Each bottom half run begins with either a hardware interrupt, a DMA callback 36 + * invocation, or a timeout work run. In case of an error or a successful 37 + * processing completion, the MMC core is informed and the request processing is 38 + * finished. In case processing has to continue, i.e., if data has to be read 39 + * from or written to the card, or if a stop command has to be sent, the next 40 + * top half is called, which performs the necessary hardware handling and 41 + * reschedules the timeout work. This returns the driver state machine into the 42 + * bottom half waiting state. 
43 + */ 44 + 45 + #include <linux/bitops.h> 19 46 #include <linux/clk.h> 20 47 #include <linux/completion.h> 21 48 #include <linux/delay.h> ··· 150 123 #define MASK_MRBSYTO (1 << 1) 151 124 #define MASK_MRSPTO (1 << 0) 152 125 126 + #define MASK_START_CMD (MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \ 127 + MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \ 128 + MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | \ 129 + MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO) 130 + 153 131 /* CE_HOST_STS1 */ 154 132 #define STS1_CMDSEQ (1 << 31) 155 133 ··· 194 162 STATE_IOS, 195 163 }; 196 164 165 + enum mmcif_wait_for { 166 + MMCIF_WAIT_FOR_REQUEST, 167 + MMCIF_WAIT_FOR_CMD, 168 + MMCIF_WAIT_FOR_MREAD, 169 + MMCIF_WAIT_FOR_MWRITE, 170 + MMCIF_WAIT_FOR_READ, 171 + MMCIF_WAIT_FOR_WRITE, 172 + MMCIF_WAIT_FOR_READ_END, 173 + MMCIF_WAIT_FOR_WRITE_END, 174 + MMCIF_WAIT_FOR_STOP, 175 + }; 176 + 197 177 struct sh_mmcif_host { 198 178 struct mmc_host *mmc; 199 - struct mmc_data *data; 179 + struct mmc_request *mrq; 200 180 struct platform_device *pd; 201 181 struct sh_dmae_slave dma_slave_tx; 202 182 struct sh_dmae_slave dma_slave_rx; ··· 216 172 unsigned int clk; 217 173 int bus_width; 218 174 bool sd_error; 175 + bool dying; 219 176 long timeout; 220 177 void __iomem *addr; 221 - struct completion intr_wait; 178 + u32 *pio_ptr; 179 + spinlock_t lock; /* protect sh_mmcif_host::state */ 222 180 enum mmcif_state state; 223 - spinlock_t lock; 181 + enum mmcif_wait_for wait_for; 182 + struct delayed_work timeout_work; 183 + size_t blocksize; 184 + int sg_idx; 185 + int sg_blkidx; 224 186 bool power; 225 187 bool card_present; 226 188 ··· 252 202 static void mmcif_dma_complete(void *arg) 253 203 { 254 204 struct sh_mmcif_host *host = arg; 205 + struct mmc_data *data = host->mrq->data; 206 + 255 207 dev_dbg(&host->pd->dev, "Command completed\n"); 256 208 257 - if (WARN(!host->data, "%s: NULL data in DMA completion!\n", 209 + if (WARN(!data, "%s: NULL data in DMA completion!\n", 258 210 
dev_name(&host->pd->dev))) 259 211 return; 260 212 261 - if (host->data->flags & MMC_DATA_READ) 213 + if (data->flags & MMC_DATA_READ) 262 214 dma_unmap_sg(host->chan_rx->device->dev, 263 - host->data->sg, host->data->sg_len, 215 + data->sg, data->sg_len, 264 216 DMA_FROM_DEVICE); 265 217 else 266 218 dma_unmap_sg(host->chan_tx->device->dev, 267 - host->data->sg, host->data->sg_len, 219 + data->sg, data->sg_len, 268 220 DMA_TO_DEVICE); 269 221 270 222 complete(&host->dma_complete); ··· 274 222 275 223 static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host) 276 224 { 277 - struct scatterlist *sg = host->data->sg; 225 + struct mmc_data *data = host->mrq->data; 226 + struct scatterlist *sg = data->sg; 278 227 struct dma_async_tx_descriptor *desc = NULL; 279 228 struct dma_chan *chan = host->chan_rx; 280 229 dma_cookie_t cookie = -EINVAL; 281 230 int ret; 282 231 283 - ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len, 232 + ret = dma_map_sg(chan->device->dev, sg, data->sg_len, 284 233 DMA_FROM_DEVICE); 285 234 if (ret > 0) { 286 235 host->dma_active = true; ··· 297 244 dma_async_issue_pending(chan); 298 245 } 299 246 dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n", 300 - __func__, host->data->sg_len, ret, cookie); 247 + __func__, data->sg_len, ret, cookie); 301 248 302 249 if (!desc) { 303 250 /* DMA failed, fall back to PIO */ ··· 318 265 } 319 266 320 267 dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, 321 - desc, cookie, host->data->sg_len); 268 + desc, cookie, data->sg_len); 322 269 } 323 270 324 271 static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host) 325 272 { 326 - struct scatterlist *sg = host->data->sg; 273 + struct mmc_data *data = host->mrq->data; 274 + struct scatterlist *sg = data->sg; 327 275 struct dma_async_tx_descriptor *desc = NULL; 328 276 struct dma_chan *chan = host->chan_tx; 329 277 dma_cookie_t cookie = -EINVAL; 330 278 int ret; 331 279 332 - ret = dma_map_sg(chan->device->dev, sg, 
host->data->sg_len, 280 + ret = dma_map_sg(chan->device->dev, sg, data->sg_len, 333 281 DMA_TO_DEVICE); 334 282 if (ret > 0) { 335 283 host->dma_active = true; ··· 346 292 dma_async_issue_pending(chan); 347 293 } 348 294 dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n", 349 - __func__, host->data->sg_len, ret, cookie); 295 + __func__, data->sg_len, ret, cookie); 350 296 351 297 if (!desc) { 352 298 /* DMA failed, fall back to PIO */ ··· 453 399 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK); 454 400 else 455 401 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & 456 - (ilog2(__rounddown_pow_of_two(host->clk / clk)) << 16)); 402 + ((fls(host->clk / clk) - 1) << 16)); 457 403 458 404 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE); 459 405 } ··· 475 421 static int sh_mmcif_error_manage(struct sh_mmcif_host *host) 476 422 { 477 423 u32 state1, state2; 478 - int ret, timeout = 10000000; 424 + int ret, timeout; 479 425 480 426 host->sd_error = false; 481 427 ··· 487 433 if (state1 & STS1_CMDSEQ) { 488 434 sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK); 489 435 sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK); 490 - while (1) { 491 - timeout--; 492 - if (timeout < 0) { 493 - dev_err(&host->pd->dev, 494 - "Forceed end of command sequence timeout err\n"); 495 - return -EIO; 496 - } 436 + for (timeout = 10000000; timeout; timeout--) { 497 437 if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1) 498 - & STS1_CMDSEQ)) 438 + & STS1_CMDSEQ)) 499 439 break; 500 440 mdelay(1); 441 + } 442 + if (!timeout) { 443 + dev_err(&host->pd->dev, 444 + "Forced end of command sequence timeout err\n"); 445 + return -EIO; 501 446 } 502 447 sh_mmcif_sync_reset(host); 503 448 dev_dbg(&host->pd->dev, "Forced end of command sequence\n"); ··· 504 451 } 505 452 506 453 if (state2 & STS2_CRC_ERR) { 507 - dev_dbg(&host->pd->dev, ": Happened CRC error\n"); 454 + dev_dbg(&host->pd->dev, ": CRC error\n"); 508 455 ret = -EIO; 509 456 } else if (state2 & 
STS2_TIMEOUT_ERR) { 510 - dev_dbg(&host->pd->dev, ": Happened Timeout error\n"); 457 + dev_dbg(&host->pd->dev, ": Timeout\n"); 511 458 ret = -ETIMEDOUT; 512 459 } else { 513 - dev_dbg(&host->pd->dev, ": Happened End/Index error\n"); 460 + dev_dbg(&host->pd->dev, ": End/Index error\n"); 514 461 ret = -EIO; 515 462 } 516 463 return ret; 517 464 } 518 465 519 - static int sh_mmcif_single_read(struct sh_mmcif_host *host, 520 - struct mmc_request *mrq) 466 + static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p) 521 467 { 522 - struct mmc_data *data = mrq->data; 523 - long time; 524 - u32 blocksize, i, *p = sg_virt(data->sg); 468 + struct mmc_data *data = host->mrq->data; 469 + 470 + host->sg_blkidx += host->blocksize; 471 + 472 + /* data->sg->length must be a multiple of host->blocksize? */ 473 + BUG_ON(host->sg_blkidx > data->sg->length); 474 + 475 + if (host->sg_blkidx == data->sg->length) { 476 + host->sg_blkidx = 0; 477 + if (++host->sg_idx < data->sg_len) 478 + host->pio_ptr = sg_virt(++data->sg); 479 + } else { 480 + host->pio_ptr = p; 481 + } 482 + 483 + if (host->sg_idx == data->sg_len) 484 + return false; 485 + 486 + return true; 487 + } 488 + 489 + static void sh_mmcif_single_read(struct sh_mmcif_host *host, 490 + struct mmc_request *mrq) 491 + { 492 + host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & 493 + BLOCK_SIZE_MASK) + 3; 494 + 495 + host->wait_for = MMCIF_WAIT_FOR_READ; 496 + schedule_delayed_work(&host->timeout_work, host->timeout); 525 497 526 498 /* buf read enable */ 527 499 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); 528 - time = wait_for_completion_interruptible_timeout(&host->intr_wait, 529 - host->timeout); 530 - if (time <= 0 || host->sd_error) 531 - return sh_mmcif_error_manage(host); 500 + } 532 501 533 - blocksize = (BLOCK_SIZE_MASK & 534 - sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3; 535 - for (i = 0; i < blocksize / 4; i++) 502 + static bool sh_mmcif_read_block(struct sh_mmcif_host 
*host) 503 + { 504 + struct mmc_data *data = host->mrq->data; 505 + u32 *p = sg_virt(data->sg); 506 + int i; 507 + 508 + if (host->sd_error) { 509 + data->error = sh_mmcif_error_manage(host); 510 + return false; 511 + } 512 + 513 + for (i = 0; i < host->blocksize / 4; i++) 536 514 *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA); 537 515 538 516 /* buffer read end */ 539 517 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); 540 - time = wait_for_completion_interruptible_timeout(&host->intr_wait, 541 - host->timeout); 542 - if (time <= 0 || host->sd_error) 543 - return sh_mmcif_error_manage(host); 518 + host->wait_for = MMCIF_WAIT_FOR_READ_END; 544 519 545 - return 0; 520 + return true; 546 521 } 547 522 548 - static int sh_mmcif_multi_read(struct sh_mmcif_host *host, 549 - struct mmc_request *mrq) 523 + static void sh_mmcif_multi_read(struct sh_mmcif_host *host, 524 + struct mmc_request *mrq) 550 525 { 551 526 struct mmc_data *data = mrq->data; 552 - long time; 553 - u32 blocksize, i, j, sec, *p; 554 527 555 - blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr, 556 - MMCIF_CE_BLOCK_SET); 557 - for (j = 0; j < data->sg_len; j++) { 558 - p = sg_virt(data->sg); 559 - for (sec = 0; sec < data->sg->length / blocksize; sec++) { 560 - sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); 561 - /* buf read enable */ 562 - time = wait_for_completion_interruptible_timeout(&host->intr_wait, 563 - host->timeout); 528 + if (!data->sg_len || !data->sg->length) 529 + return; 564 530 565 - if (time <= 0 || host->sd_error) 566 - return sh_mmcif_error_manage(host); 531 + host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & 532 + BLOCK_SIZE_MASK; 567 533 568 - for (i = 0; i < blocksize / 4; i++) 569 - *p++ = sh_mmcif_readl(host->addr, 570 - MMCIF_CE_DATA); 571 - } 572 - if (j < data->sg_len - 1) 573 - data->sg++; 534 + host->wait_for = MMCIF_WAIT_FOR_MREAD; 535 + host->sg_idx = 0; 536 + host->sg_blkidx = 0; 537 + host->pio_ptr = sg_virt(data->sg); 538 + 
schedule_delayed_work(&host->timeout_work, host->timeout); 539 + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); 540 + } 541 + 542 + static bool sh_mmcif_mread_block(struct sh_mmcif_host *host) 543 + { 544 + struct mmc_data *data = host->mrq->data; 545 + u32 *p = host->pio_ptr; 546 + int i; 547 + 548 + if (host->sd_error) { 549 + data->error = sh_mmcif_error_manage(host); 550 + return false; 574 551 } 575 - return 0; 552 + 553 + BUG_ON(!data->sg->length); 554 + 555 + for (i = 0; i < host->blocksize / 4; i++) 556 + *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA); 557 + 558 + if (!sh_mmcif_next_block(host, p)) 559 + return false; 560 + 561 + schedule_delayed_work(&host->timeout_work, host->timeout); 562 + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); 563 + 564 + return true; 576 565 } 577 566 578 - static int sh_mmcif_single_write(struct sh_mmcif_host *host, 567 + static void sh_mmcif_single_write(struct sh_mmcif_host *host, 579 568 struct mmc_request *mrq) 580 569 { 581 - struct mmc_data *data = mrq->data; 582 - long time; 583 - u32 blocksize, i, *p = sg_virt(data->sg); 570 + host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & 571 + BLOCK_SIZE_MASK) + 3; 584 572 585 - sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); 573 + host->wait_for = MMCIF_WAIT_FOR_WRITE; 574 + schedule_delayed_work(&host->timeout_work, host->timeout); 586 575 587 576 /* buf write enable */ 588 - time = wait_for_completion_interruptible_timeout(&host->intr_wait, 589 - host->timeout); 590 - if (time <= 0 || host->sd_error) 591 - return sh_mmcif_error_manage(host); 577 + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); 578 + } 592 579 593 - blocksize = (BLOCK_SIZE_MASK & 594 - sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3; 595 - for (i = 0; i < blocksize / 4; i++) 580 + static bool sh_mmcif_write_block(struct sh_mmcif_host *host) 581 + { 582 + struct mmc_data *data = host->mrq->data; 583 + u32 *p = sg_virt(data->sg); 584 + int i; 585 + 586 + 
if (host->sd_error) { 587 + data->error = sh_mmcif_error_manage(host); 588 + return false; 589 + } 590 + 591 + for (i = 0; i < host->blocksize / 4; i++) 596 592 sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++); 597 593 598 594 /* buffer write end */ 599 595 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); 596 + host->wait_for = MMCIF_WAIT_FOR_WRITE_END; 600 597 601 - time = wait_for_completion_interruptible_timeout(&host->intr_wait, 602 - host->timeout); 603 - if (time <= 0 || host->sd_error) 604 - return sh_mmcif_error_manage(host); 605 - 606 - return 0; 598 + return true; 607 599 } 608 600 609 - static int sh_mmcif_multi_write(struct sh_mmcif_host *host, 610 - struct mmc_request *mrq) 601 + static void sh_mmcif_multi_write(struct sh_mmcif_host *host, 602 + struct mmc_request *mrq) 611 603 { 612 604 struct mmc_data *data = mrq->data; 613 - long time; 614 - u32 i, sec, j, blocksize, *p; 615 605 616 - blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr, 617 - MMCIF_CE_BLOCK_SET); 606 + if (!data->sg_len || !data->sg->length) 607 + return; 618 608 619 - for (j = 0; j < data->sg_len; j++) { 620 - p = sg_virt(data->sg); 621 - for (sec = 0; sec < data->sg->length / blocksize; sec++) { 622 - sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); 623 - /* buf write enable*/ 624 - time = wait_for_completion_interruptible_timeout(&host->intr_wait, 625 - host->timeout); 609 + host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & 610 + BLOCK_SIZE_MASK; 626 611 627 - if (time <= 0 || host->sd_error) 628 - return sh_mmcif_error_manage(host); 612 + host->wait_for = MMCIF_WAIT_FOR_MWRITE; 613 + host->sg_idx = 0; 614 + host->sg_blkidx = 0; 615 + host->pio_ptr = sg_virt(data->sg); 616 + schedule_delayed_work(&host->timeout_work, host->timeout); 617 + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); 618 + } 629 619 630 - for (i = 0; i < blocksize / 4; i++) 631 - sh_mmcif_writel(host->addr, 632 - MMCIF_CE_DATA, *p++); 633 - } 634 - if (j < data->sg_len 
- 1) 635 - data->sg++; 620 + static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host) 621 + { 622 + struct mmc_data *data = host->mrq->data; 623 + u32 *p = host->pio_ptr; 624 + int i; 625 + 626 + if (host->sd_error) { 627 + data->error = sh_mmcif_error_manage(host); 628 + return false; 636 629 } 637 - return 0; 630 + 631 + BUG_ON(!data->sg->length); 632 + 633 + for (i = 0; i < host->blocksize / 4; i++) 634 + sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++); 635 + 636 + if (!sh_mmcif_next_block(host, p)) 637 + return false; 638 + 639 + schedule_delayed_work(&host->timeout_work, host->timeout); 640 + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); 641 + 642 + return true; 638 643 } 639 644 640 645 static void sh_mmcif_get_response(struct sh_mmcif_host *host, ··· 714 603 } 715 604 716 605 static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host, 717 - struct mmc_request *mrq, struct mmc_command *cmd, u32 opc) 606 + struct mmc_request *mrq) 718 607 { 608 + struct mmc_data *data = mrq->data; 609 + struct mmc_command *cmd = mrq->cmd; 610 + u32 opc = cmd->opcode; 719 611 u32 tmp = 0; 720 612 721 613 /* Response Type check */ ··· 750 636 break; 751 637 } 752 638 /* WDAT / DATW */ 753 - if (host->data) { 639 + if (data) { 754 640 tmp |= CMD_SET_WDAT; 755 641 switch (host->bus_width) { 756 642 case MMC_BUS_WIDTH_1: ··· 774 660 if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) { 775 661 tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN; 776 662 sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET, 777 - mrq->data->blocks << 16); 663 + data->blocks << 16); 778 664 } 779 665 /* RIDXC[1:0] check bits */ 780 666 if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID || ··· 788 674 opc == MMC_SEND_CSD || opc == MMC_SEND_CID) 789 675 tmp |= CMD_SET_CRC7C_INTERNAL; 790 676 791 - return opc = ((opc << 24) | tmp); 677 + return (opc << 24) | tmp; 792 678 } 793 679 794 680 static int sh_mmcif_data_trans(struct sh_mmcif_host *host, 795 - struct mmc_request *mrq, u32 opc) 681 + 
struct mmc_request *mrq, u32 opc) 796 682 { 797 - int ret; 798 - 799 683 switch (opc) { 800 684 case MMC_READ_MULTIPLE_BLOCK: 801 - ret = sh_mmcif_multi_read(host, mrq); 802 - break; 685 + sh_mmcif_multi_read(host, mrq); 686 + return 0; 803 687 case MMC_WRITE_MULTIPLE_BLOCK: 804 - ret = sh_mmcif_multi_write(host, mrq); 805 - break; 688 + sh_mmcif_multi_write(host, mrq); 689 + return 0; 806 690 case MMC_WRITE_BLOCK: 807 - ret = sh_mmcif_single_write(host, mrq); 808 - break; 691 + sh_mmcif_single_write(host, mrq); 692 + return 0; 809 693 case MMC_READ_SINGLE_BLOCK: 810 694 case MMC_SEND_EXT_CSD: 811 - ret = sh_mmcif_single_read(host, mrq); 812 - break; 695 + sh_mmcif_single_read(host, mrq); 696 + return 0; 813 697 default: 814 698 dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc); 815 - ret = -EINVAL; 816 - break; 699 + return -EINVAL; 817 700 } 818 - return ret; 819 701 } 820 702 821 703 static void sh_mmcif_start_cmd(struct sh_mmcif_host *host, 822 - struct mmc_request *mrq, struct mmc_command *cmd) 704 + struct mmc_request *mrq) 823 705 { 824 - long time; 825 - int ret = 0, mask = 0; 706 + struct mmc_command *cmd = mrq->cmd; 826 707 u32 opc = cmd->opcode; 708 + u32 mask; 827 709 828 710 switch (opc) { 829 - /* respons busy check */ 711 + /* response busy check */ 830 712 case MMC_SWITCH: 831 713 case MMC_STOP_TRANSMISSION: 832 714 case MMC_SET_WRITE_PROT: 833 715 case MMC_CLR_WRITE_PROT: 834 716 case MMC_ERASE: 835 717 case MMC_GEN_CMD: 836 - mask = MASK_MRBSYE; 718 + mask = MASK_START_CMD | MASK_MRBSYE; 837 719 break; 838 720 default: 839 - mask = MASK_MCRSPE; 721 + mask = MASK_START_CMD | MASK_MCRSPE; 840 722 break; 841 723 } 842 - mask |= MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | 843 - MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | 844 - MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | 845 - MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO; 846 724 847 - if (host->data) { 725 + if (mrq->data) { 848 726 sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0); 849 
727 sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 850 728 mrq->data->blksz); 851 729 } 852 - opc = sh_mmcif_set_cmd(host, mrq, cmd, opc); 730 + opc = sh_mmcif_set_cmd(host, mrq); 853 731 854 732 sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0); 855 733 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask); ··· 850 744 /* set cmd */ 851 745 sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc); 852 746 853 - time = wait_for_completion_interruptible_timeout(&host->intr_wait, 854 - host->timeout); 855 - if (time <= 0) { 856 - cmd->error = sh_mmcif_error_manage(host); 857 - return; 858 - } 859 - if (host->sd_error) { 860 - switch (cmd->opcode) { 861 - case MMC_ALL_SEND_CID: 862 - case MMC_SELECT_CARD: 863 - case MMC_APP_CMD: 864 - cmd->error = -ETIMEDOUT; 865 - break; 866 - default: 867 - dev_dbg(&host->pd->dev, "Cmd(d'%d) err\n", 868 - cmd->opcode); 869 - cmd->error = sh_mmcif_error_manage(host); 870 - break; 871 - } 872 - host->sd_error = false; 873 - return; 874 - } 875 - if (!(cmd->flags & MMC_RSP_PRESENT)) { 876 - cmd->error = 0; 877 - return; 878 - } 879 - sh_mmcif_get_response(host, cmd); 880 - if (host->data) { 881 - if (!host->dma_active) { 882 - ret = sh_mmcif_data_trans(host, mrq, cmd->opcode); 883 - } else { 884 - long time = 885 - wait_for_completion_interruptible_timeout(&host->dma_complete, 886 - host->timeout); 887 - if (!time) 888 - ret = -ETIMEDOUT; 889 - else if (time < 0) 890 - ret = time; 891 - sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, 892 - BUF_ACC_DMAREN | BUF_ACC_DMAWEN); 893 - host->dma_active = false; 894 - } 895 - if (ret < 0) 896 - mrq->data->bytes_xfered = 0; 897 - else 898 - mrq->data->bytes_xfered = 899 - mrq->data->blocks * mrq->data->blksz; 900 - } 901 - cmd->error = ret; 747 + host->wait_for = MMCIF_WAIT_FOR_CMD; 748 + schedule_delayed_work(&host->timeout_work, host->timeout); 902 749 } 903 750 904 751 static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host, 905 - struct mmc_request *mrq, struct mmc_command *cmd) 752 + struct 
mmc_request *mrq) 906 753 { 907 - long time; 908 - 909 - if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK) 754 + switch (mrq->cmd->opcode) { 755 + case MMC_READ_MULTIPLE_BLOCK: 910 756 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE); 911 - else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) 757 + break; 758 + case MMC_WRITE_MULTIPLE_BLOCK: 912 759 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE); 913 - else { 760 + break; 761 + default: 914 762 dev_err(&host->pd->dev, "unsupported stop cmd\n"); 915 - cmd->error = sh_mmcif_error_manage(host); 763 + mrq->stop->error = sh_mmcif_error_manage(host); 916 764 return; 917 765 } 918 766 919 - time = wait_for_completion_interruptible_timeout(&host->intr_wait, 920 - host->timeout); 921 - if (time <= 0 || host->sd_error) { 922 - cmd->error = sh_mmcif_error_manage(host); 923 - return; 924 - } 925 - sh_mmcif_get_cmd12response(host, cmd); 926 - cmd->error = 0; 767 + host->wait_for = MMCIF_WAIT_FOR_STOP; 768 + schedule_delayed_work(&host->timeout_work, host->timeout); 927 769 } 928 770 929 771 static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) ··· 910 856 default: 911 857 break; 912 858 } 913 - host->data = mrq->data; 914 - if (mrq->data) { 915 - if (mrq->data->flags & MMC_DATA_READ) { 916 - if (host->chan_rx) 917 - sh_mmcif_start_dma_rx(host); 918 - } else { 919 - if (host->chan_tx) 920 - sh_mmcif_start_dma_tx(host); 921 - } 922 - } 923 - sh_mmcif_start_cmd(host, mrq, mrq->cmd); 924 - host->data = NULL; 925 859 926 - if (!mrq->cmd->error && mrq->stop) 927 - sh_mmcif_stop_cmd(host, mrq, mrq->stop); 928 - host->state = STATE_IDLE; 929 - mmc_request_done(mmc, mrq); 860 + host->mrq = mrq; 861 + 862 + sh_mmcif_start_cmd(host, mrq); 930 863 } 931 864 932 865 static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) ··· 988 947 .get_cd = sh_mmcif_get_cd, 989 948 }; 990 949 991 - static void sh_mmcif_detect(struct mmc_host *mmc) 950 + static bool sh_mmcif_end_cmd(struct 
sh_mmcif_host *host) 992 951 { 993 - mmc_detect_change(mmc, 0); 952 + struct mmc_command *cmd = host->mrq->cmd; 953 + struct mmc_data *data = host->mrq->data; 954 + long time; 955 + 956 + if (host->sd_error) { 957 + switch (cmd->opcode) { 958 + case MMC_ALL_SEND_CID: 959 + case MMC_SELECT_CARD: 960 + case MMC_APP_CMD: 961 + cmd->error = -ETIMEDOUT; 962 + host->sd_error = false; 963 + break; 964 + default: 965 + cmd->error = sh_mmcif_error_manage(host); 966 + dev_dbg(&host->pd->dev, "Cmd(d'%d) error %d\n", 967 + cmd->opcode, cmd->error); 968 + break; 969 + } 970 + return false; 971 + } 972 + if (!(cmd->flags & MMC_RSP_PRESENT)) { 973 + cmd->error = 0; 974 + return false; 975 + } 976 + 977 + sh_mmcif_get_response(host, cmd); 978 + 979 + if (!data) 980 + return false; 981 + 982 + if (data->flags & MMC_DATA_READ) { 983 + if (host->chan_rx) 984 + sh_mmcif_start_dma_rx(host); 985 + } else { 986 + if (host->chan_tx) 987 + sh_mmcif_start_dma_tx(host); 988 + } 989 + 990 + if (!host->dma_active) { 991 + data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode); 992 + if (!data->error) 993 + return true; 994 + return false; 995 + } 996 + 997 + /* Running in the IRQ thread, can sleep */ 998 + time = wait_for_completion_interruptible_timeout(&host->dma_complete, 999 + host->timeout); 1000 + if (host->sd_error) { 1001 + dev_err(host->mmc->parent, 1002 + "Error IRQ while waiting for DMA completion!\n"); 1003 + /* Woken up by an error IRQ: abort DMA */ 1004 + if (data->flags & MMC_DATA_READ) 1005 + dmaengine_terminate_all(host->chan_rx); 1006 + else 1007 + dmaengine_terminate_all(host->chan_tx); 1008 + data->error = sh_mmcif_error_manage(host); 1009 + } else if (!time) { 1010 + data->error = -ETIMEDOUT; 1011 + } else if (time < 0) { 1012 + data->error = time; 1013 + } 1014 + sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, 1015 + BUF_ACC_DMAREN | BUF_ACC_DMAWEN); 1016 + host->dma_active = false; 1017 + 1018 + if (data->error) 1019 + data->bytes_xfered = 0; 1020 + 1021 + return false; 
1022 + } 1023 + 1024 + static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id) 1025 + { 1026 + struct sh_mmcif_host *host = dev_id; 1027 + struct mmc_request *mrq = host->mrq; 1028 + struct mmc_data *data = mrq->data; 1029 + 1030 + cancel_delayed_work_sync(&host->timeout_work); 1031 + 1032 + /* 1033 + * All handlers return true, if processing continues, and false, if the 1034 + * request has to be completed - successfully or not 1035 + */ 1036 + switch (host->wait_for) { 1037 + case MMCIF_WAIT_FOR_REQUEST: 1038 + /* We're too late, the timeout has already kicked in */ 1039 + return IRQ_HANDLED; 1040 + case MMCIF_WAIT_FOR_CMD: 1041 + if (sh_mmcif_end_cmd(host)) 1042 + /* Wait for data */ 1043 + return IRQ_HANDLED; 1044 + break; 1045 + case MMCIF_WAIT_FOR_MREAD: 1046 + if (sh_mmcif_mread_block(host)) 1047 + /* Wait for more data */ 1048 + return IRQ_HANDLED; 1049 + break; 1050 + case MMCIF_WAIT_FOR_READ: 1051 + if (sh_mmcif_read_block(host)) 1052 + /* Wait for data end */ 1053 + return IRQ_HANDLED; 1054 + break; 1055 + case MMCIF_WAIT_FOR_MWRITE: 1056 + if (sh_mmcif_mwrite_block(host)) 1057 + /* Wait data to write */ 1058 + return IRQ_HANDLED; 1059 + break; 1060 + case MMCIF_WAIT_FOR_WRITE: 1061 + if (sh_mmcif_write_block(host)) 1062 + /* Wait for data end */ 1063 + return IRQ_HANDLED; 1064 + break; 1065 + case MMCIF_WAIT_FOR_STOP: 1066 + if (host->sd_error) { 1067 + mrq->stop->error = sh_mmcif_error_manage(host); 1068 + break; 1069 + } 1070 + sh_mmcif_get_cmd12response(host, mrq->stop); 1071 + mrq->stop->error = 0; 1072 + break; 1073 + case MMCIF_WAIT_FOR_READ_END: 1074 + case MMCIF_WAIT_FOR_WRITE_END: 1075 + if (host->sd_error) 1076 + data->error = sh_mmcif_error_manage(host); 1077 + break; 1078 + default: 1079 + BUG(); 1080 + } 1081 + 1082 + if (host->wait_for != MMCIF_WAIT_FOR_STOP) { 1083 + if (!mrq->cmd->error && data && !data->error) 1084 + data->bytes_xfered = 1085 + data->blocks * data->blksz; 1086 + 1087 + if (mrq->stop && !mrq->cmd->error && (!data || 
!data->error)) { 1088 + sh_mmcif_stop_cmd(host, mrq); 1089 + if (!mrq->stop->error) 1090 + return IRQ_HANDLED; 1091 + } 1092 + } 1093 + 1094 + host->wait_for = MMCIF_WAIT_FOR_REQUEST; 1095 + host->state = STATE_IDLE; 1096 + host->mrq = NULL; 1097 + mmc_request_done(host->mmc, mrq); 1098 + 1099 + return IRQ_HANDLED; 994 1100 } 995 1101 996 1102 static irqreturn_t sh_mmcif_intr(int irq, void *dev_id) ··· 1148 960 1149 961 state = sh_mmcif_readl(host->addr, MMCIF_CE_INT); 1150 962 1151 - if (state & INT_RBSYE) { 963 + if (state & INT_ERR_STS) { 964 + /* error interrupts - process first */ 965 + sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state); 966 + sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state); 967 + err = 1; 968 + } else if (state & INT_RBSYE) { 1152 969 sh_mmcif_writel(host->addr, MMCIF_CE_INT, 1153 970 ~(INT_RBSYE | INT_CRSPE)); 1154 971 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE); ··· 1181 988 sh_mmcif_writel(host->addr, MMCIF_CE_INT, 1182 989 ~(INT_CMD12RBE | INT_CMD12CRE)); 1183 990 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE); 1184 - } else if (state & INT_ERR_STS) { 1185 - /* err interrupts */ 1186 - sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state); 1187 - sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state); 1188 - err = 1; 1189 991 } else { 1190 992 dev_dbg(&host->pd->dev, "Unsupported interrupt: 0x%x\n", state); 1191 993 sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state); ··· 1191 1003 host->sd_error = true; 1192 1004 dev_dbg(&host->pd->dev, "int err state = %08x\n", state); 1193 1005 } 1194 - if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) 1195 - complete(&host->intr_wait); 1196 - else 1006 + if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) { 1007 + if (!host->dma_active) 1008 + return IRQ_WAKE_THREAD; 1009 + else if (host->sd_error) 1010 + mmcif_dma_complete(host); 1011 + } else { 1197 1012 dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state); 1013 + } 1198 1014 1199 1015 return IRQ_HANDLED; 1016 + } 1017 + 1018 + static void 
mmcif_timeout_work(struct work_struct *work) 1019 + { 1020 + struct delayed_work *d = container_of(work, struct delayed_work, work); 1021 + struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work); 1022 + struct mmc_request *mrq = host->mrq; 1023 + 1024 + if (host->dying) 1025 + /* Don't run after mmc_remove_host() */ 1026 + return; 1027 + 1028 + /* 1029 + * Handle races with cancel_delayed_work(), unless 1030 + * cancel_delayed_work_sync() is used 1031 + */ 1032 + switch (host->wait_for) { 1033 + case MMCIF_WAIT_FOR_CMD: 1034 + mrq->cmd->error = sh_mmcif_error_manage(host); 1035 + break; 1036 + case MMCIF_WAIT_FOR_STOP: 1037 + mrq->stop->error = sh_mmcif_error_manage(host); 1038 + break; 1039 + case MMCIF_WAIT_FOR_MREAD: 1040 + case MMCIF_WAIT_FOR_MWRITE: 1041 + case MMCIF_WAIT_FOR_READ: 1042 + case MMCIF_WAIT_FOR_WRITE: 1043 + case MMCIF_WAIT_FOR_READ_END: 1044 + case MMCIF_WAIT_FOR_WRITE_END: 1045 + mrq->data->error = sh_mmcif_error_manage(host); 1046 + break; 1047 + default: 1048 + BUG(); 1049 + } 1050 + 1051 + host->state = STATE_IDLE; 1052 + host->wait_for = MMCIF_WAIT_FOR_REQUEST; 1053 + host->mrq = NULL; 1054 + mmc_request_done(host->mmc, mrq); 1200 1055 } 1201 1056 1202 1057 static int __devinit sh_mmcif_probe(struct platform_device *pdev) ··· 1295 1064 host->clk = clk_get_rate(host->hclk); 1296 1065 host->pd = pdev; 1297 1066 1298 - init_completion(&host->intr_wait); 1299 1067 spin_lock_init(&host->lock); 1300 1068 1301 1069 mmc->ops = &sh_mmcif_ops; ··· 1331 1101 1332 1102 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); 1333 1103 1334 - ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host); 1104 + ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:error", host); 1335 1105 if (ret) { 1336 1106 dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n"); 1337 1107 goto clean_up3; 1338 1108 } 1339 - ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host); 1109 + ret = 
request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host); 1340 1110 if (ret) { 1341 1111 free_irq(irq[0], host); 1342 1112 dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n"); 1343 1113 goto clean_up3; 1344 1114 } 1345 1115 1346 - sh_mmcif_detect(host->mmc); 1116 + INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work); 1117 + 1118 + mmc_detect_change(host->mmc, 0); 1347 1119 1348 1120 dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION); 1349 1121 dev_dbg(&pdev->dev, "chip ver H'%04x\n", ··· 1371 1139 struct sh_mmcif_host *host = platform_get_drvdata(pdev); 1372 1140 int irq[2]; 1373 1141 1142 + host->dying = true; 1374 1143 pm_runtime_get_sync(&pdev->dev); 1375 1144 1376 1145 mmc_remove_host(host->mmc); 1377 1146 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); 1147 + 1148 + /* 1149 + * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the 1150 + * mmc_remove_host() call above. But swapping order doesn't help either 1151 + * (a query on the linux-mmc mailing list didn't bring any replies). 1152 + */ 1153 + cancel_delayed_work_sync(&host->timeout_work); 1378 1154 1379 1155 if (host->addr) 1380 1156 iounmap(host->addr); ··· 1446 1206 }, 1447 1207 }; 1448 1208 1449 - static int __init sh_mmcif_init(void) 1450 - { 1451 - return platform_driver_register(&sh_mmcif_driver); 1452 - } 1453 - 1454 - static void __exit sh_mmcif_exit(void) 1455 - { 1456 - platform_driver_unregister(&sh_mmcif_driver); 1457 - } 1458 - 1459 - module_init(sh_mmcif_init); 1460 - module_exit(sh_mmcif_exit); 1461 - 1209 + module_platform_driver(sh_mmcif_driver); 1462 1210 1463 1211 MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver"); 1464 1212 MODULE_LICENSE("GPL");
+1 -12
drivers/mmc/host/sh_mobile_sdhi.c
··· 282 282 .remove = __devexit_p(sh_mobile_sdhi_remove), 283 283 }; 284 284 285 - static int __init sh_mobile_sdhi_init(void) 286 - { 287 - return platform_driver_register(&sh_mobile_sdhi_driver); 288 - } 289 - 290 - static void __exit sh_mobile_sdhi_exit(void) 291 - { 292 - platform_driver_unregister(&sh_mobile_sdhi_driver); 293 - } 294 - 295 - module_init(sh_mobile_sdhi_init); 296 - module_exit(sh_mobile_sdhi_exit); 285 + module_platform_driver(sh_mobile_sdhi_driver); 297 286 298 287 MODULE_DESCRIPTION("SuperH Mobile SDHI driver"); 299 288 MODULE_AUTHOR("Magnus Damm");
+8 -8
drivers/mmc/host/tifm_sd.c
··· 118 118 unsigned char *buf; 119 119 unsigned int pos = 0, val; 120 120 121 - buf = kmap_atomic(pg, KM_BIO_DST_IRQ) + off; 121 + buf = kmap_atomic(pg) + off; 122 122 if (host->cmd_flags & DATA_CARRY) { 123 123 buf[pos++] = host->bounce_buf_data[0]; 124 124 host->cmd_flags &= ~DATA_CARRY; ··· 134 134 } 135 135 buf[pos++] = (val >> 8) & 0xff; 136 136 } 137 - kunmap_atomic(buf - off, KM_BIO_DST_IRQ); 137 + kunmap_atomic(buf - off); 138 138 } 139 139 140 140 static void tifm_sd_write_fifo(struct tifm_sd *host, struct page *pg, ··· 144 144 unsigned char *buf; 145 145 unsigned int pos = 0, val; 146 146 147 - buf = kmap_atomic(pg, KM_BIO_SRC_IRQ) + off; 147 + buf = kmap_atomic(pg) + off; 148 148 if (host->cmd_flags & DATA_CARRY) { 149 149 val = host->bounce_buf_data[0] | ((buf[pos++] << 8) & 0xff00); 150 150 writel(val, sock->addr + SOCK_MMCSD_DATA); ··· 161 161 val |= (buf[pos++] << 8) & 0xff00; 162 162 writel(val, sock->addr + SOCK_MMCSD_DATA); 163 163 } 164 - kunmap_atomic(buf - off, KM_BIO_SRC_IRQ); 164 + kunmap_atomic(buf - off); 165 165 } 166 166 167 167 static void tifm_sd_transfer_data(struct tifm_sd *host) ··· 212 212 struct page *src, unsigned int src_off, 213 213 unsigned int count) 214 214 { 215 - unsigned char *src_buf = kmap_atomic(src, KM_BIO_SRC_IRQ) + src_off; 216 - unsigned char *dst_buf = kmap_atomic(dst, KM_BIO_DST_IRQ) + dst_off; 215 + unsigned char *src_buf = kmap_atomic(src) + src_off; 216 + unsigned char *dst_buf = kmap_atomic(dst) + dst_off; 217 217 218 218 memcpy(dst_buf, src_buf, count); 219 219 220 - kunmap_atomic(dst_buf - dst_off, KM_BIO_DST_IRQ); 221 - kunmap_atomic(src_buf - src_off, KM_BIO_SRC_IRQ); 220 + kunmap_atomic(dst_buf - dst_off); 221 + kunmap_atomic(src_buf - src_off); 222 222 } 223 223 224 224 static void tifm_sd_bounce_block(struct tifm_sd *host, struct mmc_data *r_data)
+1 -13
drivers/mmc/host/tmio_mmc.c
··· 138 138 .resume = tmio_mmc_resume, 139 139 }; 140 140 141 - 142 - static int __init tmio_mmc_init(void) 143 - { 144 - return platform_driver_register(&tmio_mmc_driver); 145 - } 146 - 147 - static void __exit tmio_mmc_exit(void) 148 - { 149 - platform_driver_unregister(&tmio_mmc_driver); 150 - } 151 - 152 - module_init(tmio_mmc_init); 153 - module_exit(tmio_mmc_exit); 141 + module_platform_driver(tmio_mmc_driver); 154 142 155 143 MODULE_DESCRIPTION("Toshiba TMIO SD/MMC driver"); 156 144 MODULE_AUTHOR("Ian Molton <spyro@f2s.com>");
+2 -2
drivers/mmc/host/tmio_mmc.h
··· 105 105 unsigned long *flags) 106 106 { 107 107 local_irq_save(*flags); 108 - return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; 108 + return kmap_atomic(sg_page(sg)) + sg->offset; 109 109 } 110 110 111 111 static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg, 112 112 unsigned long *flags, void *virt) 113 113 { 114 - kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ); 114 + kunmap_atomic(virt - sg->offset); 115 115 local_irq_restore(*flags); 116 116 } 117 117
+21 -9
drivers/mmc/host/tmio_mmc_pio.c
··· 800 800 } else if (ios->power_mode != MMC_POWER_UP) { 801 801 if (host->set_pwr && ios->power_mode == MMC_POWER_OFF) 802 802 host->set_pwr(host->pdev, 0); 803 - if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) && 804 - pdata->power) { 803 + if (pdata->power) { 805 804 pdata->power = false; 806 805 pm_runtime_put(&host->pdev->dev); 807 806 } ··· 914 915 if (ret < 0) 915 916 goto pm_disable; 916 917 918 + /* 919 + * There are 4 different scenarios for the card detection: 920 + * 1) an external gpio irq handles the cd (best for power savings) 921 + * 2) internal sdhi irq handles the cd 922 + * 3) a worker thread polls the sdhi - indicated by MMC_CAP_NEEDS_POLL 923 + * 4) the medium is non-removable - indicated by MMC_CAP_NONREMOVABLE 924 + * 925 + * While we increment the rtpm counter for all scenarios when the mmc 926 + * core activates us by calling an appropriate set_ios(), we must 927 + * additionally ensure that in case 2) the tmio mmc hardware stays 928 + * powered on during runtime for the card detection to work. 929 + */ 930 + if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD 931 + || mmc->caps & MMC_CAP_NEEDS_POLL 932 + || mmc->caps & MMC_CAP_NONREMOVABLE)) 933 + pm_runtime_get_noresume(&pdev->dev); 934 + 917 935 tmio_mmc_clk_stop(_host); 918 936 tmio_mmc_reset(_host); 919 937 ··· 948 932 949 933 /* See if we also get DMA */ 950 934 tmio_mmc_request_dma(_host, pdata); 951 - 952 - /* We have to keep the device powered for its card detection to work */ 953 - if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD)) { 954 - pdata->power = true; 955 - pm_runtime_get_noresume(&pdev->dev); 956 - } 957 935 958 936 mmc_add_host(mmc); 959 937 ··· 984 974 * the controller, the runtime PM is suspended and pdata->power == false, 985 975 * so, our .runtime_resume() will not try to detect a card in the slot. 
986 976 */ 987 - if (host->pdata->flags & TMIO_MMC_HAS_COLD_CD) 977 + if (host->pdata->flags & TMIO_MMC_HAS_COLD_CD 978 + || host->mmc->caps & MMC_CAP_NEEDS_POLL 979 + || host->mmc->caps & MMC_CAP_NONREMOVABLE) 988 980 pm_runtime_get_sync(&pdev->dev); 989 981 990 982 mmc_remove_host(host->mmc);
+2
include/linux/amba/mmci.h
··· 30 30 * @cd_invert: true if the gpio_cd pin value is active low 31 31 * @capabilities: the capabilities of the block as implemented in 32 32 * this platform, signify anything MMC_CAP_* from mmc/host.h 33 + * @capabilities2: more capabilities, MMC_CAP2_* from mmc/host.h 33 34 * @dma_filter: function used to select an appropriate RX and TX 34 35 * DMA channel to be used for DMA, if and only if you're deploying the 35 36 * generic DMA engine ··· 53 52 int gpio_cd; 54 53 bool cd_invert; 55 54 unsigned long capabilities; 55 + unsigned long capabilities2; 56 56 bool (*dma_filter)(struct dma_chan *chan, void *filter_param); 57 57 void *dma_rx_param; 58 58 void *dma_tx_param;
+21 -2
include/linux/mmc/card.h
··· 71 71 bool hpi_en; /* HPI enablebit */ 72 72 bool hpi; /* HPI support bit */ 73 73 unsigned int hpi_cmd; /* cmd used as HPI */ 74 + unsigned int boot_ro_lock; /* ro lock support */ 75 + bool boot_ro_lockable; 74 76 u8 raw_partition_support; /* 160 */ 75 77 u8 raw_erased_mem_count; /* 181 */ 76 78 u8 raw_ext_csd_structure; /* 194 */ ··· 112 110 struct sd_switch_caps { 113 111 unsigned int hs_max_dtr; 114 112 unsigned int uhs_max_dtr; 113 + #define HIGH_SPEED_MAX_DTR 50000000 115 114 #define UHS_SDR104_MAX_DTR 208000000 116 115 #define UHS_SDR50_MAX_DTR 100000000 117 116 #define UHS_DDR50_MAX_DTR 50000000 ··· 120 117 #define UHS_SDR12_MAX_DTR 25000000 121 118 unsigned int sd3_bus_mode; 122 119 #define UHS_SDR12_BUS_SPEED 0 120 + #define HIGH_SPEED_BUS_SPEED 1 123 121 #define UHS_SDR25_BUS_SPEED 1 124 122 #define UHS_SDR50_BUS_SPEED 2 125 123 #define UHS_SDR104_BUS_SPEED 3 126 124 #define UHS_DDR50_BUS_SPEED 4 127 125 126 + #define SD_MODE_HIGH_SPEED (1 << HIGH_SPEED_BUS_SPEED) 128 127 #define SD_MODE_UHS_SDR12 (1 << UHS_SDR12_BUS_SPEED) 129 128 #define SD_MODE_UHS_SDR25 (1 << UHS_SDR25_BUS_SPEED) 130 129 #define SD_MODE_UHS_SDR50 (1 << UHS_SDR50_BUS_SPEED) ··· 189 184 unsigned int part_cfg; /* partition type */ 190 185 char name[MAX_MMC_PART_NAME_LEN]; 191 186 bool force_ro; /* to make boot parts RO by default */ 187 + unsigned int area_type; 188 + #define MMC_BLK_DATA_AREA_MAIN (1<<0) 189 + #define MMC_BLK_DATA_AREA_BOOT (1<<1) 190 + #define MMC_BLK_DATA_AREA_GP (1<<2) 192 191 }; 193 192 194 193 /* ··· 215 206 #define MMC_STATE_HIGHSPEED_DDR (1<<4) /* card is in high speed mode */ 216 207 #define MMC_STATE_ULTRAHIGHSPEED (1<<5) /* card is in ultra high speed mode */ 217 208 #define MMC_CARD_SDXC (1<<6) /* card is SDXC */ 209 + #define MMC_CARD_REMOVED (1<<7) /* card has been removed */ 210 + #define MMC_STATE_HIGHSPEED_200 (1<<8) /* card is in HS200 mode */ 218 211 unsigned int quirks; /* card quirks */ 219 212 #define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO 
FN0 writes outside of the VS CCCR range */ 220 213 #define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */ ··· 272 261 * This function fill contents in mmc_part. 273 262 */ 274 263 static inline void mmc_part_add(struct mmc_card *card, unsigned int size, 275 - unsigned int part_cfg, char *name, int idx, bool ro) 264 + unsigned int part_cfg, char *name, int idx, bool ro, 265 + int area_type) 276 266 { 277 267 card->part[card->nr_parts].size = size; 278 268 card->part[card->nr_parts].part_cfg = part_cfg; 279 269 sprintf(card->part[card->nr_parts].name, name, idx); 280 270 card->part[card->nr_parts].force_ro = ro; 271 + card->part[card->nr_parts].area_type = area_type; 281 272 card->nr_parts++; 282 273 } 283 274 ··· 375 362 #define mmc_card_present(c) ((c)->state & MMC_STATE_PRESENT) 376 363 #define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY) 377 364 #define mmc_card_highspeed(c) ((c)->state & MMC_STATE_HIGHSPEED) 365 + #define mmc_card_hs200(c) ((c)->state & MMC_STATE_HIGHSPEED_200) 378 366 #define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR) 379 367 #define mmc_card_ddr_mode(c) ((c)->state & MMC_STATE_HIGHSPEED_DDR) 380 - #define mmc_sd_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED) 368 + #define mmc_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED) 369 + #define mmc_sd_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED) 381 370 #define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC) 371 + #define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED)) 382 372 383 373 #define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT) 384 374 #define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY) 385 375 #define mmc_card_set_highspeed(c) ((c)->state |= MMC_STATE_HIGHSPEED) 376 + #define mmc_card_set_hs200(c) ((c)->state |= MMC_STATE_HIGHSPEED_200) 386 377 #define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR) 387 378 #define mmc_card_set_ddr_mode(c) ((c)->state |= 
MMC_STATE_HIGHSPEED_DDR) 379 + #define mmc_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED) 388 380 #define mmc_sd_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED) 389 381 #define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC) 382 + #define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED) 390 383 391 384 /* 392 385 * Quirk add/remove for MMC products.
+19
include/linux/mmc/cd-gpio.h
··· 1 + /* 2 + * Generic GPIO card-detect helper header 3 + * 4 + * Copyright (C) 2011, Guennadi Liakhovetski <g.liakhovetski@gmx.de> 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + */ 10 + 11 + #ifndef MMC_CD_GPIO_H 12 + #define MMC_CD_GPIO_H 13 + 14 + struct mmc_host; 15 + int mmc_cd_gpio_request(struct mmc_host *host, unsigned int gpio, 16 + unsigned int irq, unsigned long flags); 17 + void mmc_cd_gpio_free(struct mmc_host *host); 18 + 19 + #endif
+2
include/linux/mmc/core.h
··· 180 180 181 181 extern int mmc_flush_cache(struct mmc_card *); 182 182 183 + extern int mmc_detect_card_removed(struct mmc_host *host); 184 + 183 185 /** 184 186 * mmc_claim_host - exclusively claim a host 185 187 * @host: mmc host to claim
+1
include/linux/mmc/dw_mmc.h
··· 214 214 unsigned int bus_hz; /* Bus speed */ 215 215 216 216 unsigned int caps; /* Capabilities */ 217 + unsigned int caps2; /* More capabilities */ 217 218 /* 218 219 * Override fifo depth. If 0, autodetect it from the FIFOTH register, 219 220 * but note that this may not be reliable after a bootloader has used
+23 -2
include/linux/mmc/host.h
··· 56 56 #define MMC_TIMING_UHS_SDR50 3 57 57 #define MMC_TIMING_UHS_SDR104 4 58 58 #define MMC_TIMING_UHS_DDR50 5 59 + #define MMC_TIMING_MMC_HS200 6 59 60 60 61 #define MMC_SDR_MODE 0 61 62 #define MMC_1_2V_DDR_MODE 1 62 63 #define MMC_1_8V_DDR_MODE 2 64 + #define MMC_1_2V_SDR_MODE 3 65 + #define MMC_1_8V_SDR_MODE 4 63 66 64 67 unsigned char signal_voltage; /* signalling voltage (1.8V or 3.3V) */ 65 68 ··· 151 148 void (*init_card)(struct mmc_host *host, struct mmc_card *card); 152 149 153 150 int (*start_signal_voltage_switch)(struct mmc_host *host, struct mmc_ios *ios); 154 - int (*execute_tuning)(struct mmc_host *host); 151 + 152 + /* The tuning command opcode value is different for SD and eMMC cards */ 153 + int (*execute_tuning)(struct mmc_host *host, u32 opcode); 155 154 void (*enable_preset_value)(struct mmc_host *host, bool enable); 156 155 int (*select_drive_strength)(unsigned int max_dtr, int host_drv, int card_drv); 157 156 void (*hw_reset)(struct mmc_host *host); ··· 170 165 * Returns 0 if success otherwise non zero. 
171 166 */ 172 167 int (*err_check) (struct mmc_card *, struct mmc_async_req *); 168 + }; 169 + 170 + struct mmc_hotplug { 171 + unsigned int irq; 172 + void *handler_priv; 173 173 }; 174 174 175 175 struct mmc_host { ··· 252 242 #define MMC_CAP2_CACHE_CTRL (1 << 1) /* Allow cache control */ 253 243 #define MMC_CAP2_POWEROFF_NOTIFY (1 << 2) /* Notify poweroff supported */ 254 244 #define MMC_CAP2_NO_MULTI_READ (1 << 3) /* Multiblock reads don't work */ 245 + #define MMC_CAP2_NO_SLEEP_CMD (1 << 4) /* Don't allow sleep command */ 246 + #define MMC_CAP2_HS200_1_8V_SDR (1 << 5) /* can support */ 247 + #define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */ 248 + #define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \ 249 + MMC_CAP2_HS200_1_2V_SDR) 255 250 256 251 mmc_pm_flag_t pm_caps; /* supported pm features */ 257 252 unsigned int power_notify_type; ··· 268 253 int clk_requests; /* internal reference counter */ 269 254 unsigned int clk_delay; /* number of MCI clk hold cycles */ 270 255 bool clk_gated; /* clock gated */ 271 - struct work_struct clk_gate_work; /* delayed clock gate */ 256 + struct delayed_work clk_gate_work; /* delayed clock gate */ 272 257 unsigned int clk_old; /* old clock value cache */ 273 258 spinlock_t clk_lock; /* lock for clk fields */ 274 259 struct mutex clk_gate_mutex; /* mutex for clock gating */ 260 + struct device_attribute clkgate_delay_attr; 261 + unsigned long clkgate_delay; 275 262 #endif 276 263 277 264 /* host specific block data */ ··· 314 297 int claim_cnt; /* "claim" nesting count */ 315 298 316 299 struct delayed_work detect; 300 + int detect_change; /* card detect flag */ 301 + struct mmc_hotplug hotplug; 317 302 318 303 const struct mmc_bus_ops *bus_ops; /* current bus driver */ 319 304 unsigned int bus_refs; /* reference counter */ ··· 341 322 #ifdef CONFIG_FAIL_MMC_REQUEST 342 323 struct fault_attr fail_mmc_request; 343 324 #endif 325 + 326 + unsigned int actual_clock; /* Actual HC clock rate */ 344 327 345 328 unsigned long 
private[0] ____cacheline_aligned; 346 329 };
+71 -1
include/linux/mmc/mmc.h
··· 51 51 #define MMC_READ_SINGLE_BLOCK 17 /* adtc [31:0] data addr R1 */ 52 52 #define MMC_READ_MULTIPLE_BLOCK 18 /* adtc [31:0] data addr R1 */ 53 53 #define MMC_SEND_TUNING_BLOCK 19 /* adtc R1 */ 54 + #define MMC_SEND_TUNING_BLOCK_HS200 21 /* adtc R1 */ 54 55 55 56 /* class 3 */ 56 57 #define MMC_WRITE_DAT_UNTIL_STOP 20 /* adtc [31:0] data addr R1 */ ··· 281 280 #define EXT_CSD_RST_N_FUNCTION 162 /* R/W */ 282 281 #define EXT_CSD_SANITIZE_START 165 /* W */ 283 282 #define EXT_CSD_WR_REL_PARAM 166 /* RO */ 283 + #define EXT_CSD_BOOT_WP 173 /* R/W */ 284 284 #define EXT_CSD_ERASE_GROUP_DEF 175 /* R/W */ 285 285 #define EXT_CSD_PART_CONFIG 179 /* R/W */ 286 286 #define EXT_CSD_ERASED_MEM_CONT 181 /* RO */ ··· 323 321 324 322 #define EXT_CSD_WR_REL_PARAM_EN (1<<2) 325 323 324 + #define EXT_CSD_BOOT_WP_B_PWR_WP_DIS (0x40) 325 + #define EXT_CSD_BOOT_WP_B_PERM_WP_DIS (0x10) 326 + #define EXT_CSD_BOOT_WP_B_PERM_WP_EN (0x04) 327 + #define EXT_CSD_BOOT_WP_B_PWR_WP_EN (0x01) 328 + 326 329 #define EXT_CSD_PART_CONFIG_ACC_MASK (0x7) 327 330 #define EXT_CSD_PART_CONFIG_ACC_BOOT0 (0x1) 328 331 #define EXT_CSD_PART_CONFIG_ACC_GP0 (0x4) ··· 340 333 341 334 #define EXT_CSD_CARD_TYPE_26 (1<<0) /* Card can run at 26MHz */ 342 335 #define EXT_CSD_CARD_TYPE_52 (1<<1) /* Card can run at 52MHz */ 343 - #define EXT_CSD_CARD_TYPE_MASK 0xF /* Mask out reserved bits */ 336 + #define EXT_CSD_CARD_TYPE_MASK 0x3F /* Mask out reserved bits */ 344 337 #define EXT_CSD_CARD_TYPE_DDR_1_8V (1<<2) /* Card can run at 52MHz */ 345 338 /* DDR mode @1.8V or 3V I/O */ 346 339 #define EXT_CSD_CARD_TYPE_DDR_1_2V (1<<3) /* Card can run at 52MHz */ 347 340 /* DDR mode @1.2V I/O */ 348 341 #define EXT_CSD_CARD_TYPE_DDR_52 (EXT_CSD_CARD_TYPE_DDR_1_8V \ 349 342 | EXT_CSD_CARD_TYPE_DDR_1_2V) 343 + #define EXT_CSD_CARD_TYPE_SDR_1_8V (1<<4) /* Card can run at 200MHz */ 344 + #define EXT_CSD_CARD_TYPE_SDR_1_2V (1<<5) /* Card can run at 200MHz */ 345 + /* SDR mode @1.2V I/O */ 346 + 347 + #define EXT_CSD_CARD_TYPE_SDR_200 (EXT_CSD_CARD_TYPE_SDR_1_8V | \ 348 + EXT_CSD_CARD_TYPE_SDR_1_2V) 349 + 350 + #define EXT_CSD_CARD_TYPE_SDR_ALL (EXT_CSD_CARD_TYPE_SDR_200 | \ 351 + EXT_CSD_CARD_TYPE_52 | \ 352 + EXT_CSD_CARD_TYPE_26) 353 + 354 + #define EXT_CSD_CARD_TYPE_SDR_1_2V_ALL (EXT_CSD_CARD_TYPE_SDR_1_2V | \ 355 + EXT_CSD_CARD_TYPE_52 | \ 356 + EXT_CSD_CARD_TYPE_26) 357 + 358 + #define EXT_CSD_CARD_TYPE_SDR_1_8V_ALL (EXT_CSD_CARD_TYPE_SDR_1_8V | \ 359 + EXT_CSD_CARD_TYPE_52 | \ 360 + EXT_CSD_CARD_TYPE_26) 361 + 362 + #define EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_8V (EXT_CSD_CARD_TYPE_SDR_1_2V | \ 363 + EXT_CSD_CARD_TYPE_DDR_1_8V | \ 364 + EXT_CSD_CARD_TYPE_52 | \ 365 + EXT_CSD_CARD_TYPE_26) 366 + 367 + #define EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_8V (EXT_CSD_CARD_TYPE_SDR_1_8V | \ 368 + EXT_CSD_CARD_TYPE_DDR_1_8V | \ 369 + EXT_CSD_CARD_TYPE_52 | \ 370 + EXT_CSD_CARD_TYPE_26) 371 + 372 + #define EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_2V (EXT_CSD_CARD_TYPE_SDR_1_2V | \ 373 + EXT_CSD_CARD_TYPE_DDR_1_2V | \ 374 + EXT_CSD_CARD_TYPE_52 | \ 375 + EXT_CSD_CARD_TYPE_26) 376 + 377 + #define EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_2V (EXT_CSD_CARD_TYPE_SDR_1_8V | \ 378 + EXT_CSD_CARD_TYPE_DDR_1_2V | \ 379 + EXT_CSD_CARD_TYPE_52 | \ 380 + EXT_CSD_CARD_TYPE_26) 381 + 382 + #define EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_52 (EXT_CSD_CARD_TYPE_SDR_1_2V | \ 383 + EXT_CSD_CARD_TYPE_DDR_52 | \ 384 + EXT_CSD_CARD_TYPE_52 | \ 385 + EXT_CSD_CARD_TYPE_26) 386 + 387 + #define EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_52 (EXT_CSD_CARD_TYPE_SDR_1_8V | \ 388 + EXT_CSD_CARD_TYPE_DDR_52 | \ 389 + EXT_CSD_CARD_TYPE_52 | \ 390 + EXT_CSD_CARD_TYPE_26) 391 + 392 + #define EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_8V (EXT_CSD_CARD_TYPE_SDR_200 | \ 393 + EXT_CSD_CARD_TYPE_DDR_1_8V | \ 394 + EXT_CSD_CARD_TYPE_52 | \ 395 + EXT_CSD_CARD_TYPE_26) 396 + 397 + #define EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_2V (EXT_CSD_CARD_TYPE_SDR_200 | \ 398 + EXT_CSD_CARD_TYPE_DDR_1_2V | \ 399 + EXT_CSD_CARD_TYPE_52 | \ 400 + EXT_CSD_CARD_TYPE_26) 401 + 402 + #define EXT_CSD_CARD_TYPE_SDR_ALL_DDR_52 (EXT_CSD_CARD_TYPE_SDR_200 | \ 403 + EXT_CSD_CARD_TYPE_DDR_52 | \ 404 + EXT_CSD_CARD_TYPE_52 | \ 405 + EXT_CSD_CARD_TYPE_26) 350 406 351 407 #define EXT_CSD_BUS_WIDTH_1 0 /* Card is in 1 bit mode */ 352 408 #define EXT_CSD_BUS_WIDTH_4 1 /* Card is in 4 bit mode */
+18
include/linux/mmc/sdhci-pci-data.h
··· 1 + #ifndef LINUX_MMC_SDHCI_PCI_DATA_H 2 + #define LINUX_MMC_SDHCI_PCI_DATA_H 3 + 4 + struct pci_dev; 5 + 6 + struct sdhci_pci_data { 7 + struct pci_dev *pdev; 8 + int slotno; 9 + int rst_n_gpio; /* Set to -EINVAL if unused */ 10 + int cd_gpio; /* Set to -EINVAL if unused */ 11 + int (*setup)(struct sdhci_pci_data *data); 12 + void (*cleanup)(struct sdhci_pci_data *data); 13 + }; 14 + 15 + extern struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev, 16 + int slotno); 17 + 18 + #endif
+1 -2
include/linux/mmc/sdhci.h
··· 90 90 91 91 unsigned int quirks2; /* More deviations from spec. */ 92 92 93 - #define SDHCI_QUIRK2_OWN_CARD_DETECTION (1<<0) 94 - 95 93 int irq; /* Device IRQ */ 96 94 void __iomem *ioaddr; /* Mapped address */ 97 95 ··· 119 121 #define SDHCI_AUTO_CMD23 (1<<7) /* Auto CMD23 support */ 120 122 #define SDHCI_PV_ENABLED (1<<8) /* Preset value enabled */ 121 123 #define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */ 124 + #define SDHCI_HS200_NEEDS_TUNING (1<<10) /* HS200 needs tuning */ 122 125 123 126 unsigned int version; /* SDHCI spec. version */ 124 127
+26 -1
include/linux/mmc/sdio.h
··· 38 38 * [8:0] Byte/block count 39 39 */ 40 40 41 + #define R4_18V_PRESENT (1<<24) 41 42 #define R4_MEMORY_PRESENT (1 << 27) 42 43 43 44 /* ··· 86 85 #define SDIO_SD_REV_1_01 0 /* SD Physical Spec Version 1.01 */ 87 86 #define SDIO_SD_REV_1_10 1 /* SD Physical Spec Version 1.10 */ 88 87 #define SDIO_SD_REV_2_00 2 /* SD Physical Spec Version 2.00 */ 88 + #define SDIO_SD_REV_3_00 3 /* SD Physical Spec Version 3.00 */ 89 89 90 90 #define SDIO_CCCR_IOEx 0x02 91 91 #define SDIO_CCCR_IORx 0x03 ··· 136 134 #define SDIO_CCCR_SPEED 0x13 137 135 138 136 #define SDIO_SPEED_SHS 0x01 /* Supports High-Speed mode */ 139 - #define SDIO_SPEED_EHS 0x02 /* Enable High-Speed mode */ 137 + #define SDIO_SPEED_BSS_SHIFT 1 138 + #define SDIO_SPEED_BSS_MASK (7<<SDIO_SPEED_BSS_SHIFT) 139 + #define SDIO_SPEED_SDR12 (0<<SDIO_SPEED_BSS_SHIFT) 140 + #define SDIO_SPEED_SDR25 (1<<SDIO_SPEED_BSS_SHIFT) 141 + #define SDIO_SPEED_SDR50 (2<<SDIO_SPEED_BSS_SHIFT) 142 + #define SDIO_SPEED_SDR104 (3<<SDIO_SPEED_BSS_SHIFT) 143 + #define SDIO_SPEED_DDR50 (4<<SDIO_SPEED_BSS_SHIFT) 144 + #define SDIO_SPEED_EHS SDIO_SPEED_SDR25 /* Enable High-Speed */ 140 145 146 + #define SDIO_CCCR_UHS 0x14 147 + #define SDIO_UHS_SDR50 0x01 148 + #define SDIO_UHS_SDR104 0x02 149 + #define SDIO_UHS_DDR50 0x04 150 + 151 + #define SDIO_CCCR_DRIVE_STRENGTH 0x15 152 + #define SDIO_SDTx_MASK 0x07 153 + #define SDIO_DRIVE_SDTA (1<<0) 154 + #define SDIO_DRIVE_SDTC (1<<1) 155 + #define SDIO_DRIVE_SDTD (1<<2) 156 + #define SDIO_DRIVE_DTSx_MASK 0x03 157 + #define SDIO_DRIVE_DTSx_SHIFT 4 158 + #define SDIO_DTSx_SET_TYPE_B (0 << SDIO_DRIVE_DTSx_SHIFT) 159 + #define SDIO_DTSx_SET_TYPE_A (1 << SDIO_DRIVE_DTSx_SHIFT) 160 + #define SDIO_DTSx_SET_TYPE_C (2 << SDIO_DRIVE_DTSx_SHIFT) 161 + #define SDIO_DTSx_SET_TYPE_D (3 << SDIO_DRIVE_DTSx_SHIFT) 141 162 /* 142 163 * Function Basic Registers (FBR) 143 164 */