Merge tag 'mmc-v4.14-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc

Pull MMC fixes from Ulf Hansson:
"MMC core:

- Fix driver strength selection when selecting hs400es

- Delete bounce buffer handling:

This change was prompted by a problem related to how bounce buffers
are being allocated. However, instead of trying to fix that, let's
just remove the mmc bounce buffer code altogether, as it has
practically no use.

MMC host:

- meson-gx: A couple of fixes related to clock/phase/tuning

- sdhci-xenon: Fix clock resource by adding an optional bus clock"
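
A note on the xenon change: the AXI bus clock is modeled as an optional
clock, so a missing clock is tolerated while a provider that simply has
not probed yet still defers. A minimal sketch of that pattern, condensed
from the sdhci-xenon.c diff below:

        priv->axi_clk = devm_clk_get(&pdev->dev, "axi");
        if (IS_ERR(priv->axi_clk)) {
                /* A missing clock is fine; a not-yet-ready provider is not. */
                err = PTR_ERR(priv->axi_clk);
                if (err == -EPROBE_DEFER)
                        goto err_clk;
        } else {
                err = clk_prepare_enable(priv->axi_clk);
                if (err)
                        goto err_clk;
        }

The cleanup path can then call clk_disable_unprepare(priv->axi_clk)
unconditionally, since the clk API ignores error pointers and NULL.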

* tag 'mmc-v4.14-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc:
mmc: sdhci-xenon: Fix clock resource by adding an optional bus clock
mmc: meson-gx: include tx phase in the tuning process
mmc: meson-gx: fix rx phase reset
mmc: meson-gx: make sure the clock is rounded down
mmc: Delete bounce buffer handling
mmc: core: add driver strength selection when selecting hs400es

Changed files
+81 -162
+7 -5
Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt
···
 - clocks:
   Array of clocks required for SDHC.
-  Require at least input clock for Xenon IP core.
+  Require at least input clock for Xenon IP core. For Armada AP806 and
+  CP110, the AXI clock is also mandatory.

 - clock-names:
   Array of names corresponding to clocks property.
   The input clock for Xenon IP core should be named as "core".
+  The input clock for the AXI bus must be named as "axi".

 - reg:
   * For "marvell,armada-3700-sdhci", two register areas.
···
         compatible = "marvell,armada-ap806-sdhci";
         reg = <0xaa0000 0x1000>;
         interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>
-        clocks = <&emmc_clk>;
-        clock-names = "core";
+        clocks = <&emmc_clk>,<&axi_clk>;
+        clock-names = "core", "axi";
         bus-width = <4>;
         marvell,xenon-phy-slow-mode;
         marvell,xenon-tun-count = <11>;
···
         interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>
         vqmmc-supply = <&sd_vqmmc_regulator>;
         vmmc-supply = <&sd_vmmc_regulator>;
-        clocks = <&sdclk>;
-        clock-names = "core";
+        clocks = <&sdclk>, <&axi_clk>;
+        clock-names = "core", "axi";
         bus-width = <4>;
         marvell,xenon-tun-count = <9>;
 };
-3
drivers/mmc/core/block.c
···
         }

         mqrq->areq.mrq = &brq->mrq;
-
-        mmc_queue_bounce_pre(mqrq);
 }

 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
···
         brq = &mq_rq->brq;
         old_req = mmc_queue_req_to_req(mq_rq);
         type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
-        mmc_queue_bounce_post(mq_rq);

         switch (status) {
         case MMC_BLK_SUCCESS:
+19 -17
drivers/mmc/core/mmc.c
···
         return err;
 }

+static void mmc_select_driver_type(struct mmc_card *card)
+{
+        int card_drv_type, drive_strength, drv_type;
+
+        card_drv_type = card->ext_csd.raw_driver_strength |
+                        mmc_driver_type_mask(0);
+
+        drive_strength = mmc_select_drive_strength(card,
+                                                   card->ext_csd.hs200_max_dtr,
+                                                   card_drv_type, &drv_type);
+
+        card->drive_strength = drive_strength;
+
+        if (drv_type)
+                mmc_set_driver_type(card->host, drv_type);
+}
+
 static int mmc_select_hs400es(struct mmc_card *card)
 {
         struct mmc_host *host = card->host;
···
                 goto out_err;
         }

+        mmc_select_driver_type(card);
+
         /* Switch card to HS400 */
         val = EXT_CSD_TIMING_HS400 |
               card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
···
                 pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
                        __func__, err);
         return err;
-}
-
-static void mmc_select_driver_type(struct mmc_card *card)
-{
-        int card_drv_type, drive_strength, drv_type;
-
-        card_drv_type = card->ext_csd.raw_driver_strength |
-                        mmc_driver_type_mask(0);
-
-        drive_strength = mmc_select_drive_strength(card,
-                                                   card->ext_csd.hs200_max_dtr,
-                                                   card_drv_type, &drv_type);
-
-        card->drive_strength = drive_strength;
-
-        if (drv_type)
-                mmc_set_driver_type(card->host, drv_type);
 }

 /*
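
A note on the hs400es fix: the driver strength must be selected before
the HS400 timing switch, because the same EXT_CSD HS_TIMING write
carries both fields. A worked example of the value composition (the
numeric values are for illustration; EXT_CSD_TIMING_HS400 is 0x3 and
EXT_CSD_DRV_STR_SHIFT is 4):

        /* e.g. a card/host pair that settled on driver type 1 */
        card->drive_strength = 1;
        val = EXT_CSD_TIMING_HS400 |
              card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
        /* val == 0x3 | (1 << 4) == 0x13: timing in bits 3:0,
         * driver strength in bits 7:4 of HS_TIMING */
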
+9 -116
drivers/mmc/core/queue.c
···
 #include "core.h"
 #include "card.h"

-#define MMC_QUEUE_BOUNCESZ        65536
-
 /*
  * Prepare a MMC request. This just filters out odd stuff.
  */
···
                 queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
 }

-static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
-{
-        unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;
-
-        if (host->max_segs != 1 || (host->caps & MMC_CAP_NO_BOUNCE_BUFF))
-                return 0;
-
-        if (bouncesz > host->max_req_size)
-                bouncesz = host->max_req_size;
-        if (bouncesz > host->max_seg_size)
-                bouncesz = host->max_seg_size;
-        if (bouncesz > host->max_blk_count * 512)
-                bouncesz = host->max_blk_count * 512;
-
-        if (bouncesz <= 512)
-                return 0;
-
-        return bouncesz;
-}
-
 /**
  * mmc_init_request() - initialize the MMC-specific per-request data
  * @q: the request queue
···
         struct mmc_card *card = mq->card;
         struct mmc_host *host = card->host;

-        if (card->bouncesz) {
-                mq_rq->bounce_buf = kmalloc(card->bouncesz, gfp);
-                if (!mq_rq->bounce_buf)
-                        return -ENOMEM;
-                if (card->bouncesz > 512) {
-                        mq_rq->sg = mmc_alloc_sg(1, gfp);
-                        if (!mq_rq->sg)
-                                return -ENOMEM;
-                        mq_rq->bounce_sg = mmc_alloc_sg(card->bouncesz / 512,
-                                                        gfp);
-                        if (!mq_rq->bounce_sg)
-                                return -ENOMEM;
-                }
-        } else {
-                mq_rq->bounce_buf = NULL;
-                mq_rq->bounce_sg = NULL;
-                mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
-                if (!mq_rq->sg)
-                        return -ENOMEM;
-        }
+        mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
+        if (!mq_rq->sg)
+                return -ENOMEM;

         return 0;
 }
···
 static void mmc_exit_request(struct request_queue *q, struct request *req)
 {
         struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
-
-        /* It is OK to kfree(NULL) so this will be smooth */
-        kfree(mq_rq->bounce_sg);
-        mq_rq->bounce_sg = NULL;
-
-        kfree(mq_rq->bounce_buf);
-        mq_rq->bounce_buf = NULL;

         kfree(mq_rq->sg);
         mq_rq->sg = NULL;
···
         if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
                 limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

-        /*
-         * mmc_init_request() depends on card->bouncesz so it must be calculated
-         * before blk_init_allocated_queue() starts allocating requests.
-         */
-        card->bouncesz = mmc_queue_calc_bouncesz(host);
-
         mq->card = card;
         mq->queue = blk_alloc_queue(GFP_KERNEL);
         if (!mq->queue)
···
         if (mmc_can_erase(card))
                 mmc_queue_setup_discard(mq->queue, card);

-        if (card->bouncesz) {
-                blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
-                blk_queue_max_segments(mq->queue, card->bouncesz / 512);
-                blk_queue_max_segment_size(mq->queue, card->bouncesz);
-        } else {
-                blk_queue_bounce_limit(mq->queue, limit);
-                blk_queue_max_hw_sectors(mq->queue,
-                        min(host->max_blk_count, host->max_req_size / 512));
-                blk_queue_max_segments(mq->queue, host->max_segs);
-                blk_queue_max_segment_size(mq->queue, host->max_seg_size);
-        }
+        blk_queue_bounce_limit(mq->queue, limit);
+        blk_queue_max_hw_sectors(mq->queue,
+                min(host->max_blk_count, host->max_req_size / 512));
+        blk_queue_max_segments(mq->queue, host->max_segs);
+        blk_queue_max_segment_size(mq->queue, host->max_seg_size);

         sema_init(&mq->thread_sem, 1);
···
  */
 unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
 {
-        unsigned int sg_len;
-        size_t buflen;
-        struct scatterlist *sg;
         struct request *req = mmc_queue_req_to_req(mqrq);
-        int i;

-        if (!mqrq->bounce_buf)
-                return blk_rq_map_sg(mq->queue, req, mqrq->sg);
-
-        sg_len = blk_rq_map_sg(mq->queue, req, mqrq->bounce_sg);
-
-        mqrq->bounce_sg_len = sg_len;
-
-        buflen = 0;
-        for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
-                buflen += sg->length;
-
-        sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
-
-        return 1;
-}
-
-/*
- * If writing, bounce the data to the buffer before the request
- * is sent to the host driver
- */
-void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
-{
-        if (!mqrq->bounce_buf)
-                return;
-
-        if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != WRITE)
-                return;
-
-        sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
-                mqrq->bounce_buf, mqrq->sg[0].length);
-}
-
-/*
- * If reading, bounce the data from the buffer after the request
- * has been handled by the host driver
- */
-void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
-{
-        if (!mqrq->bounce_buf)
-                return;
-
-        if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != READ)
-                return;
-
-        sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
-                mqrq->bounce_buf, mqrq->sg[0].length);
+        return blk_rq_map_sg(mq->queue, req, mqrq->sg);
 }
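
With the bounce code gone, mmc_queue_map_sg() above reduces to
blk_rq_map_sg(), so hosts always receive the request's real
scatter-gather list. For illustration only, a hedged sketch of how a
PIO host driver typically walks that list; do_pio_xfer() is a
hypothetical helper, not part of this changeset:

        struct scatterlist *sg;
        unsigned int sg_len;
        int i;

        sg_len = mmc_queue_map_sg(mq, mqrq);        /* fills mqrq->sg */
        for_each_sg(mqrq->sg, sg, sg_len, i)
                do_pio_xfer(host, sg_virt(sg), sg->length);        /* hypothetical */

Hosts limited to a single segment no longer get a large contiguous
bounce buffer; the block layer simply honors blk_queue_max_segments()
and builds correspondingly smaller requests.
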
-6
drivers/mmc/core/queue.h
···
 struct mmc_queue_req {
         struct mmc_blk_request        brq;
         struct scatterlist        *sg;
-        char                        *bounce_buf;
-        struct scatterlist        *bounce_sg;
-        unsigned int                bounce_sg_len;
         struct mmc_async_req        areq;
         enum mmc_drv_op                drv_op;
         int                        drv_op_result;
···
 extern void mmc_cleanup_queue(struct mmc_queue *);
 extern void mmc_queue_suspend(struct mmc_queue *);
 extern void mmc_queue_resume(struct mmc_queue *);
-
 extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
                                      struct mmc_queue_req *);
-extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
-extern void mmc_queue_bounce_post(struct mmc_queue_req *);

 extern int mmc_access_rpmb(struct mmc_queue *);
+1 -1
drivers/mmc/host/cavium.c
···
          */
         mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
                      MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD |
-                     MMC_CAP_3_3V_DDR | MMC_CAP_NO_BOUNCE_BUFF;
+                     MMC_CAP_3_3V_DDR;

         if (host->use_sg)
                 mmc->max_segs = 16;
+22 -4
drivers/mmc/host/meson-gx-mmc.c
···
         div->shift = __ffs(CLK_DIV_MASK);
         div->width = __builtin_popcountl(CLK_DIV_MASK);
         div->hw.init = &init;
-        div->flags = (CLK_DIVIDER_ONE_BASED |
-                      CLK_DIVIDER_ROUND_CLOSEST);
+        div->flags = CLK_DIVIDER_ONE_BASED;

         clk = devm_clk_register(host->dev, &div->hw);
         if (WARN_ON(IS_ERR(clk)))
···
 static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
 {
         struct meson_host *host = mmc_priv(mmc);
+        int ret;
+
+        /*
+         * If this is the initial tuning, try to get a sane Rx starting
+         * phase before doing the actual tuning.
+         */
+        if (!mmc->doing_retune) {
+                ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
+
+                if (ret)
+                        return ret;
+        }
+
+        ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk);
+        if (ret)
+                return ret;

         return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
 }
···
         case MMC_POWER_UP:
                 if (!IS_ERR(mmc->supply.vmmc))
                         mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+
+                /* Reset phases */
+                clk_set_phase(host->rx_clk, 0);
+                clk_set_phase(host->tx_clk, 270);
+
                 break;

         case MMC_POWER_ON:
···
                         host->vqmmc_enabled = true;
                 }

-                /* Reset rx phase */
-                clk_set_phase(host->rx_clk, 0);
                 break;
         }
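
A note on the clock rounding change: CLK_DIVIDER_ROUND_CLOSEST allowed
the divider to pick a rate above the requested one, which may overclock
the card; with only CLK_DIVIDER_ONE_BASED the divider rounds the rate
down. A hypothetical example with a 1 GHz parent and a 52 MHz request:
the closest divider, 19, yields about 52.6 MHz (too fast), while
rounding down picks divider 20 and yields 50 MHz.
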
+1 -5
drivers/mmc/host/pxamci.c
···

         pxamci_init_ocr(host);

-        /*
-         * This architecture used to disable bounce buffers through its
-         * defconfig, now it is done at runtime as a host property.
-         */
-        mmc->caps = MMC_CAP_NO_BOUNCE_BUFF;
+        mmc->caps = 0;
         host->cmdat = 0;
         if (!cpu_is_pxa25x()) {
                 mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
+20 -4
drivers/mmc/host/sdhci-xenon.c
···
 {
         struct sdhci_pltfm_host *pltfm_host;
         struct sdhci_host *host;
+        struct xenon_priv *priv;
         int err;

         host = sdhci_pltfm_init(pdev, &sdhci_xenon_pdata,
···
                 return PTR_ERR(host);

         pltfm_host = sdhci_priv(host);
+        priv = sdhci_pltfm_priv(pltfm_host);

         /*
          * Link Xenon specific mmc_host_ops function,
···
         if (err)
                 goto free_pltfm;

+        priv->axi_clk = devm_clk_get(&pdev->dev, "axi");
+        if (IS_ERR(priv->axi_clk)) {
+                err = PTR_ERR(priv->axi_clk);
+                if (err == -EPROBE_DEFER)
+                        goto err_clk;
+        } else {
+                err = clk_prepare_enable(priv->axi_clk);
+                if (err)
+                        goto err_clk;
+        }
+
         err = mmc_of_parse(host->mmc);
         if (err)
-                goto err_clk;
+                goto err_clk_axi;

         sdhci_get_of_property(pdev);
···
         /* Xenon specific dt parse */
         err = xenon_probe_dt(pdev);
         if (err)
-                goto err_clk;
+                goto err_clk_axi;

         err = xenon_sdhc_prepare(host);
         if (err)
-                goto err_clk;
+                goto err_clk_axi;

         pm_runtime_get_noresume(&pdev->dev);
         pm_runtime_set_active(&pdev->dev);
···
         pm_runtime_disable(&pdev->dev);
         pm_runtime_put_noidle(&pdev->dev);
         xenon_sdhc_unprepare(host);
+err_clk_axi:
+        clk_disable_unprepare(priv->axi_clk);
 err_clk:
         clk_disable_unprepare(pltfm_host->clk);
 free_pltfm:
···
 {
         struct sdhci_host *host = platform_get_drvdata(pdev);
         struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+        struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);

         pm_runtime_get_sync(&pdev->dev);
         pm_runtime_disable(&pdev->dev);
···
         sdhci_remove_host(host, 0);

         xenon_sdhc_unprepare(host);
-
+        clk_disable_unprepare(priv->axi_clk);
         clk_disable_unprepare(pltfm_host->clk);

         sdhci_pltfm_free(pdev);
+1
drivers/mmc/host/sdhci-xenon.h
···
         unsigned char        bus_width;
         unsigned char        timing;
         unsigned int        clock;
+        struct clk        *axi_clk;

         int phy_type;
         /*
+1 -1
include/linux/mmc/host.h
···
 #define MMC_CAP_UHS_SDR50        (1 << 18)        /* Host supports UHS SDR50 mode */
 #define MMC_CAP_UHS_SDR104        (1 << 19)        /* Host supports UHS SDR104 mode */
 #define MMC_CAP_UHS_DDR50        (1 << 20)        /* Host supports UHS DDR50 mode */
-#define MMC_CAP_NO_BOUNCE_BUFF        (1 << 21)        /* Disable bounce buffers on host */
+/* (1 << 21) is free for reuse */
 #define MMC_CAP_DRIVER_TYPE_A        (1 << 23)        /* Host supports Driver Type A */
 #define MMC_CAP_DRIVER_TYPE_C        (1 << 24)        /* Host supports Driver Type C */
 #define MMC_CAP_DRIVER_TYPE_D        (1 << 25)        /* Host supports Driver Type D */