Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mmc: sdhci: replace mmc->parent with mmc_dev() for consistency

As pointed out by Ulf, "both "mmc->parent" and mmc_dev(mmc) are being
used in the entire c-file". Convert all the mmc->parent usages in all
sdhci host drivers to mmc_dev() for consistency.

Suggested-by: Ulf Hansson <ulf.hansson@linaro.org>
Signed-off-by: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
Link: https://lore.kernel.org/r/20210324155013.1e5faa3c@xhacker.debian
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>

Authored by Jisheng Zhang; committed by Ulf Hansson.
Commit IDs: bac53336 57ac3084

+35 -35
+4 -4
drivers/mmc/host/sdhci-esdhc-mcf.c
··· 367 367 struct pltfm_mcf_data *mcf_data) 368 368 { 369 369 struct mcf_esdhc_platform_data *plat_data; 370 + struct device *dev = mmc_dev(host->mmc); 370 371 371 - if (!host->mmc->parent->platform_data) { 372 - dev_err(mmc_dev(host->mmc), "no platform data!\n"); 372 + if (!dev->platform_data) { 373 + dev_err(dev, "no platform data!\n"); 373 374 return -EINVAL; 374 375 } 375 376 376 - plat_data = (struct mcf_esdhc_platform_data *) 377 - host->mmc->parent->platform_data; 377 + plat_data = (struct mcf_esdhc_platform_data *)dev->platform_data; 378 378 379 379 /* Card_detect */ 380 380 switch (plat_data->cd_type) {
+1 -1
drivers/mmc/host/sdhci-of-aspeed.c
··· 181 181 struct aspeed_sdhci *sdhci; 182 182 struct device *dev; 183 183 184 - dev = host->mmc->parent; 184 + dev = mmc_dev(host->mmc); 185 185 sdhci = sdhci_pltfm_priv(sdhci_priv(host)); 186 186 187 187 if (!sdhci->phase_desc)
+17 -17
drivers/mmc/host/sdhci-tegra.c
··· 596 596 &tegra_host->autocal_offsets; 597 597 int err; 598 598 599 - err = device_property_read_u32(host->mmc->parent, 599 + err = device_property_read_u32(mmc_dev(host->mmc), 600 600 "nvidia,pad-autocal-pull-up-offset-3v3", 601 601 &autocal->pull_up_3v3); 602 602 if (err) 603 603 autocal->pull_up_3v3 = 0; 604 604 605 - err = device_property_read_u32(host->mmc->parent, 605 + err = device_property_read_u32(mmc_dev(host->mmc), 606 606 "nvidia,pad-autocal-pull-down-offset-3v3", 607 607 &autocal->pull_down_3v3); 608 608 if (err) 609 609 autocal->pull_down_3v3 = 0; 610 610 611 - err = device_property_read_u32(host->mmc->parent, 611 + err = device_property_read_u32(mmc_dev(host->mmc), 612 612 "nvidia,pad-autocal-pull-up-offset-1v8", 613 613 &autocal->pull_up_1v8); 614 614 if (err) 615 615 autocal->pull_up_1v8 = 0; 616 616 617 - err = device_property_read_u32(host->mmc->parent, 617 + err = device_property_read_u32(mmc_dev(host->mmc), 618 618 "nvidia,pad-autocal-pull-down-offset-1v8", 619 619 &autocal->pull_down_1v8); 620 620 if (err) 621 621 autocal->pull_down_1v8 = 0; 622 622 623 - err = device_property_read_u32(host->mmc->parent, 623 + err = device_property_read_u32(mmc_dev(host->mmc), 624 624 "nvidia,pad-autocal-pull-up-offset-sdr104", 625 625 &autocal->pull_up_sdr104); 626 626 if (err) 627 627 autocal->pull_up_sdr104 = autocal->pull_up_1v8; 628 628 629 - err = device_property_read_u32(host->mmc->parent, 629 + err = device_property_read_u32(mmc_dev(host->mmc), 630 630 "nvidia,pad-autocal-pull-down-offset-sdr104", 631 631 &autocal->pull_down_sdr104); 632 632 if (err) 633 633 autocal->pull_down_sdr104 = autocal->pull_down_1v8; 634 634 635 - err = device_property_read_u32(host->mmc->parent, 635 + err = device_property_read_u32(mmc_dev(host->mmc), 636 636 "nvidia,pad-autocal-pull-up-offset-hs400", 637 637 &autocal->pull_up_hs400); 638 638 if (err) 639 639 autocal->pull_up_hs400 = autocal->pull_up_1v8; 640 640 641 - err = device_property_read_u32(host->mmc->parent, 641 + err = device_property_read_u32(mmc_dev(host->mmc), 642 642 "nvidia,pad-autocal-pull-down-offset-hs400", 643 643 &autocal->pull_down_hs400); 644 644 if (err) ··· 653 653 if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL)) 654 654 return; 655 655 656 - err = device_property_read_u32(host->mmc->parent, 656 + err = device_property_read_u32(mmc_dev(host->mmc), 657 657 "nvidia,pad-autocal-pull-up-offset-3v3-timeout", 658 658 &autocal->pull_up_3v3_timeout); 659 659 if (err) { ··· 664 664 autocal->pull_up_3v3_timeout = 0; 665 665 } 666 666 667 - err = device_property_read_u32(host->mmc->parent, 667 + err = device_property_read_u32(mmc_dev(host->mmc), 668 668 "nvidia,pad-autocal-pull-down-offset-3v3-timeout", 669 669 &autocal->pull_down_3v3_timeout); 670 670 if (err) { ··· 675 675 autocal->pull_down_3v3_timeout = 0; 676 676 } 677 677 678 - err = device_property_read_u32(host->mmc->parent, 678 + err = device_property_read_u32(mmc_dev(host->mmc), 679 679 "nvidia,pad-autocal-pull-up-offset-1v8-timeout", 680 680 &autocal->pull_up_1v8_timeout); 681 681 if (err) { ··· 686 686 autocal->pull_up_1v8_timeout = 0; 687 687 } 688 688 689 - err = device_property_read_u32(host->mmc->parent, 689 + err = device_property_read_u32(mmc_dev(host->mmc), 690 690 "nvidia,pad-autocal-pull-down-offset-1v8-timeout", 691 691 &autocal->pull_down_1v8_timeout); 692 692 if (err) { ··· 720 720 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); 721 721 int err; 722 722 723 - err = device_property_read_u32(host->mmc->parent, "nvidia,default-tap", 723 + err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-tap", 724 724 &tegra_host->default_tap); 725 725 if (err) 726 726 tegra_host->default_tap = 0; 727 727 728 - err = device_property_read_u32(host->mmc->parent, "nvidia,default-trim", 728 + err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-trim", 729 729 &tegra_host->default_trim); 730 730 if (err) 731 731 tegra_host->default_trim = 0; 732 732 733 - err = device_property_read_u32(host->mmc->parent, "nvidia,dqs-trim", 733 + err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,dqs-trim", 734 734 &tegra_host->dqs_trim); 735 735 if (err) 736 736 tegra_host->dqs_trim = 0x11; ··· 741 741 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 742 742 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); 743 743 744 - if (device_property_read_bool(host->mmc->parent, "supports-cqe")) 744 + if (device_property_read_bool(mmc_dev(host->mmc), "supports-cqe")) 745 745 tegra_host->enable_hwcq = true; 746 746 else 747 747 tegra_host->enable_hwcq = false; ··· 1529 1529 1530 1530 host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; 1531 1531 1532 - cq_host = devm_kzalloc(host->mmc->parent, 1532 + cq_host = devm_kzalloc(mmc_dev(host->mmc), 1533 1533 sizeof(*cq_host), GFP_KERNEL); 1534 1534 if (!cq_host) { 1535 1535 ret = -ENOMEM;
+12 -12
drivers/mmc/host/sdhci.c
··· 188 188 if (host->bus_on) 189 189 return; 190 190 host->bus_on = true; 191 - pm_runtime_get_noresume(host->mmc->parent); 191 + pm_runtime_get_noresume(mmc_dev(host->mmc)); 192 192 } 193 193 194 194 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host) ··· 196 196 if (!host->bus_on) 197 197 return; 198 198 host->bus_on = false; 199 - pm_runtime_put_noidle(host->mmc->parent); 199 + pm_runtime_put_noidle(mmc_dev(host->mmc)); 200 200 } 201 201 202 202 void sdhci_reset(struct sdhci_host *host, u8 mask) ··· 648 648 } 649 649 } 650 650 /* Switch ownership to the DMA */ 651 - dma_sync_single_for_device(host->mmc->parent, 651 + dma_sync_single_for_device(mmc_dev(host->mmc), 652 652 host->bounce_addr, 653 653 host->bounce_buffer_size, 654 654 mmc_get_dma_dir(data)); ··· 1176 1176 int ret = 0; 1177 1177 struct mmc_host *mmc = host->mmc; 1178 1178 1179 - host->tx_chan = dma_request_chan(mmc->parent, "tx"); 1179 + host->tx_chan = dma_request_chan(mmc_dev(mmc), "tx"); 1180 1180 if (IS_ERR(host->tx_chan)) { 1181 1181 ret = PTR_ERR(host->tx_chan); 1182 1182 if (ret != -EPROBE_DEFER) ··· 1185 1185 return ret; 1186 1186 } 1187 1187 1188 - host->rx_chan = dma_request_chan(mmc->parent, "rx"); 1188 + host->rx_chan = dma_request_chan(mmc_dev(mmc), "rx"); 1189 1189 if (IS_ERR(host->rx_chan)) { 1190 1190 if (host->tx_chan) { 1191 1191 dma_release_channel(host->tx_chan); ··· 2489 2489 unsigned long flags; 2490 2490 2491 2491 if (enable) 2492 - pm_runtime_get_noresume(mmc->parent); 2492 + pm_runtime_get_noresume(mmc_dev(mmc)); 2493 2493 2494 2494 spin_lock_irqsave(&host->lock, flags); 2495 2495 sdhci_enable_sdio_irq_nolock(host, enable); 2496 2496 spin_unlock_irqrestore(&host->lock, flags); 2497 2497 2498 2498 if (!enable) 2499 - pm_runtime_put_noidle(mmc->parent); 2499 + pm_runtime_put_noidle(mmc_dev(mmc)); 2500 2500 } 2501 2501 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq); 2502 2502 ··· 3063 3063 length = host->bounce_buffer_size; 3064 3064 } 3065 3065 dma_sync_single_for_cpu( 3066 - host->mmc->parent, 3066 + mmc_dev(host->mmc), 3067 3067 host->bounce_addr, 3068 3068 host->bounce_buffer_size, 3069 3069 DMA_FROM_DEVICE); ··· 3074 3074 } else { 3075 3075 /* No copying, just switch ownership */ 3076 3076 dma_sync_single_for_cpu( 3077 - host->mmc->parent, 3077 + mmc_dev(host->mmc), 3078 3078 host->bounce_addr, 3079 3079 host->bounce_buffer_size, 3080 3080 mmc_get_dma_dir(data)); ··· 4053 4053 * speedups by the help of a bounce buffer to group scattered 4054 4054 * reads/writes together. 4055 4055 */ 4056 - host->bounce_buffer = devm_kmalloc(mmc->parent, 4056 + host->bounce_buffer = devm_kmalloc(mmc_dev(mmc), 4057 4057 bounce_size, 4058 4058 GFP_KERNEL); 4059 4059 if (!host->bounce_buffer) { ··· 4067 4067 return; 4068 4068 } 4069 4069 4070 - host->bounce_addr = dma_map_single(mmc->parent, 4070 + host->bounce_addr = dma_map_single(mmc_dev(mmc), 4071 4071 host->bounce_buffer, 4072 4072 bounce_size, 4073 4073 DMA_BIDIRECTIONAL); 4074 - ret = dma_mapping_error(mmc->parent, host->bounce_addr); 4074 + ret = dma_mapping_error(mmc_dev(mmc), host->bounce_addr); 4075 4075 if (ret) 4076 4076 /* Again fall back to max_segs == 1 */ 4077 4077 return;
+1 -1
drivers/mmc/host/sdhci_am654.c
··· 558 558 struct cqhci_host *cq_host; 559 559 int ret; 560 560 561 - cq_host = devm_kzalloc(host->mmc->parent, sizeof(struct cqhci_host), 561 + cq_host = devm_kzalloc(mmc_dev(host->mmc), sizeof(struct cqhci_host), 562 562 GFP_KERNEL); 563 563 if (!cq_host) 564 564 return -ENOMEM;