Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Tags: kernel, os, linux

Merge tag 'mmc-v6.9' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc

Pull MMC updates from Ulf Hansson:
"MMC core:
- Drop the use of BLK_BOUNCE_HIGH
- Fix partition switch for GP3
- Remove usage of the deprecated ida_simple API

MMC host:
- cqhci: Update bouncing email-addresses in MAINTAINERS
- davinci_mmc: Use sg_miter for PIO
- dw_mmc-hi3798cv200: Convert the DT bindings to YAML
- dw_mmc-hi3798mv200: Add driver for the new dw_mmc variant
- fsl-imx-esdhc: A couple of corrections/updates to the DT bindings
- meson-mx-sdhc: Drop use of the ->card_hw_reset() callback
- moxart-mmc: Use sg_miter for PIO
- moxart-mmc: Fix accounting for DMA transfers
- mvsdio: Use sg_miter for PIO
- mxcmmc: Use sg_miter for PIO
- omap: Use sg_miter for PIO
- renesas,sdhi: Add support for R-Car V4M variant
- sdhci-esdhc-mcf: Use sg_miter for swapping
- sdhci-of-dwcmshc: Add support for Sophgo CV1800B and SG2002 variants
- sh_mmcif: Use sg_miter for PIO
- tmio: Avoid concurrent runs of mmc_request_done()"

* tag 'mmc-v6.9' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc: (44 commits)
mmc: core: make mmc_host_class constant
mmc: core: Fix switch on gp3 partition
mmc: tmio: comment the ERR_PTR usage in this driver
mmc: mmc_spi: Don't mention DMA direction
mmc: dw_mmc: Remove unused of_gpio.h
mmc: dw_mmc: add support for hi3798mv200
dt-bindings: mmc: hisilicon,hi3798cv200-dw-mshc: add Hi3798MV200 binding
dt-bindings: mmc: dw-mshc-hi3798cv200: convert to YAML
mmc: dw_mmc-hi3798cv200: remove MODULE_ALIAS()
mmc: core: Use a struct device* as in-param to mmc_of_parse_clk_phase()
mmc: wmt-sdmmc: remove an incorrect release_mem_region() call in the .remove function
mmc: tmio: avoid concurrent runs of mmc_request_done()
dt-bindings: mmc: fsl-imx-mmc: Document the required clocks
mmc: sh_mmcif: Advance sg_miter before reading blocks
mmc: sh_mmcif: sg_miter must not be atomic
mmc: sdhci-esdhc-mcf: Flag the sg_miter as atomic
dt-bindings: mmc: fsl-imx-esdhc: add default and 100mhz state
mmc: core: constify the struct device_type usage
mmc: sdhci-of-dwcmshc: Add support for Sophgo CV1800B and SG2002
dt-bindings: mmc: sdhci-of-dwcmhsc: Add Sophgo CV1800B and SG2002 support
...

Diffstat (total): +791 -319
+10 -1
Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.yaml
··· 55 55 - enum: 56 56 - fsl,imx8mn-usdhc 57 57 - fsl,imx8mp-usdhc 58 - - fsl,imx93-usdhc 59 58 - fsl,imx8ulp-usdhc 59 + - fsl,imx93-usdhc 60 + - fsl,imx95-usdhc 60 61 - const: fsl,imx8mm-usdhc 61 62 - items: 62 63 - enum: ··· 163 162 - const: ahb 164 163 - const: per 165 164 165 + iommus: 166 + maxItems: 1 167 + 166 168 power-domains: 167 169 maxItems: 1 168 170 ··· 176 172 - const: default 177 173 - const: state_100mhz 178 174 - const: state_200mhz 175 + - const: sleep 176 + - minItems: 2 177 + items: 178 + - const: default 179 + - const: state_100mhz 179 180 - const: sleep 180 181 - minItems: 1 181 182 items:
+12
Documentation/devicetree/bindings/mmc/fsl-imx-mmc.yaml
··· 24 24 reg: 25 25 maxItems: 1 26 26 27 + clocks: 28 + maxItems: 2 29 + 30 + clock-names: 31 + items: 32 + - const: ipg 33 + - const: per 34 + 27 35 interrupts: 28 36 maxItems: 1 29 37 ··· 42 34 const: rx-tx 43 35 44 36 required: 37 + - clocks 38 + - clock-names 45 39 - compatible 46 40 - reg 47 41 - interrupts ··· 56 46 compatible = "fsl,imx27-mmc", "fsl,imx21-mmc"; 57 47 reg = <0x10014000 0x1000>; 58 48 interrupts = <11>; 49 + clocks = <&clks 29>, <&clks 60>; 50 + clock-names = "ipg", "per"; 59 51 dmas = <&dma 7>; 60 52 dma-names = "rx-tx"; 61 53 bus-width = <4>;
-40
Documentation/devicetree/bindings/mmc/hi3798cv200-dw-mshc.txt
··· 1 - * Hisilicon Hi3798CV200 specific extensions to the Synopsys Designware Mobile 2 - Storage Host Controller 3 - 4 - Read synopsys-dw-mshc.txt for more details 5 - 6 - The Synopsys designware mobile storage host controller is used to interface 7 - a SoC with storage medium such as eMMC or SD/MMC cards. This file documents 8 - differences between the core Synopsys dw mshc controller properties described 9 - by synopsys-dw-mshc.txt and the properties used by the Hisilicon Hi3798CV200 10 - specific extensions to the Synopsys Designware Mobile Storage Host Controller. 11 - 12 - Required Properties: 13 - - compatible: Should contain "hisilicon,hi3798cv200-dw-mshc". 14 - - clocks: A list of phandle + clock-specifier pairs for the clocks listed 15 - in clock-names. 16 - - clock-names: Should contain the following: 17 - "ciu" - The ciu clock described in synopsys-dw-mshc.txt. 18 - "biu" - The biu clock described in synopsys-dw-mshc.txt. 19 - "ciu-sample" - Hi3798CV200 extended phase clock for ciu sampling. 20 - "ciu-drive" - Hi3798CV200 extended phase clock for ciu driving. 21 - 22 - Example: 23 - 24 - emmc: mmc@9830000 { 25 - compatible = "hisilicon,hi3798cv200-dw-mshc"; 26 - reg = <0x9830000 0x10000>; 27 - interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>; 28 - clocks = <&crg HISTB_MMC_CIU_CLK>, 29 - <&crg HISTB_MMC_BIU_CLK>, 30 - <&crg HISTB_MMC_SAMPLE_CLK>, 31 - <&crg HISTB_MMC_DRV_CLK>; 32 - clock-names = "ciu", "biu", "ciu-sample", "ciu-drive"; 33 - fifo-depth = <256>; 34 - clock-frequency = <200000000>; 35 - cap-mmc-highspeed; 36 - mmc-ddr-1_8v; 37 - mmc-hs200-1_8v; 38 - non-removable; 39 - bus-width = <8>; 40 - };
+97
Documentation/devicetree/bindings/mmc/hisilicon,hi3798cv200-dw-mshc.yaml
··· 1 + # SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/mmc/hisilicon,hi3798cv200-dw-mshc.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Hisilicon HiSTB SoCs specific extensions to the Synopsys DWMMC controller 8 + 9 + maintainers: 10 + - Yang Xiwen <forbidden405@outlook.com> 11 + 12 + properties: 13 + compatible: 14 + enum: 15 + - hisilicon,hi3798cv200-dw-mshc 16 + - hisilicon,hi3798mv200-dw-mshc 17 + 18 + reg: 19 + maxItems: 1 20 + 21 + interrupts: 22 + maxItems: 1 23 + 24 + clocks: 25 + items: 26 + - description: bus interface unit clock 27 + - description: card interface unit clock 28 + - description: card input sample phase clock 29 + - description: controller output drive phase clock 30 + 31 + clock-names: 32 + items: 33 + - const: ciu 34 + - const: biu 35 + - const: ciu-sample 36 + - const: ciu-drive 37 + 38 + hisilicon,sap-dll-reg: 39 + $ref: /schemas/types.yaml#/definitions/phandle-array 40 + description: | 41 + DWMMC core on Hi3798MV2x SoCs has a delay-locked-loop(DLL) attached to card data input path. 42 + It is integrated into CRG core on the SoC and has to be controlled during tuning. 
43 + items: 44 + - description: A phandle pointed to the CRG syscon node 45 + - description: Sample DLL register offset in CRG address space 46 + 47 + required: 48 + - compatible 49 + - reg 50 + - interrupts 51 + - clocks 52 + - clock-names 53 + 54 + allOf: 55 + - $ref: synopsys-dw-mshc-common.yaml# 56 + 57 + - if: 58 + properties: 59 + compatible: 60 + contains: 61 + const: hisilicon,hi3798mv200-dw-mshc 62 + then: 63 + required: 64 + - hisilicon,sap-dll-reg 65 + else: 66 + properties: 67 + hisilicon,sap-dll-reg: false 68 + 69 + unevaluatedProperties: false 70 + 71 + examples: 72 + - | 73 + #include <dt-bindings/clock/histb-clock.h> 74 + #include <dt-bindings/interrupt-controller/arm-gic.h> 75 + 76 + mmc@9830000 { 77 + compatible = "hisilicon,hi3798cv200-dw-mshc"; 78 + reg = <0x9830000 0x10000>; 79 + interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>; 80 + clocks = <&crg HISTB_MMC_CIU_CLK>, 81 + <&crg HISTB_MMC_BIU_CLK>, 82 + <&crg HISTB_MMC_SAMPLE_CLK>, 83 + <&crg HISTB_MMC_DRV_CLK>; 84 + clock-names = "ciu", "biu", "ciu-sample", "ciu-drive"; 85 + resets = <&crg 0xa0 4>; 86 + reset-names = "reset"; 87 + pinctrl-names = "default"; 88 + pinctrl-0 = <&emmc_pins_1 &emmc_pins_2 89 + &emmc_pins_3 &emmc_pins_4>; 90 + fifo-depth = <256>; 91 + clock-frequency = <200000000>; 92 + cap-mmc-highspeed; 93 + mmc-ddr-1_8v; 94 + mmc-hs200-1_8v; 95 + non-removable; 96 + bus-width = <8>; 97 + };
+1
Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml
··· 67 67 - renesas,sdhi-r8a779a0 # R-Car V3U 68 68 - renesas,sdhi-r8a779f0 # R-Car S4-8 69 69 - renesas,sdhi-r8a779g0 # R-Car V4H 70 + - renesas,sdhi-r8a779h0 # R-Car V4M 70 71 - const: renesas,rcar-gen4-sdhi # R-Car Gen4 71 72 72 73 reg:
+2
Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml
··· 19 19 - rockchip,rk3568-dwcmshc 20 20 - rockchip,rk3588-dwcmshc 21 21 - snps,dwcmshc-sdhci 22 + - sophgo,cv1800b-dwcmshc 23 + - sophgo,sg2002-dwcmshc 22 24 - thead,th1520-dwcmshc 23 25 24 26 reg:
+2 -2
MAINTAINERS
··· 7809 7809 7810 7810 EMMC CMDQ HOST CONTROLLER INTERFACE (CQHCI) DRIVER 7811 7811 M: Adrian Hunter <adrian.hunter@intel.com> 7812 - M: Ritesh Harjani <riteshh@codeaurora.org> 7813 - M: Asutosh Das <asutoshd@codeaurora.org> 7812 + M: Asutosh Das <quic_asutoshd@quicinc.com> 7813 + R: Ritesh Harjani <ritesh.list@gmail.com> 7814 7814 L: linux-mmc@vger.kernel.org 7815 7815 S: Supported 7816 7816 F: drivers/mmc/host/cqhci*
+1 -1
drivers/memstick/core/memstick.c
··· 164 164 }; 165 165 ATTRIBUTE_GROUPS(memstick_dev); 166 166 167 - static struct bus_type memstick_bus_type = { 167 + static const struct bus_type memstick_bus_type = { 168 168 .name = "memstick", 169 169 .dev_groups = memstick_dev_groups, 170 170 .match = memstick_bus_match,
+13 -11
drivers/mmc/core/block.c
··· 144 144 static dev_t mmc_rpmb_devt; 145 145 146 146 /* Bus type for RPMB character devices */ 147 - static struct bus_type mmc_rpmb_bus_type = { 147 + static const struct bus_type mmc_rpmb_bus_type = { 148 148 .name = "mmc_rpmb", 149 149 }; 150 150 ··· 206 206 int devidx; 207 207 208 208 devidx = mmc_get_devidx(md->disk); 209 - ida_simple_remove(&mmc_blk_ida, devidx); 209 + ida_free(&mmc_blk_ida, devidx); 210 210 211 211 mutex_lock(&open_lock); 212 212 md->disk->private_data = NULL; ··· 874 874 static int mmc_blk_part_switch_pre(struct mmc_card *card, 875 875 unsigned int part_type) 876 876 { 877 - const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB; 877 + const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK; 878 + const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB; 878 879 int ret = 0; 879 880 880 - if ((part_type & mask) == mask) { 881 + if ((part_type & mask) == rpmb) { 881 882 if (card->ext_csd.cmdq_en) { 882 883 ret = mmc_cmdq_disable(card); 883 884 if (ret) ··· 893 892 static int mmc_blk_part_switch_post(struct mmc_card *card, 894 893 unsigned int part_type) 895 894 { 896 - const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB; 895 + const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK; 896 + const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB; 897 897 int ret = 0; 898 898 899 - if ((part_type & mask) == mask) { 899 + if ((part_type & mask) == rpmb) { 900 900 mmc_retune_unpause(card->host); 901 901 if (card->reenable_cmdq && !card->ext_csd.cmdq_en) 902 902 ret = mmc_cmdq_enable(card); ··· 2469 2467 bool cache_enabled = false; 2470 2468 bool fua_enabled = false; 2471 2469 2472 - devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL); 2470 + devidx = ida_alloc_max(&mmc_blk_ida, max_devices - 1, GFP_KERNEL); 2473 2471 if (devidx < 0) { 2474 2472 /* 2475 2473 * We get -ENOSPC because there are no more any available ··· 2579 2577 err_kfree: 2580 2578 kfree(md); 2581 2579 out: 2582 - ida_simple_remove(&mmc_blk_ida, devidx); 2580 + 
ida_free(&mmc_blk_ida, devidx); 2583 2581 return ERR_PTR(ret); 2584 2582 } 2585 2583 ··· 2705 2703 { 2706 2704 struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev); 2707 2705 2708 - ida_simple_remove(&mmc_rpmb_ida, rpmb->id); 2706 + ida_free(&mmc_rpmb_ida, rpmb->id); 2709 2707 kfree(rpmb); 2710 2708 } 2711 2709 ··· 2721 2719 struct mmc_rpmb_data *rpmb; 2722 2720 2723 2721 /* This creates the minor number for the RPMB char device */ 2724 - devidx = ida_simple_get(&mmc_rpmb_ida, 0, max_devices, GFP_KERNEL); 2722 + devidx = ida_alloc_max(&mmc_rpmb_ida, max_devices - 1, GFP_KERNEL); 2725 2723 if (devidx < 0) 2726 2724 return devidx; 2727 2725 2728 2726 rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL); 2729 2727 if (!rpmb) { 2730 - ida_simple_remove(&mmc_rpmb_ida, devidx); 2728 + ida_free(&mmc_rpmb_ida, devidx); 2731 2729 return -ENOMEM; 2732 2730 } 2733 2731
+2 -2
drivers/mmc/core/bus.c
··· 214 214 SET_SYSTEM_SLEEP_PM_OPS(mmc_bus_suspend, mmc_bus_resume) 215 215 }; 216 216 217 - static struct bus_type mmc_bus_type = { 217 + static const struct bus_type mmc_bus_type = { 218 218 .name = "mmc", 219 219 .dev_groups = mmc_dev_groups, 220 220 .uevent = mmc_bus_uevent, ··· 272 272 /* 273 273 * Allocate and initialise a new MMC card structure. 274 274 */ 275 - struct mmc_card *mmc_alloc_card(struct mmc_host *host, struct device_type *type) 275 + struct mmc_card *mmc_alloc_card(struct mmc_host *host, const struct device_type *type) 276 276 { 277 277 struct mmc_card *card; 278 278
+1 -1
drivers/mmc/core/bus.h
··· 23 23 static DEVICE_ATTR(name, S_IRUGO, mmc_##name##_show, NULL) 24 24 25 25 struct mmc_card *mmc_alloc_card(struct mmc_host *host, 26 - struct device_type *type); 26 + const struct device_type *type); 27 27 int mmc_add_card(struct mmc_card *card); 28 28 void mmc_remove_card(struct mmc_card *card); 29 29
+5 -6
drivers/mmc/core/host.c
··· 76 76 struct mmc_host *host = cls_dev_to_mmc_host(dev); 77 77 wakeup_source_unregister(host->ws); 78 78 if (of_alias_get_id(host->parent->of_node, "mmc") < 0) 79 - ida_simple_remove(&mmc_host_ida, host->index); 79 + ida_free(&mmc_host_ida, host->index); 80 80 kfree(host); 81 81 } 82 82 ··· 88 88 return 0; 89 89 } 90 90 91 - static struct class mmc_host_class = { 91 + static const struct class mmc_host_class = { 92 92 .name = "mmc_host", 93 93 .dev_release = mmc_host_classdev_release, 94 94 .shutdown_pre = mmc_host_classdev_shutdown, ··· 234 234 } 235 235 236 236 void 237 - mmc_of_parse_clk_phase(struct mmc_host *host, struct mmc_clk_phase_map *map) 237 + mmc_of_parse_clk_phase(struct device *dev, struct mmc_clk_phase_map *map) 238 238 { 239 - struct device *dev = host->parent; 240 - 241 239 mmc_of_parse_timing_phase(dev, "clk-phase-legacy", 242 240 &map->phase[MMC_TIMING_LEGACY]); 243 241 mmc_of_parse_timing_phase(dev, "clk-phase-mmc-hs", ··· 536 538 min_idx = mmc_first_nonreserved_index(); 537 539 max_idx = 0; 538 540 539 - index = ida_simple_get(&mmc_host_ida, min_idx, max_idx, GFP_KERNEL); 541 + index = ida_alloc_range(&mmc_host_ida, min_idx, max_idx - 1, 542 + GFP_KERNEL); 540 543 if (index < 0) { 541 544 kfree(host); 542 545 return NULL;
+1 -1
drivers/mmc/core/mmc.c
··· 883 883 }; 884 884 ATTRIBUTE_GROUPS(mmc_std); 885 885 886 - static struct device_type mmc_type = { 886 + static const struct device_type mmc_type = { 887 887 .groups = mmc_std_groups, 888 888 }; 889 889
-3
drivers/mmc/core/queue.c
··· 353 353 if (mmc_can_erase(card)) 354 354 mmc_queue_setup_discard(card, &lim); 355 355 356 - if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask) 357 - lim.bounce = BLK_BOUNCE_HIGH; 358 - 359 356 lim.max_hw_sectors = min(host->max_blk_count, host->max_req_size / 512); 360 357 361 358 if (mmc_card_mmc(card) && card->ext_csd.data_sector_size)
+1 -1
drivers/mmc/core/sd.c
··· 805 805 }; 806 806 __ATTRIBUTE_GROUPS(sd_std); 807 807 808 - struct device_type sd_type = { 808 + const struct device_type sd_type = { 809 809 .groups = sd_std_groups, 810 810 }; 811 811
+1 -1
drivers/mmc/core/sd.h
··· 4 4 5 5 #include <linux/types.h> 6 6 7 - extern struct device_type sd_type; 7 + extern const struct device_type sd_type; 8 8 9 9 struct mmc_host; 10 10 struct mmc_card;
+1 -1
drivers/mmc/core/sdio.c
··· 66 66 }; 67 67 ATTRIBUTE_GROUPS(sdio_std); 68 68 69 - static struct device_type sdio_type = { 69 + static const struct device_type sdio_type = { 70 70 .groups = sdio_std_groups, 71 71 }; 72 72
+1 -1
drivers/mmc/core/sdio_bus.c
··· 244 244 ) 245 245 }; 246 246 247 - static struct bus_type sdio_bus_type = { 247 + static const struct bus_type sdio_bus_type = { 248 248 .name = "sdio", 249 249 .dev_groups = sdio_dev_groups, 250 250 .match = sdio_bus_match,
+9
drivers/mmc/host/Kconfig
··· 798 798 Synopsys DesignWare Memory Card Interface driver. Select this option 799 799 for platforms based on HiSilicon Hi3798CV200 SoC. 800 800 801 + config MMC_DW_HI3798MV200 802 + tristate "Hi3798MV200 specific extensions for Synopsys DW Memory Card Interface" 803 + depends on MMC_DW 804 + select MMC_DW_PLTFM 805 + help 806 + This selects support for HiSilicon Hi3798MV200 SoC specific extensions to the 807 + Synopsys DesignWare Memory Card Interface driver. Select this option 808 + for platforms based on HiSilicon Hi3798MV200 SoC. 809 + 801 810 config MMC_DW_K3 802 811 tristate "K3 specific extensions for Synopsys DW Memory Card Interface" 803 812 depends on MMC_DW
+1
drivers/mmc/host/Makefile
··· 51 51 obj-$(CONFIG_MMC_DW_BLUEFIELD) += dw_mmc-bluefield.o 52 52 obj-$(CONFIG_MMC_DW_EXYNOS) += dw_mmc-exynos.o 53 53 obj-$(CONFIG_MMC_DW_HI3798CV200) += dw_mmc-hi3798cv200.o 54 + obj-$(CONFIG_MMC_DW_HI3798MV200) += dw_mmc-hi3798mv200.o 54 55 obj-$(CONFIG_MMC_DW_K3) += dw_mmc-k3.o 55 56 obj-$(CONFIG_MMC_DW_PCI) += dw_mmc-pci.o 56 57 obj-$(CONFIG_MMC_DW_ROCKCHIP) += dw_mmc-rockchip.o
+26 -33
drivers/mmc/host/davinci_mmc.c
··· 180 180 #define DAVINCI_MMC_DATADIR_WRITE 2 181 181 unsigned char data_dir; 182 182 183 - /* buffer is used during PIO of one scatterlist segment, and 184 - * is updated along with buffer_bytes_left. bytes_left applies 185 - * to all N blocks of the PIO transfer. 186 - */ 187 - u8 *buffer; 188 - u32 buffer_bytes_left; 189 183 u32 bytes_left; 190 184 191 185 struct dma_chan *dma_tx; ··· 190 196 bool active_request; 191 197 192 198 /* For PIO we walk scatterlists one segment at a time. */ 199 + struct sg_mapping_iter sg_miter; 193 200 unsigned int sg_len; 194 - struct scatterlist *sg; 195 201 196 202 /* Version of the MMC/SD controller */ 197 203 u8 version; ··· 207 213 static irqreturn_t mmc_davinci_irq(int irq, void *dev_id); 208 214 209 215 /* PIO only */ 210 - static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host) 211 - { 212 - host->buffer_bytes_left = sg_dma_len(host->sg); 213 - host->buffer = sg_virt(host->sg); 214 - if (host->buffer_bytes_left > host->bytes_left) 215 - host->buffer_bytes_left = host->bytes_left; 216 - } 217 - 218 216 static void davinci_fifo_data_trans(struct mmc_davinci_host *host, 219 217 unsigned int n) 220 218 { 219 + struct sg_mapping_iter *sgm = &host->sg_miter; 221 220 u8 *p; 222 221 unsigned int i; 223 222 224 - if (host->buffer_bytes_left == 0) { 225 - host->sg = sg_next(host->data->sg); 226 - mmc_davinci_sg_to_buf(host); 223 + /* 224 + * By adjusting sgm->consumed this will give a pointer to the 225 + * current index into the sgm. 226 + */ 227 + if (!sg_miter_next(sgm)) { 228 + dev_err(mmc_dev(host->mmc), "ran out of sglist prematurely\n"); 229 + return; 227 230 } 228 - 229 - p = host->buffer; 230 - if (n > host->buffer_bytes_left) 231 - n = host->buffer_bytes_left; 232 - host->buffer_bytes_left -= n; 233 - host->bytes_left -= n; 231 + p = sgm->addr; 234 232 235 233 /* NOTE: we never transfer more than rw_threshold bytes 236 234 * to/from the fifo here; there's no I/O overlap. 
··· 247 261 p = p + (n & 3); 248 262 } 249 263 } 250 - host->buffer = p; 264 + 265 + sgm->consumed = n; 266 + host->bytes_left -= n; 251 267 } 252 268 253 269 static void mmc_davinci_start_command(struct mmc_davinci_host *host, ··· 505 517 int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0; 506 518 int timeout; 507 519 struct mmc_data *data = req->data; 520 + unsigned int flags = SG_MITER_ATOMIC; /* Used from IRQ */ 508 521 509 522 if (host->version == MMC_CTLR_VERSION_2) 510 523 fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0; ··· 534 545 535 546 /* Configure the FIFO */ 536 547 if (data->flags & MMC_DATA_WRITE) { 548 + flags |= SG_MITER_FROM_SG; 537 549 host->data_dir = DAVINCI_MMC_DATADIR_WRITE; 538 550 writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST, 539 551 host->base + DAVINCI_MMCFIFOCTL); 540 552 writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR, 541 553 host->base + DAVINCI_MMCFIFOCTL); 542 554 } else { 555 + flags |= SG_MITER_TO_SG; 543 556 host->data_dir = DAVINCI_MMC_DATADIR_READ; 544 557 writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST, 545 558 host->base + DAVINCI_MMCFIFOCTL); ··· 549 558 host->base + DAVINCI_MMCFIFOCTL); 550 559 } 551 560 552 - host->buffer = NULL; 553 561 host->bytes_left = data->blocks * data->blksz; 554 562 555 563 /* For now we try to use DMA whenever we won't need partial FIFO ··· 566 576 } else { 567 577 /* Revert to CPU Copy */ 568 578 host->sg_len = data->sg_len; 569 - host->sg = host->data->sg; 570 - mmc_davinci_sg_to_buf(host); 579 + sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 571 580 } 572 581 } 573 582 ··· 832 843 { 833 844 mmc_davinci_reset_ctrl(host, 1); 834 845 mmc_davinci_reset_ctrl(host, 0); 846 + if (!host->do_dma) 847 + sg_miter_stop(&host->sg_miter); 835 848 } 836 849 837 850 static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id) ··· 910 919 if (qstatus & MMCST0_DATDNE) { 911 920 /* All blocks sent/received, and CRC checks passed */ 912 921 if (data != 
NULL) { 913 - if ((host->do_dma == 0) && (host->bytes_left > 0)) { 914 - /* if datasize < rw_threshold 915 - * no RX ints are generated 916 - */ 917 - davinci_fifo_data_trans(host, host->bytes_left); 922 + if (!host->do_dma) { 923 + if (host->bytes_left > 0) 924 + /* if datasize < rw_threshold 925 + * no RX ints are generated 926 + */ 927 + davinci_fifo_data_trans(host, host->bytes_left); 928 + sg_miter_stop(&host->sg_miter); 918 929 } 919 930 end_transfer = 1; 920 931 data->bytes_xfered = data->blocks * data->blksz;
-1
drivers/mmc/host/dw_mmc-exynos.c
··· 11 11 #include <linux/mmc/host.h> 12 12 #include <linux/mmc/mmc.h> 13 13 #include <linux/of.h> 14 - #include <linux/of_gpio.h> 15 14 #include <linux/pm_runtime.h> 16 15 #include <linux/slab.h> 17 16
-1
drivers/mmc/host/dw_mmc-hi3798cv200.c
··· 201 201 202 202 MODULE_DESCRIPTION("HiSilicon Hi3798CV200 Specific DW-MSHC Driver Extension"); 203 203 MODULE_LICENSE("GPL v2"); 204 - MODULE_ALIAS("platform:dwmmc_hi3798cv200");
+251
drivers/mmc/host/dw_mmc-hi3798mv200.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Modified from dw_mmc-hi3798cv200.c 4 + * 5 + * Copyright (c) 2024 Yang Xiwen <forbidden405@outlook.com> 6 + * Copyright (c) 2018 HiSilicon Technologies Co., Ltd. 7 + */ 8 + 9 + #include <linux/clk.h> 10 + #include <linux/mfd/syscon.h> 11 + #include <linux/mmc/host.h> 12 + #include <linux/module.h> 13 + #include <linux/of_address.h> 14 + #include <linux/platform_device.h> 15 + #include <linux/regmap.h> 16 + 17 + #include "dw_mmc.h" 18 + #include "dw_mmc-pltfm.h" 19 + 20 + #define SDMMC_TUNING_CTRL 0x118 21 + #define SDMMC_TUNING_FIND_EDGE BIT(5) 22 + 23 + #define ALL_INT_CLR 0x1ffff 24 + 25 + /* DLL ctrl reg */ 26 + #define SAP_DLL_CTRL_DLLMODE BIT(16) 27 + 28 + struct dw_mci_hi3798mv200_priv { 29 + struct clk *sample_clk; 30 + struct clk *drive_clk; 31 + struct regmap *crg_reg; 32 + u32 sap_dll_offset; 33 + struct mmc_clk_phase_map phase_map; 34 + }; 35 + 36 + static void dw_mci_hi3798mv200_set_ios(struct dw_mci *host, struct mmc_ios *ios) 37 + { 38 + struct dw_mci_hi3798mv200_priv *priv = host->priv; 39 + struct mmc_clk_phase phase = priv->phase_map.phase[ios->timing]; 40 + u32 val; 41 + 42 + val = mci_readl(host, ENABLE_SHIFT); 43 + if (ios->timing == MMC_TIMING_MMC_DDR52 44 + || ios->timing == MMC_TIMING_UHS_DDR50) 45 + val |= SDMMC_ENABLE_PHASE; 46 + else 47 + val &= ~SDMMC_ENABLE_PHASE; 48 + mci_writel(host, ENABLE_SHIFT, val); 49 + 50 + val = mci_readl(host, DDR_REG); 51 + if (ios->timing == MMC_TIMING_MMC_HS400) 52 + val |= SDMMC_DDR_HS400; 53 + else 54 + val &= ~SDMMC_DDR_HS400; 55 + mci_writel(host, DDR_REG, val); 56 + 57 + if (clk_set_rate(host->ciu_clk, ios->clock)) 58 + dev_warn(host->dev, "Failed to set rate to %u\n", ios->clock); 59 + else 60 + /* 61 + * CLK_MUX_ROUND_NEAREST is enabled for this clock 62 + * The actual clock rate is not what we set, but a rounded value 63 + * so we should get the rate once again 64 + */ 65 + host->bus_hz = clk_get_rate(host->ciu_clk); 66 + 67 + if 
(phase.valid) { 68 + clk_set_phase(priv->drive_clk, phase.out_deg); 69 + clk_set_phase(priv->sample_clk, phase.in_deg); 70 + } else { 71 + dev_warn(host->dev, 72 + "The phase entry for timing mode %d is missing in device tree.\n", 73 + ios->timing); 74 + } 75 + } 76 + 77 + static inline int dw_mci_hi3798mv200_enable_tuning(struct dw_mci_slot *slot) 78 + { 79 + struct dw_mci_hi3798mv200_priv *priv = slot->host->priv; 80 + 81 + return regmap_clear_bits(priv->crg_reg, priv->sap_dll_offset, SAP_DLL_CTRL_DLLMODE); 82 + } 83 + 84 + static inline int dw_mci_hi3798mv200_disable_tuning(struct dw_mci_slot *slot) 85 + { 86 + struct dw_mci_hi3798mv200_priv *priv = slot->host->priv; 87 + 88 + return regmap_set_bits(priv->crg_reg, priv->sap_dll_offset, SAP_DLL_CTRL_DLLMODE); 89 + } 90 + 91 + static int dw_mci_hi3798mv200_execute_tuning_mix_mode(struct dw_mci_slot *slot, 92 + u32 opcode) 93 + { 94 + static const int degrees[] = { 0, 45, 90, 135, 180, 225, 270, 315 }; 95 + struct dw_mci *host = slot->host; 96 + struct dw_mci_hi3798mv200_priv *priv = host->priv; 97 + int raise_point = -1, fall_point = -1, mid; 98 + int err, prev_err = -1; 99 + int found = 0; 100 + int regval; 101 + int i; 102 + int ret; 103 + 104 + ret = dw_mci_hi3798mv200_enable_tuning(slot); 105 + if (ret < 0) 106 + return ret; 107 + 108 + for (i = 0; i < ARRAY_SIZE(degrees); i++) { 109 + clk_set_phase(priv->sample_clk, degrees[i]); 110 + mci_writel(host, RINTSTS, ALL_INT_CLR); 111 + 112 + /* 113 + * HiSilicon implemented a tuning mechanism. 114 + * It needs special interaction with the DLL. 115 + * 116 + * Treat edge(flip) found as an error too. 
117 + */ 118 + err = mmc_send_tuning(slot->mmc, opcode, NULL); 119 + regval = mci_readl(host, TUNING_CTRL); 120 + if (err || (regval & SDMMC_TUNING_FIND_EDGE)) 121 + err = 1; 122 + else 123 + found = 1; 124 + 125 + if (i > 0) { 126 + if (err && !prev_err) 127 + fall_point = i - 1; 128 + if (!err && prev_err) 129 + raise_point = i; 130 + } 131 + 132 + if (raise_point != -1 && fall_point != -1) 133 + goto tuning_out; 134 + 135 + prev_err = err; 136 + err = 0; 137 + } 138 + 139 + tuning_out: 140 + ret = dw_mci_hi3798mv200_disable_tuning(slot); 141 + if (ret < 0) 142 + return ret; 143 + 144 + if (found) { 145 + if (raise_point == -1) 146 + raise_point = 0; 147 + if (fall_point == -1) 148 + fall_point = ARRAY_SIZE(degrees) - 1; 149 + if (fall_point < raise_point) { 150 + if ((raise_point + fall_point) > 151 + (ARRAY_SIZE(degrees) - 1)) 152 + mid = fall_point / 2; 153 + else 154 + mid = (raise_point + ARRAY_SIZE(degrees) - 1) / 2; 155 + } else { 156 + mid = (raise_point + fall_point) / 2; 157 + } 158 + 159 + /* 160 + * We don't care what timing we are tuning for, 161 + * simply use the same phase for all timing needs tuning. 
162 + */ 163 + priv->phase_map.phase[MMC_TIMING_MMC_HS200].in_deg = degrees[mid]; 164 + priv->phase_map.phase[MMC_TIMING_MMC_HS400].in_deg = degrees[mid]; 165 + priv->phase_map.phase[MMC_TIMING_UHS_SDR104].in_deg = degrees[mid]; 166 + 167 + clk_set_phase(priv->sample_clk, degrees[mid]); 168 + dev_dbg(host->dev, "Tuning clk_sample[%d, %d], set[%d]\n", 169 + raise_point, fall_point, degrees[mid]); 170 + ret = 0; 171 + } else { 172 + dev_err(host->dev, "No valid clk_sample shift!\n"); 173 + ret = -EINVAL; 174 + } 175 + 176 + mci_writel(host, RINTSTS, ALL_INT_CLR); 177 + 178 + return ret; 179 + } 180 + 181 + static int dw_mci_hi3798mv200_init(struct dw_mci *host) 182 + { 183 + struct dw_mci_hi3798mv200_priv *priv; 184 + struct device_node *np = host->dev->of_node; 185 + int ret; 186 + 187 + priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL); 188 + if (!priv) 189 + return -ENOMEM; 190 + 191 + mmc_of_parse_clk_phase(host->dev, &priv->phase_map); 192 + 193 + priv->sample_clk = devm_clk_get_enabled(host->dev, "ciu-sample"); 194 + if (IS_ERR(priv->sample_clk)) 195 + return dev_err_probe(host->dev, PTR_ERR(priv->sample_clk), 196 + "failed to get enabled ciu-sample clock\n"); 197 + 198 + priv->drive_clk = devm_clk_get_enabled(host->dev, "ciu-drive"); 199 + if (IS_ERR(priv->drive_clk)) 200 + return dev_err_probe(host->dev, PTR_ERR(priv->drive_clk), 201 + "failed to get enabled ciu-drive clock\n"); 202 + 203 + priv->crg_reg = syscon_regmap_lookup_by_phandle(np, "hisilicon,sap-dll-reg"); 204 + if (IS_ERR(priv->crg_reg)) 205 + return dev_err_probe(host->dev, PTR_ERR(priv->crg_reg), 206 + "failed to get CRG reg\n"); 207 + 208 + ret = of_property_read_u32_index(np, "hisilicon,sap-dll-reg", 1, &priv->sap_dll_offset); 209 + if (ret) 210 + return dev_err_probe(host->dev, ret, "failed to get sample DLL register offset\n"); 211 + 212 + host->priv = priv; 213 + return 0; 214 + } 215 + 216 + static const struct dw_mci_drv_data hi3798mv200_data = { 217 + .common_caps = 
MMC_CAP_CMD23, 218 + .init = dw_mci_hi3798mv200_init, 219 + .set_ios = dw_mci_hi3798mv200_set_ios, 220 + .execute_tuning = dw_mci_hi3798mv200_execute_tuning_mix_mode, 221 + }; 222 + 223 + static const struct of_device_id dw_mci_hi3798mv200_match[] = { 224 + { .compatible = "hisilicon,hi3798mv200-dw-mshc" }, 225 + {}, 226 + }; 227 + 228 + static int dw_mci_hi3798mv200_probe(struct platform_device *pdev) 229 + { 230 + return dw_mci_pltfm_register(pdev, &hi3798mv200_data); 231 + } 232 + 233 + static void dw_mci_hi3798mv200_remove(struct platform_device *pdev) 234 + { 235 + dw_mci_pltfm_remove(pdev); 236 + } 237 + 238 + MODULE_DEVICE_TABLE(of, dw_mci_hi3798mv200_match); 239 + static struct platform_driver dw_mci_hi3798mv200_driver = { 240 + .probe = dw_mci_hi3798mv200_probe, 241 + .remove_new = dw_mci_hi3798mv200_remove, 242 + .driver = { 243 + .name = "dwmmc_hi3798mv200", 244 + .probe_type = PROBE_PREFER_ASYNCHRONOUS, 245 + .of_match_table = dw_mci_hi3798mv200_match, 246 + }, 247 + }; 248 + module_platform_driver(dw_mci_hi3798mv200_driver); 249 + 250 + MODULE_DESCRIPTION("HiSilicon Hi3798MV200 Specific DW-MSHC Driver Extension"); 251 + MODULE_LICENSE("GPL");
-1
drivers/mmc/host/dw_mmc.c
··· 35 35 #include <linux/bitops.h> 36 36 #include <linux/regulator/consumer.h> 37 37 #include <linux/of.h> 38 - #include <linux/of_gpio.h> 39 38 #include <linux/mmc/slot-gpio.h> 40 39 41 40 #include "dw_mmc.h"
+23 -20
drivers/mmc/host/meson-mx-sdhc-clkc.c
··· 71 71 static int meson_mx_sdhc_gate_clk_hw_register(struct device *dev, 72 72 const char *name_suffix, 73 73 struct clk_hw *parent, 74 - struct clk_hw *hw) 74 + struct clk_hw *hw, 75 + struct clk_bulk_data *clk_bulk_data, 76 + u8 bulk_index) 75 77 { 76 78 struct clk_parent_data parent_data = { .hw = parent }; 79 + int ret; 77 80 78 - return meson_mx_sdhc_clk_hw_register(dev, name_suffix, &parent_data, 1, 79 - &clk_gate_ops, hw); 81 + ret = meson_mx_sdhc_clk_hw_register(dev, name_suffix, &parent_data, 1, 82 + &clk_gate_ops, hw); 83 + if (ret) 84 + return ret; 85 + 86 + clk_bulk_data[bulk_index].clk = devm_clk_hw_get_clk(dev, hw, name_suffix); 87 + if (IS_ERR(clk_bulk_data[bulk_index].clk)) 88 + return PTR_ERR(clk_bulk_data[bulk_index].clk); 89 + 90 + return 0; 80 91 } 81 92 82 93 int meson_mx_sdhc_register_clkc(struct device *dev, void __iomem *base, ··· 126 115 clkc_data->mod_clk_en.bit_idx = 15; 127 116 ret = meson_mx_sdhc_gate_clk_hw_register(dev, "mod_clk_on", 128 117 &clkc_data->div.hw, 129 - &clkc_data->mod_clk_en.hw); 118 + &clkc_data->mod_clk_en.hw, 119 + clk_bulk_data, 0); 130 120 if (ret) 131 121 return ret; 132 122 ··· 135 123 clkc_data->tx_clk_en.bit_idx = 14; 136 124 ret = meson_mx_sdhc_gate_clk_hw_register(dev, "tx_clk_on", 137 125 &clkc_data->div.hw, 138 - &clkc_data->tx_clk_en.hw); 126 + &clkc_data->tx_clk_en.hw, 127 + clk_bulk_data, 1); 139 128 if (ret) 140 129 return ret; 141 130 ··· 144 131 clkc_data->rx_clk_en.bit_idx = 13; 145 132 ret = meson_mx_sdhc_gate_clk_hw_register(dev, "rx_clk_on", 146 133 &clkc_data->div.hw, 147 - &clkc_data->rx_clk_en.hw); 134 + &clkc_data->rx_clk_en.hw, 135 + clk_bulk_data, 2); 148 136 if (ret) 149 137 return ret; 150 138 ··· 153 139 clkc_data->sd_clk_en.bit_idx = 12; 154 140 ret = meson_mx_sdhc_gate_clk_hw_register(dev, "sd_clk_on", 155 141 &clkc_data->div.hw, 156 - &clkc_data->sd_clk_en.hw); 157 - if (ret) 158 - return ret; 159 - 160 - /* 161 - * TODO: Replace clk_hw.clk with devm_clk_hw_get_clk() once that is 
162 - * available. 163 - */ 164 - clk_bulk_data[0].clk = clkc_data->mod_clk_en.hw.clk; 165 - clk_bulk_data[1].clk = clkc_data->sd_clk_en.hw.clk; 166 - clk_bulk_data[2].clk = clkc_data->tx_clk_en.hw.clk; 167 - clk_bulk_data[3].clk = clkc_data->rx_clk_en.hw.clk; 168 - 169 - return 0; 142 + &clkc_data->sd_clk_en.hw, 143 + clk_bulk_data, 3); 144 + return ret; 170 145 }
+5 -8
drivers/mmc/host/meson-mx-sdhc-mmc.c
··· 65 65 .max_register = MESON_SDHC_CLK2, 66 66 }; 67 67 68 - static void meson_mx_sdhc_hw_reset(struct mmc_host *mmc) 68 + static void meson_mx_sdhc_reset(struct meson_mx_sdhc_host *host) 69 69 { 70 - struct meson_mx_sdhc_host *host = mmc_priv(mmc); 71 - 72 70 regmap_write(host->regmap, MESON_SDHC_SRST, MESON_SDHC_SRST_MAIN_CTRL | 73 71 MESON_SDHC_SRST_RXFIFO | MESON_SDHC_SRST_TXFIFO | 74 72 MESON_SDHC_SRST_DPHY_RX | MESON_SDHC_SRST_DPHY_TX | ··· 114 116 dev_warn(mmc_dev(mmc), 115 117 "Failed to poll for CMD_BUSY while processing CMD%d\n", 116 118 host->cmd->opcode); 117 - meson_mx_sdhc_hw_reset(mmc); 119 + meson_mx_sdhc_reset(host); 118 120 } 119 121 120 122 ret = regmap_read_poll_timeout(host->regmap, MESON_SDHC_ESTA, esta, ··· 125 127 dev_warn(mmc_dev(mmc), 126 128 "Failed to poll for ESTA[13:11] while processing CMD%d\n", 127 129 host->cmd->opcode); 128 - meson_mx_sdhc_hw_reset(mmc); 130 + meson_mx_sdhc_reset(host); 129 131 } 130 132 } 131 133 ··· 493 495 } 494 496 495 497 static const struct mmc_host_ops meson_mx_sdhc_ops = { 496 - .card_hw_reset = meson_mx_sdhc_hw_reset, 497 498 .request = meson_mx_sdhc_request, 498 499 .set_ios = meson_mx_sdhc_set_ios, 499 500 .card_busy = meson_mx_sdhc_card_busy, ··· 615 618 } 616 619 617 620 if (cmd->error == -EIO || cmd->error == -ETIMEDOUT) 618 - meson_mx_sdhc_hw_reset(host->mmc); 621 + meson_mx_sdhc_reset(host); 619 622 else if (cmd->data) 620 623 /* 621 624 * Clear the FIFOs after completing data transfers to prevent ··· 725 728 { 726 729 struct meson_mx_sdhc_host *host = mmc_priv(mmc); 727 730 728 - meson_mx_sdhc_hw_reset(mmc); 731 + meson_mx_sdhc_reset(host); 729 732 730 733 regmap_write(host->regmap, MESON_SDHC_CTRL, 731 734 FIELD_PREP(MESON_SDHC_CTRL_RX_PERIOD, 0xf) |
+14 -16
drivers/mmc/host/mmc_spi.c
··· 15 15 #include <linux/slab.h> 16 16 #include <linux/module.h> 17 17 #include <linux/bio.h> 18 - #include <linux/dma-direction.h> 19 18 #include <linux/crc7.h> 20 19 #include <linux/crc-itu-t.h> 21 20 #include <linux/scatterlist.h> ··· 509 510 * so we explicitly initialize it to all ones on RX paths. 510 511 */ 511 512 static void 512 - mmc_spi_setup_data_message( 513 - struct mmc_spi_host *host, 514 - bool multiple, 515 - enum dma_data_direction direction) 513 + mmc_spi_setup_data_message(struct mmc_spi_host *host, bool multiple, bool write) 516 514 { 517 515 struct spi_transfer *t; 518 516 struct scratch *scratch = host->data; ··· 519 523 /* for reads, readblock() skips 0xff bytes before finding 520 524 * the token; for writes, this transfer issues that token. 521 525 */ 522 - if (direction == DMA_TO_DEVICE) { 526 + if (write) { 523 527 t = &host->token; 524 528 memset(t, 0, sizeof(*t)); 525 529 t->len = 1; ··· 543 547 t = &host->crc; 544 548 memset(t, 0, sizeof(*t)); 545 549 t->len = 2; 546 - if (direction == DMA_TO_DEVICE) { 550 + if (write) { 547 551 /* the actual CRC may get written later */ 548 552 t->tx_buf = &scratch->crc_val; 549 553 } else { ··· 566 570 * the next token (next data block, or STOP_TRAN). We can try to 567 571 * minimize I/O ops by using a single read to collect end-of-busy. 568 572 */ 569 - if (multiple || direction == DMA_TO_DEVICE) { 573 + if (multiple || write) { 570 574 t = &host->early_status; 571 575 memset(t, 0, sizeof(*t)); 572 - t->len = (direction == DMA_TO_DEVICE) ? sizeof(scratch->status) : 1; 576 + t->len = write ? 
sizeof(scratch->status) : 1; 573 577 t->tx_buf = host->ones; 574 578 t->rx_buf = scratch->status; 575 579 t->cs_change = 1; ··· 773 777 { 774 778 struct spi_device *spi = host->spi; 775 779 struct spi_transfer *t; 776 - enum dma_data_direction direction = mmc_get_dma_dir(data); 777 780 struct scatterlist *sg; 778 781 unsigned n_sg; 779 782 bool multiple = (data->blocks > 1); 780 - const char *write_or_read = (direction == DMA_TO_DEVICE) ? "write" : "read"; 783 + bool write = (data->flags & MMC_DATA_WRITE); 784 + const char *write_or_read = write ? "write" : "read"; 781 785 u32 clock_rate; 782 786 unsigned long timeout; 783 787 784 - mmc_spi_setup_data_message(host, multiple, direction); 788 + mmc_spi_setup_data_message(host, multiple, write); 785 789 t = &host->t; 786 790 787 791 if (t->speed_hz) ··· 803 807 804 808 /* allow pio too; we don't allow highmem */ 805 809 kmap_addr = kmap(sg_page(sg)); 806 - if (direction == DMA_TO_DEVICE) 810 + if (write) 807 811 t->tx_buf = kmap_addr + sg->offset; 808 812 else 809 813 t->rx_buf = kmap_addr + sg->offset; ··· 814 818 815 819 dev_dbg(&spi->dev, " %s block, %d bytes\n", write_or_read, t->len); 816 820 817 - if (direction == DMA_TO_DEVICE) 821 + if (write) 818 822 status = mmc_spi_writeblock(host, t, timeout); 819 823 else 820 824 status = mmc_spi_readblock(host, t, timeout); ··· 829 833 } 830 834 831 835 /* discard mappings */ 832 - if (direction == DMA_FROM_DEVICE) 836 + if (write) 837 + /* nothing to do */; 838 + else 833 839 flush_dcache_page(sg_page(sg)); 834 840 kunmap(sg_page(sg)); 835 841 ··· 848 850 * that can affect the STOP_TRAN logic. Complete (and current) 849 851 * MMC specs should sort that out before Linux starts using CMD23. 850 852 */ 851 - if (direction == DMA_TO_DEVICE && multiple) { 853 + if (write && multiple) { 852 854 struct scratch *scratch = host->data; 853 855 int tmp; 854 856 const unsigned statlen = sizeof(scratch->status);
+43 -47
drivers/mmc/host/moxart-mmc.c
··· 131 131 struct dma_async_tx_descriptor *tx_desc; 132 132 struct mmc_host *mmc; 133 133 struct mmc_request *mrq; 134 - struct scatterlist *cur_sg; 135 134 struct completion dma_complete; 136 135 struct completion pio_complete; 137 136 138 - u32 num_sg; 139 - u32 data_remain; 137 + struct sg_mapping_iter sg_miter; 140 138 u32 data_len; 141 139 u32 fifo_width; 142 140 u32 timeout; ··· 145 147 bool have_dma; 146 148 bool is_removed; 147 149 }; 148 - 149 - static inline void moxart_init_sg(struct moxart_host *host, 150 - struct mmc_data *data) 151 - { 152 - host->cur_sg = data->sg; 153 - host->num_sg = data->sg_len; 154 - host->data_remain = host->cur_sg->length; 155 - 156 - if (host->data_remain > host->data_len) 157 - host->data_remain = host->data_len; 158 - } 159 - 160 - static inline int moxart_next_sg(struct moxart_host *host) 161 - { 162 - int remain; 163 - struct mmc_data *data = host->mrq->cmd->data; 164 - 165 - host->cur_sg++; 166 - host->num_sg--; 167 - 168 - if (host->num_sg > 0) { 169 - host->data_remain = host->cur_sg->length; 170 - remain = host->data_len - data->bytes_xfered; 171 - if (remain > 0 && remain < host->data_remain) 172 - host->data_remain = remain; 173 - } 174 - 175 - return host->num_sg; 176 - } 177 150 178 151 static int moxart_wait_for_status(struct moxart_host *host, 179 152 u32 mask, u32 *status) ··· 223 254 complete(&host->dma_complete); 224 255 } 225 256 257 + static bool moxart_use_dma(struct moxart_host *host) 258 + { 259 + return (host->data_len > host->fifo_width) && host->have_dma; 260 + } 261 + 226 262 static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host) 227 263 { 228 264 u32 len, dir_slave; ··· 265 291 dma_async_issue_pending(dma_chan); 266 292 } 267 293 268 - data->bytes_xfered += host->data_remain; 269 - 270 294 wait_for_completion_interruptible_timeout(&host->dma_complete, 271 295 host->timeout); 296 + 297 + data->bytes_xfered = host->data_len; 272 298 273 299 
dma_unmap_sg(dma_chan->device->dev, 274 300 data->sg, data->sg_len, ··· 278 304 279 305 static void moxart_transfer_pio(struct moxart_host *host) 280 306 { 307 + struct sg_mapping_iter *sgm = &host->sg_miter; 281 308 struct mmc_data *data = host->mrq->cmd->data; 282 309 u32 *sgp, len = 0, remain, status; 283 310 284 311 if (host->data_len == data->bytes_xfered) 285 312 return; 286 313 287 - sgp = sg_virt(host->cur_sg); 288 - remain = host->data_remain; 314 + /* 315 + * By updating sgm->consumes this will get a proper pointer into the 316 + * buffer at any time. 317 + */ 318 + if (!sg_miter_next(sgm)) { 319 + /* This shold not happen */ 320 + dev_err(mmc_dev(host->mmc), "ran out of scatterlist prematurely\n"); 321 + data->error = -EINVAL; 322 + complete(&host->pio_complete); 323 + return; 324 + } 325 + sgp = sgm->addr; 326 + remain = sgm->length; 327 + if (remain > host->data_len) 328 + remain = host->data_len; 289 329 290 330 if (data->flags & MMC_DATA_WRITE) { 291 331 while (remain > 0) { ··· 314 326 sgp++; 315 327 len += 4; 316 328 } 329 + sgm->consumed += len; 317 330 remain -= len; 318 331 } 319 332 ··· 331 342 sgp++; 332 343 len += 4; 333 344 } 345 + sgm->consumed += len; 334 346 remain -= len; 335 347 } 336 348 } 337 349 338 - data->bytes_xfered += host->data_remain - remain; 339 - host->data_remain = remain; 340 - 341 - if (host->data_len != data->bytes_xfered) 342 - moxart_next_sg(host); 343 - else 350 + data->bytes_xfered += sgm->consumed; 351 + if (host->data_len == data->bytes_xfered) { 344 352 complete(&host->pio_complete); 353 + return; 354 + } 345 355 } 346 356 347 357 static void moxart_prepare_data(struct moxart_host *host) 348 358 { 349 359 struct mmc_data *data = host->mrq->cmd->data; 360 + unsigned int flags = SG_MITER_ATOMIC; /* Used from IRQ */ 350 361 u32 datactrl; 351 362 int blksz_bits; 352 363 ··· 357 368 blksz_bits = ffs(data->blksz) - 1; 358 369 BUG_ON(1 << blksz_bits != data->blksz); 359 370 360 - moxart_init_sg(host, data); 361 - 362 
371 datactrl = DCR_DATA_EN | (blksz_bits & DCR_BLK_SIZE); 363 372 364 - if (data->flags & MMC_DATA_WRITE) 373 + if (data->flags & MMC_DATA_WRITE) { 374 + flags |= SG_MITER_FROM_SG; 365 375 datactrl |= DCR_DATA_WRITE; 376 + } else { 377 + flags |= SG_MITER_TO_SG; 378 + } 366 379 367 - if ((host->data_len > host->fifo_width) && host->have_dma) 380 + if (moxart_use_dma(host)) 368 381 datactrl |= DCR_DMA_EN; 382 + else 383 + sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 369 384 370 385 writel(DCR_DATA_FIFO_RESET, host->base + REG_DATA_CONTROL); 371 386 writel(MASK_DATA | FIFO_URUN | FIFO_ORUN, host->base + REG_CLEAR); ··· 400 407 moxart_send_command(host, host->mrq->cmd); 401 408 402 409 if (mrq->cmd->data) { 403 - if ((host->data_len > host->fifo_width) && host->have_dma) { 410 + if (moxart_use_dma(host)) { 404 411 405 412 writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK); 406 413 ··· 442 449 } 443 450 444 451 request_done: 452 + if (!moxart_use_dma(host)) 453 + sg_miter_stop(&host->sg_miter); 454 + 445 455 spin_unlock_irqrestore(&host->lock, flags); 446 456 mmc_request_done(host->mmc, mrq); 447 457 }
+53 -18
drivers/mmc/host/mvsdio.c
··· 38 38 unsigned int xfer_mode; 39 39 unsigned int intr_en; 40 40 unsigned int ctrl; 41 + bool use_pio; 42 + struct sg_mapping_iter sg_miter; 41 43 unsigned int pio_size; 42 - void *pio_ptr; 43 44 unsigned int sg_frags; 44 45 unsigned int ns_per_clk; 45 46 unsigned int clock; ··· 115 114 * data when the buffer is not aligned on a 64 byte 116 115 * boundary. 117 116 */ 117 + unsigned int miter_flags = SG_MITER_ATOMIC; /* Used from IRQ */ 118 + 119 + if (data->flags & MMC_DATA_READ) 120 + miter_flags |= SG_MITER_TO_SG; 121 + else 122 + miter_flags |= SG_MITER_FROM_SG; 123 + 118 124 host->pio_size = data->blocks * data->blksz; 119 - host->pio_ptr = sg_virt(data->sg); 125 + sg_miter_start(&host->sg_miter, data->sg, data->sg_len, miter_flags); 120 126 if (!nodma) 121 - dev_dbg(host->dev, "fallback to PIO for data at 0x%p size %d\n", 122 - host->pio_ptr, host->pio_size); 127 + dev_dbg(host->dev, "fallback to PIO for data\n"); 128 + host->use_pio = true; 123 129 return 1; 124 130 } else { 125 131 dma_addr_t phys_addr; ··· 137 129 phys_addr = sg_dma_address(data->sg); 138 130 mvsd_write(MVSD_SYS_ADDR_LOW, (u32)phys_addr & 0xffff); 139 131 mvsd_write(MVSD_SYS_ADDR_HI, (u32)phys_addr >> 16); 132 + host->use_pio = false; 140 133 return 0; 141 134 } 142 135 } ··· 297 288 { 298 289 void __iomem *iobase = host->base; 299 290 300 - if (host->pio_ptr) { 301 - host->pio_ptr = NULL; 291 + if (host->use_pio) { 292 + sg_miter_stop(&host->sg_miter); 302 293 host->pio_size = 0; 303 294 } else { 304 295 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags, ··· 353 344 static irqreturn_t mvsd_irq(int irq, void *dev) 354 345 { 355 346 struct mvsd_host *host = dev; 347 + struct sg_mapping_iter *sgm = &host->sg_miter; 356 348 void __iomem *iobase = host->base; 357 349 u32 intr_status, intr_done_mask; 358 350 int irq_handled = 0; 351 + u16 *p; 352 + int s; 359 353 360 354 intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); 361 355 dev_dbg(host->dev, "intr 0x%04x intr_en 0x%04x hw_state 
0x%04x\n", ··· 382 370 spin_lock(&host->lock); 383 371 384 372 /* PIO handling, if needed. Messy business... */ 385 - if (host->pio_size && 373 + if (host->use_pio) { 374 + /* 375 + * As we set sgm->consumed this always gives a valid buffer 376 + * position. 377 + */ 378 + if (!sg_miter_next(sgm)) { 379 + /* This should not happen */ 380 + dev_err(host->dev, "ran out of scatter segments\n"); 381 + spin_unlock(&host->lock); 382 + host->intr_en &= 383 + ~(MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W | 384 + MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W); 385 + mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); 386 + return IRQ_HANDLED; 387 + } 388 + p = sgm->addr; 389 + s = sgm->length; 390 + if (s > host->pio_size) 391 + s = host->pio_size; 392 + } 393 + 394 + if (host->use_pio && 386 395 (intr_status & host->intr_en & 387 396 (MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W))) { 388 - u16 *p = host->pio_ptr; 389 - int s = host->pio_size; 397 + 390 398 while (s >= 32 && (intr_status & MVSD_NOR_RX_FIFO_8W)) { 391 399 readsw(iobase + MVSD_FIFO, p, 16); 392 400 p += 16; 393 401 s -= 32; 402 + sgm->consumed += 32; 394 403 intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); 395 404 } 396 405 /* ··· 424 391 put_unaligned(mvsd_read(MVSD_FIFO), p++); 425 392 put_unaligned(mvsd_read(MVSD_FIFO), p++); 426 393 s -= 4; 394 + sgm->consumed += 4; 427 395 intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); 428 396 } 429 397 if (s && s < 4 && (intr_status & MVSD_NOR_RX_READY)) { ··· 432 398 val[0] = mvsd_read(MVSD_FIFO); 433 399 val[1] = mvsd_read(MVSD_FIFO); 434 400 memcpy(p, ((void *)&val) + 4 - s, s); 401 + sgm->consumed += s; 435 402 s = 0; 436 403 intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); 437 404 } 438 - if (s == 0) { 405 + /* PIO transfer done */ 406 + host->pio_size -= sgm->consumed; 407 + if (host->pio_size == 0) { 439 408 host->intr_en &= 440 409 ~(MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W); 441 410 mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); ··· 450 413 } 451 414 dev_dbg(host->dev, "pio %d intr 0x%04x 
hw_state 0x%04x\n", 452 415 s, intr_status, mvsd_read(MVSD_HW_STATE)); 453 - host->pio_ptr = p; 454 - host->pio_size = s; 455 416 irq_handled = 1; 456 - } else if (host->pio_size && 417 + } else if (host->use_pio && 457 418 (intr_status & host->intr_en & 458 419 (MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W))) { 459 - u16 *p = host->pio_ptr; 460 - int s = host->pio_size; 461 420 /* 462 421 * The TX_FIFO_8W bit is unreliable. When set, bursting 463 422 * 16 halfwords all at once in the FIFO drops data. Actually ··· 464 431 mvsd_write(MVSD_FIFO, get_unaligned(p++)); 465 432 mvsd_write(MVSD_FIFO, get_unaligned(p++)); 466 433 s -= 4; 434 + sgm->consumed += 4; 467 435 intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); 468 436 } 469 437 if (s < 4) { ··· 473 439 memcpy(((void *)&val) + 4 - s, p, s); 474 440 mvsd_write(MVSD_FIFO, val[0]); 475 441 mvsd_write(MVSD_FIFO, val[1]); 442 + sgm->consumed += s; 476 443 s = 0; 477 444 intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); 478 445 } 479 - if (s == 0) { 446 + /* PIO transfer done */ 447 + host->pio_size -= sgm->consumed; 448 + if (host->pio_size == 0) { 480 449 host->intr_en &= 481 450 ~(MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W); 482 451 mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); ··· 487 450 } 488 451 dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n", 489 452 s, intr_status, mvsd_read(MVSD_HW_STATE)); 490 - host->pio_ptr = p; 491 - host->pio_size = s; 492 453 irq_handled = 1; 493 454 } 494 455
+33 -20
drivers/mmc/host/mxcmmc.c
··· 266 266 267 267 static void mxcmci_swap_buffers(struct mmc_data *data) 268 268 { 269 - struct scatterlist *sg; 270 - int i; 269 + struct sg_mapping_iter sgm; 270 + u32 *buf; 271 271 272 - for_each_sg(data->sg, sg, data->sg_len, i) 273 - buffer_swap32(sg_virt(sg), sg->length); 272 + sg_miter_start(&sgm, data->sg, data->sg_len, 273 + SG_MITER_TO_SG | SG_MITER_FROM_SG); 274 + 275 + while (sg_miter_next(&sgm)) { 276 + buf = sgm.addr; 277 + buffer_swap32(buf, sgm.length); 278 + } 279 + 280 + sg_miter_stop(&sgm); 274 281 } 275 282 #else 276 283 static inline void mxcmci_swap_buffers(struct mmc_data *data) {} ··· 533 526 } while (1); 534 527 } 535 528 536 - static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes) 529 + static int mxcmci_pull(struct mxcmci_host *host, u32 *buf, int bytes) 537 530 { 538 531 unsigned int stat; 539 - u32 *buf = _buf; 540 532 541 533 while (bytes > 3) { 542 534 stat = mxcmci_poll_status(host, ··· 561 555 return 0; 562 556 } 563 557 564 - static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes) 558 + static int mxcmci_push(struct mxcmci_host *host, u32 *buf, int bytes) 565 559 { 566 560 unsigned int stat; 567 - u32 *buf = _buf; 568 561 569 562 while (bytes > 3) { 570 563 stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY); ··· 591 586 static int mxcmci_transfer_data(struct mxcmci_host *host) 592 587 { 593 588 struct mmc_data *data = host->req->data; 594 - struct scatterlist *sg; 595 - int stat, i; 589 + struct sg_mapping_iter sgm; 590 + int stat; 591 + u32 *buf; 596 592 597 593 host->data = data; 598 594 host->datasize = 0; 595 + sg_miter_start(&sgm, data->sg, data->sg_len, 596 + (data->flags & MMC_DATA_READ) ? 
SG_MITER_TO_SG : SG_MITER_FROM_SG); 599 597 600 598 if (data->flags & MMC_DATA_READ) { 601 - for_each_sg(data->sg, sg, data->sg_len, i) { 602 - stat = mxcmci_pull(host, sg_virt(sg), sg->length); 599 + while (sg_miter_next(&sgm)) { 600 + buf = sgm.addr; 601 + stat = mxcmci_pull(host, buf, sgm.length); 603 602 if (stat) 604 - return stat; 605 - host->datasize += sg->length; 603 + goto transfer_error; 604 + host->datasize += sgm.length; 606 605 } 607 606 } else { 608 - for_each_sg(data->sg, sg, data->sg_len, i) { 609 - stat = mxcmci_push(host, sg_virt(sg), sg->length); 607 + while (sg_miter_next(&sgm)) { 608 + buf = sgm.addr; 609 + stat = mxcmci_push(host, buf, sgm.length); 610 610 if (stat) 611 - return stat; 612 - host->datasize += sg->length; 611 + goto transfer_error; 612 + host->datasize += sgm.length; 613 613 } 614 614 stat = mxcmci_poll_status(host, STATUS_WRITE_OP_DONE); 615 615 if (stat) 616 - return stat; 616 + goto transfer_error; 617 617 } 618 - return 0; 618 + 619 + transfer_error: 620 + sg_miter_stop(&sgm); 621 + return stat; 619 622 } 620 623 621 624 static void mxcmci_datawork(struct work_struct *work)
+24 -29
drivers/mmc/host/omap.c
··· 148 148 struct work_struct send_stop_work; 149 149 struct mmc_data *stop_data; 150 150 151 + struct sg_mapping_iter sg_miter; 151 152 unsigned int sg_len; 152 - int sg_idx; 153 - u16 * buffer; 154 - u32 buffer_bytes_left; 155 153 u32 total_bytes_left; 156 154 157 155 unsigned features; ··· 454 456 { 455 457 if (host->dma_in_use) 456 458 mmc_omap_release_dma(host, data, data->error); 459 + else 460 + sg_miter_stop(&host->sg_miter); 457 461 458 462 host->data = NULL; 459 463 host->sg_len = 0; ··· 651 651 spin_unlock_irqrestore(&host->slot_lock, flags); 652 652 } 653 653 654 - /* PIO only */ 655 - static void 656 - mmc_omap_sg_to_buf(struct mmc_omap_host *host) 657 - { 658 - struct scatterlist *sg; 659 - 660 - sg = host->data->sg + host->sg_idx; 661 - host->buffer_bytes_left = sg->length; 662 - host->buffer = sg_virt(sg); 663 - if (host->buffer_bytes_left > host->total_bytes_left) 664 - host->buffer_bytes_left = host->total_bytes_left; 665 - } 666 - 667 654 static void 668 655 mmc_omap_clk_timer(struct timer_list *t) 669 656 { ··· 663 676 static void 664 677 mmc_omap_xfer_data(struct mmc_omap_host *host, int write) 665 678 { 679 + struct sg_mapping_iter *sgm = &host->sg_miter; 666 680 int n, nwords; 681 + u16 *buffer; 667 682 668 - if (host->buffer_bytes_left == 0) { 669 - host->sg_idx++; 670 - BUG_ON(host->sg_idx == host->sg_len); 671 - mmc_omap_sg_to_buf(host); 683 + if (!sg_miter_next(sgm)) { 684 + /* This should not happen */ 685 + dev_err(mmc_dev(host->mmc), "ran out of scatterlist prematurely\n"); 686 + return; 672 687 } 688 + buffer = sgm->addr; 689 + 673 690 n = 64; 674 - if (n > host->buffer_bytes_left) 675 - n = host->buffer_bytes_left; 691 + if (n > sgm->length) 692 + n = sgm->length; 693 + if (n > host->total_bytes_left) 694 + n = host->total_bytes_left; 676 695 677 696 /* Round up to handle odd number of bytes to transfer */ 678 697 nwords = DIV_ROUND_UP(n, 2); 679 698 680 - host->buffer_bytes_left -= n; 699 + sgm->consumed = n; 681 700 
host->total_bytes_left -= n; 682 701 host->data->bytes_xfered += n; 683 702 684 703 if (write) { 685 704 __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), 686 - host->buffer, nwords); 705 + buffer, nwords); 687 706 } else { 688 707 __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), 689 - host->buffer, nwords); 708 + buffer, nwords); 690 709 } 691 - 692 - host->buffer += nwords; 693 710 } 694 711 695 712 #ifdef CONFIG_MMC_DEBUG ··· 947 956 static void 948 957 mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req) 949 958 { 959 + unsigned int miter_flags = SG_MITER_ATOMIC; /* Used from IRQ */ 950 960 struct mmc_data *data = req->data; 951 961 int i, use_dma = 1, block_size; 952 962 struct scatterlist *sg; ··· 982 990 } 983 991 } 984 992 985 - host->sg_idx = 0; 986 993 if (use_dma) { 987 994 enum dma_data_direction dma_data_dir; 988 995 struct dma_async_tx_descriptor *tx; ··· 1062 1071 OMAP_MMC_WRITE(host, BUF, 0x1f1f); 1063 1072 host->total_bytes_left = data->blocks * block_size; 1064 1073 host->sg_len = sg_len; 1065 - mmc_omap_sg_to_buf(host); 1074 + if (data->flags & MMC_DATA_READ) 1075 + miter_flags |= SG_MITER_TO_SG; 1076 + else 1077 + miter_flags |= SG_MITER_FROM_SG; 1078 + sg_miter_start(&host->sg_miter, data->sg, data->sg_len, miter_flags); 1066 1079 host->dma_in_use = 0; 1067 1080 } 1068 1081
+2 -1
drivers/mmc/host/renesas_sdhi.h
··· 9 9 #ifndef RENESAS_SDHI_H 10 10 #define RENESAS_SDHI_H 11 11 12 + #include <linux/dmaengine.h> 12 13 #include <linux/platform_device.h> 13 14 #include "tmio_mmc.h" 14 15 ··· 64 63 struct renesas_sdhi_dma { 65 64 unsigned long end_flags; 66 65 enum dma_slave_buswidth dma_buswidth; 67 - bool (*filter)(struct dma_chan *chan, void *arg); 66 + dma_filter_fn filter; 68 67 void (*enable)(struct tmio_mmc_host *host, bool enable); 69 68 struct completion dma_dataend; 70 69 struct tasklet_struct dma_complete;
+7 -5
drivers/mmc/host/sdhci-esdhc-mcf.c
··· 299 299 static void esdhc_mcf_request_done(struct sdhci_host *host, 300 300 struct mmc_request *mrq) 301 301 { 302 - struct scatterlist *sg; 302 + struct sg_mapping_iter sgm; 303 303 u32 *buffer; 304 - int i; 305 304 306 305 if (!mrq->data || !mrq->data->bytes_xfered) 307 306 goto exit_done; ··· 312 313 * On mcf5441x there is no hw sdma option/flag to select the dma 313 314 * transfer endiannes. A swap after the transfer is needed. 314 315 */ 315 - for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i) { 316 - buffer = (u32 *)sg_virt(sg); 317 - esdhc_mcf_buffer_swap32(buffer, sg->length); 316 + sg_miter_start(&sgm, mrq->data->sg, mrq->data->sg_len, 317 + SG_MITER_ATOMIC | SG_MITER_TO_SG | SG_MITER_FROM_SG); 318 + while (sg_miter_next(&sgm)) { 319 + buffer = sgm.addr; 320 + esdhc_mcf_buffer_swap32(buffer, sgm.length); 318 321 } 322 + sg_miter_stop(&sgm); 319 323 320 324 exit_done: 321 325 mmc_request_done(host->mmc, mrq);
+1 -1
drivers/mmc/host/sdhci-of-aspeed.c
··· 435 435 goto err_sdhci_add; 436 436 437 437 if (dev->phase_desc) 438 - mmc_of_parse_clk_phase(host->mmc, &dev->phase_map); 438 + mmc_of_parse_clk_phase(&pdev->dev, &dev->phase_map); 439 439 440 440 ret = sdhci_add_host(host); 441 441 if (ret)
+66
drivers/mmc/host/sdhci-of-dwcmshc.c
··· 52 52 #define AT_CTRL_SWIN_TH_VAL_MASK GENMASK(31, 24) /* bits [31:24] */ 53 53 #define AT_CTRL_SWIN_TH_VAL 0x9 /* sampling window threshold */ 54 54 55 + /* Sophgo CV18XX specific Registers */ 56 + #define CV18XX_SDHCI_MSHC_CTRL 0x00 57 + #define CV18XX_EMMC_FUNC_EN BIT(0) 58 + #define CV18XX_LATANCY_1T BIT(1) 59 + #define CV18XX_SDHCI_PHY_TX_RX_DLY 0x40 60 + #define CV18XX_PHY_TX_DLY_MSK GENMASK(6, 0) 61 + #define CV18XX_PHY_TX_SRC_MSK GENMASK(9, 8) 62 + #define CV18XX_PHY_TX_SRC_INVERT_CLK_TX 0x1 63 + #define CV18XX_PHY_RX_DLY_MSK GENMASK(22, 16) 64 + #define CV18XX_PHY_RX_SRC_MSK GENMASK(25, 24) 65 + #define CV18XX_PHY_RX_SRC_INVERT_RX_CLK 0x1 66 + #define CV18XX_SDHCI_PHY_CONFIG 0x4c 67 + #define CV18XX_PHY_TX_BPS BIT(0) 68 + 55 69 /* Rockchip specific Registers */ 56 70 #define DWCMSHC_EMMC_DLL_CTRL 0x800 57 71 #define DWCMSHC_EMMC_DLL_RXCLK 0x804 ··· 656 642 } 657 643 } 658 644 645 + static void cv18xx_sdhci_reset(struct sdhci_host *host, u8 mask) 646 + { 647 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 648 + struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); 649 + u32 val, emmc_caps = MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO; 650 + 651 + sdhci_reset(host, mask); 652 + 653 + if ((host->mmc->caps2 & emmc_caps) == emmc_caps) { 654 + val = sdhci_readl(host, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL); 655 + val |= CV18XX_EMMC_FUNC_EN; 656 + sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL); 657 + } 658 + 659 + val = sdhci_readl(host, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL); 660 + val |= CV18XX_LATANCY_1T; 661 + sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL); 662 + 663 + val = sdhci_readl(host, priv->vendor_specific_area1 + CV18XX_SDHCI_PHY_CONFIG); 664 + val |= CV18XX_PHY_TX_BPS; 665 + sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_PHY_CONFIG); 666 + 667 + val = (FIELD_PREP(CV18XX_PHY_TX_DLY_MSK, 0) | 668 + FIELD_PREP(CV18XX_PHY_TX_SRC_MSK, 
CV18XX_PHY_TX_SRC_INVERT_CLK_TX) | 669 + FIELD_PREP(CV18XX_PHY_RX_DLY_MSK, 0) | 670 + FIELD_PREP(CV18XX_PHY_RX_SRC_MSK, CV18XX_PHY_RX_SRC_INVERT_RX_CLK)); 671 + sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_PHY_TX_RX_DLY); 672 + } 673 + 659 674 static const struct sdhci_ops sdhci_dwcmshc_ops = { 660 675 .set_clock = sdhci_set_clock, 661 676 .set_bus_width = sdhci_set_bus_width, ··· 714 671 .platform_execute_tuning = &th1520_execute_tuning, 715 672 }; 716 673 674 + static const struct sdhci_ops sdhci_dwcmshc_cv18xx_ops = { 675 + .set_clock = sdhci_set_clock, 676 + .set_bus_width = sdhci_set_bus_width, 677 + .set_uhs_signaling = dwcmshc_set_uhs_signaling, 678 + .get_max_clock = dwcmshc_get_max_clock, 679 + .reset = cv18xx_sdhci_reset, 680 + .adma_write_desc = dwcmshc_adma_write_desc, 681 + }; 682 + 717 683 static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = { 718 684 .ops = &sdhci_dwcmshc_ops, 719 685 .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, ··· 748 696 749 697 static const struct sdhci_pltfm_data sdhci_dwcmshc_th1520_pdata = { 750 698 .ops = &sdhci_dwcmshc_th1520_ops, 699 + .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, 700 + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, 701 + }; 702 + 703 + static const struct sdhci_pltfm_data sdhci_dwcmshc_cv18xx_pdata = { 704 + .ops = &sdhci_dwcmshc_cv18xx_ops, 751 705 .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, 752 706 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, 753 707 }; ··· 825 767 { 826 768 .compatible = "snps,dwcmshc-sdhci", 827 769 .data = &sdhci_dwcmshc_pdata, 770 + }, 771 + { 772 + .compatible = "sophgo,cv1800b-dwcmshc", 773 + .data = &sdhci_dwcmshc_cv18xx_pdata, 774 + }, 775 + { 776 + .compatible = "sophgo,sg2002-dwcmshc", 777 + .data = &sdhci_dwcmshc_cv18xx_pdata, 828 778 }, 829 779 { 830 780 .compatible = "thead,th1520-dwcmshc",
+76 -38
drivers/mmc/host/sh_mmcif.c
··· 227 227 bool dying; 228 228 long timeout; 229 229 void __iomem *addr; 230 - u32 *pio_ptr; 231 230 spinlock_t lock; /* protect sh_mmcif_host::state */ 232 231 enum sh_mmcif_state state; 233 232 enum sh_mmcif_wait_for wait_for; 234 233 struct delayed_work timeout_work; 235 234 size_t blocksize; 236 - int sg_idx; 237 - int sg_blkidx; 235 + struct sg_mapping_iter sg_miter; 238 236 bool power; 239 237 bool ccs_enable; /* Command Completion Signal support */ 240 238 bool clk_ctrl2_enable; ··· 598 600 return ret; 599 601 } 600 602 601 - static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p) 602 - { 603 - struct mmc_data *data = host->mrq->data; 604 - 605 - host->sg_blkidx += host->blocksize; 606 - 607 - /* data->sg->length must be a multiple of host->blocksize? */ 608 - BUG_ON(host->sg_blkidx > data->sg->length); 609 - 610 - if (host->sg_blkidx == data->sg->length) { 611 - host->sg_blkidx = 0; 612 - if (++host->sg_idx < data->sg_len) 613 - host->pio_ptr = sg_virt(++data->sg); 614 - } else { 615 - host->pio_ptr = p; 616 - } 617 - 618 - return host->sg_idx != data->sg_len; 619 - } 620 - 621 603 static void sh_mmcif_single_read(struct sh_mmcif_host *host, 622 604 struct mmc_request *mrq) 623 605 { 606 + struct mmc_data *data = mrq->data; 607 + 624 608 host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & 625 609 BLOCK_SIZE_MASK) + 3; 610 + 611 + sg_miter_start(&host->sg_miter, data->sg, data->sg_len, 612 + SG_MITER_TO_SG); 626 613 627 614 host->wait_for = MMCIF_WAIT_FOR_READ; 628 615 ··· 617 634 618 635 static bool sh_mmcif_read_block(struct sh_mmcif_host *host) 619 636 { 637 + struct sg_mapping_iter *sgm = &host->sg_miter; 620 638 struct device *dev = sh_mmcif_host_to_dev(host); 621 639 struct mmc_data *data = host->mrq->data; 622 - u32 *p = sg_virt(data->sg); 640 + u32 *p; 623 641 int i; 624 642 625 643 if (host->sd_error) { 644 + sg_miter_stop(sgm); 626 645 data->error = sh_mmcif_error_manage(host); 627 646 dev_dbg(dev, "%s(): %d\n", 
__func__, data->error); 628 647 return false; 629 648 } 630 649 650 + if (!sg_miter_next(sgm)) { 651 + /* This should not happen on single blocks */ 652 + sg_miter_stop(sgm); 653 + return false; 654 + } 655 + 656 + p = sgm->addr; 657 + 631 658 for (i = 0; i < host->blocksize / 4; i++) 632 659 *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA); 660 + 661 + sg_miter_stop(&host->sg_miter); 633 662 634 663 /* buffer read end */ 635 664 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); ··· 653 658 static void sh_mmcif_multi_read(struct sh_mmcif_host *host, 654 659 struct mmc_request *mrq) 655 660 { 661 + struct sg_mapping_iter *sgm = &host->sg_miter; 656 662 struct mmc_data *data = mrq->data; 657 663 658 664 if (!data->sg_len || !data->sg->length) ··· 662 666 host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & 663 667 BLOCK_SIZE_MASK; 664 668 669 + sg_miter_start(sgm, data->sg, data->sg_len, 670 + SG_MITER_TO_SG); 671 + 672 + /* Advance to the first sglist entry */ 673 + if (!sg_miter_next(sgm)) { 674 + sg_miter_stop(sgm); 675 + return; 676 + } 677 + 665 678 host->wait_for = MMCIF_WAIT_FOR_MREAD; 666 - host->sg_idx = 0; 667 - host->sg_blkidx = 0; 668 - host->pio_ptr = sg_virt(data->sg); 669 679 670 680 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); 671 681 } 672 682 673 683 static bool sh_mmcif_mread_block(struct sh_mmcif_host *host) 674 684 { 685 + struct sg_mapping_iter *sgm = &host->sg_miter; 675 686 struct device *dev = sh_mmcif_host_to_dev(host); 676 687 struct mmc_data *data = host->mrq->data; 677 - u32 *p = host->pio_ptr; 688 + u32 *p; 678 689 int i; 679 690 680 691 if (host->sd_error) { 692 + sg_miter_stop(sgm); 681 693 data->error = sh_mmcif_error_manage(host); 682 694 dev_dbg(dev, "%s(): %d\n", __func__, data->error); 683 695 return false; 684 696 } 685 697 686 - BUG_ON(!data->sg->length); 698 + p = sgm->addr; 687 699 688 700 for (i = 0; i < host->blocksize / 4; i++) 689 701 *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA); 690 702 
691 - if (!sh_mmcif_next_block(host, p)) 692 - return false; 703 + sgm->consumed = host->blocksize; 693 704 694 705 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); 706 + 707 + if (!sg_miter_next(sgm)) { 708 + sg_miter_stop(sgm); 709 + return false; 710 + } 695 711 696 712 return true; 697 713 } ··· 711 703 static void sh_mmcif_single_write(struct sh_mmcif_host *host, 712 704 struct mmc_request *mrq) 713 705 { 706 + struct mmc_data *data = mrq->data; 707 + 714 708 host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & 715 709 BLOCK_SIZE_MASK) + 3; 710 + 711 + sg_miter_start(&host->sg_miter, data->sg, data->sg_len, 712 + SG_MITER_FROM_SG); 716 713 717 714 host->wait_for = MMCIF_WAIT_FOR_WRITE; 718 715 ··· 727 714 728 715 static bool sh_mmcif_write_block(struct sh_mmcif_host *host) 729 716 { 717 + struct sg_mapping_iter *sgm = &host->sg_miter; 730 718 struct device *dev = sh_mmcif_host_to_dev(host); 731 719 struct mmc_data *data = host->mrq->data; 732 - u32 *p = sg_virt(data->sg); 720 + u32 *p; 733 721 int i; 734 722 735 723 if (host->sd_error) { 724 + sg_miter_stop(sgm); 736 725 data->error = sh_mmcif_error_manage(host); 737 726 dev_dbg(dev, "%s(): %d\n", __func__, data->error); 738 727 return false; 739 728 } 740 729 730 + if (!sg_miter_next(sgm)) { 731 + /* This should not happen on single blocks */ 732 + sg_miter_stop(sgm); 733 + return false; 734 + } 735 + 736 + p = sgm->addr; 737 + 741 738 for (i = 0; i < host->blocksize / 4; i++) 742 739 sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++); 740 + 741 + sg_miter_stop(&host->sg_miter); 743 742 744 743 /* buffer write end */ 745 744 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); ··· 763 738 static void sh_mmcif_multi_write(struct sh_mmcif_host *host, 764 739 struct mmc_request *mrq) 765 740 { 741 + struct sg_mapping_iter *sgm = &host->sg_miter; 766 742 struct mmc_data *data = mrq->data; 767 743 768 744 if (!data->sg_len || !data->sg->length) ··· 772 746 host->blocksize = 
sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & 773 747 BLOCK_SIZE_MASK; 774 748 749 + sg_miter_start(sgm, data->sg, data->sg_len, 750 + SG_MITER_FROM_SG); 751 + 752 + /* Advance to the first sglist entry */ 753 + if (!sg_miter_next(sgm)) { 754 + sg_miter_stop(sgm); 755 + return; 756 + } 757 + 775 758 host->wait_for = MMCIF_WAIT_FOR_MWRITE; 776 - host->sg_idx = 0; 777 - host->sg_blkidx = 0; 778 - host->pio_ptr = sg_virt(data->sg); 779 759 780 760 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); 781 761 } 782 762 783 763 static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host) 784 764 { 765 + struct sg_mapping_iter *sgm = &host->sg_miter; 785 766 struct device *dev = sh_mmcif_host_to_dev(host); 786 767 struct mmc_data *data = host->mrq->data; 787 - u32 *p = host->pio_ptr; 768 + u32 *p; 788 769 int i; 789 770 790 771 if (host->sd_error) { 772 + sg_miter_stop(sgm); 791 773 data->error = sh_mmcif_error_manage(host); 792 774 dev_dbg(dev, "%s(): %d\n", __func__, data->error); 793 775 return false; 794 776 } 795 777 796 - BUG_ON(!data->sg->length); 778 + p = sgm->addr; 797 779 798 780 for (i = 0; i < host->blocksize / 4; i++) 799 781 sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++); 800 782 801 - if (!sh_mmcif_next_block(host, p)) 783 + sgm->consumed = host->blocksize; 784 + 785 + if (!sg_miter_next(sgm)) { 786 + sg_miter_stop(sgm); 802 787 return false; 788 + } 803 789 804 790 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); 805 791
+5 -1
drivers/mmc/host/tmio_mmc_core.c
··· 259 259 else 260 260 mrq->cmd->error = -ETIMEDOUT; 261 261 262 + /* No new calls yet, but disallow concurrent tmio_mmc_done_work() */ 263 + host->mrq = ERR_PTR(-EBUSY); 262 264 host->cmd = NULL; 263 265 host->data = NULL; 264 266 ··· 972 970 return; 973 971 } 974 972 973 + /* Disallow new mrqs and work handlers to run */ 975 974 host->mrq = ERR_PTR(-EBUSY); 976 975 977 976 spin_unlock_irqrestore(&host->lock, flags); ··· 1007 1004 "%s.%d: IOS interrupted: clk %u, mode %u", 1008 1005 current->comm, task_pid_nr(current), 1009 1006 ios->clock, ios->power_mode); 1010 - host->mrq = NULL; 1011 1007 1008 + /* Ready for new mrqs */ 1009 + host->mrq = NULL; 1012 1010 host->clk_cache = ios->clock; 1013 1011 1014 1012 mutex_unlock(&host->ios_lock);
-2
drivers/mmc/host/wbsd.c
··· 1284 1284 continue; 1285 1285 1286 1286 for (j = 0; j < ARRAY_SIZE(unlock_codes); j++) { 1287 - id = 0xFFFF; 1288 - 1289 1287 host->config = config_ports[i]; 1290 1288 host->unlock_code = unlock_codes[j]; 1291 1289
-4
drivers/mmc/host/wmt-sdmmc.c
··· 883 883 { 884 884 struct mmc_host *mmc; 885 885 struct wmt_mci_priv *priv; 886 - struct resource *res; 887 886 u32 reg_tmp; 888 887 889 888 mmc = platform_get_drvdata(pdev); ··· 909 910 910 911 clk_disable_unprepare(priv->clk_sdmmc); 911 912 clk_put(priv->clk_sdmmc); 912 - 913 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 914 - release_mem_region(res->start, resource_size(res)); 915 913 916 914 mmc_free_host(mmc); 917 915
+1 -1
include/linux/mmc/host.h
··· 539 539 int mmc_add_host(struct mmc_host *); 540 540 void mmc_remove_host(struct mmc_host *); 541 541 void mmc_free_host(struct mmc_host *); 542 - void mmc_of_parse_clk_phase(struct mmc_host *host, 542 + void mmc_of_parse_clk_phase(struct device *dev, 543 543 struct mmc_clk_phase_map *map); 544 544 int mmc_of_parse(struct mmc_host *host); 545 545 int mmc_of_parse_voltage(struct mmc_host *host, u32 *mask);