/*
 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>

#include <asm/div64.h>
#include <asm/io.h>

#include "mmci.h"
#include "mmci_qcom_dml.h"

#define DRIVER_NAME "mmci-pl18x"

#ifdef CONFIG_DMA_ENGINE
void mmci_variant_init(struct mmci_host *host);
#else
static inline void mmci_variant_init(struct mmci_host *host) {}
#endif

#ifdef CONFIG_MMC_STM32_SDMMC
void sdmmc_variant_init(struct mmci_host *host);
#else
static inline void sdmmc_variant_init(struct mmci_host *host) {}
#endif

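/*
 * Default maximum operating frequency in Hz when the platform supplies
 * none; it can be overridden via the "fmax" module parameter declared at
 * the bottom of this file, and is capped against mclk in mmci_probe().
 */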
static unsigned int fmax = 515633;

static struct variant_data variant_arm = {
	.fifosize = 16 * 4,
	.fifohalfsize = 8 * 4,
	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
	.cmdreg_srsp = MCI_CPSM_RESPONSE,
	.datalength_bits = 16,
	.datactrl_blocksz = 11,
	.datactrl_dpsm_enable = MCI_DPSM_ENABLE,
	.pwrreg_powerup = MCI_PWR_UP,
	.f_max = 100000000,
	.reversed_irq_handling = true,
	.mmcimask1 = true,
	.irq_pio_mask = MCI_IRQ_PIO_MASK,
	.start_err = MCI_STARTBITERR,
	.opendrain = MCI_ROD,
	.init = mmci_variant_init,
};

static struct variant_data variant_arm_extended_fifo = {
	.fifosize = 128 * 4,
	.fifohalfsize = 64 * 4,
	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
	.cmdreg_srsp = MCI_CPSM_RESPONSE,
	.datalength_bits = 16,
	.datactrl_blocksz = 11,
	.datactrl_dpsm_enable = MCI_DPSM_ENABLE,
	.pwrreg_powerup = MCI_PWR_UP,
	.f_max = 100000000,
	.mmcimask1 = true,
	.irq_pio_mask = MCI_IRQ_PIO_MASK,
	.start_err = MCI_STARTBITERR,
	.opendrain = MCI_ROD,
	.init = mmci_variant_init,
};

static struct variant_data variant_arm_extended_fifo_hwfc = {
	.fifosize = 128 * 4,
	.fifohalfsize = 64 * 4,
	.clkreg_enable = MCI_ARM_HWFCEN,
	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
	.cmdreg_srsp = MCI_CPSM_RESPONSE,
	.datalength_bits = 16,
	.datactrl_blocksz = 11,
	.datactrl_dpsm_enable = MCI_DPSM_ENABLE,
	.pwrreg_powerup = MCI_PWR_UP,
	.f_max = 100000000,
	.mmcimask1 = true,
	.irq_pio_mask = MCI_IRQ_PIO_MASK,
	.start_err = MCI_STARTBITERR,
	.opendrain = MCI_ROD,
	.init = mmci_variant_init,
};

static struct variant_data variant_u300 = {
	.fifosize = 16 * 4,
	.fifohalfsize = 8 * 4,
	.clkreg_enable = MCI_ST_U300_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
	.cmdreg_srsp = MCI_CPSM_RESPONSE,
	.datalength_bits = 16,
	.datactrl_blocksz = 11,
	.datactrl_dpsm_enable = MCI_DPSM_ENABLE,
	.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
	.st_sdio = true,
	.pwrreg_powerup = MCI_PWR_ON,
	.f_max = 100000000,
	.signal_direction = true,
	.pwrreg_clkgate = true,
	.pwrreg_nopower = true,
	.mmcimask1 = true,
	.irq_pio_mask = MCI_IRQ_PIO_MASK,
	.start_err = MCI_STARTBITERR,
	.opendrain = MCI_OD,
	.init = mmci_variant_init,
};

static struct variant_data variant_nomadik = {
	.fifosize = 16 * 4,
	.fifohalfsize = 8 * 4,
	.clkreg = MCI_CLK_ENABLE,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
	.cmdreg_srsp = MCI_CPSM_RESPONSE,
	.datalength_bits = 24,
	.datactrl_blocksz = 11,
	.datactrl_dpsm_enable = MCI_DPSM_ENABLE,
	.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
	.st_sdio = true,
	.st_clkdiv = true,
	.pwrreg_powerup = MCI_PWR_ON,
	.f_max = 100000000,
	.signal_direction = true,
	.pwrreg_clkgate = true,
	.pwrreg_nopower = true,
	.mmcimask1 = true,
	.irq_pio_mask = MCI_IRQ_PIO_MASK,
	.start_err = MCI_STARTBITERR,
	.opendrain = MCI_OD,
	.init = mmci_variant_init,
};

static struct variant_data variant_ux500 = {
	.fifosize = 30 * 4,
	.fifohalfsize = 8 * 4,
	.clkreg = MCI_CLK_ENABLE,
	.clkreg_enable = MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
	.cmdreg_srsp = MCI_CPSM_RESPONSE,
	.datalength_bits = 24,
	.datactrl_blocksz = 11,
	.datactrl_dpsm_enable = MCI_DPSM_ENABLE,
	.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
	.st_sdio = true,
	.st_clkdiv = true,
	.pwrreg_powerup = MCI_PWR_ON,
	.f_max = 100000000,
	.signal_direction = true,
	.pwrreg_clkgate = true,
	.busy_detect = true,
	.busy_dpsm_flag = MCI_DPSM_ST_BUSYMODE,
	.busy_detect_flag = MCI_ST_CARDBUSY,
	.busy_detect_mask = MCI_ST_BUSYENDMASK,
	.pwrreg_nopower = true,
	.mmcimask1 = true,
	.irq_pio_mask = MCI_IRQ_PIO_MASK,
	.start_err = MCI_STARTBITERR,
	.opendrain = MCI_OD,
	.init = mmci_variant_init,
};

static struct variant_data variant_ux500v2 = {
	.fifosize = 30 * 4,
	.fifohalfsize = 8 * 4,
	.clkreg = MCI_CLK_ENABLE,
	.clkreg_enable = MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
	.cmdreg_srsp = MCI_CPSM_RESPONSE,
	.datactrl_mask_ddrmode = MCI_DPSM_ST_DDRMODE,
	.datalength_bits = 24,
	.datactrl_blocksz = 11,
	.datactrl_dpsm_enable = MCI_DPSM_ENABLE,
	.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
	.st_sdio = true,
	.st_clkdiv = true,
	.blksz_datactrl16 = true,
	.pwrreg_powerup = MCI_PWR_ON,
	.f_max = 100000000,
	.signal_direction = true,
	.pwrreg_clkgate = true,
	.busy_detect = true,
	.busy_dpsm_flag = MCI_DPSM_ST_BUSYMODE,
	.busy_detect_flag = MCI_ST_CARDBUSY,
	.busy_detect_mask = MCI_ST_BUSYENDMASK,
	.pwrreg_nopower = true,
	.mmcimask1 = true,
	.irq_pio_mask = MCI_IRQ_PIO_MASK,
	.start_err = MCI_STARTBITERR,
	.opendrain = MCI_OD,
	.init = mmci_variant_init,
};

static struct variant_data variant_stm32 = {
	.fifosize = 32 * 4,
	.fifohalfsize = 8 * 4,
	.clkreg = MCI_CLK_ENABLE,
	.clkreg_enable = MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
	.cmdreg_srsp = MCI_CPSM_RESPONSE,
	.irq_pio_mask = MCI_IRQ_PIO_MASK,
	.datalength_bits = 24,
	.datactrl_blocksz = 11,
	.datactrl_dpsm_enable = MCI_DPSM_ENABLE,
	.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
	.st_sdio = true,
	.st_clkdiv = true,
	.pwrreg_powerup = MCI_PWR_ON,
	.f_max = 48000000,
	.pwrreg_clkgate = true,
	.pwrreg_nopower = true,
	.init = mmci_variant_init,
};

static struct variant_data variant_stm32_sdmmc = {
	.fifosize = 16 * 4,
	.fifohalfsize = 8 * 4,
	.f_max = 208000000,
	.stm32_clkdiv = true,
	.cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC,
	.cmdreg_srsp_crc = MCI_CPSM_STM32_SRSP_CRC,
	.cmdreg_srsp = MCI_CPSM_STM32_SRSP,
	.cmdreg_stop = MCI_CPSM_STM32_CMDSTOP,
	.data_cmd_enable = MCI_CPSM_STM32_CMDTRANS,
	.irq_pio_mask = MCI_IRQ_PIO_STM32_MASK,
	.datactrl_first = true,
	.datacnt_useless = true,
	.datalength_bits = 25,
	.datactrl_blocksz = 14,
	.stm32_idmabsize_mask = GENMASK(12, 5),
	.init = sdmmc_variant_init,
};

static struct variant_data variant_qcom = {
	.fifosize = 16 * 4,
	.fifohalfsize = 8 * 4,
	.clkreg = MCI_CLK_ENABLE,
	.clkreg_enable = MCI_QCOM_CLK_FLOWENA |
			 MCI_QCOM_CLK_SELECT_IN_FBCLK,
	.clkreg_8bit_bus_enable = MCI_QCOM_CLK_WIDEBUS_8,
	.datactrl_mask_ddrmode = MCI_QCOM_CLK_SELECT_IN_DDR_MODE,
	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
	.cmdreg_srsp = MCI_CPSM_RESPONSE,
	.data_cmd_enable = MCI_CPSM_QCOM_DATCMD,
	.blksz_datactrl4 = true,
	.datalength_bits = 24,
	.datactrl_blocksz = 11,
	.datactrl_dpsm_enable = MCI_DPSM_ENABLE,
	.pwrreg_powerup = MCI_PWR_UP,
	.f_max = 208000000,
	.explicit_mclk_control = true,
	.qcom_fifo = true,
	.qcom_dml = true,
	.mmcimask1 = true,
	.irq_pio_mask = MCI_IRQ_PIO_MASK,
	.start_err = MCI_STARTBITERR,
	.opendrain = MCI_ROD,
	.init = qcom_variant_init,
};

/* Busy detection for the ST Micro variant */
static int mmci_card_busy(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int busy = 0;

	spin_lock_irqsave(&host->lock, flags);
	if (readl(host->base + MMCISTATUS) & host->variant->busy_detect_flag)
		busy = 1;
	spin_unlock_irqrestore(&host->lock, flags);

	return busy;
}

static void mmci_reg_delay(struct mmci_host *host)
{
	/*
	 * According to the spec, at least three feedback clock cycles
	 * of max 52 MHz must pass between two writes to the MMCICLOCK reg.
	 * Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
	 * Worst delay time during card init is at 100 kHz => 30 us.
	 * Worst delay time when up and running is at 25 MHz => 120 ns.
	 */
	if (host->cclk < 25000000)
		udelay(30);
	else
		ndelay(120);
}

/*
 * This must be called with host->lock held
 */
void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
	if (host->clk_reg != clk) {
		host->clk_reg = clk;
		writel(clk, host->base + MMCICLOCK);
	}
}

/*
 * This must be called with host->lock held
 */
void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
	if (host->pwr_reg != pwr) {
		host->pwr_reg = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
{
	/* Keep busy mode in DPSM if enabled */
	datactrl |= host->datactrl_reg & host->variant->busy_dpsm_flag;

	if (host->datactrl_reg != datactrl) {
		host->datactrl_reg = datactrl;
		writel(datactrl, host->base + MMCIDATACTRL);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	/* Make sure cclk reflects the current calculated clock */
	host->cclk = 0;

	if (desired) {
		if (variant->explicit_mclk_control) {
			host->cclk = host->mclk;
		} else if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
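			/*
			 * Worked example (illustrative numbers, not from
			 * the TRM): mclk = 100 MHz and desired = 400 kHz
			 * give clkdiv = DIV_ROUND_UP(100000000, 400000) - 2
			 * = 248, so cclk = 100000000 / (248 + 2) = 400 kHz.
			 */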
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
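			/*
			 * Worked example (illustrative numbers): mclk =
			 * 100 MHz and desired = 400 kHz give clkdiv =
			 * 100000000 / 800000 - 1 = 124, so cclk =
			 * 100000000 / (2 * (124 + 1)) = 400 kHz.
			 */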
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	/* Set actual clock for debug */
	host->mmc->actual_clock = host->cclk;

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= variant->clkreg_8bit_bus_enable;

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
		clk |= variant->clkreg_neg_edge_enable;

	mmci_write_clkreg(host, clk);
}

void mmci_dma_release(struct mmci_host *host)
{
	if (host->ops && host->ops->dma_release)
		host->ops->dma_release(host);

	host->use_dma = false;
}

void mmci_dma_setup(struct mmci_host *host)
{
	if (!host->ops || !host->ops->dma_setup)
		return;

	if (host->ops->dma_setup(host))
		return;

	/* initialize pre request cookie */
	host->next_cookie = 1;

	host->use_dma = true;
}

/*
 * Validate mmc prerequisites
 */
static int mmci_validate_data(struct mmci_host *host,
			      struct mmc_data *data)
{
	if (!data)
		return 0;

	if (!is_power_of_2(data->blksz)) {
		dev_err(mmc_dev(host->mmc),
			"unsupported block size (%d bytes)\n", data->blksz);
		return -EINVAL;
	}

	if (host->ops && host->ops->validate_data)
		return host->ops->validate_data(host, data);

	return 0;
}

int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
{
	int err;

	if (!host->ops || !host->ops->prep_data)
		return 0;

	err = host->ops->prep_data(host, data, next);

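	/*
	 * A zero cookie means "not prepared", so on signed overflow the
	 * counter deliberately wraps back to 1 rather than 0 or a
	 * negative value.
	 */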
	if (next && !err)
		data->host_cookie = ++host->next_cookie < 0 ?
			1 : host->next_cookie;

	return err;
}

void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
		      int err)
{
	if (host->ops && host->ops->unprep_data)
		host->ops->unprep_data(host, data, err);

	data->host_cookie = 0;
}

void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	WARN_ON(data->host_cookie && data->host_cookie != host->next_cookie);

	if (host->ops && host->ops->get_next_data)
		host->ops->get_next_data(host, data);
}

int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
{
	struct mmc_data *data = host->data;
	int ret;

	if (!host->use_dma)
		return -EINVAL;

	ret = mmci_prep_data(host, data, false);
	if (ret)
		return ret;

	if (!host->ops || !host->ops->dma_start)
		return -EINVAL;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);

	host->ops->dma_start(host, &datactrl);

	/* Trigger the DMA transfer */
	mmci_write_datactrlreg(host, datactrl);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire next DMA request. When that happens, MMCI will
	 * call mmci_data_end()
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;
}

void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
{
	if (!host->use_dma)
		return;

	if (host->ops && host->ops->dma_finalize)
		host->ops->dma_finalize(host, data);
}

void mmci_dma_error(struct mmci_host *host)
{
	if (!host->use_dma)
		return;

	if (host->ops && host->ops->dma_error)
		host->ops->dma_error(host);
}

static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	mmc_request_done(host->mmc, mrq);
}

static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;
	struct variant_data *variant = host->variant;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~variant->irq_pio_mask;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	if (variant->mmcimask1)
		writel(mask, base + MMCIMASK1);

	host->mask1_reg = mask;
}

static void mmci_stop_data(struct mmci_host *host)
{
	mmci_write_datactrlreg(host, 0);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
struct mmci_dmae_next {
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
};

struct mmci_dmae_priv {
	struct dma_chan *cur;
	struct dma_chan *rx_channel;
	struct dma_chan *tx_channel;
	struct dma_async_tx_descriptor *desc_current;
	struct mmci_dmae_next next_data;
};

int mmci_dmae_setup(struct mmci_host *host)
{
	const char *rxname, *txname;
	struct mmci_dmae_priv *dmae;

	dmae = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dmae), GFP_KERNEL);
	if (!dmae)
		return -ENOMEM;

	host->dma_priv = dmae;

	dmae->rx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
						     "rx");
	dmae->tx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
						     "tx");

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it is
	 * specified but cannot be located, DMA will be disabled.
	 */
	if (dmae->rx_channel && !dmae->tx_channel)
		dmae->tx_channel = dmae->rx_channel;

	if (dmae->rx_channel)
		rxname = dma_chan_name(dmae->rx_channel);
	else
		rxname = "none";

	if (dmae->tx_channel)
		txname = dma_chan_name(dmae->tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (dmae->tx_channel) {
		struct device *dev = dmae->tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (dmae->rx_channel) {
		struct device *dev = dmae->rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}

	if (!dmae->tx_channel || !dmae->rx_channel) {
		mmci_dmae_release(host);
		return -EINVAL;
	}

	return 0;
}

710/*
711 * This is used in or so inline it
712 * so it can be discarded.
713 */
void mmci_dmae_release(struct mmci_host *host)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;

	if (dmae->rx_channel)
		dma_release_channel(dmae->rx_channel);
	if (dmae->tx_channel)
		dma_release_channel(dmae->tx_channel);
	dmae->rx_channel = dmae->tx_channel = NULL;
}

static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct dma_chan *chan;

	if (data->flags & MMC_DATA_READ)
		chan = dmae->rx_channel;
	else
		chan = dmae->tx_channel;

	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
		     mmc_get_dma_dir(data));
}

void mmci_dmae_error(struct mmci_host *host)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;

	if (!dma_inprogress(host))
		return;

	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(dmae->cur);
	host->dma_in_progress = false;
	dmae->cur = NULL;
	dmae->desc_current = NULL;
	host->data->host_cookie = 0;

	mmci_dma_unmap(host, host->data);
}

void mmci_dmae_finalize(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	u32 status;
	int i;

	if (!dma_inprogress(host))
		return;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers. On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		mmci_dma_error(host);
		if (!data->error)
			data->error = -EIO;
	} else if (!data->host_cookie) {
		mmci_dma_unmap(host, data);
	}

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}

	host->dma_in_progress = false;
	dmae->cur = NULL;
	dmae->desc_current = NULL;
}

/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
				struct dma_chan **dma_chan,
				struct dma_async_tx_descriptor **dma_desc)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.device_fc = false,
	};
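	/*
	 * Note that the maxburst values above are denominated in words:
	 * e.g. the 8-word (32-byte) half-FIFO of the plain ARM variant
	 * yields a burst size of 8.
	 */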
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	int nr_sg;
	unsigned long flags = DMA_CTRL_ACK;

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		chan = dmae->rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
		chan = dmae->tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
			   mmc_get_dma_dir(data));
	if (nr_sg == 0)
		return -EINVAL;

	if (host->variant->qcom_dml)
		flags |= DMA_PREP_INTERRUPT;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
				       conf.direction, flags);
	if (!desc)
		goto unmap_exit;

	*dma_chan = chan;
	*dma_desc = desc;

	return 0;

 unmap_exit:
	dma_unmap_sg(device->dev, data->sg, data->sg_len,
		     mmc_get_dma_dir(data));
	return -ENOMEM;
}

int mmci_dmae_prep_data(struct mmci_host *host,
			struct mmc_data *data,
			bool next)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct mmci_dmae_next *nd = &dmae->next_data;

	if (!host->use_dma)
		return -EINVAL;

	if (next)
		return _mmci_dmae_prep_data(host, data, &nd->chan, &nd->desc);
	/* Check if next job is already prepared. */
	if (dmae->cur && dmae->desc_current)
		return 0;

	/* No job was prepared, so do it now. */
	return _mmci_dmae_prep_data(host, data, &dmae->cur,
				    &dmae->desc_current);
}

int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct mmc_data *data = host->data;

	host->dma_in_progress = true;
	dmaengine_submit(dmae->desc_current);
	dma_async_issue_pending(dmae->cur);

	if (host->variant->qcom_dml)
		dml_start_xfer(host, data);

	*datactrl |= MCI_DPSM_DMAENABLE;

	return 0;
}

void mmci_dmae_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct mmci_dmae_next *next = &dmae->next_data;

	if (!host->use_dma)
		return;

	WARN_ON(!data->host_cookie && (next->desc || next->chan));

	dmae->desc_current = next->desc;
	dmae->cur = next->chan;
	next->desc = NULL;
	next->chan = NULL;
}

void mmci_dmae_unprep_data(struct mmci_host *host,
			   struct mmc_data *data, int err)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;

	if (!host->use_dma)
		return;

	mmci_dma_unmap(host, data);

	if (err) {
		struct mmci_dmae_next *next = &dmae->next_data;
		struct dma_chan *chan;
		if (data->flags & MMC_DATA_READ)
			chan = dmae->rx_channel;
		else
			chan = dmae->tx_channel;
		dmaengine_terminate_all(chan);

		if (dmae->desc_current == next->desc)
			dmae->desc_current = NULL;

		if (dmae->cur == next->chan) {
			host->dma_in_progress = false;
			dmae->cur = NULL;
		}

		next->desc = NULL;
		next->chan = NULL;
	}
}

static struct mmci_host_ops mmci_variant_ops = {
	.prep_data = mmci_dmae_prep_data,
	.unprep_data = mmci_dmae_unprep_data,
	.get_next_data = mmci_dmae_get_next_data,
	.dma_setup = mmci_dmae_setup,
	.dma_release = mmci_dmae_release,
	.dma_start = mmci_dmae_start,
	.dma_finalize = mmci_dmae_finalize,
	.dma_error = mmci_dmae_error,
};

void mmci_variant_init(struct mmci_host *host)
{
	host->ops = &mmci_variant_ops;
}
#endif

static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data)
		return;

	WARN_ON(data->host_cookie);

	if (mmci_validate_data(host, data))
		return;

	mmci_prep_data(host, data, true);
}

static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data || !data->host_cookie)
		return;

	mmci_unprep_data(host, data, err);
}

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;
	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, NSEC_PER_SEC);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	if (variant->blksz_datactrl16)
		datactrl = variant->datactrl_dpsm_enable | (data->blksz << 16);
	else if (variant->blksz_datactrl4)
		datactrl = variant->datactrl_dpsm_enable | (data->blksz << 4);
	else
		datactrl = variant->datactrl_dpsm_enable | blksz_bits << 4;
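	/*
	 * E.g. for a 512-byte block, blksz_bits = ffs(512) - 1 = 9, so the
	 * power-of-two encoding above writes 9 << 4, while the
	 * blksz_datactrl16/blksz_datactrl4 variants shift the raw 512 in
	 * directly.
	 */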

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
		u32 clk;

		datactrl |= variant->datactrl_mask_sdio;

		/*
		 * On the ST Micro variant, small SDIO write transfers need
		 * clock H/W flow control disabled, otherwise the transfer
		 * will not start. The threshold depends on the rate of MCLK.
		 */
		if (variant->st_sdio && data->flags & MMC_DATA_WRITE &&
		    (host->size < 8 ||
		     (host->size <= 8 && host->mclk > 50000000)))
			clk = host->clk_reg & ~variant->clkreg_enable;
		else
			clk = host->clk_reg | variant->clkreg_enable;

		mmci_write_clkreg(host, clk);
	}

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
		datactrl |= variant->datactrl_mask_ddrmode;

	/*
	 * Attempt to use DMA operation mode, if this
	 * should fail, fall back to PIO mode
	 */
	if (!mmci_dma_start(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	mmci_write_datactrlreg(host, datactrl);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
		cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & host->variant->cmdreg_cpsm_enable) {
		writel(0, base + MMCICOMMAND);
		mmci_reg_delay(host);
	}

	if (host->variant->cmdreg_stop &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		c |= host->variant->cmdreg_stop;

	c |= cmd->opcode | host->variant->cmdreg_cpsm_enable;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= host->variant->cmdreg_lrsp_crc;
		else if (cmd->flags & MMC_RSP_CRC)
			c |= host->variant->cmdreg_srsp_crc;
		else
			c |= host->variant->cmdreg_srsp;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
		c |= host->variant->data_cmd_enable;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void mmci_stop_command(struct mmci_host *host)
{
	host->stop_abort.error = 0;
	mmci_start_command(host, &host->stop_abort, 0);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	unsigned int status_err;

	/* Make sure we have data to handle */
	if (!data)
		return;

	/* First check for errors */
	status_err = status & (host->variant->start_err |
			       MCI_DATACRCFAIL | MCI_DATATIMEOUT |
			       MCI_TXUNDERRUN | MCI_RXOVERRUN);

	if (status_err) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		mmci_dma_error(host);

		/*
		 * Calculate how far we are into the transfer. Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side. On reads, this
		 * can be as much as a FIFO-worth of data ahead. This
		 * matters for FIFO overruns only.
		 */
		if (!host->variant->datacnt_useless) {
			remain = readl(host->base + MMCIDATACNT);
			success = data->blksz * data->blocks - remain;
		} else {
			success = 0;
		}

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status_err, success);
		if (status_err & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status_err & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status_err & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status_err & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status_err & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		mmci_dma_finalize(host, data);

		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop) {
			if (host->variant->cmdreg_stop && data->error)
				mmci_stop_command(host);
			else
				mmci_request_end(host, data->mrq);
		} else if (host->mrq->sbc && !data->error) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;
	bool sbc;

	if (!cmd)
		return;

	sbc = (cmd == host->mrq->sbc);

	/*
	 * The interrupt must be one of these for it to be considered
	 * worth handling. Note that we tag on any latent IRQs postponed
	 * due to waiting for busy status.
	 */
	if (!((status|host->busy_status) &
	      (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND)))
		return;

	/*
	 * ST Micro variant: handle busy detection.
	 */
	if (host->variant->busy_detect) {
		bool busy_resp = !!(cmd->flags & MMC_RSP_BUSY);

		/* We are busy with a command, return */
		if (host->busy_status &&
		    (status & host->variant->busy_detect_flag))
			return;

		/*
		 * We were not busy, but we now got a busy response on
		 * something that was not an error, and we double-check
		 * that the special busy status bit is still set before
		 * proceeding.
		 */
		if (!host->busy_status && busy_resp &&
		    !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
		    (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {

			/* Clear the busy start IRQ */
			writel(host->variant->busy_detect_mask,
			       host->base + MMCICLEAR);

			/* Unmask the busy end IRQ */
			writel(readl(base + MMCIMASK0) |
			       host->variant->busy_detect_mask,
			       base + MMCIMASK0);
			/*
			 * Now cache the last response status code (until
			 * the busy bit goes low), and return.
			 */
			host->busy_status =
				status & (MCI_CMDSENT|MCI_CMDRESPEND);
			return;
		}

		/*
		 * At this point we are not busy with a command, we have
		 * not received a new busy request, clear and mask the busy
		 * end IRQ and fall through to process the IRQ.
		 */
		if (host->busy_status) {

			writel(host->variant->busy_detect_mask,
			       host->base + MMCICLEAR);

			writel(readl(base + MMCIMASK0) &
			       ~host->variant->busy_detect_mask,
			       base + MMCIMASK0);
			host->busy_status = 0;
		}
	}

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if ((!sbc && !cmd->data) || cmd->error) {
		if (host->data) {
			/* Terminate the DMA transfer */
			mmci_dma_error(host);

			mmci_stop_data(host);
			if (host->variant->cmdreg_stop && cmd->error) {
				mmci_stop_command(host);
				return;
			}
		}
		mmci_request_end(host, host->mrq);
	} else if (sbc) {
		mmci_start_command(host, host->mrq->cmd, 0);
	} else if (!host->variant->datactrl_first &&
		   !(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}

static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain)
{
	return remain - (readl(host->base + MMCIFIFOCNT) << 2);
}

static int mmci_qcom_get_rx_fifocnt(struct mmci_host *host, u32 status, int r)
{
	/*
	 * On Qualcomm SDCC4, only 8 words are used in each burst, so only
	 * 8 addresses from the FIFO range should be used.
	 */
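	/*
	 * E.g. with fifohalfsize = 8 * 4, a half-full FIFO reports 32
	 * bytes available; otherwise a single 4-byte word is read at a
	 * time while data is available.
	 */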
	if (status & MCI_RXFIFOHALFFULL)
		return host->variant->fifohalfsize;
	else if (status & MCI_RXDATAAVLBL)
		return 4;

	return 0;
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status = readl(host->base + MMCISTATUS);
	int host_remain = host->size;

	do {
		int count = host->get_rx_fifocnt(host, status, host_remain);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc). Therefore make sure to always read the last bytes
		 * while only doing full 32-bit reads towards the FIFO.
		 */
		if (unlikely(count & 0x3)) {
			if (count < 4) {
				unsigned char buf[4];
				ioread32_rep(base + MMCIFIFO, buf, 1);
				memcpy(ptr, buf, count);
			} else {
				ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
				count &= ~0x3;
			}
		} else {
			ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
		}

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accepts full 32-bit writes.
		 * So compensate by adding +3 to the count: a single
		 * byte becomes one 32-bit write, 7 bytes become two
		 * 32-bit writes, etc.
		 */
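		/*
		 * E.g. count = 7 gives (7 + 3) >> 2 = 2 words: one 32-bit
		 * write for the first 4 bytes and one for the remaining 3
		 * bytes plus a padding byte.
		 */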
		iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & host->mask1_reg)
				mmci_pio_irq(irq, dev_id);

			status &= ~host->variant->irq_pio_mask;
		}

		/*
		 * We intentionally clear the MCI_ST_CARDBUSY IRQ (if it's
		 * enabled) in mmci_cmd_irq() function where ST Micro busy
		 * detection variant is handled. Considering the HW seems to be
		 * triggering the IRQ on both edges while monitoring DAT0 for
		 * busy completion and that same status bit is used to monitor
		 * start and end of busy detection, special care must be taken
		 * to make sure that both start and end interrupts are always
		 * cleared one after the other.
		 */
		status &= readl(host->base + MMCIMASK0);
		if (host->variant->busy_detect)
			writel(status & ~host->variant->busy_detect_mask,
			       host->base + MMCICLEAR);
		else
			writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		if (host->variant->reversed_irq_handling) {
			mmci_data_irq(host, host->data, status);
			mmci_cmd_irq(host, host->cmd, status);
		} else {
			mmci_cmd_irq(host, host->cmd, status);
			mmci_data_irq(host, host->data, status);
		}

		/*
		 * Don't poll for busy completion in irq context.
		 */
		if (host->variant->busy_detect && host->busy_status)
			status &= ~host->variant->busy_detect_flag;

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	mrq->cmd->error = mmci_validate_data(host, mrq->data);
	if (mrq->cmd->error) {
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	if (mrq->data &&
	    (host->variant->datactrl_first || mrq->data->flags & MMC_DATA_READ))
		mmci_start_data(host, mrq->data);

	if (mrq->sbc)
		mmci_start_command(host, mrq->sbc, 0);
	else
		mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct variant_data *variant = host->variant;
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	if (host->plat->ios_handler &&
	    host->plat->ios_handler(mmc_dev(mmc), ios))
		dev_err(mmc_dev(mmc), "platform ios_handler failed\n");

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}

		break;
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);

		/*
		 * The ST Micro variant doesn't have the PL180's MCI_PWR_UP
		 * and instead uses MCI_PWR_ON, so apply whatever value is
		 * configured in the variant data.
		 */
		pwr |= variant->pwrreg_powerup;

		break;
	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			ret = regulator_enable(mmc->supply.vqmmc);
			if (ret < 0)
				dev_err(mmc_dev(mmc),
					"failed to enable vqmmc regulator\n");
			else
				host->vqmmc_enabled = true;
		}

		pwr |= MCI_PWR_ON;
		break;
	}

	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
		/*
		 * The ST Micro variant has some additional bits
		 * indicating signal direction for the signals in
		 * the SD/MMC bus and feedback-clock usage.
		 */
		pwr |= host->pwr_reg_add;

		if (ios->bus_width == MMC_BUS_WIDTH_4)
			pwr &= ~MCI_ST_DATA74DIREN;
		else if (ios->bus_width == MMC_BUS_WIDTH_1)
			pwr &= (~MCI_ST_DATA74DIREN &
				~MCI_ST_DATA31DIREN &
				~MCI_ST_DATA2DIREN);
	}

	if (variant->opendrain) {
		if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
			pwr |= variant->opendrain;
	} else {
		/*
		 * If the variant cannot configure the pads on its own, then
		 * we expect the pinctrl to be able to do that for us.
		 */
		if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
			pinctrl_select_state(host->pinctrl, host->pins_opendrain);
		else
			pinctrl_select_state(host->pinctrl, host->pins_default);
	}

	/*
	 * If clock = 0 and the variant requires the MMCIPOWER to be used for
	 * gating the clock, the MCI_PWR_ON bit is cleared.
	 */
	if (!ios->clock && variant->pwrreg_clkgate)
		pwr &= ~MCI_PWR_ON;

	if (host->variant->explicit_mclk_control &&
	    ios->clock != host->clock_cache) {
		ret = clk_set_rate(host->clk, ios->clock);
		if (ret < 0)
			dev_err(mmc_dev(host->mmc),
				"Error setting clock rate (%d)\n", ret);
		else
			host->mclk = clk_get_rate(host->clk);
	}
	host->clock_cache = ios->clock;

	spin_lock_irqsave(&host->lock, flags);

	if (host->ops && host->ops->set_clkreg)
		host->ops->set_clkreg(host, ios->clock);
	else
		mmci_set_clkreg(host, ios->clock);

	if (host->ops && host->ops->set_pwrreg)
		host->ops->set_pwrreg(host, pwr);
	else
		mmci_write_pwrreg(host, pwr);

	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}

static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status = mmc_gpio_get_cd(mmc);

	if (status == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	}
	return status;
}

static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int ret = 0;

	if (!IS_ERR(mmc->supply.vqmmc)) {

		switch (ios->signal_voltage) {
		case MMC_SIGNAL_VOLTAGE_330:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    2700000, 3600000);
			break;
		case MMC_SIGNAL_VOLTAGE_180:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    1700000, 1950000);
			break;
		case MMC_SIGNAL_VOLTAGE_120:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    1100000, 1300000);
			break;
		}

		if (ret)
			dev_warn(mmc_dev(mmc), "Voltage switch failed\n");
	}

	return ret;
}

static struct mmc_host_ops mmci_ops = {
	.request = mmci_request,
	.pre_req = mmci_pre_request,
	.post_req = mmci_post_request,
	.set_ios = mmci_set_ios,
	.get_ro = mmc_gpio_get_ro,
	.get_cd = mmci_get_cd,
	.start_signal_voltage_switch = mmci_sig_volt_switch,
};

static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	int ret = mmc_of_parse(mmc);

	if (ret)
		return ret;

	if (of_get_property(np, "st,sig-dir-dat0", NULL))
		host->pwr_reg_add |= MCI_ST_DATA0DIREN;
	if (of_get_property(np, "st,sig-dir-dat2", NULL))
		host->pwr_reg_add |= MCI_ST_DATA2DIREN;
	if (of_get_property(np, "st,sig-dir-dat31", NULL))
		host->pwr_reg_add |= MCI_ST_DATA31DIREN;
	if (of_get_property(np, "st,sig-dir-dat74", NULL))
		host->pwr_reg_add |= MCI_ST_DATA74DIREN;
	if (of_get_property(np, "st,sig-dir-cmd", NULL))
		host->pwr_reg_add |= MCI_ST_CMDDIREN;
	if (of_get_property(np, "st,sig-pin-fbclk", NULL))
		host->pwr_reg_add |= MCI_ST_FBCLKEN;
	if (of_get_property(np, "st,sig-dir", NULL))
		host->pwr_reg_add |= MCI_STM32_DIRPOL;
	if (of_get_property(np, "st,neg-edge", NULL))
		host->clk_reg_add |= MCI_STM32_CLK_NEGEDGE;
	if (of_get_property(np, "st,use-ckin", NULL))
		host->clk_reg_add |= MCI_STM32_CLK_SELCKIN;

	if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
		mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
	if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;

	return 0;
}

static int mmci_probe(struct amba_device *dev,
		      const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct device_node *np = dev->dev.of_node;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* Must have platform data or Device Tree. */
	if (!plat && !np) {
		dev_err(&dev->dev, "No plat data or DT found\n");
		return -EINVAL;
	}

	if (!plat) {
		plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
		if (!plat)
			return -ENOMEM;
	}

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc)
		return -ENOMEM;

	ret = mmci_of_parse(np, mmc);
	if (ret)
		goto host_free;

	host = mmc_priv(mmc);
	host->mmc = mmc;

	/*
	 * Some variants (STM32) don't have an open-drain bit; nevertheless
	 * the pins can be configured accordingly using pinctrl.
	 */
	if (!variant->opendrain) {
		host->pinctrl = devm_pinctrl_get(&dev->dev);
		if (IS_ERR(host->pinctrl)) {
			dev_err(&dev->dev, "failed to get pinctrl");
			ret = PTR_ERR(host->pinctrl);
			goto host_free;
		}

		host->pins_default = pinctrl_lookup_state(host->pinctrl,
							  PINCTRL_STATE_DEFAULT);
		if (IS_ERR(host->pins_default)) {
			dev_err(mmc_dev(mmc), "Can't select default pins\n");
			ret = PTR_ERR(host->pins_default);
			goto host_free;
		}

		host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
							    MMCI_PINCTRL_STATE_OPENDRAIN);
		if (IS_ERR(host->pins_opendrain)) {
			dev_err(mmc_dev(mmc), "Can't select opendrain pins\n");
			ret = PTR_ERR(host->pins_opendrain);
			goto host_free;
		}
	}

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto host_free;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto host_free;

	if (variant->qcom_fifo)
		host->get_rx_fifocnt = mmci_qcom_get_rx_fifocnt;
	else
		host->get_rx_fifocnt = mmci_get_rx_fifocnt;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this,
	 * (if possible).
	 */
	if (host->mclk > variant->f_max) {
		ret = clk_set_rate(host->clk, variant->f_max);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}

	host->phybase = dev->res.start;
	host->base = devm_ioremap_resource(&dev->dev, &dev->res);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		goto clk_disable;
	}

	if (variant->init)
		variant->init(host);

	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too.
	 * On Qualcomm-like controllers, pick the rate nearest to 100 kHz
	 * as the minimum clock.
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else if (variant->stm32_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 2046);
	else if (variant->explicit_mclk_control)
		mmc->f_min = clk_round_rate(host->clk, 100000);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
	/*
	 * If no maximum operating frequency is supplied, fall back to use
	 * the module parameter, which has a (low) default value in case it
	 * is not specified. Either value must not exceed the clock rate into
	 * the block, of course.
	 */
	if (mmc->f_max)
		mmc->f_max = variant->explicit_mclk_control ?
			min(variant->f_max, mmc->f_max) :
			min(host->mclk, mmc->f_max);
	else
		mmc->f_max = variant->explicit_mclk_control ?
			fmax : min(host->mclk, fmax);

	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

	host->rst = devm_reset_control_get_optional_exclusive(&dev->dev, NULL);
	if (IS_ERR(host->rst)) {
		ret = PTR_ERR(host->rst);
		goto clk_disable;
	}

	/* Get regulators and the supported OCR mask */
	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		goto clk_disable;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = plat->ocr_mask;
	else if (plat->ocr_mask)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");

	/* We support these capabilities. */
	mmc->caps |= MMC_CAP_CMD23;

	/*
	 * Enable busy detection.
	 */
	if (variant->busy_detect) {
		mmci_ops.card_busy = mmci_card_busy;
		/*
		 * Not all variants have a flag to enable busy detection
		 * in the DPSM, but if they do, set it here.
		 */
		if (variant->busy_dpsm_flag)
			mmci_write_datactrlreg(host,
					       host->variant->busy_dpsm_flag);
		mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
		mmc->max_busy_timeout = 0;
	}

	/* Prepare a CMD12 - needed to clear the DPSM on some variants. */
	host->stop_abort.opcode = MMC_STOP_TRANSMISSION;
	host->stop_abort.arg = 0;
	host->stop_abort.flags = MMC_RSP_R1B | MMC_CMD_AC;

	mmc->ops = &mmci_ops;

	/* We support these PM capabilities. */
	mmc->pm_caps |= MMC_PM_KEEP_POWER;

	/*
	 * We can do SGIO
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size. Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 1 << variant->datactrl_blocksz;

	/*
	 * Limit the number of blocks transferred so that we don't overflow
	 * the maximum request size.
	 */
	mmc->max_blk_count = mmc->max_req_size >> variant->datactrl_blocksz;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);

	if (variant->mmcimask1)
		writel(0, host->base + MMCIMASK1);

	writel(0xfff, host->base + MMCICLEAR);

	/*
	 * If we are not using DT but have a descriptor table, or have a
	 * table of descriptors alongside DT, look up the descriptors named
	 * "cd" and "wp" right here; fail silently if they do not exist.
	 */
	if (!np) {
		ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
		if (ret == -EPROBE_DEFER)
			goto clk_disable;

		ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
		if (ret == -EPROBE_DEFER)
			goto clk_disable;
	}

	ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
			       DRIVER_NAME " (cmd)", host);
	if (ret)
		goto clk_disable;

	if (!dev->irq[1])
		host->singleirq = true;
	else {
		ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq,
				       IRQF_SHARED, DRIVER_NAME " (pio)", host);
		if (ret)
			goto clk_disable;
	}

	writel(MCI_IRQENABLE | variant->start_err, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
	pm_runtime_use_autosuspend(&dev->dev);

	mmc_add_host(mmc);

	pm_runtime_put(&dev->dev);
	return 0;

 clk_disable:
	clk_disable_unprepare(host->clk);
 host_free:
	mmc_free_host(mmc);
	return ret;
}

static int mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		struct variant_data *variant = host->variant;

		/*
		 * Undo pm_runtime_put() in probe. We use the _sync
		 * version here so that we can access the primecell.
		 */
		pm_runtime_get_sync(&dev->dev);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);

		if (variant->mmcimask1)
			writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		clk_disable_unprepare(host->clk);
		mmc_free_host(mmc);
	}

	return 0;
}

#ifdef CONFIG_PM
static void mmci_save(struct mmci_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	writel(0, host->base + MMCIMASK0);
	if (host->variant->pwrreg_nopower) {
		writel(0, host->base + MMCIDATACTRL);
		writel(0, host->base + MMCIPOWER);
		writel(0, host->base + MMCICLOCK);
	}
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_restore(struct mmci_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->variant->pwrreg_nopower) {
		writel(host->clk_reg, host->base + MMCICLOCK);
		writel(host->datactrl_reg, host->base + MMCIDATACTRL);
		writel(host->pwr_reg, host->base + MMCIPOWER);
	}
	writel(MCI_IRQENABLE | host->variant->start_err,
	       host->base + MMCIMASK0);
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}

static int mmci_runtime_suspend(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		pinctrl_pm_select_sleep_state(dev);
		mmci_save(host);
		clk_disable_unprepare(host->clk);
	}

	return 0;
}

static int mmci_runtime_resume(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		clk_prepare_enable(host->clk);
		mmci_restore(host);
		pinctrl_pm_select_default_state(dev);
	}

	return 0;
}
#endif

static const struct dev_pm_ops mmci_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};

static const struct amba_id mmci_ids[] = {
	{
		.id = 0x00041180,
		.mask = 0xff0fffff,
		.data = &variant_arm,
	},
	{
		.id = 0x01041180,
		.mask = 0xff0fffff,
		.data = &variant_arm_extended_fifo,
	},
	{
		.id = 0x02041180,
		.mask = 0xff0fffff,
		.data = &variant_arm_extended_fifo_hwfc,
	},
	{
		.id = 0x00041181,
		.mask = 0x000fffff,
		.data = &variant_arm,
	},
	/* ST Micro variants */
	{
		.id = 0x00180180,
		.mask = 0x00ffffff,
		.data = &variant_u300,
	},
	{
		.id = 0x10180180,
		.mask = 0xf0ffffff,
		.data = &variant_nomadik,
	},
	{
		.id = 0x00280180,
		.mask = 0x00ffffff,
		.data = &variant_nomadik,
	},
	{
		.id = 0x00480180,
		.mask = 0xf0ffffff,
		.data = &variant_ux500,
	},
	{
		.id = 0x10480180,
		.mask = 0xf0ffffff,
		.data = &variant_ux500v2,
	},
	{
		.id = 0x00880180,
		.mask = 0x00ffffff,
		.data = &variant_stm32,
	},
	{
		.id = 0x10153180,
		.mask = 0xf0ffffff,
		.data = &variant_stm32_sdmmc,
	},
	/* Qualcomm variants */
	{
		.id = 0x00051180,
		.mask = 0x000fffff,
		.data = &variant_qcom,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, mmci_ids);

static struct amba_driver mmci_driver = {
	.drv = {
		.name = DRIVER_NAME,
		.pm = &mmci_dev_pm_ops,
	},
	.probe = mmci_probe,
	.remove = mmci_remove,
	.id_table = mmci_ids,
};

module_amba_driver(mmci_driver);

module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");