/*
 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>

#include <asm/div64.h>
#include <asm/io.h>

#include "mmci.h"
#include "mmci_qcom_dml.h"

#define DRIVER_NAME "mmci-pl18x"

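/*
 * Fallback maximum operating frequency (Hz): used in mmci_probe() when
 * neither DT nor platform data supplies f_max, and overridable through
 * the "fmax" module parameter declared at the bottom of this file.
 */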
static unsigned int fmax = 515633;

/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for MMCICLOCK register
 * @clkreg_enable: enable value for MMCICLOCK register
 * @clkreg_8bit_bus_enable: enable value for 8 bit bus
 * @clkreg_neg_edge_enable: enable value for inverted data/cmd output
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @data_cmd_enable: enable value for data commands
 * @st_sdio: enable ST specific SDIO logic
 * @st_clkdiv: true if using a ST-specific clock divider algorithm
 * @datactrl_mask_ddrmode: ddr mode mask in datactrl register
 * @blksz_datactrl16: true if block size is at b16..b30 position in datactrl
 *		      register
 * @blksz_datactrl4: true if block size is at b4..b16 position in datactrl
 *		     register
 * @datactrl_mask_sdio: SDIO enable mask in datactrl register
 * @pwrreg_powerup: power up value for MMCIPOWER register
 * @f_max: maximum clock frequency supported by the controller
 * @signal_direction: input/output direction of bus signals can be indicated
 * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
 * @busy_detect: true if the variant supports busy detection on DAT0
 * @busy_dpsm_flag: bitmask enabling busy detection in the DPSM
 * @busy_detect_flag: bitmask identifying the bit in the MMCISTATUS register
 *		      indicating that the card is busy
 * @busy_detect_mask: bitmask identifying the bit in the MMCIMASK0 to mask for
 *		      getting busy end detection interrupts
 * @pwrreg_nopower: bits in MMCIPOWER don't control the external power supply
 * @explicit_mclk_control: enable explicit mclk control in driver
 * @qcom_fifo: enables qcom specific fifo pio read logic
 * @qcom_dml: enables qcom specific dma glue for dma transfers
 * @reversed_irq_handling: handle data irq before cmd irq
 * @mmcimask1: true if variant has an MMCIMASK1 register
 * @start_err: bitmask identifying the STARTBITERR bit inside MMCISTATUS
 *	       register
 * @opendrain: bitmask identifying the OPENDRAIN bit inside MMCIPOWER register
 */
struct variant_data {
	unsigned int		clkreg;
	unsigned int		clkreg_enable;
	unsigned int		clkreg_8bit_bus_enable;
	unsigned int		clkreg_neg_edge_enable;
	unsigned int		datalength_bits;
	unsigned int		fifosize;
	unsigned int		fifohalfsize;
	unsigned int		data_cmd_enable;
	unsigned int		datactrl_mask_ddrmode;
	unsigned int		datactrl_mask_sdio;
	bool			st_sdio;
	bool			st_clkdiv;
	bool			blksz_datactrl16;
	bool			blksz_datactrl4;
	u32			pwrreg_powerup;
	u32			f_max;
	bool			signal_direction;
	bool			pwrreg_clkgate;
	bool			busy_detect;
	u32			busy_dpsm_flag;
	u32			busy_detect_flag;
	u32			busy_detect_mask;
	bool			pwrreg_nopower;
	bool			explicit_mclk_control;
	bool			qcom_fifo;
	bool			qcom_dml;
	bool			reversed_irq_handling;
	bool			mmcimask1;
	u32			start_err;
	u32			opendrain;
};

static struct variant_data variant_arm = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
	.f_max			= 100000000,
	.reversed_irq_handling	= true,
	.mmcimask1		= true,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_ROD,
};

static struct variant_data variant_arm_extended_fifo = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
	.f_max			= 100000000,
	.mmcimask1		= true,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_ROD,
};

static struct variant_data variant_arm_extended_fifo_hwfc = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.clkreg_enable		= MCI_ARM_HWFCEN,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
	.f_max			= 100000000,
	.mmcimask1		= true,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_ROD,
};

static struct variant_data variant_u300 = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.clkreg_8bit_bus_enable	= MCI_ST_8BIT_BUS,
	.datalength_bits	= 16,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.st_sdio		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 100000000,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.pwrreg_nopower		= true,
	.mmcimask1		= true,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_OD,
};

static struct variant_data variant_nomadik = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_8bit_bus_enable	= MCI_ST_8BIT_BUS,
	.datalength_bits	= 24,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.st_sdio		= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 100000000,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.pwrreg_nopower		= true,
	.mmcimask1		= true,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_OD,
};

static struct variant_data variant_ux500 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable	= MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable	= MCI_ST_UX500_NEG_EDGE,
	.datalength_bits	= 24,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.st_sdio		= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 100000000,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.busy_detect		= true,
	.busy_dpsm_flag		= MCI_DPSM_ST_BUSYMODE,
	.busy_detect_flag	= MCI_ST_CARDBUSY,
	.busy_detect_mask	= MCI_ST_BUSYENDMASK,
	.pwrreg_nopower		= true,
	.mmcimask1		= true,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_OD,
};

static struct variant_data variant_ux500v2 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable	= MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable	= MCI_ST_UX500_NEG_EDGE,
	.datactrl_mask_ddrmode	= MCI_DPSM_ST_DDRMODE,
	.datalength_bits	= 24,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.st_sdio		= true,
	.st_clkdiv		= true,
	.blksz_datactrl16	= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 100000000,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.busy_detect		= true,
	.busy_dpsm_flag		= MCI_DPSM_ST_BUSYMODE,
	.busy_detect_flag	= MCI_ST_CARDBUSY,
	.busy_detect_mask	= MCI_ST_BUSYENDMASK,
	.pwrreg_nopower		= true,
	.mmcimask1		= true,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_OD,
};

static struct variant_data variant_stm32 = {
	.fifosize		= 32 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable	= MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable	= MCI_ST_UX500_NEG_EDGE,
	.datalength_bits	= 24,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.st_sdio		= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 48000000,
	.pwrreg_clkgate		= true,
	.pwrreg_nopower		= true,
};

static struct variant_data variant_qcom = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_QCOM_CLK_FLOWENA |
				  MCI_QCOM_CLK_SELECT_IN_FBCLK,
	.clkreg_8bit_bus_enable	= MCI_QCOM_CLK_WIDEBUS_8,
	.datactrl_mask_ddrmode	= MCI_QCOM_CLK_SELECT_IN_DDR_MODE,
	.data_cmd_enable	= MCI_CPSM_QCOM_DATCMD,
	.blksz_datactrl4	= true,
	.datalength_bits	= 24,
	.pwrreg_powerup		= MCI_PWR_UP,
	.f_max			= 208000000,
	.explicit_mclk_control	= true,
	.qcom_fifo		= true,
	.qcom_dml		= true,
	.mmcimask1		= true,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_ROD,
};

/* Busy detection for the ST Micro variant */
static int mmci_card_busy(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int busy = 0;

	spin_lock_irqsave(&host->lock, flags);
	if (readl(host->base + MMCISTATUS) & host->variant->busy_detect_flag)
		busy = 1;
	spin_unlock_irqrestore(&host->lock, flags);

	return busy;
}

/*
 * Validate mmc prerequisites
 */
static int mmci_validate_data(struct mmci_host *host,
			      struct mmc_data *data)
{
	if (!data)
		return 0;

	if (!is_power_of_2(data->blksz)) {
		dev_err(mmc_dev(host->mmc),
			"unsupported block size (%d bytes)\n", data->blksz);
		return -EINVAL;
	}

	return 0;
}

static void mmci_reg_delay(struct mmci_host *host)
{
	/*
	 * According to the spec, at least three feedback clock cycles
	 * of max 52 MHz must pass between two writes to the MMCICLOCK reg.
	 * Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
	 * Worst delay time during card init is at 100 kHz => 30 us.
	 * Worst delay time when up and running is at 25 MHz => 120 ns.
	 */
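	/*
	 * Illustrative arithmetic for the two cases above: 3 cycles at
	 * 100 kHz is 3 * 10 us = 30 us, and 3 cycles at 25 MHz is
	 * 3 * 40 ns = 120 ns, which is where the udelay()/ndelay()
	 * values below come from.
	 */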
	if (host->cclk < 25000000)
		udelay(30);
	else
		ndelay(120);
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
	if (host->clk_reg != clk) {
		host->clk_reg = clk;
		writel(clk, host->base + MMCICLOCK);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
	if (host->pwr_reg != pwr) {
		host->pwr_reg = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
{
	/* Keep busy mode in DPSM if enabled */
	datactrl |= host->datactrl_reg & host->variant->busy_dpsm_flag;

	if (host->datactrl_reg != datactrl) {
		host->datactrl_reg = datactrl;
		writel(datactrl, host->base + MMCIDATACTRL);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	/* Make sure cclk reflects the current calculated clock */
	host->cclk = 0;

	if (desired) {
		if (variant->explicit_mclk_control) {
			host->cclk = host->mclk;
		} else if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}
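		/*
		 * Worked example (illustrative numbers): with
		 * mclk = 100 MHz and desired = 400 kHz, the ST divider
		 * gives clkdiv = DIV_ROUND_UP(100000000, 400000) - 2 = 248
		 * and cclk = 100000000 / (248 + 2) = 400 kHz, while the
		 * PL180 divider gives clkdiv = 100000000 / 800000 - 1 = 124
		 * and cclk = 100000000 / (2 * 125) = 400 kHz.
		 */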

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	/* Set actual clock for debug */
	host->mmc->actual_clock = host->cclk;

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= variant->clkreg_8bit_bus_enable;

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
		clk |= variant->clkreg_neg_edge_enable;

	mmci_write_clkreg(host, clk);
}

static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	mmc_request_done(host->mmc, mrq);
}

static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;
	struct variant_data *variant = host->variant;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~MCI_IRQ1MASK;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	if (variant->mmcimask1)
		writel(mask, base + MMCIMASK1);

	host->mask1_reg = mask;
}

static void mmci_stop_data(struct mmci_host *host)
{
	mmci_write_datactrlreg(host, 0);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
static void mmci_dma_setup(struct mmci_host *host)
{
	const char *rxname, *txname;
	struct variant_data *variant = host->variant;

	host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
	host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx");

	/* initialize pre request cookie */
	host->next_data.cookie = 1;

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it is
	 * specified but cannot be located, DMA will be disabled.
	 */
	if (host->dma_rx_channel && !host->dma_tx_channel)
		host->dma_tx_channel = host->dma_rx_channel;

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	else
		rxname = "none";

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}

	if (variant->qcom_dml && host->dma_rx_channel && host->dma_tx_channel)
		if (dml_hw_init(host, host->mmc->parent->of_node))
			variant->qcom_dml = false;
}

/*
 * This is only used on the DMA-error and remove paths, so inline it
 * so it can be discarded.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
	if (host->dma_rx_channel)
		dma_release_channel(host->dma_rx_channel);
	if (host->dma_tx_channel)
		dma_release_channel(host->dma_tx_channel);
	host->dma_rx_channel = host->dma_tx_channel = NULL;
}

static void mmci_dma_data_error(struct mmci_host *host)
{
	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(host->dma_current);
	host->dma_in_progress = false;
	host->dma_current = NULL;
	host->dma_desc_current = NULL;
	host->data->host_cookie = 0;
}

static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct dma_chan *chan;

	if (data->flags & MMC_DATA_READ)
		chan = host->dma_rx_channel;
	else
		chan = host->dma_tx_channel;

	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
		     mmc_get_dma_dir(data));
}

static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
{
	u32 status;
	int i;

	/* Wait up to 1ms for the DMA to complete */
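	/* (i.e. at most 100 polls of MMCISTATUS with udelay(10) between them) */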
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers. On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		mmci_dma_data_error(host);
		if (!data->error)
			data->error = -EIO;
	}

	if (!data->host_cookie)
		mmci_dma_unmap(host, data);

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}

	host->dma_in_progress = false;
	host->dma_current = NULL;
	host->dma_desc_current = NULL;
}

/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
				struct dma_chan **dma_chan,
				struct dma_async_tx_descriptor **dma_desc)
{
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.device_fc = false,
	};
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	int nr_sg;
	unsigned long flags = DMA_CTRL_ACK;

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
			   mmc_get_dma_dir(data));
	if (nr_sg == 0)
		return -EINVAL;

	if (host->variant->qcom_dml)
		flags |= DMA_PREP_INTERRUPT;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
				       conf.direction, flags);
	if (!desc)
		goto unmap_exit;

	*dma_chan = chan;
	*dma_desc = desc;

	return 0;

 unmap_exit:
	dma_unmap_sg(device->dev, data->sg, data->sg_len,
		     mmc_get_dma_dir(data));
	return -ENOMEM;
}

static inline int mmci_dma_prep_data(struct mmci_host *host,
				     struct mmc_data *data)
{
	/* Check if next job is already prepared. */
	if (host->dma_current && host->dma_desc_current)
		return 0;

	/* No job was prepared, so do it now. */
	return __mmci_dma_prep_data(host, data, &host->dma_current,
				    &host->dma_desc_current);
}

static inline int mmci_dma_prep_next(struct mmci_host *host,
				     struct mmc_data *data)
{
	struct mmci_host_next *nd = &host->next_data;
	return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
}

static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	int ret;
	struct mmc_data *data = host->data;

	ret = mmci_dma_prep_data(host, host->data);
	if (ret)
		return ret;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	host->dma_in_progress = true;
	dmaengine_submit(host->dma_desc_current);
	dma_async_issue_pending(host->dma_current);

	if (host->variant->qcom_dml)
		dml_start_xfer(host, data);

	datactrl |= MCI_DPSM_DMAENABLE;

	/* Trigger the DMA transfer */
	mmci_write_datactrlreg(host, datactrl);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire the next DMA request. When that happens, the data
	 * end interrupt is handled in mmci_data_irq().
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;
}

static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_host_next *next = &host->next_data;

	WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
	WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));

	host->dma_desc_current = next->dma_desc;
	host->dma_current = next->dma_chan;
	next->dma_desc = NULL;
	next->dma_chan = NULL;
}

static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct mmci_host_next *nd = &host->next_data;

	if (!data)
		return;

	BUG_ON(data->host_cookie);

	if (mmci_validate_data(host, data))
		return;

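	/*
	 * A non-zero host_cookie marks the request as prepared; the
	 * increment below wraps back to 1 on signed overflow so the
	 * cookie never becomes zero or negative.
	 */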
	if (!mmci_dma_prep_next(host, data))
		data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
}

static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data || !data->host_cookie)
		return;

	mmci_dma_unmap(host, data);

	if (err) {
		struct mmci_host_next *next = &host->next_data;
		struct dma_chan *chan;
		if (data->flags & MMC_DATA_READ)
			chan = host->dma_rx_channel;
		else
			chan = host->dma_tx_channel;
		dmaengine_terminate_all(chan);

		if (host->dma_desc_current == next->dma_desc)
			host->dma_desc_current = NULL;

		if (host->dma_current == next->dma_chan) {
			host->dma_in_progress = false;
			host->dma_current = NULL;
		}

		next->dma_desc = NULL;
		next->dma_chan = NULL;
		data->host_cookie = 0;
	}
}

#else
/* Blank functions if the DMA engine is not available */
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
}
static inline void mmci_dma_setup(struct mmci_host *host)
{
}

static inline void mmci_dma_release(struct mmci_host *host)
{
}

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_finalize(struct mmci_host *host,
				     struct mmc_data *data)
{
}

static inline void mmci_dma_data_error(struct mmci_host *host)
{
}

static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	return -ENOSYS;
}

#define mmci_pre_request NULL
#define mmci_post_request NULL

#endif

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, NSEC_PER_SEC);

	timeout = data->timeout_clks + (unsigned int)clks;
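	/*
	 * Illustrative numbers: a timeout_ns of 100 ms at cclk = 25 MHz
	 * gives clks = 100000000 * 25000000 / NSEC_PER_SEC = 2500000
	 * card clock cycles on top of any timeout_clks set by the core.
	 */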

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

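	/* For a power-of-two blksz, ffs(blksz) - 1 is exactly log2(blksz). */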
	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	if (variant->blksz_datactrl16)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
	else if (variant->blksz_datactrl4)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);
	else
		datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
		u32 clk;

		datactrl |= variant->datactrl_mask_sdio;

		/*
		 * The ST Micro variant for SDIO small write transfers
		 * needs to have clock H/W flow control disabled,
		 * otherwise the transfer will not start. The threshold
		 * depends on the rate of MCLK.
		 */
		if (variant->st_sdio && data->flags & MMC_DATA_WRITE &&
		    (host->size < 8 ||
		     (host->size <= 8 && host->mclk > 50000000)))
			clk = host->clk_reg & ~variant->clkreg_enable;
		else
			clk = host->clk_reg | variant->clkreg_enable;

		mmci_write_clkreg(host, clk);
	}

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
		datactrl |= variant->datactrl_mask_ddrmode;

	/*
	 * Attempt to use DMA operation mode, if this
	 * should fail, fall back to PIO mode
	 */
	if (!mmci_dma_start_data(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	mmci_write_datactrlreg(host, datactrl);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
		cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		mmci_reg_delay(host);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
		c |= host->variant->data_cmd_enable;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	/* Make sure we have data to handle */
	if (!data)
		return;

	/* First check for errors */
	if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT |
		      host->variant->start_err |
		      MCI_TXUNDERRUN | MCI_RXOVERRUN)) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		if (dma_inprogress(host)) {
			mmci_dma_data_error(host);
			mmci_dma_unmap(host, data);
		}

		/*
		 * Calculate how far we are into the transfer. Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side. On reads, this
		 * can be as much as a FIFO-worth of data ahead. This
		 * matters for FIFO overruns only.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status, success);
		if (status & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		if (dma_inprogress(host))
			mmci_dma_finalize(host, data);
		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop || host->mrq->sbc) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;
	bool sbc;

	if (!cmd)
		return;

	sbc = (cmd == host->mrq->sbc);

	/*
	 * The status needs to include at least one of these interrupt
	 * bits to be considered worth handling. Note that we tag on any
	 * latent IRQs postponed due to waiting for busy status.
	 */
	if (!((status|host->busy_status) &
	      (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND)))
		return;

	/*
	 * ST Micro variant: handle busy detection.
	 */
	if (host->variant->busy_detect) {
		bool busy_resp = !!(cmd->flags & MMC_RSP_BUSY);

		/* We are busy with a command, return */
		if (host->busy_status &&
		    (status & host->variant->busy_detect_flag))
			return;

		/*
		 * We were not busy, but we now got a busy response on
		 * something that was not an error, and we double-check
		 * that the special busy status bit is still set before
		 * proceeding.
		 */
		if (!host->busy_status && busy_resp &&
		    !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
		    (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {

			/* Clear the busy start IRQ */
			writel(host->variant->busy_detect_mask,
			       host->base + MMCICLEAR);

			/* Unmask the busy end IRQ */
			writel(readl(base + MMCIMASK0) |
			       host->variant->busy_detect_mask,
			       base + MMCIMASK0);
			/*
			 * Now cache the last response status code (until
			 * the busy bit goes low), and return.
			 */
			host->busy_status =
				status & (MCI_CMDSENT|MCI_CMDRESPEND);
			return;
		}

		/*
		 * At this point we are not busy with a command, we have
		 * not received a new busy request, clear and mask the busy
		 * end IRQ and fall through to process the IRQ.
		 */
		if (host->busy_status) {

			writel(host->variant->busy_detect_mask,
			       host->base + MMCICLEAR);

			writel(readl(base + MMCIMASK0) &
			       ~host->variant->busy_detect_mask,
			       base + MMCIMASK0);
			host->busy_status = 0;
		}
	}

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if ((!sbc && !cmd->data) || cmd->error) {
		if (host->data) {
			/* Terminate the DMA transfer */
			if (dma_inprogress(host)) {
				mmci_dma_data_error(host);
				mmci_dma_unmap(host, host->data);
			}
			mmci_stop_data(host);
		}
		mmci_request_end(host, host->mrq);
	} else if (sbc) {
		mmci_start_command(host, host->mrq->cmd, 0);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}

static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain)
{
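	/*
	 * Per the PL180 TRM, MMCIFIFOCNT holds the number of words still
	 * to be transferred, so the bytes currently available in the
	 * FIFO are the overall remainder minus (FIFOCNT << 2).
	 */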
	return remain - (readl(host->base + MMCIFIFOCNT) << 2);
}

static int mmci_qcom_get_rx_fifocnt(struct mmci_host *host, u32 status, int r)
{
	/*
	 * On Qualcomm SDCC4, only 8 words are used in each burst, so only
	 * 8 addresses from the FIFO range should be used.
	 */
	if (status & MCI_RXFIFOHALFFULL)
		return host->variant->fifohalfsize;
	else if (status & MCI_RXDATAAVLBL)
		return 4;

	return 0;
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status = readl(host->base + MMCISTATUS);
	int host_remain = host->size;

	do {
		int count = host->get_rx_fifocnt(host, status, host_remain);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc). Therefore make sure to always read the last bytes
		 * while only doing full 32-bit reads towards the FIFO.
		 */
		if (unlikely(count & 0x3)) {
			if (count < 4) {
				unsigned char buf[4];
				ioread32_rep(base + MMCIFIFO, buf, 1);
				memcpy(ptr, buf, count);
			} else {
				ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
				count &= ~0x3;
			}
		} else {
			ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
		}

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accepts full 32-bit writes.
		 * So compensate by adding +3 on the count: a single
		 * byte becomes one 32-bit write, 7 bytes become two
		 * 32-bit writes, etc.
		 */
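		/*
		 * E.g. count = 1 gives (1 + 3) >> 2 = 1 word and
		 * count = 7 gives (7 + 3) >> 2 = 2 words below.
		 */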
		iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & host->mask1_reg)
				mmci_pio_irq(irq, dev_id);

			status &= ~MCI_IRQ1MASK;
		}

		/*
		 * We intentionally clear the MCI_ST_CARDBUSY IRQ (if it's
		 * enabled) in mmci_cmd_irq() function where ST Micro busy
		 * detection variant is handled. Considering the HW seems to be
		 * triggering the IRQ on both edges while monitoring DAT0 for
		 * busy completion and that same status bit is used to monitor
		 * start and end of busy detection, special care must be taken
		 * to make sure that both start and end interrupts are always
		 * cleared one after the other.
		 */
		status &= readl(host->base + MMCIMASK0);
		if (host->variant->busy_detect)
			writel(status & ~host->variant->busy_detect_mask,
			       host->base + MMCICLEAR);
		else
			writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		if (host->variant->reversed_irq_handling) {
			mmci_data_irq(host, host->data, status);
			mmci_cmd_irq(host, host->cmd, status);
		} else {
			mmci_cmd_irq(host, host->cmd, status);
			mmci_data_irq(host, host->data, status);
		}

		/*
		 * Don't poll for busy completion in irq context.
		 */
		if (host->variant->busy_detect && host->busy_status)
			status &= ~host->variant->busy_detect_flag;

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	mrq->cmd->error = mmci_validate_data(host, mrq->data);
	if (mrq->cmd->error) {
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

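	/*
	 * For reads, set up the data path before sending the command so
	 * the FIFO is ready when the card starts returning data.
	 */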
	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	if (mrq->sbc)
		mmci_start_command(host, mrq->sbc, 0);
	else
		mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct variant_data *variant = host->variant;
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	if (host->plat->ios_handler &&
	    host->plat->ios_handler(mmc_dev(mmc), ios))
		dev_err(mmc_dev(mmc), "platform ios_handler failed\n");

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}

		break;
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);

		/*
		 * The ST Micro variant doesn't have the PL180's MCI_PWR_UP
		 * and instead uses MCI_PWR_ON, so apply whatever value is
		 * configured in the variant data.
		 */
		pwr |= variant->pwrreg_powerup;

		break;
	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			ret = regulator_enable(mmc->supply.vqmmc);
			if (ret < 0)
				dev_err(mmc_dev(mmc),
					"failed to enable vqmmc regulator\n");
			else
				host->vqmmc_enabled = true;
		}

		pwr |= MCI_PWR_ON;
		break;
	}

	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
		/*
		 * The ST Micro variant has some additional bits
		 * indicating signal direction for the signals in
		 * the SD/MMC bus and feedback-clock usage.
		 */
		pwr |= host->pwr_reg_add;

		if (ios->bus_width == MMC_BUS_WIDTH_4)
			pwr &= ~MCI_ST_DATA74DIREN;
		else if (ios->bus_width == MMC_BUS_WIDTH_1)
			pwr &= (~MCI_ST_DATA74DIREN &
				~MCI_ST_DATA31DIREN &
				~MCI_ST_DATA2DIREN);
	}

	if (variant->opendrain) {
		if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
			pwr |= variant->opendrain;
	} else {
		/*
		 * If the variant cannot configure the pads by its own, then we
		 * expect the pinctrl to be able to do that for us
		 */
		if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
			pinctrl_select_state(host->pinctrl, host->pins_opendrain);
		else
			pinctrl_select_state(host->pinctrl, host->pins_default);
	}

	/*
	 * If clock = 0 and the variant requires the MMCIPOWER to be used for
	 * gating the clock, the MCI_PWR_ON bit is cleared.
	 */
	if (!ios->clock && variant->pwrreg_clkgate)
		pwr &= ~MCI_PWR_ON;

	if (host->variant->explicit_mclk_control &&
	    ios->clock != host->clock_cache) {
		ret = clk_set_rate(host->clk, ios->clock);
		if (ret < 0)
			dev_err(mmc_dev(host->mmc),
				"Error setting clock rate (%d)\n", ret);
		else
			host->mclk = clk_get_rate(host->clk);
	}
	host->clock_cache = ios->clock;

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);
	mmci_write_pwrreg(host, pwr);
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}

static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status = mmc_gpio_get_cd(mmc);

	if (status == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	}
	return status;
}

static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int ret = 0;

	if (!IS_ERR(mmc->supply.vqmmc)) {

		switch (ios->signal_voltage) {
		case MMC_SIGNAL_VOLTAGE_330:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    2700000, 3600000);
			break;
		case MMC_SIGNAL_VOLTAGE_180:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    1700000, 1950000);
			break;
		case MMC_SIGNAL_VOLTAGE_120:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    1100000, 1300000);
			break;
		}

		if (ret)
			dev_warn(mmc_dev(mmc), "Voltage switch failed\n");
	}

	return ret;
}

static struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmc_gpio_get_ro,
	.get_cd		= mmci_get_cd,
	.start_signal_voltage_switch = mmci_sig_volt_switch,
};

static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	int ret = mmc_of_parse(mmc);

	if (ret)
		return ret;

	if (of_get_property(np, "st,sig-dir-dat0", NULL))
		host->pwr_reg_add |= MCI_ST_DATA0DIREN;
	if (of_get_property(np, "st,sig-dir-dat2", NULL))
		host->pwr_reg_add |= MCI_ST_DATA2DIREN;
	if (of_get_property(np, "st,sig-dir-dat31", NULL))
		host->pwr_reg_add |= MCI_ST_DATA31DIREN;
	if (of_get_property(np, "st,sig-dir-dat74", NULL))
		host->pwr_reg_add |= MCI_ST_DATA74DIREN;
	if (of_get_property(np, "st,sig-dir-cmd", NULL))
		host->pwr_reg_add |= MCI_ST_CMDDIREN;
	if (of_get_property(np, "st,sig-pin-fbclk", NULL))
		host->pwr_reg_add |= MCI_ST_FBCLKEN;

	if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
		mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
	if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;

	return 0;
}
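
/*
 * Illustrative device tree fragment (not taken from a real board file)
 * showing the vendor properties parsed above; the node address is a
 * placeholder:
 *
 *	mmc@80126000 {
 *		compatible = "arm,pl18x", "arm,primecell";
 *		st,sig-dir-dat0;
 *		st,sig-dir-cmd;
 *		st,sig-pin-fbclk;
 *		mmc-cap-sd-highspeed;
 *	};
 */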

static int mmci_probe(struct amba_device *dev,
		      const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct device_node *np = dev->dev.of_node;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* Must have platform data or Device Tree. */
	if (!plat && !np) {
		dev_err(&dev->dev, "No plat data or DT found\n");
		return -EINVAL;
	}

	if (!plat) {
		plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
		if (!plat)
			return -ENOMEM;
	}

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc)
		return -ENOMEM;

	ret = mmci_of_parse(np, mmc);
	if (ret)
		goto host_free;

	host = mmc_priv(mmc);
	host->mmc = mmc;

	/*
	 * Some variants (STM32) don't have an opendrain bit; nevertheless
	 * the pins can be set accordingly using pinctrl.
	 */
	if (!variant->opendrain) {
		host->pinctrl = devm_pinctrl_get(&dev->dev);
		if (IS_ERR(host->pinctrl)) {
			dev_err(&dev->dev, "failed to get pinctrl");
			ret = PTR_ERR(host->pinctrl);
			goto host_free;
		}

		host->pins_default = pinctrl_lookup_state(host->pinctrl,
							  PINCTRL_STATE_DEFAULT);
		if (IS_ERR(host->pins_default)) {
			dev_err(mmc_dev(mmc), "Can't select default pins\n");
			ret = PTR_ERR(host->pins_default);
			goto host_free;
		}

		host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
							    MMCI_PINCTRL_STATE_OPENDRAIN);
		if (IS_ERR(host->pins_opendrain)) {
			dev_err(mmc_dev(mmc), "Can't select opendrain pins\n");
			ret = PTR_ERR(host->pins_opendrain);
			goto host_free;
		}
	}

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto host_free;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto host_free;

	if (variant->qcom_fifo)
		host->get_rx_fifocnt = mmci_qcom_get_rx_fifocnt;
	else
		host->get_rx_fifocnt = mmci_get_rx_fifocnt;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is at most 100 MHz,
	 * so we try to adjust the clock down to this (if possible).
	 */
	if (host->mclk > variant->f_max) {
		ret = clk_set_rate(host->clk, variant->f_max);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}

	host->phybase = dev->res.start;
	host->base = devm_ioremap_resource(&dev->dev, &dev->res);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		goto clk_disable;
	}

	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too. On Qualcomm-like controllers, use the nearest
	 * minimum clock to 100 kHz.
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else if (variant->explicit_mclk_control)
		mmc->f_min = clk_round_rate(host->clk, 100000);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
	/*
	 * If no maximum operating frequency is supplied, fall back to use
	 * the module parameter, which has a (low) default value in case it
	 * is not specified. Either value must not exceed the clock rate into
	 * the block, of course.
	 */
	if (mmc->f_max)
		mmc->f_max = variant->explicit_mclk_control ?
				min(variant->f_max, mmc->f_max) :
				min(host->mclk, mmc->f_max);
	else
		mmc->f_max = variant->explicit_mclk_control ?
				fmax : min(host->mclk, fmax);

	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

	/* Get regulators and the supported OCR mask */
	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		goto clk_disable;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = plat->ocr_mask;
	else if (plat->ocr_mask)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");

	/* DT takes precedence over platform data. */
	if (!np) {
		if (!plat->cd_invert)
			mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
		mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
	}

	/* We support these capabilities. */
	mmc->caps |= MMC_CAP_CMD23;

	/*
	 * Enable busy detection.
	 */
	if (variant->busy_detect) {
		mmci_ops.card_busy = mmci_card_busy;
		/*
		 * Not all variants have a flag to enable busy detection
		 * in the DPSM, but if they do, set it here.
		 */
		if (variant->busy_dpsm_flag)
			mmci_write_datactrlreg(host,
					       host->variant->busy_dpsm_flag);
		mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
		mmc->max_busy_timeout = 0;
	}

	mmc->ops = &mmci_ops;

	/* We support these PM capabilities. */
	mmc->pm_caps |= MMC_PM_KEEP_POWER;

	/*
	 * We can do SGIO
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size. Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 1 << 11;

	/*
	 * Limit the number of blocks transferred so that we don't overflow
	 * the maximum request size.
	 */
	mmc->max_blk_count = mmc->max_req_size >> 11;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);

	if (variant->mmcimask1)
		writel(0, host->base + MMCIMASK1);

	writel(0xfff, host->base + MMCICLEAR);

	/*
	 * If:
	 * - not using DT but using a descriptor table, or
	 * - using a table of descriptors ALONGSIDE DT,
	 * look up these descriptors named "cd" and "wp" right here; fail
	 * silently if these do not exist and proceed to try platform data.
	 */
	if (!np) {
		ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
		if (ret < 0) {
			if (ret == -EPROBE_DEFER)
				goto clk_disable;
			else if (gpio_is_valid(plat->gpio_cd)) {
				ret = mmc_gpio_request_cd(mmc, plat->gpio_cd, 0);
				if (ret)
					goto clk_disable;
			}
		}

		ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL);
		if (ret < 0) {
			if (ret == -EPROBE_DEFER)
				goto clk_disable;
			else if (gpio_is_valid(plat->gpio_wp)) {
				ret = mmc_gpio_request_ro(mmc, plat->gpio_wp);
				if (ret)
					goto clk_disable;
			}
		}
	}

	ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
			       DRIVER_NAME " (cmd)", host);
	if (ret)
		goto clk_disable;

	if (!dev->irq[1])
		host->singleirq = true;
	else {
		ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq,
				       IRQF_SHARED, DRIVER_NAME " (pio)", host);
		if (ret)
			goto clk_disable;
	}

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
	pm_runtime_use_autosuspend(&dev->dev);

	mmc_add_host(mmc);

	pm_runtime_put(&dev->dev);
	return 0;

 clk_disable:
	clk_disable_unprepare(host->clk);
 host_free:
	mmc_free_host(mmc);
	return ret;
}

static int mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		struct variant_data *variant = host->variant;

		/*
		 * Undo pm_runtime_put() in probe. We use the _sync
		 * version here so that we can access the primecell.
		 */
		pm_runtime_get_sync(&dev->dev);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);

		if (variant->mmcimask1)
			writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		clk_disable_unprepare(host->clk);
		mmc_free_host(mmc);
	}

	return 0;
}

#ifdef CONFIG_PM
static void mmci_save(struct mmci_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	writel(0, host->base + MMCIMASK0);
	if (host->variant->pwrreg_nopower) {
		writel(0, host->base + MMCIDATACTRL);
		writel(0, host->base + MMCIPOWER);
		writel(0, host->base + MMCICLOCK);
	}
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_restore(struct mmci_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->variant->pwrreg_nopower) {
		writel(host->clk_reg, host->base + MMCICLOCK);
		writel(host->datactrl_reg, host->base + MMCIDATACTRL);
		writel(host->pwr_reg, host->base + MMCIPOWER);
	}
	writel(MCI_IRQENABLE, host->base + MMCIMASK0);
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}

static int mmci_runtime_suspend(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		pinctrl_pm_select_sleep_state(dev);
		mmci_save(host);
		clk_disable_unprepare(host->clk);
	}

	return 0;
}

static int mmci_runtime_resume(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		clk_prepare_enable(host->clk);
		mmci_restore(host);
		pinctrl_pm_select_default_state(dev);
	}

	return 0;
}
#endif

static const struct dev_pm_ops mmci_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};

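/*
 * AMBA peripheral ID table. The AMBA bus matches a device when
 * (periphid & mask) == id, so e.g. a mask of 0xff0fffff ignores the
 * revision nibble of the primecell ID.
 */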
static const struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm,
	},
	{
		.id	= 0x01041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo,
	},
	{
		.id	= 0x02041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo_hwfc,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id	= 0x00180180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x10180180,
		.mask	= 0xf0ffffff,
		.data	= &variant_nomadik,
	},
	{
		.id	= 0x00280180,
		.mask	= 0x00ffffff,
		.data	= &variant_nomadik,
	},
	{
		.id	= 0x00480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500,
	},
	{
		.id	= 0x10480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500v2,
	},
	{
		.id	= 0x00880180,
		.mask	= 0x00ffffff,
		.data	= &variant_stm32,
	},
	/* Qualcomm variants */
	{
		.id	= 0x00051180,
		.mask	= 0x000fffff,
		.data	= &variant_qcom,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, mmci_ids);

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
		.pm	= &mmci_dev_pm_ops,
	},
	.probe		= mmci_probe,
	.remove		= mmci_remove,
	.id_table	= mmci_ids,
};

module_amba_driver(mmci_driver);

module_param(fmax, uint, 0444);
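/*
 * A brief parameter description for the module_param above; the wording
 * summarizes how fmax is used in mmci_probe() and is not taken from the
 * original source.
 */
MODULE_PARM_DESC(fmax, "Fallback maximum operating frequency (Hz) when no f_max is supplied");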

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");