/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
				 SDMMC_INT_EBE | SDMMC_INT_HLE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	100000		/* unit: HZ */

#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

#define DESC_RING_BUF_SZ	PAGE_SIZE
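
/*
 * For example, assuming 4 KiB pages, the descriptor ring sized above
 * holds PAGE_SIZE / sizeof(struct idmac_desc_64addr) = 4096 / 32 = 128
 * descriptors in 64-bit address mode, or PAGE_SIZE /
 * sizeof(struct idmac_desc) = 4096 / 16 = 256 descriptors in 32-bit
 * mode (see dw_mci_idmac_init() below).
 */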

struct idmac_desc_64addr {
	u32		des0;	/* Control Descriptor */
#define IDMAC_OWN_CLR64(x) \
	!((x) & cpu_to_le32(IDMAC_DES0_OWN))

	u32		des1;	/* Reserved */

	u32		des2;	/* Buffer sizes */
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
	((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
	 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))

	u32		des3;	/* Reserved */

	u32		des4;	/* Lower 32-bits of Buffer Address Pointer 1 */
	u32		des5;	/* Upper 32-bits of Buffer Address Pointer 1 */

	u32		des6;	/* Lower 32-bits of Next Descriptor Address */
	u32		des7;	/* Upper 32-bits of Next Descriptor Address */
};

struct idmac_desc {
	__le32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	__le32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))
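
/*
 * Per the Synopsys IDMAC chained-descriptor layout used above: bits
 * [12:0] of the size word hold the buffer 1 size and bits [25:13] hold
 * the buffer 2 size, which is why the 0x03ffe000 mask preserves the
 * buffer 2 field while 0x1fff masks the new buffer 1 size.
 */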

	__le32		des2;	/* buffer 1 physical address */

	__le32		des3;	/* buffer 2 physical address */
};

/* Each descriptor can transfer up to 4KB of data in chained mode */
#define DW_MCI_DESC_DATA_LENGTH	0x1000
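
/*
 * For example, a single 9 KiB scatterlist entry would be split across
 * three chained descriptors by dw_mci_prepare_desc32/64() below:
 * 4 KiB + 4 KiB + 1 KiB, with only the last descriptor carrying less
 * than DW_MCI_DESC_DATA_LENGTH bytes.
 */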

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data *data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dw_mci_req);

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	struct dw_mci *host = s->private;

	pm_runtime_get_sync(host->dev);

	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

	pm_runtime_put_autosuspend(host->dev);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dw_mci_regs);

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host *mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */

static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
	u32 ctrl;

	ctrl = mci_readl(host, CTRL);
	ctrl |= reset;
	mci_writel(host, CTRL, ctrl);

	/* wait till resets clear */
	if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl,
				      !(ctrl & reset),
				      1, 500 * USEC_PER_MSEC)) {
		dev_err(host->dev,
			"Timeout resetting block (ctrl reset %#x)\n",
			ctrl & reset);
		return false;
	}

	return true;
}

static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
{
	u32 status;

	/*
	 * Databook says that before issuing a new data transfer command
	 * we need to check to see if the card is busy. Data transfer commands
	 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
	 *
	 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
	 * expected.
	 */
	if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
	    !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
		if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
					      status,
					      !(status & SDMMC_STATUS_BUSY),
					      10, 500 * USEC_PER_MSEC))
			dev_err(host->dev, "Busy; trying anyway\n");
	}
}

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd);
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status,
				      !(cmd_status & SDMMC_CMD_START),
				      1, 500 * USEC_PER_MSEC))
		dev_err(&slot->mmc->class_dev,
			"Timeout sending command (cmd %#x arg %#x status %#x)\n",
			cmd, arg, cmd_status);
}

static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 cmdr;

	cmd->error = -EINPROGRESS;
	cmdr = cmd->opcode;

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		u32 clk_en_a;

		/* Special bit makes CMD11 not die */
		cmdr |= SDMMC_CMD_VOLT_SWITCH;

		/* Change state to continue to handle CMD11 weirdness */
		WARN_ON(slot->host->state != STATE_SENDING_CMD);
		slot->host->state = STATE_SENDING_CMD11;

		/*
		 * We need to disable low power mode (automatic clock stop)
		 * while doing voltage switch so we don't confuse the card,
		 * since stopping the clock is a specific part of the UHS
		 * voltage change dance.
		 *
		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
		 * unconditionally turned back on in dw_mci_setup_bus() if it's
		 * ever called with a non-zero clock. That shouldn't happen
		 * until the voltage change is all done.
		 */
		clk_en_a = mci_readl(host, CLKENA);
		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	if (cmd->data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}

static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
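		/*
		 * Build a CMD52 (SD_IO_RW_DIRECT) abort: bit 31 selects a
		 * write, the register address (SDIO_CCCR_ABORT) sits at
		 * bits 25:9 of the argument, and the data byte written is
		 * the function number of the CMD53 being aborted, recovered
		 * from bits 30:28 of the original argument.
		 */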
		stop->opcode = SD_IO_RW_DIRECT;
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}

static inline void dw_mci_set_cto(struct dw_mci *host)
{
	unsigned int cto_clks;
	unsigned int cto_div;
	unsigned int cto_ms;
	unsigned long irqflags;

	cto_clks = mci_readl(host, TMOUT) & 0xff;
	cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
	if (cto_div == 0)
		cto_div = 1;

	cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div,
				  host->bus_hz);
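
	/*
	 * As a worked example (with assumed values): cto_clks = 255,
	 * CLKDIV = 1 (so cto_div = 2) and bus_hz = 100 MHz give
	 * 1000 * 255 * 2 / 100000000 = 0.0051, which DIV_ROUND_UP_ULL
	 * turns into 1 ms before the spare time below is added.
	 */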

	/* add a bit spare time */
	cto_ms += 10;

	/*
	 * The durations we're working with are fairly short so we have to be
	 * extra careful about synchronization here. Specifically in hardware a
	 * command timeout is _at most_ 5.1 ms, so that means we expect an
	 * interrupt (either command done or timeout) to come rather quickly
	 * after the mci_writel. ...but just in case we have a long interrupt
	 * latency let's add a bit of paranoia.
	 *
	 * In general we'll assume that at least an interrupt will be asserted
	 * in hardware by the time the cto_timer runs. ...and if it hasn't
	 * been asserted in hardware by that time then we'll assume it'll never
	 * come.
	 */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
		mod_timer(&host->cto_timer,
			  jiffies + msecs_to_jiffies(cto_ms) + 1);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd_flags);

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);

	/* response expected command only */
	if (cmd_flags & SDMMC_CMD_RESP_EXP)
		dw_mci_set_cto(host);
}

static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = &host->stop_abort;

	dw_mci_start_command(host, stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data && data->host_cookie == COOKIE_MAPPED) {
		dma_unmap_sg(host->dev,
			     data->sg,
			     data->sg_len,
			     mmc_get_dma_dir(data));
		data->host_cookie = COOKIE_UNMAPPED;
	}
}

static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}

static void dw_mci_dmac_complete_dma(void *arg)
{
	struct dw_mci *host = arg;
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	if ((host->use_dma == TRANS_MODE_EDMAC) &&
	    data && (data->flags & MMC_DATA_READ))
		/* Invalidate cache after read */
		dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc),
				    data->sg,
				    data->sg_len,
				    DMA_FROM_DEVICE);

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}

static int dw_mci_idmac_init(struct dw_mci *host)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
		     i++, p++) {
			p->des6 = (host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) & 0xffffffff;

			p->des7 = (u64)(host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) >> 32;
			/* Initialize reserved and buffer size fields to "0" */
			p->des0 = 0;
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des6 = host->sg_dma & 0xffffffff;
		p->des7 = (u64)host->sg_dma >> 32;
		p->des0 = IDMAC_DES0_ER;

	} else {
		struct idmac_desc *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu;
		     i < host->ring_size - 1;
		     i++, p++) {
			p->des3 = cpu_to_le32(host->sg_dma +
					(sizeof(struct idmac_desc) * (i + 1)));
			p->des0 = 0;
			p->des1 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des3 = cpu_to_le32(host->sg_dma);
		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
	}

	dw_mci_idmac_reset(host);

	if (host->dma_64bit_address == 1) {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);

	} else {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDR, host->sg_dma);
	}

	return 0;
}

static inline int dw_mci_prepare_desc64(struct dw_mci *host,
					struct mmc_data *data,
					unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc_64addr *desc_first, *desc_last, *desc;
	u32 val;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u64 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the previous OWN-bit clear by the
			 * IDMAC to finish, to be sure this descriptor
			 * isn't still owned by the IDMAC; the IDMAC's
			 * writes and the CPU's reads are asynchronous.
			 */
			if (readl_poll_timeout_atomic(&desc->des0, val,
						!(val & IDMAC_DES0_OWN),
						10, 100 * USEC_PER_MSEC))
				goto err_own_bit;

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
						IDMAC_DES0_CH;

			/* Buffer length */
			IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des4 = mem_addr & 0xffffffff;
			desc->des5 = mem_addr >> 32;

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc_last->des0 |= IDMAC_DES0_LD;

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}

static inline int dw_mci_prepare_desc32(struct dw_mci *host,
					struct mmc_data *data,
					unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc *desc_first, *desc_last, *desc;
	u32 val;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u32 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the previous OWN-bit clear by the
			 * IDMAC to finish, to be sure this descriptor
			 * isn't still owned by the IDMAC; the IDMAC's
			 * writes and the CPU's reads are asynchronous.
			 */
			if (readl_poll_timeout_atomic(&desc->des0, val,
						      IDMAC_OWN_CLR64(val),
						      10,
						      100 * USEC_PER_MSEC))
				goto err_own_bit;

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
						 IDMAC_DES0_DIC |
						 IDMAC_DES0_CH);

			/* Buffer length */
			IDMAC_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des2 = cpu_to_le32(mem_addr);

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);

	/* Set last descriptor */
	desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
				       IDMAC_DES0_DIC));
	desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}

static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;
	int ret;

	if (host->dma_64bit_address == 1)
		ret = dw_mci_prepare_desc64(host, host->data, sg_len);
	else
		ret = dw_mci_prepare_desc32(host, host->data, sg_len);

	if (ret)
		goto out;

	/* drain writebuffer */
	wmb();

	/* Make sure to reset DMA in case we did PIO before this */
	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
	dw_mci_idmac_reset(host);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* drain writebuffer */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);

out:
	return ret;
}

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

static void dw_mci_edmac_stop_dma(struct dw_mci *host)
{
	dmaengine_terminate_async(host->dms->ch);
}

static int dw_mci_edmac_start_dma(struct dw_mci *host,
				  unsigned int sg_len)
{
	struct dma_slave_config cfg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct scatterlist *sgl = host->data->sg;
	static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 sg_elems = host->data->sg_len;
	u32 fifoth_val;
	u32 fifo_offset = host->fifo_reg - host->regs;
	int ret = 0;

	/* Set external dma config: burst size, burst width */
	cfg.dst_addr = host->phy_regs + fifo_offset;
	cfg.src_addr = cfg.dst_addr;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	/* Match burst msize with external dma config */
	fifoth_val = mci_readl(host, FIFOTH);
	cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
	cfg.src_maxburst = cfg.dst_maxburst;

	if (host->data->flags & MMC_DATA_WRITE)
		cfg.direction = DMA_MEM_TO_DEV;
	else
		cfg.direction = DMA_DEV_TO_MEM;

	ret = dmaengine_slave_config(host->dms->ch, &cfg);
	if (ret) {
		dev_err(host->dev, "Failed to config edmac.\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
				       sg_len, cfg.direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(host->dev, "Can't prepare slave sg.\n");
		return -EBUSY;
	}

	/* Set dw_mci_dmac_complete_dma as callback */
	desc->callback = dw_mci_dmac_complete_dma;
	desc->callback_param = (void *)host;
	dmaengine_submit(desc);

	/* Flush cache before write */
	if (host->data->flags & MMC_DATA_WRITE)
		dma_sync_sg_for_device(mmc_dev(host->slot->mmc), sgl,
				       sg_elems, DMA_TO_DEVICE);

	dma_async_issue_pending(host->dms->ch);

	return 0;
}

static int dw_mci_edmac_init(struct dw_mci *host)
{
	/* Request external dma channel */
	host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
	if (!host->dms)
		return -ENOMEM;

	host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx");
	if (!host->dms->ch) {
		dev_err(host->dev, "Failed to get external DMA channel.\n");
		kfree(host->dms);
		host->dms = NULL;
		return -ENXIO;
	}

	return 0;
}

static void dw_mci_edmac_exit(struct dw_mci *host)
{
	if (host->dms) {
		if (host->dms->ch) {
			dma_release_channel(host->dms->ch);
			host->dms->ch = NULL;
		}
		kfree(host->dms);
		host->dms = NULL;
	}
}

static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
	.init = dw_mci_edmac_init,
	.exit = dw_mci_edmac_exit,
	.start = dw_mci_edmac_start_dma,
	.stop = dw_mci_edmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   int cookie)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_len;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    mmc_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	data->host_cookie = cookie;

	return sg_len;
}

static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	/* This data might be unmapped at this time */
	data->host_cookie = COOKIE_UNMAPPED;

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data,
				    COOKIE_PRE_MAPPED) < 0)
		data->host_cookie = COOKIE_UNMAPPED;
}

static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie != COOKIE_UNMAPPED)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     mmc_get_dma_dir(data));
	data->host_cookie = COOKIE_UNMAPPED;
}

static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if (((mmc->caps & MMC_CAP_NEEDS_POLL)
				|| !mmc_card_is_removable(mmc))) {
		present = 1;

		if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
			if (mmc->caps & MMC_CAP_NEEDS_POLL) {
				dev_info(&mmc->class_dev,
					 "card is polling.\n");
			} else {
				dev_info(&mmc->class_dev,
					 "card is non-removable.\n");
			}
			set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		}

		return present;
	} else if (gpio_cd >= 0)
		present = gpio_cd;
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	spin_lock_bh(&host->lock);
	if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
		dev_dbg(&mmc->class_dev, "card is present\n");
	else if (!present &&
			!test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
		dev_dbg(&mmc->class_dev, "card is not present\n");
	spin_unlock_bh(&host->lock);

	return present;
}

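/*
 * Illustrative example (assumed values, not a requirement of the IP):
 * with a 32-entry FIFO that is 4 bytes wide and blksz = 512,
 * blksz_depth = 128, tx_wmark = 16 and tx_wmark_invers = 16. Scanning
 * mszs[] downward, 16 is the largest burst that divides both 128 and
 * 16, so msize = 4 (burst of 16) and rx_wmark = 15 get programmed.
 */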
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;

	/* PIO should skip this scenario */
	if (!host->use_dma)
		return;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width)
		goto done;

	do {
		if (!((blksz_depth % mszs[idx]) ||
		      (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx drops to '0', the loop exits without a match.
	 * Thus, the initial values are used.
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
}

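/*
 * Quick sanity example for the threshold logic below (assumed numbers):
 * with blksz = 512 and a 32-bit FIFO (data_shift = 2), blksz_depth is
 * 128, so the threshold is only programmed when the FIFO is at least
 * 128 entries deep; otherwise CDTHRCTL is cleared.
 */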
static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;
	u8 enable;

	/*
	 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
	 * in the FIFO region, so we really shouldn't access it).
	 */
	if (host->verid < DW_MMC_240A ||
	    (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
		return;

	/*
	 * The card write threshold was introduced in 2.80a and is only
	 * used when HS400 mode is enabled.
	 */
	if (data->flags & MMC_DATA_WRITE &&
	    host->timing == MMC_TIMING_MMC_HS400)
		return;

	if (data->flags & MMC_DATA_WRITE)
		enable = SDMMC_CARD_WR_THR_EN;
	else
		enable = SDMMC_CARD_RD_THR_EN;

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable));
	return;

disable:
	mci_writel(host, CDTHRCTL, 0);
}

static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dev_vdbg(host->dev,
			 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
			 (unsigned long)host->sg_cpu,
			 (unsigned long)host->sg_dma,
			 sg_len);

	/*
	 * Decide the MSIZE and RX/TX watermark.
	 * If the current block size is the same as the previous size,
	 * there is no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);

	if (host->dma_ops->start(host, sg_len)) {
		host->dma_ops->stop(host);
		/* We can't do DMA, try PIO for this one */
		dev_dbg(host->dev,
			"%s: fall back to PIO mode for current transfer\n",
			__func__);
		return -ENODEV;
	}

	return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int flags = SG_MITER_ATOMIC;
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ)
		host->dir_status = DW_MCI_RECV_STATUS;
	else
		host->dir_status = DW_MCI_SEND_STATUS;

	dw_mci_ctrl_thld(host, data);

	if (dw_mci_submit_data_dma(host, data)) {
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);

		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode. If wm_aligned
		 * is set, we set the watermark to be the same as the
		 * data size. If the next data transfer may be done in
		 * DMA mode, prev_blksz should be invalidated.
		 */
		if (host->wm_aligned)
			dw_mci_adjust_fifoth(host, data);
		else
			mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}

static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;
	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

	/* We must continue to set bit 28 in CMD until the change is complete */
	if (host->state == STATE_WAITING_CMD11_DONE)
		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
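
		/*
		 * Worked example (assumed bus_hz = 100 MHz): a request
		 * for 400 kHz gives div = 250, so CLKDIV =
		 * DIV_ROUND_UP(250, 2) = 125 and the card clock is
		 * 100 MHz / (2 * 125) = 400 kHz. A request for 52 MHz
		 * gives div = 1 + 1 = 2, CLKDIV = 1, and a card clock
		 * of 50 MHz, never above the requested rate.
		 */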

		if ((clock != slot->__clk_old &&
			!test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) ||
			force_clkinit) {
			/* Silence the verbose log if calling from PM context */
			if (!force_clkinit)
				dev_info(&slot->mmc->class_dev,
					 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
					 slot->id, host->bus_hz, clock,
					 div ? ((host->bus_hz / div) >> 1) :
					 host->bus_hz, div);

			/*
			 * If the card is polling, display the message
			 * only once at boot time.
			 */
			if (slot->mmc->caps & MMC_CAP_NEEDS_POLL &&
					slot->mmc->f_min == clock)
				set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags);
		}

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* keep the last clock value that was requested from core */
		slot->__clk_old = clock;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}

static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data *data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		mci_writel(host, TMOUT, 0xFFFFFFFF);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb(); /* drain writebuffer */
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		unsigned long irqflags;

		/*
		 * Databook says to fail after 2ms w/ no response, but evidence
		 * shows that sometimes the cmd11 interrupt takes over 130ms.
		 * We'll set to 500ms, plus an extra jiffy just in case jiffies
		 * is just about to roll over.
		 *
		 * We do this whole thing under spinlock and only if the
		 * command hasn't already completed (indicating that the irq
		 * already ran so we don't want the timeout).
		 */
		spin_lock_irqsave(&host->irq_lock, irqflags);
		if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
			mod_timer(&host->cmd11_timer,
				  jiffies + msecs_to_jiffies(500) + 1);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);
	}

	host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}

static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_WAITING_CMD11_DONE) {
		dev_warn(&slot->mmc->class_dev,
			 "Voltage change didn't complete\n");
		/*
		 * this case isn't expected to happen, so we can
		 * either crash here or just try to continue on
		 * in the closest possible state
		 */
		host->state = STATE_IDLE;
	}

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */

	if (!dw_mci_get_cd(mmc)) {
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_bh(&host->lock);

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}

static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_UHS_DDR50 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
						    ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/* return if we failed to turn on vmmc */
				return;
			}
		}
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_ON:
		if (!slot->host->vqmmc_enabled) {
			if (!IS_ERR(mmc->supply.vqmmc)) {
				ret = regulator_enable(mmc->supply.vqmmc);
				if (ret < 0)
					dev_err(slot->host->dev,
						"failed to enable vqmmc\n");
				else
					slot->host->vqmmc_enabled = true;

			} else {
				/* Keep track so we don't reset again */
				slot->host->vqmmc_enabled = true;
			}

			/* Reset our state machine after powering on */
			dw_mci_ctrl_reset(slot->host,
					  SDMMC_CTRL_ALL_RESET_FLAGS);
		}

		/* Adjust clock / bus width after power is up */
		dw_mci_setup_bus(slot, false);

		break;
	case MMC_POWER_OFF:
		/* Turn clock off before power goes down */
		dw_mci_setup_bus(slot, false);

		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
			regulator_disable(mmc->supply.vqmmc);
		slot->host->vqmmc_enabled = false;

		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}

	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
		slot->host->state = STATE_IDLE;
}

static int dw_mci_card_busy(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	u32 status;

	/*
	 * Check the busy bit, which is set while DAT[3:0]
	 * (the data lines) are held at 0000 (card busy)
	 */
	status = mci_readl(slot->host, STATUS);

	return !!(status & SDMMC_STATUS_BUSY);
}

static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	u32 uhs;
	u32 v18 = SDMMC_UHS_18V << slot->id;
	int ret;

	if (drv_data && drv_data->switch_voltage)
		return drv_data->switch_voltage(mmc, ios);

	/*
	 * Program the voltage.  Note that some instances of dw_mmc may use
	 * the UHS_REG for this.  For other instances (like exynos) the UHS_REG
	 * does no harm but you need to set the regulator directly.  Try both.
	 */
	uhs = mci_readl(host, UHS_REG);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		uhs &= ~v18;
	else
		uhs |= v18;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = mmc_regulator_set_vqmmc(mmc, ios);

		if (ret) {
			dev_dbg(&mmc->class_dev,
				"Regulator set error %d - %s V\n",
				ret, uhs & v18 ? "1.8" : "3.3");
			return ret;
		}
	}
	mci_writel(host, UHS_REG, uhs);

	return 0;
}

static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	int gpio_ro = mmc_gpio_get_ro(mmc);

	/* Use platform get_ro function, else try on board write protect */
	if (gpio_ro >= 0)
		read_only = gpio_ro;
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}

static void dw_mci_hw_reset(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int reset;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dw_mci_idmac_reset(host);

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
			       SDMMC_CTRL_FIFO_RESET))
		return;

	/*
	 * According to eMMC spec, card reset procedure:
	 * tRstW >= 1us:   RST_n pulse width
	 * tRSCA >= 200us: RST_n to Command time
	 * tRSTH >= 1us:   RST_n high period
	 */
	reset = mci_readl(host, RST_N);
	reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
	mci_writel(host, RST_N, reset);
	usleep_range(1, 2);
	reset |= SDMMC_RST_HWACTIVE << slot->id;
	mci_writel(host, RST_N, reset);
	usleep_range(200, 300);
}

static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	/*
	 * Low power mode will stop the card clock when idle. According to the
	 * description of the CLKENA register we should disable low power mode
	 * for SDIO cards if we need SDIO interrupts to work.
	 */
	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
		const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
		u32 clk_en_a_old;
		u32 clk_en_a;

		clk_en_a_old = mci_readl(host, CLKENA);

		if (card->type == MMC_TYPE_SDIO ||
		    card->type == MMC_TYPE_SD_COMBO) {
			set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old & ~clken_low_pwr;
		} else {
			clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old | clken_low_pwr;
		}

		if (clk_en_a != clk_en_a_old) {
			mci_writel(host, CLKENA, clk_en_a);
			mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
				     SDMMC_CMD_PRV_DAT_WAIT, 0);
		}
	}
}

static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb)
{
	struct dw_mci *host = slot->host;
	unsigned long irqflags;
	u32 int_mask;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb)
		int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
	else
		int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
	mci_writel(host, INTMASK, int_mask);

	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	__dw_mci_enable_sdio_irq(slot, enb);

	/* Avoid runtime suspending the device when SDIO IRQ is enabled */
	if (enb)
		pm_runtime_get_noresume(host->dev);
	else
		pm_runtime_put_noidle(host->dev);
}

static void dw_mci_ack_sdio_irq(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);

	__dw_mci_enable_sdio_irq(slot, 1);
}

static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int err = -EINVAL;

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode);
	return err;
}

static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
				       struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;

	if (drv_data && drv_data->prepare_hs400_tuning)
		return drv_data->prepare_hs400_tuning(host, ios);

	return 0;
}

static bool dw_mci_reset(struct dw_mci *host)
{
	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
	bool ret = false;
	u32 status = 0;

	/*
	 * Resetting generates a block interrupt, hence setting
	 * the scatter-gather pointer to NULL.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	if (host->use_dma)
		flags |= SDMMC_CTRL_DMA_RESET;

	if (dw_mci_ctrl_reset(host, flags)) {
		/*
		 * In all cases we clear the RAWINTS
		 * register to clear any interrupts.
		 */
		mci_writel(host, RINTSTS, 0xFFFFFFFF);

		if (!host->use_dma) {
			ret = true;
			goto ciu_out;
		}

		/* Wait for dma_req to be cleared */
		if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
					      status,
					      !(status & SDMMC_STATUS_DMA_REQ),
					      1, 500 * USEC_PER_MSEC)) {
			dev_err(host->dev,
				"%s: Timeout waiting for dma_req to be cleared\n",
				__func__);
			goto ciu_out;
		}

		/* when using DMA next we reset the fifo again */
		if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
			goto ciu_out;
	} else {
		/* if the controller reset bit did clear, then set clock regs */
		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
			dev_err(host->dev,
				"%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
				__func__);
			goto ciu_out;
		}
	}

	if (host->use_dma == TRANS_MODE_IDMAC)
		/* It is also required that we reinit idmac */
		dw_mci_idmac_init(host);

	ret = true;

ciu_out:
	/* After a CTRL reset we need to have CIU set clock registers */
	mci_send_cmd(host->slot, SDMMC_CMD_UPD_CLK, 0);

	return ret;
}

static const struct mmc_host_ops dw_mci_ops = {
	.request = dw_mci_request,
	.pre_req = dw_mci_pre_req,
	.post_req = dw_mci_post_req,
	.set_ios = dw_mci_set_ios,
	.get_ro = dw_mci_get_ro,
	.get_cd = dw_mci_get_cd,
	.hw_reset = dw_mci_hw_reset,
	.enable_sdio_irq = dw_mci_enable_sdio_irq,
	.ack_sdio_irq = dw_mci_ack_sdio_irq,
	.execute_tuning = dw_mci_execute_tuning,
	.card_busy = dw_mci_card_busy,
	.start_signal_voltage_switch = dw_mci_switch_voltage,
	.init_card = dw_mci_init_card,
	.prepare_hs400_tuning = dw_mci_prepare_hs400_tuning,
};

static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host *prev_mmc = host->slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");

		if (host->state == STATE_SENDING_CMD11)
			host->state = STATE_WAITING_CMD11_DONE;
		else
			host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}

static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
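	/*
	 * For a long (136-bit) response the controller latches the least
	 * significant word in RESP0, which is why resp[] is filled in
	 * reverse order below: the mmc core expects resp[0] to hold the
	 * most significant word.
	 */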
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	return cmd->error;
}

static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status ==
				DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
					DW_MCI_RECV_STATUS) {
				data->error = -EILSEQ;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EILSEQ;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}

static void dw_mci_set_drto(struct dw_mci *host)
{
	unsigned int drto_clks;
	unsigned int drto_div;
	unsigned int drto_ms;
	unsigned long irqflags;

	drto_clks = mci_readl(host, TMOUT) >> 8;
	drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
	if (drto_div == 0)
		drto_div = 1;

	drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div,
				   host->bus_hz);

	/* add a bit spare time */
	drto_ms += 10;

	spin_lock_irqsave(&host->irq_lock, irqflags);
	if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
		mod_timer(&host->dto_timer,
			  jiffies + msecs_to_jiffies(drto_ms));
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
{
	if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
		return false;

	/*
	 * Really be certain that the timer has stopped.  This is a bit of
	 * paranoia and could only really happen if we had really bad
	 * interrupt latency and the interrupt routine and timeout were
	 * running concurrently so that the del_timer() in the interrupt
	 * handler couldn't run.
	 */
	WARN_ON(del_timer_sync(&host->cto_timer));
	clear_bit(EVENT_CMD_COMPLETE, &host->pending_events);

	return true;
}

static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
{
	if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
		return false;

	/* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
	WARN_ON(del_timer_sync(&host->dto_timer));
	clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);

	return true;
}

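/*
 * Rough overview of the request state machine driven below (derived
 * from the code, simplified): a request normally walks
 * STATE_SENDING_CMD -> STATE_SENDING_DATA -> STATE_DATA_BUSY ->
 * STATE_SENDING_STOP and back to STATE_IDLE, with STATE_SENDING_CMD11
 * and STATE_WAITING_CMD11_DONE covering the voltage-switch dance and
 * STATE_DATA_ERROR handling failed transfers.
 */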
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data *data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
		case STATE_WAITING_CMD11_DONE:
			break;

		case STATE_SENDING_CMD11:
		case STATE_SENDING_CMD:
			if (!dw_mci_clear_pending_cmd_complete(host))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				__dw_mci_start_request(host, host->slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
2015 /*
2016 * During UHS tuning sequence, sending the stop
2017 * command after the response CRC error would
2018 * throw the system into a confused state
2019 * causing all future tuning phases to report
2020 * failure.
2021 *
2022			 * In such a case, the controller will move into a
2023			 * data transfer state after a response error or
2024 * response CRC error. Let's let that finish
2025 * before trying to send a stop, so we'll go to
2026 * STATE_SENDING_DATA.
2027 *
2028 * Although letting the data transfer take place
2029 * will waste a bit of time (we already know
2030 * the command was bad), it can't cause any
2031 * errors since it's possible it would have
2032 * taken place anyway if this tasklet got
2033 * delayed. Allowing the transfer to take place
2034 * avoids races and keeps things simple.
2035 */
2036 if ((err != -ETIMEDOUT) &&
2037 (cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
2038 state = STATE_SENDING_DATA;
2039 continue;
2040 }
2041
2042 dw_mci_stop_dma(host);
2043 send_stop_abort(host, data);
2044 state = STATE_SENDING_STOP;
2045 break;
2046 }
2047
2048 if (!cmd->data || err) {
2049 dw_mci_request_end(host, mrq);
2050 goto unlock;
2051 }
2052
2053 prev_state = state = STATE_SENDING_DATA;
2054 /* fall through */
2055
2056 case STATE_SENDING_DATA:
2057 /*
2058 * We could get a data error and never a transfer
2059 * complete so we'd better check for it here.
2060 *
2061 * Note that we don't really care if we also got a
2062 * transfer complete; stopping the DMA and sending an
2063 * abort won't hurt.
2064 */
2065 if (test_and_clear_bit(EVENT_DATA_ERROR,
2066 &host->pending_events)) {
2067 dw_mci_stop_dma(host);
2068 if (!(host->data_status & (SDMMC_INT_DRTO |
2069 SDMMC_INT_EBE)))
2070 send_stop_abort(host, data);
2071 state = STATE_DATA_ERROR;
2072 break;
2073 }
2074
2075 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2076 &host->pending_events)) {
2077 /*
2078				 * If all data-related interrupts don't arrive
2079				 * within the given time while reading, arm the
2080				 * data-read timeout (DRTO) timer.
2080 */
2081 if (host->dir_status == DW_MCI_RECV_STATUS)
2082 dw_mci_set_drto(host);
2083 break;
2084 }
2085
2086 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2087
2088 /*
2089 * Handle an EVENT_DATA_ERROR that might have shown up
2090 * before the transfer completed. This might not have
2091 * been caught by the check above because the interrupt
2092 * could have gone off between the previous check and
2093 * the check for transfer complete.
2094 *
2095 * Technically this ought not be needed assuming we
2096 * get a DATA_COMPLETE eventually (we'll notice the
2097 * error and end the request), but it shouldn't hurt.
2098 *
2099 * This has the advantage of sending the stop command.
2100 */
2101 if (test_and_clear_bit(EVENT_DATA_ERROR,
2102 &host->pending_events)) {
2103 dw_mci_stop_dma(host);
2104 if (!(host->data_status & (SDMMC_INT_DRTO |
2105 SDMMC_INT_EBE)))
2106 send_stop_abort(host, data);
2107 state = STATE_DATA_ERROR;
2108 break;
2109 }
2110 prev_state = state = STATE_DATA_BUSY;
2111
2112 /* fall through */
2113
2114 case STATE_DATA_BUSY:
2115 if (!dw_mci_clear_pending_data_complete(host)) {
2116 /*
2117				 * If the data error interrupt came but the data
2118				 * over interrupt doesn't arrive within the given
2119				 * time while reading, arm the data-read timeout timer.
2120 */
2121 if (host->dir_status == DW_MCI_RECV_STATUS)
2122 dw_mci_set_drto(host);
2123 break;
2124 }
2125
2126 host->data = NULL;
2127 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2128 err = dw_mci_data_complete(host, data);
2129
2130 if (!err) {
2131 if (!data->stop || mrq->sbc) {
2132 if (mrq->sbc && data->stop)
2133 data->stop->error = 0;
2134 dw_mci_request_end(host, mrq);
2135 goto unlock;
2136 }
2137
2138				/* stop command for open-ended transfer */
2139 if (data->stop)
2140 send_stop_abort(host, data);
2141 } else {
2142 /*
2143 * If we don't have a command complete now we'll
2144 * never get one since we just reset everything;
2145 * better end the request.
2146 *
2147 * If we do have a command complete we'll fall
2148 * through to the SENDING_STOP command and
2149 * everything will be peachy keen.
2150 */
2151 if (!test_bit(EVENT_CMD_COMPLETE,
2152 &host->pending_events)) {
2153 host->cmd = NULL;
2154 dw_mci_request_end(host, mrq);
2155 goto unlock;
2156 }
2157 }
2158
2159 /*
2160			 * If err is non-zero, a stop/abort command
2161			 * has already been issued.
2162 */
2163 prev_state = state = STATE_SENDING_STOP;
2164
2165 /* fall through */
2166
2167 case STATE_SENDING_STOP:
2168 if (!dw_mci_clear_pending_cmd_complete(host))
2169 break;
2170
2171 /* CMD error in data command */
2172 if (mrq->cmd->error && mrq->data)
2173 dw_mci_reset(host);
2174
2175 host->cmd = NULL;
2176 host->data = NULL;
2177
2178 if (!mrq->sbc && mrq->stop)
2179 dw_mci_command_complete(host, mrq->stop);
2180 else
2181 host->cmd_status = 0;
2182
2183 dw_mci_request_end(host, mrq);
2184 goto unlock;
2185
2186 case STATE_DATA_ERROR:
2187 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2188 &host->pending_events))
2189 break;
2190
2191 state = STATE_DATA_BUSY;
2192 break;
2193 }
2194 } while (state != prev_state);
2195
2196 host->state = state;
2197unlock:
2198 spin_unlock(&host->lock);
2200}
2201
2202/* push final bytes to part_buf, only use during push */
2203static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2204{
2205 memcpy((void *)&host->part_buf, buf, cnt);
2206 host->part_buf_count = cnt;
2207}
2208
2209/* append bytes to part_buf, only use during push */
2210static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2211{
2212 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2213 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2214 host->part_buf_count += cnt;
2215 return cnt;
2216}
2217
2218/* pull first bytes from part_buf, only use during pull */
2219static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2220{
2221 cnt = min_t(int, cnt, host->part_buf_count);
2222 if (cnt) {
2223 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2224 cnt);
2225 host->part_buf_count -= cnt;
2226 host->part_buf_start += cnt;
2227 }
2228 return cnt;
2229}
2230
2231/* pull final bytes from the part_buf, assuming it's just been filled */
2232static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2233{
2234 memcpy(buf, &host->part_buf, cnt);
2235 host->part_buf_start = cnt;
2236 host->part_buf_count = (1 << host->data_shift) - cnt;
2237}
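
/*
 * Illustration of the part_buf flow above (hypothetical numbers): with a
 * 32-bit FIFO (data_shift = 2), pushing a 7-byte buffer writes one full
 * 4-byte word to the FIFO and leaves 3 bytes in part_buf
 * (part_buf_count = 3); the next push first tops part_buf up to 4 bytes
 * via dw_mci_push_part_bytes() and flushes it before continuing. The pull
 * path mirrors this, with part_buf_start/part_buf_count tracking the
 * unread remainder of the last FIFO word.
 */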
2238
2239static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2240{
2241 struct mmc_data *data = host->data;
2242 int init_cnt = cnt;
2243
2244 /* try and push anything in the part_buf */
2245 if (unlikely(host->part_buf_count)) {
2246 int len = dw_mci_push_part_bytes(host, buf, cnt);
2247
2248 buf += len;
2249 cnt -= len;
2250 if (host->part_buf_count == 2) {
2251 mci_fifo_writew(host->fifo_reg, host->part_buf16);
2252 host->part_buf_count = 0;
2253 }
2254 }
2255#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2256 if (unlikely((unsigned long)buf & 0x1)) {
2257 while (cnt >= 2) {
2258 u16 aligned_buf[64];
2259 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2260 int items = len >> 1;
2261 int i;
2262 /* memcpy from input buffer into aligned buffer */
2263 memcpy(aligned_buf, buf, len);
2264 buf += len;
2265 cnt -= len;
2266 /* push data from aligned buffer into fifo */
2267 for (i = 0; i < items; ++i)
2268 mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
2269 }
2270 } else
2271#endif
2272 {
2273 u16 *pdata = buf;
2274
2275 for (; cnt >= 2; cnt -= 2)
2276 mci_fifo_writew(host->fifo_reg, *pdata++);
2277 buf = pdata;
2278 }
2279 /* put anything remaining in the part_buf */
2280 if (cnt) {
2281 dw_mci_set_part_bytes(host, buf, cnt);
2282 /* Push data if we have reached the expected data length */
2283 if ((data->bytes_xfered + init_cnt) ==
2284 (data->blksz * data->blocks))
2285 mci_fifo_writew(host->fifo_reg, host->part_buf16);
2286 }
2287}
2288
2289static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2290{
2291#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2292 if (unlikely((unsigned long)buf & 0x1)) {
2293 while (cnt >= 2) {
2294 /* pull data from fifo into aligned buffer */
2295 u16 aligned_buf[64];
2296 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2297 int items = len >> 1;
2298 int i;
2299
2300 for (i = 0; i < items; ++i)
2301 aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
2302 /* memcpy from aligned buffer into output buffer */
2303 memcpy(buf, aligned_buf, len);
2304 buf += len;
2305 cnt -= len;
2306 }
2307 } else
2308#endif
2309 {
2310 u16 *pdata = buf;
2311
2312 for (; cnt >= 2; cnt -= 2)
2313 *pdata++ = mci_fifo_readw(host->fifo_reg);
2314 buf = pdata;
2315 }
2316 if (cnt) {
2317 host->part_buf16 = mci_fifo_readw(host->fifo_reg);
2318 dw_mci_pull_final_bytes(host, buf, cnt);
2319 }
2320}
2321
2322static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2323{
2324 struct mmc_data *data = host->data;
2325 int init_cnt = cnt;
2326
2327 /* try and push anything in the part_buf */
2328 if (unlikely(host->part_buf_count)) {
2329 int len = dw_mci_push_part_bytes(host, buf, cnt);
2330
2331 buf += len;
2332 cnt -= len;
2333 if (host->part_buf_count == 4) {
2334 mci_fifo_writel(host->fifo_reg, host->part_buf32);
2335 host->part_buf_count = 0;
2336 }
2337 }
2338#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2339 if (unlikely((unsigned long)buf & 0x3)) {
2340 while (cnt >= 4) {
2341 u32 aligned_buf[32];
2342 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2343 int items = len >> 2;
2344 int i;
2345 /* memcpy from input buffer into aligned buffer */
2346 memcpy(aligned_buf, buf, len);
2347 buf += len;
2348 cnt -= len;
2349 /* push data from aligned buffer into fifo */
2350 for (i = 0; i < items; ++i)
2351 mci_fifo_writel(host->fifo_reg, aligned_buf[i]);
2352 }
2353 } else
2354#endif
2355 {
2356 u32 *pdata = buf;
2357
2358 for (; cnt >= 4; cnt -= 4)
2359 mci_fifo_writel(host->fifo_reg, *pdata++);
2360 buf = pdata;
2361 }
2362 /* put anything remaining in the part_buf */
2363 if (cnt) {
2364 dw_mci_set_part_bytes(host, buf, cnt);
2365 /* Push data if we have reached the expected data length */
2366 if ((data->bytes_xfered + init_cnt) ==
2367 (data->blksz * data->blocks))
2368 mci_fifo_writel(host->fifo_reg, host->part_buf32);
2369 }
2370}
2371
2372static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2373{
2374#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2375 if (unlikely((unsigned long)buf & 0x3)) {
2376 while (cnt >= 4) {
2377 /* pull data from fifo into aligned buffer */
2378 u32 aligned_buf[32];
2379 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2380 int items = len >> 2;
2381 int i;
2382
2383 for (i = 0; i < items; ++i)
2384 aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
2385 /* memcpy from aligned buffer into output buffer */
2386 memcpy(buf, aligned_buf, len);
2387 buf += len;
2388 cnt -= len;
2389 }
2390 } else
2391#endif
2392 {
2393 u32 *pdata = buf;
2394
2395 for (; cnt >= 4; cnt -= 4)
2396 *pdata++ = mci_fifo_readl(host->fifo_reg);
2397 buf = pdata;
2398 }
2399 if (cnt) {
2400 host->part_buf32 = mci_fifo_readl(host->fifo_reg);
2401 dw_mci_pull_final_bytes(host, buf, cnt);
2402 }
2403}
2404
2405static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2406{
2407 struct mmc_data *data = host->data;
2408 int init_cnt = cnt;
2409
2410 /* try and push anything in the part_buf */
2411 if (unlikely(host->part_buf_count)) {
2412 int len = dw_mci_push_part_bytes(host, buf, cnt);
2413
2414 buf += len;
2415 cnt -= len;
2416
2417 if (host->part_buf_count == 8) {
2418 mci_fifo_writeq(host->fifo_reg, host->part_buf);
2419 host->part_buf_count = 0;
2420 }
2421 }
2422#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2423 if (unlikely((unsigned long)buf & 0x7)) {
2424 while (cnt >= 8) {
2425 u64 aligned_buf[16];
2426 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2427 int items = len >> 3;
2428 int i;
2429 /* memcpy from input buffer into aligned buffer */
2430 memcpy(aligned_buf, buf, len);
2431 buf += len;
2432 cnt -= len;
2433 /* push data from aligned buffer into fifo */
2434 for (i = 0; i < items; ++i)
2435 mci_fifo_writeq(host->fifo_reg, aligned_buf[i]);
2436 }
2437 } else
2438#endif
2439 {
2440 u64 *pdata = buf;
2441
2442 for (; cnt >= 8; cnt -= 8)
2443 mci_fifo_writeq(host->fifo_reg, *pdata++);
2444 buf = pdata;
2445 }
2446 /* put anything remaining in the part_buf */
2447 if (cnt) {
2448 dw_mci_set_part_bytes(host, buf, cnt);
2449 /* Push data if we have reached the expected data length */
2450 if ((data->bytes_xfered + init_cnt) ==
2451 (data->blksz * data->blocks))
2452 mci_fifo_writeq(host->fifo_reg, host->part_buf);
2453 }
2454}
2455
2456static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2457{
2458#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2459 if (unlikely((unsigned long)buf & 0x7)) {
2460 while (cnt >= 8) {
2461 /* pull data from fifo into aligned buffer */
2462 u64 aligned_buf[16];
2463 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2464 int items = len >> 3;
2465 int i;
2466
2467 for (i = 0; i < items; ++i)
2468 aligned_buf[i] = mci_fifo_readq(host->fifo_reg);
2469
2470 /* memcpy from aligned buffer into output buffer */
2471 memcpy(buf, aligned_buf, len);
2472 buf += len;
2473 cnt -= len;
2474 }
2475 } else
2476#endif
2477 {
2478 u64 *pdata = buf;
2479
2480 for (; cnt >= 8; cnt -= 8)
2481 *pdata++ = mci_fifo_readq(host->fifo_reg);
2482 buf = pdata;
2483 }
2484 if (cnt) {
2485 host->part_buf = mci_fifo_readq(host->fifo_reg);
2486 dw_mci_pull_final_bytes(host, buf, cnt);
2487 }
2488}
2489
2490static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2491{
2492 int len;
2493
2494 /* get remaining partial bytes */
2495 len = dw_mci_pull_part_bytes(host, buf, cnt);
2496 if (unlikely(len == cnt))
2497 return;
2498 buf += len;
2499 cnt -= len;
2500
2501 /* get the rest of the data */
2502 host->pull_data(host, buf, cnt);
2503}
2504
2505static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2506{
2507 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2508 void *buf;
2509 unsigned int offset;
2510 struct mmc_data *data = host->data;
2511 int shift = host->data_shift;
2512 u32 status;
2513 unsigned int len;
2514 unsigned int remain, fcnt;
2515
2516 do {
2517 if (!sg_miter_next(sg_miter))
2518 goto done;
2519
2520 host->sg = sg_miter->piter.sg;
2521 buf = sg_miter->addr;
2522 remain = sg_miter->length;
2523 offset = 0;
2524
2525 do {
2526 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2527 << shift) + host->part_buf_count;
2528 len = min(remain, fcnt);
2529 if (!len)
2530 break;
2531 dw_mci_pull_data(host, (void *)(buf + offset), len);
2532 data->bytes_xfered += len;
2533 offset += len;
2534 remain -= len;
2535 } while (remain);
2536
2537 sg_miter->consumed = offset;
2538 status = mci_readl(host, MINTSTS);
2539 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2540		/* if RXDR is ready, read again */
2541 } while ((status & SDMMC_INT_RXDR) ||
2542 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2543
2544 if (!remain) {
2545 if (!sg_miter_next(sg_miter))
2546 goto done;
2547 sg_miter->consumed = 0;
2548 }
2549 sg_miter_stop(sg_miter);
2550 return;
2551
2552done:
2553 sg_miter_stop(sg_miter);
2554 host->sg = NULL;
2555 smp_wmb(); /* drain writebuffer */
2556 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2557}
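
/*
 * Worked example for the fcnt computation above (illustrative values): if
 * STATUS reports 16 words in the FIFO on a 32-bit host (shift =
 * host->data_shift = 2) and part_buf holds 1 leftover byte, then
 * fcnt = (16 << 2) + 1 = 65 bytes are available to pull. The write path
 * below inverts this, using the free space (fifo_depth minus the fill
 * level) shifted up, minus part_buf_count.
 */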
2558
2559static void dw_mci_write_data_pio(struct dw_mci *host)
2560{
2561 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2562 void *buf;
2563 unsigned int offset;
2564 struct mmc_data *data = host->data;
2565 int shift = host->data_shift;
2566 u32 status;
2567 unsigned int len;
2568 unsigned int fifo_depth = host->fifo_depth;
2569 unsigned int remain, fcnt;
2570
2571 do {
2572 if (!sg_miter_next(sg_miter))
2573 goto done;
2574
2575 host->sg = sg_miter->piter.sg;
2576 buf = sg_miter->addr;
2577 remain = sg_miter->length;
2578 offset = 0;
2579
2580 do {
2581 fcnt = ((fifo_depth -
2582 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2583 << shift) - host->part_buf_count;
2584 len = min(remain, fcnt);
2585 if (!len)
2586 break;
2587 host->push_data(host, (void *)(buf + offset), len);
2588 data->bytes_xfered += len;
2589 offset += len;
2590 remain -= len;
2591 } while (remain);
2592
2593 sg_miter->consumed = offset;
2594 status = mci_readl(host, MINTSTS);
2595 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2596	} while (status & SDMMC_INT_TXDR); /* if TXDR is set, write again */
2597
2598 if (!remain) {
2599 if (!sg_miter_next(sg_miter))
2600 goto done;
2601 sg_miter->consumed = 0;
2602 }
2603 sg_miter_stop(sg_miter);
2604 return;
2605
2606done:
2607 sg_miter_stop(sg_miter);
2608 host->sg = NULL;
2609 smp_wmb(); /* drain writebuffer */
2610 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2611}
2612
2613static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2614{
2615 del_timer(&host->cto_timer);
2616
2617 if (!host->cmd_status)
2618 host->cmd_status = status;
2619
2620 smp_wmb(); /* drain writebuffer */
2621
2622 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2623 tasklet_schedule(&host->tasklet);
2624}
2625
2626static void dw_mci_handle_cd(struct dw_mci *host)
2627{
2628 struct dw_mci_slot *slot = host->slot;
2629
2630 if (slot->mmc->ops->card_event)
2631 slot->mmc->ops->card_event(slot->mmc);
2632 mmc_detect_change(slot->mmc,
2633 msecs_to_jiffies(host->pdata->detect_delay_ms));
2634}
2635
2636static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2637{
2638 struct dw_mci *host = dev_id;
2639 u32 pending;
2640 struct dw_mci_slot *slot = host->slot;
2641 unsigned long irqflags;
2642
2643 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2644
2645 if (pending) {
2646 /* Check volt switch first, since it can look like an error */
2647 if ((host->state == STATE_SENDING_CMD11) &&
2648 (pending & SDMMC_INT_VOLT_SWITCH)) {
2649 mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
2650 pending &= ~SDMMC_INT_VOLT_SWITCH;
2651
2652 /*
2653 * Hold the lock; we know cmd11_timer can't be kicked
2654 * off after the lock is released, so safe to delete.
2655 */
2656 spin_lock_irqsave(&host->irq_lock, irqflags);
2657 dw_mci_cmd_interrupt(host, pending);
2658 spin_unlock_irqrestore(&host->irq_lock, irqflags);
2659
2660 del_timer(&host->cmd11_timer);
2661 }
2662
2663 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2664 spin_lock_irqsave(&host->irq_lock, irqflags);
2665
2666 del_timer(&host->cto_timer);
2667 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2668 host->cmd_status = pending;
2669 smp_wmb(); /* drain writebuffer */
2670 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2671
2672 spin_unlock_irqrestore(&host->irq_lock, irqflags);
2673 }
2674
2675 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2676 /* if there is an error report DATA_ERROR */
2677 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2678 host->data_status = pending;
2679 smp_wmb(); /* drain writebuffer */
2680 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2681 tasklet_schedule(&host->tasklet);
2682 }
2683
2684 if (pending & SDMMC_INT_DATA_OVER) {
2685 spin_lock_irqsave(&host->irq_lock, irqflags);
2686
2687 del_timer(&host->dto_timer);
2688
2689 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2690 if (!host->data_status)
2691 host->data_status = pending;
2692 smp_wmb(); /* drain writebuffer */
2693 if (host->dir_status == DW_MCI_RECV_STATUS) {
2694 if (host->sg != NULL)
2695 dw_mci_read_data_pio(host, true);
2696 }
2697 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2698 tasklet_schedule(&host->tasklet);
2699
2700 spin_unlock_irqrestore(&host->irq_lock, irqflags);
2701 }
2702
2703 if (pending & SDMMC_INT_RXDR) {
2704 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2705 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2706 dw_mci_read_data_pio(host, false);
2707 }
2708
2709 if (pending & SDMMC_INT_TXDR) {
2710 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2711 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2712 dw_mci_write_data_pio(host);
2713 }
2714
2715 if (pending & SDMMC_INT_CMD_DONE) {
2716 spin_lock_irqsave(&host->irq_lock, irqflags);
2717
2718 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2719 dw_mci_cmd_interrupt(host, pending);
2720
2721 spin_unlock_irqrestore(&host->irq_lock, irqflags);
2722 }
2723
2724 if (pending & SDMMC_INT_CD) {
2725 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2726 dw_mci_handle_cd(host);
2727 }
2728
2729 if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
2730 mci_writel(host, RINTSTS,
2731 SDMMC_INT_SDIO(slot->sdio_id));
2732 __dw_mci_enable_sdio_irq(slot, 0);
2733 sdio_signal_irq(slot->mmc);
2734 }
2735
2736 }
2737
2738 if (host->use_dma != TRANS_MODE_IDMAC)
2739 return IRQ_HANDLED;
2740
2741 /* Handle IDMA interrupts */
2742 if (host->dma_64bit_address == 1) {
2743 pending = mci_readl(host, IDSTS64);
2744 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2745 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
2746 SDMMC_IDMAC_INT_RI);
2747 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
2748 if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
2749 host->dma_ops->complete((void *)host);
2750 }
2751 } else {
2752 pending = mci_readl(host, IDSTS);
2753 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2754 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
2755 SDMMC_IDMAC_INT_RI);
2756 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2757 if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
2758 host->dma_ops->complete((void *)host);
2759 }
2760 }
2761
2762 return IRQ_HANDLED;
2763}
2764
2765static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
2766{
2767 struct dw_mci *host = slot->host;
2768 const struct dw_mci_drv_data *drv_data = host->drv_data;
2769 struct mmc_host *mmc = slot->mmc;
2770 int ctrl_id;
2771
2772 if (host->pdata->caps)
2773 mmc->caps = host->pdata->caps;
2774
2775 /*
2776	 * Support MMC_CAP_ERASE by default;
2777	 * it is needed for the trim/discard/erase commands.
2778 */
2779 mmc->caps |= MMC_CAP_ERASE;
2780
2781 if (host->pdata->pm_caps)
2782 mmc->pm_caps = host->pdata->pm_caps;
2783
2784 if (host->dev->of_node) {
2785 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2786 if (ctrl_id < 0)
2787 ctrl_id = 0;
2788 } else {
2789 ctrl_id = to_platform_device(host->dev)->id;
2790 }
2791
2792 if (drv_data && drv_data->caps) {
2793 if (ctrl_id >= drv_data->num_caps) {
2794 dev_err(host->dev, "invalid controller id %d\n",
2795 ctrl_id);
2796 return -EINVAL;
2797 }
2798 mmc->caps |= drv_data->caps[ctrl_id];
2799 }
2800
2801 if (host->pdata->caps2)
2802 mmc->caps2 = host->pdata->caps2;
2803
2804 mmc->f_min = DW_MCI_FREQ_MIN;
2805 if (!mmc->f_max)
2806 mmc->f_max = DW_MCI_FREQ_MAX;
2807
2808 /* Process SDIO IRQs through the sdio_irq_work. */
2809 if (mmc->caps & MMC_CAP_SDIO_IRQ)
2810 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
2811
2812 return 0;
2813}
2814
2815static int dw_mci_init_slot(struct dw_mci *host)
2816{
2817 struct mmc_host *mmc;
2818 struct dw_mci_slot *slot;
2819 int ret;
2820
2821 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2822 if (!mmc)
2823 return -ENOMEM;
2824
2825 slot = mmc_priv(mmc);
2826 slot->id = 0;
2827 slot->sdio_id = host->sdio_id0 + slot->id;
2828 slot->mmc = mmc;
2829 slot->host = host;
2830 host->slot = slot;
2831
2832 mmc->ops = &dw_mci_ops;
2833
2834	/* if there are external regulators, get them */
2835 ret = mmc_regulator_get_supply(mmc);
2836 if (ret)
2837 goto err_host_allocated;
2838
2839 if (!mmc->ocr_avail)
2840 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2841
2842 ret = mmc_of_parse(mmc);
2843 if (ret)
2844 goto err_host_allocated;
2845
2846 ret = dw_mci_init_slot_caps(slot);
2847 if (ret)
2848 goto err_host_allocated;
2849
2850 /* Useful defaults if platform data is unset. */
2851 if (host->use_dma == TRANS_MODE_IDMAC) {
2852 mmc->max_segs = host->ring_size;
2853 mmc->max_blk_size = 65535;
2854 mmc->max_seg_size = 0x1000;
2855 mmc->max_req_size = mmc->max_seg_size * host->ring_size;
2856 mmc->max_blk_count = mmc->max_req_size / 512;
2857 } else if (host->use_dma == TRANS_MODE_EDMAC) {
2858 mmc->max_segs = 64;
2859 mmc->max_blk_size = 65535;
2860 mmc->max_blk_count = 65535;
2861 mmc->max_req_size =
2862 mmc->max_blk_size * mmc->max_blk_count;
2863 mmc->max_seg_size = mmc->max_req_size;
2864 } else {
2865 /* TRANS_MODE_PIO */
2866 mmc->max_segs = 64;
2867 mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
2868 mmc->max_blk_count = 512;
2869 mmc->max_req_size = mmc->max_blk_size *
2870 mmc->max_blk_count;
2871 mmc->max_seg_size = mmc->max_req_size;
2872 }
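
	/*
	 * Sanity-check example for the IDMAC limits above, assuming a 4 KiB
	 * PAGE_SIZE and 16-byte 32-bit descriptors (so ring_size = 256):
	 * each descriptor moves at most 4 KiB, giving
	 * max_req_size = 0x1000 * 256 = 1 MiB and
	 * max_blk_count = 1 MiB / 512 = 2048 blocks per request.
	 */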
2873
2874 dw_mci_get_cd(mmc);
2875
2876 ret = mmc_add_host(mmc);
2877 if (ret)
2878 goto err_host_allocated;
2879
2880#if defined(CONFIG_DEBUG_FS)
2881 dw_mci_init_debugfs(slot);
2882#endif
2883
2884 return 0;
2885
2886err_host_allocated:
2887 mmc_free_host(mmc);
2888 return ret;
2889}
2890
2891static void dw_mci_cleanup_slot(struct dw_mci_slot *slot)
2892{
2893 /* Debugfs stuff is cleaned up by mmc core */
2894 mmc_remove_host(slot->mmc);
2895 slot->host->slot = NULL;
2896 mmc_free_host(slot->mmc);
2897}
2898
2899static void dw_mci_init_dma(struct dw_mci *host)
2900{
2901 int addr_config;
2902 struct device *dev = host->dev;
2903
2904 /*
2905	 * Check the transfer mode from HCON[17:16].
2906	 * To clear up the ambiguous description in the dw_mmc databook:
2907	 * 2b'00: No DMA Interface -> actually means using the internal DMA block
2908	 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
2909	 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
2910	 * 2b'11: Non DW DMA Interface -> PIO only
2911	 * Compared to the DesignWare DMA Interface, the Generic DMA Interface
2912	 * has a simpler request/acknowledge handshake mechanism, and both are
2913	 * regarded as external DMA masters by dw_mmc.
2914 */
2915 host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
2916 if (host->use_dma == DMA_INTERFACE_IDMA) {
2917 host->use_dma = TRANS_MODE_IDMAC;
2918 } else if (host->use_dma == DMA_INTERFACE_DWDMA ||
2919 host->use_dma == DMA_INTERFACE_GDMA) {
2920 host->use_dma = TRANS_MODE_EDMAC;
2921 } else {
2922 goto no_dma;
2923 }
2924
2925 /* Determine which DMA interface to use */
2926 if (host->use_dma == TRANS_MODE_IDMAC) {
2927 /*
2928 * Check ADDR_CONFIG bit in HCON to find
2929 * IDMAC address bus width
2930 */
2931 addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));
2932
2933 if (addr_config == 1) {
2934 /* host supports IDMAC in 64-bit address mode */
2935 host->dma_64bit_address = 1;
2936 dev_info(host->dev,
2937 "IDMAC supports 64-bit address mode.\n");
2938 if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
2939 dma_set_coherent_mask(host->dev,
2940 DMA_BIT_MASK(64));
2941 } else {
2942 /* host supports IDMAC in 32-bit address mode */
2943 host->dma_64bit_address = 0;
2944 dev_info(host->dev,
2945 "IDMAC supports 32-bit address mode.\n");
2946 }
2947
2948 /* Alloc memory for sg translation */
2949 host->sg_cpu = dmam_alloc_coherent(host->dev,
2950 DESC_RING_BUF_SZ,
2951 &host->sg_dma, GFP_KERNEL);
2952 if (!host->sg_cpu) {
2953 dev_err(host->dev,
2954 "%s: could not alloc DMA memory\n",
2955 __func__);
2956 goto no_dma;
2957 }
2958
2959 host->dma_ops = &dw_mci_idmac_ops;
2960 dev_info(host->dev, "Using internal DMA controller.\n");
2961 } else {
2962 /* TRANS_MODE_EDMAC: check dma bindings again */
2963 if ((device_property_read_string_array(dev, "dma-names",
2964 NULL, 0) < 0) ||
2965 !device_property_present(dev, "dmas")) {
2966 goto no_dma;
2967 }
2968 host->dma_ops = &dw_mci_edmac_ops;
2969 dev_info(host->dev, "Using external DMA controller.\n");
2970 }
2971
2972 if (host->dma_ops->init && host->dma_ops->start &&
2973 host->dma_ops->stop && host->dma_ops->cleanup) {
2974 if (host->dma_ops->init(host)) {
2975 dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
2976 __func__);
2977 goto no_dma;
2978 }
2979 } else {
2980		dev_err(host->dev, "Incomplete DMA ops, not using DMA.\n");
2981 goto no_dma;
2982 }
2983
2984 return;
2985
2986no_dma:
2987 dev_info(host->dev, "Using PIO mode.\n");
2988 host->use_dma = TRANS_MODE_PIO;
2989}
2990
2991static void dw_mci_cmd11_timer(struct timer_list *t)
2992{
2993 struct dw_mci *host = from_timer(host, t, cmd11_timer);
2994
2995 if (host->state != STATE_SENDING_CMD11) {
2996 dev_warn(host->dev, "Unexpected CMD11 timeout\n");
2997 return;
2998 }
2999
3000 host->cmd_status = SDMMC_INT_RTO;
3001 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
3002 tasklet_schedule(&host->tasklet);
3003}
3004
3005static void dw_mci_cto_timer(struct timer_list *t)
3006{
3007 struct dw_mci *host = from_timer(host, t, cto_timer);
3008 unsigned long irqflags;
3009 u32 pending;
3010
3011 spin_lock_irqsave(&host->irq_lock, irqflags);
3012
3013 /*
3014 * If somehow we have very bad interrupt latency it's remotely possible
3015 * that the timer could fire while the interrupt is still pending or
3016 * while the interrupt is midway through running. Let's be paranoid
3017	 * and detect those two cases. Note that this paranoia is somewhat
3018 * justified because in this function we don't actually cancel the
3019 * pending command in the controller--we just assume it will never come.
3020 */
3021 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
3022 if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) {
3023 /* The interrupt should fire; no need to act but we can warn */
3024 dev_warn(host->dev, "Unexpected interrupt latency\n");
3025 goto exit;
3026 }
3027 if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) {
3028 /* Presumably interrupt handler couldn't delete the timer */
3029 dev_warn(host->dev, "CTO timeout when already completed\n");
3030 goto exit;
3031 }
3032
3033 /*
3034 * Continued paranoia to make sure we're in the state we expect.
3035 * This paranoia isn't really justified but it seems good to be safe.
3036 */
3037 switch (host->state) {
3038 case STATE_SENDING_CMD11:
3039 case STATE_SENDING_CMD:
3040 case STATE_SENDING_STOP:
3041 /*
3042 * If CMD_DONE interrupt does NOT come in sending command
3043 * state, we should notify the driver to terminate current
3044 * transfer and report a command timeout to the core.
3045 */
3046 host->cmd_status = SDMMC_INT_RTO;
3047 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
3048 tasklet_schedule(&host->tasklet);
3049 break;
3050 default:
3051 dev_warn(host->dev, "Unexpected command timeout, state %d\n",
3052 host->state);
3053 break;
3054 }
3055
3056exit:
3057 spin_unlock_irqrestore(&host->irq_lock, irqflags);
3058}
3059
3060static void dw_mci_dto_timer(struct timer_list *t)
3061{
3062 struct dw_mci *host = from_timer(host, t, dto_timer);
3063 unsigned long irqflags;
3064 u32 pending;
3065
3066 spin_lock_irqsave(&host->irq_lock, irqflags);
3067
3068 /*
3069 * The DTO timer is much longer than the CTO timer, so it's even less
3070	 * likely that we'll hit these cases, but it pays to be paranoid.
3071 */
3072 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
3073 if (pending & SDMMC_INT_DATA_OVER) {
3074 /* The interrupt should fire; no need to act but we can warn */
3075 dev_warn(host->dev, "Unexpected data interrupt latency\n");
3076 goto exit;
3077 }
3078 if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) {
3079 /* Presumably interrupt handler couldn't delete the timer */
3080 dev_warn(host->dev, "DTO timeout when already completed\n");
3081 goto exit;
3082 }
3083
3084 /*
3085 * Continued paranoia to make sure we're in the state we expect.
3086 * This paranoia isn't really justified but it seems good to be safe.
3087 */
3088 switch (host->state) {
3089 case STATE_SENDING_DATA:
3090 case STATE_DATA_BUSY:
3091 /*
3092 * If DTO interrupt does NOT come in sending data state,
3093 * we should notify the driver to terminate current transfer
3094 * and report a data timeout to the core.
3095 */
3096 host->data_status = SDMMC_INT_DRTO;
3097 set_bit(EVENT_DATA_ERROR, &host->pending_events);
3098 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3099 tasklet_schedule(&host->tasklet);
3100 break;
3101 default:
3102 dev_warn(host->dev, "Unexpected data timeout, state %d\n",
3103 host->state);
3104 break;
3105 }
3106
3107exit:
3108 spin_unlock_irqrestore(&host->irq_lock, irqflags);
3109}
3110
3111#ifdef CONFIG_OF
3112static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3113{
3114 struct dw_mci_board *pdata;
3115 struct device *dev = host->dev;
3116 const struct dw_mci_drv_data *drv_data = host->drv_data;
3117 int ret;
3118 u32 clock_frequency;
3119
3120 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3121 if (!pdata)
3122 return ERR_PTR(-ENOMEM);
3123
3124	/* find the reset controller if one exists */
3125 pdata->rstc = devm_reset_control_get_optional_exclusive(dev, "reset");
3126 if (IS_ERR(pdata->rstc)) {
3127 if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER)
3128 return ERR_PTR(-EPROBE_DEFER);
3129 }
3130
3131 if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
3132 dev_info(dev,
3133 "fifo-depth property not found, using value of FIFOTH register as default\n");
3134
3135 device_property_read_u32(dev, "card-detect-delay",
3136 &pdata->detect_delay_ms);
3137
3138 device_property_read_u32(dev, "data-addr", &host->data_addr_override);
3139
3140 if (device_property_present(dev, "fifo-watermark-aligned"))
3141 host->wm_aligned = true;
3142
3143 if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
3144 pdata->bus_hz = clock_frequency;
3145
3146 if (drv_data && drv_data->parse_dt) {
3147 ret = drv_data->parse_dt(host);
3148 if (ret)
3149 return ERR_PTR(ret);
3150 }
3151
3152 return pdata;
3153}
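
/*
 * For reference, a minimal device-tree fragment exercising the properties
 * parsed above might look like the following; the node name, compatible
 * string and values are purely illustrative, not from any shipping board:
 *
 *	mmc@12200000 {
 *		compatible = "snps,dw-mshc";
 *		fifo-depth = <0x80>;
 *		card-detect-delay = <200>;
 *		clock-frequency = <200000000>;
 *		fifo-watermark-aligned;
 *	};
 */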
3154
3155#else /* CONFIG_OF */
3156static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3157{
3158 return ERR_PTR(-EINVAL);
3159}
3160#endif /* CONFIG_OF */
3161
3162static void dw_mci_enable_cd(struct dw_mci *host)
3163{
3164 unsigned long irqflags;
3165 u32 temp;
3166
3167 /*
3168	 * No need for the CD interrupt if the slot polls for the card
3169	 * (MMC_CAP_NEEDS_POLL) or has a working CD GPIO.
3170 */
3171 if (host->slot->mmc->caps & MMC_CAP_NEEDS_POLL)
3172 return;
3173
3174 if (mmc_gpio_get_cd(host->slot->mmc) < 0) {
3175 spin_lock_irqsave(&host->irq_lock, irqflags);
3176 temp = mci_readl(host, INTMASK);
3177 temp |= SDMMC_INT_CD;
3178 mci_writel(host, INTMASK, temp);
3179 spin_unlock_irqrestore(&host->irq_lock, irqflags);
3180 }
3181}
3182
3183int dw_mci_probe(struct dw_mci *host)
3184{
3185 const struct dw_mci_drv_data *drv_data = host->drv_data;
3186 int width, i, ret = 0;
3187 u32 fifo_size;
3188
3189 if (!host->pdata) {
3190 host->pdata = dw_mci_parse_dt(host);
3191 if (PTR_ERR(host->pdata) == -EPROBE_DEFER) {
3192 return -EPROBE_DEFER;
3193 } else if (IS_ERR(host->pdata)) {
3194 dev_err(host->dev, "platform data not available\n");
3195 return -EINVAL;
3196 }
3197 }
3198
3199 host->biu_clk = devm_clk_get(host->dev, "biu");
3200 if (IS_ERR(host->biu_clk)) {
3201 dev_dbg(host->dev, "biu clock not available\n");
3202 } else {
3203 ret = clk_prepare_enable(host->biu_clk);
3204 if (ret) {
3205 dev_err(host->dev, "failed to enable biu clock\n");
3206 return ret;
3207 }
3208 }
3209
3210 host->ciu_clk = devm_clk_get(host->dev, "ciu");
3211 if (IS_ERR(host->ciu_clk)) {
3212 dev_dbg(host->dev, "ciu clock not available\n");
3213 host->bus_hz = host->pdata->bus_hz;
3214 } else {
3215 ret = clk_prepare_enable(host->ciu_clk);
3216 if (ret) {
3217 dev_err(host->dev, "failed to enable ciu clock\n");
3218 goto err_clk_biu;
3219 }
3220
3221 if (host->pdata->bus_hz) {
3222 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
3223 if (ret)
3224 dev_warn(host->dev,
3225 "Unable to set bus rate to %uHz\n",
3226 host->pdata->bus_hz);
3227 }
3228 host->bus_hz = clk_get_rate(host->ciu_clk);
3229 }
3230
3231 if (!host->bus_hz) {
3232 dev_err(host->dev,
3233 "Platform data must supply bus speed\n");
3234 ret = -ENODEV;
3235 goto err_clk_ciu;
3236 }
3237
3238 if (!IS_ERR(host->pdata->rstc)) {
3239 reset_control_assert(host->pdata->rstc);
3240 usleep_range(10, 50);
3241 reset_control_deassert(host->pdata->rstc);
3242 }
3243
3244 if (drv_data && drv_data->init) {
3245 ret = drv_data->init(host);
3246 if (ret) {
3247 dev_err(host->dev,
3248 "implementation specific init failed\n");
3249 goto err_clk_ciu;
3250 }
3251 }
3252
3253 timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0);
3254 timer_setup(&host->cto_timer, dw_mci_cto_timer, 0);
3255 timer_setup(&host->dto_timer, dw_mci_dto_timer, 0);
3256
3257 spin_lock_init(&host->lock);
3258 spin_lock_init(&host->irq_lock);
3259 INIT_LIST_HEAD(&host->queue);
3260
3261 /*
3262 * Get the host data width - this assumes that HCON has been set with
3263 * the correct values.
3264 */
3265 i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
3266 if (!i) {
3267 host->push_data = dw_mci_push_data16;
3268 host->pull_data = dw_mci_pull_data16;
3269 width = 16;
3270 host->data_shift = 1;
3271 } else if (i == 2) {
3272 host->push_data = dw_mci_push_data64;
3273 host->pull_data = dw_mci_pull_data64;
3274 width = 64;
3275 host->data_shift = 3;
3276 } else {
3277 /* Check for a reserved value, and warn if it is */
3278 WARN((i != 1),
3279 "HCON reports a reserved host data width!\n"
3280 "Defaulting to 32-bit access.\n");
3281 host->push_data = dw_mci_push_data32;
3282 host->pull_data = dw_mci_pull_data32;
3283 width = 32;
3284 host->data_shift = 2;
3285 }
3286
3287 /* Reset all blocks */
3288 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
3289 ret = -ENODEV;
3290 goto err_clk_ciu;
3291 }
3292
3293 host->dma_ops = host->pdata->dma_ops;
3294 dw_mci_init_dma(host);
3295
3296 /* Clear the interrupts for the host controller */
3297 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3298 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3299
3300 /* Put in max timeout */
3301 mci_writel(host, TMOUT, 0xFFFFFFFF);
3302
3303 /*
3304	 * FIFO threshold settings: RX_WMark = fifo_size / 2 - 1,
3305	 * TX_WMark = fifo_size / 2, DMA size = 8
3306 */
3307 if (!host->pdata->fifo_depth) {
3308 /*
3309 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
3310 * have been overwritten by the bootloader, just like we're
3311 * about to do, so if you know the value for your hardware, you
3312 * should put it in the platform data.
3313 */
3314 fifo_size = mci_readl(host, FIFOTH);
3315 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
3316 } else {
3317 fifo_size = host->pdata->fifo_depth;
3318 }
3319 host->fifo_depth = fifo_size;
3320 host->fifoth_val =
3321 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
3322 mci_writel(host, FIFOTH, host->fifoth_val);
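
	/*
	 * Example with a 64-word FIFO (illustrative): fifo_size = 64 yields
	 * RX_WMark = 31 and TX_WMark = 32, and the 0x2 MSIZE field requests
	 * 8-transfer DMA bursts, matching the "DMA size = 8" note above.
	 */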
3323
3324 /* disable clock to CIU */
3325 mci_writel(host, CLKENA, 0);
3326 mci_writel(host, CLKSRC, 0);
3327
3328 /*
3329	 * In the 2.40a spec the data register offset changed, so check the
3330	 * version ID and set the DATA register offset accordingly.
3331 */
3332 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3333 dev_info(host->dev, "Version ID is %04x\n", host->verid);
3334
3335 if (host->data_addr_override)
3336 host->fifo_reg = host->regs + host->data_addr_override;
3337 else if (host->verid < DW_MMC_240A)
3338 host->fifo_reg = host->regs + DATA_OFFSET;
3339 else
3340 host->fifo_reg = host->regs + DATA_240A_OFFSET;
3341
3342 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
3343 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
3344 host->irq_flags, "dw-mci", host);
3345 if (ret)
3346 goto err_dmaunmap;
3347
3348 /*
3349	 * Enable interrupts for command done, data over, data empty, receive
3350	 * ready, and errors such as transmit/receive timeout and CRC error.
3351 */
3352 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
3353 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
3354 DW_MCI_ERROR_FLAGS);
3355 /* Enable mci interrupt */
3356 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3357
3358 dev_info(host->dev,
3359		 "DW MMC controller at irq %d, %d bit host data width, %u deep fifo\n",
3360 host->irq, width, fifo_size);
3361
3362 /* We need at least one slot to succeed */
3363 ret = dw_mci_init_slot(host);
3364 if (ret) {
3365		dev_dbg(host->dev, "slot init failed\n");
3366 goto err_dmaunmap;
3367 }
3368
3369 /* Now that slots are all setup, we can enable card detect */
3370 dw_mci_enable_cd(host);
3371
3372 return 0;
3373
3374err_dmaunmap:
3375 if (host->use_dma && host->dma_ops->exit)
3376 host->dma_ops->exit(host);
3377
3378 if (!IS_ERR(host->pdata->rstc))
3379 reset_control_assert(host->pdata->rstc);
3380
3381err_clk_ciu:
3382 clk_disable_unprepare(host->ciu_clk);
3383
3384err_clk_biu:
3385 clk_disable_unprepare(host->biu_clk);
3386
3387 return ret;
3388}
3389EXPORT_SYMBOL(dw_mci_probe);
3390
3391void dw_mci_remove(struct dw_mci *host)
3392{
3393 dev_dbg(host->dev, "remove slot\n");
3394 if (host->slot)
3395 dw_mci_cleanup_slot(host->slot);
3396
3397 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3398 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3399
3400 /* disable clock to CIU */
3401 mci_writel(host, CLKENA, 0);
3402 mci_writel(host, CLKSRC, 0);
3403
3404 if (host->use_dma && host->dma_ops->exit)
3405 host->dma_ops->exit(host);
3406
3407 if (!IS_ERR(host->pdata->rstc))
3408 reset_control_assert(host->pdata->rstc);
3409
3410 clk_disable_unprepare(host->ciu_clk);
3411 clk_disable_unprepare(host->biu_clk);
3412}
3413EXPORT_SYMBOL(dw_mci_remove);
3414
3417#ifdef CONFIG_PM
3418int dw_mci_runtime_suspend(struct device *dev)
3419{
3420 struct dw_mci *host = dev_get_drvdata(dev);
3421
3422 if (host->use_dma && host->dma_ops->exit)
3423 host->dma_ops->exit(host);
3424
3425 clk_disable_unprepare(host->ciu_clk);
3426
3427 if (host->slot &&
3428 (mmc_can_gpio_cd(host->slot->mmc) ||
3429 !mmc_card_is_removable(host->slot->mmc)))
3430 clk_disable_unprepare(host->biu_clk);
3431
3432 return 0;
3433}
3434EXPORT_SYMBOL(dw_mci_runtime_suspend);
3435
3436int dw_mci_runtime_resume(struct device *dev)
3437{
3438 int ret = 0;
3439 struct dw_mci *host = dev_get_drvdata(dev);
3440
3441 if (host->slot &&
3442 (mmc_can_gpio_cd(host->slot->mmc) ||
3443 !mmc_card_is_removable(host->slot->mmc))) {
3444 ret = clk_prepare_enable(host->biu_clk);
3445 if (ret)
3446 return ret;
3447 }
3448
3449 ret = clk_prepare_enable(host->ciu_clk);
3450 if (ret)
3451 goto err;
3452
3453 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
3454 clk_disable_unprepare(host->ciu_clk);
3455 ret = -ENODEV;
3456 goto err;
3457 }
3458
3459 if (host->use_dma && host->dma_ops->init)
3460 host->dma_ops->init(host);
3461
3462 /*
3463 * Restore the initial value at FIFOTH register
3464 * And Invalidate the prev_blksz with zero
3465 */
3466 mci_writel(host, FIFOTH, host->fifoth_val);
3467 host->prev_blksz = 0;
3468
3469 /* Put in max timeout */
3470 mci_writel(host, TMOUT, 0xFFFFFFFF);
3471
3472 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3473 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
3474 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
3475 DW_MCI_ERROR_FLAGS);
3476 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3477
3479 if (host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
3480 dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);
3481
3482 /* Force setup bus to guarantee available clock output */
3483 dw_mci_setup_bus(host->slot, true);
3484
3485 /* Now that slots are all setup, we can enable card detect */
3486 dw_mci_enable_cd(host);
3487
3488 return 0;
3489
3490err:
3491 if (host->slot &&
3492 (mmc_can_gpio_cd(host->slot->mmc) ||
3493 !mmc_card_is_removable(host->slot->mmc)))
3494 clk_disable_unprepare(host->biu_clk);
3495
3496 return ret;
3497}
3498EXPORT_SYMBOL(dw_mci_runtime_resume);
3499#endif /* CONFIG_PM */
3500
3501static int __init dw_mci_init(void)
3502{
3503 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
3504 return 0;
3505}
3506
3507static void __exit dw_mci_exit(void)
3508{
3509}
3510
3511module_init(dw_mci_init);
3512module_exit(dw_mci_exit);
3513
3514MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
3515MODULE_AUTHOR("NXP Semiconductor VietNam");
3516MODULE_AUTHOR("Imagination Technologies Ltd");
3517MODULE_LICENSE("GPL v2");