1/*
2 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
3 *
4 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 *
11 * Thanks to the following companies for their support:
12 *
13 * - JMicron (hardware and technical support)
14 */
15
16#include <linux/delay.h>
17#include <linux/highmem.h>
18#include <linux/io.h>
19#include <linux/module.h>
20#include <linux/dma-mapping.h>
21#include <linux/slab.h>
22#include <linux/scatterlist.h>
23#include <linux/regulator/consumer.h>
24#include <linux/pm_runtime.h>
25#include <linux/of.h>
26
27#include <linux/leds.h>
28
29#include <linux/mmc/mmc.h>
30#include <linux/mmc/host.h>
31#include <linux/mmc/card.h>
32#include <linux/mmc/sdio.h>
33#include <linux/mmc/slot-gpio.h>
34
35#include "sdhci.h"
36
37#define DRIVER_NAME "sdhci"
38
39#define DBG(f, x...) \
40 pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
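/*
 * Illustrative expansion (added for clarity, not part of the original
 * source): a call such as DBG("blksize %d\n", blksize) becomes
 * pr_debug("sdhci" " [%s()]: " "blksize %d\n", __func__, blksize),
 * i.e. every debug line is tagged with the driver name and the name
 * of the calling function.
 */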
41
42#define MAX_TUNING_LOOP 40
43
44static unsigned int debug_quirks = 0;
45static unsigned int debug_quirks2;
46
47static void sdhci_finish_data(struct sdhci_host *);
48
49static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
50
51static void sdhci_dumpregs(struct sdhci_host *host)
52{
53 pr_err(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
54 mmc_hostname(host->mmc));
55
56 pr_err(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
57 sdhci_readl(host, SDHCI_DMA_ADDRESS),
58 sdhci_readw(host, SDHCI_HOST_VERSION));
59 pr_err(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
60 sdhci_readw(host, SDHCI_BLOCK_SIZE),
61 sdhci_readw(host, SDHCI_BLOCK_COUNT));
62 pr_err(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
63 sdhci_readl(host, SDHCI_ARGUMENT),
64 sdhci_readw(host, SDHCI_TRANSFER_MODE));
65 pr_err(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
66 sdhci_readl(host, SDHCI_PRESENT_STATE),
67 sdhci_readb(host, SDHCI_HOST_CONTROL));
68 pr_err(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
69 sdhci_readb(host, SDHCI_POWER_CONTROL),
70 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
71 pr_err(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
72 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
73 sdhci_readw(host, SDHCI_CLOCK_CONTROL));
74 pr_err(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
75 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
76 sdhci_readl(host, SDHCI_INT_STATUS));
77 pr_err(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
78 sdhci_readl(host, SDHCI_INT_ENABLE),
79 sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
80 pr_err(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
81 sdhci_readw(host, SDHCI_ACMD12_ERR),
82 sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
83 pr_err(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
84 sdhci_readl(host, SDHCI_CAPABILITIES),
85 sdhci_readl(host, SDHCI_CAPABILITIES_1));
86 pr_err(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
87 sdhci_readw(host, SDHCI_COMMAND),
88 sdhci_readl(host, SDHCI_MAX_CURRENT));
89 pr_err(DRIVER_NAME ": Host ctl2: 0x%08x\n",
90 sdhci_readw(host, SDHCI_HOST_CONTROL2));
91
92 if (host->flags & SDHCI_USE_ADMA) {
93 if (host->flags & SDHCI_USE_64_BIT_DMA)
94 pr_err(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
95 readl(host->ioaddr + SDHCI_ADMA_ERROR),
96 readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
97 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
98 else
99 pr_err(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
100 readl(host->ioaddr + SDHCI_ADMA_ERROR),
101 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
102 }
103
104 pr_err(DRIVER_NAME ": ===========================================\n");
105}
106
107/*****************************************************************************\
108 * *
109 * Low level functions *
110 * *
111\*****************************************************************************/
112
113static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
114{
115 return cmd->data || cmd->flags & MMC_RSP_BUSY;
116}
117
118static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
119{
120 u32 present;
121
122 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
123 !mmc_card_is_removable(host->mmc))
124 return;
125
126 if (enable) {
127 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
128 SDHCI_CARD_PRESENT;
129
130 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
131 SDHCI_INT_CARD_INSERT;
132 } else {
133 host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
134 }
135
136 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
137 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
138}
139
140static void sdhci_enable_card_detection(struct sdhci_host *host)
141{
142 sdhci_set_card_detection(host, true);
143}
144
145static void sdhci_disable_card_detection(struct sdhci_host *host)
146{
147 sdhci_set_card_detection(host, false);
148}
149
150static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
151{
152 if (host->bus_on)
153 return;
154 host->bus_on = true;
155 pm_runtime_get_noresume(host->mmc->parent);
156}
157
158static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
159{
160 if (!host->bus_on)
161 return;
162 host->bus_on = false;
163 pm_runtime_put_noidle(host->mmc->parent);
164}
165
166void sdhci_reset(struct sdhci_host *host, u8 mask)
167{
168 unsigned long timeout;
169
170 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
171
172 if (mask & SDHCI_RESET_ALL) {
173 host->clock = 0;
174 /* Reset-all turns off SD Bus Power */
175 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
176 sdhci_runtime_pm_bus_off(host);
177 }
178
179 /* Wait max 100 ms */
180 timeout = 100;
181
182 /* hw clears the bit when it's done */
183 while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
184 if (timeout == 0) {
185 pr_err("%s: Reset 0x%x never completed.\n",
186 mmc_hostname(host->mmc), (int)mask);
187 sdhci_dumpregs(host);
188 return;
189 }
190 timeout--;
191 mdelay(1);
192 }
193}
194EXPORT_SYMBOL_GPL(sdhci_reset);
195
196static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
197{
198 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
199 struct mmc_host *mmc = host->mmc;
200
201 if (!mmc->ops->get_cd(mmc))
202 return;
203 }
204
205 host->ops->reset(host, mask);
206
207 if (mask & SDHCI_RESET_ALL) {
208 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
209 if (host->ops->enable_dma)
210 host->ops->enable_dma(host);
211 }
212
 213 /* Resetting the controller clears many settings, including preset value enable */
214 host->preset_enabled = false;
215 }
216}
217
218static void sdhci_init(struct sdhci_host *host, int soft)
219{
220 struct mmc_host *mmc = host->mmc;
221
222 if (soft)
223 sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
224 else
225 sdhci_do_reset(host, SDHCI_RESET_ALL);
226
227 host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
228 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
229 SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
230 SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
231 SDHCI_INT_RESPONSE;
232
233 if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
234 host->tuning_mode == SDHCI_TUNING_MODE_3)
235 host->ier |= SDHCI_INT_RETUNE;
236
237 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
238 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
239
240 if (soft) {
241 /* force clock reconfiguration */
242 host->clock = 0;
243 mmc->ops->set_ios(mmc, &mmc->ios);
244 }
245}
246
247static void sdhci_reinit(struct sdhci_host *host)
248{
249 sdhci_init(host, 0);
250 sdhci_enable_card_detection(host);
251}
252
253static void __sdhci_led_activate(struct sdhci_host *host)
254{
255 u8 ctrl;
256
257 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
258 ctrl |= SDHCI_CTRL_LED;
259 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
260}
261
262static void __sdhci_led_deactivate(struct sdhci_host *host)
263{
264 u8 ctrl;
265
266 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
267 ctrl &= ~SDHCI_CTRL_LED;
268 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
269}
270
271#if IS_REACHABLE(CONFIG_LEDS_CLASS)
272static void sdhci_led_control(struct led_classdev *led,
273 enum led_brightness brightness)
274{
275 struct sdhci_host *host = container_of(led, struct sdhci_host, led);
276 unsigned long flags;
277
278 spin_lock_irqsave(&host->lock, flags);
279
280 if (host->runtime_suspended)
281 goto out;
282
283 if (brightness == LED_OFF)
284 __sdhci_led_deactivate(host);
285 else
286 __sdhci_led_activate(host);
287out:
288 spin_unlock_irqrestore(&host->lock, flags);
289}
290
291static int sdhci_led_register(struct sdhci_host *host)
292{
293 struct mmc_host *mmc = host->mmc;
294
295 snprintf(host->led_name, sizeof(host->led_name),
296 "%s::", mmc_hostname(mmc));
297
298 host->led.name = host->led_name;
299 host->led.brightness = LED_OFF;
300 host->led.default_trigger = mmc_hostname(mmc);
301 host->led.brightness_set = sdhci_led_control;
302
303 return led_classdev_register(mmc_dev(mmc), &host->led);
304}
305
306static void sdhci_led_unregister(struct sdhci_host *host)
307{
308 led_classdev_unregister(&host->led);
309}
310
311static inline void sdhci_led_activate(struct sdhci_host *host)
312{
313}
314
315static inline void sdhci_led_deactivate(struct sdhci_host *host)
316{
317}
318
319#else
320
321static inline int sdhci_led_register(struct sdhci_host *host)
322{
323 return 0;
324}
325
326static inline void sdhci_led_unregister(struct sdhci_host *host)
327{
328}
329
330static inline void sdhci_led_activate(struct sdhci_host *host)
331{
332 __sdhci_led_activate(host);
333}
334
335static inline void sdhci_led_deactivate(struct sdhci_host *host)
336{
337 __sdhci_led_deactivate(host);
338}
339
340#endif
341
342/*****************************************************************************\
343 * *
344 * Core functions *
345 * *
346\*****************************************************************************/
347
348static void sdhci_read_block_pio(struct sdhci_host *host)
349{
350 unsigned long flags;
351 size_t blksize, len, chunk;
352 u32 uninitialized_var(scratch);
353 u8 *buf;
354
355 DBG("PIO reading\n");
356
357 blksize = host->data->blksz;
358 chunk = 0;
359
360 local_irq_save(flags);
361
362 while (blksize) {
363 BUG_ON(!sg_miter_next(&host->sg_miter));
364
365 len = min(host->sg_miter.length, blksize);
366
367 blksize -= len;
368 host->sg_miter.consumed = len;
369
370 buf = host->sg_miter.addr;
371
372 while (len) {
373 if (chunk == 0) {
374 scratch = sdhci_readl(host, SDHCI_BUFFER);
375 chunk = 4;
376 }
377
378 *buf = scratch & 0xFF;
379
380 buf++;
381 scratch >>= 8;
382 chunk--;
383 len--;
384 }
385 }
386
387 sg_miter_stop(&host->sg_miter);
388
389 local_irq_restore(flags);
390}
391
392static void sdhci_write_block_pio(struct sdhci_host *host)
393{
394 unsigned long flags;
395 size_t blksize, len, chunk;
396 u32 scratch;
397 u8 *buf;
398
399 DBG("PIO writing\n");
400
401 blksize = host->data->blksz;
402 chunk = 0;
403 scratch = 0;
404
405 local_irq_save(flags);
406
407 while (blksize) {
408 BUG_ON(!sg_miter_next(&host->sg_miter));
409
410 len = min(host->sg_miter.length, blksize);
411
412 blksize -= len;
413 host->sg_miter.consumed = len;
414
415 buf = host->sg_miter.addr;
416
417 while (len) {
418 scratch |= (u32)*buf << (chunk * 8);
419
420 buf++;
421 chunk++;
422 len--;
423
424 if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
425 sdhci_writel(host, scratch, SDHCI_BUFFER);
426 chunk = 0;
427 scratch = 0;
428 }
429 }
430 }
431
432 sg_miter_stop(&host->sg_miter);
433
434 local_irq_restore(flags);
435}
436
437static void sdhci_transfer_pio(struct sdhci_host *host)
438{
439 u32 mask;
440
441 if (host->blocks == 0)
442 return;
443
444 if (host->data->flags & MMC_DATA_READ)
445 mask = SDHCI_DATA_AVAILABLE;
446 else
447 mask = SDHCI_SPACE_AVAILABLE;
448
449 /*
450 * Some controllers (JMicron JMB38x) mess up the buffer bits
451 * for transfers < 4 bytes. As long as it is just one block,
452 * we can ignore the bits.
453 */
454 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
455 (host->data->blocks == 1))
456 mask = ~0;
457
458 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
459 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
460 udelay(100);
461
462 if (host->data->flags & MMC_DATA_READ)
463 sdhci_read_block_pio(host);
464 else
465 sdhci_write_block_pio(host);
466
467 host->blocks--;
468 if (host->blocks == 0)
469 break;
470 }
471
472 DBG("PIO transfer complete.\n");
473}
474
475static int sdhci_pre_dma_transfer(struct sdhci_host *host,
476 struct mmc_data *data, int cookie)
477{
478 int sg_count;
479
480 /*
481 * If the data buffers are already mapped, return the previous
482 * dma_map_sg() result.
483 */
484 if (data->host_cookie == COOKIE_PRE_MAPPED)
485 return data->sg_count;
486
487 sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
488 data->flags & MMC_DATA_WRITE ?
489 DMA_TO_DEVICE : DMA_FROM_DEVICE);
490
491 if (sg_count == 0)
492 return -ENOSPC;
493
494 data->sg_count = sg_count;
495 data->host_cookie = cookie;
496
497 return sg_count;
498}
499
500static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
501{
502 local_irq_save(*flags);
503 return kmap_atomic(sg_page(sg)) + sg->offset;
504}
505
506static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
507{
508 kunmap_atomic(buffer);
509 local_irq_restore(*flags);
510}
511
512static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
513 dma_addr_t addr, int len, unsigned cmd)
514{
515 struct sdhci_adma2_64_desc *dma_desc = desc;
516
517 /* 32-bit and 64-bit descriptors have these members in same position */
518 dma_desc->cmd = cpu_to_le16(cmd);
519 dma_desc->len = cpu_to_le16(len);
520 dma_desc->addr_lo = cpu_to_le32((u32)addr);
521
522 if (host->flags & SDHCI_USE_64_BIT_DMA)
523 dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
524}
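/*
 * Illustrative descriptor (added for clarity; attribute values assumed from
 * the usual ADMA2 encoding): a 512-byte "tran, valid" entry for a buffer at
 * DMA address 0x80001000 ends up with cmd = ADMA2_TRAN_VALID, len = 512 and
 * addr_lo = 0x80001000 (addr_hi = 0 when 64-bit descriptors are in use).
 */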
525
526static void sdhci_adma_mark_end(void *desc)
527{
528 struct sdhci_adma2_64_desc *dma_desc = desc;
529
530 /* 32-bit and 64-bit descriptors have 'cmd' in same position */
531 dma_desc->cmd |= cpu_to_le16(ADMA2_END);
532}
533
534static void sdhci_adma_table_pre(struct sdhci_host *host,
535 struct mmc_data *data, int sg_count)
536{
537 struct scatterlist *sg;
538 unsigned long flags;
539 dma_addr_t addr, align_addr;
540 void *desc, *align;
541 char *buffer;
542 int len, offset, i;
543
544 /*
 545 * The spec does not specify the endianness of the descriptor table.
546 * We currently guess that it is LE.
547 */
548
549 host->sg_count = sg_count;
550
551 desc = host->adma_table;
552 align = host->align_buffer;
553
554 align_addr = host->align_addr;
555
556 for_each_sg(data->sg, sg, host->sg_count, i) {
557 addr = sg_dma_address(sg);
558 len = sg_dma_len(sg);
559
560 /*
561 * The SDHCI specification states that ADMA addresses must
562 * be 32-bit aligned. If they aren't, then we use a bounce
563 * buffer for the (up to three) bytes that screw up the
564 * alignment.
565 */
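 /*
  * Worked example (added for clarity, assuming SDHCI_ADMA2_ALIGN == 4 and
  * SDHCI_ADMA2_MASK == 3): a segment starting at DMA address 0x...1002
  * gives offset = (4 - 2) & 3 = 2, so the first two bytes go through the
  * bounce buffer and the rest of the segment is 32-bit aligned.
  */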
566 offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
567 SDHCI_ADMA2_MASK;
568 if (offset) {
569 if (data->flags & MMC_DATA_WRITE) {
570 buffer = sdhci_kmap_atomic(sg, &flags);
571 memcpy(align, buffer, offset);
572 sdhci_kunmap_atomic(buffer, &flags);
573 }
574
575 /* tran, valid */
576 sdhci_adma_write_desc(host, desc, align_addr, offset,
577 ADMA2_TRAN_VALID);
578
579 BUG_ON(offset > 65536);
580
581 align += SDHCI_ADMA2_ALIGN;
582 align_addr += SDHCI_ADMA2_ALIGN;
583
584 desc += host->desc_sz;
585
586 addr += offset;
587 len -= offset;
588 }
589
590 BUG_ON(len > 65536);
591
592 if (len) {
593 /* tran, valid */
594 sdhci_adma_write_desc(host, desc, addr, len,
595 ADMA2_TRAN_VALID);
596 desc += host->desc_sz;
597 }
598
599 /*
600 * If this triggers then we have a calculation bug
601 * somewhere. :/
602 */
603 WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
604 }
605
606 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
607 /* Mark the last descriptor as the terminating descriptor */
608 if (desc != host->adma_table) {
609 desc -= host->desc_sz;
610 sdhci_adma_mark_end(desc);
611 }
612 } else {
613 /* Add a terminating entry - nop, end, valid */
614 sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
615 }
616}
617
618static void sdhci_adma_table_post(struct sdhci_host *host,
619 struct mmc_data *data)
620{
621 struct scatterlist *sg;
622 int i, size;
623 void *align;
624 char *buffer;
625 unsigned long flags;
626
627 if (data->flags & MMC_DATA_READ) {
628 bool has_unaligned = false;
629
630 /* Do a quick scan of the SG list for any unaligned mappings */
631 for_each_sg(data->sg, sg, host->sg_count, i)
632 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
633 has_unaligned = true;
634 break;
635 }
636
637 if (has_unaligned) {
638 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
639 data->sg_len, DMA_FROM_DEVICE);
640
641 align = host->align_buffer;
642
643 for_each_sg(data->sg, sg, host->sg_count, i) {
644 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
645 size = SDHCI_ADMA2_ALIGN -
646 (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
647
648 buffer = sdhci_kmap_atomic(sg, &flags);
649 memcpy(buffer, align, size);
650 sdhci_kunmap_atomic(buffer, &flags);
651
652 align += SDHCI_ADMA2_ALIGN;
653 }
654 }
655 }
656 }
657}
658
659static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
660{
661 u8 count;
662 struct mmc_data *data = cmd->data;
663 unsigned target_timeout, current_timeout;
664
665 /*
666 * If the host controller provides us with an incorrect timeout
667 * value, just skip the check and use 0xE. The hardware may take
668 * longer to time out, but that's much better than having a too-short
669 * timeout value.
670 */
671 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
672 return 0xE;
673
674 /* Unspecified timeout, assume max */
675 if (!data && !cmd->busy_timeout)
676 return 0xE;
677
678 /* timeout in us */
679 if (!data)
680 target_timeout = cmd->busy_timeout * 1000;
681 else {
682 target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
683 if (host->clock && data->timeout_clks) {
684 unsigned long long val;
685
686 /*
687 * data->timeout_clks is in units of clock cycles.
688 * host->clock is in Hz. target_timeout is in us.
689 * Hence, us = 1000000 * cycles / Hz. Round up.
690 */
691 val = 1000000ULL * data->timeout_clks;
692 if (do_div(val, host->clock))
693 target_timeout++;
694 target_timeout += val;
695 }
696 }
697
698 /*
699 * Figure out needed cycles.
700 * We do this in steps in order to fit inside a 32 bit int.
701 * The first step is the minimum timeout, which will have a
702 * minimum resolution of 6 bits:
703 * (1) 2^13*1000 > 2^22,
704 * (2) host->timeout_clk < 2^16
705 * =>
706 * (1) / (2) > 2^6
707 */
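 /*
  * Worked example (added for clarity; values are assumed, not taken from
  * real hardware): with a 50 MHz timeout clock (host->timeout_clk == 50000,
  * in kHz) the first step is 2^13 * 1000 / 50000 ~= 163 us. For a target
  * of 100 ms the loop doubles this ten times, giving count = 10, i.e. a
  * hardware timeout of 2^(13+10) clock cycles, roughly 168 ms.
  */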
708 count = 0;
709 current_timeout = (1 << 13) * 1000 / host->timeout_clk;
710 while (current_timeout < target_timeout) {
711 count++;
712 current_timeout <<= 1;
713 if (count >= 0xF)
714 break;
715 }
716
717 if (count >= 0xF) {
718 DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
719 mmc_hostname(host->mmc), count, cmd->opcode);
720 count = 0xE;
721 }
722
723 return count;
724}
725
726static void sdhci_set_transfer_irqs(struct sdhci_host *host)
727{
728 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
729 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
730
731 if (host->flags & SDHCI_REQ_USE_DMA)
732 host->ier = (host->ier & ~pio_irqs) | dma_irqs;
733 else
734 host->ier = (host->ier & ~dma_irqs) | pio_irqs;
735
736 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
737 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
738}
739
740static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
741{
742 u8 count;
743
744 if (host->ops->set_timeout) {
745 host->ops->set_timeout(host, cmd);
746 } else {
747 count = sdhci_calc_timeout(host, cmd);
748 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
749 }
750}
751
752static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
753{
754 u8 ctrl;
755 struct mmc_data *data = cmd->data;
756
757 if (sdhci_data_line_cmd(cmd))
758 sdhci_set_timeout(host, cmd);
759
760 if (!data)
761 return;
762
763 WARN_ON(host->data);
764
765 /* Sanity checks */
766 BUG_ON(data->blksz * data->blocks > 524288);
767 BUG_ON(data->blksz > host->mmc->max_blk_size);
768 BUG_ON(data->blocks > 65535);
769
770 host->data = data;
771 host->data_early = 0;
772 host->data->bytes_xfered = 0;
773
774 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
775 struct scatterlist *sg;
776 unsigned int length_mask, offset_mask;
777 int i;
778
779 host->flags |= SDHCI_REQ_USE_DMA;
780
781 /*
782 * FIXME: This doesn't account for merging when mapping the
783 * scatterlist.
784 *
 785 * The assumption here is that alignment and lengths are
 786 * the same after DMA mapping to device address space.
787 */
788 length_mask = 0;
789 offset_mask = 0;
790 if (host->flags & SDHCI_USE_ADMA) {
791 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
792 length_mask = 3;
793 /*
794 * As we use up to 3 byte chunks to work
795 * around alignment problems, we need to
796 * check the offset as well.
797 */
798 offset_mask = 3;
799 }
800 } else {
801 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
802 length_mask = 3;
803 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
804 offset_mask = 3;
805 }
806
807 if (unlikely(length_mask | offset_mask)) {
808 for_each_sg(data->sg, sg, data->sg_len, i) {
809 if (sg->length & length_mask) {
810 DBG("Reverting to PIO because of transfer size (%d)\n",
811 sg->length);
812 host->flags &= ~SDHCI_REQ_USE_DMA;
813 break;
814 }
815 if (sg->offset & offset_mask) {
816 DBG("Reverting to PIO because of bad alignment\n");
817 host->flags &= ~SDHCI_REQ_USE_DMA;
818 break;
819 }
820 }
821 }
822 }
823
824 if (host->flags & SDHCI_REQ_USE_DMA) {
825 int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
826
827 if (sg_cnt <= 0) {
828 /*
829 * This only happens when someone fed
830 * us an invalid request.
831 */
832 WARN_ON(1);
833 host->flags &= ~SDHCI_REQ_USE_DMA;
834 } else if (host->flags & SDHCI_USE_ADMA) {
835 sdhci_adma_table_pre(host, data, sg_cnt);
836
837 sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
838 if (host->flags & SDHCI_USE_64_BIT_DMA)
839 sdhci_writel(host,
840 (u64)host->adma_addr >> 32,
841 SDHCI_ADMA_ADDRESS_HI);
842 } else {
843 WARN_ON(sg_cnt != 1);
844 sdhci_writel(host, sg_dma_address(data->sg),
845 SDHCI_DMA_ADDRESS);
846 }
847 }
848
849 /*
850 * Always adjust the DMA selection as some controllers
851 * (e.g. JMicron) can't do PIO properly when the selection
852 * is ADMA.
853 */
854 if (host->version >= SDHCI_SPEC_200) {
855 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
856 ctrl &= ~SDHCI_CTRL_DMA_MASK;
857 if ((host->flags & SDHCI_REQ_USE_DMA) &&
858 (host->flags & SDHCI_USE_ADMA)) {
859 if (host->flags & SDHCI_USE_64_BIT_DMA)
860 ctrl |= SDHCI_CTRL_ADMA64;
861 else
862 ctrl |= SDHCI_CTRL_ADMA32;
863 } else {
864 ctrl |= SDHCI_CTRL_SDMA;
865 }
866 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
867 }
868
869 if (!(host->flags & SDHCI_REQ_USE_DMA)) {
870 int flags;
871
872 flags = SG_MITER_ATOMIC;
873 if (host->data->flags & MMC_DATA_READ)
874 flags |= SG_MITER_TO_SG;
875 else
876 flags |= SG_MITER_FROM_SG;
877 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
878 host->blocks = data->blocks;
879 }
880
881 sdhci_set_transfer_irqs(host);
882
883 /* Set the DMA boundary value and block size */
884 sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
885 data->blksz), SDHCI_BLOCK_SIZE);
886 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
887}
888
889static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
890 struct mmc_request *mrq)
891{
892 return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
893 !mrq->cap_cmd_during_tfr;
894}
895
896static void sdhci_set_transfer_mode(struct sdhci_host *host,
897 struct mmc_command *cmd)
898{
899 u16 mode = 0;
900 struct mmc_data *data = cmd->data;
901
902 if (data == NULL) {
903 if (host->quirks2 &
904 SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
905 sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
906 } else {
907 /* clear Auto CMD settings for no data CMDs */
908 mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
909 sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
910 SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
911 }
912 return;
913 }
914
915 WARN_ON(!host->data);
916
917 if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
918 mode = SDHCI_TRNS_BLK_CNT_EN;
919
920 if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
921 mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
922 /*
923 * If we are sending CMD23, CMD12 never gets sent
924 * on successful completion (so no Auto-CMD12).
925 */
926 if (sdhci_auto_cmd12(host, cmd->mrq) &&
927 (cmd->opcode != SD_IO_RW_EXTENDED))
928 mode |= SDHCI_TRNS_AUTO_CMD12;
929 else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
930 mode |= SDHCI_TRNS_AUTO_CMD23;
931 sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
932 }
933 }
934
935 if (data->flags & MMC_DATA_READ)
936 mode |= SDHCI_TRNS_READ;
937 if (host->flags & SDHCI_REQ_USE_DMA)
938 mode |= SDHCI_TRNS_DMA;
939
940 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
941}
942
943static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
944{
945 return (!(host->flags & SDHCI_DEVICE_DEAD) &&
946 ((mrq->cmd && mrq->cmd->error) ||
947 (mrq->sbc && mrq->sbc->error) ||
948 (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
949 (mrq->data->stop && mrq->data->stop->error))) ||
950 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
951}
952
953static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
954{
955 int i;
956
957 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
958 if (host->mrqs_done[i] == mrq) {
959 WARN_ON(1);
960 return;
961 }
962 }
963
964 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
965 if (!host->mrqs_done[i]) {
966 host->mrqs_done[i] = mrq;
967 break;
968 }
969 }
970
971 WARN_ON(i >= SDHCI_MAX_MRQS);
972
973 tasklet_schedule(&host->finish_tasklet);
974}
975
976static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
977{
978 if (host->cmd && host->cmd->mrq == mrq)
979 host->cmd = NULL;
980
981 if (host->data_cmd && host->data_cmd->mrq == mrq)
982 host->data_cmd = NULL;
983
984 if (host->data && host->data->mrq == mrq)
985 host->data = NULL;
986
987 if (sdhci_needs_reset(host, mrq))
988 host->pending_reset = true;
989
990 __sdhci_finish_mrq(host, mrq);
991}
992
993static void sdhci_finish_data(struct sdhci_host *host)
994{
995 struct mmc_command *data_cmd = host->data_cmd;
996 struct mmc_data *data = host->data;
997
998 host->data = NULL;
999 host->data_cmd = NULL;
1000
1001 if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
1002 (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
1003 sdhci_adma_table_post(host, data);
1004
1005 /*
1006 * The specification states that the block count register must
1007 * be updated, but it does not specify at what point in the
1008 * data flow. That makes the register entirely useless to read
1009 * back so we have to assume that nothing made it to the card
1010 * in the event of an error.
1011 */
1012 if (data->error)
1013 data->bytes_xfered = 0;
1014 else
1015 data->bytes_xfered = data->blksz * data->blocks;
1016
1017 /*
1018 * Need to send CMD12 if -
1019 * a) open-ended multiblock transfer (no CMD23)
1020 * b) error in multiblock transfer
1021 */
1022 if (data->stop &&
1023 (data->error ||
1024 !data->mrq->sbc)) {
1025
1026 /*
1027 * The controller needs a reset of internal state machines
1028 * upon error conditions.
1029 */
1030 if (data->error) {
1031 if (!host->cmd || host->cmd == data_cmd)
1032 sdhci_do_reset(host, SDHCI_RESET_CMD);
1033 sdhci_do_reset(host, SDHCI_RESET_DATA);
1034 }
1035
1036 /*
1037 * 'cap_cmd_during_tfr' request must not use the command line
 1038 * after mmc_command_done() has been called. It is the upper layer's
1039 * responsibility to send the stop command if required.
1040 */
1041 if (data->mrq->cap_cmd_during_tfr) {
1042 sdhci_finish_mrq(host, data->mrq);
1043 } else {
1044 /* Avoid triggering warning in sdhci_send_command() */
1045 host->cmd = NULL;
1046 sdhci_send_command(host, data->stop);
1047 }
1048 } else {
1049 sdhci_finish_mrq(host, data->mrq);
1050 }
1051}
1052
1053static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
1054 unsigned long timeout)
1055{
1056 if (sdhci_data_line_cmd(mrq->cmd))
1057 mod_timer(&host->data_timer, timeout);
1058 else
1059 mod_timer(&host->timer, timeout);
1060}
1061
1062static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
1063{
1064 if (sdhci_data_line_cmd(mrq->cmd))
1065 del_timer(&host->data_timer);
1066 else
1067 del_timer(&host->timer);
1068}
1069
1070void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
1071{
1072 int flags;
1073 u32 mask;
1074 unsigned long timeout;
1075
1076 WARN_ON(host->cmd);
1077
1078 /* Initially, a command has no error */
1079 cmd->error = 0;
1080
1081 if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
1082 cmd->opcode == MMC_STOP_TRANSMISSION)
1083 cmd->flags |= MMC_RSP_BUSY;
1084
1085 /* Wait max 10 ms */
1086 timeout = 10;
1087
1088 mask = SDHCI_CMD_INHIBIT;
1089 if (sdhci_data_line_cmd(cmd))
1090 mask |= SDHCI_DATA_INHIBIT;
1091
 1092 /* We shouldn't wait for data inhibit for stop commands, even
1093 though they might use busy signaling */
1094 if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
1095 mask &= ~SDHCI_DATA_INHIBIT;
1096
1097 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
1098 if (timeout == 0) {
1099 pr_err("%s: Controller never released inhibit bit(s).\n",
1100 mmc_hostname(host->mmc));
1101 sdhci_dumpregs(host);
1102 cmd->error = -EIO;
1103 sdhci_finish_mrq(host, cmd->mrq);
1104 return;
1105 }
1106 timeout--;
1107 mdelay(1);
1108 }
1109
1110 timeout = jiffies;
1111 if (!cmd->data && cmd->busy_timeout > 9000)
1112 timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
1113 else
1114 timeout += 10 * HZ;
1115 sdhci_mod_timer(host, cmd->mrq, timeout);
1116
1117 host->cmd = cmd;
1118 if (sdhci_data_line_cmd(cmd)) {
1119 WARN_ON(host->data_cmd);
1120 host->data_cmd = cmd;
1121 }
1122
1123 sdhci_prepare_data(host, cmd);
1124
1125 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
1126
1127 sdhci_set_transfer_mode(host, cmd);
1128
1129 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
1130 pr_err("%s: Unsupported response type!\n",
1131 mmc_hostname(host->mmc));
1132 cmd->error = -EINVAL;
1133 sdhci_finish_mrq(host, cmd->mrq);
1134 return;
1135 }
1136
1137 if (!(cmd->flags & MMC_RSP_PRESENT))
1138 flags = SDHCI_CMD_RESP_NONE;
1139 else if (cmd->flags & MMC_RSP_136)
1140 flags = SDHCI_CMD_RESP_LONG;
1141 else if (cmd->flags & MMC_RSP_BUSY)
1142 flags = SDHCI_CMD_RESP_SHORT_BUSY;
1143 else
1144 flags = SDHCI_CMD_RESP_SHORT;
1145
1146 if (cmd->flags & MMC_RSP_CRC)
1147 flags |= SDHCI_CMD_CRC;
1148 if (cmd->flags & MMC_RSP_OPCODE)
1149 flags |= SDHCI_CMD_INDEX;
1150
1151 /* CMD19 is special in that the Data Present Select should be set */
1152 if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
1153 cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
1154 flags |= SDHCI_CMD_DATA;
1155
1156 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
1157}
1158EXPORT_SYMBOL_GPL(sdhci_send_command);
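/*
 * Illustrative command register value (added for clarity; flag names assumed
 * from the usual SDHCI register layout): a single-block read (CMD17) with a
 * short CRC/index-checked response and data present is issued as
 * SDHCI_MAKE_CMD(17, SDHCI_CMD_RESP_SHORT | SDHCI_CMD_CRC |
 * SDHCI_CMD_INDEX | SDHCI_CMD_DATA), i.e. the opcode in the upper byte and
 * the response/data flags in the lower byte.
 */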
1159
1160static void sdhci_finish_command(struct sdhci_host *host)
1161{
1162 struct mmc_command *cmd = host->cmd;
1163 int i;
1164
1165 host->cmd = NULL;
1166
1167 if (cmd->flags & MMC_RSP_PRESENT) {
1168 if (cmd->flags & MMC_RSP_136) {
1169 /* CRC is stripped so we need to do some shifting. */
1170 for (i = 0;i < 4;i++) {
1171 cmd->resp[i] = sdhci_readl(host,
1172 SDHCI_RESPONSE + (3-i)*4) << 8;
1173 if (i != 3)
1174 cmd->resp[i] |=
1175 sdhci_readb(host,
1176 SDHCI_RESPONSE + (3-i)*4-1);
1177 }
1178 } else {
1179 cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
1180 }
1181 }
1182
1183 if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
1184 mmc_command_done(host->mmc, cmd->mrq);
1185
1186 /*
 1187 * The host can send an interrupt when the busy state has
1188 * ended, allowing us to wait without wasting CPU cycles.
1189 * The busy signal uses DAT0 so this is similar to waiting
1190 * for data to complete.
1191 *
1192 * Note: The 1.0 specification is a bit ambiguous about this
1193 * feature so there might be some problems with older
1194 * controllers.
1195 */
1196 if (cmd->flags & MMC_RSP_BUSY) {
1197 if (cmd->data) {
1198 DBG("Cannot wait for busy signal when also doing a data transfer");
1199 } else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
1200 cmd == host->data_cmd) {
1201 /* Command complete before busy is ended */
1202 return;
1203 }
1204 }
1205
1206 /* Finished CMD23, now send actual command. */
1207 if (cmd == cmd->mrq->sbc) {
1208 sdhci_send_command(host, cmd->mrq->cmd);
1209 } else {
1210
1211 /* Processed actual command. */
1212 if (host->data && host->data_early)
1213 sdhci_finish_data(host);
1214
1215 if (!cmd->data)
1216 sdhci_finish_mrq(host, cmd->mrq);
1217 }
1218}
1219
1220static u16 sdhci_get_preset_value(struct sdhci_host *host)
1221{
1222 u16 preset = 0;
1223
1224 switch (host->timing) {
1225 case MMC_TIMING_UHS_SDR12:
1226 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1227 break;
1228 case MMC_TIMING_UHS_SDR25:
1229 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
1230 break;
1231 case MMC_TIMING_UHS_SDR50:
1232 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
1233 break;
1234 case MMC_TIMING_UHS_SDR104:
1235 case MMC_TIMING_MMC_HS200:
1236 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
1237 break;
1238 case MMC_TIMING_UHS_DDR50:
1239 case MMC_TIMING_MMC_DDR52:
1240 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
1241 break;
1242 case MMC_TIMING_MMC_HS400:
1243 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
1244 break;
1245 default:
1246 pr_warn("%s: Invalid UHS-I mode selected\n",
1247 mmc_hostname(host->mmc));
1248 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1249 break;
1250 }
1251 return preset;
1252}
1253
1254u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
1255 unsigned int *actual_clock)
1256{
1257 int div = 0; /* Initialized for compiler warning */
1258 int real_div = div, clk_mul = 1;
1259 u16 clk = 0;
1260 bool switch_base_clk = false;
1261
1262 if (host->version >= SDHCI_SPEC_300) {
1263 if (host->preset_enabled) {
1264 u16 pre_val;
1265
1266 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1267 pre_val = sdhci_get_preset_value(host);
1268 div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
1269 >> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
1270 if (host->clk_mul &&
1271 (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
1272 clk = SDHCI_PROG_CLOCK_MODE;
1273 real_div = div + 1;
1274 clk_mul = host->clk_mul;
1275 } else {
1276 real_div = max_t(int, 1, div << 1);
1277 }
1278 goto clock_set;
1279 }
1280
1281 /*
1282 * Check if the Host Controller supports Programmable Clock
1283 * Mode.
1284 */
1285 if (host->clk_mul) {
1286 for (div = 1; div <= 1024; div++) {
1287 if ((host->max_clk * host->clk_mul / div)
1288 <= clock)
1289 break;
1290 }
1291 if ((host->max_clk * host->clk_mul / div) <= clock) {
1292 /*
1293 * Set Programmable Clock Mode in the Clock
1294 * Control register.
1295 */
1296 clk = SDHCI_PROG_CLOCK_MODE;
1297 real_div = div;
1298 clk_mul = host->clk_mul;
1299 div--;
1300 } else {
1301 /*
 1302 * The divisor can be too small to meet the clock
 1303 * speed requirement; in that case, use the base clock.
1304 */
1305 switch_base_clk = true;
1306 }
1307 }
1308
1309 if (!host->clk_mul || switch_base_clk) {
1310 /* Version 3.00 divisors must be a multiple of 2. */
1311 if (host->max_clk <= clock)
1312 div = 1;
1313 else {
1314 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
1315 div += 2) {
1316 if ((host->max_clk / div) <= clock)
1317 break;
1318 }
1319 }
1320 real_div = div;
1321 div >>= 1;
1322 if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
1323 && !div && host->max_clk <= 25000000)
1324 div = 1;
1325 }
1326 } else {
1327 /* Version 2.00 divisors must be a power of 2. */
1328 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
1329 if ((host->max_clk / div) <= clock)
1330 break;
1331 }
1332 real_div = div;
1333 div >>= 1;
1334 }
1335
1336clock_set:
1337 if (real_div)
1338 *actual_clock = (host->max_clk * clk_mul) / real_div;
1339 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1340 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
1341 << SDHCI_DIVIDER_HI_SHIFT;
1342
1343 return clk;
1344}
1345EXPORT_SYMBOL_GPL(sdhci_calc_clk);
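/*
 * Worked example (added for clarity; values assumed): for a v3.00 host with
 * host->max_clk = 200 MHz, no programmable clock multiplier and a requested
 * clock of 50 MHz, the divisor loop stops at div = 4 (200 MHz / 4 <= 50 MHz),
 * so real_div = 4 and *actual_clock = 50 MHz; div is then halved to 2 and
 * split across the low and high divider fields written to SDHCI_CLOCK_CONTROL.
 */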
1346
1347void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
1348{
1349 unsigned long timeout;
1350
1351 clk |= SDHCI_CLOCK_INT_EN;
1352 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1353
1354 /* Wait max 20 ms */
1355 timeout = 20;
1356 while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
1357 & SDHCI_CLOCK_INT_STABLE)) {
1358 if (timeout == 0) {
1359 pr_err("%s: Internal clock never stabilised.\n",
1360 mmc_hostname(host->mmc));
1361 sdhci_dumpregs(host);
1362 return;
1363 }
1364 timeout--;
1365 mdelay(1);
1366 }
1367
1368 clk |= SDHCI_CLOCK_CARD_EN;
1369 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1370}
1371EXPORT_SYMBOL_GPL(sdhci_enable_clk);
1372
1373void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1374{
1375 u16 clk;
1376
1377 host->mmc->actual_clock = 0;
1378
1379 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1380
1381 if (clock == 0)
1382 return;
1383
1384 clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
1385 sdhci_enable_clk(host, clk);
1386}
1387EXPORT_SYMBOL_GPL(sdhci_set_clock);
1388
1389static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
1390 unsigned short vdd)
1391{
1392 struct mmc_host *mmc = host->mmc;
1393
1394 spin_unlock_irq(&host->lock);
1395 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
1396 spin_lock_irq(&host->lock);
1397
1398 if (mode != MMC_POWER_OFF)
1399 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
1400 else
1401 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1402}
1403
1404void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
1405 unsigned short vdd)
1406{
1407 u8 pwr = 0;
1408
1409 if (mode != MMC_POWER_OFF) {
1410 switch (1 << vdd) {
1411 case MMC_VDD_165_195:
1412 pwr = SDHCI_POWER_180;
1413 break;
1414 case MMC_VDD_29_30:
1415 case MMC_VDD_30_31:
1416 pwr = SDHCI_POWER_300;
1417 break;
1418 case MMC_VDD_32_33:
1419 case MMC_VDD_33_34:
1420 pwr = SDHCI_POWER_330;
1421 break;
1422 default:
1423 WARN(1, "%s: Invalid vdd %#x\n",
1424 mmc_hostname(host->mmc), vdd);
1425 break;
1426 }
1427 }
1428
1429 if (host->pwr == pwr)
1430 return;
1431
1432 host->pwr = pwr;
1433
1434 if (pwr == 0) {
1435 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1436 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1437 sdhci_runtime_pm_bus_off(host);
1438 } else {
1439 /*
1440 * Spec says that we should clear the power reg before setting
1441 * a new value. Some controllers don't seem to like this though.
1442 */
1443 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
1444 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1445
1446 /*
1447 * At least the Marvell CaFe chip gets confused if we set the
 1448 * voltage and turn on the power at the same time, so set the
1449 * voltage first.
1450 */
1451 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
1452 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1453
1454 pwr |= SDHCI_POWER_ON;
1455
1456 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1457
1458 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1459 sdhci_runtime_pm_bus_on(host);
1460
1461 /*
 1462 * Some controllers need an extra 10ms delay before
 1463 * they can apply clock after applying power
1464 */
1465 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
1466 mdelay(10);
1467 }
1468}
1469EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
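/*
 * Illustrative flow (added for clarity): for an ios->vdd selecting the 3.3 V
 * range (e.g. MMC_VDD_32_33), pwr becomes SDHCI_POWER_330 and, unless a
 * quirk intervenes, SDHCI_POWER_330 | SDHCI_POWER_ON is what finally lands
 * in SDHCI_POWER_CONTROL.
 */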
1470
1471void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1472 unsigned short vdd)
1473{
1474 if (IS_ERR(host->mmc->supply.vmmc))
1475 sdhci_set_power_noreg(host, mode, vdd);
1476 else
1477 sdhci_set_power_reg(host, mode, vdd);
1478}
1479EXPORT_SYMBOL_GPL(sdhci_set_power);
1480
1481/*****************************************************************************\
1482 * *
1483 * MMC callbacks *
1484 * *
1485\*****************************************************************************/
1486
1487static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1488{
1489 struct sdhci_host *host;
1490 int present;
1491 unsigned long flags;
1492
1493 host = mmc_priv(mmc);
1494
1495 /* Firstly check card presence */
1496 present = mmc->ops->get_cd(mmc);
1497
1498 spin_lock_irqsave(&host->lock, flags);
1499
1500 sdhci_led_activate(host);
1501
1502 /*
1503 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
1504 * requests if Auto-CMD12 is enabled.
1505 */
1506 if (sdhci_auto_cmd12(host, mrq)) {
1507 if (mrq->stop) {
1508 mrq->data->stop = NULL;
1509 mrq->stop = NULL;
1510 }
1511 }
1512
1513 if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1514 mrq->cmd->error = -ENOMEDIUM;
1515 sdhci_finish_mrq(host, mrq);
1516 } else {
1517 if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
1518 sdhci_send_command(host, mrq->sbc);
1519 else
1520 sdhci_send_command(host, mrq->cmd);
1521 }
1522
1523 mmiowb();
1524 spin_unlock_irqrestore(&host->lock, flags);
1525}
1526
1527void sdhci_set_bus_width(struct sdhci_host *host, int width)
1528{
1529 u8 ctrl;
1530
1531 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1532 if (width == MMC_BUS_WIDTH_8) {
1533 ctrl &= ~SDHCI_CTRL_4BITBUS;
1534 if (host->version >= SDHCI_SPEC_300)
1535 ctrl |= SDHCI_CTRL_8BITBUS;
1536 } else {
1537 if (host->version >= SDHCI_SPEC_300)
1538 ctrl &= ~SDHCI_CTRL_8BITBUS;
1539 if (width == MMC_BUS_WIDTH_4)
1540 ctrl |= SDHCI_CTRL_4BITBUS;
1541 else
1542 ctrl &= ~SDHCI_CTRL_4BITBUS;
1543 }
1544 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1545}
1546EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
1547
1548void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
1549{
1550 u16 ctrl_2;
1551
1552 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1553 /* Select Bus Speed Mode for host */
1554 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
1555 if ((timing == MMC_TIMING_MMC_HS200) ||
1556 (timing == MMC_TIMING_UHS_SDR104))
1557 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1558 else if (timing == MMC_TIMING_UHS_SDR12)
1559 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
1560 else if (timing == MMC_TIMING_UHS_SDR25)
1561 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
1562 else if (timing == MMC_TIMING_UHS_SDR50)
1563 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
1564 else if ((timing == MMC_TIMING_UHS_DDR50) ||
1565 (timing == MMC_TIMING_MMC_DDR52))
1566 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
1567 else if (timing == MMC_TIMING_MMC_HS400)
1568 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
1569 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1570}
1571EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
1572
1573static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1574{
1575 struct sdhci_host *host = mmc_priv(mmc);
1576 unsigned long flags;
1577 u8 ctrl;
1578
1579 if (ios->power_mode == MMC_POWER_UNDEFINED)
1580 return;
1581
1582 spin_lock_irqsave(&host->lock, flags);
1583
1584 if (host->flags & SDHCI_DEVICE_DEAD) {
1585 spin_unlock_irqrestore(&host->lock, flags);
1586 if (!IS_ERR(mmc->supply.vmmc) &&
1587 ios->power_mode == MMC_POWER_OFF)
1588 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1589 return;
1590 }
1591
1592 /*
1593 * Reset the chip on each power off.
1594 * Should clear out any weird states.
1595 */
1596 if (ios->power_mode == MMC_POWER_OFF) {
1597 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
1598 sdhci_reinit(host);
1599 }
1600
1601 if (host->version >= SDHCI_SPEC_300 &&
1602 (ios->power_mode == MMC_POWER_UP) &&
1603 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
1604 sdhci_enable_preset_value(host, false);
1605
1606 if (!ios->clock || ios->clock != host->clock) {
1607 host->ops->set_clock(host, ios->clock);
1608 host->clock = ios->clock;
1609
1610 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
1611 host->clock) {
1612 host->timeout_clk = host->mmc->actual_clock ?
1613 host->mmc->actual_clock / 1000 :
1614 host->clock / 1000;
1615 host->mmc->max_busy_timeout =
1616 host->ops->get_max_timeout_count ?
1617 host->ops->get_max_timeout_count(host) :
1618 1 << 27;
1619 host->mmc->max_busy_timeout /= host->timeout_clk;
1620 }
1621 }
1622
1623 if (host->ops->set_power)
1624 host->ops->set_power(host, ios->power_mode, ios->vdd);
1625 else
1626 sdhci_set_power(host, ios->power_mode, ios->vdd);
1627
1628 if (host->ops->platform_send_init_74_clocks)
1629 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
1630
1631 host->ops->set_bus_width(host, ios->bus_width);
1632
1633 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1634
1635 if ((ios->timing == MMC_TIMING_SD_HS ||
1636 ios->timing == MMC_TIMING_MMC_HS ||
1637 ios->timing == MMC_TIMING_MMC_HS400 ||
1638 ios->timing == MMC_TIMING_MMC_HS200 ||
1639 ios->timing == MMC_TIMING_MMC_DDR52 ||
1640 ios->timing == MMC_TIMING_UHS_SDR50 ||
1641 ios->timing == MMC_TIMING_UHS_SDR104 ||
1642 ios->timing == MMC_TIMING_UHS_DDR50 ||
1643 ios->timing == MMC_TIMING_UHS_SDR25)
1644 && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
1645 ctrl |= SDHCI_CTRL_HISPD;
1646 else
1647 ctrl &= ~SDHCI_CTRL_HISPD;
1648
1649 if (host->version >= SDHCI_SPEC_300) {
1650 u16 clk, ctrl_2;
1651
1652 if (!host->preset_enabled) {
1653 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1654 /*
1655 * We only need to set Driver Strength if the
1656 * preset value enable is not set.
1657 */
1658 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1659 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
1660 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
1661 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
1662 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
1663 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
1664 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
1665 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
1666 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
1667 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
1668 else {
1669 pr_warn("%s: invalid driver type, default to driver type B\n",
1670 mmc_hostname(mmc));
1671 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
1672 }
1673
1674 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1675 } else {
1676 /*
1677 * According to SDHC Spec v3.00, if the Preset Value
1678 * Enable in the Host Control 2 register is set, we
1679 * need to reset SD Clock Enable before changing High
 1680 * Speed Enable to avoid generating clock glitches.
1681 */
1682
1683 /* Reset SD Clock Enable */
1684 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1685 clk &= ~SDHCI_CLOCK_CARD_EN;
1686 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1687
1688 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1689
1690 /* Re-enable SD Clock */
1691 host->ops->set_clock(host, host->clock);
1692 }
1693
1694 /* Reset SD Clock Enable */
1695 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1696 clk &= ~SDHCI_CLOCK_CARD_EN;
1697 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1698
1699 host->ops->set_uhs_signaling(host, ios->timing);
1700 host->timing = ios->timing;
1701
1702 if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
1703 ((ios->timing == MMC_TIMING_UHS_SDR12) ||
1704 (ios->timing == MMC_TIMING_UHS_SDR25) ||
1705 (ios->timing == MMC_TIMING_UHS_SDR50) ||
1706 (ios->timing == MMC_TIMING_UHS_SDR104) ||
1707 (ios->timing == MMC_TIMING_UHS_DDR50) ||
1708 (ios->timing == MMC_TIMING_MMC_DDR52))) {
1709 u16 preset;
1710
1711 sdhci_enable_preset_value(host, true);
1712 preset = sdhci_get_preset_value(host);
1713 ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
1714 >> SDHCI_PRESET_DRV_SHIFT;
1715 }
1716
1717 /* Re-enable SD Clock */
1718 host->ops->set_clock(host, host->clock);
1719 } else
1720 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1721
1722 /*
 1723 * Some (ENE) controllers misbehave on some ios operations,
1724 * signalling timeout and CRC errors even on CMD0. Resetting
1725 * it on each ios seems to solve the problem.
1726 */
1727 if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
1728 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1729
1730 mmiowb();
1731 spin_unlock_irqrestore(&host->lock, flags);
1732}
1733
1734static int sdhci_get_cd(struct mmc_host *mmc)
1735{
1736 struct sdhci_host *host = mmc_priv(mmc);
1737 int gpio_cd = mmc_gpio_get_cd(mmc);
1738
1739 if (host->flags & SDHCI_DEVICE_DEAD)
1740 return 0;
1741
1742 /* If nonremovable, assume that the card is always present. */
1743 if (!mmc_card_is_removable(host->mmc))
1744 return 1;
1745
1746 /*
 1747 * Try slot GPIO detect; if defined, it takes precedence
 1748 * over the built-in controller functionality
1749 */
1750 if (gpio_cd >= 0)
1751 return !!gpio_cd;
1752
1753 /* If polling, assume that the card is always present. */
1754 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
1755 return 1;
1756
1757 /* Host native card detect */
1758 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
1759}
1760
1761static int sdhci_check_ro(struct sdhci_host *host)
1762{
1763 unsigned long flags;
1764 int is_readonly;
1765
1766 spin_lock_irqsave(&host->lock, flags);
1767
1768 if (host->flags & SDHCI_DEVICE_DEAD)
1769 is_readonly = 0;
1770 else if (host->ops->get_ro)
1771 is_readonly = host->ops->get_ro(host);
1772 else
1773 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
1774 & SDHCI_WRITE_PROTECT);
1775
1776 spin_unlock_irqrestore(&host->lock, flags);
1777
 1778 /* This quirk needs to be replaced by a callback function later */
1779 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
1780 !is_readonly : is_readonly;
1781}
1782
1783#define SAMPLE_COUNT 5
1784
1785static int sdhci_get_ro(struct mmc_host *mmc)
1786{
1787 struct sdhci_host *host = mmc_priv(mmc);
1788 int i, ro_count;
1789
1790 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
1791 return sdhci_check_ro(host);
1792
1793 ro_count = 0;
1794 for (i = 0; i < SAMPLE_COUNT; i++) {
1795 if (sdhci_check_ro(host)) {
1796 if (++ro_count > SAMPLE_COUNT / 2)
1797 return 1;
1798 }
1799 msleep(30);
1800 }
1801 return 0;
1802}
1803
1804static void sdhci_hw_reset(struct mmc_host *mmc)
1805{
1806 struct sdhci_host *host = mmc_priv(mmc);
1807
1808 if (host->ops && host->ops->hw_reset)
1809 host->ops->hw_reset(host);
1810}
1811
1812static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
1813{
1814 if (!(host->flags & SDHCI_DEVICE_DEAD)) {
1815 if (enable)
1816 host->ier |= SDHCI_INT_CARD_INT;
1817 else
1818 host->ier &= ~SDHCI_INT_CARD_INT;
1819
1820 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1821 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1822 mmiowb();
1823 }
1824}
1825
1826static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1827{
1828 struct sdhci_host *host = mmc_priv(mmc);
1829 unsigned long flags;
1830
1831 spin_lock_irqsave(&host->lock, flags);
1832 if (enable)
1833 host->flags |= SDHCI_SDIO_IRQ_ENABLED;
1834 else
1835 host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
1836
1837 sdhci_enable_sdio_irq_nolock(host, enable);
1838 spin_unlock_irqrestore(&host->lock, flags);
1839}
1840
1841static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
1842 struct mmc_ios *ios)
1843{
1844 struct sdhci_host *host = mmc_priv(mmc);
1845 u16 ctrl;
1846 int ret;
1847
1848 /*
1849 * Signal Voltage Switching is only applicable for Host Controllers
1850 * v3.00 and above.
1851 */
1852 if (host->version < SDHCI_SPEC_300)
1853 return 0;
1854
1855 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1856
1857 switch (ios->signal_voltage) {
1858 case MMC_SIGNAL_VOLTAGE_330:
1859 if (!(host->flags & SDHCI_SIGNALING_330))
1860 return -EINVAL;
1861 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1862 ctrl &= ~SDHCI_CTRL_VDD_180;
1863 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1864
1865 if (!IS_ERR(mmc->supply.vqmmc)) {
1866 ret = mmc_regulator_set_vqmmc(mmc, ios);
1867 if (ret) {
1868 pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
1869 mmc_hostname(mmc));
1870 return -EIO;
1871 }
1872 }
1873 /* Wait for 5ms */
1874 usleep_range(5000, 5500);
1875
1876 /* 3.3V regulator output should be stable within 5 ms */
1877 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1878 if (!(ctrl & SDHCI_CTRL_VDD_180))
1879 return 0;
1880
1881 pr_warn("%s: 3.3V regulator output did not became stable\n",
1882 mmc_hostname(mmc));
1883
1884 return -EAGAIN;
1885 case MMC_SIGNAL_VOLTAGE_180:
1886 if (!(host->flags & SDHCI_SIGNALING_180))
1887 return -EINVAL;
1888 if (!IS_ERR(mmc->supply.vqmmc)) {
1889 ret = mmc_regulator_set_vqmmc(mmc, ios);
1890 if (ret) {
1891 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
1892 mmc_hostname(mmc));
1893 return -EIO;
1894 }
1895 }
1896
1897 /*
1898 * Enable 1.8V Signal Enable in the Host Control2
1899 * register
1900 */
1901 ctrl |= SDHCI_CTRL_VDD_180;
1902 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1903
 1904 /* Some controllers need to do more when switching */
1905 if (host->ops->voltage_switch)
1906 host->ops->voltage_switch(host);
1907
1908 /* 1.8V regulator output should be stable within 5 ms */
1909 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1910 if (ctrl & SDHCI_CTRL_VDD_180)
1911 return 0;
1912
1913 pr_warn("%s: 1.8V regulator output did not became stable\n",
1914 mmc_hostname(mmc));
1915
1916 return -EAGAIN;
1917 case MMC_SIGNAL_VOLTAGE_120:
1918 if (!(host->flags & SDHCI_SIGNALING_120))
1919 return -EINVAL;
1920 if (!IS_ERR(mmc->supply.vqmmc)) {
1921 ret = mmc_regulator_set_vqmmc(mmc, ios);
1922 if (ret) {
1923 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
1924 mmc_hostname(mmc));
1925 return -EIO;
1926 }
1927 }
1928 return 0;
1929 default:
1930 /* No signal voltage switch required */
1931 return 0;
1932 }
1933}
1934
1935static int sdhci_card_busy(struct mmc_host *mmc)
1936{
1937 struct sdhci_host *host = mmc_priv(mmc);
1938 u32 present_state;
1939
1940 /* Check whether DAT[0] is 0 */
1941 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
1942
1943 return !(present_state & SDHCI_DATA_0_LVL_MASK);
1944}
1945
1946static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
1947{
1948 struct sdhci_host *host = mmc_priv(mmc);
1949 unsigned long flags;
1950
1951 spin_lock_irqsave(&host->lock, flags);
1952 host->flags |= SDHCI_HS400_TUNING;
1953 spin_unlock_irqrestore(&host->lock, flags);
1954
1955 return 0;
1956}
1957
1958static void sdhci_start_tuning(struct sdhci_host *host)
1959{
1960 u16 ctrl;
1961
1962 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1963 ctrl |= SDHCI_CTRL_EXEC_TUNING;
1964 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
1965 ctrl |= SDHCI_CTRL_TUNED_CLK;
1966 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1967
1968 /*
1969 * As per the Host Controller spec v3.00, tuning command
1970 * generates Buffer Read Ready interrupt, so enable that.
1971 *
1972 * Note: The spec clearly says that when tuning sequence
1973 * is being performed, the controller does not generate
1974 * interrupts other than Buffer Read Ready interrupt. But
1975 * to make sure we don't hit a controller bug, we _only_
1976 * enable Buffer Read Ready interrupt here.
1977 */
1978 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
1979 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
1980}
1981
1982static void sdhci_end_tuning(struct sdhci_host *host)
1983{
1984 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1985 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1986}
1987
1988static void sdhci_reset_tuning(struct sdhci_host *host)
1989{
1990 u16 ctrl;
1991
1992 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1993 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
1994 ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
1995 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1996}
1997
1998static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode,
1999 unsigned long flags)
2000{
2001 sdhci_reset_tuning(host);
2002
2003 sdhci_do_reset(host, SDHCI_RESET_CMD);
2004 sdhci_do_reset(host, SDHCI_RESET_DATA);
2005
2006 sdhci_end_tuning(host);
2007
2008 spin_unlock_irqrestore(&host->lock, flags);
2009 mmc_abort_tuning(host->mmc, opcode);
2010 spin_lock_irqsave(&host->lock, flags);
2011}
2012
2013/*
2014 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
2015 * tuning command does not have a data payload (or rather the hardware does it
2016 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
2017 * interrupt setup is different to other commands and there is no timeout
2018 * interrupt so special handling is needed.
2019 */
2020static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode,
2021 unsigned long flags)
2022{
2023 struct mmc_host *mmc = host->mmc;
2024 struct mmc_command cmd = {0};
2025 struct mmc_request mrq = {NULL};
2026
2027 cmd.opcode = opcode;
2028 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2029 cmd.mrq = &mrq;
2030
2031 mrq.cmd = &cmd;
2032 /*
2033 * In response to CMD19, the card sends a 64-byte tuning block
2034 * to the Host Controller, so the block size is set to 64 here.
2035 * For CMD21 on an 8-bit bus the tuning block is 128 bytes.
2036 */
2037 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
2038 mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2039 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128), SDHCI_BLOCK_SIZE);
2040 else
2041 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), SDHCI_BLOCK_SIZE);
2042
2043 /*
2044 * The tuning block is sent by the card to the host controller.
2045 * So we set the TRNS_READ bit in the Transfer Mode register.
2046 * This also takes care of setting DMA Enable and Multi Block
2047 * Select in the same register to 0.
2048 */
2049 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2050
2051 sdhci_send_command(host, &cmd);
2052
2053 host->cmd = NULL;
2054
2055 sdhci_del_timer(host, &mrq);
2056
2057 host->tuning_done = 0;
2058
2059 spin_unlock_irqrestore(&host->lock, flags);
2060
2061 /* Wait for Buffer Read Ready interrupt */
2062 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2063 msecs_to_jiffies(50));
2064
2065 spin_lock_irqsave(&host->lock, flags);
2066}
2067
2068static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode,
2069 unsigned long flags)
2070{
2071 int i;
2072
2073 /*
2074 * Issue the tuning opcode repeatedly until Execute Tuning is cleared to 0
2075 * or the number of loops reaches MAX_TUNING_LOOP (40).
2076 */
2077 for (i = 0; i < MAX_TUNING_LOOP; i++) {
2078 u16 ctrl;
2079
2080 sdhci_send_tuning(host, opcode, flags);
2081
2082 if (!host->tuning_done) {
2083 pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
2084 mmc_hostname(host->mmc));
2085 sdhci_abort_tuning(host, opcode, flags);
2086 return;
2087 }
2088
2089 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2090 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2091 if (ctrl & SDHCI_CTRL_TUNED_CLK)
2092 return; /* Success! */
2093 break;
2094 }
2095
2096 /* eMMC spec does not require a delay between tuning cycles */
2097 if (opcode == MMC_SEND_TUNING_BLOCK)
2098 mdelay(1);
2099 }
2100
2101 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2102 mmc_hostname(host->mmc));
2103 sdhci_reset_tuning(host);
2104}
2105
2106int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2107{
2108 struct sdhci_host *host = mmc_priv(mmc);
2109 int err = 0;
2110 unsigned long flags;
2111 unsigned int tuning_count = 0;
2112 bool hs400_tuning;
2113
2114 spin_lock_irqsave(&host->lock, flags);
2115
2116 hs400_tuning = host->flags & SDHCI_HS400_TUNING;
2117 host->flags &= ~SDHCI_HS400_TUNING;
2118
2119 if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2120 tuning_count = host->tuning_count;
2121
2122 /*
2123 * The Host Controller needs tuning in case of SDR104 and DDR50
2124 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
2125 * the Capabilities register.
2126 * If the Host Controller supports the HS200 mode then the
2127 * tuning function has to be executed.
2128 */
2129 switch (host->timing) {
2130 /* HS400 tuning is done in HS200 mode */
2131 case MMC_TIMING_MMC_HS400:
2132 err = -EINVAL;
2133 goto out_unlock;
2134
2135 case MMC_TIMING_MMC_HS200:
2136 /*
2137 * Periodic re-tuning for HS400 is not expected to be needed, so
2138 * disable it here.
2139 */
2140 if (hs400_tuning)
2141 tuning_count = 0;
2142 break;
2143
2144 case MMC_TIMING_UHS_SDR104:
2145 case MMC_TIMING_UHS_DDR50:
2146 break;
2147
2148 case MMC_TIMING_UHS_SDR50:
2149 if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2150 break;
2151 /* FALLTHROUGH */
2152
2153 default:
2154 goto out_unlock;
2155 }
2156
2157 if (host->ops->platform_execute_tuning) {
2158 spin_unlock_irqrestore(&host->lock, flags);
2159 return host->ops->platform_execute_tuning(host, opcode);
2160 }
2161
2162 host->mmc->retune_period = tuning_count;
2163
2164 sdhci_start_tuning(host);
2165
2166 __sdhci_execute_tuning(host, opcode, flags);
2167
2168 sdhci_end_tuning(host);
2169out_unlock:
2170 spin_unlock_irqrestore(&host->lock, flags);
2171
2172 return err;
2173}
2174EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
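/*
 * Illustrative only (not part of this driver): because sdhci_alloc_host()
 * copies sdhci_ops into host->mmc_host_ops, a platform glue driver can wrap
 * the core tuning entry point with vendor-specific preparation. All foo_*
 * names below are hypothetical:
 *
 *	static int foo_execute_tuning(struct mmc_host *mmc, u32 opcode)
 *	{
 *		struct sdhci_host *host = mmc_priv(mmc);
 *
 *		foo_prepare_sampling_clock(host);	// hypothetical vendor step
 *		return sdhci_execute_tuning(mmc, opcode);
 *	}
 *
 *	// in the glue driver's probe(), after sdhci_alloc_host():
 *	host->mmc_host_ops.execute_tuning = foo_execute_tuning;
 */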
2175
2176static int sdhci_select_drive_strength(struct mmc_card *card,
2177 unsigned int max_dtr, int host_drv,
2178 int card_drv, int *drv_type)
2179{
2180 struct sdhci_host *host = mmc_priv(card->host);
2181
2182 if (!host->ops->select_drive_strength)
2183 return 0;
2184
2185 return host->ops->select_drive_strength(host, card, max_dtr, host_drv,
2186 card_drv, drv_type);
2187}
2188
2189static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2190{
2191 /* Host Controller v3.00 defines preset value registers */
2192 if (host->version < SDHCI_SPEC_300)
2193 return;
2194
2195 /*
2196 * Only enable or disable Preset Value if it is not already in the
2197 * requested state. Otherwise, bail out without touching the register.
2198 */
2199 if (host->preset_enabled != enable) {
2200 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2201
2202 if (enable)
2203 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2204 else
2205 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2206
2207 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2208
2209 if (enable)
2210 host->flags |= SDHCI_PV_ENABLED;
2211 else
2212 host->flags &= ~SDHCI_PV_ENABLED;
2213
2214 host->preset_enabled = enable;
2215 }
2216}
2217
2218static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2219 int err)
2220{
2221 struct sdhci_host *host = mmc_priv(mmc);
2222 struct mmc_data *data = mrq->data;
2223
2224 if (data->host_cookie != COOKIE_UNMAPPED)
2225 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2226 data->flags & MMC_DATA_WRITE ?
2227 DMA_TO_DEVICE : DMA_FROM_DEVICE);
2228
2229 data->host_cookie = COOKIE_UNMAPPED;
2230}
2231
2232static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2233{
2234 struct sdhci_host *host = mmc_priv(mmc);
2235
2236 mrq->data->host_cookie = COOKIE_UNMAPPED;
2237
2238 if (host->flags & SDHCI_REQ_USE_DMA)
2239 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2240}
2241
2242static inline bool sdhci_has_requests(struct sdhci_host *host)
2243{
2244 return host->cmd || host->data_cmd;
2245}
2246
2247static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
2248{
2249 if (host->data_cmd) {
2250 host->data_cmd->error = err;
2251 sdhci_finish_mrq(host, host->data_cmd->mrq);
2252 }
2253
2254 if (host->cmd) {
2255 host->cmd->error = err;
2256 sdhci_finish_mrq(host, host->cmd->mrq);
2257 }
2258}
2259
2260static void sdhci_card_event(struct mmc_host *mmc)
2261{
2262 struct sdhci_host *host = mmc_priv(mmc);
2263 unsigned long flags;
2264 int present;
2265
2266 /* First check if client has provided their own card event */
2267 if (host->ops->card_event)
2268 host->ops->card_event(host);
2269
2270 present = mmc->ops->get_cd(mmc);
2271
2272 spin_lock_irqsave(&host->lock, flags);
2273
2274 /* Check sdhci_has_requests() first in case we are runtime suspended */
2275 if (sdhci_has_requests(host) && !present) {
2276 pr_err("%s: Card removed during transfer!\n",
2277 mmc_hostname(host->mmc));
2278 pr_err("%s: Resetting controller.\n",
2279 mmc_hostname(host->mmc));
2280
2281 sdhci_do_reset(host, SDHCI_RESET_CMD);
2282 sdhci_do_reset(host, SDHCI_RESET_DATA);
2283
2284 sdhci_error_out_mrqs(host, -ENOMEDIUM);
2285 }
2286
2287 spin_unlock_irqrestore(&host->lock, flags);
2288}
2289
2290static const struct mmc_host_ops sdhci_ops = {
2291 .request = sdhci_request,
2292 .post_req = sdhci_post_req,
2293 .pre_req = sdhci_pre_req,
2294 .set_ios = sdhci_set_ios,
2295 .get_cd = sdhci_get_cd,
2296 .get_ro = sdhci_get_ro,
2297 .hw_reset = sdhci_hw_reset,
2298 .enable_sdio_irq = sdhci_enable_sdio_irq,
2299 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
2300 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
2301 .execute_tuning = sdhci_execute_tuning,
2302 .select_drive_strength = sdhci_select_drive_strength,
2303 .card_event = sdhci_card_event,
2304 .card_busy = sdhci_card_busy,
2305};
2306
2307/*****************************************************************************\
2308 * *
2309 * Tasklets *
2310 * *
2311\*****************************************************************************/
2312
2313static bool sdhci_request_done(struct sdhci_host *host)
2314{
2315 unsigned long flags;
2316 struct mmc_request *mrq;
2317 int i;
2318
2319 spin_lock_irqsave(&host->lock, flags);
2320
2321 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
2322 mrq = host->mrqs_done[i];
2323 if (mrq)
2324 break;
2325 }
2326
2327 if (!mrq) {
2328 spin_unlock_irqrestore(&host->lock, flags);
2329 return true;
2330 }
2331
2332 sdhci_del_timer(host, mrq);
2333
2334 /*
2335 * Always unmap the data buffers if they were mapped by
2336 * sdhci_prepare_data() whenever we finish with a request.
2337 * This avoids leaking DMA mappings on error.
2338 */
2339 if (host->flags & SDHCI_REQ_USE_DMA) {
2340 struct mmc_data *data = mrq->data;
2341
2342 if (data && data->host_cookie == COOKIE_MAPPED) {
2343 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2344 (data->flags & MMC_DATA_READ) ?
2345 DMA_FROM_DEVICE : DMA_TO_DEVICE);
2346 data->host_cookie = COOKIE_UNMAPPED;
2347 }
2348 }
2349
2350 /*
2351 * The controller needs a reset of internal state machines
2352 * upon error conditions.
2353 */
2354 if (sdhci_needs_reset(host, mrq)) {
2355 /*
2356 * Do not finish until command and data lines are available for
2357 * reset. Note there can only be one other mrq, so it cannot
2358 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
2359 * would both be null.
2360 */
2361 if (host->cmd || host->data_cmd) {
2362 spin_unlock_irqrestore(&host->lock, flags);
2363 return true;
2364 }
2365
2366 /* Some controllers need this kick or reset won't work here */
2367 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
2368 /* This is to force an update */
2369 host->ops->set_clock(host, host->clock);
2370
2371 /* Spec says we should do both at the same time, but Ricoh
2372 controllers do not like that. */
2373 sdhci_do_reset(host, SDHCI_RESET_CMD);
2374 sdhci_do_reset(host, SDHCI_RESET_DATA);
2375
2376 host->pending_reset = false;
2377 }
2378
2379 if (!sdhci_has_requests(host))
2380 sdhci_led_deactivate(host);
2381
2382 host->mrqs_done[i] = NULL;
2383
2384 mmiowb();
2385 spin_unlock_irqrestore(&host->lock, flags);
2386
2387 mmc_request_done(host->mmc, mrq);
2388
2389 return false;
2390}
2391
2392static void sdhci_tasklet_finish(unsigned long param)
2393{
2394 struct sdhci_host *host = (struct sdhci_host *)param;
2395
2396 while (!sdhci_request_done(host))
2397 ;
2398}
2399
2400static void sdhci_timeout_timer(unsigned long data)
2401{
2402 struct sdhci_host *host;
2403 unsigned long flags;
2404
2405 host = (struct sdhci_host *)data;
2406
2407 spin_lock_irqsave(&host->lock, flags);
2408
2409 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
2410 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
2411 mmc_hostname(host->mmc));
2412 sdhci_dumpregs(host);
2413
2414 host->cmd->error = -ETIMEDOUT;
2415 sdhci_finish_mrq(host, host->cmd->mrq);
2416 }
2417
2418 mmiowb();
2419 spin_unlock_irqrestore(&host->lock, flags);
2420}
2421
2422static void sdhci_timeout_data_timer(unsigned long data)
2423{
2424 struct sdhci_host *host;
2425 unsigned long flags;
2426
2427 host = (struct sdhci_host *)data;
2428
2429 spin_lock_irqsave(&host->lock, flags);
2430
2431 if (host->data || host->data_cmd ||
2432 (host->cmd && sdhci_data_line_cmd(host->cmd))) {
2433 pr_err("%s: Timeout waiting for hardware interrupt.\n",
2434 mmc_hostname(host->mmc));
2435 sdhci_dumpregs(host);
2436
2437 if (host->data) {
2438 host->data->error = -ETIMEDOUT;
2439 sdhci_finish_data(host);
2440 } else if (host->data_cmd) {
2441 host->data_cmd->error = -ETIMEDOUT;
2442 sdhci_finish_mrq(host, host->data_cmd->mrq);
2443 } else {
2444 host->cmd->error = -ETIMEDOUT;
2445 sdhci_finish_mrq(host, host->cmd->mrq);
2446 }
2447 }
2448
2449 mmiowb();
2450 spin_unlock_irqrestore(&host->lock, flags);
2451}
2452
2453/*****************************************************************************\
2454 * *
2455 * Interrupt handling *
2456 * *
2457\*****************************************************************************/
2458
2459static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
2460{
2461 if (!host->cmd) {
2462 /*
2463 * SDHCI recovers from errors by resetting the cmd and data
2464 * circuits. Until that is done, there very well might be more
2465 * interrupts, so ignore them in that case.
2466 */
2467 if (host->pending_reset)
2468 return;
2469 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
2470 mmc_hostname(host->mmc), (unsigned)intmask);
2471 sdhci_dumpregs(host);
2472 return;
2473 }
2474
2475 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
2476 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
2477 if (intmask & SDHCI_INT_TIMEOUT)
2478 host->cmd->error = -ETIMEDOUT;
2479 else
2480 host->cmd->error = -EILSEQ;
2481
2482 /*
2483 * If this command initiates a data phase and a response
2484 * CRC error is signalled, the card can start transferring
2485 * data - the card may have received the command without
2486 * error. We must not terminate the mmc_request early.
2487 *
2488 * If the card did not receive the command or returned an
2489 * error which prevented it sending data, the data phase
2490 * will time out.
2491 */
2492 if (host->cmd->data &&
2493 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
2494 SDHCI_INT_CRC) {
2495 host->cmd = NULL;
2496 return;
2497 }
2498
2499 sdhci_finish_mrq(host, host->cmd->mrq);
2500 return;
2501 }
2502
2503 if (intmask & SDHCI_INT_RESPONSE)
2504 sdhci_finish_command(host);
2505}
2506
2507#ifdef CONFIG_MMC_DEBUG
2508static void sdhci_adma_show_error(struct sdhci_host *host)
2509{
2510 const char *name = mmc_hostname(host->mmc);
2511 void *desc = host->adma_table;
2512
2513 sdhci_dumpregs(host);
2514
2515 while (true) {
2516 struct sdhci_adma2_64_desc *dma_desc = desc;
2517
2518 if (host->flags & SDHCI_USE_64_BIT_DMA)
2519 DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
2520 name, desc, le32_to_cpu(dma_desc->addr_hi),
2521 le32_to_cpu(dma_desc->addr_lo),
2522 le16_to_cpu(dma_desc->len),
2523 le16_to_cpu(dma_desc->cmd));
2524 else
2525 DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
2526 name, desc, le32_to_cpu(dma_desc->addr_lo),
2527 le16_to_cpu(dma_desc->len),
2528 le16_to_cpu(dma_desc->cmd));
2529
2530 desc += host->desc_sz;
2531
2532 if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
2533 break;
2534 }
2535}
2536#else
2537static void sdhci_adma_show_error(struct sdhci_host *host) { }
2538#endif
2539
2540static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2541{
2542 u32 command;
2543
2544 /* CMD19 generates _only_ Buffer Read Ready interrupt */
2545 if (intmask & SDHCI_INT_DATA_AVAIL) {
2546 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
2547 if (command == MMC_SEND_TUNING_BLOCK ||
2548 command == MMC_SEND_TUNING_BLOCK_HS200) {
2549 host->tuning_done = 1;
2550 wake_up(&host->buf_ready_int);
2551 return;
2552 }
2553 }
2554
2555 if (!host->data) {
2556 struct mmc_command *data_cmd = host->data_cmd;
2557
2558 /*
2559 * The "data complete" interrupt is also used to
2560 * indicate that a busy state has ended. See comment
2561 * above in sdhci_cmd_irq().
2562 */
2563 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
2564 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
2565 host->data_cmd = NULL;
2566 data_cmd->error = -ETIMEDOUT;
2567 sdhci_finish_mrq(host, data_cmd->mrq);
2568 return;
2569 }
2570 if (intmask & SDHCI_INT_DATA_END) {
2571 host->data_cmd = NULL;
2572 /*
2573 * Some cards handle busy-end interrupt
2574 * before the command has completed, so make
2575 * sure we do things in the proper order.
2576 */
2577 if (host->cmd == data_cmd)
2578 return;
2579
2580 sdhci_finish_mrq(host, data_cmd->mrq);
2581 return;
2582 }
2583 }
2584
2585 /*
2586 * SDHCI recovers from errors by resetting the cmd and data
2587 * circuits. Until that is done, there very well might be more
2588 * interrupts, so ignore them in that case.
2589 */
2590 if (host->pending_reset)
2591 return;
2592
2593 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
2594 mmc_hostname(host->mmc), (unsigned)intmask);
2595 sdhci_dumpregs(host);
2596
2597 return;
2598 }
2599
2600 if (intmask & SDHCI_INT_DATA_TIMEOUT)
2601 host->data->error = -ETIMEDOUT;
2602 else if (intmask & SDHCI_INT_DATA_END_BIT)
2603 host->data->error = -EILSEQ;
2604 else if ((intmask & SDHCI_INT_DATA_CRC) &&
2605 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
2606 != MMC_BUS_TEST_R)
2607 host->data->error = -EILSEQ;
2608 else if (intmask & SDHCI_INT_ADMA_ERROR) {
2609 pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
2610 sdhci_adma_show_error(host);
2611 host->data->error = -EIO;
2612 if (host->ops->adma_workaround)
2613 host->ops->adma_workaround(host, intmask);
2614 }
2615
2616 if (host->data->error)
2617 sdhci_finish_data(host);
2618 else {
2619 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
2620 sdhci_transfer_pio(host);
2621
2622 /*
2623 * We currently don't do anything fancy with DMA
2624 * boundaries, but as we can't disable the feature
2625 * we need to at least restart the transfer.
2626 *
2627 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
2628 * should return a valid address to continue from, but as
2629 * some controllers are faulty, don't trust them.
2630 */
2631 if (intmask & SDHCI_INT_DMA_END) {
2632 u32 dmastart, dmanow;
2633 dmastart = sg_dma_address(host->data->sg);
2634 dmanow = dmastart + host->data->bytes_xfered;
2635 /*
2636 * Force update to the next DMA block boundary.
2637 */
2638 dmanow = (dmanow &
2639 ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
2640 SDHCI_DEFAULT_BOUNDARY_SIZE;
2641 host->data->bytes_xfered = dmanow - dmastart;
2642 DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes,"
2643 " next 0x%08x\n",
2644 mmc_hostname(host->mmc), dmastart,
2645 host->data->bytes_xfered, dmanow);
2646 sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
2647 }
2648
2649 if (intmask & SDHCI_INT_DATA_END) {
2650 if (host->cmd == host->data_cmd) {
2651 /*
2652 * Data managed to finish before the
2653 * command completed. Make sure we do
2654 * things in the proper order.
2655 */
2656 host->data_early = 1;
2657 } else {
2658 sdhci_finish_data(host);
2659 }
2660 }
2661 }
2662}
2663
2664static irqreturn_t sdhci_irq(int irq, void *dev_id)
2665{
2666 irqreturn_t result = IRQ_NONE;
2667 struct sdhci_host *host = dev_id;
2668 u32 intmask, mask, unexpected = 0;
2669 int max_loops = 16;
2670
2671 spin_lock(&host->lock);
2672
2673 if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
2674 spin_unlock(&host->lock);
2675 return IRQ_NONE;
2676 }
2677
2678 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2679 if (!intmask || intmask == 0xffffffff) {
2680 result = IRQ_NONE;
2681 goto out;
2682 }
2683
2684 do {
2685 /* Clear selected interrupts. */
2686 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2687 SDHCI_INT_BUS_POWER);
2688 sdhci_writel(host, mask, SDHCI_INT_STATUS);
2689
2690 DBG("*** %s got interrupt: 0x%08x\n",
2691 mmc_hostname(host->mmc), intmask);
2692
2693 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2694 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
2695 SDHCI_CARD_PRESENT;
2696
2697 /*
2698 * There is an observation on i.MX eSDHC: the INSERT
2699 * bit is immediately set again when it gets
2700 * cleared while a card is inserted. We have to mask
2701 * the irq to prevent an interrupt storm which would
2702 * freeze the system. The REMOVE bit behaves the
2703 * same way.
2704 *
2705 * More testing is needed here to ensure it works
2706 * for other platforms though.
2707 */
2708 host->ier &= ~(SDHCI_INT_CARD_INSERT |
2709 SDHCI_INT_CARD_REMOVE);
2710 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
2711 SDHCI_INT_CARD_INSERT;
2712 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2713 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2714
2715 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
2716 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
2717
2718 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
2719 SDHCI_INT_CARD_REMOVE);
2720 result = IRQ_WAKE_THREAD;
2721 }
2722
2723 if (intmask & SDHCI_INT_CMD_MASK)
2724 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
2725
2726 if (intmask & SDHCI_INT_DATA_MASK)
2727 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
2728
2729 if (intmask & SDHCI_INT_BUS_POWER)
2730 pr_err("%s: Card is consuming too much power!\n",
2731 mmc_hostname(host->mmc));
2732
2733 if (intmask & SDHCI_INT_RETUNE)
2734 mmc_retune_needed(host->mmc);
2735
2736 if (intmask & SDHCI_INT_CARD_INT) {
2737 sdhci_enable_sdio_irq_nolock(host, false);
2738 host->thread_isr |= SDHCI_INT_CARD_INT;
2739 result = IRQ_WAKE_THREAD;
2740 }
2741
2742 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
2743 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2744 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
2745 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
2746
2747 if (intmask) {
2748 unexpected |= intmask;
2749 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
2750 }
2751
2752 if (result == IRQ_NONE)
2753 result = IRQ_HANDLED;
2754
2755 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2756 } while (intmask && --max_loops);
2757out:
2758 spin_unlock(&host->lock);
2759
2760 if (unexpected) {
2761 pr_err("%s: Unexpected interrupt 0x%08x.\n",
2762 mmc_hostname(host->mmc), unexpected);
2763 sdhci_dumpregs(host);
2764 }
2765
2766 return result;
2767}
2768
2769static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
2770{
2771 struct sdhci_host *host = dev_id;
2772 unsigned long flags;
2773 u32 isr;
2774
2775 spin_lock_irqsave(&host->lock, flags);
2776 isr = host->thread_isr;
2777 host->thread_isr = 0;
2778 spin_unlock_irqrestore(&host->lock, flags);
2779
2780 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2781 struct mmc_host *mmc = host->mmc;
2782
2783 mmc->ops->card_event(mmc);
2784 mmc_detect_change(mmc, msecs_to_jiffies(200));
2785 }
2786
2787 if (isr & SDHCI_INT_CARD_INT) {
2788 sdio_run_irqs(host->mmc);
2789
2790 spin_lock_irqsave(&host->lock, flags);
2791 if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2792 sdhci_enable_sdio_irq_nolock(host, true);
2793 spin_unlock_irqrestore(&host->lock, flags);
2794 }
2795
2796 return isr ? IRQ_HANDLED : IRQ_NONE;
2797}
2798
2799/*****************************************************************************\
2800 * *
2801 * Suspend/resume *
2802 * *
2803\*****************************************************************************/
2804
2805#ifdef CONFIG_PM
2806/*
2807 * To enable wakeup events, the corresponding events have to be enabled in
2808 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
2809 * Table' in the SD Host Controller Standard Specification.
2810 * It is useless to restore SDHCI_INT_ENABLE state in
2811 * sdhci_disable_irq_wakeups() since it will be set by
2812 * sdhci_enable_card_detection() or sdhci_init().
2813 */
2814void sdhci_enable_irq_wakeups(struct sdhci_host *host)
2815{
2816 u8 val;
2817 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2818 | SDHCI_WAKE_ON_INT;
2819 u32 irq_val = SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
2820 SDHCI_INT_CARD_INT;
2821
2822 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2823 val |= mask;
2824 /* Avoid fake wake up */
2825 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) {
2826 val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
2827 irq_val &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
2828 }
2829 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2830 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
2831}
2832EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
2833
2834static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
2835{
2836 u8 val;
2837 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2838 | SDHCI_WAKE_ON_INT;
2839
2840 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2841 val &= ~mask;
2842 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2843}
2844
2845int sdhci_suspend_host(struct sdhci_host *host)
2846{
2847 sdhci_disable_card_detection(host);
2848
2849 mmc_retune_timer_stop(host->mmc);
2850 if (host->tuning_mode != SDHCI_TUNING_MODE_3)
2851 mmc_retune_needed(host->mmc);
2852
2853 if (!device_may_wakeup(mmc_dev(host->mmc))) {
2854 host->ier = 0;
2855 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
2856 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2857 free_irq(host->irq, host);
2858 } else {
2859 sdhci_enable_irq_wakeups(host);
2860 enable_irq_wake(host->irq);
2861 }
2862 return 0;
2863}
2864
2865EXPORT_SYMBOL_GPL(sdhci_suspend_host);
2866
2867int sdhci_resume_host(struct sdhci_host *host)
2868{
2869 struct mmc_host *mmc = host->mmc;
2870 int ret = 0;
2871
2872 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2873 if (host->ops->enable_dma)
2874 host->ops->enable_dma(host);
2875 }
2876
2877 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
2878 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
2879 /* Card keeps power but host controller does not */
2880 sdhci_init(host, 0);
2881 host->pwr = 0;
2882 host->clock = 0;
2883 mmc->ops->set_ios(mmc, &mmc->ios);
2884 } else {
2885 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
2886 mmiowb();
2887 }
2888
2889 if (!device_may_wakeup(mmc_dev(host->mmc))) {
2890 ret = request_threaded_irq(host->irq, sdhci_irq,
2891 sdhci_thread_irq, IRQF_SHARED,
2892 mmc_hostname(host->mmc), host);
2893 if (ret)
2894 return ret;
2895 } else {
2896 sdhci_disable_irq_wakeups(host);
2897 disable_irq_wake(host->irq);
2898 }
2899
2900 sdhci_enable_card_detection(host);
2901
2902 return ret;
2903}
2904
2905EXPORT_SYMBOL_GPL(sdhci_resume_host);
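/*
 * Illustrative only: platform glue drivers typically expose the two helpers
 * above through dev_pm_ops, assuming the glue driver stored the sdhci_host
 * pointer as driver data. The foo_* names are hypothetical:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_suspend_host(host);
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_resume_host(host);
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 */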
2906
2907int sdhci_runtime_suspend_host(struct sdhci_host *host)
2908{
2909 unsigned long flags;
2910
2911 mmc_retune_timer_stop(host->mmc);
2912 if (host->tuning_mode != SDHCI_TUNING_MODE_3)
2913 mmc_retune_needed(host->mmc);
2914
2915 spin_lock_irqsave(&host->lock, flags);
2916 host->ier &= SDHCI_INT_CARD_INT;
2917 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2918 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2919 spin_unlock_irqrestore(&host->lock, flags);
2920
2921 synchronize_hardirq(host->irq);
2922
2923 spin_lock_irqsave(&host->lock, flags);
2924 host->runtime_suspended = true;
2925 spin_unlock_irqrestore(&host->lock, flags);
2926
2927 return 0;
2928}
2929EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
2930
2931int sdhci_runtime_resume_host(struct sdhci_host *host)
2932{
2933 struct mmc_host *mmc = host->mmc;
2934 unsigned long flags;
2935 int host_flags = host->flags;
2936
2937 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2938 if (host->ops->enable_dma)
2939 host->ops->enable_dma(host);
2940 }
2941
2942 sdhci_init(host, 0);
2943
2944 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED) {
2945 /* Force clock and power re-program */
2946 host->pwr = 0;
2947 host->clock = 0;
2948 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
2949 mmc->ops->set_ios(mmc, &mmc->ios);
2950
2951 if ((host_flags & SDHCI_PV_ENABLED) &&
2952 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
2953 spin_lock_irqsave(&host->lock, flags);
2954 sdhci_enable_preset_value(host, true);
2955 spin_unlock_irqrestore(&host->lock, flags);
2956 }
2957
2958 if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
2959 mmc->ops->hs400_enhanced_strobe)
2960 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
2961 }
2962
2963 spin_lock_irqsave(&host->lock, flags);
2964
2965 host->runtime_suspended = false;
2966
2967 /* Enable SDIO IRQ */
2968 if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2969 sdhci_enable_sdio_irq_nolock(host, true);
2970
2971 /* Enable Card Detection */
2972 sdhci_enable_card_detection(host);
2973
2974 spin_unlock_irqrestore(&host->lock, flags);
2975
2976 return 0;
2977}
2978EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
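/*
 * Illustrative only: the runtime PM helpers above are normally wired into a
 * glue driver's runtime callbacks (again assuming the sdhci_host pointer was
 * stored as driver data; foo_* names are hypothetical). Glue drivers usually
 * also gate their controller clocks around these calls:
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_runtime_suspend_host(host);
 *	}
 *
 *	static int foo_runtime_resume(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_runtime_resume_host(host);
 *	}
 */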
2979
2980#endif /* CONFIG_PM */
2981
2982/*****************************************************************************\
2983 * *
2984 * Device allocation/registration *
2985 * *
2986\*****************************************************************************/
2987
2988struct sdhci_host *sdhci_alloc_host(struct device *dev,
2989 size_t priv_size)
2990{
2991 struct mmc_host *mmc;
2992 struct sdhci_host *host;
2993
2994 WARN_ON(dev == NULL);
2995
2996 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
2997 if (!mmc)
2998 return ERR_PTR(-ENOMEM);
2999
3000 host = mmc_priv(mmc);
3001 host->mmc = mmc;
3002 host->mmc_host_ops = sdhci_ops;
3003 mmc->ops = &host->mmc_host_ops;
3004
3005 host->flags = SDHCI_SIGNALING_330;
3006
3007 return host;
3008}
3009
3010EXPORT_SYMBOL_GPL(sdhci_alloc_host);
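/*
 * Illustrative only: a glue driver usually asks for extra space for its own
 * state and retrieves it later with sdhci_priv() from sdhci.h. foo_priv is a
 * hypothetical structure:
 *
 *	struct foo_priv {
 *		struct clk *clk;
 *	};
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(struct foo_priv));
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *
 *	priv = sdhci_priv(host);
 */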
3011
3012static int sdhci_set_dma_mask(struct sdhci_host *host)
3013{
3014 struct mmc_host *mmc = host->mmc;
3015 struct device *dev = mmc_dev(mmc);
3016 int ret = -EINVAL;
3017
3018 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
3019 host->flags &= ~SDHCI_USE_64_BIT_DMA;
3020
3021 /* Try 64-bit mask if hardware is capable of it */
3022 if (host->flags & SDHCI_USE_64_BIT_DMA) {
3023 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
3024 if (ret) {
3025 pr_warn("%s: Failed to set 64-bit DMA mask.\n",
3026 mmc_hostname(mmc));
3027 host->flags &= ~SDHCI_USE_64_BIT_DMA;
3028 }
3029 }
3030
3031 /* 32-bit mask as default & fallback */
3032 if (ret) {
3033 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
3034 if (ret)
3035 pr_warn("%s: Failed to set 32-bit DMA mask.\n",
3036 mmc_hostname(mmc));
3037 }
3038
3039 return ret;
3040}
3041
3042void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
3043{
3044 u16 v;
3045 u64 dt_caps_mask = 0;
3046 u64 dt_caps = 0;
3047
3048 if (host->read_caps)
3049 return;
3050
3051 host->read_caps = true;
3052
3053 if (debug_quirks)
3054 host->quirks = debug_quirks;
3055
3056 if (debug_quirks2)
3057 host->quirks2 = debug_quirks2;
3058
3059 sdhci_do_reset(host, SDHCI_RESET_ALL);
3060
3061 of_property_read_u64(mmc_dev(host->mmc)->of_node,
3062 "sdhci-caps-mask", &dt_caps_mask);
3063 of_property_read_u64(mmc_dev(host->mmc)->of_node,
3064 "sdhci-caps", &dt_caps);
3065
3066 v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
3067 host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
3068
3069 if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
3070 return;
3071
3072 if (caps) {
3073 host->caps = *caps;
3074 } else {
3075 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
3076 host->caps &= ~lower_32_bits(dt_caps_mask);
3077 host->caps |= lower_32_bits(dt_caps);
3078 }
3079
3080 if (host->version < SDHCI_SPEC_300)
3081 return;
3082
3083 if (caps1) {
3084 host->caps1 = *caps1;
3085 } else {
3086 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
3087 host->caps1 &= ~upper_32_bits(dt_caps_mask);
3088 host->caps1 |= upper_32_bits(dt_caps);
3089 }
3090}
3091EXPORT_SYMBOL_GPL(__sdhci_read_caps);
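/*
 * Illustrative only: a glue driver whose controller reports bogus capability
 * registers can supply corrected values before sdhci_setup_host() runs, or
 * use the "sdhci-caps"/"sdhci-caps-mask" device-tree properties handled
 * above. The capability bits chosen here are purely an example:
 *
 *	u32 caps = SDHCI_CAN_DO_SDMA | SDHCI_CAN_VDD_330;
 *
 *	__sdhci_read_caps(host, NULL, &caps, NULL);
 */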
3092
3093int sdhci_setup_host(struct sdhci_host *host)
3094{
3095 struct mmc_host *mmc;
3096 u32 max_current_caps;
3097 unsigned int ocr_avail;
3098 unsigned int override_timeout_clk;
3099 u32 max_clk;
3100 int ret;
3101
3102 WARN_ON(host == NULL);
3103 if (host == NULL)
3104 return -EINVAL;
3105
3106 mmc = host->mmc;
3107
3108 /*
3109 * If there are external regulators, get them. Note this must be done
3110 * early before resetting the host and reading the capabilities so that
3111 * the host can take the appropriate action if regulators are not
3112 * available.
3113 */
3114 ret = mmc_regulator_get_supply(mmc);
3115 if (ret == -EPROBE_DEFER)
3116 return ret;
3117
3118 sdhci_read_caps(host);
3119
3120 override_timeout_clk = host->timeout_clk;
3121
3122 if (host->version > SDHCI_SPEC_300) {
3123 pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
3124 mmc_hostname(mmc), host->version);
3125 }
3126
3127 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
3128 host->flags |= SDHCI_USE_SDMA;
3129 else if (!(host->caps & SDHCI_CAN_DO_SDMA))
3130 DBG("Controller doesn't have SDMA capability\n");
3131 else
3132 host->flags |= SDHCI_USE_SDMA;
3133
3134 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
3135 (host->flags & SDHCI_USE_SDMA)) {
3136 DBG("Disabling DMA as it is marked broken\n");
3137 host->flags &= ~SDHCI_USE_SDMA;
3138 }
3139
3140 if ((host->version >= SDHCI_SPEC_200) &&
3141 (host->caps & SDHCI_CAN_DO_ADMA2))
3142 host->flags |= SDHCI_USE_ADMA;
3143
3144 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
3145 (host->flags & SDHCI_USE_ADMA)) {
3146 DBG("Disabling ADMA as it is marked broken\n");
3147 host->flags &= ~SDHCI_USE_ADMA;
3148 }
3149
3150 /*
3151 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
3152 * and *must* do 64-bit DMA. A driver has the opportunity to change
3153 * that during the first call to ->enable_dma(). Similarly
3154 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
3155 * implement.
3156 */
3157 if (host->caps & SDHCI_CAN_64BIT)
3158 host->flags |= SDHCI_USE_64_BIT_DMA;
3159
3160 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3161 ret = sdhci_set_dma_mask(host);
3162
3163 if (!ret && host->ops->enable_dma)
3164 ret = host->ops->enable_dma(host);
3165
3166 if (ret) {
3167 pr_warn("%s: No suitable DMA available - falling back to PIO\n",
3168 mmc_hostname(mmc));
3169 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
3170
3171 ret = 0;
3172 }
3173 }
3174
3175 /* SDMA does not support 64-bit DMA */
3176 if (host->flags & SDHCI_USE_64_BIT_DMA)
3177 host->flags &= ~SDHCI_USE_SDMA;
3178
3179 if (host->flags & SDHCI_USE_ADMA) {
3180 dma_addr_t dma;
3181 void *buf;
3182
3183 /*
3184 * The DMA descriptor table size is calculated as the maximum
3185 * number of segments times 2, to allow for an alignment
3186 * descriptor for each segment, plus 1 for a nop end descriptor,
3187 * all multiplied by the descriptor size.
3188 */
3189 if (host->flags & SDHCI_USE_64_BIT_DMA) {
3190 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3191 SDHCI_ADMA2_64_DESC_SZ;
3192 host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
3193 } else {
3194 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3195 SDHCI_ADMA2_32_DESC_SZ;
3196 host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
3197 }
3198
3199 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
3200 buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
3201 host->adma_table_sz, &dma, GFP_KERNEL);
3202 if (!buf) {
3203 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
3204 mmc_hostname(mmc));
3205 host->flags &= ~SDHCI_USE_ADMA;
3206 } else if ((dma + host->align_buffer_sz) &
3207 (SDHCI_ADMA2_DESC_ALIGN - 1)) {
3208 pr_warn("%s: Unable to allocate aligned ADMA descriptor\n",
3209 mmc_hostname(mmc));
3210 host->flags &= ~SDHCI_USE_ADMA;
3211 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3212 host->adma_table_sz, buf, dma);
3213 } else {
3214 host->align_buffer = buf;
3215 host->align_addr = dma;
3216
3217 host->adma_table = buf + host->align_buffer_sz;
3218 host->adma_addr = dma + host->align_buffer_sz;
3219 }
3220 }
3221
3222 /*
3223 * If we use DMA, then it's up to the caller to set the DMA
3224 * mask, but PIO does not need the hw shim so we set a new
3225 * mask here in that case.
3226 */
3227 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
3228 host->dma_mask = DMA_BIT_MASK(64);
3229 mmc_dev(mmc)->dma_mask = &host->dma_mask;
3230 }
3231
3232 if (host->version >= SDHCI_SPEC_300)
3233 host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
3234 >> SDHCI_CLOCK_BASE_SHIFT;
3235 else
3236 host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
3237 >> SDHCI_CLOCK_BASE_SHIFT;
3238
3239 host->max_clk *= 1000000;
3240 if (host->max_clk == 0 || host->quirks &
3241 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
3242 if (!host->ops->get_max_clock) {
3243 pr_err("%s: Hardware doesn't specify base clock frequency.\n",
3244 mmc_hostname(mmc));
3245 ret = -ENODEV;
3246 goto undma;
3247 }
3248 host->max_clk = host->ops->get_max_clock(host);
3249 }
3250
3251 /*
3252 * In case of Host Controller v3.00, find out whether clock
3253 * multiplier is supported.
3254 */
3255 host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
3256 SDHCI_CLOCK_MUL_SHIFT;
3257
3258 /*
3259 * In case the value in Clock Multiplier is 0, then programmable
3260 * clock mode is not supported, otherwise the actual clock
3261 * multiplier is one more than the value of Clock Multiplier
3262 * in the Capabilities Register.
3263 */
3264 if (host->clk_mul)
3265 host->clk_mul += 1;
3266
3267 /*
3268 * Set host parameters.
3269 */
3270 max_clk = host->max_clk;
3271
3272 if (host->ops->get_min_clock)
3273 mmc->f_min = host->ops->get_min_clock(host);
3274 else if (host->version >= SDHCI_SPEC_300) {
3275 if (host->clk_mul) {
3276 mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
3277 max_clk = host->max_clk * host->clk_mul;
3278 } else
3279 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
3280 } else
3281 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
3282
3283 if (!mmc->f_max || mmc->f_max > max_clk)
3284 mmc->f_max = max_clk;
3285
3286 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
3287 host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
3288 SDHCI_TIMEOUT_CLK_SHIFT;
3289 if (host->timeout_clk == 0) {
3290 if (host->ops->get_timeout_clock) {
3291 host->timeout_clk =
3292 host->ops->get_timeout_clock(host);
3293 } else {
3294 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
3295 mmc_hostname(mmc));
3296 ret = -ENODEV;
3297 goto undma;
3298 }
3299 }
3300
3301 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
3302 host->timeout_clk *= 1000;
3303
3304 if (override_timeout_clk)
3305 host->timeout_clk = override_timeout_clk;
3306
3307 mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
3308 host->ops->get_max_timeout_count(host) : 1 << 27;
3309 mmc->max_busy_timeout /= host->timeout_clk;
3310 }
3311
3312 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
3313 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
3314
3315 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
3316 host->flags |= SDHCI_AUTO_CMD12;
3317
3318 /* Auto-CMD23 stuff only works in ADMA or PIO. */
3319 if ((host->version >= SDHCI_SPEC_300) &&
3320 ((host->flags & SDHCI_USE_ADMA) ||
3321 !(host->flags & SDHCI_USE_SDMA)) &&
3322 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
3323 host->flags |= SDHCI_AUTO_CMD23;
3324 DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
3325 } else {
3326 DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
3327 }
3328
3329 /*
3330 * A controller may support 8-bit width, but the board itself
3331 * might not have the pins brought out. Boards that support
3332 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
3333 * their platform code before calling sdhci_add_host(), and we
3334 * won't assume 8-bit width for hosts without that CAP.
3335 */
3336 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
3337 mmc->caps |= MMC_CAP_4_BIT_DATA;
3338
3339 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
3340 mmc->caps &= ~MMC_CAP_CMD23;
3341
3342 if (host->caps & SDHCI_CAN_DO_HISPD)
3343 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3344
3345 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3346 mmc_card_is_removable(mmc) &&
3347 mmc_gpio_get_cd(host->mmc) < 0)
3348 mmc->caps |= MMC_CAP_NEEDS_POLL;
3349
3350 /* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
3351 if (!IS_ERR(mmc->supply.vqmmc)) {
3352 ret = regulator_enable(mmc->supply.vqmmc);
3353 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
3354 1950000))
3355 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
3356 SDHCI_SUPPORT_SDR50 |
3357 SDHCI_SUPPORT_DDR50);
3358 if (ret) {
3359 pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
3360 mmc_hostname(mmc), ret);
3361 mmc->supply.vqmmc = ERR_PTR(-EINVAL);
3362 }
3363 }
3364
3365 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
3366 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3367 SDHCI_SUPPORT_DDR50);
3368 }
3369
3370 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
3371 if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3372 SDHCI_SUPPORT_DDR50))
3373 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
3374
3375 /* SDR104 support also implies SDR50 support */
3376 if (host->caps1 & SDHCI_SUPPORT_SDR104) {
3377 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3378 /* SD3.0: SDR104 is supported so (for eMMC) the caps2
3379 * field can be promoted to support HS200.
3380 */
3381 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
3382 mmc->caps2 |= MMC_CAP2_HS200;
3383 } else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
3384 mmc->caps |= MMC_CAP_UHS_SDR50;
3385 }
3386
3387 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
3388 (host->caps1 & SDHCI_SUPPORT_HS400))
3389 mmc->caps2 |= MMC_CAP2_HS400;
3390
3391 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
3392 (IS_ERR(mmc->supply.vqmmc) ||
3393 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
3394 1300000)))
3395 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
3396
3397 if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
3398 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
3399 mmc->caps |= MMC_CAP_UHS_DDR50;
3400
3401 /* Does the host need tuning for SDR50? */
3402 if (host->caps1 & SDHCI_USE_SDR50_TUNING)
3403 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
3404
3405 /* Driver Type(s) (A, C, D) supported by the host */
3406 if (host->caps1 & SDHCI_DRIVER_TYPE_A)
3407 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
3408 if (host->caps1 & SDHCI_DRIVER_TYPE_C)
3409 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
3410 if (host->caps1 & SDHCI_DRIVER_TYPE_D)
3411 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
3412
3413 /* Initial value for re-tuning timer count */
3414 host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
3415 SDHCI_RETUNING_TIMER_COUNT_SHIFT;
3416
3417 /*
3418 * In case Re-tuning Timer is not disabled, the actual value of
3419 * re-tuning timer will be 2 ^ (n - 1).
3420 */
3421 if (host->tuning_count)
3422 host->tuning_count = 1 << (host->tuning_count - 1);
3423
3424 /* Re-tuning mode supported by the Host Controller */
3425 host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
3426 SDHCI_RETUNING_MODE_SHIFT;
3427
3428 ocr_avail = 0;
3429
3430 /*
3431 * According to SD Host Controller spec v3.00, if the Host System
3432 * can afford more than 150mA, Host Driver should set XPC to 1. Also
3433 * the value is meaningful only if Voltage Support in the Capabilities
3434 * register is set. The actual current value is 4 times the register
3435 * value.
3436 */
3437 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
3438 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
3439 int curr = regulator_get_current_limit(mmc->supply.vmmc);
3440 if (curr > 0) {
3441
3442 /* convert to SDHCI_MAX_CURRENT format */
3443 curr = curr/1000; /* convert to mA */
3444 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
3445
3446 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
3447 max_current_caps =
3448 (curr << SDHCI_MAX_CURRENT_330_SHIFT) |
3449 (curr << SDHCI_MAX_CURRENT_300_SHIFT) |
3450 (curr << SDHCI_MAX_CURRENT_180_SHIFT);
3451 }
3452 }
3453
3454 if (host->caps & SDHCI_CAN_VDD_330) {
3455 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
3456
3457 mmc->max_current_330 = ((max_current_caps &
3458 SDHCI_MAX_CURRENT_330_MASK) >>
3459 SDHCI_MAX_CURRENT_330_SHIFT) *
3460 SDHCI_MAX_CURRENT_MULTIPLIER;
3461 }
3462 if (host->caps & SDHCI_CAN_VDD_300) {
3463 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
3464
3465 mmc->max_current_300 = ((max_current_caps &
3466 SDHCI_MAX_CURRENT_300_MASK) >>
3467 SDHCI_MAX_CURRENT_300_SHIFT) *
3468 SDHCI_MAX_CURRENT_MULTIPLIER;
3469 }
3470 if (host->caps & SDHCI_CAN_VDD_180) {
3471 ocr_avail |= MMC_VDD_165_195;
3472
3473 mmc->max_current_180 = ((max_current_caps &
3474 SDHCI_MAX_CURRENT_180_MASK) >>
3475 SDHCI_MAX_CURRENT_180_SHIFT) *
3476 SDHCI_MAX_CURRENT_MULTIPLIER;
3477 }
3478
3479 /* If OCR set by host, use it instead. */
3480 if (host->ocr_mask)
3481 ocr_avail = host->ocr_mask;
3482
3483 /* If OCR set by external regulators, give it highest prio. */
3484 if (mmc->ocr_avail)
3485 ocr_avail = mmc->ocr_avail;
3486
3487 mmc->ocr_avail = ocr_avail;
3488 mmc->ocr_avail_sdio = ocr_avail;
3489 if (host->ocr_avail_sdio)
3490 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
3491 mmc->ocr_avail_sd = ocr_avail;
3492 if (host->ocr_avail_sd)
3493 mmc->ocr_avail_sd &= host->ocr_avail_sd;
3494 else /* normal SD controllers don't support 1.8V */
3495 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
3496 mmc->ocr_avail_mmc = ocr_avail;
3497 if (host->ocr_avail_mmc)
3498 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
3499
3500 if (mmc->ocr_avail == 0) {
3501 pr_err("%s: Hardware doesn't report any supported voltages.\n",
3502 mmc_hostname(mmc));
3503 ret = -ENODEV;
3504 goto unreg;
3505 }
3506
3507 if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
3508 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
3509 MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
3510 (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
3511 host->flags |= SDHCI_SIGNALING_180;
3512
3513 if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
3514 host->flags |= SDHCI_SIGNALING_120;
3515
3516 spin_lock_init(&host->lock);
3517
3518 /*
3519 * Maximum number of segments. Depends on if the hardware
3520 * can do scatter/gather or not.
3521 */
3522 if (host->flags & SDHCI_USE_ADMA)
3523 mmc->max_segs = SDHCI_MAX_SEGS;
3524 else if (host->flags & SDHCI_USE_SDMA)
3525 mmc->max_segs = 1;
3526 else /* PIO */
3527 mmc->max_segs = SDHCI_MAX_SEGS;
3528
3529 /*
3530 * Maximum number of sectors in one transfer. Limited by SDMA boundary
3531 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
3532 * is less anyway.
3533 */
3534 mmc->max_req_size = 524288;
3535
3536 /*
3537 * Maximum segment size. Could be one segment with the maximum number
3538 * of bytes. When doing hardware scatter/gather, each entry cannot
3539 * be larger than 64 KiB though.
3540 */
3541 if (host->flags & SDHCI_USE_ADMA) {
3542 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
3543 mmc->max_seg_size = 65535;
3544 else
3545 mmc->max_seg_size = 65536;
3546 } else {
3547 mmc->max_seg_size = mmc->max_req_size;
3548 }
3549
3550 /*
3551 * Maximum block size. This varies from controller to controller and
3552 * is specified in the capabilities register.
3553 */
3554 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
3555 mmc->max_blk_size = 2;
3556 } else {
3557 mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
3558 SDHCI_MAX_BLOCK_SHIFT;
3559 if (mmc->max_blk_size >= 3) {
3560 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
3561 mmc_hostname(mmc));
3562 mmc->max_blk_size = 0;
3563 }
3564 }
3565
3566 mmc->max_blk_size = 512 << mmc->max_blk_size;
3567
3568 /*
3569 * Maximum block count.
3570 */
3571 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
3572
3573 return 0;
3574
3575unreg:
3576 if (!IS_ERR(mmc->supply.vqmmc))
3577 regulator_disable(mmc->supply.vqmmc);
3578undma:
3579 if (host->align_buffer)
3580 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3581 host->adma_table_sz, host->align_buffer,
3582 host->align_addr);
3583 host->adma_table = NULL;
3584 host->align_buffer = NULL;
3585
3586 return ret;
3587}
3588EXPORT_SYMBOL_GPL(sdhci_setup_host);
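/*
 * Illustrative only: drivers that need to adjust mmc capabilities after the
 * hardware capabilities have been parsed, but before the host is registered,
 * can use sdhci_setup_host() plus __sdhci_add_host() instead of the combined
 * sdhci_add_host(). The added capability below is just an example:
 *
 *	ret = sdhci_setup_host(host);
 *	if (ret)
 *		return ret;
 *
 *	host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
 *
 *	ret = __sdhci_add_host(host);
 */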
3589
3590int __sdhci_add_host(struct sdhci_host *host)
3591{
3592 struct mmc_host *mmc = host->mmc;
3593 int ret;
3594
3595 /*
3596 * Init tasklets.
3597 */
3598 tasklet_init(&host->finish_tasklet,
3599 sdhci_tasklet_finish, (unsigned long)host);
3600
3601 setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
3602 setup_timer(&host->data_timer, sdhci_timeout_data_timer,
3603 (unsigned long)host);
3604
3605 init_waitqueue_head(&host->buf_ready_int);
3606
3607 sdhci_init(host, 0);
3608
3609 ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
3610 IRQF_SHARED, mmc_hostname(mmc), host);
3611 if (ret) {
3612 pr_err("%s: Failed to request IRQ %d: %d\n",
3613 mmc_hostname(mmc), host->irq, ret);
3614 goto untasklet;
3615 }
3616
3617#ifdef CONFIG_MMC_DEBUG
3618 sdhci_dumpregs(host);
3619#endif
3620
3621 ret = sdhci_led_register(host);
3622 if (ret) {
3623 pr_err("%s: Failed to register LED device: %d\n",
3624 mmc_hostname(mmc), ret);
3625 goto unirq;
3626 }
3627
3628 mmiowb();
3629
3630 ret = mmc_add_host(mmc);
3631 if (ret)
3632 goto unled;
3633
3634 pr_info("%s: SDHCI controller on %s [%s] using %s\n",
3635 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
3636 (host->flags & SDHCI_USE_ADMA) ?
3637 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
3638 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
3639
3640 sdhci_enable_card_detection(host);
3641
3642 return 0;
3643
3644unled:
3645 sdhci_led_unregister(host);
3646unirq:
3647 sdhci_do_reset(host, SDHCI_RESET_ALL);
3648 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3649 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3650 free_irq(host->irq, host);
3651untasklet:
3652 tasklet_kill(&host->finish_tasklet);
3653
3654 if (!IS_ERR(mmc->supply.vqmmc))
3655 regulator_disable(mmc->supply.vqmmc);
3656
3657 if (host->align_buffer)
3658 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3659 host->adma_table_sz, host->align_buffer,
3660 host->align_addr);
3661 host->adma_table = NULL;
3662 host->align_buffer = NULL;
3663
3664 return ret;
3665}
3666EXPORT_SYMBOL_GPL(__sdhci_add_host);
3667
3668int sdhci_add_host(struct sdhci_host *host)
3669{
3670 int ret;
3671
3672 ret = sdhci_setup_host(host);
3673 if (ret)
3674 return ret;
3675
3676 return __sdhci_add_host(host);
3677}
3678EXPORT_SYMBOL_GPL(sdhci_add_host);
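/*
 * Illustrative probe sketch, not a real driver: it only shows the usual call
 * sequence into the core. All foo_* names are hypothetical and error handling
 * is abbreviated:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct resource *res;
 *		struct sdhci_host *host;
 *		int ret;
 *
 *		host = sdhci_alloc_host(&pdev->dev, 0);
 *		if (IS_ERR(host))
 *			return PTR_ERR(host);
 *
 *		host->hw_name = "foo-sdhci";
 *		host->ops = &foo_sdhci_ops;
 *		host->irq = platform_get_irq(pdev, 0);
 *
 *		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
 *		if (IS_ERR(host->ioaddr)) {
 *			ret = PTR_ERR(host->ioaddr);
 *			goto err_free;
 *		}
 *
 *		ret = sdhci_add_host(host);
 *		if (ret)
 *			goto err_free;
 *
 *		platform_set_drvdata(pdev, host);
 *		return 0;
 *
 *	err_free:
 *		sdhci_free_host(host);
 *		return ret;
 *	}
 */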
3679
3680void sdhci_remove_host(struct sdhci_host *host, int dead)
3681{
3682 struct mmc_host *mmc = host->mmc;
3683 unsigned long flags;
3684
3685 if (dead) {
3686 spin_lock_irqsave(&host->lock, flags);
3687
3688 host->flags |= SDHCI_DEVICE_DEAD;
3689
3690 if (sdhci_has_requests(host)) {
3691 pr_err("%s: Controller removed during "
3692 "transfer!\n", mmc_hostname(mmc));
3693 sdhci_error_out_mrqs(host, -ENOMEDIUM);
3694 }
3695
3696 spin_unlock_irqrestore(&host->lock, flags);
3697 }
3698
3699 sdhci_disable_card_detection(host);
3700
3701 mmc_remove_host(mmc);
3702
3703 sdhci_led_unregister(host);
3704
3705 if (!dead)
3706 sdhci_do_reset(host, SDHCI_RESET_ALL);
3707
3708 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3709 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3710 free_irq(host->irq, host);
3711
3712 del_timer_sync(&host->timer);
3713 del_timer_sync(&host->data_timer);
3714
3715 tasklet_kill(&host->finish_tasklet);
3716
3717 if (!IS_ERR(mmc->supply.vqmmc))
3718 regulator_disable(mmc->supply.vqmmc);
3719
3720 if (host->align_buffer)
3721 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3722 host->adma_table_sz, host->align_buffer,
3723 host->align_addr);
3724
3725 host->adma_table = NULL;
3726 host->align_buffer = NULL;
3727}
3728
3729EXPORT_SYMBOL_GPL(sdhci_remove_host);
3730
3731void sdhci_free_host(struct sdhci_host *host)
3732{
3733 mmc_free_host(host->mmc);
3734}
3735
3736EXPORT_SYMBOL_GPL(sdhci_free_host);
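/*
 * Illustrative removal sketch matching the probe example above; the "dead"
 * test mirrors what existing glue drivers do when the device may already
 * have disappeared from the bus:
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		struct sdhci_host *host = platform_get_drvdata(pdev);
 *		int dead = (sdhci_readl(host, SDHCI_INT_STATUS) == 0xffffffff);
 *
 *		sdhci_remove_host(host, dead);
 *		sdhci_free_host(host);
 *
 *		return 0;
 *	}
 */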
3737
3738/*****************************************************************************\
3739 * *
3740 * Driver init/exit *
3741 * *
3742\*****************************************************************************/
3743
3744static int __init sdhci_drv_init(void)
3745{
3746 pr_info(DRIVER_NAME
3747 ": Secure Digital Host Controller Interface driver\n");
3748 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
3749
3750 return 0;
3751}
3752
3753static void __exit sdhci_drv_exit(void)
3754{
3755}
3756
3757module_init(sdhci_drv_init);
3758module_exit(sdhci_drv_exit);
3759
3760module_param(debug_quirks, uint, 0444);
3761module_param(debug_quirks2, uint, 0444);
3762
3763MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
3764MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
3765MODULE_LICENSE("GPL");
3766
3767MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
3768MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");
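/*
 * The two parameters above make it possible to force quirk bits without
 * rebuilding the driver, either at module load time or, when sdhci is
 * built in, on the kernel command line. The mask values below are purely
 * illustrative:
 *
 *	modprobe sdhci debug_quirks=0x1
 *	sdhci.debug_quirks2=0x1
 */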