Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * Thanks to the following companies for their support:
 *
 * - JMicron (hardware and technical support)
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define SDHCI_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);

static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd);

void sdhci_dumpregs(struct sdhci_host *host)
{
	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

	SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n",
		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
		   sdhci_readw(host, SDHCI_HOST_VERSION));
	SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n",
		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
	SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n",
		   sdhci_readl(host, SDHCI_ARGUMENT),
		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
	SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n",
		   sdhci_readl(host, SDHCI_PRESENT_STATE),
		   sdhci_readb(host, SDHCI_HOST_CONTROL));
	SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n",
		   sdhci_readb(host, SDHCI_POWER_CONTROL),
		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n",
		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n",
		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		   sdhci_readl(host, SDHCI_INT_STATUS));
	SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
		   sdhci_readl(host, SDHCI_INT_ENABLE),
		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
		   sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
		   sdhci_readl(host, SDHCI_CAPABILITIES),
		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
	SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n",
		   sdhci_readw(host, SDHCI_COMMAND),
		   sdhci_readl(host, SDHCI_MAX_CURRENT));
	SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE),
		   sdhci_readl(host, SDHCI_RESPONSE + 4));
	SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE + 8),
		   sdhci_readl(host, SDHCI_RESPONSE + 12));
	SDHCI_DUMP("Host ctl2: 0x%08x\n",
		   sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		} else {
			SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		}
	}

	if (host->ops->dump_vendor_regs)
		host->ops->dump_vendor_regs(host);

	SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);

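/*
 * Example: the ->dump_vendor_regs() hook used above lets a platform driver
 * append its own registers to this dump. A minimal sketch of the pattern;
 * the "foo_" names and the 0x100 offset are hypothetical placeholders:
 */
#if 0	/* illustrative sketch, not part of this file */
static void foo_dump_vendor_regs(struct sdhci_host *host)
{
	SDHCI_DUMP("Vendor: 0x%08x\n", sdhci_readl(host, 0x100));
}

static const struct sdhci_ops foo_ops = {
	.dump_vendor_regs = foo_dump_vendor_regs,
	/* other mandatory ops omitted */
};
#endif
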
/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
{
	u16 ctrl2;

	ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	if (ctrl2 & SDHCI_CTRL_V4_MODE)
		return;

	ctrl2 |= SDHCI_CTRL_V4_MODE;
	sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
}

/*
 * This can be called before sdhci_add_host() by a vendor's host controller
 * driver to enable v4 mode if supported.
 */
void sdhci_enable_v4_mode(struct sdhci_host *host)
{
	host->v4_mode = true;
	sdhci_do_enable_v4_mode(host);
}
EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);

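/*
 * Example usage, as the comment above describes: a vendor probe path calls
 * sdhci_enable_v4_mode() before sdhci_add_host() so that capabilities are
 * interpreted in v4 mode. Hypothetical sketch ("foo_" names assumed):
 */
#if 0	/* illustrative sketch, not part of this file */
static int foo_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;

	host = sdhci_pltfm_init(pdev, &foo_pltfm_data, 0);
	if (IS_ERR(host))
		return PTR_ERR(host);

	sdhci_enable_v4_mode(host);	/* must precede sdhci_add_host() */

	return sdhci_add_host(host);
}
#endif
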
static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(mmc_dev(host->mmc));
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(mmc_dev(host->mmc));
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	ktime_t timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = ktime_add_ms(ktime_get(), 100);

	/* hw clears the bit when it's done */
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
			break;
		if (timedout) {
			pr_err("%s: Reset 0x%x never completed.\n",
			       mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);

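/*
 * Example: vendor drivers commonly wrap sdhci_reset() in their own ->reset
 * op to restore controller-specific state that a reset wipes out. A sketch
 * of the pattern (the "foo_" names are hypothetical):
 */
#if 0	/* illustrative sketch, not part of this file */
static void foo_reset(struct sdhci_host *host, u8 mask)
{
	sdhci_reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		/* re-apply vendor tuning/pad settings lost by the reset */
	}
}

static const struct sdhci_ops foo_ops = {
	.reset = foo_reset,
	/* other ops omitted */
};
#endif
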
static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		if (!mmc->ops->get_cd(mmc))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many settings */
		host->preset_enabled = false;
	}
}

static void sdhci_set_default_irqs(struct sdhci_host *host)
{
	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
	    host->tuning_mode == SDHCI_TUNING_MODE_3)
		host->ier |= SDHCI_INT_RETUNE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_config_dma(struct sdhci_host *host)
{
	u8 ctrl;
	u16 ctrl2;

	if (host->version < SDHCI_SPEC_200)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	if (!(host->flags & SDHCI_REQ_USE_DMA))
		goto out;

	/* Note if DMA Select is zero then SDMA is selected */
	if (host->flags & SDHCI_USE_ADMA)
		ctrl |= SDHCI_CTRL_ADMA32;

	if (host->flags & SDHCI_USE_64_BIT_DMA) {
		/*
		 * If v4 mode, all supported DMA can be 64-bit addressing if
		 * controller supports 64-bit system address, otherwise only
		 * ADMA can support 64-bit addressing.
		 */
		if (host->v4_mode) {
			ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
			sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
		} else if (host->flags & SDHCI_USE_ADMA) {
			/*
			 * Don't need to undo SDHCI_CTRL_ADMA32 in order to
			 * set SDHCI_CTRL_ADMA64.
			 */
			ctrl |= SDHCI_CTRL_ADMA64;
		}
	}

out:
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	if (host->v4_mode)
		sdhci_do_enable_v4_mode(host);

	spin_lock_irqsave(&host->lock, flags);
	sdhci_set_default_irqs(host);
	spin_unlock_irqrestore(&host->lock, flags);

	host->cqe_on = false;

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);

	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);

	/*
	 * A change to the card detect bits indicates a change in present
	 * state, see sdhci_set_card_detection(). A card detect interrupt
	 * might have been missed while the host controller was being reset,
	 * so trigger a rescan to check.
	 */
	if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT)))
		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
}

static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return 0;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif

static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}

static inline bool sdhci_has_requests(struct sdhci_host *host)
{
	return host->cmd || host->data_cmd;
}

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
	    (host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	/* Bounce write requests to the bounce buffer */
	if (host->bounce_buffer) {
		unsigned int length = data->blksz * data->blocks;

		if (length > host->bounce_buffer_size) {
			pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
			       mmc_hostname(host->mmc), length,
			       host->bounce_buffer_size);
			return -EIO;
		}
		if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
			/* Copy the data to the bounce buffer */
			if (host->ops->copy_to_bounce_buffer) {
				host->ops->copy_to_bounce_buffer(host,
								 data, length);
			} else {
				sg_copy_to_buffer(data->sg, data->sg_len,
						  host->bounce_buffer, length);
			}
		}
		/* Switch ownership to the DMA */
		dma_sync_single_for_device(mmc_dev(host->mmc),
					   host->bounce_addr,
					   host->bounce_buffer_size,
					   mmc_get_dma_dir(data));
		/* Just a dummy value */
		sg_count = 1;
	} else {
		/* Just access the data directly from memory */
		sg_count = dma_map_sg(mmc_dev(host->mmc),
				      data->sg, data->sg_len,
				      mmc_get_dma_dir(data));
	}

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}

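/*
 * Example: the COOKIE_PRE_MAPPED fast path above is fed by the mmc core's
 * ->pre_req() hook, which maps the next request's buffers while the current
 * request is still in flight. A minimal sketch of that pre-mapping side
 * (sdhci's real hook lives further down in this file):
 */
#if 0	/* illustrative sketch, not part of this file */
static void example_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host = mmc_priv(mmc);

	mrq->data->host_cookie = COOKIE_UNMAPPED;

	if (host->flags & SDHCI_REQ_USE_DMA)
		sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
}
#endif
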
static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
			   dma_addr_t addr, int len, unsigned int cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = *desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr));

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr));

	*desc += host->desc_sz;
}
EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);

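/*
 * Example: a driver overrides ->adma_write_desc() (dispatched by
 * __sdhci_adma_write_desc() below) when its controller has extra addressing
 * restrictions. A sketch that splits any descriptor crossing a 128 MB
 * boundary, a pattern used by some drivers; the boundary size here is an
 * assumption, not a property of SDHCI itself:
 */
#if 0	/* illustrative sketch, not part of this file */
static void foo_adma_write_desc(struct sdhci_host *host, void **desc,
				dma_addr_t addr, int len, unsigned int cmd)
{
	int tmplen, offset;

	if (likely(!len || (addr & (SZ_128M - 1)) + len <= SZ_128M)) {
		sdhci_adma_write_desc(host, desc, addr, len, cmd);
		return;
	}

	offset = addr & (SZ_128M - 1);
	tmplen = SZ_128M - offset;
	sdhci_adma_write_desc(host, desc, addr, tmplen, cmd);

	sdhci_adma_write_desc(host, desc, addr + tmplen, len - tmplen, cmd);
}
#endif
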
static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
					   void **desc, dma_addr_t addr,
					   int len, unsigned int cmd)
{
	if (host->ops->adma_write_desc)
		host->ops->adma_write_desc(host, desc, addr, len, cmd);
	else
		sdhci_adma_write_desc(host, desc, addr, len, cmd);
}

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}

static void sdhci_adma_table_pre(struct sdhci_host *host,
				 struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			__sdhci_adma_write_desc(host, &desc, align_addr,
						offset, ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		/* tran, valid */
		if (len)
			__sdhci_adma_write_desc(host, &desc, addr, len,
						ADMA2_TRAN_VALID);

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		__sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}

static void sdhci_adma_table_post(struct sdhci_host *host,
				  struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}

static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr)
{
	sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS);
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI);
}

static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
{
	if (host->bounce_buffer)
		return host->bounce_addr;
	else
		return sg_dma_address(host->data->sg);
}

static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
{
	if (host->v4_mode)
		sdhci_set_adma_addr(host, addr);
	else
		sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
}

static unsigned int sdhci_target_timeout(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 struct mmc_data *data)
{
	unsigned int target_timeout;

	/* timeout in us */
	if (!data) {
		target_timeout = cmd->busy_timeout * 1000;
	} else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz. target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz. Round up.
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	return target_timeout;
}

static void sdhci_calc_sw_timeout(struct sdhci_host *host,
				  struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios *ios = &mmc->ios;
	unsigned char bus_width = 1 << ios->bus_width;
	unsigned int blksz;
	unsigned int freq;
	u64 target_timeout;
	u64 transfer_time;

	target_timeout = sdhci_target_timeout(host, cmd, data);
	target_timeout *= NSEC_PER_USEC;

	if (data) {
		blksz = data->blksz;
		freq = mmc->actual_clock ? : host->clock;
		transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
		do_div(transfer_time, freq);
		/* multiply by '2' to account for any unknowns */
		transfer_time = transfer_time * 2;
		/* calculate timeout for the entire data */
		host->data_timeout = data->blocks * target_timeout +
				     transfer_time;
	} else {
		host->data_timeout = target_timeout;
	}

	if (host->data_timeout)
		host->data_timeout += MMC_CMD_TRANSFER_TIME;
}

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
			     bool *too_big)
{
	u8 count;
	struct mmc_data *data;
	unsigned target_timeout, current_timeout;

	*too_big = true;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use the maximum. The hardware may
	 * take longer to time out, but that's much better than having a
	 * too-short timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return host->max_timeout_count;

	/* Unspecified command, assume max */
	if (cmd == NULL)
		return host->max_timeout_count;

	data = cmd->data;
	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return host->max_timeout_count;

	/* timeout in us */
	target_timeout = sdhci_target_timeout(host, cmd, data);

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 * =>
	 * (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count > host->max_timeout_count)
			break;
	}

	if (count > host->max_timeout_count) {
		if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
			DBG("Too large timeout 0x%x requested for CMD%d!\n",
			    count, cmd->opcode);
		count = host->max_timeout_count;
	} else {
		*too_big = false;
	}

	return count;
}

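/*
 * Worked example for sdhci_calc_timeout() above: with host->timeout_clk =
 * 50000 (kHz, i.e. a 50 MHz timeout clock), the count == 0 timeout is
 * (1 << 13) * 1000 / 50000 ~= 163 us, and every increment of 'count'
 * doubles it, so count == 14 corresponds to 2^27 clocks, roughly 2.7 s.
 * Illustrative numbers only, not taken from any particular controller.
 */
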
static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
		host->ier |= SDHCI_INT_AUTO_CMD_ERR;
	else
		host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
{
	if (enable)
		host->ier |= SDHCI_INT_DATA_TIMEOUT;
	else
		host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);

void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	bool too_big = false;
	u8 count = sdhci_calc_timeout(host, cmd, &too_big);

	if (too_big &&
	    host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
		sdhci_calc_sw_timeout(host, cmd);
		sdhci_set_data_timeout_irq(host, false);
	} else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
		sdhci_set_data_timeout_irq(host, true);
	}

	sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
}
EXPORT_SYMBOL_GPL(__sdhci_set_timeout);

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	if (host->ops->set_timeout)
		host->ops->set_timeout(host, cmd);
	else
		__sdhci_set_timeout(host, cmd);
}

static void sdhci_initialize_data(struct sdhci_host *host,
				  struct mmc_data *data)
{
	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;
}

static inline void sdhci_set_block_info(struct sdhci_host *host,
					struct mmc_data *data)
{
	/* Set the DMA boundary value and block size */
	sdhci_writew(host,
		     SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
		     SDHCI_BLOCK_SIZE);
	/*
	 * For Version 4.10 onwards, if v4 mode is enabled, a 32-bit Block
	 * Count can be supported; in that case the 16-bit block count
	 * register must be 0.
	 */
	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
	    (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
		if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
			sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
		sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
	} else {
		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
	}
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;

	sdhci_initialize_data(host, data);

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here is that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);
			sdhci_set_adma_addr(host, host->adma_addr);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
		}
	}

	sdhci_config_dma(host);

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	sdhci_set_block_info(host, data);
}

#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)

static int sdhci_external_dma_init(struct sdhci_host *host)
{
	int ret = 0;
	struct mmc_host *mmc = host->mmc;

	host->tx_chan = dma_request_chan(mmc_dev(mmc), "tx");
	if (IS_ERR(host->tx_chan)) {
		ret = PTR_ERR(host->tx_chan);
		if (ret != -EPROBE_DEFER)
			pr_warn("Failed to request TX DMA channel.\n");
		host->tx_chan = NULL;
		return ret;
	}

	host->rx_chan = dma_request_chan(mmc_dev(mmc), "rx");
	if (IS_ERR(host->rx_chan)) {
		if (host->tx_chan) {
			dma_release_channel(host->tx_chan);
			host->tx_chan = NULL;
		}

		ret = PTR_ERR(host->rx_chan);
		if (ret != -EPROBE_DEFER)
			pr_warn("Failed to request RX DMA channel.\n");
		host->rx_chan = NULL;
	}

	return ret;
}

static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
						   struct mmc_data *data)
{
	return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
}

static int sdhci_external_dma_setup(struct sdhci_host *host,
				    struct mmc_command *cmd)
{
	int ret, i;
	enum dma_transfer_direction dir;
	struct dma_async_tx_descriptor *desc;
	struct mmc_data *data = cmd->data;
	struct dma_chan *chan;
	struct dma_slave_config cfg;
	dma_cookie_t cookie;
	int sg_cnt;

	if (!host->mapbase)
		return -EINVAL;

	memset(&cfg, 0, sizeof(cfg));
	cfg.src_addr = host->mapbase + SDHCI_BUFFER;
	cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = data->blksz / 4;
	cfg.dst_maxburst = data->blksz / 4;

	/* Sanity check: all the SG entries must be aligned by block size. */
	for (i = 0; i < data->sg_len; i++) {
		if ((data->sg + i)->length % data->blksz)
			return -EINVAL;
	}

	chan = sdhci_external_dma_channel(host, data);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
	if (sg_cnt <= 0)
		return -EINVAL;

	dir = data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	desc->callback = NULL;
	desc->callback_param = NULL;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		ret = cookie;

	return ret;
}

static void sdhci_external_dma_release(struct sdhci_host *host)
{
	if (host->tx_chan) {
		dma_release_channel(host->tx_chan);
		host->tx_chan = NULL;
	}

	if (host->rx_chan) {
		dma_release_channel(host->rx_chan);
		host->rx_chan = NULL;
	}

	sdhci_switch_external_dma(host, false);
}

static void __sdhci_external_dma_prepare_data(struct sdhci_host *host,
					      struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;

	sdhci_initialize_data(host, data);

	host->flags |= SDHCI_REQ_USE_DMA;
	sdhci_set_transfer_irqs(host);

	sdhci_set_block_info(host, data);
}

static void sdhci_external_dma_prepare_data(struct sdhci_host *host,
					    struct mmc_command *cmd)
{
	if (!sdhci_external_dma_setup(host, cmd)) {
		__sdhci_external_dma_prepare_data(host, cmd);
	} else {
		sdhci_external_dma_release(host);
		pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n",
		       mmc_hostname(host->mmc));
		sdhci_prepare_data(host, cmd);
	}
}

static void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
					    struct mmc_command *cmd)
{
	struct dma_chan *chan;

	if (!cmd->data)
		return;

	chan = sdhci_external_dma_channel(host, cmd->data);
	if (chan)
		dma_async_issue_pending(chan);
}

#else

static inline int sdhci_external_dma_init(struct sdhci_host *host)
{
	return -EOPNOTSUPP;
}

static inline void sdhci_external_dma_release(struct sdhci_host *host)
{
}

static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host,
						   struct mmc_command *cmd)
{
	/* This should never happen */
	WARN_ON_ONCE(1);
}

static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
						   struct mmc_command *cmd)
{
}

static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
							  struct mmc_data *data)
{
	return NULL;
}

#endif

void sdhci_switch_external_dma(struct sdhci_host *host, bool en)
{
	host->use_external_dma = en;
}
EXPORT_SYMBOL_GPL(sdhci_switch_external_dma);

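/*
 * Example: a platform driver opts in to external dmaengine transfers by
 * calling sdhci_switch_external_dma() before sdhci_add_host(); the core
 * then requests the "tx"/"rx" channels via sdhci_external_dma_init(). A
 * hypothetical probe fragment (the property check is an assumption):
 */
#if 0	/* illustrative sketch, not part of this file */
	if (device_property_read_bool(&pdev->dev, "dmas"))
		sdhci_switch_external_dma(host, true);

	return sdhci_add_host(host);
#endif
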
static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
	       !mrq->cap_cmd_during_tfr;
}

static inline bool sdhci_auto_cmd23(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
}

static inline bool sdhci_manual_cmd23(struct sdhci_host *host,
				      struct mmc_request *mrq)
{
	return mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23);
}

static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 u16 *mode)
{
	bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
			 (cmd->opcode != SD_IO_RW_EXTENDED);
	bool use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq);
	u16 ctrl2;

	/*
	 * In case of Version 4.10 or later, use of 'Auto CMD Auto
	 * Select' is recommended rather than use of 'Auto CMD12
	 * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode
	 * here because some controllers (e.g. sdhci-of-dwmshc) expect it.
	 */
	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
	    (use_cmd12 || use_cmd23)) {
		*mode |= SDHCI_TRNS_AUTO_SEL;

		ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (use_cmd23)
			ctrl2 |= SDHCI_CMD23_ENABLE;
		else
			ctrl2 &= ~SDHCI_CMD23_ENABLE;
		sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);

		return;
	}

	/*
	 * If we are sending CMD23, CMD12 never gets sent
	 * on successful completion (so no Auto-CMD12).
	 */
	if (use_cmd12)
		*mode |= SDHCI_TRNS_AUTO_CMD12;
	else if (use_cmd23)
		*mode |= SDHCI_TRNS_AUTO_CMD23;
}

static void sdhci_set_transfer_mode(struct sdhci_host *host,
				    struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
		    SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			/* must not clear SDHCI_TRANSFER_MODE when tuning */
			if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
				sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				     SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		sdhci_auto_cmd_select(host, cmd, &mode);
		if (sdhci_auto_cmd23(host, cmd->mrq))
			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}

static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
		((mrq->cmd && mrq->cmd->error) ||
		 (mrq->sbc && mrq->sbc->error) ||
		 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}

static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (!host->mrqs_done[i]) {
			host->mrqs_done[i] = mrq;
			break;
		}
	}

	WARN_ON(i >= SDHCI_MAX_MRQS);
}

static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->deferred_cmd && host->deferred_cmd->mrq == mrq)
		host->deferred_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	sdhci_set_mrq_done(host, mrq);

	sdhci_del_timer(host, mrq);

	if (!sdhci_has_requests(host))
		sdhci_led_deactivate(host);
}

static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	__sdhci_finish_mrq(host, mrq);

	queue_work(host->complete_wq, &host->complete_work);
}

static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout)
{
	struct mmc_command *data_cmd = host->data_cmd;
	struct mmc_data *data = host->data;

	host->data = NULL;
	host->data_cmd = NULL;

	/*
	 * The controller needs a reset of internal state machines upon error
	 * conditions.
	 */
	if (data->error) {
		if (!host->cmd || host->cmd == data_cmd)
			sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
	}

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer not using auto CMD12 (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) ||
	     data->error)) {
		/*
		 * 'cap_cmd_during_tfr' request must not use the command line
		 * after mmc_command_done() has been called. It is the upper
		 * layer's responsibility to send the stop command if required.
		 */
		if (data->mrq->cap_cmd_during_tfr) {
			__sdhci_finish_mrq(host, data->mrq);
		} else {
			/* Avoid triggering warning in sdhci_send_command() */
			host->cmd = NULL;
			if (!sdhci_send_command(host, data->stop)) {
				if (sw_data_timeout) {
					/*
					 * This is anyway a sw data timeout, so
					 * give up now.
					 */
					data->stop->error = -EIO;
					__sdhci_finish_mrq(host, data->mrq);
				} else {
					WARN_ON(host->deferred_cmd);
					host->deferred_cmd = data->stop;
				}
			}
		}
	} else {
		__sdhci_finish_mrq(host, data->mrq);
	}
}

static void sdhci_finish_data(struct sdhci_host *host)
{
	__sdhci_finish_data(host, false);
}

static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;

	/*
	 * We shouldn't wait for data inhibit for stop commands, even
	 * though they might use busy signaling.
	 */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask)
		return false;

	host->cmd = cmd;
	host->data_timeout = 0;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
		sdhci_set_timeout(host, cmd);
	}

	if (cmd->data) {
		if (host->use_external_dma)
			sdhci_external_dma_prepare_data(host, cmd);
		else
			sdhci_prepare_data(host, cmd);
	}

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		WARN_ONCE(1, "Unsupported response type!\n");
		/*
		 * This does not happen in practice because 136-bit response
		 * commands never have busy waiting, so rather than complicate
		 * the error path, just remove busy waiting and continue.
		 */
		cmd->flags &= ~MMC_RSP_BUSY;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	timeout = jiffies;
	if (host->data_timeout)
		timeout += nsecs_to_jiffies(host->data_timeout);
	else if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	if (host->use_external_dma)
		sdhci_external_dma_pre_transfer(host, cmd);

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);

	return true;
}

static bool sdhci_present_error(struct sdhci_host *host,
				struct mmc_command *cmd, bool present)
{
	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		cmd->error = -ENOMEDIUM;
		return true;
	}

	return false;
}

static bool sdhci_send_command_retry(struct sdhci_host *host,
				     struct mmc_command *cmd,
				     unsigned long flags)
	__releases(host->lock)
	__acquires(host->lock)
{
	struct mmc_command *deferred_cmd = host->deferred_cmd;
	int timeout = 10; /* Approx. 10 ms */
	bool present;

	while (!sdhci_send_command(host, cmd)) {
		if (!timeout--) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			return false;
		}

		spin_unlock_irqrestore(&host->lock, flags);

		usleep_range(1000, 1250);

		present = host->mmc->ops->get_cd(host->mmc);

		spin_lock_irqsave(&host->lock, flags);

		/* A deferred command might disappear, handle that */
		if (cmd == deferred_cmd && cmd != host->deferred_cmd)
			return true;

		if (sdhci_present_error(host, cmd, present))
			return false;
	}

	if (cmd == host->deferred_cmd)
		host->deferred_cmd = NULL;

	return true;
}

static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
{
	int i, reg;

	for (i = 0; i < 4; i++) {
		reg = SDHCI_RESPONSE + (3 - i) * 4;
		cmd->resp[i] = sdhci_readl(host, reg);
	}

	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
		return;

	/* CRC is stripped so we need to do some shifting */
	for (i = 0; i < 4; i++) {
		cmd->resp[i] <<= 8;
		if (i != 3)
			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
	}
}

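/*
 * Worked example for sdhci_read_rsp_136() above: the controller strips the
 * CRC byte, so the register file holds response bits [127:8]. If the four
 * raw reads were { 0x00112233, 0x44556677, 0x8899aabb, 0xccddeeff }, the
 * shifting loop yields { 0x11223344, 0x55667788, 0x99aabbcc, 0xddeeff00 },
 * each word pulling its low byte from the top of the next raw word.
 */
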
static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			sdhci_read_rsp_136(host, cmd);
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 * feature so there might be some problems with older
	 * controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Finished CMD23, now send actual command. */
	if (cmd == cmd->mrq->sbc) {
		if (!sdhci_send_command(host, cmd->mrq->cmd)) {
			WARN_ON(host->deferred_cmd);
			host->deferred_cmd = cmd->mrq->cmd;
		}
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!cmd->data)
			__sdhci_finish_mrq(host, cmd->mrq);
	}
}

static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_MMC_HS:
	case MMC_TIMING_SD_HS:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED);
		break;
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}

u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val);
			if (host->clk_mul &&
			    (pre_val & SDHCI_PRESET_CLKGEN_SEL)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);

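/*
 * Worked example for sdhci_calc_clk() above: on a v3 controller with
 * max_clk = 200 MHz, no clk_mul and a 50 MHz request, the divisor loop
 * stops at div = 4 (200 MHz / 4 = 50 MHz), so real_div = 4, actual_clock
 * becomes 50 MHz, and the value programmed into the divider field is
 * div >> 1 = 2. Illustrative numbers only.
 */
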
void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
{
	ktime_t timeout;

	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 150 ms */
	timeout = ktime_add_ms(ktime_get(), 150);
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		if (clk & SDHCI_CLOCK_INT_STABLE)
			break;
		if (timedout) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}

	if (host->version >= SDHCI_SPEC_410 && host->v4_mode) {
		clk |= SDHCI_CLOCK_PLL_EN;
		clk &= ~SDHCI_CLOCK_INT_STABLE;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		/* Wait max 150 ms */
		timeout = ktime_add_ms(ktime_get(), 150);
		while (1) {
			bool timedout = ktime_after(ktime_get(), timeout);

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			if (clk & SDHCI_CLOCK_INT_STABLE)
				break;
			if (timedout) {
				pr_err("%s: PLL clock never stabilised.\n",
				       mmc_hostname(host->mmc));
				sdhci_dumpregs(host);
				return;
			}
			udelay(10);
		}
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_clk);

void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
	sdhci_enable_clk(host, clk);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);

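/*
 * Example: sdhci_set_clock() is the stock ->set_clock implementation;
 * vendor drivers often wrap it to insert controller-specific steps. A
 * sketch of the pattern (the "foo_" names are hypothetical):
 */
#if 0	/* illustrative sketch, not part of this file */
static void foo_set_clock(struct sdhci_host *host, unsigned int clock)
{
	/* e.g. reprogram a vendor PLL or clock mux first */

	sdhci_set_clock(host, clock);
}

static const struct sdhci_ops foo_ops = {
	.set_clock = foo_set_clock,
	/* other ops omitted */
};
#endif
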
2009static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
2010 unsigned short vdd)
2011{
2012 struct mmc_host *mmc = host->mmc;
2013
2014 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
2015
2016 if (mode != MMC_POWER_OFF)
2017 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
2018 else
2019 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
2020}
2021
2022void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
2023 unsigned short vdd)
2024{
2025 u8 pwr = 0;
2026
2027 if (mode != MMC_POWER_OFF) {
2028 switch (1 << vdd) {
2029 case MMC_VDD_165_195:
2030 /*
2031 * Without a regulator, SDHCI does not support 2.0v
2032 * so we only get here if the driver deliberately
2033 * added the 2.0v range to ocr_avail. Map it to 1.8v
2034 * for the purpose of turning on the power.
2035 */
2036 case MMC_VDD_20_21:
2037 pwr = SDHCI_POWER_180;
2038 break;
2039 case MMC_VDD_29_30:
2040 case MMC_VDD_30_31:
2041 pwr = SDHCI_POWER_300;
2042 break;
2043 case MMC_VDD_32_33:
2044 case MMC_VDD_33_34:
2045 pwr = SDHCI_POWER_330;
2046 break;
2047 default:
2048 WARN(1, "%s: Invalid vdd %#x\n",
2049 mmc_hostname(host->mmc), vdd);
2050 break;
2051 }
2052 }
2053
2054 if (host->pwr == pwr)
2055 return;
2056
2057 host->pwr = pwr;
2058
2059 if (pwr == 0) {
2060 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
2061 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
2062 sdhci_runtime_pm_bus_off(host);
2063 } else {
2064 /*
2065 * Spec says that we should clear the power reg before setting
2066 * a new value. Some controllers don't seem to like this though.
2067 */
2068 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
2069 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
2070
2071 /*
2072 * At least the Marvell CaFe chip gets confused if we set the
 2073 * voltage and turn on the power at the same time, so set the
2074 * voltage first.
2075 */
2076 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
2077 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
2078
2079 pwr |= SDHCI_POWER_ON;
2080
2081 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
2082
2083 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
2084 sdhci_runtime_pm_bus_on(host);
2085
2086 /*
 2087 * Some controllers need an extra 10ms delay before
 2088 * they can apply the clock after applying power
2089 */
2090 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
2091 mdelay(10);
2092 }
2093}
2094EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
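/*
 * Note: "switch (1 << vdd)" above works because callers pass vdd as a bit
 * index into the OCR bitmask, so shifting recovers the MMC_VDD_* mask. For
 * example, a 3.3V request arrives as vdd = ilog2(MMC_VDD_32_33) and maps to
 * SDHCI_POWER_330.
 */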
2095
2096void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
2097 unsigned short vdd)
2098{
2099 if (IS_ERR(host->mmc->supply.vmmc))
2100 sdhci_set_power_noreg(host, mode, vdd);
2101 else
2102 sdhci_set_power_reg(host, mode, vdd);
2103}
2104EXPORT_SYMBOL_GPL(sdhci_set_power);
2105
2106/*
2107 * Some controllers need to configure a valid bus voltage on their power
2108 * register regardless of whether an external regulator is taking care of power
2109 * supply. This helper function takes care of it if set as the controller's
2110 * sdhci_ops.set_power callback.
2111 */
2112void sdhci_set_power_and_bus_voltage(struct sdhci_host *host,
2113 unsigned char mode,
2114 unsigned short vdd)
2115{
2116 if (!IS_ERR(host->mmc->supply.vmmc)) {
2117 struct mmc_host *mmc = host->mmc;
2118
2119 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
2120 }
2121 sdhci_set_power_noreg(host, mode, vdd);
2122}
2123EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage);
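/*
 * Wiring the helper above up is just a matter of pointing the ops at it.
 * A hypothetical glue driver (a sketch, not taken from any real driver)
 * would do:
 *
 *	static const struct sdhci_ops my_sdhci_ops = {
 *		.set_clock		= sdhci_set_clock,
 *		.set_power		= sdhci_set_power_and_bus_voltage,
 *		.set_bus_width		= sdhci_set_bus_width,
 *		.reset			= sdhci_reset,
 *		.set_uhs_signaling	= sdhci_set_uhs_signaling,
 *	};
 */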
2124
2125/*****************************************************************************\
2126 * *
2127 * MMC callbacks *
2128 * *
2129\*****************************************************************************/
2130
2131void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
2132{
2133 struct sdhci_host *host = mmc_priv(mmc);
2134 struct mmc_command *cmd;
2135 unsigned long flags;
2136 bool present;
2137
2138 /* Firstly check card presence */
2139 present = mmc->ops->get_cd(mmc);
2140
2141 spin_lock_irqsave(&host->lock, flags);
2142
2143 sdhci_led_activate(host);
2144
2145 if (sdhci_present_error(host, mrq->cmd, present))
2146 goto out_finish;
2147
2148 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
2149
2150 if (!sdhci_send_command_retry(host, cmd, flags))
2151 goto out_finish;
2152
2153 spin_unlock_irqrestore(&host->lock, flags);
2154
2155 return;
2156
2157out_finish:
2158 sdhci_finish_mrq(host, mrq);
2159 spin_unlock_irqrestore(&host->lock, flags);
2160}
2161EXPORT_SYMBOL_GPL(sdhci_request);
2162
2163int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq)
2164{
2165 struct sdhci_host *host = mmc_priv(mmc);
2166 struct mmc_command *cmd;
2167 unsigned long flags;
2168 int ret = 0;
2169
2170 spin_lock_irqsave(&host->lock, flags);
2171
2172 if (sdhci_present_error(host, mrq->cmd, true)) {
2173 sdhci_finish_mrq(host, mrq);
2174 goto out_finish;
2175 }
2176
2177 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
2178
2179 /*
 2180 * HSQ may send a command in interrupt context without polling
 2181 * the busy signaling, so return -EBUSY if the controller has not
 2182 * released the inhibit bits; that lets HSQ retry the request in
 2183 * non-atomic context. Therefore do not finish this request
 2184 * here.
2185 */
2186 if (!sdhci_send_command(host, cmd))
2187 ret = -EBUSY;
2188 else
2189 sdhci_led_activate(host);
2190
2191out_finish:
2192 spin_unlock_irqrestore(&host->lock, flags);
2193 return ret;
2194}
2195EXPORT_SYMBOL_GPL(sdhci_request_atomic);
2196
2197void sdhci_set_bus_width(struct sdhci_host *host, int width)
2198{
2199 u8 ctrl;
2200
2201 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2202 if (width == MMC_BUS_WIDTH_8) {
2203 ctrl &= ~SDHCI_CTRL_4BITBUS;
2204 ctrl |= SDHCI_CTRL_8BITBUS;
2205 } else {
2206 if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
2207 ctrl &= ~SDHCI_CTRL_8BITBUS;
2208 if (width == MMC_BUS_WIDTH_4)
2209 ctrl |= SDHCI_CTRL_4BITBUS;
2210 else
2211 ctrl &= ~SDHCI_CTRL_4BITBUS;
2212 }
2213 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2214}
2215EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
2216
2217void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
2218{
2219 u16 ctrl_2;
2220
2221 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2222 /* Select Bus Speed Mode for host */
2223 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
2224 if ((timing == MMC_TIMING_MMC_HS200) ||
2225 (timing == MMC_TIMING_UHS_SDR104))
2226 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
2227 else if (timing == MMC_TIMING_UHS_SDR12)
2228 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
2229 else if (timing == MMC_TIMING_UHS_SDR25)
2230 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
2231 else if (timing == MMC_TIMING_UHS_SDR50)
2232 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
2233 else if ((timing == MMC_TIMING_UHS_DDR50) ||
2234 (timing == MMC_TIMING_MMC_DDR52))
2235 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
2236 else if (timing == MMC_TIMING_MMC_HS400)
2237 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
2238 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2239}
2240EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
2241
2242void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
2243{
2244 struct sdhci_host *host = mmc_priv(mmc);
2245 u8 ctrl;
2246
2247 if (ios->power_mode == MMC_POWER_UNDEFINED)
2248 return;
2249
2250 if (host->flags & SDHCI_DEVICE_DEAD) {
2251 if (!IS_ERR(mmc->supply.vmmc) &&
2252 ios->power_mode == MMC_POWER_OFF)
2253 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
2254 return;
2255 }
2256
2257 /*
2258 * Reset the chip on each power off.
2259 * Should clear out any weird states.
2260 */
2261 if (ios->power_mode == MMC_POWER_OFF) {
2262 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2263 sdhci_reinit(host);
2264 }
2265
2266 if (host->version >= SDHCI_SPEC_300 &&
2267 (ios->power_mode == MMC_POWER_UP) &&
2268 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
2269 sdhci_enable_preset_value(host, false);
2270
2271 if (!ios->clock || ios->clock != host->clock) {
2272 host->ops->set_clock(host, ios->clock);
2273 host->clock = ios->clock;
2274
2275 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
2276 host->clock) {
2277 host->timeout_clk = mmc->actual_clock ?
2278 mmc->actual_clock / 1000 :
2279 host->clock / 1000;
2280 mmc->max_busy_timeout =
2281 host->ops->get_max_timeout_count ?
2282 host->ops->get_max_timeout_count(host) :
2283 1 << 27;
2284 mmc->max_busy_timeout /= host->timeout_clk;
2285 }
2286 }
2287
2288 if (host->ops->set_power)
2289 host->ops->set_power(host, ios->power_mode, ios->vdd);
2290 else
2291 sdhci_set_power(host, ios->power_mode, ios->vdd);
2292
2293 if (host->ops->platform_send_init_74_clocks)
2294 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
2295
2296 host->ops->set_bus_width(host, ios->bus_width);
2297
2298 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2299
2300 if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
2301 if (ios->timing == MMC_TIMING_SD_HS ||
2302 ios->timing == MMC_TIMING_MMC_HS ||
2303 ios->timing == MMC_TIMING_MMC_HS400 ||
2304 ios->timing == MMC_TIMING_MMC_HS200 ||
2305 ios->timing == MMC_TIMING_MMC_DDR52 ||
2306 ios->timing == MMC_TIMING_UHS_SDR50 ||
2307 ios->timing == MMC_TIMING_UHS_SDR104 ||
2308 ios->timing == MMC_TIMING_UHS_DDR50 ||
2309 ios->timing == MMC_TIMING_UHS_SDR25)
2310 ctrl |= SDHCI_CTRL_HISPD;
2311 else
2312 ctrl &= ~SDHCI_CTRL_HISPD;
2313 }
2314
2315 if (host->version >= SDHCI_SPEC_300) {
2316 u16 clk, ctrl_2;
2317
2318 if (!host->preset_enabled) {
2319 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2320 /*
2321 * We only need to set Driver Strength if the
2322 * preset value enable is not set.
2323 */
2324 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2325 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
2326 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
2327 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
2328 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
2329 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
2330 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
2331 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
2332 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
2333 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
2334 else {
2335 pr_warn("%s: invalid driver type, default to driver type B\n",
2336 mmc_hostname(mmc));
2337 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
2338 }
2339
2340 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2341 } else {
2342 /*
2343 * According to SDHC Spec v3.00, if the Preset Value
2344 * Enable in the Host Control 2 register is set, we
2345 * need to reset SD Clock Enable before changing High
 2346 * Speed Enable to avoid generating clock glitches.
2347 */
2348
2349 /* Reset SD Clock Enable */
2350 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2351 clk &= ~SDHCI_CLOCK_CARD_EN;
2352 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2353
2354 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2355
2356 /* Re-enable SD Clock */
2357 host->ops->set_clock(host, host->clock);
2358 }
2359
2360 /* Reset SD Clock Enable */
2361 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2362 clk &= ~SDHCI_CLOCK_CARD_EN;
2363 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2364
2365 host->ops->set_uhs_signaling(host, ios->timing);
2366 host->timing = ios->timing;
2367
2368 if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
2369 ((ios->timing == MMC_TIMING_UHS_SDR12) ||
2370 (ios->timing == MMC_TIMING_UHS_SDR25) ||
2371 (ios->timing == MMC_TIMING_UHS_SDR50) ||
2372 (ios->timing == MMC_TIMING_UHS_SDR104) ||
2373 (ios->timing == MMC_TIMING_UHS_DDR50) ||
2374 (ios->timing == MMC_TIMING_MMC_DDR52))) {
2375 u16 preset;
2376
2377 sdhci_enable_preset_value(host, true);
2378 preset = sdhci_get_preset_value(host);
2379 ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK,
2380 preset);
2381 }
2382
2383 /* Re-enable SD Clock */
2384 host->ops->set_clock(host, host->clock);
2385 } else
2386 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2387
2388 /*
 2389 * Some (ENE) controllers misbehave badly on some ios operations,
2390 * signalling timeout and CRC errors even on CMD0. Resetting
2391 * it on each ios seems to solve the problem.
2392 */
2393 if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
2394 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
2395}
2396EXPORT_SYMBOL_GPL(sdhci_set_ios);
2397
2398static int sdhci_get_cd(struct mmc_host *mmc)
2399{
2400 struct sdhci_host *host = mmc_priv(mmc);
2401 int gpio_cd = mmc_gpio_get_cd(mmc);
2402
2403 if (host->flags & SDHCI_DEVICE_DEAD)
2404 return 0;
2405
2406 /* If nonremovable, assume that the card is always present. */
2407 if (!mmc_card_is_removable(mmc))
2408 return 1;
2409
2410 /*
 2411 * Try slot GPIO detect; if defined, it takes precedence
 2412 * over the built-in controller functionality
2413 */
2414 if (gpio_cd >= 0)
2415 return !!gpio_cd;
2416
2417 /* If polling, assume that the card is always present. */
2418 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
2419 return 1;
2420
2421 /* Host native card detect */
2422 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
2423}
2424
2425static int sdhci_check_ro(struct sdhci_host *host)
2426{
2427 unsigned long flags;
2428 int is_readonly;
2429
2430 spin_lock_irqsave(&host->lock, flags);
2431
2432 if (host->flags & SDHCI_DEVICE_DEAD)
2433 is_readonly = 0;
2434 else if (host->ops->get_ro)
2435 is_readonly = host->ops->get_ro(host);
2436 else if (mmc_can_gpio_ro(host->mmc))
2437 is_readonly = mmc_gpio_get_ro(host->mmc);
2438 else
2439 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
2440 & SDHCI_WRITE_PROTECT);
2441
2442 spin_unlock_irqrestore(&host->lock, flags);
2443
 2444 /* This quirk needs to be replaced by a callback function later */
2445 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
2446 !is_readonly : is_readonly;
2447}
2448
2449#define SAMPLE_COUNT 5
2450
2451static int sdhci_get_ro(struct mmc_host *mmc)
2452{
2453 struct sdhci_host *host = mmc_priv(mmc);
2454 int i, ro_count;
2455
2456 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
2457 return sdhci_check_ro(host);
2458
2459 ro_count = 0;
2460 for (i = 0; i < SAMPLE_COUNT; i++) {
2461 if (sdhci_check_ro(host)) {
2462 if (++ro_count > SAMPLE_COUNT / 2)
2463 return 1;
2464 }
2465 msleep(30);
2466 }
2467 return 0;
2468}
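/*
 * With SAMPLE_COUNT == 5 the loop above is a majority vote: the card is
 * reported read-only only when more than SAMPLE_COUNT / 2 = 2, i.e. at
 * least 3, of the samples (taken 30 ms apart) read back write-protected.
 */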
2469
2470static void sdhci_hw_reset(struct mmc_host *mmc)
2471{
2472 struct sdhci_host *host = mmc_priv(mmc);
2473
2474 if (host->ops && host->ops->hw_reset)
2475 host->ops->hw_reset(host);
2476}
2477
2478static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
2479{
2480 if (!(host->flags & SDHCI_DEVICE_DEAD)) {
2481 if (enable)
2482 host->ier |= SDHCI_INT_CARD_INT;
2483 else
2484 host->ier &= ~SDHCI_INT_CARD_INT;
2485
2486 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2487 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2488 }
2489}
2490
2491void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
2492{
2493 struct sdhci_host *host = mmc_priv(mmc);
2494 unsigned long flags;
2495
2496 if (enable)
2497 pm_runtime_get_noresume(mmc_dev(mmc));
2498
2499 spin_lock_irqsave(&host->lock, flags);
2500 sdhci_enable_sdio_irq_nolock(host, enable);
2501 spin_unlock_irqrestore(&host->lock, flags);
2502
2503 if (!enable)
2504 pm_runtime_put_noidle(mmc_dev(mmc));
2505}
2506EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
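/*
 * The pm_runtime_get_noresume()/pm_runtime_put_noidle() pair above only
 * adjusts the runtime PM usage count, so the host cannot runtime-suspend
 * while SDIO interrupts are enabled; it deliberately avoids triggering a
 * resume or an idle transition from this path.
 */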
2507
2508static void sdhci_ack_sdio_irq(struct mmc_host *mmc)
2509{
2510 struct sdhci_host *host = mmc_priv(mmc);
2511 unsigned long flags;
2512
2513 spin_lock_irqsave(&host->lock, flags);
2514 sdhci_enable_sdio_irq_nolock(host, true);
2515 spin_unlock_irqrestore(&host->lock, flags);
2516}
2517
2518int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
2519 struct mmc_ios *ios)
2520{
2521 struct sdhci_host *host = mmc_priv(mmc);
2522 u16 ctrl;
2523 int ret;
2524
2525 /*
2526 * Signal Voltage Switching is only applicable for Host Controllers
2527 * v3.00 and above.
2528 */
2529 if (host->version < SDHCI_SPEC_300)
2530 return 0;
2531
2532 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2533
2534 switch (ios->signal_voltage) {
2535 case MMC_SIGNAL_VOLTAGE_330:
2536 if (!(host->flags & SDHCI_SIGNALING_330))
2537 return -EINVAL;
2538 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
2539 ctrl &= ~SDHCI_CTRL_VDD_180;
2540 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2541
2542 if (!IS_ERR(mmc->supply.vqmmc)) {
2543 ret = mmc_regulator_set_vqmmc(mmc, ios);
2544 if (ret < 0) {
2545 pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
2546 mmc_hostname(mmc));
2547 return -EIO;
2548 }
2549 }
2550 /* Wait for 5ms */
2551 usleep_range(5000, 5500);
2552
2553 /* 3.3V regulator output should be stable within 5 ms */
2554 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2555 if (!(ctrl & SDHCI_CTRL_VDD_180))
2556 return 0;
2557
2558 pr_warn("%s: 3.3V regulator output did not become stable\n",
2559 mmc_hostname(mmc));
2560
2561 return -EAGAIN;
2562 case MMC_SIGNAL_VOLTAGE_180:
2563 if (!(host->flags & SDHCI_SIGNALING_180))
2564 return -EINVAL;
2565 if (!IS_ERR(mmc->supply.vqmmc)) {
2566 ret = mmc_regulator_set_vqmmc(mmc, ios);
2567 if (ret < 0) {
2568 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
2569 mmc_hostname(mmc));
2570 return -EIO;
2571 }
2572 }
2573
2574 /*
2575 * Enable 1.8V Signal Enable in the Host Control2
2576 * register
2577 */
2578 ctrl |= SDHCI_CTRL_VDD_180;
2579 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2580
 2581 /* Some controllers need to do more when switching */
2582 if (host->ops->voltage_switch)
2583 host->ops->voltage_switch(host);
2584
2585 /* 1.8V regulator output should be stable within 5 ms */
2586 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2587 if (ctrl & SDHCI_CTRL_VDD_180)
2588 return 0;
2589
2590 pr_warn("%s: 1.8V regulator output did not become stable\n",
2591 mmc_hostname(mmc));
2592
2593 return -EAGAIN;
2594 case MMC_SIGNAL_VOLTAGE_120:
2595 if (!(host->flags & SDHCI_SIGNALING_120))
2596 return -EINVAL;
2597 if (!IS_ERR(mmc->supply.vqmmc)) {
2598 ret = mmc_regulator_set_vqmmc(mmc, ios);
2599 if (ret < 0) {
2600 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
2601 mmc_hostname(mmc));
2602 return -EIO;
2603 }
2604 }
2605 return 0;
2606 default:
2607 /* No signal voltage switch required */
2608 return 0;
2609 }
2610}
2611EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
2612
2613static int sdhci_card_busy(struct mmc_host *mmc)
2614{
2615 struct sdhci_host *host = mmc_priv(mmc);
2616 u32 present_state;
2617
2618 /* Check whether DAT[0] is 0 */
2619 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
2620
2621 return !(present_state & SDHCI_DATA_0_LVL_MASK);
2622}
2623
2624static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
2625{
2626 struct sdhci_host *host = mmc_priv(mmc);
2627 unsigned long flags;
2628
2629 spin_lock_irqsave(&host->lock, flags);
2630 host->flags |= SDHCI_HS400_TUNING;
2631 spin_unlock_irqrestore(&host->lock, flags);
2632
2633 return 0;
2634}
2635
2636void sdhci_start_tuning(struct sdhci_host *host)
2637{
2638 u16 ctrl;
2639
2640 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2641 ctrl |= SDHCI_CTRL_EXEC_TUNING;
2642 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
2643 ctrl |= SDHCI_CTRL_TUNED_CLK;
2644 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2645
2646 /*
 2647 * As per the Host Controller spec v3.00, the tuning command
2648 * generates Buffer Read Ready interrupt, so enable that.
2649 *
2650 * Note: The spec clearly says that when tuning sequence
2651 * is being performed, the controller does not generate
2652 * interrupts other than Buffer Read Ready interrupt. But
2653 * to make sure we don't hit a controller bug, we _only_
2654 * enable Buffer Read Ready interrupt here.
2655 */
2656 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
2657 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
2658}
2659EXPORT_SYMBOL_GPL(sdhci_start_tuning);
2660
2661void sdhci_end_tuning(struct sdhci_host *host)
2662{
2663 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2664 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2665}
2666EXPORT_SYMBOL_GPL(sdhci_end_tuning);
2667
2668void sdhci_reset_tuning(struct sdhci_host *host)
2669{
2670 u16 ctrl;
2671
2672 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2673 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2674 ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2675 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2676}
2677EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
2678
2679void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2680{
2681 sdhci_reset_tuning(host);
2682
2683 sdhci_do_reset(host, SDHCI_RESET_CMD);
2684 sdhci_do_reset(host, SDHCI_RESET_DATA);
2685
2686 sdhci_end_tuning(host);
2687
2688 mmc_send_abort_tuning(host->mmc, opcode);
2689}
2690EXPORT_SYMBOL_GPL(sdhci_abort_tuning);
2691
2692/*
 2693 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit: the
 2694 * SDHCI tuning command does not have a data payload (or rather the hardware
 2695 * handles it automatically) so mmc_send_tuning() would return -EIO. Also, the
 2696 * tuning command's interrupt setup is different from other commands and there
 2697 * is no timeout interrupt, so special handling is needed.
2698 */
2699void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2700{
2701 struct mmc_host *mmc = host->mmc;
2702 struct mmc_command cmd = {};
2703 struct mmc_request mrq = {};
2704 unsigned long flags;
2705 u32 b = host->sdma_boundary;
2706
2707 spin_lock_irqsave(&host->lock, flags);
2708
2709 cmd.opcode = opcode;
2710 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2711 cmd.mrq = &mrq;
2712
2713 mrq.cmd = &cmd;
2714 /*
 2715 * In response to CMD19, the card sends a 64-byte tuning
 2716 * block to the Host Controller, so set the block size to 64;
 2717 * for CMD21 on an 8-bit bus the tuning block is 128 bytes.
2718 */
2719 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
2720 mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2721 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
2722 else
2723 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);
2724
2725 /*
2726 * The tuning block is sent by the card to the host controller.
2727 * So we set the TRNS_READ bit in the Transfer Mode register.
2728 * This also takes care of setting DMA Enable and Multi Block
2729 * Select in the same register to 0.
2730 */
2731 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2732
2733 if (!sdhci_send_command_retry(host, &cmd, flags)) {
2734 spin_unlock_irqrestore(&host->lock, flags);
2735 host->tuning_done = 0;
2736 return;
2737 }
2738
2739 host->cmd = NULL;
2740
2741 sdhci_del_timer(host, &mrq);
2742
2743 host->tuning_done = 0;
2744
2745 spin_unlock_irqrestore(&host->lock, flags);
2746
2747 /* Wait for Buffer Read Ready interrupt */
2748 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2749 msecs_to_jiffies(50));
2751}
2752EXPORT_SYMBOL_GPL(sdhci_send_tuning);
2753
2754static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2755{
2756 int i;
2757
2758 /*
 2759 * Issue the opcode repeatedly until Execute Tuning is cleared to 0 or
 2760 * the number of loops reaches the tuning loop count.
2761 */
2762 for (i = 0; i < host->tuning_loop_count; i++) {
2763 u16 ctrl;
2764
2765 sdhci_send_tuning(host, opcode);
2766
2767 if (!host->tuning_done) {
2768 pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
2769 mmc_hostname(host->mmc));
2770 sdhci_abort_tuning(host, opcode);
2771 return -ETIMEDOUT;
2772 }
2773
2774 /* Spec does not require a delay between tuning cycles */
2775 if (host->tuning_delay > 0)
2776 mdelay(host->tuning_delay);
2777
2778 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2779 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2780 if (ctrl & SDHCI_CTRL_TUNED_CLK)
2781 return 0; /* Success! */
2782 break;
2783 }
 2785 }
2786
2787 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2788 mmc_hostname(host->mmc));
2789 sdhci_reset_tuning(host);
2790 return -EAGAIN;
2791}
2792
2793int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2794{
2795 struct sdhci_host *host = mmc_priv(mmc);
2796 int err = 0;
2797 unsigned int tuning_count = 0;
2798 bool hs400_tuning;
2799
2800 hs400_tuning = host->flags & SDHCI_HS400_TUNING;
2801
2802 if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2803 tuning_count = host->tuning_count;
2804
2805 /*
 2806 * The Host Controller needs tuning in the SDR104 and DDR50
 2807 * modes, and in SDR50 mode when Use Tuning for SDR50 is set in
 2808 * the Capabilities register.
 2809 * If the Host Controller supports HS200 mode then the
 2810 * tuning function has to be executed.
2811 */
2812 switch (host->timing) {
2813 /* HS400 tuning is done in HS200 mode */
2814 case MMC_TIMING_MMC_HS400:
2815 err = -EINVAL;
2816 goto out;
2817
2818 case MMC_TIMING_MMC_HS200:
2819 /*
2820 * Periodic re-tuning for HS400 is not expected to be needed, so
2821 * disable it here.
2822 */
2823 if (hs400_tuning)
2824 tuning_count = 0;
2825 break;
2826
2827 case MMC_TIMING_UHS_SDR104:
2828 case MMC_TIMING_UHS_DDR50:
2829 break;
2830
2831 case MMC_TIMING_UHS_SDR50:
2832 if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2833 break;
2834 fallthrough;
2835
2836 default:
2837 goto out;
2838 }
2839
2840 if (host->ops->platform_execute_tuning) {
2841 err = host->ops->platform_execute_tuning(host, opcode);
2842 goto out;
2843 }
2844
2845 mmc->retune_period = tuning_count;
2846
2847 if (host->tuning_delay < 0)
2848 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
2849
2850 sdhci_start_tuning(host);
2851
2852 host->tuning_err = __sdhci_execute_tuning(host, opcode);
2853
2854 sdhci_end_tuning(host);
2855out:
2856 host->flags &= ~SDHCI_HS400_TUNING;
2857
2858 return err;
2859}
2860EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
2861
2862static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2863{
2864 /* Host Controller v3.00 defines preset value registers */
2865 if (host->version < SDHCI_SPEC_300)
2866 return;
2867
2868 /*
 2869 * Only enable or disable Preset Value if it is not already in the
 2870 * requested state. Otherwise, bail out.
2871 */
2872 if (host->preset_enabled != enable) {
2873 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2874
2875 if (enable)
2876 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2877 else
2878 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2879
2880 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2881
2882 if (enable)
2883 host->flags |= SDHCI_PV_ENABLED;
2884 else
2885 host->flags &= ~SDHCI_PV_ENABLED;
2886
2887 host->preset_enabled = enable;
2888 }
2889}
2890
2891static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2892 int err)
2893{
2894 struct mmc_data *data = mrq->data;
2895
2896 if (data->host_cookie != COOKIE_UNMAPPED)
2897 dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
2898 mmc_get_dma_dir(data));
2899
2900 data->host_cookie = COOKIE_UNMAPPED;
2901}
2902
2903static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2904{
2905 struct sdhci_host *host = mmc_priv(mmc);
2906
2907 mrq->data->host_cookie = COOKIE_UNMAPPED;
2908
2909 /*
 2910 * No pre-mapping in the pre hook if we're using the bounce buffer:
 2911 * that would require two bounce buffers, since one buffer is still
 2912 * in flight when this gets called.
2913 */
2914 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
2915 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2916}
2917
2918static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
2919{
2920 if (host->data_cmd) {
2921 host->data_cmd->error = err;
2922 sdhci_finish_mrq(host, host->data_cmd->mrq);
2923 }
2924
2925 if (host->cmd) {
2926 host->cmd->error = err;
2927 sdhci_finish_mrq(host, host->cmd->mrq);
2928 }
2929}
2930
2931static void sdhci_card_event(struct mmc_host *mmc)
2932{
2933 struct sdhci_host *host = mmc_priv(mmc);
2934 unsigned long flags;
2935 int present;
2936
2937 /* First check if client has provided their own card event */
2938 if (host->ops->card_event)
2939 host->ops->card_event(host);
2940
2941 present = mmc->ops->get_cd(mmc);
2942
2943 spin_lock_irqsave(&host->lock, flags);
2944
2945 /* Check sdhci_has_requests() first in case we are runtime suspended */
2946 if (sdhci_has_requests(host) && !present) {
2947 pr_err("%s: Card removed during transfer!\n",
2948 mmc_hostname(mmc));
2949 pr_err("%s: Resetting controller.\n",
2950 mmc_hostname(mmc));
2951
2952 sdhci_do_reset(host, SDHCI_RESET_CMD);
2953 sdhci_do_reset(host, SDHCI_RESET_DATA);
2954
2955 sdhci_error_out_mrqs(host, -ENOMEDIUM);
2956 }
2957
2958 spin_unlock_irqrestore(&host->lock, flags);
2959}
2960
2961static const struct mmc_host_ops sdhci_ops = {
2962 .request = sdhci_request,
2963 .post_req = sdhci_post_req,
2964 .pre_req = sdhci_pre_req,
2965 .set_ios = sdhci_set_ios,
2966 .get_cd = sdhci_get_cd,
2967 .get_ro = sdhci_get_ro,
2968 .hw_reset = sdhci_hw_reset,
2969 .enable_sdio_irq = sdhci_enable_sdio_irq,
2970 .ack_sdio_irq = sdhci_ack_sdio_irq,
2971 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
2972 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
2973 .execute_tuning = sdhci_execute_tuning,
2974 .card_event = sdhci_card_event,
2975 .card_busy = sdhci_card_busy,
2976};
2977
2978/*****************************************************************************\
2979 * *
2980 * Request done *
2981 * *
2982\*****************************************************************************/
2983
2984static bool sdhci_request_done(struct sdhci_host *host)
2985{
2986 unsigned long flags;
2987 struct mmc_request *mrq;
2988 int i;
2989
2990 spin_lock_irqsave(&host->lock, flags);
2991
2992 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
2993 mrq = host->mrqs_done[i];
2994 if (mrq)
2995 break;
2996 }
2997
2998 if (!mrq) {
2999 spin_unlock_irqrestore(&host->lock, flags);
3000 return true;
3001 }
3002
3003 /*
3004 * The controller needs a reset of internal state machines
3005 * upon error conditions.
3006 */
3007 if (sdhci_needs_reset(host, mrq)) {
3008 /*
3009 * Do not finish until command and data lines are available for
3010 * reset. Note there can only be one other mrq, so it cannot
3011 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
3012 * would both be null.
3013 */
3014 if (host->cmd || host->data_cmd) {
3015 spin_unlock_irqrestore(&host->lock, flags);
3016 return true;
3017 }
3018
3019 /* Some controllers need this kick or reset won't work here */
3020 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
3021 /* This is to force an update */
3022 host->ops->set_clock(host, host->clock);
3023
3024 /*
3025 * Spec says we should do both at the same time, but Ricoh
3026 * controllers do not like that.
3027 */
3028 sdhci_do_reset(host, SDHCI_RESET_CMD);
3029 sdhci_do_reset(host, SDHCI_RESET_DATA);
3030
3031 host->pending_reset = false;
3032 }
3033
3034 /*
3035 * Always unmap the data buffers if they were mapped by
3036 * sdhci_prepare_data() whenever we finish with a request.
3037 * This avoids leaking DMA mappings on error.
3038 */
3039 if (host->flags & SDHCI_REQ_USE_DMA) {
3040 struct mmc_data *data = mrq->data;
3041
3042 if (host->use_external_dma && data &&
3043 (mrq->cmd->error || data->error)) {
3044 struct dma_chan *chan = sdhci_external_dma_channel(host, data);
3045
3046 host->mrqs_done[i] = NULL;
3047 spin_unlock_irqrestore(&host->lock, flags);
3048 dmaengine_terminate_sync(chan);
3049 spin_lock_irqsave(&host->lock, flags);
3050 sdhci_set_mrq_done(host, mrq);
3051 }
3052
3053 if (data && data->host_cookie == COOKIE_MAPPED) {
3054 if (host->bounce_buffer) {
3055 /*
3056 * On reads, copy the bounced data into the
3057 * sglist
3058 */
3059 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
3060 unsigned int length = data->bytes_xfered;
3061
3062 if (length > host->bounce_buffer_size) {
3063 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
3064 mmc_hostname(host->mmc),
3065 host->bounce_buffer_size,
3066 data->bytes_xfered);
3067 /* Cap it down and continue */
3068 length = host->bounce_buffer_size;
3069 }
3070 dma_sync_single_for_cpu(
3071 mmc_dev(host->mmc),
3072 host->bounce_addr,
3073 host->bounce_buffer_size,
3074 DMA_FROM_DEVICE);
3075 sg_copy_from_buffer(data->sg,
3076 data->sg_len,
3077 host->bounce_buffer,
3078 length);
3079 } else {
3080 /* No copying, just switch ownership */
3081 dma_sync_single_for_cpu(
3082 mmc_dev(host->mmc),
3083 host->bounce_addr,
3084 host->bounce_buffer_size,
3085 mmc_get_dma_dir(data));
3086 }
3087 } else {
3088 /* Unmap the raw data */
3089 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
3090 data->sg_len,
3091 mmc_get_dma_dir(data));
3092 }
3093 data->host_cookie = COOKIE_UNMAPPED;
3094 }
3095 }
3096
3097 host->mrqs_done[i] = NULL;
3098
3099 spin_unlock_irqrestore(&host->lock, flags);
3100
3101 if (host->ops->request_done)
3102 host->ops->request_done(host, mrq);
3103 else
3104 mmc_request_done(host->mmc, mrq);
3105
3106 return false;
3107}
3108
3109static void sdhci_complete_work(struct work_struct *work)
3110{
3111 struct sdhci_host *host = container_of(work, struct sdhci_host,
3112 complete_work);
3113
3114 while (!sdhci_request_done(host))
3115 ;
3116}
3117
3118static void sdhci_timeout_timer(struct timer_list *t)
3119{
3120 struct sdhci_host *host;
3121 unsigned long flags;
3122
3123 host = from_timer(host, t, timer);
3124
3125 spin_lock_irqsave(&host->lock, flags);
3126
3127 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
3128 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
3129 mmc_hostname(host->mmc));
3130 sdhci_dumpregs(host);
3131
3132 host->cmd->error = -ETIMEDOUT;
3133 sdhci_finish_mrq(host, host->cmd->mrq);
3134 }
3135
3136 spin_unlock_irqrestore(&host->lock, flags);
3137}
3138
3139static void sdhci_timeout_data_timer(struct timer_list *t)
3140{
3141 struct sdhci_host *host;
3142 unsigned long flags;
3143
3144 host = from_timer(host, t, data_timer);
3145
3146 spin_lock_irqsave(&host->lock, flags);
3147
3148 if (host->data || host->data_cmd ||
3149 (host->cmd && sdhci_data_line_cmd(host->cmd))) {
3150 pr_err("%s: Timeout waiting for hardware interrupt.\n",
3151 mmc_hostname(host->mmc));
3152 sdhci_dumpregs(host);
3153
3154 if (host->data) {
3155 host->data->error = -ETIMEDOUT;
3156 __sdhci_finish_data(host, true);
3157 queue_work(host->complete_wq, &host->complete_work);
3158 } else if (host->data_cmd) {
3159 host->data_cmd->error = -ETIMEDOUT;
3160 sdhci_finish_mrq(host, host->data_cmd->mrq);
3161 } else {
3162 host->cmd->error = -ETIMEDOUT;
3163 sdhci_finish_mrq(host, host->cmd->mrq);
3164 }
3165 }
3166
3167 spin_unlock_irqrestore(&host->lock, flags);
3168}
3169
3170/*****************************************************************************\
3171 * *
3172 * Interrupt handling *
3173 * *
3174\*****************************************************************************/
3175
3176static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
3177{
3178 /* Handle auto-CMD12 error */
3179 if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
3180 struct mmc_request *mrq = host->data_cmd->mrq;
3181 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
3182 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
3183 SDHCI_INT_DATA_TIMEOUT :
3184 SDHCI_INT_DATA_CRC;
3185
3186 /* Treat auto-CMD12 error the same as data error */
3187 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
3188 *intmask_p |= data_err_bit;
3189 return;
3190 }
3191 }
3192
3193 if (!host->cmd) {
3194 /*
3195 * SDHCI recovers from errors by resetting the cmd and data
3196 * circuits. Until that is done, there very well might be more
3197 * interrupts, so ignore them in that case.
3198 */
3199 if (host->pending_reset)
3200 return;
3201 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
3202 mmc_hostname(host->mmc), (unsigned)intmask);
3203 sdhci_dumpregs(host);
3204 return;
3205 }
3206
3207 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
3208 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
3209 if (intmask & SDHCI_INT_TIMEOUT)
3210 host->cmd->error = -ETIMEDOUT;
3211 else
3212 host->cmd->error = -EILSEQ;
3213
3214 /* Treat data command CRC error the same as data CRC error */
3215 if (host->cmd->data &&
3216 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
3217 SDHCI_INT_CRC) {
3218 host->cmd = NULL;
3219 *intmask_p |= SDHCI_INT_DATA_CRC;
3220 return;
3221 }
3222
3223 __sdhci_finish_mrq(host, host->cmd->mrq);
3224 return;
3225 }
3226
3227 /* Handle auto-CMD23 error */
3228 if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
3229 struct mmc_request *mrq = host->cmd->mrq;
3230 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
3231 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
3232 -ETIMEDOUT :
3233 -EILSEQ;
3234
3235 if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
3236 mrq->sbc->error = err;
3237 __sdhci_finish_mrq(host, mrq);
3238 return;
3239 }
3240 }
3241
3242 if (intmask & SDHCI_INT_RESPONSE)
3243 sdhci_finish_command(host);
3244}
3245
3246static void sdhci_adma_show_error(struct sdhci_host *host)
3247{
3248 void *desc = host->adma_table;
3249 dma_addr_t dma = host->adma_addr;
3250
3251 sdhci_dumpregs(host);
3252
3253 while (true) {
3254 struct sdhci_adma2_64_desc *dma_desc = desc;
3255
3256 if (host->flags & SDHCI_USE_64_BIT_DMA)
3257 SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
3258 (unsigned long long)dma,
3259 le32_to_cpu(dma_desc->addr_hi),
3260 le32_to_cpu(dma_desc->addr_lo),
3261 le16_to_cpu(dma_desc->len),
3262 le16_to_cpu(dma_desc->cmd));
3263 else
3264 SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
3265 (unsigned long long)dma,
3266 le32_to_cpu(dma_desc->addr_lo),
3267 le16_to_cpu(dma_desc->len),
3268 le16_to_cpu(dma_desc->cmd));
3269
3270 desc += host->desc_sz;
3271 dma += host->desc_sz;
3272
3273 if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
3274 break;
3275 }
3276}
3277
3278static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
3279{
3280 u32 command;
3281
3282 /*
 3283 * CMD19 generates _only_ a Buffer Read Ready interrupt when
 3284 * sdhci_send_tuning() is used.
 3285 * The PIO path with mmc_send_tuning() must be excluded here:
 3286 * otherwise sdhci_transfer_pio() is never called, SDHCI_INT_DATA_AVAIL
 3287 * stays asserted, and the host gets stuck in an interrupt storm.
3288 */
3289 if (intmask & SDHCI_INT_DATA_AVAIL && !host->data) {
3290 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
3291 if (command == MMC_SEND_TUNING_BLOCK ||
3292 command == MMC_SEND_TUNING_BLOCK_HS200) {
3293 host->tuning_done = 1;
3294 wake_up(&host->buf_ready_int);
3295 return;
3296 }
3297 }
3298
3299 if (!host->data) {
3300 struct mmc_command *data_cmd = host->data_cmd;
3301
3302 /*
3303 * The "data complete" interrupt is also used to
3304 * indicate that a busy state has ended. See comment
3305 * above in sdhci_cmd_irq().
3306 */
3307 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
3308 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
3309 host->data_cmd = NULL;
3310 data_cmd->error = -ETIMEDOUT;
3311 __sdhci_finish_mrq(host, data_cmd->mrq);
3312 return;
3313 }
3314 if (intmask & SDHCI_INT_DATA_END) {
3315 host->data_cmd = NULL;
3316 /*
3317 * Some cards handle busy-end interrupt
 3318 * before the command has completed, so make
3319 * sure we do things in the proper order.
3320 */
3321 if (host->cmd == data_cmd)
3322 return;
3323
3324 __sdhci_finish_mrq(host, data_cmd->mrq);
3325 return;
3326 }
3327 }
3328
3329 /*
3330 * SDHCI recovers from errors by resetting the cmd and data
3331 * circuits. Until that is done, there very well might be more
3332 * interrupts, so ignore them in that case.
3333 */
3334 if (host->pending_reset)
3335 return;
3336
3337 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
3338 mmc_hostname(host->mmc), (unsigned)intmask);
3339 sdhci_dumpregs(host);
3340
3341 return;
3342 }
3343
3344 if (intmask & SDHCI_INT_DATA_TIMEOUT)
3345 host->data->error = -ETIMEDOUT;
3346 else if (intmask & SDHCI_INT_DATA_END_BIT)
3347 host->data->error = -EILSEQ;
3348 else if ((intmask & SDHCI_INT_DATA_CRC) &&
3349 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
3350 != MMC_BUS_TEST_R)
3351 host->data->error = -EILSEQ;
3352 else if (intmask & SDHCI_INT_ADMA_ERROR) {
3353 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
3354 intmask);
3355 sdhci_adma_show_error(host);
3356 host->data->error = -EIO;
3357 if (host->ops->adma_workaround)
3358 host->ops->adma_workaround(host, intmask);
3359 }
3360
3361 if (host->data->error)
3362 sdhci_finish_data(host);
3363 else {
3364 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
3365 sdhci_transfer_pio(host);
3366
3367 /*
3368 * We currently don't do anything fancy with DMA
3369 * boundaries, but as we can't disable the feature
3370 * we need to at least restart the transfer.
3371 *
3372 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
3373 * should return a valid address to continue from, but as
3374 * some controllers are faulty, don't trust them.
3375 */
3376 if (intmask & SDHCI_INT_DMA_END) {
3377 dma_addr_t dmastart, dmanow;
3378
3379 dmastart = sdhci_sdma_address(host);
3380 dmanow = dmastart + host->data->bytes_xfered;
3381 /*
3382 * Force update to the next DMA block boundary.
3383 */
3384 dmanow = (dmanow &
3385 ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
3386 SDHCI_DEFAULT_BOUNDARY_SIZE;
3387 host->data->bytes_xfered = dmanow - dmastart;
3388 DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n",
3389 &dmastart, host->data->bytes_xfered, &dmanow);
3390 sdhci_set_sdma_addr(host, dmanow);
3391 }
3392
3393 if (intmask & SDHCI_INT_DATA_END) {
3394 if (host->cmd == host->data_cmd) {
3395 /*
3396 * Data managed to finish before the
3397 * command completed. Make sure we do
3398 * things in the proper order.
3399 */
3400 host->data_early = 1;
3401 } else {
3402 sdhci_finish_data(host);
3403 }
3404 }
3405 }
3406}
3407
3408static inline bool sdhci_defer_done(struct sdhci_host *host,
3409 struct mmc_request *mrq)
3410{
3411 struct mmc_data *data = mrq->data;
3412
3413 return host->pending_reset || host->always_defer_done ||
3414 ((host->flags & SDHCI_REQ_USE_DMA) && data &&
3415 data->host_cookie == COOKIE_MAPPED);
3416}
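/*
 * In other words, completion is pushed out to the IRQ thread whenever a
 * reset is still pending, the host has opted to always defer, or a
 * DMA-mapped request still holds a mapping to be torn down by
 * sdhci_request_done().
 */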
3417
3418static irqreturn_t sdhci_irq(int irq, void *dev_id)
3419{
3420 struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0};
3421 irqreturn_t result = IRQ_NONE;
3422 struct sdhci_host *host = dev_id;
3423 u32 intmask, mask, unexpected = 0;
3424 int max_loops = 16;
3425 int i;
3426
3427 spin_lock(&host->lock);
3428
3429 if (host->runtime_suspended) {
3430 spin_unlock(&host->lock);
3431 return IRQ_NONE;
3432 }
3433
3434 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3435 if (!intmask || intmask == 0xffffffff) {
3436 result = IRQ_NONE;
3437 goto out;
3438 }
3439
3440 do {
3441 DBG("IRQ status 0x%08x\n", intmask);
3442
3443 if (host->ops->irq) {
3444 intmask = host->ops->irq(host, intmask);
3445 if (!intmask)
3446 goto cont;
3447 }
3448
3449 /* Clear selected interrupts. */
3450 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3451 SDHCI_INT_BUS_POWER);
3452 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3453
3454 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3455 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
3456 SDHCI_CARD_PRESENT;
3457
3458 /*
 3459 * There is an observation on i.mx esdhc: the INSERT
 3460 * bit is immediately set again as soon as it is
 3461 * cleared while a card is inserted, so the irq must be
 3462 * masked to prevent an interrupt storm that would
 3463 * freeze the system. REMOVE behaves the same way.
 3464 *
 3465 * More testing is needed here to ensure it works
 3466 * for other platforms though.
3468 */
3469 host->ier &= ~(SDHCI_INT_CARD_INSERT |
3470 SDHCI_INT_CARD_REMOVE);
3471 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
3472 SDHCI_INT_CARD_INSERT;
3473 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3474 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3475
3476 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
3477 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
3478
3479 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
3480 SDHCI_INT_CARD_REMOVE);
3481 result = IRQ_WAKE_THREAD;
3482 }
3483
3484 if (intmask & SDHCI_INT_CMD_MASK)
3485 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
3486
3487 if (intmask & SDHCI_INT_DATA_MASK)
3488 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
3489
3490 if (intmask & SDHCI_INT_BUS_POWER)
3491 pr_err("%s: Card is consuming too much power!\n",
3492 mmc_hostname(host->mmc));
3493
3494 if (intmask & SDHCI_INT_RETUNE)
3495 mmc_retune_needed(host->mmc);
3496
3497 if ((intmask & SDHCI_INT_CARD_INT) &&
3498 (host->ier & SDHCI_INT_CARD_INT)) {
3499 sdhci_enable_sdio_irq_nolock(host, false);
3500 sdio_signal_irq(host->mmc);
3501 }
3502
3503 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
3504 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3505 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
3506 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
3507
3508 if (intmask) {
3509 unexpected |= intmask;
3510 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3511 }
3512cont:
3513 if (result == IRQ_NONE)
3514 result = IRQ_HANDLED;
3515
3516 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3517 } while (intmask && --max_loops);
3518
3519 /* Determine if mrqs can be completed immediately */
3520 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3521 struct mmc_request *mrq = host->mrqs_done[i];
3522
3523 if (!mrq)
3524 continue;
3525
3526 if (sdhci_defer_done(host, mrq)) {
3527 result = IRQ_WAKE_THREAD;
3528 } else {
3529 mrqs_done[i] = mrq;
3530 host->mrqs_done[i] = NULL;
3531 }
3532 }
3533out:
3534 if (host->deferred_cmd)
3535 result = IRQ_WAKE_THREAD;
3536
3537 spin_unlock(&host->lock);
3538
3539 /* Process mrqs ready for immediate completion */
3540 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3541 if (!mrqs_done[i])
3542 continue;
3543
3544 if (host->ops->request_done)
3545 host->ops->request_done(host, mrqs_done[i]);
3546 else
3547 mmc_request_done(host->mmc, mrqs_done[i]);
3548 }
3549
3550 if (unexpected) {
3551 pr_err("%s: Unexpected interrupt 0x%08x.\n",
3552 mmc_hostname(host->mmc), unexpected);
3553 sdhci_dumpregs(host);
3554 }
3555
3556 return result;
3557}
3558
3559static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
3560{
3561 struct sdhci_host *host = dev_id;
3562 struct mmc_command *cmd;
3563 unsigned long flags;
3564 u32 isr;
3565
3566 while (!sdhci_request_done(host))
3567 ;
3568
3569 spin_lock_irqsave(&host->lock, flags);
3570
3571 isr = host->thread_isr;
3572 host->thread_isr = 0;
3573
3574 cmd = host->deferred_cmd;
3575 if (cmd && !sdhci_send_command_retry(host, cmd, flags))
3576 sdhci_finish_mrq(host, cmd->mrq);
3577
3578 spin_unlock_irqrestore(&host->lock, flags);
3579
3580 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3581 struct mmc_host *mmc = host->mmc;
3582
3583 mmc->ops->card_event(mmc);
3584 mmc_detect_change(mmc, msecs_to_jiffies(200));
3585 }
3586
3587 return IRQ_HANDLED;
3588}
3589
3590/*****************************************************************************\
3591 * *
3592 * Suspend/resume *
3593 * *
3594\*****************************************************************************/
3595
3596#ifdef CONFIG_PM
3597
3598static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
3599{
3600 return mmc_card_is_removable(host->mmc) &&
3601 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3602 !mmc_can_gpio_cd(host->mmc);
3603}
3604
3605/*
3606 * To enable wakeup events, the corresponding events have to be enabled in
3607 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
3608 * Table' in the SD Host Controller Standard Specification.
3609 * It is useless to restore SDHCI_INT_ENABLE state in
3610 * sdhci_disable_irq_wakeups() since it will be set by
3611 * sdhci_enable_card_detection() or sdhci_init().
3612 */
3613static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
3614{
3615 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
3616 SDHCI_WAKE_ON_INT;
3617 u32 irq_val = 0;
3618 u8 wake_val = 0;
3619 u8 val;
3620
3621 if (sdhci_cd_irq_can_wakeup(host)) {
3622 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
3623 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
3624 }
3625
3626 if (mmc_card_wake_sdio_irq(host->mmc)) {
3627 wake_val |= SDHCI_WAKE_ON_INT;
3628 irq_val |= SDHCI_INT_CARD_INT;
3629 }
3630
3631 if (!irq_val)
3632 return false;
3633
3634 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3635 val &= ~mask;
3636 val |= wake_val;
3637 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3638
3639 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
3640
3641 host->irq_wake_enabled = !enable_irq_wake(host->irq);
3642
3643 return host->irq_wake_enabled;
3644}
3645
3646static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
3647{
3648 u8 val;
3649 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
3650 | SDHCI_WAKE_ON_INT;
3651
3652 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3653 val &= ~mask;
3654 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3655
3656 disable_irq_wake(host->irq);
3657
3658 host->irq_wake_enabled = false;
3659}
3660
3661int sdhci_suspend_host(struct sdhci_host *host)
3662{
3663 sdhci_disable_card_detection(host);
3664
3665 mmc_retune_timer_stop(host->mmc);
3666
3667 if (!device_may_wakeup(mmc_dev(host->mmc)) ||
3668 !sdhci_enable_irq_wakeups(host)) {
3669 host->ier = 0;
3670 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3671 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3672 free_irq(host->irq, host);
3673 }
3674
3675 return 0;
3676}
3678EXPORT_SYMBOL_GPL(sdhci_suspend_host);
3679
3680int sdhci_resume_host(struct sdhci_host *host)
3681{
3682 struct mmc_host *mmc = host->mmc;
3683 int ret = 0;
3684
3685 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3686 if (host->ops->enable_dma)
3687 host->ops->enable_dma(host);
3688 }
3689
3690 if ((mmc->pm_flags & MMC_PM_KEEP_POWER) &&
3691 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
3692 /* Card keeps power but host controller does not */
3693 sdhci_init(host, 0);
3694 host->pwr = 0;
3695 host->clock = 0;
3696 mmc->ops->set_ios(mmc, &mmc->ios);
3697 } else {
3698 sdhci_init(host, (mmc->pm_flags & MMC_PM_KEEP_POWER));
3699 }
3700
3701 if (host->irq_wake_enabled) {
3702 sdhci_disable_irq_wakeups(host);
3703 } else {
3704 ret = request_threaded_irq(host->irq, sdhci_irq,
3705 sdhci_thread_irq, IRQF_SHARED,
3706 mmc_hostname(mmc), host);
3707 if (ret)
3708 return ret;
3709 }
3710
3711 sdhci_enable_card_detection(host);
3712
3713 return ret;
3714}
3716EXPORT_SYMBOL_GPL(sdhci_resume_host);
3717
3718int sdhci_runtime_suspend_host(struct sdhci_host *host)
3719{
3720 unsigned long flags;
3721
3722 mmc_retune_timer_stop(host->mmc);
3723
3724 spin_lock_irqsave(&host->lock, flags);
3725 host->ier &= SDHCI_INT_CARD_INT;
3726 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3727 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3728 spin_unlock_irqrestore(&host->lock, flags);
3729
3730 synchronize_hardirq(host->irq);
3731
3732 spin_lock_irqsave(&host->lock, flags);
3733 host->runtime_suspended = true;
3734 spin_unlock_irqrestore(&host->lock, flags);
3735
3736 return 0;
3737}
3738EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3739
3740int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
3741{
3742 struct mmc_host *mmc = host->mmc;
3743 unsigned long flags;
3744 int host_flags = host->flags;
3745
3746 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3747 if (host->ops->enable_dma)
3748 host->ops->enable_dma(host);
3749 }
3750
3751 sdhci_init(host, soft_reset);
3752
3753 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
3754 mmc->ios.power_mode != MMC_POWER_OFF) {
3755 /* Force clock and power re-program */
3756 host->pwr = 0;
3757 host->clock = 0;
3758 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
3759 mmc->ops->set_ios(mmc, &mmc->ios);
3760
3761 if ((host_flags & SDHCI_PV_ENABLED) &&
3762 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
3763 spin_lock_irqsave(&host->lock, flags);
3764 sdhci_enable_preset_value(host, true);
3765 spin_unlock_irqrestore(&host->lock, flags);
3766 }
3767
3768 if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
3769 mmc->ops->hs400_enhanced_strobe)
3770 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
3771 }
3772
3773 spin_lock_irqsave(&host->lock, flags);
3774
3775 host->runtime_suspended = false;
3776
3777 /* Enable SDIO IRQ */
3778 if (sdio_irq_claimed(mmc))
3779 sdhci_enable_sdio_irq_nolock(host, true);
3780
3781 /* Enable Card Detection */
3782 sdhci_enable_card_detection(host);
3783
3784 spin_unlock_irqrestore(&host->lock, flags);
3785
3786 return 0;
3787}
3788EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
3789
3790#endif /* CONFIG_PM */
3791
3792/*****************************************************************************\
3793 * *
3794 * Command Queue Engine (CQE) helpers *
3795 * *
3796\*****************************************************************************/
3797
3798void sdhci_cqe_enable(struct mmc_host *mmc)
3799{
3800 struct sdhci_host *host = mmc_priv(mmc);
3801 unsigned long flags;
3802 u8 ctrl;
3803
3804 spin_lock_irqsave(&host->lock, flags);
3805
3806 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
3807 ctrl &= ~SDHCI_CTRL_DMA_MASK;
3808 /*
 3809 * Hosts from v4.10 support the ADMA3 DMA type. ADMA3 uses
 3810 * integrated descriptors that fetch both command and transfer
 3811 * descriptors, which is better suited to command queuing.
3812 */
3813 if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3))
3814 ctrl |= SDHCI_CTRL_ADMA3;
3815 else if (host->flags & SDHCI_USE_64_BIT_DMA)
3816 ctrl |= SDHCI_CTRL_ADMA64;
3817 else
3818 ctrl |= SDHCI_CTRL_ADMA32;
3819 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
3820
3821 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
3822 SDHCI_BLOCK_SIZE);
3823
3824 /* Set maximum timeout */
3825 sdhci_set_timeout(host, NULL);
3826
3827 host->ier = host->cqe_ier;
3828
3829 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3830 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3831
3832 host->cqe_on = true;
3833
3834 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
3835 mmc_hostname(mmc), host->ier,
3836 sdhci_readl(host, SDHCI_INT_STATUS));
3837
3838 spin_unlock_irqrestore(&host->lock, flags);
3839}
3840EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3841
3842void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
3843{
3844 struct sdhci_host *host = mmc_priv(mmc);
3845 unsigned long flags;
3846
3847 spin_lock_irqsave(&host->lock, flags);
3848
3849 sdhci_set_default_irqs(host);
3850
3851 host->cqe_on = false;
3852
3853 if (recovery) {
3854 sdhci_do_reset(host, SDHCI_RESET_CMD);
3855 sdhci_do_reset(host, SDHCI_RESET_DATA);
3856 }
3857
3858 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
3859 mmc_hostname(mmc), host->ier,
3860 sdhci_readl(host, SDHCI_INT_STATUS));
3861
3862 spin_unlock_irqrestore(&host->lock, flags);
3863}
3864EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3865
3866bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
3867 int *data_error)
3868{
3869 u32 mask;
3870
3871 if (!host->cqe_on)
3872 return false;
3873
3874 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
3875 *cmd_error = -EILSEQ;
3876 else if (intmask & SDHCI_INT_TIMEOUT)
3877 *cmd_error = -ETIMEDOUT;
3878 else
3879 *cmd_error = 0;
3880
3881 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
3882 *data_error = -EILSEQ;
3883 else if (intmask & SDHCI_INT_DATA_TIMEOUT)
3884 *data_error = -ETIMEDOUT;
3885 else if (intmask & SDHCI_INT_ADMA_ERROR)
3886 *data_error = -EIO;
3887 else
3888 *data_error = 0;
3889
3890 /* Clear selected interrupts. */
3891 mask = intmask & host->cqe_ier;
3892 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3893
3894 if (intmask & SDHCI_INT_BUS_POWER)
3895 pr_err("%s: Card is consuming too much power!\n",
3896 mmc_hostname(host->mmc));
3897
3898 intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
3899 if (intmask) {
3900 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3901 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
3902 mmc_hostname(host->mmc), intmask);
3903 sdhci_dumpregs(host);
3904 }
3905
3906 return true;
3907}
3908EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
3909
3910/*****************************************************************************\
3911 * *
3912 * Device allocation/registration *
3913 * *
3914\*****************************************************************************/
3915
3916struct sdhci_host *sdhci_alloc_host(struct device *dev,
3917 size_t priv_size)
3918{
3919 struct mmc_host *mmc;
3920 struct sdhci_host *host;
3921
3922 WARN_ON(dev == NULL);
3923
3924 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
3925 if (!mmc)
3926 return ERR_PTR(-ENOMEM);
3927
3928 host = mmc_priv(mmc);
3929 host->mmc = mmc;
3930 host->mmc_host_ops = sdhci_ops;
3931 mmc->ops = &host->mmc_host_ops;
3932
3933 host->flags = SDHCI_SIGNALING_330;
3934
3935 host->cqe_ier = SDHCI_CQE_INT_MASK;
3936 host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
3937
3938 host->tuning_delay = -1;
3939 host->tuning_loop_count = MAX_TUNING_LOOP;
3940
3941 host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
3942
3943 /*
3944 * The DMA table descriptor count is calculated as the maximum
3945 * number of segments times 2, to allow for an alignment
3946 * descriptor for each segment, plus 1 for a nop end descriptor.
3947 */
3948 host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
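	/*
	 * For example, with SDHCI_MAX_SEGS at its usual value of 128
	 * (assuming the header's current definition), this reserves
	 * 128 * 2 + 1 = 257 descriptors.
	 */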
3949
3950 host->max_timeout_count = 0xE;
3951
3952 return host;
3953}
3954
3955EXPORT_SYMBOL_GPL(sdhci_alloc_host);
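/*
 * Typical probe-side usage (a sketch; the private struct is hypothetical),
 * where sdhci_priv() returns the extra priv_size bytes allocated above:
 *
 *	struct my_sdhci_priv { struct clk *clk; };
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(struct my_sdhci_priv));
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *	priv = sdhci_priv(host);
 */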

static int sdhci_set_dma_mask(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct device *dev = mmc_dev(mmc);
	int ret = -EINVAL;

	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_64_BIT_DMA;

	/* Try 64-bit mask if hardware is capable of it */
	if (host->flags & SDHCI_USE_64_BIT_DMA) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
		if (ret) {
			pr_warn("%s: Failed to set 64-bit DMA mask.\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_64_BIT_DMA;
		}
	}

	/* 32-bit mask as default & fallback */
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret)
			pr_warn("%s: Failed to set 32-bit DMA mask.\n",
				mmc_hostname(mmc));
	}

	return ret;
}

void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
		       const u32 *caps, const u32 *caps1)
{
	u16 v;
	u64 dt_caps_mask = 0;
	u64 dt_caps = 0;

	if (host->read_caps)
		return;

	host->read_caps = true;

	if (debug_quirks)
		host->quirks = debug_quirks;

	if (debug_quirks2)
		host->quirks2 = debug_quirks2;

	sdhci_do_reset(host, SDHCI_RESET_ALL);

	if (host->v4_mode)
		sdhci_do_enable_v4_mode(host);

	device_property_read_u64(mmc_dev(host->mmc),
				 "sdhci-caps-mask", &dt_caps_mask);
	device_property_read_u64(mmc_dev(host->mmc),
				 "sdhci-caps", &dt_caps);

	v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
	host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;

	if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
		return;

	if (caps) {
		host->caps = *caps;
	} else {
		host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
		host->caps &= ~lower_32_bits(dt_caps_mask);
		host->caps |= lower_32_bits(dt_caps);
	}

	if (host->version < SDHCI_SPEC_300)
		return;

	if (caps1) {
		host->caps1 = *caps1;
	} else {
		host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
		host->caps1 &= ~upper_32_bits(dt_caps_mask);
		host->caps1 |= upper_32_bits(dt_caps);
	}
}
EXPORT_SYMBOL_GPL(__sdhci_read_caps);
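/*
 * Usage sketch: a platform driver with unreliable capability registers can
 * substitute its own values instead of letting them be read from hardware.
 * The capability bits below are hypothetical, for illustration only:
 *
 *	const u32 caps = SDHCI_CAN_DO_ADMA2 | SDHCI_CAN_VDD_330;
 *
 *	__sdhci_read_caps(host, NULL, &caps, NULL);
 */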

static void sdhci_allocate_bounce_buffer(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	unsigned int max_blocks;
	unsigned int bounce_size;
	int ret;

	/*
	 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
	 * has diminishing returns, probably because SD/MMC cards are
	 * usually optimized to handle requests of this size.
	 */
	bounce_size = SZ_64K;
	/*
	 * Shrink the bounce buffer to the maximum request size if that
	 * is smaller; the maximum request size is in turn clamped to
	 * the bounce buffer size below.
	 */
	if (mmc->max_req_size < bounce_size)
		bounce_size = mmc->max_req_size;
	max_blocks = bounce_size / 512;

	/*
	 * When we just support one segment, we can get significant
	 * speedups with the help of a bounce buffer that groups
	 * scattered reads/writes together.
	 */
	host->bounce_buffer = devm_kmalloc(mmc_dev(mmc),
					   bounce_size,
					   GFP_KERNEL);
	if (!host->bounce_buffer) {
		pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
		       mmc_hostname(mmc),
		       bounce_size);
		/*
		 * Exiting with zero here makes sure we proceed with
		 * mmc->max_segs == 1.
		 */
		return;
	}

	host->bounce_addr = dma_map_single(mmc_dev(mmc),
					   host->bounce_buffer,
					   bounce_size,
					   DMA_BIDIRECTIONAL);
	ret = dma_mapping_error(mmc_dev(mmc), host->bounce_addr);
	if (ret) {
		devm_kfree(mmc_dev(mmc), host->bounce_buffer);
		host->bounce_buffer = NULL;
		/* Again fall back to max_segs == 1 */
		return;
	}

	host->bounce_buffer_size = bounce_size;

	/* Lie about this since we're bouncing */
	mmc->max_segs = max_blocks;
	mmc->max_seg_size = bounce_size;
	mmc->max_req_size = bounce_size;

	pr_info("%s: bounce up to %u segments into one, max segment size %u bytes\n",
		mmc_hostname(mmc), max_blocks, bounce_size);
}

static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
{
	/*
	 * According to SD Host Controller spec v4.10, bit[27] added from
	 * version 4.10 in Capabilities Register is used as 64-bit System
	 * Address support for V4 mode.
	 */
	if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
		return host->caps & SDHCI_CAN_64BIT_V4;

	return host->caps & SDHCI_CAN_64BIT;
}

int sdhci_setup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc;
	u32 max_current_caps;
	unsigned int ocr_avail;
	unsigned int override_timeout_clk;
	u32 max_clk;
	int ret = 0;
	bool enable_vqmmc = false;

	WARN_ON(host == NULL);
	if (host == NULL)
		return -EINVAL;

	mmc = host->mmc;

	/*
	 * If there are external regulators, get them. Note this must be done
	 * early before resetting the host and reading the capabilities so that
	 * the host can take the appropriate action if regulators are not
	 * available.
	 */
	if (!mmc->supply.vqmmc) {
		ret = mmc_regulator_get_supply(mmc);
		if (ret)
			return ret;
		enable_vqmmc = true;
	}

	DBG("Version: 0x%08x | Present: 0x%08x\n",
	    sdhci_readw(host, SDHCI_HOST_VERSION),
	    sdhci_readl(host, SDHCI_PRESENT_STATE));
	DBG("Caps: 0x%08x | Caps_1: 0x%08x\n",
	    sdhci_readl(host, SDHCI_CAPABILITIES),
	    sdhci_readl(host, SDHCI_CAPABILITIES_1));

	sdhci_read_caps(host);

	override_timeout_clk = host->timeout_clk;

	if (host->version > SDHCI_SPEC_420) {
		pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
		       mmc_hostname(mmc), host->version);
	}

	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_SDMA;
	else if (!(host->caps & SDHCI_CAN_DO_SDMA))
		DBG("Controller doesn't have SDMA capability\n");
	else
		host->flags |= SDHCI_USE_SDMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
	    (host->flags & SDHCI_USE_SDMA)) {
		DBG("Disabling DMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_SDMA;
	}

	if ((host->version >= SDHCI_SPEC_200) &&
	    (host->caps & SDHCI_CAN_DO_ADMA2))
		host->flags |= SDHCI_USE_ADMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
	    (host->flags & SDHCI_USE_ADMA)) {
		DBG("Disabling ADMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_ADMA;
	}

	if (sdhci_can_64bit_dma(host))
		host->flags |= SDHCI_USE_64_BIT_DMA;

	if (host->use_external_dma) {
		ret = sdhci_external_dma_init(host);
		if (ret == -EPROBE_DEFER)
			goto unreg;
		/*
		 * Fall back to use the DMA/PIO integrated in standard SDHCI
		 * instead of external DMA devices.
		 */
		else if (ret)
			sdhci_switch_external_dma(host, false);
		/* Disable internal DMA sources */
		else
			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
	}

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->set_dma_mask)
			ret = host->ops->set_dma_mask(host);
		else
			ret = sdhci_set_dma_mask(host);

		if (!ret && host->ops->enable_dma)
			ret = host->ops->enable_dma(host);

		if (ret) {
			pr_warn("%s: No suitable DMA available - falling back to PIO\n",
				mmc_hostname(mmc));
			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);

			ret = 0;
		}
	}

	/* SDMA does not support 64-bit DMA unless V4 mode is set */
	if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode)
		host->flags &= ~SDHCI_USE_SDMA;

	if (host->flags & SDHCI_USE_ADMA) {
		dma_addr_t dma;
		void *buf;

		if (!(host->flags & SDHCI_USE_64_BIT_DMA))
			host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ;
		else if (!host->alloc_desc_sz)
			host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);

		host->desc_sz = host->alloc_desc_sz;
		host->adma_table_sz = host->adma_table_cnt * host->desc_sz;

		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
		/*
		 * dma_alloc_coherent() returns zeroed memory, so the
		 * reserved high 32 bits of 128-bit descriptors never
		 * need to be written.
		 */
		buf = dma_alloc_coherent(mmc_dev(mmc),
					 host->align_buffer_sz + host->adma_table_sz,
					 &dma, GFP_KERNEL);
		if (!buf) {
			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
		} else if ((dma + host->align_buffer_sz) &
			   (SDHCI_ADMA2_DESC_ALIGN - 1)) {
			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
					  host->adma_table_sz, buf, dma);
		} else {
			host->align_buffer = buf;
			host->align_addr = dma;

			host->adma_table = buf + host->align_buffer_sz;
			host->adma_addr = dma + host->align_buffer_sz;
		}
	}

	/*
	 * When DMA is in use, the mask was negotiated above, but PIO
	 * has no hardware restriction, so install a permissive 64-bit
	 * mask here in that case.
	 */
	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
		host->dma_mask = DMA_BIT_MASK(64);
		mmc_dev(mmc)->dma_mask = &host->dma_mask;
	}

	if (host->version >= SDHCI_SPEC_300)
		host->max_clk = FIELD_GET(SDHCI_CLOCK_V3_BASE_MASK, host->caps);
	else
		host->max_clk = FIELD_GET(SDHCI_CLOCK_BASE_MASK, host->caps);

	host->max_clk *= 1000000;
	if (host->max_clk == 0 || host->quirks &
			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
		if (!host->ops->get_max_clock) {
			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
			       mmc_hostname(mmc));
			ret = -ENODEV;
			goto undma;
		}
		host->max_clk = host->ops->get_max_clock(host);
	}

	/*
	 * In case of Host Controller v3.00, find out whether clock
	 * multiplier is supported.
	 */
	host->clk_mul = FIELD_GET(SDHCI_CLOCK_MUL_MASK, host->caps1);

	/*
	 * In case the value in Clock Multiplier is 0, then programmable
	 * clock mode is not supported, otherwise the actual clock
	 * multiplier is one more than the value of Clock Multiplier
	 * in the Capabilities Register.
	 */
	if (host->clk_mul)
		host->clk_mul += 1;
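	/*
	 * For example, a Clock Multiplier field of 7 means the
	 * programmable clock generator can run at up to 8 times the
	 * base clock.
	 */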

	/*
	 * Set host parameters.
	 */
	max_clk = host->max_clk;

	if (host->ops->get_min_clock)
		mmc->f_min = host->ops->get_min_clock(host);
	else if (host->version >= SDHCI_SPEC_300) {
		if (host->clk_mul)
			max_clk = host->max_clk * host->clk_mul;
		/*
		 * Divided Clock Mode minimum clock rate is always less than
		 * Programmable Clock Mode minimum clock rate.
		 */
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
	} else
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;

	if (!mmc->f_max || mmc->f_max > max_clk)
		mmc->f_max = max_clk;

	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
		host->timeout_clk = FIELD_GET(SDHCI_TIMEOUT_CLK_MASK, host->caps);

		if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
			host->timeout_clk *= 1000;

		if (host->timeout_clk == 0) {
			if (!host->ops->get_timeout_clock) {
				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
				       mmc_hostname(mmc));
				ret = -ENODEV;
				goto undma;
			}

			host->timeout_clk =
				DIV_ROUND_UP(host->ops->get_timeout_clock(host),
					     1000);
		}

		if (override_timeout_clk)
			host->timeout_clk = override_timeout_clk;

		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
			host->ops->get_max_timeout_count(host) : 1 << 27;
		mmc->max_busy_timeout /= host->timeout_clk;
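		/*
		 * host->timeout_clk is kept in kHz, so the division
		 * above yields a busy timeout in milliseconds. For
		 * example, assuming a 48 MHz timeout clock
		 * (timeout_clk == 48000) and the default 2^27 cycle
		 * counter: 134217728 / 48000 ~= 2796 ms.
		 */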
	}

	if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
	    !host->ops->get_max_timeout_count)
		mmc->max_busy_timeout = 0;

	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23;
	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
		host->flags |= SDHCI_AUTO_CMD12;

	/*
	 * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO.
	 * For v4 mode, SDMA may use Auto-CMD23 as well.
	 */
	if ((host->version >= SDHCI_SPEC_300) &&
	    ((host->flags & SDHCI_USE_ADMA) ||
	     !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) &&
	    !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
		host->flags |= SDHCI_AUTO_CMD23;
		DBG("Auto-CMD23 available\n");
	} else {
		DBG("Auto-CMD23 unavailable\n");
	}

	/*
	 * A controller may support 8-bit width, but the board itself
	 * might not have the pins brought out. Boards that support
	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
	 * their platform code before calling sdhci_add_host(), and we
	 * won't assume 8-bit width for hosts without that CAP.
	 */
	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
		mmc->caps &= ~MMC_CAP_CMD23;

	if (host->caps & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
	    mmc_card_is_removable(mmc) &&
	    mmc_gpio_get_cd(mmc) < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		if (enable_vqmmc) {
			ret = regulator_enable(mmc->supply.vqmmc);
			host->sdhci_core_to_disable_vqmmc = !ret;
		}

		/* If vqmmc provides no 1.8V signalling, then there's no UHS */
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
						    1950000))
			host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
					 SDHCI_SUPPORT_SDR50 |
					 SDHCI_SUPPORT_DDR50);

		/* In eMMC case vqmmc might be a fixed 1.8V regulator */
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
						    3600000))
			host->flags &= ~SDHCI_SIGNALING_330;

		if (ret) {
			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
				mmc_hostname(mmc), ret);
			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
		}

	}

	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
		host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
				 SDHCI_SUPPORT_DDR50);
		/*
		 * The SDHCI controller in a SoC might support HS200/HS400
		 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property),
		 * but if the board is modeled such that the IO lines are not
		 * connected to 1.8 V then HS200/HS400 cannot be supported.
		 * Disable HS200/HS400 if the board does not have the IO lines
		 * connected to 1.8 V. The same applies to the other 1.8 V
		 * modes.
		 */
		mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
		mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
	}

	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
	if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			   SDHCI_SUPPORT_DDR50))
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

	/* SDR104 support also implies SDR50 support */
	if (host->caps1 & SDHCI_SUPPORT_SDR104) {
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
			mmc->caps2 |= MMC_CAP2_HS200;
	} else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
		mmc->caps |= MMC_CAP_UHS_SDR50;
	}

	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
	    (host->caps1 & SDHCI_SUPPORT_HS400))
		mmc->caps2 |= MMC_CAP2_HS400;

	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
	    (IS_ERR(mmc->supply.vqmmc) ||
	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
					     1300000)))
		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

	if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
		mmc->caps |= MMC_CAP_UHS_DDR50;

	/* Does the host need tuning for SDR50? */
	if (host->caps1 & SDHCI_USE_SDR50_TUNING)
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

	/* Driver Type(s) (A, C, D) supported by the host */
	if (host->caps1 & SDHCI_DRIVER_TYPE_A)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
	if (host->caps1 & SDHCI_DRIVER_TYPE_C)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
	if (host->caps1 & SDHCI_DRIVER_TYPE_D)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;

	/* Initial value for re-tuning timer count */
	host->tuning_count = FIELD_GET(SDHCI_RETUNING_TIMER_COUNT_MASK,
				       host->caps1);

	/*
	 * In case Re-tuning Timer is not disabled, the actual value of
	 * re-tuning timer will be 2 ^ (n - 1).
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);
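	/*
	 * For example, a raw Re-tuning Timer Count of 4 gives a
	 * re-tuning interval of 2^(4 - 1) = 8 (the SDHCI spec defines
	 * the unit as seconds).
	 */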

	/* Re-tuning mode supported by the Host Controller */
	host->tuning_mode = FIELD_GET(SDHCI_RETUNING_MODE_MASK, host->caps1);

	ocr_avail = 0;

	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the Capabilities
	 * register is set. The actual current value is 4 times the register
	 * value.
	 */
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
		int curr = regulator_get_current_limit(mmc->supply.vmmc);
		if (curr > 0) {

			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr / 1000;  /* convert to mA */
			curr = curr / SDHCI_MAX_CURRENT_MULTIPLIER;

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, curr) |
				FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, curr) |
				FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, curr);
		}
	}
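	/*
	 * For example, assuming the regulator reports a 600000 uA limit:
	 * 600000 / 1000 = 600 mA, then 600 / 4 (SDHCI_MAX_CURRENT_MULTIPLIER)
	 * = 150 in register units, replicated into all three voltage fields.
	 */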

	if (host->caps & SDHCI_CAN_VDD_330) {
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

		mmc->max_current_330 = FIELD_GET(SDHCI_MAX_CURRENT_330_MASK,
						 max_current_caps) *
				       SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_300) {
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

		mmc->max_current_300 = FIELD_GET(SDHCI_MAX_CURRENT_300_MASK,
						 max_current_caps) *
				       SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_180) {
		ocr_avail |= MMC_VDD_165_195;

		mmc->max_current_180 = FIELD_GET(SDHCI_MAX_CURRENT_180_MASK,
						 max_current_caps) *
				       SDHCI_MAX_CURRENT_MULTIPLIER;
	}

	/* If OCR set by host, use it instead. */
	if (host->ocr_mask)
		ocr_avail = host->ocr_mask;

	/* If OCR set by external regulators, give it highest prio. */
	if (mmc->ocr_avail)
		ocr_avail = mmc->ocr_avail;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any supported voltages.\n",
		       mmc_hostname(mmc));
		ret = -ENODEV;
		goto unreg;
	}

	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
		host->flags |= SDHCI_SIGNALING_180;

	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
		host->flags |= SDHCI_SIGNALING_120;

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
	 * is less anyway.
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		mmc->max_segs = SDHCI_MAX_SEGS;
	} else if (host->flags & SDHCI_USE_SDMA) {
		mmc->max_segs = 1;
		mmc->max_req_size = min_t(size_t, mmc->max_req_size,
					  dma_max_mapping_size(mmc_dev(mmc)));
	} else { /* PIO */
		mmc->max_segs = SDHCI_MAX_SEGS;
	}

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;
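	/*
	 * The register encodes the block size as a power-of-two shift of
	 * 512: for example, an encoded value of 2 yields 512 << 2 = 2048
	 * bytes, which is why the FORCE_BLK_SZ_2048 quirk stores 2 above.
	 */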

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

	if (mmc->max_segs == 1)
		/* This may alter mmc->*_blk_* parameters */
		sdhci_allocate_bounce_buffer(host);

	return 0;

unreg:
	if (host->sdhci_core_to_disable_vqmmc)
		regulator_disable(mmc->supply.vqmmc);
undma:
	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_setup_host);

void sdhci_cleanup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (host->sdhci_core_to_disable_vqmmc)
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	if (host->use_external_dma)
		sdhci_external_dma_release(host);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_cleanup_host);

int __sdhci_add_host(struct sdhci_host *host)
{
	unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI;
	struct mmc_host *mmc = host->mmc;
	int ret;

	if ((mmc->caps2 & MMC_CAP2_CQE) &&
	    (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) {
		mmc->caps2 &= ~MMC_CAP2_CQE;
		mmc->cqe_ops = NULL;
	}

	host->complete_wq = alloc_workqueue("sdhci", flags, 0);
	if (!host->complete_wq)
		return -ENOMEM;

	INIT_WORK(&host->complete_work, sdhci_complete_work);

	timer_setup(&host->timer, sdhci_timeout_timer, 0);
	timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);

	init_waitqueue_head(&host->buf_ready_int);

	sdhci_init(host, 0);

	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED, mmc_hostname(mmc), host);
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
		goto unwq;
	}

	ret = sdhci_led_register(host);
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
		goto unirq;
	}

	ret = mmc_add_host(mmc);
	if (ret)
		goto unled;

	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		host->use_external_dma ? "External DMA" :
		(host->flags & SDHCI_USE_ADMA) ?
		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

unled:
	sdhci_led_unregister(host);
unirq:
	sdhci_do_reset(host, SDHCI_RESET_ALL);
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);
unwq:
	destroy_workqueue(host->complete_wq);

	return ret;
}
EXPORT_SYMBOL_GPL(__sdhci_add_host);

int sdhci_add_host(struct sdhci_host *host)
{
	int ret;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	sdhci_cleanup_host(host);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_add_host);
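/*
 * Usage sketch: the typical probe path of a platform glue driver
 * (hypothetical my_sdhci_probe, error handling elided):
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(*priv));
 *	(set host->ioaddr, host->irq, host->ops, host->quirks here)
 *	ret = sdhci_add_host(host);
 *	if (ret)
 *		sdhci_free_host(host);
 *
 * Drivers that must adjust parameters between capability parsing and
 * registration instead call sdhci_setup_host(), tweak the host/mmc fields,
 * then __sdhci_add_host(), and use sdhci_cleanup_host() on failure.
 */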

void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (sdhci_has_requests(host)) {
			pr_err("%s: Controller removed during transfer!\n",
			       mmc_hostname(mmc));
			sdhci_error_out_mrqs(host, -ENOMEDIUM);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(mmc);

	sdhci_led_unregister(host);

	if (!dead)
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);
	del_timer_sync(&host->data_timer);

	destroy_workqueue(host->complete_wq);

	if (host->sdhci_core_to_disable_vqmmc)
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	if (host->use_external_dma)
		sdhci_external_dma_release(host);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}

EXPORT_SYMBOL_GPL(sdhci_remove_host);

void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}

EXPORT_SYMBOL_GPL(sdhci_free_host);

/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/

static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");