Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/*
2 * drivers/mmc/host/omap_hsmmc.c
3 *
4 * Driver for OMAP2430/3430 MMC controller.
5 *
6 * Copyright (C) 2007 Texas Instruments.
7 *
8 * Authors:
9 * Syed Mohammed Khasim <x0khasim@ti.com>
10 * Madhusudhan <madhu.cr@ti.com>
11 * Mohit Jalori <mjalori@ti.com>
12 *
13 * This file is licensed under the terms of the GNU General Public License
14 * version 2. This program is licensed "as is" without any warranty of any
15 * kind, whether express or implied.
16 */
17
18#include <linux/module.h>
19#include <linux/init.h>
20#include <linux/kernel.h>
21#include <linux/debugfs.h>
22#include <linux/dmaengine.h>
23#include <linux/seq_file.h>
24#include <linux/sizes.h>
25#include <linux/interrupt.h>
26#include <linux/delay.h>
27#include <linux/dma-mapping.h>
28#include <linux/platform_device.h>
29#include <linux/timer.h>
30#include <linux/clk.h>
31#include <linux/of.h>
32#include <linux/of_irq.h>
33#include <linux/of_gpio.h>
34#include <linux/of_device.h>
35#include <linux/omap-dmaengine.h>
36#include <linux/mmc/host.h>
37#include <linux/mmc/core.h>
38#include <linux/mmc/mmc.h>
39#include <linux/io.h>
40#include <linux/irq.h>
41#include <linux/gpio.h>
42#include <linux/regulator/consumer.h>
43#include <linux/pinctrl/consumer.h>
44#include <linux/pm_runtime.h>
45#include <linux/platform_data/mmc-omap.h>
46
/* OMAP HSMMC Host Controller Registers (byte offsets from host->base) */
#define OMAP_HSMMC_SYSSTATUS 0x0014
#define OMAP_HSMMC_CON 0x002C
#define OMAP_HSMMC_SDMASA 0x0100
#define OMAP_HSMMC_BLK 0x0104
#define OMAP_HSMMC_ARG 0x0108
#define OMAP_HSMMC_CMD 0x010C
#define OMAP_HSMMC_RSP10 0x0110
#define OMAP_HSMMC_RSP32 0x0114
#define OMAP_HSMMC_RSP54 0x0118
#define OMAP_HSMMC_RSP76 0x011C
#define OMAP_HSMMC_DATA 0x0120
#define OMAP_HSMMC_PSTATE 0x0124
#define OMAP_HSMMC_HCTL 0x0128
#define OMAP_HSMMC_SYSCTL 0x012C
#define OMAP_HSMMC_STAT 0x0130
#define OMAP_HSMMC_IE 0x0134
#define OMAP_HSMMC_ISE 0x0138
#define OMAP_HSMMC_AC12 0x013C
#define OMAP_HSMMC_CAPA 0x0140

/* CAPA bits: supported voltages and high-speed capability */
#define VS18 (1 << 26)
#define VS30 (1 << 25)
#define HSS (1 << 21)
/* HCTL bits: SD bus voltage select (SDVS field), bus power (SDBP) */
#define SDVS18 (0x5 << 9)
#define SDVS30 (0x6 << 9)
#define SDVS33 (0x7 << 9)
#define SDVS_MASK 0x00000E00
#define SDVSCLR 0xFFFFF1FF
#define SDVSDET 0x00000400
#define AUTOIDLE 0x1
#define SDBP (1 << 8)
/* SYSCTL bits: data timeout value, internal clock enable/stable, card clock */
#define DTO 0xe
#define ICE 0x1
#define ICS 0x2
#define CEN (1 << 2)
#define CLKD_MAX 0x3FF /* max clock divisor: 1023 */
#define CLKD_MASK 0x0000FFC0
#define CLKD_SHIFT 6
#define DTO_MASK 0x000F0000
#define DTO_SHIFT 16
/* CON bits and CMD-register fields used when building cmdreg */
#define INIT_STREAM (1 << 1)
#define ACEN_ACMD23 (2 << 2)
#define DP_SELECT (1 << 21)
#define DDIR (1 << 4)
#define DMAE 0x1
#define MSBS (1 << 5)
#define BCE (1 << 1)
#define FOUR_BIT (1 << 1)
#define HSPE (1 << 2)
#define IWE (1 << 24)
#define DDR (1 << 19)
#define CLKEXTFREE (1 << 16)
#define CTPL (1 << 11)
#define DW8 (1 << 5)
#define OD 0x1
#define STAT_CLEAR 0xFFFFFFFF
#define INIT_STREAM_CMD 0x00000000
#define DUAL_VOLT_OCR_BIT 7
/* SYSCTL soft-reset bits: command FSM (SRC), data FSM (SRD), full reset */
#define SRC (1 << 25)
#define SRD (1 << 26)
#define SOFTRESET (1 << 1)

/* PSTATE */
#define DLEV_DAT(x) (1 << (20 + (x)))

/* Interrupt masks for IE and ISE register */
#define CC_EN (1 << 0)
#define TC_EN (1 << 1)
#define BWR_EN (1 << 4)
#define BRR_EN (1 << 5)
#define CIRQ_EN (1 << 8)
#define ERR_EN (1 << 15)
#define CTO_EN (1 << 16)
#define CCRC_EN (1 << 17)
#define CEB_EN (1 << 18)
#define CIE_EN (1 << 19)
#define DTO_EN (1 << 20)
#define DCRC_EN (1 << 21)
#define DEB_EN (1 << 22)
#define ACE_EN (1 << 24)
#define CERR_EN (1 << 28)
#define BADA_EN (1 << 29)

#define INT_EN_MASK (BADA_EN | CERR_EN | ACE_EN | DEB_EN | DCRC_EN |\
 DTO_EN | CIE_EN | CEB_EN | CCRC_EN | CTO_EN | \
 BRR_EN | BWR_EN | TC_EN | CC_EN)

/* AC12 register bits: auto CMD12/CMD23 error status */
#define CNI (1 << 7)
#define ACIE (1 << 4)
#define ACEB (1 << 3)
#define ACCE (1 << 2)
#define ACTO (1 << 1)
#define ACNE (1 << 0)

#define MMC_AUTOSUSPEND_DELAY 100
#define MMC_TIMEOUT_MS 20 /* 20 mSec */
#define MMC_TIMEOUT_US 20000 /* 20000 micro Sec */
#define OMAP_MMC_MIN_CLOCK 400000
#define OMAP_MMC_MAX_CLOCK 52000000
#define DRIVER_NAME "omap_hsmmc"

#define VDD_1V8 1800000 /* 180000 uV */
#define VDD_3V0 3000000 /* 300000 uV */
#define VDD_165_195 (ffs(MMC_VDD_165_195) - 1)

/*
 * One controller can have multiple slots, like on some omap boards using
 * omap.c controller driver. Luckily this is not currently done on any known
 * omap_hsmmc.c device.
 */
#define mmc_slot(host) (host->pdata->slots[host->slot_id])

/*
 * MMC Host controller read/write API's
 */
#define OMAP_HSMMC_READ(base, reg) \
 __raw_readl((base) + OMAP_HSMMC_##reg)

#define OMAP_HSMMC_WRITE(base, reg, val) \
 __raw_writel((val), (base) + OMAP_HSMMC_##reg)
168
/* State carried between pre_req/post_req and the actual DMA transfer. */
struct omap_hsmmc_next {
 unsigned int dma_len;
 s32 cookie;
};
173
/* Per-controller driver state; one instance per probed MMC host. */
struct omap_hsmmc_host {
 struct device *dev;
 struct mmc_host *mmc;
 struct mmc_request *mrq; /* in-flight request, NULL when idle */
 struct mmc_command *cmd; /* command awaiting completion */
 struct mmc_data *data; /* data phase of the current request */
 struct clk *fclk;
 struct clk *dbclk;
 /*
 * vcc == configured supply
 * vcc_aux == optional
 * - MMC1, supply for DAT4..DAT7
 * - MMC2/MMC2, external level shifter voltage supply, for
 * chip (SDIO, eMMC, etc) or transceiver (MMC2 only)
 */
 struct regulator *vcc;
 struct regulator *vcc_aux;
 struct regulator *pbias;
 bool pbias_enabled; /* tracks regulator_enable() state of pbias */
 void __iomem *base;
 resource_size_t mapbase;
 spinlock_t irq_lock; /* Prevent races with irq handler */
 unsigned int dma_len;
 unsigned int dma_sg_idx;
 unsigned char bus_mode;
 unsigned char power_mode;
 int suspended;
 /* Register snapshot taken by omap_hsmmc_context_save() and compared
 * in omap_hsmmc_context_restore() to detect context loss. */
 u32 con;
 u32 hctl;
 u32 sysctl;
 u32 capa;
 int irq;
 int wake_irq;
 int use_dma, dma_ch; /* dma_ch == -1 when no DMA transfer active */
 struct dma_chan *tx_chan;
 struct dma_chan *rx_chan;
 int slot_id;
 int response_busy; /* set while waiting on an R1b busy phase */
 int context_loss; /* number of context restores performed */
 int protect_card;
 int reqs_blocked;
 int use_reg;
 int req_in_progress; /* set in start_command, cleared in request_done */
 unsigned long clk_rate;
 unsigned int flags;
#define AUTO_CMD23 (1 << 0) /* Auto CMD23 support */
#define HSMMC_SDIO_IRQ_ENABLED (1 << 1) /* SDIO irq enabled */
#define HSMMC_WAKE_IRQ_ENABLED (1 << 2)
 struct omap_hsmmc_next next_data;
 struct omap_mmc_platform_data *pdata;
};
225
/*
 * Per-compatible data selected via device-tree matching.
 * NOTE(review): the consumers of these fields are outside this chunk —
 * presumably the of_device_id table and probe; verify against the rest
 * of the file.
 */
struct omap_mmc_of_data {
 u32 reg_offset;
 u8 controller_flags;
};

/* Defined later in the file; needed by omap_hsmmc_cmd_done(). */
static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host);
232
233static int omap_hsmmc_card_detect(struct device *dev, int slot)
234{
235 struct omap_hsmmc_host *host = dev_get_drvdata(dev);
236 struct omap_mmc_platform_data *mmc = host->pdata;
237
238 /* NOTE: assumes card detect signal is active-low */
239 return !gpio_get_value_cansleep(mmc->slots[0].switch_pin);
240}
241
242static int omap_hsmmc_get_wp(struct device *dev, int slot)
243{
244 struct omap_hsmmc_host *host = dev_get_drvdata(dev);
245 struct omap_mmc_platform_data *mmc = host->pdata;
246
247 /* NOTE: assumes write protect signal is active-high */
248 return gpio_get_value_cansleep(mmc->slots[0].gpio_wp);
249}
250
251static int omap_hsmmc_get_cover_state(struct device *dev, int slot)
252{
253 struct omap_hsmmc_host *host = dev_get_drvdata(dev);
254 struct omap_mmc_platform_data *mmc = host->pdata;
255
256 /* NOTE: assumes card detect signal is active-low */
257 return !gpio_get_value_cansleep(mmc->slots[0].switch_pin);
258}
259
260#ifdef CONFIG_PM
261
262static int omap_hsmmc_suspend_cdirq(struct device *dev, int slot)
263{
264 struct omap_hsmmc_host *host = dev_get_drvdata(dev);
265 struct omap_mmc_platform_data *mmc = host->pdata;
266
267 disable_irq(mmc->slots[0].card_detect_irq);
268 return 0;
269}
270
271static int omap_hsmmc_resume_cdirq(struct device *dev, int slot)
272{
273 struct omap_hsmmc_host *host = dev_get_drvdata(dev);
274 struct omap_mmc_platform_data *mmc = host->pdata;
275
276 enable_irq(mmc->slots[0].card_detect_irq);
277 return 0;
278}
279
280#else
281
282#define omap_hsmmc_suspend_cdirq NULL
283#define omap_hsmmc_resume_cdirq NULL
284
285#endif
286
287#ifdef CONFIG_REGULATOR
288
/*
 * Slot set_power callback: sequence the card supply (vcc), the optional
 * interface supply (vcc_aux) and the OMAP PBIAS cell for a power on/off
 * transition.  Returns 0 or a negative regulator-API error.
 *
 * Note: a pbias voltage-set failure jumps to error_set_power and so also
 * skips the after_set_reg() board hook.
 */
static int omap_hsmmc_set_power(struct device *dev, int slot, int power_on,
 int vdd)
{
 struct omap_hsmmc_host *host =
 platform_get_drvdata(to_platform_device(dev));
 int ret = 0;

 /*
 * If we don't see a Vcc regulator, assume it's a fixed
 * voltage always-on regulator.
 */
 if (!host->vcc)
 return 0;

 if (mmc_slot(host).before_set_reg)
 mmc_slot(host).before_set_reg(dev, slot, power_on, vdd);

 /*
 * Disable PBIAS and park it at 3.0V before touching the supplies;
 * it is reprogrammed and re-enabled for the target voltage below.
 */
 if (host->pbias) {
 if (host->pbias_enabled == 1) {
 ret = regulator_disable(host->pbias);
 if (!ret)
 host->pbias_enabled = 0;
 }
 regulator_set_voltage(host->pbias, VDD_3V0, VDD_3V0);
 }

 /*
 * Assume Vcc regulator is used only to power the card ... OMAP
 * VDDS is used to power the pins, optionally with a transceiver to
 * support cards using voltages other than VDDS (1.8V nominal). When a
 * transceiver is used, DAT3..7 are muxed as transceiver control pins.
 *
 * In some cases this regulator won't support enable/disable;
 * e.g. it's a fixed rail for a WLAN chip.
 *
 * In other cases vcc_aux switches interface power. Example, for
 * eMMC cards it represents VccQ. Sometimes transceivers or SDIO
 * chips/cards need an interface voltage rail too.
 */
 if (power_on) {
 if (host->vcc)
 ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
 /* Enable interface voltage rail, if needed */
 if (ret == 0 && host->vcc_aux) {
 ret = regulator_enable(host->vcc_aux);
 /* Roll back vcc if the aux rail failed to come up */
 if (ret < 0 && host->vcc)
 ret = mmc_regulator_set_ocr(host->mmc,
 host->vcc, 0);
 }
 } else {
 /* Shut down the rail */
 if (host->vcc_aux)
 ret = regulator_disable(host->vcc_aux);
 if (host->vcc) {
 /* Then proceed to shut down the local regulator */
 ret = mmc_regulator_set_ocr(host->mmc,
 host->vcc, 0);
 }
 }

 /* Retarget and re-enable PBIAS for the requested card voltage */
 if (host->pbias) {
 if (vdd <= VDD_165_195)
 ret = regulator_set_voltage(host->pbias, VDD_1V8,
 VDD_1V8);
 else
 ret = regulator_set_voltage(host->pbias, VDD_3V0,
 VDD_3V0);
 if (ret < 0)
 goto error_set_power;

 if (host->pbias_enabled == 0) {
 ret = regulator_enable(host->pbias);
 if (!ret)
 host->pbias_enabled = 1;
 }
 }

 if (mmc_slot(host).after_set_reg)
 mmc_slot(host).after_set_reg(dev, slot, power_on, vdd);

error_set_power:
 return ret;
}
372
/*
 * Look up the regulators backing this slot (vmmc mandatory, vmmc_aux and
 * pbias optional), derive/validate the slot OCR mask from vmmc, install
 * omap_hsmmc_set_power() as the slot power callback and, unless the slot
 * is marked no_regulator_off_init, cycle boot-on supplies off once.
 * Returns 0 on success or a negative errno.
 */
static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
{
 struct regulator *reg;
 int ocr_value = 0;

 reg = devm_regulator_get(host->dev, "vmmc");
 if (IS_ERR(reg)) {
 dev_err(host->dev, "unable to get vmmc regulator %ld\n",
 PTR_ERR(reg));
 return PTR_ERR(reg);
 } else {
 host->vcc = reg;
 ocr_value = mmc_regulator_get_ocrmask(reg);
 /* Board-supplied mask, if any, must agree with the regulator */
 if (!mmc_slot(host).ocr_mask) {
 mmc_slot(host).ocr_mask = ocr_value;
 } else {
 if (!(mmc_slot(host).ocr_mask & ocr_value)) {
 dev_err(host->dev, "ocrmask %x is not supported\n",
 mmc_slot(host).ocr_mask);
 mmc_slot(host).ocr_mask = 0;
 return -EINVAL;
 }
 }
 }
 mmc_slot(host).set_power = omap_hsmmc_set_power;

 /* Allow an aux regulator */
 reg = devm_regulator_get_optional(host->dev, "vmmc_aux");
 host->vcc_aux = IS_ERR(reg) ? NULL : reg;

 reg = devm_regulator_get_optional(host->dev, "pbias");
 host->pbias = IS_ERR(reg) ? NULL : reg;

 /* For eMMC do not power off when not in sleep state */
 if (mmc_slot(host).no_regulator_off_init)
 return 0;
 /*
 * To disable boot_on regulator, enable regulator
 * to increase usecount and then disable it.
 */
 if ((host->vcc && regulator_is_enabled(host->vcc) > 0) ||
 (host->vcc_aux && regulator_is_enabled(host->vcc_aux))) {
 int vdd = ffs(mmc_slot(host).ocr_mask) - 1;

 mmc_slot(host).set_power(host->dev, host->slot_id, 1, vdd);
 mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0);
 }

 return 0;
}
423
424static void omap_hsmmc_reg_put(struct omap_hsmmc_host *host)
425{
426 mmc_slot(host).set_power = NULL;
427}
428
/* Regulator framework is built in: regulator-based power control is used. */
static inline int omap_hsmmc_have_reg(void)
{
 return 1;
}
433
#else

/*
 * CONFIG_REGULATOR disabled: no regulator-based power control is
 * available, so these stubs make the probe path fall back to whatever
 * the platform data provides.
 */
static inline int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
{
 return -EINVAL;
}

static inline void omap_hsmmc_reg_put(struct omap_hsmmc_host *host)
{
}

static inline int omap_hsmmc_have_reg(void)
{
 return 0;
}

#endif
451
452static int omap_hsmmc_gpio_init(struct omap_mmc_platform_data *pdata)
453{
454 int ret;
455
456 if (gpio_is_valid(pdata->slots[0].switch_pin)) {
457 if (pdata->slots[0].cover)
458 pdata->slots[0].get_cover_state =
459 omap_hsmmc_get_cover_state;
460 else
461 pdata->slots[0].card_detect = omap_hsmmc_card_detect;
462 pdata->slots[0].card_detect_irq =
463 gpio_to_irq(pdata->slots[0].switch_pin);
464 ret = gpio_request(pdata->slots[0].switch_pin, "mmc_cd");
465 if (ret)
466 return ret;
467 ret = gpio_direction_input(pdata->slots[0].switch_pin);
468 if (ret)
469 goto err_free_sp;
470 } else
471 pdata->slots[0].switch_pin = -EINVAL;
472
473 if (gpio_is_valid(pdata->slots[0].gpio_wp)) {
474 pdata->slots[0].get_ro = omap_hsmmc_get_wp;
475 ret = gpio_request(pdata->slots[0].gpio_wp, "mmc_wp");
476 if (ret)
477 goto err_free_cd;
478 ret = gpio_direction_input(pdata->slots[0].gpio_wp);
479 if (ret)
480 goto err_free_wp;
481 } else
482 pdata->slots[0].gpio_wp = -EINVAL;
483
484 return 0;
485
486err_free_wp:
487 gpio_free(pdata->slots[0].gpio_wp);
488err_free_cd:
489 if (gpio_is_valid(pdata->slots[0].switch_pin))
490err_free_sp:
491 gpio_free(pdata->slots[0].switch_pin);
492 return ret;
493}
494
/* Release the slot-0 GPIOs claimed by omap_hsmmc_gpio_init(). */
static void omap_hsmmc_gpio_free(struct omap_mmc_platform_data *pdata)
{
 if (gpio_is_valid(pdata->slots[0].gpio_wp))
 gpio_free(pdata->slots[0].gpio_wp);
 if (gpio_is_valid(pdata->slots[0].switch_pin))
 gpio_free(pdata->slots[0].switch_pin);
}
502
503/*
504 * Start clock to the card
505 */
506static void omap_hsmmc_start_clock(struct omap_hsmmc_host *host)
507{
508 OMAP_HSMMC_WRITE(host->base, SYSCTL,
509 OMAP_HSMMC_READ(host->base, SYSCTL) | CEN);
510}
511
512/*
513 * Stop clock to the card
514 */
515static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host)
516{
517 OMAP_HSMMC_WRITE(host->base, SYSCTL,
518 OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN);
519 if ((OMAP_HSMMC_READ(host->base, SYSCTL) & CEN) != 0x0)
520 dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stopped\n");
521}
522
/*
 * Program the interrupt status-enable (ISE) and signal-enable (IE)
 * registers for the command about to be issued.
 *
 * ISE is deliberately written *without* CIRQ so a card interrupt still
 * latches in STAT during the transfer; IE then re-adds CIRQ (when SDIO
 * irqs are enabled) so the latched interrupt is signalled.
 */
static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host,
 struct mmc_command *cmd)
{
 u32 irq_mask = INT_EN_MASK;
 unsigned long flags;

 /* DMA feeds the FIFO, so buffer-ready interrupts are not needed */
 if (host->use_dma)
 irq_mask &= ~(BRR_EN | BWR_EN);

 /* Disable timeout for erases */
 if (cmd->opcode == MMC_ERASE)
 irq_mask &= ~DTO_EN;

 spin_lock_irqsave(&host->irq_lock, flags);
 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
 OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);

 /* latch pending CIRQ, but don't signal MMC core */
 if (host->flags & HSMMC_SDIO_IRQ_ENABLED)
 irq_mask |= CIRQ_EN;
 OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
 spin_unlock_irqrestore(&host->irq_lock, flags);
}
546
/*
 * Mask all transfer interrupts once no request is in flight, keeping
 * only CIRQ alive when SDIO irqs are enabled, and clear stale status.
 */
static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host)
{
 u32 irq_mask = 0;
 unsigned long flags;

 spin_lock_irqsave(&host->irq_lock, flags);
 /* no transfer running but need to keep cirq if enabled */
 if (host->flags & HSMMC_SDIO_IRQ_ENABLED)
 irq_mask |= CIRQ_EN;
 OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
 OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
 spin_unlock_irqrestore(&host->irq_lock, flags);
}
561
562/* Calculate divisor for the given clock frequency */
563static u16 calc_divisor(struct omap_hsmmc_host *host, struct mmc_ios *ios)
564{
565 u16 dsor = 0;
566
567 if (ios->clock) {
568 dsor = DIV_ROUND_UP(clk_get_rate(host->fclk), ios->clock);
569 if (dsor > CLKD_MAX)
570 dsor = CLKD_MAX;
571 }
572
573 return dsor;
574}
575
576static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host)
577{
578 struct mmc_ios *ios = &host->mmc->ios;
579 unsigned long regval;
580 unsigned long timeout;
581 unsigned long clkdiv;
582
583 dev_vdbg(mmc_dev(host->mmc), "Set clock to %uHz\n", ios->clock);
584
585 omap_hsmmc_stop_clock(host);
586
587 regval = OMAP_HSMMC_READ(host->base, SYSCTL);
588 regval = regval & ~(CLKD_MASK | DTO_MASK);
589 clkdiv = calc_divisor(host, ios);
590 regval = regval | (clkdiv << 6) | (DTO << 16);
591 OMAP_HSMMC_WRITE(host->base, SYSCTL, regval);
592 OMAP_HSMMC_WRITE(host->base, SYSCTL,
593 OMAP_HSMMC_READ(host->base, SYSCTL) | ICE);
594
595 /* Wait till the ICS bit is set */
596 timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
597 while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS
598 && time_before(jiffies, timeout))
599 cpu_relax();
600
601 /*
602 * Enable High-Speed Support
603 * Pre-Requisites
604 * - Controller should support High-Speed-Enable Bit
605 * - Controller should not be using DDR Mode
606 * - Controller should advertise that it supports High Speed
607 * in capabilities register
608 * - MMC/SD clock coming out of controller > 25MHz
609 */
610 if ((mmc_slot(host).features & HSMMC_HAS_HSPE_SUPPORT) &&
611 (ios->timing != MMC_TIMING_MMC_DDR52) &&
612 ((OMAP_HSMMC_READ(host->base, CAPA) & HSS) == HSS)) {
613 regval = OMAP_HSMMC_READ(host->base, HCTL);
614 if (clkdiv && (clk_get_rate(host->fclk)/clkdiv) > 25000000)
615 regval |= HSPE;
616 else
617 regval &= ~HSPE;
618
619 OMAP_HSMMC_WRITE(host->base, HCTL, regval);
620 }
621
622 omap_hsmmc_start_clock(host);
623}
624
625static void omap_hsmmc_set_bus_width(struct omap_hsmmc_host *host)
626{
627 struct mmc_ios *ios = &host->mmc->ios;
628 u32 con;
629
630 con = OMAP_HSMMC_READ(host->base, CON);
631 if (ios->timing == MMC_TIMING_MMC_DDR52)
632 con |= DDR; /* configure in DDR mode */
633 else
634 con &= ~DDR;
635 switch (ios->bus_width) {
636 case MMC_BUS_WIDTH_8:
637 OMAP_HSMMC_WRITE(host->base, CON, con | DW8);
638 break;
639 case MMC_BUS_WIDTH_4:
640 OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
641 OMAP_HSMMC_WRITE(host->base, HCTL,
642 OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT);
643 break;
644 case MMC_BUS_WIDTH_1:
645 OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
646 OMAP_HSMMC_WRITE(host->base, HCTL,
647 OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT);
648 break;
649 }
650}
651
652static void omap_hsmmc_set_bus_mode(struct omap_hsmmc_host *host)
653{
654 struct mmc_ios *ios = &host->mmc->ios;
655 u32 con;
656
657 con = OMAP_HSMMC_READ(host->base, CON);
658 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
659 OMAP_HSMMC_WRITE(host->base, CON, con | OD);
660 else
661 OMAP_HSMMC_WRITE(host->base, CON, con & ~OD);
662}
663
664#ifdef CONFIG_PM
665
/*
 * Restore the MMC host context, if it was lost as result of a
 * power state change.
 *
 * The registers snapshotted by omap_hsmmc_context_save() are compared
 * first; if they still match, nothing was lost and this is a no-op.
 * Otherwise voltage capabilities, bus power, interrupt masks and the
 * bus width/clock/mode are reprogrammed.  Always returns 0.
 */
static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
{
 struct mmc_ios *ios = &host->mmc->ios;
 u32 hctl, capa;
 unsigned long timeout;

 if (host->con == OMAP_HSMMC_READ(host->base, CON) &&
 host->hctl == OMAP_HSMMC_READ(host->base, HCTL) &&
 host->sysctl == OMAP_HSMMC_READ(host->base, SYSCTL) &&
 host->capa == OMAP_HSMMC_READ(host->base, CAPA))
 return 0;

 host->context_loss++;

 /* Pick the SD bus voltage and advertised capabilities */
 if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
 if (host->power_mode != MMC_POWER_OFF &&
 (1 << ios->vdd) <= MMC_VDD_23_24)
 hctl = SDVS18;
 else
 hctl = SDVS30;
 capa = VS30 | VS18;
 } else {
 hctl = SDVS18;
 capa = VS18;
 }

 if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
 hctl |= IWE;

 OMAP_HSMMC_WRITE(host->base, HCTL,
 OMAP_HSMMC_READ(host->base, HCTL) | hctl);

 OMAP_HSMMC_WRITE(host->base, CAPA,
 OMAP_HSMMC_READ(host->base, CAPA) | capa);

 /* Re-enable bus power and wait (bounded) for it to take effect */
 OMAP_HSMMC_WRITE(host->base, HCTL,
 OMAP_HSMMC_READ(host->base, HCTL) | SDBP);

 timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
 while ((OMAP_HSMMC_READ(host->base, HCTL) & SDBP) != SDBP
 && time_before(jiffies, timeout))
 ;

 /* Quiesce interrupts and clear any stale status */
 OMAP_HSMMC_WRITE(host->base, ISE, 0);
 OMAP_HSMMC_WRITE(host->base, IE, 0);
 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);

 /* Do not initialize card-specific things if the power is off */
 if (host->power_mode == MMC_POWER_OFF)
 goto out;

 omap_hsmmc_set_bus_width(host);

 omap_hsmmc_set_clock(host);

 omap_hsmmc_set_bus_mode(host);

out:
 dev_dbg(mmc_dev(host->mmc), "context is restored: restore count %d\n",
 host->context_loss);
 return 0;
}
732
733/*
734 * Save the MMC host context (store the number of power state changes so far).
735 */
736static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
737{
738 host->con = OMAP_HSMMC_READ(host->base, CON);
739 host->hctl = OMAP_HSMMC_READ(host->base, HCTL);
740 host->sysctl = OMAP_HSMMC_READ(host->base, SYSCTL);
741 host->capa = OMAP_HSMMC_READ(host->base, CAPA);
742}
743
#else

/* CONFIG_PM disabled: context can never be lost, so these are no-ops. */
static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
{
 return 0;
}

static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
{
}

#endif
756
/*
 * Send init stream sequence to card
 * before sending IDLE command
 *
 * Sets CON.INIT_STREAM, issues a dummy command, then polls STAT for
 * command-complete (bounded by MMC_TIMEOUT_MS) before clearing the
 * init-stream bit and any accumulated status.  Runs with the host IRQ
 * disabled so the handler does not consume the CC status bit.
 */
static void send_init_stream(struct omap_hsmmc_host *host)
{
 int reg = 0;
 unsigned long timeout;

 if (host->protect_card)
 return;

 disable_irq(host->irq);

 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
 OMAP_HSMMC_WRITE(host->base, CON,
 OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM);
 OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD);

 timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
 while ((reg != CC_EN) && time_before(jiffies, timeout))
 reg = OMAP_HSMMC_READ(host->base, STAT) & CC_EN;

 OMAP_HSMMC_WRITE(host->base, CON,
 OMAP_HSMMC_READ(host->base, CON) & ~INIT_STREAM);

 /* Clear status; the extra read flushes the posted write */
 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
 OMAP_HSMMC_READ(host->base, STAT);

 enable_irq(host->irq);
}
788
789static inline
790int omap_hsmmc_cover_is_closed(struct omap_hsmmc_host *host)
791{
792 int r = 1;
793
794 if (mmc_slot(host).get_cover_state)
795 r = mmc_slot(host).get_cover_state(host->dev, host->slot_id);
796 return r;
797}
798
799static ssize_t
800omap_hsmmc_show_cover_switch(struct device *dev, struct device_attribute *attr,
801 char *buf)
802{
803 struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
804 struct omap_hsmmc_host *host = mmc_priv(mmc);
805
806 return sprintf(buf, "%s\n",
807 omap_hsmmc_cover_is_closed(host) ? "closed" : "open");
808}
809
810static DEVICE_ATTR(cover_switch, S_IRUGO, omap_hsmmc_show_cover_switch, NULL);
811
812static ssize_t
813omap_hsmmc_show_slot_name(struct device *dev, struct device_attribute *attr,
814 char *buf)
815{
816 struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
817 struct omap_hsmmc_host *host = mmc_priv(mmc);
818
819 return sprintf(buf, "%s\n", mmc_slot(host).name);
820}
821
822static DEVICE_ATTR(slot_name, S_IRUGO, omap_hsmmc_show_slot_name, NULL);
823
/*
 * Configure the response type and send the cmd.
 *
 * Builds the CMD register image (opcode, response type, command type,
 * data-present/direction, DMA enable, auto-CMD23), enables the matching
 * interrupts, and writes ARG then CMD to kick the controller.  Marks the
 * request in progress; completion is signalled from the IRQ path.
 */
static void
omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
 struct mmc_data *data)
{
 int cmdreg = 0, resptype = 0, cmdtype = 0;

 dev_vdbg(mmc_dev(host->mmc), "%s: CMD%d, argument 0x%08x\n",
 mmc_hostname(host->mmc), cmd->opcode, cmd->arg);
 host->cmd = cmd;

 omap_hsmmc_enable_irq(host, cmd);

 host->response_busy = 0;
 if (cmd->flags & MMC_RSP_PRESENT) {
 if (cmd->flags & MMC_RSP_136)
 resptype = 1;
 else if (cmd->flags & MMC_RSP_BUSY) {
 /* R1b: track the busy phase so TC is waited for too */
 resptype = 3;
 host->response_busy = 1;
 } else
 resptype = 2;
 }

 /*
 * Unlike OMAP1 controller, the cmdtype does not seem to be based on
 * ac, bc, adtc, bcr. Only commands ending an open ended transfer need
 * a val of 0x3, rest 0x0.
 */
 if (cmd == host->mrq->stop)
 cmdtype = 0x3;

 cmdreg = (cmd->opcode << 24) | (resptype << 16) | (cmdtype << 22);

 /* Auto-CMD23: the block count argument goes into SDMASA */
 if ((host->flags & AUTO_CMD23) && mmc_op_multi(cmd->opcode) &&
 host->mrq->sbc) {
 cmdreg |= ACEN_ACMD23;
 OMAP_HSMMC_WRITE(host->base, SDMASA, host->mrq->sbc->arg);
 }
 if (data) {
 cmdreg |= DP_SELECT | MSBS | BCE;
 if (data->flags & MMC_DATA_READ)
 cmdreg |= DDIR;
 else
 cmdreg &= ~(DDIR);
 }

 if (host->use_dma)
 cmdreg |= DMAE;

 host->req_in_progress = 1;

 /* ARG must be programmed before CMD triggers the command */
 OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg);
 OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
}
881
882static int
883omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
884{
885 if (data->flags & MMC_DATA_WRITE)
886 return DMA_TO_DEVICE;
887 else
888 return DMA_FROM_DEVICE;
889}
890
891static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host,
892 struct mmc_data *data)
893{
894 return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
895}
896
/*
 * Finish a request: clear the in-progress flag (under irq_lock to
 * serialise with the irq handler), mask interrupts, and hand the
 * request back to the MMC core — unless DMA is still draining, in
 * which case the DMA completion path will finish the request instead.
 */
static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
{
 int dma_ch;
 unsigned long flags;

 spin_lock_irqsave(&host->irq_lock, flags);
 host->req_in_progress = 0;
 dma_ch = host->dma_ch;
 spin_unlock_irqrestore(&host->irq_lock, flags);

 omap_hsmmc_disable_irq(host);
 /* Do not complete the request if DMA is still in progress */
 if (mrq->data && host->use_dma && dma_ch != -1)
 return;
 host->mrq = NULL;
 mmc_request_done(host->mmc, mrq);
}
914
/*
 * Notify the transfer complete to MMC core
 *
 * Called on TC.  With no data phase this is the end of an R1b busy wait
 * (or the stray CMD6 quirk below); otherwise account the transferred
 * bytes and either issue the stop command or complete the request.
 */
static void
omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
{
 if (!data) {
 struct mmc_request *mrq = host->mrq;

 /* TC before CC from CMD6 - don't know why, but it happens */
 if (host->cmd && host->cmd->opcode == 6 &&
 host->response_busy) {
 host->response_busy = 0;
 return;
 }

 omap_hsmmc_request_done(host, mrq);
 return;
 }

 host->data = NULL;

 if (!data->error)
 data->bytes_xfered += data->blocks * (data->blksz);
 else
 data->bytes_xfered = 0;

 /* Software stop is needed on error or when auto-CMD23 isn't used */
 if (data->stop && (data->error || !host->mrq->sbc))
 omap_hsmmc_start_command(host, data->stop, NULL);
 else
 omap_hsmmc_request_done(host, data->mrq);
}
947
/*
 * Notify the core about command completion
 *
 * If the completed command was a software-issued CMD23 (sbc), chain the
 * actual data command next.  Otherwise copy the response registers into
 * the command and, when nothing further is pending, finish the request.
 */
static void
omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
{
 if (host->mrq->sbc && (host->cmd == host->mrq->sbc) &&
 !host->mrq->sbc->error && !(host->flags & AUTO_CMD23)) {
 host->cmd = NULL;
 omap_hsmmc_start_dma_transfer(host);
 omap_hsmmc_start_command(host, host->mrq->cmd,
 host->mrq->data);
 return;
 }

 host->cmd = NULL;

 if (cmd->flags & MMC_RSP_PRESENT) {
 if (cmd->flags & MMC_RSP_136) {
 /* response type 2 */
 cmd->resp[3] = OMAP_HSMMC_READ(host->base, RSP10);
 cmd->resp[2] = OMAP_HSMMC_READ(host->base, RSP32);
 cmd->resp[1] = OMAP_HSMMC_READ(host->base, RSP54);
 cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP76);
 } else {
 /* response types 1, 1b, 3, 4, 5, 6 */
 cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10);
 }
 }
 /* Done only when no data phase and no busy signalling remain */
 if ((host->data == NULL && !host->response_busy) || cmd->error)
 omap_hsmmc_request_done(host, host->mrq);
}
980
/*
 * DMA clean up for command errors
 *
 * Records errno on the data, atomically claims the DMA channel (dma_ch
 * is reset to -1 under irq_lock so the completion path won't also free
 * it), then terminates the transfer and unmaps the scatterlist.
 */
static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
{
 int dma_ch;
 unsigned long flags;

 host->data->error = errno;

 spin_lock_irqsave(&host->irq_lock, flags);
 dma_ch = host->dma_ch;
 host->dma_ch = -1;
 spin_unlock_irqrestore(&host->irq_lock, flags);

 if (host->use_dma && dma_ch != -1) {
 struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data);

 dmaengine_terminate_all(chan);
 dma_unmap_sg(chan->device->dev,
 host->data->sg, host->data->sg_len,
 omap_hsmmc_get_dma_dir(host, host->data));

 host->data->host_cookie = 0;
 }
 host->data = NULL;
}
1008
1009/*
1010 * Readable error output
1011 */
1012#ifdef CONFIG_MMC_DEBUG
1013static void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, u32 status)
1014{
1015 /* --- means reserved bit without definition at documentation */
1016 static const char *omap_hsmmc_status_bits[] = {
1017 "CC" , "TC" , "BGE", "---", "BWR" , "BRR" , "---" , "---" ,
1018 "CIRQ", "OBI" , "---", "---", "---" , "---" , "---" , "ERRI",
1019 "CTO" , "CCRC", "CEB", "CIE", "DTO" , "DCRC", "DEB" , "---" ,
1020 "ACE" , "---" , "---", "---", "CERR", "BADA", "---" , "---"
1021 };
1022 char res[256];
1023 char *buf = res;
1024 int len, i;
1025
1026 len = sprintf(buf, "MMC IRQ 0x%x :", status);
1027 buf += len;
1028
1029 for (i = 0; i < ARRAY_SIZE(omap_hsmmc_status_bits); i++)
1030 if (status & (1 << i)) {
1031 len = sprintf(buf, " %s", omap_hsmmc_status_bits[i]);
1032 buf += len;
1033 }
1034
1035 dev_vdbg(mmc_dev(host->mmc), "%s\n", res);
1036}
#else
/* Debug reporting compiled out when CONFIG_MMC_DEBUG is unset. */
static inline void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host,
 u32 status)
{
}
#endif /* CONFIG_MMC_DEBUG */
1043
/*
 * MMC controller internal state machines reset
 *
 * Used to reset command or data internal state machines, using respectively
 * SRC or SRD bit of SYSCTL register
 * Can be called from interrupt context
 *
 * Both polls are bounded by MMC_TIMEOUT_US microsecond-granularity
 * iterations; a stuck reset bit is reported but not otherwise handled.
 */
static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
 unsigned long bit)
{
 unsigned long i = 0;
 unsigned long limit = MMC_TIMEOUT_US;

 OMAP_HSMMC_WRITE(host->base, SYSCTL,
 OMAP_HSMMC_READ(host->base, SYSCTL) | bit);

 /*
 * OMAP4 ES2 and greater has an updated reset logic.
 * Monitor a 0->1 transition first
 */
 if (mmc_slot(host).features & HSMMC_HAS_UPDATED_RESET) {
 while ((!(OMAP_HSMMC_READ(host->base, SYSCTL) & bit))
 && (i++ < limit))
 udelay(1);
 }
 i = 0;

 /* Then wait for the hardware to clear the bit when reset completes */
 while ((OMAP_HSMMC_READ(host->base, SYSCTL) & bit) &&
 (i++ < limit))
 udelay(1);

 if (OMAP_HSMMC_READ(host->base, SYSCTL) & bit)
 dev_err(mmc_dev(host->mmc),
 "Timeout waiting on controller reset in %s\n",
 __func__);
}
1080
/*
 * Record a failed command/data phase: reset the command FSM when the
 * error ended the command (end_cmd), and reset the data FSM plus tear
 * down DMA when a data phase was active; otherwise mark the request's
 * command with the error.
 */
static void hsmmc_command_incomplete(struct omap_hsmmc_host *host,
 int err, int end_cmd)
{
 if (end_cmd) {
 omap_hsmmc_reset_controller_fsm(host, SRC);
 if (host->cmd)
 host->cmd->error = err;
 }

 if (host->data) {
 omap_hsmmc_reset_controller_fsm(host, SRD);
 omap_hsmmc_dma_cleanup(host, err);
 } else if (host->mrq && host->mrq->cmd)
 host->mrq->cmd->error = err;
}
1096
/*
 * Handle one STAT snapshot for an active request: classify errors
 * (timeout/CRC on command or data, auto-CMD12/23 faults), acknowledge
 * the status bits, and dispatch command/transfer completion.
 */
static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
{
 struct mmc_data *data;
 int end_cmd = 0, end_trans = 0;
 int error = 0;

 data = host->data;
 dev_vdbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);

 if (status & ERR_EN) {
 omap_hsmmc_dbg_report_irq(host, status);

 /* Command timeout / command CRC ends the command phase */
 if (status & (CTO_EN | CCRC_EN))
 end_cmd = 1;
 if (status & (CTO_EN | DTO_EN))
 hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd);
 else if (status & (CCRC_EN | DCRC_EN))
 hsmmc_command_incomplete(host, -EILSEQ, end_cmd);

 /* Auto-CMD12/23 error: inspect AC12 for the precise cause */
 if (status & ACE_EN) {
 u32 ac12;
 ac12 = OMAP_HSMMC_READ(host->base, AC12);
 if (!(ac12 & ACNE) && host->mrq->sbc) {
 end_cmd = 1;
 if (ac12 & ACTO)
 error = -ETIMEDOUT;
 else if (ac12 & (ACCE | ACEB | ACIE))
 error = -EILSEQ;
 host->mrq->sbc->error = error;
 hsmmc_command_incomplete(host, error, end_cmd);
 }
 dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12);
 }
 if (host->data || host->response_busy) {
 end_trans = !end_cmd;
 host->response_busy = 0;
 }
 }

 /* Acknowledge the handled bits before signalling completions */
 OMAP_HSMMC_WRITE(host->base, STAT, status);
 if (end_cmd || ((status & CC_EN) && host->cmd))
 omap_hsmmc_cmd_done(host, host->cmd);
 if ((end_trans || (status & TC_EN)) && host->mrq)
 omap_hsmmc_xfer_done(host, data);
}
1142
1143/*
1144 * MMC controller IRQ handler
1145 */
1146static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
1147{
1148 struct omap_hsmmc_host *host = dev_id;
1149 int status;
1150
1151 status = OMAP_HSMMC_READ(host->base, STAT);
1152 while (status & (INT_EN_MASK | CIRQ_EN)) {
1153 if (host->req_in_progress)
1154 omap_hsmmc_do_irq(host, status);
1155
1156 if (status & CIRQ_EN)
1157 mmc_signal_sdio_irq(host->mmc);
1158
1159 /* Flush posted write */
1160 status = OMAP_HSMMC_READ(host->base, STAT);
1161 }
1162
1163 return IRQ_HANDLED;
1164}
1165
1166static irqreturn_t omap_hsmmc_wake_irq(int irq, void *dev_id)
1167{
1168 struct omap_hsmmc_host *host = dev_id;
1169
1170 /* cirq is level triggered, disable to avoid infinite loop */
1171 spin_lock(&host->irq_lock);
1172 if (host->flags & HSMMC_WAKE_IRQ_ENABLED) {
1173 disable_irq_nosync(host->wake_irq);
1174 host->flags &= ~HSMMC_WAKE_IRQ_ENABLED;
1175 }
1176 spin_unlock(&host->irq_lock);
1177 pm_request_resume(host->dev); /* no use counter */
1178
1179 return IRQ_HANDLED;
1180}
1181
1182static void set_sd_bus_power(struct omap_hsmmc_host *host)
1183{
1184 unsigned long i;
1185
1186 OMAP_HSMMC_WRITE(host->base, HCTL,
1187 OMAP_HSMMC_READ(host->base, HCTL) | SDBP);
1188 for (i = 0; i < loops_per_jiffy; i++) {
1189 if (OMAP_HSMMC_READ(host->base, HCTL) & SDBP)
1190 break;
1191 cpu_relax();
1192 }
1193}
1194
1195/*
1196 * Switch MMC interface voltage ... only relevant for MMC1.
1197 *
1198 * MMC2 and MMC3 use fixed 1.8V levels, and maybe a transceiver.
1199 * The MMC2 transceiver controls are used instead of DAT4..DAT7.
1200 * Some chips, like eMMC ones, use internal transceivers.
1201 */
1202static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
1203{
1204 u32 reg_val = 0;
1205 int ret;
1206
1207 /* Disable the clocks */
1208 pm_runtime_put_sync(host->dev);
1209 if (host->dbclk)
1210 clk_disable_unprepare(host->dbclk);
1211
1212 /* Turn the power off */
1213 ret = mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0);
1214
1215 /* Turn the power ON with given VDD 1.8 or 3.0v */
1216 if (!ret)
1217 ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1,
1218 vdd);
1219 pm_runtime_get_sync(host->dev);
1220 if (host->dbclk)
1221 clk_prepare_enable(host->dbclk);
1222
1223 if (ret != 0)
1224 goto err;
1225
1226 OMAP_HSMMC_WRITE(host->base, HCTL,
1227 OMAP_HSMMC_READ(host->base, HCTL) & SDVSCLR);
1228 reg_val = OMAP_HSMMC_READ(host->base, HCTL);
1229
1230 /*
1231 * If a MMC dual voltage card is detected, the set_ios fn calls
1232 * this fn with VDD bit set for 1.8V. Upon card removal from the
1233 * slot, omap_hsmmc_set_ios sets the VDD back to 3V on MMC_POWER_OFF.
1234 *
1235 * Cope with a bit of slop in the range ... per data sheets:
1236 * - "1.8V" for vdds_mmc1/vdds_mmc1a can be up to 2.45V max,
1237 * but recommended values are 1.71V to 1.89V
1238 * - "3.0V" for vdds_mmc1/vdds_mmc1a can be up to 3.5V max,
1239 * but recommended values are 2.7V to 3.3V
1240 *
1241 * Board setup code shouldn't permit anything very out-of-range.
1242 * TWL4030-family VMMC1 and VSIM regulators are fine (avoiding the
1243 * middle range) but VSIM can't power DAT4..DAT7 at more than 3V.
1244 */
1245 if ((1 << vdd) <= MMC_VDD_23_24)
1246 reg_val |= SDVS18;
1247 else
1248 reg_val |= SDVS30;
1249
1250 OMAP_HSMMC_WRITE(host->base, HCTL, reg_val);
1251 set_sd_bus_power(host);
1252
1253 return 0;
1254err:
1255 dev_err(mmc_dev(host->mmc), "Unable to switch operating voltage\n");
1256 return ret;
1257}
1258
1259/* Protect the card while the cover is open */
1260static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host)
1261{
1262 if (!mmc_slot(host).get_cover_state)
1263 return;
1264
1265 host->reqs_blocked = 0;
1266 if (mmc_slot(host).get_cover_state(host->dev, host->slot_id)) {
1267 if (host->protect_card) {
1268 dev_info(host->dev, "%s: cover is closed, "
1269 "card is now accessible\n",
1270 mmc_hostname(host->mmc));
1271 host->protect_card = 0;
1272 }
1273 } else {
1274 if (!host->protect_card) {
1275 dev_info(host->dev, "%s: cover is open, "
1276 "card is now inaccessible\n",
1277 mmc_hostname(host->mmc));
1278 host->protect_card = 1;
1279 }
1280 }
1281}
1282
1283/*
1284 * irq handler to notify the core about card insertion/removal
1285 */
1286static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id)
1287{
1288 struct omap_hsmmc_host *host = dev_id;
1289 struct omap_mmc_slot_data *slot = &mmc_slot(host);
1290 int carddetect;
1291
1292 sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch");
1293
1294 if (slot->card_detect)
1295 carddetect = slot->card_detect(host->dev, host->slot_id);
1296 else {
1297 omap_hsmmc_protect_card(host);
1298 carddetect = -ENOSYS;
1299 }
1300
1301 if (carddetect)
1302 mmc_detect_change(host->mmc, (HZ * 200) / 1000);
1303 else
1304 mmc_detect_change(host->mmc, (HZ * 50) / 1000);
1305 return IRQ_HANDLED;
1306}
1307
/*
 * dmaengine completion callback: unmap the scatterlist (unless it was
 * pre-mapped via pre_req) and, if the TC interrupt already fired, finish
 * the request. Races with the IRQ path are resolved under irq_lock.
 */
static void omap_hsmmc_dma_callback(void *param)
{
	struct omap_hsmmc_host *host = param;
	struct dma_chan *chan;
	struct mmc_data *data;
	int req_in_progress;

	spin_lock_irq(&host->irq_lock);
	/* dma_ch < 0 means the transfer was already torn down (error path) */
	if (host->dma_ch < 0) {
		spin_unlock_irq(&host->irq_lock);
		return;
	}

	data = host->mrq->data;
	chan = omap_hsmmc_get_dma_chan(host, data);
	/* host_cookie != 0 means pre_req mapped it; post_req will unmap */
	if (!data->host_cookie)
		dma_unmap_sg(chan->device->dev,
			     data->sg, data->sg_len,
			     omap_hsmmc_get_dma_dir(host, data));

	req_in_progress = host->req_in_progress;
	host->dma_ch = -1;
	spin_unlock_irq(&host->irq_lock);

	/* If DMA has finished after TC, complete the request */
	if (!req_in_progress) {
		struct mmc_request *mrq = host->mrq;

		host->mrq = NULL;
		mmc_request_done(host->mmc, mrq);
	}
}
1340
/*
 * Map the request's scatterlist for DMA, or reuse a mapping prepared
 * earlier by pre_req.
 *
 * @next: non-NULL when called from pre_req to prepare the *next* request
 *        ahead of time; NULL when called from the transfer setup path.
 *
 * A non-zero data->host_cookie marks a pre_req mapping; it is only valid
 * when it matches host->next_data.cookie. Returns 0 on success, -EINVAL
 * when dma_map_sg() mapped nothing.
 */
static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
				       struct mmc_data *data,
				       struct omap_hsmmc_next *next,
				       struct dma_chan *chan)
{
	int dma_len;

	/* Stale cookie from some other request: invalidate it */
	if (!next && data->host_cookie &&
	    data->host_cookie != host->next_data.cookie) {
		dev_warn(host->dev, "[%s] invalid cookie: data->host_cookie %d"
		       " host->next_data.cookie %d\n",
		       __func__, data->host_cookie, host->next_data.cookie);
		data->host_cookie = 0;
	}

	/* Check if next job is already prepared */
	if (next || data->host_cookie != host->next_data.cookie) {
		dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
				     omap_hsmmc_get_dma_dir(host, data));

	} else {
		/* Reuse the mapping done in pre_req; consume it */
		dma_len = host->next_data.dma_len;
		host->next_data.dma_len = 0;
	}


	if (dma_len == 0)
		return -EINVAL;

	if (next) {
		next->dma_len = dma_len;
		/* Cookie is a wrapping positive counter; skip <= 0 values */
		data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
	} else
		host->dma_len = dma_len;

	return 0;
}
1378
1379/*
1380 * Routine to configure and start DMA for the MMC card
1381 */
1382static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,
1383 struct mmc_request *req)
1384{
1385 struct dma_slave_config cfg;
1386 struct dma_async_tx_descriptor *tx;
1387 int ret = 0, i;
1388 struct mmc_data *data = req->data;
1389 struct dma_chan *chan;
1390
1391 /* Sanity check: all the SG entries must be aligned by block size. */
1392 for (i = 0; i < data->sg_len; i++) {
1393 struct scatterlist *sgl;
1394
1395 sgl = data->sg + i;
1396 if (sgl->length % data->blksz)
1397 return -EINVAL;
1398 }
1399 if ((data->blksz % 4) != 0)
1400 /* REVISIT: The MMC buffer increments only when MSB is written.
1401 * Return error for blksz which is non multiple of four.
1402 */
1403 return -EINVAL;
1404
1405 BUG_ON(host->dma_ch != -1);
1406
1407 chan = omap_hsmmc_get_dma_chan(host, data);
1408
1409 cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
1410 cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
1411 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1412 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1413 cfg.src_maxburst = data->blksz / 4;
1414 cfg.dst_maxburst = data->blksz / 4;
1415
1416 ret = dmaengine_slave_config(chan, &cfg);
1417 if (ret)
1418 return ret;
1419
1420 ret = omap_hsmmc_pre_dma_transfer(host, data, NULL, chan);
1421 if (ret)
1422 return ret;
1423
1424 tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
1425 data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
1426 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1427 if (!tx) {
1428 dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
1429 /* FIXME: cleanup */
1430 return -1;
1431 }
1432
1433 tx->callback = omap_hsmmc_dma_callback;
1434 tx->callback_param = host;
1435
1436 /* Does not fail */
1437 dmaengine_submit(tx);
1438
1439 host->dma_ch = 1;
1440
1441 return 0;
1442}
1443
/*
 * Program the data timeout counter (DTO field of SYSCTL).
 *
 * The requested timeout (nanoseconds plus extra clock cycles) is first
 * converted to MMC clock cycles at the current clock divider, then
 * mapped onto the DTO encoding: dto selects a timeout of 2^(dto + 13)
 * clock cycles, with dto clamped to 0..14.
 */
static void set_data_timeout(struct omap_hsmmc_host *host,
			     unsigned int timeout_ns,
			     unsigned int timeout_clks)
{
	unsigned int timeout, cycle_ns;
	uint32_t reg, clkd, dto = 0;

	reg = OMAP_HSMMC_READ(host->base, SYSCTL);
	clkd = (reg & CLKD_MASK) >> CLKD_SHIFT;
	if (clkd == 0)
		clkd = 1;

	/* Length of one MMC clock cycle at the current divider */
	cycle_ns = 1000000000 / (host->clk_rate / clkd);
	timeout = timeout_ns / cycle_ns;
	timeout += timeout_clks;
	if (timeout) {
		/* Shift left until the MSB is found; dto counts the shifts */
		while ((timeout & 0x80000000) == 0) {
			dto += 1;
			timeout <<= 1;
		}
		/* dto now holds the MSB position of the cycle count */
		dto = 31 - dto;
		timeout <<= 1;
		/* Round up when any lower-order bits remain */
		if (timeout && dto)
			dto += 1;
		/* Rebase so dto == 0 means 2^13 cycles; clamp to max 14 */
		if (dto >= 13)
			dto -= 13;
		else
			dto = 0;
		if (dto > 14)
			dto = 14;
	}

	reg &= ~DTO_MASK;
	reg |= dto << DTO_SHIFT;
	OMAP_HSMMC_WRITE(host->base, SYSCTL, reg);
}
1480
1481static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host)
1482{
1483 struct mmc_request *req = host->mrq;
1484 struct dma_chan *chan;
1485
1486 if (!req->data)
1487 return;
1488 OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz)
1489 | (req->data->blocks << 16));
1490 set_data_timeout(host, req->data->timeout_ns,
1491 req->data->timeout_clks);
1492 chan = omap_hsmmc_get_dma_chan(host, req->data);
1493 dma_async_issue_pending(chan);
1494}
1495
1496/*
1497 * Configure block length for MMC/SD cards and initiate the transfer.
1498 */
1499static int
1500omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req)
1501{
1502 int ret;
1503 host->data = req->data;
1504
1505 if (req->data == NULL) {
1506 OMAP_HSMMC_WRITE(host->base, BLK, 0);
1507 /*
1508 * Set an arbitrary 100ms data timeout for commands with
1509 * busy signal.
1510 */
1511 if (req->cmd->flags & MMC_RSP_BUSY)
1512 set_data_timeout(host, 100000000U, 0);
1513 return 0;
1514 }
1515
1516 if (host->use_dma) {
1517 ret = omap_hsmmc_setup_dma_transfer(host, req);
1518 if (ret != 0) {
1519 dev_err(mmc_dev(host->mmc), "MMC start dma failure\n");
1520 return ret;
1521 }
1522 }
1523 return 0;
1524}
1525
1526static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
1527 int err)
1528{
1529 struct omap_hsmmc_host *host = mmc_priv(mmc);
1530 struct mmc_data *data = mrq->data;
1531
1532 if (host->use_dma && data->host_cookie) {
1533 struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data);
1534
1535 dma_unmap_sg(c->device->dev, data->sg, data->sg_len,
1536 omap_hsmmc_get_dma_dir(host, data));
1537 data->host_cookie = 0;
1538 }
1539}
1540
1541static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
1542 bool is_first_req)
1543{
1544 struct omap_hsmmc_host *host = mmc_priv(mmc);
1545
1546 if (mrq->data->host_cookie) {
1547 mrq->data->host_cookie = 0;
1548 return ;
1549 }
1550
1551 if (host->use_dma) {
1552 struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data);
1553
1554 if (omap_hsmmc_pre_dma_transfer(host, mrq->data,
1555 &host->next_data, c))
1556 mrq->data->host_cookie = 0;
1557 }
1558}
1559
1560/*
1561 * Request function. for read/write operation
1562 */
1563static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
1564{
1565 struct omap_hsmmc_host *host = mmc_priv(mmc);
1566 int err;
1567
1568 BUG_ON(host->req_in_progress);
1569 BUG_ON(host->dma_ch != -1);
1570 if (host->protect_card) {
1571 if (host->reqs_blocked < 3) {
1572 /*
1573 * Ensure the controller is left in a consistent
1574 * state by resetting the command and data state
1575 * machines.
1576 */
1577 omap_hsmmc_reset_controller_fsm(host, SRD);
1578 omap_hsmmc_reset_controller_fsm(host, SRC);
1579 host->reqs_blocked += 1;
1580 }
1581 req->cmd->error = -EBADF;
1582 if (req->data)
1583 req->data->error = -EBADF;
1584 req->cmd->retries = 0;
1585 mmc_request_done(mmc, req);
1586 return;
1587 } else if (host->reqs_blocked)
1588 host->reqs_blocked = 0;
1589 WARN_ON(host->mrq != NULL);
1590 host->mrq = req;
1591 host->clk_rate = clk_get_rate(host->fclk);
1592 err = omap_hsmmc_prepare_data(host, req);
1593 if (err) {
1594 req->cmd->error = err;
1595 if (req->data)
1596 req->data->error = err;
1597 host->mrq = NULL;
1598 mmc_request_done(mmc, req);
1599 return;
1600 }
1601 if (req->sbc && !(host->flags & AUTO_CMD23)) {
1602 omap_hsmmc_start_command(host, req->sbc, NULL);
1603 return;
1604 }
1605
1606 omap_hsmmc_start_dma_transfer(host);
1607 omap_hsmmc_start_command(host, req->cmd, req->data);
1608}
1609
/* Routine to configure clock values. Exposed API to core */
static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);
	int do_send_init_stream = 0;

	/* Hold a runtime PM reference while touching registers */
	pm_runtime_get_sync(host->dev);

	if (ios->power_mode != host->power_mode) {
		switch (ios->power_mode) {
		case MMC_POWER_OFF:
			mmc_slot(host).set_power(host->dev, host->slot_id,
						 0, 0);
			break;
		case MMC_POWER_UP:
			mmc_slot(host).set_power(host->dev, host->slot_id,
						 1, ios->vdd);
			break;
		case MMC_POWER_ON:
			/* Send the 80-clock init stream after clock setup */
			do_send_init_stream = 1;
			break;
		}
		host->power_mode = ios->power_mode;
	}

	/* FIXME: set registers based only on changes to ios */

	omap_hsmmc_set_bus_width(host);

	if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
		/* Only MMC1 can interface at 3V without some flavor
		 * of external transceiver; but they all handle 1.8V.
		 */
		if ((OMAP_HSMMC_READ(host->base, HCTL) & SDVSDET) &&
			(ios->vdd == DUAL_VOLT_OCR_BIT)) {
				/*
				 * The mmc_select_voltage fn of the core does
				 * not seem to set the power_mode to
				 * MMC_POWER_UP upon recalculating the voltage.
				 * vdd 1.8v.
				 */
			if (omap_hsmmc_switch_opcond(host, ios->vdd) != 0)
				dev_dbg(mmc_dev(host->mmc),
						"Switch operation failed\n");
		}
	}

	omap_hsmmc_set_clock(host);

	if (do_send_init_stream)
		send_init_stream(host);

	omap_hsmmc_set_bus_mode(host);

	pm_runtime_put_autosuspend(host->dev);
}
1666
1667static int omap_hsmmc_get_cd(struct mmc_host *mmc)
1668{
1669 struct omap_hsmmc_host *host = mmc_priv(mmc);
1670
1671 if (!mmc_slot(host).card_detect)
1672 return -ENOSYS;
1673 return mmc_slot(host).card_detect(host->dev, host->slot_id);
1674}
1675
1676static int omap_hsmmc_get_ro(struct mmc_host *mmc)
1677{
1678 struct omap_hsmmc_host *host = mmc_priv(mmc);
1679
1680 if (!mmc_slot(host).get_ro)
1681 return -ENOSYS;
1682 return mmc_slot(host).get_ro(host->dev, 0);
1683}
1684
1685static void omap_hsmmc_init_card(struct mmc_host *mmc, struct mmc_card *card)
1686{
1687 struct omap_hsmmc_host *host = mmc_priv(mmc);
1688
1689 if (mmc_slot(host).init_card)
1690 mmc_slot(host).init_card(card);
1691}
1692
/*
 * Enable/disable the SDIO card interrupt (CIRQ).
 *
 * IE (status enable) is always updated; ISE (signal enable, i.e. the
 * actual interrupt line) is deferred while a request is in progress so
 * the card IRQ piggy-backs on the request's interrupt handling.
 */
static void omap_hsmmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);
	u32 irq_mask, con;
	unsigned long flags;

	spin_lock_irqsave(&host->irq_lock, flags);

	con = OMAP_HSMMC_READ(host->base, CON);
	irq_mask = OMAP_HSMMC_READ(host->base, ISE);
	if (enable) {
		host->flags |= HSMMC_SDIO_IRQ_ENABLED;
		irq_mask |= CIRQ_EN;
		/* CTPL/CLKEXTFREE keep DAT1 usable for the card IRQ */
		con |= CTPL | CLKEXTFREE;
	} else {
		host->flags &= ~HSMMC_SDIO_IRQ_ENABLED;
		irq_mask &= ~CIRQ_EN;
		con &= ~(CTPL | CLKEXTFREE);
	}
	OMAP_HSMMC_WRITE(host->base, CON, con);
	OMAP_HSMMC_WRITE(host->base, IE, irq_mask);

	/*
	 * if enable, piggy back detection on current request
	 * but always disable immediately
	 */
	if (!host->req_in_progress || !enable)
		OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);

	/* flush posted write */
	OMAP_HSMMC_READ(host->base, IE);

	spin_unlock_irqrestore(&host->irq_lock, flags);
}
1727
1728static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
1729{
1730 struct mmc_host *mmc = host->mmc;
1731 int ret;
1732
1733 /*
1734 * For omaps with wake-up path, wakeirq will be irq from pinctrl and
1735 * for other omaps, wakeirq will be from GPIO (dat line remuxed to
1736 * gpio). wakeirq is needed to detect sdio irq in runtime suspend state
1737 * with functional clock disabled.
1738 */
1739 if (!host->dev->of_node || !host->wake_irq)
1740 return -ENODEV;
1741
1742 /* Prevent auto-enabling of IRQ */
1743 irq_set_status_flags(host->wake_irq, IRQ_NOAUTOEN);
1744 ret = devm_request_irq(host->dev, host->wake_irq, omap_hsmmc_wake_irq,
1745 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
1746 mmc_hostname(mmc), host);
1747 if (ret) {
1748 dev_err(mmc_dev(host->mmc), "Unable to request wake IRQ\n");
1749 goto err;
1750 }
1751
1752 /*
1753 * Some omaps don't have wake-up path from deeper idle states
1754 * and need to remux SDIO DAT1 to GPIO for wake-up from idle.
1755 */
1756 if (host->pdata->controller_flags & OMAP_HSMMC_SWAKEUP_MISSING) {
1757 struct pinctrl *p = devm_pinctrl_get(host->dev);
1758 if (!p) {
1759 ret = -ENODEV;
1760 goto err_free_irq;
1761 }
1762 if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) {
1763 dev_info(host->dev, "missing default pinctrl state\n");
1764 devm_pinctrl_put(p);
1765 ret = -EINVAL;
1766 goto err_free_irq;
1767 }
1768
1769 if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_IDLE))) {
1770 dev_info(host->dev, "missing idle pinctrl state\n");
1771 devm_pinctrl_put(p);
1772 ret = -EINVAL;
1773 goto err_free_irq;
1774 }
1775 devm_pinctrl_put(p);
1776 }
1777
1778 OMAP_HSMMC_WRITE(host->base, HCTL,
1779 OMAP_HSMMC_READ(host->base, HCTL) | IWE);
1780 return 0;
1781
1782err_free_irq:
1783 devm_free_irq(host->dev, host->wake_irq, host);
1784err:
1785 dev_warn(host->dev, "no SDIO IRQ support, falling back to polling\n");
1786 host->wake_irq = 0;
1787 return ret;
1788}
1789
1790static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host)
1791{
1792 u32 hctl, capa, value;
1793
1794 /* Only MMC1 supports 3.0V */
1795 if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
1796 hctl = SDVS30;
1797 capa = VS30 | VS18;
1798 } else {
1799 hctl = SDVS18;
1800 capa = VS18;
1801 }
1802
1803 value = OMAP_HSMMC_READ(host->base, HCTL) & ~SDVS_MASK;
1804 OMAP_HSMMC_WRITE(host->base, HCTL, value | hctl);
1805
1806 value = OMAP_HSMMC_READ(host->base, CAPA);
1807 OMAP_HSMMC_WRITE(host->base, CAPA, value | capa);
1808
1809 /* Set SD bus power bit */
1810 set_sd_bus_power(host);
1811}
1812
1813static int omap_hsmmc_enable_fclk(struct mmc_host *mmc)
1814{
1815 struct omap_hsmmc_host *host = mmc_priv(mmc);
1816
1817 pm_runtime_get_sync(host->dev);
1818
1819 return 0;
1820}
1821
1822static int omap_hsmmc_disable_fclk(struct mmc_host *mmc)
1823{
1824 struct omap_hsmmc_host *host = mmc_priv(mmc);
1825
1826 pm_runtime_mark_last_busy(host->dev);
1827 pm_runtime_put_autosuspend(host->dev);
1828
1829 return 0;
1830}
1831
1832static int omap_hsmmc_multi_io_quirk(struct mmc_card *card,
1833 unsigned int direction, int blk_size)
1834{
1835 /* This controller can't do multiblock reads due to hw bugs */
1836 if (direction == MMC_DATA_READ)
1837 return 1;
1838
1839 return blk_size;
1840}
1841
/*
 * Host controller operations. Deliberately not const: probe() plugs in
 * .multi_io_quirk at runtime for controllers with broken multiblock
 * reads (OMAP_HSMMC_BROKEN_MULTIBLOCK_READ).
 */
static struct mmc_host_ops omap_hsmmc_ops = {
	.enable = omap_hsmmc_enable_fclk,
	.disable = omap_hsmmc_disable_fclk,
	.post_req = omap_hsmmc_post_req,
	.pre_req = omap_hsmmc_pre_req,
	.request = omap_hsmmc_request,
	.set_ios = omap_hsmmc_set_ios,
	.get_cd = omap_hsmmc_get_cd,
	.get_ro = omap_hsmmc_get_ro,
	.init_card = omap_hsmmc_init_card,
	.enable_sdio_irq = omap_hsmmc_enable_sdio_irq,
};
1854
1855#ifdef CONFIG_DEBUG_FS
1856
/*
 * debugfs "regs" dump: host state plus a snapshot of the main
 * controller registers, taken with the host runtime-resumed.
 */
static int omap_hsmmc_regs_show(struct seq_file *s, void *data)
{
	struct mmc_host *mmc = s->private;
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	seq_printf(s, "mmc%d:\n", mmc->index);
	seq_printf(s, "sdio irq mode\t%s\n",
		   (mmc->caps & MMC_CAP_SDIO_IRQ) ? "interrupt" : "polling");

	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
		seq_printf(s, "sdio irq \t%s\n",
			   (host->flags & HSMMC_SDIO_IRQ_ENABLED) ? "enabled"
			   : "disabled");
	}
	seq_printf(s, "ctx_loss:\t%d\n", host->context_loss);

	/* Clocks must be running to read the registers */
	pm_runtime_get_sync(host->dev);
	seq_puts(s, "\nregs:\n");
	seq_printf(s, "CON:\t\t0x%08x\n",
		   OMAP_HSMMC_READ(host->base, CON));
	seq_printf(s, "PSTATE:\t\t0x%08x\n",
		   OMAP_HSMMC_READ(host->base, PSTATE));
	seq_printf(s, "HCTL:\t\t0x%08x\n",
		   OMAP_HSMMC_READ(host->base, HCTL));
	seq_printf(s, "SYSCTL:\t\t0x%08x\n",
		   OMAP_HSMMC_READ(host->base, SYSCTL));
	seq_printf(s, "IE:\t\t0x%08x\n",
		   OMAP_HSMMC_READ(host->base, IE));
	seq_printf(s, "ISE:\t\t0x%08x\n",
		   OMAP_HSMMC_READ(host->base, ISE));
	seq_printf(s, "CAPA:\t\t0x%08x\n",
		   OMAP_HSMMC_READ(host->base, CAPA));

	pm_runtime_mark_last_busy(host->dev);
	pm_runtime_put_autosuspend(host->dev);

	return 0;
}
1895
/* debugfs open: bind the register-dump show routine to this host */
static int omap_hsmmc_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, omap_hsmmc_regs_show, inode->i_private);
}
1900
/* File operations for the debugfs "regs" entry (seq_file based) */
static const struct file_operations mmc_regs_fops = {
	.open           = omap_hsmmc_regs_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};
1907
1908static void omap_hsmmc_debugfs(struct mmc_host *mmc)
1909{
1910 if (mmc->debugfs_root)
1911 debugfs_create_file("regs", S_IRUSR, mmc->debugfs_root,
1912 mmc, &mmc_regs_fops);
1913}
1914
1915#else
1916
/* No-op stub when CONFIG_DEBUG_FS is disabled */
static void omap_hsmmc_debugfs(struct mmc_host *mmc)
{
}
1920
1921#endif
1922
1923#ifdef CONFIG_OF
/* Per-SoC quirk data matched via omap_mmc_of_match below */
static const struct omap_mmc_of_data omap3_pre_es3_mmc_of_data = {
	/* See 35xx errata 2.1.1.128 in SPRZ278F */
	.controller_flags = OMAP_HSMMC_BROKEN_MULTIBLOCK_READ,
};

/* OMAP4: register block starts 0x100 into the IP's address space */
static const struct omap_mmc_of_data omap4_mmc_of_data = {
	.reg_offset = 0x100,
};
/* AM33xx: like OMAP4, but lacks the swakeup wiring */
static const struct omap_mmc_of_data am33xx_mmc_of_data = {
	.reg_offset = 0x100,
	.controller_flags = OMAP_HSMMC_SWAKEUP_MISSING,
};
1936
/* Device tree match table; .data points at the per-SoC quirks above */
static const struct of_device_id omap_mmc_of_match[] = {
	{
		.compatible = "ti,omap2-hsmmc",
	},
	{
		.compatible = "ti,omap3-pre-es3-hsmmc",
		.data = &omap3_pre_es3_mmc_of_data,
	},
	{
		.compatible = "ti,omap3-hsmmc",
	},
	{
		.compatible = "ti,omap4-hsmmc",
		.data = &omap4_mmc_of_data,
	},
	{
		.compatible = "ti,am33xx-hsmmc",
		.data = &am33xx_mmc_of_data,
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_mmc_of_match);
1959
1960static struct omap_mmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
1961{
1962 struct omap_mmc_platform_data *pdata;
1963 struct device_node *np = dev->of_node;
1964 u32 bus_width, max_freq;
1965 int cd_gpio, wp_gpio;
1966
1967 cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
1968 wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
1969 if (cd_gpio == -EPROBE_DEFER || wp_gpio == -EPROBE_DEFER)
1970 return ERR_PTR(-EPROBE_DEFER);
1971
1972 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1973 if (!pdata)
1974 return ERR_PTR(-ENOMEM); /* out of memory */
1975
1976 if (of_find_property(np, "ti,dual-volt", NULL))
1977 pdata->controller_flags |= OMAP_HSMMC_SUPPORTS_DUAL_VOLT;
1978
1979 /* This driver only supports 1 slot */
1980 pdata->nr_slots = 1;
1981 pdata->slots[0].switch_pin = cd_gpio;
1982 pdata->slots[0].gpio_wp = wp_gpio;
1983
1984 if (of_find_property(np, "ti,non-removable", NULL)) {
1985 pdata->slots[0].nonremovable = true;
1986 pdata->slots[0].no_regulator_off_init = true;
1987 }
1988 of_property_read_u32(np, "bus-width", &bus_width);
1989 if (bus_width == 4)
1990 pdata->slots[0].caps |= MMC_CAP_4_BIT_DATA;
1991 else if (bus_width == 8)
1992 pdata->slots[0].caps |= MMC_CAP_8_BIT_DATA;
1993
1994 if (of_find_property(np, "ti,needs-special-reset", NULL))
1995 pdata->slots[0].features |= HSMMC_HAS_UPDATED_RESET;
1996
1997 if (!of_property_read_u32(np, "max-frequency", &max_freq))
1998 pdata->max_freq = max_freq;
1999
2000 if (of_find_property(np, "ti,needs-special-hs-handling", NULL))
2001 pdata->slots[0].features |= HSMMC_HAS_HSPE_SUPPORT;
2002
2003 if (of_find_property(np, "keep-power-in-suspend", NULL))
2004 pdata->slots[0].pm_caps |= MMC_PM_KEEP_POWER;
2005
2006 if (of_find_property(np, "enable-sdio-wakeup", NULL))
2007 pdata->slots[0].pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2008
2009 return pdata;
2010}
2011#else
/* Stub when CONFIG_OF is disabled: DT pdata can never be provided */
static inline struct omap_mmc_platform_data
			*of_get_hsmmc_pdata(struct device *dev)
{
	return ERR_PTR(-EINVAL);
}
2017#endif
2018
2019static int omap_hsmmc_probe(struct platform_device *pdev)
2020{
2021 struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
2022 struct mmc_host *mmc;
2023 struct omap_hsmmc_host *host = NULL;
2024 struct resource *res;
2025 int ret, irq;
2026 const struct of_device_id *match;
2027 dma_cap_mask_t mask;
2028 unsigned tx_req, rx_req;
2029 const struct omap_mmc_of_data *data;
2030 void __iomem *base;
2031
2032 match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev);
2033 if (match) {
2034 pdata = of_get_hsmmc_pdata(&pdev->dev);
2035
2036 if (IS_ERR(pdata))
2037 return PTR_ERR(pdata);
2038
2039 if (match->data) {
2040 data = match->data;
2041 pdata->reg_offset = data->reg_offset;
2042 pdata->controller_flags |= data->controller_flags;
2043 }
2044 }
2045
2046 if (pdata == NULL) {
2047 dev_err(&pdev->dev, "Platform Data is missing\n");
2048 return -ENXIO;
2049 }
2050
2051 if (pdata->nr_slots == 0) {
2052 dev_err(&pdev->dev, "No Slots\n");
2053 return -ENXIO;
2054 }
2055
2056 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2057 irq = platform_get_irq(pdev, 0);
2058 if (res == NULL || irq < 0)
2059 return -ENXIO;
2060
2061 base = devm_ioremap_resource(&pdev->dev, res);
2062 if (IS_ERR(base))
2063 return PTR_ERR(base);
2064
2065 ret = omap_hsmmc_gpio_init(pdata);
2066 if (ret)
2067 goto err;
2068
2069 mmc = mmc_alloc_host(sizeof(struct omap_hsmmc_host), &pdev->dev);
2070 if (!mmc) {
2071 ret = -ENOMEM;
2072 goto err_alloc;
2073 }
2074
2075 host = mmc_priv(mmc);
2076 host->mmc = mmc;
2077 host->pdata = pdata;
2078 host->dev = &pdev->dev;
2079 host->use_dma = 1;
2080 host->dma_ch = -1;
2081 host->irq = irq;
2082 host->slot_id = 0;
2083 host->mapbase = res->start + pdata->reg_offset;
2084 host->base = base + pdata->reg_offset;
2085 host->power_mode = MMC_POWER_OFF;
2086 host->next_data.cookie = 1;
2087 host->pbias_enabled = 0;
2088
2089 platform_set_drvdata(pdev, host);
2090
2091 if (pdev->dev.of_node)
2092 host->wake_irq = irq_of_parse_and_map(pdev->dev.of_node, 1);
2093
2094 mmc->ops = &omap_hsmmc_ops;
2095
2096 mmc->f_min = OMAP_MMC_MIN_CLOCK;
2097
2098 if (pdata->max_freq > 0)
2099 mmc->f_max = pdata->max_freq;
2100 else
2101 mmc->f_max = OMAP_MMC_MAX_CLOCK;
2102
2103 spin_lock_init(&host->irq_lock);
2104
2105 host->fclk = devm_clk_get(&pdev->dev, "fck");
2106 if (IS_ERR(host->fclk)) {
2107 ret = PTR_ERR(host->fclk);
2108 host->fclk = NULL;
2109 goto err1;
2110 }
2111
2112 if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
2113 dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
2114 omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
2115 }
2116
2117 pm_runtime_enable(host->dev);
2118 pm_runtime_get_sync(host->dev);
2119 pm_runtime_set_autosuspend_delay(host->dev, MMC_AUTOSUSPEND_DELAY);
2120 pm_runtime_use_autosuspend(host->dev);
2121
2122 omap_hsmmc_context_save(host);
2123
2124 host->dbclk = devm_clk_get(&pdev->dev, "mmchsdb_fck");
2125 /*
2126 * MMC can still work without debounce clock.
2127 */
2128 if (IS_ERR(host->dbclk)) {
2129 host->dbclk = NULL;
2130 } else if (clk_prepare_enable(host->dbclk) != 0) {
2131 dev_warn(mmc_dev(host->mmc), "Failed to enable debounce clk\n");
2132 host->dbclk = NULL;
2133 }
2134
2135 /* Since we do only SG emulation, we can have as many segs
2136 * as we want. */
2137 mmc->max_segs = 1024;
2138
2139 mmc->max_blk_size = 512; /* Block Length at max can be 1024 */
2140 mmc->max_blk_count = 0xFFFF; /* No. of Blocks is 16 bits */
2141 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2142 mmc->max_seg_size = mmc->max_req_size;
2143
2144 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
2145 MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE;
2146
2147 mmc->caps |= mmc_slot(host).caps;
2148 if (mmc->caps & MMC_CAP_8_BIT_DATA)
2149 mmc->caps |= MMC_CAP_4_BIT_DATA;
2150
2151 if (mmc_slot(host).nonremovable)
2152 mmc->caps |= MMC_CAP_NONREMOVABLE;
2153
2154 mmc->pm_caps = mmc_slot(host).pm_caps;
2155
2156 omap_hsmmc_conf_bus_power(host);
2157
2158 if (!pdev->dev.of_node) {
2159 res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
2160 if (!res) {
2161 dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
2162 ret = -ENXIO;
2163 goto err_irq;
2164 }
2165 tx_req = res->start;
2166
2167 res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
2168 if (!res) {
2169 dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
2170 ret = -ENXIO;
2171 goto err_irq;
2172 }
2173 rx_req = res->start;
2174 }
2175
2176 dma_cap_zero(mask);
2177 dma_cap_set(DMA_SLAVE, mask);
2178
2179 host->rx_chan =
2180 dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
2181 &rx_req, &pdev->dev, "rx");
2182
2183 if (!host->rx_chan) {
2184 dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
2185 ret = -ENXIO;
2186 goto err_irq;
2187 }
2188
2189 host->tx_chan =
2190 dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
2191 &tx_req, &pdev->dev, "tx");
2192
2193 if (!host->tx_chan) {
2194 dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
2195 ret = -ENXIO;
2196 goto err_irq;
2197 }
2198
2199 /* Request IRQ for MMC operations */
2200 ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0,
2201 mmc_hostname(mmc), host);
2202 if (ret) {
2203 dev_err(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n");
2204 goto err_irq;
2205 }
2206
2207 if (pdata->init != NULL) {
2208 if (pdata->init(&pdev->dev) != 0) {
2209 dev_err(mmc_dev(host->mmc),
2210 "Unable to configure MMC IRQs\n");
2211 goto err_irq;
2212 }
2213 }
2214
2215 if (omap_hsmmc_have_reg() && !mmc_slot(host).set_power) {
2216 ret = omap_hsmmc_reg_get(host);
2217 if (ret)
2218 goto err_reg;
2219 host->use_reg = 1;
2220 }
2221
2222 mmc->ocr_avail = mmc_slot(host).ocr_mask;
2223
2224 /* Request IRQ for card detect */
2225 if ((mmc_slot(host).card_detect_irq)) {
2226 ret = devm_request_threaded_irq(&pdev->dev,
2227 mmc_slot(host).card_detect_irq,
2228 NULL, omap_hsmmc_detect,
2229 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
2230 mmc_hostname(mmc), host);
2231 if (ret) {
2232 dev_err(mmc_dev(host->mmc),
2233 "Unable to grab MMC CD IRQ\n");
2234 goto err_irq_cd;
2235 }
2236 pdata->suspend = omap_hsmmc_suspend_cdirq;
2237 pdata->resume = omap_hsmmc_resume_cdirq;
2238 }
2239
2240 omap_hsmmc_disable_irq(host);
2241
2242 /*
2243 * For now, only support SDIO interrupt if we have a separate
2244 * wake-up interrupt configured from device tree. This is because
2245 * the wake-up interrupt is needed for idle state and some
2246 * platforms need special quirks. And we don't want to add new
2247 * legacy mux platform init code callbacks any longer as we
2248 * are moving to DT based booting anyways.
2249 */
2250 ret = omap_hsmmc_configure_wake_irq(host);
2251 if (!ret)
2252 mmc->caps |= MMC_CAP_SDIO_IRQ;
2253
2254 omap_hsmmc_protect_card(host);
2255
2256 mmc_add_host(mmc);
2257
2258 if (mmc_slot(host).name != NULL) {
2259 ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name);
2260 if (ret < 0)
2261 goto err_slot_name;
2262 }
2263 if (mmc_slot(host).card_detect_irq && mmc_slot(host).get_cover_state) {
2264 ret = device_create_file(&mmc->class_dev,
2265 &dev_attr_cover_switch);
2266 if (ret < 0)
2267 goto err_slot_name;
2268 }
2269
2270 omap_hsmmc_debugfs(mmc);
2271 pm_runtime_mark_last_busy(host->dev);
2272 pm_runtime_put_autosuspend(host->dev);
2273
2274 return 0;
2275
2276err_slot_name:
2277 mmc_remove_host(mmc);
2278err_irq_cd:
2279 if (host->use_reg)
2280 omap_hsmmc_reg_put(host);
2281err_reg:
2282 if (host->pdata->cleanup)
2283 host->pdata->cleanup(&pdev->dev);
2284err_irq:
2285 if (host->tx_chan)
2286 dma_release_channel(host->tx_chan);
2287 if (host->rx_chan)
2288 dma_release_channel(host->rx_chan);
2289 pm_runtime_put_sync(host->dev);
2290 pm_runtime_disable(host->dev);
2291 if (host->dbclk)
2292 clk_disable_unprepare(host->dbclk);
2293err1:
2294 mmc_free_host(mmc);
2295err_alloc:
2296 omap_hsmmc_gpio_free(pdata);
2297err:
2298 return ret;
2299}
2300
/*
 * omap_hsmmc_remove - unbind the driver from the platform device
 * @pdev: platform device being removed
 *
 * Tears down what probe set up, mirroring the probe error path:
 * unregister the MMC host first (no new requests), then release the
 * regulator, run the board cleanup hook, free the DMA channels, drop
 * runtime PM, stop the debounce clock, and finally free the GPIOs and
 * the host structure.
 *
 * Always returns 0.
 */
static int omap_hsmmc_remove(struct platform_device *pdev)
{
	struct omap_hsmmc_host *host = platform_get_drvdata(pdev);

	/* keep the controller powered while we touch it */
	pm_runtime_get_sync(host->dev);
	mmc_remove_host(host->mmc);
	if (host->use_reg)
		omap_hsmmc_reg_put(host);
	if (host->pdata->cleanup)
		host->pdata->cleanup(&pdev->dev);

	if (host->tx_chan)
		dma_release_channel(host->tx_chan);
	if (host->rx_chan)
		dma_release_channel(host->rx_chan);

	/* balance the get_sync above, then disable runtime PM for good */
	pm_runtime_put_sync(host->dev);
	pm_runtime_disable(host->dev);
	if (host->dbclk)
		clk_disable_unprepare(host->dbclk);

	omap_hsmmc_gpio_free(host->pdata);
	mmc_free_host(host->mmc);

	return 0;
}
2327
2328#ifdef CONFIG_PM
2329static int omap_hsmmc_prepare(struct device *dev)
2330{
2331 struct omap_hsmmc_host *host = dev_get_drvdata(dev);
2332
2333 if (host->pdata->suspend)
2334 return host->pdata->suspend(dev, host->slot_id);
2335
2336 return 0;
2337}
2338
2339static void omap_hsmmc_complete(struct device *dev)
2340{
2341 struct omap_hsmmc_host *host = dev_get_drvdata(dev);
2342
2343 if (host->pdata->resume)
2344 host->pdata->resume(dev, host->slot_id);
2345
2346}
2347
/*
 * omap_hsmmc_suspend - system suspend callback
 * @dev: the MMC controller device
 *
 * Unless the MMC core asked to keep the card powered
 * (MMC_PM_KEEP_POWER), mask and clear all controller interrupts and
 * drop SD bus power (SDBP). The dedicated wake-up interrupt is
 * disabled unless SDIO-IRQ wakeups were requested, so a card event
 * cannot wake the system. The debounce clock is stopped last.
 */
static int omap_hsmmc_suspend(struct device *dev)
{
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);

	if (!host)
		return 0;

	/* make sure the controller is active before register accesses */
	pm_runtime_get_sync(host->dev);

	if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) {
		/* mask IRQ signalling and status, clear pending events */
		OMAP_HSMMC_WRITE(host->base, ISE, 0);
		OMAP_HSMMC_WRITE(host->base, IE, 0);
		OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
		/* cut SD bus power */
		OMAP_HSMMC_WRITE(host->base, HCTL,
				OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
	}

	/* do not wake up due to sdio irq */
	if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
	    !(host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ))
		disable_irq(host->wake_irq);

	if (host->dbclk)
		clk_disable_unprepare(host->dbclk);

	pm_runtime_put_sync(host->dev);
	return 0;
}
2376
/*
 * omap_hsmmc_resume - system resume callback
 * @dev: the MMC controller device
 *
 * Reverse of omap_hsmmc_suspend(): restart the debounce clock,
 * reconfigure bus power unless the card stayed powered, re-apply the
 * write-protect state, and re-enable the wake-up interrupt that
 * suspend disabled.
 */
static int omap_hsmmc_resume(struct device *dev)
{
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);

	if (!host)
		return 0;

	pm_runtime_get_sync(host->dev);

	if (host->dbclk)
		clk_prepare_enable(host->dbclk);

	if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
		omap_hsmmc_conf_bus_power(host);

	omap_hsmmc_protect_card(host);

	/* re-enable wake-up, mirroring the disable_irq() in suspend */
	if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
	    !(host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ))
		enable_irq(host->wake_irq);

	/* let autosuspend idle the controller again when unused */
	pm_runtime_mark_last_busy(host->dev);
	pm_runtime_put_autosuspend(host->dev);
	return 0;
}
2403
2404#else
2405#define omap_hsmmc_prepare NULL
2406#define omap_hsmmc_complete NULL
2407#define omap_hsmmc_suspend NULL
2408#define omap_hsmmc_resume NULL
2409#endif
2410
/*
 * omap_hsmmc_runtime_suspend - runtime PM suspend callback
 * @dev: the MMC controller device
 *
 * Save the controller context, then — if SDIO IRQs are in use — hand
 * card-interrupt detection over to the separate wake-up IRQ: mask the
 * controller's own interrupts, bail out with -EBUSY if DAT1 is already
 * low (an SDIO IRQ is pending and must be serviced before idling),
 * otherwise switch the pins to their idle state and arm the wake IRQ.
 * Everything runs under irq_lock to avoid racing the IRQ handler.
 */
static int omap_hsmmc_runtime_suspend(struct device *dev)
{
	struct omap_hsmmc_host *host;
	unsigned long flags;
	int ret = 0;

	host = platform_get_drvdata(to_platform_device(dev));
	omap_hsmmc_context_save(host);
	dev_dbg(dev, "disabled\n");

	spin_lock_irqsave(&host->irq_lock, flags);
	if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
	    (host->flags & HSMMC_SDIO_IRQ_ENABLED)) {
		/* disable sdio irq handling to prevent race */
		OMAP_HSMMC_WRITE(host->base, ISE, 0);
		OMAP_HSMMC_WRITE(host->base, IE, 0);

		if (!(OMAP_HSMMC_READ(host->base, PSTATE) & DLEV_DAT(1))) {
			/*
			 * dat1 line low, pending sdio irq
			 * race condition: possible irq handler running on
			 * multi-core, abort
			 */
			dev_dbg(dev, "pending sdio irq, abort suspend\n");
			/* restore card-interrupt delivery and stay active */
			OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
			OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN);
			OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN);
			pm_runtime_mark_last_busy(dev);
			ret = -EBUSY;
			goto abort;
		}

		pinctrl_pm_select_idle_state(dev);

		/* wake IRQ must not already be armed at this point */
		WARN_ON(host->flags & HSMMC_WAKE_IRQ_ENABLED);
		enable_irq(host->wake_irq);
		host->flags |= HSMMC_WAKE_IRQ_ENABLED;
	} else {
		pinctrl_pm_select_idle_state(dev);
	}

abort:
	spin_unlock_irqrestore(&host->irq_lock, flags);
	return ret;
}
2456
/*
 * omap_hsmmc_runtime_resume - runtime PM resume callback
 * @dev: the MMC controller device
 *
 * Restore the controller context and, when SDIO IRQs are in use, undo
 * what runtime_suspend did: disarm the wake-up IRQ, switch the pins
 * back to their default state, and re-enable the controller's own
 * card-interrupt delivery. Runs under irq_lock for the same reason as
 * runtime_suspend.
 */
static int omap_hsmmc_runtime_resume(struct device *dev)
{
	struct omap_hsmmc_host *host;
	unsigned long flags;

	host = platform_get_drvdata(to_platform_device(dev));
	omap_hsmmc_context_restore(host);
	dev_dbg(dev, "enabled\n");

	spin_lock_irqsave(&host->irq_lock, flags);
	if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
	    (host->flags & HSMMC_SDIO_IRQ_ENABLED)) {
		/* sdio irq flag can't change while in runtime suspend */
		if (host->flags & HSMMC_WAKE_IRQ_ENABLED) {
			disable_irq_nosync(host->wake_irq);
			host->flags &= ~HSMMC_WAKE_IRQ_ENABLED;
		}

		pinctrl_pm_select_default_state(host->dev);

		/* irq lost, if pinmux incorrect */
		OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
		OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN);
		OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN);
	} else {
		pinctrl_pm_select_default_state(host->dev);
	}
	spin_unlock_irqrestore(&host->irq_lock, flags);
	return 0;
}
2487
2488static struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
2489 .suspend = omap_hsmmc_suspend,
2490 .resume = omap_hsmmc_resume,
2491 .prepare = omap_hsmmc_prepare,
2492 .complete = omap_hsmmc_complete,
2493 .runtime_suspend = omap_hsmmc_runtime_suspend,
2494 .runtime_resume = omap_hsmmc_runtime_resume,
2495};
2496
/* Platform driver glue: matched by name or by the DT table */
static struct platform_driver omap_hsmmc_driver = {
	.probe		= omap_hsmmc_probe,
	.remove		= omap_hsmmc_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &omap_hsmmc_dev_pm_ops,
		.of_match_table = of_match_ptr(omap_mmc_of_match),
	},
};
2506
/* Register the driver and emit standard module metadata */
module_platform_driver(omap_hsmmc_driver);

MODULE_DESCRIPTION("OMAP High Speed Multimedia Card driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Texas Instruments Inc");