Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v4.11-rc8 3415 lines 88 kB view raw
1/* 2 * Synopsys DesignWare Multimedia Card Interface driver 3 * (Based on NXP driver for lpc 31xx) 4 * 5 * Copyright (C) 2009 NXP Semiconductors 6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd. 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation; either version 2 of the License, or 11 * (at your option) any later version. 12 */ 13 14#include <linux/blkdev.h> 15#include <linux/clk.h> 16#include <linux/debugfs.h> 17#include <linux/device.h> 18#include <linux/dma-mapping.h> 19#include <linux/err.h> 20#include <linux/init.h> 21#include <linux/interrupt.h> 22#include <linux/ioport.h> 23#include <linux/module.h> 24#include <linux/platform_device.h> 25#include <linux/pm_runtime.h> 26#include <linux/seq_file.h> 27#include <linux/slab.h> 28#include <linux/stat.h> 29#include <linux/delay.h> 30#include <linux/irq.h> 31#include <linux/mmc/card.h> 32#include <linux/mmc/host.h> 33#include <linux/mmc/mmc.h> 34#include <linux/mmc/sd.h> 35#include <linux/mmc/sdio.h> 36#include <linux/bitops.h> 37#include <linux/regulator/consumer.h> 38#include <linux/of.h> 39#include <linux/of_gpio.h> 40#include <linux/mmc/slot-gpio.h> 41 42#include "dw_mmc.h" 43 44/* Common flag combinations */ 45#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \ 46 SDMMC_INT_HTO | SDMMC_INT_SBE | \ 47 SDMMC_INT_EBE | SDMMC_INT_HLE) 48#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \ 49 SDMMC_INT_RESP_ERR | SDMMC_INT_HLE) 50#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \ 51 DW_MCI_CMD_ERROR_FLAGS) 52#define DW_MCI_SEND_STATUS 1 53#define DW_MCI_RECV_STATUS 2 54#define DW_MCI_DMA_THRESHOLD 16 55 56#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */ 57#define DW_MCI_FREQ_MIN 100000 /* unit: HZ */ 58 59#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \ 60 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \ 61 SDMMC_IDMAC_INT_FBE 
| SDMMC_IDMAC_INT_RI | \ 62 SDMMC_IDMAC_INT_TI) 63 64#define DESC_RING_BUF_SZ PAGE_SIZE 65 66struct idmac_desc_64addr { 67 u32 des0; /* Control Descriptor */ 68 69 u32 des1; /* Reserved */ 70 71 u32 des2; /*Buffer sizes */ 72#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \ 73 ((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \ 74 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff))) 75 76 u32 des3; /* Reserved */ 77 78 u32 des4; /* Lower 32-bits of Buffer Address Pointer 1*/ 79 u32 des5; /* Upper 32-bits of Buffer Address Pointer 1*/ 80 81 u32 des6; /* Lower 32-bits of Next Descriptor Address */ 82 u32 des7; /* Upper 32-bits of Next Descriptor Address */ 83}; 84 85struct idmac_desc { 86 __le32 des0; /* Control Descriptor */ 87#define IDMAC_DES0_DIC BIT(1) 88#define IDMAC_DES0_LD BIT(2) 89#define IDMAC_DES0_FD BIT(3) 90#define IDMAC_DES0_CH BIT(4) 91#define IDMAC_DES0_ER BIT(5) 92#define IDMAC_DES0_CES BIT(30) 93#define IDMAC_DES0_OWN BIT(31) 94 95 __le32 des1; /* Buffer sizes */ 96#define IDMAC_SET_BUFFER1_SIZE(d, s) \ 97 ((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff))) 98 99 __le32 des2; /* buffer 1 physical address */ 100 101 __le32 des3; /* buffer 2 physical address */ 102}; 103 104/* Each descriptor can transfer up to 4KB of data in chained mode */ 105#define DW_MCI_DESC_DATA_LENGTH 0x1000 106 107static bool dw_mci_reset(struct dw_mci *host); 108static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset); 109static int dw_mci_card_busy(struct mmc_host *mmc); 110static int dw_mci_get_cd(struct mmc_host *mmc); 111 112#if defined(CONFIG_DEBUG_FS) 113static int dw_mci_req_show(struct seq_file *s, void *v) 114{ 115 struct dw_mci_slot *slot = s->private; 116 struct mmc_request *mrq; 117 struct mmc_command *cmd; 118 struct mmc_command *stop; 119 struct mmc_data *data; 120 121 /* Make sure we get a consistent snapshot */ 122 spin_lock_bh(&slot->host->lock); 123 mrq = slot->mrq; 124 125 if (mrq) { 126 cmd = mrq->cmd; 127 data = mrq->data; 128 stop 
= mrq->stop; 129 130 if (cmd) 131 seq_printf(s, 132 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", 133 cmd->opcode, cmd->arg, cmd->flags, 134 cmd->resp[0], cmd->resp[1], cmd->resp[2], 135 cmd->resp[2], cmd->error); 136 if (data) 137 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n", 138 data->bytes_xfered, data->blocks, 139 data->blksz, data->flags, data->error); 140 if (stop) 141 seq_printf(s, 142 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", 143 stop->opcode, stop->arg, stop->flags, 144 stop->resp[0], stop->resp[1], stop->resp[2], 145 stop->resp[2], stop->error); 146 } 147 148 spin_unlock_bh(&slot->host->lock); 149 150 return 0; 151} 152 153static int dw_mci_req_open(struct inode *inode, struct file *file) 154{ 155 return single_open(file, dw_mci_req_show, inode->i_private); 156} 157 158static const struct file_operations dw_mci_req_fops = { 159 .owner = THIS_MODULE, 160 .open = dw_mci_req_open, 161 .read = seq_read, 162 .llseek = seq_lseek, 163 .release = single_release, 164}; 165 166static int dw_mci_regs_show(struct seq_file *s, void *v) 167{ 168 struct dw_mci *host = s->private; 169 170 seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS)); 171 seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS)); 172 seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD)); 173 seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL)); 174 seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK)); 175 seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA)); 176 177 return 0; 178} 179 180static int dw_mci_regs_open(struct inode *inode, struct file *file) 181{ 182 return single_open(file, dw_mci_regs_show, inode->i_private); 183} 184 185static const struct file_operations dw_mci_regs_fops = { 186 .owner = THIS_MODULE, 187 .open = dw_mci_regs_open, 188 .read = seq_read, 189 .llseek = seq_lseek, 190 .release = single_release, 191}; 192 193static void dw_mci_init_debugfs(struct dw_mci_slot *slot) 194{ 195 struct mmc_host *mmc = slot->mmc; 196 struct dw_mci 
*host = slot->host; 197 struct dentry *root; 198 struct dentry *node; 199 200 root = mmc->debugfs_root; 201 if (!root) 202 return; 203 204 node = debugfs_create_file("regs", S_IRUSR, root, host, 205 &dw_mci_regs_fops); 206 if (!node) 207 goto err; 208 209 node = debugfs_create_file("req", S_IRUSR, root, slot, 210 &dw_mci_req_fops); 211 if (!node) 212 goto err; 213 214 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state); 215 if (!node) 216 goto err; 217 218 node = debugfs_create_x32("pending_events", S_IRUSR, root, 219 (u32 *)&host->pending_events); 220 if (!node) 221 goto err; 222 223 node = debugfs_create_x32("completed_events", S_IRUSR, root, 224 (u32 *)&host->completed_events); 225 if (!node) 226 goto err; 227 228 return; 229 230err: 231 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n"); 232} 233#endif /* defined(CONFIG_DEBUG_FS) */ 234 235static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg); 236 237static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) 238{ 239 struct dw_mci_slot *slot = mmc_priv(mmc); 240 struct dw_mci *host = slot->host; 241 u32 cmdr; 242 243 cmd->error = -EINPROGRESS; 244 cmdr = cmd->opcode; 245 246 if (cmd->opcode == MMC_STOP_TRANSMISSION || 247 cmd->opcode == MMC_GO_IDLE_STATE || 248 cmd->opcode == MMC_GO_INACTIVE_STATE || 249 (cmd->opcode == SD_IO_RW_DIRECT && 250 ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT)) 251 cmdr |= SDMMC_CMD_STOP; 252 else if (cmd->opcode != MMC_SEND_STATUS && cmd->data) 253 cmdr |= SDMMC_CMD_PRV_DAT_WAIT; 254 255 if (cmd->opcode == SD_SWITCH_VOLTAGE) { 256 u32 clk_en_a; 257 258 /* Special bit makes CMD11 not die */ 259 cmdr |= SDMMC_CMD_VOLT_SWITCH; 260 261 /* Change state to continue to handle CMD11 weirdness */ 262 WARN_ON(slot->host->state != STATE_SENDING_CMD); 263 slot->host->state = STATE_SENDING_CMD11; 264 265 /* 266 * We need to disable low power mode (automatic clock stop) 267 * while doing voltage switch so we don't confuse 
the card, 268 * since stopping the clock is a specific part of the UHS 269 * voltage change dance. 270 * 271 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be 272 * unconditionally turned back on in dw_mci_setup_bus() if it's 273 * ever called with a non-zero clock. That shouldn't happen 274 * until the voltage change is all done. 275 */ 276 clk_en_a = mci_readl(host, CLKENA); 277 clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id); 278 mci_writel(host, CLKENA, clk_en_a); 279 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | 280 SDMMC_CMD_PRV_DAT_WAIT, 0); 281 } 282 283 if (cmd->flags & MMC_RSP_PRESENT) { 284 /* We expect a response, so set this bit */ 285 cmdr |= SDMMC_CMD_RESP_EXP; 286 if (cmd->flags & MMC_RSP_136) 287 cmdr |= SDMMC_CMD_RESP_LONG; 288 } 289 290 if (cmd->flags & MMC_RSP_CRC) 291 cmdr |= SDMMC_CMD_RESP_CRC; 292 293 if (cmd->data) { 294 cmdr |= SDMMC_CMD_DAT_EXP; 295 if (cmd->data->flags & MMC_DATA_WRITE) 296 cmdr |= SDMMC_CMD_DAT_WR; 297 } 298 299 if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags)) 300 cmdr |= SDMMC_CMD_USE_HOLD_REG; 301 302 return cmdr; 303} 304 305static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd) 306{ 307 struct mmc_command *stop; 308 u32 cmdr; 309 310 if (!cmd->data) 311 return 0; 312 313 stop = &host->stop_abort; 314 cmdr = cmd->opcode; 315 memset(stop, 0, sizeof(struct mmc_command)); 316 317 if (cmdr == MMC_READ_SINGLE_BLOCK || 318 cmdr == MMC_READ_MULTIPLE_BLOCK || 319 cmdr == MMC_WRITE_BLOCK || 320 cmdr == MMC_WRITE_MULTIPLE_BLOCK || 321 cmdr == MMC_SEND_TUNING_BLOCK || 322 cmdr == MMC_SEND_TUNING_BLOCK_HS200) { 323 stop->opcode = MMC_STOP_TRANSMISSION; 324 stop->arg = 0; 325 stop->flags = MMC_RSP_R1B | MMC_CMD_AC; 326 } else if (cmdr == SD_IO_RW_EXTENDED) { 327 stop->opcode = SD_IO_RW_DIRECT; 328 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) | 329 ((cmd->arg >> 28) & 0x7); 330 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC; 331 } else { 332 return 0; 333 } 334 335 cmdr = 
stop->opcode | SDMMC_CMD_STOP | 336 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP; 337 338 if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->cur_slot->flags)) 339 cmdr |= SDMMC_CMD_USE_HOLD_REG; 340 341 return cmdr; 342} 343 344static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags) 345{ 346 unsigned long timeout = jiffies + msecs_to_jiffies(500); 347 348 /* 349 * Databook says that before issuing a new data transfer command 350 * we need to check to see if the card is busy. Data transfer commands 351 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that. 352 * 353 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is 354 * expected. 355 */ 356 if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) && 357 !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) { 358 while (mci_readl(host, STATUS) & SDMMC_STATUS_BUSY) { 359 if (time_after(jiffies, timeout)) { 360 /* Command will fail; we'll pass error then */ 361 dev_err(host->dev, "Busy; trying anyway\n"); 362 break; 363 } 364 udelay(10); 365 } 366 } 367} 368 369static void dw_mci_start_command(struct dw_mci *host, 370 struct mmc_command *cmd, u32 cmd_flags) 371{ 372 host->cmd = cmd; 373 dev_vdbg(host->dev, 374 "start command: ARGR=0x%08x CMDR=0x%08x\n", 375 cmd->arg, cmd_flags); 376 377 mci_writel(host, CMDARG, cmd->arg); 378 wmb(); /* drain writebuffer */ 379 dw_mci_wait_while_busy(host, cmd_flags); 380 381 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START); 382} 383 384static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data) 385{ 386 struct mmc_command *stop = &host->stop_abort; 387 388 dw_mci_start_command(host, stop, host->stop_cmdr); 389} 390 391/* DMA interface functions */ 392static void dw_mci_stop_dma(struct dw_mci *host) 393{ 394 if (host->using_dma) { 395 host->dma_ops->stop(host); 396 host->dma_ops->cleanup(host); 397 } 398 399 /* Data transfer was stopped by the interrupt handler */ 400 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 401} 402 403static int dw_mci_get_dma_dir(struct 
mmc_data *data) 404{ 405 if (data->flags & MMC_DATA_WRITE) 406 return DMA_TO_DEVICE; 407 else 408 return DMA_FROM_DEVICE; 409} 410 411static void dw_mci_dma_cleanup(struct dw_mci *host) 412{ 413 struct mmc_data *data = host->data; 414 415 if (data && data->host_cookie == COOKIE_MAPPED) { 416 dma_unmap_sg(host->dev, 417 data->sg, 418 data->sg_len, 419 dw_mci_get_dma_dir(data)); 420 data->host_cookie = COOKIE_UNMAPPED; 421 } 422} 423 424static void dw_mci_idmac_reset(struct dw_mci *host) 425{ 426 u32 bmod = mci_readl(host, BMOD); 427 /* Software reset of DMA */ 428 bmod |= SDMMC_IDMAC_SWRESET; 429 mci_writel(host, BMOD, bmod); 430} 431 432static void dw_mci_idmac_stop_dma(struct dw_mci *host) 433{ 434 u32 temp; 435 436 /* Disable and reset the IDMAC interface */ 437 temp = mci_readl(host, CTRL); 438 temp &= ~SDMMC_CTRL_USE_IDMAC; 439 temp |= SDMMC_CTRL_DMA_RESET; 440 mci_writel(host, CTRL, temp); 441 442 /* Stop the IDMAC running */ 443 temp = mci_readl(host, BMOD); 444 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB); 445 temp |= SDMMC_IDMAC_SWRESET; 446 mci_writel(host, BMOD, temp); 447} 448 449static void dw_mci_dmac_complete_dma(void *arg) 450{ 451 struct dw_mci *host = arg; 452 struct mmc_data *data = host->data; 453 454 dev_vdbg(host->dev, "DMA complete\n"); 455 456 if ((host->use_dma == TRANS_MODE_EDMAC) && 457 data && (data->flags & MMC_DATA_READ)) 458 /* Invalidate cache after read */ 459 dma_sync_sg_for_cpu(mmc_dev(host->cur_slot->mmc), 460 data->sg, 461 data->sg_len, 462 DMA_FROM_DEVICE); 463 464 host->dma_ops->cleanup(host); 465 466 /* 467 * If the card was removed, data will be NULL. No point in trying to 468 * send the stop command or waiting for NBUSY in this case. 
469 */ 470 if (data) { 471 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 472 tasklet_schedule(&host->tasklet); 473 } 474} 475 476static int dw_mci_idmac_init(struct dw_mci *host) 477{ 478 int i; 479 480 if (host->dma_64bit_address == 1) { 481 struct idmac_desc_64addr *p; 482 /* Number of descriptors in the ring buffer */ 483 host->ring_size = 484 DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr); 485 486 /* Forward link the descriptor list */ 487 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; 488 i++, p++) { 489 p->des6 = (host->sg_dma + 490 (sizeof(struct idmac_desc_64addr) * 491 (i + 1))) & 0xffffffff; 492 493 p->des7 = (u64)(host->sg_dma + 494 (sizeof(struct idmac_desc_64addr) * 495 (i + 1))) >> 32; 496 /* Initialize reserved and buffer size fields to "0" */ 497 p->des1 = 0; 498 p->des2 = 0; 499 p->des3 = 0; 500 } 501 502 /* Set the last descriptor as the end-of-ring descriptor */ 503 p->des6 = host->sg_dma & 0xffffffff; 504 p->des7 = (u64)host->sg_dma >> 32; 505 p->des0 = IDMAC_DES0_ER; 506 507 } else { 508 struct idmac_desc *p; 509 /* Number of descriptors in the ring buffer */ 510 host->ring_size = 511 DESC_RING_BUF_SZ / sizeof(struct idmac_desc); 512 513 /* Forward link the descriptor list */ 514 for (i = 0, p = host->sg_cpu; 515 i < host->ring_size - 1; 516 i++, p++) { 517 p->des3 = cpu_to_le32(host->sg_dma + 518 (sizeof(struct idmac_desc) * (i + 1))); 519 p->des1 = 0; 520 } 521 522 /* Set the last descriptor as the end-of-ring descriptor */ 523 p->des3 = cpu_to_le32(host->sg_dma); 524 p->des0 = cpu_to_le32(IDMAC_DES0_ER); 525 } 526 527 dw_mci_idmac_reset(host); 528 529 if (host->dma_64bit_address == 1) { 530 /* Mask out interrupts - get Tx & Rx complete only */ 531 mci_writel(host, IDSTS64, IDMAC_INT_CLR); 532 mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI | 533 SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI); 534 535 /* Set the descriptor base address */ 536 mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff); 537 mci_writel(host, DBADDRU, 
(u64)host->sg_dma >> 32); 538 539 } else { 540 /* Mask out interrupts - get Tx & Rx complete only */ 541 mci_writel(host, IDSTS, IDMAC_INT_CLR); 542 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | 543 SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI); 544 545 /* Set the descriptor base address */ 546 mci_writel(host, DBADDR, host->sg_dma); 547 } 548 549 return 0; 550} 551 552static inline int dw_mci_prepare_desc64(struct dw_mci *host, 553 struct mmc_data *data, 554 unsigned int sg_len) 555{ 556 unsigned int desc_len; 557 struct idmac_desc_64addr *desc_first, *desc_last, *desc; 558 unsigned long timeout; 559 int i; 560 561 desc_first = desc_last = desc = host->sg_cpu; 562 563 for (i = 0; i < sg_len; i++) { 564 unsigned int length = sg_dma_len(&data->sg[i]); 565 566 u64 mem_addr = sg_dma_address(&data->sg[i]); 567 568 for ( ; length ; desc++) { 569 desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ? 570 length : DW_MCI_DESC_DATA_LENGTH; 571 572 length -= desc_len; 573 574 /* 575 * Wait for the former clear OWN bit operation 576 * of IDMAC to make sure that this descriptor 577 * isn't still owned by IDMAC as IDMAC's write 578 * ops and CPU's read ops are asynchronous. 
579 */ 580 timeout = jiffies + msecs_to_jiffies(100); 581 while (readl(&desc->des0) & IDMAC_DES0_OWN) { 582 if (time_after(jiffies, timeout)) 583 goto err_own_bit; 584 udelay(10); 585 } 586 587 /* 588 * Set the OWN bit and disable interrupts 589 * for this descriptor 590 */ 591 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | 592 IDMAC_DES0_CH; 593 594 /* Buffer length */ 595 IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len); 596 597 /* Physical address to DMA to/from */ 598 desc->des4 = mem_addr & 0xffffffff; 599 desc->des5 = mem_addr >> 32; 600 601 /* Update physical address for the next desc */ 602 mem_addr += desc_len; 603 604 /* Save pointer to the last descriptor */ 605 desc_last = desc; 606 } 607 } 608 609 /* Set first descriptor */ 610 desc_first->des0 |= IDMAC_DES0_FD; 611 612 /* Set last descriptor */ 613 desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC); 614 desc_last->des0 |= IDMAC_DES0_LD; 615 616 return 0; 617err_own_bit: 618 /* restore the descriptor chain as it's polluted */ 619 dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n"); 620 memset(host->sg_cpu, 0, DESC_RING_BUF_SZ); 621 dw_mci_idmac_init(host); 622 return -EINVAL; 623} 624 625 626static inline int dw_mci_prepare_desc32(struct dw_mci *host, 627 struct mmc_data *data, 628 unsigned int sg_len) 629{ 630 unsigned int desc_len; 631 struct idmac_desc *desc_first, *desc_last, *desc; 632 unsigned long timeout; 633 int i; 634 635 desc_first = desc_last = desc = host->sg_cpu; 636 637 for (i = 0; i < sg_len; i++) { 638 unsigned int length = sg_dma_len(&data->sg[i]); 639 640 u32 mem_addr = sg_dma_address(&data->sg[i]); 641 642 for ( ; length ; desc++) { 643 desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ? 644 length : DW_MCI_DESC_DATA_LENGTH; 645 646 length -= desc_len; 647 648 /* 649 * Wait for the former clear OWN bit operation 650 * of IDMAC to make sure that this descriptor 651 * isn't still owned by IDMAC as IDMAC's write 652 * ops and CPU's read ops are asynchronous. 
653 */ 654 timeout = jiffies + msecs_to_jiffies(100); 655 while (readl(&desc->des0) & 656 cpu_to_le32(IDMAC_DES0_OWN)) { 657 if (time_after(jiffies, timeout)) 658 goto err_own_bit; 659 udelay(10); 660 } 661 662 /* 663 * Set the OWN bit and disable interrupts 664 * for this descriptor 665 */ 666 desc->des0 = cpu_to_le32(IDMAC_DES0_OWN | 667 IDMAC_DES0_DIC | 668 IDMAC_DES0_CH); 669 670 /* Buffer length */ 671 IDMAC_SET_BUFFER1_SIZE(desc, desc_len); 672 673 /* Physical address to DMA to/from */ 674 desc->des2 = cpu_to_le32(mem_addr); 675 676 /* Update physical address for the next desc */ 677 mem_addr += desc_len; 678 679 /* Save pointer to the last descriptor */ 680 desc_last = desc; 681 } 682 } 683 684 /* Set first descriptor */ 685 desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD); 686 687 /* Set last descriptor */ 688 desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH | 689 IDMAC_DES0_DIC)); 690 desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD); 691 692 return 0; 693err_own_bit: 694 /* restore the descriptor chain as it's polluted */ 695 dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n"); 696 memset(host->sg_cpu, 0, DESC_RING_BUF_SZ); 697 dw_mci_idmac_init(host); 698 return -EINVAL; 699} 700 701static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len) 702{ 703 u32 temp; 704 int ret; 705 706 if (host->dma_64bit_address == 1) 707 ret = dw_mci_prepare_desc64(host, host->data, sg_len); 708 else 709 ret = dw_mci_prepare_desc32(host, host->data, sg_len); 710 711 if (ret) 712 goto out; 713 714 /* drain writebuffer */ 715 wmb(); 716 717 /* Make sure to reset DMA in case we did PIO before this */ 718 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET); 719 dw_mci_idmac_reset(host); 720 721 /* Select IDMAC interface */ 722 temp = mci_readl(host, CTRL); 723 temp |= SDMMC_CTRL_USE_IDMAC; 724 mci_writel(host, CTRL, temp); 725 726 /* drain writebuffer */ 727 wmb(); 728 729 /* Enable the IDMAC */ 730 temp = mci_readl(host, BMOD); 731 temp |= SDMMC_IDMAC_ENABLE | 
SDMMC_IDMAC_FB; 732 mci_writel(host, BMOD, temp); 733 734 /* Start it running */ 735 mci_writel(host, PLDMND, 1); 736 737out: 738 return ret; 739} 740 741static const struct dw_mci_dma_ops dw_mci_idmac_ops = { 742 .init = dw_mci_idmac_init, 743 .start = dw_mci_idmac_start_dma, 744 .stop = dw_mci_idmac_stop_dma, 745 .complete = dw_mci_dmac_complete_dma, 746 .cleanup = dw_mci_dma_cleanup, 747}; 748 749static void dw_mci_edmac_stop_dma(struct dw_mci *host) 750{ 751 dmaengine_terminate_async(host->dms->ch); 752} 753 754static int dw_mci_edmac_start_dma(struct dw_mci *host, 755 unsigned int sg_len) 756{ 757 struct dma_slave_config cfg; 758 struct dma_async_tx_descriptor *desc = NULL; 759 struct scatterlist *sgl = host->data->sg; 760 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; 761 u32 sg_elems = host->data->sg_len; 762 u32 fifoth_val; 763 u32 fifo_offset = host->fifo_reg - host->regs; 764 int ret = 0; 765 766 /* Set external dma config: burst size, burst width */ 767 cfg.dst_addr = host->phy_regs + fifo_offset; 768 cfg.src_addr = cfg.dst_addr; 769 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 770 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 771 772 /* Match burst msize with external dma config */ 773 fifoth_val = mci_readl(host, FIFOTH); 774 cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7]; 775 cfg.src_maxburst = cfg.dst_maxburst; 776 777 if (host->data->flags & MMC_DATA_WRITE) 778 cfg.direction = DMA_MEM_TO_DEV; 779 else 780 cfg.direction = DMA_DEV_TO_MEM; 781 782 ret = dmaengine_slave_config(host->dms->ch, &cfg); 783 if (ret) { 784 dev_err(host->dev, "Failed to config edmac.\n"); 785 return -EBUSY; 786 } 787 788 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, 789 sg_len, cfg.direction, 790 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 791 if (!desc) { 792 dev_err(host->dev, "Can't prepare slave sg.\n"); 793 return -EBUSY; 794 } 795 796 /* Set dw_mci_dmac_complete_dma as callback */ 797 desc->callback = dw_mci_dmac_complete_dma; 798 desc->callback_param = (void 
*)host; 799 dmaengine_submit(desc); 800 801 /* Flush cache before write */ 802 if (host->data->flags & MMC_DATA_WRITE) 803 dma_sync_sg_for_device(mmc_dev(host->cur_slot->mmc), sgl, 804 sg_elems, DMA_TO_DEVICE); 805 806 dma_async_issue_pending(host->dms->ch); 807 808 return 0; 809} 810 811static int dw_mci_edmac_init(struct dw_mci *host) 812{ 813 /* Request external dma channel */ 814 host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL); 815 if (!host->dms) 816 return -ENOMEM; 817 818 host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx"); 819 if (!host->dms->ch) { 820 dev_err(host->dev, "Failed to get external DMA channel.\n"); 821 kfree(host->dms); 822 host->dms = NULL; 823 return -ENXIO; 824 } 825 826 return 0; 827} 828 829static void dw_mci_edmac_exit(struct dw_mci *host) 830{ 831 if (host->dms) { 832 if (host->dms->ch) { 833 dma_release_channel(host->dms->ch); 834 host->dms->ch = NULL; 835 } 836 kfree(host->dms); 837 host->dms = NULL; 838 } 839} 840 841static const struct dw_mci_dma_ops dw_mci_edmac_ops = { 842 .init = dw_mci_edmac_init, 843 .exit = dw_mci_edmac_exit, 844 .start = dw_mci_edmac_start_dma, 845 .stop = dw_mci_edmac_stop_dma, 846 .complete = dw_mci_dmac_complete_dma, 847 .cleanup = dw_mci_dma_cleanup, 848}; 849 850static int dw_mci_pre_dma_transfer(struct dw_mci *host, 851 struct mmc_data *data, 852 int cookie) 853{ 854 struct scatterlist *sg; 855 unsigned int i, sg_len; 856 857 if (data->host_cookie == COOKIE_PRE_MAPPED) 858 return data->sg_len; 859 860 /* 861 * We don't do DMA on "complex" transfers, i.e. with 862 * non-word-aligned buffers or lengths. Also, we don't bother 863 * with all the DMA setup overhead for short transfers. 
864 */ 865 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD) 866 return -EINVAL; 867 868 if (data->blksz & 3) 869 return -EINVAL; 870 871 for_each_sg(data->sg, sg, data->sg_len, i) { 872 if (sg->offset & 3 || sg->length & 3) 873 return -EINVAL; 874 } 875 876 sg_len = dma_map_sg(host->dev, 877 data->sg, 878 data->sg_len, 879 dw_mci_get_dma_dir(data)); 880 if (sg_len == 0) 881 return -EINVAL; 882 883 data->host_cookie = cookie; 884 885 return sg_len; 886} 887 888static void dw_mci_pre_req(struct mmc_host *mmc, 889 struct mmc_request *mrq) 890{ 891 struct dw_mci_slot *slot = mmc_priv(mmc); 892 struct mmc_data *data = mrq->data; 893 894 if (!slot->host->use_dma || !data) 895 return; 896 897 /* This data might be unmapped at this time */ 898 data->host_cookie = COOKIE_UNMAPPED; 899 900 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 901 COOKIE_PRE_MAPPED) < 0) 902 data->host_cookie = COOKIE_UNMAPPED; 903} 904 905static void dw_mci_post_req(struct mmc_host *mmc, 906 struct mmc_request *mrq, 907 int err) 908{ 909 struct dw_mci_slot *slot = mmc_priv(mmc); 910 struct mmc_data *data = mrq->data; 911 912 if (!slot->host->use_dma || !data) 913 return; 914 915 if (data->host_cookie != COOKIE_UNMAPPED) 916 dma_unmap_sg(slot->host->dev, 917 data->sg, 918 data->sg_len, 919 dw_mci_get_dma_dir(data)); 920 data->host_cookie = COOKIE_UNMAPPED; 921} 922 923static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data) 924{ 925 unsigned int blksz = data->blksz; 926 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; 927 u32 fifo_width = 1 << host->data_shift; 928 u32 blksz_depth = blksz / fifo_width, fifoth_val; 929 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers; 930 int idx = ARRAY_SIZE(mszs) - 1; 931 932 /* pio should ship this scenario */ 933 if (!host->use_dma) 934 return; 935 936 tx_wmark = (host->fifo_depth) / 2; 937 tx_wmark_invers = host->fifo_depth - tx_wmark; 938 939 /* 940 * MSIZE is '1', 941 * if blksz is not a multiple of the FIFO width 942 */ 
943 if (blksz % fifo_width) 944 goto done; 945 946 do { 947 if (!((blksz_depth % mszs[idx]) || 948 (tx_wmark_invers % mszs[idx]))) { 949 msize = idx; 950 rx_wmark = mszs[idx] - 1; 951 break; 952 } 953 } while (--idx > 0); 954 /* 955 * If idx is '0', it won't be tried 956 * Thus, initial values are uesed 957 */ 958done: 959 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark); 960 mci_writel(host, FIFOTH, fifoth_val); 961} 962 963static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data) 964{ 965 unsigned int blksz = data->blksz; 966 u32 blksz_depth, fifo_depth; 967 u16 thld_size; 968 u8 enable; 969 970 /* 971 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is 972 * in the FIFO region, so we really shouldn't access it). 973 */ 974 if (host->verid < DW_MMC_240A || 975 (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE)) 976 return; 977 978 /* 979 * Card write Threshold is introduced since 2.80a 980 * It's used when HS400 mode is enabled. 981 */ 982 if (data->flags & MMC_DATA_WRITE && 983 !(host->timing != MMC_TIMING_MMC_HS400)) 984 return; 985 986 if (data->flags & MMC_DATA_WRITE) 987 enable = SDMMC_CARD_WR_THR_EN; 988 else 989 enable = SDMMC_CARD_RD_THR_EN; 990 991 if (host->timing != MMC_TIMING_MMC_HS200 && 992 host->timing != MMC_TIMING_UHS_SDR104) 993 goto disable; 994 995 blksz_depth = blksz / (1 << host->data_shift); 996 fifo_depth = host->fifo_depth; 997 998 if (blksz_depth > fifo_depth) 999 goto disable; 1000 1001 /* 1002 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz' 1003 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz 1004 * Currently just choose blksz. 
1005 */ 1006 thld_size = blksz; 1007 mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable)); 1008 return; 1009 1010disable: 1011 mci_writel(host, CDTHRCTL, 0); 1012} 1013 1014static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) 1015{ 1016 unsigned long irqflags; 1017 int sg_len; 1018 u32 temp; 1019 1020 host->using_dma = 0; 1021 1022 /* If we don't have a channel, we can't do DMA */ 1023 if (!host->use_dma) 1024 return -ENODEV; 1025 1026 sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED); 1027 if (sg_len < 0) { 1028 host->dma_ops->stop(host); 1029 return sg_len; 1030 } 1031 1032 host->using_dma = 1; 1033 1034 if (host->use_dma == TRANS_MODE_IDMAC) 1035 dev_vdbg(host->dev, 1036 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n", 1037 (unsigned long)host->sg_cpu, 1038 (unsigned long)host->sg_dma, 1039 sg_len); 1040 1041 /* 1042 * Decide the MSIZE and RX/TX Watermark. 1043 * If current block size is same with previous size, 1044 * no need to update fifoth. 
1045 */ 1046 if (host->prev_blksz != data->blksz) 1047 dw_mci_adjust_fifoth(host, data); 1048 1049 /* Enable the DMA interface */ 1050 temp = mci_readl(host, CTRL); 1051 temp |= SDMMC_CTRL_DMA_ENABLE; 1052 mci_writel(host, CTRL, temp); 1053 1054 /* Disable RX/TX IRQs, let DMA handle it */ 1055 spin_lock_irqsave(&host->irq_lock, irqflags); 1056 temp = mci_readl(host, INTMASK); 1057 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR); 1058 mci_writel(host, INTMASK, temp); 1059 spin_unlock_irqrestore(&host->irq_lock, irqflags); 1060 1061 if (host->dma_ops->start(host, sg_len)) { 1062 host->dma_ops->stop(host); 1063 /* We can't do DMA, try PIO for this one */ 1064 dev_dbg(host->dev, 1065 "%s: fall back to PIO mode for current transfer\n", 1066 __func__); 1067 return -ENODEV; 1068 } 1069 1070 return 0; 1071} 1072 1073static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data) 1074{ 1075 unsigned long irqflags; 1076 int flags = SG_MITER_ATOMIC; 1077 u32 temp; 1078 1079 data->error = -EINPROGRESS; 1080 1081 WARN_ON(host->data); 1082 host->sg = NULL; 1083 host->data = data; 1084 1085 if (data->flags & MMC_DATA_READ) 1086 host->dir_status = DW_MCI_RECV_STATUS; 1087 else 1088 host->dir_status = DW_MCI_SEND_STATUS; 1089 1090 dw_mci_ctrl_thld(host, data); 1091 1092 if (dw_mci_submit_data_dma(host, data)) { 1093 if (host->data->flags & MMC_DATA_READ) 1094 flags |= SG_MITER_TO_SG; 1095 else 1096 flags |= SG_MITER_FROM_SG; 1097 1098 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 1099 host->sg = data->sg; 1100 host->part_buf_start = 0; 1101 host->part_buf_count = 0; 1102 1103 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR); 1104 1105 spin_lock_irqsave(&host->irq_lock, irqflags); 1106 temp = mci_readl(host, INTMASK); 1107 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR; 1108 mci_writel(host, INTMASK, temp); 1109 spin_unlock_irqrestore(&host->irq_lock, irqflags); 1110 1111 temp = mci_readl(host, CTRL); 1112 temp &= ~SDMMC_CTRL_DMA_ENABLE; 1113 
mci_writel(host, CTRL, temp); 1114 1115 /* 1116 * Use the initial fifoth_val for PIO mode. If wm_algined 1117 * is set, we set watermark same as data size. 1118 * If next issued data may be transfered by DMA mode, 1119 * prev_blksz should be invalidated. 1120 */ 1121 if (host->wm_aligned) 1122 dw_mci_adjust_fifoth(host, data); 1123 else 1124 mci_writel(host, FIFOTH, host->fifoth_val); 1125 host->prev_blksz = 0; 1126 } else { 1127 /* 1128 * Keep the current block size. 1129 * It will be used to decide whether to update 1130 * fifoth register next time. 1131 */ 1132 host->prev_blksz = data->blksz; 1133 } 1134} 1135 1136static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg) 1137{ 1138 struct dw_mci *host = slot->host; 1139 unsigned long timeout = jiffies + msecs_to_jiffies(500); 1140 unsigned int cmd_status = 0; 1141 1142 mci_writel(host, CMDARG, arg); 1143 wmb(); /* drain writebuffer */ 1144 dw_mci_wait_while_busy(host, cmd); 1145 mci_writel(host, CMD, SDMMC_CMD_START | cmd); 1146 1147 while (time_before(jiffies, timeout)) { 1148 cmd_status = mci_readl(host, CMD); 1149 if (!(cmd_status & SDMMC_CMD_START)) 1150 return; 1151 } 1152 dev_err(&slot->mmc->class_dev, 1153 "Timeout sending command (cmd %#x arg %#x status %#x)\n", 1154 cmd, arg, cmd_status); 1155} 1156 1157static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit) 1158{ 1159 struct dw_mci *host = slot->host; 1160 unsigned int clock = slot->clock; 1161 u32 div; 1162 u32 clk_en_a; 1163 u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT; 1164 1165 /* We must continue to set bit 28 in CMD until the change is complete */ 1166 if (host->state == STATE_WAITING_CMD11_DONE) 1167 sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH; 1168 1169 if (!clock) { 1170 mci_writel(host, CLKENA, 0); 1171 mci_send_cmd(slot, sdmmc_cmd_bits, 0); 1172 } else if (clock != host->current_speed || force_clkinit) { 1173 div = host->bus_hz / clock; 1174 if (host->bus_hz % clock && host->bus_hz > clock) 1175 
/* 1176 * move the + 1 after the divide to prevent 1177 * over-clocking the card. 1178 */ 1179 div += 1; 1180 1181 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0; 1182 1183 if ((clock != slot->__clk_old && 1184 !test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) || 1185 force_clkinit) { 1186 /* Silent the verbose log if calling from PM context */ 1187 if (!force_clkinit) 1188 dev_info(&slot->mmc->class_dev, 1189 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n", 1190 slot->id, host->bus_hz, clock, 1191 div ? ((host->bus_hz / div) >> 1) : 1192 host->bus_hz, div); 1193 1194 /* 1195 * If card is polling, display the message only 1196 * one time at boot time. 1197 */ 1198 if (slot->mmc->caps & MMC_CAP_NEEDS_POLL && 1199 slot->mmc->f_min == clock) 1200 set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags); 1201 } 1202 1203 /* disable clock */ 1204 mci_writel(host, CLKENA, 0); 1205 mci_writel(host, CLKSRC, 0); 1206 1207 /* inform CIU */ 1208 mci_send_cmd(slot, sdmmc_cmd_bits, 0); 1209 1210 /* set clock to desired speed */ 1211 mci_writel(host, CLKDIV, div); 1212 1213 /* inform CIU */ 1214 mci_send_cmd(slot, sdmmc_cmd_bits, 0); 1215 1216 /* enable clock; only low power if no SDIO */ 1217 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id; 1218 if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) 1219 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id; 1220 mci_writel(host, CLKENA, clk_en_a); 1221 1222 /* inform CIU */ 1223 mci_send_cmd(slot, sdmmc_cmd_bits, 0); 1224 1225 /* keep the last clock value that was requested from core */ 1226 slot->__clk_old = clock; 1227 } 1228 1229 host->current_speed = clock; 1230 1231 /* Set the current slot bus width */ 1232 mci_writel(host, CTYPE, (slot->ctype << slot->id)); 1233} 1234 1235static void __dw_mci_start_request(struct dw_mci *host, 1236 struct dw_mci_slot *slot, 1237 struct mmc_command *cmd) 1238{ 1239 struct mmc_request *mrq; 1240 struct mmc_data *data; 1241 u32 cmdflags; 1242 1243 mrq = slot->mrq; 1244 1245 host->cur_slot 
= slot; 1246 host->mrq = mrq; 1247 1248 host->pending_events = 0; 1249 host->completed_events = 0; 1250 host->cmd_status = 0; 1251 host->data_status = 0; 1252 host->dir_status = 0; 1253 1254 data = cmd->data; 1255 if (data) { 1256 mci_writel(host, TMOUT, 0xFFFFFFFF); 1257 mci_writel(host, BYTCNT, data->blksz*data->blocks); 1258 mci_writel(host, BLKSIZ, data->blksz); 1259 } 1260 1261 cmdflags = dw_mci_prepare_command(slot->mmc, cmd); 1262 1263 /* this is the first command, send the initialization clock */ 1264 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags)) 1265 cmdflags |= SDMMC_CMD_INIT; 1266 1267 if (data) { 1268 dw_mci_submit_data(host, data); 1269 wmb(); /* drain writebuffer */ 1270 } 1271 1272 dw_mci_start_command(host, cmd, cmdflags); 1273 1274 if (cmd->opcode == SD_SWITCH_VOLTAGE) { 1275 unsigned long irqflags; 1276 1277 /* 1278 * Databook says to fail after 2ms w/ no response, but evidence 1279 * shows that sometimes the cmd11 interrupt takes over 130ms. 1280 * We'll set to 500ms, plus an extra jiffy just in case jiffies 1281 * is just about to roll over. 1282 * 1283 * We do this whole thing under spinlock and only if the 1284 * command hasn't already completed (indicating the the irq 1285 * already ran so we don't want the timeout). 1286 */ 1287 spin_lock_irqsave(&host->irq_lock, irqflags); 1288 if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) 1289 mod_timer(&host->cmd11_timer, 1290 jiffies + msecs_to_jiffies(500) + 1); 1291 spin_unlock_irqrestore(&host->irq_lock, irqflags); 1292 } 1293 1294 host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd); 1295} 1296 1297static void dw_mci_start_request(struct dw_mci *host, 1298 struct dw_mci_slot *slot) 1299{ 1300 struct mmc_request *mrq = slot->mrq; 1301 struct mmc_command *cmd; 1302 1303 cmd = mrq->sbc ? 
mrq->sbc : mrq->cmd; 1304 __dw_mci_start_request(host, slot, cmd); 1305} 1306 1307/* must be called with host->lock held */ 1308static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot, 1309 struct mmc_request *mrq) 1310{ 1311 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n", 1312 host->state); 1313 1314 slot->mrq = mrq; 1315 1316 if (host->state == STATE_WAITING_CMD11_DONE) { 1317 dev_warn(&slot->mmc->class_dev, 1318 "Voltage change didn't complete\n"); 1319 /* 1320 * this case isn't expected to happen, so we can 1321 * either crash here or just try to continue on 1322 * in the closest possible state 1323 */ 1324 host->state = STATE_IDLE; 1325 } 1326 1327 if (host->state == STATE_IDLE) { 1328 host->state = STATE_SENDING_CMD; 1329 dw_mci_start_request(host, slot); 1330 } else { 1331 list_add_tail(&slot->queue_node, &host->queue); 1332 } 1333} 1334 1335static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) 1336{ 1337 struct dw_mci_slot *slot = mmc_priv(mmc); 1338 struct dw_mci *host = slot->host; 1339 1340 WARN_ON(slot->mrq); 1341 1342 /* 1343 * The check for card presence and queueing of the request must be 1344 * atomic, otherwise the card could be removed in between and the 1345 * request wouldn't fail until another card was inserted. 
1346 */ 1347 1348 if (!dw_mci_get_cd(mmc)) { 1349 mrq->cmd->error = -ENOMEDIUM; 1350 mmc_request_done(mmc, mrq); 1351 return; 1352 } 1353 1354 spin_lock_bh(&host->lock); 1355 1356 dw_mci_queue_request(host, slot, mrq); 1357 1358 spin_unlock_bh(&host->lock); 1359} 1360 1361static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 1362{ 1363 struct dw_mci_slot *slot = mmc_priv(mmc); 1364 const struct dw_mci_drv_data *drv_data = slot->host->drv_data; 1365 u32 regs; 1366 int ret; 1367 1368 switch (ios->bus_width) { 1369 case MMC_BUS_WIDTH_4: 1370 slot->ctype = SDMMC_CTYPE_4BIT; 1371 break; 1372 case MMC_BUS_WIDTH_8: 1373 slot->ctype = SDMMC_CTYPE_8BIT; 1374 break; 1375 default: 1376 /* set default 1 bit mode */ 1377 slot->ctype = SDMMC_CTYPE_1BIT; 1378 } 1379 1380 regs = mci_readl(slot->host, UHS_REG); 1381 1382 /* DDR mode set */ 1383 if (ios->timing == MMC_TIMING_MMC_DDR52 || 1384 ios->timing == MMC_TIMING_UHS_DDR50 || 1385 ios->timing == MMC_TIMING_MMC_HS400) 1386 regs |= ((0x1 << slot->id) << 16); 1387 else 1388 regs &= ~((0x1 << slot->id) << 16); 1389 1390 mci_writel(slot->host, UHS_REG, regs); 1391 slot->host->timing = ios->timing; 1392 1393 /* 1394 * Use mirror of ios->clock to prevent race with mmc 1395 * core ios update when finding the minimum. 
1396 */ 1397 slot->clock = ios->clock; 1398 1399 if (drv_data && drv_data->set_ios) 1400 drv_data->set_ios(slot->host, ios); 1401 1402 switch (ios->power_mode) { 1403 case MMC_POWER_UP: 1404 if (!IS_ERR(mmc->supply.vmmc)) { 1405 ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 1406 ios->vdd); 1407 if (ret) { 1408 dev_err(slot->host->dev, 1409 "failed to enable vmmc regulator\n"); 1410 /*return, if failed turn on vmmc*/ 1411 return; 1412 } 1413 } 1414 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags); 1415 regs = mci_readl(slot->host, PWREN); 1416 regs |= (1 << slot->id); 1417 mci_writel(slot->host, PWREN, regs); 1418 break; 1419 case MMC_POWER_ON: 1420 if (!slot->host->vqmmc_enabled) { 1421 if (!IS_ERR(mmc->supply.vqmmc)) { 1422 ret = regulator_enable(mmc->supply.vqmmc); 1423 if (ret < 0) 1424 dev_err(slot->host->dev, 1425 "failed to enable vqmmc\n"); 1426 else 1427 slot->host->vqmmc_enabled = true; 1428 1429 } else { 1430 /* Keep track so we don't reset again */ 1431 slot->host->vqmmc_enabled = true; 1432 } 1433 1434 /* Reset our state machine after powering on */ 1435 dw_mci_ctrl_reset(slot->host, 1436 SDMMC_CTRL_ALL_RESET_FLAGS); 1437 } 1438 1439 /* Adjust clock / bus width after power is up */ 1440 dw_mci_setup_bus(slot, false); 1441 1442 break; 1443 case MMC_POWER_OFF: 1444 /* Turn clock off before power goes down */ 1445 dw_mci_setup_bus(slot, false); 1446 1447 if (!IS_ERR(mmc->supply.vmmc)) 1448 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 1449 1450 if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled) 1451 regulator_disable(mmc->supply.vqmmc); 1452 slot->host->vqmmc_enabled = false; 1453 1454 regs = mci_readl(slot->host, PWREN); 1455 regs &= ~(1 << slot->id); 1456 mci_writel(slot->host, PWREN, regs); 1457 break; 1458 default: 1459 break; 1460 } 1461 1462 if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0) 1463 slot->host->state = STATE_IDLE; 1464} 1465 1466static int dw_mci_card_busy(struct mmc_host *mmc) 1467{ 1468 struct 
dw_mci_slot *slot = mmc_priv(mmc); 1469 u32 status; 1470 1471 /* 1472 * Check the busy bit which is low when DAT[3:0] 1473 * (the data lines) are 0000 1474 */ 1475 status = mci_readl(slot->host, STATUS); 1476 1477 return !!(status & SDMMC_STATUS_BUSY); 1478} 1479 1480static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios) 1481{ 1482 struct dw_mci_slot *slot = mmc_priv(mmc); 1483 struct dw_mci *host = slot->host; 1484 const struct dw_mci_drv_data *drv_data = host->drv_data; 1485 u32 uhs; 1486 u32 v18 = SDMMC_UHS_18V << slot->id; 1487 int ret; 1488 1489 if (drv_data && drv_data->switch_voltage) 1490 return drv_data->switch_voltage(mmc, ios); 1491 1492 /* 1493 * Program the voltage. Note that some instances of dw_mmc may use 1494 * the UHS_REG for this. For other instances (like exynos) the UHS_REG 1495 * does no harm but you need to set the regulator directly. Try both. 1496 */ 1497 uhs = mci_readl(host, UHS_REG); 1498 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) 1499 uhs &= ~v18; 1500 else 1501 uhs |= v18; 1502 1503 if (!IS_ERR(mmc->supply.vqmmc)) { 1504 ret = mmc_regulator_set_vqmmc(mmc, ios); 1505 1506 if (ret) { 1507 dev_dbg(&mmc->class_dev, 1508 "Regulator set error %d - %s V\n", 1509 ret, uhs & v18 ? "1.8" : "3.3"); 1510 return ret; 1511 } 1512 } 1513 mci_writel(host, UHS_REG, uhs); 1514 1515 return 0; 1516} 1517 1518static int dw_mci_get_ro(struct mmc_host *mmc) 1519{ 1520 int read_only; 1521 struct dw_mci_slot *slot = mmc_priv(mmc); 1522 int gpio_ro = mmc_gpio_get_ro(mmc); 1523 1524 /* Use platform get_ro function, else try on board write protect */ 1525 if (gpio_ro >= 0) 1526 read_only = gpio_ro; 1527 else 1528 read_only = 1529 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0; 1530 1531 dev_dbg(&mmc->class_dev, "card is %s\n", 1532 read_only ? 
"read-only" : "read-write"); 1533 1534 return read_only; 1535} 1536 1537static int dw_mci_get_cd(struct mmc_host *mmc) 1538{ 1539 int present; 1540 struct dw_mci_slot *slot = mmc_priv(mmc); 1541 struct dw_mci *host = slot->host; 1542 int gpio_cd = mmc_gpio_get_cd(mmc); 1543 1544 /* Use platform get_cd function, else try onboard card detect */ 1545 if (((mmc->caps & MMC_CAP_NEEDS_POLL) 1546 || !mmc_card_is_removable(mmc))) { 1547 present = 1; 1548 1549 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) { 1550 if (mmc->caps & MMC_CAP_NEEDS_POLL) { 1551 dev_info(&mmc->class_dev, 1552 "card is polling.\n"); 1553 } else { 1554 dev_info(&mmc->class_dev, 1555 "card is non-removable.\n"); 1556 } 1557 set_bit(DW_MMC_CARD_PRESENT, &slot->flags); 1558 } 1559 1560 return present; 1561 } else if (gpio_cd >= 0) 1562 present = gpio_cd; 1563 else 1564 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id)) 1565 == 0 ? 1 : 0; 1566 1567 spin_lock_bh(&host->lock); 1568 if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags)) 1569 dev_dbg(&mmc->class_dev, "card is present\n"); 1570 else if (!present && 1571 !test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags)) 1572 dev_dbg(&mmc->class_dev, "card is not present\n"); 1573 spin_unlock_bh(&host->lock); 1574 1575 return present; 1576} 1577 1578static void dw_mci_hw_reset(struct mmc_host *mmc) 1579{ 1580 struct dw_mci_slot *slot = mmc_priv(mmc); 1581 struct dw_mci *host = slot->host; 1582 int reset; 1583 1584 if (host->use_dma == TRANS_MODE_IDMAC) 1585 dw_mci_idmac_reset(host); 1586 1587 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET | 1588 SDMMC_CTRL_FIFO_RESET)) 1589 return; 1590 1591 /* 1592 * According to eMMC spec, card reset procedure: 1593 * tRstW >= 1us: RST_n pulse width 1594 * tRSCA >= 200us: RST_n to Command time 1595 * tRSTH >= 1us: RST_n high period 1596 */ 1597 reset = mci_readl(host, RST_N); 1598 reset &= ~(SDMMC_RST_HWACTIVE << slot->id); 1599 mci_writel(host, RST_N, reset); 1600 usleep_range(1, 2); 1601 
reset |= SDMMC_RST_HWACTIVE << slot->id; 1602 mci_writel(host, RST_N, reset); 1603 usleep_range(200, 300); 1604} 1605 1606static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card) 1607{ 1608 struct dw_mci_slot *slot = mmc_priv(mmc); 1609 struct dw_mci *host = slot->host; 1610 1611 /* 1612 * Low power mode will stop the card clock when idle. According to the 1613 * description of the CLKENA register we should disable low power mode 1614 * for SDIO cards if we need SDIO interrupts to work. 1615 */ 1616 if (mmc->caps & MMC_CAP_SDIO_IRQ) { 1617 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id; 1618 u32 clk_en_a_old; 1619 u32 clk_en_a; 1620 1621 clk_en_a_old = mci_readl(host, CLKENA); 1622 1623 if (card->type == MMC_TYPE_SDIO || 1624 card->type == MMC_TYPE_SD_COMBO) { 1625 if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) { 1626 pm_runtime_get_noresume(mmc->parent); 1627 set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags); 1628 } 1629 clk_en_a = clk_en_a_old & ~clken_low_pwr; 1630 } else { 1631 if (test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) { 1632 pm_runtime_put_noidle(mmc->parent); 1633 clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags); 1634 } 1635 clk_en_a = clk_en_a_old | clken_low_pwr; 1636 } 1637 1638 if (clk_en_a != clk_en_a_old) { 1639 mci_writel(host, CLKENA, clk_en_a); 1640 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | 1641 SDMMC_CMD_PRV_DAT_WAIT, 0); 1642 } 1643 } 1644} 1645 1646static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) 1647{ 1648 struct dw_mci_slot *slot = mmc_priv(mmc); 1649 struct dw_mci *host = slot->host; 1650 unsigned long irqflags; 1651 u32 int_mask; 1652 1653 spin_lock_irqsave(&host->irq_lock, irqflags); 1654 1655 /* Enable/disable Slot Specific SDIO interrupt */ 1656 int_mask = mci_readl(host, INTMASK); 1657 if (enb) 1658 int_mask |= SDMMC_INT_SDIO(slot->sdio_id); 1659 else 1660 int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id); 1661 mci_writel(host, INTMASK, int_mask); 1662 1663 
spin_unlock_irqrestore(&host->irq_lock, irqflags); 1664} 1665 1666static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode) 1667{ 1668 struct dw_mci_slot *slot = mmc_priv(mmc); 1669 struct dw_mci *host = slot->host; 1670 const struct dw_mci_drv_data *drv_data = host->drv_data; 1671 int err = -EINVAL; 1672 1673 if (drv_data && drv_data->execute_tuning) 1674 err = drv_data->execute_tuning(slot, opcode); 1675 return err; 1676} 1677 1678static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc, 1679 struct mmc_ios *ios) 1680{ 1681 struct dw_mci_slot *slot = mmc_priv(mmc); 1682 struct dw_mci *host = slot->host; 1683 const struct dw_mci_drv_data *drv_data = host->drv_data; 1684 1685 if (drv_data && drv_data->prepare_hs400_tuning) 1686 return drv_data->prepare_hs400_tuning(host, ios); 1687 1688 return 0; 1689} 1690 1691static const struct mmc_host_ops dw_mci_ops = { 1692 .request = dw_mci_request, 1693 .pre_req = dw_mci_pre_req, 1694 .post_req = dw_mci_post_req, 1695 .set_ios = dw_mci_set_ios, 1696 .get_ro = dw_mci_get_ro, 1697 .get_cd = dw_mci_get_cd, 1698 .hw_reset = dw_mci_hw_reset, 1699 .enable_sdio_irq = dw_mci_enable_sdio_irq, 1700 .execute_tuning = dw_mci_execute_tuning, 1701 .card_busy = dw_mci_card_busy, 1702 .start_signal_voltage_switch = dw_mci_switch_voltage, 1703 .init_card = dw_mci_init_card, 1704 .prepare_hs400_tuning = dw_mci_prepare_hs400_tuning, 1705}; 1706 1707static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq) 1708 __releases(&host->lock) 1709 __acquires(&host->lock) 1710{ 1711 struct dw_mci_slot *slot; 1712 struct mmc_host *prev_mmc = host->cur_slot->mmc; 1713 1714 WARN_ON(host->cmd || host->data); 1715 1716 host->cur_slot->mrq = NULL; 1717 host->mrq = NULL; 1718 if (!list_empty(&host->queue)) { 1719 slot = list_entry(host->queue.next, 1720 struct dw_mci_slot, queue_node); 1721 list_del(&slot->queue_node); 1722 dev_vdbg(host->dev, "list not empty: %s is next\n", 1723 mmc_hostname(slot->mmc)); 1724 host->state = 
STATE_SENDING_CMD; 1725 dw_mci_start_request(host, slot); 1726 } else { 1727 dev_vdbg(host->dev, "list empty\n"); 1728 1729 if (host->state == STATE_SENDING_CMD11) 1730 host->state = STATE_WAITING_CMD11_DONE; 1731 else 1732 host->state = STATE_IDLE; 1733 } 1734 1735 spin_unlock(&host->lock); 1736 mmc_request_done(prev_mmc, mrq); 1737 spin_lock(&host->lock); 1738} 1739 1740static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd) 1741{ 1742 u32 status = host->cmd_status; 1743 1744 host->cmd_status = 0; 1745 1746 /* Read the response from the card (up to 16 bytes) */ 1747 if (cmd->flags & MMC_RSP_PRESENT) { 1748 if (cmd->flags & MMC_RSP_136) { 1749 cmd->resp[3] = mci_readl(host, RESP0); 1750 cmd->resp[2] = mci_readl(host, RESP1); 1751 cmd->resp[1] = mci_readl(host, RESP2); 1752 cmd->resp[0] = mci_readl(host, RESP3); 1753 } else { 1754 cmd->resp[0] = mci_readl(host, RESP0); 1755 cmd->resp[1] = 0; 1756 cmd->resp[2] = 0; 1757 cmd->resp[3] = 0; 1758 } 1759 } 1760 1761 if (status & SDMMC_INT_RTO) 1762 cmd->error = -ETIMEDOUT; 1763 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)) 1764 cmd->error = -EILSEQ; 1765 else if (status & SDMMC_INT_RESP_ERR) 1766 cmd->error = -EIO; 1767 else 1768 cmd->error = 0; 1769 1770 return cmd->error; 1771} 1772 1773static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data) 1774{ 1775 u32 status = host->data_status; 1776 1777 if (status & DW_MCI_DATA_ERROR_FLAGS) { 1778 if (status & SDMMC_INT_DRTO) { 1779 data->error = -ETIMEDOUT; 1780 } else if (status & SDMMC_INT_DCRC) { 1781 data->error = -EILSEQ; 1782 } else if (status & SDMMC_INT_EBE) { 1783 if (host->dir_status == 1784 DW_MCI_SEND_STATUS) { 1785 /* 1786 * No data CRC status was returned. 1787 * The number of bytes transferred 1788 * will be exaggerated in PIO mode. 
1789 */ 1790 data->bytes_xfered = 0; 1791 data->error = -ETIMEDOUT; 1792 } else if (host->dir_status == 1793 DW_MCI_RECV_STATUS) { 1794 data->error = -EILSEQ; 1795 } 1796 } else { 1797 /* SDMMC_INT_SBE is included */ 1798 data->error = -EILSEQ; 1799 } 1800 1801 dev_dbg(host->dev, "data error, status 0x%08x\n", status); 1802 1803 /* 1804 * After an error, there may be data lingering 1805 * in the FIFO 1806 */ 1807 dw_mci_reset(host); 1808 } else { 1809 data->bytes_xfered = data->blocks * data->blksz; 1810 data->error = 0; 1811 } 1812 1813 return data->error; 1814} 1815 1816static void dw_mci_set_drto(struct dw_mci *host) 1817{ 1818 unsigned int drto_clks; 1819 unsigned int drto_ms; 1820 1821 drto_clks = mci_readl(host, TMOUT) >> 8; 1822 drto_ms = DIV_ROUND_UP(drto_clks, host->bus_hz / 1000); 1823 1824 /* add a bit spare time */ 1825 drto_ms += 10; 1826 1827 mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms)); 1828} 1829 1830static void dw_mci_tasklet_func(unsigned long priv) 1831{ 1832 struct dw_mci *host = (struct dw_mci *)priv; 1833 struct mmc_data *data; 1834 struct mmc_command *cmd; 1835 struct mmc_request *mrq; 1836 enum dw_mci_state state; 1837 enum dw_mci_state prev_state; 1838 unsigned int err; 1839 1840 spin_lock(&host->lock); 1841 1842 state = host->state; 1843 data = host->data; 1844 mrq = host->mrq; 1845 1846 do { 1847 prev_state = state; 1848 1849 switch (state) { 1850 case STATE_IDLE: 1851 case STATE_WAITING_CMD11_DONE: 1852 break; 1853 1854 case STATE_SENDING_CMD11: 1855 case STATE_SENDING_CMD: 1856 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, 1857 &host->pending_events)) 1858 break; 1859 1860 cmd = host->cmd; 1861 host->cmd = NULL; 1862 set_bit(EVENT_CMD_COMPLETE, &host->completed_events); 1863 err = dw_mci_command_complete(host, cmd); 1864 if (cmd == mrq->sbc && !err) { 1865 prev_state = state = STATE_SENDING_CMD; 1866 __dw_mci_start_request(host, host->cur_slot, 1867 mrq->cmd); 1868 goto unlock; 1869 } 1870 1871 if (cmd->data && err) { 
1872 /* 1873 * During UHS tuning sequence, sending the stop 1874 * command after the response CRC error would 1875 * throw the system into a confused state 1876 * causing all future tuning phases to report 1877 * failure. 1878 * 1879 * In such case controller will move into a data 1880 * transfer state after a response error or 1881 * response CRC error. Let's let that finish 1882 * before trying to send a stop, so we'll go to 1883 * STATE_SENDING_DATA. 1884 * 1885 * Although letting the data transfer take place 1886 * will waste a bit of time (we already know 1887 * the command was bad), it can't cause any 1888 * errors since it's possible it would have 1889 * taken place anyway if this tasklet got 1890 * delayed. Allowing the transfer to take place 1891 * avoids races and keeps things simple. 1892 */ 1893 if ((err != -ETIMEDOUT) && 1894 (cmd->opcode == MMC_SEND_TUNING_BLOCK)) { 1895 state = STATE_SENDING_DATA; 1896 continue; 1897 } 1898 1899 dw_mci_stop_dma(host); 1900 send_stop_abort(host, data); 1901 state = STATE_SENDING_STOP; 1902 break; 1903 } 1904 1905 if (!cmd->data || err) { 1906 dw_mci_request_end(host, mrq); 1907 goto unlock; 1908 } 1909 1910 prev_state = state = STATE_SENDING_DATA; 1911 /* fall through */ 1912 1913 case STATE_SENDING_DATA: 1914 /* 1915 * We could get a data error and never a transfer 1916 * complete so we'd better check for it here. 1917 * 1918 * Note that we don't really care if we also got a 1919 * transfer complete; stopping the DMA and sending an 1920 * abort won't hurt. 1921 */ 1922 if (test_and_clear_bit(EVENT_DATA_ERROR, 1923 &host->pending_events)) { 1924 dw_mci_stop_dma(host); 1925 if (!(host->data_status & (SDMMC_INT_DRTO | 1926 SDMMC_INT_EBE))) 1927 send_stop_abort(host, data); 1928 state = STATE_DATA_ERROR; 1929 break; 1930 } 1931 1932 if (!test_and_clear_bit(EVENT_XFER_COMPLETE, 1933 &host->pending_events)) { 1934 /* 1935 * If all data-related interrupts don't come 1936 * within the given time in reading data state. 
1937 */ 1938 if (host->dir_status == DW_MCI_RECV_STATUS) 1939 dw_mci_set_drto(host); 1940 break; 1941 } 1942 1943 set_bit(EVENT_XFER_COMPLETE, &host->completed_events); 1944 1945 /* 1946 * Handle an EVENT_DATA_ERROR that might have shown up 1947 * before the transfer completed. This might not have 1948 * been caught by the check above because the interrupt 1949 * could have gone off between the previous check and 1950 * the check for transfer complete. 1951 * 1952 * Technically this ought not be needed assuming we 1953 * get a DATA_COMPLETE eventually (we'll notice the 1954 * error and end the request), but it shouldn't hurt. 1955 * 1956 * This has the advantage of sending the stop command. 1957 */ 1958 if (test_and_clear_bit(EVENT_DATA_ERROR, 1959 &host->pending_events)) { 1960 dw_mci_stop_dma(host); 1961 if (!(host->data_status & (SDMMC_INT_DRTO | 1962 SDMMC_INT_EBE))) 1963 send_stop_abort(host, data); 1964 state = STATE_DATA_ERROR; 1965 break; 1966 } 1967 prev_state = state = STATE_DATA_BUSY; 1968 1969 /* fall through */ 1970 1971 case STATE_DATA_BUSY: 1972 if (!test_and_clear_bit(EVENT_DATA_COMPLETE, 1973 &host->pending_events)) { 1974 /* 1975 * If data error interrupt comes but data over 1976 * interrupt doesn't come within the given time. 1977 * in reading data state. 1978 */ 1979 if (host->dir_status == DW_MCI_RECV_STATUS) 1980 dw_mci_set_drto(host); 1981 break; 1982 } 1983 1984 host->data = NULL; 1985 set_bit(EVENT_DATA_COMPLETE, &host->completed_events); 1986 err = dw_mci_data_complete(host, data); 1987 1988 if (!err) { 1989 if (!data->stop || mrq->sbc) { 1990 if (mrq->sbc && data->stop) 1991 data->stop->error = 0; 1992 dw_mci_request_end(host, mrq); 1993 goto unlock; 1994 } 1995 1996 /* stop command for open-ended transfer*/ 1997 if (data->stop) 1998 send_stop_abort(host, data); 1999 } else { 2000 /* 2001 * If we don't have a command complete now we'll 2002 * never get one since we just reset everything; 2003 * better end the request. 
2004 * 2005 * If we do have a command complete we'll fall 2006 * through to the SENDING_STOP command and 2007 * everything will be peachy keen. 2008 */ 2009 if (!test_bit(EVENT_CMD_COMPLETE, 2010 &host->pending_events)) { 2011 host->cmd = NULL; 2012 dw_mci_request_end(host, mrq); 2013 goto unlock; 2014 } 2015 } 2016 2017 /* 2018 * If err has non-zero, 2019 * stop-abort command has been already issued. 2020 */ 2021 prev_state = state = STATE_SENDING_STOP; 2022 2023 /* fall through */ 2024 2025 case STATE_SENDING_STOP: 2026 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, 2027 &host->pending_events)) 2028 break; 2029 2030 /* CMD error in data command */ 2031 if (mrq->cmd->error && mrq->data) 2032 dw_mci_reset(host); 2033 2034 host->cmd = NULL; 2035 host->data = NULL; 2036 2037 if (!mrq->sbc && mrq->stop) 2038 dw_mci_command_complete(host, mrq->stop); 2039 else 2040 host->cmd_status = 0; 2041 2042 dw_mci_request_end(host, mrq); 2043 goto unlock; 2044 2045 case STATE_DATA_ERROR: 2046 if (!test_and_clear_bit(EVENT_XFER_COMPLETE, 2047 &host->pending_events)) 2048 break; 2049 2050 state = STATE_DATA_BUSY; 2051 break; 2052 } 2053 } while (state != prev_state); 2054 2055 host->state = state; 2056unlock: 2057 spin_unlock(&host->lock); 2058 2059} 2060 2061/* push final bytes to part_buf, only use during push */ 2062static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt) 2063{ 2064 memcpy((void *)&host->part_buf, buf, cnt); 2065 host->part_buf_count = cnt; 2066} 2067 2068/* append bytes to part_buf, only use during push */ 2069static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt) 2070{ 2071 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count); 2072 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt); 2073 host->part_buf_count += cnt; 2074 return cnt; 2075} 2076 2077/* pull first bytes from part_buf, only use during pull */ 2078static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt) 2079{ 2080 cnt = 
min_t(int, cnt, host->part_buf_count); 2081 if (cnt) { 2082 memcpy(buf, (void *)&host->part_buf + host->part_buf_start, 2083 cnt); 2084 host->part_buf_count -= cnt; 2085 host->part_buf_start += cnt; 2086 } 2087 return cnt; 2088} 2089 2090/* pull final bytes from the part_buf, assuming it's just been filled */ 2091static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt) 2092{ 2093 memcpy(buf, &host->part_buf, cnt); 2094 host->part_buf_start = cnt; 2095 host->part_buf_count = (1 << host->data_shift) - cnt; 2096} 2097 2098static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt) 2099{ 2100 struct mmc_data *data = host->data; 2101 int init_cnt = cnt; 2102 2103 /* try and push anything in the part_buf */ 2104 if (unlikely(host->part_buf_count)) { 2105 int len = dw_mci_push_part_bytes(host, buf, cnt); 2106 2107 buf += len; 2108 cnt -= len; 2109 if (host->part_buf_count == 2) { 2110 mci_fifo_writew(host->fifo_reg, host->part_buf16); 2111 host->part_buf_count = 0; 2112 } 2113 } 2114#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2115 if (unlikely((unsigned long)buf & 0x1)) { 2116 while (cnt >= 2) { 2117 u16 aligned_buf[64]; 2118 int len = min(cnt & -2, (int)sizeof(aligned_buf)); 2119 int items = len >> 1; 2120 int i; 2121 /* memcpy from input buffer into aligned buffer */ 2122 memcpy(aligned_buf, buf, len); 2123 buf += len; 2124 cnt -= len; 2125 /* push data from aligned buffer into fifo */ 2126 for (i = 0; i < items; ++i) 2127 mci_fifo_writew(host->fifo_reg, aligned_buf[i]); 2128 } 2129 } else 2130#endif 2131 { 2132 u16 *pdata = buf; 2133 2134 for (; cnt >= 2; cnt -= 2) 2135 mci_fifo_writew(host->fifo_reg, *pdata++); 2136 buf = pdata; 2137 } 2138 /* put anything remaining in the part_buf */ 2139 if (cnt) { 2140 dw_mci_set_part_bytes(host, buf, cnt); 2141 /* Push data if we have reached the expected data length */ 2142 if ((data->bytes_xfered + init_cnt) == 2143 (data->blksz * data->blocks)) 2144 mci_fifo_writew(host->fifo_reg, 
host->part_buf16); 2145 } 2146} 2147 2148static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt) 2149{ 2150#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2151 if (unlikely((unsigned long)buf & 0x1)) { 2152 while (cnt >= 2) { 2153 /* pull data from fifo into aligned buffer */ 2154 u16 aligned_buf[64]; 2155 int len = min(cnt & -2, (int)sizeof(aligned_buf)); 2156 int items = len >> 1; 2157 int i; 2158 2159 for (i = 0; i < items; ++i) 2160 aligned_buf[i] = mci_fifo_readw(host->fifo_reg); 2161 /* memcpy from aligned buffer into output buffer */ 2162 memcpy(buf, aligned_buf, len); 2163 buf += len; 2164 cnt -= len; 2165 } 2166 } else 2167#endif 2168 { 2169 u16 *pdata = buf; 2170 2171 for (; cnt >= 2; cnt -= 2) 2172 *pdata++ = mci_fifo_readw(host->fifo_reg); 2173 buf = pdata; 2174 } 2175 if (cnt) { 2176 host->part_buf16 = mci_fifo_readw(host->fifo_reg); 2177 dw_mci_pull_final_bytes(host, buf, cnt); 2178 } 2179} 2180 2181static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt) 2182{ 2183 struct mmc_data *data = host->data; 2184 int init_cnt = cnt; 2185 2186 /* try and push anything in the part_buf */ 2187 if (unlikely(host->part_buf_count)) { 2188 int len = dw_mci_push_part_bytes(host, buf, cnt); 2189 2190 buf += len; 2191 cnt -= len; 2192 if (host->part_buf_count == 4) { 2193 mci_fifo_writel(host->fifo_reg, host->part_buf32); 2194 host->part_buf_count = 0; 2195 } 2196 } 2197#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2198 if (unlikely((unsigned long)buf & 0x3)) { 2199 while (cnt >= 4) { 2200 u32 aligned_buf[32]; 2201 int len = min(cnt & -4, (int)sizeof(aligned_buf)); 2202 int items = len >> 2; 2203 int i; 2204 /* memcpy from input buffer into aligned buffer */ 2205 memcpy(aligned_buf, buf, len); 2206 buf += len; 2207 cnt -= len; 2208 /* push data from aligned buffer into fifo */ 2209 for (i = 0; i < items; ++i) 2210 mci_fifo_writel(host->fifo_reg, aligned_buf[i]); 2211 } 2212 } else 2213#endif 2214 { 2215 u32 *pdata = buf; 2216 2217 for (; 
cnt >= 4; cnt -= 4)
			mci_fifo_writel(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writel(host->fifo_reg, host->part_buf32);
	}
}

/* Drain the FIFO 32 bits at a time; any trailing partial word is stashed. */
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;

		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_fifo_readl(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf32 = mci_fifo_readl(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

/* Feed the FIFO 64 bits at a time; leftover bytes collect in part_buf. */
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			mci_fifo_writeq(host->fifo_reg, host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8,
				      (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writeq(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;

		for (; cnt >= 8; cnt -= 8)
			mci_fifo_writeq(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writeq(host->fifo_reg, host->part_buf);
	}
}

/* Drain the FIFO 64 bits at a time; any trailing partial word is stashed. */
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readq(host->fifo_reg);

			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;

		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_fifo_readq(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf = mci_fifo_readq(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

/* Read cnt bytes: first any bytes buffered in part_buf, then the FIFO. */
static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
	int len;

	/* get remaining partial bytes */
	len = dw_mci_pull_part_bytes(host, buf, cnt);
	if (unlikely(len == cnt))
		return;
	buf += len;
	cnt -= len;

	/* get the rest of the data */
	host->pull_data(host, buf, cnt);
}

/* PIO read path: drain the FIFO into the request's scatter-gather list. */
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* bytes available = FIFO fill level plus part_buf */
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb(); /* drain writebuffer */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

/* PIO write path: fill the FIFO from the request's scatter-gather list. */
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = 
sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* free FIFO space in bytes, minus bytes already staged */
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb(); /* drain writebuffer */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

/* Record command status and hand completion off to the tasklet. */
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	smp_wmb(); /* drain writebuffer */

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}

/* Card-detect event: notify the MMC core for every populated slot. */
static void dw_mci_handle_cd(struct dw_mci *host)
{
	int i;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];

		if (!slot)
			continue;

		if (slot->mmc->ops->card_event)
			slot->mmc->ops->card_event(slot->mmc);
		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
	}
}

/*
 * Top-half interrupt handler. Each source is acked in RINTSTS as it is
 * handled; heavy lifting is deferred to the tasklet via pending_events.
 */
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	int i;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	if (pending) {
		/* Check volt switch first, since it can look like an error */
		if ((host->state == STATE_SENDING_CMD11) &&
		    (pending & SDMMC_INT_VOLT_SWITCH)) {
			unsigned long irqflags;

			mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
			pending &= ~SDMMC_INT_VOLT_SWITCH;

			/*
			 * Hold the lock; we know cmd11_timer can't be kicked
			 * off after the lock is released, so safe to delete.
			 */
			spin_lock_irqsave(&host->irq_lock, irqflags);
			dw_mci_cmd_interrupt(host, pending);
			spin_unlock_irqrestore(&host->irq_lock, irqflags);

			del_timer(&host->cmd11_timer);
		}

		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			del_timer(&host->dto_timer);

			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			dw_mci_handle_cd(host);
		}

		/* Handle SDIO Interrupts */
		for (i = 0; i < host->num_slots; i++) {
			struct dw_mci_slot *slot = host->slot[i];

			if (!slot)
				continue;

			if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
				mci_writel(host, RINTSTS,
					   SDMMC_INT_SDIO(slot->sdio_id));
				mmc_signal_sdio_irq(slot->mmc);
			}
		}

	}

	if (host->use_dma != TRANS_MODE_IDMAC)
		return IRQ_HANDLED;

	/* Handle IDMA interrupts */
	if (host->dma_64bit_address == 1) {
		pending = mci_readl(host, IDSTS64);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
							SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
				host->dma_ops->complete((void *)host);
		}
	} else {
		pending = mci_readl(host, IDSTS);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
							SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
				host->dma_ops->complete((void *)host);
		}
	}

	return IRQ_HANDLED;
}

/* Allocate and register one mmc_host for slot @id of this controller. */
static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ctrl_id, ret;
	u32 freq[2];

	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->id = id;
	slot->sdio_id = host->sdio_id0 + id;
	slot->mmc = mmc;
	slot->host = host;
	host->slot[id] = slot;

	mmc->ops = 
&dw_mci_ops;
	if (of_property_read_u32_array(host->dev->of_node,
				       "clock-freq-min-max", freq, 2)) {
		mmc->f_min = DW_MCI_FREQ_MIN;
		mmc->f_max = DW_MCI_FREQ_MAX;
	} else {
		dev_info(host->dev,
			 "'clock-freq-min-max' property was deprecated.\n");
		mmc->f_min = freq[0];
		mmc->f_max = freq[1];
	}

	/*if there are external regulators, get them*/
	ret = mmc_regulator_get_supply(mmc);
	if (ret == -EPROBE_DEFER)
		goto err_host_allocated;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;

	/*
	 * Support MMC_CAP_ERASE by default.
	 * It needs to use trim/discard/erase commands.
	 */
	mmc->caps |= MMC_CAP_ERASE;

	if (host->pdata->pm_caps)
		mmc->pm_caps = host->pdata->pm_caps;

	if (host->dev->of_node) {
		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
		if (ctrl_id < 0)
			ctrl_id = 0;
	} else {
		ctrl_id = to_platform_device(host->dev)->id;
	}
	if (drv_data && drv_data->caps)
		mmc->caps |= drv_data->caps[ctrl_id];

	if (host->pdata->caps2)
		mmc->caps2 = host->pdata->caps2;

	ret = mmc_of_parse(mmc);
	if (ret)
		goto err_host_allocated;

	/* Useful defaults if platform data is unset. */
	if (host->use_dma == TRANS_MODE_IDMAC) {
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65535;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * host->ring_size;
		mmc->max_blk_count = mmc->max_req_size / 512;
	} else if (host->use_dma == TRANS_MODE_EDMAC) {
		mmc->max_segs = 64;
		mmc->max_blk_size = 65535;
		mmc->max_blk_count = 65535;
		mmc->max_req_size =
				mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	} else {
		/* TRANS_MODE_PIO */
		mmc->max_segs = 64;
		mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size *
				    mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	}

	dw_mci_get_cd(mmc);

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_host_allocated;

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	return 0;

err_host_allocated:
	mmc_free_host(mmc);
	return ret;
}

/* Tear down one slot: unregister from the MMC core and free the host. */
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
{
	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot[id] = NULL;
	mmc_free_host(slot->mmc);
}

/* Probe-time DMA setup: pick IDMAC/EDMAC from HCON, or fall back to PIO. */
static void dw_mci_init_dma(struct dw_mci *host)
{
	int addr_config;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;

	/*
	 * Check transfer mode from HCON[17:16]
	 * Clear the ambiguous description of dw_mmc databook:
	 * 2b'00: No DMA Interface -> Actually means using Internal DMA block
	 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
	 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
	 * 2b'11: Non DW DMA Interface -> pio only
	 * Compared to DesignWare DMA Interface, Generic DMA Interface has a
	 * simpler request/acknowledge handshake mechanism and both of them
	 * are regarded as external dma master for dw_mmc.
	 */
	host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
	if (host->use_dma == DMA_INTERFACE_IDMA) {
		host->use_dma = TRANS_MODE_IDMAC;
	} else if (host->use_dma == DMA_INTERFACE_DWDMA ||
		   host->use_dma == DMA_INTERFACE_GDMA) {
		host->use_dma = TRANS_MODE_EDMAC;
	} else {
		goto no_dma;
	}

	/* Determine which DMA interface to use */
	if (host->use_dma == TRANS_MODE_IDMAC) {
		/*
		 * Check ADDR_CONFIG bit in HCON to find
		 * IDMAC address bus width
		 */
		addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));

		if (addr_config == 1) {
			/* host supports IDMAC in 64-bit address mode */
			host->dma_64bit_address = 1;
			dev_info(host->dev,
				 "IDMAC supports 64-bit address mode.\n");
			if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
				dma_set_coherent_mask(host->dev,
						      DMA_BIT_MASK(64));
		} else {
			/* host supports IDMAC in 32-bit address mode */
			host->dma_64bit_address = 0;
			dev_info(host->dev,
				 "IDMAC supports 32-bit address mode.\n");
		}

		/* Alloc memory for sg translation */
		host->sg_cpu = dmam_alloc_coherent(host->dev,
						   DESC_RING_BUF_SZ,
						   &host->sg_dma, GFP_KERNEL);
		if (!host->sg_cpu) {
			dev_err(host->dev,
				"%s: could not alloc DMA memory\n",
				__func__);
			goto no_dma;
		}

		host->dma_ops = &dw_mci_idmac_ops;
		dev_info(host->dev, "Using internal DMA controller.\n");
	} else {
		/* TRANS_MODE_EDMAC: check dma bindings again */
		if ((of_property_count_strings(np, "dma-names") < 0) ||
		    (!of_find_property(np, "dmas", NULL))) {
			goto no_dma;
		}
		host->dma_ops = &dw_mci_edmac_ops;
		dev_info(host->dev, "Using external DMA controller.\n");
	}

	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if 
(host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
				__func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = TRANS_MODE_PIO;
}

/* Set @reset bits in CTRL and poll (up to 500 ms) for the IP to clear them. */
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	u32 ctrl;

	ctrl = mci_readl(host, CTRL);
	ctrl |= reset;
	mci_writel(host, CTRL, ctrl);

	/* wait till resets clear */
	do {
		ctrl = mci_readl(host, CTRL);
		if (!(ctrl & reset))
			return true;
	} while (time_before(jiffies, timeout));

	dev_err(host->dev,
		"Timeout resetting block (ctrl reset %#x)\n",
		ctrl & reset);

	return false;
}

/* Full controller/FIFO (and DMA, if used) reset followed by a clock update. */
static bool dw_mci_reset(struct dw_mci *host)
{
	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
	bool ret = false;

	/*
	 * Resetting generates a block interrupt, hence setting
	 * the scatter-gather pointer to NULL.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	if (host->use_dma)
		flags |= SDMMC_CTRL_DMA_RESET;

	if (dw_mci_ctrl_reset(host, flags)) {
		/*
		 * In all cases we clear the RAWINTS register to clear any
		 * interrupts.
		 */
		mci_writel(host, RINTSTS, 0xFFFFFFFF);

		/* if using dma we wait for dma_req to clear */
		if (host->use_dma) {
			unsigned long timeout = jiffies + msecs_to_jiffies(500);
			u32 status;

			do {
				status = mci_readl(host, STATUS);
				if (!(status & SDMMC_STATUS_DMA_REQ))
					break;
				cpu_relax();
			} while (time_before(jiffies, timeout));

			if (status & SDMMC_STATUS_DMA_REQ) {
				dev_err(host->dev,
					"%s: Timeout waiting for dma_req to clear during reset\n",
					__func__);
				goto ciu_out;
			}

			/* when using DMA next we reset the fifo again */
			if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
				goto ciu_out;
		}
	} else {
		/* if the controller reset bit did clear, then set clock regs */
		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
			dev_err(host->dev,
				"%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
				__func__);
			goto ciu_out;
		}
	}

	if (host->use_dma == TRANS_MODE_IDMAC)
		/* It is also recommended that we reset and reprogram idmac */
		dw_mci_idmac_reset(host);

	ret = true;

ciu_out:
	/* After a CTRL reset we need to have CIU set clock registers */
	mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);

	return ret;
}

/* CMD11 (voltage switch) watchdog: fake a response timeout on expiry. */
static void dw_mci_cmd11_timer(unsigned long arg)
{
	struct dw_mci *host = (struct dw_mci *)arg;

	if (host->state != STATE_SENDING_CMD11) {
		dev_warn(host->dev, "Unexpected CMD11 timeout\n");
		return;
	}

	host->cmd_status = SDMMC_INT_RTO;
	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}

/* Data-transfer-over watchdog: report a data timeout if DTO never fires. */
static void dw_mci_dto_timer(unsigned long arg)
{
	struct dw_mci *host = (struct dw_mci *)arg;

	switch (host->state) {
	case STATE_SENDING_DATA:
	case STATE_DATA_BUSY:
		/*
		 * If DTO interrupt does NOT come in sending data state,
		 * we should notify the driver to terminate current transfer
		 * and report a data timeout to the core.
		 */
		host->data_status = SDMMC_INT_DRTO;
		set_bit(EVENT_DATA_ERROR, &host->pending_events);
		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
		break;
	default:
		break;
	}
}

#ifdef CONFIG_OF
/* Build a dw_mci_board from device-tree properties (devm-allocated). */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	/* find reset controller when exist */
	pdata->rstc = devm_reset_control_get_optional(dev, "reset");
	if (IS_ERR(pdata->rstc)) {
		if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER)
			return ERR_PTR(-EPROBE_DEFER);
	}

	/* find out number of slots supported */
	of_property_read_u32(np, "num-slots", &pdata->num_slots);

	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev,
			 "fifo-depth property not found, using value of FIFOTH register as default\n");

	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);

	of_property_read_u32(np, "data-addr", &host->data_addr_override);

	if (of_get_property(np, "fifo-watermark-aligned", NULL))
		host->wm_aligned = true;

	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	return pdata;
}

#else /* CONFIG_OF */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return 
ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */

/* Unmask the controller card-detect interrupt unless every slot polls/GPIOs. */
static void dw_mci_enable_cd(struct dw_mci *host)
{
	unsigned long irqflags;
	u32 temp;
	int i;
	struct dw_mci_slot *slot;

	/*
	 * No need for CD if all slots have a non-error GPIO
	 * as well as broken card detection is found.
	 */
	for (i = 0; i < host->num_slots; i++) {
		slot = host->slot[i];
		if (slot->mmc->caps & MMC_CAP_NEEDS_POLL)
			return;

		if (mmc_gpio_get_cd(slot->mmc) < 0)
			break;
	}
	if (i == host->num_slots)
		return;

	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp |= SDMMC_INT_CD;
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

/* Common probe entry used by the platform/PCI glue drivers. */
int dw_mci_probe(struct dw_mci *host)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int width, i, ret = 0;
	u32 fifo_size;
	int init_slots = 0;

	if (!host->pdata) {
		host->pdata = dw_mci_parse_dt(host);
		if (PTR_ERR(host->pdata) == -EPROBE_DEFER) {
			return -EPROBE_DEFER;
		} else if (IS_ERR(host->pdata)) {
			dev_err(host->dev, "platform data not available\n");
			return -EINVAL;
		}
	}

	host->biu_clk = devm_clk_get(host->dev, "biu");
	if (IS_ERR(host->biu_clk)) {
		dev_dbg(host->dev, "biu clock not available\n");
	} else {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable biu clock\n");
			return ret;
		}
	}

	host->ciu_clk = devm_clk_get(host->dev, "ciu");
	if (IS_ERR(host->ciu_clk)) {
		dev_dbg(host->dev, "ciu clock not available\n");
		host->bus_hz = host->pdata->bus_hz;
	} else {
		ret = clk_prepare_enable(host->ciu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable ciu clock\n");
			goto err_clk_biu;
		}

		if (host->pdata->bus_hz) {
			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
			if (ret)
				dev_warn(host->dev,
					 "Unable to set bus rate to %uHz\n",
					 host->pdata->bus_hz);
		}
		host->bus_hz = clk_get_rate(host->ciu_clk);
	}

	if (!host->bus_hz) {
		dev_err(host->dev,
			"Platform data must supply bus speed\n");
		ret = -ENODEV;
		goto err_clk_ciu;
	}

	if (drv_data && drv_data->init) {
		ret = drv_data->init(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific init failed\n");
			goto err_clk_ciu;
		}
	}

	if (!IS_ERR(host->pdata->rstc)) {
		reset_control_assert(host->pdata->rstc);
		usleep_range(10, 50);
		reset_control_deassert(host->pdata->rstc);
	}

	setup_timer(&host->cmd11_timer,
		    dw_mci_cmd11_timer, (unsigned long)host);

	setup_timer(&host->dto_timer,
		    dw_mci_dto_timer, (unsigned long)host);

	spin_lock_init(&host->lock);
	spin_lock_init(&host->irq_lock);
	INIT_LIST_HEAD(&host->queue);

	/*
	 * Get the host data width - this assumes that HCON has been set with
	 * the correct values.
	 */
	i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
	if (!i) {
		host->push_data = dw_mci_push_data16;
		host->pull_data = dw_mci_pull_data16;
		width = 16;
		host->data_shift = 1;
	} else if (i == 2) {
		host->push_data = dw_mci_push_data64;
		host->pull_data = dw_mci_pull_data64;
		width = 64;
		host->data_shift = 3;
	} else {
		/* Check for a reserved value, and warn if it is */
		WARN((i != 1),
		     "HCON reports a reserved host data width!\n"
		     "Defaulting to 32-bit access.\n");
		host->push_data = dw_mci_push_data32;
		host->pull_data = dw_mci_pull_data32;
		width = 32;
		host->data_shift = 2;
	}

	/* Reset all blocks */
	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
		ret = -ENODEV;
		goto err_clk_ciu;
	}

	host->dma_ops = host->pdata->dma_ops;
	dw_mci_init_dma(host);

	/* Clear the interrupts for the host controller */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/*
	 * FIFO threshold settings  RxMark  = fifo_size / 2 - 1,
	 *                          Tx Mark = fifo_size / 2 DMA Size = 8
	 */
	if (!host->pdata->fifo_depth) {
		/*
		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
		 * have been overwritten by the bootloader, just like we're
		 * about to do, so if you know the value for your hardware, you
		 * should put it in the platform data.
3178 */ 3179 fifo_size = mci_readl(host, FIFOTH); 3180 fifo_size = 1 + ((fifo_size >> 16) & 0xfff); 3181 } else { 3182 fifo_size = host->pdata->fifo_depth; 3183 } 3184 host->fifo_depth = fifo_size; 3185 host->fifoth_val = 3186 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2); 3187 mci_writel(host, FIFOTH, host->fifoth_val); 3188 3189 /* disable clock to CIU */ 3190 mci_writel(host, CLKENA, 0); 3191 mci_writel(host, CLKSRC, 0); 3192 3193 /* 3194 * In 2.40a spec, Data offset is changed. 3195 * Need to check the version-id and set data-offset for DATA register. 3196 */ 3197 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID)); 3198 dev_info(host->dev, "Version ID is %04x\n", host->verid); 3199 3200 if (host->data_addr_override) 3201 host->fifo_reg = host->regs + host->data_addr_override; 3202 else if (host->verid < DW_MMC_240A) 3203 host->fifo_reg = host->regs + DATA_OFFSET; 3204 else 3205 host->fifo_reg = host->regs + DATA_240A_OFFSET; 3206 3207 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host); 3208 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt, 3209 host->irq_flags, "dw-mci", host); 3210 if (ret) 3211 goto err_dmaunmap; 3212 3213 if (host->pdata->num_slots) 3214 host->num_slots = host->pdata->num_slots; 3215 else 3216 host->num_slots = 1; 3217 3218 if (host->num_slots < 1 || 3219 host->num_slots > SDMMC_GET_SLOT_NUM(mci_readl(host, HCON))) { 3220 dev_err(host->dev, 3221 "Platform data must supply correct num_slots.\n"); 3222 ret = -ENODEV; 3223 goto err_clk_ciu; 3224 } 3225 3226 /* 3227 * Enable interrupts for command done, data over, data empty, 3228 * receive ready and error such as transmit, receive timeout, crc error 3229 */ 3230 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 3231 SDMMC_INT_TXDR | SDMMC_INT_RXDR | 3232 DW_MCI_ERROR_FLAGS); 3233 /* Enable mci interrupt */ 3234 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); 3235 3236 dev_info(host->dev, 3237 "DW MMC controller at irq %d,%d bit 
host data width,%u deep fifo\n",
		 host->irq, width, fifo_size);

	/* We need at least one slot to succeed */
	for (i = 0; i < host->num_slots; i++) {
		ret = dw_mci_init_slot(host, i);
		if (ret)
			dev_dbg(host->dev, "slot %d init failed\n", i);
		else
			init_slots++;
	}

	if (init_slots) {
		dev_info(host->dev, "%d slots initialized\n", init_slots);
	} else {
		dev_dbg(host->dev,
			"attempted to initialize %d slots, but failed on all\n",
			host->num_slots);
		goto err_dmaunmap;
	}

	/* Now that slots are all setup, we can enable card detect */
	dw_mci_enable_cd(host);

	return 0;

err_dmaunmap:
	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (!IS_ERR(host->pdata->rstc))
		reset_control_assert(host->pdata->rstc);

err_clk_ciu:
	clk_disable_unprepare(host->ciu_clk);

err_clk_biu:
	clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_probe);

/* Common removal path: unwind everything dw_mci_probe() set up. */
void dw_mci_remove(struct dw_mci *host)
{
	int i;

	for (i = 0; i < host->num_slots; i++) {
		dev_dbg(host->dev, "remove slot %d\n", i);
		if (host->slot[i])
			dw_mci_cleanup_slot(host->slot[i], i);
	}

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (!IS_ERR(host->pdata->rstc))
		reset_control_assert(host->pdata->rstc);

	clk_disable_unprepare(host->ciu_clk);
	clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);



#ifdef CONFIG_PM
/* Runtime suspend: tear down DMA and gate the controller clocks. */
int dw_mci_runtime_suspend(struct device *dev)
{
	struct dw_mci *host = dev_get_drvdata(dev);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	clk_disable_unprepare(host->ciu_clk);

	/* biu_clk is only gated when the card can't vanish while suspended */
	if (host->cur_slot &&
	    (mmc_can_gpio_cd(host->cur_slot->mmc) ||
	     !mmc_card_is_removable(host->cur_slot->mmc)))
		clk_disable_unprepare(host->biu_clk);

	return 0;
}
EXPORT_SYMBOL(dw_mci_runtime_suspend);

/* Runtime resume: re-enable clocks and reprogram the controller state. */
int dw_mci_runtime_resume(struct device *dev)
{
	int i, ret = 0;
	struct dw_mci *host = dev_get_drvdata(dev);

	if (host->cur_slot &&
	    (mmc_can_gpio_cd(host->cur_slot->mmc) ||
	     !mmc_card_is_removable(host->cur_slot->mmc))) {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret)
			return ret;
	}

	ret = clk_prepare_enable(host->ciu_clk);
	if (ret)
		goto err;

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
		clk_disable_unprepare(host->ciu_clk);
		ret = -ENODEV;
		goto err;
	}

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/*
	 * Restore the initial value at FIFOTH register
	 * And Invalidate the prev_blksz with zero
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];

		if (!slot)
			continue;
		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);

		/* Force setup bus to guarantee available clock output */
		dw_mci_setup_bus(slot, true);
	}

	/* Now that slots are all setup, we can enable card detect */
	dw_mci_enable_cd(host);

	return 0;

err:
	if (host->cur_slot &&
	    (mmc_can_gpio_cd(host->cur_slot->mmc) ||
	     !mmc_card_is_removable(host->cur_slot->mmc)))
		clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_runtime_resume);
#endif /* CONFIG_PM */

static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}

static void __exit dw_mci_exit(void)
{
}

module_init(dw_mci_init);
module_exit(dw_mci_exit);

MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");