Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/mmc/host/tmio_mmc_pio.c at v3.1-rc8 (1024 lines, 27 kB)
/*
 * linux/drivers/mmc/host/tmio_mmc_pio.c
 *
 * Copyright (C) 2011 Guennadi Liakhovetski
 * Copyright (C) 2007 Ian Molton
 * Copyright (C) 2004 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC / SD / SDIO IP found in:
 *
 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
 *
 * This driver draws mainly on scattered spec sheets, Reverse engineering
 * of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
 * support). (Further 4 bit support from a later datasheet).
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Eliminate FIXMEs
 *   SDIO support
 *   Better Power management
 *   Handle MMC errors better
 *   double buffer support
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/tmio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

#include "tmio_mmc.h"

void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
        u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ);
        sd_ctrl_write32(host, CTL_IRQ_MASK, mask);
}

void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
        u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) | (i & TMIO_MASK_IRQ);
        sd_ctrl_write32(host, CTL_IRQ_MASK, mask);
}

static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
        sd_ctrl_write32(host, CTL_STATUS, ~i);
}

static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
        host->sg_len = data->sg_len;
        host->sg_ptr = data->sg;
        host->sg_orig = data->sg;
        host->sg_off = 0;
}

static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
        host->sg_ptr = sg_next(host->sg_ptr);
        host->sg_off = 0;
        return --host->sg_len;
}

#ifdef CONFIG_MMC_DEBUG

#define STATUS_TO_TEXT(a, status, i) \
        do { \
                if (status & TMIO_STAT_##a) { \
                        if (i++) \
                                printk(" | "); \
                        printk(#a); \
                } \
        } while (0)

static void pr_debug_status(u32 status)
{
        int i = 0;
        printk(KERN_DEBUG "status: %08x = ", status);
        STATUS_TO_TEXT(CARD_REMOVE, status, i);
        STATUS_TO_TEXT(CARD_INSERT, status, i);
        STATUS_TO_TEXT(SIGSTATE, status, i);
        STATUS_TO_TEXT(WRPROTECT, status, i);
        STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
        STATUS_TO_TEXT(CARD_INSERT_A, status, i);
        STATUS_TO_TEXT(SIGSTATE_A, status, i);
        STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
        STATUS_TO_TEXT(STOPBIT_ERR, status, i);
        STATUS_TO_TEXT(ILL_FUNC, status, i);
        STATUS_TO_TEXT(CMD_BUSY, status, i);
        STATUS_TO_TEXT(CMDRESPEND, status, i);
        STATUS_TO_TEXT(DATAEND, status, i);
        STATUS_TO_TEXT(CRCFAIL, status, i);
        STATUS_TO_TEXT(DATATIMEOUT, status, i);
        STATUS_TO_TEXT(CMDTIMEOUT, status, i);
        STATUS_TO_TEXT(RXOVERFLOW, status, i);
        STATUS_TO_TEXT(TXUNDERRUN, status, i);
        STATUS_TO_TEXT(RXRDY, status, i);
        STATUS_TO_TEXT(TXRQ, status, i);
        STATUS_TO_TEXT(ILL_ACCESS, status, i);
        printk("\n");
}

#else
#define pr_debug_status(s)  do { } while (0)
#endif

static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);

        if (enable) {
                host->sdio_irq_enabled = 1;
                sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
                sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK,
                        (TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ));
        } else {
                sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL);
                sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
                host->sdio_irq_enabled = 0;
        }
}

static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{
        u32 clk = 0, clock;

        if (new_clock) {
                for (clock = host->mmc->f_min, clk = 0x80000080;
                     new_clock >= (clock<<1); clk >>= 1)
                        clock <<= 1;
                clk |= 0x100;
        }

        if (host->set_clk_div)
                host->set_clk_div(host->pdev, (clk>>22) & 1);

        sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
}

static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
        struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

        /* implicit BUG_ON(!res) */
        if (resource_size(res) > 0x100) {
                sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
                msleep(10);
        }

        sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
                sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
        msleep(10);
}

static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
        struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

        sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
                sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
        msleep(10);

        /* implicit BUG_ON(!res) */
        if (resource_size(res) > 0x100) {
                sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
                msleep(10);
        }
}

static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
        struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

        /* FIXME - should we set stop clock reg here */
        sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
        /* implicit BUG_ON(!res) */
        if (resource_size(res) > 0x100)
                sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
        msleep(10);
        sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
        if (resource_size(res) > 0x100)
                sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
        msleep(10);
}

static void tmio_mmc_reset_work(struct work_struct *work)
{
        struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
                                                  delayed_reset_work.work);
        struct mmc_request *mrq;
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        mrq = host->mrq;

        /*
         * is request already finished? Since we use a non-blocking
         * cancel_delayed_work(), it can happen, that a .set_ios() call preempts
         * us, so, have to check for IS_ERR(host->mrq)
         */
        if (IS_ERR_OR_NULL(mrq)
            || time_is_after_jiffies(host->last_req_ts +
               msecs_to_jiffies(2000))) {
                spin_unlock_irqrestore(&host->lock, flags);
                return;
        }

        dev_warn(&host->pdev->dev,
                 "timeout waiting for hardware interrupt (CMD%u)\n",
                 mrq->cmd->opcode);

        if (host->data)
                host->data->error = -ETIMEDOUT;
        else if (host->cmd)
                host->cmd->error = -ETIMEDOUT;
        else
                mrq->cmd->error = -ETIMEDOUT;

        host->cmd = NULL;
        host->data = NULL;
        host->force_pio = false;

        spin_unlock_irqrestore(&host->lock, flags);

        tmio_mmc_reset(host);

        /* Ready for new calls */
        host->mrq = NULL;

        mmc_request_done(host->mmc, mrq);
}

/* called with host->lock held, interrupts disabled */
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
        struct mmc_request *mrq;
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);

        mrq = host->mrq;
        if (IS_ERR_OR_NULL(mrq)) {
                spin_unlock_irqrestore(&host->lock, flags);
                return;
        }

        host->cmd = NULL;
        host->data = NULL;
        host->force_pio = false;

        cancel_delayed_work(&host->delayed_reset_work);

        host->mrq = NULL;
        spin_unlock_irqrestore(&host->lock, flags);

        mmc_request_done(host->mmc, mrq);
}

static void tmio_mmc_done_work(struct work_struct *work)
{
        struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
                                                  done);
        tmio_mmc_finish_request(host);
}

/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD         0x0040
#define RESP_NONE       0x0300
#define RESP_R1         0x0400
#define RESP_R1B        0x0500
#define RESP_R2         0x0600
#define RESP_R3         0x0700
#define DATA_PRESENT    0x0800
#define TRANSFER_READ   0x1000
#define TRANSFER_MULTI  0x2000
#define SECURITY_CMD    0x4000

static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
        struct mmc_data *data = host->data;
        int c = cmd->opcode;

        /* Command 12 is handled by hardware */
        if (cmd->opcode == 12 && !cmd->arg) {
                sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
                return 0;
        }

        switch (mmc_resp_type(cmd)) {
        case MMC_RSP_NONE: c |= RESP_NONE; break;
        case MMC_RSP_R1:   c |= RESP_R1;   break;
        case MMC_RSP_R1B:  c |= RESP_R1B;  break;
        case MMC_RSP_R2:   c |= RESP_R2;   break;
        case MMC_RSP_R3:   c |= RESP_R3;   break;
        default:
                pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
                return -EINVAL;
        }

        host->cmd = cmd;

/* FIXME - this seems to be ok commented out but the spec suggest this bit
 * should be set when issuing app commands.
 *      if(cmd->flags & MMC_FLAG_ACMD)
 *              c |= APP_CMD;
 */
        if (data) {
                c |= DATA_PRESENT;
                if (data->blocks > 1) {
                        sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
                        c |= TRANSFER_MULTI;
                }
                if (data->flags & MMC_DATA_READ)
                        c |= TRANSFER_READ;
        }

        tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD);

        /* Fire off the command */
        sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
        sd_ctrl_write16(host, CTL_SD_CMD, c);

        return 0;
}

/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
        struct mmc_data *data = host->data;
        void *sg_virt;
        unsigned short *buf;
        unsigned int count;
        unsigned long flags;

        if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
                pr_err("PIO IRQ in DMA mode!\n");
                return;
        } else if (!data) {
                pr_debug("Spurious PIO IRQ\n");
                return;
        }

        sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
        buf = (unsigned short *)(sg_virt + host->sg_off);

        count = host->sg_ptr->length - host->sg_off;
        if (count > data->blksz)
                count = data->blksz;

        pr_debug("count: %08x offset: %08x flags %08x\n",
                 count, host->sg_off, data->flags);

        /* Transfer the data */
        if (data->flags & MMC_DATA_READ)
                sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
        else
                sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

        host->sg_off += count;

        tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

        if (host->sg_off == host->sg_ptr->length)
                tmio_mmc_next_sg(host);

        return;
}

static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
        if (host->sg_ptr == &host->bounce_sg) {
                unsigned long flags;
                void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
                memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
                tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
        }
}

/* needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
        struct mmc_data *data = host->data;
        struct mmc_command *stop;

        host->data = NULL;

        if (!data) {
                dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
                return;
        }
        stop = data->stop;

        /* FIXME - return correct transfer count on errors */
        if (!data->error)
                data->bytes_xfered = data->blocks * data->blksz;
        else
                data->bytes_xfered = 0;

        pr_debug("Completed data request\n");

        /*
         * FIXME: other drivers allow an optional stop command of any given type
         *        which we don't do, as the chip can auto generate them.
         *        Perhaps we can be smarter about when to use auto CMD12 and
         *        only issue the auto request when we know this is the desired
         *        stop command, allowing fallback to the stop command the
         *        upper layers expect. For now, we do what works.
         */

        if (data->flags & MMC_DATA_READ) {
                if (host->chan_rx && !host->force_pio)
                        tmio_mmc_check_bounce_buffer(host);
                dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
                        host->mrq);
        } else {
                dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
                        host->mrq);
        }

        if (stop) {
                if (stop->opcode == 12 && !stop->arg)
                        sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
                else
                        BUG();
        }

        schedule_work(&host->done);
}

static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
        struct mmc_data *data;
        spin_lock(&host->lock);
        data = host->data;

        if (!data)
                goto out;

        if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
                /*
                 * Has all data been written out yet? Testing on SuperH showed,
                 * that in most cases the first interrupt comes already with the
                 * BUSY status bit clear, but on some operations, like mount or
                 * in the beginning of a write / sync / umount, there is one
                 * DATAEND interrupt with the BUSY bit set; in these cases
                 * waiting for one more interrupt fixes the problem.
                 */
                if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
                        tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
                        tasklet_schedule(&host->dma_complete);
                }
        } else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
                tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
                tasklet_schedule(&host->dma_complete);
        } else {
                tmio_mmc_do_data_irq(host);
                tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
        }
out:
        spin_unlock(&host->lock);
}

static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
        unsigned int stat)
{
        struct mmc_command *cmd = host->cmd;
        int i, addr;

        spin_lock(&host->lock);

        if (!host->cmd) {
                pr_debug("Spurious CMD irq\n");
                goto out;
        }

        host->cmd = NULL;

        /* This controller is sicker than the PXA one. Not only do we need to
         * drop the top 8 bits of the first response word, we also need to
         * modify the order of the response for short response command types.
         */

        for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
                cmd->resp[i] = sd_ctrl_read32(host, addr);

        if (cmd->flags & MMC_RSP_136) {
                cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
                cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
                cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
                cmd->resp[3] <<= 8;
        } else if (cmd->flags & MMC_RSP_R3) {
                cmd->resp[0] = cmd->resp[3];
        }

        if (stat & TMIO_STAT_CMDTIMEOUT)
                cmd->error = -ETIMEDOUT;
        else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
                cmd->error = -EILSEQ;

        /* If there is data to handle we enable data IRQs here, and
         * we will ultimately finish the request in the data_end handler.
         * If there's no data or we encountered an error, finish now.
         */
        if (host->data && !cmd->error) {
                if (host->data->flags & MMC_DATA_READ) {
                        if (host->force_pio || !host->chan_rx)
                                tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
                        else
                                tasklet_schedule(&host->dma_issue);
                } else {
                        if (host->force_pio || !host->chan_tx)
                                tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
                        else
                                tasklet_schedule(&host->dma_issue);
                }
        } else {
                schedule_work(&host->done);
        }

out:
        spin_unlock(&host->lock);
}

irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
        struct tmio_mmc_host *host = devid;
        struct mmc_host *mmc = host->mmc;
        struct tmio_mmc_data *pdata = host->pdata;
        unsigned int ireg, irq_mask, status;
        unsigned int sdio_ireg, sdio_irq_mask, sdio_status;

        pr_debug("MMC IRQ begin\n");

        status = sd_ctrl_read32(host, CTL_STATUS);
        irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
        ireg = status & TMIO_MASK_IRQ & ~irq_mask;

        sdio_ireg = 0;
        if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) {
                sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
                sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK);
                sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask;

                sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL);

                if (sdio_ireg && !host->sdio_irq_enabled) {
                        pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
                                   sdio_status, sdio_irq_mask, sdio_ireg);
                        tmio_mmc_enable_sdio_irq(mmc, 0);
                        goto out;
                }

                if (mmc->caps & MMC_CAP_SDIO_IRQ &&
                    sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
                        mmc_signal_sdio_irq(mmc);

                if (sdio_ireg)
                        goto out;
        }

        pr_debug_status(status);
        pr_debug_status(ireg);

        /* Card insert / remove attempts */
        if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
                tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
                        TMIO_STAT_CARD_REMOVE);
                if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
                     ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
                    !work_pending(&mmc->detect.work))
                        mmc_detect_change(host->mmc, msecs_to_jiffies(100));
                goto out;
        }

        /* CRC and other errors */
/*      if (ireg & TMIO_STAT_ERR_IRQ)
 *              handled |= tmio_error_irq(host, irq, stat);
 */

        /* Command completion */
        if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
                tmio_mmc_ack_mmc_irqs(host,
                        TMIO_STAT_CMDRESPEND |
                        TMIO_STAT_CMDTIMEOUT);
                tmio_mmc_cmd_irq(host, status);
                goto out;
        }

        /* Data transfer */
        if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
                tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
                tmio_mmc_pio_irq(host);
                goto out;
        }

        /* Data transfer completion */
        if (ireg & TMIO_STAT_DATAEND) {
                tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
                tmio_mmc_data_irq(host);
                goto out;
        }

        pr_warning("tmio_mmc: Spurious irq, disabling! "
                   "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
        pr_debug_status(status);
        tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask);

out:
        return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_irq);

static int tmio_mmc_start_data(struct tmio_mmc_host *host,
        struct mmc_data *data)
{
        struct tmio_mmc_data *pdata = host->pdata;

        pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
                 data->blksz, data->blocks);

        /* Some hardware cannot perform 2 byte requests in 4 bit mode */
        if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
                int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

                if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
                        pr_err("%s: %d byte block unsupported in 4 bit mode\n",
                               mmc_hostname(host->mmc), data->blksz);
                        return -EINVAL;
                }
        }

        tmio_mmc_init_sg(host, data);
        host->data = data;

        /* Set transfer length / blocksize */
        sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
        sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

        tmio_mmc_start_dma(host, data);

        return 0;
}

/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&host->lock, flags);

        if (host->mrq) {
                pr_debug("request not null\n");
                if (IS_ERR(host->mrq)) {
                        spin_unlock_irqrestore(&host->lock, flags);
                        mrq->cmd->error = -EAGAIN;
                        mmc_request_done(mmc, mrq);
                        return;
                }
        }

        host->last_req_ts = jiffies;
        wmb();
        host->mrq = mrq;

        spin_unlock_irqrestore(&host->lock, flags);

        if (mrq->data) {
                ret = tmio_mmc_start_data(host, mrq->data);
                if (ret)
                        goto fail;
        }

        ret = tmio_mmc_start_command(host, mrq->cmd);
        if (!ret) {
                schedule_delayed_work(&host->delayed_reset_work,
                                      msecs_to_jiffies(2000));
                return;
        }

fail:
        host->force_pio = false;
        host->mrq = NULL;
        mrq->cmd->error = ret;
        mmc_request_done(mmc, mrq);
}

/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme therefore it cannot
 * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
 * MMC won't run that fast, it has to be clocked at 12MHz which is the next
 * slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);
        struct tmio_mmc_data *pdata = host->pdata;
        unsigned long flags;

        mutex_lock(&host->ios_lock);

        spin_lock_irqsave(&host->lock, flags);
        if (host->mrq) {
                if (IS_ERR(host->mrq)) {
                        dev_dbg(&host->pdev->dev,
                                "%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
                                current->comm, task_pid_nr(current),
                                ios->clock, ios->power_mode);
                        host->mrq = ERR_PTR(-EINTR);
                } else {
                        dev_dbg(&host->pdev->dev,
                                "%s.%d: CMD%u active since %lu, now %lu!\n",
                                current->comm, task_pid_nr(current),
                                host->mrq->cmd->opcode, host->last_req_ts, jiffies);
                }
                spin_unlock_irqrestore(&host->lock, flags);

                mutex_unlock(&host->ios_lock);
                return;
        }

        host->mrq = ERR_PTR(-EBUSY);

        spin_unlock_irqrestore(&host->lock, flags);

        /*
         * pdata->power == false only if COLD_CD is available, otherwise only
         * in short time intervals during probing or resuming
         */
        if (ios->power_mode == MMC_POWER_ON && ios->clock) {
                if (!pdata->power) {
                        pm_runtime_get_sync(&host->pdev->dev);
                        pdata->power = true;
                }
                tmio_mmc_set_clock(host, ios->clock);
                /* power up SD bus */
                if (host->set_pwr)
                        host->set_pwr(host->pdev, 1);
                /* start bus clock */
                tmio_mmc_clk_start(host);
        } else if (ios->power_mode != MMC_POWER_UP) {
                if (host->set_pwr)
                        host->set_pwr(host->pdev, 0);
                if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
                    pdata->power) {
                        pdata->power = false;
                        pm_runtime_put(&host->pdev->dev);
                }
                tmio_mmc_clk_stop(host);
        }

        switch (ios->bus_width) {
        case MMC_BUS_WIDTH_1:
                sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
                break;
        case MMC_BUS_WIDTH_4:
                sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
                break;
        }

        /* Let things settle. delay taken from winCE driver */
        udelay(140);
        if (PTR_ERR(host->mrq) == -EINTR)
                dev_dbg(&host->pdev->dev,
                        "%s.%d: IOS interrupted: clk %u, mode %u",
                        current->comm, task_pid_nr(current),
                        ios->clock, ios->power_mode);
        host->mrq = NULL;

        mutex_unlock(&host->ios_lock);
}

static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);
        struct tmio_mmc_data *pdata = host->pdata;

        return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
                 (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
}

static int tmio_mmc_get_cd(struct mmc_host *mmc)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);
        struct tmio_mmc_data *pdata = host->pdata;

        if (!pdata->get_cd)
                return -ENOSYS;
        else
                return pdata->get_cd(host->pdev);
}

static const struct mmc_host_ops tmio_mmc_ops = {
        .request        = tmio_mmc_request,
        .set_ios        = tmio_mmc_set_ios,
        .get_ro         = tmio_mmc_get_ro,
        .get_cd         = tmio_mmc_get_cd,
        .enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};

int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
                                  struct platform_device *pdev,
                                  struct tmio_mmc_data *pdata)
{
        struct tmio_mmc_host *_host;
        struct mmc_host *mmc;
        struct resource *res_ctl;
        int ret;
        u32 irq_mask = TMIO_MASK_CMD;

        res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res_ctl)
                return -EINVAL;

        mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
        if (!mmc)
                return -ENOMEM;

        pdata->dev = &pdev->dev;
        _host = mmc_priv(mmc);
        _host->pdata = pdata;
        _host->mmc = mmc;
        _host->pdev = pdev;
        platform_set_drvdata(pdev, mmc);

        _host->set_pwr = pdata->set_pwr;
        _host->set_clk_div = pdata->set_clk_div;

        /* SD control register space size is 0x200, 0x400 for bus_shift=1 */
        _host->bus_shift = resource_size(res_ctl) >> 10;

        _host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
        if (!_host->ctl) {
                ret = -ENOMEM;
                goto host_free;
        }

        mmc->ops = &tmio_mmc_ops;
        mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
        mmc->f_max = pdata->hclk;
        mmc->f_min = mmc->f_max / 512;
        mmc->max_segs = 32;
        mmc->max_blk_size = 512;
        mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
                mmc->max_segs;
        mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
        mmc->max_seg_size = mmc->max_req_size;
        if (pdata->ocr_mask)
                mmc->ocr_avail = pdata->ocr_mask;
        else
                mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

        pdata->power = false;
        pm_runtime_enable(&pdev->dev);
        ret = pm_runtime_resume(&pdev->dev);
        if (ret < 0)
                goto pm_disable;

        tmio_mmc_clk_stop(_host);
        tmio_mmc_reset(_host);

        tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
        if (pdata->flags & TMIO_MMC_SDIO_IRQ)
                tmio_mmc_enable_sdio_irq(mmc, 0);

        spin_lock_init(&_host->lock);
        mutex_init(&_host->ios_lock);

        /* Init delayed work for request timeouts */
        INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
        INIT_WORK(&_host->done, tmio_mmc_done_work);

        /* See if we also get DMA */
        tmio_mmc_request_dma(_host, pdata);

        /* We have to keep the device powered for its card detection to work */
        if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD)) {
                pdata->power = true;
                pm_runtime_get_noresume(&pdev->dev);
        }

        mmc_add_host(mmc);

        /* Unmask the IRQs we want to know about */
        if (!_host->chan_rx)
                irq_mask |= TMIO_MASK_READOP;
        if (!_host->chan_tx)
                irq_mask |= TMIO_MASK_WRITEOP;

        tmio_mmc_enable_mmc_irqs(_host, irq_mask);

        *host = _host;

        return 0;

pm_disable:
        pm_runtime_disable(&pdev->dev);
        iounmap(_host->ctl);
host_free:
        mmc_free_host(mmc);

        return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_probe);

void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
        struct platform_device *pdev = host->pdev;

        /*
         * We don't have to manipulate pdata->power here: if there is a card in
         * the slot, the runtime PM is active and our .runtime_resume() will not
         * be run. If there is no card in the slot and the platform can suspend
         * the controller, the runtime PM is suspended and pdata->power == false,
         * so, our .runtime_resume() will not try to detect a card in the slot.
         */
        if (host->pdata->flags & TMIO_MMC_HAS_COLD_CD)
                pm_runtime_get_sync(&pdev->dev);

        mmc_remove_host(host->mmc);
        cancel_work_sync(&host->done);
        cancel_delayed_work_sync(&host->delayed_reset_work);
        tmio_mmc_release_dma(host);

        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);

        iounmap(host->ctl);
        mmc_free_host(host->mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_remove);

#ifdef CONFIG_PM
int tmio_mmc_host_suspend(struct device *dev)
{
        struct mmc_host *mmc = dev_get_drvdata(dev);
        struct tmio_mmc_host *host = mmc_priv(mmc);
        int ret = mmc_suspend_host(mmc);

        if (!ret)
                tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);

        host->pm_error = pm_runtime_put_sync(dev);

        return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_suspend);

int tmio_mmc_host_resume(struct device *dev)
{
        struct mmc_host *mmc = dev_get_drvdata(dev);
        struct tmio_mmc_host *host = mmc_priv(mmc);

        /* The MMC core will perform the complete set up */
        host->pdata->power = false;

        host->pm_global = true;
        if (!host->pm_error)
                pm_runtime_get_sync(dev);

        if (host->pm_global) {
                /* Runtime PM resume callback didn't run */
                tmio_mmc_reset(host);
                tmio_mmc_enable_dma(host, true);
                host->pm_global = false;
        }

        return mmc_resume_host(mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_resume);

#endif  /* CONFIG_PM */

int tmio_mmc_host_runtime_suspend(struct device *dev)
{
        return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);

int tmio_mmc_host_runtime_resume(struct device *dev)
{
        struct mmc_host *mmc = dev_get_drvdata(dev);
        struct tmio_mmc_host *host = mmc_priv(mmc);
        struct tmio_mmc_data *pdata = host->pdata;

        tmio_mmc_reset(host);
        tmio_mmc_enable_dma(host, true);

        if (pdata->power) {
                /* Only entered after a card-insert interrupt */
                if (!mmc->card)
                        tmio_mmc_set_ios(mmc, &mmc->ios);
                mmc_detect_change(mmc, msecs_to_jiffies(100));
        }
        host->pm_global = false;

        return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);

MODULE_LICENSE("GPL v2");
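
Note: this file is only the shared PIO core; it is consumed by a platform glue driver (for example tmio_mmc.c or sh_mobile_sdhi.c in the same directory) that fills in struct tmio_mmc_data and wires up the exported helpers. The sketch below is a minimal, hypothetical illustration of that wiring, not code from this file: the glue names (my_glue_probe) are invented, only tmio_mmc_host_probe(), tmio_mmc_irq() and tmio_mmc_host_remove() come from the source above, and real glue drivers pass additional IRQ flags and platform data.

/* Hypothetical glue-driver probe, for illustration only. */
static int __devinit my_glue_probe(struct platform_device *pdev)
{
        struct tmio_mmc_data *pdata = pdev->dev.platform_data; /* assumed from board code */
        struct tmio_mmc_host *host;
        int irq, ret;

        if (!pdata)
                return -EINVAL;

        /* Allocates the mmc_host, ioremaps the control registers and
         * registers the host with the MMC core (tmio_mmc_host_probe above). */
        ret = tmio_mmc_host_probe(&host, pdev, pdata);
        if (ret < 0)
                return ret;

        /* Route the controller interrupt to the shared handler exported above. */
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                tmio_mmc_host_remove(host);
                return irq;
        }

        ret = request_irq(irq, tmio_mmc_irq, 0, dev_name(&pdev->dev), host);
        if (ret) {
                tmio_mmc_host_remove(host);
                return ret;
        }

        return 0;
}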