Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/mmc/tmio_mmc.c at v2.6.36-rc2 (957 lines, 24 kB)
/*
 * linux/drivers/mmc/tmio_mmc.c
 *
 * Copyright (C) 2004 Ian Molton
 * Copyright (C) 2007 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC / SD / SDIO cell found in:
 *
 * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
 * support). (Further 4 bit support from a later datasheet).
 *
 * TODO:
 * Investigate using a workqueue for PIO transfers
 * Eliminate FIXMEs
 * SDIO support
 * Better power management
 * Handle MMC errors better
 * Double buffer support
 *
 */
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/mmc/host.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tmio.h>

#include "tmio_mmc.h"

static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock) {
		for (clock = host->mmc->f_min, clk = 0x80000080;
			new_clock >= (clock<<1); clk >>= 1)
			clock <<= 1;
		clk |= 0x100;
	}

	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk>>22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
}

static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);
}

static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);
	sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
	msleep(10);
}

static void reset(struct tmio_mmc_host *host)
{
	/* FIXME - should we set stop clock reg here */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	msleep(10);
}

static void
tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq = host->mrq;

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;

	mmc_request_done(host->mmc, mrq);
}

/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme.
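 *
 * Worked example (illustration only, not an extra register definition):
 * an R1-type command such as CMD13 (SEND_STATUS) is issued below as
 * opcode | RESP_R1, i.e. 13 | 0x0400 = 0x040d.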
 */
#define APP_CMD          0x0040
#define RESP_NONE        0x0300
#define RESP_R1          0x0400
#define RESP_R1B         0x0500
#define RESP_R2          0x0600
#define RESP_R3          0x0700
#define DATA_PRESENT     0x0800
#define TRANSFER_READ    0x1000
#define TRANSFER_MULTI   0x2000
#define SECURITY_CMD     0x4000

static int
tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;

	/* Command 12 is handled by hardware */
	if (cmd->opcode == 12 && !cmd->arg) {
		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
		return 0;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

/* FIXME - this seems to be ok commented out but the spec suggests this bit
 * should be set when issuing app commands.
 *	if(cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
			c |= TRANSFER_MULTI;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	enable_mmc_irqs(host, TMIO_MASK_CMD);

	/* Fire off the command */
	sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}

/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	buf = (unsigned short *)(tmio_mmc_kmap_atomic(host, &flags) +
		host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		count, host->sg_off, data->flags);

	/* Transfer the data */
	if (data->flags & MMC_DATA_READ)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host, &flags);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);

	return;
}

static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 * which we don't do, as the chip can auto generate them.
	 * Perhaps we can be smarter about when to use auto CMD12 and
	 * only issue the auto request when we know this is the desired
	 * stop command, allowing fallback to the stop command the
	 * upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		if (!host->chan_rx)
			disable_mmc_irqs(host, TMIO_MASK_READOP);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		if (!host->chan_tx)
			disable_mmc_irqs(host, TMIO_MASK_WRITEOP);
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop) {
		if (stop->opcode == 12 && !stop->arg)
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
		else
			BUG();
	}

	tmio_mmc_finish_request(host);
}

static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;

	if (!data)
		return;

	if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
		/*
		 * Has all data been written out yet? Testing on SuperH showed
		 * that in most cases the first interrupt already comes with the
		 * BUSY status bit clear, but on some operations, like mount or
		 * at the beginning of a write / sync / umount, there is one
		 * DATAEND interrupt with the BUSY bit set; in these cases
		 * waiting for one more interrupt fixes the problem.
		 */
		if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
			disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tasklet_schedule(&host->dma_complete);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ)) {
		disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tasklet_schedule(&host->dma_complete);
	} else {
		tmio_mmc_do_data_irq(host);
	}
}

static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
	unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		return;
	}

	host->cmd = NULL;

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */

	for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data_end handler.
	 * If there's no data or we encountered an error, finish now.
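	 *
	 * Summary note: in DMA mode the PIO READOP/WRITEOP interrupts stay
	 * masked; the dmaengine completion callback re-enables DATAEND and
	 * the tasklets drive the rest of the transfer.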
	 */
	if (host->data && !cmd->error) {
		if (host->data->flags & MMC_DATA_READ) {
			if (!host->chan_rx)
				enable_mmc_irqs(host, TMIO_MASK_READOP);
		} else {
			struct dma_chan *chan = host->chan_tx;
			if (!chan)
				enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				tasklet_schedule(&host->dma_issue);
		}
	} else {
		tmio_mmc_finish_request(host);
	}

	return;
}

static irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	unsigned int ireg, irq_mask, status;

	pr_debug("MMC IRQ begin\n");

	status = sd_ctrl_read32(host, CTL_STATUS);
	irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
	ireg = status & TMIO_MASK_IRQ & ~irq_mask;

	pr_debug_status(status);
	pr_debug_status(ireg);

	if (!ireg) {
		disable_mmc_irqs(host, status & ~irq_mask);

		pr_warning("tmio_mmc: Spurious irq, disabling! "
			"0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
		pr_debug_status(status);

		goto out;
	}

	while (ireg) {
		/* Card insert / remove attempts */
		if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
			ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
				TMIO_STAT_CARD_REMOVE);
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		}

		/* CRC and other errors */
/*		if (ireg & TMIO_STAT_ERR_IRQ)
 *			handled |= tmio_error_irq(host, irq, stat);
 */

		/* Command completion */
		if (ireg & TMIO_MASK_CMD) {
			ack_mmc_irqs(host, TMIO_MASK_CMD);
			tmio_mmc_cmd_irq(host, status);
		}

		/* Data transfer */
		if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
			ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
			tmio_mmc_pio_irq(host);
		}

		/* Data transfer completion */
		if (ireg & TMIO_STAT_DATAEND) {
			ack_mmc_irqs(host, TMIO_STAT_DATAEND);
			tmio_mmc_data_irq(host);
		}

		/* Check status - keep going until we've handled it all */
		status = sd_ctrl_read32(host, CTL_STATUS);
		irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
		ireg = status & TMIO_MASK_IRQ & ~irq_mask;

		pr_debug("Status at end of loop: %08x\n", status);
		pr_debug_status(status);
	}
	pr_debug("MMC IRQ end\n");

out:
	return IRQ_HANDLED;
}

#ifdef CONFIG_TMIO_MMC_DMA
static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
	/* Switch DMA mode on or off - SuperH specific? */
	sd_ctrl_write16(host, 0xd8, enable ? 2 : 0);
#endif
}

static void tmio_dma_complete(void *arg)
{
	struct tmio_mmc_host *host = arg;

	dev_dbg(&host->pdev->dev, "Command completed\n");

	if (!host->data)
		dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n");
	else
		enable_mmc_irqs(host, TMIO_STAT_DATAEND);
}

static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	int ret;

	ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE);
	if (ret > 0) {
		host->dma_sglen = ret;
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		host->desc = desc;
		desc->callback = tmio_dma_complete;
		desc->callback_param = host;
		host->cookie = desc->tx_submit(desc);
		if (host->cookie < 0) {
			host->desc = NULL;
			ret = host->cookie;
		} else {
			chan->device->device_issue_pending(chan);
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, host->cookie, host->mrq);

	if (!host->desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			"DMA failed: %d, falling back to PIO\n", ret);
		tmio_mmc_enable_dma(host, false);
		reset(host);
		/* Fail this request, let above layers recover */
		host->mrq->cmd->error = ret;
		tmio_mmc_finish_request(host);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, host->cookie, host->sg_len);

	return ret > 0 ? 0 : ret;
}

static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	int ret;

	ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE);
	if (ret > 0) {
		host->dma_sglen = ret;
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		host->desc = desc;
		desc->callback = tmio_dma_complete;
		desc->callback_param = host;
		host->cookie = desc->tx_submit(desc);
		if (host->cookie < 0) {
			host->desc = NULL;
			ret = host->cookie;
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, host->cookie, host->mrq);

	if (!host->desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			"DMA failed: %d, falling back to PIO\n", ret);
		tmio_mmc_enable_dma(host, false);
		reset(host);
		/* Fail this request, let above layers recover */
		host->mrq->cmd->error = ret;
		tmio_mmc_finish_request(host);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, host->cookie);

	return ret > 0 ? 0 : ret;
}

static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
	struct mmc_data *data)
{
	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			return tmio_mmc_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			return tmio_mmc_start_dma_tx(host);
	}

	return 0;
}

static void tmio_issue_tasklet_fn(unsigned long priv)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
	struct dma_chan *chan = host->chan_tx;

	chan->device->device_issue_pending(chan);
}

static void tmio_tasklet_fn(unsigned long arg)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
			DMA_FROM_DEVICE);
	else
		dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
			DMA_TO_DEVICE);

	tmio_mmc_do_data_irq(host);
}

/* It might be necessary to make the filter MFD-specific */
static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
{
	dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
	chan->private = arg;
	return true;
}

static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
	struct tmio_mmc_data *pdata)
{
	host->cookie = -EINVAL;
	host->desc = NULL;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (pdata->dma) {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
			pdata->dma->chan_priv_tx);
		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
			pdata->dma->chan_priv_rx);
		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx) {
			dma_release_channel(host->chan_tx);
			host->chan_tx = NULL;
			return;
		}

		tasklet_init(&host->dma_complete, tmio_tasklet_fn, (unsigned long)host);
		tasklet_init(&host->dma_issue, tmio_issue_tasklet_fn, (unsigned long)host);

		tmio_mmc_enable_dma(host, true);
	}
}

static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}

	host->cookie = -EINVAL;
	host->desc = NULL;
}
#else
static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
	struct mmc_data *data)
{
	return 0;
}

static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
	struct tmio_mmc_data *pdata)
{
	host->chan_tx = NULL;
	host->chan_rx = NULL;
}

static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
}
#endif

static int tmio_mmc_start_data(struct tmio_mmc_host *host,
	struct mmc_data *data)
{
	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		data->blksz, data->blocks);

	/* Hardware cannot perform 1 and 2 byte requests in 4 bit mode */
	if (data->blksz < 4 && host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
		pr_err("%s: %d byte block unsupported in 4 bit mode\n",
			mmc_hostname(host->mmc), data->blksz);
		return -EINVAL;
	}

	tmio_mmc_init_sg(host, data);
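	/*
	 * Note: the IRQ handlers and the DMA path pick this transfer up via
	 * host->data (set just below) and the scatterlist initialised above.
	 */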
	host->data = data;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	return tmio_mmc_start_dma(host, data);
}

/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	int ret;

	if (host->mrq)
		pr_debug("request not null\n");

	host->mrq = mrq;

	if (mrq->data) {
		ret = tmio_mmc_start_data(host, mrq->data);
		if (ret)
			goto fail;
	}

	ret = tmio_mmc_start_command(host, mrq->cmd);
	if (!ret)
		return;

fail:
	mrq->cmd->error = ret;
	mmc_request_done(mmc, mrq);
}

/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme, therefore it cannot
 * run an MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
 * MMC won't run that fast, it has to be clocked at 12MHz, which is the next
 * slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (ios->clock)
		tmio_mmc_set_clock(host, ios->clock);

	/* Power sequence - OFF -> ON -> UP */
	switch (ios->power_mode) {
	case MMC_POWER_OFF: /* power down SD bus */
		if (host->set_pwr)
			host->set_pwr(host->pdev, 0);
		tmio_mmc_clk_stop(host);
		break;
	case MMC_POWER_ON: /* power up SD bus */
		if (host->set_pwr)
			host->set_pwr(host->pdev, 1);
		break;
	case MMC_POWER_UP: /* start bus clock */
		tmio_mmc_clk_start(host);
		break;
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
		break;
	case MMC_BUS_WIDTH_4:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
		break;
	}

	/* Let things settle. Delay taken from the WinCE driver */
	udelay(140);
}

static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct mfd_cell *cell = host->pdev->dev.platform_data;
	struct tmio_mmc_data *pdata = cell->driver_data;

	return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
		(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 0 : 1;
}

static const struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
};

#ifdef CONFIG_PM
static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
{
	struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
	struct mmc_host *mmc = platform_get_drvdata(dev);
	int ret;

	ret = mmc_suspend_host(mmc);

	/* Tell the MFD core it can disable us now. */
	if (!ret && cell->disable)
		cell->disable(dev);

	return ret;
}

static int tmio_mmc_resume(struct platform_device *dev)
{
	struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
	struct mmc_host *mmc = platform_get_drvdata(dev);
	int ret = 0;

	/* Tell the MFD core we are ready to be enabled */
	if (cell->resume) {
		ret = cell->resume(dev);
		if (ret)
			goto out;
	}

	mmc_resume_host(mmc);

out:
	return ret;
}
#else
#define tmio_mmc_suspend NULL
#define tmio_mmc_resume NULL
#endif

static int __devinit tmio_mmc_probe(struct platform_device *dev)
{
	struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
	struct tmio_mmc_data *pdata;
	struct resource *res_ctl;
	struct tmio_mmc_host *host;
	struct mmc_host *mmc;
	int ret = -EINVAL;
	u32 irq_mask = TMIO_MASK_CMD;

	if (dev->num_resources != 2)
		goto out;

	res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!res_ctl)
		goto out;

	pdata = cell->driver_data;
	if (!pdata || !pdata->hclk)
		goto out;

	ret = -ENOMEM;

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &dev->dev);
	if (!mmc)
		goto out;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->pdev = dev;
	platform_set_drvdata(dev, mmc);

	host->set_pwr = pdata->set_pwr;
	host->set_clk_div = pdata->set_clk_div;

	/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
	host->bus_shift = resource_size(res_ctl) >> 10;

	host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
	if (!host->ctl)
		goto host_free;

	mmc->ops = &tmio_mmc_ops;
	mmc->caps = MMC_CAP_4_BIT_DATA;
	mmc->caps |= pdata->capabilities;
	mmc->f_max = pdata->hclk;
	mmc->f_min = mmc->f_max / 512;
	if (pdata->ocr_mask)
		mmc->ocr_avail = pdata->ocr_mask;
	else
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/* Tell the MFD core we are ready to be enabled */
	if (cell->enable) {
		ret = cell->enable(dev);
		if (ret)
			goto unmap_ctl;
	}

	tmio_mmc_clk_stop(host);
	reset(host);

	ret = platform_get_irq(dev, 0);
	if (ret >= 0)
		host->irq = ret;
	else
		goto cell_disable;

	disable_mmc_irqs(host, TMIO_MASK_ALL);

	ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
		IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
	if (ret)
		goto cell_disable;

	/* See if we also get DMA */
	tmio_mmc_request_dma(host, pdata);

	mmc_add_host(mmc);

	pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
		(unsigned long)host->ctl, host->irq);

	/* Unmask the IRQs we want to know about */
	if (!host->chan_rx)
		irq_mask |= TMIO_MASK_READOP;
	if (!host->chan_tx)
		irq_mask |= TMIO_MASK_WRITEOP;
	enable_mmc_irqs(host, irq_mask);

	return 0;

cell_disable:
	if (cell->disable)
		cell->disable(dev);
unmap_ctl:
	iounmap(host->ctl);
host_free:
	mmc_free_host(mmc);
out:
	return ret;
}

static int __devexit tmio_mmc_remove(struct platform_device *dev)
{
	struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
	struct mmc_host *mmc = platform_get_drvdata(dev);

	platform_set_drvdata(dev, NULL);

	if (mmc) {
		struct tmio_mmc_host *host = mmc_priv(mmc);
		mmc_remove_host(mmc);
		tmio_mmc_release_dma(host);
		free_irq(host->irq, host);
		if (cell->disable)
			cell->disable(dev);
		iounmap(host->ctl);
		mmc_free_host(mmc);
	}

	return 0;
}

/* ------------------- device registration ----------------------- */

static struct platform_driver tmio_mmc_driver = {
	.driver = {
		.name = "tmio-mmc",
		.owner = THIS_MODULE,
	},
	.probe = tmio_mmc_probe,
	.remove = __devexit_p(tmio_mmc_remove),
	.suspend = tmio_mmc_suspend,
	.resume = tmio_mmc_resume,
};


static int __init tmio_mmc_init(void)
{
	return platform_driver_register(&tmio_mmc_driver);
}

static void __exit tmio_mmc_exit(void)
{
	platform_driver_unregister(&tmio_mmc_driver);
}

module_init(tmio_mmc_init);
module_exit(tmio_mmc_exit);

MODULE_DESCRIPTION("Toshiba TMIO SD/MMC driver");
MODULE_AUTHOR("Ian Molton <spyro@f2s.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:tmio-mmc");