mmc: tmio: split core functionality, DMA and MFD glue

TMIO MMC chips contain an SD / SDIO IP core from Panasonic, similar to
the one used in MN5774 and other MN57xx controllers. These IP cores are
included in many multifunction devices and in sh-mobile chips from
Renesas; in the latter case they can also use DMA. Some sh-mobile
implementations also have other peculiarities that MFD-based solutions
don't have. This makes supporting all these features in a monolithic
driver inconvenient and error-prone. This patch splits the driver into
3 parts: the core, the MFD glue, and the DMA support. In case of a
modular build, two modules will be built: tmio_mmc_core and tmio_mmc.
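
As an illustration (a sketch only, not part of this patch; the "foo"
names are hypothetical), an MFD glue driver now just enables its cell
and hands the platform device to the core, which ioremaps the
registers, requests the IRQ and registers the mmc_host:

	static int __devinit foo_mmc_probe(struct platform_device *pdev)
	{
		struct tmio_mmc_data *pdata = mfd_get_data(pdev);
		struct tmio_mmc_host *host;

		/* the core does the heavy lifting */
		return tmio_mmc_host_probe(&host, pdev, pdata);
	}

	static int __devexit foo_mmc_remove(struct platform_device *pdev)
	{
		struct mmc_host *mmc = platform_get_drvdata(pdev);

		/* tears down IRQ, DMA and the mmc_host */
		tmio_mmc_host_remove(mmc_priv(mmc));
		return 0;
	}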

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Acked-by: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Chris Ball <cjb@laptop.org>

Authored by Guennadi Liakhovetski, committed by Chris Ball (b6147490 5f52c355)

+1409 -1285
+4
drivers/mmc/host/Kconfig
···
 	  To compile this driver as a module, choose M here: the
 	  module will be called sdricoh_cs.
 
+config MMC_TMIO_CORE
+	tristate
+
 config MMC_TMIO
 	tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support"
 	depends on MFD_TMIO || MFD_ASIC3 || MFD_SH_MOBILE_SDHI
+	select MMC_TMIO_CORE
 	help
 	  This provides support for the SD/MMC cell found in TC6393XB,
 	  T7L66XB and also HTC ASIC3
+3
drivers/mmc/host/Makefile
···
 obj-$(CONFIG_MMC_S3C)		+= s3cmci.o
 obj-$(CONFIG_MMC_SDRICOH_CS)	+= sdricoh_cs.o
 obj-$(CONFIG_MMC_TMIO)		+= tmio_mmc.o
+obj-$(CONFIG_MMC_TMIO_CORE)	+= tmio_mmc_core.o
+tmio_mmc_core-y			:= tmio_mmc_pio.o
+tmio_mmc_core-$(CONFIG_TMIO_MMC_DMA)	+= tmio_mmc_dma.o
 obj-$(CONFIG_MMC_CB710)		+= cb710-mmc.o
 obj-$(CONFIG_MMC_VIA_SDMMC)	+= via-sdmmc.o
 obj-$(CONFIG_SDH_BFIN)		+= bfin_sdh.o
+18 -1285
drivers/mmc/host/tmio_mmc.c
···
 /*
- * linux/drivers/mmc/tmio_mmc.c
+ * linux/drivers/mmc/host/tmio_mmc.c
  *
- * Copyright (C) 2004 Ian Molton
- * Copyright (C) 2007 Ian Molton
+ * Copyright (C) 2007 Ian Molton
+ * Copyright (C) 2004 Ian Molton
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
···
  * Driver for the MMC / SD / SDIO cell found in:
  *
  * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
- *
- * This driver draws mainly on scattered spec sheets, Reverse engineering
- * of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
- * support). (Further 4 bit support from a later datasheet).
- *
- * TODO:
- *   Investigate using a workqueue for PIO transfers
- *   Eliminate FIXMEs
- *   SDIO support
- *   Better Power management
- *   Handle MMC errors better
- *   double buffer support
- *
  */
 
-#include <linux/delay.h>
 #include <linux/device.h>
-#include <linux/dmaengine.h>
-#include <linux/highmem.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/tmio.h>
 #include <linux/mmc/host.h>
 #include <linux/module.h>
 #include <linux/pagemap.h>
 #include <linux/scatterlist.h>
-#include <linux/workqueue.h>
-#include <linux/spinlock.h>
 
-#define CTL_SD_CMD 0x00
-#define CTL_ARG_REG 0x04
-#define CTL_STOP_INTERNAL_ACTION 0x08
-#define CTL_XFER_BLK_COUNT 0xa
-#define CTL_RESPONSE 0x0c
-#define CTL_STATUS 0x1c
-#define CTL_IRQ_MASK 0x20
-#define CTL_SD_CARD_CLK_CTL 0x24
-#define CTL_SD_XFER_LEN 0x26
-#define CTL_SD_MEM_CARD_OPT 0x28
-#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
-#define CTL_SD_DATA_PORT 0x30
-#define CTL_TRANSACTION_CTL 0x34
-#define CTL_SDIO_STATUS 0x36
-#define CTL_SDIO_IRQ_MASK 0x38
-#define CTL_RESET_SD 0xe0
-#define CTL_SDIO_REGS 0x100
-#define CTL_CLK_AND_WAIT_CTL 0x138
-#define CTL_RESET_SDIO 0x1e0
-
-/* Definitions for values the CTRL_STATUS register can take. */
-#define TMIO_STAT_CMDRESPEND	0x00000001
-#define TMIO_STAT_DATAEND	0x00000004
-#define TMIO_STAT_CARD_REMOVE	0x00000008
-#define TMIO_STAT_CARD_INSERT	0x00000010
-#define TMIO_STAT_SIGSTATE	0x00000020
-#define TMIO_STAT_WRPROTECT	0x00000080
-#define TMIO_STAT_CARD_REMOVE_A	0x00000100
-#define TMIO_STAT_CARD_INSERT_A	0x00000200
-#define TMIO_STAT_SIGSTATE_A	0x00000400
-#define TMIO_STAT_CMD_IDX_ERR	0x00010000
-#define TMIO_STAT_CRCFAIL	0x00020000
-#define TMIO_STAT_STOPBIT_ERR	0x00040000
-#define TMIO_STAT_DATATIMEOUT	0x00080000
-#define TMIO_STAT_RXOVERFLOW	0x00100000
-#define TMIO_STAT_TXUNDERRUN	0x00200000
-#define TMIO_STAT_CMDTIMEOUT	0x00400000
-#define TMIO_STAT_RXRDY		0x01000000
-#define TMIO_STAT_TXRQ		0x02000000
-#define TMIO_STAT_ILL_FUNC	0x20000000
-#define TMIO_STAT_CMD_BUSY	0x40000000
-#define TMIO_STAT_ILL_ACCESS	0x80000000
-
-/* Definitions for values the CTRL_SDIO_STATUS register can take. */
-#define TMIO_SDIO_STAT_IOIRQ	0x0001
-#define TMIO_SDIO_STAT_EXPUB52	0x4000
-#define TMIO_SDIO_STAT_EXWT	0x8000
-#define TMIO_SDIO_MASK_ALL	0xc007
-
-/* Define some IRQ masks */
-/* This is the mask used at reset by the chip */
-#define TMIO_MASK_ALL		0x837f031d
-#define TMIO_MASK_READOP	(TMIO_STAT_RXRDY | TMIO_STAT_DATAEND)
-#define TMIO_MASK_WRITEOP	(TMIO_STAT_TXRQ | TMIO_STAT_DATAEND)
-#define TMIO_MASK_CMD		(TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \
-		TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
-#define TMIO_MASK_IRQ		(TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
-
-#define TMIO_MIN_DMA_LEN 8
-
-#define enable_mmc_irqs(host, i) \
-	do { \
-		u32 mask;\
-		mask  = sd_ctrl_read32((host), CTL_IRQ_MASK); \
-		mask &= ~((i) & TMIO_MASK_IRQ); \
-		sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
-	} while (0)
-
-#define disable_mmc_irqs(host, i) \
-	do { \
-		u32 mask;\
-		mask  = sd_ctrl_read32((host), CTL_IRQ_MASK); \
-		mask |= ((i) & TMIO_MASK_IRQ); \
-		sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
-	} while (0)
-
-#define ack_mmc_irqs(host, i) \
-	do { \
-		sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
-	} while (0)
-
-/* This is arbitrary, just noone needed any higher alignment yet */
-#define MAX_ALIGN 4
-
-struct tmio_mmc_host {
-	void __iomem *ctl;
-	unsigned long bus_shift;
-	struct mmc_command	*cmd;
-	struct mmc_request	*mrq;
-	struct mmc_data		*data;
-	struct mmc_host		*mmc;
-	int			irq;
-	unsigned int		sdio_irq_enabled;
-
-	/* Callbacks for clock / power control */
-	void (*set_pwr)(struct platform_device *host, int state);
-	void (*set_clk_div)(struct platform_device *host, int state);
-
-	/* pio related stuff */
-	struct scatterlist	*sg_ptr;
-	struct scatterlist	*sg_orig;
-	unsigned int		sg_len;
-	unsigned int		sg_off;
-
-	struct platform_device *pdev;
-
-	/* DMA support */
-	bool			force_pio;
-	struct dma_chan		*chan_rx;
-	struct dma_chan		*chan_tx;
-	struct tasklet_struct	dma_complete;
-	struct tasklet_struct	dma_issue;
-#ifdef CONFIG_TMIO_MMC_DMA
-	u8			bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN)));
-	struct scatterlist	bounce_sg;
-#endif
-
-	/* Track lost interrupts */
-	struct delayed_work	delayed_reset_work;
-	spinlock_t		lock;
-	unsigned long		last_req_ts;
-};
-
-static void tmio_check_bounce_buffer(struct tmio_mmc_host *host);
-
-static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
-{
-	return readw(host->ctl + (addr << host->bus_shift));
-}
-
-static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
-		u16 *buf, int count)
-{
-	readsw(host->ctl + (addr << host->bus_shift), buf, count);
-}
-
-static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
-{
-	return readw(host->ctl + (addr << host->bus_shift)) |
-	       readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
-}
-
-static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
-{
-	writew(val, host->ctl + (addr << host->bus_shift));
-}
-
-static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
-		u16 *buf, int count)
-{
-	writesw(host->ctl + (addr << host->bus_shift), buf, count);
-}
-
-static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
-{
-	writew(val, host->ctl + (addr << host->bus_shift));
-	writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
-}
-
-static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
-{
-	host->sg_len = data->sg_len;
-	host->sg_ptr = data->sg;
-	host->sg_orig = data->sg;
-	host->sg_off = 0;
-}
-
-static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
-{
-	host->sg_ptr = sg_next(host->sg_ptr);
-	host->sg_off = 0;
-	return --host->sg_len;
-}
-
-static char *tmio_mmc_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
-{
-	local_irq_save(*flags);
-	return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
-}
-
-static void tmio_mmc_kunmap_atomic(struct scatterlist *sg, unsigned long *flags, void *virt)
-{
-	kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ);
-	local_irq_restore(*flags);
-}
-
-#ifdef CONFIG_MMC_DEBUG
-
-#define STATUS_TO_TEXT(a, status, i) \
-	do { \
-		if (status & TMIO_STAT_##a) { \
-			if (i++) \
-				printk(" | "); \
-			printk(#a); \
-		} \
-	} while (0)
-
-void pr_debug_status(u32 status)
-{
-	int i = 0;
-	printk(KERN_DEBUG "status: %08x = ", status);
-	STATUS_TO_TEXT(CARD_REMOVE, status, i);
-	STATUS_TO_TEXT(CARD_INSERT, status, i);
-	STATUS_TO_TEXT(SIGSTATE, status, i);
-	STATUS_TO_TEXT(WRPROTECT, status, i);
-	STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
-	STATUS_TO_TEXT(CARD_INSERT_A, status, i);
-	STATUS_TO_TEXT(SIGSTATE_A, status, i);
-	STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
-	STATUS_TO_TEXT(STOPBIT_ERR, status, i);
-	STATUS_TO_TEXT(ILL_FUNC, status, i);
-	STATUS_TO_TEXT(CMD_BUSY, status, i);
-	STATUS_TO_TEXT(CMDRESPEND, status, i);
-	STATUS_TO_TEXT(DATAEND, status, i);
-	STATUS_TO_TEXT(CRCFAIL, status, i);
-	STATUS_TO_TEXT(DATATIMEOUT, status, i);
-	STATUS_TO_TEXT(CMDTIMEOUT, status, i);
-	STATUS_TO_TEXT(RXOVERFLOW, status, i);
-	STATUS_TO_TEXT(TXUNDERRUN, status, i);
-	STATUS_TO_TEXT(RXRDY, status, i);
-	STATUS_TO_TEXT(TXRQ, status, i);
-	STATUS_TO_TEXT(ILL_ACCESS, status, i);
-	printk("\n");
-}
-
-#else
-#define pr_debug_status(s)  do { } while (0)
-#endif
-
-static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
-{
-	struct tmio_mmc_host *host = mmc_priv(mmc);
-
-	if (enable) {
-		host->sdio_irq_enabled = 1;
-		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
-		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK,
-			(TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ));
-	} else {
-		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL);
-		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
-		host->sdio_irq_enabled = 0;
-	}
-}
-
-static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
-{
-	u32 clk = 0, clock;
-
-	if (new_clock) {
-		for (clock = host->mmc->f_min, clk = 0x80000080;
-			new_clock >= (clock<<1); clk >>= 1)
-			clock <<= 1;
-		clk |= 0x100;
-	}
-
-	if (host->set_clk_div)
-		host->set_clk_div(host->pdev, (clk>>22) & 1);
-
-	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
-}
-
-static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
-{
-	struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
-
-	/*
-	 * Testing on sh-mobile showed that SDIO IRQs are unmasked when
-	 * CTL_CLK_AND_WAIT_CTL gets written, so we have to disable the
-	 * device IRQ here and restore the SDIO IRQ mask before
-	 * re-enabling the device IRQ.
-	 */
-	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
-		disable_irq(host->irq);
-	sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
-	msleep(10);
-	if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
-		tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
-		enable_irq(host->irq);
-	}
-	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
-		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
-	msleep(10);
-}
-
-static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
-{
-	struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
-
-	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
-		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
-	msleep(10);
-	/* see comment in tmio_mmc_clk_stop above */
-	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
-		disable_irq(host->irq);
-	sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
-	msleep(10);
-	if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
-		tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
-		enable_irq(host->irq);
-	}
-}
-
-static void reset(struct tmio_mmc_host *host)
-{
-	/* FIXME - should we set stop clock reg here */
-	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
-	sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
-	msleep(10);
-	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
-	sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
-	msleep(10);
-}
-
-static void tmio_mmc_reset_work(struct work_struct *work)
-{
-	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
-						  delayed_reset_work.work);
-	struct mmc_request *mrq;
-	unsigned long flags;
-
-	spin_lock_irqsave(&host->lock, flags);
-	mrq = host->mrq;
-
-	/* request already finished */
-	if (!mrq
-	    || time_is_after_jiffies(host->last_req_ts +
-		msecs_to_jiffies(2000))) {
-		spin_unlock_irqrestore(&host->lock, flags);
-		return;
-	}
-
-	dev_warn(&host->pdev->dev,
-		"timeout waiting for hardware interrupt (CMD%u)\n",
-		mrq->cmd->opcode);
-
-	if (host->data)
-		host->data->error = -ETIMEDOUT;
-	else if (host->cmd)
-		host->cmd->error = -ETIMEDOUT;
-	else
-		mrq->cmd->error = -ETIMEDOUT;
-
-	host->cmd = NULL;
-	host->data = NULL;
-	host->mrq = NULL;
-	host->force_pio = false;
-
-	spin_unlock_irqrestore(&host->lock, flags);
-
-	reset(host);
-
-	mmc_request_done(host->mmc, mrq);
-}
-
-static void
-tmio_mmc_finish_request(struct tmio_mmc_host *host)
-{
-	struct mmc_request *mrq = host->mrq;
-
-	if (!mrq)
-		return;
-
-	host->mrq = NULL;
-	host->cmd = NULL;
-	host->data = NULL;
-	host->force_pio = false;
-
-	cancel_delayed_work(&host->delayed_reset_work);
-
-	mmc_request_done(host->mmc, mrq);
-}
-
-/* These are the bitmasks the tmio chip requires to implement the MMC response
- * types. Note that R1 and R6 are the same in this scheme. */
-#define APP_CMD        0x0040
-#define RESP_NONE      0x0300
-#define RESP_R1        0x0400
-#define RESP_R1B       0x0500
-#define RESP_R2        0x0600
-#define RESP_R3        0x0700
-#define DATA_PRESENT   0x0800
-#define TRANSFER_READ  0x1000
-#define TRANSFER_MULTI 0x2000
-#define SECURITY_CMD   0x4000
-
-static int
-tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
-{
-	struct mmc_data *data = host->data;
-	int c = cmd->opcode;
-
-	/* Command 12 is handled by hardware */
-	if (cmd->opcode == 12 && !cmd->arg) {
-		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
-		return 0;
-	}
-
-	switch (mmc_resp_type(cmd)) {
-	case MMC_RSP_NONE: c |= RESP_NONE; break;
-	case MMC_RSP_R1:   c |= RESP_R1;   break;
-	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
-	case MMC_RSP_R2:   c |= RESP_R2;   break;
-	case MMC_RSP_R3:   c |= RESP_R3;   break;
-	default:
-		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
-		return -EINVAL;
-	}
-
-	host->cmd = cmd;
-
-/* FIXME - this seems to be ok commented out but the spec suggest this bit
- *         should be set when issuing app commands.
- *	if(cmd->flags & MMC_FLAG_ACMD)
- *		c |= APP_CMD;
- */
-	if (data) {
-		c |= DATA_PRESENT;
-		if (data->blocks > 1) {
-			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
-			c |= TRANSFER_MULTI;
-		}
-		if (data->flags & MMC_DATA_READ)
-			c |= TRANSFER_READ;
-	}
-
-	enable_mmc_irqs(host, TMIO_MASK_CMD);
-
-	/* Fire off the command */
-	sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
-	sd_ctrl_write16(host, CTL_SD_CMD, c);
-
-	return 0;
-}
-
-/*
- * This chip always returns (at least?) as much data as you ask for.
- * I'm unsure what happens if you ask for less than a block. This should be
- * looked into to ensure that a funny length read doesnt hose the controller.
- */
-static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
-{
-	struct mmc_data *data = host->data;
-	void *sg_virt;
-	unsigned short *buf;
-	unsigned int count;
-	unsigned long flags;
-
-	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
-		pr_err("PIO IRQ in DMA mode!\n");
-		return;
-	} else if (!data) {
-		pr_debug("Spurious PIO IRQ\n");
-		return;
-	}
-
-	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
-	buf = (unsigned short *)(sg_virt + host->sg_off);
-
-	count = host->sg_ptr->length - host->sg_off;
-	if (count > data->blksz)
-		count = data->blksz;
-
-	pr_debug("count: %08x offset: %08x flags %08x\n",
-		 count, host->sg_off, data->flags);
-
-	/* Transfer the data */
-	if (data->flags & MMC_DATA_READ)
-		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
-	else
-		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
-
-	host->sg_off += count;
-
-	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);
-
-	if (host->sg_off == host->sg_ptr->length)
-		tmio_mmc_next_sg(host);
-
-	return;
-}
-
-/* needs to be called with host->lock held */
-static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
-{
-	struct mmc_data *data = host->data;
-	struct mmc_command *stop;
-
-	host->data = NULL;
-
-	if (!data) {
-		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
-		return;
-	}
-	stop = data->stop;
-
-	/* FIXME - return correct transfer count on errors */
-	if (!data->error)
-		data->bytes_xfered = data->blocks * data->blksz;
-	else
-		data->bytes_xfered = 0;
-
-	pr_debug("Completed data request\n");
-
-	/*
-	 * FIXME: other drivers allow an optional stop command of any given type
-	 *        which we dont do, as the chip can auto generate them.
-	 *        Perhaps we can be smarter about when to use auto CMD12 and
-	 *        only issue the auto request when we know this is the desired
-	 *        stop command, allowing fallback to the stop command the
-	 *        upper layers expect. For now, we do what works.
-	 */
-
-	if (data->flags & MMC_DATA_READ) {
-		if (host->chan_rx && !host->force_pio)
-			tmio_check_bounce_buffer(host);
-		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
-			host->mrq);
-	} else {
-		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
-			host->mrq);
-	}
-
-	if (stop) {
-		if (stop->opcode == 12 && !stop->arg)
-			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
-		else
-			BUG();
-	}
-
-	tmio_mmc_finish_request(host);
-}
-
-static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
-{
-	struct mmc_data *data;
-	spin_lock(&host->lock);
-	data = host->data;
-
-	if (!data)
-		goto out;
-
-	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
-		/*
-		 * Has all data been written out yet? Testing on SuperH showed,
-		 * that in most cases the first interrupt comes already with the
-		 * BUSY status bit clear, but on some operations, like mount or
-		 * in the beginning of a write / sync / umount, there is one
-		 * DATAEND interrupt with the BUSY bit set, in this cases
-		 * waiting for one more interrupt fixes the problem.
-		 */
-		if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
-			disable_mmc_irqs(host, TMIO_STAT_DATAEND);
-			tasklet_schedule(&host->dma_complete);
-		}
-	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
-		disable_mmc_irqs(host, TMIO_STAT_DATAEND);
-		tasklet_schedule(&host->dma_complete);
-	} else {
-		tmio_mmc_do_data_irq(host);
-		disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
-	}
-out:
-	spin_unlock(&host->lock);
-}
-
-static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
-	unsigned int stat)
-{
-	struct mmc_command *cmd = host->cmd;
-	int i, addr;
-
-	spin_lock(&host->lock);
-
-	if (!host->cmd) {
-		pr_debug("Spurious CMD irq\n");
-		goto out;
-	}
-
-	host->cmd = NULL;
-
-	/* This controller is sicker than the PXA one. Not only do we need to
-	 * drop the top 8 bits of the first response word, we also need to
-	 * modify the order of the response for short response command types.
-	 */
-
-	for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
-		cmd->resp[i] = sd_ctrl_read32(host, addr);
-
-	if (cmd->flags &  MMC_RSP_136) {
-		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
-		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
-		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
-		cmd->resp[3] <<= 8;
-	} else if (cmd->flags & MMC_RSP_R3) {
-		cmd->resp[0] = cmd->resp[3];
-	}
-
-	if (stat & TMIO_STAT_CMDTIMEOUT)
-		cmd->error = -ETIMEDOUT;
-	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
-		cmd->error = -EILSEQ;
-
-	/* If there is data to handle we enable data IRQs here, and
-	 * we will ultimatley finish the request in the data_end handler.
-	 * If theres no data or we encountered an error, finish now.
-	 */
-	if (host->data && !cmd->error) {
-		if (host->data->flags & MMC_DATA_READ) {
-			if (host->force_pio || !host->chan_rx)
-				enable_mmc_irqs(host, TMIO_MASK_READOP);
-			else
-				tasklet_schedule(&host->dma_issue);
-		} else {
-			if (host->force_pio || !host->chan_tx)
-				enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
-			else
-				tasklet_schedule(&host->dma_issue);
-		}
-	} else {
-		tmio_mmc_finish_request(host);
-	}
-
-out:
-	spin_unlock(&host->lock);
-
-	return;
-}
-
-static irqreturn_t tmio_mmc_irq(int irq, void *devid)
-{
-	struct tmio_mmc_host *host = devid;
-	struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
-	unsigned int ireg, irq_mask, status;
-	unsigned int sdio_ireg, sdio_irq_mask, sdio_status;
-
-	pr_debug("MMC IRQ begin\n");
-
-	status = sd_ctrl_read32(host, CTL_STATUS);
-	irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
-	ireg = status & TMIO_MASK_IRQ & ~irq_mask;
-
-	sdio_ireg = 0;
-	if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) {
-		sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
-		sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK);
-		sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask;
-
-		sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL);
-
-		if (sdio_ireg && !host->sdio_irq_enabled) {
-			pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
-				   sdio_status, sdio_irq_mask, sdio_ireg);
-			tmio_mmc_enable_sdio_irq(host->mmc, 0);
-			goto out;
-		}
-
-		if (host->mmc->caps & MMC_CAP_SDIO_IRQ &&
-			sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
-			mmc_signal_sdio_irq(host->mmc);
-
-		if (sdio_ireg)
-			goto out;
-	}
-
-	pr_debug_status(status);
-	pr_debug_status(ireg);
-
-	if (!ireg) {
-		disable_mmc_irqs(host, status & ~irq_mask);
-
-		pr_warning("tmio_mmc: Spurious irq, disabling! "
-			"0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
-		pr_debug_status(status);
-
-		goto out;
-	}
-
-	while (ireg) {
-		/* Card insert / remove attempts */
-		if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
-			ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
-				TMIO_STAT_CARD_REMOVE);
-			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
-		}
-
-		/* CRC and other errors */
-/*		if (ireg & TMIO_STAT_ERR_IRQ)
- *			handled |= tmio_error_irq(host, irq, stat);
- */
-
-		/* Command completion */
-		if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
-			ack_mmc_irqs(host,
-				     TMIO_STAT_CMDRESPEND |
-				     TMIO_STAT_CMDTIMEOUT);
-			tmio_mmc_cmd_irq(host, status);
-		}
-
-		/* Data transfer */
-		if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
-			ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
-			tmio_mmc_pio_irq(host);
-		}
-
-		/* Data transfer completion */
-		if (ireg & TMIO_STAT_DATAEND) {
-			ack_mmc_irqs(host, TMIO_STAT_DATAEND);
-			tmio_mmc_data_irq(host);
-		}
-
-		/* Check status - keep going until we've handled it all */
-		status = sd_ctrl_read32(host, CTL_STATUS);
-		irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
-		ireg = status & TMIO_MASK_IRQ & ~irq_mask;
-
-		pr_debug("Status at end of loop: %08x\n", status);
-		pr_debug_status(status);
-	}
-	pr_debug("MMC IRQ end\n");
-
-out:
-	return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_TMIO_MMC_DMA
-static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
-{
-	if (host->sg_ptr == &host->bounce_sg) {
-		unsigned long flags;
-		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
-		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
-		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
-	}
-}
-
-static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
-{
-#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
-	/* Switch DMA mode on or off - SuperH specific? */
-	sd_ctrl_write16(host, 0xd8, enable ? 2 : 0);
-#endif
-}
-
-static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
-{
-	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
-	struct dma_async_tx_descriptor *desc = NULL;
-	struct dma_chan *chan = host->chan_rx;
-	struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
-	dma_cookie_t cookie;
-	int ret, i;
-	bool aligned = true, multiple = true;
-	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
-
-	for_each_sg(sg, sg_tmp, host->sg_len, i) {
-		if (sg_tmp->offset & align)
-			aligned = false;
-		if (sg_tmp->length & align) {
-			multiple = false;
-			break;
-		}
-	}
-
-	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
-			  align >= MAX_ALIGN)) || !multiple) {
-		ret = -EINVAL;
-		goto pio;
-	}
-
-	if (sg->length < TMIO_MIN_DMA_LEN) {
-		host->force_pio = true;
-		return;
-	}
-
-	disable_mmc_irqs(host, TMIO_STAT_RXRDY);
-
-	/* The only sg element can be unaligned, use our bounce buffer then */
-	if (!aligned) {
-		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
-		host->sg_ptr = &host->bounce_sg;
-		sg = host->sg_ptr;
-	}
-
-	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
-	if (ret > 0)
-		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
-			DMA_FROM_DEVICE, DMA_CTRL_ACK);
-
-	if (desc)
-		cookie = dmaengine_submit(desc);
-
-	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
-		__func__, host->sg_len, ret, cookie, host->mrq);
-
-pio:
-	if (!desc) {
-		/* DMA failed, fall back to PIO */
-		if (ret >= 0)
-			ret = -EIO;
-		host->chan_rx = NULL;
-		dma_release_channel(chan);
-		/* Free the Tx channel too */
-		chan = host->chan_tx;
-		if (chan) {
-			host->chan_tx = NULL;
-			dma_release_channel(chan);
-		}
-		dev_warn(&host->pdev->dev,
-			 "DMA failed: %d, falling back to PIO\n", ret);
-		tmio_mmc_enable_dma(host, false);
-	}
-
-	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
-		desc, cookie, host->sg_len);
-}
-
-static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
-{
-	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
-	struct dma_async_tx_descriptor *desc = NULL;
-	struct dma_chan *chan = host->chan_tx;
-	struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
-	dma_cookie_t cookie;
-	int ret, i;
-	bool aligned = true, multiple = true;
-	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
-
-	for_each_sg(sg, sg_tmp, host->sg_len, i) {
-		if (sg_tmp->offset & align)
-			aligned = false;
-		if (sg_tmp->length & align) {
-			multiple = false;
-			break;
-		}
-	}
-
-	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
-			  align >= MAX_ALIGN)) || !multiple) {
-		ret = -EINVAL;
-		goto pio;
-	}
-
-	if (sg->length < TMIO_MIN_DMA_LEN) {
-		host->force_pio = true;
-		return;
-	}
-
-	disable_mmc_irqs(host, TMIO_STAT_TXRQ);
-
-	/* The only sg element can be unaligned, use our bounce buffer then */
-	if (!aligned) {
-		unsigned long flags;
-		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
-		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
-		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
-		tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
-		host->sg_ptr = &host->bounce_sg;
-		sg = host->sg_ptr;
-	}
-
-	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
-	if (ret > 0)
-		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
-			DMA_TO_DEVICE, DMA_CTRL_ACK);
-
-	if (desc)
-		cookie = dmaengine_submit(desc);
-
-	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
-		__func__, host->sg_len, ret, cookie, host->mrq);
-
-pio:
-	if (!desc) {
-		/* DMA failed, fall back to PIO */
-		if (ret >= 0)
-			ret = -EIO;
-		host->chan_tx = NULL;
-		dma_release_channel(chan);
-		/* Free the Rx channel too */
-		chan = host->chan_rx;
-		if (chan) {
-			host->chan_rx = NULL;
-			dma_release_channel(chan);
-		}
-		dev_warn(&host->pdev->dev,
-			 "DMA failed: %d, falling back to PIO\n", ret);
-		tmio_mmc_enable_dma(host, false);
-	}
-
-	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
-		desc, cookie);
-}
-
-static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
-			       struct mmc_data *data)
-{
-	if (data->flags & MMC_DATA_READ) {
-		if (host->chan_rx)
-			tmio_mmc_start_dma_rx(host);
-	} else {
-		if (host->chan_tx)
-			tmio_mmc_start_dma_tx(host);
-	}
-}
-
-static void tmio_issue_tasklet_fn(unsigned long priv)
-{
-	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
-	struct dma_chan *chan = NULL;
-
-	spin_lock_irq(&host->lock);
-
-	if (host && host->data) {
-		if (host->data->flags & MMC_DATA_READ)
-			chan = host->chan_rx;
-		else
-			chan = host->chan_tx;
-	}
-
-	spin_unlock_irq(&host->lock);
-
-	enable_mmc_irqs(host, TMIO_STAT_DATAEND);
-
-	if (chan)
-		dma_async_issue_pending(chan);
-}
-
-static void tmio_tasklet_fn(unsigned long arg)
-{
-	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
-
-	spin_lock_irq(&host->lock);
-
-	if (!host->data)
-		goto out;
-
-	if (host->data->flags & MMC_DATA_READ)
-		dma_unmap_sg(host->chan_rx->device->dev,
-			     host->sg_ptr, host->sg_len,
-			     DMA_FROM_DEVICE);
-	else
-		dma_unmap_sg(host->chan_tx->device->dev,
-			     host->sg_ptr, host->sg_len,
-			     DMA_TO_DEVICE);
-
-	tmio_mmc_do_data_irq(host);
-out:
-	spin_unlock_irq(&host->lock);
-}
-
-/* It might be necessary to make filter MFD specific */
-static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
-{
-	dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
-	chan->private = arg;
-	return true;
-}
-
-static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
-				 struct tmio_mmc_data *pdata)
-{
-	/* We can only either use DMA for both Tx and Rx or not use it at all */
-	if (pdata->dma) {
-		dma_cap_mask_t mask;
-
-		dma_cap_zero(mask);
-		dma_cap_set(DMA_SLAVE, mask);
-
-		host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
-						    pdata->dma->chan_priv_tx);
-		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
-			host->chan_tx);
-
-		if (!host->chan_tx)
-			return;
-
-		host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
-						    pdata->dma->chan_priv_rx);
-		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
-			host->chan_rx);
-
-		if (!host->chan_rx) {
-			dma_release_channel(host->chan_tx);
-			host->chan_tx = NULL;
-			return;
-		}
-
-		tasklet_init(&host->dma_complete, tmio_tasklet_fn, (unsigned long)host);
-		tasklet_init(&host->dma_issue, tmio_issue_tasklet_fn, (unsigned long)host);
-
-		tmio_mmc_enable_dma(host, true);
-	}
-}
-
-static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
-{
-	if (host->chan_tx) {
-		struct dma_chan *chan = host->chan_tx;
-		host->chan_tx = NULL;
-		dma_release_channel(chan);
-	}
-	if (host->chan_rx) {
-		struct dma_chan *chan = host->chan_rx;
-		host->chan_rx = NULL;
-		dma_release_channel(chan);
-	}
-}
-#else
-static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
-{
-}
-
-static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
-			       struct mmc_data *data)
-{
-}
-
-static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
-				 struct tmio_mmc_data *pdata)
-{
-	host->chan_tx = NULL;
-	host->chan_rx = NULL;
-}
-
-static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
-{
-}
-#endif
-
-static int tmio_mmc_start_data(struct tmio_mmc_host *host,
-		struct mmc_data *data)
-{
-	struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
-
-	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
-		 data->blksz, data->blocks);
-
-	/* Some hardware cannot perform 2 byte requests in 4 bit mode */
-	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
-		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;
-
-		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
-			pr_err("%s: %d byte block unsupported in 4 bit mode\n",
-			       mmc_hostname(host->mmc), data->blksz);
-			return -EINVAL;
-		}
-	}
-
-	tmio_mmc_init_sg(host, data);
-	host->data = data;
-
-	/* Set transfer length / blocksize */
-	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
-	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
-
-	tmio_mmc_start_dma(host, data);
-
-	return 0;
-}
-
-/* Process requests from the MMC layer */
-static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
-{
-	struct tmio_mmc_host *host = mmc_priv(mmc);
-	int ret;
-
-	if (host->mrq)
-		pr_debug("request not null\n");
-
-	host->last_req_ts = jiffies;
-	wmb();
-	host->mrq = mrq;
-
-	if (mrq->data) {
-		ret = tmio_mmc_start_data(host, mrq->data);
-		if (ret)
-			goto fail;
-	}
-
-	ret = tmio_mmc_start_command(host, mrq->cmd);
-	if (!ret) {
-		schedule_delayed_work(&host->delayed_reset_work,
-				      msecs_to_jiffies(2000));
-		return;
-	}
-
-fail:
-	host->mrq = NULL;
-	host->force_pio = false;
-	mrq->cmd->error = ret;
-	mmc_request_done(mmc, mrq);
-}
-
-/* Set MMC clock / power.
- * Note: This controller uses a simple divider scheme therefore it cannot
- * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
- * MMC wont run that fast, it has to be clocked at 12MHz which is the next
- * slowest setting.
- */
-static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
-{
-	struct tmio_mmc_host *host = mmc_priv(mmc);
-
-	if (ios->clock)
-		tmio_mmc_set_clock(host, ios->clock);
-
-	/* Power sequence - OFF -> ON -> UP */
-	switch (ios->power_mode) {
-	case MMC_POWER_OFF: /* power down SD bus */
-		if (host->set_pwr)
-			host->set_pwr(host->pdev, 0);
-		tmio_mmc_clk_stop(host);
-		break;
-	case MMC_POWER_ON: /* power up SD bus */
-		if (host->set_pwr)
-			host->set_pwr(host->pdev, 1);
-		break;
-	case MMC_POWER_UP: /* start bus clock */
-		tmio_mmc_clk_start(host);
-		break;
-	}
-
-	switch (ios->bus_width) {
-	case MMC_BUS_WIDTH_1:
-		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
-		break;
-	case MMC_BUS_WIDTH_4:
-		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
-		break;
-	}
-
-	/* Let things settle. delay taken from winCE driver */
-	udelay(140);
-}
-
-static int tmio_mmc_get_ro(struct mmc_host *mmc)
-{
-	struct tmio_mmc_host *host = mmc_priv(mmc);
-	struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
-
-	return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
-		(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 0 : 1;
-}
-
-static int tmio_mmc_get_cd(struct mmc_host *mmc)
-{
-	struct tmio_mmc_host *host = mmc_priv(mmc);
-	struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
-
-	if (!pdata->get_cd)
-		return -ENOSYS;
-	else
-		return pdata->get_cd(host->pdev);
-}
-
-static const struct mmc_host_ops tmio_mmc_ops = {
-	.request	= tmio_mmc_request,
-	.set_ios	= tmio_mmc_set_ios,
-	.get_ro		= tmio_mmc_get_ro,
-	.get_cd		= tmio_mmc_get_cd,
-	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
-};
+#include "tmio_mmc.h"
 
 #ifdef CONFIG_PM
 static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
···
 #define tmio_mmc_resume NULL
 #endif
 
-static int __devinit tmio_mmc_probe(struct platform_device *dev)
+static int __devinit tmio_mmc_probe(struct platform_device *pdev)
 {
-	const struct mfd_cell *cell = mfd_get_cell(dev);
+	const struct mfd_cell *cell = mfd_get_cell(pdev);
 	struct tmio_mmc_data *pdata;
-	struct resource *res_ctl;
 	struct tmio_mmc_host *host;
-	struct mmc_host *mmc;
 	int ret = -EINVAL;
-	u32 irq_mask = TMIO_MASK_CMD;
 
-	if (dev->num_resources != 2)
+	if (pdev->num_resources != 2)
 		goto out;
 
-	res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0);
-	if (!res_ctl)
-		goto out;
-
-	pdata = mfd_get_data(dev);
+	pdata = mfd_get_data(pdev);
 	if (!pdata || !pdata->hclk)
 		goto out;
 
-	ret = -ENOMEM;
-
-	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &dev->dev);
-	if (!mmc)
-		goto out;
-
-	host = mmc_priv(mmc);
-	host->mmc = mmc;
-	host->pdev = dev;
-	platform_set_drvdata(dev, mmc);
-
-	host->set_pwr = pdata->set_pwr;
-	host->set_clk_div = pdata->set_clk_div;
-
-	/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
-	host->bus_shift = resource_size(res_ctl) >> 10;
-
-	host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
-	if (!host->ctl)
-		goto host_free;
-
-	mmc->ops = &tmio_mmc_ops;
-	mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
-	mmc->f_max = pdata->hclk;
-	mmc->f_min = mmc->f_max / 512;
-	mmc->max_segs = 32;
-	mmc->max_blk_size = 512;
-	mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
-		mmc->max_segs;
-	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
-	mmc->max_seg_size = mmc->max_req_size;
-	if (pdata->ocr_mask)
-		mmc->ocr_avail = pdata->ocr_mask;
-	else
-		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
-
 	/* Tell the MFD core we are ready to be enabled */
 	if (cell->enable) {
-		ret = cell->enable(dev);
+		ret = cell->enable(pdev);
 		if (ret)
-			goto unmap_ctl;
+			goto out;
 	}
 
-	tmio_mmc_clk_stop(host);
-	reset(host);
-
-	ret = platform_get_irq(dev, 0);
-	if (ret >= 0)
-		host->irq = ret;
-	else
-		goto cell_disable;
-
-	disable_mmc_irqs(host, TMIO_MASK_ALL);
-	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
-		tmio_mmc_enable_sdio_irq(mmc, 0);
-
-	ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
-		IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
+	ret = tmio_mmc_host_probe(&host, pdev, pdata);
 	if (ret)
 		goto cell_disable;
 
-	spin_lock_init(&host->lock);
-
-	/* Init delayed work for request timeouts */
-	INIT_DELAYED_WORK(&host->delayed_reset_work, tmio_mmc_reset_work);
-
-	/* See if we also get DMA */
-	tmio_mmc_request_dma(host, pdata);
-
-	mmc_add_host(mmc);
-
 	pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
 		(unsigned long)host->ctl, host->irq);
-
-	/* Unmask the IRQs we want to know about */
-	if (!host->chan_rx)
-		irq_mask |= TMIO_MASK_READOP;
-	if (!host->chan_tx)
-		irq_mask |= TMIO_MASK_WRITEOP;
-	enable_mmc_irqs(host, irq_mask);
 
 	return 0;
 
 cell_disable:
 	if (cell->disable)
-		cell->disable(dev);
-unmap_ctl:
-	iounmap(host->ctl);
-host_free:
-	mmc_free_host(mmc);
+		cell->disable(pdev);
 out:
 	return ret;
 }
 
-static int __devexit tmio_mmc_remove(struct platform_device *dev)
+static int __devexit tmio_mmc_remove(struct platform_device *pdev)
 {
-	const struct mfd_cell *cell = mfd_get_cell(dev);
-	struct mmc_host *mmc = platform_get_drvdata(dev);
+	const struct mfd_cell *cell = mfd_get_cell(pdev);
+	struct mmc_host *mmc = platform_get_drvdata(pdev);
 
-	platform_set_drvdata(dev, NULL);
+	platform_set_drvdata(pdev, NULL);
 
 	if (mmc) {
-		struct tmio_mmc_host *host = mmc_priv(mmc);
-		mmc_remove_host(mmc);
-		cancel_delayed_work_sync(&host->delayed_reset_work);
-		tmio_mmc_release_dma(host);
-		free_irq(host->irq, host);
+		tmio_mmc_host_remove(mmc_priv(mmc));
 		if (cell->disable)
-			cell->disable(dev);
-		iounmap(host->ctl);
-		mmc_free_host(mmc);
+			cell->disable(pdev);
 	}
 
 	return 0;
+145
drivers/mmc/host/tmio_mmc.h
···
+/*
+ * linux/drivers/mmc/host/tmio_mmc.h
+ *
+ * Copyright (C) 2007 Ian Molton
+ * Copyright (C) 2004 Ian Molton
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Driver for the MMC / SD / SDIO cell found in:
+ *
+ * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
+ */
+
+#ifndef TMIO_MMC_H
+#define TMIO_MMC_H
+
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+
+/* Definitions for values the CTRL_STATUS register can take. */
+#define TMIO_STAT_CMDRESPEND	0x00000001
+#define TMIO_STAT_DATAEND	0x00000004
+#define TMIO_STAT_CARD_REMOVE	0x00000008
+#define TMIO_STAT_CARD_INSERT	0x00000010
+#define TMIO_STAT_SIGSTATE	0x00000020
+#define TMIO_STAT_WRPROTECT	0x00000080
+#define TMIO_STAT_CARD_REMOVE_A	0x00000100
+#define TMIO_STAT_CARD_INSERT_A	0x00000200
+#define TMIO_STAT_SIGSTATE_A	0x00000400
+#define TMIO_STAT_CMD_IDX_ERR	0x00010000
+#define TMIO_STAT_CRCFAIL	0x00020000
+#define TMIO_STAT_STOPBIT_ERR	0x00040000
+#define TMIO_STAT_DATATIMEOUT	0x00080000
+#define TMIO_STAT_RXOVERFLOW	0x00100000
+#define TMIO_STAT_TXUNDERRUN	0x00200000
+#define TMIO_STAT_CMDTIMEOUT	0x00400000
+#define TMIO_STAT_RXRDY		0x01000000
+#define TMIO_STAT_TXRQ		0x02000000
+#define TMIO_STAT_ILL_FUNC	0x20000000
+#define TMIO_STAT_CMD_BUSY	0x40000000
+#define TMIO_STAT_ILL_ACCESS	0x80000000
+
+/* Definitions for values the CTRL_SDIO_STATUS register can take. */
+#define TMIO_SDIO_STAT_IOIRQ	0x0001
+#define TMIO_SDIO_STAT_EXPUB52	0x4000
+#define TMIO_SDIO_STAT_EXWT	0x8000
+#define TMIO_SDIO_MASK_ALL	0xc007
+
+/* Define some IRQ masks */
+/* This is the mask used at reset by the chip */
+#define TMIO_MASK_ALL		0x837f031d
+#define TMIO_MASK_READOP	(TMIO_STAT_RXRDY | TMIO_STAT_DATAEND)
+#define TMIO_MASK_WRITEOP	(TMIO_STAT_TXRQ | TMIO_STAT_DATAEND)
+#define TMIO_MASK_CMD		(TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \
+		TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
+#define TMIO_MASK_IRQ		(TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
+
+struct tmio_mmc_data;
+
+struct tmio_mmc_host {
+	void __iomem *ctl;
+	unsigned long bus_shift;
+	struct mmc_command	*cmd;
+	struct mmc_request	*mrq;
+	struct mmc_data		*data;
+	struct mmc_host		*mmc;
+	int			irq;
+	unsigned int		sdio_irq_enabled;
+
+	/* Callbacks for clock / power control */
+	void (*set_pwr)(struct platform_device *host, int state);
+	void (*set_clk_div)(struct platform_device *host, int state);
+
+	/* pio related stuff */
+	struct scatterlist	*sg_ptr;
+	struct scatterlist	*sg_orig;
+	unsigned int		sg_len;
+	unsigned int		sg_off;
+
+	struct platform_device *pdev;
+	struct tmio_mmc_data *pdata;
+
+	/* DMA support */
+	bool			force_pio;
+	struct dma_chan		*chan_rx;
+	struct dma_chan		*chan_tx;
+	struct tasklet_struct	dma_complete;
+	struct tasklet_struct	dma_issue;
+	struct scatterlist	bounce_sg;
+	u8			*bounce_buf;
+
+	/* Track lost interrupts */
+	struct delayed_work	delayed_reset_work;
+	spinlock_t		lock;
+	unsigned long		last_req_ts;
+};
+
+int tmio_mmc_host_probe(struct tmio_mmc_host **host,
+			struct platform_device *pdev,
+			struct tmio_mmc_data *pdata);
+void tmio_mmc_host_remove(struct tmio_mmc_host *host);
+void tmio_mmc_do_data_irq(struct tmio_mmc_host *host);
+
+void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
+void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
+
+static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
+					 unsigned long *flags)
+{
+	local_irq_save(*flags);
+	return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
+}
+
+static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg,
+					  unsigned long *flags, void *virt)
+{
+	kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ);
+	local_irq_restore(*flags);
+}
+
+#ifdef CONFIG_TMIO_MMC_DMA
+void tmio_mmc_start_dma(struct tmio_mmc_host *host, struct mmc_data *data);
+void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata);
+void tmio_mmc_release_dma(struct tmio_mmc_host *host);
+#else
+static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
+				      struct mmc_data *data)
+{
+}
+
+static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host,
+					struct tmio_mmc_data *pdata)
+{
+	host->chan_tx = NULL;
+	host->chan_rx = NULL;
+}
+
+static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
+{
+}
+#endif
+
+#endif
+316
drivers/mmc/host/tmio_mmc_dma.c
···
+/*
+ * linux/drivers/mmc/tmio_mmc_dma.c
+ *
+ * Copyright (C) 2010-2011 Guennadi Liakhovetski
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * DMA function for TMIO MMC implementations
+ */
+
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/mfd/tmio.h>
+#include <linux/mmc/host.h>
+#include <linux/pagemap.h>
+#include <linux/scatterlist.h>
+
+#include "tmio_mmc.h"
+
+#define TMIO_MMC_MIN_DMA_LEN 8
+
+static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
+{
+#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
+	/* Switch DMA mode on or off - SuperH specific? */
+	writew(enable ? 2 : 0, host->ctl + (0xd8 << host->bus_shift));
+#endif
+}
+
+static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
+{
+	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
+	struct dma_async_tx_descriptor *desc = NULL;
+	struct dma_chan *chan = host->chan_rx;
+	struct tmio_mmc_data *pdata = host->pdata;
+	dma_cookie_t cookie;
+	int ret, i;
+	bool aligned = true, multiple = true;
+	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
+
+	for_each_sg(sg, sg_tmp, host->sg_len, i) {
+		if (sg_tmp->offset & align)
+			aligned = false;
+		if (sg_tmp->length & align) {
+			multiple = false;
+			break;
+		}
+	}
+
+	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+			  (align & PAGE_MASK))) || !multiple) {
+		ret = -EINVAL;
+		goto pio;
+	}
+
+	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
+		host->force_pio = true;
+		return;
+	}
+
+	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);
+
+	/* The only sg element can be unaligned, use our bounce buffer then */
+	if (!aligned) {
+		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
+		host->sg_ptr = &host->bounce_sg;
+		sg = host->sg_ptr;
+	}
+
+	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
+	if (ret > 0)
+		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
+			DMA_FROM_DEVICE, DMA_CTRL_ACK);
+
+	if (desc) {
+		cookie = dmaengine_submit(desc);
+		if (cookie < 0) {
+			desc = NULL;
+			ret = cookie;
+		}
+	}
+	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
+		__func__, host->sg_len, ret, cookie, host->mrq);
+
+pio:
+	if (!desc) {
+		/* DMA failed, fall back to PIO */
+		if (ret >= 0)
+			ret = -EIO;
+		host->chan_rx = NULL;
+		dma_release_channel(chan);
+		/* Free the Tx channel too */
+		chan = host->chan_tx;
+		if (chan) {
+			host->chan_tx = NULL;
+			dma_release_channel(chan);
+		}
+		dev_warn(&host->pdev->dev,
+			 "DMA failed: %d, falling back to PIO\n", ret);
+		tmio_mmc_enable_dma(host, false);
+	}
+
+	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
+		desc, cookie, host->sg_len);
+}
+
+static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
+{
+	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
+	struct dma_async_tx_descriptor *desc = NULL;
+	struct dma_chan *chan = host->chan_tx;
+	struct tmio_mmc_data *pdata = host->pdata;
+	dma_cookie_t cookie;
+	int ret, i;
+	bool aligned = true, multiple = true;
+	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
+
+	for_each_sg(sg, sg_tmp, host->sg_len, i) {
+		if (sg_tmp->offset & align)
+			aligned = false;
+		if (sg_tmp->length & align) {
+			multiple = false;
+			break;
+		}
+	}
+
+	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+			  (align & PAGE_MASK))) || !multiple) {
+		ret = -EINVAL;
+		goto pio;
+	}
+
+	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
+		host->force_pio = true;
+		return;
+	}
+
+	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);
+
+	/* The only sg element can be unaligned, use our bounce buffer then */
+	if (!aligned) {
+		unsigned long flags;
+		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
+		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
+		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
+		tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
+		host->sg_ptr = &host->bounce_sg;
+		sg = host->sg_ptr;
+	}
+
+	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
+	if (ret > 0)
+		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
+			DMA_TO_DEVICE, DMA_CTRL_ACK);
+
+	if (desc) {
+		cookie = dmaengine_submit(desc);
+		if (cookie < 0) {
+			desc = NULL;
+			ret = cookie;
+		}
+	}
+	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
+		__func__, host->sg_len, ret, cookie, host->mrq);
+
+pio:
+	if (!desc) {
+		/* DMA failed, fall back to PIO */
+		if (ret >= 0)
+			ret = -EIO;
+		host->chan_tx = NULL;
+		dma_release_channel(chan);
+		/* Free the Rx channel too */
+		chan = host->chan_rx;
+		if (chan) {
+			host->chan_rx = NULL;
+			dma_release_channel(chan);
+		}
+		dev_warn(&host->pdev->dev,
+			 "DMA failed: %d, falling back to PIO\n", ret);
+		tmio_mmc_enable_dma(host, false);
+	}
+
+	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
+		desc, cookie);
+}
+
+void tmio_mmc_start_dma(struct tmio_mmc_host *host,
+			struct mmc_data *data)
+{
+	if (data->flags & MMC_DATA_READ) {
+		if (host->chan_rx)
+			tmio_mmc_start_dma_rx(host);
+	} else {
+		if (host->chan_tx)
+			tmio_mmc_start_dma_tx(host);
+	}
+}
+
+static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
+{
+	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
+	struct dma_chan *chan = NULL;
+
+	spin_lock_irq(&host->lock);
+
+	if (host && host->data) {
+		if (host->data->flags & MMC_DATA_READ)
+			chan = host->chan_rx;
+		else
+			chan = host->chan_tx;
+	}
+
+	spin_unlock_irq(&host->lock);
+
+	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);
+
+	if (chan)
+		dma_async_issue_pending(chan);
+}
+
+static void tmio_mmc_tasklet_fn(unsigned long arg)
+{
+	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
+
+	spin_lock_irq(&host->lock);
+
+	if (!host->data)
+		goto out;
+
+	if (host->data->flags & MMC_DATA_READ)
+		dma_unmap_sg(host->chan_rx->device->dev,
+			     host->sg_ptr, host->sg_len,
+			     DMA_FROM_DEVICE);
+	else
+		dma_unmap_sg(host->chan_tx->device->dev,
+			     host->sg_ptr, host->sg_len,
+			     DMA_TO_DEVICE);
+
+	tmio_mmc_do_data_irq(host);
+out:
+	spin_unlock_irq(&host->lock);
+}
+
+/* It might be necessary to make filter MFD specific */
+static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
+{
+	dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
+	chan->private = arg;
+	return true;
+}
+
+void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
+{
+	/* We can only either use DMA for both Tx and Rx or not use it at all */
+	if (pdata->dma) {
+		dma_cap_mask_t mask;
+
+		dma_cap_zero(mask);
+		dma_cap_set(DMA_SLAVE, mask);
+
+		host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
+						    pdata->dma->chan_priv_tx);
+		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
+			host->chan_tx);
+
+		if (!host->chan_tx)
+			return;
+
+		host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
+						    pdata->dma->chan_priv_rx);
+		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
+			host->chan_rx);
+
+		if (!host->chan_rx)
+			goto ereqrx;
+
+		host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
+		if (!host->bounce_buf)
+			goto ebouncebuf;
+
+		tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
+		tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
+
+		tmio_mmc_enable_dma(host, true);
+
+		return;
+ebouncebuf:
+		dma_release_channel(host->chan_rx);
+		host->chan_rx = NULL;
+ereqrx:
+		dma_release_channel(host->chan_tx);
+		host->chan_tx = NULL;
+		return;
+	}
+}
+
+void tmio_mmc_release_dma(struct tmio_mmc_host *host)
+{
+	if (host->chan_tx) {
+		struct dma_chan *chan = host->chan_tx;
+		host->chan_tx = NULL;
+		dma_release_channel(chan);
+	}
+	if (host->chan_rx) {
+		struct dma_chan *chan = host->chan_rx;
+		host->chan_rx = NULL;
+		dma_release_channel(chan);
+	}
+	if (host->bounce_buf) {
+		free_pages((unsigned long)host->bounce_buf, 0);
+		host->bounce_buf = NULL;
+	}
+}
+923
drivers/mmc/host/tmio_mmc_pio.c
··· 1 + /* 2 + * linux/drivers/mmc/host/tmio_mmc_pio.c 3 + * 4 + * Copyright (C) 2011 Guennadi Liakhovetski 5 + * Copyright (C) 2007 Ian Molton 6 + * Copyright (C) 2004 Ian Molton 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License version 2 as 10 + * published by the Free Software Foundation. 11 + * 12 + * Driver for the MMC / SD / SDIO IP found in: 13 + * 14 + * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs 15 + * 16 + * This driver draws mainly on scattered spec sheets, Reverse engineering 17 + * of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit 18 + * support). (Further 4 bit support from a later datasheet). 19 + * 20 + * TODO: 21 + * Investigate using a workqueue for PIO transfers 22 + * Eliminate FIXMEs 23 + * SDIO support 24 + * Better Power management 25 + * Handle MMC errors better 26 + * double buffer support 27 + * 28 + */ 29 + 30 + #include <linux/delay.h> 31 + #include <linux/device.h> 32 + #include <linux/highmem.h> 33 + #include <linux/interrupt.h> 34 + #include <linux/io.h> 35 + #include <linux/irq.h> 36 + #include <linux/mfd/tmio.h> 37 + #include <linux/mmc/host.h> 38 + #include <linux/module.h> 39 + #include <linux/pagemap.h> 40 + #include <linux/platform_device.h> 41 + #include <linux/scatterlist.h> 42 + #include <linux/workqueue.h> 43 + #include <linux/spinlock.h> 44 + 45 + #include "tmio_mmc.h" 46 + 47 + #define CTL_SD_CMD 0x00 48 + #define CTL_ARG_REG 0x04 49 + #define CTL_STOP_INTERNAL_ACTION 0x08 50 + #define CTL_XFER_BLK_COUNT 0xa 51 + #define CTL_RESPONSE 0x0c 52 + #define CTL_STATUS 0x1c 53 + #define CTL_IRQ_MASK 0x20 54 + #define CTL_SD_CARD_CLK_CTL 0x24 55 + #define CTL_SD_XFER_LEN 0x26 56 + #define CTL_SD_MEM_CARD_OPT 0x28 57 + #define CTL_SD_ERROR_DETAIL_STATUS 0x2c 58 + #define CTL_SD_DATA_PORT 0x30 59 + #define CTL_TRANSACTION_CTL 0x34 60 + #define CTL_SDIO_STATUS 0x36 61 + #define CTL_SDIO_IRQ_MASK 0x38 62 + #define CTL_RESET_SD 0xe0 63 + #define CTL_SDIO_REGS 0x100 64 + #define CTL_CLK_AND_WAIT_CTL 0x138 65 + #define CTL_RESET_SDIO 0x1e0 66 + 67 + static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr) 68 + { 69 + return readw(host->ctl + (addr << host->bus_shift)); 70 + } 71 + 72 + static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr, 73 + u16 *buf, int count) 74 + { 75 + readsw(host->ctl + (addr << host->bus_shift), buf, count); 76 + } 77 + 78 + static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr) 79 + { 80 + return readw(host->ctl + (addr << host->bus_shift)) | 81 + readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16; 82 + } 83 + 84 + static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val) 85 + { 86 + writew(val, host->ctl + (addr << host->bus_shift)); 87 + } 88 + 89 + static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr, 90 + u16 *buf, int count) 91 + { 92 + writesw(host->ctl + (addr << host->bus_shift), buf, count); 93 + } 94 + 95 + static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val) 96 + { 97 + writew(val, host->ctl + (addr << host->bus_shift)); 98 + writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); 99 + } 100 + 101 + void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i) 102 + { 103 + u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ); 104 + sd_ctrl_write32(host, CTL_IRQ_MASK, mask); 105 + } 106 + 107 + void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i) 108 + { 109 + u32 
mask = sd_ctrl_read32(host, CTL_IRQ_MASK) | (i & TMIO_MASK_IRQ); 110 + sd_ctrl_write32(host, CTL_IRQ_MASK, mask); 111 + } 112 + 113 + static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i) 114 + { 115 + sd_ctrl_write32(host, CTL_STATUS, ~i); 116 + } 117 + 118 + static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data) 119 + { 120 + host->sg_len = data->sg_len; 121 + host->sg_ptr = data->sg; 122 + host->sg_orig = data->sg; 123 + host->sg_off = 0; 124 + } 125 + 126 + static int tmio_mmc_next_sg(struct tmio_mmc_host *host) 127 + { 128 + host->sg_ptr = sg_next(host->sg_ptr); 129 + host->sg_off = 0; 130 + return --host->sg_len; 131 + } 132 + 133 + #ifdef CONFIG_MMC_DEBUG 134 + 135 + #define STATUS_TO_TEXT(a, status, i) \ 136 + do { \ 137 + if (status & TMIO_STAT_##a) { \ 138 + if (i++) \ 139 + printk(" | "); \ 140 + printk(#a); \ 141 + } \ 142 + } while (0) 143 + 144 + static void pr_debug_status(u32 status) 145 + { 146 + int i = 0; 147 + printk(KERN_DEBUG "status: %08x = ", status); 148 + STATUS_TO_TEXT(CARD_REMOVE, status, i); 149 + STATUS_TO_TEXT(CARD_INSERT, status, i); 150 + STATUS_TO_TEXT(SIGSTATE, status, i); 151 + STATUS_TO_TEXT(WRPROTECT, status, i); 152 + STATUS_TO_TEXT(CARD_REMOVE_A, status, i); 153 + STATUS_TO_TEXT(CARD_INSERT_A, status, i); 154 + STATUS_TO_TEXT(SIGSTATE_A, status, i); 155 + STATUS_TO_TEXT(CMD_IDX_ERR, status, i); 156 + STATUS_TO_TEXT(STOPBIT_ERR, status, i); 157 + STATUS_TO_TEXT(ILL_FUNC, status, i); 158 + STATUS_TO_TEXT(CMD_BUSY, status, i); 159 + STATUS_TO_TEXT(CMDRESPEND, status, i); 160 + STATUS_TO_TEXT(DATAEND, status, i); 161 + STATUS_TO_TEXT(CRCFAIL, status, i); 162 + STATUS_TO_TEXT(DATATIMEOUT, status, i); 163 + STATUS_TO_TEXT(CMDTIMEOUT, status, i); 164 + STATUS_TO_TEXT(RXOVERFLOW, status, i); 165 + STATUS_TO_TEXT(TXUNDERRUN, status, i); 166 + STATUS_TO_TEXT(RXRDY, status, i); 167 + STATUS_TO_TEXT(TXRQ, status, i); 168 + STATUS_TO_TEXT(ILL_ACCESS, status, i); 169 + printk("\n"); 170 + } 171 + 172 + #else 173 + #define pr_debug_status(s) do { } while (0) 174 + #endif 175 + 176 + static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) 177 + { 178 + struct tmio_mmc_host *host = mmc_priv(mmc); 179 + 180 + if (enable) { 181 + host->sdio_irq_enabled = 1; 182 + sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001); 183 + sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, 184 + (TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ)); 185 + } else { 186 + sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL); 187 + sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000); 188 + host->sdio_irq_enabled = 0; 189 + } 190 + } 191 + 192 + static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock) 193 + { 194 + u32 clk = 0, clock; 195 + 196 + if (new_clock) { 197 + for (clock = host->mmc->f_min, clk = 0x80000080; 198 + new_clock >= (clock<<1); clk >>= 1) 199 + clock <<= 1; 200 + clk |= 0x100; 201 + } 202 + 203 + if (host->set_clk_div) 204 + host->set_clk_div(host->pdev, (clk>>22) & 1); 205 + 206 + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff); 207 + } 208 + 209 + static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) 210 + { 211 + struct tmio_mmc_data *pdata = host->pdata; 212 + 213 + /* 214 + * Testing on sh-mobile showed that SDIO IRQs are unmasked when 215 + * CTL_CLK_AND_WAIT_CTL gets written, so we have to disable the 216 + * device IRQ here and restore the SDIO IRQ mask before 217 + * re-enabling the device IRQ. 
218 + */ 219 + if (pdata->flags & TMIO_MMC_SDIO_IRQ) 220 + disable_irq(host->irq); 221 + sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000); 222 + msleep(10); 223 + if (pdata->flags & TMIO_MMC_SDIO_IRQ) { 224 + tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled); 225 + enable_irq(host->irq); 226 + } 227 + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 & 228 + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); 229 + msleep(10); 230 + } 231 + 232 + static void tmio_mmc_clk_start(struct tmio_mmc_host *host) 233 + { 234 + struct tmio_mmc_data *pdata = host->pdata; 235 + 236 + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 | 237 + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); 238 + msleep(10); 239 + /* see comment in tmio_mmc_clk_stop above */ 240 + if (pdata->flags & TMIO_MMC_SDIO_IRQ) 241 + disable_irq(host->irq); 242 + sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); 243 + msleep(10); 244 + if (pdata->flags & TMIO_MMC_SDIO_IRQ) { 245 + tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled); 246 + enable_irq(host->irq); 247 + } 248 + } 249 + 250 + static void tmio_mmc_reset(struct tmio_mmc_host *host) 251 + { 252 + /* FIXME - should we set stop clock reg here */ 253 + sd_ctrl_write16(host, CTL_RESET_SD, 0x0000); 254 + sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000); 255 + msleep(10); 256 + sd_ctrl_write16(host, CTL_RESET_SD, 0x0001); 257 + sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001); 258 + msleep(10); 259 + } 260 + 261 + static void tmio_mmc_reset_work(struct work_struct *work) 262 + { 263 + struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host, 264 + delayed_reset_work.work); 265 + struct mmc_request *mrq; 266 + unsigned long flags; 267 + 268 + spin_lock_irqsave(&host->lock, flags); 269 + mrq = host->mrq; 270 + 271 + /* request already finished */ 272 + if (!mrq 273 + || time_is_after_jiffies(host->last_req_ts + 274 + msecs_to_jiffies(2000))) { 275 + spin_unlock_irqrestore(&host->lock, flags); 276 + return; 277 + } 278 + 279 + dev_warn(&host->pdev->dev, 280 + "timeout waiting for hardware interrupt (CMD%u)\n", 281 + mrq->cmd->opcode); 282 + 283 + if (host->data) 284 + host->data->error = -ETIMEDOUT; 285 + else if (host->cmd) 286 + host->cmd->error = -ETIMEDOUT; 287 + else 288 + mrq->cmd->error = -ETIMEDOUT; 289 + 290 + host->cmd = NULL; 291 + host->data = NULL; 292 + host->mrq = NULL; 293 + host->force_pio = false; 294 + 295 + spin_unlock_irqrestore(&host->lock, flags); 296 + 297 + tmio_mmc_reset(host); 298 + 299 + mmc_request_done(host->mmc, mrq); 300 + } 301 + 302 + static void tmio_mmc_finish_request(struct tmio_mmc_host *host) 303 + { 304 + struct mmc_request *mrq = host->mrq; 305 + 306 + if (!mrq) 307 + return; 308 + 309 + host->mrq = NULL; 310 + host->cmd = NULL; 311 + host->data = NULL; 312 + host->force_pio = false; 313 + 314 + cancel_delayed_work(&host->delayed_reset_work); 315 + 316 + mmc_request_done(host->mmc, mrq); 317 + } 318 + 319 + /* These are the bitmasks the tmio chip requires to implement the MMC response 320 + * types. Note that R1 and R6 are the same in this scheme. 
*/
321 + #define APP_CMD 0x0040
322 + #define RESP_NONE 0x0300
323 + #define RESP_R1 0x0400
324 + #define RESP_R1B 0x0500
325 + #define RESP_R2 0x0600
326 + #define RESP_R3 0x0700
327 + #define DATA_PRESENT 0x0800
328 + #define TRANSFER_READ 0x1000
329 + #define TRANSFER_MULTI 0x2000
330 + #define SECURITY_CMD 0x4000
331 +
332 + static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
333 + {
334 + struct mmc_data *data = host->data;
335 + int c = cmd->opcode;
336 +
337 + /* Command 12 is handled by hardware */
338 + if (cmd->opcode == 12 && !cmd->arg) {
339 + sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
340 + return 0;
341 + }
342 +
343 + switch (mmc_resp_type(cmd)) {
344 + case MMC_RSP_NONE: c |= RESP_NONE; break;
345 + case MMC_RSP_R1: c |= RESP_R1; break;
346 + case MMC_RSP_R1B: c |= RESP_R1B; break;
347 + case MMC_RSP_R2: c |= RESP_R2; break;
348 + case MMC_RSP_R3: c |= RESP_R3; break;
349 + default:
350 + pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
351 + return -EINVAL;
352 + }
353 +
354 + host->cmd = cmd;
355 +
356 + /* FIXME - this seems to be OK commented out, but the spec suggests this bit
357 + * should be set when issuing app commands.
358 + * if (cmd->flags & MMC_FLAG_ACMD)
359 + * c |= APP_CMD;
360 + */
361 + if (data) {
362 + c |= DATA_PRESENT;
363 + if (data->blocks > 1) {
364 + sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
365 + c |= TRANSFER_MULTI;
366 + }
367 + if (data->flags & MMC_DATA_READ)
368 + c |= TRANSFER_READ;
369 + }
370 +
371 + tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD);
372 +
373 + /* Fire off the command */
374 + sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
375 + sd_ctrl_write16(host, CTL_SD_CMD, c);
376 +
377 + return 0;
378 + }
379 +
380 + /*
381 + * This chip always returns (at least?) as much data as you ask for.
382 + * I'm unsure what happens if you ask for less than a block. This should be
383 + * looked into to ensure that a funny length read doesn't hose the controller.
384 + */
385 + static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
386 + {
387 + struct mmc_data *data = host->data;
388 + void *sg_virt;
389 + unsigned short *buf;
390 + unsigned int count;
391 + unsigned long flags;
392 +
393 + if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
394 + pr_err("PIO IRQ in DMA mode!\n");
395 + return;
396 + } else if (!data) {
397 + pr_debug("Spurious PIO IRQ\n");
398 + return;
399 + }
400 +
401 + sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
402 + buf = (unsigned short *)(sg_virt + host->sg_off);
403 +
404 + count = host->sg_ptr->length - host->sg_off;
405 + if (count > data->blksz)
406 + count = data->blksz;
407 +
408 + pr_debug("count: %08x offset: %08x flags %08x\n",
409 + count, host->sg_off, data->flags);
410 +
411 + /* Transfer the data */
412 + if (data->flags & MMC_DATA_READ)
413 + sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
414 + else
415 + sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
416 +
417 + host->sg_off += count;
418 +
419 + tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);
420 +
421 + if (host->sg_off == host->sg_ptr->length)
422 + tmio_mmc_next_sg(host);
423 +
424 + return;
425 + }
426 +
427 + static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
428 + {
429 + if (host->sg_ptr == &host->bounce_sg) {
430 + unsigned long flags;
431 + void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
432 + memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
433 + tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
434 + }
435 + }
436 +
437 + /* needs to be called with host->lock held */
438 + void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
439 + {
440 + struct mmc_data *data = host->data;
441 + struct mmc_command *stop;
442 +
443 + host->data = NULL;
444 +
445 + if (!data) {
446 + dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
447 + return;
448 + }
449 + stop = data->stop;
450 +
451 + /* FIXME - return correct transfer count on errors */
452 + if (!data->error)
453 + data->bytes_xfered = data->blocks * data->blksz;
454 + else
455 + data->bytes_xfered = 0;
456 +
457 + pr_debug("Completed data request\n");
458 +
459 + /*
460 + * FIXME: other drivers allow an optional stop command of any given type
461 + * which we don't do, as the chip can auto-generate them.
462 + * Perhaps we can be smarter about when to use auto CMD12 and
463 + * only issue the auto request when we know this is the desired
464 + * stop command, allowing fallback to the stop command the
465 + * upper layers expect. For now, we do what works.
466 + */
467 +
468 + if (data->flags & MMC_DATA_READ) {
469 + if (host->chan_rx && !host->force_pio)
470 + tmio_mmc_check_bounce_buffer(host);
471 + dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
472 + host->mrq);
473 + } else {
474 + dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
475 + host->mrq);
476 + }
477 +
478 + if (stop) {
479 + if (stop->opcode == 12 && !stop->arg)
480 + sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
481 + else
482 + BUG();
483 + }
484 +
485 + tmio_mmc_finish_request(host);
486 + }
487 +
488 + static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
489 + {
490 + struct mmc_data *data;
491 + spin_lock(&host->lock);
492 + data = host->data;
493 +
494 + if (!data)
495 + goto out;
496 +
497 + if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
498 + /*
499 + * Has all data been written out yet?
Testing on SuperH showed
500 + * that in most cases the first interrupt already comes with the
501 + * BUSY status bit clear, but on some operations, such as mount or
502 + * the beginning of a write / sync / umount, there is one
503 + * DATAEND interrupt with the BUSY bit set; in these cases,
504 + * waiting for one more interrupt fixes the problem.
505 + */
506 + if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
507 + tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
508 + tasklet_schedule(&host->dma_complete);
509 + }
510 + } else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
511 + tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
512 + tasklet_schedule(&host->dma_complete);
513 + } else {
514 + tmio_mmc_do_data_irq(host);
515 + tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
516 + }
517 + out:
518 + spin_unlock(&host->lock);
519 + }
520 +
521 + static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
522 + unsigned int stat)
523 + {
524 + struct mmc_command *cmd = host->cmd;
525 + int i, addr;
526 +
527 + spin_lock(&host->lock);
528 +
529 + if (!host->cmd) {
530 + pr_debug("Spurious CMD irq\n");
531 + goto out;
532 + }
533 +
534 + host->cmd = NULL;
535 +
536 + /* This controller is sicker than the PXA one. Not only do we need to
537 + * drop the top 8 bits of the first response word, we also need to
538 + * modify the order of the response for short response command types.
539 + */
540 +
541 + for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
542 + cmd->resp[i] = sd_ctrl_read32(host, addr);
543 +
544 + if (cmd->flags & MMC_RSP_136) {
545 + cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
546 + cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
547 + cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
548 + cmd->resp[3] <<= 8;
549 + } else if (cmd->flags & MMC_RSP_R3) {
550 + cmd->resp[0] = cmd->resp[3];
551 + }
552 +
553 + if (stat & TMIO_STAT_CMDTIMEOUT)
554 + cmd->error = -ETIMEDOUT;
555 + else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
556 + cmd->error = -EILSEQ;
557 +
558 + /* If there is data to handle, we enable the data IRQs here, and
559 + * we will ultimately finish the request in the data_end handler.
560 + * If there's no data, or we encountered an error, finish now.
561 + */ 562 + if (host->data && !cmd->error) { 563 + if (host->data->flags & MMC_DATA_READ) { 564 + if (host->force_pio || !host->chan_rx) 565 + tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP); 566 + else 567 + tasklet_schedule(&host->dma_issue); 568 + } else { 569 + if (host->force_pio || !host->chan_tx) 570 + tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP); 571 + else 572 + tasklet_schedule(&host->dma_issue); 573 + } 574 + } else { 575 + tmio_mmc_finish_request(host); 576 + } 577 + 578 + out: 579 + spin_unlock(&host->lock); 580 + } 581 + 582 + static irqreturn_t tmio_mmc_irq(int irq, void *devid) 583 + { 584 + struct tmio_mmc_host *host = devid; 585 + struct tmio_mmc_data *pdata = host->pdata; 586 + unsigned int ireg, irq_mask, status; 587 + unsigned int sdio_ireg, sdio_irq_mask, sdio_status; 588 + 589 + pr_debug("MMC IRQ begin\n"); 590 + 591 + status = sd_ctrl_read32(host, CTL_STATUS); 592 + irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); 593 + ireg = status & TMIO_MASK_IRQ & ~irq_mask; 594 + 595 + sdio_ireg = 0; 596 + if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) { 597 + sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS); 598 + sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK); 599 + sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask; 600 + 601 + sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL); 602 + 603 + if (sdio_ireg && !host->sdio_irq_enabled) { 604 + pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n", 605 + sdio_status, sdio_irq_mask, sdio_ireg); 606 + tmio_mmc_enable_sdio_irq(host->mmc, 0); 607 + goto out; 608 + } 609 + 610 + if (host->mmc->caps & MMC_CAP_SDIO_IRQ && 611 + sdio_ireg & TMIO_SDIO_STAT_IOIRQ) 612 + mmc_signal_sdio_irq(host->mmc); 613 + 614 + if (sdio_ireg) 615 + goto out; 616 + } 617 + 618 + pr_debug_status(status); 619 + pr_debug_status(ireg); 620 + 621 + if (!ireg) { 622 + tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask); 623 + 624 + pr_warning("tmio_mmc: Spurious irq, disabling! 
" 625 + "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg); 626 + pr_debug_status(status); 627 + 628 + goto out; 629 + } 630 + 631 + while (ireg) { 632 + /* Card insert / remove attempts */ 633 + if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) { 634 + tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT | 635 + TMIO_STAT_CARD_REMOVE); 636 + mmc_detect_change(host->mmc, msecs_to_jiffies(100)); 637 + } 638 + 639 + /* CRC and other errors */ 640 + /* if (ireg & TMIO_STAT_ERR_IRQ) 641 + * handled |= tmio_error_irq(host, irq, stat); 642 + */ 643 + 644 + /* Command completion */ 645 + if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) { 646 + tmio_mmc_ack_mmc_irqs(host, 647 + TMIO_STAT_CMDRESPEND | 648 + TMIO_STAT_CMDTIMEOUT); 649 + tmio_mmc_cmd_irq(host, status); 650 + } 651 + 652 + /* Data transfer */ 653 + if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) { 654 + tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ); 655 + tmio_mmc_pio_irq(host); 656 + } 657 + 658 + /* Data transfer completion */ 659 + if (ireg & TMIO_STAT_DATAEND) { 660 + tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND); 661 + tmio_mmc_data_irq(host); 662 + } 663 + 664 + /* Check status - keep going until we've handled it all */ 665 + status = sd_ctrl_read32(host, CTL_STATUS); 666 + irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); 667 + ireg = status & TMIO_MASK_IRQ & ~irq_mask; 668 + 669 + pr_debug("Status at end of loop: %08x\n", status); 670 + pr_debug_status(status); 671 + } 672 + pr_debug("MMC IRQ end\n"); 673 + 674 + out: 675 + return IRQ_HANDLED; 676 + } 677 + 678 + static int tmio_mmc_start_data(struct tmio_mmc_host *host, 679 + struct mmc_data *data) 680 + { 681 + struct tmio_mmc_data *pdata = host->pdata; 682 + 683 + pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n", 684 + data->blksz, data->blocks); 685 + 686 + /* Some hardware cannot perform 2 byte requests in 4 bit mode */ 687 + if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) { 688 + int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES; 689 + 690 + if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) { 691 + pr_err("%s: %d byte block unsupported in 4 bit mode\n", 692 + mmc_hostname(host->mmc), data->blksz); 693 + return -EINVAL; 694 + } 695 + } 696 + 697 + tmio_mmc_init_sg(host, data); 698 + host->data = data; 699 + 700 + /* Set transfer length / blocksize */ 701 + sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz); 702 + sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); 703 + 704 + tmio_mmc_start_dma(host, data); 705 + 706 + return 0; 707 + } 708 + 709 + /* Process requests from the MMC layer */ 710 + static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) 711 + { 712 + struct tmio_mmc_host *host = mmc_priv(mmc); 713 + int ret; 714 + 715 + if (host->mrq) 716 + pr_debug("request not null\n"); 717 + 718 + host->last_req_ts = jiffies; 719 + wmb(); 720 + host->mrq = mrq; 721 + 722 + if (mrq->data) { 723 + ret = tmio_mmc_start_data(host, mrq->data); 724 + if (ret) 725 + goto fail; 726 + } 727 + 728 + ret = tmio_mmc_start_command(host, mrq->cmd); 729 + if (!ret) { 730 + schedule_delayed_work(&host->delayed_reset_work, 731 + msecs_to_jiffies(2000)); 732 + return; 733 + } 734 + 735 + fail: 736 + host->mrq = NULL; 737 + host->force_pio = false; 738 + mrq->cmd->error = ret; 739 + mmc_request_done(mmc, mrq); 740 + } 741 + 742 + /* Set MMC clock / power. 743 + * Note: This controller uses a simple divider scheme therefore it cannot 744 + * run a MMC card at full speed (20MHz). 
The max clock is 24MHz on SD, but as
745 + * MMC won't run that fast, it has to be clocked at 12MHz, which is the next
746 + * slowest setting.
747 + */
748 + static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
749 + {
750 + struct tmio_mmc_host *host = mmc_priv(mmc);
751 +
752 + if (ios->clock)
753 + tmio_mmc_set_clock(host, ios->clock);
754 +
755 + /* Power sequence - OFF -> ON -> UP */
756 + switch (ios->power_mode) {
757 + case MMC_POWER_OFF: /* power down SD bus */
758 + if (host->set_pwr)
759 + host->set_pwr(host->pdev, 0);
760 + tmio_mmc_clk_stop(host);
761 + break;
762 + case MMC_POWER_ON: /* power up SD bus */
763 + if (host->set_pwr)
764 + host->set_pwr(host->pdev, 1);
765 + break;
766 + case MMC_POWER_UP: /* start bus clock */
767 + tmio_mmc_clk_start(host);
768 + break;
769 + }
770 +
771 + switch (ios->bus_width) {
772 + case MMC_BUS_WIDTH_1:
773 + sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
774 + break;
775 + case MMC_BUS_WIDTH_4:
776 + sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
777 + break;
778 + }
779 +
780 + /* Let things settle. Delay taken from the WinCE driver. */
781 + udelay(140);
782 + }
783 +
784 + static int tmio_mmc_get_ro(struct mmc_host *mmc)
785 + {
786 + struct tmio_mmc_host *host = mmc_priv(mmc);
787 + struct tmio_mmc_data *pdata = host->pdata;
788 +
789 + return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
790 + !(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
791 + }
792 +
793 + static int tmio_mmc_get_cd(struct mmc_host *mmc)
794 + {
795 + struct tmio_mmc_host *host = mmc_priv(mmc);
796 + struct tmio_mmc_data *pdata = host->pdata;
797 +
798 + if (!pdata->get_cd)
799 + return -ENOSYS;
800 + else
801 + return pdata->get_cd(host->pdev);
802 + }
803 +
804 + static const struct mmc_host_ops tmio_mmc_ops = {
805 + .request = tmio_mmc_request,
806 + .set_ios = tmio_mmc_set_ios,
807 + .get_ro = tmio_mmc_get_ro,
808 + .get_cd = tmio_mmc_get_cd,
809 + .enable_sdio_irq = tmio_mmc_enable_sdio_irq,
810 + };
811 +
812 + int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
813 + struct platform_device *pdev,
814 + struct tmio_mmc_data *pdata)
815 + {
816 + struct tmio_mmc_host *_host;
817 + struct mmc_host *mmc;
818 + struct resource *res_ctl;
819 + int ret;
820 + u32 irq_mask = TMIO_MASK_CMD;
821 +
822 + res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
823 + if (!res_ctl)
824 + return -EINVAL;
825 +
826 + mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
827 + if (!mmc)
828 + return -ENOMEM;
829 +
830 + _host = mmc_priv(mmc);
831 + _host->pdata = pdata;
832 + _host->mmc = mmc;
833 + _host->pdev = pdev;
834 + platform_set_drvdata(pdev, mmc);
835 +
836 + _host->set_pwr = pdata->set_pwr;
837 + _host->set_clk_div = pdata->set_clk_div;
838 +
839 + /* SD control register space size is 0x200, 0x400 for bus_shift=1 */
840 + _host->bus_shift = resource_size(res_ctl) >> 10;
841 +
842 + _host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
843 + if (!_host->ctl) {
844 + ret = -ENOMEM;
845 + goto host_free;
846 + }
847 +
848 + mmc->ops = &tmio_mmc_ops;
849 + mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
850 + mmc->f_max = pdata->hclk;
851 + mmc->f_min = mmc->f_max / 512;
852 + mmc->max_segs = 32;
853 + mmc->max_blk_size = 512;
854 + mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
855 + mmc->max_segs;
856 + mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
857 + mmc->max_seg_size = mmc->max_req_size;
858 + if (pdata->ocr_mask)
859 + mmc->ocr_avail = pdata->ocr_mask;
860
+ else 861 + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 862 + 863 + tmio_mmc_clk_stop(_host); 864 + tmio_mmc_reset(_host); 865 + 866 + ret = platform_get_irq(pdev, 0); 867 + if (ret < 0) 868 + goto unmap_ctl; 869 + 870 + _host->irq = ret; 871 + 872 + tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL); 873 + if (pdata->flags & TMIO_MMC_SDIO_IRQ) 874 + tmio_mmc_enable_sdio_irq(mmc, 0); 875 + 876 + ret = request_irq(_host->irq, tmio_mmc_irq, IRQF_DISABLED | 877 + IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), _host); 878 + if (ret) 879 + goto unmap_ctl; 880 + 881 + spin_lock_init(&_host->lock); 882 + 883 + /* Init delayed work for request timeouts */ 884 + INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work); 885 + 886 + /* See if we also get DMA */ 887 + tmio_mmc_request_dma(_host, pdata); 888 + 889 + mmc_add_host(mmc); 890 + 891 + /* Unmask the IRQs we want to know about */ 892 + if (!_host->chan_rx) 893 + irq_mask |= TMIO_MASK_READOP; 894 + if (!_host->chan_tx) 895 + irq_mask |= TMIO_MASK_WRITEOP; 896 + 897 + tmio_mmc_enable_mmc_irqs(_host, irq_mask); 898 + 899 + *host = _host; 900 + 901 + return 0; 902 + 903 + unmap_ctl: 904 + iounmap(_host->ctl); 905 + host_free: 906 + mmc_free_host(mmc); 907 + 908 + return ret; 909 + } 910 + EXPORT_SYMBOL(tmio_mmc_host_probe); 911 + 912 + void tmio_mmc_host_remove(struct tmio_mmc_host *host) 913 + { 914 + mmc_remove_host(host->mmc); 915 + cancel_delayed_work_sync(&host->delayed_reset_work); 916 + tmio_mmc_release_dma(host); 917 + free_irq(host->irq, host); 918 + iounmap(host->ctl); 919 + mmc_free_host(host->mmc); 920 + } 921 + EXPORT_SYMBOL(tmio_mmc_host_remove); 922 + 923 + MODULE_LICENSE("GPL v2");
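
[Editor's note: tmio_mmc_host_probe() / tmio_mmc_host_remove() are the entire contract between the new core module and its glue drivers. Below is a sketch of how a hypothetical glue might use them; the example_sdhi_* names are invented for illustration, and the real MFD glue is the tmio_mmc.c part of this patch. It relies only on behavior visible above: probe stores the mmc_host as drvdata, and mmc_priv() recovers the tmio_mmc_host.]

	#include <linux/mfd/tmio.h>
	#include <linux/mmc/host.h>
	#include <linux/module.h>
	#include <linux/platform_device.h>

	#include "tmio_mmc.h"

	static int __devinit example_sdhi_probe(struct platform_device *pdev)
	{
		struct tmio_mmc_data *pdata = pdev->dev.platform_data;
		struct tmio_mmc_host *host;
		int ret;

		if (!pdata)
			return -EINVAL;

		/* The core claims the MMIO resource, the IRQ and, optionally, DMA */
		ret = tmio_mmc_host_probe(&host, pdev, pdata);
		if (ret < 0)
			return ret;

		dev_info(&pdev->dev, "%s: probed\n", mmc_hostname(host->mmc));
		return 0;
	}

	static int __devexit example_sdhi_remove(struct platform_device *pdev)
	{
		/* tmio_mmc_host_probe() stored the mmc_host as drvdata */
		struct mmc_host *mmc = platform_get_drvdata(pdev);

		if (mmc)
			tmio_mmc_host_remove(mmc_priv(mmc));
		return 0;
	}

	static struct platform_driver example_sdhi_driver = {
		.driver		= {
			.name	= "example-sdhi",
			.owner	= THIS_MODULE,
		},
		.probe		= example_sdhi_probe,
		.remove		= __devexit_p(example_sdhi_remove),
	};

A glue driver adds only what its hardware needs (clock gating, power callbacks, SDHI quirks) on top of this skeleton; everything else, including the PIO/DMA decision, stays in mmc_tmio_core.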