[MMC] Add OMAP MMC host driver

Add the Multimedia Card (MMC) host controller driver for TI OMAP processors.

Signed-off-by: Juha Yrjölä <juha.yrjola@nokia.com>
Signed-off-by: Tony Lindgren <tony@atomide.com>
Signed-off-by: Carlos Aguiar <carlos.aguiar@indt.org.br>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Authored by Carlos Aguiar and committed by Russell King (730c9b7e 224b148e)

+1298
+11
drivers/mmc/Kconfig
··· 60 60 61 61 If unsure, say N. 62 62 63 + config MMC_OMAP 64 + tristate "TI OMAP Multimedia Card Interface support" 65 + depends on ARCH_OMAP && MMC 66 + select TPS65010 if MACH_OMAP_H2 67 + help 68 + This selects the TI OMAP Multimedia card Interface. 69 + If you have an OMAP board with a Multimedia Card slot, 70 + say Y or M here. 71 + 72 + If unsure, say N. 73 + 63 74 config MMC_WBSD 64 75 tristate "Winbond W83L51xD SD/MMC Card Interface support" 65 76 depends on MMC && ISA_DMA_API
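For reference, a minimal .config fragment that builds this driver as a module might look as follows; the surrounding architecture and MMC core options are assumptions about the rest of the board configuration, not part of this patch:

    CONFIG_ARCH_OMAP=y
    CONFIG_MMC=y
    CONFIG_MMC_OMAP=m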
+1
drivers/mmc/Makefile
··· 20 20 obj-$(CONFIG_MMC_SDHCI) += sdhci.o 21 21 obj-$(CONFIG_MMC_WBSD) += wbsd.o 22 22 obj-$(CONFIG_MMC_AU1X) += au1xmmc.o 23 + obj-$(CONFIG_MMC_OMAP) += omap.o 23 24 24 25 mmc_core-y := mmc.o mmc_queue.o mmc_sysfs.o
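With the option above enabled, kbuild picks up the new object automatically; for a quick compile check, a single-target build along these lines should work (the cross-compiler prefix is an assumption about the build environment):

    make ARCH=arm CROSS_COMPILE=arm-linux- drivers/mmc/omap.o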
+1231
drivers/mmc/omap.c
··· 1 + /* 2 + * linux/drivers/media/mmc/omap.c 3 + * 4 + * Copyright (C) 2004 Nokia Corporation 5 + * Written by Tuukka Tikkanen and Juha Yrj�l�<juha.yrjola@nokia.com> 6 + * Misc hacks here and there by Tony Lindgren <tony@atomide.com> 7 + * Other hacks (DMA, SD, etc) by David Brownell 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License version 2 as 11 + * published by the Free Software Foundation. 12 + */ 13 + 14 + #include <linux/config.h> 15 + 16 + #ifdef CONFIG_MMC_DEBUG 17 + #define DEBUG /* for dev_dbg(), pr_debug(), etc */ 18 + #endif 19 + 20 + #include <linux/module.h> 21 + #include <linux/moduleparam.h> 22 + #include <linux/init.h> 23 + #include <linux/ioport.h> 24 + #include <linux/platform_device.h> 25 + #include <linux/interrupt.h> 26 + #include <linux/dma-mapping.h> 27 + #include <linux/delay.h> 28 + #include <linux/spinlock.h> 29 + #include <linux/timer.h> 30 + #include <linux/mmc/host.h> 31 + #include <linux/mmc/protocol.h> 32 + #include <linux/mmc/card.h> 33 + #include <linux/clk.h> 34 + 35 + #include <asm/io.h> 36 + #include <asm/irq.h> 37 + #include <asm/scatterlist.h> 38 + #include <asm/mach-types.h> 39 + 40 + #include <asm/arch/board.h> 41 + #include <asm/arch/gpio.h> 42 + #include <asm/arch/dma.h> 43 + #include <asm/arch/mux.h> 44 + #include <asm/arch/fpga.h> 45 + #include <asm/arch/tps65010.h> 46 + 47 + #include "omap.h" 48 + 49 + #define DRIVER_NAME "mmci-omap" 50 + #define RSP_TYPE(x) ((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE)) 51 + 52 + /* Specifies how often in millisecs to poll for card status changes 53 + * when the cover switch is open */ 54 + #define OMAP_MMC_SWITCH_POLL_DELAY 500 55 + 56 + static int mmc_omap_enable_poll = 1; 57 + 58 + struct mmc_omap_host { 59 + int initialized; 60 + int suspended; 61 + struct mmc_request * mrq; 62 + struct mmc_command * cmd; 63 + struct mmc_data * data; 64 + struct mmc_host * mmc; 65 + struct device * dev; 66 + unsigned char id; /* 16xx chips have 2 MMC blocks */ 67 + struct clk * iclk; 68 + struct clk * fclk; 69 + void __iomem *base; 70 + int irq; 71 + unsigned char bus_mode; 72 + unsigned char hw_bus_mode; 73 + 74 + unsigned int sg_len; 75 + int sg_idx; 76 + u16 * buffer; 77 + u32 buffer_bytes_left; 78 + u32 total_bytes_left; 79 + 80 + unsigned use_dma:1; 81 + unsigned brs_received:1, dma_done:1; 82 + unsigned dma_is_read:1; 83 + unsigned dma_in_use:1; 84 + int dma_ch; 85 + spinlock_t dma_lock; 86 + struct timer_list dma_timer; 87 + unsigned dma_len; 88 + 89 + short power_pin; 90 + short wp_pin; 91 + 92 + int switch_pin; 93 + struct work_struct switch_work; 94 + struct timer_list switch_timer; 95 + int switch_last_state; 96 + }; 97 + 98 + static inline int 99 + mmc_omap_cover_is_open(struct mmc_omap_host *host) 100 + { 101 + if (host->switch_pin < 0) 102 + return 0; 103 + return omap_get_gpio_datain(host->switch_pin); 104 + } 105 + 106 + static ssize_t 107 + mmc_omap_show_cover_switch(struct device *dev, 108 + struct device_attribute *attr, char *buf) 109 + { 110 + struct mmc_omap_host *host = dev_get_drvdata(dev); 111 + 112 + return sprintf(buf, "%s\n", mmc_omap_cover_is_open(host) ? 
"open" : 113 + "closed"); 114 + } 115 + 116 + static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL); 117 + 118 + static ssize_t 119 + mmc_omap_show_enable_poll(struct device *dev, 120 + struct device_attribute *attr, char *buf) 121 + { 122 + return snprintf(buf, PAGE_SIZE, "%d\n", mmc_omap_enable_poll); 123 + } 124 + 125 + static ssize_t 126 + mmc_omap_store_enable_poll(struct device *dev, 127 + struct device_attribute *attr, const char *buf, 128 + size_t size) 129 + { 130 + int enable_poll; 131 + 132 + if (sscanf(buf, "%10d", &enable_poll) != 1) 133 + return -EINVAL; 134 + 135 + if (enable_poll != mmc_omap_enable_poll) { 136 + struct mmc_omap_host *host = dev_get_drvdata(dev); 137 + 138 + mmc_omap_enable_poll = enable_poll; 139 + if (enable_poll && host->switch_pin >= 0) 140 + schedule_work(&host->switch_work); 141 + } 142 + return size; 143 + } 144 + 145 + static DEVICE_ATTR(enable_poll, 0664, 146 + mmc_omap_show_enable_poll, mmc_omap_store_enable_poll); 147 + 148 + static void 149 + mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd) 150 + { 151 + u32 cmdreg; 152 + u32 resptype; 153 + u32 cmdtype; 154 + 155 + host->cmd = cmd; 156 + 157 + resptype = 0; 158 + cmdtype = 0; 159 + 160 + /* Our hardware needs to know exact type */ 161 + switch (RSP_TYPE(mmc_resp_type(cmd))) { 162 + case RSP_TYPE(MMC_RSP_R1): 163 + /* resp 1, resp 1b */ 164 + resptype = 1; 165 + break; 166 + case RSP_TYPE(MMC_RSP_R2): 167 + resptype = 2; 168 + break; 169 + case RSP_TYPE(MMC_RSP_R3): 170 + resptype = 3; 171 + break; 172 + default: 173 + break; 174 + } 175 + 176 + if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) { 177 + cmdtype = OMAP_MMC_CMDTYPE_ADTC; 178 + } else if (mmc_cmd_type(cmd) == MMC_CMD_BC) { 179 + cmdtype = OMAP_MMC_CMDTYPE_BC; 180 + } else if (mmc_cmd_type(cmd) == MMC_CMD_BCR) { 181 + cmdtype = OMAP_MMC_CMDTYPE_BCR; 182 + } else { 183 + cmdtype = OMAP_MMC_CMDTYPE_AC; 184 + } 185 + 186 + cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12); 187 + 188 + if (host->bus_mode == MMC_BUSMODE_OPENDRAIN) 189 + cmdreg |= 1 << 6; 190 + 191 + if (cmd->flags & MMC_RSP_BUSY) 192 + cmdreg |= 1 << 11; 193 + 194 + if (host->data && !(host->data->flags & MMC_DATA_WRITE)) 195 + cmdreg |= 1 << 15; 196 + 197 + clk_enable(host->fclk); 198 + 199 + OMAP_MMC_WRITE(host->base, CTO, 200); 200 + OMAP_MMC_WRITE(host->base, ARGL, cmd->arg & 0xffff); 201 + OMAP_MMC_WRITE(host->base, ARGH, cmd->arg >> 16); 202 + OMAP_MMC_WRITE(host->base, IE, 203 + OMAP_MMC_STAT_A_EMPTY | OMAP_MMC_STAT_A_FULL | 204 + OMAP_MMC_STAT_CMD_CRC | OMAP_MMC_STAT_CMD_TOUT | 205 + OMAP_MMC_STAT_DATA_CRC | OMAP_MMC_STAT_DATA_TOUT | 206 + OMAP_MMC_STAT_END_OF_CMD | OMAP_MMC_STAT_CARD_ERR | 207 + OMAP_MMC_STAT_END_OF_DATA); 208 + OMAP_MMC_WRITE(host->base, CMD, cmdreg); 209 + } 210 + 211 + static void 212 + mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data) 213 + { 214 + if (host->dma_in_use) { 215 + enum dma_data_direction dma_data_dir; 216 + 217 + BUG_ON(host->dma_ch < 0); 218 + if (data->error != MMC_ERR_NONE) 219 + omap_stop_dma(host->dma_ch); 220 + /* Release DMA channel lazily */ 221 + mod_timer(&host->dma_timer, jiffies + HZ); 222 + if (data->flags & MMC_DATA_WRITE) 223 + dma_data_dir = DMA_TO_DEVICE; 224 + else 225 + dma_data_dir = DMA_FROM_DEVICE; 226 + dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len, 227 + dma_data_dir); 228 + } 229 + host->data = NULL; 230 + host->sg_len = 0; 231 + clk_disable(host->fclk); 232 + 233 + /* NOTE: MMC layer will sometimes poll-wait CMD13 next, issuing 
234 + * dozens of requests until the card finishes writing data. 235 + * It'd be cheaper to just wait till an EOFB interrupt arrives... 236 + */ 237 + 238 + if (!data->stop) { 239 + host->mrq = NULL; 240 + mmc_request_done(host->mmc, data->mrq); 241 + return; 242 + } 243 + 244 + mmc_omap_start_command(host, data->stop); 245 + } 246 + 247 + static void 248 + mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data) 249 + { 250 + unsigned long flags; 251 + int done; 252 + 253 + if (!host->dma_in_use) { 254 + mmc_omap_xfer_done(host, data); 255 + return; 256 + } 257 + done = 0; 258 + spin_lock_irqsave(&host->dma_lock, flags); 259 + if (host->dma_done) 260 + done = 1; 261 + else 262 + host->brs_received = 1; 263 + spin_unlock_irqrestore(&host->dma_lock, flags); 264 + if (done) 265 + mmc_omap_xfer_done(host, data); 266 + } 267 + 268 + static void 269 + mmc_omap_dma_timer(unsigned long data) 270 + { 271 + struct mmc_omap_host *host = (struct mmc_omap_host *) data; 272 + 273 + BUG_ON(host->dma_ch < 0); 274 + omap_free_dma(host->dma_ch); 275 + host->dma_ch = -1; 276 + } 277 + 278 + static void 279 + mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data) 280 + { 281 + unsigned long flags; 282 + int done; 283 + 284 + done = 0; 285 + spin_lock_irqsave(&host->dma_lock, flags); 286 + if (host->brs_received) 287 + done = 1; 288 + else 289 + host->dma_done = 1; 290 + spin_unlock_irqrestore(&host->dma_lock, flags); 291 + if (done) 292 + mmc_omap_xfer_done(host, data); 293 + } 294 + 295 + static void 296 + mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd) 297 + { 298 + host->cmd = NULL; 299 + 300 + if (cmd->flags & MMC_RSP_PRESENT) { 301 + if (cmd->flags & MMC_RSP_136) { 302 + /* response type 2 */ 303 + cmd->resp[3] = 304 + OMAP_MMC_READ(host->base, RSP0) | 305 + (OMAP_MMC_READ(host->base, RSP1) << 16); 306 + cmd->resp[2] = 307 + OMAP_MMC_READ(host->base, RSP2) | 308 + (OMAP_MMC_READ(host->base, RSP3) << 16); 309 + cmd->resp[1] = 310 + OMAP_MMC_READ(host->base, RSP4) | 311 + (OMAP_MMC_READ(host->base, RSP5) << 16); 312 + cmd->resp[0] = 313 + OMAP_MMC_READ(host->base, RSP6) | 314 + (OMAP_MMC_READ(host->base, RSP7) << 16); 315 + } else { 316 + /* response types 1, 1b, 3, 4, 5, 6 */ 317 + cmd->resp[0] = 318 + OMAP_MMC_READ(host->base, RSP6) | 319 + (OMAP_MMC_READ(host->base, RSP7) << 16); 320 + } 321 + } 322 + 323 + if (host->data == NULL || cmd->error != MMC_ERR_NONE) { 324 + host->mrq = NULL; 325 + clk_disable(host->fclk); 326 + mmc_request_done(host->mmc, cmd->mrq); 327 + } 328 + } 329 + 330 + /* PIO only */ 331 + static void 332 + mmc_omap_sg_to_buf(struct mmc_omap_host *host) 333 + { 334 + struct scatterlist *sg; 335 + 336 + sg = host->data->sg + host->sg_idx; 337 + host->buffer_bytes_left = sg->length; 338 + host->buffer = page_address(sg->page) + sg->offset; 339 + if (host->buffer_bytes_left > host->total_bytes_left) 340 + host->buffer_bytes_left = host->total_bytes_left; 341 + } 342 + 343 + /* PIO only */ 344 + static void 345 + mmc_omap_xfer_data(struct mmc_omap_host *host, int write) 346 + { 347 + int n; 348 + void __iomem *reg; 349 + u16 *p; 350 + 351 + if (host->buffer_bytes_left == 0) { 352 + host->sg_idx++; 353 + BUG_ON(host->sg_idx == host->sg_len); 354 + mmc_omap_sg_to_buf(host); 355 + } 356 + n = 64; 357 + if (n > host->buffer_bytes_left) 358 + n = host->buffer_bytes_left; 359 + host->buffer_bytes_left -= n; 360 + host->total_bytes_left -= n; 361 + host->data->bytes_xfered += n; 362 + 363 + if (write) { 364 + __raw_writesw(host->base + 
OMAP_MMC_REG_DATA, host->buffer, n); 365 + } else { 366 + __raw_readsw(host->base + OMAP_MMC_REG_DATA, host->buffer, n); 367 + } 368 + } 369 + 370 + static inline void mmc_omap_report_irq(u16 status) 371 + { 372 + static const char *mmc_omap_status_bits[] = { 373 + "EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO", 374 + "CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR" 375 + }; 376 + int i, c = 0; 377 + 378 + for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++) 379 + if (status & (1 << i)) { 380 + if (c) 381 + printk(" "); 382 + printk("%s", mmc_omap_status_bits[i]); 383 + c++; 384 + } 385 + } 386 + 387 + static irqreturn_t mmc_omap_irq(int irq, void *dev_id, struct pt_regs *regs) 388 + { 389 + struct mmc_omap_host * host = (struct mmc_omap_host *)dev_id; 390 + u16 status; 391 + int end_command; 392 + int end_transfer; 393 + int transfer_error; 394 + 395 + if (host->cmd == NULL && host->data == NULL) { 396 + status = OMAP_MMC_READ(host->base, STAT); 397 + dev_info(mmc_dev(host->mmc),"spurious irq 0x%04x\n", status); 398 + if (status != 0) { 399 + OMAP_MMC_WRITE(host->base, STAT, status); 400 + OMAP_MMC_WRITE(host->base, IE, 0); 401 + } 402 + return IRQ_HANDLED; 403 + } 404 + 405 + end_command = 0; 406 + end_transfer = 0; 407 + transfer_error = 0; 408 + 409 + while ((status = OMAP_MMC_READ(host->base, STAT)) != 0) { 410 + OMAP_MMC_WRITE(host->base, STAT, status); 411 + #ifdef CONFIG_MMC_DEBUG 412 + dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ", 413 + status, host->cmd != NULL ? host->cmd->opcode : -1); 414 + mmc_omap_report_irq(status); 415 + printk("\n"); 416 + #endif 417 + if (host->total_bytes_left) { 418 + if ((status & OMAP_MMC_STAT_A_FULL) || 419 + (status & OMAP_MMC_STAT_END_OF_DATA)) 420 + mmc_omap_xfer_data(host, 0); 421 + if (status & OMAP_MMC_STAT_A_EMPTY) 422 + mmc_omap_xfer_data(host, 1); 423 + } 424 + 425 + if (status & OMAP_MMC_STAT_END_OF_DATA) { 426 + end_transfer = 1; 427 + } 428 + 429 + if (status & OMAP_MMC_STAT_DATA_TOUT) { 430 + dev_dbg(mmc_dev(host->mmc), "data timeout\n"); 431 + if (host->data) { 432 + host->data->error |= MMC_ERR_TIMEOUT; 433 + transfer_error = 1; 434 + } 435 + } 436 + 437 + if (status & OMAP_MMC_STAT_DATA_CRC) { 438 + if (host->data) { 439 + host->data->error |= MMC_ERR_BADCRC; 440 + dev_dbg(mmc_dev(host->mmc), 441 + "data CRC error, bytes left %d\n", 442 + host->total_bytes_left); 443 + transfer_error = 1; 444 + } else { 445 + dev_dbg(mmc_dev(host->mmc), "data CRC error\n"); 446 + } 447 + } 448 + 449 + if (status & OMAP_MMC_STAT_CMD_TOUT) { 450 + /* Timeouts are routine with some commands */ 451 + if (host->cmd) { 452 + if (host->cmd->opcode != MMC_ALL_SEND_CID && 453 + host->cmd->opcode != 454 + MMC_SEND_OP_COND && 455 + host->cmd->opcode != 456 + MMC_APP_CMD && 457 + !mmc_omap_cover_is_open(host)) 458 + dev_err(mmc_dev(host->mmc), 459 + "command timeout, CMD %d\n", 460 + host->cmd->opcode); 461 + host->cmd->error = MMC_ERR_TIMEOUT; 462 + end_command = 1; 463 + } 464 + } 465 + 466 + if (status & OMAP_MMC_STAT_CMD_CRC) { 467 + if (host->cmd) { 468 + dev_err(mmc_dev(host->mmc), 469 + "command CRC error (CMD%d, arg 0x%08x)\n", 470 + host->cmd->opcode, host->cmd->arg); 471 + host->cmd->error = MMC_ERR_BADCRC; 472 + end_command = 1; 473 + } else 474 + dev_err(mmc_dev(host->mmc), 475 + "command CRC error without cmd?\n"); 476 + } 477 + 478 + if (status & OMAP_MMC_STAT_CARD_ERR) { 479 + if (host->cmd && host->cmd->opcode == MMC_STOP_TRANSMISSION) { 480 + u32 response = OMAP_MMC_READ(host->base, RSP6) 481 + | (OMAP_MMC_READ(host->base, 
RSP7) << 16); 482 + /* STOP sometimes sets must-ignore bits */ 483 + if (!(response & (R1_CC_ERROR 484 + | R1_ILLEGAL_COMMAND 485 + | R1_COM_CRC_ERROR))) { 486 + end_command = 1; 487 + continue; 488 + } 489 + } 490 + 491 + dev_dbg(mmc_dev(host->mmc), "card status error (CMD%d)\n", 492 + host->cmd->opcode); 493 + if (host->cmd) { 494 + host->cmd->error = MMC_ERR_FAILED; 495 + end_command = 1; 496 + } 497 + if (host->data) { 498 + host->data->error = MMC_ERR_FAILED; 499 + transfer_error = 1; 500 + } 501 + } 502 + 503 + /* 504 + * NOTE: On 1610 the END_OF_CMD may come too early when 505 + * starting a write 506 + */ 507 + if ((status & OMAP_MMC_STAT_END_OF_CMD) && 508 + (!(status & OMAP_MMC_STAT_A_EMPTY))) { 509 + end_command = 1; 510 + } 511 + } 512 + 513 + if (end_command) { 514 + mmc_omap_cmd_done(host, host->cmd); 515 + } 516 + if (transfer_error) 517 + mmc_omap_xfer_done(host, host->data); 518 + else if (end_transfer) 519 + mmc_omap_end_of_data(host, host->data); 520 + 521 + return IRQ_HANDLED; 522 + } 523 + 524 + static irqreturn_t mmc_omap_switch_irq(int irq, void *dev_id, struct pt_regs *regs) 525 + { 526 + struct mmc_omap_host *host = (struct mmc_omap_host *) dev_id; 527 + 528 + schedule_work(&host->switch_work); 529 + 530 + return IRQ_HANDLED; 531 + } 532 + 533 + static void mmc_omap_switch_timer(unsigned long arg) 534 + { 535 + struct mmc_omap_host *host = (struct mmc_omap_host *) arg; 536 + 537 + schedule_work(&host->switch_work); 538 + } 539 + 540 + /* FIXME: Handle card insertion and removal properly. Maybe use a mask 541 + * for MMC state? */ 542 + static void mmc_omap_switch_callback(unsigned long data, u8 mmc_mask) 543 + { 544 + } 545 + 546 + static void mmc_omap_switch_handler(void *data) 547 + { 548 + struct mmc_omap_host *host = (struct mmc_omap_host *) data; 549 + struct mmc_card *card; 550 + static int complained = 0; 551 + int cards = 0, cover_open; 552 + 553 + if (host->switch_pin == -1) 554 + return; 555 + cover_open = mmc_omap_cover_is_open(host); 556 + if (cover_open != host->switch_last_state) { 557 + kobject_uevent(&host->dev->kobj, KOBJ_CHANGE); 558 + host->switch_last_state = cover_open; 559 + } 560 + mmc_detect_change(host->mmc, 0); 561 + list_for_each_entry(card, &host->mmc->cards, node) { 562 + if (mmc_card_present(card)) 563 + cards++; 564 + } 565 + if (mmc_omap_cover_is_open(host)) { 566 + if (!complained) { 567 + dev_info(mmc_dev(host->mmc), "cover is open"); 568 + complained = 1; 569 + } 570 + if (mmc_omap_enable_poll) 571 + mod_timer(&host->switch_timer, jiffies + 572 + msecs_to_jiffies(OMAP_MMC_SWITCH_POLL_DELAY)); 573 + } else { 574 + complained = 0; 575 + } 576 + } 577 + 578 + /* Prepare to transfer the next segment of a scatterlist */ 579 + static void 580 + mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data) 581 + { 582 + int dma_ch = host->dma_ch; 583 + unsigned long data_addr; 584 + u16 buf, frame; 585 + u32 count; 586 + struct scatterlist *sg = &data->sg[host->sg_idx]; 587 + int src_port = 0; 588 + int dst_port = 0; 589 + int sync_dev = 0; 590 + 591 + data_addr = io_v2p((u32) host->base) + OMAP_MMC_REG_DATA; 592 + frame = 1 << data->blksz_bits; 593 + count = sg_dma_len(sg); 594 + 595 + if ((data->blocks == 1) && (count > (1 << data->blksz_bits))) 596 + count = frame; 597 + 598 + host->dma_len = count; 599 + 600 + /* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx. 601 + * Use 16 or 32 word frames when the blocksize is at least that large. 602 + * Blocksize is usually 512 bytes; but not for some SD reads. 
603 + */ 604 + if (cpu_is_omap15xx() && frame > 32) 605 + frame = 32; 606 + else if (frame > 64) 607 + frame = 64; 608 + count /= frame; 609 + frame >>= 1; 610 + 611 + if (!(data->flags & MMC_DATA_WRITE)) { 612 + buf = 0x800f | ((frame - 1) << 8); 613 + 614 + if (cpu_class_is_omap1()) { 615 + src_port = OMAP_DMA_PORT_TIPB; 616 + dst_port = OMAP_DMA_PORT_EMIFF; 617 + } 618 + if (cpu_is_omap24xx()) 619 + sync_dev = OMAP24XX_DMA_MMC1_RX; 620 + 621 + omap_set_dma_src_params(dma_ch, src_port, 622 + OMAP_DMA_AMODE_CONSTANT, 623 + data_addr, 0, 0); 624 + omap_set_dma_dest_params(dma_ch, dst_port, 625 + OMAP_DMA_AMODE_POST_INC, 626 + sg_dma_address(sg), 0, 0); 627 + omap_set_dma_dest_data_pack(dma_ch, 1); 628 + omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4); 629 + } else { 630 + buf = 0x0f80 | ((frame - 1) << 0); 631 + 632 + if (cpu_class_is_omap1()) { 633 + src_port = OMAP_DMA_PORT_EMIFF; 634 + dst_port = OMAP_DMA_PORT_TIPB; 635 + } 636 + if (cpu_is_omap24xx()) 637 + sync_dev = OMAP24XX_DMA_MMC1_TX; 638 + 639 + omap_set_dma_dest_params(dma_ch, dst_port, 640 + OMAP_DMA_AMODE_CONSTANT, 641 + data_addr, 0, 0); 642 + omap_set_dma_src_params(dma_ch, src_port, 643 + OMAP_DMA_AMODE_POST_INC, 644 + sg_dma_address(sg), 0, 0); 645 + omap_set_dma_src_data_pack(dma_ch, 1); 646 + omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4); 647 + } 648 + 649 + /* Max limit for DMA frame count is 0xffff */ 650 + if (unlikely(count > 0xffff)) 651 + BUG(); 652 + 653 + OMAP_MMC_WRITE(host->base, BUF, buf); 654 + omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16, 655 + frame, count, OMAP_DMA_SYNC_FRAME, 656 + sync_dev, 0); 657 + } 658 + 659 + /* A scatterlist segment completed */ 660 + static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data) 661 + { 662 + struct mmc_omap_host *host = (struct mmc_omap_host *) data; 663 + struct mmc_data *mmcdat = host->data; 664 + 665 + if (unlikely(host->dma_ch < 0)) { 666 + dev_err(mmc_dev(host->mmc), "DMA callback while DMA not 667 + enabled\n"); 668 + return; 669 + } 670 + /* FIXME: We really should do something to _handle_ the errors */ 671 + if (ch_status & OMAP_DMA_TOUT_IRQ) { 672 + dev_err(mmc_dev(host->mmc),"DMA timeout\n"); 673 + return; 674 + } 675 + if (ch_status & OMAP_DMA_DROP_IRQ) { 676 + dev_err(mmc_dev(host->mmc), "DMA sync error\n"); 677 + return; 678 + } 679 + if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) { 680 + return; 681 + } 682 + mmcdat->bytes_xfered += host->dma_len; 683 + host->sg_idx++; 684 + if (host->sg_idx < host->sg_len) { 685 + mmc_omap_prepare_dma(host, host->data); 686 + omap_start_dma(host->dma_ch); 687 + } else 688 + mmc_omap_dma_done(host, host->data); 689 + } 690 + 691 + static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data) 692 + { 693 + const char *dev_name; 694 + int sync_dev, dma_ch, is_read, r; 695 + 696 + is_read = !(data->flags & MMC_DATA_WRITE); 697 + del_timer_sync(&host->dma_timer); 698 + if (host->dma_ch >= 0) { 699 + if (is_read == host->dma_is_read) 700 + return 0; 701 + omap_free_dma(host->dma_ch); 702 + host->dma_ch = -1; 703 + } 704 + 705 + if (is_read) { 706 + if (host->id == 1) { 707 + sync_dev = OMAP_DMA_MMC_RX; 708 + dev_name = "MMC1 read"; 709 + } else { 710 + sync_dev = OMAP_DMA_MMC2_RX; 711 + dev_name = "MMC2 read"; 712 + } 713 + } else { 714 + if (host->id == 1) { 715 + sync_dev = OMAP_DMA_MMC_TX; 716 + dev_name = "MMC1 write"; 717 + } else { 718 + sync_dev = OMAP_DMA_MMC2_TX; 719 + dev_name = "MMC2 write"; 720 + } 721 + } 722 + r = omap_request_dma(sync_dev, dev_name, 
mmc_omap_dma_cb, 723 + host, &dma_ch); 724 + if (r != 0) { 725 + dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r); 726 + return r; 727 + } 728 + host->dma_ch = dma_ch; 729 + host->dma_is_read = is_read; 730 + 731 + return 0; 732 + } 733 + 734 + static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req) 735 + { 736 + u16 reg; 737 + 738 + reg = OMAP_MMC_READ(host->base, SDIO); 739 + reg &= ~(1 << 5); 740 + OMAP_MMC_WRITE(host->base, SDIO, reg); 741 + /* Set maximum timeout */ 742 + OMAP_MMC_WRITE(host->base, CTO, 0xff); 743 + } 744 + 745 + static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req) 746 + { 747 + int timeout; 748 + u16 reg; 749 + 750 + /* Convert ns to clock cycles by assuming 20MHz frequency 751 + * 1 cycle at 20MHz = 500 ns 752 + */ 753 + timeout = req->data->timeout_clks + req->data->timeout_ns / 500; 754 + 755 + /* Check if we need to use timeout multiplier register */ 756 + reg = OMAP_MMC_READ(host->base, SDIO); 757 + if (timeout > 0xffff) { 758 + reg |= (1 << 5); 759 + timeout /= 1024; 760 + } else 761 + reg &= ~(1 << 5); 762 + OMAP_MMC_WRITE(host->base, SDIO, reg); 763 + OMAP_MMC_WRITE(host->base, DTO, timeout); 764 + } 765 + 766 + static void 767 + mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req) 768 + { 769 + struct mmc_data *data = req->data; 770 + int i, use_dma, block_size; 771 + unsigned sg_len; 772 + 773 + host->data = data; 774 + if (data == NULL) { 775 + OMAP_MMC_WRITE(host->base, BLEN, 0); 776 + OMAP_MMC_WRITE(host->base, NBLK, 0); 777 + OMAP_MMC_WRITE(host->base, BUF, 0); 778 + host->dma_in_use = 0; 779 + set_cmd_timeout(host, req); 780 + return; 781 + } 782 + 783 + 784 + block_size = 1 << data->blksz_bits; 785 + 786 + OMAP_MMC_WRITE(host->base, NBLK, data->blocks - 1); 787 + OMAP_MMC_WRITE(host->base, BLEN, block_size - 1); 788 + set_data_timeout(host, req); 789 + 790 + /* cope with calling layer confusion; it issues "single 791 + * block" writes using multi-block scatterlists. 792 + */ 793 + sg_len = (data->blocks == 1) ? 1 : data->sg_len; 794 + 795 + /* Only do DMA for entire blocks */ 796 + use_dma = host->use_dma; 797 + if (use_dma) { 798 + for (i = 0; i < sg_len; i++) { 799 + if ((data->sg[i].length % block_size) != 0) { 800 + use_dma = 0; 801 + break; 802 + } 803 + } 804 + } 805 + 806 + host->sg_idx = 0; 807 + if (use_dma) { 808 + if (mmc_omap_get_dma_channel(host, data) == 0) { 809 + enum dma_data_direction dma_data_dir; 810 + 811 + if (data->flags & MMC_DATA_WRITE) 812 + dma_data_dir = DMA_TO_DEVICE; 813 + else 814 + dma_data_dir = DMA_FROM_DEVICE; 815 + 816 + host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, 817 + sg_len, dma_data_dir); 818 + host->total_bytes_left = 0; 819 + mmc_omap_prepare_dma(host, req->data); 820 + host->brs_received = 0; 821 + host->dma_done = 0; 822 + host->dma_in_use = 1; 823 + } else 824 + use_dma = 0; 825 + } 826 + 827 + /* Revert to PIO? 
*/ 828 + if (!use_dma) { 829 + OMAP_MMC_WRITE(host->base, BUF, 0x1f1f); 830 + host->total_bytes_left = data->blocks * block_size; 831 + host->sg_len = sg_len; 832 + mmc_omap_sg_to_buf(host); 833 + host->dma_in_use = 0; 834 + } 835 + } 836 + 837 + static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req) 838 + { 839 + struct mmc_omap_host *host = mmc_priv(mmc); 840 + 841 + WARN_ON(host->mrq != NULL); 842 + 843 + host->mrq = req; 844 + 845 + /* only touch fifo AFTER the controller readies it */ 846 + mmc_omap_prepare_data(host, req); 847 + mmc_omap_start_command(host, req->cmd); 848 + if (host->dma_in_use) 849 + omap_start_dma(host->dma_ch); 850 + } 851 + 852 + static void innovator_fpga_socket_power(int on) 853 + { 854 + #if defined(CONFIG_MACH_OMAP_INNOVATOR) && defined(CONFIG_ARCH_OMAP15XX) 855 + 856 + if (on) { 857 + fpga_write(fpga_read(OMAP1510_FPGA_POWER) | (1 << 3), 858 + OMAP1510_FPGA_POWER); 859 + } else { 860 + fpga_write(fpga_read(OMAP1510_FPGA_POWER) & ~(1 << 3), 861 + OMAP1510_FPGA_POWER); 862 + } 863 + #endif 864 + } 865 + 866 + /* 867 + * Turn the socket power on/off. Innovator uses FPGA, most boards 868 + * probably use GPIO. 869 + */ 870 + static void mmc_omap_power(struct mmc_omap_host *host, int on) 871 + { 872 + if (on) { 873 + if (machine_is_omap_innovator()) 874 + innovator_fpga_socket_power(1); 875 + else if (machine_is_omap_h2()) 876 + tps65010_set_gpio_out_value(GPIO3, HIGH); 877 + else if (machine_is_omap_h3()) 878 + /* GPIO 4 of TPS65010 sends SD_EN signal */ 879 + tps65010_set_gpio_out_value(GPIO4, HIGH); 880 + else if (cpu_is_omap24xx()) { 881 + u16 reg = OMAP_MMC_READ(host->base, CON); 882 + OMAP_MMC_WRITE(host->base, CON, reg | (1 << 11)); 883 + } else 884 + if (host->power_pin >= 0) 885 + omap_set_gpio_dataout(host->power_pin, 1); 886 + } else { 887 + if (machine_is_omap_innovator()) 888 + innovator_fpga_socket_power(0); 889 + else if (machine_is_omap_h2()) 890 + tps65010_set_gpio_out_value(GPIO3, LOW); 891 + else if (machine_is_omap_h3()) 892 + tps65010_set_gpio_out_value(GPIO4, LOW); 893 + else if (cpu_is_omap24xx()) { 894 + u16 reg = OMAP_MMC_READ(host->base, CON); 895 + OMAP_MMC_WRITE(host->base, CON, reg & ~(1 << 11)); 896 + } else 897 + if (host->power_pin >= 0) 898 + omap_set_gpio_dataout(host->power_pin, 0); 899 + } 900 + } 901 + 902 + static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 903 + { 904 + struct mmc_omap_host *host = mmc_priv(mmc); 905 + int dsor; 906 + int realclock, i; 907 + 908 + realclock = ios->clock; 909 + 910 + if (ios->clock == 0) 911 + dsor = 0; 912 + else { 913 + int func_clk_rate = clk_get_rate(host->fclk); 914 + 915 + dsor = func_clk_rate / realclock; 916 + if (dsor < 1) 917 + dsor = 1; 918 + 919 + if (func_clk_rate / dsor > realclock) 920 + dsor++; 921 + 922 + if (dsor > 250) 923 + dsor = 250; 924 + dsor++; 925 + 926 + if (ios->bus_width == MMC_BUS_WIDTH_4) 927 + dsor |= 1 << 15; 928 + } 929 + 930 + switch (ios->power_mode) { 931 + case MMC_POWER_OFF: 932 + mmc_omap_power(host, 0); 933 + break; 934 + case MMC_POWER_UP: 935 + case MMC_POWER_ON: 936 + mmc_omap_power(host, 1); 937 + dsor |= 1<<11; 938 + break; 939 + } 940 + 941 + host->bus_mode = ios->bus_mode; 942 + host->hw_bus_mode = host->bus_mode; 943 + 944 + clk_enable(host->fclk); 945 + 946 + /* On insanely high arm_per frequencies something sometimes 947 + * goes somehow out of sync, and the POW bit is not being set, 948 + * which results in the while loop below getting stuck. 
949 + * Writing to the CON register twice seems to do the trick. */ 950 + for (i = 0; i < 2; i++) 951 + OMAP_MMC_WRITE(host->base, CON, dsor); 952 + if (ios->power_mode == MMC_POWER_UP) { 953 + /* Send clock cycles, poll completion */ 954 + OMAP_MMC_WRITE(host->base, IE, 0); 955 + OMAP_MMC_WRITE(host->base, STAT, 0xffff); 956 + OMAP_MMC_WRITE(host->base, CMD, 1<<7); 957 + while (0 == (OMAP_MMC_READ(host->base, STAT) & 1)); 958 + OMAP_MMC_WRITE(host->base, STAT, 1); 959 + } 960 + clk_disable(host->fclk); 961 + } 962 + 963 + static int mmc_omap_get_ro(struct mmc_host *mmc) 964 + { 965 + struct mmc_omap_host *host = mmc_priv(mmc); 966 + 967 + return host->wp_pin && omap_get_gpio_datain(host->wp_pin); 968 + } 969 + 970 + static struct mmc_host_ops mmc_omap_ops = { 971 + .request = mmc_omap_request, 972 + .set_ios = mmc_omap_set_ios, 973 + .get_ro = mmc_omap_get_ro, 974 + }; 975 + 976 + static int __init mmc_omap_probe(struct platform_device *pdev) 977 + { 978 + struct omap_mmc_conf *minfo = pdev->dev.platform_data; 979 + struct mmc_host *mmc; 980 + struct mmc_omap_host *host = NULL; 981 + int ret = 0; 982 + 983 + if (platform_get_resource(pdev, IORESOURCE_MEM, 0) || 984 + platform_get_irq(pdev, IORESOURCE_IRQ, 0)) { 985 + dev_err(&pdev->dev, "mmc_omap_probe: invalid resource type\n"); 986 + return -ENODEV; 987 + } 988 + 989 + if (!request_mem_region(pdev->resource[0].start, 990 + pdev->resource[0].end - pdev->resource[0].start + 1, 991 + pdev->name)) { 992 + dev_dbg(&pdev->dev, "request_mem_region failed\n"); 993 + return -EBUSY; 994 + } 995 + 996 + mmc = mmc_alloc_host(sizeof(struct mmc_omap_host), &pdev->dev); 997 + if (!mmc) { 998 + ret = -ENOMEM; 999 + goto out; 1000 + } 1001 + 1002 + host = mmc_priv(mmc); 1003 + host->mmc = mmc; 1004 + 1005 + spin_lock_init(&host->dma_lock); 1006 + init_timer(&host->dma_timer); 1007 + host->dma_timer.function = mmc_omap_dma_timer; 1008 + host->dma_timer.data = (unsigned long) host; 1009 + 1010 + host->id = pdev->id; 1011 + 1012 + if (cpu_is_omap24xx()) { 1013 + host->iclk = clk_get(&pdev->dev, "mmc_ick"); 1014 + if (IS_ERR(host->iclk)) 1015 + goto out; 1016 + clk_enable(host->iclk); 1017 + } 1018 + 1019 + if (!cpu_is_omap24xx()) 1020 + host->fclk = clk_get(&pdev->dev, "mmc_ck"); 1021 + else 1022 + host->fclk = clk_get(&pdev->dev, "mmc_fck"); 1023 + 1024 + if (IS_ERR(host->fclk)) { 1025 + ret = PTR_ERR(host->fclk); 1026 + goto out; 1027 + } 1028 + 1029 + /* REVISIT: 1030 + * Also, use minfo->cover to decide how to manage 1031 + * the card detect sensing. 1032 + */ 1033 + host->power_pin = minfo->power_pin; 1034 + host->switch_pin = minfo->switch_pin; 1035 + host->wp_pin = minfo->wp_pin; 1036 + host->use_dma = 1; 1037 + host->dma_ch = -1; 1038 + 1039 + host->irq = pdev->resource[1].start; 1040 + host->base = ioremap(pdev->res.start, SZ_4K); 1041 + if (!host->base) { 1042 + ret = -ENOMEM; 1043 + goto out; 1044 + } 1045 + 1046 + if (minfo->wire4) 1047 + mmc->caps |= MMC_CAP_4_BIT_DATA; 1048 + 1049 + mmc->ops = &mmc_omap_ops; 1050 + mmc->f_min = 400000; 1051 + mmc->f_max = 24000000; 1052 + mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34; 1053 + 1054 + /* Use scatterlist DMA to reduce per-transfer costs. 1055 + * NOTE max_seg_size assumption that small blocks aren't 1056 + * normally used (except e.g. for reading SD registers). 
1057 + */ 1058 + mmc->max_phys_segs = 32; 1059 + mmc->max_hw_segs = 32; 1060 + mmc->max_sectors = 256; /* NBLK max 11-bits, OMAP also limited by DMA */ 1061 + mmc->max_seg_size = mmc->max_sectors * 512; 1062 + 1063 + if (host->power_pin >= 0) { 1064 + if ((ret = omap_request_gpio(host->power_pin)) != 0) { 1065 + dev_err(mmc_dev(host->mmc), "Unable to get GPIO 1066 + pin for MMC power\n"); 1067 + goto out; 1068 + } 1069 + omap_set_gpio_direction(host->power_pin, 0); 1070 + } 1071 + 1072 + ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host); 1073 + if (ret) 1074 + goto out; 1075 + 1076 + host->dev = &pdev->dev; 1077 + platform_set_drvdata(pdev, host); 1078 + 1079 + mmc_add_host(mmc); 1080 + 1081 + if (host->switch_pin >= 0) { 1082 + INIT_WORK(&host->switch_work, mmc_omap_switch_handler, host); 1083 + init_timer(&host->switch_timer); 1084 + host->switch_timer.function = mmc_omap_switch_timer; 1085 + host->switch_timer.data = (unsigned long) host; 1086 + if (omap_request_gpio(host->switch_pin) != 0) { 1087 + dev_warn(mmc_dev(host->mmc), "Unable to get GPIO pin for MMC cover switch\n"); 1088 + host->switch_pin = -1; 1089 + goto no_switch; 1090 + } 1091 + 1092 + omap_set_gpio_direction(host->switch_pin, 1); 1093 + ret = request_irq(OMAP_GPIO_IRQ(host->switch_pin), 1094 + mmc_omap_switch_irq, SA_TRIGGER_RISING, DRIVER_NAME, host); 1095 + if (ret) { 1096 + dev_warn(mmc_dev(host->mmc), "Unable to get IRQ for MMC cover switch\n"); 1097 + omap_free_gpio(host->switch_pin); 1098 + host->switch_pin = -1; 1099 + goto no_switch; 1100 + } 1101 + ret = device_create_file(&pdev->dev, &dev_attr_cover_switch); 1102 + if (ret == 0) { 1103 + ret = device_create_file(&pdev->dev, &dev_attr_enable_poll); 1104 + if (ret != 0) 1105 + device_remove_file(&pdev->dev, &dev_attr_cover_switch); 1106 + } 1107 + if (ret) { 1108 + dev_wan(mmc_dev(host->mmc), "Unable to create sysfs attributes\n"); 1109 + free_irq(OMAP_GPIO_IRQ(host->switch_pin), host); 1110 + omap_free_gpio(host->switch_pin); 1111 + host->switch_pin = -1; 1112 + goto no_switch; 1113 + } 1114 + if (mmc_omap_enable_poll && mmc_omap_cover_is_open(host)) 1115 + schedule_work(&host->switch_work); 1116 + } 1117 + 1118 + no_switch: 1119 + return 0; 1120 + 1121 + out: 1122 + /* FIXME: Free other resources too. 
*/ 1123 + if (host) { 1124 + if (host->iclk && !IS_ERR(host->iclk)) 1125 + clk_put(host->iclk); 1126 + if (host->fclk && !IS_ERR(host->fclk)) 1127 + clk_put(host->fclk); 1128 + mmc_free_host(host->mmc); 1129 + } 1130 + return ret; 1131 + } 1132 + 1133 + static int mmc_omap_remove(struct platform_device *pdev) 1134 + { 1135 + struct mmc_omap_host *host = platform_get_drvdata(pdev); 1136 + 1137 + platform_set_drvdata(pdev, NULL); 1138 + 1139 + if (host) { 1140 + mmc_remove_host(host->mmc); 1141 + free_irq(host->irq, host); 1142 + 1143 + if (host->power_pin >= 0) 1144 + omap_free_gpio(host->power_pin); 1145 + if (host->switch_pin >= 0) { 1146 + device_remove_file(&pdev->dev, &dev_attr_enable_poll); 1147 + device_remove_file(&pdev->dev, &dev_attr_cover_switch); 1148 + free_irq(OMAP_GPIO_IRQ(host->switch_pin), host); 1149 + omap_free_gpio(host->switch_pin); 1150 + host->switch_pin = -1; 1151 + del_timer_sync(&host->switch_timer); 1152 + flush_scheduled_work(); 1153 + } 1154 + if (host->iclk && !IS_ERR(host->iclk)) 1155 + clk_put(host->iclk); 1156 + if (host->fclk && !IS_ERR(host->fclk)) 1157 + clk_put(host->fclk); 1158 + mmc_free_host(host->mmc); 1159 + } 1160 + 1161 + release_mem_region(pdev->resource[0].start, 1162 + pdev->resource[0].end - pdev->resource[0].start + 1); 1163 + 1164 + return 0; 1165 + } 1166 + 1167 + #ifdef CONFIG_PM 1168 + static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg) 1169 + { 1170 + int ret = 0; 1171 + struct mmc_omap_host *host = platform_get_drvdata(pdev); 1172 + 1173 + if (host && host->suspended) 1174 + return 0; 1175 + 1176 + if (host) { 1177 + ret = mmc_suspend_host(host->mmc, mesg); 1178 + if (ret == 0) 1179 + host->suspended = 1; 1180 + } 1181 + return ret; 1182 + } 1183 + 1184 + static int mmc_omap_resume(struct platform_device *pdev) 1185 + { 1186 + int ret = 0; 1187 + struct mmc_omap_host *host = platform_get_drvdata(pdev); 1188 + 1189 + if (host && !host->suspended) 1190 + return 0; 1191 + 1192 + if (host) { 1193 + ret = mmc_resume_host(host->mmc); 1194 + if (ret == 0) 1195 + host->suspended = 0; 1196 + } 1197 + 1198 + return ret; 1199 + } 1200 + #else 1201 + #define mmc_omap_suspend NULL 1202 + #define mmc_omap_resume NULL 1203 + #endif 1204 + 1205 + static struct platform_driver mmc_omap_driver = { 1206 + .probe = mmc_omap_probe, 1207 + .remove = mmc_omap_remove, 1208 + .suspend = mmc_omap_suspend, 1209 + .resume = mmc_omap_resume, 1210 + .driver = { 1211 + .name = DRIVER_NAME, 1212 + }, 1213 + }; 1214 + 1215 + static int __init mmc_omap_init(void) 1216 + { 1217 + return platform_driver_register(&mmc_omap_driver); 1218 + } 1219 + 1220 + static void __exit mmc_omap_exit(void) 1221 + { 1222 + platform_driver_unregister(&mmc_omap_driver); 1223 + } 1224 + 1225 + module_init(mmc_omap_init); 1226 + module_exit(mmc_omap_exit); 1227 + 1228 + MODULE_DESCRIPTION("OMAP Multimedia Card driver"); 1229 + MODULE_LICENSE("GPL"); 1230 + MODULE_ALIAS(DRIVER_NAME); 1231 + MODULE_AUTHOR("Juha Yrj�l�");
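As a usage illustration only, the driver expects a board file to register a platform device named "mmci-omap" carrying a memory resource, an IRQ resource and a struct omap_mmc_conf as platform data. The field names below come from the references in mmc_omap_probe(); the register base, IRQ number and device id are placeholder assumptions for a hypothetical board:

    /* Hypothetical board-file sketch; base address, IRQ and GPIO numbers are placeholders */
    static struct omap_mmc_conf board_mmc_conf = {
            .power_pin  = -1,       /* power is not switched by a GPIO */
            .switch_pin = -1,       /* no cover-switch GPIO wired */
            .wp_pin     = -1,       /* no write-protect GPIO wired */
            .wire4      = 1,        /* slot wired for 4-bit data */
    };

    static struct resource board_mmc_resources[] = {
            { .start = 0xfffb7800, .end = 0xfffb7800 + SZ_4K - 1, .flags = IORESOURCE_MEM },
            { .start = INT_MMC,    .flags = IORESOURCE_IRQ },
    };

    static struct platform_device board_mmc_device = {
            .name           = "mmci-omap",  /* must match DRIVER_NAME */
            .id             = 1,            /* first MMC block */
            .dev            = { .platform_data = &board_mmc_conf },
            .num_resources  = ARRAY_SIZE(board_mmc_resources),
            .resource       = board_mmc_resources,
    };

Calling platform_device_register(&board_mmc_device) during board init would then let mmc_omap_probe() take over.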
+55
drivers/mmc/omap.h
··· 1 + #ifndef DRIVERS_MEDIA_MMC_OMAP_H 2 + #define DRIVERS_MEDIA_MMC_OMAP_H 3 + 4 + #define OMAP_MMC_REG_CMD 0x00 5 + #define OMAP_MMC_REG_ARGL 0x04 6 + #define OMAP_MMC_REG_ARGH 0x08 7 + #define OMAP_MMC_REG_CON 0x0c 8 + #define OMAP_MMC_REG_STAT 0x10 9 + #define OMAP_MMC_REG_IE 0x14 10 + #define OMAP_MMC_REG_CTO 0x18 11 + #define OMAP_MMC_REG_DTO 0x1c 12 + #define OMAP_MMC_REG_DATA 0x20 13 + #define OMAP_MMC_REG_BLEN 0x24 14 + #define OMAP_MMC_REG_NBLK 0x28 15 + #define OMAP_MMC_REG_BUF 0x2c 16 + #define OMAP_MMC_REG_SDIO 0x34 17 + #define OMAP_MMC_REG_REV 0x3c 18 + #define OMAP_MMC_REG_RSP0 0x40 19 + #define OMAP_MMC_REG_RSP1 0x44 20 + #define OMAP_MMC_REG_RSP2 0x48 21 + #define OMAP_MMC_REG_RSP3 0x4c 22 + #define OMAP_MMC_REG_RSP4 0x50 23 + #define OMAP_MMC_REG_RSP5 0x54 24 + #define OMAP_MMC_REG_RSP6 0x58 25 + #define OMAP_MMC_REG_RSP7 0x5c 26 + #define OMAP_MMC_REG_IOSR 0x60 27 + #define OMAP_MMC_REG_SYSC 0x64 28 + #define OMAP_MMC_REG_SYSS 0x68 29 + 30 + #define OMAP_MMC_STAT_CARD_ERR (1 << 14) 31 + #define OMAP_MMC_STAT_CARD_IRQ (1 << 13) 32 + #define OMAP_MMC_STAT_OCR_BUSY (1 << 12) 33 + #define OMAP_MMC_STAT_A_EMPTY (1 << 11) 34 + #define OMAP_MMC_STAT_A_FULL (1 << 10) 35 + #define OMAP_MMC_STAT_CMD_CRC (1 << 8) 36 + #define OMAP_MMC_STAT_CMD_TOUT (1 << 7) 37 + #define OMAP_MMC_STAT_DATA_CRC (1 << 6) 38 + #define OMAP_MMC_STAT_DATA_TOUT (1 << 5) 39 + #define OMAP_MMC_STAT_END_BUSY (1 << 4) 40 + #define OMAP_MMC_STAT_END_OF_DATA (1 << 3) 41 + #define OMAP_MMC_STAT_CARD_BUSY (1 << 2) 42 + #define OMAP_MMC_STAT_END_OF_CMD (1 << 0) 43 + 44 + #define OMAP_MMC_READ(base, reg) __raw_readw((base) + OMAP_MMC_REG_##reg) 45 + #define OMAP_MMC_WRITE(base, reg, val) __raw_writew((val), (base) + OMAP_MMC_REG_##reg) 46 + 47 + /* 48 + * Command types 49 + */ 50 + #define OMAP_MMC_CMDTYPE_BC 0 51 + #define OMAP_MMC_CMDTYPE_BCR 1 52 + #define OMAP_MMC_CMDTYPE_AC 2 53 + #define OMAP_MMC_CMDTYPE_ADTC 3 54 + 55 + #endif
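For illustration, the token-pasting accessors above are what the driver uses throughout omap.c; a typical interrupt-handler sequence reads and then acknowledges the status register like this (the host pointer is the driver's per-controller state):

    u16 status = OMAP_MMC_READ(host->base, STAT);   /* __raw_readw(host->base + 0x10) */
    OMAP_MMC_WRITE(host->base, STAT, status);       /* write back to clear the reported events */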