Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at 77b2555b52a894a2e39a42e43d993df875c46a6a 2075 lines 41 kB view raw
1/* 2 * linux/drivers/mmc/wbsd.c - Winbond W83L51xD SD/MMC driver 3 * 4 * Copyright (C) 2004-2005 Pierre Ossman, All Rights Reserved. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 * 10 * 11 * Warning! 12 * 13 * Changes to the FIFO system should be done with extreme care since 14 * the hardware is full of bugs related to the FIFO. Known issues are: 15 * 16 * - FIFO size field in FSR is always zero. 17 * 18 * - FIFO interrupts tend not to work as they should. Interrupts are 19 * triggered only for full/empty events, not for threshold values. 20 * 21 * - On APIC systems the FIFO empty interrupt is sometimes lost. 22 */ 23 24#include <linux/config.h> 25#include <linux/module.h> 26#include <linux/moduleparam.h> 27#include <linux/init.h> 28#include <linux/ioport.h> 29#include <linux/device.h> 30#include <linux/interrupt.h> 31#include <linux/dma-mapping.h> 32#include <linux/delay.h> 33#include <linux/pnp.h> 34#include <linux/highmem.h> 35#include <linux/mmc/host.h> 36#include <linux/mmc/protocol.h> 37 38#include <asm/io.h> 39#include <asm/dma.h> 40#include <asm/scatterlist.h> 41 42#include "wbsd.h" 43 44#define DRIVER_NAME "wbsd" 45#define DRIVER_VERSION "1.4" 46 47#ifdef CONFIG_MMC_DEBUG 48#define DBG(x...) \ 49 printk(KERN_DEBUG DRIVER_NAME ": " x) 50#define DBGF(f, x...) \ 51 printk(KERN_DEBUG DRIVER_NAME " [%s()]: " f, __func__ , ##x) 52#else 53#define DBG(x...) do { } while (0) 54#define DBGF(x...) do { } while (0) 55#endif 56 57/* 58 * Device resources 59 */ 60 61#ifdef CONFIG_PNP 62 63static const struct pnp_device_id pnp_dev_table[] = { 64 { "WEC0517", 0 }, 65 { "WEC0518", 0 }, 66 { "", 0 }, 67}; 68 69MODULE_DEVICE_TABLE(pnp, pnp_dev_table); 70 71#endif /* CONFIG_PNP */ 72 73static const int config_ports[] = { 0x2E, 0x4E }; 74static const int unlock_codes[] = { 0x83, 0x87 }; 75 76static const int valid_ids[] = { 77 0x7112, 78 }; 79 80#ifdef CONFIG_PNP 81static unsigned int nopnp = 0; 82#else 83static const unsigned int nopnp = 1; 84#endif 85static unsigned int io = 0x248; 86static unsigned int irq = 6; 87static int dma = 2; 88 89/* 90 * Basic functions 91 */ 92 93static inline void wbsd_unlock_config(struct wbsd_host* host) 94{ 95 BUG_ON(host->config == 0); 96 97 outb(host->unlock_code, host->config); 98 outb(host->unlock_code, host->config); 99} 100 101static inline void wbsd_lock_config(struct wbsd_host* host) 102{ 103 BUG_ON(host->config == 0); 104 105 outb(LOCK_CODE, host->config); 106} 107 108static inline void wbsd_write_config(struct wbsd_host* host, u8 reg, u8 value) 109{ 110 BUG_ON(host->config == 0); 111 112 outb(reg, host->config); 113 outb(value, host->config + 1); 114} 115 116static inline u8 wbsd_read_config(struct wbsd_host* host, u8 reg) 117{ 118 BUG_ON(host->config == 0); 119 120 outb(reg, host->config); 121 return inb(host->config + 1); 122} 123 124static inline void wbsd_write_index(struct wbsd_host* host, u8 index, u8 value) 125{ 126 outb(index, host->base + WBSD_IDXR); 127 outb(value, host->base + WBSD_DATAR); 128} 129 130static inline u8 wbsd_read_index(struct wbsd_host* host, u8 index) 131{ 132 outb(index, host->base + WBSD_IDXR); 133 return inb(host->base + WBSD_DATAR); 134} 135 136/* 137 * Common routines 138 */ 139 140static void wbsd_init_device(struct wbsd_host* host) 141{ 142 u8 setup, ier; 143 144 /* 145 * Reset chip (SD/MMC part) and fifo. 146 */ 147 setup = wbsd_read_index(host, WBSD_IDX_SETUP); 148 setup |= WBSD_FIFO_RESET | WBSD_SOFT_RESET; 149 wbsd_write_index(host, WBSD_IDX_SETUP, setup); 150 151 /* 152 * Set DAT3 to input 153 */ 154 setup &= ~WBSD_DAT3_H; 155 wbsd_write_index(host, WBSD_IDX_SETUP, setup); 156 host->flags &= ~WBSD_FIGNORE_DETECT; 157 158 /* 159 * Read back default clock. 160 */ 161 host->clk = wbsd_read_index(host, WBSD_IDX_CLK); 162 163 /* 164 * Power down port. 165 */ 166 outb(WBSD_POWER_N, host->base + WBSD_CSR); 167 168 /* 169 * Set maximum timeout. 170 */ 171 wbsd_write_index(host, WBSD_IDX_TAAC, 0x7F); 172 173 /* 174 * Test for card presence 175 */ 176 if (inb(host->base + WBSD_CSR) & WBSD_CARDPRESENT) 177 host->flags |= WBSD_FCARD_PRESENT; 178 else 179 host->flags &= ~WBSD_FCARD_PRESENT; 180 181 /* 182 * Enable interesting interrupts. 183 */ 184 ier = 0; 185 ier |= WBSD_EINT_CARD; 186 ier |= WBSD_EINT_FIFO_THRE; 187 ier |= WBSD_EINT_CCRC; 188 ier |= WBSD_EINT_TIMEOUT; 189 ier |= WBSD_EINT_CRC; 190 ier |= WBSD_EINT_TC; 191 192 outb(ier, host->base + WBSD_EIR); 193 194 /* 195 * Clear interrupts. 196 */ 197 inb(host->base + WBSD_ISR); 198} 199 200static void wbsd_reset(struct wbsd_host* host) 201{ 202 u8 setup; 203 204 printk(KERN_ERR DRIVER_NAME ": Resetting chip\n"); 205 206 /* 207 * Soft reset of chip (SD/MMC part). 208 */ 209 setup = wbsd_read_index(host, WBSD_IDX_SETUP); 210 setup |= WBSD_SOFT_RESET; 211 wbsd_write_index(host, WBSD_IDX_SETUP, setup); 212} 213 214static void wbsd_request_end(struct wbsd_host* host, struct mmc_request* mrq) 215{ 216 unsigned long dmaflags; 217 218 DBGF("Ending request, cmd (%x)\n", mrq->cmd->opcode); 219 220 if (host->dma >= 0) 221 { 222 /* 223 * Release ISA DMA controller. 224 */ 225 dmaflags = claim_dma_lock(); 226 disable_dma(host->dma); 227 clear_dma_ff(host->dma); 228 release_dma_lock(dmaflags); 229 230 /* 231 * Disable DMA on host. 232 */ 233 wbsd_write_index(host, WBSD_IDX_DMA, 0); 234 } 235 236 host->mrq = NULL; 237 238 /* 239 * MMC layer might call back into the driver so first unlock. 240 */ 241 spin_unlock(&host->lock); 242 mmc_request_done(host->mmc, mrq); 243 spin_lock(&host->lock); 244} 245 246/* 247 * Scatter/gather functions 248 */ 249 250static inline void wbsd_init_sg(struct wbsd_host* host, struct mmc_data* data) 251{ 252 /* 253 * Get info. about SG list from data structure. 254 */ 255 host->cur_sg = data->sg; 256 host->num_sg = data->sg_len; 257 258 host->offset = 0; 259 host->remain = host->cur_sg->length; 260} 261 262static inline int wbsd_next_sg(struct wbsd_host* host) 263{ 264 /* 265 * Skip to next SG entry. 266 */ 267 host->cur_sg++; 268 host->num_sg--; 269 270 /* 271 * Any entries left? 272 */ 273 if (host->num_sg > 0) 274 { 275 host->offset = 0; 276 host->remain = host->cur_sg->length; 277 } 278 279 return host->num_sg; 280} 281 282static inline char* wbsd_kmap_sg(struct wbsd_host* host) 283{ 284 host->mapped_sg = kmap_atomic(host->cur_sg->page, KM_BIO_SRC_IRQ) + 285 host->cur_sg->offset; 286 return host->mapped_sg; 287} 288 289static inline void wbsd_kunmap_sg(struct wbsd_host* host) 290{ 291 kunmap_atomic(host->mapped_sg, KM_BIO_SRC_IRQ); 292} 293 294static inline void wbsd_sg_to_dma(struct wbsd_host* host, struct mmc_data* data) 295{ 296 unsigned int len, i, size; 297 struct scatterlist* sg; 298 char* dmabuf = host->dma_buffer; 299 char* sgbuf; 300 301 size = host->size; 302 303 sg = data->sg; 304 len = data->sg_len; 305 306 /* 307 * Just loop through all entries. Size might not 308 * be the entire list though so make sure that 309 * we do not transfer too much. 310 */ 311 for (i = 0;i < len;i++) 312 { 313 sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset; 314 if (size < sg[i].length) 315 memcpy(dmabuf, sgbuf, size); 316 else 317 memcpy(dmabuf, sgbuf, sg[i].length); 318 kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ); 319 dmabuf += sg[i].length; 320 321 if (size < sg[i].length) 322 size = 0; 323 else 324 size -= sg[i].length; 325 326 if (size == 0) 327 break; 328 } 329 330 /* 331 * Check that we didn't get a request to transfer 332 * more data than can fit into the SG list. 333 */ 334 335 BUG_ON(size != 0); 336 337 host->size -= size; 338} 339 340static inline void wbsd_dma_to_sg(struct wbsd_host* host, struct mmc_data* data) 341{ 342 unsigned int len, i, size; 343 struct scatterlist* sg; 344 char* dmabuf = host->dma_buffer; 345 char* sgbuf; 346 347 size = host->size; 348 349 sg = data->sg; 350 len = data->sg_len; 351 352 /* 353 * Just loop through all entries. Size might not 354 * be the entire list though so make sure that 355 * we do not transfer too much. 356 */ 357 for (i = 0;i < len;i++) 358 { 359 sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset; 360 if (size < sg[i].length) 361 memcpy(sgbuf, dmabuf, size); 362 else 363 memcpy(sgbuf, dmabuf, sg[i].length); 364 kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ); 365 dmabuf += sg[i].length; 366 367 if (size < sg[i].length) 368 size = 0; 369 else 370 size -= sg[i].length; 371 372 if (size == 0) 373 break; 374 } 375 376 /* 377 * Check that we didn't get a request to transfer 378 * more data than can fit into the SG list. 379 */ 380 381 BUG_ON(size != 0); 382 383 host->size -= size; 384} 385 386/* 387 * Command handling 388 */ 389 390static inline void wbsd_get_short_reply(struct wbsd_host* host, 391 struct mmc_command* cmd) 392{ 393 /* 394 * Correct response type? 395 */ 396 if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_SHORT) 397 { 398 cmd->error = MMC_ERR_INVALID; 399 return; 400 } 401 402 cmd->resp[0] = 403 wbsd_read_index(host, WBSD_IDX_RESP12) << 24; 404 cmd->resp[0] |= 405 wbsd_read_index(host, WBSD_IDX_RESP13) << 16; 406 cmd->resp[0] |= 407 wbsd_read_index(host, WBSD_IDX_RESP14) << 8; 408 cmd->resp[0] |= 409 wbsd_read_index(host, WBSD_IDX_RESP15) << 0; 410 cmd->resp[1] = 411 wbsd_read_index(host, WBSD_IDX_RESP16) << 24; 412} 413 414static inline void wbsd_get_long_reply(struct wbsd_host* host, 415 struct mmc_command* cmd) 416{ 417 int i; 418 419 /* 420 * Correct response type? 421 */ 422 if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_LONG) 423 { 424 cmd->error = MMC_ERR_INVALID; 425 return; 426 } 427 428 for (i = 0;i < 4;i++) 429 { 430 cmd->resp[i] = 431 wbsd_read_index(host, WBSD_IDX_RESP1 + i * 4) << 24; 432 cmd->resp[i] |= 433 wbsd_read_index(host, WBSD_IDX_RESP2 + i * 4) << 16; 434 cmd->resp[i] |= 435 wbsd_read_index(host, WBSD_IDX_RESP3 + i * 4) << 8; 436 cmd->resp[i] |= 437 wbsd_read_index(host, WBSD_IDX_RESP4 + i * 4) << 0; 438 } 439} 440 441static void wbsd_send_command(struct wbsd_host* host, struct mmc_command* cmd) 442{ 443 int i; 444 u8 status, isr; 445 446 DBGF("Sending cmd (%x)\n", cmd->opcode); 447 448 /* 449 * Clear accumulated ISR. The interrupt routine 450 * will fill this one with events that occur during 451 * transfer. 452 */ 453 host->isr = 0; 454 455 /* 456 * Send the command (CRC calculated by host). 457 */ 458 outb(cmd->opcode, host->base + WBSD_CMDR); 459 for (i = 3;i >= 0;i--) 460 outb((cmd->arg >> (i * 8)) & 0xff, host->base + WBSD_CMDR); 461 462 cmd->error = MMC_ERR_NONE; 463 464 /* 465 * Wait for the request to complete. 466 */ 467 do { 468 status = wbsd_read_index(host, WBSD_IDX_STATUS); 469 } while (status & WBSD_CARDTRAFFIC); 470 471 /* 472 * Do we expect a reply? 473 */ 474 if ((cmd->flags & MMC_RSP_MASK) != MMC_RSP_NONE) 475 { 476 /* 477 * Read back status. 478 */ 479 isr = host->isr; 480 481 /* Card removed? */ 482 if (isr & WBSD_INT_CARD) 483 cmd->error = MMC_ERR_TIMEOUT; 484 /* Timeout? */ 485 else if (isr & WBSD_INT_TIMEOUT) 486 cmd->error = MMC_ERR_TIMEOUT; 487 /* CRC? */ 488 else if ((cmd->flags & MMC_RSP_CRC) && (isr & WBSD_INT_CRC)) 489 cmd->error = MMC_ERR_BADCRC; 490 /* All ok */ 491 else 492 { 493 if ((cmd->flags & MMC_RSP_MASK) == MMC_RSP_SHORT) 494 wbsd_get_short_reply(host, cmd); 495 else 496 wbsd_get_long_reply(host, cmd); 497 } 498 } 499 500 DBGF("Sent cmd (%x), res %d\n", cmd->opcode, cmd->error); 501} 502 503/* 504 * Data functions 505 */ 506 507static void wbsd_empty_fifo(struct wbsd_host* host) 508{ 509 struct mmc_data* data = host->mrq->cmd->data; 510 char* buffer; 511 int i, fsr, fifo; 512 513 /* 514 * Handle excessive data. 515 */ 516 if (data->bytes_xfered == host->size) 517 return; 518 519 buffer = wbsd_kmap_sg(host) + host->offset; 520 521 /* 522 * Drain the fifo. This has a tendency to loop longer 523 * than the FIFO length (usually one block). 524 */ 525 while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_EMPTY)) 526 { 527 /* 528 * The size field in the FSR is broken so we have to 529 * do some guessing. 530 */ 531 if (fsr & WBSD_FIFO_FULL) 532 fifo = 16; 533 else if (fsr & WBSD_FIFO_FUTHRE) 534 fifo = 8; 535 else 536 fifo = 1; 537 538 for (i = 0;i < fifo;i++) 539 { 540 *buffer = inb(host->base + WBSD_DFR); 541 buffer++; 542 host->offset++; 543 host->remain--; 544 545 data->bytes_xfered++; 546 547 /* 548 * Transfer done? 549 */ 550 if (data->bytes_xfered == host->size) 551 { 552 wbsd_kunmap_sg(host); 553 return; 554 } 555 556 /* 557 * End of scatter list entry? 558 */ 559 if (host->remain == 0) 560 { 561 wbsd_kunmap_sg(host); 562 563 /* 564 * Get next entry. Check if last. 565 */ 566 if (!wbsd_next_sg(host)) 567 { 568 /* 569 * We should never reach this point. 570 * It means that we're trying to 571 * transfer more blocks than can fit 572 * into the scatter list. 573 */ 574 BUG_ON(1); 575 576 host->size = data->bytes_xfered; 577 578 return; 579 } 580 581 buffer = wbsd_kmap_sg(host); 582 } 583 } 584 } 585 586 wbsd_kunmap_sg(host); 587 588 /* 589 * This is a very dirty hack to solve a 590 * hardware problem. The chip doesn't trigger 591 * FIFO threshold interrupts properly. 592 */ 593 if ((host->size - data->bytes_xfered) < 16) 594 tasklet_schedule(&host->fifo_tasklet); 595} 596 597static void wbsd_fill_fifo(struct wbsd_host* host) 598{ 599 struct mmc_data* data = host->mrq->cmd->data; 600 char* buffer; 601 int i, fsr, fifo; 602 603 /* 604 * Check that we aren't being called after the 605 * entire buffer has been transfered. 606 */ 607 if (data->bytes_xfered == host->size) 608 return; 609 610 buffer = wbsd_kmap_sg(host) + host->offset; 611 612 /* 613 * Fill the fifo. This has a tendency to loop longer 614 * than the FIFO length (usually one block). 615 */ 616 while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_FULL)) 617 { 618 /* 619 * The size field in the FSR is broken so we have to 620 * do some guessing. 621 */ 622 if (fsr & WBSD_FIFO_EMPTY) 623 fifo = 0; 624 else if (fsr & WBSD_FIFO_EMTHRE) 625 fifo = 8; 626 else 627 fifo = 15; 628 629 for (i = 16;i > fifo;i--) 630 { 631 outb(*buffer, host->base + WBSD_DFR); 632 buffer++; 633 host->offset++; 634 host->remain--; 635 636 data->bytes_xfered++; 637 638 /* 639 * Transfer done? 640 */ 641 if (data->bytes_xfered == host->size) 642 { 643 wbsd_kunmap_sg(host); 644 return; 645 } 646 647 /* 648 * End of scatter list entry? 649 */ 650 if (host->remain == 0) 651 { 652 wbsd_kunmap_sg(host); 653 654 /* 655 * Get next entry. Check if last. 656 */ 657 if (!wbsd_next_sg(host)) 658 { 659 /* 660 * We should never reach this point. 661 * It means that we're trying to 662 * transfer more blocks than can fit 663 * into the scatter list. 664 */ 665 BUG_ON(1); 666 667 host->size = data->bytes_xfered; 668 669 return; 670 } 671 672 buffer = wbsd_kmap_sg(host); 673 } 674 } 675 } 676 677 wbsd_kunmap_sg(host); 678 679 /* 680 * The controller stops sending interrupts for 681 * 'FIFO empty' under certain conditions. So we 682 * need to be a bit more pro-active. 683 */ 684 tasklet_schedule(&host->fifo_tasklet); 685} 686 687static void wbsd_prepare_data(struct wbsd_host* host, struct mmc_data* data) 688{ 689 u16 blksize; 690 u8 setup; 691 unsigned long dmaflags; 692 693 DBGF("blksz %04x blks %04x flags %08x\n", 694 1 << data->blksz_bits, data->blocks, data->flags); 695 DBGF("tsac %d ms nsac %d clk\n", 696 data->timeout_ns / 1000000, data->timeout_clks); 697 698 /* 699 * Calculate size. 700 */ 701 host->size = data->blocks << data->blksz_bits; 702 703 /* 704 * Check timeout values for overflow. 705 * (Yes, some cards cause this value to overflow). 706 */ 707 if (data->timeout_ns > 127000000) 708 wbsd_write_index(host, WBSD_IDX_TAAC, 127); 709 else 710 wbsd_write_index(host, WBSD_IDX_TAAC, data->timeout_ns/1000000); 711 712 if (data->timeout_clks > 255) 713 wbsd_write_index(host, WBSD_IDX_NSAC, 255); 714 else 715 wbsd_write_index(host, WBSD_IDX_NSAC, data->timeout_clks); 716 717 /* 718 * Inform the chip of how large blocks will be 719 * sent. It needs this to determine when to 720 * calculate CRC. 721 * 722 * Space for CRC must be included in the size. 723 * Two bytes are needed for each data line. 724 */ 725 if (host->bus_width == MMC_BUS_WIDTH_1) 726 { 727 blksize = (1 << data->blksz_bits) + 2; 728 729 wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0); 730 wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF); 731 } 732 else if (host->bus_width == MMC_BUS_WIDTH_4) 733 { 734 blksize = (1 << data->blksz_bits) + 2 * 4; 735 736 wbsd_write_index(host, WBSD_IDX_PBSMSB, ((blksize >> 4) & 0xF0) 737 | WBSD_DATA_WIDTH); 738 wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF); 739 } 740 else 741 { 742 data->error = MMC_ERR_INVALID; 743 return; 744 } 745 746 /* 747 * Clear the FIFO. This is needed even for DMA 748 * transfers since the chip still uses the FIFO 749 * internally. 750 */ 751 setup = wbsd_read_index(host, WBSD_IDX_SETUP); 752 setup |= WBSD_FIFO_RESET; 753 wbsd_write_index(host, WBSD_IDX_SETUP, setup); 754 755 /* 756 * DMA transfer? 757 */ 758 if (host->dma >= 0) 759 { 760 /* 761 * The buffer for DMA is only 64 kB. 762 */ 763 BUG_ON(host->size > 0x10000); 764 if (host->size > 0x10000) 765 { 766 data->error = MMC_ERR_INVALID; 767 return; 768 } 769 770 /* 771 * Transfer data from the SG list to 772 * the DMA buffer. 773 */ 774 if (data->flags & MMC_DATA_WRITE) 775 wbsd_sg_to_dma(host, data); 776 777 /* 778 * Initialise the ISA DMA controller. 779 */ 780 dmaflags = claim_dma_lock(); 781 disable_dma(host->dma); 782 clear_dma_ff(host->dma); 783 if (data->flags & MMC_DATA_READ) 784 set_dma_mode(host->dma, DMA_MODE_READ & ~0x40); 785 else 786 set_dma_mode(host->dma, DMA_MODE_WRITE & ~0x40); 787 set_dma_addr(host->dma, host->dma_addr); 788 set_dma_count(host->dma, host->size); 789 790 enable_dma(host->dma); 791 release_dma_lock(dmaflags); 792 793 /* 794 * Enable DMA on the host. 795 */ 796 wbsd_write_index(host, WBSD_IDX_DMA, WBSD_DMA_ENABLE); 797 } 798 else 799 { 800 /* 801 * This flag is used to keep printk 802 * output to a minimum. 803 */ 804 host->firsterr = 1; 805 806 /* 807 * Initialise the SG list. 808 */ 809 wbsd_init_sg(host, data); 810 811 /* 812 * Turn off DMA. 813 */ 814 wbsd_write_index(host, WBSD_IDX_DMA, 0); 815 816 /* 817 * Set up FIFO threshold levels (and fill 818 * buffer if doing a write). 819 */ 820 if (data->flags & MMC_DATA_READ) 821 { 822 wbsd_write_index(host, WBSD_IDX_FIFOEN, 823 WBSD_FIFOEN_FULL | 8); 824 } 825 else 826 { 827 wbsd_write_index(host, WBSD_IDX_FIFOEN, 828 WBSD_FIFOEN_EMPTY | 8); 829 wbsd_fill_fifo(host); 830 } 831 } 832 833 data->error = MMC_ERR_NONE; 834} 835 836static void wbsd_finish_data(struct wbsd_host* host, struct mmc_data* data) 837{ 838 unsigned long dmaflags; 839 int count; 840 u8 status; 841 842 WARN_ON(host->mrq == NULL); 843 844 /* 845 * Send a stop command if needed. 846 */ 847 if (data->stop) 848 wbsd_send_command(host, data->stop); 849 850 /* 851 * Wait for the controller to leave data 852 * transfer state. 853 */ 854 do 855 { 856 status = wbsd_read_index(host, WBSD_IDX_STATUS); 857 } while (status & (WBSD_BLOCK_READ | WBSD_BLOCK_WRITE)); 858 859 /* 860 * DMA transfer? 861 */ 862 if (host->dma >= 0) 863 { 864 /* 865 * Disable DMA on the host. 866 */ 867 wbsd_write_index(host, WBSD_IDX_DMA, 0); 868 869 /* 870 * Turn of ISA DMA controller. 871 */ 872 dmaflags = claim_dma_lock(); 873 disable_dma(host->dma); 874 clear_dma_ff(host->dma); 875 count = get_dma_residue(host->dma); 876 release_dma_lock(dmaflags); 877 878 /* 879 * Any leftover data? 880 */ 881 if (count) 882 { 883 printk(KERN_ERR DRIVER_NAME ": Incomplete DMA " 884 "transfer. %d bytes left.\n", count); 885 886 data->error = MMC_ERR_FAILED; 887 } 888 else 889 { 890 /* 891 * Transfer data from DMA buffer to 892 * SG list. 893 */ 894 if (data->flags & MMC_DATA_READ) 895 wbsd_dma_to_sg(host, data); 896 897 data->bytes_xfered = host->size; 898 } 899 } 900 901 DBGF("Ending data transfer (%d bytes)\n", data->bytes_xfered); 902 903 wbsd_request_end(host, host->mrq); 904} 905 906/*****************************************************************************\ 907 * * 908 * MMC layer callbacks * 909 * * 910\*****************************************************************************/ 911 912static void wbsd_request(struct mmc_host* mmc, struct mmc_request* mrq) 913{ 914 struct wbsd_host* host = mmc_priv(mmc); 915 struct mmc_command* cmd; 916 917 /* 918 * Disable tasklets to avoid a deadlock. 919 */ 920 spin_lock_bh(&host->lock); 921 922 BUG_ON(host->mrq != NULL); 923 924 cmd = mrq->cmd; 925 926 host->mrq = mrq; 927 928 /* 929 * If there is no card in the slot then 930 * timeout immediatly. 931 */ 932 if (!(host->flags & WBSD_FCARD_PRESENT)) 933 { 934 cmd->error = MMC_ERR_TIMEOUT; 935 goto done; 936 } 937 938 /* 939 * Does the request include data? 940 */ 941 if (cmd->data) 942 { 943 wbsd_prepare_data(host, cmd->data); 944 945 if (cmd->data->error != MMC_ERR_NONE) 946 goto done; 947 } 948 949 wbsd_send_command(host, cmd); 950 951 /* 952 * If this is a data transfer the request 953 * will be finished after the data has 954 * transfered. 955 */ 956 if (cmd->data && (cmd->error == MMC_ERR_NONE)) 957 { 958 /* 959 * Dirty fix for hardware bug. 960 */ 961 if (host->dma == -1) 962 tasklet_schedule(&host->fifo_tasklet); 963 964 spin_unlock_bh(&host->lock); 965 966 return; 967 } 968 969done: 970 wbsd_request_end(host, mrq); 971 972 spin_unlock_bh(&host->lock); 973} 974 975static void wbsd_set_ios(struct mmc_host* mmc, struct mmc_ios* ios) 976{ 977 struct wbsd_host* host = mmc_priv(mmc); 978 u8 clk, setup, pwr; 979 980 DBGF("clock %uHz busmode %u powermode %u cs %u Vdd %u width %u\n", 981 ios->clock, ios->bus_mode, ios->power_mode, ios->chip_select, 982 ios->vdd, ios->bus_width); 983 984 spin_lock_bh(&host->lock); 985 986 /* 987 * Reset the chip on each power off. 988 * Should clear out any weird states. 989 */ 990 if (ios->power_mode == MMC_POWER_OFF) 991 wbsd_init_device(host); 992 993 if (ios->clock >= 24000000) 994 clk = WBSD_CLK_24M; 995 else if (ios->clock >= 16000000) 996 clk = WBSD_CLK_16M; 997 else if (ios->clock >= 12000000) 998 clk = WBSD_CLK_12M; 999 else 1000 clk = WBSD_CLK_375K; 1001 1002 /* 1003 * Only write to the clock register when 1004 * there is an actual change. 1005 */ 1006 if (clk != host->clk) 1007 { 1008 wbsd_write_index(host, WBSD_IDX_CLK, clk); 1009 host->clk = clk; 1010 } 1011 1012 /* 1013 * Power up card. 1014 */ 1015 if (ios->power_mode != MMC_POWER_OFF) 1016 { 1017 pwr = inb(host->base + WBSD_CSR); 1018 pwr &= ~WBSD_POWER_N; 1019 outb(pwr, host->base + WBSD_CSR); 1020 } 1021 1022 /* 1023 * MMC cards need to have pin 1 high during init. 1024 * It wreaks havoc with the card detection though so 1025 * that needs to be disabled. 1026 */ 1027 setup = wbsd_read_index(host, WBSD_IDX_SETUP); 1028 if (ios->chip_select == MMC_CS_HIGH) 1029 { 1030 BUG_ON(ios->bus_width != MMC_BUS_WIDTH_1); 1031 setup |= WBSD_DAT3_H; 1032 host->flags |= WBSD_FIGNORE_DETECT; 1033 } 1034 else 1035 { 1036 setup &= ~WBSD_DAT3_H; 1037 1038 /* 1039 * We cannot resume card detection immediatly 1040 * because of capacitance and delays in the chip. 1041 */ 1042 mod_timer(&host->ignore_timer, jiffies + HZ/100); 1043 } 1044 wbsd_write_index(host, WBSD_IDX_SETUP, setup); 1045 1046 /* 1047 * Store bus width for later. Will be used when 1048 * setting up the data transfer. 1049 */ 1050 host->bus_width = ios->bus_width; 1051 1052 spin_unlock_bh(&host->lock); 1053} 1054 1055static int wbsd_get_ro(struct mmc_host* mmc) 1056{ 1057 struct wbsd_host* host = mmc_priv(mmc); 1058 u8 csr; 1059 1060 spin_lock_bh(&host->lock); 1061 1062 csr = inb(host->base + WBSD_CSR); 1063 csr |= WBSD_MSLED; 1064 outb(csr, host->base + WBSD_CSR); 1065 1066 mdelay(1); 1067 1068 csr = inb(host->base + WBSD_CSR); 1069 csr &= ~WBSD_MSLED; 1070 outb(csr, host->base + WBSD_CSR); 1071 1072 spin_unlock_bh(&host->lock); 1073 1074 return csr & WBSD_WRPT; 1075} 1076 1077static struct mmc_host_ops wbsd_ops = { 1078 .request = wbsd_request, 1079 .set_ios = wbsd_set_ios, 1080 .get_ro = wbsd_get_ro, 1081}; 1082 1083/*****************************************************************************\ 1084 * * 1085 * Interrupt handling * 1086 * * 1087\*****************************************************************************/ 1088 1089/* 1090 * Helper function to reset detection ignore 1091 */ 1092 1093static void wbsd_reset_ignore(unsigned long data) 1094{ 1095 struct wbsd_host *host = (struct wbsd_host*)data; 1096 1097 BUG_ON(host == NULL); 1098 1099 DBG("Resetting card detection ignore\n"); 1100 1101 spin_lock_bh(&host->lock); 1102 1103 host->flags &= ~WBSD_FIGNORE_DETECT; 1104 1105 /* 1106 * Card status might have changed during the 1107 * blackout. 1108 */ 1109 tasklet_schedule(&host->card_tasklet); 1110 1111 spin_unlock_bh(&host->lock); 1112} 1113 1114/* 1115 * Tasklets 1116 */ 1117 1118static inline struct mmc_data* wbsd_get_data(struct wbsd_host* host) 1119{ 1120 WARN_ON(!host->mrq); 1121 if (!host->mrq) 1122 return NULL; 1123 1124 WARN_ON(!host->mrq->cmd); 1125 if (!host->mrq->cmd) 1126 return NULL; 1127 1128 WARN_ON(!host->mrq->cmd->data); 1129 if (!host->mrq->cmd->data) 1130 return NULL; 1131 1132 return host->mrq->cmd->data; 1133} 1134 1135static void wbsd_tasklet_card(unsigned long param) 1136{ 1137 struct wbsd_host* host = (struct wbsd_host*)param; 1138 u8 csr; 1139 int delay = -1; 1140 1141 spin_lock(&host->lock); 1142 1143 if (host->flags & WBSD_FIGNORE_DETECT) 1144 { 1145 spin_unlock(&host->lock); 1146 return; 1147 } 1148 1149 csr = inb(host->base + WBSD_CSR); 1150 WARN_ON(csr == 0xff); 1151 1152 if (csr & WBSD_CARDPRESENT) 1153 { 1154 if (!(host->flags & WBSD_FCARD_PRESENT)) 1155 { 1156 DBG("Card inserted\n"); 1157 host->flags |= WBSD_FCARD_PRESENT; 1158 1159 delay = 500; 1160 } 1161 } 1162 else if (host->flags & WBSD_FCARD_PRESENT) 1163 { 1164 DBG("Card removed\n"); 1165 host->flags &= ~WBSD_FCARD_PRESENT; 1166 1167 if (host->mrq) 1168 { 1169 printk(KERN_ERR DRIVER_NAME 1170 ": Card removed during transfer!\n"); 1171 wbsd_reset(host); 1172 1173 host->mrq->cmd->error = MMC_ERR_FAILED; 1174 tasklet_schedule(&host->finish_tasklet); 1175 } 1176 1177 delay = 0; 1178 } 1179 1180 /* 1181 * Unlock first since we might get a call back. 1182 */ 1183 1184 spin_unlock(&host->lock); 1185 1186 if (delay != -1) 1187 mmc_detect_change(host->mmc, msecs_to_jiffies(delay)); 1188} 1189 1190static void wbsd_tasklet_fifo(unsigned long param) 1191{ 1192 struct wbsd_host* host = (struct wbsd_host*)param; 1193 struct mmc_data* data; 1194 1195 spin_lock(&host->lock); 1196 1197 if (!host->mrq) 1198 goto end; 1199 1200 data = wbsd_get_data(host); 1201 if (!data) 1202 goto end; 1203 1204 if (data->flags & MMC_DATA_WRITE) 1205 wbsd_fill_fifo(host); 1206 else 1207 wbsd_empty_fifo(host); 1208 1209 /* 1210 * Done? 1211 */ 1212 if (host->size == data->bytes_xfered) 1213 { 1214 wbsd_write_index(host, WBSD_IDX_FIFOEN, 0); 1215 tasklet_schedule(&host->finish_tasklet); 1216 } 1217 1218end: 1219 spin_unlock(&host->lock); 1220} 1221 1222static void wbsd_tasklet_crc(unsigned long param) 1223{ 1224 struct wbsd_host* host = (struct wbsd_host*)param; 1225 struct mmc_data* data; 1226 1227 spin_lock(&host->lock); 1228 1229 if (!host->mrq) 1230 goto end; 1231 1232 data = wbsd_get_data(host); 1233 if (!data) 1234 goto end; 1235 1236 DBGF("CRC error\n"); 1237 1238 data->error = MMC_ERR_BADCRC; 1239 1240 tasklet_schedule(&host->finish_tasklet); 1241 1242end: 1243 spin_unlock(&host->lock); 1244} 1245 1246static void wbsd_tasklet_timeout(unsigned long param) 1247{ 1248 struct wbsd_host* host = (struct wbsd_host*)param; 1249 struct mmc_data* data; 1250 1251 spin_lock(&host->lock); 1252 1253 if (!host->mrq) 1254 goto end; 1255 1256 data = wbsd_get_data(host); 1257 if (!data) 1258 goto end; 1259 1260 DBGF("Timeout\n"); 1261 1262 data->error = MMC_ERR_TIMEOUT; 1263 1264 tasklet_schedule(&host->finish_tasklet); 1265 1266end: 1267 spin_unlock(&host->lock); 1268} 1269 1270static void wbsd_tasklet_finish(unsigned long param) 1271{ 1272 struct wbsd_host* host = (struct wbsd_host*)param; 1273 struct mmc_data* data; 1274 1275 spin_lock(&host->lock); 1276 1277 WARN_ON(!host->mrq); 1278 if (!host->mrq) 1279 goto end; 1280 1281 data = wbsd_get_data(host); 1282 if (!data) 1283 goto end; 1284 1285 wbsd_finish_data(host, data); 1286 1287end: 1288 spin_unlock(&host->lock); 1289} 1290 1291static void wbsd_tasklet_block(unsigned long param) 1292{ 1293 struct wbsd_host* host = (struct wbsd_host*)param; 1294 struct mmc_data* data; 1295 1296 spin_lock(&host->lock); 1297 1298 if ((wbsd_read_index(host, WBSD_IDX_CRCSTATUS) & WBSD_CRC_MASK) != 1299 WBSD_CRC_OK) 1300 { 1301 data = wbsd_get_data(host); 1302 if (!data) 1303 goto end; 1304 1305 DBGF("CRC error\n"); 1306 1307 data->error = MMC_ERR_BADCRC; 1308 1309 tasklet_schedule(&host->finish_tasklet); 1310 } 1311 1312end: 1313 spin_unlock(&host->lock); 1314} 1315 1316/* 1317 * Interrupt handling 1318 */ 1319 1320static irqreturn_t wbsd_irq(int irq, void *dev_id, struct pt_regs *regs) 1321{ 1322 struct wbsd_host* host = dev_id; 1323 int isr; 1324 1325 isr = inb(host->base + WBSD_ISR); 1326 1327 /* 1328 * Was it actually our hardware that caused the interrupt? 1329 */ 1330 if (isr == 0xff || isr == 0x00) 1331 return IRQ_NONE; 1332 1333 host->isr |= isr; 1334 1335 /* 1336 * Schedule tasklets as needed. 1337 */ 1338 if (isr & WBSD_INT_CARD) 1339 tasklet_schedule(&host->card_tasklet); 1340 if (isr & WBSD_INT_FIFO_THRE) 1341 tasklet_schedule(&host->fifo_tasklet); 1342 if (isr & WBSD_INT_CRC) 1343 tasklet_hi_schedule(&host->crc_tasklet); 1344 if (isr & WBSD_INT_TIMEOUT) 1345 tasklet_hi_schedule(&host->timeout_tasklet); 1346 if (isr & WBSD_INT_BUSYEND) 1347 tasklet_hi_schedule(&host->block_tasklet); 1348 if (isr & WBSD_INT_TC) 1349 tasklet_schedule(&host->finish_tasklet); 1350 1351 return IRQ_HANDLED; 1352} 1353 1354/*****************************************************************************\ 1355 * * 1356 * Device initialisation and shutdown * 1357 * * 1358\*****************************************************************************/ 1359 1360/* 1361 * Allocate/free MMC structure. 1362 */ 1363 1364static int __devinit wbsd_alloc_mmc(struct device* dev) 1365{ 1366 struct mmc_host* mmc; 1367 struct wbsd_host* host; 1368 1369 /* 1370 * Allocate MMC structure. 1371 */ 1372 mmc = mmc_alloc_host(sizeof(struct wbsd_host), dev); 1373 if (!mmc) 1374 return -ENOMEM; 1375 1376 host = mmc_priv(mmc); 1377 host->mmc = mmc; 1378 1379 host->dma = -1; 1380 1381 /* 1382 * Set host parameters. 1383 */ 1384 mmc->ops = &wbsd_ops; 1385 mmc->f_min = 375000; 1386 mmc->f_max = 24000000; 1387 mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34; 1388 mmc->caps = MMC_CAP_4_BIT_DATA; 1389 1390 spin_lock_init(&host->lock); 1391 1392 /* 1393 * Set up timers 1394 */ 1395 init_timer(&host->ignore_timer); 1396 host->ignore_timer.data = (unsigned long)host; 1397 host->ignore_timer.function = wbsd_reset_ignore; 1398 1399 /* 1400 * Maximum number of segments. Worst case is one sector per segment 1401 * so this will be 64kB/512. 1402 */ 1403 mmc->max_hw_segs = 128; 1404 mmc->max_phys_segs = 128; 1405 1406 /* 1407 * Maximum number of sectors in one transfer. Also limited by 64kB 1408 * buffer. 1409 */ 1410 mmc->max_sectors = 128; 1411 1412 /* 1413 * Maximum segment size. Could be one segment with the maximum number 1414 * of segments. 1415 */ 1416 mmc->max_seg_size = mmc->max_sectors * 512; 1417 1418 dev_set_drvdata(dev, mmc); 1419 1420 return 0; 1421} 1422 1423static void __devexit wbsd_free_mmc(struct device* dev) 1424{ 1425 struct mmc_host* mmc; 1426 struct wbsd_host* host; 1427 1428 mmc = dev_get_drvdata(dev); 1429 if (!mmc) 1430 return; 1431 1432 host = mmc_priv(mmc); 1433 BUG_ON(host == NULL); 1434 1435 del_timer_sync(&host->ignore_timer); 1436 1437 mmc_free_host(mmc); 1438 1439 dev_set_drvdata(dev, NULL); 1440} 1441 1442/* 1443 * Scan for known chip id:s 1444 */ 1445 1446static int __devinit wbsd_scan(struct wbsd_host* host) 1447{ 1448 int i, j, k; 1449 int id; 1450 1451 /* 1452 * Iterate through all ports, all codes to 1453 * find hardware that is in our known list. 1454 */ 1455 for (i = 0;i < sizeof(config_ports)/sizeof(int);i++) 1456 { 1457 if (!request_region(config_ports[i], 2, DRIVER_NAME)) 1458 continue; 1459 1460 for (j = 0;j < sizeof(unlock_codes)/sizeof(int);j++) 1461 { 1462 id = 0xFFFF; 1463 1464 outb(unlock_codes[j], config_ports[i]); 1465 outb(unlock_codes[j], config_ports[i]); 1466 1467 outb(WBSD_CONF_ID_HI, config_ports[i]); 1468 id = inb(config_ports[i] + 1) << 8; 1469 1470 outb(WBSD_CONF_ID_LO, config_ports[i]); 1471 id |= inb(config_ports[i] + 1); 1472 1473 for (k = 0;k < sizeof(valid_ids)/sizeof(int);k++) 1474 { 1475 if (id == valid_ids[k]) 1476 { 1477 host->chip_id = id; 1478 host->config = config_ports[i]; 1479 host->unlock_code = unlock_codes[i]; 1480 1481 return 0; 1482 } 1483 } 1484 1485 if (id != 0xFFFF) 1486 { 1487 DBG("Unknown hardware (id %x) found at %x\n", 1488 id, config_ports[i]); 1489 } 1490 1491 outb(LOCK_CODE, config_ports[i]); 1492 } 1493 1494 release_region(config_ports[i], 2); 1495 } 1496 1497 return -ENODEV; 1498} 1499 1500/* 1501 * Allocate/free io port ranges 1502 */ 1503 1504static int __devinit wbsd_request_region(struct wbsd_host* host, int base) 1505{ 1506 if (io & 0x7) 1507 return -EINVAL; 1508 1509 if (!request_region(base, 8, DRIVER_NAME)) 1510 return -EIO; 1511 1512 host->base = io; 1513 1514 return 0; 1515} 1516 1517static void __devexit wbsd_release_regions(struct wbsd_host* host) 1518{ 1519 if (host->base) 1520 release_region(host->base, 8); 1521 1522 host->base = 0; 1523 1524 if (host->config) 1525 release_region(host->config, 2); 1526 1527 host->config = 0; 1528} 1529 1530/* 1531 * Allocate/free DMA port and buffer 1532 */ 1533 1534static void __devinit wbsd_request_dma(struct wbsd_host* host, int dma) 1535{ 1536 if (dma < 0) 1537 return; 1538 1539 if (request_dma(dma, DRIVER_NAME)) 1540 goto err; 1541 1542 /* 1543 * We need to allocate a special buffer in 1544 * order for ISA to be able to DMA to it. 1545 */ 1546 host->dma_buffer = kmalloc(WBSD_DMA_SIZE, 1547 GFP_NOIO | GFP_DMA | __GFP_REPEAT | __GFP_NOWARN); 1548 if (!host->dma_buffer) 1549 goto free; 1550 1551 /* 1552 * Translate the address to a physical address. 1553 */ 1554 host->dma_addr = dma_map_single(host->mmc->dev, host->dma_buffer, 1555 WBSD_DMA_SIZE, DMA_BIDIRECTIONAL); 1556 1557 /* 1558 * ISA DMA must be aligned on a 64k basis. 1559 */ 1560 if ((host->dma_addr & 0xffff) != 0) 1561 goto kfree; 1562 /* 1563 * ISA cannot access memory above 16 MB. 1564 */ 1565 else if (host->dma_addr >= 0x1000000) 1566 goto kfree; 1567 1568 host->dma = dma; 1569 1570 return; 1571 1572kfree: 1573 /* 1574 * If we've gotten here then there is some kind of alignment bug 1575 */ 1576 BUG_ON(1); 1577 1578 dma_unmap_single(host->mmc->dev, host->dma_addr, WBSD_DMA_SIZE, 1579 DMA_BIDIRECTIONAL); 1580 host->dma_addr = (dma_addr_t)NULL; 1581 1582 kfree(host->dma_buffer); 1583 host->dma_buffer = NULL; 1584 1585free: 1586 free_dma(dma); 1587 1588err: 1589 printk(KERN_WARNING DRIVER_NAME ": Unable to allocate DMA %d. " 1590 "Falling back on FIFO.\n", dma); 1591} 1592 1593static void __devexit wbsd_release_dma(struct wbsd_host* host) 1594{ 1595 if (host->dma_addr) 1596 dma_unmap_single(host->mmc->dev, host->dma_addr, WBSD_DMA_SIZE, 1597 DMA_BIDIRECTIONAL); 1598 if (host->dma_buffer) 1599 kfree(host->dma_buffer); 1600 if (host->dma >= 0) 1601 free_dma(host->dma); 1602 1603 host->dma = -1; 1604 host->dma_buffer = NULL; 1605 host->dma_addr = (dma_addr_t)NULL; 1606} 1607 1608/* 1609 * Allocate/free IRQ. 1610 */ 1611 1612static int __devinit wbsd_request_irq(struct wbsd_host* host, int irq) 1613{ 1614 int ret; 1615 1616 /* 1617 * Allocate interrupt. 1618 */ 1619 1620 ret = request_irq(irq, wbsd_irq, SA_SHIRQ, DRIVER_NAME, host); 1621 if (ret) 1622 return ret; 1623 1624 host->irq = irq; 1625 1626 /* 1627 * Set up tasklets. 1628 */ 1629 tasklet_init(&host->card_tasklet, wbsd_tasklet_card, (unsigned long)host); 1630 tasklet_init(&host->fifo_tasklet, wbsd_tasklet_fifo, (unsigned long)host); 1631 tasklet_init(&host->crc_tasklet, wbsd_tasklet_crc, (unsigned long)host); 1632 tasklet_init(&host->timeout_tasklet, wbsd_tasklet_timeout, (unsigned long)host); 1633 tasklet_init(&host->finish_tasklet, wbsd_tasklet_finish, (unsigned long)host); 1634 tasklet_init(&host->block_tasklet, wbsd_tasklet_block, (unsigned long)host); 1635 1636 return 0; 1637} 1638 1639static void __devexit wbsd_release_irq(struct wbsd_host* host) 1640{ 1641 if (!host->irq) 1642 return; 1643 1644 free_irq(host->irq, host); 1645 1646 host->irq = 0; 1647 1648 tasklet_kill(&host->card_tasklet); 1649 tasklet_kill(&host->fifo_tasklet); 1650 tasklet_kill(&host->crc_tasklet); 1651 tasklet_kill(&host->timeout_tasklet); 1652 tasklet_kill(&host->finish_tasklet); 1653 tasklet_kill(&host->block_tasklet); 1654} 1655 1656/* 1657 * Allocate all resources for the host. 1658 */ 1659 1660static int __devinit wbsd_request_resources(struct wbsd_host* host, 1661 int base, int irq, int dma) 1662{ 1663 int ret; 1664 1665 /* 1666 * Allocate I/O ports. 1667 */ 1668 ret = wbsd_request_region(host, base); 1669 if (ret) 1670 return ret; 1671 1672 /* 1673 * Allocate interrupt. 1674 */ 1675 ret = wbsd_request_irq(host, irq); 1676 if (ret) 1677 return ret; 1678 1679 /* 1680 * Allocate DMA. 1681 */ 1682 wbsd_request_dma(host, dma); 1683 1684 return 0; 1685} 1686 1687/* 1688 * Release all resources for the host. 1689 */ 1690 1691static void __devexit wbsd_release_resources(struct wbsd_host* host) 1692{ 1693 wbsd_release_dma(host); 1694 wbsd_release_irq(host); 1695 wbsd_release_regions(host); 1696} 1697 1698/* 1699 * Configure the resources the chip should use. 1700 */ 1701 1702static void __devinit wbsd_chip_config(struct wbsd_host* host) 1703{ 1704 /* 1705 * Reset the chip. 1706 */ 1707 wbsd_write_config(host, WBSD_CONF_SWRST, 1); 1708 wbsd_write_config(host, WBSD_CONF_SWRST, 0); 1709 1710 /* 1711 * Select SD/MMC function. 1712 */ 1713 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD); 1714 1715 /* 1716 * Set up card detection. 1717 */ 1718 wbsd_write_config(host, WBSD_CONF_PINS, WBSD_PINS_DETECT_GP11); 1719 1720 /* 1721 * Configure chip 1722 */ 1723 wbsd_write_config(host, WBSD_CONF_PORT_HI, host->base >> 8); 1724 wbsd_write_config(host, WBSD_CONF_PORT_LO, host->base & 0xff); 1725 1726 wbsd_write_config(host, WBSD_CONF_IRQ, host->irq); 1727 1728 if (host->dma >= 0) 1729 wbsd_write_config(host, WBSD_CONF_DRQ, host->dma); 1730 1731 /* 1732 * Enable and power up chip. 1733 */ 1734 wbsd_write_config(host, WBSD_CONF_ENABLE, 1); 1735 wbsd_write_config(host, WBSD_CONF_POWER, 0x20); 1736} 1737 1738/* 1739 * Check that configured resources are correct. 1740 */ 1741 1742static int __devinit wbsd_chip_validate(struct wbsd_host* host) 1743{ 1744 int base, irq, dma; 1745 1746 /* 1747 * Select SD/MMC function. 1748 */ 1749 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD); 1750 1751 /* 1752 * Read configuration. 1753 */ 1754 base = wbsd_read_config(host, WBSD_CONF_PORT_HI) << 8; 1755 base |= wbsd_read_config(host, WBSD_CONF_PORT_LO); 1756 1757 irq = wbsd_read_config(host, WBSD_CONF_IRQ); 1758 1759 dma = wbsd_read_config(host, WBSD_CONF_DRQ); 1760 1761 /* 1762 * Validate against given configuration. 1763 */ 1764 if (base != host->base) 1765 return 0; 1766 if (irq != host->irq) 1767 return 0; 1768 if ((dma != host->dma) && (host->dma != -1)) 1769 return 0; 1770 1771 return 1; 1772} 1773 1774/*****************************************************************************\ 1775 * * 1776 * Devices setup and shutdown * 1777 * * 1778\*****************************************************************************/ 1779 1780static int __devinit wbsd_init(struct device* dev, int base, int irq, int dma, 1781 int pnp) 1782{ 1783 struct wbsd_host* host = NULL; 1784 struct mmc_host* mmc = NULL; 1785 int ret; 1786 1787 ret = wbsd_alloc_mmc(dev); 1788 if (ret) 1789 return ret; 1790 1791 mmc = dev_get_drvdata(dev); 1792 host = mmc_priv(mmc); 1793 1794 /* 1795 * Scan for hardware. 1796 */ 1797 ret = wbsd_scan(host); 1798 if (ret) 1799 { 1800 if (pnp && (ret == -ENODEV)) 1801 { 1802 printk(KERN_WARNING DRIVER_NAME 1803 ": Unable to confirm device presence. You may " 1804 "experience lock-ups.\n"); 1805 } 1806 else 1807 { 1808 wbsd_free_mmc(dev); 1809 return ret; 1810 } 1811 } 1812 1813 /* 1814 * Request resources. 1815 */ 1816 ret = wbsd_request_resources(host, io, irq, dma); 1817 if (ret) 1818 { 1819 wbsd_release_resources(host); 1820 wbsd_free_mmc(dev); 1821 return ret; 1822 } 1823 1824 /* 1825 * See if chip needs to be configured. 1826 */ 1827 if (pnp && (host->config != 0)) 1828 { 1829 if (!wbsd_chip_validate(host)) 1830 { 1831 printk(KERN_WARNING DRIVER_NAME 1832 ": PnP active but chip not configured! " 1833 "You probably have a buggy BIOS. " 1834 "Configuring chip manually.\n"); 1835 wbsd_chip_config(host); 1836 } 1837 } 1838 else 1839 wbsd_chip_config(host); 1840 1841 /* 1842 * Power Management stuff. No idea how this works. 1843 * Not tested. 1844 */ 1845#ifdef CONFIG_PM 1846 if (host->config) 1847 wbsd_write_config(host, WBSD_CONF_PME, 0xA0); 1848#endif 1849 /* 1850 * Allow device to initialise itself properly. 1851 */ 1852 mdelay(5); 1853 1854 /* 1855 * Reset the chip into a known state. 1856 */ 1857 wbsd_init_device(host); 1858 1859 mmc_add_host(mmc); 1860 1861 printk(KERN_INFO "%s: W83L51xD", mmc_hostname(mmc)); 1862 if (host->chip_id != 0) 1863 printk(" id %x", (int)host->chip_id); 1864 printk(" at 0x%x irq %d", (int)host->base, (int)host->irq); 1865 if (host->dma >= 0) 1866 printk(" dma %d", (int)host->dma); 1867 else 1868 printk(" FIFO"); 1869 if (pnp) 1870 printk(" PnP"); 1871 printk("\n"); 1872 1873 return 0; 1874} 1875 1876static void __devexit wbsd_shutdown(struct device* dev, int pnp) 1877{ 1878 struct mmc_host* mmc = dev_get_drvdata(dev); 1879 struct wbsd_host* host; 1880 1881 if (!mmc) 1882 return; 1883 1884 host = mmc_priv(mmc); 1885 1886 mmc_remove_host(mmc); 1887 1888 if (!pnp) 1889 { 1890 /* 1891 * Power down the SD/MMC function. 1892 */ 1893 wbsd_unlock_config(host); 1894 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD); 1895 wbsd_write_config(host, WBSD_CONF_ENABLE, 0); 1896 wbsd_lock_config(host); 1897 } 1898 1899 wbsd_release_resources(host); 1900 1901 wbsd_free_mmc(dev); 1902} 1903 1904/* 1905 * Non-PnP 1906 */ 1907 1908static int __devinit wbsd_probe(struct device* dev) 1909{ 1910 return wbsd_init(dev, io, irq, dma, 0); 1911} 1912 1913static int __devexit wbsd_remove(struct device* dev) 1914{ 1915 wbsd_shutdown(dev, 0); 1916 1917 return 0; 1918} 1919 1920/* 1921 * PnP 1922 */ 1923 1924#ifdef CONFIG_PNP 1925 1926static int __devinit 1927wbsd_pnp_probe(struct pnp_dev * pnpdev, const struct pnp_device_id *dev_id) 1928{ 1929 int io, irq, dma; 1930 1931 /* 1932 * Get resources from PnP layer. 1933 */ 1934 io = pnp_port_start(pnpdev, 0); 1935 irq = pnp_irq(pnpdev, 0); 1936 if (pnp_dma_valid(pnpdev, 0)) 1937 dma = pnp_dma(pnpdev, 0); 1938 else 1939 dma = -1; 1940 1941 DBGF("PnP resources: port %3x irq %d dma %d\n", io, irq, dma); 1942 1943 return wbsd_init(&pnpdev->dev, io, irq, dma, 1); 1944} 1945 1946static void __devexit wbsd_pnp_remove(struct pnp_dev * dev) 1947{ 1948 wbsd_shutdown(&dev->dev, 1); 1949} 1950 1951#endif /* CONFIG_PNP */ 1952 1953/* 1954 * Power management 1955 */ 1956 1957#ifdef CONFIG_PM 1958static int wbsd_suspend(struct device *dev, pm_message_t state, u32 level) 1959{ 1960 DBGF("Not yet supported\n"); 1961 1962 return 0; 1963} 1964 1965static int wbsd_resume(struct device *dev, u32 level) 1966{ 1967 DBGF("Not yet supported\n"); 1968 1969 return 0; 1970} 1971#else 1972#define wbsd_suspend NULL 1973#define wbsd_resume NULL 1974#endif 1975 1976static struct platform_device *wbsd_device; 1977 1978static struct device_driver wbsd_driver = { 1979 .name = DRIVER_NAME, 1980 .bus = &platform_bus_type, 1981 .probe = wbsd_probe, 1982 .remove = wbsd_remove, 1983 1984 .suspend = wbsd_suspend, 1985 .resume = wbsd_resume, 1986}; 1987 1988#ifdef CONFIG_PNP 1989 1990static struct pnp_driver wbsd_pnp_driver = { 1991 .name = DRIVER_NAME, 1992 .id_table = pnp_dev_table, 1993 .probe = wbsd_pnp_probe, 1994 .remove = wbsd_pnp_remove, 1995}; 1996 1997#endif /* CONFIG_PNP */ 1998 1999/* 2000 * Module loading/unloading 2001 */ 2002 2003static int __init wbsd_drv_init(void) 2004{ 2005 int result; 2006 2007 printk(KERN_INFO DRIVER_NAME 2008 ": Winbond W83L51xD SD/MMC card interface driver, " 2009 DRIVER_VERSION "\n"); 2010 printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); 2011 2012#ifdef CONFIG_PNP 2013 2014 if (!nopnp) 2015 { 2016 result = pnp_register_driver(&wbsd_pnp_driver); 2017 if (result < 0) 2018 return result; 2019 } 2020 2021#endif /* CONFIG_PNP */ 2022 2023 if (nopnp) 2024 { 2025 result = driver_register(&wbsd_driver); 2026 if (result < 0) 2027 return result; 2028 2029 wbsd_device = platform_device_register_simple(DRIVER_NAME, -1, 2030 NULL, 0); 2031 if (IS_ERR(wbsd_device)) 2032 return PTR_ERR(wbsd_device); 2033 } 2034 2035 return 0; 2036} 2037 2038static void __exit wbsd_drv_exit(void) 2039{ 2040#ifdef CONFIG_PNP 2041 2042 if (!nopnp) 2043 pnp_unregister_driver(&wbsd_pnp_driver); 2044 2045#endif /* CONFIG_PNP */ 2046 2047 if (nopnp) 2048 { 2049 platform_device_unregister(wbsd_device); 2050 2051 driver_unregister(&wbsd_driver); 2052 } 2053 2054 DBG("unloaded\n"); 2055} 2056 2057module_init(wbsd_drv_init); 2058module_exit(wbsd_drv_exit); 2059#ifdef CONFIG_PNP 2060module_param(nopnp, uint, 0444); 2061#endif 2062module_param(io, uint, 0444); 2063module_param(irq, uint, 0444); 2064module_param(dma, int, 0444); 2065 2066MODULE_LICENSE("GPL"); 2067MODULE_DESCRIPTION("Winbond W83L51xD SD/MMC card interface driver"); 2068MODULE_VERSION(DRIVER_VERSION); 2069 2070#ifdef CONFIG_PNP 2071MODULE_PARM_DESC(nopnp, "Scan for device instead of relying on PNP. (default 0)"); 2072#endif 2073MODULE_PARM_DESC(io, "I/O base to allocate. Must be 8 byte aligned. (default 0x248)"); 2074MODULE_PARM_DESC(irq, "IRQ to allocate. (default 6)"); 2075MODULE_PARM_DESC(dma, "DMA channel to allocate. -1 for no DMA. (default 2)");