sdhci: use PIO when DMA can't satisfy the request

Some controllers have been designed on the assumption that all transfers
will be 32-bit aligned, both in start address and in size. This is not a
guarantee the SDHCI specification provides and not one we can provide.

Fall back to PIO for individual requests in order to work around the
hardware bug.
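
In concrete terms, the per-request check amounts to the sketch below: a
request is only handed to the DMA engine when both its total size and its
buffer offset are 32-bit aligned, subject to the quirk flags introduced by
this patch. The helper and its parameters are hypothetical and purely for
illustration; the real logic lives in the sdhci.c hunk further down.

	/*
	 * Hypothetical illustration, not part of the patch: decide whether
	 * a request may use DMA on a controller with the 32-bit quirks set.
	 */
	static int sdhci_req_may_use_dma(unsigned int total_bytes,
					 unsigned int sg_offset,
					 unsigned int quirks)
	{
		if ((quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) && (total_bytes & 0x3))
			return 0;	/* size is not a multiple of 32 bits */
		if ((quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) && (sg_offset & 0x3))
			return 0;	/* buffer does not start on a 32-bit boundary */
		return 1;	/* safe to program the DMA engine */
	}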

Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>

+31 -4
+29 -3
drivers/mmc/host/sdhci.c
···
  43  #define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS	(1<<4)
  44  /* Controller has an unusable DMA engine */
  45  #define SDHCI_QUIRK_BROKEN_DMA		(1<<5)
  46
  47  static const struct pci_device_id pci_ids[] __devinitdata = {
  48  	{
···
 433
 434  	writeb(count, host->ioaddr + SDHCI_TIMEOUT_CONTROL);
 435
 436 -	if (host->flags & SDHCI_USE_DMA) {
 437  		int count;
 438
 439  		count = pci_map_sg(host->chip->pdev, data->sg, data->sg_len,
···
 492  		mode |= SDHCI_TRNS_MULTI;
 493  	if (data->flags & MMC_DATA_READ)
 494  		mode |= SDHCI_TRNS_READ;
 495 -	if (host->flags & SDHCI_USE_DMA)
 496  		mode |= SDHCI_TRNS_DMA;
 497
 498  	writew(mode, host->ioaddr + SDHCI_TRANSFER_MODE);
···
 508  	data = host->data;
 509  	host->data = NULL;
 510
 511 -	if (host->flags & SDHCI_USE_DMA) {
 512  		pci_unmap_sg(host->chip->pdev, data->sg, data->sg_len,
 513  			(data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE);
 514  	}
···
  43  #define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS	(1<<4)
  44  /* Controller has an unusable DMA engine */
  45  #define SDHCI_QUIRK_BROKEN_DMA		(1<<5)
  46 +/* Controller can only DMA from 32-bit aligned addresses */
  47 +#define SDHCI_QUIRK_32BIT_DMA_ADDR	(1<<6)
  48 +/* Controller can only DMA chunk sizes that are a multiple of 32 bits */
  49 +#define SDHCI_QUIRK_32BIT_DMA_SIZE	(1<<7)
  50
  51  static const struct pci_device_id pci_ids[] __devinitdata = {
  52  	{
···
 429
 430  	writeb(count, host->ioaddr + SDHCI_TIMEOUT_CONTROL);
 431
 432 +	if (host->flags & SDHCI_USE_DMA)
 433 +		host->flags |= SDHCI_REQ_USE_DMA;
 434 +
 435 +	if (unlikely((host->flags & SDHCI_REQ_USE_DMA) &&
 436 +		(host->chip->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) &&
 437 +		((data->blksz * data->blocks) & 0x3))) {
 438 +		DBG("Reverting to PIO because of transfer size (%d)\n",
 439 +			data->blksz * data->blocks);
 440 +		host->flags &= ~SDHCI_REQ_USE_DMA;
 441 +	}
 442 +
 443 +	/*
 444 +	 * The assumption here being that alignment is the same after
 445 +	 * translation to device address space.
 446 +	 */
 447 +	if (unlikely((host->flags & SDHCI_REQ_USE_DMA) &&
 448 +		(host->chip->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
 449 +		(data->sg->offset & 0x3))) {
 450 +		DBG("Reverting to PIO because of bad alignment\n");
 451 +		host->flags &= ~SDHCI_REQ_USE_DMA;
 452 +	}
 453 +
 454 +	if (host->flags & SDHCI_REQ_USE_DMA) {
 455  		int count;
 456
 457  		count = pci_map_sg(host->chip->pdev, data->sg, data->sg_len,
···
 466  		mode |= SDHCI_TRNS_MULTI;
 467  	if (data->flags & MMC_DATA_READ)
 468  		mode |= SDHCI_TRNS_READ;
 469 +	if (host->flags & SDHCI_REQ_USE_DMA)
 470  		mode |= SDHCI_TRNS_DMA;
 471
 472  	writew(mode, host->ioaddr + SDHCI_TRANSFER_MODE);
···
 482  	data = host->data;
 483  	host->data = NULL;
 484
 485 +	if (host->flags & SDHCI_REQ_USE_DMA) {
 486  		pci_unmap_sg(host->chip->pdev, data->sg, data->sg_len,
 487  			(data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE);
 488  	}
+2 -1
drivers/mmc/host/sdhci.h
···
 171  	spinlock_t		lock;		/* Mutex */
 172
 173  	int			flags;		/* Host attributes */
 174 -#define SDHCI_USE_DMA		(1<<0)
 175
 176  	unsigned int		max_clk;	/* Max possible freq (MHz) */
 177  	unsigned int		timeout_clk;	/* Timeout freq (KHz) */
···
 171  	spinlock_t		lock;		/* Mutex */
 172
 173  	int			flags;		/* Host attributes */
 174 +#define SDHCI_USE_DMA		(1<<0)		/* Host is DMA capable */
 175 +#define SDHCI_REQ_USE_DMA	(1<<1)		/* Use DMA for this req. */
 176
 177  	unsigned int		max_clk;	/* Max possible freq (MHz) */
 178  	unsigned int		timeout_clk;	/* Timeout freq (KHz) */
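
As a usage note, a controller known to suffer from this limitation would be
flagged by OR-ing the new quirk bits into its pci_ids[] entry, assuming
quirks continue to be carried in .driver_data, which is how host->chip->quirks
is populated for the existing entries. The vendor and device IDs below are
placeholders, not a real affected part:

	{
		.vendor		= 0x1234,	/* placeholder vendor ID */
		.device		= 0x5678,	/* placeholder device ID */
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= SDHCI_QUIRK_32BIT_DMA_ADDR |
				  SDHCI_QUIRK_32BIT_DMA_SIZE,
	},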