Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mtd: atmel_nand: Add DMA support to access Nandflash

Some SAM9 chips have the ability to perform DMA between CPU and SMC controller.
This patch adds DMA support for SAM9RL, SAM9G45, SAM9G46, SAM9M10, SAM9M11.

Signed-off-by: Hong Xu <hong.xu@atmel.com>
Tested-by: Ryan Mallon <ryan@bluewatersys.com>
Acked-by: Ryan Mallon <ryan@bluewatersys.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>

Authored by Hong Xu and committed by David Woodhouse.
cbc6c5e7 7f53f12f

+157 -9
+157 -9
drivers/mtd/nand/atmel_nand.c
··· 48 48 #define no_ecc 0 49 49 #endif 50 50 51 + static int use_dma = 1; 52 + module_param(use_dma, int, 0); 53 + 51 54 static int on_flash_bbt = 0; 52 55 module_param(on_flash_bbt, int, 0); 53 56 ··· 92 89 struct nand_chip nand_chip; 93 90 struct mtd_info mtd; 94 91 void __iomem *io_base; 92 + dma_addr_t io_phys; 95 93 struct atmel_nand_data *board; 96 94 struct device *dev; 97 95 void __iomem *ecc; 96 + 97 + struct completion comp; 98 + struct dma_chan *dma_chan; 98 99 }; 100 + 101 + static int cpu_has_dma(void) 102 + { 103 + return cpu_is_at91sam9rl() || cpu_is_at91sam9g45(); 104 + } 99 105 100 106 /* 101 107 * Enable NAND. ··· 162 150 /* 163 151 * Minimal-overhead PIO for data access. 164 152 */ 165 - static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len) 153 + static void atmel_read_buf8(struct mtd_info *mtd, u8 *buf, int len) 166 154 { 167 155 struct nand_chip *nand_chip = mtd->priv; 168 156 ··· 176 164 __raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2); 177 165 } 178 166 179 - static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len) 167 + static void atmel_write_buf8(struct mtd_info *mtd, const u8 *buf, int len) 180 168 { 181 169 struct nand_chip *nand_chip = mtd->priv; 182 170 ··· 188 176 struct nand_chip *nand_chip = mtd->priv; 189 177 190 178 __raw_writesw(nand_chip->IO_ADDR_W, buf, len / 2); 179 + } 180 + 181 + static void dma_complete_func(void *completion) 182 + { 183 + complete(completion); 184 + } 185 + 186 + static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len, 187 + int is_read) 188 + { 189 + struct dma_device *dma_dev; 190 + enum dma_ctrl_flags flags; 191 + dma_addr_t dma_src_addr, dma_dst_addr, phys_addr; 192 + struct dma_async_tx_descriptor *tx = NULL; 193 + dma_cookie_t cookie; 194 + struct nand_chip *chip = mtd->priv; 195 + struct atmel_nand_host *host = chip->priv; 196 + void *p = buf; 197 + int err = -EIO; 198 + enum dma_data_direction dir = is_read ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; 199 + 200 + if (buf >= high_memory) { 201 + struct page *pg; 202 + 203 + if (((size_t)buf & PAGE_MASK) != 204 + ((size_t)(buf + len - 1) & PAGE_MASK)) { 205 + dev_warn(host->dev, "Buffer not fit in one page\n"); 206 + goto err_buf; 207 + } 208 + 209 + pg = vmalloc_to_page(buf); 210 + if (pg == 0) { 211 + dev_err(host->dev, "Failed to vmalloc_to_page\n"); 212 + goto err_buf; 213 + } 214 + p = page_address(pg) + ((size_t)buf & ~PAGE_MASK); 215 + } 216 + 217 + dma_dev = host->dma_chan->device; 218 + 219 + flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP | 220 + DMA_COMPL_SKIP_DEST_UNMAP; 221 + 222 + phys_addr = dma_map_single(dma_dev->dev, p, len, dir); 223 + if (dma_mapping_error(dma_dev->dev, phys_addr)) { 224 + dev_err(host->dev, "Failed to dma_map_single\n"); 225 + goto err_buf; 226 + } 227 + 228 + if (is_read) { 229 + dma_src_addr = host->io_phys; 230 + dma_dst_addr = phys_addr; 231 + } else { 232 + dma_src_addr = phys_addr; 233 + dma_dst_addr = host->io_phys; 234 + } 235 + 236 + tx = dma_dev->device_prep_dma_memcpy(host->dma_chan, dma_dst_addr, 237 + dma_src_addr, len, flags); 238 + if (!tx) { 239 + dev_err(host->dev, "Failed to prepare DMA memcpy\n"); 240 + goto err_dma; 241 + } 242 + 243 + init_completion(&host->comp); 244 + tx->callback = dma_complete_func; 245 + tx->callback_param = &host->comp; 246 + 247 + cookie = tx->tx_submit(tx); 248 + if (dma_submit_error(cookie)) { 249 + dev_err(host->dev, "Failed to do DMA tx_submit\n"); 250 + goto err_dma; 251 + } 252 + 253 + dma_async_issue_pending(host->dma_chan); 254 + wait_for_completion(&host->comp); 255 + 256 + err = 0; 257 + 258 + err_dma: 259 + dma_unmap_single(dma_dev->dev, phys_addr, len, dir); 260 + err_buf: 261 + if (err != 0) 262 + dev_warn(host->dev, "Fall back to CPU I/O\n"); 263 + return err; 264 + } 265 + 266 + static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len) 267 + { 268 + struct nand_chip *chip = mtd->priv; 269 + struct 
atmel_nand_host *host = chip->priv; 270 + 271 + if (use_dma && len >= mtd->oobsize) 272 + if (atmel_nand_dma_op(mtd, buf, len, 1) == 0) 273 + return; 274 + 275 + if (host->board->bus_width_16) 276 + atmel_read_buf16(mtd, buf, len); 277 + else 278 + atmel_read_buf8(mtd, buf, len); 279 + } 280 + 281 + static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len) 282 + { 283 + struct nand_chip *chip = mtd->priv; 284 + struct atmel_nand_host *host = chip->priv; 285 + 286 + if (use_dma && len >= mtd->oobsize) 287 + if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0) 288 + return; 289 + 290 + if (host->board->bus_width_16) 291 + atmel_write_buf16(mtd, buf, len); 292 + else 293 + atmel_write_buf8(mtd, buf, len); 191 294 } 192 295 193 296 /* ··· 525 398 return -ENOMEM; 526 399 } 527 400 401 + host->io_phys = (dma_addr_t)mem->start; 402 + 528 403 host->io_base = ioremap(mem->start, mem->end - mem->start + 1); 529 404 if (host->io_base == NULL) { 530 405 printk(KERN_ERR "atmel_nand: ioremap failed\n"); ··· 577 448 578 449 nand_chip->chip_delay = 20; /* 20us command delay time */ 579 450 580 - if (host->board->bus_width_16) { /* 16-bit bus width */ 451 + if (host->board->bus_width_16) /* 16-bit bus width */ 581 452 nand_chip->options |= NAND_BUSWIDTH_16; 582 - nand_chip->read_buf = atmel_read_buf16; 583 - nand_chip->write_buf = atmel_write_buf16; 584 - } else { 585 - nand_chip->read_buf = atmel_read_buf; 586 - nand_chip->write_buf = atmel_write_buf; 587 - } 453 + 454 + nand_chip->read_buf = atmel_read_buf; 455 + nand_chip->write_buf = atmel_write_buf; 588 456 589 457 platform_set_drvdata(pdev, host); 590 458 atmel_nand_enable(host); ··· 598 472 printk(KERN_INFO "atmel_nand: Use On Flash BBT\n"); 599 473 nand_chip->options |= NAND_USE_FLASH_BBT; 600 474 } 475 + 476 + if (cpu_has_dma() && use_dma) { 477 + dma_cap_mask_t mask; 478 + 479 + dma_cap_zero(mask); 480 + dma_cap_set(DMA_MEMCPY, mask); 481 + host->dma_chan = dma_request_channel(mask, 0, NULL); 482 + if 
(!host->dma_chan) { 483 + dev_err(host->dev, "Failed to request DMA channel\n"); 484 + use_dma = 0; 485 + } 486 + } 487 + if (use_dma) 488 + dev_info(host->dev, "Using DMA for NAND access.\n"); 489 + else 490 + dev_info(host->dev, "No DMA support for NAND access.\n"); 601 491 602 492 /* first scan to find the device and get the page size */ 603 493 if (nand_scan_ident(mtd, 1, NULL)) { ··· 697 555 err_no_card: 698 556 atmel_nand_disable(host); 699 557 platform_set_drvdata(pdev, NULL); 558 + if (host->dma_chan) 559 + dma_release_channel(host->dma_chan); 700 560 if (host->ecc) 701 561 iounmap(host->ecc); 702 562 err_ecc_ioremap: ··· 722 578 723 579 if (host->ecc) 724 580 iounmap(host->ecc); 581 + 582 + if (host->dma_chan) 583 + dma_release_channel(host->dma_chan); 584 + 725 585 iounmap(host->io_base); 726 586 kfree(host); 727 587