Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
tmio_mmc_dma.c at v3.18-rc4 (358 lines, 9.0 kB)
/*
 * linux/drivers/mmc/tmio_mmc_dma.c
 *
 * Copyright (C) 2010-2011 Guennadi Liakhovetski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * DMA function for TMIO MMC implementations
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/tmio.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>

#include "tmio_mmc.h"

#define TMIO_MMC_MIN_DMA_LEN 8

void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
	if (!host->chan_tx || !host->chan_rx)
		return;

	if (host->pdata->flags & TMIO_MMC_HAVE_CTL_DMA_REG)
		sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? 2 : 0);
}

void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
{
	tmio_mmc_enable_dma(host, false);

	if (host->chan_rx)
		dmaengine_terminate_all(host->chan_rx);
	if (host->chan_tx)
		dmaengine_terminate_all(host->chan_tx);

	tmio_mmc_enable_dma(host, true);
}

static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	struct tmio_mmc_data *pdata = host->pdata;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
	if (ret > 0)
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_DEV_TO_MEM, DMA_CTRL_ACK);

	if (desc) {
		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);

pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		tmio_mmc_enable_dma(host, false);
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, host->sg_len);
}

static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	struct tmio_mmc_data *pdata = host->pdata;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
	if (ret > 0)
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_MEM_TO_DEV, DMA_CTRL_ACK);

	if (desc) {
		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);

pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		tmio_mmc_enable_dma(host, false);
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}

void tmio_mmc_start_dma(struct tmio_mmc_host *host,
			struct mmc_data *data)
{
	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			tmio_mmc_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			tmio_mmc_start_dma_tx(host);
	}
}

static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
	struct dma_chan *chan = NULL;

	spin_lock_irq(&host->lock);

	if (host && host->data) {
		if (host->data->flags & MMC_DATA_READ)
			chan = host->chan_rx;
		else
			chan = host->chan_tx;
	}

	spin_unlock_irq(&host->lock);

	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);

	if (chan)
		dma_async_issue_pending(chan);
}

static void tmio_mmc_tasklet_fn(unsigned long arg)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;

	spin_lock_irq(&host->lock);

	if (!host->data)
		goto out;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_TO_DEVICE);

	tmio_mmc_do_data_irq(host);
out:
	spin_unlock_irq(&host->lock);
}

void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
{
	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (!pdata->dma || (!host->pdev->dev.of_node &&
		(!pdata->dma->chan_priv_tx || !pdata->dma->chan_priv_rx)))
		return;

	if (!host->chan_tx && !host->chan_rx) {
		struct resource *res = platform_get_resource(host->pdev,
							     IORESOURCE_MEM, 0);
		struct dma_slave_config cfg = {};
		dma_cap_mask_t mask;
		int ret;

		if (!res)
			return;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_slave_channel_compat(mask,
					pdata->dma->filter, pdata->dma->chan_priv_tx,
					&host->pdev->dev, "tx");
		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		if (pdata->dma->chan_priv_tx)
			cfg.slave_id = pdata->dma->slave_id_tx;
		cfg.direction = DMA_MEM_TO_DEV;
		cfg.dst_addr = res->start + (CTL_SD_DATA_PORT << host->pdata->bus_shift);
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.src_addr = 0;
		ret = dmaengine_slave_config(host->chan_tx, &cfg);
		if (ret < 0)
			goto ecfgtx;

		host->chan_rx = dma_request_slave_channel_compat(mask,
					pdata->dma->filter, pdata->dma->chan_priv_rx,
					&host->pdev->dev, "rx");
		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx)
			goto ereqrx;

		if (pdata->dma->chan_priv_rx)
			cfg.slave_id = pdata->dma->slave_id_rx;
		cfg.direction = DMA_DEV_TO_MEM;
		cfg.src_addr = cfg.dst_addr + pdata->dma->dma_rx_offset;
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.dst_addr = 0;
		ret = dmaengine_slave_config(host->chan_rx, &cfg);
		if (ret < 0)
			goto ecfgrx;

		host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
		if (!host->bounce_buf)
			goto ebouncebuf;

		tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
		tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
	}

	tmio_mmc_enable_dma(host, true);

	return;

ebouncebuf:
ecfgrx:
	dma_release_channel(host->chan_rx);
	host->chan_rx = NULL;
ereqrx:
ecfgtx:
	dma_release_channel(host->chan_tx);
	host->chan_tx = NULL;
}

void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}
	if (host->bounce_buf) {
		free_pages((unsigned long)host->bounce_buf, 0);
		host->bounce_buf = NULL;
	}
}
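
For context, the exported helpers above are driven by the tmio core driver: channels are requested once at probe time, a descriptor is prepared per data request, the transfer is actually kicked off via the dma_issue tasklet, and everything is torn down at remove. The fragment below is only a rough sketch of that call order under those assumptions; the example_* functions are hypothetical stand-ins, not code from the kernel tree.

/*
 * Illustrative sketch only: expected call order for the DMA helpers above.
 * The example_* functions are hypothetical; the real callers live in the
 * tmio core (tmio_mmc_pio.c in this kernel version).
 */
static int example_probe(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
{
	/* Request Tx/Rx channels, allocate the bounce page, init tasklets */
	tmio_mmc_request_dma(host, pdata);
	return 0;
}

static void example_start_data(struct tmio_mmc_host *host, struct mmc_data *data)
{
	/* Map the sg list and submit a descriptor for this request; the
	 * helper falls back to PIO internally if mapping or submission
	 * fails or the buffer is too small/misaligned. */
	tmio_mmc_start_dma(host, data);

	/* The transfer itself is issued later from IRQ context by
	 * scheduling host->dma_issue (tmio_mmc_issue_tasklet_fn). */
}

static void example_error_recovery(struct tmio_mmc_host *host)
{
	/* Terminate any in-flight descriptors on both channels */
	tmio_mmc_abort_dma(host);
}

static void example_remove(struct tmio_mmc_host *host)
{
	/* Release both channels and free the bounce buffer */
	tmio_mmc_release_dma(host);
}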