Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
arch/arm/common/dmabounce.c at v3.2-rc2
/*
 * arch/arm/common/dmabounce.c
 *
 * Special dma_{map/unmap/dma_sync}_* routines for systems that have
 * limited DMA windows. These functions utilize bounce buffers to
 * copy data to/from buffers located outside the DMA region. This
 * only works for systems in which DMA memory is at the bottom of
 * RAM, the remainder of memory is at the top and the DMA memory
 * can be marked as ZONE_DMA. Anything beyond that such as discontiguous
 * DMA windows will require custom implementations that reserve memory
 * areas at early bootup.
 *
 * Original version by Brad Parker (brad@heeltoe.com)
 * Re-written by Christopher Hoover <ch@murgatroid.com>
 * Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 * Copyright (C) 2002 Hewlett Packard Company.
 * Copyright (C) 2004 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/page-flags.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

#undef STATS

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif

/* ************************************************** */

struct safe_buffer {
	struct list_head node;

	/* original request */
	void		*ptr;
	size_t		size;
	int		direction;

	/* safe buffer info */
	struct dmabounce_pool *pool;
	void		*safe;
	dma_addr_t	safe_dma_addr;
};

struct dmabounce_pool {
	unsigned long	size;
	struct dma_pool	*pool;
#ifdef STATS
	unsigned long	allocs;
#endif
};

struct dmabounce_device_info {
	struct device *dev;
	struct list_head safe_buffers;
#ifdef STATS
	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
	int attr_res;
#endif
	struct dmabounce_pool	small;
	struct dmabounce_pool	large;

	rwlock_t lock;

	int (*needs_bounce)(struct device *, dma_addr_t, size_t);
};

#ifdef STATS
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
		device_info->small.allocs,
		device_info->large.allocs,
		device_info->total_allocs - device_info->small.allocs -
			device_info->large.allocs,
		device_info->total_allocs,
		device_info->map_op_count,
		device_info->bounce_count);
}

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
#endif


/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
		  size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	struct dmabounce_pool *pool;
	struct device *dev = device_info->dev;
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
		__func__, ptr, size, dir);

	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	} else {
		pool = NULL;
	}

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;

	if (pool) {
		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
					   &buf->safe_dma_addr);
	} else {
		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
					       GFP_ATOMIC);
	}

	if (buf->safe == NULL) {
		dev_warn(dev,
			 "%s: could not alloc dma memory (size=%d)\n",
			 __func__, size);
		kfree(buf);
		return NULL;
	}

#ifdef STATS
	if (pool)
		pool->allocs++;
	device_info->total_allocs++;
#endif

	write_lock_irqsave(&device_info->lock, flags);
	list_add(&buf->node, &device_info->safe_buffers);
	write_unlock_irqrestore(&device_info->lock, flags);

	return buf;
}

/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *b, *rb = NULL;
	unsigned long flags;

	read_lock_irqsave(&device_info->lock, flags);

	list_for_each_entry(b, &device_info->safe_buffers, node)
		if (b->safe_dma_addr == safe_dma_addr) {
			rb = b;
			break;
		}

	read_unlock_irqrestore(&device_info->lock, flags);
	return rb;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
	unsigned long flags;

	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

	write_lock_irqsave(&device_info->lock, flags);

	list_del(&buf->node);

	write_unlock_irqrestore(&device_info->lock, flags);

	if (buf->pool)
		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				  buf->safe_dma_addr);

	kfree(buf);
}

/* ************************************************** */

static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
		dma_addr_t dma_addr, const char *where)
{
	if (!dev || !dev->archdata.dmabounce)
		return NULL;
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Trying to %s invalid mapping\n", where);
		return NULL;
	}
	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}

static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
	if (!dev || !dev->archdata.dmabounce)
		return 0;

	if (dev->dma_mask) {
		unsigned long limit, mask = *dev->dma_mask;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return -E2BIG;
		}

		/* Figure out if we need to bounce from the DMA mask. */
		if ((dma_addr | (dma_addr + size - 1)) & ~mask)
			return 1;
	}

	return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
}

static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	buf = alloc_safe_buffer(device_info, ptr, size, dir);
	if (buf == NULL) {
		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
			__func__, ptr);
		return ~0;
	}

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
			__func__, ptr, buf->safe, size);
		memcpy(buf->safe, ptr, size);
	}

	return buf->safe_dma_addr;
}

static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
		size_t size, enum dma_data_direction dir)
{
	BUG_ON(buf->size != size);
	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		void *ptr = buf->ptr;

		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe, ptr, size);
		memcpy(ptr, buf->safe, size);

		/*
		 * Since we may have written to a page cache page,
		 * we need to ensure that the data will be coherent
		 * with user mappings.
		 */
		__cpuc_flush_dcache_area(ptr, size);
	}
	free_safe_buffer(dev->archdata.dmabounce, buf);
}

/* ************************************************** */

/*
 * see if a buffer address is in an 'unsafe' range.  if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
dma_addr_t __dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dma_addr_t dma_addr;
	int ret;

	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;

	ret = needs_bounce(dev, dma_addr, size);
	if (ret < 0)
		return ~0;

	if (ret == 0) {
		__dma_page_cpu_to_dev(page, offset, size, dir);
		return dma_addr;
	}

	if (PageHighMem(page)) {
		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
		return ~0;
	}

	return map_single(dev, page_address(page) + offset, size, dir);
}
EXPORT_SYMBOL(__dma_map_page);

/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
		__func__, dma_addr, size, dir);

	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
	if (!buf) {
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
			dma_addr & ~PAGE_MASK, size, dir);
		return;
	}

	unmap_single(dev, buf, size, dir);
}
EXPORT_SYMBOL(__dma_unmap_page);

int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe + off, buf->ptr + off, sz);
		memcpy(buf->ptr + off, buf->safe + off, sz);
	}
	return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_cpu);

int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
			__func__, buf->ptr + off, buf->safe + off, sz);
		memcpy(buf->safe + off, buf->ptr + off, sz);
	}
	return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_device);

static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
		const char *name, unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}

int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		unsigned long large_buffer_size,
		int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
{
	struct dmabounce_device_info *device_info;
	int ret;

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		dev_err(dev,
			"Could not allocate dmabounce_device_info\n");
		return -ENOMEM;
	}

	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev,
			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
	}

	if (large_buffer_size) {
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool",
					  large_buffer_size);
		if (ret) {
			dev_err(dev,
				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
		}
	}

	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);
	rwlock_init(&device_info->lock);
	device_info->needs_bounce = needs_bounce_fn;

#ifdef STATS
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
	device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif

	dev->archdata.dmabounce = device_info;

	dev_info(dev, "dmabounce: registered device\n");

	return 0;

 err_destroy:
	dma_pool_destroy(device_info->small.pool);
 err_free:
	kfree(device_info);
	return ret;
}
EXPORT_SYMBOL(dmabounce_register_dev);

void dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

	dev->archdata.dmabounce = NULL;

	if (!device_info) {
		dev_warn(dev,
			 "Never registered with dmabounce but attempting"
			 " to unregister!\n");
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		dev_err(dev,
			"Removing from dmabounce with pending buffers!\n");
		BUG();
	}

	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);

#ifdef STATS
	if (device_info->attr_res == 0)
		device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif

	kfree(device_info);

	dev_info(dev, "dmabounce: device unregistered\n");
}
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");
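
For orientation, here is a minimal sketch (not part of dmabounce.c) of how a bus or platform support file of this era might hook a device into dmabounce through the exported dmabounce_register_dev()/dmabounce_unregister_dev() API. Everything named example_* is hypothetical, and the 64 MB reachability limit and the 512/4096-byte pool sizes are made-up illustrative values; the assumption is that the prototypes come in through the ARM dma-mapping header when CONFIG_DMABOUNCE is enabled. Real in-tree users at this point are the SA-1111 and IXP4xx PCI support code.

/*
 * Hypothetical sketch: register a device whose bus master can only
 * address the first 64 MB of RAM, so any mapping that lands above
 * that window must be bounced into a safe buffer.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>	/* pulls in the ARM dmabounce prototypes */

#define EXAMPLE_DMA_LIMIT	0x04000000	/* 64 MB, made-up window size */

/* Bounce any mapping that ends beyond what the controller can reach. */
static int example_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
	return (addr + size) > EXAMPLE_DMA_LIMIT;
}

static int example_attach(struct device *dev)
{
	/*
	 * 512-byte and 4096-byte dma_pools back "small" and "large"
	 * mappings; anything larger falls through to dma_alloc_coherent()
	 * in alloc_safe_buffer() above.
	 */
	return dmabounce_register_dev(dev, 512, 4096, example_needs_bounce);
}

static void example_detach(struct device *dev)
{
	/* All outstanding mappings must be unmapped first, or BUG() fires. */
	dmabounce_unregister_dev(dev);
}

The pool sizes are a tuning decision: they should cover the device's common transfer sizes so that the GFP_ATOMIC dma_pool path, rather than dma_alloc_coherent(), services most bounces.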