#include <linux/device.h>
#include <linux/mm.h>
#include <asm/io.h>		/* Needed for i386 to build */
#include <asm/scatterlist.h>	/* Needed for i386 to build */
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/poison.h>

/*
 * Pool allocator ... wraps the dma_alloc_coherent page allocator, so
 * small blocks are easily used by drivers for bus mastering controllers.
 * This should probably be sharing the guts of the slab allocator.
 */

struct dma_pool {	/* the pool */
        struct list_head page_list;
        spinlock_t lock;
        size_t blocks_per_page;
        size_t size;
        struct device *dev;
        size_t allocation;
        char name [32];
        wait_queue_head_t waitq;
        struct list_head pools;
};

struct dma_page {	/* cacheable header for 'allocation' bytes */
        struct list_head page_list;
        void *vaddr;
        dma_addr_t dma;
        unsigned in_use;
        unsigned long bitmap [0];
};

#define POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

static DECLARE_MUTEX (pools_lock);

static ssize_t
show_pools (struct device *dev, struct device_attribute *attr, char *buf)
{
        unsigned temp;
        unsigned size;
        char *next;
        struct dma_page *page;
        struct dma_pool *pool;

        next = buf;
        size = PAGE_SIZE;

        temp = scnprintf(next, size, "poolinfo - 0.1\n");
        size -= temp;
        next += temp;

        down (&pools_lock);
        list_for_each_entry(pool, &dev->dma_pools, pools) {
                unsigned pages = 0;
                unsigned blocks = 0;

                list_for_each_entry(page, &pool->page_list, page_list) {
                        pages++;
                        blocks += page->in_use;
                }

                /* per-pool info, no real statistics yet */
                temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
                                pool->name,
                                blocks, pages * pool->blocks_per_page,
                                pool->size, pages);
                size -= temp;
                next += temp;
        }
        up (&pools_lock);

        return PAGE_SIZE - size;
}
static DEVICE_ATTR (pools, S_IRUGO, show_pools, NULL);
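
/*
 * Illustrative sketch only, not used by this file: struct dma_page keeps
 * one bit of bookkeeping per block ("bit set == free"), so a pool carving
 * 'size'-byte blocks out of 'allocation'-byte chunks needs allocation/size
 * bits per page, rounded up to whole longs.  For 64-byte blocks in a
 * 4096-byte allocation that is 64 bits, i.e. a single long on a 64-bit
 * machine.  The hypothetical helper below just restates that arithmetic.
 */
static inline size_t example_bitmap_longs (size_t allocation, size_t size)
{
        size_t blocks = allocation / size;	/* one bit per block */

        return (blocks + BITS_PER_LONG - 1) / BITS_PER_LONG;
}
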
/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created. Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory. Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives. The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If allocation is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary. This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *
dma_pool_create (const char *name, struct device *dev,
        size_t size, size_t align, size_t allocation)
{
        struct dma_pool *retval;

        if (align == 0)
                align = 1;
        if (size == 0)
                return NULL;
        else if (size < align)
                size = align;
        else if ((size % align) != 0) {
                size += align + 1;
                size &= ~(align - 1);
        }

        if (allocation == 0) {
                if (PAGE_SIZE < size)
                        allocation = size;
                else
                        allocation = PAGE_SIZE;
                // FIXME: round up for less fragmentation
        } else if (allocation < size)
                return NULL;

        if (!(retval = kmalloc (sizeof *retval, SLAB_KERNEL)))
                return retval;

        strlcpy (retval->name, name, sizeof retval->name);

        retval->dev = dev;

        INIT_LIST_HEAD (&retval->page_list);
        spin_lock_init (&retval->lock);
        retval->size = size;
        retval->allocation = allocation;
        retval->blocks_per_page = allocation / size;
        init_waitqueue_head (&retval->waitq);

        if (dev) {
                down (&pools_lock);
                if (list_empty (&dev->dma_pools))
                        device_create_file (dev, &dev_attr_pools);
                /* note: not currently insisting "name" be unique */
                list_add (&retval->pools, &dev->dma_pools);
                up (&pools_lock);
        } else
                INIT_LIST_HEAD (&retval->pools);

        return retval;
}


static struct dma_page *
pool_alloc_page (struct dma_pool *pool, gfp_t mem_flags)
{
        struct dma_page *page;
        int mapsize;

        mapsize = pool->blocks_per_page;
        mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
        mapsize *= sizeof (long);

        page = (struct dma_page *) kmalloc (mapsize + sizeof *page, mem_flags);
        if (!page)
                return NULL;
        page->vaddr = dma_alloc_coherent (pool->dev,
                                pool->allocation,
                                &page->dma,
                                mem_flags);
        if (page->vaddr) {
                memset (page->bitmap, 0xff, mapsize);	// bit set == free
#ifdef CONFIG_DEBUG_SLAB
                memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
                list_add (&page->page_list, &pool->page_list);
                page->in_use = 0;
        } else {
                kfree (page);
                page = NULL;
        }
        return page;
}


static inline int
is_page_busy (int blocks, unsigned long *bitmap)
{
        while (blocks > 0) {
                if (*bitmap++ != ~0UL)
                        return 1;
                blocks -= BITS_PER_LONG;
        }
        return 0;
}

static void
pool_free_page (struct dma_pool *pool, struct dma_page *page)
{
        dma_addr_t dma = page->dma;

#ifdef CONFIG_DEBUG_SLAB
        memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
        dma_free_coherent (pool->dev, pool->allocation, page->vaddr, dma);
        list_del (&page->page_list);
        kfree (page);
}
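
/*
 * Illustrative sketch only, not used by this file: a driver would
 * typically create one pool per descriptor type at probe time.  The
 * device pointer and the "example-desc" name are hypothetical; the
 * arguments say "32-byte blocks, 16-byte aligned, never crossing a
 * 4 KByte boundary".
 */
static inline struct dma_pool *example_create_pool (struct device *dev)
{
        return dma_pool_create ("example-desc", dev,
                        32 /* size */, 16 /* align */,
                        4096 /* allocation boundary */);
}
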
/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void
dma_pool_destroy (struct dma_pool *pool)
{
        down (&pools_lock);
        list_del (&pool->pools);
        if (pool->dev && list_empty (&pool->dev->dma_pools))
                device_remove_file (pool->dev, &dev_attr_pools);
        up (&pools_lock);

        while (!list_empty (&pool->page_list)) {
                struct dma_page *page;
                page = list_entry (pool->page_list.next,
                                struct dma_page, page_list);
                if (is_page_busy (pool->blocks_per_page, page->bitmap)) {
                        if (pool->dev)
                                dev_err(pool->dev, "dma_pool_destroy %s, %p busy\n",
                                        pool->name, page->vaddr);
                        else
                                printk (KERN_ERR "dma_pool_destroy %s, %p busy\n",
                                        pool->name, page->vaddr);
                        /* leak the still-in-use consistent memory */
                        list_del (&page->page_list);
                        kfree (page);
                } else
                        pool_free_page (pool, page);
        }

        kfree (pool);
}


/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, null is returned.
 */
void *
dma_pool_alloc (struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle)
{
        unsigned long flags;
        struct dma_page *page;
        int map, block;
        size_t offset;
        void *retval;

restart:
        spin_lock_irqsave (&pool->lock, flags);
        list_for_each_entry(page, &pool->page_list, page_list) {
                int i;
                /* only cacheable accesses here ... */
                for (map = 0, i = 0;
                                i < pool->blocks_per_page;
                                i += BITS_PER_LONG, map++) {
                        if (page->bitmap [map] == 0)
                                continue;
                        block = ffz (~ page->bitmap [map]);
                        if ((i + block) < pool->blocks_per_page) {
                                clear_bit (block, &page->bitmap [map]);
                                offset = (BITS_PER_LONG * map) + block;
                                offset *= pool->size;
                                goto ready;
                        }
                }
        }
        if (!(page = pool_alloc_page (pool, SLAB_ATOMIC))) {
                if (mem_flags & __GFP_WAIT) {
                        DECLARE_WAITQUEUE (wait, current);

                        current->state = TASK_INTERRUPTIBLE;
                        add_wait_queue (&pool->waitq, &wait);
                        spin_unlock_irqrestore (&pool->lock, flags);

                        schedule_timeout (POOL_TIMEOUT_JIFFIES);

                        remove_wait_queue (&pool->waitq, &wait);
                        goto restart;
                }
                retval = NULL;
                goto done;
        }

        clear_bit (0, &page->bitmap [0]);
        offset = 0;
ready:
        page->in_use++;
        retval = offset + page->vaddr;
        *handle = offset + page->dma;
#ifdef CONFIG_DEBUG_SLAB
        memset (retval, POOL_POISON_ALLOCATED, pool->size);
#endif
done:
        spin_unlock_irqrestore (&pool->lock, flags);
        return retval;
}


static struct dma_page *
pool_find_page (struct dma_pool *pool, dma_addr_t dma)
{
        unsigned long flags;
        struct dma_page *page;

        spin_lock_irqsave (&pool->lock, flags);
        list_for_each_entry(page, &pool->page_list, page_list) {
                if (dma < page->dma)
                        continue;
                if (dma < (page->dma + pool->allocation))
                        goto done;
        }
        page = NULL;
done:
        spin_unlock_irqrestore (&pool->lock, flags);
        return page;
}
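
/*
 * Illustrative sketch only, not used by this file: dma_pool_alloc() above
 * turns a free bit (word 'map', bit 'block') into a byte offset, and
 * dma_pool_free() below inverts that mapping from the dma address.  With
 * 32-byte blocks and 64-bit longs, bit 5 of word 2 is block 2*64+5 = 133,
 * so the offset is 133 * 32 = 4256; freeing page->dma + 4256 recovers
 * word 2, bit 5 again.  The hypothetical helpers below restate both
 * directions of that arithmetic.
 */
static inline size_t example_block_to_offset (int map, int block, size_t size)
{
        /* block index within the page, scaled by the block size */
        return ((size_t) BITS_PER_LONG * map + block) * size;
}

static inline void example_offset_to_block (size_t offset, size_t size,
                int *map, int *block)
{
        int index = offset / size;	/* block index within the page */

        *map = index / BITS_PER_LONG;
        *block = index % BITS_PER_LONG;
}
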
/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void
dma_pool_free (struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
        struct dma_page *page;
        unsigned long flags;
        int map, block;

        if ((page = pool_find_page (pool, dma)) == 0) {
                if (pool->dev)
                        dev_err(pool->dev, "dma_pool_free %s, %p/%lx (bad dma)\n",
                                pool->name, vaddr, (unsigned long) dma);
                else
                        printk (KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
                                pool->name, vaddr, (unsigned long) dma);
                return;
        }

        block = dma - page->dma;
        block /= pool->size;
        map = block / BITS_PER_LONG;
        block %= BITS_PER_LONG;

#ifdef CONFIG_DEBUG_SLAB
        if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
                if (pool->dev)
                        dev_err(pool->dev, "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
                                pool->name, vaddr, (unsigned long long) dma);
                else
                        printk (KERN_ERR "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
                                pool->name, vaddr, (unsigned long long) dma);
                return;
        }
        if (page->bitmap [map] & (1UL << block)) {
                if (pool->dev)
                        dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n",
                                pool->name, (unsigned long long)dma);
                else
                        printk (KERN_ERR "dma_pool_free %s, dma %Lx already free\n",
                                pool->name, (unsigned long long)dma);
                return;
        }
        memset (vaddr, POOL_POISON_FREED, pool->size);
#endif

        spin_lock_irqsave (&pool->lock, flags);
        page->in_use--;
        set_bit (block, &page->bitmap [map]);
        if (waitqueue_active (&pool->waitq))
                wake_up (&pool->waitq);
        /*
         * Resist a temptation to do
         *    if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
         * Better have a few empty pages hang around.
         */
        spin_unlock_irqrestore (&pool->lock, flags);
}


EXPORT_SYMBOL (dma_pool_create);
EXPORT_SYMBOL (dma_pool_destroy);
EXPORT_SYMBOL (dma_pool_alloc);
EXPORT_SYMBOL (dma_pool_free);
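
/*
 * Illustrative sketch only, not part of the pool implementation: the usual
 * lifecycle as a driver would see it, using only the four exported calls
 * above.  The device pointer, the "example" name, and example_use_pool()
 * itself are hypothetical; error handling is cut down to the minimum.
 */
static inline int example_use_pool (struct device *example_dev)
{
        struct dma_pool *pool;
        void *block;
        dma_addr_t example_dma;

        /* 32-byte blocks, 8-byte aligned, default allocation boundary */
        pool = dma_pool_create ("example", example_dev, 32, 8, 0);
        if (!pool)
                return -ENOMEM;

        block = dma_pool_alloc (pool, GFP_KERNEL, &example_dma);
        if (block) {
                /* ... hand example_dma to the device, wait for it ... */
                dma_pool_free (pool, block, example_dma);
        }

        dma_pool_destroy (pool);
        return block ? 0 : -ENOMEM;
}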