Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.33 (445 lines, 14 kB)
#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

/*
 * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
 * used internally by the DMA-mapping API to provide DMA addresses. They
 * must not be used by drivers.
 */
#ifndef __arch_page_to_dma
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
	return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
}

static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
{
	return pfn_to_page(__bus_to_pfn(addr));
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt(addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
	return __arch_page_to_dma(dev, page);
}

static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_page(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif

/*
 * DMA-consistent mapping functions. These allocate/free a region of
 * uncached, unbuffered mapped memory space for use with DMA
 * devices. This is the "generic" version. The PCI specific version
 * is in pci.h.
 *
 * Note: Drivers should NOT use these functions directly, as that will
 * break platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
extern void dma_cache_maint_page(struct page *page, unsigned long offset,
				 size_t size, int rw);

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 *
 * FIXME: This should really be a platform specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	if (mask < ISA_DMA_THRESHOLD)
		return 0;
	return 1;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
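
/*
 * Usage sketch (editor's addition, not part of the original header;
 * foo_probe and pdev are hypothetical). Echoing the 24-bit example
 * above, a driver negotiates its mask at probe time and bails out if
 * the platform cannot satisfy it:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		if (dma_set_mask(&pdev->dev, 0x00ffffff))
 *			return -EIO;
 *		...
 *	}
 */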

static inline int dma_get_cache_alignment(void)
{
	return 32;
}

static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
{
	return !!arch_is_coherent();
}

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == ~0;
}

/*
 * Dummy noncoherent implementation. We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to the memory and mappings associated with cpu_addr/handle
 * are illegal both while this call executes and after it returns.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
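
/*
 * Usage sketch (editor's addition; foo_dev, FOO_RING_BYTES and the
 * local variables are hypothetical). A typical coherent allocation,
 * e.g. for a descriptor ring shared with the device:
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(foo_dev, FOO_RING_BYTES,
 *				  &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *
 *	(program ring_dma into the device; the CPU uses "ring" directly,
 *	 with no dma_sync_* calls needed on a coherent buffer)
 *
 *	dma_free_coherent(foo_dev, FOO_RING_BYTES, ring, ring_dma);
 */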

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);


/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
		gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);


#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB
 * (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long);

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);

/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not. The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 */
extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
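
/*
 * Usage sketch (editor's addition; the 64MB window mirrors the IXP425
 * note above, while foo_platform_init and the 2KB/64KB pool sizes are
 * chosen only for illustration). Platform code supplies the bounce
 * policy, returning 1 for anything beyond the inbound window, and
 * registers the device:
 *
 *	int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
 *	{
 *		return (addr + size) > SZ_64M;
 *	}
 *
 *	static int foo_platform_init(struct device *dev)
 *	{
 *		return dmabounce_register_dev(dev, 2048, 65536);
 *	}
 */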

/*
 * The DMA API, implemented by dmabounce.c. See below for descriptions.
 */
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
		enum dma_data_direction);
extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}


/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	if (!arch_is_coherent())
		dma_cache_maint(cpu_addr, size, dir);

	return virt_to_dma(dev, cpu_addr);
}

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	if (!arch_is_coherent())
		dma_cache_maint_page(page, offset, size, dir);

	return page_to_dma(dev, page) + offset;
}
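
/*
 * Usage sketch (editor's addition; foo_dev, buf and len are
 * hypothetical). A single streaming mapping for a transfer the device
 * reads from memory (DMA_TO_DEVICE):
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single(foo_dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(foo_dev, dma))
 *		return -ENOMEM;
 *
 *	(hand "dma" to the device and wait for the transfer to finish)
 *
 *	dma_unmap_single(foo_dev, dma, len, DMA_TO_DEVICE);
 */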

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	/* nothing to do */
}

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	/* nothing to do */
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the PCI dma
 * mapping, you must call this function before doing so. At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a dma_sync_single_for_device(), and then the
 * device again owns the buffer.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
		return;

	if (!arch_is_coherent())
		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}

/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);


#endif /* __KERNEL__ */
#endif
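
/*
 * Usage sketch for the scatter-gather variants (editor's addition;
 * foo_dev, sgl, nents and foo_program_descriptor are hypothetical).
 * dma_map_sg() may return fewer entries than it was given, so the
 * device is programmed from the returned count, while dma_unmap_sg()
 * takes the original nents:
 *
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	count = dma_map_sg(foo_dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *
 *	for_each_sg(sgl, sg, count, i)
 *		foo_program_descriptor(sg_dma_address(sg), sg_dma_len(sg));
 *
 *	(after the device has filled the buffers)
 *
 *	dma_unmap_sg(foo_dev, sgl, nents, DMA_FROM_DEVICE);
 */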