Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

arch/arm/include/asm/dma-mapping.h at v2.6.37-rc6 (484 lines, 16 kB)
#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

/*
 * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
 * used internally by the DMA-mapping API to provide DMA addresses. They
 * must not be used by drivers.
 */
#ifndef __arch_page_to_dma
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
	return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
}

static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
{
	return pfn_to_page(__bus_to_pfn(addr));
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt(addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
	return __arch_page_to_dma(dev, page);
}

static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_page(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif

/*
 * The DMA API is built upon the notion of "buffer ownership". A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device. These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches. We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 * Private support functions: these are not part of the API and are
 * liable to change. Drivers must not use these.
 */
static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_cpu_to_dev(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_cpu_to_dev(kaddr, size, dir);
}

static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_dev_to_cpu(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_dev_to_cpu(kaddr, size, dir);
}

static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_cpu_to_dev(page, off, size, dir);
}

static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_dev_to_cpu(page, off, size, dir);
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 *
 * FIXME: This should really be a platform specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	if (mask < ISA_DMA_THRESHOLD)
		return 0;
	return 1;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_DMABOUNCE
	if (dev->archdata.dmabounce) {
		if (dma_mask >= ISA_DMA_THRESHOLD)
			return 0;
		else
			return -EIO;
	}
#endif
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == ~0;
}

/*
 * Dummy noncoherent implementation. We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * during and after this call executing are illegal.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);


/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
		gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);


#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem
 *
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 *
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long);

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 *
 */
extern void dmabounce_unregister_dev(struct device *);

/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not. The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 *
 */
extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);

/*
 * The DMA API, implemented by dmabounce.c. See below for descriptions.
 */
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
		enum dma_data_direction);
extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}


/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	__dma_single_cpu_to_dev(cpu_addr, size, dir);

	return virt_to_dma(dev, cpu_addr);
}

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.
 * The CPU can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	__dma_page_cpu_to_dev(page, offset, size, dir);

	return page_to_dma(dev, page) + offset;
}

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
}

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK,
		size, dir);
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to tear down the PCI dma
 * mapping, you must call this function before doing so. At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a dma_sync_single_for_device(), and then the
 * device again owns the buffer.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
		return;

	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
		return;

	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}

/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);


#endif /* __KERNEL__ */
#endif
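
The sketch below is not part of the header; it is a minimal illustration of the "buffer ownership" rules the comments above describe, assuming a hypothetical driver function example_do_transfer() with an invented buffer and transfer-completion step. It only uses calls declared in this header (dma_map_single, dma_mapping_error, dma_sync_single_for_cpu, dma_sync_single_for_device, dma_unmap_single); everything else is an assumption for illustration.

/*
 * Illustrative sketch only -- not part of the header above. A hypothetical
 * ARM platform driver following the ownership rules documented in this file:
 * the device owns the buffer between dma_map_single() and
 * dma_sync_single_for_cpu()/dma_unmap_single(); the CPU must hand it back
 * with dma_sync_single_for_device() before starting another transfer.
 */
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/slab.h>

static int example_do_transfer(struct device *dev, size_t len)
{
	void *buf;
	dma_addr_t handle;

	buf = kmalloc(len, GFP_KERNEL);	/* kmalloc memory is DMA-capable */
	if (!buf)
		return -ENOMEM;

	/* CPU -> device: clean the cache and hand the buffer to the device. */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle)) {
		kfree(buf);
		return -EIO;
	}

	/* ... program the hardware with 'handle' and wait for completion ... */

	/* Device -> CPU: the CPU may inspect the buffer again after this. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_TO_DEVICE);

	/* ... examine buf; before reusing the same mapping for another
	 * transfer, give ownership back to the device: */
	dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);

	/* ... second transfer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	kfree(buf);
	return 0;
}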