Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.26-rc9 (389 lines, 8.8 kB)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr)
{
        unsigned long addr = plat_dma_addr_to_phys(dma_addr);

        return (unsigned long)phys_to_virt(addr);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
        return !plat_device_is_coherent(dev) &&
               (current_cpu_type() == CPU_R10000 ||
                current_cpu_type() == CPU_R12000);
}

static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ZONE_DMA
        if (dev == NULL)
                gfp |= __GFP_DMA;
        else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
                gfp |= __GFP_DMA;
        else
#endif
#ifdef CONFIG_ZONE_DMA32
             if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
                gfp |= __GFP_DMA32;
        else
#endif
                ;

        /* Don't invoke OOM killer */
        gfp |= __GFP_NORETRY;

        return gfp;
}

void *dma_alloc_noncoherent(struct device *dev, size_t size,
        dma_addr_t * dma_handle, gfp_t gfp)
{
        void *ret;

        gfp = massage_gfp_flags(dev, gfp);

        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret != NULL) {
                memset(ret, 0, size);
                *dma_handle = plat_map_dma_mem(dev, ret, size);
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

void *dma_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t * dma_handle, gfp_t gfp)
{
        void *ret;

        gfp = massage_gfp_flags(dev, gfp);

        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret) {
                memset(ret, 0, size);
                *dma_handle = plat_map_dma_mem(dev, ret, size);

                if (!plat_device_is_coherent(dev)) {
                        dma_cache_wback_inv((unsigned long) ret, size);
                        ret = UNCAC_ADDR(ret);
                }
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        unsigned long addr = (unsigned long) vaddr;

        if (!plat_device_is_coherent(dev))
                addr = CAC_ADDR(addr);

        free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

static inline void __dma_sync(unsigned long addr, size_t size,
        enum dma_data_direction direction)
{
        switch (direction) {
        case DMA_TO_DEVICE:
                dma_cache_wback(addr, size);
                break;

        case DMA_FROM_DEVICE:
                dma_cache_inv(addr, size);
                break;

        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv(addr, size);
                break;

        default:
                BUG();
        }
}
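/*
 * Editor's sketch, not part of the original file: minimal use of the
 * coherent allocator above by a hypothetical driver ('dev' and the
 * PAGE_SIZE buffer are assumptions).  On a noncoherent platform the
 * pointer handed back is the uncached UNCAC_ADDR() alias, so CPU
 * accesses need no further cache maintenance.
 */
static int example_coherent_buffer(struct device *dev)
{
        dma_addr_t bus;
        void *cpu;

        cpu = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
        if (cpu == NULL)
                return -ENOMEM;

        /* program the device with 'bus'; the CPU uses 'cpu' */

        dma_free_coherent(dev, PAGE_SIZE, cpu, bus);
        return 0;
}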
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
        enum dma_data_direction direction)
{
        unsigned long addr = (unsigned long) ptr;

        if (!plat_device_is_coherent(dev))
                __dma_sync(addr, size, direction);

        return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
        enum dma_data_direction direction)
{
        if (cpu_is_noncoherent_r10000(dev))
                __dma_sync(dma_addr_to_virt(dma_addr), size,
                           direction);

        plat_unmap_dma_mem(dma_addr);
}

EXPORT_SYMBOL(dma_unmap_single);

int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                unsigned long addr;

                addr = (unsigned long) sg_virt(sg);
                if (!plat_device_is_coherent(dev) && addr)
                        __dma_sync(addr, sg->length, direction);
                sg->dma_address = plat_map_dma_mem(dev,
                                                   (void *)addr, sg->length);
        }

        return nents;
}

EXPORT_SYMBOL(dma_map_sg);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = (unsigned long) page_address(page) + offset;
                dma_cache_wback_inv(addr, size);
        }

        return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
                unsigned long addr;

                addr = plat_dma_addr_to_phys(dma_address);
                dma_cache_wback_inv(addr, size);
        }

        plat_unmap_dma_mem(dma_address);
}

EXPORT_SYMBOL(dma_unmap_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
        enum dma_data_direction direction)
{
        unsigned long addr;
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nhwentries; i++, sg++) {
                if (!plat_device_is_coherent(dev) &&
                    direction != DMA_TO_DEVICE) {
                        addr = (unsigned long) sg_virt(sg);
                        if (addr)
                                __dma_sync(addr, sg->length, direction);
                }
                plat_unmap_dma_mem(sg->dma_address);
        }
}

EXPORT_SYMBOL(dma_unmap_sg);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (cpu_is_noncoherent_r10000(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_for_device);
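/*
 * Editor's sketch, not part of the original file: the usual pattern
 * around the streaming calls above for one CPU-to-device transfer.
 * 'dev', 'buf' and 'len' are hypothetical.  dma_mapping_error() in
 * this tree always returns 0, so the check is purely defensive.
 */
static void example_stream_tx(struct device *dev, void *buf, size_t len)
{
        dma_addr_t bus;

        bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(bus))
                return;

        /* start the device on 'bus' and wait for completion */

        dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
}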
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (cpu_is_noncoherent_r10000(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr + offset, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr + offset, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++) {
                if (cpu_is_noncoherent_r10000(dev))
                        __dma_sync((unsigned long)page_address(sg_page(sg)),
                                   sg->length, direction);
                plat_unmap_dma_mem(sg->dma_address);
        }
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++) {
                if (!plat_device_is_coherent(dev))
                        __dma_sync((unsigned long)page_address(sg_page(sg)),
                                   sg->length, direction);
                plat_unmap_dma_mem(sg->dma_address);
        }
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(dma_addr_t dma_addr)
{
        return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
        /*
         * we fall back to GFP_DMA when the mask isn't all 1s,
         * so we can't guarantee allocations that must be
         * within a tighter range than GFP_DMA..
         */
        if (mask < DMA_BIT_MASK(24))
                return 0;

        return 1;
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
        return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev))
                __dma_sync((unsigned long)vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);
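For completeness, a usage sketch (an editor's addition, not from the file): mapping a scatter-gather list for a device-to-memory transfer with the dma_map_sg()/dma_unmap_sg() pair defined above. The driver function, 'dev' and the list are hypothetical; for_each_sg(), sg_dma_address() and sg_dma_len() come from <linux/scatterlist.h>, which this file already includes.

static void example_rx_sg(struct device *dev, struct scatterlist *sgl,
        int nents)
{
        struct scatterlist *sg;
        int i, mapped;

        /* performs cache maintenance per segment on noncoherent systems */
        mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);

        for_each_sg(sgl, sg, mapped, i) {
                dma_addr_t bus = sg_dma_address(sg);
                unsigned int len = sg_dma_len(sg);

                /* hand 'bus' and 'len' to one device descriptor here */
        }

        /* after the device signals completion */
        dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
}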