Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'dma' of http://git.linaro.org/git/people/nico/linux into devel-stable

+309 -293
+88 -115
arch/arm/common/dmabounce.c
···
 	struct dmabounce_pool	large;
 
 	rwlock_t lock;
+
+	int (*needs_bounce)(struct device *, dma_addr_t, size_t);
 };
 
 #ifdef STATS
···
 	if (!dev || !dev->archdata.dmabounce)
 		return NULL;
 	if (dma_mapping_error(dev, dma_addr)) {
-		if (dev)
-			dev_err(dev, "Trying to %s invalid mapping\n", where);
-		else
-			pr_err("unknown device: Trying to %s invalid mapping\n", where);
+		dev_err(dev, "Trying to %s invalid mapping\n", where);
 		return NULL;
 	}
 	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
+}
+
+static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+	if (!dev || !dev->archdata.dmabounce)
+		return 0;
+
+	if (dev->dma_mask) {
+		unsigned long limit, mask = *dev->dma_mask;
+
+		limit = (mask + 1) & ~mask;
+		if (limit && size > limit) {
+			dev_err(dev, "DMA mapping too big (requested %#x "
+				"mask %#Lx)\n", size, *dev->dma_mask);
+			return -E2BIG;
+		}
+
+		/* Figure out if we need to bounce from the DMA mask. */
+		if ((dma_addr | (dma_addr + size - 1)) & ~mask)
+			return 1;
+	}
+
+	return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
 }
 
 static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
 		enum dma_data_direction dir)
 {
 	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
-	dma_addr_t dma_addr;
-	int needs_bounce = 0;
+	struct safe_buffer *buf;
 
 	if (device_info)
 		DO_STATS ( device_info->map_op_count++ );
 
-	dma_addr = virt_to_dma(dev, ptr);
-
-	if (dev->dma_mask) {
-		unsigned long mask = *dev->dma_mask;
-		unsigned long limit;
-
-		limit = (mask + 1) & ~mask;
-		if (limit && size > limit) {
-			dev_err(dev, "DMA mapping too big (requested %#x "
-				"mask %#Lx)\n", size, *dev->dma_mask);
-			return ~0;
-		}
-
-		/*
-		 * Figure out if we need to bounce from the DMA mask.
-		 */
-		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
+	buf = alloc_safe_buffer(device_info, ptr, size, dir);
+	if (buf == NULL) {
+		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
+			__func__, ptr);
+		return ~0;
 	}
 
-	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
-		struct safe_buffer *buf;
+	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+		buf->safe, buf->safe_dma_addr);
 
-		buf = alloc_safe_buffer(device_info, ptr, size, dir);
-		if (buf == 0) {
-			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
-				__func__, ptr);
-			return ~0;
-		}
-
-		dev_dbg(dev,
-			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
-			buf->safe, buf->safe_dma_addr);
-
-		if ((dir == DMA_TO_DEVICE) ||
-		    (dir == DMA_BIDIRECTIONAL)) {
-			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
-				__func__, ptr, buf->safe, size);
-			memcpy(buf->safe, ptr, size);
-		}
-		ptr = buf->safe;
-
-		dma_addr = buf->safe_dma_addr;
-	} else {
-		/*
-		 * We don't need to sync the DMA buffer since
-		 * it was allocated via the coherent allocators.
-		 */
-		__dma_single_cpu_to_dev(ptr, size, dir);
+	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
+		dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
+			__func__, ptr, buf->safe, size);
+		memcpy(buf->safe, ptr, size);
 	}
 
-	return dma_addr;
+	return buf->safe_dma_addr;
 }
 
-static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
+static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
 		size_t size, enum dma_data_direction dir)
 {
-	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");
+	BUG_ON(buf->size != size);
+	BUG_ON(buf->direction != dir);
 
-	if (buf) {
-		BUG_ON(buf->size != size);
-		BUG_ON(buf->direction != dir);
+	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+		buf->safe, buf->safe_dma_addr);
 
-		dev_dbg(dev,
-			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
-			buf->safe, buf->safe_dma_addr);
+	DO_STATS(dev->archdata.dmabounce->bounce_count++);
 
-		DO_STATS(dev->archdata.dmabounce->bounce_count++);
+	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
+		void *ptr = buf->ptr;
 
-		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
-			void *ptr = buf->ptr;
+		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
+			__func__, buf->safe, ptr, size);
+		memcpy(ptr, buf->safe, size);
 
-			dev_dbg(dev,
-				"%s: copy back safe %p to unsafe %p size %d\n",
-				__func__, buf->safe, ptr, size);
-			memcpy(ptr, buf->safe, size);
-
-			/*
-			 * Since we may have written to a page cache page,
-			 * we need to ensure that the data will be coherent
-			 * with user mappings.
-			 */
-			__cpuc_flush_dcache_area(ptr, size);
-		}
-		free_safe_buffer(dev->archdata.dmabounce, buf);
-	} else {
-		__dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
+		/*
+		 * Since we may have written to a page cache page,
+		 * we need to ensure that the data will be coherent
+		 * with user mappings.
+		 */
+		__cpuc_flush_dcache_area(ptr, size);
 	}
+	free_safe_buffer(dev->archdata.dmabounce, buf);
 }
 
 /* ************************************************** */
···
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size,
-		enum dma_data_direction dir)
-{
-	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
-		__func__, ptr, size, dir);
-
-	BUG_ON(!valid_dma_direction(dir));
-
-	return map_single(dev, ptr, size, dir);
-}
-EXPORT_SYMBOL(__dma_map_single);
-
-/*
- * see if a mapped address was really a "safe" buffer and if so, copy
- * the data from the safe buffer back to the unsafe buffer and free up
- * the safe buffer.  (basically return things back to the way they
- * should be)
- */
-void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction dir)
-{
-	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
-		__func__, (void *) dma_addr, size, dir);
-
-	unmap_single(dev, dma_addr, size, dir);
-}
-EXPORT_SYMBOL(__dma_unmap_single);
-
 dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir)
 {
+	dma_addr_t dma_addr;
+	int ret;
+
 	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
 		__func__, page, offset, size, dir);
 
-	BUG_ON(!valid_dma_direction(dir));
+	dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;
+
+	ret = needs_bounce(dev, dma_addr, size);
+	if (ret < 0)
+		return ~0;
+
+	if (ret == 0) {
+		__dma_page_cpu_to_dev(page, offset, size, dir);
+		return dma_addr;
+	}
 
 	if (PageHighMem(page)) {
-		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages "
-			"is not supported\n");
+		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
 		return ~0;
 	}
···
 void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 		enum dma_data_direction dir)
 {
-	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
-		__func__, (void *) dma_addr, size, dir);
+	struct safe_buffer *buf;
 
-	unmap_single(dev, dma_addr, size, dir);
+	dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
+		__func__, dma_addr, size, dir);
+
+	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
+	if (!buf) {
+		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
+			dma_addr & ~PAGE_MASK, size, dir);
+		return;
+	}
+
+	unmap_single(dev, buf, size, dir);
 }
 EXPORT_SYMBOL(__dma_unmap_page);
···
 }
 
 int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
-		unsigned long large_buffer_size)
+		unsigned long large_buffer_size,
+		int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
 {
 	struct dmabounce_device_info *device_info;
 	int ret;
···
 	device_info->dev = dev;
 	INIT_LIST_HEAD(&device_info->safe_buffers);
 	rwlock_init(&device_info->lock);
+	device_info->needs_bounce = needs_bounce_fn;
 
 #ifdef STATS
 	device_info->total_allocs = 0;
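The hunks above replace the global dma_needs_bounce() hook with a per-device callback supplied at registration time, and pull the mask and segment-size checks into the core needs_bounce() helper (which returns -E2BIG for an oversized request before the callback is ever consulted). A minimal sketch of the new registration flow for a hypothetical platform; the example_* names are illustrative, not part of this commit:

	/* Bounce anything that ends at or above a 64MB window. */
	static int example_needs_bounce(struct device *dev, dma_addr_t dma_addr,
					size_t size)
	{
		/* A nonzero return means "bounce this buffer". */
		return (dma_addr + size) >= SZ_64M;
	}

	static int example_platform_notify(struct device *dev)
	{
		/* 2KB small pool, 4KB large pool, per-device callback. */
		return dmabounce_register_dev(dev, 2048, 4096,
					      example_needs_bounce);
	}

The it8152 and ixp4xx conversions further down follow exactly this pattern.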
+7 -9
arch/arm/common/it8152.c
···
  * ITE8152 chip can address up to 64MByte, so all the devices
  * connected to ITE8152 (PCI and USB) should have limited DMA window
  */
+static int it8152_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+	dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
+		__func__, dma_addr, size);
+	return (dma_addr + size - PHYS_OFFSET) >= SZ_64M;
+}
 
 /*
  * Setup DMA mask to 64MB on devices connected to ITE8152. Ignore all
···
 		if (dev->dma_mask)
 			*dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
 		dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
-		dmabounce_register_dev(dev, 2048, 4096);
+		dmabounce_register_dev(dev, 2048, 4096, it8152_needs_bounce);
 	}
 	return 0;
 }
···
 	dmabounce_unregister_dev(dev);
 
 	return 0;
-}
-
-int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
-{
-	dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
-		__func__, dma_addr, size);
-	return (dev->bus == &pci_bus_type) &&
-		((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
 }
 
 int dma_set_coherent_mask(struct device *dev, u64 mask)
+31 -29
arch/arm/common/sa1111.c
···
 
 	sachip->dev->coherent_dma_mask &= sa1111_dma_mask[drac >> 2];
 }
+#endif
 
+#ifdef CONFIG_DMABOUNCE
+/*
+ * According to the "Intel StrongARM SA-1111 Microprocessor Companion
+ * Chip Specification Update" (June 2000), erratum #7, there is a
+ * significant bug in the SA1111 SDRAM shared memory controller.  If
+ * an access to a region of memory above 1MB relative to the bank base,
+ * it is important that address bit 10 _NOT_ be asserted.  Depending
+ * on the configuration of the RAM, bit 10 may correspond to one
+ * of several different (processor-relative) address bits.
+ *
+ * This routine only identifies whether or not a given DMA address
+ * is susceptible to the bug.
+ *
+ * This should only get called for sa1111_device types due to the
+ * way we configure our device dma_masks.
+ */
+static int sa1111_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
+{
+	/*
+	 * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
+	 * User's Guide" mentions that jumpers R51 and R52 control the
+	 * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
+	 * SDRAM bank 1 on Neponset).  The default configuration selects
+	 * Assabet, so any address in bank 1 is necessarily invalid.
+	 */
+	return (machine_is_assabet() || machine_is_pfs168()) &&
+		(addr >= 0xc8000000 || (addr + size) >= 0xc8000000);
+}
 #endif
 
 static void sa1111_dev_release(struct device *_dev)
···
 	dev->dev.dma_mask = &dev->dma_mask;
 
 	if (dev->dma_mask != 0xffffffffUL) {
-		ret = dmabounce_register_dev(&dev->dev, 1024, 4096);
+		ret = dmabounce_register_dev(&dev->dev, 1024, 4096,
+				sa1111_needs_bounce);
 		if (ret) {
 			dev_err(&dev->dev, "SA1111: Failed to register"
 				" with dmabounce\n");
···
 	iounmap(sachip->base);
 	clk_put(sachip->clk);
 	kfree(sachip);
-}
-
-/*
- * According to the "Intel StrongARM SA-1111 Microprocessor Companion
- * Chip Specification Update" (June 2000), erratum #7, there is a
- * significant bug in the SA1111 SDRAM shared memory controller.  If
- * an access to a region of memory above 1MB relative to the bank base,
- * it is important that address bit 10 _NOT_ be asserted.  Depending
- * on the configuration of the RAM, bit 10 may correspond to one
- * of several different (processor-relative) address bits.
- *
- * This routine only identifies whether or not a given DMA address
- * is susceptible to the bug.
- *
- * This should only get called for sa1111_device types due to the
- * way we configure our device dma_masks.
- */
-int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
-{
-	/*
-	 * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
-	 * User's Guide" mentions that jumpers R51 and R52 control the
-	 * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
-	 * SDRAM bank 1 on Neponset).  The default configuration selects
-	 * Assabet, so any address in bank 1 is necessarily invalid.
-	 */
-	return ((machine_is_assabet() || machine_is_pfs168()) &&
-		(addr >= 0xc8000000 || (addr + size) >= 0xc8000000));
 }
 
 struct sa1111_save_data {
+13 -75
arch/arm/include/asm/dma-mapping.h
···
 	___dma_page_dev_to_cpu(page, off, size, dir);
 }
 
-/*
- * Return whether the given device DMA address mask can be supported
- * properly.  For example, if your device can only drive the low 24-bits
- * during bus mastering, then you would pass 0x00ffffff as the mask
- * to this function.
- *
- * FIXME: This should really be a platform specific issue - we should
- * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
- */
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-	if (mask < ISA_DMA_THRESHOLD)
-		return 0;
-	return 1;
-}
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-#ifdef CONFIG_DMABOUNCE
-	if (dev->archdata.dmabounce) {
-		if (dma_mask >= ISA_DMA_THRESHOLD)
-			return 0;
-		else
-			return -EIO;
-	}
-#endif
-	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-		return -EIO;
-
-	*dev->dma_mask = dma_mask;
-
-	return 0;
-}
+extern int dma_supported(struct device *, u64);
+extern int dma_set_mask(struct device *, u64);
 
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
···
  * @dev: valid struct device pointer
  * @small_buf_size: size of buffers to use with small buffer pool
  * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
+ * @needs_bounce_fn: called to determine whether buffer needs bouncing
  *
  * This function should be called by low-level platform code to register
  * a device as requireing DMA buffer bouncing. The function will allocate
  * appropriate DMA pools for the device.
- *
  */
 extern int dmabounce_register_dev(struct device *, unsigned long,
-		unsigned long);
+		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
 
 /**
  * dmabounce_unregister_dev
···
  */
 extern void dmabounce_unregister_dev(struct device *);
 
-/**
- * dma_needs_bounce
- *
- * @dev: valid struct device pointer
- * @dma_handle: dma_handle of unbounced buffer
- * @size: size of region being mapped
- *
- * Platforms that utilize the dmabounce mechanism must implement
- * this function.
- *
- * The dmabounce routines call this function whenever a dma-mapping
- * is requested to determine whether a given buffer needs to be bounced
- * or not. The function must return 0 if the buffer is OK for
- * DMA access and 1 if the buffer needs to be bounced.
- *
- */
-extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
-
 /*
  * The DMA API, implemented by dmabounce.c. See below for descriptions.
  */
-extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
-		enum dma_data_direction);
-extern void __dma_unmap_single(struct device *, dma_addr_t, size_t,
-		enum dma_data_direction);
 extern dma_addr_t __dma_map_page(struct device *, struct page *,
 		unsigned long, size_t, enum dma_data_direction);
 extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
···
 }
 
 
-static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
-		size_t size, enum dma_data_direction dir)
-{
-	__dma_single_cpu_to_dev(cpu_addr, size, dir);
-	return virt_to_dma(dev, cpu_addr);
-}
-
 static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir)
 {
 	__dma_page_cpu_to_dev(page, offset, size, dir);
 	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
-}
-
-static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
-{
-	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
 }
 
 static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
···
 static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 		size_t size, enum dma_data_direction dir)
 {
+	unsigned long offset;
+	struct page *page;
 	dma_addr_t addr;
 
+	BUG_ON(!virt_addr_valid(cpu_addr));
+	BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
 	BUG_ON(!valid_dma_direction(dir));
 
-	addr = __dma_map_single(dev, cpu_addr, size, dir);
-	debug_dma_map_page(dev, virt_to_page(cpu_addr),
-			(unsigned long)cpu_addr & ~PAGE_MASK, size,
-			dir, addr, true);
+	page = virt_to_page(cpu_addr);
+	offset = (unsigned long)cpu_addr & ~PAGE_MASK;
+	addr = __dma_map_page(dev, page, offset, size, dir);
+	debug_dma_map_page(dev, page, offset, size, dir, addr, true);
 
 	return addr;
 }
···
 		size_t size, enum dma_data_direction dir)
 {
 	debug_dma_unmap_page(dev, handle, size, dir, true);
-	__dma_unmap_single(dev, handle, size, dir);
+	__dma_unmap_page(dev, handle, size, dir);
 }
 
 /**
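With the header changes above, dma_map_single() is now implemented on top of __dma_map_page(), and the new BUG_ON(!virt_addr_valid(...)) checks reject vmalloc/highmem addresses instead of silently mis-mapping them. Driver-side usage is unchanged; a sketch, where buf and len are illustrative names:

	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;	/* the bounce path signals failure with ~0 */
	/* ... device performs DMA ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);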
+6 -5
arch/arm/include/asm/dma.h
···
 #ifndef __ASM_ARM_DMA_H
 #define __ASM_ARM_DMA_H
 
-#include <asm/memory.h>
-
 /*
  * This is the maximum virtual address which can be DMA'd from.
  */
-#ifndef ARM_DMA_ZONE_SIZE
-#define MAX_DMA_ADDRESS	0xffffffff
+#ifndef CONFIG_ZONE_DMA
+#define MAX_DMA_ADDRESS	0xffffffffUL
 #else
-#define MAX_DMA_ADDRESS	(PAGE_OFFSET + ARM_DMA_ZONE_SIZE)
+#define MAX_DMA_ADDRESS	({ \
+	extern unsigned long arm_dma_zone_size; \
+	arm_dma_zone_size ? \
+		(PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; })
 #endif
 
 #ifdef CONFIG_ISA_DMA_API
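Since one kernel image may now serve machines with different DMA zone sizes, MAX_DMA_ADDRESS becomes a GNU statement expression evaluated at run time rather than a compile-time constant. As a worked example, assuming the common PAGE_OFFSET of 0xc0000000: a machine declaring dma_zone_size = SZ_64M yields MAX_DMA_ADDRESS = 0xc0000000 + 0x04000000 = 0xc4000000, while a machine that leaves the size zero falls back to 0xffffffffUL.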
+4
arch/arm/include/asm/mach/arch.h
···
 
 	unsigned int		nr_irqs;	/* number of IRQs */
 
+#ifdef CONFIG_ZONE_DMA
+	unsigned long		dma_zone_size;	/* size of DMA-able area */
+#endif
+
 	unsigned int		video_start;	/* start of video RAM */
 	unsigned int		video_end;	/* end of video RAM */
 
-12
arch/arm/include/asm/memory.h
···
 #endif
 
 /*
- * The DMA mask corresponding to the maximum bus address allocatable
- * using GFP_DMA. The default here places no restriction on DMA
- * allocations. This must be the smallest DMA mask in the system,
- * so a successful GFP_DMA allocation will always satisfy this.
- */
-#ifndef ARM_DMA_ZONE_SIZE
-#define ISA_DMA_THRESHOLD	(0xffffffffULL)
-#else
-#define ISA_DMA_THRESHOLD	(PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1)
-#endif
-
-/*
  * PFNs are used to describe any physical page; this means
  * PFN 0 == physical address 0.
  *
+6
arch/arm/kernel/setup.c
···
 	cpu_init();
 	tcm_init();
 
+#ifdef CONFIG_ZONE_DMA
+	if (mdesc->dma_zone_size) {
+		extern unsigned long arm_dma_zone_size;
+		arm_dma_zone_size = mdesc->dma_zone_size;
+	}
+#endif
 #ifdef CONFIG_MULTI_IRQ_HANDLER
 	handle_arch_irq = mdesc->handle_irq;
 #endif
+1
arch/arm/mach-davinci/board-da830-evm.c
···
 	.init_irq	= cp_intc_init,
 	.timer		= &davinci_timer,
 	.init_machine	= da830_evm_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END
+1
arch/arm/mach-davinci/board-da850-evm.c
···
 	.init_irq	= cp_intc_init,
 	.timer		= &davinci_timer,
 	.init_machine	= da850_evm_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END
+1
arch/arm/mach-davinci/board-dm355-evm.c
···
 	.init_irq	= davinci_irq_init,
 	.timer		= &davinci_timer,
 	.init_machine	= dm355_evm_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END
+1
arch/arm/mach-davinci/board-dm355-leopard.c
···
 	.init_irq	= davinci_irq_init,
 	.timer		= &davinci_timer,
 	.init_machine	= dm355_leopard_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END
+1
arch/arm/mach-davinci/board-dm365-evm.c
···
 	.init_irq	= davinci_irq_init,
 	.timer		= &davinci_timer,
 	.init_machine	= dm365_evm_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END
 
+1
arch/arm/mach-davinci/board-dm644x-evm.c
···
 	.init_irq	= davinci_irq_init,
 	.timer		= &davinci_timer,
 	.init_machine	= davinci_evm_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END
+2
arch/arm/mach-davinci/board-dm646x-evm.c
···
 	.init_irq	= davinci_irq_init,
 	.timer		= &davinci_timer,
 	.init_machine	= evm_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END
 
 MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM")
···
 	.init_irq	= davinci_irq_init,
 	.timer		= &davinci_timer,
 	.init_machine	= evm_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END
 
+1
arch/arm/mach-davinci/board-mityomapl138.c
···
 	.init_irq	= cp_intc_init,
 	.timer		= &davinci_timer,
 	.init_machine	= mityomapl138_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END
+1
arch/arm/mach-davinci/board-neuros-osd2.c
···
 	.init_irq	= davinci_irq_init,
 	.timer		= &davinci_timer,
 	.init_machine	= davinci_ntosd2_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END
+1
arch/arm/mach-davinci/board-omapl138-hawk.c
···
 	.init_irq	= cp_intc_init,
 	.timer		= &davinci_timer,
 	.init_machine	= omapl138_hawk_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END
+1
arch/arm/mach-davinci/board-sffsdr.c
···
 	.init_irq	= davinci_irq_init,
 	.timer		= &davinci_timer,
 	.init_machine	= davinci_sffsdr_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END
+1
arch/arm/mach-davinci/board-tnetv107x-evm.c
···
 	.init_irq	= cp_intc_init,
 	.timer		= &davinci_timer,
 	.init_machine	= tnetv107x_evm_board_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END
-7
arch/arm/mach-davinci/include/mach/memory.h
···
  */
 #define CONSISTENT_DMA_SIZE (14<<20)
 
-/*
- * Restrict DMA-able region to workaround silicon bug. The bug
- * restricts buffers available for DMA to video hardware to be
- * below 128M
- */
-#define ARM_DMA_ZONE_SIZE	SZ_128M
-
 #endif /* __ASM_ARCH_MEMORY_H */
+1
arch/arm/mach-h720x/h7201-eval.c
···
 	.map_io		= h720x_map_io,
 	.init_irq	= h720x_init_irq,
 	.timer		= &h7201_timer,
+	.dma_zone_size	= SZ_256M,
 MACHINE_END
+1
arch/arm/mach-h720x/h7202-eval.c
···
 	.init_irq	= h7202_init_irq,
 	.timer		= &h7202_timer,
 	.init_machine	= init_eval_h7202,
+	.dma_zone_size	= SZ_256M,
 MACHINE_END
-7
arch/arm/mach-h720x/include/mach/memory.h
···
 #define __ASM_ARCH_MEMORY_H
 
 #define PLAT_PHYS_OFFSET	UL(0x40000000)
-/*
- * This is the maximum DMA address that can be DMAd to.
- * There should not be more than (0xd0000000 - 0xc0000000)
- * bytes of RAM.
- */
-#define ARM_DMA_ZONE_SIZE	SZ_256M
-
 #endif
+6
arch/arm/mach-ixp4xx/avila-setup.c
···
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= avila_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 
 /*
···
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= avila_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 #endif
 
+6 -6
arch/arm/mach-ixp4xx/common-pci.c
···
 }
 
 
+static int ixp4xx_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+	return (dma_addr + size) >= SZ_64M;
+}
+
 /*
  * Setup DMA mask to 64MB on PCI devices. Ignore all other devices.
  */
···
 	if(dev->bus == &pci_bus_type) {
 		*dev->dma_mask = SZ_64M - 1;
 		dev->coherent_dma_mask = SZ_64M - 1;
-		dmabounce_register_dev(dev, 2048, 4096);
+		dmabounce_register_dev(dev, 2048, 4096, ixp4xx_needs_bounce);
 	}
 	return 0;
 }
···
 		dmabounce_unregister_dev(dev);
 	}
 	return 0;
-}
-
-int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
-{
-	return (dev->bus == &pci_bus_type ) && ((dma_addr + size) >= SZ_64M);
 }
 
 void __init ixp4xx_pci_preinit(void)
+3
arch/arm/mach-ixp4xx/coyote-setup.c
···
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= coyote_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 #endif
 
+3
arch/arm/mach-ixp4xx/dsmg600-setup.c
···
 	.init_irq	= ixp4xx_init_irq,
 	.timer		= &dsmg600_timer,
 	.init_machine	= dsmg600_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
+3
arch/arm/mach-ixp4xx/fsg-setup.c
···
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= fsg_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 
+3
arch/arm/mach-ixp4xx/gateway7001-setup.c
···
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= gateway7001_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 #endif
+3
arch/arm/mach-ixp4xx/goramo_mlr.c
···
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= gmlr_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
+3
arch/arm/mach-ixp4xx/gtwx5715-setup.c
···
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= gtwx5715_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 
 
-4
arch/arm/mach-ixp4xx/include/mach/memory.h
···
  */
 #define PLAT_PHYS_OFFSET	UL(0x00000000)
 
-#ifdef CONFIG_PCI
-#define ARM_DMA_ZONE_SIZE	SZ_64M
-#endif
-
 #endif
+12
arch/arm/mach-ixp4xx/ixdp425-setup.c
···
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= ixdp425_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 #endif
 
···
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= ixdp425_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 #endif
 
···
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= ixdp425_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 #endif
 
···
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= ixdp425_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 #endif
+3
arch/arm/mach-ixp4xx/nas100d-setup.c
···
 	.init_irq	= ixp4xx_init_irq,
 	.timer		= &ixp4xx_timer,
 	.init_machine	= nas100d_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
+3
arch/arm/mach-ixp4xx/nslu2-setup.c
···
 	.init_irq	= ixp4xx_init_irq,
 	.timer		= &nslu2_timer,
 	.init_machine	= nslu2_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
+3
arch/arm/mach-ixp4xx/vulcan-setup.c
···
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= vulcan_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
+3
arch/arm/mach-ixp4xx/wg302v2-setup.c
···
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= wg302v2_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 #endif
+3
arch/arm/mach-pxa/cm-x2xx.c
···
 	.init_irq	= cmx2xx_init_irq,
 	.timer		= &pxa_timer,
 	.init_machine	= cmx2xx_init,
+#ifdef CONFIG_PCI
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
-4
arch/arm/mach-pxa/include/mach/memory.h
···
  */
 #define PLAT_PHYS_OFFSET	UL(0xa0000000)
 
-#if defined(CONFIG_MACH_ARMCORE) && defined(CONFIG_PCI)
-#define ARM_DMA_ZONE_SIZE	SZ_64M
-#endif
-
 #endif
-4
arch/arm/mach-realview/include/mach/memory.h
···
 #define PLAT_PHYS_OFFSET	UL(0x00000000)
 #endif
 
-#ifdef CONFIG_ZONE_DMA
-#define ARM_DMA_ZONE_SIZE	SZ_256M
-#endif
-
 #ifdef CONFIG_SPARSEMEM
 
 /*
+3
arch/arm/mach-realview/realview_eb.c
···
 	.init_irq	= gic_init_irq,
 	.timer		= &realview_eb_timer,
 	.init_machine	= realview_eb_init,
+#ifdef CONFIG_ZONE_DMA
+	.dma_zone_size	= SZ_256M,
+#endif
 MACHINE_END
+3
arch/arm/mach-realview/realview_pb1176.c
···
 	.init_irq	= gic_init_irq,
 	.timer		= &realview_pb1176_timer,
 	.init_machine	= realview_pb1176_init,
+#ifdef CONFIG_ZONE_DMA
+	.dma_zone_size	= SZ_256M,
+#endif
 MACHINE_END
+3
arch/arm/mach-realview/realview_pb11mp.c
···
 	.init_irq	= gic_init_irq,
 	.timer		= &realview_pb11mp_timer,
 	.init_machine	= realview_pb11mp_init,
+#ifdef CONFIG_ZONE_DMA
+	.dma_zone_size	= SZ_256M,
+#endif
 MACHINE_END
+3
arch/arm/mach-realview/realview_pba8.c
···
 	.init_irq	= gic_init_irq,
 	.timer		= &realview_pba8_timer,
 	.init_machine	= realview_pba8_init,
+#ifdef CONFIG_ZONE_DMA
+	.dma_zone_size	= SZ_256M,
+#endif
 MACHINE_END
+3
arch/arm/mach-realview/realview_pbx.c
···
 	.init_irq	= gic_init_irq,
 	.timer		= &realview_pbx_timer,
 	.init_machine	= realview_pbx_init,
+#ifdef CONFIG_ZONE_DMA
+	.dma_zone_size	= SZ_256M,
+#endif
 MACHINE_END
+3
arch/arm/mach-sa1100/assabet.c
···
 	.init_irq	= sa1100_init_irq,
 	.timer		= &sa1100_timer,
 	.init_machine	= assabet_init,
+#ifdef CONFIG_SA1111
+	.dma_zone_size	= SZ_1M,
+#endif
 MACHINE_END
+3
arch/arm/mach-sa1100/badge4.c
···
 	.map_io		= badge4_map_io,
 	.init_irq	= sa1100_init_irq,
 	.timer		= &sa1100_timer,
+#ifdef CONFIG_SA1111
+	.dma_zone_size	= SZ_1M,
+#endif
 MACHINE_END
-4
arch/arm/mach-sa1100/include/mach/memory.h
···
  */
 #define PLAT_PHYS_OFFSET	UL(0xc0000000)
 
-#ifdef CONFIG_SA1111
-#define ARM_DMA_ZONE_SIZE	SZ_1M
-#endif
-
 /*
  * Because of the wide memory address space between physical RAM banks on the
  * SA1100, it's much convenient to use Linux's SparseMEM support to implement
+3
arch/arm/mach-sa1100/jornada720.c
···
 	.init_irq	= sa1100_init_irq,
 	.timer		= &sa1100_timer,
 	.init_machine	= jornada720_mach_init,
+#ifdef CONFIG_SA1111
+	.dma_zone_size	= SZ_1M,
+#endif
 MACHINE_END
+1
arch/arm/mach-shark/core.c
···
 	.map_io		= shark_map_io,
 	.init_irq	= shark_init_irq,
 	.timer		= &shark_timer,
+	.dma_zone_size	= SZ_4M,
 MACHINE_END
-2
arch/arm/mach-shark/include/mach/memory.h
···
  */
 #define PLAT_PHYS_OFFSET	UL(0x08000000)
 
-#define ARM_DMA_ZONE_SIZE	SZ_4M
-
 /*
  * Cache flushing area
  */
+32 -3
arch/arm/mm/dma-mapping.c
···
 #include <asm/tlbflush.h>
 #include <asm/sizes.h>
 
+#include "mm.h"
+
 static u64 get_coherent_dma_mask(struct device *dev)
 {
-	u64 mask = ISA_DMA_THRESHOLD;
+	u64 mask = (u64)arm_dma_limit;
 
 	if (dev) {
 		mask = dev->coherent_dma_mask;
···
 			return 0;
 		}
 
-		if ((~mask) & ISA_DMA_THRESHOLD) {
+		if ((~mask) & (u64)arm_dma_limit) {
 			dev_warn(dev, "coherent DMA mask %#llx is smaller "
 				 "than system GFP_DMA mask %#llx\n",
-				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
+				 mask, (u64)arm_dma_limit);
 			return 0;
 		}
···
 	debug_dma_sync_sg_for_device(dev, sg, nents, dir);
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);
+
+/*
+ * Return whether the given device DMA address mask can be supported
+ * properly.  For example, if your device can only drive the low 24-bits
+ * during bus mastering, then you would pass 0x00ffffff as the mask
+ * to this function.
+ */
+int dma_supported(struct device *dev, u64 mask)
+{
+	if (mask < (u64)arm_dma_limit)
+		return 0;
+	return 1;
+}
+EXPORT_SYMBOL(dma_supported);
+
+int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+		return -EIO;
+
+#ifndef CONFIG_DMABOUNCE
+	*dev->dma_mask = dma_mask;
+#endif
+
+	return 0;
+}
+EXPORT_SYMBOL(dma_set_mask);
 
 #define PREALLOC_DMA_DEBUG_ENTRIES	4096
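dma_supported() and dma_set_mask() move out of line here so they can compare against the boot-time arm_dma_limit rather than the old compile-time ISA_DMA_THRESHOLD. Callers are unaffected; an illustrative driver-side sketch for a device limited to 26-bit addressing:

	if (dma_set_mask(dev, DMA_BIT_MASK(26)))
		dev_warn(dev, "no suitable DMA mask available\n");

Note the #ifndef CONFIG_DMABOUNCE guard: on dmabounce platforms the device's dma_mask is owned by the platform code that registered the device, so a successful dma_set_mask() deliberately leaves it untouched.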
+19 -7
arch/arm/mm/init.c
···
 }
 
 #ifdef CONFIG_ZONE_DMA
+
+unsigned long arm_dma_zone_size __read_mostly;
+EXPORT_SYMBOL(arm_dma_zone_size);
+
+/*
+ * The DMA mask corresponding to the maximum bus address allocatable
+ * using GFP_DMA. The default here places no restriction on DMA
+ * allocations. This must be the smallest DMA mask in the system,
+ * so a successful GFP_DMA allocation will always satisfy this.
+ */
+u32 arm_dma_limit;
+
 static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
 	unsigned long dma_size)
 {
···
 #endif
 	}
 
-#ifdef ARM_DMA_ZONE_SIZE
-#ifndef CONFIG_ZONE_DMA
-#error ARM_DMA_ZONE_SIZE set but no DMA zone to limit allocations
-#endif
-
+#ifdef CONFIG_ZONE_DMA
 	/*
 	 * Adjust the sizes according to any special requirements for
 	 * this machine type.
 	 */
-	arm_adjust_dma_zone(zone_size, zhole_size,
-		ARM_DMA_ZONE_SIZE >> PAGE_SHIFT);
+	if (arm_dma_zone_size) {
+		arm_adjust_dma_zone(zone_size, zhole_size,
+			arm_dma_zone_size >> PAGE_SHIFT);
+		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
+	} else
+		arm_dma_limit = 0xffffffff;
 #endif
 
 	free_area_init_node(0, zone_size, min, zhole_size);
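arm_dma_limit is now derived at boot from the machine's declared zone size. As a worked example using values from this same series: on an SA1111 machine, PLAT_PHYS_OFFSET is 0xc0000000 and dma_zone_size is SZ_1M, giving arm_dma_limit = 0xc0000000 + 0x00100000 - 1 = 0xc00fffff; a machine without a DMA zone restriction gets the permissive 0xffffffff.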
+6
arch/arm/mm/mm.h
···
 
 #endif
 
+#ifdef CONFIG_ZONE_DMA
+extern u32 arm_dma_limit;
+#else
+#define arm_dma_limit ((u32)~0)
+#endif
+
 void __init bootmem_init(void);
 void arm_mm_memblock_reserve(void);