Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
arch/powerpc/include/asm/dma-mapping.h at v4.10-rc1 (165 lines, 4.7 kB)
/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device dma API for powerpc.
 * the pci and vio busses
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

#ifdef CONFIG_PPC64
#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
#endif

/* Some dma direct funcs must be visible for use in other dma_ops */
extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
					 dma_addr_t *dma_handle, gfp_t flag,
					 unsigned long attrs);
extern void __dma_direct_free_coherent(struct device *dev, size_t size,
				       void *vaddr, dma_addr_t dma_handle,
				       unsigned long attrs);
extern int dma_direct_mmap_coherent(struct device *dev,
				    struct vm_area_struct *vma,
				    void *cpu_addr, dma_addr_t handle,
				    size_t size, unsigned long attrs);

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
struct device;
extern void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
			    size_t size, int direction);
extern unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr);

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(dev, gfp, size, handle)	NULL
#define __dma_free_coherent(size, addr)		((void)0)
#define __dma_sync(addr, size, rw)		((void)0)
#define __dma_sync_page(pg, off, sz, rw)	((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

static inline unsigned long device_to_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;

	/* Assume devices without mask can take 32 bit addresses */
	return 0xfffffffful;
}

/*
 * Available generic sets of operations
 */
#ifdef CONFIG_PPC64
extern struct dma_map_ops dma_iommu_ops;
#endif
extern struct dma_map_ops dma_direct_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out of line call but it is not needed for now. The
	 * only ISA DMA device we support is the floppy and we have a hack
	 * in the floppy driver directly to get a device for us.
	 */
	if (unlikely(dev == NULL))
		return NULL;

	return dev->archdata.dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	dev->archdata.dma_ops = ops;
}

/*
 * get_dma_offset()
 *
 * Get the dma offset on configurations where the dma address can be determined
 * from the physical address by looking at a simple offset.  Direct dma and
 * swiotlb use this function, but it is typically not used by implementations
 * with an iommu.
 */
static inline dma_addr_t get_dma_offset(struct device *dev)
{
	if (dev)
		return dev->archdata.dma_offset;

	return PCI_DRAM_OFFSET;
}

static inline void set_dma_offset(struct device *dev, dma_addr_t off)
{
	if (dev)
		dev->archdata.dma_offset = off;
}

/* this will be removed soon */
#define flush_write_buffers()

#define HAVE_ARCH_DMA_SET_MASK 1
extern int dma_set_mask(struct device *dev, u64 dma_mask);

extern int __dma_set_mask(struct device *dev, u64 dma_mask);
extern u64 __dma_get_required_mask(struct device *dev);

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
#ifdef CONFIG_SWIOTLB
	struct dev_archdata *sd = &dev->archdata;

	if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
		return false;
#endif

	if (!dev->dma_mask)
		return false;

	return addr + size - 1 <= *dev->dma_mask;
}

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr + get_dma_offset(dev);
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr - get_dma_offset(dev);
}

#define ARCH_HAS_DMA_MMAP_COHERENT

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */
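The get_dma_offset() comment above describes the simple-offset translation used by direct DMA and swiotlb: phys_to_dma() adds the per-device offset to a physical address and dma_to_phys() subtracts it again. The standalone C sketch below mirrors that arithmetic outside the kernel, purely as illustration; fake_device, DEMO_DRAM_OFFSET and the demo_* helpers are made-up names standing in for dev->archdata.dma_offset and PCI_DRAM_OFFSET, not kernel APIs.

/* Userspace sketch of the simple-offset DMA address translation above.
 * All names here are illustrative stand-ins for the kernel's versions.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;
typedef uint64_t phys_addr_t;

#define DEMO_DRAM_OFFSET 0x80000000ull	/* stand-in for PCI_DRAM_OFFSET */

struct fake_device {
	dma_addr_t dma_offset;		/* stand-in for dev->archdata.dma_offset */
};

static dma_addr_t demo_get_dma_offset(const struct fake_device *dev)
{
	if (dev)
		return dev->dma_offset;
	return DEMO_DRAM_OFFSET;	/* fallback, as in get_dma_offset() */
}

static dma_addr_t demo_phys_to_dma(const struct fake_device *dev, phys_addr_t paddr)
{
	return paddr + demo_get_dma_offset(dev);	/* same arithmetic as phys_to_dma() */
}

static phys_addr_t demo_dma_to_phys(const struct fake_device *dev, dma_addr_t daddr)
{
	return daddr - demo_get_dma_offset(dev);	/* same arithmetic as dma_to_phys() */
}

int main(void)
{
	struct fake_device dev = { .dma_offset = 0x100000000ull };
	phys_addr_t paddr = 0x2000;
	dma_addr_t daddr = demo_phys_to_dma(&dev, paddr);

	/* The translation round-trips back to the original physical address. */
	printf("phys 0x%llx -> dma 0x%llx -> phys 0x%llx\n",
	       (unsigned long long)paddr,
	       (unsigned long long)daddr,
	       (unsigned long long)demo_dma_to_phys(&dev, daddr));
	return 0;
}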