Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

microblaze: Fix mmap for cache coherent memory

When running in non-cache coherent configuration the memory that was
allocated with dma_alloc_coherent() has a custom mapping and so there is no
1-to-1 relationship between the kernel virtual address and the PFN. This
means that virt_to_pfn() will not work correctly for those addresses and the
default mmap implementation in the form of dma_common_mmap() will map some
random, but not the requested, memory area.

Fix this by providing a custom mmap implementation that looks up the PFN
from the page tables rather than using virt_to_pfn().

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Michal Simek <michal.simek@xilinx.com>

Authored by Lars-Peter Clausen, committed by Michal Simek
3a8e3265 b2776bf7

+48 -5
+1
arch/microblaze/include/asm/pgtable.h
··· 565 565 void consistent_sync(void *vaddr, size_t size, int direction);
566 566 void consistent_sync_page(struct page *page, unsigned long offset,
567 567 size_t size, int direction);
568 + unsigned long consistent_virt_to_pfn(void *vaddr); /* PFN for a consistent_alloc() address, resolved via a page-table walk */
568 569
569 570 void setup_memory(void);
570 571 #endif /* __ASSEMBLY__ */
+27
arch/microblaze/kernel/dma.c
··· 154 154 __dma_sync(sg->dma_address, sg->length, direction);
155 155 }
156 156
/* Custom .mmap for dma_alloc_coherent() buffers: on non-cache-coherent
 * configurations the buffer has a custom kernel mapping with no 1:1
 * virtual/physical relationship, so the PFN must be looked up from the
 * page tables instead of computed with virt_to_pfn(). */
157 + int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
158 + void *cpu_addr, dma_addr_t handle, size_t size,
159 + struct dma_attrs *attrs)
160 + {
161 + #ifdef CONFIG_MMU
162 + unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; /* pages userspace asked to map */
163 + unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; /* pages backing the buffer */
164 + unsigned long off = vma->vm_pgoff;
165 + unsigned long pfn;
166 +
167 + if (off >= count || user_count > (count - off)) /* request must fit inside the buffer */
168 + return -ENXIO;
169 +
170 + #ifdef NOT_COHERENT_CACHE
171 + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); /* userspace mapping must be uncached too */
172 + pfn = consistent_virt_to_pfn(cpu_addr); /* custom mapping: walk the page tables */
173 + #else
174 + pfn = virt_to_pfn(cpu_addr); /* coherent case: direct translation is valid */
175 + #endif
176 + return remap_pfn_range(vma, vma->vm_start, pfn + off,
177 + vma->vm_end - vma->vm_start, vma->vm_page_prot);
178 + #else
179 + return -ENXIO; /* no MMU: nothing to remap */
180 + #endif
181 + }
182 +
157 183 struct dma_map_ops dma_direct_ops = {
158 184 .alloc = dma_direct_alloc_coherent,
159 185 .free = dma_direct_free_coherent,
186 + .mmap = dma_direct_mmap_coherent, /* install the custom mmap handler */
160 187 .map_sg = dma_direct_map_sg,
161 188 .dma_supported = dma_direct_dma_supported,
162 189 .map_page = dma_direct_map_page,
+20 -5
arch/microblaze/mm/consistent.c
··· 156 156 }
157 157 EXPORT_SYMBOL(consistent_alloc);
158 158
159 + #ifdef CONFIG_MMU
/* Walk the kernel page tables down to the PTE backing a
 * consistent_alloc() virtual address. */
160 + static pte_t *consistent_virt_to_pte(void *vaddr)
161 + {
162 + unsigned long addr = (unsigned long)vaddr;
163 +
164 + return pte_offset_kernel(pmd_offset(pgd_offset_k(addr), addr), addr);
165 + }
166 +
/* Return the PFN for a consistent_alloc() address, or 0 when no page
 * is currently mapped at that address. */
167 + unsigned long consistent_virt_to_pfn(void *vaddr)
168 + {
169 + pte_t *ptep = consistent_virt_to_pte(vaddr);
170 +
171 + if (pte_none(*ptep) || !pte_present(*ptep))
172 + return 0;
173 +
174 + return pte_pfn(*ptep);
175 + }
176 + #endif
177 +
159 178 /*
160 179  * free page(s) as defined by the above mapping.
161 180  */
··· 200 181 } while (size -= PAGE_SIZE);
201 182 #else
202 183 do {
203 - pte_t *ptep; 184 + pte_t *ptep = consistent_virt_to_pte(vaddr); /* free path now reuses the shared page-table walk */
204 185 unsigned long pfn;
205 186
206 - ptep = pte_offset_kernel(pmd_offset(pgd_offset_k( 207 - (unsigned int)vaddr), 208 - (unsigned int)vaddr), 209 - (unsigned int)vaddr);
210 187 if (!pte_none(*ptep) && pte_present(*ptep)) {
211 188 pfn = pte_pfn(*ptep);
212 189 pte_clear(&init_mm, (unsigned int)vaddr, ptep);