[PATCH] powerpc: IOMMU support for honoring dma_mask

Some devices don't support the full 32-bit DMA address space that we currently
assume everywhere. Add the required mask-passing to the IOMMU allocators so that
allocations are constrained to what the device can actually address.

Signed-off-by: Olof Johansson <olof@lixom.net>
Signed-off-by: Paul Mackerras <paulus@samba.org>

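For illustration, a driver for such a device declares its addressing limit through
the existing DMA mask API before mapping anything; device_to_mask() below then picks
that value up and the IOMMU allocators honor it. A minimal sketch of a hypothetical
probe routine for a PCI device limited to 30-bit addresses (the 0x3fffffff mask and
the example_probe name are illustrative, not part of this patch):

#include <linux/pci.h>

/* Hypothetical probe for a device that can only address the low 30 bits;
 * the mask value is an example, not taken from this patch. */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* Declare the addressing limit; pci_iommu_dma_supported() checks it
	 * and device_to_mask() passes it down to the IOMMU allocators. */
	err = pci_set_dma_mask(pdev, 0x3fffffffULL);
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, 0x3fffffffULL);
	if (err) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		pci_disable_device(pdev);
		return err;
	}

	return 0;
}

Devices that never set a mask keep the 32-bit default that device_to_mask() falls
back to.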

4 files changed, 69 insertions(+), 20 deletions(-)

arch/powerpc/kernel/iommu.c (+26 -10)
···
 static unsigned long iommu_range_alloc(struct iommu_table *tbl,
 				       unsigned long npages,
 				       unsigned long *handle,
+				       unsigned long mask,
 				       unsigned int align_order)
 {
 	unsigned long n, end, i, start;
···
 	 */
 	if (start >= limit)
 		start = largealloc ? tbl->it_largehint : tbl->it_hint;
-	
+
 again:
 
+	if (limit + tbl->it_offset > mask) {
+		limit = mask - tbl->it_offset + 1;
+		/* If we're constrained on address range, first try
+		 * at the masked hint to avoid O(n) search complexity,
+		 * but on second pass, start at 0.
+		 */
+		if ((start & mask) >= limit || pass > 0)
+			start = 0;
+		else
+			start &= mask;
+	}
 
 	n = find_next_zero_bit(tbl->it_map, limit, start);
 
···
 
 static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
 		unsigned int npages, enum dma_data_direction direction,
-		unsigned int align_order)
+		unsigned long mask, unsigned int align_order)
 {
 	unsigned long entry, flags;
 	dma_addr_t ret = DMA_ERROR_CODE;
-	
+
 	spin_lock_irqsave(&(tbl->it_lock), flags);
 
-	entry = iommu_range_alloc(tbl, npages, NULL, align_order);
+	entry = iommu_range_alloc(tbl, npages, NULL, mask, align_order);
 
 	if (unlikely(entry == DMA_ERROR_CODE)) {
 		spin_unlock_irqrestore(&(tbl->it_lock), flags);
···
 
 int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 		struct scatterlist *sglist, int nelems,
-		enum dma_data_direction direction)
+		unsigned long mask, enum dma_data_direction direction)
 {
 	dma_addr_t dma_next = 0, dma_addr;
 	unsigned long flags;
···
 		vaddr = (unsigned long)page_address(s->page) + s->offset;
 		npages = PAGE_ALIGN(vaddr + slen) - (vaddr & PAGE_MASK);
 		npages >>= PAGE_SHIFT;
-		entry = iommu_range_alloc(tbl, npages, &handle, 0);
+		entry = iommu_range_alloc(tbl, npages, &handle, mask >> PAGE_SHIFT, 0);
 
 		DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
 
···
  * byte within the page as vaddr.
  */
 dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
-		size_t size, enum dma_data_direction direction)
+		size_t size, unsigned long mask,
+		enum dma_data_direction direction)
 {
 	dma_addr_t dma_handle = DMA_ERROR_CODE;
 	unsigned long uaddr;
···
 	npages >>= PAGE_SHIFT;
 
 	if (tbl) {
-		dma_handle = iommu_alloc(tbl, vaddr, npages, direction, 0);
+		dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
+					 mask >> PAGE_SHIFT, 0);
 		if (dma_handle == DMA_ERROR_CODE) {
 			if (printk_ratelimit()) {
 				printk(KERN_INFO "iommu_alloc failed, "
···
  * to the dma address (mapping) of the first page.
  */
 void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
-		dma_addr_t *dma_handle, gfp_t flag)
+		dma_addr_t *dma_handle, unsigned long mask, gfp_t flag)
 {
 	void *ret = NULL;
 	dma_addr_t mapping;
···
 	memset(ret, 0, size);
 
 	/* Set up tces to cover the allocated range */
-	mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL, order);
+	mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL,
+			      mask >> PAGE_SHIFT, order);
 	if (mapping == DMA_ERROR_CODE) {
 		free_pages((unsigned long)ret, order);
 		ret = NULL;
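One unit detail worth calling out: iommu_range_alloc() and iommu_alloc() operate on
IOMMU-page numbers, which is why every caller above passes mask >> PAGE_SHIFT and why
the new check compares the shifted mask against tbl->it_offset. A standalone sketch of
the clamping with illustrative numbers (a simplified model of the hunk above, not
kernel code):

#include <stdio.h>

/* Simplified model of the clamping added to iommu_range_alloc() above.
 * "mask" is the device DMA mask already shifted right by PAGE_SHIFT, and
 * it_offset/limit/start are in IOMMU-page units, as in the kernel code. */
static unsigned long clamp_limit(unsigned long limit, unsigned long it_offset,
				 unsigned long mask, unsigned long *start,
				 int pass)
{
	if (limit + it_offset > mask) {
		limit = mask - it_offset + 1;
		/* Constrained: keep the masked hint on the first pass,
		 * fall back to 0 on later passes. */
		if ((*start & mask) >= limit || pass > 0)
			*start = 0;
		else
			*start &= mask;
	}
	return limit;
}

int main(void)
{
	/* Example: 30-bit-capable device, 4K IOMMU pages, table offset 0. */
	unsigned long mask = 0x3fffffffUL >> 12;	/* bytes -> pages */
	unsigned long start = 0x12345, limit = 0x200000;

	limit = clamp_limit(limit, 0, mask, &start, 0);
	printf("limit=%#lx start=%#lx\n", limit, start);	/* limit=0x40000 start=0x12345 */
	return 0;
}

With a 30-bit mask and 4K pages the usable window shrinks to 0x40000 entries, and the
allocation hint survives the first pass as long as it already falls inside that window.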
arch/powerpc/kernel/pci_iommu.c (+36 -4)
···
 }
 
 
+static inline unsigned long device_to_mask(struct device *hwdev)
+{
+	struct pci_dev *pdev;
+
+	if (!hwdev) {
+		pdev = ppc64_isabridge_dev;
+		if (!pdev) /* This is the best guess we can do */
+			return 0xfffffffful;
+	} else
+		pdev = to_pci_dev(hwdev);
+
+	if (pdev->dma_mask)
+		return pdev->dma_mask;
+
+	/* Assume devices without mask can take 32 bit addresses */
+	return 0xfffffffful;
+}
+
+
 /* Allocates a contiguous real buffer and creates mappings over it.
  * Returns the virtual address of the buffer and sets dma_handle
  * to the dma address (mapping) of the first page.
···
 			dma_addr_t *dma_handle, gfp_t flag)
 {
 	return iommu_alloc_coherent(devnode_table(hwdev), size, dma_handle,
-			flag);
+			device_to_mask(hwdev), flag);
 }
 
 static void pci_iommu_free_coherent(struct device *hwdev, size_t size,
···
 static dma_addr_t pci_iommu_map_single(struct device *hwdev, void *vaddr,
 		size_t size, enum dma_data_direction direction)
 {
-	return iommu_map_single(devnode_table(hwdev), vaddr, size, direction);
+	return iommu_map_single(devnode_table(hwdev), vaddr, size,
+				device_to_mask(hwdev), direction);
 }
 
 
···
 		int nelems, enum dma_data_direction direction)
 {
 	return iommu_map_sg(pdev, devnode_table(pdev), sglist,
-			nelems, direction);
+			nelems, device_to_mask(pdev), direction);
 }
 
 static void pci_iommu_unmap_sg(struct device *pdev, struct scatterlist *sglist,
···
 /* We support DMA to/from any memory page via the iommu */
 static int pci_iommu_dma_supported(struct device *dev, u64 mask)
 {
-	return 1;
+	struct iommu_table *tbl = devnode_table(dev);
+
+	if (!tbl || tbl->it_offset > mask) {
+		printk(KERN_INFO "Warning: IOMMU table offset too big for device mask\n");
+		if (tbl)
+			printk(KERN_INFO "mask: 0x%08lx, table offset: 0x%08lx\n",
+				mask, tbl->it_offset);
+		else
+			printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
+				mask);
+		return 0;
+	} else
+		return 1;
 }
 
 void pci_iommu_init(void)
arch/powerpc/kernel/vio.c (+3 -3)
···
 		size_t size, enum dma_data_direction direction)
 {
 	return iommu_map_single(to_vio_dev(dev)->iommu_table, vaddr, size,
-			direction);
+			~0ul, direction);
 }
 
 static void vio_unmap_single(struct device *dev, dma_addr_t dma_handle,
···
 		int nelems, enum dma_data_direction direction)
 {
 	return iommu_map_sg(dev, to_vio_dev(dev)->iommu_table, sglist,
-			nelems, direction);
+			nelems, ~0ul, direction);
 }
 
 static void vio_unmap_sg(struct device *dev, struct scatterlist *sglist,
···
 		dma_addr_t *dma_handle, gfp_t flag)
 {
 	return iommu_alloc_coherent(to_vio_dev(dev)->iommu_table, size,
-			dma_handle, flag);
+			dma_handle, ~0ul, flag);
 }
 
 static void vio_free_coherent(struct device *dev, size_t size,
include/asm-powerpc/iommu.h (+4 -3)
···
 extern struct iommu_table *iommu_init_table(struct iommu_table * tbl);
 
 extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
-		struct scatterlist *sglist, int nelems,
+		struct scatterlist *sglist, int nelems, unsigned long mask,
 		enum dma_data_direction direction);
 extern void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 		int nelems, enum dma_data_direction direction);
 
 extern void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
-		dma_addr_t *dma_handle, gfp_t flag);
+		dma_addr_t *dma_handle, unsigned long mask, gfp_t flag);
 extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
 		void *vaddr, dma_addr_t dma_handle);
 extern dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
-		size_t size, enum dma_data_direction direction);
+		size_t size, unsigned long mask,
+		enum dma_data_direction direction);
 extern void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
 		size_t size, enum dma_data_direction direction);