[SPARC64]: IOMMU allocations using iommu-helper layer.

Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by David S. Miller, committed by David S. Miller
d284142c 19814ea2

5 files changed, 112 insertions(+), 110 deletions(-)
arch/sparc64/Kconfig  (+4)
···
         bool
         default y
 
+config IOMMU_HELPER
+        bool
+        default y
+
 config QUICKLIST
         bool
         default y
arch/sparc64/kernel/iommu.c  (+81 -44)
···
 /* iommu.c: Generic sparc64 IOMMU support.
  *
- * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
  * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
  */
···
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/errno.h>
+#include <linux/iommu-helper.h>
 
 #ifdef CONFIG_PCI
 #include <linux/pci.h>
···
         "i" (ASI_PHYS_BYPASS_EC_E))
 
 /* Must be invoked under the IOMMU lock. */
-static void __iommu_flushall(struct iommu *iommu)
+static void iommu_flushall(struct iommu *iommu)
 {
         if (iommu->iommu_flushinv) {
                 iommu_write(iommu->iommu_flushinv, ~(u64)0);
···
         iopte_val(*iopte) = val;
 }
 
-/* Based largely upon the ppc64 iommu allocator. */
-static long arena_alloc(struct iommu *iommu, unsigned long npages)
+/* Based almost entirely upon the ppc64 iommu allocator.  If you use the 'handle'
+ * facility it must all be done in one pass while under the iommu lock.
+ *
+ * On sun4u platforms, we only flush the IOMMU once every time we've passed
+ * over the entire page table doing allocations.  Therefore we only ever advance
+ * the hint and cannot backtrack it.
+ */
+unsigned long iommu_range_alloc(struct device *dev,
+                                struct iommu *iommu,
+                                unsigned long npages,
+                                unsigned long *handle)
 {
+        unsigned long n, end, start, limit, boundary_size;
         struct iommu_arena *arena = &iommu->arena;
-        unsigned long n, i, start, end, limit;
-        int pass;
+        int pass = 0;
+
+        /* This allocator was derived from x86_64's bit string search */
+
+        /* Sanity check */
+        if (unlikely(npages == 0)) {
+                if (printk_ratelimit())
+                        WARN_ON(1);
+                return DMA_ERROR_CODE;
+        }
+
+        if (handle && *handle)
+                start = *handle;
+        else
+                start = arena->hint;
 
         limit = arena->limit;
-        start = arena->hint;
-        pass = 0;
 
-again:
-        n = find_next_zero_bit(arena->map, limit, start);
-        end = n + npages;
-        if (unlikely(end >= limit)) {
+        /* The case below can happen if we have a small segment appended
+         * to a large, or when the previous alloc was at the very end of
+         * the available space.  If so, go back to the beginning and flush.
+         */
+        if (start >= limit) {
+                start = 0;
+                if (iommu->flush_all)
+                        iommu->flush_all(iommu);
+        }
+
+again:
+
+        if (dev)
+                boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+                                      1 << IO_PAGE_SHIFT);
+        else
+                boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);
+
+        n = iommu_area_alloc(arena->map, limit, start, npages, 0,
+                             boundary_size >> IO_PAGE_SHIFT, 0);
+        if (n == -1) {
                 if (likely(pass < 1)) {
-                        limit = start;
+                        /* First failure, rescan from the beginning.  */
                         start = 0;
-                        __iommu_flushall(iommu);
+                        if (iommu->flush_all)
+                                iommu->flush_all(iommu);
                         pass++;
                         goto again;
                 } else {
-                        /* Scanned the whole thing, give up. */
-                        return -1;
+                        /* Second failure, give up */
+                        return DMA_ERROR_CODE;
                 }
         }
 
-        for (i = n; i < end; i++) {
-                if (test_bit(i, arena->map)) {
-                        start = i + 1;
-                        goto again;
-                }
-        }
-
-        for (i = n; i < end; i++)
-                __set_bit(i, arena->map);
+        end = n + npages;
 
         arena->hint = end;
+
+        /* Update handle for SG allocations */
+        if (handle)
+                *handle = end;
 
         return n;
 }
 
-static void arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
+void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
 {
-        unsigned long i;
+        struct iommu_arena *arena = &iommu->arena;
+        unsigned long entry;
 
-        for (i = base; i < (base + npages); i++)
-                __clear_bit(i, arena->map);
+        entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
+
+        iommu_area_free(arena->map, entry, npages);
 }
 
 int iommu_table_init(struct iommu *iommu, int tsbsize,
···
                 return -ENOMEM;
         }
         iommu->arena.limit = num_tsb_entries;
+
+        if (tlb_type != hypervisor)
+                iommu->flush_all = iommu_flushall;
 
         /* Allocate and initialize the dummy page which we
          * set inactive IO PTEs to point to.
···
         return -ENOMEM;
 }
 
-static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
+static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
+                                    unsigned long npages)
 {
-        long entry;
+        unsigned long entry;
 
-        entry = arena_alloc(iommu, npages);
-        if (unlikely(entry < 0))
+        entry = iommu_range_alloc(dev, iommu, npages, NULL);
+        if (unlikely(entry == DMA_ERROR_CODE))
                 return NULL;
 
         return iommu->page_table + entry;
-}
-
-static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
-{
-        arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
 }
 
 static int iommu_alloc_ctx(struct iommu *iommu)
···
         iommu = dev->archdata.iommu;
 
         spin_lock_irqsave(&iommu->lock, flags);
-        iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
+        iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
         spin_unlock_irqrestore(&iommu->lock, flags);
 
         if (unlikely(iopte == NULL)) {
···
 
         spin_lock_irqsave(&iommu->lock, flags);
 
-        free_npages(iommu, dvma - iommu->page_table_map_base, npages);
+        iommu_range_free(iommu, dvma, npages);
 
         spin_unlock_irqrestore(&iommu->lock, flags);
 
···
         npages >>= IO_PAGE_SHIFT;
 
         spin_lock_irqsave(&iommu->lock, flags);
-        base = alloc_npages(iommu, npages);
+        base = alloc_npages(dev, iommu, npages);
         ctx = 0;
         if (iommu->iommu_ctxflush)
                 ctx = iommu_alloc_ctx(iommu);
···
         for (i = 0; i < npages; i++)
                 iopte_make_dummy(iommu, base + i);
 
-        free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
+        iommu_range_free(iommu, bus_addr, npages);
 
         iommu_free_ctx(iommu, ctx);
 
···
 
         spin_lock_irqsave(&iommu->lock, flags);
 
-        base = alloc_npages(iommu, npages);
+        base = alloc_npages(dev, iommu, npages);
         ctx = 0;
         if (iommu->iommu_ctxflush)
                 ctx = iommu_alloc_ctx(iommu);
···
         for (i = 0; i < npages; i++)
                 iopte_make_dummy(iommu, base + i);
 
-        free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
+        iommu_range_free(iommu, bus_addr, npages);
 
         iommu_free_ctx(iommu, ctx);
 
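
The main behavioral addition above is the segment-boundary constraint handed to iommu_area_alloc(), which the old find_next_zero_bit()/test_bit() loop never honored. As a worked example of that arithmetic, here is a standalone userspace sketch (not kernel code); it assumes sparc64's 8KB IOMMU pages (IO_PAGE_SHIFT == 13) and the default 4GB DMA segment boundary reported by dma_get_seg_boundary():

#include <stdio.h>

#define IO_PAGE_SHIFT 13   /* assumption: 8KB IOMMU pages, as on sparc64 */

int main(void)
{
        /* Default segment boundary mask is 0xffffffff, i.e. a 4GB segment. */
        unsigned long long seg_boundary = 0xffffffffULL;
        unsigned long long io_page = 1ULL << IO_PAGE_SHIFT;

        /* Same computation as iommu_range_alloc(): ALIGN(boundary + 1, IO page size). */
        unsigned long long boundary_size =
                (seg_boundary + 1 + io_page - 1) & ~(io_page - 1);

        /* iommu_area_alloc() receives the boundary in units of map entries,
         * so no range it returns may straddle such an entry boundary.
         */
        printf("boundary in IOMMU map entries: %#llx\n",
               boundary_size >> IO_PAGE_SHIFT);   /* prints 0x80000 */
        return 0;
}

With these numbers an allocation can never cross a 0x80000-entry boundary in the arena map, which corresponds to 4GB of DMA space, matching what the device's segment boundary asks for.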
arch/sparc64/kernel/iommu_common.h  (+8)
···
         return npages;
 }
 
+extern unsigned long iommu_range_alloc(struct device *dev,
+                                       struct iommu *iommu,
+                                       unsigned long npages,
+                                       unsigned long *handle);
+extern void iommu_range_free(struct iommu *iommu,
+                             dma_addr_t dma_addr,
+                             unsigned long npages);
+
 #endif /* _IOMMU_COMMON_H */
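
For orientation, here is a minimal caller sketch of this new interface, modeled on the call sites converted in iommu.c and pci_sun4v.c; the two example_* wrappers are hypothetical names used for illustration only, not functions added by the patch:

/* Hypothetical helpers showing the intended calling convention:
 * allocate and free under iommu->lock, and treat DMA_ERROR_CODE
 * (rather than a negative value, as with the old arena_alloc())
 * as the failure indication.
 */
static dma_addr_t example_map(struct device *dev, struct iommu *iommu,
                              unsigned long npages)
{
        unsigned long flags, entry;

        spin_lock_irqsave(&iommu->lock, flags);
        /* NULL handle: no scatter-gather continuation across calls. */
        entry = iommu_range_alloc(dev, iommu, npages, NULL);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry == DMA_ERROR_CODE))
                return DMA_ERROR_CODE;

        /* Map entries correspond 1:1 to IO pages above page_table_map_base. */
        return iommu->page_table_map_base + (entry << IO_PAGE_SHIFT);
}

static void example_unmap(struct iommu *iommu, dma_addr_t bus_addr,
                          unsigned long npages)
{
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        /* Takes the DMA address itself; the map index is derived internally. */
        iommu_range_free(iommu, bus_addr, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);
}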
arch/sparc64/kernel/pci_sun4v.c  (+18 -66)
···
 /* pci_sun4v.c: SUN4V specific PCI controller support.
  *
- * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
  */
 
 #include <linux/kernel.h>
···
         return iommu_batch_flush(p);
 }
 
-static long arena_alloc(struct iommu_arena *arena, unsigned long npages)
-{
-        unsigned long n, i, start, end, limit;
-        int pass;
-
-        limit = arena->limit;
-        start = arena->hint;
-        pass = 0;
-
-again:
-        n = find_next_zero_bit(arena->map, limit, start);
-        end = n + npages;
-        if (unlikely(end >= limit)) {
-                if (likely(pass < 1)) {
-                        limit = start;
-                        start = 0;
-                        pass++;
-                        goto again;
-                } else {
-                        /* Scanned the whole thing, give up. */
-                        return -1;
-                }
-        }
-
-        for (i = n; i < end; i++) {
-                if (test_bit(i, arena->map)) {
-                        start = i + 1;
-                        goto again;
-                }
-        }
-
-        for (i = n; i < end; i++)
-                __set_bit(i, arena->map);
-
-        arena->hint = end;
-
-        return n;
-}
-
-static void arena_free(struct iommu_arena *arena, unsigned long base,
-                       unsigned long npages)
-{
-        unsigned long i;
-
-        for (i = base; i < (base + npages); i++)
-                __clear_bit(i, arena->map);
-}
-
 static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
                                    dma_addr_t *dma_addrp, gfp_t gfp)
 {
···
         iommu = dev->archdata.iommu;
 
         spin_lock_irqsave(&iommu->lock, flags);
-        entry = arena_alloc(&iommu->arena, npages);
+        entry = iommu_range_alloc(dev, iommu, npages, NULL);
         spin_unlock_irqrestore(&iommu->lock, flags);
 
-        if (unlikely(entry < 0L))
-                goto arena_alloc_fail;
+        if (unlikely(entry == DMA_ERROR_CODE))
+                goto range_alloc_fail;
 
         *dma_addrp = (iommu->page_table_map_base +
                       (entry << IO_PAGE_SHIFT));
···
 iommu_map_fail:
         /* Interrupts are disabled.  */
         spin_lock(&iommu->lock);
-        arena_free(&iommu->arena, entry, npages);
+        iommu_range_free(iommu, *dma_addrp, npages);
         spin_unlock_irqrestore(&iommu->lock, flags);
 
-arena_alloc_fail:
+range_alloc_fail:
         free_pages(first_page, order);
         return NULL;
 }
···
 
         spin_lock_irqsave(&iommu->lock, flags);
 
-        arena_free(&iommu->arena, entry, npages);
+        iommu_range_free(iommu, dvma, npages);
 
         do {
                 unsigned long num;
···
         npages >>= IO_PAGE_SHIFT;
 
         spin_lock_irqsave(&iommu->lock, flags);
-        entry = arena_alloc(&iommu->arena, npages);
+        entry = iommu_range_alloc(dev, iommu, npages, NULL);
         spin_unlock_irqrestore(&iommu->lock, flags);
 
-        if (unlikely(entry < 0L))
+        if (unlikely(entry == DMA_ERROR_CODE))
                 goto bad;
 
         bus_addr = (iommu->page_table_map_base +
···
 iommu_map_fail:
         /* Interrupts are disabled.  */
         spin_lock(&iommu->lock);
-        arena_free(&iommu->arena, entry, npages);
+        iommu_range_free(iommu, bus_addr, npages);
         spin_unlock_irqrestore(&iommu->lock, flags);
 
         return DMA_ERROR_CODE;
···
 
         spin_lock_irqsave(&iommu->lock, flags);
 
-        entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
-        arena_free(&iommu->arena, entry, npages);
+        iommu_range_free(iommu, bus_addr, npages);
 
+        entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
         do {
                 unsigned long num;
 
···
                          int nelems, enum dma_data_direction direction)
 {
         unsigned long flags, npages, i, prot;
+        u32 dma_base, orig_dma_base;
         struct scatterlist *sg;
         struct iommu *iommu;
         long entry, err;
-        u32 dma_base;
 
         /* Fast path single entry scatterlists.  */
         if (nelems == 1) {
···
         npages = calc_npages(sglist, nelems);
 
         spin_lock_irqsave(&iommu->lock, flags);
-        entry = arena_alloc(&iommu->arena, npages);
+        entry = iommu_range_alloc(dev, iommu, npages, NULL);
         spin_unlock_irqrestore(&iommu->lock, flags);
 
-        if (unlikely(entry < 0L))
+        if (unlikely(entry == DMA_ERROR_CODE))
                 goto bad;
 
-        dma_base = iommu->page_table_map_base +
+        orig_dma_base = dma_base = iommu->page_table_map_base +
                    (entry << IO_PAGE_SHIFT);
 
         prot = HV_PCI_MAP_ATTR_READ;
···
 
 iommu_map_failed:
         spin_lock_irqsave(&iommu->lock, flags);
-        arena_free(&iommu->arena, entry, npages);
+        iommu_range_free(iommu, orig_dma_base, npages);
         spin_unlock_irqrestore(&iommu->lock, flags);
 
         return 0;
···
 
         spin_lock_irqsave(&iommu->lock, flags);
 
-        arena_free(&iommu->arena, entry, npages);
+        iommu_range_free(iommu, bus_addr, npages);
 
         do {
                 unsigned long num;
include/asm-sparc64/iommu.h  (+1)
···
 struct iommu {
         spinlock_t              lock;
         struct iommu_arena      arena;
+        void                    (*flush_all)(struct iommu *);
         iopte_t                 *page_table;
         u32                     page_table_map_base;
         unsigned long           iommu_control;