arch/sparc64/kernel/iommu.c
···
 /* iommu.c: Generic sparc64 IOMMU support.
  *
- * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
  * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
  */
···
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/errno.h>
+#include <linux/iommu-helper.h>
 
 #ifdef CONFIG_PCI
 #include <linux/pci.h>
···
              "i" (ASI_PHYS_BYPASS_EC_E))
 
 /* Must be invoked under the IOMMU lock. */
-static void __iommu_flushall(struct iommu *iommu)
+static void iommu_flushall(struct iommu *iommu)
 {
         if (iommu->iommu_flushinv) {
                 iommu_write(iommu->iommu_flushinv, ~(u64)0);
···
         iopte_val(*iopte) = val;
 }
 
-/* Based largely upon the ppc64 iommu allocator.  */
-static long arena_alloc(struct iommu *iommu, unsigned long npages)
+/* Based almost entirely upon the ppc64 iommu allocator.  If you use the 'handle'
+ * facility it must all be done in one pass while under the iommu lock.
+ *
+ * On sun4u platforms, we only flush the IOMMU once every time we've passed
+ * over the entire page table doing allocations.  Therefore we only ever advance
+ * the hint and cannot backtrack it.
+ */
+unsigned long iommu_range_alloc(struct device *dev,
+                                struct iommu *iommu,
+                                unsigned long npages,
+                                unsigned long *handle)
 {
+        unsigned long n, end, start, limit, boundary_size;
         struct iommu_arena *arena = &iommu->arena;
-        unsigned long n, i, start, end, limit;
-        int pass;
+        int pass = 0;
+
+        /* This allocator was derived from x86_64's bit string search */
+
+        /* Sanity check */
+        if (unlikely(npages == 0)) {
+                if (printk_ratelimit())
+                        WARN_ON(1);
+                return DMA_ERROR_CODE;
+        }
+
+        if (handle && *handle)
+                start = *handle;
+        else
+                start = arena->hint;
 
         limit = arena->limit;
-        start = arena->hint;
-        pass = 0;
 
-again:
-        n = find_next_zero_bit(arena->map, limit, start);
-        end = n + npages;
-        if (unlikely(end >= limit)) {
+        /* The case below can happen if we have a small segment appended
+         * to a large, or when the previous alloc was at the very end of
+         * the available space.  If so, go back to the beginning and flush.
+         */
+        if (start >= limit) {
+                start = 0;
+                if (iommu->flush_all)
+                        iommu->flush_all(iommu);
+        }
+
+ again:
+
+        if (dev)
+                boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+                                      1 << IO_PAGE_SHIFT);
+        else
+                boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);
+
+        n = iommu_area_alloc(arena->map, limit, start, npages, 0,
+                             boundary_size >> IO_PAGE_SHIFT, 0);
+        if (n == -1) {
                 if (likely(pass < 1)) {
-                        limit = start;
+                        /* First failure, rescan from the beginning.  */
                         start = 0;
-                        __iommu_flushall(iommu);
+                        if (iommu->flush_all)
+                                iommu->flush_all(iommu);
                         pass++;
                         goto again;
                 } else {
-                        /* Scanned the whole thing, give up. */
-                        return -1;
+                        /* Second failure, give up */
+                        return DMA_ERROR_CODE;
                 }
         }
 
-        for (i = n; i < end; i++) {
-                if (test_bit(i, arena->map)) {
-                        start = i + 1;
-                        goto again;
-                }
-        }
-
-        for (i = n; i < end; i++)
-                __set_bit(i, arena->map);
+        end = n + npages;
 
         arena->hint = end;
+
+        /* Update handle for SG allocations */
+        if (handle)
+                *handle = end;
 
         return n;
 }
 
-static void arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
+void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
 {
-        unsigned long i;
+        struct iommu_arena *arena = &iommu->arena;
+        unsigned long entry;
 
-        for (i = base; i < (base + npages); i++)
-                __clear_bit(i, arena->map);
+        entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
+
+        iommu_area_free(arena->map, entry, npages);
 }
 
 int iommu_table_init(struct iommu *iommu, int tsbsize,
···
                 return -ENOMEM;
         }
         iommu->arena.limit = num_tsb_entries;
+
+        if (tlb_type != hypervisor)
+                iommu->flush_all = iommu_flushall;
 
         /* Allocate and initialize the dummy page which we
          * set inactive IO PTEs to point to.
···
         return -ENOMEM;
 }
 
-static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
+static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
+                                    unsigned long npages)
 {
-        long entry;
+        unsigned long entry;
 
-        entry = arena_alloc(iommu, npages);
-        if (unlikely(entry < 0))
+        entry = iommu_range_alloc(dev, iommu, npages, NULL);
+        if (unlikely(entry == DMA_ERROR_CODE))
                 return NULL;
 
         return iommu->page_table + entry;
-}
-
-static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
-{
-        arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
 }
 
 static int iommu_alloc_ctx(struct iommu *iommu)
···
         iommu = dev->archdata.iommu;
 
         spin_lock_irqsave(&iommu->lock, flags);
-        iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
+        iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
         spin_unlock_irqrestore(&iommu->lock, flags);
 
         if (unlikely(iopte == NULL)) {
···
 
         spin_lock_irqsave(&iommu->lock, flags);
 
-        free_npages(iommu, dvma - iommu->page_table_map_base, npages);
+        iommu_range_free(iommu, dvma, npages);
 
         spin_unlock_irqrestore(&iommu->lock, flags);
···
         npages >>= IO_PAGE_SHIFT;
 
         spin_lock_irqsave(&iommu->lock, flags);
-        base = alloc_npages(iommu, npages);
+        base = alloc_npages(dev, iommu, npages);
         ctx = 0;
         if (iommu->iommu_ctxflush)
                 ctx = iommu_alloc_ctx(iommu);
···
         for (i = 0; i < npages; i++)
                 iopte_make_dummy(iommu, base + i);
 
-        free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
+        iommu_range_free(iommu, bus_addr, npages);
 
         iommu_free_ctx(iommu, ctx);
···
 
         spin_lock_irqsave(&iommu->lock, flags);
 
-        base = alloc_npages(iommu, npages);
+        base = alloc_npages(dev, iommu, npages);
         ctx = 0;
         if (iommu->iommu_ctxflush)
                 ctx = iommu_alloc_ctx(iommu);
···
         for (i = 0; i < npages; i++)
                 iopte_make_dummy(iommu, base + i);
 
-        free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
+        iommu_range_free(iommu, bus_addr, npages);
 
         iommu_free_ctx(iommu, ctx);
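
The behavioral heart of the conversion above is iommu_area_alloc(): it hands out npages contiguous table entries that never straddle a dma_get_seg_boundary() line, and the caller rescans once from entry 0 (flushing the IOMMU on sun4u) before giving up. To see that search in isolation, here is a stand-alone C model; map_alloc, MAP_BITS and BOUNDARY are made-up names for illustration, not kernel symbols, and the bitmap is a byte array for clarity.

    /* Stand-alone model of the two-pass, boundary-constrained bitmap search
     * performed by iommu_area_alloc().  MAP_BITS plays the role of
     * arena->limit, BOUNDARY of boundary_size >> IO_PAGE_SHIFT.
     */
    #include <stdio.h>
    #include <string.h>

    #define MAP_BITS 64
    #define BOUNDARY 16

    static unsigned char map[MAP_BITS];     /* one byte per table entry */
    static unsigned long hint;              /* models arena->hint */

    static long map_alloc(unsigned long npages)
    {
            unsigned long start = hint;
            int pass = 0;

    again:
            for (unsigned long n = start; n + npages <= MAP_BITS; n++) {
                    unsigned long i;

                    /* A mapping may not cross a BOUNDARY-aligned line. */
                    if (n / BOUNDARY != (n + npages - 1) / BOUNDARY)
                            continue;
                    for (i = 0; i < npages && !map[n + i]; i++)
                            ;
                    if (i == npages) {
                            memset(&map[n], 1, npages);
                            hint = n + npages;
                            return (long)n;
                    }
            }
            if (pass++ < 1) {       /* first failure: rescan from 0 (sun4u flushes here) */
                    start = 0;
                    goto again;
            }
            return -1;              /* second failure: give up */
    }

    int main(void)
    {
            printf("%ld\n", map_alloc(10));  /* 0 */
            printf("%ld\n", map_alloc(10));  /* 16: a range at 10 would cross bit 16 */
            return 0;
    }

The skip from entry 10 to entry 16 in the second call is why the map paths get boundary-safe DMA addresses without any after-the-fact checking.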
+8
arch/sparc64/kernel/iommu_common.h
···
         return npages;
 }
 
+extern unsigned long iommu_range_alloc(struct device *dev,
+                                       struct iommu *iommu,
+                                       unsigned long npages,
+                                       unsigned long *handle);
+extern void iommu_range_free(struct iommu *iommu,
+                             dma_addr_t dma_addr,
+                             unsigned long npages);
+
 #endif /* _IOMMU_COMMON_H */
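
The handle argument exported here is what lets a scatterlist mapping do several allocations in one pass while iommu->lock is held, each call resuming where the previous one ended instead of re-reading arena->hint. A hypothetical caller could look like the sketch below; map_sg_sketch() and its elided IOPTE-programming step are invented for illustration (the map_sg routines converted in this patch still pass NULL for the handle).

    /* Hypothetical sketch, in kernel context, of the 'handle' facility:
     * several allocations under one hold of iommu->lock.
     */
    static int map_sg_sketch(struct device *dev, struct iommu *iommu,
                             struct scatterlist *sglist, int nelems)
    {
            unsigned long flags, handle = 0;
            struct scatterlist *sg;
            int i;

            spin_lock_irqsave(&iommu->lock, flags);
            for_each_sg(sglist, sg, nelems, i) {
                    unsigned long npages = (sg->offset + sg->length +
                                            IO_PAGE_SIZE - 1) >> IO_PAGE_SHIFT;
                    unsigned long entry = iommu_range_alloc(dev, iommu,
                                                            npages, &handle);

                    if (entry == DMA_ERROR_CODE) {
                            spin_unlock_irqrestore(&iommu->lock, flags);
                            return 0;       /* caller unmaps what did succeed */
                    }
                    /* ... fill npages IOPTEs starting at entry ... */
            }
            spin_unlock_irqrestore(&iommu->lock, flags);
            return nelems;
    }

Because *handle is advanced to the end of each allocation, the segments come out back to back in the page table, which is what later segment-merging work relies on.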
+18 -66
arch/sparc64/kernel/pci_sun4v.c
···
 /* pci_sun4v.c: SUN4V specific PCI controller support.
  *
- * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
  */
 
 #include <linux/kernel.h>
···
         return iommu_batch_flush(p);
 }
 
-static long arena_alloc(struct iommu_arena *arena, unsigned long npages)
-{
-        unsigned long n, i, start, end, limit;
-        int pass;
-
-        limit = arena->limit;
-        start = arena->hint;
-        pass = 0;
-
-again:
-        n = find_next_zero_bit(arena->map, limit, start);
-        end = n + npages;
-        if (unlikely(end >= limit)) {
-                if (likely(pass < 1)) {
-                        limit = start;
-                        start = 0;
-                        pass++;
-                        goto again;
-                } else {
-                        /* Scanned the whole thing, give up. */
-                        return -1;
-                }
-        }
-
-        for (i = n; i < end; i++) {
-                if (test_bit(i, arena->map)) {
-                        start = i + 1;
-                        goto again;
-                }
-        }
-
-        for (i = n; i < end; i++)
-                __set_bit(i, arena->map);
-
-        arena->hint = end;
-
-        return n;
-}
-
-static void arena_free(struct iommu_arena *arena, unsigned long base,
-                       unsigned long npages)
-{
-        unsigned long i;
-
-        for (i = base; i < (base + npages); i++)
-                __clear_bit(i, arena->map);
-}
-
 static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
                                    dma_addr_t *dma_addrp, gfp_t gfp)
 {
···
         iommu = dev->archdata.iommu;
 
         spin_lock_irqsave(&iommu->lock, flags);
-        entry = arena_alloc(&iommu->arena, npages);
+        entry = iommu_range_alloc(dev, iommu, npages, NULL);
         spin_unlock_irqrestore(&iommu->lock, flags);
 
-        if (unlikely(entry < 0L))
-                goto arena_alloc_fail;
+        if (unlikely(entry == DMA_ERROR_CODE))
+                goto range_alloc_fail;
 
         *dma_addrp = (iommu->page_table_map_base +
                       (entry << IO_PAGE_SHIFT));
···
 iommu_map_fail:
         /* Interrupts are disabled.  */
         spin_lock(&iommu->lock);
-        arena_free(&iommu->arena, entry, npages);
+        iommu_range_free(iommu, *dma_addrp, npages);
         spin_unlock_irqrestore(&iommu->lock, flags);
 
-arena_alloc_fail:
+range_alloc_fail:
         free_pages(first_page, order);
         return NULL;
 }
···
 
         spin_lock_irqsave(&iommu->lock, flags);
 
-        arena_free(&iommu->arena, entry, npages);
+        iommu_range_free(iommu, dvma, npages);
 
         do {
                 unsigned long num;
···
         npages >>= IO_PAGE_SHIFT;
 
         spin_lock_irqsave(&iommu->lock, flags);
-        entry = arena_alloc(&iommu->arena, npages);
+        entry = iommu_range_alloc(dev, iommu, npages, NULL);
         spin_unlock_irqrestore(&iommu->lock, flags);
 
-        if (unlikely(entry < 0L))
+        if (unlikely(entry == DMA_ERROR_CODE))
                 goto bad;
 
         bus_addr = (iommu->page_table_map_base +
···
 iommu_map_fail:
         /* Interrupts are disabled.  */
         spin_lock(&iommu->lock);
-        arena_free(&iommu->arena, entry, npages);
+        iommu_range_free(iommu, bus_addr, npages);
         spin_unlock_irqrestore(&iommu->lock, flags);
 
         return DMA_ERROR_CODE;
···
 
         spin_lock_irqsave(&iommu->lock, flags);
 
-        entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
-        arena_free(&iommu->arena, entry, npages);
+        iommu_range_free(iommu, bus_addr, npages);
 
+        entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
         do {
                 unsigned long num;
···
                  int nelems, enum dma_data_direction direction)
 {
         unsigned long flags, npages, i, prot;
+        u32 dma_base, orig_dma_base;
         struct scatterlist *sg;
         struct iommu *iommu;
         long entry, err;
-        u32 dma_base;
 
         /* Fast path single entry scatterlists.  */
         if (nelems == 1) {
···
         npages = calc_npages(sglist, nelems);
 
         spin_lock_irqsave(&iommu->lock, flags);
-        entry = arena_alloc(&iommu->arena, npages);
+        entry = iommu_range_alloc(dev, iommu, npages, NULL);
         spin_unlock_irqrestore(&iommu->lock, flags);
 
-        if (unlikely(entry < 0L))
+        if (unlikely(entry == DMA_ERROR_CODE))
                 goto bad;
 
-        dma_base = iommu->page_table_map_base +
+        orig_dma_base = dma_base = iommu->page_table_map_base +
                    (entry << IO_PAGE_SHIFT);
 
         prot = HV_PCI_MAP_ATTR_READ;
···
 iommu_map_failed:
         spin_lock_irqsave(&iommu->lock, flags);
-        arena_free(&iommu->arena, entry, npages);
+        iommu_range_free(iommu, orig_dma_base, npages);
         spin_unlock_irqrestore(&iommu->lock, flags);
 
         return 0;
···
 
         spin_lock_irqsave(&iommu->lock, flags);
 
-        arena_free(&iommu->arena, entry, npages);
+        iommu_range_free(iommu, bus_addr, npages);
 
         do {
                 unsigned long num;
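
One subtlety in this file's conversion: arena_free() took a table index, while iommu_range_free() takes the DMA address and recomputes the index itself. That is why the unmap paths now pass bus_addr (or *dma_addrp / dvma / orig_dma_base), and why the sun4v unmap path keeps its own entry computation only for the HV_FAST_PCI_IOMMU_DEMAP loop. The round trip is one subtract and one shift; a tiny stand-alone check, with a made-up base address (IO_PAGE_SHIFT is sparc64's real value, the base is illustrative):

    /* Stand-alone check of the DMA-address <-> table-entry arithmetic
     * that iommu_range_free() performs internally.
     */
    #include <assert.h>

    #define IO_PAGE_SHIFT           13
    #define PAGE_TABLE_MAP_BASE     0xc0000000UL  /* models iommu->page_table_map_base */

    int main(void)
    {
            unsigned long entry = 42;
            unsigned long dma = PAGE_TABLE_MAP_BASE + (entry << IO_PAGE_SHIFT);

            /* What iommu_range_free() recomputes from the address: */
            assert(((dma - PAGE_TABLE_MAP_BASE) >> IO_PAGE_SHIFT) == entry);
            return 0;
    }

Freeing by address keeps all index arithmetic in one place instead of duplicating it at every call site.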