 /* iommu.c: Generic sparc64 IOMMU support.
  *
- * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
  * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
  */
···
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/errno.h>
+#include <linux/iommu-helper.h>
 
 #ifdef CONFIG_PCI
 #include <linux/pci.h>
···
 	 "i" (ASI_PHYS_BYPASS_EC_E))
 
 /* Must be invoked under the IOMMU lock. */
-static void __iommu_flushall(struct iommu *iommu)
+static void iommu_flushall(struct iommu *iommu)
 {
 	if (iommu->iommu_flushinv) {
 		iommu_write(iommu->iommu_flushinv, ~(u64)0);
···
 	iopte_val(*iopte) = val;
 }
 
-/* Based largely upon the ppc64 iommu allocator. */
-static long arena_alloc(struct iommu *iommu, unsigned long npages)
+/* Based almost entirely upon the ppc64 iommu allocator.  If you use the 'handle'
+ * facility it must all be done in one pass while under the iommu lock.
+ *
+ * On sun4u platforms, we only flush the IOMMU once every time we've passed
+ * over the entire page table doing allocations.  Therefore we only ever advance
+ * the hint and cannot backtrack it.
+ */
+unsigned long iommu_range_alloc(struct device *dev,
+				struct iommu *iommu,
+				unsigned long npages,
+				unsigned long *handle)
 {
+	unsigned long n, end, start, limit, boundary_size;
 	struct iommu_arena *arena = &iommu->arena;
-	unsigned long n, i, start, end, limit;
-	int pass;
+	int pass = 0;
+
+	/* This allocator was derived from x86_64's bit string search */
+
+	/* Sanity check */
+	if (unlikely(npages == 0)) {
+		if (printk_ratelimit())
+			WARN_ON(1);
+		return DMA_ERROR_CODE;
+	}
+
+	if (handle && *handle)
+		start = *handle;
+	else
+		start = arena->hint;
 
 	limit = arena->limit;
-	start = arena->hint;
-	pass = 0;
 
-again:
-	n = find_next_zero_bit(arena->map, limit, start);
-	end = n + npages;
-	if (unlikely(end >= limit)) {
+	/* The case below can happen if we have a small segment appended
+	 * to a large, or when the previous alloc was at the very end of
+	 * the available space. If so, go back to the beginning and flush.
+	 */
+	if (start >= limit) {
+		start = 0;
+		if (iommu->flush_all)
+			iommu->flush_all(iommu);
+	}
+
+ again:
+
+	if (dev)
+		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+				      1 << IO_PAGE_SHIFT);
+	else
+		boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);
+
+	n = iommu_area_alloc(arena->map, limit, start, npages, 0,
+			     boundary_size >> IO_PAGE_SHIFT, 0);
+	if (n == -1) {
 		if (likely(pass < 1)) {
-			limit = start;
+			/* First failure, rescan from the beginning.  */
 			start = 0;
-			__iommu_flushall(iommu);
+			if (iommu->flush_all)
+				iommu->flush_all(iommu);
 			pass++;
 			goto again;
 		} else {
-			/* Scanned the whole thing, give up. */
-			return -1;
+			/* Second failure, give up */
+			return DMA_ERROR_CODE;
 		}
 	}
 
-	for (i = n; i < end; i++) {
-		if (test_bit(i, arena->map)) {
-			start = i + 1;
-			goto again;
-		}
-	}
-
-	for (i = n; i < end; i++)
-		__set_bit(i, arena->map);
+	end = n + npages;
 
 	arena->hint = end;
+
+	/* Update handle for SG allocations */
+	if (handle)
+		*handle = end;
 
 	return n;
 }
 
-static void arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
+void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
 {
-	unsigned long i;
+	struct iommu_arena *arena = &iommu->arena;
+	unsigned long entry;
 
-	for (i = base; i < (base + npages); i++)
-		__clear_bit(i, arena->map);
+	entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
+
+	iommu_area_free(arena->map, entry, npages);
 }
 
 int iommu_table_init(struct iommu *iommu, int tsbsize,
···
 		return -ENOMEM;
 	}
 	iommu->arena.limit = num_tsb_entries;
+
+	if (tlb_type != hypervisor)
+		iommu->flush_all = iommu_flushall;
 
 	/* Allocate and initialize the dummy page which we
 	 * set inactive IO PTEs to point to.
···
 	return -ENOMEM;
 }
 
-static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
+static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
+				    unsigned long npages)
 {
-	long entry;
+	unsigned long entry;
 
-	entry = arena_alloc(iommu, npages);
-	if (unlikely(entry < 0))
+	entry = iommu_range_alloc(dev, iommu, npages, NULL);
+	if (unlikely(entry == DMA_ERROR_CODE))
 		return NULL;
 
 	return iommu->page_table + entry;
-}
-
-static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
-{
-	arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
 }
 
 static int iommu_alloc_ctx(struct iommu *iommu)
···
 	iommu = dev->archdata.iommu;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
+	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	if (unlikely(iopte == NULL)) {
···
 
 	spin_lock_irqsave(&iommu->lock, flags);
 
-	free_npages(iommu, dvma - iommu->page_table_map_base, npages);
+	iommu_range_free(iommu, dvma, npages);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
···
 	npages >>= IO_PAGE_SHIFT;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	base = alloc_npages(iommu, npages);
+	base = alloc_npages(dev, iommu, npages);
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
 		ctx = iommu_alloc_ctx(iommu);
···
 	for (i = 0; i < npages; i++)
 		iopte_make_dummy(iommu, base + i);
 
-	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
+	iommu_range_free(iommu, bus_addr, npages);
 
 	iommu_free_ctx(iommu, ctx);
···
 
 	spin_lock_irqsave(&iommu->lock, flags);
 
-	base = alloc_npages(iommu, npages);
+	base = alloc_npages(dev, iommu, npages);
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
 		ctx = iommu_alloc_ctx(iommu);
···
 	for (i = 0; i < npages; i++)
 		iopte_make_dummy(iommu, base + i);
 
-	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
+	iommu_range_free(iommu, bus_addr, npages);
 
 	iommu_free_ctx(iommu, ctx);
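A note on the new interface: iommu_range_alloc() returns an IO PTE index into
iommu->page_table (or DMA_ERROR_CODE on failure), and the optional 'handle'
cookie lets a caller that allocates several ranges back to back resume each
search where the previous one ended, provided all of it happens under a single
hold of iommu->lock. Below is a minimal sketch of how such a caller could turn
the returned index into a bus address and release it again. It is illustrative
only and not part of the patch: map_one_range()/unmap_one_range() and their
arguments are hypothetical, while iommu_range_alloc(), iommu_range_free(), and
the page_table_map_base/IO_PAGE_SHIFT arithmetic are taken from the code above.

/* Hypothetical helper (not in the patch): caller holds iommu->lock and
 * passes the same 'handle' for every range belonging to one SG-style
 * mapping, so each allocation continues from the end of the previous one.
 */
static dma_addr_t map_one_range(struct device *dev, struct iommu *iommu,
				unsigned long npages, unsigned long *handle)
{
	unsigned long entry;

	entry = iommu_range_alloc(dev, iommu, npages, handle);
	if (unlikely(entry == DMA_ERROR_CODE))
		return DMA_ERROR_CODE;

	/* Convert the IO PTE index into a bus address in this IOMMU's window. */
	return iommu->page_table_map_base + (entry << IO_PAGE_SHIFT);
}

/* Tear-down is symmetric: hand back the bus address and the page count. */
static void unmap_one_range(struct iommu *iommu, dma_addr_t bus_addr,
			    unsigned long npages)
{
	iommu_range_free(iommu, bus_addr, npages);
}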