···1515 to associate with its master device. See:1616 Documentation/devicetree/bindings/iommu/iommu.txt17171818+Optional properties:1919+- rockchip,disable-mmu-reset : Don't use the mmu reset operation.2020+ Some mmu instances may produce unexpected results2121+ when the reset operation is used.2222+1823Example:19242025 vopl_mmu: iommu@ff940300 {
···776776777777 zpci_exit_slot(zdev);778778 zpci_cleanup_bus_resources(zdev);779779+ zpci_destroy_iommu(zdev);779780 zpci_free_domain(zdev);780781781782 spin_lock(&zpci_list_lock);···849848 if (rc)850849 goto out;851850851851+ rc = zpci_init_iommu(zdev);852852+ if (rc)853853+ goto out_free;854854+852855 mutex_init(&zdev->lock);853856 if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {854857 rc = zpci_enable_device(zdev);855858 if (rc)856856- goto out_free;859859+ goto out_destroy_iommu;857860 }858861 rc = zpci_scan_bus(zdev);859862 if (rc)···874869out_disable:875870 if (zdev->state == ZPCI_FN_STATE_ONLINE)876871 zpci_disable_device(zdev);872872+out_destroy_iommu:873873+ zpci_destroy_iommu(zdev);877874out_free:878875 zpci_free_domain(zdev);879876out:
+13
drivers/iommu/Kconfig
···76767777config FSL_PAMU7878 bool "Freescale IOMMU support"7979+ depends on PCI8080+ depends on PHYS_64BIT7981 depends on PPC_E500MC || (COMPILE_TEST && PPC)8082 select IOMMU_API8183 select GENERIC_ALLOCATOR···255253config EXYNOS_IOMMU256254 bool "Exynos IOMMU Support"257255 depends on ARCH_EXYNOS && MMU256256+ depends on !CPU_BIG_ENDIAN # revisit driver if we can enable big-endian ptes258257 select IOMMU_API259258 select ARM_DMA_USE_IOMMU260259 help···369366 DMA memory accesses for the multimedia subsystem.370367371368 if unsure, say N here.369369+370370+config QCOM_IOMMU371371+ # Note: iommu drivers cannot (yet?) be built as modules372372+ bool "Qualcomm IOMMU Support"373373+ depends on ARCH_QCOM || COMPILE_TEST374374+ select IOMMU_API375375+ select IOMMU_IO_PGTABLE_LPAE376376+ select ARM_DMA_USE_IOMMU377377+ help378378+ Support for IOMMU on certain Qualcomm SoCs.372379373380endif # IOMMU_SUPPORT
···103103static const struct dma_map_ops amd_iommu_dma_ops;104104105105/*106106- * This struct contains device specific data for the IOMMU107107- */108108-struct iommu_dev_data {109109- struct list_head list; /* For domain->dev_list */110110- struct list_head dev_data_list; /* For global dev_data_list */111111- struct protection_domain *domain; /* Domain the device is bound to */112112- u16 devid; /* PCI Device ID */113113- u16 alias; /* Alias Device ID */114114- bool iommu_v2; /* Device can make use of IOMMUv2 */115115- bool passthrough; /* Device is identity mapped */116116- struct {117117- bool enabled;118118- int qdep;119119- } ats; /* ATS state */120120- bool pri_tlp; /* PASID TLB required for121121- PPR completions */122122- u32 errata; /* Bitmap for errata to apply */123123- bool use_vapic; /* Enable device to use vapic mode */124124-125125- struct ratelimit_state rs; /* Ratelimit IOPF messages */126126-};127127-128128-/*129106 * general struct to manage commands send to an IOMMU130107 */131108struct iommu_cmd {···114137static void update_domain(struct protection_domain *domain);115138static int protection_domain_init(struct protection_domain *domain);116139static void detach_device(struct device *dev);117117-118118-#define FLUSH_QUEUE_SIZE 256119119-120120-struct flush_queue_entry {121121- unsigned long iova_pfn;122122- unsigned long pages;123123- u64 counter; /* Flush counter when this entry was added to the queue */124124-};125125-126126-struct flush_queue {127127- struct flush_queue_entry *entries;128128- unsigned head, tail;129129- spinlock_t lock;130130-};140140+static void iova_domain_flush_tlb(struct iova_domain *iovad);131141132142/*133143 * Data container for a dma_ops specific protection domain···125161126162 /* IOVA RB-Tree */127163 struct iova_domain iovad;128128-129129- struct flush_queue __percpu *flush_queue;130130-131131- /*132132- * We need two counter here to be race-free wrt. 
IOTLB flushing and133133- * adding entries to the flush queue.134134- *135135- * The flush_start_cnt is incremented _before_ the IOTLB flush starts.136136- * New entries added to the flush ring-buffer get their 'counter' value137137- * from here. This way we can make sure that entries added to the queue138138- * (or other per-cpu queues of the same domain) while the TLB is about139139- * to be flushed are not considered to be flushed already.140140- */141141- atomic64_t flush_start_cnt;142142-143143- /*144144- * The flush_finish_cnt is incremented when an IOTLB flush is complete.145145- * This value is always smaller than flush_start_cnt. The queue_add146146- * function frees all IOVAs that have a counter value smaller than147147- * flush_finish_cnt. This makes sure that we only free IOVAs that are148148- * flushed out of the IOTLB of the domain.149149- */150150- atomic64_t flush_finish_cnt;151151-152152- /*153153- * Timer to make sure we don't keep IOVAs around unflushed154154- * for too long155155- */156156- struct timer_list flush_timer;157157- atomic_t flush_timer_on;158164};159165160166static struct iova_domain reserved_iova_ranges;···305371static struct iommu_dev_data *find_dev_data(u16 devid)306372{307373 struct iommu_dev_data *dev_data;374374+ struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];308375309376 dev_data = search_dev_data(devid);310377311311- if (dev_data == NULL)378378+ if (dev_data == NULL) {312379 dev_data = alloc_dev_data(devid);380380+381381+ if (translation_pre_enabled(iommu))382382+ dev_data->defer_attach = true;383383+ }313384314385 return dev_data;315386}316387317317-static struct iommu_dev_data *get_dev_data(struct device *dev)388388+struct iommu_dev_data *get_dev_data(struct device *dev)318389{319390 return dev->archdata.iommu;320391}392392+EXPORT_SYMBOL(get_dev_data);321393322394/*323395* Find or create an IOMMU group for a acpihid device.···11051165 return iommu_queue_command(iommu, &cmd);11061166}1107116711081108-static void 
iommu_flush_dte_all(struct amd_iommu *iommu)11681168+static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)11091169{11101170 u32 devid;11111171···11191179 * This function uses heavy locking and may disable irqs for some time. But11201180 * this is no issue because it is only called during resume.11211181 */11221122-static void iommu_flush_tlb_all(struct amd_iommu *iommu)11821182+static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)11231183{11241184 u32 dom_id;11251185···11331193 iommu_completion_wait(iommu);11341194}1135119511361136-static void iommu_flush_all(struct amd_iommu *iommu)11961196+static void amd_iommu_flush_all(struct amd_iommu *iommu)11371197{11381198 struct iommu_cmd cmd;11391199···11521212 iommu_queue_command(iommu, &cmd);11531213}1154121411551155-static void iommu_flush_irt_all(struct amd_iommu *iommu)12151215+static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)11561216{11571217 u32 devid;11581218···11651225void iommu_flush_all_caches(struct amd_iommu *iommu)11661226{11671227 if (iommu_feature(iommu, FEATURE_IA)) {11681168- iommu_flush_all(iommu);12281228+ amd_iommu_flush_all(iommu);11691229 } else {11701170- iommu_flush_dte_all(iommu);11711171- iommu_flush_irt_all(iommu);11721172- iommu_flush_tlb_all(iommu);12301230+ amd_iommu_flush_dte_all(iommu);12311231+ amd_iommu_flush_irt_all(iommu);12321232+ amd_iommu_flush_tlb_all(iommu);11731233 }11741234}11751235···1477153714781538 if (count > 1) {14791539 __pte = PAGE_SIZE_PTE(phys_addr, page_size);14801480- __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;15401540+ __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;14811541 } else14821482- __pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC;15421542+ __pte = phys_addr | IOMMU_PTE_PR | IOMMU_PTE_FC;1483154314841544 if (prot & IOMMU_PROT_IR)14851545 __pte |= IOMMU_PTE_IR;···17281788 free_page((unsigned long)domain->gcr3_tbl);17291789}1730179017311731-static void dma_ops_domain_free_flush_queue(struct dma_ops_domain 
*dom)17321732-{17331733- int cpu;17341734-17351735- for_each_possible_cpu(cpu) {17361736- struct flush_queue *queue;17371737-17381738- queue = per_cpu_ptr(dom->flush_queue, cpu);17391739- kfree(queue->entries);17401740- }17411741-17421742- free_percpu(dom->flush_queue);17431743-17441744- dom->flush_queue = NULL;17451745-}17461746-17471747-static int dma_ops_domain_alloc_flush_queue(struct dma_ops_domain *dom)17481748-{17491749- int cpu;17501750-17511751- atomic64_set(&dom->flush_start_cnt, 0);17521752- atomic64_set(&dom->flush_finish_cnt, 0);17531753-17541754- dom->flush_queue = alloc_percpu(struct flush_queue);17551755- if (!dom->flush_queue)17561756- return -ENOMEM;17571757-17581758- /* First make sure everything is cleared */17591759- for_each_possible_cpu(cpu) {17601760- struct flush_queue *queue;17611761-17621762- queue = per_cpu_ptr(dom->flush_queue, cpu);17631763- queue->head = 0;17641764- queue->tail = 0;17651765- queue->entries = NULL;17661766- }17671767-17681768- /* Now start doing the allocation */17691769- for_each_possible_cpu(cpu) {17701770- struct flush_queue *queue;17711771-17721772- queue = per_cpu_ptr(dom->flush_queue, cpu);17731773- queue->entries = kzalloc(FLUSH_QUEUE_SIZE * sizeof(*queue->entries),17741774- GFP_KERNEL);17751775- if (!queue->entries) {17761776- dma_ops_domain_free_flush_queue(dom);17771777- return -ENOMEM;17781778- }17791779-17801780- spin_lock_init(&queue->lock);17811781- }17821782-17831783- return 0;17841784-}17851785-17861791static void dma_ops_domain_flush_tlb(struct dma_ops_domain *dom)17871792{17881788- atomic64_inc(&dom->flush_start_cnt);17891793 domain_flush_tlb(&dom->domain);17901794 domain_flush_complete(&dom->domain);17911791- atomic64_inc(&dom->flush_finish_cnt);17921795}1793179617941794-static inline bool queue_ring_full(struct flush_queue *queue)17971797+static void iova_domain_flush_tlb(struct iova_domain *iovad)17951798{17961796- assert_spin_locked(&queue->lock);17991799+ struct dma_ops_domain 
*dom;1797180017981798- return (((queue->tail + 1) % FLUSH_QUEUE_SIZE) == queue->head);17991799-}18001800-18011801-#define queue_ring_for_each(i, q) \18021802- for (i = (q)->head; i != (q)->tail; i = (i + 1) % FLUSH_QUEUE_SIZE)18031803-18041804-static inline unsigned queue_ring_add(struct flush_queue *queue)18051805-{18061806- unsigned idx = queue->tail;18071807-18081808- assert_spin_locked(&queue->lock);18091809- queue->tail = (idx + 1) % FLUSH_QUEUE_SIZE;18101810-18111811- return idx;18121812-}18131813-18141814-static inline void queue_ring_remove_head(struct flush_queue *queue)18151815-{18161816- assert_spin_locked(&queue->lock);18171817- queue->head = (queue->head + 1) % FLUSH_QUEUE_SIZE;18181818-}18191819-18201820-static void queue_ring_free_flushed(struct dma_ops_domain *dom,18211821- struct flush_queue *queue)18221822-{18231823- u64 counter = atomic64_read(&dom->flush_finish_cnt);18241824- int idx;18251825-18261826- queue_ring_for_each(idx, queue) {18271827- /*18281828- * This assumes that counter values in the ring-buffer are18291829- * monotonously rising.18301830- */18311831- if (queue->entries[idx].counter >= counter)18321832- break;18331833-18341834- free_iova_fast(&dom->iovad,18351835- queue->entries[idx].iova_pfn,18361836- queue->entries[idx].pages);18371837-18381838- queue_ring_remove_head(queue);18391839- }18401840-}18411841-18421842-static void queue_add(struct dma_ops_domain *dom,18431843- unsigned long address, unsigned long pages)18441844-{18451845- struct flush_queue *queue;18461846- unsigned long flags;18471847- int idx;18481848-18491849- pages = __roundup_pow_of_two(pages);18501850- address >>= PAGE_SHIFT;18511851-18521852- queue = get_cpu_ptr(dom->flush_queue);18531853- spin_lock_irqsave(&queue->lock, flags);18541854-18551855- /*18561856- * First remove the enries from the ring-buffer that are already18571857- * flushed to make the below queue_ring_full() check less likely18581858- */18591859- queue_ring_free_flushed(dom, 
queue);18601860-18611861- /*18621862- * When ring-queue is full, flush the entries from the IOTLB so18631863- * that we can free all entries with queue_ring_free_flushed()18641864- * below.18651865- */18661866- if (queue_ring_full(queue)) {18671867- dma_ops_domain_flush_tlb(dom);18681868- queue_ring_free_flushed(dom, queue);18691869- }18701870-18711871- idx = queue_ring_add(queue);18721872-18731873- queue->entries[idx].iova_pfn = address;18741874- queue->entries[idx].pages = pages;18751875- queue->entries[idx].counter = atomic64_read(&dom->flush_start_cnt);18761876-18771877- spin_unlock_irqrestore(&queue->lock, flags);18781878-18791879- if (atomic_cmpxchg(&dom->flush_timer_on, 0, 1) == 0)18801880- mod_timer(&dom->flush_timer, jiffies + msecs_to_jiffies(10));18811881-18821882- put_cpu_ptr(dom->flush_queue);18831883-}18841884-18851885-static void queue_flush_timeout(unsigned long data)18861886-{18871887- struct dma_ops_domain *dom = (struct dma_ops_domain *)data;18881888- int cpu;18891889-18901890- atomic_set(&dom->flush_timer_on, 0);18011801+ dom = container_of(iovad, struct dma_ops_domain, iovad);1891180218921803 dma_ops_domain_flush_tlb(dom);18931893-18941894- for_each_possible_cpu(cpu) {18951895- struct flush_queue *queue;18961896- unsigned long flags;18971897-18981898- queue = per_cpu_ptr(dom->flush_queue, cpu);18991899- spin_lock_irqsave(&queue->lock, flags);19001900- queue_ring_free_flushed(dom, queue);19011901- spin_unlock_irqrestore(&queue->lock, flags);19021902- }19031804}1904180519051806/*···17531972 return;1754197317551974 del_domain_from_list(&dom->domain);17561756-17571757- if (timer_pending(&dom->flush_timer))17581758- del_timer(&dom->flush_timer);17591759-17601760- dma_ops_domain_free_flush_queue(dom);1761197517621976 put_iova_domain(&dom->iovad);17631977···17892013 init_iova_domain(&dma_dom->iovad, PAGE_SIZE,17902014 IOVA_START_PFN, DMA_32BIT_PFN);1791201517921792- /* Initialize reserved ranges */17931793- copy_reserved_iova(&reserved_iova_ranges, 
&dma_dom->iovad);17941794-17951795- if (dma_ops_domain_alloc_flush_queue(dma_dom))20162016+ if (init_iova_flush_queue(&dma_dom->iovad, iova_domain_flush_tlb, NULL))17962017 goto free_dma_dom;1797201817981798- setup_timer(&dma_dom->flush_timer, queue_flush_timeout,17991799- (unsigned long)dma_dom);18001800-18011801- atomic_set(&dma_dom->flush_timer_on, 0);20192019+ /* Initialize reserved ranges */20202020+ copy_reserved_iova(&reserved_iova_ranges, &dma_dom->iovad);1802202118032022 add_domain_to_list(&dma_dom->domain);18042023···1824205318252054 pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)18262055 << DEV_ENTRY_MODE_SHIFT;18271827- pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;20562056+ pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V | DTE_FLAG_TV;1828205718292058 flags = amd_iommu_dev_table[devid].data[1];18302059···18572086 flags |= tmp;18582087 }1859208818601860-18611861- flags &= ~(DTE_FLAG_SA | 0xffffULL);20892089+ flags &= ~DEV_DOMID_MASK;18622090 flags |= domain->id;1863209118642092 amd_iommu_dev_table[devid].data[1] = flags;···18672097static void clear_dte_entry(u16 devid)18682098{18692099 /* remove entry from the device table seen by the hardware */18701870- amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;21002100+ amd_iommu_dev_table[devid].data[0] = DTE_FLAG_V | DTE_FLAG_TV;18712101 amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK;1872210218732103 amd_iommu_apply_erratum_63(devid);···22482478static struct protection_domain *get_domain(struct device *dev)22492479{22502480 struct protection_domain *domain;24812481+ struct iommu_domain *io_domain;2251248222522483 if (!check_device(dev))22532484 return ERR_PTR(-EINVAL);2254248522552486 domain = get_dev_data(dev)->domain;24872487+ if (domain == NULL && get_dev_data(dev)->defer_attach) {24882488+ get_dev_data(dev)->defer_attach = false;24892489+ io_domain = iommu_get_domain_for_dev(dev);24902490+ domain = to_pdomain(io_domain);24912491+ attach_device(dev, 
domain);24922492+ }24932493+ if (domain == NULL)24942494+ return ERR_PTR(-EBUSY);24952495+22562496 if (!dma_ops_domain(domain))22572497 return ERR_PTR(-EBUSY);22582498···23082528 else23092529 return 0;23102530}25312531+23112532/*23122533 * This function contains common code for mapping of a physically23132534 * contiguous memory region into DMA address space. It is used by all···24002619 domain_flush_tlb(&dma_dom->domain);24012620 domain_flush_complete(&dma_dom->domain);24022621 } else {24032403- queue_add(dma_dom, dma_addr, pages);26222622+ pages = __roundup_pow_of_two(pages);26232623+ queue_iova(&dma_dom->iovad, dma_addr >> PAGE_SHIFT, pages, 0);24042624 }24052625}24062626···31553373 WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);31563374}3157337533763376+static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,33773377+ struct device *dev)33783378+{33793379+ struct iommu_dev_data *dev_data = dev->archdata.iommu;33803380+ return dev_data->defer_attach;33813381+}33823382+31583383const struct iommu_ops amd_iommu_ops = {31593384 .capable = amd_iommu_capable,31603385 .domain_alloc = amd_iommu_domain_alloc,···31783389 .get_resv_regions = amd_iommu_get_resv_regions,31793390 .put_resv_regions = amd_iommu_put_resv_regions,31803391 .apply_resv_region = amd_iommu_apply_resv_region,33923392+ .is_attach_deferred = amd_iommu_is_attach_deferred,31813393 .pgsize_bitmap = AMD_IOMMU_PGSIZES,31823394};31833395···35663776 *****************************************************************************/3567377735683778static struct irq_chip amd_ir_chip;35693569-35703570-#define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 45)-1) << 6)35713571-#define DTE_IRQ_REMAP_INTCTL (2ULL << 60)35723572-#define DTE_IRQ_TABLE_LEN (8ULL << 1)35733573-#define DTE_IRQ_REMAP_ENABLE 1ULL3574377935753780static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)35763781{
+199-24
drivers/iommu/amd_iommu_init.c
···2929#include <linux/export.h>3030#include <linux/iommu.h>3131#include <linux/kmemleak.h>3232-#include <linux/crash_dump.h>3332#include <asm/pci-direct.h>3433#include <asm/iommu.h>3534#include <asm/gart.h>···3738#include <asm/io_apic.h>3839#include <asm/irq_remapping.h>39404141+#include <linux/crash_dump.h>4042#include "amd_iommu_proto.h"4143#include "amd_iommu_types.h"4244#include "irq_remapping.h"···196196 * page table root pointer.197197 */198198struct dev_table_entry *amd_iommu_dev_table;199199+/*200200+ * Pointer to a device table which the content of old device table201201+ * will be copied to. It's only be used in kdump kernel.202202+ */203203+static struct dev_table_entry *old_dev_tbl_cpy;199204200205/*201206 * The alias table is a driver specific data structure which contains the···214209 * for a specific device. It is also indexed by the PCI device id.215210 */216211struct amd_iommu **amd_iommu_rlookup_table;212212+EXPORT_SYMBOL(amd_iommu_rlookup_table);217213218214/*219215 * This table is used to find the irq remapping table for a given device id···263257static int amd_iommu_enable_interrupts(void);264258static int __init iommu_go_to_state(enum iommu_init_state state);265259static void init_device_table_dma(void);260260+261261+static bool amd_iommu_pre_enabled = true;262262+263263+bool translation_pre_enabled(struct amd_iommu *iommu)264264+{265265+ return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);266266+}267267+EXPORT_SYMBOL(translation_pre_enabled);268268+269269+static void clear_translation_pre_enabled(struct amd_iommu *iommu)270270+{271271+ iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;272272+}273273+274274+static void init_translation_status(struct amd_iommu *iommu)275275+{276276+ u32 ctrl;277277+278278+ ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);279279+ if (ctrl & (1<<CONTROL_IOMMU_EN))280280+ iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;281281+}266282267283static inline void update_last_devid(u16 devid)268284{···643615 
amd_iommu_reset_cmd_buffer(iommu);644616}645617618618+/*619619+ * This function disables the command buffer620620+ */621621+static void iommu_disable_command_buffer(struct amd_iommu *iommu)622622+{623623+ iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);624624+}625625+646626static void __init free_command_buffer(struct amd_iommu *iommu)647627{648628 free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));···681645 writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);682646683647 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);648648+}649649+650650+/*651651+ * This function disables the event log buffer652652+ */653653+static void iommu_disable_event_buffer(struct amd_iommu *iommu)654654+{655655+ iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);684656}685657686658static void __init free_event_buffer(struct amd_iommu *iommu)···851807 return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;852808}853809810810+811811+static bool copy_device_table(void)812812+{813813+ u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;814814+ struct dev_table_entry *old_devtb = NULL;815815+ u32 lo, hi, devid, old_devtb_size;816816+ phys_addr_t old_devtb_phys;817817+ struct amd_iommu *iommu;818818+ u16 dom_id, dte_v, irq_v;819819+ gfp_t gfp_flag;820820+ u64 tmp;821821+822822+ if (!amd_iommu_pre_enabled)823823+ return false;824824+825825+ pr_warn("Translation is already enabled - trying to copy translation structures\n");826826+ for_each_iommu(iommu) {827827+ /* All IOMMUs should use the same device table with the same size */828828+ lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);829829+ hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);830830+ entry = (((u64) hi) << 32) + lo;831831+ if (last_entry && last_entry != entry) {832832+ pr_err("IOMMU:%d should use the same dev table as others!/n",833833+ iommu->index);834834+ return false;835835+ }836836+ last_entry = entry;837837+838838+ old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;839839+ if 
(old_devtb_size != dev_table_size) {840840+ pr_err("The device table size of IOMMU:%d is not expected!/n",841841+ iommu->index);842842+ return false;843843+ }844844+ }845845+846846+ old_devtb_phys = entry & PAGE_MASK;847847+ if (old_devtb_phys >= 0x100000000ULL) {848848+ pr_err("The address of old device table is above 4G, not trustworthy!/n");849849+ return false;850850+ }851851+ old_devtb = memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);852852+ if (!old_devtb)853853+ return false;854854+855855+ gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;856856+ old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,857857+ get_order(dev_table_size));858858+ if (old_dev_tbl_cpy == NULL) {859859+ pr_err("Failed to allocate memory for copying old device table!/n");860860+ return false;861861+ }862862+863863+ for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {864864+ old_dev_tbl_cpy[devid] = old_devtb[devid];865865+ dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;866866+ dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;867867+868868+ if (dte_v && dom_id) {869869+ old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];870870+ old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];871871+ __set_bit(dom_id, amd_iommu_pd_alloc_bitmap);872872+ /* If gcr3 table existed, mask it out */873873+ if (old_devtb[devid].data[0] & DTE_FLAG_GV) {874874+ tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;875875+ tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;876876+ old_dev_tbl_cpy[devid].data[1] &= ~tmp;877877+ tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;878878+ tmp |= DTE_FLAG_GV;879879+ old_dev_tbl_cpy[devid].data[0] &= ~tmp;880880+ }881881+ }882882+883883+ irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;884884+ int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;885885+ int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK;886886+ if (irq_v && (int_ctl || int_tab_len)) {887887+ if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||888888+ (int_tab_len != 
DTE_IRQ_TABLE_LEN)) {889889+ pr_err("Wrong old irq remapping flag: %#x\n", devid);890890+ return false;891891+ }892892+893893+ old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];894894+ }895895+ }896896+ memunmap(old_devtb);897897+898898+ return true;899899+}854900855901void amd_iommu_apply_erratum_63(u16 devid)856902{···1533139915341400 iommu->int_enabled = false;1535140114021402+ init_translation_status(iommu);14031403+ if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {14041404+ iommu_disable(iommu);14051405+ clear_translation_pre_enabled(iommu);14061406+ pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",14071407+ iommu->index);14081408+ }14091409+ if (amd_iommu_pre_enabled)14101410+ amd_iommu_pre_enabled = translation_pre_enabled(iommu);14111411+15361412 ret = init_iommu_from_acpi(iommu, h);15371413 if (ret)15381414 return ret;···20361892}2037189320381894/*20392039- * Init the device table to not allow DMA access for devices and20402040- * suppress all page faults18951895+ * Init the device table to not allow DMA access for devices20411896 */20421897static void init_device_table_dma(void)20431898{···20451902 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {20461903 set_dev_entry_bit(devid, DEV_ENTRY_VALID);20471904 set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);20482048- /*20492049- * In kdump kernels in-flight DMA from the old kernel might20502050- * cause IO_PAGE_FAULTs. 
There are no reports that a kdump20512051- * actually failed because of that, so just disable fault20522052- * reporting in the hardware to get rid of the messages20532053- */20542054- if (is_kdump_kernel())20552055- set_dev_entry_bit(devid, DEV_ENTRY_NO_PAGE_FAULT);20561905 }20571906}20581907···21572022#endif21582023}2159202420252025+static void early_enable_iommu(struct amd_iommu *iommu)20262026+{20272027+ iommu_disable(iommu);20282028+ iommu_init_flags(iommu);20292029+ iommu_set_device_table(iommu);20302030+ iommu_enable_command_buffer(iommu);20312031+ iommu_enable_event_buffer(iommu);20322032+ iommu_set_exclusion_range(iommu);20332033+ iommu_enable_ga(iommu);20342034+ iommu_enable(iommu);20352035+ iommu_flush_all_caches(iommu);20362036+}20372037+21602038/*21612039 * This function finally enables all IOMMUs found in the system after21622162- * they have been initialized20402040+ * they have been initialized.20412041+ *20422042+ * Or if in kdump kernel and IOMMUs are all pre-enabled, try to copy20432043+ * the old content of device table entries. 
Not this case or copy failed,20442044+ * just continue as normal kernel does.21632045 */21642046static void early_enable_iommus(void)21652047{21662048 struct amd_iommu *iommu;2167204921682168- for_each_iommu(iommu) {21692169- iommu_disable(iommu);21702170- iommu_init_flags(iommu);21712171- iommu_set_device_table(iommu);21722172- iommu_enable_command_buffer(iommu);21732173- iommu_enable_event_buffer(iommu);21742174- iommu_set_exclusion_range(iommu);21752175- iommu_enable_ga(iommu);21762176- iommu_enable(iommu);21772177- iommu_flush_all_caches(iommu);20502050+20512051+ if (!copy_device_table()) {20522052+ /*20532053+ * If come here because of failure in copying device table from old20542054+ * kernel with all IOMMUs enabled, print error message and try to20552055+ * free allocated old_dev_tbl_cpy.20562056+ */20572057+ if (amd_iommu_pre_enabled)20582058+ pr_err("Failed to copy DEV table from previous kernel.\n");20592059+ if (old_dev_tbl_cpy != NULL)20602060+ free_pages((unsigned long)old_dev_tbl_cpy,20612061+ get_order(dev_table_size));20622062+20632063+ for_each_iommu(iommu) {20642064+ clear_translation_pre_enabled(iommu);20652065+ early_enable_iommu(iommu);20662066+ }20672067+ } else {20682068+ pr_info("Copied DEV table from previous kernel.\n");20692069+ free_pages((unsigned long)amd_iommu_dev_table,20702070+ get_order(dev_table_size));20712071+ amd_iommu_dev_table = old_dev_tbl_cpy;20722072+ for_each_iommu(iommu) {20732073+ iommu_disable_command_buffer(iommu);20742074+ iommu_disable_event_buffer(iommu);20752075+ iommu_enable_command_buffer(iommu);20762076+ iommu_enable_event_buffer(iommu);20772077+ iommu_enable_ga(iommu);20782078+ iommu_set_device_table(iommu);20792079+ iommu_flush_all_caches(iommu);20802080+ }21782081 }2179208221802083#ifdef CONFIG_IRQ_REMAP···2448227524492276 /* Device table - directly used by all IOMMUs */24502277 ret = -ENOMEM;24512451- amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,22782278+ amd_iommu_dev_table = 
(void *)__get_free_pages(22792279+ GFP_KERNEL | __GFP_ZERO | GFP_DMA32,24522280 get_order(dev_table_size));24532281 if (amd_iommu_dev_table == NULL)24542282 goto out;···24992325 goto out;2500232625012327 /* Disable any previously enabled IOMMUs */25022502- disable_iommus();23282328+ if (!is_kdump_kernel() || amd_iommu_disabled)23292329+ disable_iommus();2503233025042331 if (amd_iommu_irq_remap)25052332 amd_iommu_irq_remap = check_ioapic_information();
···5454#define lv2ent_small(pent) ((*(pent) & 2) == 2)5555#define lv2ent_large(pent) ((*(pent) & 3) == 1)56565757-#ifdef CONFIG_BIG_ENDIAN5858-#warning "revisit driver if we can enable big-endian ptes"5959-#endif6060-6157/*6258 * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces6359 * v5.0 introduced support for 36bit physical address space by shifting···565569 spin_unlock_irqrestore(&data->lock, flags);566570}567571568568-static struct iommu_ops exynos_iommu_ops;572572+static const struct iommu_ops exynos_iommu_ops;569573570574static int __init exynos_sysmmu_probe(struct platform_device *pdev)571575{···654658 LV2_PROT = SYSMMU_V5_LV2_PROT;655659 }656660 }661661+662662+ /*663663+ * use the first registered sysmmu device for performing664664+ * dma mapping operations on iommu page tables (cpu cache flush)665665+ */666666+ if (!dma_dev)667667+ dma_dev = &pdev->dev;657668658669 pm_runtime_enable(dev);659670···13261323 return 0;13271324}1328132513291329-static struct iommu_ops exynos_iommu_ops = {13261326+static const struct iommu_ops exynos_iommu_ops = {13301327 .domain_alloc = exynos_iommu_domain_alloc,13311328 .domain_free = exynos_iommu_domain_free,13321329 .attach_dev = exynos_iommu_attach_device,···13411338 .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,13421339 .of_xlate = exynos_iommu_of_xlate,13431340};13441344-13451345-static bool init_done;1346134113471342static int __init exynos_iommu_init(void)13481343{···13741373 goto err_set_iommu;13751374 }1376137513771377- init_done = true;13781378-13791376 return 0;13801377err_set_iommu:13811378 kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);···13831384 kmem_cache_destroy(lv2table_kmem_cache);13841385 return ret;13851386}13871387+core_initcall(exynos_iommu_init);1386138813871387-static int __init exynos_iommu_of_setup(struct device_node *np)13881388-{13891389- struct platform_device *pdev;13901390-13911391- if (!init_done)13921392- exynos_iommu_init();13931393-13941394- pdev = 
of_platform_device_create(np, NULL, platform_bus_type.dev_root);13951395- if (!pdev)13961396- return -ENODEV;13971397-13981398- /*13991399- * use the first registered sysmmu device for performing14001400- * dma mapping operations on iommu page tables (cpu cache flush)14011401- */14021402- if (!dma_dev)14031403- dma_dev = &pdev->dev;14041404-14051405- return 0;14061406-}14071407-14081408-IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu",14091409- exynos_iommu_of_setup);13891389+IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu", NULL);
+15-12
drivers/iommu/fsl_pamu.c
···4242static struct paace *ppaact;4343static struct paace *spaact;44444545+static bool probed; /* Has PAMU been probed? */4646+4547/*4648 * Table for matching compatible strings, for device tree4749 * guts node, for QorIQ SOCs.···532530 if (node) {533531 prop = of_get_property(node, "cache-stash-id", NULL);534532 if (!prop) {535535- pr_debug("missing cache-stash-id at %s\n",536536- node->full_name);533533+ pr_debug("missing cache-stash-id at %pOF\n",534534+ node);537535 of_node_put(node);538536 return ~(u32)0;539537 }···559557 if (stash_dest_hint == cache_level) {560558 prop = of_get_property(node, "cache-stash-id", NULL);561559 if (!prop) {562562- pr_debug("missing cache-stash-id at %s\n",563563- node->full_name);560560+ pr_debug("missing cache-stash-id at %pOF\n",561561+ node);564562 of_node_put(node);565563 return ~(u32)0;566564 }···570568571569 prop = of_get_property(node, "next-level-cache", NULL);572570 if (!prop) {573573- pr_debug("can't find next-level-cache at %s\n",574574- node->full_name);571571+ pr_debug("can't find next-level-cache at %pOF\n", node);575572 of_node_put(node);576573 return ~(u32)0; /* can't traverse any further */577574 }···10341033 * NOTE : All PAMUs share the same LIODN tables.10351034 */1036103510361036+ if (WARN_ON(probed))10371037+ return -EBUSY;10381038+10371039 pamu_regs = of_iomap(dev->of_node, 0);10381040 if (!pamu_regs) {10391041 dev_err(dev, "ioremap of PAMU node failed\n");···1067106310681064 guts_node = of_find_matching_node(NULL, guts_device_ids);10691065 if (!guts_node) {10701070- dev_err(dev, "could not find GUTS node %s\n",10711071- dev->of_node->full_name);10661066+ dev_err(dev, "could not find GUTS node %pOF\n", dev->of_node);10721067 ret = -ENODEV;10731068 goto error;10741069 }···1175117211761173 setup_liodns();1177117411751175+ probed = true;11761176+11781177 return 0;1179117811801179error_genpool:···1251124612521247 pdev = platform_device_alloc("fsl-of-pamu", 0);12531248 if (!pdev) {12541254- pr_err("could not 
allocate device %s\n",12551255- np->full_name);12491249+ pr_err("could not allocate device %pOF\n", np);12561250 ret = -ENOMEM;12571251 goto error_device_alloc;12581252 }···1263125912641260 ret = platform_device_add(pdev);12651261 if (ret) {12661266- pr_err("could not add device %s (err=%i)\n",12671267- np->full_name, ret);12621262+ pr_err("could not add device %pOF (err=%i)\n", np, ret);12681263 goto error_device_add;12691264 }12701265
+22-6
drivers/iommu/fsl_pamu_domain.c
···3333static struct kmem_cache *iommu_devinfo_cache;3434static DEFINE_SPINLOCK(device_domain_lock);35353636+struct iommu_device pamu_iommu; /* IOMMU core code handle */3737+3638static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)3739{3840 return container_of(dom, struct fsl_dma_domain, iommu_domain);···621619 for (i = 0; i < num; i++) {622620 /* Ensure that LIODN value is valid */623621 if (liodn[i] >= PAACE_NUMBER_ENTRIES) {624624- pr_debug("Invalid liodn %d, attach device failed for %s\n",625625- liodn[i], dev->of_node->full_name);622622+ pr_debug("Invalid liodn %d, attach device failed for %pOF\n",623623+ liodn[i], dev->of_node);626624 ret = -EINVAL;627625 break;628626 }···686684 liodn_cnt = len / sizeof(u32);687685 ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt);688686 } else {689689- pr_debug("missing fsl,liodn property at %s\n",690690- dev->of_node->full_name);687687+ pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);691688 ret = -EINVAL;692689 }693690···721720 if (prop)722721 detach_device(dev, dma_domain);723722 else724724- pr_debug("missing fsl,liodn property at %s\n",725725- dev->of_node->full_name);723723+ pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);726724}727725728726static int configure_domain_geometry(struct iommu_domain *domain, void *data)···983983984984 iommu_group_put(group);985985986986+ iommu_device_link(&pamu_iommu, dev);987987+986988 return 0;987989}988990989991static void fsl_pamu_remove_device(struct device *dev)990992{993993+ iommu_device_unlink(&pamu_iommu, dev);991994 iommu_group_remove_device(dev);992995}993996···10751072 ret = iommu_init_mempool();10761073 if (ret)10771074 return ret;10751075+10761076+ ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");10771077+ if (ret)10781078+ return ret;10791079+10801080+ iommu_device_set_ops(&pamu_iommu, &fsl_pamu_ops);10811081+10821082+ ret = iommu_device_register(&pamu_iommu);10831083+ if (ret) {10841084+ 
iommu_device_sysfs_remove(&pamu_iommu);10851085+ pr_err("Can't register iommu device\n");10861086+ return ret;10871087+ }1078108810791089 bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);10801090 bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);
+90-190
drivers/iommu/intel-iommu.c
···458458#define for_each_rmrr_units(rmrr) \459459 list_for_each_entry(rmrr, &dmar_rmrr_units, list)460460461461-static void flush_unmaps_timeout(unsigned long data);462462-463463-struct deferred_flush_entry {464464- unsigned long iova_pfn;465465- unsigned long nrpages;466466- struct dmar_domain *domain;467467- struct page *freelist;468468-};469469-470470-#define HIGH_WATER_MARK 250471471-struct deferred_flush_table {472472- int next;473473- struct deferred_flush_entry entries[HIGH_WATER_MARK];474474-};475475-476476-struct deferred_flush_data {477477- spinlock_t lock;478478- int timer_on;479479- struct timer_list timer;480480- long size;481481- struct deferred_flush_table *tables;482482-};483483-484484-static DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);485485-486461/* bitmap for indexing intel_iommus */487462static int g_num_of_iommus;488463···949974 return ret;950975}951976952952-static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)953953-{954954- struct context_entry *context;955955- unsigned long flags;956956-957957- spin_lock_irqsave(&iommu->lock, flags);958958- context = iommu_context_addr(iommu, bus, devfn, 0);959959- if (context) {960960- context_clear_entry(context);961961- __iommu_flush_cache(iommu, context, sizeof(*context));962962- }963963- spin_unlock_irqrestore(&iommu->lock, flags);964964-}965965-966977static void free_context_table(struct intel_iommu *iommu)967978{968979 int i;···10981137}1099113811001139static void dma_pte_free_level(struct dmar_domain *domain, int level,11011101- struct dma_pte *pte, unsigned long pfn,11021102- unsigned long start_pfn, unsigned long last_pfn)11401140+ int retain_level, struct dma_pte *pte,11411141+ unsigned long pfn, unsigned long start_pfn,11421142+ unsigned long last_pfn)11031143{11041144 pfn = max(start_pfn, pfn);11051145 pte = &pte[pfn_level_offset(pfn, level)];···11151153 level_pfn = pfn & level_mask(level);11161154 level_pte = 
phys_to_virt(dma_pte_addr(pte));1117115511181118- if (level > 2)11191119- dma_pte_free_level(domain, level - 1, level_pte,11201120- level_pfn, start_pfn, last_pfn);11561156+ if (level > 2) {11571157+ dma_pte_free_level(domain, level - 1, retain_level,11581158+ level_pte, level_pfn, start_pfn,11591159+ last_pfn);11601160+ }1121116111221122- /* If range covers entire pagetable, free it */11231123- if (!(start_pfn > level_pfn ||11621162+ /*11631163+ * Free the page table if we're below the level we want to11641164+ * retain and the range covers the entire table.11651165+ */11661166+ if (level < retain_level && !(start_pfn > level_pfn ||11241167 last_pfn < level_pfn + level_size(level) - 1)) {11251168 dma_clear_pte(pte);11261169 domain_flush_cache(domain, pte, sizeof(*pte));···11361169 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);11371170}1138117111391139-/* clear last level (leaf) ptes and free page table pages. */11721172+/*11731173+ * clear last level (leaf) ptes and free page table pages below the11741174+ * level we wish to keep intact.11751175+ */11401176static void dma_pte_free_pagetable(struct dmar_domain *domain,11411177 unsigned long start_pfn,11421142- unsigned long last_pfn)11781178+ unsigned long last_pfn,11791179+ int retain_level)11431180{11441181 BUG_ON(!domain_pfn_supported(domain, start_pfn));11451182 BUG_ON(!domain_pfn_supported(domain, last_pfn));···11521181 dma_pte_clear_range(domain, start_pfn, last_pfn);1153118211541183 /* We don't need lock here; nobody else touches the iova range */11551155- dma_pte_free_level(domain, agaw_to_level(domain->agaw),11841184+ dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,11561185 domain->pgd, 0, start_pfn, last_pfn);1157118611581187 /* free pgd */···12781307 freelist = pg->freelist;12791308 free_pgtable_page(page_address(pg));12801309 }13101310+}13111311+13121312+static void iova_entry_free(unsigned long data)13131313+{13141314+ struct page *freelist = (struct page 
*)data;13151315+13161316+ dma_free_pagelist(freelist);12811317}1282131812831319/* iommu handling */···16001622 addr, mask);16011623}1602162416251625+static void iommu_flush_iova(struct iova_domain *iovad)16261626+{16271627+ struct dmar_domain *domain;16281628+ int idx;16291629+16301630+ domain = container_of(iovad, struct dmar_domain, iovad);16311631+16321632+ for_each_domain_iommu(idx, domain) {16331633+ struct intel_iommu *iommu = g_iommus[idx];16341634+ u16 did = domain->iommu_did[iommu->seq_id];16351635+16361636+ iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);16371637+16381638+ if (!cap_caching_mode(iommu->cap))16391639+ iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),16401640+ 0, MAX_AGAW_PFN_WIDTH);16411641+ }16421642+}16431643+16031644static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)16041645{16051646 u32 pmen;···19291932{19301933 int adjust_width, agaw;19311934 unsigned long sagaw;19351935+ int err;1932193619331937 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,19341938 DMA_32BIT_PFN);19391939+19401940+ err = init_iova_flush_queue(&domain->iovad,19411941+ iommu_flush_iova, iova_entry_free);19421942+ if (err)19431943+ return err;19441944+19351945 domain_reserve_special_ranges(domain);1936194619371947 /* calculate AGAW */···19891985 /* Domain 0 is reserved, so dont process it */19901986 if (!domain)19911987 return;19921992-19931993- /* Flush any lazy unmaps that may reference this domain */19941994- if (!intel_iommu_strict) {19951995- int cpu;19961996-19971997- for_each_possible_cpu(cpu)19981998- flush_unmaps_timeout(cpu);19991999- }2000198820011989 /* Remove associated devices and clear attached or cached domains */20021990 rcu_read_lock();···22732277 /*22742278 * Ensure that old small page tables are22752279 * removed to make room for superpage(s).22802280+ * We're adding new large pages, so make sure22812281+ * we don't remove their parent tables.22762282 */22772277- dma_pte_free_pagetable(domain, 
iov_pfn, end_pfn);22832283+ dma_pte_free_pagetable(domain, iov_pfn, end_pfn,22842284+ largepage_lvl + 1);22782285 } else {22792286 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;22802287 }···2350235123512352static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)23522353{23542354+ unsigned long flags;23552355+ struct context_entry *context;23562356+ u16 did_old;23572357+23532358 if (!iommu)23542359 return;2355236023562356- clear_context_table(iommu, bus, devfn);23572357- iommu->flush.flush_context(iommu, 0, 0, 0,23582358- DMA_CCMD_GLOBAL_INVL);23592359- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);23612361+ spin_lock_irqsave(&iommu->lock, flags);23622362+ context = iommu_context_addr(iommu, bus, devfn, 0);23632363+ if (!context) {23642364+ spin_unlock_irqrestore(&iommu->lock, flags);23652365+ return;23662366+ }23672367+ did_old = context_domain_id(context);23682368+ context_clear_entry(context);23692369+ __iommu_flush_cache(iommu, context, sizeof(*context));23702370+ spin_unlock_irqrestore(&iommu->lock, flags);23712371+ iommu->flush.flush_context(iommu,23722372+ did_old,23732373+ (((u16)bus) << 8) | devfn,23742374+ DMA_CCMD_MASK_NOBIT,23752375+ DMA_CCMD_DEVICE_INVL);23762376+ iommu->flush.flush_iotlb(iommu,23772377+ did_old,23782378+ 0,23792379+ 0,23802380+ DMA_TLB_DSI_FLUSH);23602381}2361238223622383static inline void unlink_domain_info(struct device_domain_info *info)···32253206 bool copied_tables = false;32263207 struct device *dev;32273208 struct intel_iommu *iommu;32283228- int i, ret, cpu;32093209+ int i, ret;3229321032303211 /*32313212 * for each drhd···32563237 pr_err("Allocating global iommu array failed\n");32573238 ret = -ENOMEM;32583239 goto error;32593259- }32603260-32613261- for_each_possible_cpu(cpu) {32623262- struct deferred_flush_data *dfd = per_cpu_ptr(&deferred_flush,32633263- cpu);32643264-32653265- dfd->tables = kzalloc(g_num_of_iommus *32663266- sizeof(struct deferred_flush_table),32673267- 
GFP_KERNEL);32683268- if (!dfd->tables) {32693269- ret = -ENOMEM;32703270- goto free_g_iommus;32713271- }32723272-32733273- spin_lock_init(&dfd->lock);32743274- setup_timer(&dfd->timer, flush_unmaps_timeout, cpu);32753240 }3276324132773242 for_each_active_iommu(iommu, drhd) {···34403437 disable_dmar_iommu(iommu);34413438 free_dmar_iommu(iommu);34423439 }34433443-free_g_iommus:34443444- for_each_possible_cpu(cpu)34453445- kfree(per_cpu_ptr(&deferred_flush, cpu)->tables);34403440+34463441 kfree(g_iommus);34423442+34473443error:34483444 return ret;34493445}···36473645 dir, *dev->dma_mask);36483646}3649364736503650-static void flush_unmaps(struct deferred_flush_data *flush_data)36513651-{36523652- int i, j;36533653-36543654- flush_data->timer_on = 0;36553655-36563656- /* just flush them all */36573657- for (i = 0; i < g_num_of_iommus; i++) {36583658- struct intel_iommu *iommu = g_iommus[i];36593659- struct deferred_flush_table *flush_table =36603660- &flush_data->tables[i];36613661- if (!iommu)36623662- continue;36633663-36643664- if (!flush_table->next)36653665- continue;36663666-36673667- /* In caching mode, global flushes turn emulation expensive */36683668- if (!cap_caching_mode(iommu->cap))36693669- iommu->flush.flush_iotlb(iommu, 0, 0, 0,36703670- DMA_TLB_GLOBAL_FLUSH);36713671- for (j = 0; j < flush_table->next; j++) {36723672- unsigned long mask;36733673- struct deferred_flush_entry *entry =36743674- &flush_table->entries[j];36753675- unsigned long iova_pfn = entry->iova_pfn;36763676- unsigned long nrpages = entry->nrpages;36773677- struct dmar_domain *domain = entry->domain;36783678- struct page *freelist = entry->freelist;36793679-36803680- /* On real hardware multiple invalidations are expensive */36813681- if (cap_caching_mode(iommu->cap))36823682- iommu_flush_iotlb_psi(iommu, domain,36833683- mm_to_dma_pfn(iova_pfn),36843684- nrpages, !freelist, 0);36853685- else {36863686- mask = ilog2(nrpages);36873687- iommu_flush_dev_iotlb(domain,36883688- 
(uint64_t)iova_pfn << PAGE_SHIFT, mask);36893689- }36903690- free_iova_fast(&domain->iovad, iova_pfn, nrpages);36913691- if (freelist)36923692- dma_free_pagelist(freelist);36933693- }36943694- flush_table->next = 0;36953695- }36963696-36973697- flush_data->size = 0;36983698-}36993699-37003700-static void flush_unmaps_timeout(unsigned long cpuid)37013701-{37023702- struct deferred_flush_data *flush_data = per_cpu_ptr(&deferred_flush, cpuid);37033703- unsigned long flags;37043704-37053705- spin_lock_irqsave(&flush_data->lock, flags);37063706- flush_unmaps(flush_data);37073707- spin_unlock_irqrestore(&flush_data->lock, flags);37083708-}37093709-37103710-static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,37113711- unsigned long nrpages, struct page *freelist)37123712-{37133713- unsigned long flags;37143714- int entry_id, iommu_id;37153715- struct intel_iommu *iommu;37163716- struct deferred_flush_entry *entry;37173717- struct deferred_flush_data *flush_data;37183718-37193719- flush_data = raw_cpu_ptr(&deferred_flush);37203720-37213721- /* Flush all CPUs' entries to avoid deferring too much. 
If37223722- * this becomes a bottleneck, can just flush us, and rely on37233723- * flush timer for the rest.37243724- */37253725- if (flush_data->size == HIGH_WATER_MARK) {37263726- int cpu;37273727-37283728- for_each_online_cpu(cpu)37293729- flush_unmaps_timeout(cpu);37303730- }37313731-37323732- spin_lock_irqsave(&flush_data->lock, flags);37333733-37343734- iommu = domain_get_iommu(dom);37353735- iommu_id = iommu->seq_id;37363736-37373737- entry_id = flush_data->tables[iommu_id].next;37383738- ++(flush_data->tables[iommu_id].next);37393739-37403740- entry = &flush_data->tables[iommu_id].entries[entry_id];37413741- entry->domain = dom;37423742- entry->iova_pfn = iova_pfn;37433743- entry->nrpages = nrpages;37443744- entry->freelist = freelist;37453745-37463746- if (!flush_data->timer_on) {37473747- mod_timer(&flush_data->timer, jiffies + msecs_to_jiffies(10));37483748- flush_data->timer_on = 1;37493749- }37503750- flush_data->size++;37513751- spin_unlock_irqrestore(&flush_data->lock, flags);37523752-}37533753-37543648static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)37553649{37563650 struct dmar_domain *domain;···36823784 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));36833785 dma_free_pagelist(freelist);36843786 } else {36853685- add_unmap(domain, iova_pfn, nrpages, freelist);37873787+ queue_iova(&domain->iovad, iova_pfn, nrpages,37883788+ (unsigned long)freelist);36863789 /*36873790 * queue up the release of the unmap to save the 1/6th of the36883791 * cpu used up by the iotlb flush operation...···38373938 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);38383939 if (unlikely(ret)) {38393940 dma_pte_free_pagetable(domain, start_vpfn,38403840- start_vpfn + size - 1);39413941+ start_vpfn + size - 1,39423942+ agaw_to_level(domain->agaw) + 1);38413943 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));38423944 return 0;38433945 }···46214721static int intel_iommu_cpu_dead(unsigned int 
cpu)46224722{46234723 free_all_cpu_cached_iovas(cpu);46244624- flush_unmaps_timeout(cpu);46254724 return 0;46264725}46274726···52425343 sdev->sid = PCI_DEVID(info->bus, info->devfn);5243534452445345 if (!(ctx_lo & CONTEXT_PASIDE)) {52455245- context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);53465346+ if (iommu->pasid_state_table)53475347+ context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);52465348 context[1].lo = (u64)virt_to_phys(iommu->pasid_table) |52475349 intel_iommu_get_pts(iommu);52485350
+14
drivers/iommu/intel-svm.c
···2424#include <linux/pci-ats.h>2525#include <linux/dmar.h>2626#include <linux/interrupt.h>2727+#include <asm/page.h>27282829static irqreturn_t prq_event_thread(int irq, void *d);2930···556555 return (requested & ~vma->vm_flags) != 0;557556}558557558558+static bool is_canonical_address(u64 addr)559559+{560560+ int shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);561561+ long saddr = (long) addr;562562+563563+ return (((saddr << shift) >> shift) == saddr);564564+}565565+559566static irqreturn_t prq_event_thread(int irq, void *d)560567{561568 struct intel_iommu *iommu = d;···621612 /* If the mm is already defunct, don't handle faults. */622613 if (!mmget_not_zero(svm->mm))623614 goto bad_req;615615+616616+ /* If address is not canonical, return invalid response */617617+ if (!is_canonical_address(address))618618+ goto bad_req;619619+624620 down_read(&svm->mm->mmap_sem);625621 vma = find_extend_vma(svm->mm, address);626622 if (!vma || address < vma->vm_start)
+41-18
drivers/iommu/iommu.c
···527527528528 }529529530530+ iommu_flush_tlb_all(domain);531531+530532out:531533 iommu_put_resv_regions(dev, &mappings);532534···10071005 if (group)10081006 return group;1009100710101010- group = ERR_PTR(-EINVAL);10081008+ if (!ops)10091009+ return ERR_PTR(-EINVAL);1011101010121012- if (ops && ops->device_group)10131013- group = ops->device_group(dev);10141014-10111011+ group = ops->device_group(dev);10151012 if (WARN_ON_ONCE(group == NULL))10161013 return ERR_PTR(-EINVAL);10171014···12841283 struct device *dev)12851284{12861285 int ret;12861286+ if ((domain->ops->is_attach_deferred != NULL) &&12871287+ domain->ops->is_attach_deferred(domain, dev))12881288+ return 0;12891289+12871290 if (unlikely(domain->ops->attach_dev == NULL))12881291 return -ENODEV;12891292···13031298 int ret;1304129913051300 group = iommu_group_get(dev);13061306- /* FIXME: Remove this when groups a mandatory for iommu drivers */13071307- if (group == NULL)13081308- return __iommu_attach_device(domain, dev);13091309-13101301 /*13111311- * We have a group - lock it to make sure the device-count doesn't13021302+ * Lock the group to make sure the device-count doesn't13121303 * change while we are attaching13131304 */13141305 mutex_lock(&group->mutex);···13251324static void __iommu_detach_device(struct iommu_domain *domain,13261325 struct device *dev)13271326{13271327+ if ((domain->ops->is_attach_deferred != NULL) &&13281328+ domain->ops->is_attach_deferred(domain, dev))13291329+ return;13301330+13281331 if (unlikely(domain->ops->detach_dev == NULL))13291332 return;13301333···13411336 struct iommu_group *group;1342133713431338 group = iommu_group_get(dev);13441344- /* FIXME: Remove this when groups a mandatory for iommu drivers */13451345- if (group == NULL)13461346- return __iommu_detach_device(domain, dev);1347133913481340 mutex_lock(&group->mutex);13491341 if (iommu_group_device_count(group) != 1) {···13621360 struct iommu_group *group;1363136113641362 group = iommu_group_get(dev);13651365- /* 
FIXME: Remove this when groups a mandatory for iommu drivers */13661366- if (group == NULL)13631363+ if (!group)13671364 return NULL;1368136513691366 domain = group->domain;···15571556}15581557EXPORT_SYMBOL_GPL(iommu_map);1559155815601560-size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)15591559+static size_t __iommu_unmap(struct iommu_domain *domain,15601560+ unsigned long iova, size_t size,15611561+ bool sync)15611562{15631563+ const struct iommu_ops *ops = domain->ops;15621564 size_t unmapped_page, unmapped = 0;15631563- unsigned int min_pagesz;15641565 unsigned long orig_iova = iova;15661566+ unsigned int min_pagesz;1565156715661566- if (unlikely(domain->ops->unmap == NULL ||15681568+ if (unlikely(ops->unmap == NULL ||15671569 domain->pgsize_bitmap == 0UL))15681570 return -ENODEV;15691571···15961592 while (unmapped < size) {15971593 size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);1598159415991599- unmapped_page = domain->ops->unmap(domain, iova, pgsize);15951595+ unmapped_page = ops->unmap(domain, iova, pgsize);16001596 if (!unmapped_page)16011597 break;15981598+15991599+ if (sync && ops->iotlb_range_add)16001600+ ops->iotlb_range_add(domain, iova, pgsize);1602160116031602 pr_debug("unmapped: iova 0x%lx size 0x%zx\n",16041603 iova, unmapped_page);···16101603 unmapped += unmapped_page;16111604 }1612160516061606+ if (sync && ops->iotlb_sync)16071607+ ops->iotlb_sync(domain);16081608+16131609 trace_unmap(orig_iova, size, unmapped);16141610 return unmapped;16151611}16121612+16131613+size_t iommu_unmap(struct iommu_domain *domain,16141614+ unsigned long iova, size_t size)16151615+{16161616+ return __iommu_unmap(domain, iova, size, true);16171617+}16161618EXPORT_SYMBOL_GPL(iommu_unmap);16191619+16201620+size_t iommu_unmap_fast(struct iommu_domain *domain,16211621+ unsigned long iova, size_t size)16221622+{16231623+ return __iommu_unmap(domain, iova, size, 
false);16241624+}16251625+EXPORT_SYMBOL_GPL(iommu_unmap_fast);1617162616181627size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,16191628 struct scatterlist *sg, unsigned int nents, int prot)
+183
drivers/iommu/iova.c
···3232 unsigned long limit_pfn);3333static void init_iova_rcaches(struct iova_domain *iovad);3434static void free_iova_rcaches(struct iova_domain *iovad);3535+static void fq_destroy_all_entries(struct iova_domain *iovad);3636+static void fq_flush_timeout(unsigned long data);35373638void3739init_iova_domain(struct iova_domain *iovad, unsigned long granule,···5250 iovad->granule = granule;5351 iovad->start_pfn = start_pfn;5452 iovad->dma_32bit_pfn = pfn_32bit + 1;5353+ iovad->flush_cb = NULL;5454+ iovad->fq = NULL;5555 init_iova_rcaches(iovad);5656}5757EXPORT_SYMBOL_GPL(init_iova_domain);5858+5959+static void free_iova_flush_queue(struct iova_domain *iovad)6060+{6161+ if (!iovad->fq)6262+ return;6363+6464+ if (timer_pending(&iovad->fq_timer))6565+ del_timer(&iovad->fq_timer);6666+6767+ fq_destroy_all_entries(iovad);6868+6969+ free_percpu(iovad->fq);7070+7171+ iovad->fq = NULL;7272+ iovad->flush_cb = NULL;7373+ iovad->entry_dtor = NULL;7474+}7575+7676+int init_iova_flush_queue(struct iova_domain *iovad,7777+ iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)7878+{7979+ int cpu;8080+8181+ atomic64_set(&iovad->fq_flush_start_cnt, 0);8282+ atomic64_set(&iovad->fq_flush_finish_cnt, 0);8383+8484+ iovad->fq = alloc_percpu(struct iova_fq);8585+ if (!iovad->fq)8686+ return -ENOMEM;8787+8888+ iovad->flush_cb = flush_cb;8989+ iovad->entry_dtor = entry_dtor;9090+9191+ for_each_possible_cpu(cpu) {9292+ struct iova_fq *fq;9393+9494+ fq = per_cpu_ptr(iovad->fq, cpu);9595+ fq->head = 0;9696+ fq->tail = 0;9797+9898+ spin_lock_init(&fq->lock);9999+ }100100+101101+ setup_timer(&iovad->fq_timer, fq_flush_timeout, (unsigned long)iovad);102102+ atomic_set(&iovad->fq_timer_on, 0);103103+104104+ return 0;105105+}106106+EXPORT_SYMBOL_GPL(init_iova_flush_queue);5810759108static struct rb_node *60109__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)···476423}477424EXPORT_SYMBOL_GPL(free_iova_fast);478425426426+#define fq_ring_for_each(i, fq) \427427+ for ((i) = 
(fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)428428+429429+static inline bool fq_full(struct iova_fq *fq)430430+{431431+ assert_spin_locked(&fq->lock);432432+ return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);433433+}434434+435435+static inline unsigned fq_ring_add(struct iova_fq *fq)436436+{437437+ unsigned idx = fq->tail;438438+439439+ assert_spin_locked(&fq->lock);440440+441441+ fq->tail = (idx + 1) % IOVA_FQ_SIZE;442442+443443+ return idx;444444+}445445+446446+static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)447447+{448448+ u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt);449449+ unsigned idx;450450+451451+ assert_spin_locked(&fq->lock);452452+453453+ fq_ring_for_each(idx, fq) {454454+455455+ if (fq->entries[idx].counter >= counter)456456+ break;457457+458458+ if (iovad->entry_dtor)459459+ iovad->entry_dtor(fq->entries[idx].data);460460+461461+ free_iova_fast(iovad,462462+ fq->entries[idx].iova_pfn,463463+ fq->entries[idx].pages);464464+465465+ fq->head = (fq->head + 1) % IOVA_FQ_SIZE;466466+ }467467+}468468+469469+static void iova_domain_flush(struct iova_domain *iovad)470470+{471471+ atomic64_inc(&iovad->fq_flush_start_cnt);472472+ iovad->flush_cb(iovad);473473+ atomic64_inc(&iovad->fq_flush_finish_cnt);474474+}475475+476476+static void fq_destroy_all_entries(struct iova_domain *iovad)477477+{478478+ int cpu;479479+480480+ /*481481+ * This code runs when the iova_domain is being detroyed, so don't482482+ * bother to free iovas, just call the entry_dtor on all remaining483483+ * entries.484484+ */485485+ if (!iovad->entry_dtor)486486+ return;487487+488488+ for_each_possible_cpu(cpu) {489489+ struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);490490+ int idx;491491+492492+ fq_ring_for_each(idx, fq)493493+ iovad->entry_dtor(fq->entries[idx].data);494494+ }495495+}496496+497497+static void fq_flush_timeout(unsigned long data)498498+{499499+ struct iova_domain *iovad = (struct iova_domain *)data;500500+ int 
cpu;501501+502502+ atomic_set(&iovad->fq_timer_on, 0);503503+ iova_domain_flush(iovad);504504+505505+ for_each_possible_cpu(cpu) {506506+ unsigned long flags;507507+ struct iova_fq *fq;508508+509509+ fq = per_cpu_ptr(iovad->fq, cpu);510510+ spin_lock_irqsave(&fq->lock, flags);511511+ fq_ring_free(iovad, fq);512512+ spin_unlock_irqrestore(&fq->lock, flags);513513+ }514514+}515515+516516+void queue_iova(struct iova_domain *iovad,517517+ unsigned long pfn, unsigned long pages,518518+ unsigned long data)519519+{520520+ struct iova_fq *fq = get_cpu_ptr(iovad->fq);521521+ unsigned long flags;522522+ unsigned idx;523523+524524+ spin_lock_irqsave(&fq->lock, flags);525525+526526+ /*527527+ * First remove all entries from the flush queue that have already been528528+ * flushed out on another CPU. This makes the fq_full() check below less529529+ * likely to be true.530530+ */531531+ fq_ring_free(iovad, fq);532532+533533+ if (fq_full(fq)) {534534+ iova_domain_flush(iovad);535535+ fq_ring_free(iovad, fq);536536+ }537537+538538+ idx = fq_ring_add(fq);539539+540540+ fq->entries[idx].iova_pfn = pfn;541541+ fq->entries[idx].pages = pages;542542+ fq->entries[idx].data = data;543543+ fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt);544544+545545+ spin_unlock_irqrestore(&fq->lock, flags);546546+547547+ if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0)548548+ mod_timer(&iovad->fq_timer,549549+ jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));550550+551551+ put_cpu_ptr(iovad->fq);552552+}553553+EXPORT_SYMBOL_GPL(queue_iova);554554+479555/**480556 * put_iova_domain - destroys the iova doamin481557 * @iovad: - iova domain in question.···615433 struct rb_node *node;616434 unsigned long flags;617435436436+ free_iova_flush_queue(iovad);618437 free_iova_rcaches(iovad);619438 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);620439 node = rb_first(&iovad->rbroot);
+74-168
drivers/iommu/ipmmu-vmsa.c
···1919#include <linux/iommu.h>2020#include <linux/module.h>2121#include <linux/of.h>2222+#include <linux/of_platform.h>2223#include <linux/platform_device.h>2324#include <linux/sizes.h>2425#include <linux/slab.h>···3635struct ipmmu_vmsa_device {3736 struct device *dev;3837 void __iomem *base;3939- struct list_head list;3838+ struct iommu_device iommu;40394140 unsigned int num_utlbs;4241 spinlock_t lock; /* Protects ctx and domains[] */···59586059struct ipmmu_vmsa_iommu_priv {6160 struct ipmmu_vmsa_device *mmu;6262- unsigned int *utlbs;6363- unsigned int num_utlbs;6461 struct device *dev;6562 struct list_head list;6663};6767-6868-static DEFINE_SPINLOCK(ipmmu_devices_lock);6969-static LIST_HEAD(ipmmu_devices);70647165static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)7266{7367 return container_of(dom, struct ipmmu_vmsa_domain, io_domain);7468}75697676-7770static struct ipmmu_vmsa_iommu_priv *to_priv(struct device *dev)7871{7979-#if defined(CONFIG_ARM)8080- return dev->archdata.iommu;8181-#else8282- return dev->iommu_fwspec->iommu_priv;8383-#endif8484-}8585-static void set_priv(struct device *dev, struct ipmmu_vmsa_iommu_priv *p)8686-{8787-#if defined(CONFIG_ARM)8888- dev->archdata.iommu = p;8989-#else9090- dev->iommu_fwspec->iommu_priv = p;9191-#endif7272+ return dev->iommu_fwspec ? dev->iommu_fwspec->iommu_priv : NULL;9273}93749475#define TLB_LOOP_TIMEOUT 100 /* 100us */···295312 /* The hardware doesn't support selective TLB flush. 
*/296313}297314298298-static struct iommu_gather_ops ipmmu_gather_ops = {315315+static const struct iommu_gather_ops ipmmu_gather_ops = {299316 .tlb_flush_all = ipmmu_tlb_flush_all,300317 .tlb_add_flush = ipmmu_tlb_add_flush,301318 .tlb_sync = ipmmu_tlb_flush_all,···322339 spin_unlock_irqrestore(&mmu->lock, flags);323340324341 return ret;342342+}343343+344344+static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,345345+ unsigned int context_id)346346+{347347+ unsigned long flags;348348+349349+ spin_lock_irqsave(&mmu->lock, flags);350350+351351+ clear_bit(context_id, mmu->ctx);352352+ mmu->domains[context_id] = NULL;353353+354354+ spin_unlock_irqrestore(&mmu->lock, flags);325355}326356327357static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)···366370 */367371 domain->cfg.iommu_dev = domain->mmu->dev;368372369369- domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,370370- domain);371371- if (!domain->iop)372372- return -EINVAL;373373-374373 /*375374 * Find an unused context.376375 */377376 ret = ipmmu_domain_allocate_context(domain->mmu, domain);378378- if (ret == IPMMU_CTX_MAX) {379379- free_io_pgtable_ops(domain->iop);377377+ if (ret == IPMMU_CTX_MAX)380378 return -EBUSY;381381- }382379383380 domain->context_id = ret;381381+382382+ domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,383383+ domain);384384+ if (!domain->iop) {385385+ ipmmu_domain_free_context(domain->mmu, domain->context_id);386386+ return -EINVAL;387387+ }384388385389 /* TTBR0 */386390 ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];···420424 ipmmu_ctx_write(domain, IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);421425422426 return 0;423423-}424424-425425-static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,426426- unsigned int context_id)427427-{428428- unsigned long flags;429429-430430- spin_lock_irqsave(&mmu->lock, flags);431431-432432- clear_bit(context_id, mmu->ctx);433433- mmu->domains[context_id] = NULL;434434-435435- 
spin_unlock_irqrestore(&mmu->lock, flags);436427}437428438429static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)···545562 struct device *dev)546563{547564 struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);565565+ struct iommu_fwspec *fwspec = dev->iommu_fwspec;548566 struct ipmmu_vmsa_device *mmu = priv->mmu;549567 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);550568 unsigned long flags;551569 unsigned int i;552570 int ret = 0;553571554554- if (!mmu) {572572+ if (!priv || !priv->mmu) {555573 dev_err(dev, "Cannot attach to IPMMU\n");556574 return -ENXIO;557575 }···579595 if (ret < 0)580596 return ret;581597582582- for (i = 0; i < priv->num_utlbs; ++i)583583- ipmmu_utlb_enable(domain, priv->utlbs[i]);598598+ for (i = 0; i < fwspec->num_ids; ++i)599599+ ipmmu_utlb_enable(domain, fwspec->ids[i]);584600585601 return 0;586602}···588604static void ipmmu_detach_device(struct iommu_domain *io_domain,589605 struct device *dev)590606{591591- struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);607607+ struct iommu_fwspec *fwspec = dev->iommu_fwspec;592608 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);593609 unsigned int i;594610595595- for (i = 0; i < priv->num_utlbs; ++i)596596- ipmmu_utlb_disable(domain, priv->utlbs[i]);611611+ for (i = 0; i < fwspec->num_ids; ++i)612612+ ipmmu_utlb_disable(domain, fwspec->ids[i]);597613598614 /*599615 * TODO: Optimize by disabling the context when no device is attached.···629645 return domain->iop->iova_to_phys(domain->iop, iova);630646}631647632632-static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev,633633- unsigned int *utlbs, unsigned int num_utlbs)648648+static int ipmmu_init_platform_device(struct device *dev,649649+ struct of_phandle_args *args)634650{635635- unsigned int i;651651+ struct platform_device *ipmmu_pdev;652652+ struct ipmmu_vmsa_iommu_priv *priv;636653637637- for (i = 0; i < num_utlbs; ++i) {638638- struct of_phandle_args args;639639- int 
ret;654654+ ipmmu_pdev = of_find_device_by_node(args->np);655655+ if (!ipmmu_pdev)656656+ return -ENODEV;640657641641- ret = of_parse_phandle_with_args(dev->of_node, "iommus",642642- "#iommu-cells", i, &args);643643- if (ret < 0)644644- return ret;658658+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);659659+ if (!priv)660660+ return -ENOMEM;645661646646- of_node_put(args.np);647647-648648- if (args.np != mmu->dev->of_node || args.args_count != 1)649649- return -EINVAL;650650-651651- utlbs[i] = args.args[0];652652- }653653-662662+ priv->mmu = platform_get_drvdata(ipmmu_pdev);663663+ priv->dev = dev;664664+ dev->iommu_fwspec->iommu_priv = priv;654665 return 0;655666}656667657657-static int ipmmu_init_platform_device(struct device *dev)668668+static int ipmmu_of_xlate(struct device *dev,669669+ struct of_phandle_args *spec)658670{659659- struct ipmmu_vmsa_iommu_priv *priv;660660- struct ipmmu_vmsa_device *mmu;661661- unsigned int *utlbs;662662- unsigned int i;663663- int num_utlbs;664664- int ret = -ENODEV;671671+ iommu_fwspec_add_ids(dev, spec->args, 1);665672666666- /* Find the master corresponding to the device. 
*/673673+ /* Initialize once - xlate() will call multiple times */674674+ if (to_priv(dev))675675+ return 0;667676668668- num_utlbs = of_count_phandle_with_args(dev->of_node, "iommus",669669- "#iommu-cells");670670- if (num_utlbs < 0)671671- return -ENODEV;672672-673673- utlbs = kcalloc(num_utlbs, sizeof(*utlbs), GFP_KERNEL);674674- if (!utlbs)675675- return -ENOMEM;676676-677677- spin_lock(&ipmmu_devices_lock);678678-679679- list_for_each_entry(mmu, &ipmmu_devices, list) {680680- ret = ipmmu_find_utlbs(mmu, dev, utlbs, num_utlbs);681681- if (!ret) {682682- /*683683- * TODO Take a reference to the MMU to protect684684- * against device removal.685685- */686686- break;687687- }688688- }689689-690690- spin_unlock(&ipmmu_devices_lock);691691-692692- if (ret < 0)693693- goto error;694694-695695- for (i = 0; i < num_utlbs; ++i) {696696- if (utlbs[i] >= mmu->num_utlbs) {697697- ret = -EINVAL;698698- goto error;699699- }700700- }701701-702702- priv = kzalloc(sizeof(*priv), GFP_KERNEL);703703- if (!priv) {704704- ret = -ENOMEM;705705- goto error;706706- }707707-708708- priv->mmu = mmu;709709- priv->utlbs = utlbs;710710- priv->num_utlbs = num_utlbs;711711- priv->dev = dev;712712- set_priv(dev, priv);713713- return 0;714714-715715-error:716716- kfree(utlbs);717717- return ret;677677+ return ipmmu_init_platform_device(dev, spec);718678}719679720680#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)···677749 struct iommu_group *group;678750 int ret;679751680680- if (to_priv(dev)) {681681- dev_warn(dev, "IOMMU driver already assigned to device %s\n",682682- dev_name(dev));683683- return -EINVAL;684684- }752752+ /*753753+ * Only let through devices that have been verified in xlate()754754+ */755755+ if (!to_priv(dev))756756+ return -ENODEV;685757686758 /* Create a device group and add the device to it. 
*/687759 group = iommu_group_alloc();···699771 group = NULL;700772 goto error;701773 }702702-703703- ret = ipmmu_init_platform_device(dev);704704- if (ret < 0)705705- goto error;706774707775 /*708776 * Create the ARM mapping, used by the ARM DMA mapping core to allocate···740816 if (!IS_ERR_OR_NULL(group))741817 iommu_group_remove_device(dev);742818743743- kfree(to_priv(dev)->utlbs);744744- kfree(to_priv(dev));745745- set_priv(dev, NULL);746746-747819 return ret;748820}749821750822static void ipmmu_remove_device(struct device *dev)751823{752752- struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);753753-754824 arm_iommu_detach_device(dev);755825 iommu_group_remove_device(dev);756756-757757- kfree(priv->utlbs);758758- kfree(priv);759759-760760- set_priv(dev, NULL);761826}762827763828static const struct iommu_ops ipmmu_ops = {···761848 .add_device = ipmmu_add_device,762849 .remove_device = ipmmu_remove_device,763850 .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,851851+ .of_xlate = ipmmu_of_xlate,764852};765853766854#endif /* !CONFIG_ARM && CONFIG_IOMMU_DMA */···804890805891static int ipmmu_add_device_dma(struct device *dev)806892{807807- struct iommu_fwspec *fwspec = dev->iommu_fwspec;808893 struct iommu_group *group;809894810895 /*811896 * Only let through devices that have been verified in xlate()812812- * We may get called with dev->iommu_fwspec set to NULL.813897 */814814- if (!fwspec || !fwspec->iommu_priv)898898+ if (!to_priv(dev))815899 return -ENODEV;816900817901 group = iommu_group_get_for_dev(dev);···869957 return group;870958}871959872872-static int ipmmu_of_xlate_dma(struct device *dev,873873- struct of_phandle_args *spec)874874-{875875- /* If the IPMMU device is disabled in DT then return error876876- * to make sure the of_iommu code does not install ops877877- * even though the iommu device is disabled878878- */879879- if (!of_device_is_available(spec->np))880880- return -ENODEV;881881-882882- return ipmmu_init_platform_device(dev);883883-}884884-885960static 
const struct iommu_ops ipmmu_ops = {886961 .domain_alloc = ipmmu_domain_alloc_dma,887962 .domain_free = ipmmu_domain_free_dma,···882983 .remove_device = ipmmu_remove_device_dma,883984 .device_group = ipmmu_find_group_dma,884985 .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,885885- .of_xlate = ipmmu_of_xlate_dma,986986+ .of_xlate = ipmmu_of_xlate,886987};887988888989#endif /* CONFIG_IOMMU_DMA */···95310549541055 ipmmu_device_reset(mmu);955105610571057+ ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,10581058+ dev_name(&pdev->dev));10591059+ if (ret)10601060+ return ret;10611061+10621062+ iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);10631063+ iommu_device_set_fwnode(&mmu->iommu, &pdev->dev.of_node->fwnode);10641064+10651065+ ret = iommu_device_register(&mmu->iommu);10661066+ if (ret)10671067+ return ret;10681068+9561069 /*9571070 * We can't create the ARM mapping here as it requires the bus to have9581071 * an IOMMU, which only happens when bus_set_iommu() is called in9591072 * ipmmu_init() after the probe function returns.9601073 */961961-962962- spin_lock(&ipmmu_devices_lock);963963- list_add(&mmu->list, &ipmmu_devices);964964- spin_unlock(&ipmmu_devices_lock);96510749661075 platform_set_drvdata(pdev, mmu);9671076···9801073{9811074 struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);9821075983983- spin_lock(&ipmmu_devices_lock);984984- list_del(&mmu->list);985985- spin_unlock(&ipmmu_devices_lock);10761076+ iommu_device_sysfs_remove(&mmu->iommu);10771077+ iommu_device_unregister(&mmu->iommu);98610789871079#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)9881080 arm_iommu_release_mapping(mmu->mapping);
+14-1
drivers/iommu/msm_iommu.c
···393393static int msm_iommu_add_device(struct device *dev)394394{395395 struct msm_iommu_dev *iommu;396396+ struct iommu_group *group;396397 unsigned long flags;397398 int ret = 0;398399···407406408407 spin_unlock_irqrestore(&msm_iommu_lock, flags);409408410410- return ret;409409+ if (ret)410410+ return ret;411411+412412+ group = iommu_group_get_for_dev(dev);413413+ if (IS_ERR(group))414414+ return PTR_ERR(group);415415+416416+ iommu_group_put(group);417417+418418+ return 0;411419}412420413421static void msm_iommu_remove_device(struct device *dev)···431421 iommu_device_unlink(&iommu->iommu, dev);432422433423 spin_unlock_irqrestore(&msm_iommu_lock, flags);424424+425425+ iommu_group_remove_device(dev);434426}435427436428static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)···712700 .iova_to_phys = msm_iommu_iova_to_phys,713701 .add_device = msm_iommu_add_device,714702 .remove_device = msm_iommu_remove_device,703703+ .device_group = generic_device_group,715704 .pgsize_bitmap = MSM_IOMMU_PGSIZES,716705 .of_xlate = qcom_iommu_of_xlate,717706};
+136-78
drivers/iommu/mtk_iommu.c
···3131#include <linux/slab.h>3232#include <linux/spinlock.h>3333#include <asm/barrier.h>3434-#include <dt-bindings/memory/mt8173-larb-port.h>3534#include <soc/mediatek/smi.h>36353736#include "mtk_iommu.h"···53545455#define REG_MMU_CTRL_REG 0x1105556#define F_MMU_PREFETCH_RT_REPLACE_MOD BIT(4)5656-#define F_MMU_TF_PROTECT_SEL(prot) (((prot) & 0x3) << 5)5757+#define F_MMU_TF_PROTECT_SEL_SHIFT(data) \5858+ ((data)->m4u_plat == M4U_MT2712 ? 4 : 5)5959+/* It's named by F_MMU_TF_PROT_SEL in mt2712. */6060+#define F_MMU_TF_PROTECT_SEL(prot, data) \6161+ (((prot) & 0x3) << F_MMU_TF_PROTECT_SEL_SHIFT(data))57625863#define REG_MMU_IVRP_PADDR 0x1145964#define F_MMU_IVRP_PA_SET(pa, ext) (((pa) >> 1) | ((!!(ext)) << 31))6565+#define REG_MMU_VLD_PA_RNG 0x1186666+#define F_MMU_VLD_PA_RNG(EA, SA) (((EA) << 8) | (SA))60676168#define REG_MMU_INT_CONTROL0 0x1206269#define F_L2_MULIT_HIT_EN BIT(0)···8782#define REG_MMU_FAULT_ST1 0x13488838984#define REG_MMU_FAULT_VA 0x13c9090-#define F_MMU_FAULT_VA_MSK 0xfffff0009185#define F_MMU_FAULT_VA_WRITE_BIT BIT(1)9286#define F_MMU_FAULT_VA_LAYER_BIT BIT(0)9387···9692#define F_MMU0_INT_ID_PORT_ID(a) (((a) >> 2) & 0x1f)97939894#define MTK_PROTECT_PA_ALIGN 1289595+9696+/*9797+ * Get the local arbiter ID and the portid within the larb arbiter9898+ * from mtk_m4u_id which is defined by MTK_M4U_ID.9999+ */100100+#define MTK_M4U_TO_LARB(id) (((id) >> 5) & 0xf)101101+#define MTK_M4U_TO_PORT(id) ((id) & 0x1f)99102100103struct mtk_iommu_domain {101104 spinlock_t pgtlock; /* lock for page table */···115104116105static struct iommu_ops mtk_iommu_ops;117106107107+static LIST_HEAD(m4ulist); /* List all the M4U HWs */108108+109109+#define for_each_m4u(data) list_for_each_entry(data, &m4ulist, list)110110+111111+/*112112+ * There may be 1 or 2 M4U HWs, But we always expect they are in the same domain113113+ * for the performance.114114+ *115115+ * Here always return the mtk_iommu_data of the first probed M4U where the116116+ * iommu domain information is 
recorded.117117+ */118118+static struct mtk_iommu_data *mtk_iommu_get_m4u_data(void)119119+{120120+ struct mtk_iommu_data *data;121121+122122+ for_each_m4u(data)123123+ return data;124124+125125+ return NULL;126126+}127127+118128static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)119129{120130 return container_of(dom, struct mtk_iommu_domain, domain);···145113{146114 struct mtk_iommu_data *data = cookie;147115148148- writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, data->base + REG_MMU_INV_SEL);149149- writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);150150- wmb(); /* Make sure the tlb flush all done */116116+ for_each_m4u(data) {117117+ writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,118118+ data->base + REG_MMU_INV_SEL);119119+ writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);120120+ wmb(); /* Make sure the tlb flush all done */121121+ }151122}152123153124static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,···159124{160125 struct mtk_iommu_data *data = cookie;161126162162- writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, data->base + REG_MMU_INV_SEL);127127+ for_each_m4u(data) {128128+ writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,129129+ data->base + REG_MMU_INV_SEL);163130164164- writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);165165- writel_relaxed(iova + size - 1, data->base + REG_MMU_INVLD_END_A);166166- writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);167167- data->tlb_flush_active = true;131131+ writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);132132+ writel_relaxed(iova + size - 1,133133+ data->base + REG_MMU_INVLD_END_A);134134+ writel_relaxed(F_MMU_INV_RANGE,135135+ data->base + REG_MMU_INVALIDATE);136136+ data->tlb_flush_active = true;137137+ }168138}169139170140static void mtk_iommu_tlb_sync(void *cookie)···178138 int ret;179139 u32 tmp;180140181181- /* Avoid timing out if there's nothing to wait for */182182- if (!data->tlb_flush_active)183183- return;141141+ for_each_m4u(data) 
{142142+ /* Avoid timing out if there's nothing to wait for */143143+ if (!data->tlb_flush_active)144144+ return;184145185185- ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, tmp,186186- tmp != 0, 10, 100000);187187- if (ret) {188188- dev_warn(data->dev,189189- "Partial TLB flush timed out, falling back to full flush\n");190190- mtk_iommu_tlb_flush_all(cookie);146146+ ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,147147+ tmp, tmp != 0, 10, 100000);148148+ if (ret) {149149+ dev_warn(data->dev,150150+ "Partial TLB flush timed out, falling back to full flush\n");151151+ mtk_iommu_tlb_flush_all(cookie);152152+ }153153+ /* Clear the CPE status */154154+ writel_relaxed(0, data->base + REG_MMU_CPE_DONE);155155+ data->tlb_flush_active = false;191156 }192192- /* Clear the CPE status */193193- writel_relaxed(0, data->base + REG_MMU_CPE_DONE);194194- data->tlb_flush_active = false;195157}196158197159static const struct iommu_gather_ops mtk_iommu_gather_ops = {···215173 fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);216174 layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;217175 write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;218218- fault_iova &= F_MMU_FAULT_VA_MSK;219176 fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);220177 regval = readl_relaxed(data->base + REG_MMU_INT_ID);221178 fault_larb = F_MMU0_INT_ID_LARB_ID(regval);···262221 }263222}264223265265-static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)224224+static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)266225{267267- struct mtk_iommu_domain *dom = data->m4u_dom;226226+ struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();268227269228 spin_lock_init(&dom->pgtlock);270229···290249291250 /* Update our support page sizes bitmap */292251 dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;293293-294294- writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],295295- data->base + REG_MMU_PT_BASE_ADDR);296252 return 0;297253}298254···304266 if (!dom)305267 
return NULL;306268307307- if (iommu_get_dma_cookie(&dom->domain)) {308308- kfree(dom);309309- return NULL;310310- }269269+ if (iommu_get_dma_cookie(&dom->domain))270270+ goto free_dom;271271+272272+ if (mtk_iommu_domain_finalise(dom))273273+ goto put_dma_cookie;311274312275 dom->domain.geometry.aperture_start = 0;313276 dom->domain.geometry.aperture_end = DMA_BIT_MASK(32);314277 dom->domain.geometry.force_aperture = true;315278316279 return &dom->domain;280280+281281+put_dma_cookie:282282+ iommu_put_dma_cookie(&dom->domain);283283+free_dom:284284+ kfree(dom);285285+ return NULL;317286}318287319288static void mtk_iommu_domain_free(struct iommu_domain *domain)320289{290290+ struct mtk_iommu_domain *dom = to_mtk_domain(domain);291291+292292+ free_io_pgtable_ops(dom->iop);321293 iommu_put_dma_cookie(domain);322294 kfree(to_mtk_domain(domain));323295}···337289{338290 struct mtk_iommu_domain *dom = to_mtk_domain(domain);339291 struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;340340- int ret;341292342293 if (!data)343294 return -ENODEV;344295296296+ /* Update the pgtable base address register of the M4U HW */345297 if (!data->m4u_dom) {346298 data->m4u_dom = dom;347347- ret = mtk_iommu_domain_finalise(data);348348- if (ret) {349349- data->m4u_dom = NULL;350350- return ret;351351- }352352- } else if (data->m4u_dom != dom) {353353- /* All the client devices should be in the same m4u domain */354354- dev_err(dev, "try to attach into the error iommu domain\n");355355- return -EPERM;299299+ writel(dom->cfg.arm_v7s_cfg.ttbr[0],300300+ data->base + REG_MMU_PT_BASE_ADDR);356301 }357302358303 mtk_iommu_config(data, dev, true);···395354 dma_addr_t iova)396355{397356 struct mtk_iommu_domain *dom = to_mtk_domain(domain);357357+ struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();398358 unsigned long flags;399359 phys_addr_t pa;400360401361 spin_lock_irqsave(&dom->pgtlock, flags);402362 pa = dom->iop->iova_to_phys(dom->iop, iova);403363 
spin_unlock_irqrestore(&dom->pgtlock, flags);364364+365365+ if (data->enable_4GB)366366+ pa |= BIT_ULL(32);404367405368 return pa;406369}···444399445400static struct iommu_group *mtk_iommu_device_group(struct device *dev)446401{447447- struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;402402+ struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();448403449404 if (!data)450405 return ERR_PTR(-ENODEV);···509464 return ret;510465 }511466512512- regval = F_MMU_PREFETCH_RT_REPLACE_MOD |513513- F_MMU_TF_PROTECT_SEL(2);467467+ regval = F_MMU_TF_PROTECT_SEL(2, data);468468+ if (data->m4u_plat == M4U_MT8173)469469+ regval |= F_MMU_PREFETCH_RT_REPLACE_MOD;514470 writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);515471516472 regval = F_L2_MULIT_HIT_EN |···533487534488 writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB),535489 data->base + REG_MMU_IVRP_PADDR);536536-490490+ if (data->enable_4GB && data->m4u_plat != M4U_MT8173) {491491+ /*492492+ * If 4GB mode is enabled, the validate PA range is from493493+ * 0x1_0000_0000 to 0x1_ffff_ffff. here record bit[32:30].494494+ */495495+ regval = F_MMU_VLD_PA_RNG(7, 4);496496+ writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG);497497+ }537498 writel_relaxed(0, data->base + REG_MMU_DCM_DIS);538538- writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE);499499+500500+ /* It's MISC control register whose default value is ok except mt8173.*/501501+ if (data->m4u_plat == M4U_MT8173)502502+ writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE);539503540504 if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,541505 dev_name(data->dev), (void *)data)) {···577521 if (!data)578522 return -ENOMEM;579523 data->dev = dev;524524+ data->m4u_plat = (enum mtk_iommu_plat)of_device_get_match_data(dev);580525581526 /* Protect memory. 
HW will access here while translation fault.*/582527 protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);···586529 data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);587530588531 /* Whether the current dram is over 4GB */589589- data->enable_4GB = !!(max_pfn > (0xffffffffUL >> PAGE_SHIFT));532532+ data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT));590533591534 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);592535 data->base = devm_ioremap_resource(dev, res);···611554 for (i = 0; i < larb_nr; i++) {612555 struct device_node *larbnode;613556 struct platform_device *plarbdev;557557+ u32 id;614558615559 larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);616560 if (!larbnode)···620562 if (!of_device_is_available(larbnode))621563 continue;622564565565+ ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);566566+ if (ret)/* The id is consecutive if there is no this property */567567+ id = i;568568+623569 plarbdev = of_find_device_by_node(larbnode);624624- if (!plarbdev) {625625- plarbdev = of_platform_device_create(626626- larbnode, NULL,627627- platform_bus_type.dev_root);628628- if (!plarbdev) {629629- of_node_put(larbnode);630630- return -EPROBE_DEFER;631631- }632632- }633633- data->smi_imu.larb_imu[i].dev = &plarbdev->dev;570570+ if (!plarbdev)571571+ return -EPROBE_DEFER;572572+ data->smi_imu.larb_imu[id].dev = &plarbdev->dev;634573635574 component_match_add_release(dev, &match, release_of,636575 compare_of, larbnode);···651596 if (ret)652597 return ret;653598599599+ list_add_tail(&data->list, &m4ulist);600600+654601 if (!iommu_present(&platform_bus_type))655602 bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);656603···669612 if (iommu_present(&platform_bus_type))670613 bus_set_iommu(&platform_bus_type, NULL);671614672672- free_io_pgtable_ops(data->m4u_dom->iop);673615 clk_disable_unprepare(data->bclk);674616 devm_free_irq(&pdev->dev, data->irq, data);675617 component_master_del(&pdev->dev, 
&mtk_iommu_com_ops);···687631 reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);688632 reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);689633 reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);634634+ clk_disable_unprepare(data->bclk);690635 return 0;691636}692637···696639 struct mtk_iommu_data *data = dev_get_drvdata(dev);697640 struct mtk_iommu_suspend_reg *reg = &data->reg;698641 void __iomem *base = data->base;642642+ int ret;699643700700- writel_relaxed(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],701701- base + REG_MMU_PT_BASE_ADDR);644644+ ret = clk_prepare_enable(data->bclk);645645+ if (ret) {646646+ dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);647647+ return ret;648648+ }702649 writel_relaxed(reg->standard_axi_mode,703650 base + REG_MMU_STANDARD_AXI_MODE);704651 writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);···711650 writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);712651 writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB),713652 base + REG_MMU_IVRP_PADDR);653653+ if (data->m4u_dom)654654+ writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],655655+ base + REG_MMU_PT_BASE_ADDR);714656 return 0;715657}716658717717-const struct dev_pm_ops mtk_iommu_pm_ops = {718718- SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)659659+static const struct dev_pm_ops mtk_iommu_pm_ops = {660660+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)719661};720662721663static const struct of_device_id mtk_iommu_of_ids[] = {722722- { .compatible = "mediatek,mt8173-m4u", },664664+ { .compatible = "mediatek,mt2712-m4u", .data = (void *)M4U_MT2712},665665+ { .compatible = "mediatek,mt8173-m4u", .data = (void *)M4U_MT8173},723666 {}724667};725668···732667 .remove = mtk_iommu_remove,733668 .driver = {734669 .name = "mtk-iommu",735735- .of_match_table = mtk_iommu_of_ids,670670+ .of_match_table = of_match_ptr(mtk_iommu_of_ids),736671 .pm = &mtk_iommu_pm_ops,737672 
}738673};739674740740-static int mtk_iommu_init_fn(struct device_node *np)675675+static int __init mtk_iommu_init(void)741676{742677 int ret;743743- struct platform_device *pdev;744744-745745- pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root);746746- if (!pdev)747747- return -ENOMEM;748678749679 ret = platform_driver_register(&mtk_iommu_driver);750750- if (ret) {751751- pr_err("%s: Failed to register driver\n", __func__);752752- return ret;753753- }680680+ if (ret != 0)681681+ pr_err("Failed to register MTK IOMMU driver\n");754682755755- return 0;683683+ return ret;756684}757685758758-IOMMU_OF_DECLARE(mtkm4u, "mediatek,mt8173-m4u", mtk_iommu_init_fn);686686+subsys_initcall(mtk_iommu_init)
···1616#include <linux/device.h>1717#include <linux/err.h>1818#include <linux/io.h>1919+#include <linux/module.h>1920#include <linux/of.h>2021#include <linux/of_platform.h>2122#include <linux/platform_device.h>···2423#include <soc/mediatek/smi.h>2524#include <dt-bindings/memory/mt2701-larb-port.h>26252626+/* mt8173 */2727#define SMI_LARB_MMU_EN 0xf002828+2929+/* mt2701 */2830#define REG_SMI_SECUR_CON_BASE 0x5c029313032/* every register control 8 port, register offset 0x4 */···4541/* mt2701 domain should be set to 3 */4642#define SMI_SECUR_CON_VAL_DOMAIN(id) (0x3 << ((((id) & 0x7) << 2) + 1))47434444+/* mt2712 */4545+#define SMI_LARB_NONSEC_CON(id) (0x380 + ((id) * 4))4646+#define F_MMU_EN BIT(0)4747+4848struct mtk_smi_larb_gen {4949+ bool need_larbid;4950 int port_in_larb[MTK_LARB_NR_MAX + 1];5051 void (*config_port)(struct device *);5152};···157148 struct mtk_smi_iommu *smi_iommu = data;158149 unsigned int i;159150151151+ if (larb->larb_gen->need_larbid) {152152+ larb->mmu = &smi_iommu->larb_imu[larb->larbid].mmu;153153+ return 0;154154+ }155155+156156+ /*157157+ * If there is no larbid property, Loop to find the corresponding158158+ * iommu information.159159+ */160160 for (i = 0; i < smi_iommu->larb_nr; i++) {161161 if (dev == smi_iommu->larb_imu[i].dev) {162162 /* The 'mmu' may be updated in iommu-attach/detach. 
*/···176158 return -ENODEV;177159}178160179179-static void mtk_smi_larb_config_port(struct device *dev)161161+static void mtk_smi_larb_config_port_mt2712(struct device *dev)162162+{163163+ struct mtk_smi_larb *larb = dev_get_drvdata(dev);164164+ u32 reg;165165+ int i;166166+167167+ /*168168+ * larb 8/9 is the bdpsys larb, the iommu_en is enabled defaultly.169169+ * Don't need to set it again.170170+ */171171+ if (larb->larbid == 8 || larb->larbid == 9)172172+ return;173173+174174+ for_each_set_bit(i, (unsigned long *)larb->mmu, 32) {175175+ reg = readl_relaxed(larb->base + SMI_LARB_NONSEC_CON(i));176176+ reg |= F_MMU_EN;177177+ writel(reg, larb->base + SMI_LARB_NONSEC_CON(i));178178+ }179179+}180180+181181+static void mtk_smi_larb_config_port_mt8173(struct device *dev)180182{181183 struct mtk_smi_larb *larb = dev_get_drvdata(dev);182184183185 writel(*larb->mmu, larb->base + SMI_LARB_MMU_EN);184186}185185-186187187188static void mtk_smi_larb_config_port_gen1(struct device *dev)188189{···247210248211static const struct mtk_smi_larb_gen mtk_smi_larb_mt8173 = {249212 /* mt8173 do not need the port in larb */250250- .config_port = mtk_smi_larb_config_port,213213+ .config_port = mtk_smi_larb_config_port_mt8173,251214};252215253216static const struct mtk_smi_larb_gen mtk_smi_larb_mt2701 = {217217+ .need_larbid = true,254218 .port_in_larb = {255219 LARB0_PORT_OFFSET, LARB1_PORT_OFFSET,256220 LARB2_PORT_OFFSET, LARB3_PORT_OFFSET257221 },258222 .config_port = mtk_smi_larb_config_port_gen1,223223+};224224+225225+static const struct mtk_smi_larb_gen mtk_smi_larb_mt2712 = {226226+ .need_larbid = true,227227+ .config_port = mtk_smi_larb_config_port_mt2712,259228};260229261230static const struct of_device_id mtk_smi_larb_of_ids[] = {···273230 .compatible = "mediatek,mt2701-smi-larb",274231 .data = &mtk_smi_larb_mt2701275232 },233233+ {234234+ .compatible = "mediatek,mt2712-smi-larb",235235+ .data = &mtk_smi_larb_mt2712236236+ },276237 {}277238};278239···287240 struct device *dev 
= &pdev->dev;288241 struct device_node *smi_node;289242 struct platform_device *smi_pdev;290290- const struct of_device_id *of_id;291291-292292- if (!dev->pm_domain)293293- return -EPROBE_DEFER;294294-295295- of_id = of_match_node(mtk_smi_larb_of_ids, pdev->dev.of_node);296296- if (!of_id)297297- return -EINVAL;243243+ int err;298244299245 larb = devm_kzalloc(dev, sizeof(*larb), GFP_KERNEL);300246 if (!larb)301247 return -ENOMEM;302248303303- larb->larb_gen = of_id->data;249249+ larb->larb_gen = of_device_get_match_data(dev);304250 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);305251 larb->base = devm_ioremap_resource(dev, res);306252 if (IS_ERR(larb->base))···308268 return PTR_ERR(larb->smi.clk_smi);309269 larb->smi.dev = dev;310270271271+ if (larb->larb_gen->need_larbid) {272272+ err = of_property_read_u32(dev->of_node, "mediatek,larb-id",273273+ &larb->larbid);274274+ if (err) {275275+ dev_err(dev, "missing larbid property\n");276276+ return err;277277+ }278278+ }279279+311280 smi_node = of_parse_phandle(dev->of_node, "mediatek,smi", 0);312281 if (!smi_node)313282 return -EINVAL;···324275 smi_pdev = of_find_device_by_node(smi_node);325276 of_node_put(smi_node);326277 if (smi_pdev) {278278+ if (!platform_get_drvdata(smi_pdev))279279+ return -EPROBE_DEFER;327280 larb->smi_common_dev = &smi_pdev->dev;328281 } else {329282 dev_err(dev, "Failed to get the smi_common device\n");···362311 .compatible = "mediatek,mt2701-smi-common",363312 .data = (void *)MTK_SMI_GEN1364313 },314314+ {315315+ .compatible = "mediatek,mt2712-smi-common",316316+ .data = (void *)MTK_SMI_GEN2317317+ },365318 {}366319};367320···374319 struct device *dev = &pdev->dev;375320 struct mtk_smi *common;376321 struct resource *res;377377- const struct of_device_id *of_id;378322 enum mtk_smi_gen smi_gen;379379-380380- if (!dev->pm_domain)381381- return -EPROBE_DEFER;323323+ int ret;382324383325 common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);384326 if (!common)···390338 if 
(IS_ERR(common->clk_smi))391339 return PTR_ERR(common->clk_smi);392340393393- of_id = of_match_node(mtk_smi_common_of_ids, pdev->dev.of_node);394394- if (!of_id)395395- return -EINVAL;396396-397341 /*398342 * for mtk smi gen 1, we need to get the ao(always on) base to config399343 * m4u port, and we need to enable the aync clock for transform the smi400344 * clock into emi clock domain, but for mtk smi gen2, there's no smi ao401345 * base.402346 */403403- smi_gen = (enum mtk_smi_gen)of_id->data;347347+ smi_gen = (enum mtk_smi_gen)of_device_get_match_data(dev);404348 if (smi_gen == MTK_SMI_GEN1) {405349 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);406350 common->smi_ao_base = devm_ioremap_resource(dev, res);···407359 if (IS_ERR(common->clk_async))408360 return PTR_ERR(common->clk_async);409361410410- clk_prepare_enable(common->clk_async);362362+ ret = clk_prepare_enable(common->clk_async);363363+ if (ret)364364+ return ret;411365 }412366 pm_runtime_enable(dev);413367 platform_set_drvdata(pdev, common);···453403 return ret;454404}455405456456-subsys_initcall(mtk_smi_init);406406+module_init(mtk_smi_init);
-4
include/dt-bindings/memory/mt8173-larb-port.h
···1515#define __DTS_IOMMU_PORT_MT8173_H16161717#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port))1818-/* Local arbiter ID */1919-#define MTK_M4U_TO_LARB(id) (((id) >> 5) & 0x7)2020-/* PortID within the local arbiter */2121-#define MTK_M4U_TO_PORT(id) ((id) & 0x1f)22182319#define M4U_LARB0_ID 02420#define M4U_LARB1_ID 1
+52-3
include/linux/iommu.h
···167167 * @map: map a physically contiguous memory region to an iommu domain168168 * @unmap: unmap a physically contiguous memory region from an iommu domain169169 * @map_sg: map a scatter-gather list of physically contiguous memory chunks170170+ * @flush_tlb_all: Synchronously flush all hardware TLBs for this domain171171+ * @tlb_range_add: Add a given iova range to the flush queue for this domain172172+ * @tlb_sync: Flush all queued ranges from the hardware TLBs and empty flush173173+ * queue170174 * to an iommu domain171175 * @iova_to_phys: translate iova to physical address172176 * @add_device: add device to iommu grouping···203199 size_t size);204200 size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,205201 struct scatterlist *sg, unsigned int nents, int prot);202202+ void (*flush_iotlb_all)(struct iommu_domain *domain);203203+ void (*iotlb_range_add)(struct iommu_domain *domain,204204+ unsigned long iova, size_t size);205205+ void (*iotlb_sync)(struct iommu_domain *domain);206206 phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);207207 int (*add_device)(struct device *dev);208208 void (*remove_device)(struct device *dev);···233225 u32 (*domain_get_windows)(struct iommu_domain *domain);234226235227 int (*of_xlate)(struct device *dev, struct of_phandle_args *args);228228+ bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);236229237230 unsigned long pgsize_bitmap;238231};···300291extern int iommu_map(struct iommu_domain *domain, unsigned long iova,301292 phys_addr_t paddr, size_t size, int prot);302293extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,303303- size_t size);294294+ size_t size);295295+extern size_t iommu_unmap_fast(struct iommu_domain *domain,296296+ unsigned long iova, size_t size);304297extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,305298 struct scatterlist *sg,unsigned int nents,306299 int prot);···358347359348extern int 
report_iommu_fault(struct iommu_domain *domain, struct device *dev,360349 unsigned long iova, int flags);350350+351351+static inline void iommu_flush_tlb_all(struct iommu_domain *domain)352352+{353353+ if (domain->ops->flush_iotlb_all)354354+ domain->ops->flush_iotlb_all(domain);355355+}356356+357357+static inline void iommu_tlb_range_add(struct iommu_domain *domain,358358+ unsigned long iova, size_t size)359359+{360360+ if (domain->ops->iotlb_range_add)361361+ domain->ops->iotlb_range_add(domain, iova, size);362362+}363363+364364+static inline void iommu_tlb_sync(struct iommu_domain *domain)365365+{366366+ if (domain->ops->iotlb_sync)367367+ domain->ops->iotlb_sync(domain);368368+}361369362370static inline size_t iommu_map_sg(struct iommu_domain *domain,363371 unsigned long iova, struct scatterlist *sg,···460430}461431462432static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,463463- phys_addr_t paddr, int gfp_order, int prot)433433+ phys_addr_t paddr, size_t size, int prot)464434{465435 return -ENODEV;466436}467437468438static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,469469- int gfp_order)439439+ size_t size)440440+{441441+ return -ENODEV;442442+}443443+444444+static inline int iommu_unmap_fast(struct iommu_domain *domain, unsigned long iova,445445+ int gfp_order)470446{471447 return -ENODEV;472448}···482446 unsigned int nents, int prot)483447{484448 return -ENODEV;449449+}450450+451451+static inline void iommu_flush_tlb_all(struct iommu_domain *domain)452452+{453453+}454454+455455+static inline void iommu_tlb_range_add(struct iommu_domain *domain,456456+ unsigned long iova, size_t size)457457+{458458+}459459+460460+static inline void iommu_tlb_sync(struct iommu_domain *domain)461461+{485462}486463487464static inline int iommu_domain_window_enable(struct iommu_domain *domain,
+67
include/linux/iova.h
···1414#include <linux/types.h>1515#include <linux/kernel.h>1616#include <linux/rbtree.h>1717+#include <linux/atomic.h>1718#include <linux/dma-mapping.h>18191920/* iova structure */···3736 struct iova_cpu_rcache __percpu *cpu_rcaches;3837};39383939+struct iova_domain;4040+4141+/* Call-Back from IOVA code into IOMMU drivers */4242+typedef void (* iova_flush_cb)(struct iova_domain *domain);4343+4444+/* Destructor for per-entry data */4545+typedef void (* iova_entry_dtor)(unsigned long data);4646+4747+/* Number of entries per Flush Queue */4848+#define IOVA_FQ_SIZE 2564949+5050+/* Timeout (in ms) after which entries are flushed from the Flush-Queue */5151+#define IOVA_FQ_TIMEOUT 105252+5353+/* Flush Queue entry for deferred flushing */5454+struct iova_fq_entry {5555+ unsigned long iova_pfn;5656+ unsigned long pages;5757+ unsigned long data;5858+ u64 counter; /* Flush counter when this entry was added */5959+};6060+6161+/* Per-CPU Flush Queue structure */6262+struct iova_fq {6363+ struct iova_fq_entry entries[IOVA_FQ_SIZE];6464+ unsigned head, tail;6565+ spinlock_t lock;6666+};6767+4068/* holds all the iova translations for a domain */4169struct iova_domain {4270 spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */···7545 unsigned long start_pfn; /* Lower limit for this domain */7646 unsigned long dma_32bit_pfn;7747 struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */4848+4949+ iova_flush_cb flush_cb; /* Call-Back function to flush IOMMU5050+ TLBs */5151+5252+ iova_entry_dtor entry_dtor; /* IOMMU driver specific destructor for5353+ iova entry */5454+5555+ struct iova_fq __percpu *fq; /* Flush Queue */5656+5757+ atomic64_t fq_flush_start_cnt; /* Number of TLB flushes that5858+ have been started */5959+6060+ atomic64_t fq_flush_finish_cnt; /* Number of TLB flushes that6161+ have been finished */6262+6363+ struct timer_list fq_timer; /* Timer to regularly empty the6464+ flush-queues */6565+ atomic_t fq_timer_on; /* 1 when timer is 
active, 06666+ when not */7867};79688069static inline unsigned long iova_size(struct iova *iova)···14495 bool size_aligned);14596void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,14697 unsigned long size);9898+void queue_iova(struct iova_domain *iovad,9999+ unsigned long pfn, unsigned long pages,100100+ unsigned long data);147101unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,148102 unsigned long limit_pfn);149103struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,···154102void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);155103void init_iova_domain(struct iova_domain *iovad, unsigned long granule,156104 unsigned long start_pfn, unsigned long pfn_32bit);105105+int init_iova_flush_queue(struct iova_domain *iovad,106106+ iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);157107struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);158108void put_iova_domain(struct iova_domain *iovad);159109struct iova *split_and_remove_iova(struct iova_domain *iovad,···202148{203149}204150151151+static inline void queue_iova(struct iova_domain *iovad,152152+ unsigned long pfn, unsigned long pages,153153+ unsigned long data)154154+{155155+}156156+205157static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,206158 unsigned long size,207159 unsigned long limit_pfn)···232172 unsigned long start_pfn,233173 unsigned long pfn_32bit)234174{175175+}176176+177177+static inline int init_iova_flush_queue(struct iova_domain *iovad,178178+ iova_flush_cb flush_cb,179179+ iova_entry_dtor entry_dtor)180180+{181181+ return -ENODEV;235182}236183237184static inline struct iova *find_iova(struct iova_domain *iovad,