Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branches 'apple/dart', 'arm/mediatek', 'arm/renesas', 'arm/rockchip', 'arm/smmu', 'unisoc', 'x86/vt-d', 'x86/amd' and 'core' into next

+1542 -696
+41
Documentation/devicetree/bindings/iommu/arm,smmu.yaml
··· 270 270 contains: 271 271 enum: 272 272 - qcom,msm8998-smmu-v2 273 + then: 274 + anyOf: 275 + - properties: 276 + clock-names: 277 + items: 278 + - const: bus 279 + clocks: 280 + items: 281 + - description: bus clock required for downstream bus access and for 282 + the smmu ptw 283 + - properties: 284 + clock-names: 285 + items: 286 + - const: iface 287 + - const: mem 288 + - const: mem_iface 289 + clocks: 290 + items: 291 + - description: interface clock required to access smmu's registers 292 + through the TCU's programming interface. 293 + - description: bus clock required for memory access 294 + - description: bus clock required for GPU memory access 295 + - properties: 296 + clock-names: 297 + items: 298 + - const: iface-mm 299 + - const: iface-smmu 300 + - const: bus-smmu 301 + clocks: 302 + items: 303 + - description: interface clock required to access mnoc's registers 304 + through the TCU's programming interface. 305 + - description: interface clock required to access smmu's registers 306 + through the TCU's programming interface. 307 + - description: bus clock required for the smmu ptw 308 + 309 + - if: 310 + properties: 311 + compatible: 312 + contains: 313 + enum: 273 314 - qcom,sdm630-smmu-v2 274 315 - qcom,sm6375-smmu-v2 275 316 then:
+11 -1
Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml
··· 78 78 - mediatek,mt8173-m4u # generation two 79 79 - mediatek,mt8183-m4u # generation two 80 80 - mediatek,mt8186-iommu-mm # generation two 81 + - mediatek,mt8188-iommu-vdo # generation two 82 + - mediatek,mt8188-iommu-vpp # generation two 83 + - mediatek,mt8188-iommu-infra # generation two 81 84 - mediatek,mt8192-m4u # generation two 82 85 - mediatek,mt8195-iommu-vdo # generation two 83 86 - mediatek,mt8195-iommu-vpp # generation two ··· 126 123 description: | 127 124 This is the mtk_m4u_id according to the HW. Specifies the mtk_m4u_id as 128 125 defined in 126 + dt-binding/memory/mediatek,mt8188-memory-port.h for mt8188, 129 127 dt-binding/memory/mt2701-larb-port.h for mt2701 and mt7623, 130 128 dt-binding/memory/mt2712-larb-port.h for mt2712, 131 129 dt-binding/memory/mt6779-larb-port.h for mt6779, ··· 159 155 - mediatek,mt6795-m4u 160 156 - mediatek,mt8173-m4u 161 157 - mediatek,mt8186-iommu-mm 158 + - mediatek,mt8188-iommu-vdo 159 + - mediatek,mt8188-iommu-vpp 162 160 - mediatek,mt8192-m4u 163 161 - mediatek,mt8195-iommu-vdo 164 162 - mediatek,mt8195-iommu-vpp ··· 174 168 compatible: 175 169 enum: 176 170 - mediatek,mt8186-iommu-mm 171 + - mediatek,mt8188-iommu-vdo 172 + - mediatek,mt8188-iommu-vpp 177 173 - mediatek,mt8192-m4u 178 174 - mediatek,mt8195-iommu-vdo 179 175 - mediatek,mt8195-iommu-vpp ··· 202 194 properties: 203 195 compatible: 204 196 contains: 205 - const: mediatek,mt8195-iommu-infra 197 + enum: 198 + - mediatek,mt8188-iommu-infra 199 + - mediatek,mt8195-iommu-infra 206 200 207 201 then: 208 202 required:
+17 -5
Documentation/devicetree/bindings/iommu/qcom,iommu.yaml
··· 17 17 18 18 properties: 19 19 compatible: 20 - items: 21 - - enum: 22 - - qcom,msm8916-iommu 23 - - qcom,msm8953-iommu 24 - - const: qcom,msm-iommu-v1 20 + oneOf: 21 + - items: 22 + - enum: 23 + - qcom,msm8916-iommu 24 + - qcom,msm8953-iommu 25 + - const: qcom,msm-iommu-v1 26 + - items: 27 + - enum: 28 + - qcom,msm8976-iommu 29 + - const: qcom,msm-iommu-v2 25 30 26 31 clocks: 27 32 items: ··· 69 64 enum: 70 65 - qcom,msm-iommu-v1-ns 71 66 - qcom,msm-iommu-v1-sec 67 + - qcom,msm-iommu-v2-ns 68 + - qcom,msm-iommu-v2-sec 72 69 73 70 interrupts: 74 71 maxItems: 1 75 72 76 73 reg: 77 74 maxItems: 1 75 + 76 + qcom,ctx-asid: 77 + $ref: /schemas/types.yaml#/definitions/uint32 78 + description: 79 + The ASID number associated to the context bank. 78 80 79 81 required: 80 82 - compatible
+1
MAINTAINERS
··· 13247 13247 S: Supported 13248 13248 F: Documentation/devicetree/bindings/iommu/mediatek* 13249 13249 F: drivers/iommu/mtk_iommu* 13250 + F: include/dt-bindings/memory/mediatek,mt*-port.h 13250 13251 F: include/dt-bindings/memory/mt*-port.h 13251 13252 13252 13253 MEDIATEK JPEG DRIVER
+1 -1
drivers/acpi/scan.c
··· 1581 1581 * If we have reason to believe the IOMMU driver missed the initial 1582 1582 * iommu_probe_device() call for dev, replay it to get things in order. 1583 1583 */ 1584 - if (!err && dev->bus && !device_iommu_mapped(dev)) 1584 + if (!err && dev->bus) 1585 1585 err = iommu_probe_device(dev); 1586 1586 1587 1587 /* Ignore all other errors apart from EPROBE_DEFER */
+14 -25
drivers/dma/idxd/device.c
··· 299 299 } 300 300 } 301 301 302 - static void __idxd_wq_set_priv_locked(struct idxd_wq *wq, int priv) 303 - { 304 - struct idxd_device *idxd = wq->idxd; 305 - union wqcfg wqcfg; 306 - unsigned int offset; 307 - 308 - offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PRIVL_IDX); 309 - spin_lock(&idxd->dev_lock); 310 - wqcfg.bits[WQCFG_PRIVL_IDX] = ioread32(idxd->reg_base + offset); 311 - wqcfg.priv = priv; 312 - wq->wqcfg->bits[WQCFG_PRIVL_IDX] = wqcfg.bits[WQCFG_PRIVL_IDX]; 313 - iowrite32(wqcfg.bits[WQCFG_PRIVL_IDX], idxd->reg_base + offset); 314 - spin_unlock(&idxd->dev_lock); 315 - } 316 - 317 302 static void __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid) 318 303 { 319 304 struct idxd_device *idxd = wq->idxd; ··· 1406 1421 } 1407 1422 1408 1423 /* 1409 - * In the event that the WQ is configurable for pasid and priv bits. 1410 - * For kernel wq, the driver should setup the pasid, pasid_en, and priv bit. 1411 - * However, for non-kernel wq, the driver should only set the pasid_en bit for 1412 - * shared wq. A dedicated wq that is not 'kernel' type will configure pasid and 1424 + * In the event that the WQ is configurable for pasid, the driver 1425 + * should setup the pasid, pasid_en bit. This is true for both kernel 1426 + * and user shared workqueues. There is no need to setup priv bit in 1427 + * that in-kernel DMA will also do user privileged requests. 1428 + * A dedicated wq that is not 'kernel' type will configure pasid and 1413 1429 * pasid_en later on so there is no need to setup. 1414 1430 */ 1415 1431 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) { 1416 - int priv = 0; 1417 - 1418 1432 if (wq_pasid_enabled(wq)) { 1419 1433 if (is_idxd_wq_kernel(wq) || wq_shared(wq)) { 1420 1434 u32 pasid = wq_dedicated(wq) ? 
idxd->pasid : 0; ··· 1421 1437 __idxd_wq_set_pasid_locked(wq, pasid); 1422 1438 } 1423 1439 } 1424 - 1425 - if (is_idxd_wq_kernel(wq)) 1426 - priv = 1; 1427 - __idxd_wq_set_priv_locked(wq, priv); 1428 1440 } 1429 1441 1430 1442 rc = 0; ··· 1527 1547 spin_unlock(&idxd->dev_lock); 1528 1548 if (rc < 0) 1529 1549 return -ENXIO; 1550 + 1551 + /* 1552 + * System PASID is preserved across device disable/enable cycle, but 1553 + * genconfig register content gets cleared during device reset. We 1554 + * need to re-enable user interrupts for kernel work queue completion 1555 + * IRQ to function. 1556 + */ 1557 + if (idxd->pasid != IOMMU_PASID_INVALID) 1558 + idxd_set_user_intr(idxd, 1); 1530 1559 1531 1560 rc = idxd_device_evl_setup(idxd); 1532 1561 if (rc < 0) {
+3 -2
drivers/dma/idxd/dma.c
··· 75 75 hw->xfer_size = len; 76 76 /* 77 77 * For dedicated WQ, this field is ignored and HW will use the WQCFG.priv 78 - * field instead. This field should be set to 1 for kernel descriptors. 78 + * field instead. This field should be set to 0 for kernel descriptors 79 + * since kernel DMA on VT-d supports "user" privilege only. 79 80 */ 80 - hw->priv = 1; 81 + hw->priv = 0; 81 82 hw->completion_addr = compl; 82 83 } 83 84
+9
drivers/dma/idxd/idxd.h
··· 473 473 return container_of(ie, struct idxd_device, ie); 474 474 } 475 475 476 + static inline void idxd_set_user_intr(struct idxd_device *idxd, bool enable) 477 + { 478 + union gencfg_reg reg; 479 + 480 + reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET); 481 + reg.user_int_en = enable; 482 + iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET); 483 + } 484 + 476 485 extern struct bus_type dsa_bus_type; 477 486 478 487 extern bool support_enqcmd;
+50 -4
drivers/dma/idxd/init.c
··· 550 550 551 551 static int idxd_enable_system_pasid(struct idxd_device *idxd) 552 552 { 553 - return -EOPNOTSUPP; 553 + struct pci_dev *pdev = idxd->pdev; 554 + struct device *dev = &pdev->dev; 555 + struct iommu_domain *domain; 556 + ioasid_t pasid; 557 + int ret; 558 + 559 + /* 560 + * Attach a global PASID to the DMA domain so that we can use ENQCMDS 561 + * to submit work on buffers mapped by DMA API. 562 + */ 563 + domain = iommu_get_domain_for_dev(dev); 564 + if (!domain) 565 + return -EPERM; 566 + 567 + pasid = iommu_alloc_global_pasid(dev); 568 + if (pasid == IOMMU_PASID_INVALID) 569 + return -ENOSPC; 570 + 571 + /* 572 + * DMA domain is owned by the driver, it should support all valid 573 + * types such as DMA-FQ, identity, etc. 574 + */ 575 + ret = iommu_attach_device_pasid(domain, dev, pasid); 576 + if (ret) { 577 + dev_err(dev, "failed to attach device pasid %d, domain type %d", 578 + pasid, domain->type); 579 + iommu_free_global_pasid(pasid); 580 + return ret; 581 + } 582 + 583 + /* Since we set user privilege for kernel DMA, enable completion IRQ */ 584 + idxd_set_user_intr(idxd, 1); 585 + idxd->pasid = pasid; 586 + 587 + return ret; 554 588 } 555 589 556 590 static void idxd_disable_system_pasid(struct idxd_device *idxd) 557 591 { 592 + struct pci_dev *pdev = idxd->pdev; 593 + struct device *dev = &pdev->dev; 594 + struct iommu_domain *domain; 558 595 559 - iommu_sva_unbind_device(idxd->sva); 596 + domain = iommu_get_domain_for_dev(dev); 597 + if (!domain) 598 + return; 599 + 600 + iommu_detach_device_pasid(domain, dev, idxd->pasid); 601 + iommu_free_global_pasid(idxd->pasid); 602 + 603 + idxd_set_user_intr(idxd, 0); 560 604 idxd->sva = NULL; 605 + idxd->pasid = IOMMU_PASID_INVALID; 561 606 } 562 607 563 608 static int idxd_enable_sva(struct pci_dev *pdev) ··· 645 600 } else { 646 601 set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags); 647 602 648 - if (idxd_enable_system_pasid(idxd)) 649 - dev_warn(dev, "No in-kernel DMA with PASID.\n"); 603 + 
rc = idxd_enable_system_pasid(idxd); 604 + if (rc) 605 + dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc); 650 606 else 651 607 set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags); 652 608 }
-7
drivers/dma/idxd/sysfs.c
··· 948 948 if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0) 949 949 return -EINVAL; 950 950 951 - /* 952 - * This is temporarily placed here until we have SVM support for 953 - * dmaengine. 954 - */ 955 - if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd)) 956 - return -EOPNOTSUPP; 957 - 958 951 input = kstrndup(buf, count, GFP_KERNEL); 959 952 if (!input) 960 953 return -ENOMEM;
+4 -3
drivers/iommu/amd/amd_iommu.h
··· 12 12 #include "amd_iommu_types.h" 13 13 14 14 irqreturn_t amd_iommu_int_thread(int irq, void *data); 15 + irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data); 16 + irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data); 17 + irqreturn_t amd_iommu_int_thread_galog(int irq, void *data); 15 18 irqreturn_t amd_iommu_int_handler(int irq, void *data); 16 19 void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid); 17 20 void amd_iommu_restart_event_logging(struct amd_iommu *iommu); 18 21 void amd_iommu_restart_ga_log(struct amd_iommu *iommu); 19 - int amd_iommu_init_devices(void); 20 - void amd_iommu_uninit_devices(void); 21 - void amd_iommu_init_notifier(void); 22 + void amd_iommu_restart_ppr_log(struct amd_iommu *iommu); 22 23 void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid); 23 24 24 25 #ifdef CONFIG_AMD_IOMMU_DEBUGFS
+17 -5
drivers/iommu/amd/amd_iommu_types.h
··· 120 120 #define PASID_MASK 0x0000ffff 121 121 122 122 /* MMIO status bits */ 123 - #define MMIO_STATUS_EVT_OVERFLOW_INT_MASK BIT(0) 123 + #define MMIO_STATUS_EVT_OVERFLOW_MASK BIT(0) 124 124 #define MMIO_STATUS_EVT_INT_MASK BIT(1) 125 125 #define MMIO_STATUS_COM_WAIT_INT_MASK BIT(2) 126 + #define MMIO_STATUS_EVT_RUN_MASK BIT(3) 127 + #define MMIO_STATUS_PPR_OVERFLOW_MASK BIT(5) 126 128 #define MMIO_STATUS_PPR_INT_MASK BIT(6) 129 + #define MMIO_STATUS_PPR_RUN_MASK BIT(7) 127 130 #define MMIO_STATUS_GALOG_RUN_MASK BIT(8) 128 131 #define MMIO_STATUS_GALOG_OVERFLOW_MASK BIT(9) 129 132 #define MMIO_STATUS_GALOG_INT_MASK BIT(10) ··· 384 381 */ 385 382 #define DTE_FLAG_V BIT_ULL(0) 386 383 #define DTE_FLAG_TV BIT_ULL(1) 384 + #define DTE_FLAG_GIOV BIT_ULL(54) 385 + #define DTE_FLAG_GV BIT_ULL(55) 386 + #define DTE_GLX_SHIFT (56) 387 + #define DTE_GLX_MASK (3) 387 388 #define DTE_FLAG_IR BIT_ULL(61) 388 389 #define DTE_FLAG_IW BIT_ULL(62) 389 390 390 391 #define DTE_FLAG_IOTLB BIT_ULL(32) 391 - #define DTE_FLAG_GIOV BIT_ULL(54) 392 - #define DTE_FLAG_GV BIT_ULL(55) 393 392 #define DTE_FLAG_MASK (0x3ffULL << 32) 394 - #define DTE_GLX_SHIFT (56) 395 - #define DTE_GLX_MASK (3) 396 393 #define DEV_DOMID_MASK 0xffffULL 397 394 398 395 #define DTE_GCR3_VAL_A(x) (((x) >> 12) & 0x00007ULL) ··· 705 702 /* event buffer virtual address */ 706 703 u8 *evt_buf; 707 704 705 + /* Name for event log interrupt */ 706 + unsigned char evt_irq_name[16]; 707 + 708 708 /* Base of the PPR log, if present */ 709 709 u8 *ppr_log; 710 710 711 + /* Name for PPR log interrupt */ 712 + unsigned char ppr_irq_name[16]; 713 + 711 714 /* Base of the GA log, if present */ 712 715 u8 *ga_log; 716 + 717 + /* Name for GA log interrupt */ 718 + unsigned char ga_irq_name[16]; 713 719 714 720 /* Tail of the GA log, if present */ 715 721 u8 *ga_log_tail;
+94 -37
drivers/iommu/amd/init.c
··· 483 483 iommu_feature_disable(iommu, CONTROL_GALOG_EN); 484 484 iommu_feature_disable(iommu, CONTROL_GAINT_EN); 485 485 486 + /* Disable IOMMU PPR logging */ 487 + iommu_feature_disable(iommu, CONTROL_PPRLOG_EN); 488 + iommu_feature_disable(iommu, CONTROL_PPRINT_EN); 489 + 486 490 /* Disable IOMMU hardware itself */ 487 491 iommu_feature_disable(iommu, CONTROL_IOMMU_EN); 488 492 ··· 757 753 } 758 754 759 755 /* 756 + * Interrupt handler has processed all pending events and adjusted head 757 + * and tail pointer. Reset overflow mask and restart logging again. 758 + */ 759 + static void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type, 760 + u8 cntrl_intr, u8 cntrl_log, 761 + u32 status_run_mask, u32 status_overflow_mask) 762 + { 763 + u32 status; 764 + 765 + status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); 766 + if (status & status_run_mask) 767 + return; 768 + 769 + pr_info_ratelimited("IOMMU %s log restarting\n", evt_type); 770 + 771 + iommu_feature_disable(iommu, cntrl_log); 772 + iommu_feature_disable(iommu, cntrl_intr); 773 + 774 + writel(status_overflow_mask, iommu->mmio_base + MMIO_STATUS_OFFSET); 775 + 776 + iommu_feature_enable(iommu, cntrl_intr); 777 + iommu_feature_enable(iommu, cntrl_log); 778 + } 779 + 780 + /* 760 781 * This function restarts event logging in case the IOMMU experienced 761 782 * an event log buffer overflow. 762 783 */ 763 784 void amd_iommu_restart_event_logging(struct amd_iommu *iommu) 764 785 { 765 - iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); 766 - iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); 786 + amd_iommu_restart_log(iommu, "Event", CONTROL_EVT_INT_EN, 787 + CONTROL_EVT_LOG_EN, MMIO_STATUS_EVT_RUN_MASK, 788 + MMIO_STATUS_EVT_OVERFLOW_MASK); 767 789 } 768 790 769 791 /* 770 792 * This function restarts event logging in case the IOMMU experienced 771 - * an GA log overflow. 793 + * GA log overflow. 
772 794 */ 773 795 void amd_iommu_restart_ga_log(struct amd_iommu *iommu) 774 796 { 775 - u32 status; 797 + amd_iommu_restart_log(iommu, "GA", CONTROL_GAINT_EN, 798 + CONTROL_GALOG_EN, MMIO_STATUS_GALOG_RUN_MASK, 799 + MMIO_STATUS_GALOG_OVERFLOW_MASK); 800 + } 776 801 777 - status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); 778 - if (status & MMIO_STATUS_GALOG_RUN_MASK) 779 - return; 780 - 781 - pr_info_ratelimited("IOMMU GA Log restarting\n"); 782 - 783 - iommu_feature_disable(iommu, CONTROL_GALOG_EN); 784 - iommu_feature_disable(iommu, CONTROL_GAINT_EN); 785 - 786 - writel(MMIO_STATUS_GALOG_OVERFLOW_MASK, 787 - iommu->mmio_base + MMIO_STATUS_OFFSET); 788 - 789 - iommu_feature_enable(iommu, CONTROL_GAINT_EN); 790 - iommu_feature_enable(iommu, CONTROL_GALOG_EN); 802 + /* 803 + * This function restarts ppr logging in case the IOMMU experienced 804 + * PPR log overflow. 805 + */ 806 + void amd_iommu_restart_ppr_log(struct amd_iommu *iommu) 807 + { 808 + amd_iommu_restart_log(iommu, "PPR", CONTROL_PPRINT_EN, 809 + CONTROL_PPRLOG_EN, MMIO_STATUS_PPR_RUN_MASK, 810 + MMIO_STATUS_PPR_OVERFLOW_MASK); 791 811 } 792 812 793 813 /* ··· 934 906 if (iommu->ppr_log == NULL) 935 907 return; 936 908 909 + iommu_feature_enable(iommu, CONTROL_PPR_EN); 910 + 937 911 entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512; 938 912 939 913 memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET, ··· 946 916 writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); 947 917 948 918 iommu_feature_enable(iommu, CONTROL_PPRLOG_EN); 949 - iommu_feature_enable(iommu, CONTROL_PPR_EN); 919 + iommu_feature_enable(iommu, CONTROL_PPRINT_EN); 950 920 } 951 921 952 922 static void __init free_ppr_log(struct amd_iommu *iommu) ··· 2341 2311 struct irq_data *irqd = irq_domain_get_irq_data(domain, i); 2342 2312 2343 2313 irqd->chip = &intcapxt_controller; 2314 + irqd->hwirq = info->hwirq; 2344 2315 irqd->chip_data = info->data; 2345 2316 __irq_set_handler(i, handle_edge_irq, 0, "edge"); 2346 2317 } 
··· 2368 2337 xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0); 2369 2338 xt.destid_24_31 = cfg->dest_apicid >> 24; 2370 2339 2371 - /** 2372 - * Current IOMMU implementation uses the same IRQ for all 2373 - * 3 IOMMU interrupts. 2374 - */ 2375 - writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET); 2376 - writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET); 2377 - writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET); 2340 + writeq(xt.capxt, iommu->mmio_base + irqd->hwirq); 2378 2341 } 2379 2342 2380 2343 static void intcapxt_mask_irq(struct irq_data *irqd) 2381 2344 { 2382 2345 struct amd_iommu *iommu = irqd->chip_data; 2383 2346 2384 - writeq(0, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET); 2385 - writeq(0, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET); 2386 - writeq(0, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET); 2347 + writeq(0, iommu->mmio_base + irqd->hwirq); 2387 2348 } 2388 2349 2389 2350 ··· 2438 2415 return iommu_irqdomain; 2439 2416 } 2440 2417 2441 - static int iommu_setup_intcapxt(struct amd_iommu *iommu) 2418 + static int __iommu_setup_intcapxt(struct amd_iommu *iommu, const char *devname, 2419 + int hwirq, irq_handler_t thread_fn) 2442 2420 { 2443 2421 struct irq_domain *domain; 2444 2422 struct irq_alloc_info info; ··· 2453 2429 init_irq_alloc_info(&info, NULL); 2454 2430 info.type = X86_IRQ_ALLOC_TYPE_AMDVI; 2455 2431 info.data = iommu; 2432 + info.hwirq = hwirq; 2456 2433 2457 2434 irq = irq_domain_alloc_irqs(domain, 1, node, &info); 2458 2435 if (irq < 0) { ··· 2462 2437 } 2463 2438 2464 2439 ret = request_threaded_irq(irq, amd_iommu_int_handler, 2465 - amd_iommu_int_thread, 0, "AMD-Vi", iommu); 2440 + thread_fn, 0, devname, iommu); 2466 2441 if (ret) { 2467 2442 irq_domain_free_irqs(irq, 1); 2468 2443 irq_domain_remove(domain); ··· 2470 2445 } 2471 2446 2472 2447 return 0; 2448 + } 2449 + 2450 + static int iommu_setup_intcapxt(struct amd_iommu *iommu) 2451 + { 2452 + int ret; 2453 + 2454 + 
snprintf(iommu->evt_irq_name, sizeof(iommu->evt_irq_name), 2455 + "AMD-Vi%d-Evt", iommu->index); 2456 + ret = __iommu_setup_intcapxt(iommu, iommu->evt_irq_name, 2457 + MMIO_INTCAPXT_EVT_OFFSET, 2458 + amd_iommu_int_thread_evtlog); 2459 + if (ret) 2460 + return ret; 2461 + 2462 + snprintf(iommu->ppr_irq_name, sizeof(iommu->ppr_irq_name), 2463 + "AMD-Vi%d-PPR", iommu->index); 2464 + ret = __iommu_setup_intcapxt(iommu, iommu->ppr_irq_name, 2465 + MMIO_INTCAPXT_PPR_OFFSET, 2466 + amd_iommu_int_thread_pprlog); 2467 + if (ret) 2468 + return ret; 2469 + 2470 + #ifdef CONFIG_IRQ_REMAP 2471 + snprintf(iommu->ga_irq_name, sizeof(iommu->ga_irq_name), 2472 + "AMD-Vi%d-GA", iommu->index); 2473 + ret = __iommu_setup_intcapxt(iommu, iommu->ga_irq_name, 2474 + MMIO_INTCAPXT_GALOG_OFFSET, 2475 + amd_iommu_int_thread_galog); 2476 + #endif 2477 + 2478 + return ret; 2473 2479 } 2474 2480 2475 2481 static int iommu_init_irq(struct amd_iommu *iommu) ··· 2528 2472 2529 2473 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); 2530 2474 2531 - if (iommu->ppr_log != NULL) 2532 - iommu_feature_enable(iommu, CONTROL_PPRINT_EN); 2533 2475 return 0; 2534 2476 } 2535 2477 ··· 2943 2889 static void enable_iommus(void) 2944 2890 { 2945 2891 early_enable_iommus(); 2946 - enable_iommus_vapic(); 2947 - enable_iommus_v2(); 2948 2892 } 2949 2893 2950 2894 static void disable_iommus(void) ··· 3206 3154 goto out; 3207 3155 } 3208 3156 3157 + /* 3158 + * Interrupt handler is ready to process interrupts. Enable 3159 + * PPR and GA log interrupt for all IOMMUs. 3160 + */ 3161 + enable_iommus_vapic(); 3162 + enable_iommus_v2(); 3163 + 3209 3164 out: 3210 3165 return ret; 3211 3166 } ··· 3292 3233 register_syscore_ops(&amd_iommu_syscore_ops); 3293 3234 ret = amd_iommu_init_pci(); 3294 3235 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT; 3295 - enable_iommus_vapic(); 3296 - enable_iommus_v2(); 3297 3236 break; 3298 3237 case IOMMU_PCI_INIT: 3299 3238 ret = amd_iommu_enable_interrupts();
+50 -36
drivers/iommu/amd/iommu.c
··· 841 841 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { } 842 842 #endif /* !CONFIG_IRQ_REMAP */ 843 843 844 - #define AMD_IOMMU_INT_MASK \ 845 - (MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \ 846 - MMIO_STATUS_EVT_INT_MASK | \ 847 - MMIO_STATUS_PPR_INT_MASK | \ 848 - MMIO_STATUS_GALOG_OVERFLOW_MASK | \ 849 - MMIO_STATUS_GALOG_INT_MASK) 850 - 851 - irqreturn_t amd_iommu_int_thread(int irq, void *data) 844 + static void amd_iommu_handle_irq(void *data, const char *evt_type, 845 + u32 int_mask, u32 overflow_mask, 846 + void (*int_handler)(struct amd_iommu *), 847 + void (*overflow_handler)(struct amd_iommu *)) 852 848 { 853 849 struct amd_iommu *iommu = (struct amd_iommu *) data; 854 850 u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); 851 + u32 mask = int_mask | overflow_mask; 855 852 856 - while (status & AMD_IOMMU_INT_MASK) { 853 + while (status & mask) { 857 854 /* Enable interrupt sources again */ 858 - writel(AMD_IOMMU_INT_MASK, 859 - iommu->mmio_base + MMIO_STATUS_OFFSET); 855 + writel(mask, iommu->mmio_base + MMIO_STATUS_OFFSET); 860 856 861 - if (status & MMIO_STATUS_EVT_INT_MASK) { 862 - pr_devel("Processing IOMMU Event Log\n"); 863 - iommu_poll_events(iommu); 857 + if (int_handler) { 858 + pr_devel("Processing IOMMU (ivhd%d) %s Log\n", 859 + iommu->index, evt_type); 860 + int_handler(iommu); 864 861 } 865 862 866 - if (status & MMIO_STATUS_PPR_INT_MASK) { 867 - pr_devel("Processing IOMMU PPR Log\n"); 868 - iommu_poll_ppr_log(iommu); 869 - } 870 - 871 - #ifdef CONFIG_IRQ_REMAP 872 - if (status & (MMIO_STATUS_GALOG_INT_MASK | 873 - MMIO_STATUS_GALOG_OVERFLOW_MASK)) { 874 - pr_devel("Processing IOMMU GA Log\n"); 875 - iommu_poll_ga_log(iommu); 876 - } 877 - 878 - if (status & MMIO_STATUS_GALOG_OVERFLOW_MASK) { 879 - pr_info_ratelimited("IOMMU GA Log overflow\n"); 880 - amd_iommu_restart_ga_log(iommu); 881 - } 882 - #endif 883 - 884 - if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK) { 885 - pr_info_ratelimited("IOMMU event log 
overflow\n"); 886 - amd_iommu_restart_event_logging(iommu); 887 - } 863 + if ((status & overflow_mask) && overflow_handler) 864 + overflow_handler(iommu); 888 865 889 866 /* 890 867 * Hardware bug: ERBT1312 ··· 878 901 */ 879 902 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); 880 903 } 904 + } 905 + 906 + irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data) 907 + { 908 + amd_iommu_handle_irq(data, "Evt", MMIO_STATUS_EVT_INT_MASK, 909 + MMIO_STATUS_EVT_OVERFLOW_MASK, 910 + iommu_poll_events, amd_iommu_restart_event_logging); 911 + 912 + return IRQ_HANDLED; 913 + } 914 + 915 + irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data) 916 + { 917 + amd_iommu_handle_irq(data, "PPR", MMIO_STATUS_PPR_INT_MASK, 918 + MMIO_STATUS_PPR_OVERFLOW_MASK, 919 + iommu_poll_ppr_log, amd_iommu_restart_ppr_log); 920 + 921 + return IRQ_HANDLED; 922 + } 923 + 924 + irqreturn_t amd_iommu_int_thread_galog(int irq, void *data) 925 + { 926 + #ifdef CONFIG_IRQ_REMAP 927 + amd_iommu_handle_irq(data, "GA", MMIO_STATUS_GALOG_INT_MASK, 928 + MMIO_STATUS_GALOG_OVERFLOW_MASK, 929 + iommu_poll_ga_log, amd_iommu_restart_ga_log); 930 + #endif 931 + 932 + return IRQ_HANDLED; 933 + } 934 + 935 + irqreturn_t amd_iommu_int_thread(int irq, void *data) 936 + { 937 + amd_iommu_int_thread_evtlog(irq, data); 938 + amd_iommu_int_thread_pprlog(irq, data); 939 + amd_iommu_int_thread_galog(irq, data); 940 + 881 941 return IRQ_HANDLED; 882 942 } 883 943
+5 -2
drivers/iommu/amd/iommu_v2.c
··· 262 262 263 263 static void put_pasid_state_wait(struct pasid_state *pasid_state) 264 264 { 265 - refcount_dec(&pasid_state->count); 266 - wait_event(pasid_state->wq, !refcount_read(&pasid_state->count)); 265 + if (!refcount_dec_and_test(&pasid_state->count)) 266 + wait_event(pasid_state->wq, !refcount_read(&pasid_state->count)); 267 267 free_pasid_state(pasid_state); 268 268 } 269 269 ··· 326 326 continue; 327 327 328 328 put_pasid_state(pasid_state); 329 + 330 + /* Clear the pasid state so that the pasid can be re-used */ 331 + clear_pasid_state(dev_state, pasid_state->pasid); 329 332 330 333 /* 331 334 * This will call the mn_release function and
+1 -1
drivers/iommu/apple-dart.c
··· 1276 1276 return 0; 1277 1277 } 1278 1278 1279 - DEFINE_SIMPLE_DEV_PM_OPS(apple_dart_pm_ops, apple_dart_suspend, apple_dart_resume); 1279 + static DEFINE_SIMPLE_DEV_PM_OPS(apple_dart_pm_ops, apple_dart_suspend, apple_dart_resume); 1280 1280 1281 1281 static const struct of_device_id apple_dart_of_match[] = { 1282 1282 { .compatible = "apple,t8103-dart", .data = &apple_dart_hw_t8103 },
+1 -1
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
··· 80 80 * be some overlap between use of both ASIDs, until we invalidate the 81 81 * TLB. 82 82 */ 83 - arm_smmu_write_ctx_desc(smmu_domain, 0, cd); 83 + arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, cd); 84 84 85 85 /* Invalidate TLB entries previously associated with that context */ 86 86 arm_smmu_tlb_inv_asid(smmu, asid);
+15 -30
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
··· 1059 1059 /* 1060 1060 * This function handles the following cases: 1061 1061 * 1062 - * (1) Install primary CD, for normal DMA traffic (SSID = 0). 1062 + * (1) Install primary CD, for normal DMA traffic (SSID = IOMMU_NO_PASID = 0). 1063 1063 * (2) Install a secondary CD, for SID+SSID traffic. 1064 1064 * (3) Update ASID of a CD. Atomically write the first 64 bits of the 1065 1065 * CD, then invalidate the old entry and mappings. ··· 1607 1607 1608 1608 sid = FIELD_GET(PRIQ_0_SID, evt[0]); 1609 1609 ssv = FIELD_GET(PRIQ_0_SSID_V, evt[0]); 1610 - ssid = ssv ? FIELD_GET(PRIQ_0_SSID, evt[0]) : 0; 1610 + ssid = ssv ? FIELD_GET(PRIQ_0_SSID, evt[0]) : IOMMU_NO_PASID; 1611 1611 last = FIELD_GET(PRIQ_0_PRG_LAST, evt[0]); 1612 1612 grpid = FIELD_GET(PRIQ_1_PRG_IDX, evt[1]); 1613 1613 ··· 1748 1748 */ 1749 1749 *cmd = (struct arm_smmu_cmdq_ent) { 1750 1750 .opcode = CMDQ_OP_ATC_INV, 1751 - .substream_valid = !!ssid, 1751 + .substream_valid = (ssid != IOMMU_NO_PASID), 1752 1752 .atc.ssid = ssid, 1753 1753 }; 1754 1754 ··· 1795 1795 struct arm_smmu_cmdq_ent cmd; 1796 1796 struct arm_smmu_cmdq_batch cmds; 1797 1797 1798 - arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd); 1798 + arm_smmu_atc_inv_to_cmd(IOMMU_NO_PASID, 0, 0, &cmd); 1799 1799 1800 1800 cmds.num = 0; 1801 1801 for (i = 0; i < master->num_streams; i++) { ··· 1875 1875 cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid; 1876 1876 arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd); 1877 1877 } 1878 - arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0); 1878 + arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, 0, 0); 1879 1879 } 1880 1880 1881 1881 static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd, ··· 1968 1968 * Unfortunately, this can't be leaf-only since we may have 1969 1969 * zapped an entire table. 
1970 1970 */ 1971 - arm_smmu_atc_inv_domain(smmu_domain, 0, iova, size); 1971 + arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, iova, size); 1972 1972 } 1973 1973 1974 1974 void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid, ··· 2055 2055 return &smmu_domain->domain; 2056 2056 } 2057 2057 2058 - static int arm_smmu_bitmap_alloc(unsigned long *map, int span) 2059 - { 2060 - int idx, size = 1 << span; 2061 - 2062 - do { 2063 - idx = find_first_zero_bit(map, size); 2064 - if (idx == size) 2065 - return -ENOSPC; 2066 - } while (test_and_set_bit(idx, map)); 2067 - 2068 - return idx; 2069 - } 2070 - 2071 - static void arm_smmu_bitmap_free(unsigned long *map, int idx) 2072 - { 2073 - clear_bit(idx, map); 2074 - } 2075 - 2076 2058 static void arm_smmu_domain_free(struct iommu_domain *domain) 2077 2059 { 2078 2060 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); ··· 2075 2093 } else { 2076 2094 struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; 2077 2095 if (cfg->vmid) 2078 - arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid); 2096 + ida_free(&smmu->vmid_map, cfg->vmid); 2079 2097 } 2080 2098 2081 2099 kfree(smmu_domain); ··· 2124 2142 * the master has been added to the devices list for this domain. 2125 2143 * This isn't an issue because the STE hasn't been installed yet. 
2126 2144 */ 2127 - ret = arm_smmu_write_ctx_desc(smmu_domain, 0, &cfg->cd); 2145 + ret = arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, &cfg->cd); 2128 2146 if (ret) 2129 2147 goto out_free_cd_tables; 2130 2148 ··· 2149 2167 struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; 2150 2168 typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr; 2151 2169 2152 - vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits); 2170 + /* Reserve VMID 0 for stage-2 bypass STEs */ 2171 + vmid = ida_alloc_range(&smmu->vmid_map, 1, (1 << smmu->vmid_bits) - 1, 2172 + GFP_KERNEL); 2153 2173 if (vmid < 0) 2154 2174 return vmid; 2155 2175 ··· 2312 2328 pdev = to_pci_dev(master->dev); 2313 2329 2314 2330 atomic_inc(&smmu_domain->nr_ats_masters); 2315 - arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0); 2331 + arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, 0, 0); 2316 2332 if (pci_enable_ats(pdev, stu)) 2317 2333 dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu); 2318 2334 } ··· 3082 3098 reg |= STRTAB_BASE_RA; 3083 3099 smmu->strtab_cfg.strtab_base = reg; 3084 3100 3085 - /* Allocate the first VMID for stage-2 bypass STEs */ 3086 - set_bit(0, smmu->vmid_map); 3101 + ida_init(&smmu->vmid_map); 3102 + 3087 3103 return 0; 3088 3104 } 3089 3105 ··· 3907 3923 iommu_device_sysfs_remove(&smmu->iommu); 3908 3924 arm_smmu_device_disable(smmu); 3909 3925 iopf_queue_free(smmu->evtq.iopf); 3926 + ida_destroy(&smmu->vmid_map); 3910 3927 } 3911 3928 3912 3929 static void arm_smmu_device_shutdown(struct platform_device *pdev)
+1 -1
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
··· 670 670 671 671 #define ARM_SMMU_MAX_VMIDS (1 << 16) 672 672 unsigned int vmid_bits; 673 - DECLARE_BITMAP(vmid_map, ARM_SMMU_MAX_VMIDS); 673 + struct ida vmid_map; 674 674 675 675 unsigned int ssid_bits; 676 676 unsigned int sid_bits;
+1 -1
drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
··· 3 3 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 4 4 */ 5 5 6 - #include <linux/of_device.h> 6 + #include <linux/device.h> 7 7 #include <linux/firmware/qcom/qcom_scm.h> 8 8 #include <linux/ratelimit.h> 9 9
+5 -2
drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
··· 251 251 { .compatible = "qcom,sc7280-mss-pil" }, 252 252 { .compatible = "qcom,sc8180x-mdss" }, 253 253 { .compatible = "qcom,sc8280xp-mdss" }, 254 - { .compatible = "qcom,sm8150-mdss" }, 255 - { .compatible = "qcom,sm8250-mdss" }, 256 254 { .compatible = "qcom,sdm845-mdss" }, 257 255 { .compatible = "qcom,sdm845-mss-pil" }, 256 + { .compatible = "qcom,sm6350-mdss" }, 257 + { .compatible = "qcom,sm6375-mdss" }, 258 + { .compatible = "qcom,sm8150-mdss" }, 259 + { .compatible = "qcom,sm8250-mdss" }, 258 260 { } 259 261 }; 260 262 ··· 530 528 { .compatible = "qcom,sm6125-smmu-500", .data = &qcom_smmu_500_impl0_data }, 531 529 { .compatible = "qcom,sm6350-smmu-v2", .data = &qcom_smmu_v2_data }, 532 530 { .compatible = "qcom,sm6350-smmu-500", .data = &qcom_smmu_500_impl0_data }, 531 + { .compatible = "qcom,sm6375-smmu-v2", .data = &qcom_smmu_v2_data }, 533 532 { .compatible = "qcom,sm6375-smmu-500", .data = &qcom_smmu_500_impl0_data }, 534 533 { .compatible = "qcom,sm8150-smmu-500", .data = &qcom_smmu_500_impl0_data }, 535 534 { .compatible = "qcom,sm8250-smmu-500", .data = &qcom_smmu_500_impl0_data },
-1
drivers/iommu/arm/arm-smmu/arm-smmu.c
··· 29 29 #include <linux/module.h> 30 30 #include <linux/of.h> 31 31 #include <linux/of_address.h> 32 - #include <linux/of_device.h> 33 32 #include <linux/pci.h> 34 33 #include <linux/platform_device.h> 35 34 #include <linux/pm_runtime.h>
+50 -21
drivers/iommu/arm/arm-smmu/qcom_iommu.c
··· 22 22 #include <linux/init.h> 23 23 #include <linux/mutex.h> 24 24 #include <linux/of.h> 25 - #include <linux/of_address.h> 26 - #include <linux/of_device.h> 25 + #include <linux/of_platform.h> 27 26 #include <linux/platform_device.h> 28 27 #include <linux/pm.h> 29 28 #include <linux/pm_runtime.h> ··· 50 51 struct clk_bulk_data clks[CLK_NUM]; 51 52 void __iomem *local_base; 52 53 u32 sec_id; 53 - u8 num_ctxs; 54 - struct qcom_iommu_ctx *ctxs[]; /* indexed by asid-1 */ 54 + u8 max_asid; 55 + struct qcom_iommu_ctx *ctxs[]; /* indexed by asid */ 55 56 }; 56 57 57 58 struct qcom_iommu_ctx { 58 59 struct device *dev; 59 60 void __iomem *base; 60 61 bool secure_init; 62 + bool secured_ctx; 61 63 u8 asid; /* asid and ctx bank # are 1:1 */ 62 64 struct iommu_domain *domain; 63 65 }; ··· 94 94 struct qcom_iommu_dev *qcom_iommu = d->iommu; 95 95 if (!qcom_iommu) 96 96 return NULL; 97 - return qcom_iommu->ctxs[asid - 1]; 97 + return qcom_iommu->ctxs[asid]; 98 98 } 99 99 100 100 static inline void ··· 272 272 } 273 273 ctx->secure_init = true; 274 274 } 275 + 276 + /* Secured QSMMU-500/QSMMU-v2 contexts cannot be programmed */ 277 + if (ctx->secured_ctx) { 278 + ctx->domain = domain; 279 + continue; 280 + } 281 + 282 + /* Disable context bank before programming */ 283 + iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0); 284 + 285 + /* Clear context bank fault address fault status registers */ 286 + iommu_writel(ctx, ARM_SMMU_CB_FAR, 0); 287 + iommu_writel(ctx, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT); 275 288 276 289 /* TTBRs */ 277 290 iommu_writeq(ctx, ARM_SMMU_CB_TTBR0, ··· 540 527 qcom_iommu = platform_get_drvdata(iommu_pdev); 541 528 542 529 /* make sure the asid specified in dt is valid, so we don't have 543 - * to sanity check this elsewhere, since 'asid - 1' is used to 544 - * index into qcom_iommu->ctxs: 530 + * to sanity check this elsewhere: 545 531 */ 546 - if (WARN_ON(asid < 1) || 547 - WARN_ON(asid > qcom_iommu->num_ctxs)) { 532 + if (WARN_ON(asid > qcom_iommu->max_asid) 
|| 533 + WARN_ON(qcom_iommu->ctxs[asid] == NULL)) { 548 534 put_device(&iommu_pdev->dev); 549 535 return -EINVAL; 550 536 } ··· 629 617 630 618 static int get_asid(const struct device_node *np) 631 619 { 632 - u32 reg; 620 + u32 reg, val; 621 + int asid; 633 622 634 623 /* read the "reg" property directly to get the relative address 635 624 * of the context bank, and calculate the asid from that: ··· 638 625 if (of_property_read_u32_index(np, "reg", 0, &reg)) 639 626 return -ENODEV; 640 627 641 - return reg / 0x1000; /* context banks are 0x1000 apart */ 628 + /* 629 + * Context banks are 0x1000 apart but, in some cases, the ASID 630 + * number doesn't match to this logic and needs to be passed 631 + * from the DT configuration explicitly. 632 + */ 633 + if (!of_property_read_u32(np, "qcom,ctx-asid", &val)) 634 + asid = val; 635 + else 636 + asid = reg / 0x1000; 637 + 638 + return asid; 642 639 } 643 640 644 641 static int qcom_iommu_ctx_probe(struct platform_device *pdev) ··· 656 633 struct qcom_iommu_ctx *ctx; 657 634 struct device *dev = &pdev->dev; 658 635 struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev->parent); 659 - struct resource *res; 660 636 int ret, irq; 661 637 662 638 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); ··· 665 643 ctx->dev = dev; 666 644 platform_set_drvdata(pdev, ctx); 667 645 668 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 669 - ctx->base = devm_ioremap_resource(dev, res); 646 + ctx->base = devm_platform_ioremap_resource(pdev, 0); 670 647 if (IS_ERR(ctx->base)) 671 648 return PTR_ERR(ctx->base); 672 649 673 650 irq = platform_get_irq(pdev, 0); 674 651 if (irq < 0) 675 - return -ENODEV; 652 + return irq; 653 + 654 + if (of_device_is_compatible(dev->of_node, "qcom,msm-iommu-v2-sec")) 655 + ctx->secured_ctx = true; 676 656 677 657 /* clear IRQs before registering fault handler, just in case the 678 658 * boot-loader left us a surprise: 679 659 */ 680 - iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, 
ARM_SMMU_CB_FSR)); 660 + if (!ctx->secured_ctx) 661 + iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, ARM_SMMU_CB_FSR)); 681 662 682 663 ret = devm_request_irq(dev, irq, 683 664 qcom_iommu_fault, ··· 702 677 703 678 dev_dbg(dev, "found asid %u\n", ctx->asid); 704 679 705 - qcom_iommu->ctxs[ctx->asid - 1] = ctx; 680 + qcom_iommu->ctxs[ctx->asid] = ctx; 706 681 707 682 return 0; 708 683 } ··· 714 689 715 690 platform_set_drvdata(pdev, NULL); 716 691 717 - qcom_iommu->ctxs[ctx->asid - 1] = NULL; 692 + qcom_iommu->ctxs[ctx->asid] = NULL; 718 693 } 719 694 720 695 static const struct of_device_id ctx_of_match[] = { 721 696 { .compatible = "qcom,msm-iommu-v1-ns" }, 722 697 { .compatible = "qcom,msm-iommu-v1-sec" }, 698 + { .compatible = "qcom,msm-iommu-v2-ns" }, 699 + { .compatible = "qcom,msm-iommu-v2-sec" }, 723 700 { /* sentinel */ } 724 701 }; 725 702 ··· 739 712 struct device_node *child; 740 713 741 714 for_each_child_of_node(qcom_iommu->dev->of_node, child) { 742 - if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec")) { 715 + if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec") || 716 + of_device_is_compatible(child, "qcom,msm-iommu-v2-sec")) { 743 717 of_node_put(child); 744 718 return true; 745 719 } ··· 764 736 for_each_child_of_node(dev->of_node, child) 765 737 max_asid = max(max_asid, get_asid(child)); 766 738 767 - qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid), 739 + qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid + 1), 768 740 GFP_KERNEL); 769 741 if (!qcom_iommu) 770 742 return -ENOMEM; 771 - qcom_iommu->num_ctxs = max_asid; 743 + qcom_iommu->max_asid = max_asid; 772 744 qcom_iommu->dev = dev; 773 745 774 746 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ··· 884 856 885 857 static const struct of_device_id qcom_iommu_of_match[] = { 886 858 { .compatible = "qcom,msm-iommu-v1" }, 859 + { .compatible = "qcom,msm-iommu-v2" }, 887 860 { /* sentinel */ } 888 861 }; 889 862
+20 -6
drivers/iommu/dma-iommu.c
··· 660 660 { 661 661 struct iommu_dma_cookie *cookie = domain->iova_cookie; 662 662 struct iova_domain *iovad = &cookie->iovad; 663 - unsigned long shift, iova_len, iova = 0; 663 + unsigned long shift, iova_len, iova; 664 664 665 665 if (cookie->type == IOMMU_DMA_MSI_COOKIE) { 666 666 cookie->msi_iova += size; ··· 675 675 if (domain->geometry.force_aperture) 676 676 dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end); 677 677 678 - /* Try to get PCI devices a SAC address */ 679 - if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev)) 678 + /* 679 + * Try to use all the 32-bit PCI addresses first. The original SAC vs. 680 + * DAC reasoning loses relevance with PCIe, but enough hardware and 681 + * firmware bugs are still lurking out there that it's safest not to 682 + * venture into the 64-bit space until necessary. 683 + * 684 + * If your device goes wrong after seeing the notice then likely either 685 + * its driver is not setting DMA masks accurately, the hardware has 686 + * some inherent bug in handling >32-bit addresses, or not all the 687 + * expected address bits are wired up between the device and the IOMMU. 688 + */ 689 + if (dma_limit > DMA_BIT_MASK(32) && dev->iommu->pci_32bit_workaround) { 680 690 iova = alloc_iova_fast(iovad, iova_len, 681 691 DMA_BIT_MASK(32) >> shift, false); 692 + if (iova) 693 + goto done; 682 694 683 - if (!iova) 684 - iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, 685 - true); 695 + dev->iommu->pci_32bit_workaround = false; 696 + dev_notice(dev, "Using %d-bit DMA addresses\n", bits_per(dma_limit)); 697 + } 686 698 699 + iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true); 700 + done: 687 701 return (dma_addr_t)iova << shift; 688 702 } 689 703
+8
drivers/iommu/dma-iommu.h
··· 17 17 void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list); 18 18 19 19 extern bool iommu_dma_forcedac; 20 + static inline void iommu_dma_set_pci_32bit_workaround(struct device *dev) 21 + { 22 + dev->iommu->pci_32bit_workaround = !iommu_dma_forcedac; 23 + } 20 24 21 25 #else /* CONFIG_IOMMU_DMA */ 22 26 ··· 39 35 } 40 36 41 37 static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list) 38 + { 39 + } 40 + 41 + static inline void iommu_dma_set_pci_32bit_workaround(struct device *dev) 42 42 { 43 43 } 44 44
+149 -95
drivers/iommu/intel/iommu.c
··· 113 113 114 114 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things 115 115 are never going to work. */ 116 - static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn) 116 + static inline unsigned long mm_to_dma_pfn_start(unsigned long mm_pfn) 117 117 { 118 118 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT); 119 119 } 120 + static inline unsigned long mm_to_dma_pfn_end(unsigned long mm_pfn) 121 + { 122 + return ((mm_pfn + 1) << (PAGE_SHIFT - VTD_PAGE_SHIFT)) - 1; 123 + } 120 124 static inline unsigned long page_to_dma_pfn(struct page *pg) 121 125 { 122 - return mm_to_dma_pfn(page_to_pfn(pg)); 126 + return mm_to_dma_pfn_start(page_to_pfn(pg)); 123 127 } 124 128 static inline unsigned long virt_to_dma_pfn(void *p) 125 129 { ··· 881 877 } 882 878 /* For request-without-pasid, get the pasid from context entry */ 883 879 if (intel_iommu_sm && pasid == IOMMU_PASID_INVALID) 884 - pasid = PASID_RID2PASID; 880 + pasid = IOMMU_NO_PASID; 885 881 886 882 dir_index = pasid >> PASID_PDE_SHIFT; 887 883 pde = &dir[dir_index]; ··· 1363 1359 1364 1360 static void domain_update_iotlb(struct dmar_domain *domain) 1365 1361 { 1362 + struct dev_pasid_info *dev_pasid; 1366 1363 struct device_domain_info *info; 1367 1364 bool has_iotlb_device = false; 1368 1365 unsigned long flags; 1369 1366 1370 1367 spin_lock_irqsave(&domain->lock, flags); 1371 1368 list_for_each_entry(info, &domain->devices, link) { 1369 + if (info->ats_enabled) { 1370 + has_iotlb_device = true; 1371 + break; 1372 + } 1373 + } 1374 + 1375 + list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain) { 1376 + info = dev_iommu_priv_get(dev_pasid->dev); 1372 1377 if (info->ats_enabled) { 1373 1378 has_iotlb_device = true; 1374 1379 break; ··· 1462 1449 qdep = info->ats_qdep; 1463 1450 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid, 1464 1451 qdep, addr, mask); 1465 - quirk_extra_dev_tlb_flush(info, addr, mask, PASID_RID2PASID, qdep); 1452 + quirk_extra_dev_tlb_flush(info, addr, mask, 
IOMMU_NO_PASID, qdep); 1466 1453 } 1467 1454 1468 1455 static void iommu_flush_dev_iotlb(struct dmar_domain *domain, 1469 1456 u64 addr, unsigned mask) 1470 1457 { 1458 + struct dev_pasid_info *dev_pasid; 1471 1459 struct device_domain_info *info; 1472 1460 unsigned long flags; 1473 1461 ··· 1478 1464 spin_lock_irqsave(&domain->lock, flags); 1479 1465 list_for_each_entry(info, &domain->devices, link) 1480 1466 __iommu_flush_dev_iotlb(info, addr, mask); 1467 + 1468 + list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain) { 1469 + info = dev_iommu_priv_get(dev_pasid->dev); 1470 + 1471 + if (!info->ats_enabled) 1472 + continue; 1473 + 1474 + qi_flush_dev_iotlb_pasid(info->iommu, 1475 + PCI_DEVID(info->bus, info->devfn), 1476 + info->pfsid, dev_pasid->pasid, 1477 + info->ats_qdep, addr, 1478 + mask); 1479 + } 1480 + spin_unlock_irqrestore(&domain->lock, flags); 1481 + } 1482 + 1483 + static void domain_flush_pasid_iotlb(struct intel_iommu *iommu, 1484 + struct dmar_domain *domain, u64 addr, 1485 + unsigned long npages, bool ih) 1486 + { 1487 + u16 did = domain_id_iommu(domain, iommu); 1488 + struct dev_pasid_info *dev_pasid; 1489 + unsigned long flags; 1490 + 1491 + spin_lock_irqsave(&domain->lock, flags); 1492 + list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain) 1493 + qi_flush_piotlb(iommu, did, dev_pasid->pasid, addr, npages, ih); 1494 + 1495 + if (!list_empty(&domain->devices)) 1496 + qi_flush_piotlb(iommu, did, IOMMU_NO_PASID, addr, npages, ih); 1481 1497 spin_unlock_irqrestore(&domain->lock, flags); 1482 1498 } 1483 1499 ··· 1528 1484 ih = 1 << 6; 1529 1485 1530 1486 if (domain->use_first_level) { 1531 - qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, pages, ih); 1487 + domain_flush_pasid_iotlb(iommu, domain, addr, pages, ih); 1532 1488 } else { 1533 1489 unsigned long bitmask = aligned_pages - 1; 1534 1490 ··· 1598 1554 u16 did = domain_id_iommu(dmar_domain, iommu); 1599 1555 1600 1556 if (dmar_domain->use_first_level) 1601 - 
qi_flush_piotlb(iommu, did, PASID_RID2PASID, 0, -1, 0); 1557 + domain_flush_pasid_iotlb(iommu, dmar_domain, 0, -1, 0); 1602 1558 else 1603 1559 iommu->flush.flush_iotlb(iommu, did, 0, 0, 1604 1560 DMA_TLB_DSI_FLUSH); ··· 1770 1726 domain->use_first_level = true; 1771 1727 domain->has_iotlb_device = false; 1772 1728 INIT_LIST_HEAD(&domain->devices); 1729 + INIT_LIST_HEAD(&domain->dev_pasids); 1773 1730 spin_lock_init(&domain->lock); 1774 1731 xa_init(&domain->iommu_array); 1775 1732 ··· 1985 1940 context_pdts(pds); 1986 1941 1987 1942 /* Setup the RID_PASID field: */ 1988 - context_set_sm_rid2pasid(context, PASID_RID2PASID); 1943 + context_set_sm_rid2pasid(context, IOMMU_NO_PASID); 1989 1944 1990 1945 /* 1991 1946 * Setup the Device-TLB enable bit and Page request ··· 2407 2362 2408 2363 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 2409 2364 ret = iommu_domain_identity_map(si_domain, 2410 - mm_to_dma_pfn(start_pfn), 2411 - mm_to_dma_pfn(end_pfn)); 2365 + mm_to_dma_pfn_start(start_pfn), 2366 + mm_to_dma_pfn_end(end_pfn)); 2412 2367 if (ret) 2413 2368 return ret; 2414 2369 } ··· 2429 2384 continue; 2430 2385 2431 2386 ret = iommu_domain_identity_map(si_domain, 2432 - mm_to_dma_pfn(start >> PAGE_SHIFT), 2433 - mm_to_dma_pfn(end >> PAGE_SHIFT)); 2387 + mm_to_dma_pfn_start(start >> PAGE_SHIFT), 2388 + mm_to_dma_pfn_end(end >> PAGE_SHIFT)); 2434 2389 if (ret) 2435 2390 return ret; 2436 2391 } ··· 2465 2420 /* Setup the PASID entry for requests without PASID: */ 2466 2421 if (hw_pass_through && domain_type_is_si(domain)) 2467 2422 ret = intel_pasid_setup_pass_through(iommu, domain, 2468 - dev, PASID_RID2PASID); 2423 + dev, IOMMU_NO_PASID); 2469 2424 else if (domain->use_first_level) 2470 2425 ret = domain_setup_first_level(iommu, domain, dev, 2471 - PASID_RID2PASID); 2426 + IOMMU_NO_PASID); 2472 2427 else 2473 2428 ret = intel_pasid_setup_second_level(iommu, domain, 2474 - dev, PASID_RID2PASID); 2429 + dev, IOMMU_NO_PASID); 2475 2430 if (ret) { 2476 2431 
dev_err(dev, "Setup RID2PASID failed\n"); 2477 2432 device_block_translation(dev); ··· 2489 2444 iommu_enable_pci_caps(info); 2490 2445 2491 2446 return 0; 2492 - } 2493 - 2494 - static bool device_has_rmrr(struct device *dev) 2495 - { 2496 - struct dmar_rmrr_unit *rmrr; 2497 - struct device *tmp; 2498 - int i; 2499 - 2500 - rcu_read_lock(); 2501 - for_each_rmrr_units(rmrr) { 2502 - /* 2503 - * Return TRUE if this RMRR contains the device that 2504 - * is passed in. 2505 - */ 2506 - for_each_active_dev_scope(rmrr->devices, 2507 - rmrr->devices_cnt, i, tmp) 2508 - if (tmp == dev || 2509 - is_downstream_to_pci_bridge(dev, tmp)) { 2510 - rcu_read_unlock(); 2511 - return true; 2512 - } 2513 - } 2514 - rcu_read_unlock(); 2515 - return false; 2516 2447 } 2517 2448 2518 2449 /** ··· 2518 2497 return true; 2519 2498 else 2520 2499 return false; 2521 - } 2522 - 2523 - /* 2524 - * There are a couple cases where we need to restrict the functionality of 2525 - * devices associated with RMRRs. The first is when evaluating a device for 2526 - * identity mapping because problems exist when devices are moved in and out 2527 - * of domains and their respective RMRR information is lost. This means that 2528 - * a device with associated RMRRs will never be in a "passthrough" domain. 2529 - * The second is use of the device through the IOMMU API. This interface 2530 - * expects to have full control of the IOVA space for the device. We cannot 2531 - * satisfy both the requirement that RMRR access is maintained and have an 2532 - * unencumbered IOVA space. We also have no ability to quiesce the device's 2533 - * use of the RMRR space or even inform the IOMMU API user of the restriction. 2534 - * We therefore prevent devices associated with an RMRR from participating in 2535 - * the IOMMU API, which eliminates them from device assignment. 2536 - * 2537 - * In both cases, devices which have relaxable RMRRs are not concerned by this 2538 - * restriction. 
See device_rmrr_is_relaxable comment. 2539 - */ 2540 - static bool device_is_rmrr_locked(struct device *dev) 2541 - { 2542 - if (!device_has_rmrr(dev)) 2543 - return false; 2544 - 2545 - if (device_rmrr_is_relaxable(dev)) 2546 - return false; 2547 - 2548 - return true; 2549 2500 } 2550 2501 2551 2502 /* ··· 3553 3560 unsigned long val, void *v) 3554 3561 { 3555 3562 struct memory_notify *mhp = v; 3556 - unsigned long start_vpfn = mm_to_dma_pfn(mhp->start_pfn); 3557 - unsigned long last_vpfn = mm_to_dma_pfn(mhp->start_pfn + 3563 + unsigned long start_vpfn = mm_to_dma_pfn_start(mhp->start_pfn); 3564 + unsigned long last_vpfn = mm_to_dma_pfn_end(mhp->start_pfn + 3558 3565 mhp->nr_pages - 1); 3559 3566 3560 3567 switch (val) { ··· 3749 3756 for_each_active_dev_scope(drhd->devices, 3750 3757 drhd->devices_cnt, i, dev) { 3751 3758 struct acpi_device_physical_node *pn; 3752 - struct iommu_group *group; 3753 3759 struct acpi_device *adev; 3754 3760 3755 3761 if (dev->bus != &acpi_bus_type) ··· 3758 3766 mutex_lock(&adev->physical_node_lock); 3759 3767 list_for_each_entry(pn, 3760 3768 &adev->physical_node_list, node) { 3761 - group = iommu_group_get(pn->dev); 3762 - if (group) { 3763 - iommu_group_put(group); 3764 - continue; 3765 - } 3766 - 3767 3769 ret = iommu_probe_device(pn->dev); 3768 3770 if (ret) 3769 3771 break; ··· 3954 3968 if (!dev_is_real_dma_subdevice(info->dev)) { 3955 3969 if (dev_is_pci(info->dev) && sm_supported(iommu)) 3956 3970 intel_pasid_tear_down_entry(iommu, info->dev, 3957 - PASID_RID2PASID, false); 3971 + IOMMU_NO_PASID, false); 3958 3972 3959 3973 iommu_disable_pci_caps(info); 3960 3974 domain_context_clear(info); ··· 3983 3997 if (!dev_is_real_dma_subdevice(dev)) { 3984 3998 if (sm_supported(iommu)) 3985 3999 intel_pasid_tear_down_entry(iommu, dev, 3986 - PASID_RID2PASID, false); 4000 + IOMMU_NO_PASID, false); 3987 4001 else 3988 4002 domain_context_clear(info); 3989 4003 } ··· 4125 4139 struct device_domain_info *info = dev_iommu_priv_get(dev); 
4126 4140 int ret; 4127 4141 4128 - if (domain->type == IOMMU_DOMAIN_UNMANAGED && 4129 - device_is_rmrr_locked(dev)) { 4130 - dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n"); 4131 - return -EPERM; 4132 - } 4133 - 4134 4142 if (info->domain) 4135 4143 device_block_translation(dev); 4136 4144 ··· 4251 4271 unsigned long i; 4252 4272 4253 4273 nrpages = aligned_nrpages(gather->start, size); 4254 - start_pfn = mm_to_dma_pfn(iova_pfn); 4274 + start_pfn = mm_to_dma_pfn_start(iova_pfn); 4255 4275 4256 4276 xa_for_each(&dmar_domain->iommu_array, i, info) 4257 4277 iommu_flush_iotlb_psi(info->iommu, dmar_domain, ··· 4311 4331 4312 4332 list_for_each_entry(info, &domain->devices, link) 4313 4333 intel_pasid_setup_page_snoop_control(info->iommu, info->dev, 4314 - PASID_RID2PASID); 4334 + IOMMU_NO_PASID); 4315 4335 } 4316 4336 4317 4337 static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain) ··· 4693 4713 static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid) 4694 4714 { 4695 4715 struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); 4716 + struct dev_pasid_info *curr, *dev_pasid = NULL; 4717 + struct dmar_domain *dmar_domain; 4696 4718 struct iommu_domain *domain; 4719 + unsigned long flags; 4697 4720 4698 - /* Domain type specific cleanup: */ 4699 4721 domain = iommu_get_domain_for_dev_pasid(dev, pasid, 0); 4700 - if (domain) { 4701 - switch (domain->type) { 4702 - case IOMMU_DOMAIN_SVA: 4703 - intel_svm_remove_dev_pasid(dev, pasid); 4704 - break; 4705 - default: 4706 - /* should never reach here */ 4707 - WARN_ON(1); 4722 + if (WARN_ON_ONCE(!domain)) 4723 + goto out_tear_down; 4724 + 4725 + /* 4726 + * The SVA implementation needs to handle its own stuffs like the mm 4727 + * notification. Before consolidating that code into iommu core, let 4728 + * the intel sva code handle it. 
4729 + */ 4730 + if (domain->type == IOMMU_DOMAIN_SVA) { 4731 + intel_svm_remove_dev_pasid(dev, pasid); 4732 + goto out_tear_down; 4733 + } 4734 + 4735 + dmar_domain = to_dmar_domain(domain); 4736 + spin_lock_irqsave(&dmar_domain->lock, flags); 4737 + list_for_each_entry(curr, &dmar_domain->dev_pasids, link_domain) { 4738 + if (curr->dev == dev && curr->pasid == pasid) { 4739 + list_del(&curr->link_domain); 4740 + dev_pasid = curr; 4708 4741 break; 4709 4742 } 4710 4743 } 4744 + WARN_ON_ONCE(!dev_pasid); 4745 + spin_unlock_irqrestore(&dmar_domain->lock, flags); 4711 4746 4747 + domain_detach_iommu(dmar_domain, iommu); 4748 + kfree(dev_pasid); 4749 + out_tear_down: 4712 4750 intel_pasid_tear_down_entry(iommu, dev, pasid, false); 4751 + intel_drain_pasid_prq(dev, pasid); 4752 + } 4753 + 4754 + static int intel_iommu_set_dev_pasid(struct iommu_domain *domain, 4755 + struct device *dev, ioasid_t pasid) 4756 + { 4757 + struct device_domain_info *info = dev_iommu_priv_get(dev); 4758 + struct dmar_domain *dmar_domain = to_dmar_domain(domain); 4759 + struct intel_iommu *iommu = info->iommu; 4760 + struct dev_pasid_info *dev_pasid; 4761 + unsigned long flags; 4762 + int ret; 4763 + 4764 + if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev)) 4765 + return -EOPNOTSUPP; 4766 + 4767 + if (context_copied(iommu, info->bus, info->devfn)) 4768 + return -EBUSY; 4769 + 4770 + ret = prepare_domain_attach_device(domain, dev); 4771 + if (ret) 4772 + return ret; 4773 + 4774 + dev_pasid = kzalloc(sizeof(*dev_pasid), GFP_KERNEL); 4775 + if (!dev_pasid) 4776 + return -ENOMEM; 4777 + 4778 + ret = domain_attach_iommu(dmar_domain, iommu); 4779 + if (ret) 4780 + goto out_free; 4781 + 4782 + if (domain_type_is_si(dmar_domain)) 4783 + ret = intel_pasid_setup_pass_through(iommu, dmar_domain, 4784 + dev, pasid); 4785 + else if (dmar_domain->use_first_level) 4786 + ret = domain_setup_first_level(iommu, dmar_domain, 4787 + dev, pasid); 4788 + else 4789 + ret = 
intel_pasid_setup_second_level(iommu, dmar_domain, 4790 + dev, pasid); 4791 + if (ret) 4792 + goto out_detach_iommu; 4793 + 4794 + dev_pasid->dev = dev; 4795 + dev_pasid->pasid = pasid; 4796 + spin_lock_irqsave(&dmar_domain->lock, flags); 4797 + list_add(&dev_pasid->link_domain, &dmar_domain->dev_pasids); 4798 + spin_unlock_irqrestore(&dmar_domain->lock, flags); 4799 + 4800 + return 0; 4801 + out_detach_iommu: 4802 + domain_detach_iommu(dmar_domain, iommu); 4803 + out_free: 4804 + kfree(dev_pasid); 4805 + return ret; 4713 4806 } 4714 4807 4715 4808 const struct iommu_ops intel_iommu_ops = { ··· 4804 4751 #endif 4805 4752 .default_domain_ops = &(const struct iommu_domain_ops) { 4806 4753 .attach_dev = intel_iommu_attach_device, 4754 + .set_dev_pasid = intel_iommu_set_dev_pasid, 4807 4755 .map_pages = intel_iommu_map_pages, 4808 4756 .unmap_pages = intel_iommu_unmap_pages, 4809 4757 .iotlb_sync_map = intel_iommu_iotlb_sync_map, ··· 5041 4987 return; 5042 4988 5043 4989 sid = PCI_DEVID(info->bus, info->devfn); 5044 - if (pasid == PASID_RID2PASID) { 4990 + if (pasid == IOMMU_NO_PASID) { 5045 4991 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid, 5046 4992 qdep, address, mask); 5047 4993 } else {
+9
drivers/iommu/intel/iommu.h
··· 595 595 596 596 spinlock_t lock; /* Protect device tracking lists */ 597 597 struct list_head devices; /* all devices' list */ 598 + struct list_head dev_pasids; /* all attached pasids */ 598 599 599 600 struct dma_pte *pgd; /* virtual address */ 600 601 int gaw; /* max guest address width */ ··· 716 715 struct intel_iommu *iommu; /* IOMMU used by this device */ 717 716 struct dmar_domain *domain; /* pointer to domain */ 718 717 struct pasid_table *pasid_table; /* pasid table */ 718 + }; 719 + 720 + struct dev_pasid_info { 721 + struct list_head link_domain; /* link to domain siblings */ 722 + struct device *dev; 723 + ioasid_t pasid; 719 724 }; 720 725 721 726 static inline void __iommu_flush_cache( ··· 851 844 struct iommu_page_response *msg); 852 845 struct iommu_domain *intel_svm_domain_alloc(void); 853 846 void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid); 847 + void intel_drain_pasid_prq(struct device *dev, u32 pasid); 854 848 855 849 struct intel_svm_dev { 856 850 struct list_head list; ··· 870 862 }; 871 863 #else 872 864 static inline void intel_svm_check(struct intel_iommu *iommu) {} 865 + static inline void intel_drain_pasid_prq(struct device *dev, u32 pasid) {} 873 866 static inline struct iommu_domain *intel_svm_domain_alloc(void) 874 867 { 875 868 return NULL;
+2 -2
drivers/iommu/intel/pasid.c
··· 129 129 info->pasid_table = pasid_table; 130 130 131 131 if (!ecap_coherent(info->iommu->ecap)) 132 - clflush_cache_range(pasid_table->table, size); 132 + clflush_cache_range(pasid_table->table, (1 << order) * PAGE_SIZE); 133 133 134 134 return 0; 135 135 } ··· 438 438 * SVA usage, device could do DMA with multiple PASIDs. It is more 439 439 * efficient to flush devTLB specific to the PASID. 440 440 */ 441 - if (pasid == PASID_RID2PASID) 441 + if (pasid == IOMMU_NO_PASID) 442 442 qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT); 443 443 else 444 444 qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT);
-2
drivers/iommu/intel/pasid.h
··· 10 10 #ifndef __INTEL_PASID_H 11 11 #define __INTEL_PASID_H 12 12 13 - #define PASID_RID2PASID 0x0 14 - #define PASID_MIN 0x1 15 13 #define PASID_MAX 0x100000 16 14 #define PASID_PTE_MASK 0x3F 17 15 #define PASID_PTE_PRESENT 1
+7 -55
drivers/iommu/intel/svm.c
··· 26 26 #include "trace.h" 27 27 28 28 static irqreturn_t prq_event_thread(int irq, void *d); 29 - static void intel_svm_drain_prq(struct device *dev, u32 pasid); 30 - #define to_intel_svm_dev(handle) container_of(handle, struct intel_svm_dev, sva) 31 29 32 30 static DEFINE_XARRAY_ALLOC(pasid_private_array); 33 31 static int pasid_private_add(ioasid_t pasid, void *priv) ··· 257 259 .invalidate_range = intel_invalidate_range, 258 260 }; 259 261 260 - static DEFINE_MUTEX(pasid_mutex); 261 - 262 262 static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid, 263 263 struct intel_svm **rsvm, 264 264 struct intel_svm_dev **rsdev) 265 265 { 266 266 struct intel_svm_dev *sdev = NULL; 267 267 struct intel_svm *svm; 268 - 269 - /* The caller should hold the pasid_mutex lock */ 270 - if (WARN_ON(!mutex_is_locked(&pasid_mutex))) 271 - return -EINVAL; 272 268 273 269 if (pasid == IOMMU_PASID_INVALID || pasid >= PASID_MAX) 274 270 return -EINVAL; ··· 363 371 return ret; 364 372 } 365 373 366 - /* Caller must hold pasid_mutex */ 367 - static int intel_svm_unbind_mm(struct device *dev, u32 pasid) 374 + void intel_svm_remove_dev_pasid(struct device *dev, u32 pasid) 368 375 { 369 376 struct intel_svm_dev *sdev; 370 377 struct intel_iommu *iommu; 371 378 struct intel_svm *svm; 372 379 struct mm_struct *mm; 373 - int ret = -EINVAL; 374 380 375 381 iommu = device_to_iommu(dev, NULL, NULL); 376 382 if (!iommu) 377 - goto out; 383 + return; 378 384 379 - ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev); 380 - if (ret) 381 - goto out; 385 + if (pasid_to_svm_sdev(dev, pasid, &svm, &sdev)) 386 + return; 382 387 mm = svm->mm; 383 388 384 389 if (sdev) { 385 390 list_del_rcu(&sdev->list); 386 - /* 387 - * Flush the PASID cache and IOTLB for this device. 388 - * Note that we do depend on the hardware *not* using 389 - * the PASID any more. Just as we depend on other 390 - * devices never using PASIDs that they have no right 391 - * to use. 
We have a *shared* PASID table, because it's 392 - * large and has to be physically contiguous. So it's 393 - * hard to be as defensive as we might like. 394 - */ 395 - intel_pasid_tear_down_entry(iommu, dev, svm->pasid, false); 396 - intel_svm_drain_prq(dev, svm->pasid); 397 391 kfree_rcu(sdev, rcu); 398 392 399 393 if (list_empty(&svm->devs)) { ··· 396 418 kfree(svm); 397 419 } 398 420 } 399 - out: 400 - return ret; 401 421 } 402 422 403 423 /* Page request queue descriptor */ ··· 436 460 } 437 461 438 462 /** 439 - * intel_svm_drain_prq - Drain page requests and responses for a pasid 463 + * intel_drain_pasid_prq - Drain page requests and responses for a pasid 440 464 * @dev: target device 441 465 * @pasid: pasid for draining 442 466 * ··· 450 474 * described in VT-d spec CH7.10 to drain all page requests and page 451 475 * responses pending in the hardware. 452 476 */ 453 - static void intel_svm_drain_prq(struct device *dev, u32 pasid) 477 + void intel_drain_pasid_prq(struct device *dev, u32 pasid) 454 478 { 455 479 struct device_domain_info *info; 456 480 struct dmar_domain *domain; ··· 496 520 goto prq_retry; 497 521 } 498 522 499 - /* 500 - * A work in IO page fault workqueue may try to lock pasid_mutex now. 501 - * Holding pasid_mutex while waiting in iopf_queue_flush_dev() for 502 - * all works in the workqueue to finish may cause deadlock. 503 - * 504 - * It's unnecessary to hold pasid_mutex in iopf_queue_flush_dev(). 505 - * Unlock it to allow the works to be handled while waiting for 506 - * them to finish. 
507 - */ 508 - lockdep_assert_held(&pasid_mutex); 509 - mutex_unlock(&pasid_mutex); 510 523 iopf_queue_flush_dev(dev); 511 - mutex_lock(&pasid_mutex); 512 524 513 525 /* 514 526 * Perform steps described in VT-d spec CH7.10 to drain page ··· 791 827 return ret; 792 828 } 793 829 794 - void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid) 795 - { 796 - mutex_lock(&pasid_mutex); 797 - intel_svm_unbind_mm(dev, pasid); 798 - mutex_unlock(&pasid_mutex); 799 - } 800 - 801 830 static int intel_svm_set_dev_pasid(struct iommu_domain *domain, 802 831 struct device *dev, ioasid_t pasid) 803 832 { 804 833 struct device_domain_info *info = dev_iommu_priv_get(dev); 805 834 struct intel_iommu *iommu = info->iommu; 806 835 struct mm_struct *mm = domain->mm; 807 - int ret; 808 836 809 - mutex_lock(&pasid_mutex); 810 - ret = intel_svm_bind_mm(iommu, dev, mm); 811 - mutex_unlock(&pasid_mutex); 812 - 813 - return ret; 837 + return intel_svm_bind_mm(iommu, dev, mm); 814 838 } 815 839 816 840 static void intel_svm_domain_free(struct iommu_domain *domain)
+10 -19
drivers/iommu/iommu-sva.c
··· 10 10 #include "iommu-sva.h" 11 11 12 12 static DEFINE_MUTEX(iommu_sva_lock); 13 - static DEFINE_IDA(iommu_global_pasid_ida); 14 13 15 14 /* Allocate a PASID for the mm within range (inclusive) */ 16 - static int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max) 15 + static int iommu_sva_alloc_pasid(struct mm_struct *mm, struct device *dev) 17 16 { 17 + ioasid_t pasid; 18 18 int ret = 0; 19 - 20 - if (min == IOMMU_PASID_INVALID || 21 - max == IOMMU_PASID_INVALID || 22 - min == 0 || max < min) 23 - return -EINVAL; 24 19 25 20 if (!arch_pgtable_dma_compat(mm)) 26 21 return -EBUSY; ··· 23 28 mutex_lock(&iommu_sva_lock); 24 29 /* Is a PASID already associated with this mm? */ 25 30 if (mm_valid_pasid(mm)) { 26 - if (mm->pasid < min || mm->pasid > max) 31 + if (mm->pasid >= dev->iommu->max_pasids) 27 32 ret = -EOVERFLOW; 28 33 goto out; 29 34 } 30 35 31 - ret = ida_alloc_range(&iommu_global_pasid_ida, min, max, GFP_KERNEL); 32 - if (ret < 0) 36 + pasid = iommu_alloc_global_pasid(dev); 37 + if (pasid == IOMMU_PASID_INVALID) { 38 + ret = -ENOSPC; 33 39 goto out; 34 - 35 - mm->pasid = ret; 40 + } 41 + mm->pasid = pasid; 36 42 ret = 0; 37 43 out: 38 44 mutex_unlock(&iommu_sva_lock); ··· 60 64 { 61 65 struct iommu_domain *domain; 62 66 struct iommu_sva *handle; 63 - ioasid_t max_pasids; 64 67 int ret; 65 68 66 - max_pasids = dev->iommu->max_pasids; 67 - if (!max_pasids) 68 - return ERR_PTR(-EOPNOTSUPP); 69 - 70 69 /* Allocate mm->pasid if necessary. */ 71 - ret = iommu_sva_alloc_pasid(mm, 1, max_pasids - 1); 70 + ret = iommu_sva_alloc_pasid(mm, dev); 72 71 if (ret) 73 72 return ERR_PTR(ret); 74 73 ··· 208 217 if (likely(!mm_valid_pasid(mm))) 209 218 return; 210 219 211 - ida_free(&iommu_global_pasid_ida, mm->pasid); 220 + iommu_free_global_pasid(mm->pasid); 212 221 }
-8
drivers/iommu/iommu-sysfs.c
··· 107 107 { 108 108 int ret; 109 109 110 - if (!iommu || IS_ERR(iommu)) 111 - return -ENODEV; 112 - 113 110 ret = sysfs_add_link_to_group(&iommu->dev->kobj, "devices", 114 111 &link->kobj, dev_name(link)); 115 112 if (ret) ··· 119 122 120 123 return ret; 121 124 } 122 - EXPORT_SYMBOL_GPL(iommu_device_link); 123 125 124 126 void iommu_device_unlink(struct iommu_device *iommu, struct device *link) 125 127 { 126 - if (!iommu || IS_ERR(iommu)) 127 - return; 128 - 129 128 sysfs_remove_link(&link->kobj, "iommu"); 130 129 sysfs_remove_link_from_group(&iommu->dev->kobj, "devices", dev_name(link)); 131 130 } 132 - EXPORT_SYMBOL_GPL(iommu_device_unlink);
+296 -237
drivers/iommu/iommu.c
··· 39 39 40 40 static struct kset *iommu_group_kset; 41 41 static DEFINE_IDA(iommu_group_ida); 42 + static DEFINE_IDA(iommu_global_pasid_ida); 42 43 43 44 static unsigned int iommu_def_domain_type __read_mostly; 44 45 static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT); ··· 128 127 int target_type); 129 128 static int iommu_create_device_direct_mappings(struct iommu_domain *domain, 130 129 struct device *dev); 131 - static struct iommu_group *iommu_group_get_for_dev(struct device *dev); 132 130 static ssize_t iommu_group_store_type(struct iommu_group *group, 133 131 const char *buf, size_t count); 132 + static struct group_device *iommu_group_alloc_device(struct iommu_group *group, 133 + struct device *dev); 134 + static void __iommu_group_free_device(struct iommu_group *group, 135 + struct group_device *grp_dev); 134 136 135 137 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ 136 138 struct iommu_group_attribute iommu_group_attr_##_name = \ ··· 337 333 return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids); 338 334 } 339 335 336 + /* 337 + * Init the dev->iommu and dev->iommu_group in the struct device and get the 338 + * driver probed 339 + */ 340 + static int iommu_init_device(struct device *dev, const struct iommu_ops *ops) 341 + { 342 + struct iommu_device *iommu_dev; 343 + struct iommu_group *group; 344 + int ret; 345 + 346 + if (!dev_iommu_get(dev)) 347 + return -ENOMEM; 348 + 349 + if (!try_module_get(ops->owner)) { 350 + ret = -EINVAL; 351 + goto err_free; 352 + } 353 + 354 + iommu_dev = ops->probe_device(dev); 355 + if (IS_ERR(iommu_dev)) { 356 + ret = PTR_ERR(iommu_dev); 357 + goto err_module_put; 358 + } 359 + 360 + ret = iommu_device_link(iommu_dev, dev); 361 + if (ret) 362 + goto err_release; 363 + 364 + group = ops->device_group(dev); 365 + if (WARN_ON_ONCE(group == NULL)) 366 + group = ERR_PTR(-EINVAL); 367 + if (IS_ERR(group)) { 368 + ret = PTR_ERR(group); 369 + goto err_unlink; 370 + } 371 + 
dev->iommu_group = group; 372 + 373 + dev->iommu->iommu_dev = iommu_dev; 374 + dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev); 375 + if (ops->is_attach_deferred) 376 + dev->iommu->attach_deferred = ops->is_attach_deferred(dev); 377 + return 0; 378 + 379 + err_unlink: 380 + iommu_device_unlink(iommu_dev, dev); 381 + err_release: 382 + if (ops->release_device) 383 + ops->release_device(dev); 384 + err_module_put: 385 + module_put(ops->owner); 386 + err_free: 387 + dev_iommu_free(dev); 388 + return ret; 389 + } 390 + 391 + static void iommu_deinit_device(struct device *dev) 392 + { 393 + struct iommu_group *group = dev->iommu_group; 394 + const struct iommu_ops *ops = dev_iommu_ops(dev); 395 + 396 + lockdep_assert_held(&group->mutex); 397 + 398 + iommu_device_unlink(dev->iommu->iommu_dev, dev); 399 + 400 + /* 401 + * release_device() must stop using any attached domain on the device. 402 + * If there are still other devices in the group they are not affected 403 + * by this callback. 404 + * 405 + * The IOMMU driver must set the device to either an identity or 406 + * blocking translation and stop using any domain pointer, as it is 407 + * going to be freed. 408 + */ 409 + if (ops->release_device) 410 + ops->release_device(dev); 411 + 412 + /* 413 + * If this is the last driver to use the group then we must free the 414 + * domains before we do the module_put(). 
415 + */ 416 + if (list_empty(&group->devices)) { 417 + if (group->default_domain) { 418 + iommu_domain_free(group->default_domain); 419 + group->default_domain = NULL; 420 + } 421 + if (group->blocking_domain) { 422 + iommu_domain_free(group->blocking_domain); 423 + group->blocking_domain = NULL; 424 + } 425 + group->domain = NULL; 426 + } 427 + 428 + /* Caller must put iommu_group */ 429 + dev->iommu_group = NULL; 430 + module_put(ops->owner); 431 + dev_iommu_free(dev); 432 + } 433 + 340 434 static int __iommu_probe_device(struct device *dev, struct list_head *group_list) 341 435 { 342 436 const struct iommu_ops *ops = dev->bus->iommu_ops; 343 - struct iommu_device *iommu_dev; 344 437 struct iommu_group *group; 345 438 static DEFINE_MUTEX(iommu_probe_device_lock); 439 + struct group_device *gdev; 346 440 int ret; 347 441 348 442 if (!ops) ··· 453 351 * but for now enforcing a simple global ordering is fine. 454 352 */ 455 353 mutex_lock(&iommu_probe_device_lock); 456 - if (!dev_iommu_get(dev)) { 457 - ret = -ENOMEM; 458 - goto err_unlock; 354 + 355 + /* Device is probed already if in a group */ 356 + if (dev->iommu_group) { 357 + ret = 0; 358 + goto out_unlock; 459 359 } 460 360 461 - if (!try_module_get(ops->owner)) { 462 - ret = -EINVAL; 463 - goto err_free; 464 - } 361 + ret = iommu_init_device(dev, ops); 362 + if (ret) 363 + goto out_unlock; 465 364 466 - iommu_dev = ops->probe_device(dev); 467 - if (IS_ERR(iommu_dev)) { 468 - ret = PTR_ERR(iommu_dev); 469 - goto out_module_put; 470 - } 471 - 472 - dev->iommu->iommu_dev = iommu_dev; 473 - dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev); 474 - if (ops->is_attach_deferred) 475 - dev->iommu->attach_deferred = ops->is_attach_deferred(dev); 476 - 477 - group = iommu_group_get_for_dev(dev); 478 - if (IS_ERR(group)) { 479 - ret = PTR_ERR(group); 480 - goto out_release; 481 - } 482 - 365 + group = dev->iommu_group; 366 + gdev = iommu_group_alloc_device(group, dev); 483 367 mutex_lock(&group->mutex); 484 - if 
(group_list && !group->default_domain && list_empty(&group->entry)) 485 - list_add_tail(&group->entry, group_list); 486 - mutex_unlock(&group->mutex); 487 - iommu_group_put(group); 368 + if (IS_ERR(gdev)) { 369 + ret = PTR_ERR(gdev); 370 + goto err_put_group; 371 + } 488 372 373 + /* 374 + * The gdev must be in the list before calling 375 + * iommu_setup_default_domain() 376 + */ 377 + list_add_tail(&gdev->list, &group->devices); 378 + WARN_ON(group->default_domain && !group->domain); 379 + if (group->default_domain) 380 + iommu_create_device_direct_mappings(group->default_domain, dev); 381 + if (group->domain) { 382 + ret = __iommu_device_set_domain(group, dev, group->domain, 0); 383 + if (ret) 384 + goto err_remove_gdev; 385 + } else if (!group->default_domain && !group_list) { 386 + ret = iommu_setup_default_domain(group, 0); 387 + if (ret) 388 + goto err_remove_gdev; 389 + } else if (!group->default_domain) { 390 + /* 391 + * With a group_list argument we defer the default_domain setup 392 + * to the caller by providing a de-duplicated list of groups 393 + * that need further setup. 
394 + */ 395 + if (list_empty(&group->entry)) 396 + list_add_tail(&group->entry, group_list); 397 + } 398 + mutex_unlock(&group->mutex); 489 399 mutex_unlock(&iommu_probe_device_lock); 490 - iommu_device_link(iommu_dev, dev); 400 + 401 + if (dev_is_pci(dev)) 402 + iommu_dma_set_pci_32bit_workaround(dev); 491 403 492 404 return 0; 493 405 494 - out_release: 495 - if (ops->release_device) 496 - ops->release_device(dev); 497 - 498 - out_module_put: 499 - module_put(ops->owner); 500 - 501 - err_free: 502 - dev_iommu_free(dev); 503 - 504 - err_unlock: 406 + err_remove_gdev: 407 + list_del(&gdev->list); 408 + __iommu_group_free_device(group, gdev); 409 + err_put_group: 410 + iommu_deinit_device(dev); 411 + mutex_unlock(&group->mutex); 412 + iommu_group_put(group); 413 + out_unlock: 505 414 mutex_unlock(&iommu_probe_device_lock); 506 415 507 416 return ret; ··· 521 408 int iommu_probe_device(struct device *dev) 522 409 { 523 410 const struct iommu_ops *ops; 524 - struct iommu_group *group; 525 411 int ret; 526 412 527 413 ret = __iommu_probe_device(dev, NULL); 528 414 if (ret) 529 - goto err_out; 530 - 531 - group = iommu_group_get(dev); 532 - if (!group) { 533 - ret = -ENODEV; 534 - goto err_release; 535 - } 536 - 537 - mutex_lock(&group->mutex); 538 - 539 - if (group->default_domain) 540 - iommu_create_device_direct_mappings(group->default_domain, dev); 541 - 542 - if (group->domain) { 543 - ret = __iommu_device_set_domain(group, dev, group->domain, 0); 544 - if (ret) 545 - goto err_unlock; 546 - } else if (!group->default_domain) { 547 - ret = iommu_setup_default_domain(group, 0); 548 - if (ret) 549 - goto err_unlock; 550 - } 551 - 552 - mutex_unlock(&group->mutex); 553 - iommu_group_put(group); 415 + return ret; 554 416 555 417 ops = dev_iommu_ops(dev); 556 418 if (ops->probe_finalize) 557 419 ops->probe_finalize(dev); 558 420 559 421 return 0; 560 - 561 - err_unlock: 562 - mutex_unlock(&group->mutex); 563 - iommu_group_put(group); 564 - err_release: 565 - 
iommu_release_device(dev); 566 - 567 - err_out: 568 - return ret; 569 - 570 422 } 571 423 572 - /* 573 - * Remove a device from a group's device list and return the group device 574 - * if successful. 575 - */ 576 - static struct group_device * 577 - __iommu_group_remove_device(struct iommu_group *group, struct device *dev) 578 - { 579 - struct group_device *device; 580 - 581 - lockdep_assert_held(&group->mutex); 582 - for_each_group_device(group, device) { 583 - if (device->dev == dev) { 584 - list_del(&device->list); 585 - return device; 586 - } 587 - } 588 - 589 - return NULL; 590 - } 591 - 592 - /* 593 - * Release a device from its group and decrements the iommu group reference 594 - * count. 595 - */ 596 - static void __iommu_group_release_device(struct iommu_group *group, 597 - struct group_device *grp_dev) 424 + static void __iommu_group_free_device(struct iommu_group *group, 425 + struct group_device *grp_dev) 598 426 { 599 427 struct device *dev = grp_dev->dev; 600 428 ··· 544 490 545 491 trace_remove_device_from_group(group->id, dev); 546 492 547 - kfree(grp_dev->name); 548 - kfree(grp_dev); 549 - dev->iommu_group = NULL; 550 - kobject_put(group->devices_kobj); 551 - } 552 - 553 - static void iommu_release_device(struct device *dev) 554 - { 555 - struct iommu_group *group = dev->iommu_group; 556 - struct group_device *device; 557 - const struct iommu_ops *ops; 558 - 559 - if (!dev->iommu || !group) 560 - return; 561 - 562 - iommu_device_unlink(dev->iommu->iommu_dev, dev); 563 - 564 - mutex_lock(&group->mutex); 565 - device = __iommu_group_remove_device(group, dev); 566 - 567 493 /* 568 - * If the group has become empty then ownership must have been released, 569 - * and the current domain must be set back to NULL or the default 570 - * domain. 494 + * If the group has become empty then ownership must have been 495 + * released, and the current domain must be set back to NULL or 496 + * the default domain. 
571 497 */ 572 498 if (list_empty(&group->devices)) 573 499 WARN_ON(group->owner_cnt || 574 500 group->domain != group->default_domain); 575 501 576 - /* 577 - * release_device() must stop using any attached domain on the device. 578 - * If there are still other devices in the group they are not effected 579 - * by this callback. 580 - * 581 - * The IOMMU driver must set the device to either an identity or 582 - * blocking translation and stop using any domain pointer, as it is 583 - * going to be freed. 584 - */ 585 - ops = dev_iommu_ops(dev); 586 - if (ops->release_device) 587 - ops->release_device(dev); 502 + kfree(grp_dev->name); 503 + kfree(grp_dev); 504 + } 505 + 506 + /* Remove the iommu_group from the struct device. */ 507 + static void __iommu_group_remove_device(struct device *dev) 508 + { 509 + struct iommu_group *group = dev->iommu_group; 510 + struct group_device *device; 511 + 512 + mutex_lock(&group->mutex); 513 + for_each_group_device(group, device) { 514 + if (device->dev != dev) 515 + continue; 516 + 517 + list_del(&device->list); 518 + __iommu_group_free_device(group, device); 519 + if (dev->iommu && dev->iommu->iommu_dev) 520 + iommu_deinit_device(dev); 521 + else 522 + dev->iommu_group = NULL; 523 + break; 524 + } 588 525 mutex_unlock(&group->mutex); 589 526 590 - if (device) 591 - __iommu_group_release_device(group, device); 527 + /* 528 + * Pairs with the get in iommu_init_device() or 529 + * iommu_group_add_device() 530 + */ 531 + iommu_group_put(group); 532 + } 592 533 593 - module_put(ops->owner); 594 - dev_iommu_free(dev); 534 + static void iommu_release_device(struct device *dev) 535 + { 536 + struct iommu_group *group = dev->iommu_group; 537 + 538 + if (group) 539 + __iommu_group_remove_device(dev); 540 + 541 + /* Free any fwspec if no iommu_driver was ever attached */ 542 + if (dev->iommu) 543 + dev_iommu_free(dev); 595 544 } 596 545 597 546 static int __init iommu_set_def_domain_type(char *str) ··· 855 798 856 799 
ida_free(&iommu_group_ida, group->id); 857 800 858 - if (group->default_domain) 859 - iommu_domain_free(group->default_domain); 860 - if (group->blocking_domain) 861 - iommu_domain_free(group->blocking_domain); 801 + /* Domains are free'd by iommu_deinit_device() */ 802 + WARN_ON(group->default_domain); 803 + WARN_ON(group->blocking_domain); 862 804 863 805 kfree(group->name); 864 806 kfree(group); ··· 1015 959 unsigned long pg_size; 1016 960 int ret = 0; 1017 961 1018 - if (!iommu_is_dma_domain(domain)) 1019 - return 0; 1020 - 1021 - BUG_ON(!domain->pgsize_bitmap); 1022 - 1023 - pg_size = 1UL << __ffs(domain->pgsize_bitmap); 962 + pg_size = domain->pgsize_bitmap ? 1UL << __ffs(domain->pgsize_bitmap) : 0; 1024 963 INIT_LIST_HEAD(&mappings); 964 + 965 + if (WARN_ON_ONCE(iommu_is_dma_domain(domain) && !pg_size)) 966 + return -EINVAL; 1025 967 1026 968 iommu_get_resv_regions(dev, &mappings); 1027 969 ··· 1028 974 dma_addr_t start, end, addr; 1029 975 size_t map_size = 0; 1030 976 977 + if (entry->type == IOMMU_RESV_DIRECT) 978 + dev->iommu->require_direct = 1; 979 + 980 + if ((entry->type != IOMMU_RESV_DIRECT && 981 + entry->type != IOMMU_RESV_DIRECT_RELAXABLE) || 982 + !iommu_is_dma_domain(domain)) 983 + continue; 984 + 1031 985 start = ALIGN(entry->start, pg_size); 1032 986 end = ALIGN(entry->start + entry->length, pg_size); 1033 - 1034 - if (entry->type != IOMMU_RESV_DIRECT && 1035 - entry->type != IOMMU_RESV_DIRECT_RELAXABLE) 1036 - continue; 1037 987 1038 988 for (addr = start; addr <= end; addr += pg_size) { 1039 989 phys_addr_t phys_addr; ··· 1072 1014 return ret; 1073 1015 } 1074 1016 1075 - /** 1076 - * iommu_group_add_device - add a device to an iommu group 1077 - * @group: the group into which to add the device (reference should be held) 1078 - * @dev: the device 1079 - * 1080 - * This function is called by an iommu driver to add a device into a 1081 - * group. Adding a device increments the group reference count. 
1082 - */ 1083 - int iommu_group_add_device(struct iommu_group *group, struct device *dev) 1017 + /* This is undone by __iommu_group_free_device() */ 1018 + static struct group_device *iommu_group_alloc_device(struct iommu_group *group, 1019 + struct device *dev) 1084 1020 { 1085 1021 int ret, i = 0; 1086 1022 struct group_device *device; 1087 1023 1088 1024 device = kzalloc(sizeof(*device), GFP_KERNEL); 1089 1025 if (!device) 1090 - return -ENOMEM; 1026 + return ERR_PTR(-ENOMEM); 1091 1027 1092 1028 device->dev = dev; 1093 1029 ··· 1112 1060 goto err_free_name; 1113 1061 } 1114 1062 1115 - kobject_get(group->devices_kobj); 1116 - 1117 - dev->iommu_group = group; 1118 - 1119 - mutex_lock(&group->mutex); 1120 - list_add_tail(&device->list, &group->devices); 1121 - mutex_unlock(&group->mutex); 1122 1063 trace_add_device_to_group(group->id, dev); 1123 1064 1124 1065 dev_info(dev, "Adding to iommu group %d\n", group->id); 1125 1066 1126 - return 0; 1067 + return device; 1127 1068 1128 1069 err_free_name: 1129 1070 kfree(device->name); ··· 1125 1080 err_free_device: 1126 1081 kfree(device); 1127 1082 dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret); 1128 - return ret; 1083 + return ERR_PTR(ret); 1084 + } 1085 + 1086 + /** 1087 + * iommu_group_add_device - add a device to an iommu group 1088 + * @group: the group into which to add the device (reference should be held) 1089 + * @dev: the device 1090 + * 1091 + * This function is called by an iommu driver to add a device into a 1092 + * group. Adding a device increments the group reference count. 
1093 + */ 1094 + int iommu_group_add_device(struct iommu_group *group, struct device *dev) 1095 + { 1096 + struct group_device *gdev; 1097 + 1098 + gdev = iommu_group_alloc_device(group, dev); 1099 + if (IS_ERR(gdev)) 1100 + return PTR_ERR(gdev); 1101 + 1102 + iommu_group_ref_get(group); 1103 + dev->iommu_group = group; 1104 + 1105 + mutex_lock(&group->mutex); 1106 + list_add_tail(&gdev->list, &group->devices); 1107 + mutex_unlock(&group->mutex); 1108 + return 0; 1129 1109 } 1130 1110 EXPORT_SYMBOL_GPL(iommu_group_add_device); 1131 1111 ··· 1164 1094 void iommu_group_remove_device(struct device *dev) 1165 1095 { 1166 1096 struct iommu_group *group = dev->iommu_group; 1167 - struct group_device *device; 1168 1097 1169 1098 if (!group) 1170 1099 return; 1171 1100 1172 1101 dev_info(dev, "Removing from iommu group %d\n", group->id); 1173 1102 1174 - mutex_lock(&group->mutex); 1175 - device = __iommu_group_remove_device(group, dev); 1176 - mutex_unlock(&group->mutex); 1177 - 1178 - if (device) 1179 - __iommu_group_release_device(group, device); 1103 + __iommu_group_remove_device(dev); 1180 1104 } 1181 1105 EXPORT_SYMBOL_GPL(iommu_group_remove_device); 1182 1106 ··· 1728 1664 return dom; 1729 1665 } 1730 1666 1731 - /** 1732 - * iommu_group_get_for_dev - Find or create the IOMMU group for a device 1733 - * @dev: target device 1734 - * 1735 - * This function is intended to be called by IOMMU drivers and extended to 1736 - * support common, bus-defined algorithms when determining or creating the 1737 - * IOMMU group for a device. On success, the caller will hold a reference 1738 - * to the returned IOMMU group, which will already include the provided 1739 - * device. The reference should be released with iommu_group_put(). 
1740 - */ 1741 - static struct iommu_group *iommu_group_get_for_dev(struct device *dev) 1742 - { 1743 - const struct iommu_ops *ops = dev_iommu_ops(dev); 1744 - struct iommu_group *group; 1745 - int ret; 1746 - 1747 - group = iommu_group_get(dev); 1748 - if (group) 1749 - return group; 1750 - 1751 - group = ops->device_group(dev); 1752 - if (WARN_ON_ONCE(group == NULL)) 1753 - return ERR_PTR(-EINVAL); 1754 - 1755 - if (IS_ERR(group)) 1756 - return group; 1757 - 1758 - ret = iommu_group_add_device(group, dev); 1759 - if (ret) 1760 - goto out_put_group; 1761 - 1762 - return group; 1763 - 1764 - out_put_group: 1765 - iommu_group_put(group); 1766 - 1767 - return ERR_PTR(ret); 1768 - } 1769 - 1770 1667 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group) 1771 1668 { 1772 1669 return group->default_domain; ··· 1736 1711 static int probe_iommu_group(struct device *dev, void *data) 1737 1712 { 1738 1713 struct list_head *group_list = data; 1739 - struct iommu_group *group; 1740 1714 int ret; 1741 - 1742 - /* Device is probed already if in a group */ 1743 - group = iommu_group_get(dev); 1744 - if (group) { 1745 - iommu_group_put(group); 1746 - return 0; 1747 - } 1748 1715 1749 1716 ret = __iommu_probe_device(dev, group_list); 1750 1717 if (ret == -ENODEV) ··· 1813 1796 LIST_HEAD(group_list); 1814 1797 int ret; 1815 1798 1816 - /* 1817 - * This code-path does not allocate the default domain when 1818 - * creating the iommu group, so do it after the groups are 1819 - * created. 1820 - */ 1821 1799 ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group); 1822 1800 if (ret) 1823 1801 return ret; ··· 1825 1813 /* Remove item from the list */ 1826 1814 list_del_init(&group->entry); 1827 1815 1816 + /* 1817 + * We go to the trouble of deferred default domain creation so 1818 + * that the cross-group default domain type and the setup of the 1819 + * IOMMU_RESV_DIRECT will work correctly in non-hotplug scenarios. 
1820 + */ 1828 1821 ret = iommu_setup_default_domain(group, 0); 1829 1822 if (ret) { 1830 1823 mutex_unlock(&group->mutex); ··· 2137 2120 unsigned int flags) 2138 2121 { 2139 2122 int ret; 2123 + 2124 + /* 2125 + * If the device requires IOMMU_RESV_DIRECT then we cannot allow 2126 + * the blocking domain to be attached as it does not contain the 2127 + * required 1:1 mapping. This test effectively excludes the device 2128 + * being used with iommu_group_claim_dma_owner() which will block 2129 + * vfio and iommufd as well. 2130 + */ 2131 + if (dev->iommu->require_direct && 2132 + (new_domain->type == IOMMU_DOMAIN_BLOCKED || 2133 + new_domain == group->blocking_domain)) { 2134 + dev_warn(dev, 2135 + "Firmware has requested this device have a 1:1 IOMMU mapping, rejecting configuring the device without a 1:1 mapping. Contact your platform vendor.\n"); 2136 + return -EINVAL; 2137 + } 2140 2138 2141 2139 if (dev->iommu->attach_deferred) { 2142 2140 if (new_domain == group->default_domain) ··· 3235 3203 3236 3204 /** 3237 3205 * iommu_group_release_dma_owner() - Release DMA ownership of a group 3238 - * @dev: The device 3206 + * @group: The group 3239 3207 * 3240 3208 * Release the DMA ownership claimed by iommu_group_claim_dma_owner(). 3241 3209 */ ··· 3249 3217 3250 3218 /** 3251 3219 * iommu_device_release_dma_owner() - Release DMA ownership of a device 3252 - * @group: The device. 3220 + * @dev: The device. 3253 3221 * 3254 3222 * Release the DMA ownership claimed by iommu_device_claim_dma_owner(). 3255 3223 */ ··· 3432 3400 3433 3401 return domain; 3434 3402 } 3403 + 3404 + ioasid_t iommu_alloc_global_pasid(struct device *dev) 3405 + { 3406 + int ret; 3407 + 3408 + /* max_pasids == 0 means that the device does not support PASID */ 3409 + if (!dev->iommu->max_pasids) 3410 + return IOMMU_PASID_INVALID; 3411 + 3412 + /* 3413 + * max_pasids is set up by vendor driver based on number of PASID bits 3414 + * supported but the IDA allocation is inclusive. 
3415 + */ 3416 + ret = ida_alloc_range(&iommu_global_pasid_ida, IOMMU_FIRST_GLOBAL_PASID, 3417 + dev->iommu->max_pasids - 1, GFP_KERNEL); 3418 + return ret < 0 ? IOMMU_PASID_INVALID : ret; 3419 + } 3420 + EXPORT_SYMBOL_GPL(iommu_alloc_global_pasid); 3421 + 3422 + void iommu_free_global_pasid(ioasid_t pasid) 3423 + { 3424 + if (WARN_ON(pasid == IOMMU_PASID_INVALID)) 3425 + return; 3426 + 3427 + ida_free(&iommu_global_pasid_ida, pasid); 3428 + } 3429 + EXPORT_SYMBOL_GPL(iommu_free_global_pasid);
+11 -10
drivers/iommu/ipmmu-vmsa.c
··· 14 14 #include <linux/init.h> 15 15 #include <linux/interrupt.h> 16 16 #include <linux/io.h> 17 + #include <linux/iopoll.h> 17 18 #include <linux/io-pgtable.h> 18 19 #include <linux/iommu.h> 19 20 #include <linux/of.h> 20 - #include <linux/of_device.h> 21 21 #include <linux/of_platform.h> 22 + #include <linux/pci.h> 22 23 #include <linux/platform_device.h> 23 24 #include <linux/sizes.h> 24 25 #include <linux/slab.h> ··· 254 253 /* Wait for any pending TLB invalidations to complete */ 255 254 static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain) 256 255 { 257 - unsigned int count = 0; 256 + u32 val; 258 257 259 - while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) { 260 - cpu_relax(); 261 - if (++count == TLB_LOOP_TIMEOUT) { 262 - dev_err_ratelimited(domain->mmu->dev, 258 + if (read_poll_timeout_atomic(ipmmu_ctx_read_root, val, 259 + !(val & IMCTR_FLUSH), 1, TLB_LOOP_TIMEOUT, 260 + false, domain, IMCTR)) 261 + dev_err_ratelimited(domain->mmu->dev, 263 262 "TLB sync timed out -- MMU may be deadlocked\n"); 264 - return; 265 - } 266 - udelay(1); 267 - } 268 263 } 269 264 270 265 static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain) ··· 719 722 /* Check whether this SoC can use the IPMMU correctly or not */ 720 723 if (soc_device_match(soc_denylist)) 721 724 return false; 725 + 726 + /* Check whether this device is a PCI device */ 727 + if (dev_is_pci(dev)) 728 + return true; 722 729 723 730 /* Check whether this device can work with the IPMMU */ 724 731 for (i = 0; i < ARRAY_SIZE(devices_allowlist); i++) {
+119 -30
drivers/iommu/mtk_iommu.c
··· 3 3 * Copyright (c) 2015-2016 MediaTek Inc. 4 4 * Author: Yong Wu <yong.wu@mediatek.com> 5 5 */ 6 + #include <linux/arm-smccc.h> 6 7 #include <linux/bitfield.h> 7 8 #include <linux/bug.h> 8 9 #include <linux/clk.h> ··· 28 27 #include <linux/slab.h> 29 28 #include <linux/spinlock.h> 30 29 #include <linux/soc/mediatek/infracfg.h> 30 + #include <linux/soc/mediatek/mtk_sip_svc.h> 31 31 #include <asm/barrier.h> 32 32 #include <soc/mediatek/smi.h> 33 33 ··· 145 143 #define PGTABLE_PA_35_EN BIT(17) 146 144 #define TF_PORT_TO_ADDR_MT8173 BIT(18) 147 145 #define INT_ID_PORT_WIDTH_6 BIT(19) 146 + #define CFG_IFA_MASTER_IN_ATF BIT(20) 148 147 149 148 #define MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, mask) \ 150 149 ((((pdata)->flags) & (mask)) == (_x)) ··· 170 167 M4U_MT8173, 171 168 M4U_MT8183, 172 169 M4U_MT8186, 170 + M4U_MT8188, 173 171 M4U_MT8192, 174 172 M4U_MT8195, 175 173 M4U_MT8365, ··· 262 258 struct device *smicomm_dev; 263 259 264 260 struct mtk_iommu_bank_data *bank; 261 + struct mtk_iommu_domain *share_dom; /* For 2 HWs share pgtable */ 262 + 265 263 struct regmap *pericfg; 266 264 struct mutex mutex; /* Protect m4u_group/m4u_dom above */ 267 265 ··· 583 577 unsigned int larbid, portid; 584 578 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 585 579 const struct mtk_iommu_iova_region *region; 586 - u32 peri_mmuen, peri_mmuen_msk; 580 + unsigned long portid_msk = 0; 581 + struct arm_smccc_res res; 587 582 int i, ret = 0; 588 583 589 584 for (i = 0; i < fwspec->num_ids; ++i) { 590 - larbid = MTK_M4U_TO_LARB(fwspec->ids[i]); 591 585 portid = MTK_M4U_TO_PORT(fwspec->ids[i]); 586 + portid_msk |= BIT(portid); 587 + } 592 588 593 - if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) { 594 - larb_mmu = &data->larb_imu[larbid]; 589 + if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) { 590 + /* All ports should be in the same larb. 
just use 0 here */ 591 + larbid = MTK_M4U_TO_LARB(fwspec->ids[0]); 592 + larb_mmu = &data->larb_imu[larbid]; 593 + region = data->plat_data->iova_region + regionid; 595 594 596 - region = data->plat_data->iova_region + regionid; 595 + for_each_set_bit(portid, &portid_msk, 32) 597 596 larb_mmu->bank[portid] = upper_32_bits(region->iova_base); 598 597 599 - dev_dbg(dev, "%s iommu for larb(%s) port %d region %d rgn-bank %d.\n", 600 - enable ? "enable" : "disable", dev_name(larb_mmu->dev), 601 - portid, regionid, larb_mmu->bank[portid]); 598 + dev_dbg(dev, "%s iommu for larb(%s) port 0x%lx region %d rgn-bank %d.\n", 599 + enable ? "enable" : "disable", dev_name(larb_mmu->dev), 600 + portid_msk, regionid, upper_32_bits(region->iova_base)); 602 601 603 - if (enable) 604 - larb_mmu->mmu |= MTK_SMI_MMU_EN(portid); 605 - else 606 - larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid); 607 - } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) { 608 - peri_mmuen_msk = BIT(portid); 602 + if (enable) 603 + larb_mmu->mmu |= portid_msk; 604 + else 605 + larb_mmu->mmu &= ~portid_msk; 606 + } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) { 607 + if (MTK_IOMMU_HAS_FLAG(data->plat_data, CFG_IFA_MASTER_IN_ATF)) { 608 + arm_smccc_smc(MTK_SIP_KERNEL_IOMMU_CONTROL, 609 + IOMMU_ATF_CMD_CONFIG_INFRA_IOMMU, 610 + portid_msk, enable, 0, 0, 0, 0, &res); 611 + ret = res.a0; 612 + } else { 609 613 /* PCI dev has only one output id, enable the next writing bit for PCIe */ 610 - if (dev_is_pci(dev)) 611 - peri_mmuen_msk |= BIT(portid + 1); 614 + if (dev_is_pci(dev)) { 615 + if (fwspec->num_ids != 1) { 616 + dev_err(dev, "PCI dev can only have one port.\n"); 617 + return -ENODEV; 618 + } 619 + portid_msk |= BIT(portid + 1); 620 + } 612 621 613 - peri_mmuen = enable ? 
peri_mmuen_msk : 0; 614 622 ret = regmap_update_bits(data->pericfg, PERICFG_IOMMU_1, 615 - peri_mmuen_msk, peri_mmuen); 616 - if (ret) 617 - dev_err(dev, "%s iommu(%s) inframaster 0x%x fail(%d).\n", 618 - enable ? "enable" : "disable", 619 - dev_name(data->dev), peri_mmuen_msk, ret); 623 + (u32)portid_msk, enable ? (u32)portid_msk : 0); 620 624 } 625 + if (ret) 626 + dev_err(dev, "%s iommu(%s) inframaster 0x%lx fail(%d).\n", 627 + enable ? "enable" : "disable", 628 + dev_name(data->dev), portid_msk, ret); 621 629 } 622 630 return ret; 623 631 } ··· 640 620 struct mtk_iommu_data *data, 641 621 unsigned int region_id) 642 622 { 623 + struct mtk_iommu_domain *share_dom = data->share_dom; 643 624 const struct mtk_iommu_iova_region *region; 644 - struct mtk_iommu_domain *m4u_dom; 645 625 646 - /* Always use bank0 in sharing pgtable case */ 647 - m4u_dom = data->bank[0].m4u_dom; 648 - if (m4u_dom) { 649 - dom->iop = m4u_dom->iop; 650 - dom->cfg = m4u_dom->cfg; 651 - dom->domain.pgsize_bitmap = m4u_dom->cfg.pgsize_bitmap; 626 + /* Always use share domain in sharing pgtable case */ 627 + if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE) && share_dom) { 628 + dom->iop = share_dom->iop; 629 + dom->cfg = share_dom->cfg; 630 + dom->domain.pgsize_bitmap = share_dom->cfg.pgsize_bitmap; 652 631 goto update_iova_region; 653 632 } 654 633 ··· 676 657 677 658 /* Update our support page sizes bitmap */ 678 659 dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap; 660 + 661 + if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE)) 662 + data->share_dom = dom; 679 663 680 664 update_iova_region: 681 665 /* Update the iova region for this domain */ ··· 730 708 /* Data is in the frstdata in sharing pgtable case. 
*/ 731 709 frstdata = mtk_iommu_get_frst_data(hw_list); 732 710 711 + mutex_lock(&frstdata->mutex); 733 712 ret = mtk_iommu_domain_finalise(dom, frstdata, region_id); 713 + mutex_unlock(&frstdata->mutex); 734 714 if (ret) { 735 715 mutex_unlock(&dom->mutex); 736 716 return ret; ··· 1342 1318 dev_err_probe(dev, ret, "mm dts parse fail\n"); 1343 1319 goto out_runtime_disable; 1344 1320 } 1345 - } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) { 1321 + } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) && 1322 + !MTK_IOMMU_HAS_FLAG(data->plat_data, CFG_IFA_MASTER_IN_ATF)) { 1346 1323 p = data->plat_data->pericfg_comp_str; 1347 1324 data->pericfg = syscon_regmap_lookup_by_compatible(p); 1348 1325 if (IS_ERR(data->pericfg)) { ··· 1595 1570 .iova_region_larb_msk = mt8186_larb_region_msk, 1596 1571 }; 1597 1572 1573 + static const struct mtk_iommu_plat_data mt8188_data_infra = { 1574 + .m4u_plat = M4U_MT8188, 1575 + .flags = WR_THROT_EN | DCM_DISABLE | STD_AXI_MODE | PM_CLK_AO | 1576 + MTK_IOMMU_TYPE_INFRA | IFA_IOMMU_PCIE_SUPPORT | 1577 + PGTABLE_PA_35_EN | CFG_IFA_MASTER_IN_ATF, 1578 + .inv_sel_reg = REG_MMU_INV_SEL_GEN2, 1579 + .banks_num = 1, 1580 + .banks_enable = {true}, 1581 + .iova_region = single_domain, 1582 + .iova_region_nr = ARRAY_SIZE(single_domain), 1583 + }; 1584 + 1585 + static const u32 mt8188_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = { 1586 + [0] = {~0, ~0, ~0, ~0}, /* Region0: all ports for larb0/1/2/3 */ 1587 + [1] = {0, 0, 0, 0, 0, 0, 0, 0, 1588 + 0, 0, 0, 0, 0, 0, 0, 0, 1589 + 0, 0, 0, 0, 0, ~0, ~0, ~0}, /* Region1: larb19(21)/21(22)/23 */ 1590 + [2] = {0, 0, 0, 0, ~0, ~0, ~0, ~0, /* Region2: the other larbs. 
*/ 1591 + ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, 1592 + ~0, ~0, ~0, ~0, ~0, 0, 0, 0, 1593 + 0, ~0}, 1594 + [3] = {0}, 1595 + [4] = {[24] = BIT(0) | BIT(1)}, /* Only larb27(24) port0/1 */ 1596 + [5] = {[24] = BIT(2) | BIT(3)}, /* Only larb27(24) port2/3 */ 1597 + }; 1598 + 1599 + static const struct mtk_iommu_plat_data mt8188_data_vdo = { 1600 + .m4u_plat = M4U_MT8188, 1601 + .flags = HAS_BCLK | HAS_SUB_COMM_3BITS | OUT_ORDER_WR_EN | 1602 + WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE | 1603 + PGTABLE_PA_35_EN | MTK_IOMMU_TYPE_MM, 1604 + .hw_list = &m4ulist, 1605 + .inv_sel_reg = REG_MMU_INV_SEL_GEN2, 1606 + .banks_num = 1, 1607 + .banks_enable = {true}, 1608 + .iova_region = mt8192_multi_dom, 1609 + .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom), 1610 + .iova_region_larb_msk = mt8188_larb_region_msk, 1611 + .larbid_remap = {{2}, {0}, {21}, {0}, {19}, {9, 10, 1612 + 11 /* 11a */, 25 /* 11c */}, 1613 + {13, 0, 29 /* 16b */, 30 /* 17b */, 0}, {5}}, 1614 + }; 1615 + 1616 + static const struct mtk_iommu_plat_data mt8188_data_vpp = { 1617 + .m4u_plat = M4U_MT8188, 1618 + .flags = HAS_BCLK | HAS_SUB_COMM_3BITS | OUT_ORDER_WR_EN | 1619 + WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE | 1620 + PGTABLE_PA_35_EN | MTK_IOMMU_TYPE_MM, 1621 + .hw_list = &m4ulist, 1622 + .inv_sel_reg = REG_MMU_INV_SEL_GEN2, 1623 + .banks_num = 1, 1624 + .banks_enable = {true}, 1625 + .iova_region = mt8192_multi_dom, 1626 + .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom), 1627 + .iova_region_larb_msk = mt8188_larb_region_msk, 1628 + .larbid_remap = {{1}, {3}, {23}, {7}, {MTK_INVALID_LARBID}, 1629 + {12, 15, 24 /* 11b */}, {14, MTK_INVALID_LARBID, 1630 + 16 /* 16a */, 17 /* 17a */, MTK_INVALID_LARBID, 1631 + 27, 28 /* ccu0 */, MTK_INVALID_LARBID}, {4, 6}}, 1632 + }; 1633 + 1598 1634 static const unsigned int mt8192_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = { 1599 1635 [0] = {~0, ~0}, /* Region0: larb0/1 */ 1600 1636 [1] = {0, 0, 0, 0, ~0, ~0, 0, ~0}, /* Region1: larb4/5/7 */ ··· 1764 1678 { 
.compatible = "mediatek,mt8173-m4u", .data = &mt8173_data}, 1765 1679 { .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data}, 1766 1680 { .compatible = "mediatek,mt8186-iommu-mm", .data = &mt8186_data_mm}, /* mm: m4u */ 1681 + { .compatible = "mediatek,mt8188-iommu-infra", .data = &mt8188_data_infra}, 1682 + { .compatible = "mediatek,mt8188-iommu-vdo", .data = &mt8188_data_vdo}, 1683 + { .compatible = "mediatek,mt8188-iommu-vpp", .data = &mt8188_data_vpp}, 1767 1684 { .compatible = "mediatek,mt8192-m4u", .data = &mt8192_data}, 1768 1685 { .compatible = "mediatek,mt8195-iommu-infra", .data = &mt8195_data_infra}, 1769 1686 { .compatible = "mediatek,mt8195-iommu-vdo", .data = &mt8195_data_vdo},
+1 -1
drivers/iommu/of_iommu.c
··· 159 159 * If we have reason to believe the IOMMU driver missed the initial 160 160 * probe for dev, replay it to get things in order. 161 161 */ 162 - if (!err && dev->bus && !device_iommu_mapped(dev)) 162 + if (!err && dev->bus) 163 163 err = iommu_probe_device(dev); 164 164 165 165 /* Ignore all other errors apart from EPROBE_DEFER */
+10 -40
drivers/iommu/rockchip-iommu.c
··· 98 98 phys_addr_t (*pt_address)(u32 dte); 99 99 u32 (*mk_dtentries)(dma_addr_t pt_dma); 100 100 u32 (*mk_ptentries)(phys_addr_t page, int prot); 101 - phys_addr_t (*dte_addr_phys)(u32 addr); 102 - u32 (*dma_addr_dte)(dma_addr_t dt_dma); 103 101 u64 dma_bit_mask; 102 + gfp_t gfp_flags; 104 103 }; 105 104 106 105 struct rk_iommu { ··· 277 278 /* 278 279 * In v2: 279 280 * 31:12 - Page address bit 31:0 280 - * 11:9 - Page address bit 34:32 281 - * 8:4 - Page address bit 39:35 281 + * 11: 8 - Page address bit 35:32 282 + * 7: 4 - Page address bit 39:36 282 283 * 3 - Security 283 284 * 2 - Writable 284 285 * 1 - Readable ··· 505 506 506 507 /* 507 508 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY 508 - * and verifying that upper 5 nybbles are read back. 509 + * and verifying that upper 5 (v1) or 7 (v2) nybbles are read back. 509 510 */ 510 511 for (i = 0; i < iommu->num_mmu; i++) { 511 512 dte_addr = rk_ops->pt_address(DTE_ADDR_DUMMY); ··· 530 531 return 0; 531 532 } 532 533 533 - static inline phys_addr_t rk_dte_addr_phys(u32 addr) 534 - { 535 - return (phys_addr_t)addr; 536 - } 537 - 538 - static inline u32 rk_dma_addr_dte(dma_addr_t dt_dma) 539 - { 540 - return dt_dma; 541 - } 542 - 543 - #define DT_HI_MASK GENMASK_ULL(39, 32) 544 - #define DTE_BASE_HI_MASK GENMASK(11, 4) 545 - #define DT_SHIFT 28 546 - 547 - static inline phys_addr_t rk_dte_addr_phys_v2(u32 addr) 548 - { 549 - u64 addr64 = addr; 550 - return (phys_addr_t)(addr64 & RK_DTE_PT_ADDRESS_MASK) | 551 - ((addr64 & DTE_BASE_HI_MASK) << DT_SHIFT); 552 - } 553 - 554 - static inline u32 rk_dma_addr_dte_v2(dma_addr_t dt_dma) 555 - { 556 - return (dt_dma & RK_DTE_PT_ADDRESS_MASK) | 557 - ((dt_dma & DT_HI_MASK) >> DT_SHIFT); 558 - } 559 - 560 534 static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova) 561 535 { 562 536 void __iomem *base = iommu->bases[index]; ··· 549 577 page_offset = rk_iova_page_offset(iova); 550 578 551 579 mmu_dte_addr = rk_iommu_read(base, 
RK_MMU_DTE_ADDR); 552 - mmu_dte_addr_phys = rk_ops->dte_addr_phys(mmu_dte_addr); 580 + mmu_dte_addr_phys = rk_ops->pt_address(mmu_dte_addr); 553 581 554 582 dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index); 555 583 dte_addr = phys_to_virt(dte_addr_phys); ··· 728 756 if (rk_dte_is_pt_valid(dte)) 729 757 goto done; 730 758 731 - page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); 759 + page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | rk_ops->gfp_flags); 732 760 if (!page_table) 733 761 return ERR_PTR(-ENOMEM); 734 762 ··· 939 967 940 968 for (i = 0; i < iommu->num_mmu; i++) { 941 969 rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 942 - rk_ops->dma_addr_dte(rk_domain->dt_dma)); 970 + rk_ops->mk_dtentries(rk_domain->dt_dma)); 943 971 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE); 944 972 rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK); 945 973 } ··· 1077 1105 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries. 1078 1106 * Allocate one 4 KiB page for each table. 
1079 1107 */ 1080 - rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32); 1108 + rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | rk_ops->gfp_flags); 1081 1109 if (!rk_domain->dt) 1082 1110 goto err_free_domain; 1083 1111 ··· 1377 1405 .pt_address = &rk_dte_pt_address, 1378 1406 .mk_dtentries = &rk_mk_dte, 1379 1407 .mk_ptentries = &rk_mk_pte, 1380 - .dte_addr_phys = &rk_dte_addr_phys, 1381 - .dma_addr_dte = &rk_dma_addr_dte, 1382 1408 .dma_bit_mask = DMA_BIT_MASK(32), 1409 + .gfp_flags = GFP_DMA32, 1383 1410 }; 1384 1411 1385 1412 static struct rk_iommu_ops iommu_data_ops_v2 = { 1386 1413 .pt_address = &rk_dte_pt_address_v2, 1387 1414 .mk_dtentries = &rk_mk_dte_v2, 1388 1415 .mk_ptentries = &rk_mk_pte_v2, 1389 - .dte_addr_phys = &rk_dte_addr_phys_v2, 1390 - .dma_addr_dte = &rk_dma_addr_dte_v2, 1391 1416 .dma_bit_mask = DMA_BIT_MASK(40), 1417 + .gfp_flags = 0, 1392 1418 }; 1393 1419 1394 1420 static const struct of_device_id rk_iommu_dt_ids[] = {
+2
drivers/iommu/sprd-iommu.c
··· 14 14 #include <linux/mfd/syscon.h> 15 15 #include <linux/module.h> 16 16 #include <linux/of_platform.h> 17 + #include <linux/platform_device.h> 17 18 #include <linux/regmap.h> 18 19 #include <linux/slab.h> 19 20 ··· 149 148 150 149 dom->domain.geometry.aperture_start = 0; 151 150 dom->domain.geometry.aperture_end = SZ_256M - 1; 151 + dom->domain.geometry.force_aperture = true; 152 152 153 153 return &dom->domain; 154 154 }
+1 -1
drivers/iommu/tegra-smmu.c
··· 9 9 #include <linux/iommu.h> 10 10 #include <linux/kernel.h> 11 11 #include <linux/of.h> 12 - #include <linux/of_device.h> 12 + #include <linux/of_platform.h> 13 13 #include <linux/pci.h> 14 14 #include <linux/platform_device.h> 15 15 #include <linux/slab.h>
+1 -1
drivers/iommu/virtio-iommu.c
··· 13 13 #include <linux/interval_tree.h> 14 14 #include <linux/iommu.h> 15 15 #include <linux/module.h> 16 - #include <linux/of_platform.h> 16 + #include <linux/of.h> 17 17 #include <linux/pci.h> 18 18 #include <linux/virtio.h> 19 19 #include <linux/virtio_config.h>
+489
include/dt-bindings/memory/mediatek,mt8188-memory-port.h
··· 1 + /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ 2 + /* 3 + * Copyright (c) 2022 MediaTek Inc. 4 + * Author: Chengci Xu <chengci.xu@mediatek.com> 5 + */ 6 + #ifndef _DT_BINDINGS_MEMORY_MEDIATEK_MT8188_LARB_PORT_H_ 7 + #define _DT_BINDINGS_MEMORY_MEDIATEK_MT8188_LARB_PORT_H_ 8 + 9 + #include <dt-bindings/memory/mtk-memory-port.h> 10 + 11 + /* 12 + * MM IOMMU larbs: 13 + * From below, for example larb11 has larb11a/larb11b/larb11c, 14 + * the index of larb is not in order. So we reindexed these larbs from a 15 + * software view. 16 + */ 17 + #define SMI_L0_ID 0 18 + #define SMI_L1_ID 1 19 + #define SMI_L2_ID 2 20 + #define SMI_L3_ID 3 21 + #define SMI_L4_ID 4 22 + #define SMI_L5_ID 5 23 + #define SMI_L6_ID 6 24 + #define SMI_L7_ID 7 25 + #define SMI_L9_ID 8 26 + #define SMI_L10_ID 9 27 + #define SMI_L11A_ID 10 28 + #define SMI_L11B_ID 11 29 + #define SMI_L11C_ID 12 30 + #define SMI_L12_ID 13 31 + #define SMI_L13_ID 14 32 + #define SMI_L14_ID 15 33 + #define SMI_L15_ID 16 34 + #define SMI_L16A_ID 17 35 + #define SMI_L16B_ID 18 36 + #define SMI_L17A_ID 19 37 + #define SMI_L17B_ID 20 38 + #define SMI_L19_ID 21 39 + #define SMI_L21_ID 22 40 + #define SMI_L23_ID 23 41 + #define SMI_L27_ID 24 42 + #define SMI_L28_ID 25 43 + 44 + /* 45 + * MM IOMMU supports 16GB dma address. We separate it to four ranges: 46 + * 0 ~ 4G; 4G ~ 8G; 8G ~ 12G; 12G ~ 16G, we could adjust these masters 47 + * locate in anyone region. BUT: 48 + * a) Make sure all the ports inside a larb are in one range. 49 + * b) The iova of any master can NOT cross the 4G/8G/12G boundary. 50 + * 51 + * This is the suggested mapping in this SoC: 52 + * 53 + * modules dma-address-region larbs-ports 54 + * disp 0 ~ 4G larb0/1/2/3 55 + * vcodec 4G ~ 8G larb19(21)[1]/21(22)/23 56 + * cam/mdp 8G ~ 12G the other larbs. 
57 + * N/A 12G ~ 16G 58 + * CCU0 0x24000_0000 ~ 0x243ff_ffff larb27(24): port 0/1 59 + * CCU1 0x24400_0000 ~ 0x247ff_ffff larb27(24): port 2/3 60 + * 61 + * This SoC have two MM IOMMU HWs, this is the connected information: 62 + * iommu-vdo: larb0/2/5/9/10/11A/11C/13/16B/17B/19/21 63 + * iommu-vpp: larb1/3/4/6/7/11B/12/14/15/16A/17A/23/27 64 + * 65 + * [1]: This is larb19, but the index is 21 from the SW view. 66 + */ 67 + 68 + /* MM IOMMU ports */ 69 + /* LARB 0 -- VDO-0 */ 70 + #define M4U_PORT_L0_DISP_RDMA1 MTK_M4U_ID(SMI_L0_ID, 0) 71 + #define M4U_PORT_L0_DISP_WDMA0 MTK_M4U_ID(SMI_L0_ID, 1) 72 + #define M4U_PORT_L0_DISP_OVL0_RDMA0 MTK_M4U_ID(SMI_L0_ID, 2) 73 + #define M4U_PORT_L0_DISP_OVL0_RDMA1 MTK_M4U_ID(SMI_L0_ID, 3) 74 + #define M4U_PORT_L0_DISP_OVL0_HDR MTK_M4U_ID(SMI_L0_ID, 4) 75 + #define M4U_PORT_L0_DISP_POSTMASK0 MTK_M4U_ID(SMI_L0_ID, 5) 76 + #define M4U_PORT_L0_DISP_FAKE_ENG0 MTK_M4U_ID(SMI_L0_ID, 6) 77 + 78 + /* LARB 1 -- VD0-0 */ 79 + #define M4U_PORT_L1_DISP_RDMA0 MTK_M4U_ID(SMI_L1_ID, 0) 80 + #define M4U_PORT_L1_DISP_WDMA1 MTK_M4U_ID(SMI_L1_ID, 1) 81 + #define M4U_PORT_L1_DISP_OVL1_RDMA0 MTK_M4U_ID(SMI_L1_ID, 2) 82 + #define M4U_PORT_L1_DISP_OVL1_RDMA1 MTK_M4U_ID(SMI_L1_ID, 3) 83 + #define M4U_PORT_L1_DISP_OVL1_HDR MTK_M4U_ID(SMI_L1_ID, 4) 84 + #define M4U_PORT_L1_DISP_WROT0 MTK_M4U_ID(SMI_L1_ID, 5) 85 + #define M4U_PORT_L1_DISP_FAKE_ENG1 MTK_M4U_ID(SMI_L1_ID, 6) 86 + 87 + /* LARB 2 -- VDO-1 */ 88 + #define M4U_PORT_L2_MDP_RDMA0 MTK_M4U_ID(SMI_L2_ID, 0) 89 + #define M4U_PORT_L2_MDP_RDMA2 MTK_M4U_ID(SMI_L2_ID, 1) 90 + #define M4U_PORT_L2_MDP_RDMA4 MTK_M4U_ID(SMI_L2_ID, 2) 91 + #define M4U_PORT_L2_MDP_RDMA6 MTK_M4U_ID(SMI_L2_ID, 3) 92 + #define M4U_PORT_L2_DISP_FAKE1 MTK_M4U_ID(SMI_L2_ID, 4) 93 + 94 + /* LARB 3 -- VDO-1 */ 95 + #define M4U_PORT_L3_MDP_RDMA1 MTK_M4U_ID(SMI_L3_ID, 0) 96 + #define M4U_PORT_L3_MDP_RDMA3 MTK_M4U_ID(SMI_L3_ID, 1) 97 + #define M4U_PORT_L3_MDP_RDMA5 MTK_M4U_ID(SMI_L3_ID, 2) 98 + #define M4U_PORT_L3_MDP_RDMA7 
MTK_M4U_ID(SMI_L3_ID, 3) 99 + #define M4U_PORT_L3_HDR_DS_SMI MTK_M4U_ID(SMI_L3_ID, 4) 100 + #define M4U_PORT_L3_HDR_ADL_SMI MTK_M4U_ID(SMI_L3_ID, 5) 101 + #define M4U_PORT_L3_DISP_FAKE1 MTK_M4U_ID(SMI_L3_ID, 6) 102 + 103 + /* LARB 4 -- VPP-0 */ 104 + #define M4U_PORT_L4_MDP_RDMA MTK_M4U_ID(SMI_L4_ID, 0) 105 + #define M4U_PORT_L4_MDP_FG MTK_M4U_ID(SMI_L4_ID, 1) 106 + #define M4U_PORT_L4_MDP_OVL MTK_M4U_ID(SMI_L4_ID, 2) 107 + #define M4U_PORT_L4_MDP_WROT MTK_M4U_ID(SMI_L4_ID, 3) 108 + #define M4U_PORT_L4_FAKE_ENG MTK_M4U_ID(SMI_L4_ID, 4) 109 + #define M4U_PORT_L4_DISP_RDMA MTK_M4U_ID(SMI_L4_ID, 5) 110 + #define M4U_PORT_L4_DISP_WDMA MTK_M4U_ID(SMI_L4_ID, 6) 111 + 112 + /* LARB 5 -- VPP-1 */ 113 + #define M4U_PORT_L5_SVPP1_MDP_RDMA MTK_M4U_ID(SMI_L5_ID, 0) 114 + #define M4U_PORT_L5_SVPP1_MDP_FG MTK_M4U_ID(SMI_L5_ID, 1) 115 + #define M4U_PORT_L5_SVPP1_MDP_OVL MTK_M4U_ID(SMI_L5_ID, 2) 116 + #define M4U_PORT_L5_SVPP1_MDP_WROT MTK_M4U_ID(SMI_L5_ID, 3) 117 + #define M4U_PORT_L5_SVPP2_MDP_RDMA MTK_M4U_ID(SMI_L5_ID, 4) 118 + #define M4U_PORT_L5_SVPP2_MDP_FG MTK_M4U_ID(SMI_L5_ID, 5) 119 + #define M4U_PORT_L5_SVPP2_MDP_WROT MTK_M4U_ID(SMI_L5_ID, 6) 120 + #define M4U_PORT_L5_LARB5_FAKE_ENG MTK_M4U_ID(SMI_L5_ID, 7) 121 + 122 + /* LARB 6 -- VPP-1 */ 123 + #define M4U_PORT_L6_SVPP3_MDP_RDMA MTK_M4U_ID(SMI_L6_ID, 0) 124 + #define M4U_PORT_L6_SVPP3_MDP_FG MTK_M4U_ID(SMI_L6_ID, 1) 125 + #define M4U_PORT_L6_SVPP3_MDP_WROT MTK_M4U_ID(SMI_L6_ID, 2) 126 + #define M4U_PORT_L6_LARB6_FAKE_ENG MTK_M4U_ID(SMI_L6_ID, 3) 127 + 128 + /* LARB 7 -- WPE */ 129 + #define M4U_PORT_L7_WPE_RDMA_0 MTK_M4U_ID(SMI_L7_ID, 0) 130 + #define M4U_PORT_L7_WPE_RDMA_1 MTK_M4U_ID(SMI_L7_ID, 1) 131 + #define M4U_PORT_L7_WPE_WDMA_0 MTK_M4U_ID(SMI_L7_ID, 2) 132 + 133 + /* LARB 9 -- IMG-M */ 134 + #define M4U_PORT_L9_IMGI_T1_A MTK_M4U_ID(SMI_L9_ID, 0) 135 + #define M4U_PORT_L9_UFDI_T1_A MTK_M4U_ID(SMI_L9_ID, 1) 136 + #define M4U_PORT_L9_IMGBI_T1_A MTK_M4U_ID(SMI_L9_ID, 2) 137 + #define M4U_PORT_L9_IMGCI_T1_A 
MTK_M4U_ID(SMI_L9_ID, 3) 138 + #define M4U_PORT_L9_SMTI_T1_A MTK_M4U_ID(SMI_L9_ID, 4) 139 + #define M4U_PORT_L9_SMTI_T4_A MTK_M4U_ID(SMI_L9_ID, 5) 140 + #define M4U_PORT_L9_TNCSTI_T1_A MTK_M4U_ID(SMI_L9_ID, 6) 141 + #define M4U_PORT_L9_TNCSTI_T4_A MTK_M4U_ID(SMI_L9_ID, 7) 142 + #define M4U_PORT_L9_YUVO_T1_A MTK_M4U_ID(SMI_L9_ID, 8) 143 + #define M4U_PORT_L9_YUVBO_T1_A MTK_M4U_ID(SMI_L9_ID, 9) 144 + #define M4U_PORT_L9_YUVCO_T1_A MTK_M4U_ID(SMI_L9_ID, 10) 145 + #define M4U_PORT_L9_TIMGO_T1_A MTK_M4U_ID(SMI_L9_ID, 11) 146 + #define M4U_PORT_L9_YUVO_T2_A MTK_M4U_ID(SMI_L9_ID, 12) 147 + #define M4U_PORT_L9_YUVO_T5_A MTK_M4U_ID(SMI_L9_ID, 13) 148 + #define M4U_PORT_L9_IMGI_T1_B MTK_M4U_ID(SMI_L9_ID, 14) 149 + #define M4U_PORT_L9_IMGBI_T1_B MTK_M4U_ID(SMI_L9_ID, 15) 150 + #define M4U_PORT_L9_IMGCI_T1_B MTK_M4U_ID(SMI_L9_ID, 16) 151 + #define M4U_PORT_L9_SMTI_T4_B MTK_M4U_ID(SMI_L9_ID, 17) 152 + #define M4U_PORT_L9_TNCSO_T1_A MTK_M4U_ID(SMI_L9_ID, 18) 153 + #define M4U_PORT_L9_SMTO_T1_A MTK_M4U_ID(SMI_L9_ID, 19) 154 + #define M4U_PORT_L9_SMTO_T4_A MTK_M4U_ID(SMI_L9_ID, 20) 155 + #define M4U_PORT_L9_TNCSTO_T1_A MTK_M4U_ID(SMI_L9_ID, 21) 156 + #define M4U_PORT_L9_YUVO_T2_B MTK_M4U_ID(SMI_L9_ID, 22) 157 + #define M4U_PORT_L9_YUVO_T5_B MTK_M4U_ID(SMI_L9_ID, 23) 158 + #define M4U_PORT_L9_SMTO_T4_B MTK_M4U_ID(SMI_L9_ID, 24) 159 + 160 + /* LARB 10 -- IMG-D */ 161 + #define M4U_PORT_L10_IMGI_D1 MTK_M4U_ID(SMI_L10_ID, 0) 162 + #define M4U_PORT_L10_IMGBI_D1 MTK_M4U_ID(SMI_L10_ID, 1) 163 + #define M4U_PORT_L10_IMGCI_D1 MTK_M4U_ID(SMI_L10_ID, 2) 164 + #define M4U_PORT_L10_IMGDI_D1 MTK_M4U_ID(SMI_L10_ID, 3) 165 + #define M4U_PORT_L10_DEPI_D1 MTK_M4U_ID(SMI_L10_ID, 4) 166 + #define M4U_PORT_L10_DMGI_D1 MTK_M4U_ID(SMI_L10_ID, 5) 167 + #define M4U_PORT_L10_SMTI_D1 MTK_M4U_ID(SMI_L10_ID, 6) 168 + #define M4U_PORT_L10_RECI_D1 MTK_M4U_ID(SMI_L10_ID, 7) 169 + #define M4U_PORT_L10_RECI_D1_N MTK_M4U_ID(SMI_L10_ID, 8) 170 + #define M4U_PORT_L10_TNRWI_D1 MTK_M4U_ID(SMI_L10_ID, 9) 171 + #define 
M4U_PORT_L10_TNRCI_D1 MTK_M4U_ID(SMI_L10_ID, 10) 172 + #define M4U_PORT_L10_TNRCI_D1_N MTK_M4U_ID(SMI_L10_ID, 11) 173 + #define M4U_PORT_L10_IMG4O_D1 MTK_M4U_ID(SMI_L10_ID, 12) 174 + #define M4U_PORT_L10_IMG4BO_D1 MTK_M4U_ID(SMI_L10_ID, 13) 175 + #define M4U_PORT_L10_SMTI_D8 MTK_M4U_ID(SMI_L10_ID, 14) 176 + #define M4U_PORT_L10_SMTO_D1 MTK_M4U_ID(SMI_L10_ID, 15) 177 + #define M4U_PORT_L10_TNRMO_D1 MTK_M4U_ID(SMI_L10_ID, 16) 178 + #define M4U_PORT_L10_TNRMO_D1_N MTK_M4U_ID(SMI_L10_ID, 17) 179 + #define M4U_PORT_L10_SMTO_D8 MTK_M4U_ID(SMI_L10_ID, 18) 180 + #define M4U_PORT_L10_DBGO_D1 MTK_M4U_ID(SMI_L10_ID, 19) 181 + 182 + /* LARB 11A -- IMG-D */ 183 + #define M4U_PORT_L11A_WPE_RDMA_0 MTK_M4U_ID(SMI_L11A_ID, 0) 184 + #define M4U_PORT_L11A_WPE_RDMA_1 MTK_M4U_ID(SMI_L11A_ID, 1) 185 + #define M4U_PORT_L11A_WPE_RDMA_4P_0 MTK_M4U_ID(SMI_L11A_ID, 2) 186 + #define M4U_PORT_L11A_WPE_RDMA_4P_1 MTK_M4U_ID(SMI_L11A_ID, 3) 187 + #define M4U_PORT_L11A_WPE_CQ0 MTK_M4U_ID(SMI_L11A_ID, 4) 188 + #define M4U_PORT_L11A_WPE_CQ1 MTK_M4U_ID(SMI_L11A_ID, 5) 189 + #define M4U_PORT_L11A_PIMGI_P1 MTK_M4U_ID(SMI_L11A_ID, 6) 190 + #define M4U_PORT_L11A_PIMGBI_P1 MTK_M4U_ID(SMI_L11A_ID, 7) 191 + #define M4U_PORT_L11A_PIMGCI_P1 MTK_M4U_ID(SMI_L11A_ID, 8) 192 + #define M4U_PORT_L11A_IMGI_T1_C MTK_M4U_ID(SMI_L11A_ID, 9) 193 + #define M4U_PORT_L11A_IMGBI_T1_C MTK_M4U_ID(SMI_L11A_ID, 10) 194 + #define M4U_PORT_L11A_IMGCI_T1_C MTK_M4U_ID(SMI_L11A_ID, 11) 195 + #define M4U_PORT_L11A_SMTI_T1_C MTK_M4U_ID(SMI_L11A_ID, 12) 196 + #define M4U_PORT_L11A_SMTI_T4_C MTK_M4U_ID(SMI_L11A_ID, 13) 197 + #define M4U_PORT_L11A_SMTI_T6_C MTK_M4U_ID(SMI_L11A_ID, 14) 198 + #define M4U_PORT_L11A_YUVO_T1_C MTK_M4U_ID(SMI_L11A_ID, 15) 199 + #define M4U_PORT_L11A_YUVBO_T1_C MTK_M4U_ID(SMI_L11A_ID, 16) 200 + #define M4U_PORT_L11A_YUVCO_T1_C MTK_M4U_ID(SMI_L11A_ID, 17) 201 + #define M4U_PORT_L11A_WPE_WDMA_0 MTK_M4U_ID(SMI_L11A_ID, 18) 202 + #define M4U_PORT_L11A_WPE_WDMA_4P_0 MTK_M4U_ID(SMI_L11A_ID, 19) 203 + #define 
M4U_PORT_L11A_WROT_P1 MTK_M4U_ID(SMI_L11A_ID, 20) 204 + #define M4U_PORT_L11A_TCCSO_P1 MTK_M4U_ID(SMI_L11A_ID, 21) 205 + #define M4U_PORT_L11A_TCCSI_P1 MTK_M4U_ID(SMI_L11A_ID, 22) 206 + #define M4U_PORT_L11A_TIMGO_T1_C MTK_M4U_ID(SMI_L11A_ID, 23) 207 + #define M4U_PORT_L11A_YUVO_T2_C MTK_M4U_ID(SMI_L11A_ID, 24) 208 + #define M4U_PORT_L11A_YUVO_T5_C MTK_M4U_ID(SMI_L11A_ID, 25) 209 + #define M4U_PORT_L11A_SMTO_T1_C MTK_M4U_ID(SMI_L11A_ID, 26) 210 + #define M4U_PORT_L11A_SMTO_T4_C MTK_M4U_ID(SMI_L11A_ID, 27) 211 + #define M4U_PORT_L11A_SMTO_T6_C MTK_M4U_ID(SMI_L11A_ID, 28) 212 + #define M4U_PORT_L11A_DBGO_T1_C MTK_M4U_ID(SMI_L11A_ID, 29) 213 + 214 + /* LARB 11B -- IMG-D */ 215 + #define M4U_PORT_L11B_WPE_RDMA_0 MTK_M4U_ID(SMI_L11B_ID, 0) 216 + #define M4U_PORT_L11B_WPE_RDMA_1 MTK_M4U_ID(SMI_L11B_ID, 1) 217 + #define M4U_PORT_L11B_WPE_RDMA_4P_0 MTK_M4U_ID(SMI_L11B_ID, 2) 218 + #define M4U_PORT_L11B_WPE_RDMA_4P_1 MTK_M4U_ID(SMI_L11B_ID, 3) 219 + #define M4U_PORT_L11B_WPE_CQ0 MTK_M4U_ID(SMI_L11B_ID, 4) 220 + #define M4U_PORT_L11B_WPE_CQ1 MTK_M4U_ID(SMI_L11B_ID, 5) 221 + #define M4U_PORT_L11B_PIMGI_P1 MTK_M4U_ID(SMI_L11B_ID, 6) 222 + #define M4U_PORT_L11B_PIMGBI_P1 MTK_M4U_ID(SMI_L11B_ID, 7) 223 + #define M4U_PORT_L11B_PIMGCI_P1 MTK_M4U_ID(SMI_L11B_ID, 8) 224 + #define M4U_PORT_L11B_IMGI_T1_C MTK_M4U_ID(SMI_L11B_ID, 9) 225 + #define M4U_PORT_L11B_IMGBI_T1_C MTK_M4U_ID(SMI_L11B_ID, 10) 226 + #define M4U_PORT_L11B_IMGCI_T1_C MTK_M4U_ID(SMI_L11B_ID, 11) 227 + #define M4U_PORT_L11B_SMTI_T1_C MTK_M4U_ID(SMI_L11B_ID, 12) 228 + #define M4U_PORT_L11B_SMTI_T4_C MTK_M4U_ID(SMI_L11B_ID, 13) 229 + #define M4U_PORT_L11B_SMTI_T6_C MTK_M4U_ID(SMI_L11B_ID, 14) 230 + #define M4U_PORT_L11B_YUVO_T1_C MTK_M4U_ID(SMI_L11B_ID, 15) 231 + #define M4U_PORT_L11B_YUVBO_T1_C MTK_M4U_ID(SMI_L11B_ID, 16) 232 + #define M4U_PORT_L11B_YUVCO_T1_C MTK_M4U_ID(SMI_L11B_ID, 17) 233 + #define M4U_PORT_L11B_WPE_WDMA_0 MTK_M4U_ID(SMI_L11B_ID, 18) 234 + #define M4U_PORT_L11B_WPE_WDMA_4P_0 MTK_M4U_ID(SMI_L11B_ID, 
19) 235 + #define M4U_PORT_L11B_WROT_P1 MTK_M4U_ID(SMI_L11B_ID, 20) 236 + #define M4U_PORT_L11B_TCCSO_P1 MTK_M4U_ID(SMI_L11B_ID, 21) 237 + #define M4U_PORT_L11B_TCCSI_P1 MTK_M4U_ID(SMI_L11B_ID, 22) 238 + #define M4U_PORT_L11B_TIMGO_T1_C MTK_M4U_ID(SMI_L11B_ID, 23) 239 + #define M4U_PORT_L11B_YUVO_T2_C MTK_M4U_ID(SMI_L11B_ID, 24) 240 + #define M4U_PORT_L11B_YUVO_T5_C MTK_M4U_ID(SMI_L11B_ID, 25) 241 + #define M4U_PORT_L11B_SMTO_T1_C MTK_M4U_ID(SMI_L11B_ID, 26) 242 + #define M4U_PORT_L11B_SMTO_T4_C MTK_M4U_ID(SMI_L11B_ID, 27) 243 + #define M4U_PORT_L11B_SMTO_T6_C MTK_M4U_ID(SMI_L11B_ID, 28) 244 + #define M4U_PORT_L11B_DBGO_T1_C MTK_M4U_ID(SMI_L11B_ID, 29) 245 + 246 + /* LARB 11C -- IMG-D */ 247 + #define M4U_PORT_L11C_WPE_RDMA_0 MTK_M4U_ID(SMI_L11C_ID, 0) 248 + #define M4U_PORT_L11C_WPE_RDMA_1 MTK_M4U_ID(SMI_L11C_ID, 1) 249 + #define M4U_PORT_L11C_WPE_RDMA_4P_0 MTK_M4U_ID(SMI_L11C_ID, 2) 250 + #define M4U_PORT_L11C_WPE_RDMA_4P_1 MTK_M4U_ID(SMI_L11C_ID, 3) 251 + #define M4U_PORT_L11C_WPE_CQ0 MTK_M4U_ID(SMI_L11C_ID, 4) 252 + #define M4U_PORT_L11C_WPE_CQ1 MTK_M4U_ID(SMI_L11C_ID, 5) 253 + #define M4U_PORT_L11C_PIMGI_P1 MTK_M4U_ID(SMI_L11C_ID, 6) 254 + #define M4U_PORT_L11C_PIMGBI_P1 MTK_M4U_ID(SMI_L11C_ID, 7) 255 + #define M4U_PORT_L11C_PIMGCI_P1 MTK_M4U_ID(SMI_L11C_ID, 8) 256 + #define M4U_PORT_L11C_IMGI_T1_C MTK_M4U_ID(SMI_L11C_ID, 9) 257 + #define M4U_PORT_L11C_IMGBI_T1_C MTK_M4U_ID(SMI_L11C_ID, 10) 258 + #define M4U_PORT_L11C_IMGCI_T1_C MTK_M4U_ID(SMI_L11C_ID, 11) 259 + #define M4U_PORT_L11C_SMTI_T1_C MTK_M4U_ID(SMI_L11C_ID, 12) 260 + #define M4U_PORT_L11C_SMTI_T4_C MTK_M4U_ID(SMI_L11C_ID, 13) 261 + #define M4U_PORT_L11C_SMTI_T6_C MTK_M4U_ID(SMI_L11C_ID, 14) 262 + #define M4U_PORT_L11C_YUVO_T1_C MTK_M4U_ID(SMI_L11C_ID, 15) 263 + #define M4U_PORT_L11C_YUVBO_T1_C MTK_M4U_ID(SMI_L11C_ID, 16) 264 + #define M4U_PORT_L11C_YUVCO_T1_C MTK_M4U_ID(SMI_L11C_ID, 17) 265 + #define M4U_PORT_L11C_WPE_WDMA_0 MTK_M4U_ID(SMI_L11C_ID, 18) 266 + #define M4U_PORT_L11C_WPE_WDMA_4P_0 
MTK_M4U_ID(SMI_L11C_ID, 19) 267 + #define M4U_PORT_L11C_WROT_P1 MTK_M4U_ID(SMI_L11C_ID, 20) 268 + #define M4U_PORT_L11C_TCCSO_P1 MTK_M4U_ID(SMI_L11C_ID, 21) 269 + #define M4U_PORT_L11C_TCCSI_P1 MTK_M4U_ID(SMI_L11C_ID, 22) 270 + #define M4U_PORT_L11C_TIMGO_T1_C MTK_M4U_ID(SMI_L11C_ID, 23) 271 + #define M4U_PORT_L11C_YUVO_T2_C MTK_M4U_ID(SMI_L11C_ID, 24) 272 + #define M4U_PORT_L11C_YUVO_T5_C MTK_M4U_ID(SMI_L11C_ID, 25) 273 + #define M4U_PORT_L11C_SMTO_T1_C MTK_M4U_ID(SMI_L11C_ID, 26) 274 + #define M4U_PORT_L11C_SMTO_T4_C MTK_M4U_ID(SMI_L11C_ID, 27) 275 + #define M4U_PORT_L11C_SMTO_T6_C MTK_M4U_ID(SMI_L11C_ID, 28) 276 + #define M4U_PORT_L11C_DBGO_T1_C MTK_M4U_ID(SMI_L11C_ID, 29) 277 + 278 + /* LARB 12 -- IPE */ 279 + #define M4U_PORT_L12_FDVT_RDA_0 MTK_M4U_ID(SMI_L12_ID, 0) 280 + #define M4U_PORT_L12_FDVT_RDB_0 MTK_M4U_ID(SMI_L12_ID, 1) 281 + #define M4U_PORT_L12_FDVT_WRA_0 MTK_M4U_ID(SMI_L12_ID, 2) 282 + #define M4U_PORT_L12_FDVT_WRB_0 MTK_M4U_ID(SMI_L12_ID, 3) 283 + #define M4U_PORT_L12_ME_RDMA MTK_M4U_ID(SMI_L12_ID, 4) 284 + #define M4U_PORT_L12_ME_WDMA MTK_M4U_ID(SMI_L12_ID, 5) 285 + #define M4U_PORT_L12_DVS_RDMA MTK_M4U_ID(SMI_L12_ID, 6) 286 + #define M4U_PORT_L12_DVS_WDMA MTK_M4U_ID(SMI_L12_ID, 7) 287 + #define M4U_PORT_L12_DVP_RDMA MTK_M4U_ID(SMI_L12_ID, 8) 288 + #define M4U_PORT_L12_DVP_WDMA MTK_M4U_ID(SMI_L12_ID, 9) 289 + #define M4U_PORT_L12_FDVT_2ND_RDA_0 MTK_M4U_ID(SMI_L12_ID, 10) 290 + #define M4U_PORT_L12_FDVT_2ND_RDB_0 MTK_M4U_ID(SMI_L12_ID, 11) 291 + #define M4U_PORT_L12_FDVT_2ND_WRA_0 MTK_M4U_ID(SMI_L12_ID, 12) 292 + #define M4U_PORT_L12_FDVT_2ND_WRB_0 MTK_M4U_ID(SMI_L12_ID, 13) 293 + #define M4U_PORT_L12_DHZEI_E1 MTK_M4U_ID(SMI_L12_ID, 14) 294 + #define M4U_PORT_L12_DHZEO_E1 MTK_M4U_ID(SMI_L12_ID, 15) 295 + 296 + /* LARB 13 -- CAM-1 */ 297 + #define M4U_PORT_L13_CAMSV_CQI_E1 MTK_M4U_ID(SMI_L13_ID, 0) 298 + #define M4U_PORT_L13_CAMSV_CQI_E2 MTK_M4U_ID(SMI_L13_ID, 1) 299 + #define M4U_PORT_L13_GCAMSV_A_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 2) 300 + #define 
M4U_PORT_L13_GCAMSV_C_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 3) 301 + #define M4U_PORT_L13_GCAMSV_A_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 4) 302 + #define M4U_PORT_L13_GCAMSV_C_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 5) 303 + #define M4U_PORT_L13_PDAI_A_0 MTK_M4U_ID(SMI_L13_ID, 6) 304 + #define M4U_PORT_L13_PDAI_A_1 MTK_M4U_ID(SMI_L13_ID, 7) 305 + #define M4U_PORT_L13_CAMSV_CQI_B_E1 MTK_M4U_ID(SMI_L13_ID, 8) 306 + #define M4U_PORT_L13_CAMSV_CQI_B_E2 MTK_M4U_ID(SMI_L13_ID, 9) 307 + #define M4U_PORT_L13_CAMSV_CQI_C_E1 MTK_M4U_ID(SMI_L13_ID, 10) 308 + #define M4U_PORT_L13_CAMSV_CQI_C_E2 MTK_M4U_ID(SMI_L13_ID, 11) 309 + #define M4U_PORT_L13_GCAMSV_E_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 12) 310 + #define M4U_PORT_L13_GCAMSV_E_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 13) 311 + #define M4U_PORT_L13_GCAMSV_A_UFEO_1 MTK_M4U_ID(SMI_L13_ID, 14) 312 + #define M4U_PORT_L13_GCAMSV_C_UFEO_1 MTK_M4U_ID(SMI_L13_ID, 15) 313 + #define M4U_PORT_L13_GCAMSV_A_UFEO_2 MTK_M4U_ID(SMI_L13_ID, 16) 314 + #define M4U_PORT_L13_GCAMSV_C_UFEO_2 MTK_M4U_ID(SMI_L13_ID, 17) 315 + #define M4U_PORT_L13_GCAMSV_E_UFEO_1 MTK_M4U_ID(SMI_L13_ID, 18) 316 + #define M4U_PORT_L13_GCAMSV_E_UFEO_2 MTK_M4U_ID(SMI_L13_ID, 19) 317 + #define M4U_PORT_L13_GCAMSV_G_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 20) 318 + #define M4U_PORT_L13_GCAMSV_G_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 21) 319 + #define M4U_PORT_L13_PDAO_A MTK_M4U_ID(SMI_L13_ID, 22) 320 + #define M4U_PORT_L13_PDAO_C MTK_M4U_ID(SMI_L13_ID, 23) 321 + 322 + /* LARB 14 -- CAM-1 */ 323 + #define M4U_PORT_L14_GCAMSV_B_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 0) 324 + #define M4U_PORT_L14_GCAMSV_B_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 1) 325 + #define M4U_PORT_L14_SCAMSV_A_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 2) 326 + #define M4U_PORT_L14_SCAMSV_A_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 3) 327 + #define M4U_PORT_L14_SCAMSV_B_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 4) 328 + #define M4U_PORT_L14_SCAMSV_B_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 5) 329 + #define M4U_PORT_L14_PDAI_B_0 MTK_M4U_ID(SMI_L14_ID, 6) 330 + #define M4U_PORT_L14_PDAI_B_1 MTK_M4U_ID(SMI_L14_ID, 7) 331 + #define 
M4U_PORT_L14_GCAMSV_D_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 8) 332 + #define M4U_PORT_L14_GCAMSV_D_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 9) 333 + #define M4U_PORT_L14_GCAMSV_F_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 10) 334 + #define M4U_PORT_L14_GCAMSV_F_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 11) 335 + #define M4U_PORT_L14_GCAMSV_H_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 12) 336 + #define M4U_PORT_L14_GCAMSV_H_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 13) 337 + #define M4U_PORT_L14_GCAMSV_B_UFEO_1 MTK_M4U_ID(SMI_L14_ID, 14) 338 + #define M4U_PORT_L14_GCAMSV_B_UFEO_2 MTK_M4U_ID(SMI_L14_ID, 15) 339 + #define M4U_PORT_L14_GCAMSV_D_UFEO_1 MTK_M4U_ID(SMI_L14_ID, 16) 340 + #define M4U_PORT_L14_GCAMSV_D_UFEO_2 MTK_M4U_ID(SMI_L14_ID, 17) 341 + #define M4U_PORT_L14_PDAO_B MTK_M4U_ID(SMI_L14_ID, 18) 342 + #define M4U_PORT_L14_IPUI MTK_M4U_ID(SMI_L14_ID, 19) 343 + #define M4U_PORT_L14_IPUO MTK_M4U_ID(SMI_L14_ID, 20) 344 + #define M4U_PORT_L14_IPU3O MTK_M4U_ID(SMI_L14_ID, 21) 345 + #define M4U_PORT_L14_FAKE MTK_M4U_ID(SMI_L14_ID, 22) 346 + 347 + /* LARB 15 -- IMG-D */ 348 + #define M4U_PORT_L15_VIPI_D1 MTK_M4U_ID(SMI_L15_ID, 0) 349 + #define M4U_PORT_L15_VIPBI_D1 MTK_M4U_ID(SMI_L15_ID, 1) 350 + #define M4U_PORT_L15_SMTI_D6 MTK_M4U_ID(SMI_L15_ID, 2) 351 + #define M4U_PORT_L15_TNCSTI_D1 MTK_M4U_ID(SMI_L15_ID, 3) 352 + #define M4U_PORT_L15_TNCSTI_D4 MTK_M4U_ID(SMI_L15_ID, 4) 353 + #define M4U_PORT_L15_SMTI_D4 MTK_M4U_ID(SMI_L15_ID, 5) 354 + #define M4U_PORT_L15_IMG3O_D1 MTK_M4U_ID(SMI_L15_ID, 6) 355 + #define M4U_PORT_L15_IMG3BO_D1 MTK_M4U_ID(SMI_L15_ID, 7) 356 + #define M4U_PORT_L15_IMG3CO_D1 MTK_M4U_ID(SMI_L15_ID, 8) 357 + #define M4U_PORT_L15_IMG2O_D1 MTK_M4U_ID(SMI_L15_ID, 9) 358 + #define M4U_PORT_L15_SMTI_D9 MTK_M4U_ID(SMI_L15_ID, 10) 359 + #define M4U_PORT_L15_SMTO_D4 MTK_M4U_ID(SMI_L15_ID, 11) 360 + #define M4U_PORT_L15_FEO_D1 MTK_M4U_ID(SMI_L15_ID, 12) 361 + #define M4U_PORT_L15_TNCSO_D1 MTK_M4U_ID(SMI_L15_ID, 13) 362 + #define M4U_PORT_L15_TNCSTO_D1 MTK_M4U_ID(SMI_L15_ID, 14) 363 + #define M4U_PORT_L15_SMTO_D6 
MTK_M4U_ID(SMI_L15_ID, 15) 364 + #define M4U_PORT_L15_SMTO_D9 MTK_M4U_ID(SMI_L15_ID, 16) 365 + #define M4U_PORT_L15_TNCO_D1 MTK_M4U_ID(SMI_L15_ID, 17) 366 + #define M4U_PORT_L15_TNCO_D1_N MTK_M4U_ID(SMI_L15_ID, 18) 367 + 368 + /* LARB 16A -- CAM */ 369 + #define M4U_PORT_L16A_IMGO_R1 MTK_M4U_ID(SMI_L16A_ID, 0) 370 + #define M4U_PORT_L16A_CQI_R1 MTK_M4U_ID(SMI_L16A_ID, 1) 371 + #define M4U_PORT_L16A_CQI_R2 MTK_M4U_ID(SMI_L16A_ID, 2) 372 + #define M4U_PORT_L16A_BPCI_R1 MTK_M4U_ID(SMI_L16A_ID, 3) 373 + #define M4U_PORT_L16A_LSCI_R1 MTK_M4U_ID(SMI_L16A_ID, 4) 374 + #define M4U_PORT_L16A_RAWI_R2 MTK_M4U_ID(SMI_L16A_ID, 5) 375 + #define M4U_PORT_L16A_RAWI_R3 MTK_M4U_ID(SMI_L16A_ID, 6) 376 + #define M4U_PORT_L16A_UFDI_R2 MTK_M4U_ID(SMI_L16A_ID, 7) 377 + #define M4U_PORT_L16A_UFDI_R3 MTK_M4U_ID(SMI_L16A_ID, 8) 378 + #define M4U_PORT_L16A_RAWI_R4 MTK_M4U_ID(SMI_L16A_ID, 9) 379 + #define M4U_PORT_L16A_RAWI_R5 MTK_M4U_ID(SMI_L16A_ID, 10) 380 + #define M4U_PORT_L16A_AAI_R1 MTK_M4U_ID(SMI_L16A_ID, 11) 381 + #define M4U_PORT_L16A_UFDI_R5 MTK_M4U_ID(SMI_L16A_ID, 12) 382 + #define M4U_PORT_L16A_FHO_R1 MTK_M4U_ID(SMI_L16A_ID, 13) 383 + #define M4U_PORT_L16A_AAO_R1 MTK_M4U_ID(SMI_L16A_ID, 14) 384 + #define M4U_PORT_L16A_TSFSO_R1 MTK_M4U_ID(SMI_L16A_ID, 15) 385 + #define M4U_PORT_L16A_FLKO_R1 MTK_M4U_ID(SMI_L16A_ID, 16) 386 + 387 + /* LARB 16B -- CAM */ 388 + #define M4U_PORT_L16B_IMGO_R1 MTK_M4U_ID(SMI_L16B_ID, 0) 389 + #define M4U_PORT_L16B_CQI_R1 MTK_M4U_ID(SMI_L16B_ID, 1) 390 + #define M4U_PORT_L16B_CQI_R2 MTK_M4U_ID(SMI_L16B_ID, 2) 391 + #define M4U_PORT_L16B_BPCI_R1 MTK_M4U_ID(SMI_L16B_ID, 3) 392 + #define M4U_PORT_L16B_LSCI_R1 MTK_M4U_ID(SMI_L16B_ID, 4) 393 + #define M4U_PORT_L16B_RAWI_R2 MTK_M4U_ID(SMI_L16B_ID, 5) 394 + #define M4U_PORT_L16B_RAWI_R3 MTK_M4U_ID(SMI_L16B_ID, 6) 395 + #define M4U_PORT_L16B_UFDI_R2 MTK_M4U_ID(SMI_L16B_ID, 7) 396 + #define M4U_PORT_L16B_UFDI_R3 MTK_M4U_ID(SMI_L16B_ID, 8) 397 + #define M4U_PORT_L16B_RAWI_R4 MTK_M4U_ID(SMI_L16B_ID, 9) 398 + #define 
M4U_PORT_L16B_RAWI_R5 MTK_M4U_ID(SMI_L16B_ID, 10) 399 + #define M4U_PORT_L16B_AAI_R1 MTK_M4U_ID(SMI_L16B_ID, 11) 400 + #define M4U_PORT_L16B_UFDI_R5 MTK_M4U_ID(SMI_L16B_ID, 12) 401 + #define M4U_PORT_L16B_FHO_R1 MTK_M4U_ID(SMI_L16B_ID, 13) 402 + #define M4U_PORT_L16B_AAO_R1 MTK_M4U_ID(SMI_L16B_ID, 14) 403 + #define M4U_PORT_L16B_TSFSO_R1 MTK_M4U_ID(SMI_L16B_ID, 15) 404 + #define M4U_PORT_L16B_FLKO_R1 MTK_M4U_ID(SMI_L16B_ID, 16) 405 + 406 + /* LARB 17A -- CAM */ 407 + #define M4U_PORT_L17A_YUVO_R1 MTK_M4U_ID(SMI_L17A_ID, 0) 408 + #define M4U_PORT_L17A_YUVO_R3 MTK_M4U_ID(SMI_L17A_ID, 1) 409 + #define M4U_PORT_L17A_YUVCO_R1 MTK_M4U_ID(SMI_L17A_ID, 2) 410 + #define M4U_PORT_L17A_YUVO_R2 MTK_M4U_ID(SMI_L17A_ID, 3) 411 + #define M4U_PORT_L17A_RZH1N2TO_R1 MTK_M4U_ID(SMI_L17A_ID, 4) 412 + #define M4U_PORT_L17A_DRZS4NO_R1 MTK_M4U_ID(SMI_L17A_ID, 5) 413 + #define M4U_PORT_L17A_TNCSO_R1 MTK_M4U_ID(SMI_L17A_ID, 6) 414 + 415 + /* LARB 17B -- CAM */ 416 + #define M4U_PORT_L17B_YUVO_R1 MTK_M4U_ID(SMI_L17B_ID, 0) 417 + #define M4U_PORT_L17B_YUVO_R3 MTK_M4U_ID(SMI_L17B_ID, 1) 418 + #define M4U_PORT_L17B_YUVCO_R1 MTK_M4U_ID(SMI_L17B_ID, 2) 419 + #define M4U_PORT_L17B_YUVO_R2 MTK_M4U_ID(SMI_L17B_ID, 3) 420 + #define M4U_PORT_L17B_RZH1N2TO_R1 MTK_M4U_ID(SMI_L17B_ID, 4) 421 + #define M4U_PORT_L17B_DRZS4NO_R1 MTK_M4U_ID(SMI_L17B_ID, 5) 422 + #define M4U_PORT_L17B_TNCSO_R1 MTK_M4U_ID(SMI_L17B_ID, 6) 423 + 424 + /* LARB 19 -- VENC */ 425 + #define M4U_PORT_L19_VENC_RCPU MTK_M4U_ID(SMI_L19_ID, 0) 426 + #define M4U_PORT_L19_VENC_REC MTK_M4U_ID(SMI_L19_ID, 1) 427 + #define M4U_PORT_L19_VENC_BSDMA MTK_M4U_ID(SMI_L19_ID, 2) 428 + #define M4U_PORT_L19_VENC_SV_COMV MTK_M4U_ID(SMI_L19_ID, 3) 429 + #define M4U_PORT_L19_VENC_RD_COMV MTK_M4U_ID(SMI_L19_ID, 4) 430 + #define M4U_PORT_L19_VENC_NBM_RDMA MTK_M4U_ID(SMI_L19_ID, 5) 431 + #define M4U_PORT_L19_VENC_NBM_RDMA_LITE MTK_M4U_ID(SMI_L19_ID, 6) 432 + #define M4U_PORT_L19_JPGENC_Y_RDMA MTK_M4U_ID(SMI_L19_ID, 7) 433 + #define 
M4U_PORT_L19_JPGENC_C_RDMA MTK_M4U_ID(SMI_L19_ID, 8) 434 + #define M4U_PORT_L19_JPGENC_Q_TABLE MTK_M4U_ID(SMI_L19_ID, 9) 435 + #define M4U_PORT_L19_VENC_SUB_W_LUMA MTK_M4U_ID(SMI_L19_ID, 10) 436 + #define M4U_PORT_L19_VENC_FCS_NBM_RDMA MTK_M4U_ID(SMI_L19_ID, 11) 437 + #define M4U_PORT_L19_JPGENC_BSDMA MTK_M4U_ID(SMI_L19_ID, 12) 438 + #define M4U_PORT_L19_JPGDEC_WDMA_0 MTK_M4U_ID(SMI_L19_ID, 13) 439 + #define M4U_PORT_L19_JPGDEC_BSDMA_0 MTK_M4U_ID(SMI_L19_ID, 14) 440 + #define M4U_PORT_L19_VENC_NBM_WDMA MTK_M4U_ID(SMI_L19_ID, 15) 441 + #define M4U_PORT_L19_VENC_NBM_WDMA_LITE MTK_M4U_ID(SMI_L19_ID, 16) 442 + #define M4U_PORT_L19_VENC_FCS_NBM_WDMA MTK_M4U_ID(SMI_L19_ID, 17) 443 + #define M4U_PORT_L19_JPGDEC_WDMA_1 MTK_M4U_ID(SMI_L19_ID, 18) 444 + #define M4U_PORT_L19_JPGDEC_BSDMA_1 MTK_M4U_ID(SMI_L19_ID, 19) 445 + #define M4U_PORT_L19_JPGDEC_HUFF_OFFSET_1 MTK_M4U_ID(SMI_L19_ID, 20) 446 + #define M4U_PORT_L19_JPGDEC_HUFF_OFFSET_0 MTK_M4U_ID(SMI_L19_ID, 21) 447 + #define M4U_PORT_L19_VENC_CUR_LUMA MTK_M4U_ID(SMI_L19_ID, 22) 448 + #define M4U_PORT_L19_VENC_CUR_CHROMA MTK_M4U_ID(SMI_L19_ID, 23) 449 + #define M4U_PORT_L19_VENC_REF_LUMA MTK_M4U_ID(SMI_L19_ID, 24) 450 + #define M4U_PORT_L19_VENC_REF_CHROMA MTK_M4U_ID(SMI_L19_ID, 25) 451 + #define M4U_PORT_L19_VENC_SUB_R_LUMA MTK_M4U_ID(SMI_L19_ID, 26) 452 + 453 + /* LARB 21 -- VDEC-CORE0 */ 454 + #define M4U_PORT_L21_HW_VDEC_MC_EXT MTK_M4U_ID(SMI_L21_ID, 0) 455 + #define M4U_PORT_L21_HW_VDEC_UFO_EXT MTK_M4U_ID(SMI_L21_ID, 1) 456 + #define M4U_PORT_L21_HW_VDEC_PP_EXT MTK_M4U_ID(SMI_L21_ID, 2) 457 + #define M4U_PORT_L21_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(SMI_L21_ID, 3) 458 + #define M4U_PORT_L21_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(SMI_L21_ID, 4) 459 + #define M4U_PORT_L21_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(SMI_L21_ID, 5) 460 + #define M4U_PORT_L21_HW_VDEC_TILE_EXT MTK_M4U_ID(SMI_L21_ID, 6) 461 + #define M4U_PORT_L21_HW_VDEC_VLD_EXT MTK_M4U_ID(SMI_L21_ID, 7) 462 + #define M4U_PORT_L21_HW_VDEC_VLD2_EXT MTK_M4U_ID(SMI_L21_ID, 8) 463 + #define 
M4U_PORT_L21_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(SMI_L21_ID, 9) 464 + #define M4U_PORT_L21_HW_VDEC_UFO_EXT_C MTK_M4U_ID(SMI_L21_ID, 10) 465 + 466 + /* LARB 23 -- VDEC-SOC */ 467 + #define M4U_PORT_L23_HW_VDEC_LAT0_VLD_EXT MTK_M4U_ID(SMI_L23_ID, 0) 468 + #define M4U_PORT_L23_HW_VDEC_LAT0_VLD2_EXT MTK_M4U_ID(SMI_L23_ID, 1) 469 + #define M4U_PORT_L23_HW_VDEC_LAT0_AVC_MV_EXT MTK_M4U_ID(SMI_L23_ID, 2) 470 + #define M4U_PORT_L23_HW_VDEC_LAT0_PRED_RD_EXT MTK_M4U_ID(SMI_L23_ID, 3) 471 + #define M4U_PORT_L23_HW_VDEC_LAT0_TILE_EXT MTK_M4U_ID(SMI_L23_ID, 4) 472 + #define M4U_PORT_L23_HW_VDEC_LAT0_WDMA_EXT MTK_M4U_ID(SMI_L23_ID, 5) 473 + #define M4U_PORT_L23_HW_VDEC_UFO_ENC_EXT MTK_M4U_ID(SMI_L23_ID, 6) 474 + #define M4U_PORT_L23_HW_VDEC_UFO_ENC_EXT_C MTK_M4U_ID(SMI_L23_ID, 7) 475 + #define M4U_PORT_L23_HW_VDEC_MC_EXT_C MTK_M4U_ID(SMI_L23_ID, 8) 476 + 477 + /* LARB 27 -- CCU */ 478 + #define M4U_PORT_L27_CCUI MTK_M4U_ID(SMI_L27_ID, 0) 479 + #define M4U_PORT_L27_CCUO MTK_M4U_ID(SMI_L27_ID, 1) 480 + #define M4U_PORT_L27_CCUI2 MTK_M4U_ID(SMI_L27_ID, 2) 481 + #define M4U_PORT_L27_CCUO2 MTK_M4U_ID(SMI_L27_ID, 3) 482 + 483 + /* LARB 28 -- AXI-CCU */ 484 + #define M4U_PORT_L28_CCU_AXI_0 MTK_M4U_ID(SMI_L28_ID, 0) 485 + 486 + /* infra/peri */ 487 + #define IFR_IOMMU_PORT_PCIE_0 MTK_IFAIOMMU_PERI_ID(0) 488 + 489 + #endif
-1
include/linux/amd-iommu.h
··· 32 32 struct pci_dev; 33 33 34 34 extern int amd_iommu_detect(void); 35 - extern int amd_iommu_init_hardware(void); 36 35 37 36 /** 38 37 * amd_iommu_init_device() - Init device for use with IOMMUv2 driver
-2
include/linux/dmar.h
··· 106 106 extern int dmar_table_init(void); 107 107 extern int dmar_dev_scope_init(void); 108 108 extern void dmar_register_bus_notifier(void); 109 - extern int dmar_parse_dev_scope(void *start, void *end, int *cnt, 110 - struct dmar_dev_scope **devices, u16 segment); 111 109 extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt); 112 110 extern void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt); 113 111 extern int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
+15
include/linux/iommu.h
··· 196 196 IOMMU_DEV_FEAT_IOPF, 197 197 }; 198 198 199 + #define IOMMU_NO_PASID (0U) /* Reserved for DMA w/o PASID */ 200 + #define IOMMU_FIRST_GLOBAL_PASID (1U) /*starting range for allocation */ 199 201 #define IOMMU_PASID_INVALID (-1U) 200 202 typedef unsigned int ioasid_t; 201 203 ··· 411 409 * @priv: IOMMU Driver private data 412 410 * @max_pasids: number of PASIDs this device can consume 413 411 * @attach_deferred: the dma domain attachment is deferred 412 + * @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs 413 + * @require_direct: device requires IOMMU_RESV_DIRECT regions 414 414 * 415 415 * TODO: migrate other per device data pointers under iommu_dev_data, e.g. 416 416 * struct iommu_group *iommu_group; ··· 426 422 void *priv; 427 423 u32 max_pasids; 428 424 u32 attach_deferred:1; 425 + u32 pci_32bit_workaround:1; 426 + u32 require_direct:1; 429 427 }; 430 428 431 429 int iommu_device_register(struct iommu_device *iommu, ··· 733 727 struct iommu_domain * 734 728 iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid, 735 729 unsigned int type); 730 + ioasid_t iommu_alloc_global_pasid(struct device *dev); 731 + void iommu_free_global_pasid(ioasid_t pasid); 736 732 #else /* CONFIG_IOMMU_API */ 737 733 738 734 struct iommu_ops {}; ··· 1096 1088 { 1097 1089 return NULL; 1098 1090 } 1091 + 1092 + static inline ioasid_t iommu_alloc_global_pasid(struct device *dev) 1093 + { 1094 + return IOMMU_PASID_INVALID; 1095 + } 1096 + 1097 + static inline void iommu_free_global_pasid(ioasid_t pasid) {} 1099 1098 #endif /* CONFIG_IOMMU_API */ 1100 1099 1101 1100 /**
+1
include/soc/mediatek/smi.h
··· 13 13 14 14 enum iommu_atf_cmd { 15 15 IOMMU_ATF_CMD_CONFIG_SMI_LARB, /* For mm master to en/disable iommu */ 16 + IOMMU_ATF_CMD_CONFIG_INFRA_IOMMU, /* For infra master to enable iommu */ 16 17 IOMMU_ATF_CMD_MAX, 17 18 }; 18 19