Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'iommu-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull iommu fixes from Will Deacon:
"Here's another round of IOMMU fixes for -rc6 consisting mainly of a
bunch of independent driver fixes. Thomas agreed for me to take the
x86 'tboot' fix here, as it fixes a regression introduced by a vt-d
change.

- Fix intel iommu driver when running on devices without VCCAP_REG

- Fix swiotlb and "iommu=pt" interaction under TXT (tboot)

- Fix missing return value check during device probe()

- Fix probe ordering for Qualcomm SMMU implementation

- Ensure page-sized mappings are used for AMD IOMMU buffers with SNP
RMP"

* tag 'iommu-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
iommu/vt-d: Don't read VCCAP register unless it exists
x86/tboot: Don't disable swiotlb when iommu is forced on
iommu: Check return of __iommu_attach_device()
arm-smmu-qcom: Ensure the qcom_scm driver has finished probing
iommu/amd: Enforce 4k mapping for certain IOMMU data structures

+37 -16
+1 -4
arch/x86/kernel/tboot.c
@@ -514,13 +514,10 @@
 	if (!tboot_enabled())
 		return 0;
 
-	if (no_iommu || swiotlb || dmar_disabled)
+	if (no_iommu || dmar_disabled)
 		pr_warn("Forcing Intel-IOMMU to enabled\n");
 
 	dmar_disabled = 0;
-#ifdef CONFIG_SWIOTLB
-	swiotlb = 0;
-#endif
 	no_iommu = 0;
 
 	return 1;
+22 -5
drivers/iommu/amd/init.c
@@ -29,6 +29,7 @@
 #include <asm/iommu_table.h>
 #include <asm/io_apic.h>
 #include <asm/irq_remapping.h>
+#include <asm/set_memory.h>
 
 #include <linux/crash_dump.h>
 
@@ -673,11 +672,27 @@
 	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
 }
 
+static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
+					 gfp_t gfp, size_t size)
+{
+	int order = get_order(size);
+	void *buf = (void *)__get_free_pages(gfp, order);
+
+	if (buf &&
+	    iommu_feature(iommu, FEATURE_SNP) &&
+	    set_memory_4k((unsigned long)buf, (1 << order))) {
+		free_pages((unsigned long)buf, order);
+		buf = NULL;
+	}
+
+	return buf;
+}
+
 /* allocates the memory where the IOMMU will log its events to */
 static int __init alloc_event_buffer(struct amd_iommu *iommu)
 {
-	iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-						  get_order(EVT_BUFFER_SIZE));
+	iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
+					      EVT_BUFFER_SIZE);
 
 	return iommu->evt_buf ? 0 : -ENOMEM;
 }
@@ -732,8 +715,8 @@
 /* allocates the memory where the IOMMU will log its events to */
 static int __init alloc_ppr_log(struct amd_iommu *iommu)
 {
-	iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-						  get_order(PPR_LOG_SIZE));
+	iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
+					      PPR_LOG_SIZE);
 
 	return iommu->ppr_log ? 0 : -ENOMEM;
 }
@@ -855,7 +838,7 @@
 
 static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
 {
-	iommu->cmd_sem = (void *)get_zeroed_page(GFP_KERNEL);
+	iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1);
 
 	return iommu->cmd_sem ? 0 : -ENOMEM;
 }
+4
drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -69,6 +69,10 @@
 {
 	struct qcom_smmu *qsmmu;
 
+	/* Check to make sure qcom_scm has finished probing */
+	if (!qcom_scm_is_available())
+		return ERR_PTR(-EPROBE_DEFER);
+
 	qsmmu = devm_kzalloc(smmu->dev, sizeof(*qsmmu), GFP_KERNEL);
 	if (!qsmmu)
 		return ERR_PTR(-ENOMEM);
+2 -1
drivers/iommu/intel/dmar.c
@@ -986,7 +986,8 @@
 		warn_invalid_dmar(phys_addr, " returns all ones");
 		goto unmap;
 	}
-	iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG);
+	if (ecap_vcs(iommu->ecap))
+		iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG);
 
 	/* the registers might be more than one page */
 	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
+2 -2
drivers/iommu/intel/iommu.c
@@ -1833,7 +1833,7 @@
 		if (ecap_prs(iommu->ecap))
 			intel_svm_finish_prq(iommu);
 	}
-	if (ecap_vcs(iommu->ecap) && vccap_pasid(iommu->vccap))
+	if (vccap_pasid(iommu->vccap))
 		ioasid_unregister_allocator(&iommu->pasid_allocator);
 
 #endif
@@ -3212,7 +3212,7 @@
 	 * is active. All vIOMMU allocators will eventually be calling the same
 	 * host allocator.
 	 */
-	if (!ecap_vcs(iommu->ecap) || !vccap_pasid(iommu->vccap))
+	if (!vccap_pasid(iommu->vccap))
 		return;
 
 	pr_info("Register custom PASID allocator\n");
+6 -4
drivers/iommu/iommu.c
@@ -264,15 +264,17 @@
 	 */
 	iommu_alloc_default_domain(group, dev);
 
-	if (group->default_domain)
+	if (group->default_domain) {
 		ret = __iommu_attach_device(group->default_domain, dev);
+		if (ret) {
+			iommu_group_put(group);
+			goto err_release;
+		}
+	}
 
 	iommu_create_device_direct_mappings(group, dev);
 
 	iommu_group_put(group);
-
-	if (ret)
-		goto err_release;
 
 	if (ops->probe_finalize)
 		ops->probe_finalize(dev);