Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Tags: kernel, os, linux

Merge tag 'iommu-fixes-v3.5-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU fixes from Joerg Roedel:
"The patches fix several issues in the AMD IOMMU driver, the NVidia
SMMU driver, and the DMA debug code.

The most important fix for the AMD IOMMU solves a problem with SR-IOV
devices where virtual functions did not work with IOMMU enabled. The
NVidia SMMU patch fixes a possible sleep while spin-lock situation
(queued the small fix for v3.5, a better but more intrusive fix is
coming for v3.6). The DMA debug patches fix a possible data
corruption issue due to bool vs u32 usage."

* tag 'iommu-fixes-v3.5-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
iommu/amd: fix type bug in flush code
dma-debug: debugfs_create_bool() takes a u32 pointer
iommu/tegra: smmu: Fix unsleepable memory allocation
iommu/amd: Initialize dma_ops for hotplug and sriov devices
iommu/amd: Fix missing iommu_shutdown initialization in passthrough mode

+18 -9
+10 -1
drivers/iommu/amd_iommu.c
··· 83 83 static ATOMIC_NOTIFIER_HEAD(ppr_notifier); 84 84 int amd_iommu_max_glx_val = -1; 85 85 86 + static struct dma_map_ops amd_iommu_dma_ops; 87 + 86 88 /* 87 89 * general struct to manage commands send to an IOMMU 88 90 */ ··· 404 402 return; 405 403 406 404 de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir, 407 - (u32 *)&amd_iommu_unmap_flush); 405 + &amd_iommu_unmap_flush); 408 406 409 407 amd_iommu_stats_add(&compl_wait); 410 408 amd_iommu_stats_add(&cnt_map_single); ··· 2268 2266 spin_lock_irqsave(&iommu_pd_list_lock, flags); 2269 2267 list_add_tail(&dma_domain->list, &iommu_pd_list); 2270 2268 spin_unlock_irqrestore(&iommu_pd_list_lock, flags); 2269 + 2270 + dev_data = get_dev_data(dev); 2271 + 2272 + if (!dev_data->passthrough) 2273 + dev->archdata.dma_ops = &amd_iommu_dma_ops; 2274 + else 2275 + dev->archdata.dma_ops = &nommu_dma_ops; 2271 2276 2272 2277 break; 2273 2278 case BUS_NOTIFY_DEL_DEVICE:
+3 -3
drivers/iommu/amd_iommu_init.c
··· 129 129 to handle */ 130 130 LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings 131 131 we find in ACPI */ 132 - bool amd_iommu_unmap_flush; /* if true, flush on every unmap */ 132 + u32 amd_iommu_unmap_flush; /* if true, flush on every unmap */ 133 133 134 134 LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the 135 135 system */ ··· 1641 1641 1642 1642 amd_iommu_init_api(); 1643 1643 1644 + x86_platform.iommu_shutdown = disable_iommus; 1645 + 1644 1646 if (iommu_pass_through) 1645 1647 goto out; 1646 1648 ··· 1650 1648 printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n"); 1651 1649 else 1652 1650 printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n"); 1653 - 1654 - x86_platform.iommu_shutdown = disable_iommus; 1655 1651 1656 1652 out: 1657 1653 return ret;
+1 -1
drivers/iommu/amd_iommu_types.h
··· 652 652 * If true, the addresses will be flushed on unmap time, not when 653 653 * they are reused 654 654 */ 655 - extern bool amd_iommu_unmap_flush; 655 + extern u32 amd_iommu_unmap_flush; 656 656 657 657 /* Smallest number of PASIDs supported by any IOMMU in the system */ 658 658 extern u32 amd_iommu_max_pasids;
+2 -2
drivers/iommu/tegra-smmu.c
··· 550 550 return 0; 551 551 552 552 as->pte_count = devm_kzalloc(smmu->dev, 553 - sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_KERNEL); 553 + sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_ATOMIC); 554 554 if (!as->pte_count) { 555 555 dev_err(smmu->dev, 556 556 "failed to allocate smmu_device PTE cunters\n"); 557 557 return -ENOMEM; 558 558 } 559 - as->pdir_page = alloc_page(GFP_KERNEL | __GFP_DMA); 559 + as->pdir_page = alloc_page(GFP_ATOMIC | __GFP_DMA); 560 560 if (!as->pdir_page) { 561 561 dev_err(smmu->dev, 562 562 "failed to allocate smmu_device page directory\n");
+2 -2
lib/dma-debug.c
··· 78 78 static DEFINE_SPINLOCK(free_entries_lock); 79 79 80 80 /* Global disable flag - will be set in case of an error */ 81 - static bool global_disable __read_mostly; 81 + static u32 global_disable __read_mostly; 82 82 83 83 /* Global error count */ 84 84 static u32 error_count; ··· 657 657 658 658 global_disable_dent = debugfs_create_bool("disabled", 0444, 659 659 dma_debug_dent, 660 - (u32 *)&global_disable); 660 + &global_disable); 661 661 if (!global_disable_dent) 662 662 goto out_err; 663 663