Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/msm: Support IO_PGTABLE_QUIRK_NO_WARN

With user managed VMs and multiple queues, it is in theory possible to
trigger map/unmap errors. These will (in a later patch) mark the VM as
unusable. But we want to tell the io-pgtable helpers not to spam the
log. In addition, in the unmap path, we don't want to bail early from
the unmap, to ensure we don't leave some dangling pages mapped.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
Tested-by: Antonino Maniscalco <antomani103@gmail.com>
Reviewed-by: Antonino Maniscalco <antomani103@gmail.com>
Patchwork: https://patchwork.freedesktop.org/patch/661520/

authored by

Rob Clark and committed by
Rob Clark
2b93efeb 92395af6

+20 -7
+1 -1
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
··· 2280 2280 { 2281 2281 struct msm_mmu *mmu; 2282 2282 2283 - mmu = msm_iommu_pagetable_create(to_msm_vm(gpu->vm)->mmu); 2283 + mmu = msm_iommu_pagetable_create(to_msm_vm(gpu->vm)->mmu, kernel_managed); 2284 2284 2285 2285 if (IS_ERR(mmu)) 2286 2286 return ERR_CAST(mmu);
+18 -5
drivers/gpu/drm/msm/msm_iommu.c
··· 94 94 { 95 95 struct msm_iommu_pagetable *pagetable = to_pagetable(mmu); 96 96 struct io_pgtable_ops *ops = pagetable->pgtbl_ops; 97 + int ret = 0; 97 98 98 99 while (size) { 99 - size_t unmapped, pgsize, count; 100 + size_t pgsize, count; 101 + ssize_t unmapped; 100 102 101 103 pgsize = calc_pgsize(pagetable, iova, iova, size, &count); 102 104 103 105 unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL); 104 - if (!unmapped) 105 - break; 106 + if (unmapped <= 0) { 107 + ret = -EINVAL; 108 + /* 109 + * Continue attempting to unmap the remainder of the 110 + * range, so we don't end up with some dangling 111 + * mapped pages 112 + */ 113 + unmapped = PAGE_SIZE; 114 + } 106 115 107 116 iova += unmapped; 108 117 size -= unmapped; ··· 119 110 120 111 iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain); 121 112 122 - return (size == 0) ? 0 : -EINVAL; 113 + return ret; 123 114 } 124 115 125 116 static int msm_iommu_pagetable_map_prr(struct msm_mmu *mmu, u64 iova, size_t len, int prot) ··· 333 324 static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev, 334 325 unsigned long iova, int flags, void *arg); 335 326 336 - struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent) 327 + struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_managed) 337 328 { 338 329 struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev); 339 330 struct msm_iommu *iommu = to_msm_iommu(parent); ··· 366 357 /* The incoming cfg will have the TTBR1 quirk enabled */ 367 358 ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1; 368 359 ttbr0_cfg.tlb = &tlb_ops; 360 + 361 + if (!kernel_managed) { 362 + ttbr0_cfg.quirks |= IO_PGTABLE_QUIRK_NO_WARN; 363 + } 369 364 370 365 pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, 371 366 &ttbr0_cfg, pagetable);
+1 -1
drivers/gpu/drm/msm/msm_mmu.h
··· 51 51 mmu->handler = handler; 52 52 } 53 53 54 - struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent); 54 + struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_managed); 55 55 56 56 int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr, 57 57 int *asid);