Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/msm: Refactor address space initialization

Refactor how address space initialization works. Instead of having the
address space function create the MMU object (and thus require separate but
equal functions for gpummu and iommu) use a single function and pass the
MMU struct in. Make the generic code cleaner by using target specific
functions to create the address space so a2xx can do its own thing in its
own space. For all the other targets use a generic helper to initialize
IOMMU but leave the door open for newer targets to use customization
if they need it.

Reviewed-by: Rob Clark <robdclark@gmail.com>
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
Tested-by: Shawn Guo <shawn.guo@linaro.org>
[squash in rebase fixups]
Signed-off-by: Rob Clark <robdclark@chromium.org>

authored by

Jordan Crouse and committed by
Rob Clark
ccac7ce3 52da6d51

+86 -119
+16
drivers/gpu/drm/msm/adreno/a2xx_gpu.c
··· 401 401 return state; 402 402 } 403 403 404 + static struct msm_gem_address_space * 405 + a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev) 406 + { 407 + struct msm_mmu *mmu = msm_gpummu_new(&pdev->dev, gpu); 408 + struct msm_gem_address_space *aspace; 409 + 410 + aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M, 411 + SZ_16M + 0xfff * SZ_64K); 412 + 413 + if (IS_ERR(aspace) && !IS_ERR(mmu)) 414 + mmu->funcs->destroy(mmu); 415 + 416 + return aspace; 417 + } 418 + 404 419 /* Register offset defines for A2XX - copy of A3XX */ 405 420 static const unsigned int a2xx_register_offsets[REG_ADRENO_REGISTER_MAX] = { 406 421 REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE), ··· 444 429 #endif 445 430 .gpu_state_get = a2xx_gpu_state_get, 446 431 .gpu_state_put = adreno_gpu_state_put, 432 + .create_address_space = a2xx_create_address_space, 447 433 }, 448 434 }; 449 435
+1
drivers/gpu/drm/msm/adreno/a3xx_gpu.c
··· 441 441 #endif 442 442 .gpu_state_get = a3xx_gpu_state_get, 443 443 .gpu_state_put = adreno_gpu_state_put, 444 + .create_address_space = adreno_iommu_create_address_space, 444 445 }, 445 446 }; 446 447
+1
drivers/gpu/drm/msm/adreno/a4xx_gpu.c
··· 583 583 #endif 584 584 .gpu_state_get = a4xx_gpu_state_get, 585 585 .gpu_state_put = adreno_gpu_state_put, 586 + .create_address_space = adreno_iommu_create_address_space, 586 587 }, 587 588 .get_timestamp = a4xx_get_timestamp, 588 589 };
+1
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
··· 1445 1445 .gpu_busy = a5xx_gpu_busy, 1446 1446 .gpu_state_get = a5xx_gpu_state_get, 1447 1447 .gpu_state_put = a5xx_gpu_state_put, 1448 + .create_address_space = adreno_iommu_create_address_space, 1448 1449 }, 1449 1450 .get_timestamp = a5xx_get_timestamp, 1450 1451 };
+3 -4
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
··· 1114 1114 static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu) 1115 1115 { 1116 1116 struct iommu_domain *domain; 1117 + struct msm_mmu *mmu; 1117 1118 1118 1119 domain = iommu_domain_alloc(&platform_bus_type); 1119 1120 if (!domain) 1120 1121 return -ENODEV; 1121 1122 1122 - domain->geometry.aperture_start = 0x00000000; 1123 - domain->geometry.aperture_end = 0x7fffffff; 1124 - 1125 - gmu->aspace = msm_gem_address_space_create(gmu->dev, domain, "gmu"); 1123 + mmu = msm_iommu_new(gmu->dev, domain); 1124 + gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x7fffffff); 1126 1125 if (IS_ERR(gmu->aspace)) { 1127 1126 iommu_domain_free(domain); 1128 1127 return PTR_ERR(gmu->aspace);
+1
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
··· 893 893 #if defined(CONFIG_DRM_MSM_GPU_STATE) 894 894 .gpu_state_get = a6xx_gpu_state_get, 895 895 .gpu_state_put = a6xx_gpu_state_put, 896 + .create_address_space = adreno_iommu_create_address_space, 896 897 #endif 897 898 }, 898 899 .get_timestamp = a6xx_get_timestamp,
+17 -6
drivers/gpu/drm/msm/adreno/adreno_gpu.c
··· 185 185 return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid); 186 186 } 187 187 188 + struct msm_gem_address_space * 189 + adreno_iommu_create_address_space(struct msm_gpu *gpu, 190 + struct platform_device *pdev) 191 + { 192 + struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type); 193 + struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, iommu); 194 + struct msm_gem_address_space *aspace; 195 + 196 + aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M, 197 + 0xfffffff); 198 + 199 + if (IS_ERR(aspace) && !IS_ERR(mmu)) 200 + mmu->funcs->destroy(mmu); 201 + 202 + return aspace; 203 + } 204 + 188 205 int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) 189 206 { 190 207 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); ··· 1004 987 adreno_gpu->rev = config->rev; 1005 988 1006 989 adreno_gpu_config.ioname = "kgsl_3d0_reg_memory"; 1007 - 1008 - adreno_gpu_config.va_start = SZ_16M; 1009 - adreno_gpu_config.va_end = 0xffffffff; 1010 - /* maximum range of a2xx mmu */ 1011 - if (adreno_is_a2xx(adreno_gpu)) 1012 - adreno_gpu_config.va_end = SZ_16M + 0xfff * SZ_64K; 1013 990 1014 991 adreno_gpu_config.nr_rings = nr_rings; 1015 992
+8
drivers/gpu/drm/msm/adreno/adreno_gpu.h
··· 288 288 int adreno_gpu_state_put(struct msm_gpu_state *state); 289 289 290 290 /* 291 + * Common helper function to initialize the default address space for arm-smmu 292 + * attached targets 293 + */ 294 + struct msm_gem_address_space * 295 + adreno_iommu_create_address_space(struct msm_gpu *gpu, 296 + struct platform_device *pdev); 297 + 298 + /* 291 299 * For a5xx and a6xx targets load the zap shader that is used to pull the GPU 292 300 * out of secure mode 293 301 */
+5 -5
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
··· 794 794 { 795 795 struct iommu_domain *domain; 796 796 struct msm_gem_address_space *aspace; 797 + struct msm_mmu *mmu; 797 798 798 799 domain = iommu_domain_alloc(&platform_bus_type); 799 800 if (!domain) 800 801 return 0; 801 802 802 - domain->geometry.aperture_start = 0x1000; 803 - domain->geometry.aperture_end = 0xffffffff; 803 + mmu = msm_iommu_new(dpu_kms->dev->dev, domain); 804 + aspace = msm_gem_address_space_create(mmu, "dpu1", 805 + 0x1000, 0xfffffff); 804 806 805 - aspace = msm_gem_address_space_create(dpu_kms->dev->dev, 806 - domain, "dpu1"); 807 807 if (IS_ERR(aspace)) { 808 - iommu_domain_free(domain); 808 + mmu->funcs->destroy(mmu); 809 809 return PTR_ERR(aspace); 810 810 } 811 811
+8 -6
drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
··· 510 510 mdelay(16); 511 511 512 512 if (config->iommu) { 513 - aspace = msm_gem_address_space_create(&pdev->dev, 514 - config->iommu, "mdp4"); 513 + struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, 514 + config->iommu); 515 + 516 + aspace = msm_gem_address_space_create(mmu, 517 + "mdp4", 0x1000, 0xffffffff); 518 + 515 519 if (IS_ERR(aspace)) { 520 + if (!IS_ERR(mmu)) 521 + mmu->funcs->destroy(mmu); 516 522 ret = PTR_ERR(aspace); 517 523 goto fail; 518 524 } ··· 571 565 /* TODO: Chips that aren't apq8064 have a 200 Mhz max_clk */ 572 566 config.max_clk = 266667000; 573 567 config.iommu = iommu_domain_alloc(&platform_bus_type); 574 - if (config.iommu) { 575 - config.iommu->geometry.aperture_start = 0x1000; 576 - config.iommu->geometry.aperture_end = 0xffffffff; 577 - } 578 568 579 569 return &config; 580 570 }
-4
drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
··· 1017 1017 static struct mdp5_cfg_platform config = {}; 1018 1018 1019 1019 config.iommu = iommu_domain_alloc(&platform_bus_type); 1020 - if (config.iommu) { 1021 - config.iommu->geometry.aperture_start = 0x1000; 1022 - config.iommu->geometry.aperture_end = 0xffffffff; 1023 - } 1024 1020 1025 1021 return &config; 1026 1022 }
+9 -2
drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
··· 632 632 mdelay(16); 633 633 634 634 if (config->platform.iommu) { 635 + struct msm_mmu *mmu; 636 + 635 637 iommu_dev = &pdev->dev; 636 638 if (!dev_iommu_fwspec_get(iommu_dev)) 637 639 iommu_dev = iommu_dev->parent; 638 640 639 - aspace = msm_gem_address_space_create(iommu_dev, 640 - config->platform.iommu, "mdp5"); 641 + mmu = msm_iommu_new(iommu_dev, config->platform.iommu); 642 + 643 + aspace = msm_gem_address_space_create(mmu, "mdp5", 644 + 0x1000, 0xffffffff); 645 + 641 646 if (IS_ERR(aspace)) { 647 + if (!IS_ERR(mmu)) 648 + mmu->funcs->destroy(mmu); 642 649 ret = PTR_ERR(aspace); 643 650 goto fail; 644 651 }
+2 -6
drivers/gpu/drm/msm/msm_drv.h
··· 252 252 void msm_gem_address_space_put(struct msm_gem_address_space *aspace); 253 253 254 254 struct msm_gem_address_space * 255 - msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain, 256 - const char *name); 257 - 258 - struct msm_gem_address_space * 259 - msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu, 260 - const char *name, uint64_t va_start, uint64_t va_end); 255 + msm_gem_address_space_create(struct msm_mmu *mmu, const char *name, 256 + u64 va_start, u64 size); 261 257 262 258 int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu); 263 259 void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+7 -46
drivers/gpu/drm/msm/msm_gem_vma.c
··· 127 127 return 0; 128 128 } 129 129 130 - 131 130 struct msm_gem_address_space * 132 - msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain, 133 - const char *name) 131 + msm_gem_address_space_create(struct msm_mmu *mmu, const char *name, 132 + u64 va_start, u64 size) 134 133 { 135 134 struct msm_gem_address_space *aspace; 136 - u64 start = domain->geometry.aperture_start; 137 - u64 size = domain->geometry.aperture_end - start; 135 + 136 + if (IS_ERR(mmu)) 137 + return ERR_CAST(mmu); 138 138 139 139 aspace = kzalloc(sizeof(*aspace), GFP_KERNEL); 140 140 if (!aspace) ··· 142 142 143 143 spin_lock_init(&aspace->lock); 144 144 aspace->name = name; 145 - aspace->mmu = msm_iommu_new(dev, domain); 146 - if (IS_ERR(aspace->mmu)) { 147 - int ret = PTR_ERR(aspace->mmu); 145 + aspace->mmu = mmu; 148 146 149 - kfree(aspace); 150 - return ERR_PTR(ret); 151 - } 152 - 153 - /* 154 - * Attaching the IOMMU device changes the aperture values so use the 155 - * cached values instead 156 - */ 157 - drm_mm_init(&aspace->mm, start >> PAGE_SHIFT, size >> PAGE_SHIFT); 158 - 159 - kref_init(&aspace->kref); 160 - 161 - return aspace; 162 - } 163 - 164 - struct msm_gem_address_space * 165 - msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu, 166 - const char *name, uint64_t va_start, uint64_t va_end) 167 - { 168 - struct msm_gem_address_space *aspace; 169 - u64 size = va_end - va_start; 170 - 171 - aspace = kzalloc(sizeof(*aspace), GFP_KERNEL); 172 - if (!aspace) 173 - return ERR_PTR(-ENOMEM); 174 - 175 - spin_lock_init(&aspace->lock); 176 - aspace->name = name; 177 - aspace->mmu = msm_gpummu_new(dev, gpu); 178 - if (IS_ERR(aspace->mmu)) { 179 - int ret = PTR_ERR(aspace->mmu); 180 - 181 - kfree(aspace); 182 - return ERR_PTR(ret); 183 - } 184 - 185 - drm_mm_init(&aspace->mm, (va_start >> PAGE_SHIFT), 186 - size >> PAGE_SHIFT); 147 + drm_mm_init(&aspace->mm, va_start >> PAGE_SHIFT, size >> PAGE_SHIFT); 187 148 188 149 kref_init(&aspace->kref); 189 150
+2 -38
drivers/gpu/drm/msm/msm_gpu.c
··· 821 821 return 0; 822 822 } 823 823 824 - static struct msm_gem_address_space * 825 - msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev, 826 - uint64_t va_start, uint64_t va_end) 827 - { 828 - struct msm_gem_address_space *aspace; 829 - 830 - /* 831 - * Setup IOMMU.. eventually we will (I think) do this once per context 832 - * and have separate page tables per context. For now, to keep things 833 - * simple and to get something working, just use a single address space: 834 - */ 835 - if (!adreno_is_a2xx(to_adreno_gpu(gpu))) { 836 - struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type); 837 - if (!iommu) 838 - return NULL; 839 - 840 - iommu->geometry.aperture_start = va_start; 841 - iommu->geometry.aperture_end = va_end; 842 - 843 - DRM_DEV_INFO(gpu->dev->dev, "%s: using IOMMU\n", gpu->name); 844 - 845 - aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu"); 846 - if (IS_ERR(aspace)) 847 - iommu_domain_free(iommu); 848 - } else { 849 - aspace = msm_gem_address_space_create_a2xx(&pdev->dev, gpu, "gpu", 850 - va_start, va_end); 851 - } 852 - 853 - if (IS_ERR(aspace)) 854 - DRM_DEV_ERROR(gpu->dev->dev, "failed to init mmu: %ld\n", 855 - PTR_ERR(aspace)); 856 - 857 - return aspace; 858 - } 859 - 860 824 int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, 861 825 struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs, 862 826 const char *name, struct msm_gpu_config *config) ··· 893 929 894 930 msm_devfreq_init(gpu); 895 931 896 - gpu->aspace = msm_gpu_create_address_space(gpu, pdev, 897 - config->va_start, config->va_end); 932 + 933 + gpu->aspace = gpu->funcs->create_address_space(gpu, pdev); 898 934 899 935 if (gpu->aspace == NULL) 900 936 DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
+2 -2
drivers/gpu/drm/msm/msm_gpu.h
··· 21 21 22 22 struct msm_gpu_config { 23 23 const char *ioname; 24 - uint64_t va_start; 25 - uint64_t va_end; 26 24 unsigned int nr_rings; 27 25 }; 28 26 ··· 62 64 int (*gpu_state_put)(struct msm_gpu_state *state); 63 65 unsigned long (*gpu_get_freq)(struct msm_gpu *gpu); 64 66 void (*gpu_set_freq)(struct msm_gpu *gpu, unsigned long freq); 67 + struct msm_gem_address_space *(*create_address_space) 68 + (struct msm_gpu *gpu, struct platform_device *pdev); 65 69 }; 66 70 67 71 struct msm_gpu {
+3
drivers/gpu/drm/msm/msm_iommu.c
··· 70 70 struct msm_iommu *iommu; 71 71 int ret; 72 72 73 + if (!domain) 74 + return ERR_PTR(-ENODEV); 75 + 73 76 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); 74 77 if (!iommu) 75 78 return ERR_PTR(-ENOMEM);