Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/v3d: Delete v3d_dev->dev

We already have it in v3d_dev->drm.dev with zero additional pointer
chasing. Personally I don't like duplicated pointers like this
because:
- reviewers need to check whether the pointer is for the same or
different objects if there are multiple
- compilers have an easier time too

But it's also a bit of a bikeshed, so feel free to ignore.

Acked-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Eric Anholt <eric@anholt.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20200415074034.175360-10-daniel.vetter@ffwll.ch

+37 -38
+6 -6
drivers/gpu/drm/v3d/v3d_debugfs.c
··· 132 132 u32 ident0, ident1, ident2, ident3, cores; 133 133 int ret, core; 134 134 135 - ret = pm_runtime_get_sync(v3d->dev); 135 + ret = pm_runtime_get_sync(v3d->drm.dev); 136 136 if (ret < 0) 137 137 return ret; 138 138 ··· 187 187 (misccfg & V3D_MISCCFG_OVRTMUOUT) != 0); 188 188 } 189 189 190 - pm_runtime_mark_last_busy(v3d->dev); 191 - pm_runtime_put_autosuspend(v3d->dev); 190 + pm_runtime_mark_last_busy(v3d->drm.dev); 191 + pm_runtime_put_autosuspend(v3d->drm.dev); 192 192 193 193 return 0; 194 194 } ··· 219 219 int measure_ms = 1000; 220 220 int ret; 221 221 222 - ret = pm_runtime_get_sync(v3d->dev); 222 + ret = pm_runtime_get_sync(v3d->drm.dev); 223 223 if (ret < 0) 224 224 return ret; 225 225 ··· 245 245 cycles / (measure_ms * 1000), 246 246 (cycles / (measure_ms * 100)) % 10); 247 247 248 - pm_runtime_mark_last_busy(v3d->dev); 249 - pm_runtime_put_autosuspend(v3d->dev); 248 + pm_runtime_mark_last_busy(v3d->drm.dev); 249 + pm_runtime_put_autosuspend(v3d->drm.dev); 250 250 251 251 return 0; 252 252 }
+6 -6
drivers/gpu/drm/v3d/v3d_drv.c
··· 105 105 if (args->value != 0) 106 106 return -EINVAL; 107 107 108 - ret = pm_runtime_get_sync(v3d->dev); 108 + ret = pm_runtime_get_sync(v3d->drm.dev); 109 109 if (ret < 0) 110 110 return ret; 111 111 if (args->param >= DRM_V3D_PARAM_V3D_CORE0_IDENT0 && ··· 114 114 } else { 115 115 args->value = V3D_READ(offset); 116 116 } 117 - pm_runtime_mark_last_busy(v3d->dev); 118 - pm_runtime_put_autosuspend(v3d->dev); 117 + pm_runtime_mark_last_busy(v3d->drm.dev); 118 + pm_runtime_put_autosuspend(v3d->drm.dev); 119 119 return 0; 120 120 } 121 121 ··· 237 237 struct resource *res = 238 238 platform_get_resource_byname(v3d->pdev, IORESOURCE_MEM, name); 239 239 240 - *regs = devm_ioremap_resource(v3d->dev, res); 240 + *regs = devm_ioremap_resource(v3d->drm.dev, res); 241 241 return PTR_ERR_OR_ZERO(*regs); 242 242 } 243 243 ··· 255 255 if (IS_ERR(v3d)) 256 256 return PTR_ERR(v3d); 257 257 258 - v3d->dev = dev; 259 258 v3d->pdev = pdev; 260 259 drm = &v3d->drm; 261 260 ··· 344 345 345 346 v3d_gem_destroy(drm); 346 347 347 - dma_free_wc(v3d->dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr); 348 + dma_free_wc(v3d->drm.dev, 4096, v3d->mmu_scratch, 349 + v3d->mmu_scratch_paddr); 348 350 349 351 return 0; 350 352 }
-2
drivers/gpu/drm/v3d/v3d_drv.h
··· 14 14 #include "uapi/drm/v3d_drm.h" 15 15 16 16 struct clk; 17 - struct device; 18 17 struct platform_device; 19 18 struct reset_control; 20 19 ··· 46 47 int ver; 47 48 bool single_irq_line; 48 49 49 - struct device *dev; 50 50 struct platform_device *pdev; 51 51 void __iomem *hub_regs; 52 52 void __iomem *core_regs[3];
+9 -8
drivers/gpu/drm/v3d/v3d_gem.c
··· 370 370 dma_fence_put(job->irq_fence); 371 371 dma_fence_put(job->done_fence); 372 372 373 - pm_runtime_mark_last_busy(job->v3d->dev); 374 - pm_runtime_put_autosuspend(job->v3d->dev); 373 + pm_runtime_mark_last_busy(job->v3d->drm.dev); 374 + pm_runtime_put_autosuspend(job->v3d->drm.dev); 375 375 376 376 kfree(job); 377 377 } ··· 439 439 job->v3d = v3d; 440 440 job->free = free; 441 441 442 - ret = pm_runtime_get_sync(v3d->dev); 442 + ret = pm_runtime_get_sync(v3d->drm.dev); 443 443 if (ret < 0) 444 444 return ret; 445 445 ··· 458 458 return 0; 459 459 fail: 460 460 xa_destroy(&job->deps); 461 - pm_runtime_put_autosuspend(v3d->dev); 461 + pm_runtime_put_autosuspend(v3d->drm.dev); 462 462 return ret; 463 463 } 464 464 ··· 886 886 */ 887 887 drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1); 888 888 889 - v3d->pt = dma_alloc_wc(v3d->dev, pt_size, 889 + v3d->pt = dma_alloc_wc(v3d->drm.dev, pt_size, 890 890 &v3d->pt_paddr, 891 891 GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); 892 892 if (!v3d->pt) { 893 893 drm_mm_takedown(&v3d->mm); 894 - dev_err(v3d->dev, 894 + dev_err(v3d->drm.dev, 895 895 "Failed to allocate page tables. " 896 896 "Please ensure you have CMA enabled.\n"); 897 897 return -ENOMEM; ··· 903 903 ret = v3d_sched_init(v3d); 904 904 if (ret) { 905 905 drm_mm_takedown(&v3d->mm); 906 - dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt, 906 + dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt, 907 907 v3d->pt_paddr); 908 908 } 909 909 ··· 925 925 926 926 drm_mm_takedown(&v3d->mm); 927 927 928 - dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt, v3d->pt_paddr); 928 + dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt, 929 + v3d->pt_paddr); 929 930 }
+6 -6
drivers/gpu/drm/v3d/v3d_irq.c
··· 128 128 * always-allowed mode. 129 129 */ 130 130 if (intsts & V3D_INT_GMPV) 131 - dev_err(v3d->dev, "GMP violation\n"); 131 + dev_err(v3d->drm.dev, "GMP violation\n"); 132 132 133 133 /* V3D 4.2 wires the hub and core IRQs together, so if we & 134 134 * didn't see the common one then check hub for MMU IRQs. ··· 189 189 client = v3d41_axi_ids[axi_id]; 190 190 } 191 191 192 - dev_err(v3d->dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n", 192 + dev_err(v3d->drm.dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n", 193 193 client, axi_id, (long long)vio_addr, 194 194 ((intsts & V3D_HUB_INT_MMU_WRV) ? 195 195 ", write violation" : ""), ··· 221 221 if (irq1 == -EPROBE_DEFER) 222 222 return irq1; 223 223 if (irq1 > 0) { 224 - ret = devm_request_irq(v3d->dev, irq1, 224 + ret = devm_request_irq(v3d->drm.dev, irq1, 225 225 v3d_irq, IRQF_SHARED, 226 226 "v3d_core0", v3d); 227 227 if (ret) 228 228 goto fail; 229 - ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0), 229 + ret = devm_request_irq(v3d->drm.dev, platform_get_irq(v3d->pdev, 0), 230 230 v3d_hub_irq, IRQF_SHARED, 231 231 "v3d_hub", v3d); 232 232 if (ret) ··· 234 234 } else { 235 235 v3d->single_irq_line = true; 236 236 237 - ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0), 237 + ret = devm_request_irq(v3d->drm.dev, platform_get_irq(v3d->pdev, 0), 238 238 v3d_irq, IRQF_SHARED, 239 239 "v3d", v3d); 240 240 if (ret) ··· 246 246 247 247 fail: 248 248 if (ret != -EPROBE_DEFER) 249 - dev_err(v3d->dev, "IRQ setup failed: %d\n", ret); 249 + dev_err(v3d->drm.dev, "IRQ setup failed: %d\n", ret); 250 250 return ret; 251 251 } 252 252
+5 -5
drivers/gpu/drm/v3d/v3d_mmu.c
··· 40 40 ret = wait_for(!(V3D_READ(V3D_MMU_CTL) & 41 41 V3D_MMU_CTL_TLB_CLEARING), 100); 42 42 if (ret) 43 - dev_err(v3d->dev, "TLB clear wait idle pre-wait failed\n"); 43 + dev_err(v3d->drm.dev, "TLB clear wait idle pre-wait failed\n"); 44 44 45 45 V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) | 46 46 V3D_MMU_CTL_TLB_CLEAR); ··· 52 52 ret = wait_for(!(V3D_READ(V3D_MMU_CTL) & 53 53 V3D_MMU_CTL_TLB_CLEARING), 100); 54 54 if (ret) { 55 - dev_err(v3d->dev, "TLB clear wait idle failed\n"); 55 + dev_err(v3d->drm.dev, "TLB clear wait idle failed\n"); 56 56 return ret; 57 57 } 58 58 59 59 ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) & 60 60 V3D_MMUC_CONTROL_FLUSHING), 100); 61 61 if (ret) 62 - dev_err(v3d->dev, "MMUC flush wait idle failed\n"); 62 + dev_err(v3d->drm.dev, "MMUC flush wait idle failed\n"); 63 63 64 64 return ret; 65 65 } ··· 109 109 shmem_obj->base.size >> V3D_MMU_PAGE_SHIFT); 110 110 111 111 if (v3d_mmu_flush_all(v3d)) 112 - dev_err(v3d->dev, "MMU flush timeout\n"); 112 + dev_err(v3d->drm.dev, "MMU flush timeout\n"); 113 113 } 114 114 115 115 void v3d_mmu_remove_ptes(struct v3d_bo *bo) ··· 122 122 v3d->pt[page] = 0; 123 123 124 124 if (v3d_mmu_flush_all(v3d)) 125 - dev_err(v3d->dev, "MMU flush timeout\n"); 125 + dev_err(v3d->drm.dev, "MMU flush timeout\n"); 126 126 }
+5 -5
drivers/gpu/drm/v3d/v3d_sched.c
··· 403 403 msecs_to_jiffies(hang_limit_ms), 404 404 "v3d_bin"); 405 405 if (ret) { 406 - dev_err(v3d->dev, "Failed to create bin scheduler: %d.", ret); 406 + dev_err(v3d->drm.dev, "Failed to create bin scheduler: %d.", ret); 407 407 return ret; 408 408 } 409 409 ··· 413 413 msecs_to_jiffies(hang_limit_ms), 414 414 "v3d_render"); 415 415 if (ret) { 416 - dev_err(v3d->dev, "Failed to create render scheduler: %d.", 416 + dev_err(v3d->drm.dev, "Failed to create render scheduler: %d.", 417 417 ret); 418 418 v3d_sched_fini(v3d); 419 419 return ret; ··· 425 425 msecs_to_jiffies(hang_limit_ms), 426 426 "v3d_tfu"); 427 427 if (ret) { 428 - dev_err(v3d->dev, "Failed to create TFU scheduler: %d.", 428 + dev_err(v3d->drm.dev, "Failed to create TFU scheduler: %d.", 429 429 ret); 430 430 v3d_sched_fini(v3d); 431 431 return ret; ··· 438 438 msecs_to_jiffies(hang_limit_ms), 439 439 "v3d_csd"); 440 440 if (ret) { 441 - dev_err(v3d->dev, "Failed to create CSD scheduler: %d.", 441 + dev_err(v3d->drm.dev, "Failed to create CSD scheduler: %d.", 442 442 ret); 443 443 v3d_sched_fini(v3d); 444 444 return ret; ··· 450 450 msecs_to_jiffies(hang_limit_ms), 451 451 "v3d_cache_clean"); 452 452 if (ret) { 453 - dev_err(v3d->dev, "Failed to create CACHE_CLEAN scheduler: %d.", 453 + dev_err(v3d->drm.dev, "Failed to create CACHE_CLEAN scheduler: %d.", 454 454 ret); 455 455 v3d_sched_fini(v3d); 456 456 return ret;