Merge tag 'drm-misc-fixes-2024-07-04' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-fixes

drm-misc-fixes for v6.10-rc7:
- Add panel quirks.
- Firmware sysfb refcount fix.
- Another NULL-pointer mode dereference fix for nouveau.
- Panthor sync and uobj fixes.
- Fix fbdev regression since v6.7.
- Delay freeing of imported BOs in TTM to fix a lockdep splat.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ffba0c63-2798-40b6-948d-361cd3b14e9f@linux.intel.com

+63 -20
+8 -4
drivers/firmware/sysfb.c
··· 101 101 if (IS_ERR(pdev)) { 102 102 return ERR_CAST(pdev); 103 103 } else if (pdev) { 104 - if (!sysfb_pci_dev_is_enabled(pdev)) 104 + if (!sysfb_pci_dev_is_enabled(pdev)) { 105 + pci_dev_put(pdev); 105 106 return ERR_PTR(-ENODEV); 107 + } 106 108 return &pdev->dev; 107 109 } 108 110 ··· 139 137 if (compatible) { 140 138 pd = sysfb_create_simplefb(si, &mode, parent); 141 139 if (!IS_ERR(pd)) 142 - goto unlock_mutex; 140 + goto put_device; 143 141 } 144 142 145 143 /* if the FB is incompatible, create a legacy framebuffer device */ ··· 157 155 pd = platform_device_alloc(name, 0); 158 156 if (!pd) { 159 157 ret = -ENOMEM; 160 - goto unlock_mutex; 158 + goto put_device; 161 159 } 162 160 163 161 pd->dev.parent = parent; ··· 172 170 if (ret) 173 171 goto err; 174 172 175 - goto unlock_mutex; 173 + goto put_device; 176 174 err: 177 175 platform_device_put(pd); 176 + put_device: 177 + put_device(parent); 178 178 unlock_mutex: 179 179 mutex_unlock(&disable_lock); 180 180 return ret;
+2 -1
drivers/gpu/drm/drm_fbdev_generic.c
··· 84 84 sizes->surface_width, sizes->surface_height, 85 85 sizes->surface_bpp); 86 86 87 - format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); 87 + format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp, 88 + sizes->surface_depth); 88 89 buffer = drm_client_framebuffer_create(client, sizes->surface_width, 89 90 sizes->surface_height, format); 90 91 if (IS_ERR(buffer))
+8 -1
drivers/gpu/drm/drm_panel_orientation_quirks.c
··· 420 420 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galaxy Book 10.6"), 421 421 }, 422 422 .driver_data = (void *)&lcd1280x1920_rightside_up, 423 - }, { /* Valve Steam Deck */ 423 + }, { /* Valve Steam Deck (Jupiter) */ 424 424 .matches = { 425 425 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"), 426 426 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Jupiter"), 427 + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"), 428 + }, 429 + .driver_data = (void *)&lcd800x1280_rightside_up, 430 + }, { /* Valve Steam Deck (Galileo) */ 431 + .matches = { 432 + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"), 433 + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galileo"), 427 434 DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"), 428 435 }, 429 436 .driver_data = (void *)&lcd800x1280_rightside_up,
+3
drivers/gpu/drm/nouveau/nouveau_connector.c
··· 1001 1001 struct drm_display_mode *mode; 1002 1002 1003 1003 mode = drm_mode_duplicate(dev, nv_connector->native_mode); 1004 + if (!mode) 1005 + return 0; 1006 + 1004 1007 drm_mode_probed_add(connector, mode); 1005 1008 ret = 1; 1006 1009 }
+3 -3
drivers/gpu/drm/panthor/panthor_drv.c
··· 86 86 int ret = 0; 87 87 void *out_alloc; 88 88 89 + if (!in->count) 90 + return NULL; 91 + 89 92 /* User stride must be at least the minimum object size, otherwise it might 90 93 * lack useful information. 91 94 */ 92 95 if (in->stride < min_stride) 93 96 return ERR_PTR(-EINVAL); 94 - 95 - if (!in->count) 96 - return NULL; 97 97 98 98 out_alloc = kvmalloc_array(in->count, obj_size, GFP_KERNEL); 99 99 if (!out_alloc)
+33 -11
drivers/gpu/drm/panthor/panthor_sched.c
··· 459 459 atomic64_t seqno; 460 460 461 461 /** 462 + * @last_fence: Fence of the last submitted job. 463 + * 464 + * We return this fence when we get an empty command stream. 465 + * This way, we are guaranteed that all earlier jobs have completed 466 + * when drm_sched_job::s_fence::finished without having to feed 467 + * the CS ring buffer with a dummy job that only signals the fence. 468 + */ 469 + struct dma_fence *last_fence; 470 + 471 + /** 462 472 * @in_flight_jobs: List containing all in-flight jobs. 463 473 * 464 474 * Used to keep track and signal panthor_job::done_fence when the ··· 838 828 839 829 panthor_kernel_bo_destroy(queue->ringbuf); 840 830 panthor_kernel_bo_destroy(queue->iface.mem); 831 + 832 + /* Release the last_fence we were holding, if any. */ 833 + dma_fence_put(queue->fence_ctx.last_fence); 841 834 842 835 kfree(queue); 843 836 } ··· 2797 2784 2798 2785 spin_lock(&queue->fence_ctx.lock); 2799 2786 list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) { 2800 - if (!job->call_info.size) 2801 - continue; 2802 - 2803 2787 if (syncobj->seqno < job->done_fence->seqno) 2804 2788 break; 2805 2789 ··· 2875 2865 static_assert(sizeof(call_instrs) % 64 == 0, 2876 2866 "call_instrs is not aligned on a cacheline"); 2877 2867 2878 - /* Stream size is zero, nothing to do => return a NULL fence and let 2879 - * drm_sched signal the parent. 2868 + /* Stream size is zero, nothing to do except making sure all previously 2869 + * submitted jobs are done before we signal the 2870 + * drm_sched_job::s_fence::finished fence. 
2880 2871 */ 2881 - if (!job->call_info.size) 2882 - return NULL; 2872 + if (!job->call_info.size) { 2873 + job->done_fence = dma_fence_get(queue->fence_ctx.last_fence); 2874 + return dma_fence_get(job->done_fence); 2875 + } 2883 2876 2884 2877 ret = pm_runtime_resume_and_get(ptdev->base.dev); 2885 2878 if (drm_WARN_ON(&ptdev->base, ret)) ··· 2940 2927 sched->pm.has_ref = true; 2941 2928 } 2942 2929 } 2930 + 2931 + /* Update the last fence. */ 2932 + dma_fence_put(queue->fence_ctx.last_fence); 2933 + queue->fence_ctx.last_fence = dma_fence_get(job->done_fence); 2943 2934 2944 2935 done_fence = dma_fence_get(job->done_fence); 2945 2936 ··· 3395 3378 goto err_put_job; 3396 3379 } 3397 3380 3398 - job->done_fence = kzalloc(sizeof(*job->done_fence), GFP_KERNEL); 3399 - if (!job->done_fence) { 3400 - ret = -ENOMEM; 3401 - goto err_put_job; 3381 + /* Empty command streams don't need a fence, they'll pick the one from 3382 + * the previously submitted job. 3383 + */ 3384 + if (job->call_info.size) { 3385 + job->done_fence = kzalloc(sizeof(*job->done_fence), GFP_KERNEL); 3386 + if (!job->done_fence) { 3387 + ret = -ENOMEM; 3388 + goto err_put_job; 3389 + } 3402 3390 } 3403 3391 3404 3392 ret = drm_sched_job_init(&job->base,
+1
drivers/gpu/drm/ttm/ttm_bo.c
··· 346 346 if (!dma_resv_test_signaled(bo->base.resv, 347 347 DMA_RESV_USAGE_BOOKKEEP) || 348 348 (want_init_on_free() && (bo->ttm != NULL)) || 349 + bo->type == ttm_bo_type_sg || 349 350 !dma_resv_trylock(bo->base.resv)) { 350 351 /* The BO is not idle, resurrect it for delayed destroy */ 351 352 ttm_bo_flush_all_fences(bo);
+5
include/uapi/drm/panthor_drm.h
··· 802 802 * Must be 64-bit/8-byte aligned (the size of a CS instruction) 803 803 * 804 804 * Can be zero if stream_addr is zero too. 805 + * 806 + * When the stream size is zero, the queue submit serves as a 807 + * synchronization point. 805 808 */ 806 809 __u32 stream_size; 807 810 ··· 825 822 * ensure the GPU doesn't get garbage when reading the indirect command 826 823 * stream buffers. If you want the cache flush to happen 827 824 * unconditionally, pass a zero here. 825 + * 826 + * Ignored when stream_size is zero. 828 827 */ 829 828 __u32 latest_flush; 830 829