Merge tag 'drm-fixes-2024-07-05' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Daniel Vetter:
"Just small fixes all over here, all quiet as it should.

drivers:

- amd: mostly amdgpu display fixes + radeon vm NULL deref fix

- xe: migration error handling + typoed register name in gt setup

- i915: usb-c fix to shut up warnings on MTL+

- panthor: fix sync-only jobs + ioctl validation fix to avoid a spurious
  EINVAL on empty object arrays

- panel orientation quirks for the Valve Steam Deck revisions

- nouveau: NULL deref in get_modes

drm core:

- fbdev big-endian fix for the DMA-memory-backed variant

drivers/firmware:

- fix sysfb refcounting"

* tag 'drm-fixes-2024-07-05' of https://gitlab.freedesktop.org/drm/kernel:
drm/xe/mcr: Avoid clobbering DSS steering
drm/xe: fix error handling in xe_migrate_update_pgtables
drm/ttm: Always take the bo delayed cleanup path for imported bos
drm/fbdev-generic: Fix framebuffer on big endian devices
drm/panthor: Fix sync-only jobs
drm/panthor: Don't check the array stride on empty uobj arrays
drm/amdgpu/atomfirmware: silence UBSAN warning
drm/radeon: check bo_va->bo is non-NULL before using it
drm/amd/display: Fix array-index-out-of-bounds in dml2/FCLKChangeSupport
drm/amd/display: Update efficiency bandwidth for dcn351
drm/amd/display: Fix refresh rate range for some panel
drm/amd/display: Account for cursor prefetch BW in DML1 mode support
drm/amd/display: Add refresh rate range check
drm/amd/display: Reset freesync config before update new state
drm: panel-orientation-quirks: Add labels for both Valve Steam Deck revisions
drm: panel-orientation-quirks: Add quirk for Valve Galileo
drm/i915/display: For MTL+ platforms skip mg dp programming
drm/nouveau: fix null pointer dereference in nouveau_connector_get_modes
firmware: sysfb: Fix reference count of sysfb parent device

+132 -31
+8 -4
drivers/firmware/sysfb.c
···
 	if (IS_ERR(pdev)) {
 		return ERR_CAST(pdev);
 	} else if (pdev) {
-		if (!sysfb_pci_dev_is_enabled(pdev))
+		if (!sysfb_pci_dev_is_enabled(pdev)) {
+			pci_dev_put(pdev);
 			return ERR_PTR(-ENODEV);
+		}
 		return &pdev->dev;
 	}
···
 	if (compatible) {
 		pd = sysfb_create_simplefb(si, &mode, parent);
 		if (!IS_ERR(pd))
-			goto unlock_mutex;
+			goto put_device;
 	}

 	/* if the FB is incompatible, create a legacy framebuffer device */
···
 	pd = platform_device_alloc(name, 0);
 	if (!pd) {
 		ret = -ENOMEM;
-		goto unlock_mutex;
+		goto put_device;
 	}

 	pd->dev.parent = parent;
···
 	if (ret)
 		goto err;

-	goto unlock_mutex;
+	goto put_device;
 err:
 	platform_device_put(pd);
+put_device:
+	put_device(parent);
 unlock_mutex:
 	mutex_unlock(&disable_lock);
 	return ret;
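The pattern behind this fix is the usual get/put balance on a refcounted struct device: a reference taken by a lookup must be dropped on every exit path, including the error unwind. A minimal sketch of that shape, using hypothetical names (example_register_child, "example-fb") rather than the actual sysfb code:

#include <linux/device.h>
#include <linux/platform_device.h>

/* Illustrative only: every reference taken here is dropped on every path. */
static int example_register_child(struct device *parent_hint)
{
	struct platform_device *pd;
	struct device *parent;
	int ret;

	parent = get_device(parent_hint);	/* take our own reference */
	if (!parent)
		return -ENODEV;

	pd = platform_device_alloc("example-fb", 0);
	if (!pd) {
		ret = -ENOMEM;
		goto put_parent;
	}

	pd->dev.parent = parent;

	ret = platform_device_add(pd);
	if (ret)
		goto err_free_pd;

	ret = 0;
	goto put_parent;	/* device_add() holds its own parent reference now */

err_free_pd:
	platform_device_put(pd);
put_parent:
	put_device(parent);	/* balance the get_device() above */
	return ret;
}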
+52 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
···
 	}

 	/* Update Freesync settings. */
+	reset_freesync_config_for_crtc(dm_new_crtc_state);
 	get_freesync_config_for_crtc(dm_new_crtc_state,
 				     dm_new_conn_state);
···
 	return ret;
 }

+static void parse_edid_displayid_vrr(struct drm_connector *connector,
+				     struct edid *edid)
+{
+	u8 *edid_ext = NULL;
+	int i;
+	int j = 0;
+	u16 min_vfreq;
+	u16 max_vfreq;
+
+	if (edid == NULL || edid->extensions == 0)
+		return;
+
+	/* Find DisplayID extension */
+	for (i = 0; i < edid->extensions; i++) {
+		edid_ext = (void *)(edid + (i + 1));
+		if (edid_ext[0] == DISPLAYID_EXT)
+			break;
+	}
+
+	if (edid_ext == NULL)
+		return;
+
+	while (j < EDID_LENGTH) {
+		/* Get dynamic video timing range from DisplayID if available */
+		if (EDID_LENGTH - j > 13 && edid_ext[j] == 0x25 &&
+		    (edid_ext[j+1] & 0xFE) == 0 && (edid_ext[j+2] == 9)) {
+			min_vfreq = edid_ext[j+9];
+			if (edid_ext[j+1] & 7)
+				max_vfreq = edid_ext[j+10] + ((edid_ext[j+11] & 3) << 8);
+			else
+				max_vfreq = edid_ext[j+10];
+
+			if (max_vfreq && min_vfreq) {
+				connector->display_info.monitor_range.max_vfreq = max_vfreq;
+				connector->display_info.monitor_range.min_vfreq = min_vfreq;
+
+				return;
+			}
+		}
+		j++;
+	}
+}
+
 static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
 			  struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
 {
···
 	if (!adev->dm.freesync_module)
 		goto update;

+	/* Some eDP panels only have the refresh rate range info in DisplayID */
+	if ((connector->display_info.monitor_range.min_vfreq == 0 ||
+	     connector->display_info.monitor_range.max_vfreq == 0))
+		parse_edid_displayid_vrr(connector, edid);
+
 	if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
 		     sink->sink_signal == SIGNAL_TYPE_EDP)) {
 		bool edid_check_required = false;
···
 		if (is_dp_capable_without_timing_msa(adev->dm.dc,
 						     amdgpu_dm_connector)) {
 			if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
-				freesync_capable = true;
 				amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
 				amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+				if (amdgpu_dm_connector->max_vfreq -
+				    amdgpu_dm_connector->min_vfreq > 10)
+					freesync_capable = true;
 			} else {
 				edid_check_required = edid->version > 1 ||
 						      (edid->version == 1 &&
+3
drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
···
 				&mode_lib->vba.UrgentBurstFactorLumaPre[k],
 				&mode_lib->vba.UrgentBurstFactorChromaPre[k],
 				&mode_lib->vba.NotUrgentLatencyHidingPre[k]);
+
+			v->cursor_bw_pre[k] = mode_lib->vba.NumberOfCursors[k] * mode_lib->vba.CursorWidth[k][0] * mode_lib->vba.CursorBPP[k][0] /
+					8.0 / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * v->VRatioPreY[i][j][k];
 		}

 		{
+1
drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
···
 		out->round_trip_ping_latency_dcfclk_cycles = 106;
 		out->smn_latency_us = 2;
 		out->dispclk_dppclk_vco_speed_mhz = 3600;
+		out->pct_ideal_dram_bw_after_urgent_pixel_only = 65.0;
 		break;

 	}
+1 -1
drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
···
 	context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = (unsigned int)in_ctx->v20.dml_core_ctx.mp.DCFCLKDeepSleep * 1000;
 	context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;

-	if (in_ctx->v20.dml_core_ctx.ms.support.FCLKChangeSupport[in_ctx->v20.scratch.mode_support_params.out_lowest_state_idx] == dml_fclock_change_unsupported)
+	if (in_ctx->v20.dml_core_ctx.ms.support.FCLKChangeSupport[0] == dml_fclock_change_unsupported)
 		context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = false;
 	else
 		context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = true;
+1 -1
drivers/gpu/drm/amd/include/atomfirmware.h
···
 {
 	struct atom_common_table_header table_header;
 	/*the real number of this included in the structure is calcualted by using the (whole structure size - the header size)/size of atom_gpio_pin_lut */
-	struct atom_gpio_pin_assignment gpio_pin[8];
+	struct atom_gpio_pin_assignment gpio_pin[];
 };

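The UBSAN warning here comes from indexing past the declared bound of a fixed-size trailing array, even though the table's real entry count is computed from the table size. A C99 flexible array member matches how the data is actually laid out, so bound-checked builds stay quiet. A small standalone sketch of the pattern, with made-up struct names rather than the atomfirmware layout:

#include <stddef.h>
#include <stdint.h>

struct table_header {
	uint16_t structure_size;	/* total size of the table in bytes */
	uint8_t  format_revision;
	uint8_t  content_revision;
};

struct pin_entry {
	uint16_t gpio_id;
	uint8_t  shift;
	uint8_t  mask;
};

/* Flexible array member: no declared bound, so indexing entries derived
 * from the header's structure_size no longer trips the sanitizer.
 */
struct pin_table {
	struct table_header header;
	struct pin_entry pin[];
};

static inline size_t pin_table_count(const struct pin_table *t)
{
	/* real count = (whole structure size - header size) / entry size */
	return (t->header.structure_size - sizeof(t->header)) / sizeof(t->pin[0]);
}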
+2 -1
drivers/gpu/drm/drm_fbdev_generic.c
···
 			    sizes->surface_width, sizes->surface_height,
 			    sizes->surface_bpp);

-	format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
+	format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
+					     sizes->surface_depth);
 	buffer = drm_client_framebuffer_create(client, sizes->surface_width,
 					       sizes->surface_height, format);
 	if (IS_ERR(buffer))
+8 -1
drivers/gpu/drm/drm_panel_orientation_quirks.c
···
 		DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galaxy Book 10.6"),
 	  },
 	  .driver_data = (void *)&lcd1280x1920_rightside_up,
-	}, {	/* Valve Steam Deck */
+	}, {	/* Valve Steam Deck (Jupiter) */
 	  .matches = {
 		DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"),
 		DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Jupiter"),
+		DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"),
+	  },
+	  .driver_data = (void *)&lcd800x1280_rightside_up,
+	}, {	/* Valve Steam Deck (Galileo) */
+	  .matches = {
+		DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"),
+		DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galileo"),
 		DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"),
 	  },
 	  .driver_data = (void *)&lcd800x1280_rightside_up,
+3
drivers/gpu/drm/i915/display/intel_ddi.c
···
 	u32 ln0, ln1, pin_assignment;
 	u8 width;

+	if (DISPLAY_VER(dev_priv) >= 14)
+		return;
+
 	if (!intel_encoder_is_tc(&dig_port->base) ||
 	    intel_tc_port_in_tbt_alt_mode(dig_port))
 		return;
+3
drivers/gpu/drm/nouveau/nouveau_connector.c
···
 		struct drm_display_mode *mode;

 		mode = drm_mode_duplicate(dev, nv_connector->native_mode);
+		if (!mode)
+			return 0;
+
 		drm_mode_probed_add(connector, mode);
 		ret = 1;
 	}
+3 -3
drivers/gpu/drm/panthor/panthor_drv.c
···
 	int ret = 0;
 	void *out_alloc;

+	if (!in->count)
+		return NULL;
+
 	/* User stride must be at least the minimum object size, otherwise it might
 	 * lack useful information.
 	 */
 	if (in->stride < min_stride)
 		return ERR_PTR(-EINVAL);
-
-	if (!in->count)
-		return NULL;

 	out_alloc = kvmalloc_array(in->count, obj_size, GFP_KERNEL);
 	if (!out_alloc)
+33 -11
drivers/gpu/drm/panthor/panthor_sched.c
···
 	atomic64_t seqno;

 	/**
+	 * @last_fence: Fence of the last submitted job.
+	 *
+	 * We return this fence when we get an empty command stream.
+	 * This way, we are guaranteed that all earlier jobs have completed
+	 * when drm_sched_job::s_fence::finished without having to feed
+	 * the CS ring buffer with a dummy job that only signals the fence.
+	 */
+	struct dma_fence *last_fence;
+
+	/**
 	 * @in_flight_jobs: List containing all in-flight jobs.
 	 *
 	 * Used to keep track and signal panthor_job::done_fence when the
···

 	panthor_kernel_bo_destroy(queue->ringbuf);
 	panthor_kernel_bo_destroy(queue->iface.mem);
+
+	/* Release the last_fence we were holding, if any. */
+	dma_fence_put(queue->fence_ctx.last_fence);

 	kfree(queue);
 }
···

 	spin_lock(&queue->fence_ctx.lock);
 	list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) {
-		if (!job->call_info.size)
-			continue;
-
 		if (syncobj->seqno < job->done_fence->seqno)
 			break;

···
 	static_assert(sizeof(call_instrs) % 64 == 0,
 		      "call_instrs is not aligned on a cacheline");

-	/* Stream size is zero, nothing to do => return a NULL fence and let
-	 * drm_sched signal the parent.
+	/* Stream size is zero, nothing to do except making sure all previously
+	 * submitted jobs are done before we signal the
+	 * drm_sched_job::s_fence::finished fence.
 	 */
-	if (!job->call_info.size)
-		return NULL;
+	if (!job->call_info.size) {
+		job->done_fence = dma_fence_get(queue->fence_ctx.last_fence);
+		return dma_fence_get(job->done_fence);
+	}

 	ret = pm_runtime_resume_and_get(ptdev->base.dev);
 	if (drm_WARN_ON(&ptdev->base, ret))
···
 			sched->pm.has_ref = true;
 		}
 	}
+
+	/* Update the last fence. */
+	dma_fence_put(queue->fence_ctx.last_fence);
+	queue->fence_ctx.last_fence = dma_fence_get(job->done_fence);

 	done_fence = dma_fence_get(job->done_fence);

···
 		goto err_put_job;
 	}

-	job->done_fence = kzalloc(sizeof(*job->done_fence), GFP_KERNEL);
-	if (!job->done_fence) {
-		ret = -ENOMEM;
-		goto err_put_job;
+	/* Empty command streams don't need a fence, they'll pick the one from
+	 * the previously submitted job.
+	 */
+	if (job->call_info.size) {
+		job->done_fence = kzalloc(sizeof(*job->done_fence), GFP_KERNEL);
+		if (!job->done_fence) {
+			ret = -ENOMEM;
+			goto err_put_job;
+		}
 	}

 	ret = drm_sched_job_init(&job->base,
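The idea of the sync-only fix: instead of returning a NULL fence for an empty command stream, the queue hands back a reference to the fence of the last job that actually reached the ring, so a sync-only submission still observes that all earlier work is done without pushing a dummy job. A minimal sketch of that caching pattern using the dma_fence API; example_queue/example_submit are hypothetical names, not the panthor code:

#include <linux/dma-fence.h>

struct example_queue {
	/* Fence of the last job that actually hit the ring buffer (may be NULL). */
	struct dma_fence *last_fence;
};

/* Returns the fence callers should treat as this submission's completion. */
static struct dma_fence *example_submit(struct example_queue *q,
					struct dma_fence *job_fence,
					size_t stream_size)
{
	if (!stream_size) {
		/* Sync-only job: reuse the previous job's fence so it signals
		 * only once all earlier jobs have completed. dma_fence_get()
		 * tolerates NULL when nothing was submitted yet.
		 */
		return dma_fence_get(q->last_fence);
	}

	/* Real job: remember its fence for later sync-only submissions. */
	dma_fence_put(q->last_fence);
	q->last_fence = dma_fence_get(job_fence);

	return dma_fence_get(job_fence);
}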
+1 -1
drivers/gpu/drm/radeon/radeon_gem.c
···
 	if (r)
 		goto error_unlock;

-	if (bo_va->it.start)
+	if (bo_va->it.start && bo_va->bo)
 		r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);

 error_unlock:
+1
drivers/gpu/drm/ttm/ttm_bo.c
···
 	if (!dma_resv_test_signaled(bo->base.resv,
 				    DMA_RESV_USAGE_BOOKKEEP) ||
 	    (want_init_on_free() && (bo->ttm != NULL)) ||
+	    bo->type == ttm_bo_type_sg ||
 	    !dma_resv_trylock(bo->base.resv)) {
 		/* The BO is not idle, resurrect it for delayed destroy */
 		ttm_bo_flush_all_fences(bo);
+3 -3
drivers/gpu/drm/xe/xe_gt_mcr.c
···
 	else
 		gt->steering[OADDRM].group_target = 1;

-	gt->steering[DSS].instance_target = 0;		/* unused */
+	gt->steering[OADDRM].instance_target = 0;	/* unused */
 }

 static void init_steering_sqidi_psmi(struct xe_gt *gt)
···

 static void init_steering_inst0(struct xe_gt *gt)
 {
-	gt->steering[DSS].group_target = 0;		/* unused */
-	gt->steering[DSS].instance_target = 0;		/* unused */
+	gt->steering[INSTANCE0].group_target = 0;	/* unused */
+	gt->steering[INSTANCE0].instance_target = 0;	/* unused */
 }

 static const struct {
+4 -4
drivers/gpu/drm/xe/xe_migrate.c
···
 					GFP_KERNEL, true, 0);
 		if (IS_ERR(sa_bo)) {
 			err = PTR_ERR(sa_bo);
-			goto err;
+			goto err_bb;
 		}

 		ppgtt_ofs = NUM_KERNEL_PDE +
···
 					 update_idx);
 	if (IS_ERR(job)) {
 		err = PTR_ERR(job);
-		goto err_bb;
+		goto err_sa;
 	}

 	/* Wait on BO move */
···

 err_job:
 	xe_sched_job_put(job);
+err_sa:
+	drm_suballoc_free(sa_bo, NULL);
 err_bb:
 	if (!q)
 		mutex_unlock(&m->job_mutex);
 	xe_bb_free(bb, NULL);
-err:
-	drm_suballoc_free(sa_bo, NULL);
 	return ERR_PTR(err);
 }
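The xe_migrate change follows the standard goto-unwind rule: error labels release resources in reverse order of acquisition, and each failure jumps to the label that frees only what has already been acquired (the suballocation must not be freed when its own allocation failed, and must be freed when a later step fails). A generic sketch of that shape, with hypothetical acquire/release helpers standing in for sa_bo, bb, and job:

/* Hypothetical resources; real code would return/free actual objects. */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return 0; }
static void release_a(void) { }
static void release_b(void) { }

static int example_setup(void)
{
	int err;

	err = acquire_a();
	if (err)
		return err;		/* nothing acquired yet, nothing to unwind */

	err = acquire_b();
	if (err)
		goto err_release_a;	/* only A is held */

	err = acquire_c();
	if (err)
		goto err_release_b;	/* A and B are held */

	return 0;

err_release_b:
	release_b();
err_release_a:
	release_a();
	return err;
}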
+5
include/uapi/drm/panthor_drm.h
···
 	 * Must be 64-bit/8-byte aligned (the size of a CS instruction)
 	 *
 	 * Can be zero if stream_addr is zero too.
+	 *
+	 * When the stream size is zero, the queue submit serves as a
+	 * synchronization point.
 	 */
 	__u32 stream_size;

···
 	 * ensure the GPU doesn't get garbage when reading the indirect command
 	 * stream buffers. If you want the cache flush to happen
 	 * unconditionally, pass a zero here.
+	 *
+	 * Ignored when stream_size is zero.
 	 */
 	__u32 latest_flush;
