Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-fixes-2024-07-12' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
"Oh I screwed up last week's fixes pull, and forgot to send..

Back to work, thanks to Sima for last week, not too many fixes as
expected getting close to release [ sic - Linus ], amdgpu and xe have
a couple each, and then some other misc ones.

amdgpu:
- PSR-SU fix
- Reserved VMID fix

xe:
- Use write-back caching mode for system memory on DGFX
- Do not leak object when finalizing hdcp gsc

bridge:
- adv7511 EDID irq fix

gma500:
- NULL mode fixes.

meson:
- fix resource leak"

* tag 'drm-fixes-2024-07-12' of https://gitlab.freedesktop.org/drm/kernel:
Revert "drm/amd/display: Reset freesync config before update new state"
drm/xe/display/xe_hdcp_gsc: Free arbiter on driver removal
drm/xe: Use write-back caching mode for system memory on DGFX
drm/amdgpu: reject gang submit on reserved VMIDs
drm/gma500: fix null pointer dereference in cdv_intel_lvds_get_modes
drm/gma500: fix null pointer dereference in psb_intel_lvds_get_modes
drm/meson: fix canvas release in bind function
drm/bridge: adv7511: Fix Intermittent EDID failures

+122 -60
+15
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 1093 1093 unsigned int i; 1094 1094 int r; 1095 1095 1096 + /* 1097 + * We can't use gang submit on with reserved VMIDs when the VM changes 1098 + * can't be invalidated by more than one engine at the same time. 1099 + */ 1100 + if (p->gang_size > 1 && !p->adev->vm_manager.concurrent_flush) { 1101 + for (i = 0; i < p->gang_size; ++i) { 1102 + struct drm_sched_entity *entity = p->entities[i]; 1103 + struct drm_gpu_scheduler *sched = entity->rq->sched; 1104 + struct amdgpu_ring *ring = to_amdgpu_ring(sched); 1105 + 1106 + if (amdgpu_vmid_uses_reserved(vm, ring->vm_hub)) 1107 + return -EINVAL; 1108 + } 1109 + } 1110 + 1096 1111 r = amdgpu_vm_clear_freed(adev, vm, NULL); 1097 1112 if (r) 1098 1113 return r;
+14 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
··· 406 406 if (r || !idle) 407 407 goto error; 408 408 409 - if (vm->reserved_vmid[vmhub] || (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)))) { 409 + if (amdgpu_vmid_uses_reserved(vm, vmhub)) { 410 410 r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence); 411 411 if (r || !id) 412 412 goto error; ··· 454 454 error: 455 455 mutex_unlock(&id_mgr->lock); 456 456 return r; 457 + } 458 + 459 + /* 460 + * amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID 461 + * @vm: the VM to check 462 + * @vmhub: the VMHUB which will be used 463 + * 464 + * Returns: True if the VM will use a reserved VMID. 465 + */ 466 + bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub) 467 + { 468 + return vm->reserved_vmid[vmhub] || 469 + (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0))); 457 470 } 458 471 459 472 int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
··· 78 78 79 79 bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev, 80 80 struct amdgpu_vmid *id); 81 + bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub); 81 82 int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, 82 83 unsigned vmhub); 83 84 void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
-1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 10048 10048 } 10049 10049 10050 10050 /* Update Freesync settings. */ 10051 - reset_freesync_config_for_crtc(dm_new_crtc_state); 10052 10051 get_freesync_config_for_crtc(dm_new_crtc_state, 10053 10052 dm_new_conn_state); 10054 10053
+1 -1
drivers/gpu/drm/bridge/adv7511/adv7511.h
··· 401 401 402 402 #ifdef CONFIG_DRM_I2C_ADV7511_CEC 403 403 int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511); 404 - void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1); 404 + int adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1); 405 405 #else 406 406 static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511) 407 407 {
+9 -4
drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
··· 119 119 cec_received_msg(adv7511->cec_adap, &msg); 120 120 } 121 121 122 - void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1) 122 + int adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1) 123 123 { 124 124 unsigned int offset = adv7511->info->reg_cec_offset; 125 125 const u32 irq_tx_mask = ADV7511_INT1_CEC_TX_READY | ··· 131 131 unsigned int rx_status; 132 132 int rx_order[3] = { -1, -1, -1 }; 133 133 int i; 134 + int irq_status = IRQ_NONE; 134 135 135 - if (irq1 & irq_tx_mask) 136 + if (irq1 & irq_tx_mask) { 136 137 adv_cec_tx_raw_status(adv7511, irq1); 138 + irq_status = IRQ_HANDLED; 139 + } 137 140 138 141 if (!(irq1 & irq_rx_mask)) 139 - return; 142 + return irq_status; 140 143 141 144 if (regmap_read(adv7511->regmap_cec, 142 145 ADV7511_REG_CEC_RX_STATUS + offset, &rx_status)) 143 - return; 146 + return irq_status; 144 147 145 148 /* 146 149 * ADV7511_REG_CEC_RX_STATUS[5:0] contains the reception order of RX ··· 175 172 176 173 adv7511_cec_rx(adv7511, rx_buf); 177 174 } 175 + 176 + return IRQ_HANDLED; 178 177 } 179 178 180 179 static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable)
+13 -9
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
··· 469 469 { 470 470 unsigned int irq0, irq1; 471 471 int ret; 472 + int cec_status = IRQ_NONE; 473 + int irq_status = IRQ_NONE; 472 474 473 475 ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0); 474 476 if (ret < 0) ··· 480 478 if (ret < 0) 481 479 return ret; 482 480 483 - /* If there is no IRQ to handle, exit indicating no IRQ data */ 484 - if (!(irq0 & (ADV7511_INT0_HPD | ADV7511_INT0_EDID_READY)) && 485 - !(irq1 & ADV7511_INT1_DDC_ERROR)) 486 - return -ENODATA; 487 - 488 481 regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0); 489 482 regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1); 490 483 491 - if (process_hpd && irq0 & ADV7511_INT0_HPD && adv7511->bridge.encoder) 484 + if (process_hpd && irq0 & ADV7511_INT0_HPD && adv7511->bridge.encoder) { 492 485 schedule_work(&adv7511->hpd_work); 486 + irq_status = IRQ_HANDLED; 487 + } 493 488 494 489 if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) { 495 490 adv7511->edid_read = true; 496 491 497 492 if (adv7511->i2c_main->irq) 498 493 wake_up_all(&adv7511->wq); 494 + irq_status = IRQ_HANDLED; 499 495 } 500 496 501 497 #ifdef CONFIG_DRM_I2C_ADV7511_CEC 502 - adv7511_cec_irq_process(adv7511, irq1); 498 + cec_status = adv7511_cec_irq_process(adv7511, irq1); 503 499 #endif 504 500 505 - return 0; 501 + /* If there is no IRQ to handle, exit indicating no IRQ data */ 502 + if (irq_status == IRQ_HANDLED || cec_status == IRQ_HANDLED) 503 + return IRQ_HANDLED; 504 + 505 + return IRQ_NONE; 506 506 } 507 507 508 508 static irqreturn_t adv7511_irq_handler(int irq, void *devid) ··· 513 509 int ret; 514 510 515 511 ret = adv7511_irq_process(adv7511, true); 516 - return ret < 0 ? IRQ_NONE : IRQ_HANDLED; 512 + return ret < 0 ? IRQ_NONE : ret; 517 513 } 518 514 519 515 /* -----------------------------------------------------------------------------
+3
drivers/gpu/drm/gma500/cdv_intel_lvds.c
··· 311 311 if (mode_dev->panel_fixed_mode != NULL) { 312 312 struct drm_display_mode *mode = 313 313 drm_mode_duplicate(dev, mode_dev->panel_fixed_mode); 314 + if (!mode) 315 + return 0; 316 + 314 317 drm_mode_probed_add(connector, mode); 315 318 return 1; 316 319 }
+3
drivers/gpu/drm/gma500/psb_intel_lvds.c
··· 504 504 if (mode_dev->panel_fixed_mode != NULL) { 505 505 struct drm_display_mode *mode = 506 506 drm_mode_duplicate(dev, mode_dev->panel_fixed_mode); 507 + if (!mode) 508 + return 0; 509 + 507 510 drm_mode_probed_add(connector, mode); 508 511 return 1; 509 512 }
+18 -19
drivers/gpu/drm/meson/meson_drv.c
··· 250 250 if (ret) 251 251 goto free_drm; 252 252 ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_0); 253 - if (ret) { 254 - meson_canvas_free(priv->canvas, priv->canvas_id_osd1); 255 - goto free_drm; 256 - } 253 + if (ret) 254 + goto free_canvas_osd1; 257 255 ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_1); 258 - if (ret) { 259 - meson_canvas_free(priv->canvas, priv->canvas_id_osd1); 260 - meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0); 261 - goto free_drm; 262 - } 256 + if (ret) 257 + goto free_canvas_vd1_0; 263 258 ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_2); 264 - if (ret) { 265 - meson_canvas_free(priv->canvas, priv->canvas_id_osd1); 266 - meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0); 267 - meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1); 268 - goto free_drm; 269 - } 259 + if (ret) 260 + goto free_canvas_vd1_1; 270 261 271 262 priv->vsync_irq = platform_get_irq(pdev, 0); 272 263 273 264 ret = drm_vblank_init(drm, 1); 274 265 if (ret) 275 - goto free_drm; 266 + goto free_canvas_vd1_2; 276 267 277 268 /* Assign limits per soc revision/package */ 278 269 for (i = 0 ; i < ARRAY_SIZE(meson_drm_soc_attrs) ; ++i) { ··· 279 288 */ 280 289 ret = drm_aperture_remove_framebuffers(&meson_driver); 281 290 if (ret) 282 - goto free_drm; 291 + goto free_canvas_vd1_2; 283 292 284 293 ret = drmm_mode_config_init(drm); 285 294 if (ret) 286 - goto free_drm; 295 + goto free_canvas_vd1_2; 287 296 drm->mode_config.max_width = 3840; 288 297 drm->mode_config.max_height = 2160; 289 298 drm->mode_config.funcs = &meson_mode_config_funcs; ··· 298 307 if (priv->afbcd.ops) { 299 308 ret = priv->afbcd.ops->init(priv); 300 309 if (ret) 301 - goto free_drm; 310 + goto free_canvas_vd1_2; 302 311 } 303 312 304 313 /* Encoder Initialization */ ··· 362 371 exit_afbcd: 363 372 if (priv->afbcd.ops) 364 373 priv->afbcd.ops->exit(priv); 374 + free_canvas_vd1_2: 375 + meson_canvas_free(priv->canvas, priv->canvas_id_vd1_2); 376 + 
free_canvas_vd1_1: 377 + meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1); 378 + free_canvas_vd1_0: 379 + meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0); 380 + free_canvas_osd1: 381 + meson_canvas_free(priv->canvas, priv->canvas_id_osd1); 365 382 free_drm: 366 383 drm_dev_put(drm); 367 384
+8 -4
drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
··· 159 159 { 160 160 struct intel_hdcp_gsc_message *hdcp_message = 161 161 xe->display.hdcp.hdcp_message; 162 + struct i915_hdcp_arbiter *arb = xe->display.hdcp.arbiter; 162 163 163 - if (!hdcp_message) 164 - return; 164 + if (hdcp_message) { 165 + xe_bo_unpin_map_no_vm(hdcp_message->hdcp_bo); 166 + kfree(hdcp_message); 167 + xe->display.hdcp.hdcp_message = NULL; 168 + } 165 169 166 - xe_bo_unpin_map_no_vm(hdcp_message->hdcp_bo); 167 - kfree(hdcp_message); 170 + kfree(arb); 171 + xe->display.hdcp.arbiter = NULL; 168 172 } 169 173 170 174 static int xe_gsc_send_sync(struct xe_device *xe,
+28 -19
drivers/gpu/drm/xe/xe_bo.c
··· 317 317 struct xe_device *xe = xe_bo_device(bo); 318 318 struct xe_ttm_tt *tt; 319 319 unsigned long extra_pages; 320 - enum ttm_caching caching; 320 + enum ttm_caching caching = ttm_cached; 321 321 int err; 322 322 323 323 tt = kzalloc(sizeof(*tt), GFP_KERNEL); ··· 331 331 extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size), 332 332 PAGE_SIZE); 333 333 334 - switch (bo->cpu_caching) { 335 - case DRM_XE_GEM_CPU_CACHING_WC: 336 - caching = ttm_write_combined; 337 - break; 338 - default: 339 - caching = ttm_cached; 340 - break; 341 - } 342 - 343 - WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching); 344 - 345 334 /* 346 - * Display scanout is always non-coherent with the CPU cache. 347 - * 348 - * For Xe_LPG and beyond, PPGTT PTE lookups are also non-coherent and 349 - * require a CPU:WC mapping. 335 + * DGFX system memory is always WB / ttm_cached, since 336 + * other caching modes are only supported on x86. DGFX 337 + * GPU system memory accesses are always coherent with the 338 + * CPU. 350 339 */ 351 - if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) || 352 - (xe->info.graphics_verx100 >= 1270 && bo->flags & XE_BO_FLAG_PAGETABLE)) 353 - caching = ttm_write_combined; 340 + if (!IS_DGFX(xe)) { 341 + switch (bo->cpu_caching) { 342 + case DRM_XE_GEM_CPU_CACHING_WC: 343 + caching = ttm_write_combined; 344 + break; 345 + default: 346 + caching = ttm_cached; 347 + break; 348 + } 349 + 350 + WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching); 351 + 352 + /* 353 + * Display scanout is always non-coherent with the CPU cache. 354 + * 355 + * For Xe_LPG and beyond, PPGTT PTE lookups are also 356 + * non-coherent and require a CPU:WC mapping. 
357 + */ 358 + if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) || 359 + (xe->info.graphics_verx100 >= 1270 && 360 + bo->flags & XE_BO_FLAG_PAGETABLE)) 361 + caching = ttm_write_combined; 362 + } 354 363 355 364 err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages); 356 365 if (err) {
+2 -1
drivers/gpu/drm/xe/xe_bo_types.h
··· 66 66 67 67 /** 68 68 * @cpu_caching: CPU caching mode. Currently only used for userspace 69 - * objects. 69 + * objects. Exceptions are system memory on DGFX, which is always 70 + * WB. 70 71 */ 71 72 u16 cpu_caching; 72 73
+7 -1
include/uapi/drm/xe_drm.h
··· 776 776 #define DRM_XE_GEM_CPU_CACHING_WC 2 777 777 /** 778 778 * @cpu_caching: The CPU caching mode to select for this object. If 779 - * mmaping the object the mode selected here will also be used. 779 + * mmaping the object the mode selected here will also be used. The 780 + * exception is when mapping system memory (including data evicted 781 + * to system) on discrete GPUs. The caching mode selected will 782 + * then be overridden to DRM_XE_GEM_CPU_CACHING_WB, and coherency 783 + * between GPU- and CPU is guaranteed. The caching mode of 784 + * existing CPU-mappings will be updated transparently to 785 + * user-space clients. 780 786 */ 781 787 __u16 cpu_caching; 782 788 /** @pad: MBZ */