Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-fixes-2023-02-17' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
"Just a final collection of misc fixes; the biggest of which disables the
recently added dynamic debugging support, since it has a regression that
needs some bigger fixes.

Otherwise a bunch of fixes across the board, vc4, amdgpu and vmwgfx
mostly, with some smaller i915 and ast fixes.

drm:
- dynamic debug disable for now

fbdev:
- deferred i/o device close fix

amdgpu:
- Fix GC11.x suspend warning
- Fix display warning

vc4:
- YUV planes fix
- hdmi display fix
- crtc reduced blanking fix

ast:
- fix start address computation

vmwgfx:
- fix bo/handle races

i915:
- gen11 WA fix"

* tag 'drm-fixes-2023-02-17' of git://anongit.freedesktop.org/drm/drm:
drm/amd/display: Fail atomic_check early on normalize_zpos error
drm/amd/amdgpu: fix warning during suspend
drm/vmwgfx: Do not drop the reference to the handle too soon
drm/vmwgfx: Stop accessing buffer objects which failed init
drm/i915/gen11: Wa_1408615072/Wa_1407596294 should be on GT list
drm: Disable dynamic debug as broken
drm/ast: Fix start address computation
fbdev: Fix invalid page access after closing deferred I/O devices
drm/vc4: crtc: Increase setup cost in core clock calculation to handle extreme reduced blanking
drm/vc4: hdmi: Always enable GCP with AVMUTE cleared
drm/vc4: Fix YUV plane handling when planes are in different buffers

+73 -38
+2 -1
drivers/gpu/drm/Kconfig
··· 53 53 54 54 config DRM_USE_DYNAMIC_DEBUG 55 55 bool "use dynamic debug to implement drm.debug" 56 - default y 56 + default n 57 + depends on BROKEN 57 58 depends on DRM 58 59 depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE 59 60 depends on JUMP_LABEL
+3
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 4268 4268 } 4269 4269 adev->in_suspend = false; 4270 4270 4271 + if (adev->enable_mes) 4272 + amdgpu_mes_self_test(adev); 4273 + 4271 4274 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0)) 4272 4275 DRM_WARN("smart shift update failed\n"); 4273 4276
+1 -1
drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
··· 1344 1344 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1345 1345 1346 1346 /* it's only intended for use in mes_self_test case, not for s0ix and reset */ 1347 - if (!amdgpu_in_reset(adev) && !adev->in_s0ix && 1347 + if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend && 1348 1348 (adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))) 1349 1349 amdgpu_mes_self_test(adev); 1350 1350
+5 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 9658 9658 * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in 9659 9659 * atomic state, so call drm helper to normalize zpos. 9660 9660 */ 9661 - drm_atomic_normalize_zpos(dev, state); 9661 + ret = drm_atomic_normalize_zpos(dev, state); 9662 + if (ret) { 9663 + drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n"); 9664 + goto fail; 9665 + } 9662 9666 9663 9667 /* Remove exiting planes if they are modified */ 9664 9668 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+2 -2
drivers/gpu/drm/ast/ast_mode.c
··· 714 714 struct ast_plane *ast_primary_plane = &ast->primary_plane; 715 715 struct drm_plane *primary_plane = &ast_primary_plane->base; 716 716 void __iomem *vaddr = ast->vram; 717 - u64 offset = ast->vram_base; 717 + u64 offset = 0; /* with shmem, the primary plane is always at offset 0 */ 718 718 unsigned long cursor_size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE); 719 719 unsigned long size = ast->vram_fb_available - cursor_size; 720 720 int ret; ··· 972 972 return -ENOMEM; 973 973 974 974 vaddr = ast->vram + ast->vram_fb_available - size; 975 - offset = ast->vram_base + ast->vram_fb_available - size; 975 + offset = ast->vram_fb_available - size; 976 976 977 977 ret = ast_plane_init(dev, ast_cursor_plane, vaddr, offset, size, 978 978 0x01, &ast_cursor_plane_funcs,
+7 -7
drivers/gpu/drm/i915/gt/intel_workarounds.c
··· 1355 1355 GAMT_CHKN_BIT_REG, 1356 1356 GAMT_CHKN_DISABLE_L3_COH_PIPE); 1357 1357 1358 + /* 1359 + * Wa_1408615072:icl,ehl (vsunit) 1360 + * Wa_1407596294:icl,ehl (hsunit) 1361 + */ 1362 + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, 1363 + VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS); 1364 + 1358 1365 /* Wa_1407352427:icl,ehl */ 1359 1366 wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, 1360 1367 PSDUNIT_CLKGATE_DIS); ··· 2545 2538 /* WaEnable32PlaneMode:icl */ 2546 2539 wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS, 2547 2540 GEN11_ENABLE_32_PLANE_MODE); 2548 - 2549 - /* 2550 - * Wa_1408615072:icl,ehl (vsunit) 2551 - * Wa_1407596294:icl,ehl (hsunit) 2552 - */ 2553 - wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, 2554 - VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS); 2555 2541 2556 2542 /* 2557 2543 * Wa_1408767742:icl[a2..forever],ehl[all]
+1 -1
drivers/gpu/drm/vc4/vc4_crtc.c
··· 711 711 struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder); 712 712 713 713 if (vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0) { 714 - vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 1000, 714 + vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 8000, 715 715 mode->clock * 9 / 10) * 1000; 716 716 } else { 717 717 vc4_state->hvs_load = mode->clock * 1000;
+9 -9
drivers/gpu/drm/vc4/vc4_hdmi.c
··· 97 97 #define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_SHIFT 8 98 98 #define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK VC4_MASK(15, 8) 99 99 100 + #define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_MASK VC4_MASK(7, 0) 101 + #define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_SET_AVMUTE BIT(0) 102 + #define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_CLEAR_AVMUTE BIT(4) 103 + 100 104 # define VC4_HD_M_SW_RST BIT(2) 101 105 # define VC4_HD_M_ENABLE BIT(0) 102 106 ··· 1310 1306 VC4_HDMI_VERTB_VBP)); 1311 1307 unsigned long flags; 1312 1308 unsigned char gcp; 1313 - bool gcp_en; 1314 1309 u32 reg; 1315 1310 int idx; 1316 1311 ··· 1344 1341 switch (vc4_state->output_bpc) { 1345 1342 case 12: 1346 1343 gcp = 6; 1347 - gcp_en = true; 1348 1344 break; 1349 1345 case 10: 1350 1346 gcp = 5; 1351 - gcp_en = true; 1352 1347 break; 1353 1348 case 8: 1354 1349 default: 1355 - gcp = 4; 1356 - gcp_en = false; 1350 + gcp = 0; 1357 1351 break; 1358 1352 } 1359 1353 ··· 1359 1359 * doesn't signal in GCP. 1360 1360 */ 1361 1361 if (vc4_state->output_format == VC4_HDMI_OUTPUT_YUV422) { 1362 - gcp = 4; 1363 - gcp_en = false; 1362 + gcp = 0; 1364 1363 } 1365 1364 1366 1365 reg = HDMI_READ(HDMI_DEEP_COLOR_CONFIG_1); ··· 1372 1373 reg = HDMI_READ(HDMI_GCP_WORD_1); 1373 1374 reg &= ~VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK; 1374 1375 reg |= VC4_SET_FIELD(gcp, VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1); 1376 + reg &= ~VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_MASK; 1377 + reg |= VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_CLEAR_AVMUTE; 1375 1378 HDMI_WRITE(HDMI_GCP_WORD_1, reg); 1376 1379 1377 1380 reg = HDMI_READ(HDMI_GCP_CONFIG); 1378 - reg &= ~VC5_HDMI_GCP_CONFIG_GCP_ENABLE; 1379 - reg |= gcp_en ? VC5_HDMI_GCP_CONFIG_GCP_ENABLE : 0; 1381 + reg |= VC5_HDMI_GCP_CONFIG_GCP_ENABLE; 1380 1382 HDMI_WRITE(HDMI_GCP_CONFIG, reg); 1381 1383 1382 1384 reg = HDMI_READ(HDMI_MISC_CONTROL);
+4 -2
drivers/gpu/drm/vc4/vc4_plane.c
··· 340 340 { 341 341 struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); 342 342 struct drm_framebuffer *fb = state->fb; 343 - struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, 0); 343 + struct drm_gem_dma_object *bo; 344 344 int num_planes = fb->format->num_planes; 345 345 struct drm_crtc_state *crtc_state; 346 346 u32 h_subsample = fb->format->hsub; ··· 359 359 if (ret) 360 360 return ret; 361 361 362 - for (i = 0; i < num_planes; i++) 362 + for (i = 0; i < num_planes; i++) { 363 + bo = drm_fb_dma_get_gem_obj(fb, i); 363 364 vc4_state->offsets[i] = bo->dma_addr + fb->offsets[i]; 365 + } 364 366 365 367 /* 366 368 * We don't support subpixel source positioning for scaling,
+8 -4
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
··· 462 462 return -ENOMEM; 463 463 } 464 464 465 + /* 466 + * vmw_bo_init will delete the *p_bo object if it fails 467 + */ 465 468 ret = vmw_bo_init(vmw, *p_bo, size, 466 469 placement, interruptible, pin, 467 470 bo_free); ··· 473 470 474 471 return ret; 475 472 out_error: 476 - kfree(*p_bo); 477 473 *p_bo = NULL; 478 474 return ret; 479 475 } ··· 598 596 ttm_bo_put(&vmw_bo->base); 599 597 } 600 598 599 + drm_gem_object_put(&vmw_bo->base.base); 601 600 return ret; 602 601 } 603 602 ··· 639 636 640 637 ret = vmw_user_bo_synccpu_grab(vbo, arg->flags); 641 638 vmw_bo_unreference(&vbo); 639 + drm_gem_object_put(&vbo->base.base); 642 640 if (unlikely(ret != 0)) { 643 641 if (ret == -ERESTARTSYS || ret == -EBUSY) 644 642 return -EBUSY; ··· 697 693 * struct vmw_buffer_object should be placed. 698 694 * Return: Zero on success, Negative error code on error. 699 695 * 700 - * The vmw buffer object pointer will be refcounted. 696 + * The vmw buffer object pointer will be refcounted (both ttm and gem) 701 697 */ 702 698 int vmw_user_bo_lookup(struct drm_file *filp, 703 699 uint32_t handle, ··· 714 710 715 711 *out = gem_to_vmw_bo(gobj); 716 712 ttm_bo_get(&(*out)->base); 717 - drm_gem_object_put(gobj); 718 713 719 714 return 0; 720 715 } ··· 794 791 ret = vmw_gem_object_create_with_handle(dev_priv, file_priv, 795 792 args->size, &args->handle, 796 793 &vbo); 797 - 794 + /* drop reference from allocate - handle holds it now */ 795 + drm_gem_object_put(&vbo->base.base); 798 796 return ret; 799 797 } 800 798
+2
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
··· 1160 1160 } 1161 1161 ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false); 1162 1162 ttm_bo_put(&vmw_bo->base); 1163 + drm_gem_object_put(&vmw_bo->base.base); 1163 1164 if (unlikely(ret != 0)) 1164 1165 return ret; 1165 1166 ··· 1215 1214 } 1216 1215 ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false); 1217 1216 ttm_bo_put(&vmw_bo->base); 1217 + drm_gem_object_put(&vmw_bo->base.base); 1218 1218 if (unlikely(ret != 0)) 1219 1219 return ret; 1220 1220
+4 -4
drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
··· 146 146 &vmw_sys_placement : 147 147 &vmw_vram_sys_placement, 148 148 true, false, &vmw_gem_destroy, p_vbo); 149 - 150 - (*p_vbo)->base.base.funcs = &vmw_gem_object_funcs; 151 149 if (ret != 0) 152 150 goto out_no_bo; 153 151 152 + (*p_vbo)->base.base.funcs = &vmw_gem_object_funcs; 153 + 154 154 ret = drm_gem_handle_create(filp, &(*p_vbo)->base.base, handle); 155 - /* drop reference from allocate - handle holds it now */ 156 - drm_gem_object_put(&(*p_vbo)->base.base); 157 155 out_no_bo: 158 156 return ret; 159 157 } ··· 178 180 rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node); 179 181 rep->cur_gmr_id = handle; 180 182 rep->cur_gmr_offset = 0; 183 + /* drop reference from allocate - handle holds it now */ 184 + drm_gem_object_put(&vbo->base.base); 181 185 out_no_bo: 182 186 return ret; 183 187 }
+3 -1
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
··· 1815 1815 1816 1816 err_out: 1817 1817 /* vmw_user_lookup_handle takes one ref so does new_fb */ 1818 - if (bo) 1818 + if (bo) { 1819 1819 vmw_bo_unreference(&bo); 1820 + drm_gem_object_put(&bo->base.base); 1821 + } 1820 1822 if (surface) 1821 1823 vmw_surface_unreference(&surface); 1822 1824
+1
drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
··· 458 458 ret = vmw_overlay_update_stream(dev_priv, buf, arg, true); 459 459 460 460 vmw_bo_unreference(&buf); 461 + drm_gem_object_put(&buf->base.base); 461 462 462 463 out_unlock: 463 464 mutex_unlock(&overlay->mutex);
+1
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
··· 807 807 num_output_sig, tfile, shader_handle); 808 808 out_bad_arg: 809 809 vmw_bo_unreference(&buffer); 810 + drm_gem_object_put(&buffer->base.base); 810 811 return ret; 811 812 } 812 813
+6 -4
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
··· 683 683 container_of(base, struct vmw_user_surface, prime.base); 684 684 struct vmw_resource *res = &user_srf->srf.res; 685 685 686 - if (base->shareable && res && res->backup) 686 + if (res && res->backup) 687 687 drm_gem_object_put(&res->backup->base.base); 688 688 689 689 *p_base = NULL; ··· 864 864 goto out_unlock; 865 865 } 866 866 vmw_bo_reference(res->backup); 867 - drm_gem_object_get(&res->backup->base.base); 867 + /* 868 + * We don't expose the handle to the userspace and surface 869 + * already holds a gem reference 870 + */ 871 + drm_gem_handle_delete(file_priv, backup_handle); 868 872 } 869 873 870 874 tmp = vmw_resource_reference(&srf->res); ··· 1572 1568 drm_vma_node_offset_addr(&res->backup->base.base.vma_node); 1573 1569 rep->buffer_size = res->backup->base.base.size; 1574 1570 rep->buffer_handle = backup_handle; 1575 - if (user_srf->prime.base.shareable) 1576 - drm_gem_object_get(&res->backup->base.base); 1577 1571 } else { 1578 1572 rep->buffer_map_handle = 0; 1579 1573 rep->buffer_size = 0;
+9 -1
drivers/video/fbdev/core/fb_defio.c
··· 313 313 } 314 314 EXPORT_SYMBOL_GPL(fb_deferred_io_open); 315 315 316 - void fb_deferred_io_cleanup(struct fb_info *info) 316 + void fb_deferred_io_release(struct fb_info *info) 317 317 { 318 318 struct fb_deferred_io *fbdefio = info->fbdefio; 319 319 struct page *page; ··· 327 327 page = fb_deferred_io_page(info, i); 328 328 page->mapping = NULL; 329 329 } 330 + } 331 + EXPORT_SYMBOL_GPL(fb_deferred_io_release); 332 + 333 + void fb_deferred_io_cleanup(struct fb_info *info) 334 + { 335 + struct fb_deferred_io *fbdefio = info->fbdefio; 336 + 337 + fb_deferred_io_release(info); 330 338 331 339 kvfree(info->pagerefs); 332 340 mutex_destroy(&fbdefio->lock);
+4
drivers/video/fbdev/core/fbmem.c
··· 1454 1454 struct fb_info * const info = file->private_data; 1455 1455 1456 1456 lock_fb_info(info); 1457 + #if IS_ENABLED(CONFIG_FB_DEFERRED_IO) 1458 + if (info->fbdefio) 1459 + fb_deferred_io_release(info); 1460 + #endif 1457 1461 if (info->fbops->fb_release) 1458 1462 info->fbops->fb_release(info,1); 1459 1463 module_put(info->fbops->owner);
+1
include/linux/fb.h
··· 662 662 extern void fb_deferred_io_open(struct fb_info *info, 663 663 struct inode *inode, 664 664 struct file *file); 665 + extern void fb_deferred_io_release(struct fb_info *info); 665 666 extern void fb_deferred_io_cleanup(struct fb_info *info); 666 667 extern int fb_deferred_io_fsync(struct file *file, loff_t start, 667 668 loff_t end, int datasync);