Linux kernel mirror (for testing): https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-misc-fixes-2023-08-24' of git://anongit.freedesktop.org/drm/drm-misc into drm-fixes

A samsung-dsim initialization fix, a devfreq fix for panfrost, a DP DSC
define fix, a recursive lock fix for dma-buf, a shader validation fix
and a reference counting fix for vmwgfx

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maxime Ripard <mripard@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/amy26vu5xbeeikswpx7nt6rddwfocdidshrtt2qovipihx5poj@y45p3dtzrloc

+67 -55
+9 -9
drivers/dma-buf/sw_sync.c
··· 191 191 */ 192 192 static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc) 193 193 { 194 + LIST_HEAD(signalled); 194 195 struct sync_pt *pt, *next; 195 196 196 197 trace_sync_timeline(obj); ··· 204 203 if (!timeline_fence_signaled(&pt->base)) 205 204 break; 206 205 207 - list_del_init(&pt->link); 206 + dma_fence_get(&pt->base); 207 + 208 + list_move_tail(&pt->link, &signalled); 208 209 rb_erase(&pt->node, &obj->pt_tree); 209 210 210 - /* 211 - * A signal callback may release the last reference to this 212 - * fence, causing it to be freed. That operation has to be 213 - * last to avoid a use after free inside this loop, and must 214 - * be after we remove the fence from the timeline in order to 215 - * prevent deadlocking on timeline->lock inside 216 - * timeline_fence_release(). 217 - */ 218 211 dma_fence_signal_locked(&pt->base); 219 212 } 220 213 221 214 spin_unlock_irq(&obj->lock); 215 + 216 + list_for_each_entry_safe(pt, next, &signalled, link) { 217 + list_del_init(&pt->link); 218 + dma_fence_put(&pt->base); 219 + } 222 220 } 223 221 224 222 /**
+17 -10
drivers/gpu/drm/bridge/samsung-dsim.c
··· 1386 1386 disable_irq(dsi->irq); 1387 1387 } 1388 1388 1389 + static void samsung_dsim_set_stop_state(struct samsung_dsim *dsi, bool enable) 1390 + { 1391 + u32 reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG); 1392 + 1393 + if (enable) 1394 + reg |= DSIM_FORCE_STOP_STATE; 1395 + else 1396 + reg &= ~DSIM_FORCE_STOP_STATE; 1397 + 1398 + samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg); 1399 + } 1400 + 1389 1401 static int samsung_dsim_init(struct samsung_dsim *dsi) 1390 1402 { 1391 1403 const struct samsung_dsim_driver_data *driver_data = dsi->driver_data; ··· 1457 1445 struct drm_bridge_state *old_bridge_state) 1458 1446 { 1459 1447 struct samsung_dsim *dsi = bridge_to_dsi(bridge); 1460 - u32 reg; 1461 1448 1462 1449 if (samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) { 1463 1450 samsung_dsim_set_display_mode(dsi); 1464 1451 samsung_dsim_set_display_enable(dsi, true); 1465 1452 } else { 1466 - reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG); 1467 - reg &= ~DSIM_FORCE_STOP_STATE; 1468 - samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg); 1453 + samsung_dsim_set_stop_state(dsi, false); 1469 1454 } 1470 1455 1471 1456 dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE; ··· 1472 1463 struct drm_bridge_state *old_bridge_state) 1473 1464 { 1474 1465 struct samsung_dsim *dsi = bridge_to_dsi(bridge); 1475 - u32 reg; 1476 1466 1477 1467 if (!(dsi->state & DSIM_STATE_ENABLED)) 1478 1468 return; 1479 1469 1480 - if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) { 1481 - reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG); 1482 - reg |= DSIM_FORCE_STOP_STATE; 1483 - samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg); 1484 - } 1470 + if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) 1471 + samsung_dsim_set_stop_state(dsi, true); 1485 1472 1486 1473 dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE; 1487 1474 } ··· 1779 1774 ret = samsung_dsim_init(dsi); 1780 1775 if (ret) 1781 1776 return ret; 1777 + 1778 + samsung_dsim_set_stop_state(dsi, false); 1782 1779 1783 1780 ret = mipi_dsi_create_packet(&xfer.packet, msg); 1784 1781 if (ret < 0)
+1 -1
drivers/gpu/drm/panfrost/panfrost_devfreq.c
··· 96 96 * keep going without it; any other error means that we are 97 97 * supposed to read the bin value, but we failed doing so. 98 98 */ 99 - if (ret != -ENOENT) { 99 + if (ret != -ENOENT && ret != -EOPNOTSUPP) { 100 100 DRM_DEV_ERROR(dev, "Cannot read speed-bin (%d).", ret); 101 101 return ret; 102 102 }
+2 -4
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
··· 497 497 if (!(flags & drm_vmw_synccpu_allow_cs)) { 498 498 atomic_dec(&vmw_bo->cpu_writers); 499 499 } 500 - ttm_bo_put(&vmw_bo->tbo); 500 + vmw_user_bo_unref(vmw_bo); 501 501 } 502 502 503 - drm_gem_object_put(&vmw_bo->tbo.base); 504 503 return ret; 505 504 } 506 505 ··· 539 540 return ret; 540 541 541 542 ret = vmw_user_bo_synccpu_grab(vbo, arg->flags); 542 - vmw_bo_unreference(&vbo); 543 - drm_gem_object_put(&vbo->tbo.base); 543 + vmw_user_bo_unref(vbo); 544 544 if (unlikely(ret != 0)) { 545 545 if (ret == -ERESTARTSYS || ret == -EBUSY) 546 546 return -EBUSY;
+8
drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
··· 195 195 return buf; 196 196 } 197 197 198 + static inline void vmw_user_bo_unref(struct vmw_bo *vbo) 199 + { 200 + if (vbo) { 201 + ttm_bo_put(&vbo->tbo); 202 + drm_gem_object_put(&vbo->tbo.base); 203 + } 204 + } 205 + 198 206 static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj) 199 207 { 200 208 return container_of((gobj), struct vmw_bo, tbo.base);
+12
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
··· 1513 1513 return (vmw_fifo_caps(vmw) & SVGA_FIFO_CAP_FENCE) != 0; 1514 1514 } 1515 1515 1516 + static inline bool vmw_shadertype_is_valid(enum vmw_sm_type shader_model, 1517 + u32 shader_type) 1518 + { 1519 + SVGA3dShaderType max_allowed = SVGA3D_SHADERTYPE_PREDX_MAX; 1520 + 1521 + if (shader_model >= VMW_SM_5) 1522 + max_allowed = SVGA3D_SHADERTYPE_MAX; 1523 + else if (shader_model >= VMW_SM_4) 1524 + max_allowed = SVGA3D_SHADERTYPE_DX10_MAX; 1525 + return shader_type >= SVGA3D_SHADERTYPE_MIN && shader_type < max_allowed; 1526 + } 1527 + 1516 1528 #endif
+13 -22
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
··· 1164 1164 } 1165 1165 vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB); 1166 1166 ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo); 1167 - ttm_bo_put(&vmw_bo->tbo); 1168 - drm_gem_object_put(&vmw_bo->tbo.base); 1167 + vmw_user_bo_unref(vmw_bo); 1169 1168 if (unlikely(ret != 0)) 1170 1169 return ret; 1171 1170 ··· 1220 1221 vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM, 1221 1222 VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM); 1222 1223 ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo); 1223 - ttm_bo_put(&vmw_bo->tbo); 1224 - drm_gem_object_put(&vmw_bo->tbo.base); 1224 + vmw_user_bo_unref(vmw_bo); 1225 1225 if (unlikely(ret != 0)) 1226 1226 return ret; 1227 1227 ··· 1990 1992 1991 1993 cmd = container_of(header, typeof(*cmd), header); 1992 1994 1993 - if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) { 1995 + if (!vmw_shadertype_is_valid(VMW_SM_LEGACY, cmd->body.type)) { 1994 1996 VMW_DEBUG_USER("Illegal shader type %u.\n", 1995 1997 (unsigned int) cmd->body.type); 1996 1998 return -EINVAL; ··· 2113 2115 SVGA3dCmdHeader *header) 2114 2116 { 2115 2117 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer); 2116 - SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ? 
2117 - SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10; 2118 2118 2119 2119 struct vmw_resource *res = NULL; 2120 2120 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); ··· 2129 2133 if (unlikely(ret != 0)) 2130 2134 return ret; 2131 2135 2136 + if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type) || 2137 + cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) { 2138 + VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n", 2139 + (unsigned int) cmd->body.type, 2140 + (unsigned int) cmd->body.slot); 2141 + return -EINVAL; 2142 + } 2143 + 2132 2144 binding.bi.ctx = ctx_node->ctx; 2133 2145 binding.bi.res = res; 2134 2146 binding.bi.bt = vmw_ctx_binding_cb; ··· 2144 2140 binding.offset = cmd->body.offsetInBytes; 2145 2141 binding.size = cmd->body.sizeInBytes; 2146 2142 binding.slot = cmd->body.slot; 2147 - 2148 - if (binding.shader_slot >= max_shader_num || 2149 - binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) { 2150 - VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n", 2151 - (unsigned int) cmd->body.type, 2152 - (unsigned int) binding.slot); 2153 - return -EINVAL; 2154 - } 2155 2143 2156 2144 vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 2157 2145 binding.slot); ··· 2203 2207 { 2204 2208 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) = 2205 2209 container_of(header, typeof(*cmd), header); 2206 - SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ? 
2207 - SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX; 2208 2210 2209 2211 u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) / 2210 2212 sizeof(SVGA3dShaderResourceViewId); 2211 2213 2212 2214 if ((u64) cmd->body.startView + (u64) num_sr_view > 2213 2215 (u64) SVGA3D_DX_MAX_SRVIEWS || 2214 - cmd->body.type >= max_allowed) { 2216 + !vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) { 2215 2217 VMW_DEBUG_USER("Invalid shader binding.\n"); 2216 2218 return -EINVAL; 2217 2219 } ··· 2233 2239 SVGA3dCmdHeader *header) 2234 2240 { 2235 2241 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader); 2236 - SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ? 2237 - SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX; 2238 2242 struct vmw_resource *res = NULL; 2239 2243 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); 2240 2244 struct vmw_ctx_bindinfo_shader binding; ··· 2243 2251 2244 2252 cmd = container_of(header, typeof(*cmd), header); 2245 2253 2246 - if (cmd->body.type >= max_allowed || 2247 - cmd->body.type < SVGA3D_SHADERTYPE_MIN) { 2254 + if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) { 2248 2255 VMW_DEBUG_USER("Illegal shader type %u.\n", 2249 2256 (unsigned int) cmd->body.type); 2250 2257 return -EINVAL;
+2 -4
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
··· 1665 1665 1666 1666 err_out: 1667 1667 /* vmw_user_lookup_handle takes one ref so does new_fb */ 1668 - if (bo) { 1669 - vmw_bo_unreference(&bo); 1670 - drm_gem_object_put(&bo->tbo.base); 1671 - } 1668 + if (bo) 1669 + vmw_user_bo_unref(bo); 1672 1670 if (surface) 1673 1671 vmw_surface_unreference(&surface); 1674 1672
+1 -2
drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
··· 451 451 452 452 ret = vmw_overlay_update_stream(dev_priv, buf, arg, true); 453 453 454 - vmw_bo_unreference(&buf); 455 - drm_gem_object_put(&buf->tbo.base); 454 + vmw_user_bo_unref(buf); 456 455 457 456 out_unlock: 458 457 mutex_unlock(&overlay->mutex);
+1 -2
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
··· 809 809 shader_type, num_input_sig, 810 810 num_output_sig, tfile, shader_handle); 811 811 out_bad_arg: 812 - vmw_bo_unreference(&buffer); 813 - drm_gem_object_put(&buffer->tbo.base); 812 + vmw_user_bo_unref(buffer); 814 813 return ret; 815 814 } 816 815
+1 -1
include/drm/display/drm_dp.h
··· 1537 1537 1538 1538 #define DP_BRANCH_OUI_HEADER_SIZE 0xc 1539 1539 #define DP_RECEIVER_CAP_SIZE 0xf 1540 - #define DP_DSC_RECEIVER_CAP_SIZE 0xf 1540 + #define DP_DSC_RECEIVER_CAP_SIZE 0x10 /* DSC Capabilities 0x60 through 0x6F */ 1541 1541 #define EDP_PSR_RECEIVER_CAP_SIZE 2 1542 1542 #define EDP_DISPLAY_CTL_CAP_SIZE 3 1543 1543 #define DP_LTTPR_COMMON_CAP_SIZE 8