Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'drm-fixes-2020-07-17-1' of git://anongit.freedesktop.org/drm/drm into master

Pull drm fixes from Dave Airlie:
"Weekly fixes pull, a bit bigger than I'd normally like, but they are
fairly scattered and small individually.

The vmwgfx one is a black screen regression, otherwise the largest is
an MST encoder fix for amdgpu which results in a WARN in some cases,
and a scattering of i915 fixes.

I'm tracking two regressions at the moment that hopefully we get
nailed down this week for rc7.

dma-buf:
- sleeping atomic fix

amdgpu:
- Fix a race condition with KIQ
- Preemption fix
- Fix handling of fake MST encoders
- OLED panel fix
- Handle allocation failure in stream construction
- Renoir SMC fix
- SDMA 5.x fix

i915:
- FBC w/a stride fix
- Fix use-after-free on module reload
- Ignore irq enabling on the virtual engines to fix device sleep
- Use GTT when saving/restoring engine GPR
- Fix selftest sort function

vmwgfx:
- black screen fix

aspeed:
- fbcon init warn fix"

* tag 'drm-fixes-2020-07-17-1' of git://anongit.freedesktop.org/drm/drm:
drm/amdgpu/sdma5: fix wptr overwritten in ->get_wptr()
drm/amdgpu/powerplay: Modify SMC message name for setting power profile mode
drm/amd/display: handle failed allocation during stream construction
drm/amd/display: OLED panel backlight adjust not work with external display connected
drm/amdgpu/display: create fake mst encoders ahead of time (v4)
drm/amdgpu: fix preemption unit test
drm/amdgpu/gfx10: fix race condition for kiq
drm/i915: Recalculate FBC w/a stride when needed
drm/i915: Move cec_notifier to intel_hdmi_connector_unregister, v2.
drm/i915/gt: Only swap to a random sibling once upon creation
drm/i915/gt: Ignore irq enabling on the virtual engines
drm/i915/perf: Use GTT when saving/restoring engine GPR
drm/i915/selftests: Fix compare functions provided for sorting
drm/vmwgfx: fix update of display surface when resolution changes
dmabuf: use spinlock to access dmabuf->name
drm/aspeed: Call drm_fbdev_generic_setup after drm_dev_register

+153 -99
+7 -4
drivers/dma-buf/dma-buf.c
··· 45 45 size_t ret = 0; 46 46 47 47 dmabuf = dentry->d_fsdata; 48 - dma_resv_lock(dmabuf->resv, NULL); 48 + spin_lock(&dmabuf->name_lock); 49 49 if (dmabuf->name) 50 50 ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN); 51 - dma_resv_unlock(dmabuf->resv); 51 + spin_unlock(&dmabuf->name_lock); 52 52 53 53 return dynamic_dname(dentry, buffer, buflen, "/%s:%s", 54 54 dentry->d_name.name, ret > 0 ? name : ""); ··· 338 338 kfree(name); 339 339 goto out_unlock; 340 340 } 341 + spin_lock(&dmabuf->name_lock); 341 342 kfree(dmabuf->name); 342 343 dmabuf->name = name; 344 + spin_unlock(&dmabuf->name_lock); 343 345 344 346 out_unlock: 345 347 dma_resv_unlock(dmabuf->resv); ··· 404 402 /* Don't count the temporary reference taken inside procfs seq_show */ 405 403 seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1); 406 404 seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name); 407 - dma_resv_lock(dmabuf->resv, NULL); 405 + spin_lock(&dmabuf->name_lock); 408 406 if (dmabuf->name) 409 407 seq_printf(m, "name:\t%s\n", dmabuf->name); 410 - dma_resv_unlock(dmabuf->resv); 408 + spin_unlock(&dmabuf->name_lock); 411 409 } 412 410 413 411 static const struct file_operations dma_buf_fops = { ··· 544 542 dmabuf->size = exp_info->size; 545 543 dmabuf->exp_name = exp_info->exp_name; 546 544 dmabuf->owner = exp_info->owner; 545 + spin_lock_init(&dmabuf->name_lock); 547 546 init_waitqueue_head(&dmabuf->poll); 548 547 dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll; 549 548 dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
+15 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
··· 1295 1295 static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring) 1296 1296 { 1297 1297 struct amdgpu_job *job; 1298 - struct drm_sched_job *s_job; 1298 + struct drm_sched_job *s_job, *tmp; 1299 1299 uint32_t preempt_seq; 1300 1300 struct dma_fence *fence, **ptr; 1301 1301 struct amdgpu_fence_driver *drv = &ring->fence_drv; 1302 1302 struct drm_gpu_scheduler *sched = &ring->sched; 1303 + bool preempted = true; 1303 1304 1304 1305 if (ring->funcs->type != AMDGPU_RING_TYPE_GFX) 1305 1306 return; 1306 1307 1307 1308 preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2)); 1308 - if (preempt_seq <= atomic_read(&drv->last_seq)) 1309 - return; 1309 + if (preempt_seq <= atomic_read(&drv->last_seq)) { 1310 + preempted = false; 1311 + goto no_preempt; 1312 + } 1310 1313 1311 1314 preempt_seq &= drv->num_fences_mask; 1312 1315 ptr = &drv->fences[preempt_seq]; 1313 1316 fence = rcu_dereference_protected(*ptr, 1); 1314 1317 1318 + no_preempt: 1315 1319 spin_lock(&sched->job_list_lock); 1316 - list_for_each_entry(s_job, &sched->ring_mirror_list, node) { 1320 + list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { 1321 + if (dma_fence_is_signaled(&s_job->s_fence->finished)) { 1322 + /* remove job from ring_mirror_list */ 1323 + list_del_init(&s_job->node); 1324 + sched->ops->free_job(s_job); 1325 + continue; 1326 + } 1317 1327 job = to_amdgpu_job(s_job); 1318 - if (job->fence == fence) 1328 + if (preempted && job->fence == fence) 1319 1329 /* mark the job as preempted */ 1320 1330 job->preemption_status |= AMDGPU_IB_PREEMPTED; 1321 1331 }
+8 -1
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
··· 7513 7513 struct amdgpu_device *adev = ring->adev; 7514 7514 struct amdgpu_kiq *kiq = &adev->gfx.kiq; 7515 7515 struct amdgpu_ring *kiq_ring = &kiq->ring; 7516 + unsigned long flags; 7516 7517 7517 7518 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) 7518 7519 return -EINVAL; 7519 7520 7520 - if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) 7521 + spin_lock_irqsave(&kiq->ring_lock, flags); 7522 + 7523 + if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { 7524 + spin_unlock_irqrestore(&kiq->ring_lock, flags); 7521 7525 return -ENOMEM; 7526 + } 7522 7527 7523 7528 /* assert preemption condition */ 7524 7529 amdgpu_ring_set_preempt_cond_exec(ring, false); ··· 7533 7528 ring->trail_fence_gpu_addr, 7534 7529 ++ring->trail_seq); 7535 7530 amdgpu_ring_commit(kiq_ring); 7531 + 7532 + spin_unlock_irqrestore(&kiq->ring_lock, flags); 7536 7533 7537 7534 /* poll the trailing fence */ 7538 7535 for (i = 0; i < adev->usec_timeout; i++) {
+8 -18
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
··· 314 314 static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring) 315 315 { 316 316 struct amdgpu_device *adev = ring->adev; 317 - u64 *wptr = NULL; 318 - uint64_t local_wptr = 0; 317 + u64 wptr; 319 318 320 319 if (ring->use_doorbell) { 321 320 /* XXX check if swapping is necessary on BE */ 322 - wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]); 323 - DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr); 324 - *wptr = (*wptr) >> 2; 325 - DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr); 321 + wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs])); 322 + DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr); 326 323 } else { 327 - u32 lowbit, highbit; 328 - 329 - wptr = &local_wptr; 330 - lowbit = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2; 331 - highbit = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2; 332 - 333 - DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n", 334 - ring->me, highbit, lowbit); 335 - *wptr = highbit; 336 - *wptr = (*wptr) << 32; 337 - *wptr |= lowbit; 324 + wptr = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)); 325 + wptr = wptr << 32; 326 + wptr |= RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)); 327 + DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n", ring->me, wptr); 338 328 } 339 329 340 - return *wptr; 330 + return wptr >> 2; 341 331 } 342 332 343 333 /**
+14
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 974 974 /* Update the actual used number of crtc */ 975 975 adev->mode_info.num_crtc = adev->dm.display_indexes_num; 976 976 977 + /* create fake encoders for MST */ 978 + dm_dp_create_fake_mst_encoders(adev); 979 + 977 980 /* TODO: Add_display_info? */ 978 981 979 982 /* TODO use dynamic cursor width */ ··· 1000 997 1001 998 static void amdgpu_dm_fini(struct amdgpu_device *adev) 1002 999 { 1000 + int i; 1001 + 1002 + for (i = 0; i < adev->dm.display_indexes_num; i++) { 1003 + drm_encoder_cleanup(&adev->dm.mst_encoders[i].base); 1004 + } 1005 + 1003 1006 amdgpu_dm_audio_fini(adev); 1004 1007 1005 1008 amdgpu_dm_destroy_drm_device(&adev->dm); ··· 2019 2010 struct amdgpu_display_manager *dm; 2020 2011 struct drm_connector *conn_base; 2021 2012 struct amdgpu_device *adev; 2013 + struct dc_link *link = NULL; 2022 2014 static const u8 pre_computed_values[] = { 2023 2015 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69, 2024 2016 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98}; 2025 2017 2026 2018 if (!aconnector || !aconnector->dc_link) 2019 + return; 2020 + 2021 + link = aconnector->dc_link; 2022 + if (link->connector_signal != SIGNAL_TYPE_EDP) 2027 2023 return; 2028 2024 2029 2025 conn_base = &aconnector->base;
+10 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
··· 43 43 */ 44 44 45 45 #define AMDGPU_DM_MAX_DISPLAY_INDEX 31 46 + 47 + #define AMDGPU_DM_MAX_CRTC 6 48 + 46 49 /* 47 50 #include "include/amdgpu_dal_power_if.h" 48 51 #include "amdgpu_dm_irq.h" ··· 331 328 * available in FW 332 329 */ 333 330 const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box; 331 + 332 + /** 333 + * @mst_encoders: 334 + * 335 + * fake encoders used for DP MST. 336 + */ 337 + struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC]; 334 338 }; 335 339 336 340 struct amdgpu_dm_connector { ··· 366 356 struct amdgpu_dm_dp_aux dm_dp_aux; 367 357 struct drm_dp_mst_port *port; 368 358 struct amdgpu_dm_connector *mst_port; 369 - struct amdgpu_encoder *mst_encoder; 370 359 struct drm_dp_aux *dsc_aux; 371 360 372 361 /* TODO see if we can merge with ddc_bus or make a dm_connector */
+26 -27
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
··· 95 95 { 96 96 struct amdgpu_dm_connector *aconnector = 97 97 to_amdgpu_dm_connector(connector); 98 - struct amdgpu_encoder *amdgpu_encoder = aconnector->mst_encoder; 99 98 100 99 if (aconnector->dc_sink) { 101 100 dc_link_remove_remote_sink(aconnector->dc_link, ··· 104 105 105 106 kfree(aconnector->edid); 106 107 107 - drm_encoder_cleanup(&amdgpu_encoder->base); 108 - kfree(amdgpu_encoder); 109 108 drm_connector_cleanup(connector); 110 109 drm_dp_mst_put_port_malloc(aconnector->port); 111 110 kfree(aconnector); ··· 240 243 dm_mst_atomic_best_encoder(struct drm_connector *connector, 241 244 struct drm_connector_state *connector_state) 242 245 { 243 - return &to_amdgpu_dm_connector(connector)->mst_encoder->base; 246 + struct drm_device *dev = connector->dev; 247 + struct amdgpu_device *adev = dev->dev_private; 248 + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc); 249 + 250 + return &adev->dm.mst_encoders[acrtc->crtc_id].base; 244 251 } 245 252 246 253 static int ··· 307 306 .destroy = amdgpu_dm_encoder_destroy, 308 307 }; 309 308 310 - static struct amdgpu_encoder * 311 - dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector) 309 + void 310 + dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev) 312 311 { 313 - struct drm_device *dev = connector->base.dev; 314 - struct amdgpu_device *adev = dev->dev_private; 315 - struct amdgpu_encoder *amdgpu_encoder; 316 - struct drm_encoder *encoder; 312 + struct drm_device *dev = adev->ddev; 313 + int i; 317 314 318 - amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL); 319 - if (!amdgpu_encoder) 320 - return NULL; 315 + for (i = 0; i < adev->dm.display_indexes_num; i++) { 316 + struct amdgpu_encoder *amdgpu_encoder = &adev->dm.mst_encoders[i]; 317 + struct drm_encoder *encoder = &amdgpu_encoder->base; 321 318 322 - encoder = &amdgpu_encoder->base; 323 - encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); 319 + encoder->possible_crtcs = 
amdgpu_dm_get_encoder_crtc_mask(adev); 324 320 325 - drm_encoder_init( 326 - dev, 327 - &amdgpu_encoder->base, 328 - &amdgpu_dm_encoder_funcs, 329 - DRM_MODE_ENCODER_DPMST, 330 - NULL); 321 + drm_encoder_init( 322 + dev, 323 + &amdgpu_encoder->base, 324 + &amdgpu_dm_encoder_funcs, 325 + DRM_MODE_ENCODER_DPMST, 326 + NULL); 331 327 332 - drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs); 333 - 334 - return amdgpu_encoder; 328 + drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs); 329 + } 335 330 } 336 331 337 332 static struct drm_connector * ··· 340 343 struct amdgpu_device *adev = dev->dev_private; 341 344 struct amdgpu_dm_connector *aconnector; 342 345 struct drm_connector *connector; 346 + int i; 343 347 344 348 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); 345 349 if (!aconnector) ··· 367 369 master->dc_link, 368 370 master->connector_id); 369 371 370 - aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master); 371 - drm_connector_attach_encoder(&aconnector->base, 372 - &aconnector->mst_encoder->base); 372 + for (i = 0; i < adev->dm.display_indexes_num; i++) { 373 + drm_connector_attach_encoder(&aconnector->base, 374 + &adev->dm.mst_encoders[i].base); 375 + } 373 376 374 377 connector->max_bpc_property = master->base.max_bpc_property; 375 378 if (connector->max_bpc_property)
+3
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
··· 35 35 struct amdgpu_dm_connector *aconnector, 36 36 int link_index); 37 37 38 + void 39 + dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev); 40 + 38 41 #if defined(CONFIG_DRM_AMD_DC_DCN) 39 42 bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, 40 43 struct dc_state *dc_state);
+16 -3
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
··· 56 56 } 57 57 } 58 58 59 - static void dc_stream_construct(struct dc_stream_state *stream, 59 + static bool dc_stream_construct(struct dc_stream_state *stream, 60 60 struct dc_sink *dc_sink_data) 61 61 { 62 62 uint32_t i = 0; ··· 118 118 update_stream_signal(stream, dc_sink_data); 119 119 120 120 stream->out_transfer_func = dc_create_transfer_func(); 121 + if (stream->out_transfer_func == NULL) { 122 + dc_sink_release(dc_sink_data); 123 + return false; 124 + } 121 125 stream->out_transfer_func->type = TF_TYPE_BYPASS; 122 126 stream->out_transfer_func->ctx = stream->ctx; 123 127 124 128 stream->stream_id = stream->ctx->dc_stream_id_count; 125 129 stream->ctx->dc_stream_id_count++; 130 + 131 + return true; 126 132 } 127 133 128 134 static void dc_stream_destruct(struct dc_stream_state *stream) ··· 170 164 171 165 stream = kzalloc(sizeof(struct dc_stream_state), GFP_KERNEL); 172 166 if (stream == NULL) 173 - return NULL; 167 + goto alloc_fail; 174 168 175 - dc_stream_construct(stream, sink); 169 + if (dc_stream_construct(stream, sink) == false) 170 + goto construct_fail; 176 171 177 172 kref_init(&stream->refcount); 178 173 179 174 return stream; 175 + 176 + construct_fail: 177 + kfree(stream); 178 + 179 + alloc_fail: 180 + return NULL; 180 181 } 181 182 182 183 struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
+1 -1
drivers/gpu/drm/amd/powerplay/renoir_ppt.c
··· 689 689 return -EINVAL; 690 690 } 691 691 692 - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, 692 + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, 693 693 1 << workload_type, 694 694 NULL); 695 695 if (ret) {
+1 -2
drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
··· 173 173 174 174 drm_mode_config_reset(drm); 175 175 176 - drm_fbdev_generic_setup(drm, 32); 177 - 178 176 return 0; 179 177 } 180 178 ··· 223 225 if (ret) 224 226 goto err_unload; 225 227 228 + drm_fbdev_generic_setup(&priv->drm, 32); 226 229 return 0; 227 230 228 231 err_unload:
+26 -7
drivers/gpu/drm/i915/display/intel_fbc.c
··· 719 719 fbc->compressed_fb.size * fbc->threshold; 720 720 } 721 721 722 + static u16 intel_fbc_gen9_wa_cfb_stride(struct drm_i915_private *dev_priv) 723 + { 724 + struct intel_fbc *fbc = &dev_priv->fbc; 725 + struct intel_fbc_state_cache *cache = &fbc->state_cache; 726 + 727 + if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) && 728 + cache->fb.modifier != I915_FORMAT_MOD_X_TILED) 729 + return DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8; 730 + else 731 + return 0; 732 + } 733 + 734 + static bool intel_fbc_gen9_wa_cfb_stride_changed(struct drm_i915_private *dev_priv) 735 + { 736 + struct intel_fbc *fbc = &dev_priv->fbc; 737 + 738 + return fbc->params.gen9_wa_cfb_stride != intel_fbc_gen9_wa_cfb_stride(dev_priv); 739 + } 740 + 722 741 static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv) 723 742 { 724 743 struct intel_fbc *fbc = &dev_priv->fbc; ··· 896 877 params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane; 897 878 898 879 params->fb.format = cache->fb.format; 880 + params->fb.modifier = cache->fb.modifier; 899 881 params->fb.stride = cache->fb.stride; 900 882 901 883 params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache); ··· 924 904 return false; 925 905 926 906 if (params->fb.format != cache->fb.format) 907 + return false; 908 + 909 + if (params->fb.modifier != cache->fb.modifier) 927 910 return false; 928 911 929 912 if (params->fb.stride != cache->fb.stride) ··· 1208 1185 1209 1186 if (fbc->crtc) { 1210 1187 if (fbc->crtc != crtc || 1211 - !intel_fbc_cfb_size_changed(dev_priv)) 1188 + (!intel_fbc_cfb_size_changed(dev_priv) && 1189 + !intel_fbc_gen9_wa_cfb_stride_changed(dev_priv))) 1212 1190 goto out; 1213 1191 1214 1192 __intel_fbc_disable(dev_priv); ··· 1231 1207 goto out; 1232 1208 } 1233 1209 1234 - if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) && 1235 - plane_state->hw.fb->modifier != I915_FORMAT_MOD_X_TILED) 1236 - cache->gen9_wa_cfb_stride = 1237 - DIV_ROUND_UP(cache->plane.src_w, 32 * 
fbc->threshold) * 8; 1238 - else 1239 - cache->gen9_wa_cfb_stride = 0; 1210 + cache->gen9_wa_cfb_stride = intel_fbc_gen9_wa_cfb_stride(dev_priv); 1240 1211 1241 1212 drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n", 1242 1213 pipe_name(crtc->pipe));
+2 -8
drivers/gpu/drm/i915/display/intel_hdmi.c
··· 2867 2867 return ret; 2868 2868 } 2869 2869 2870 - static void intel_hdmi_destroy(struct drm_connector *connector) 2870 + static void intel_hdmi_connector_unregister(struct drm_connector *connector) 2871 2871 { 2872 2872 struct cec_notifier *n = intel_attached_hdmi(to_intel_connector(connector))->cec_notifier; 2873 2873 2874 2874 cec_notifier_conn_unregister(n); 2875 2875 2876 - intel_connector_destroy(connector); 2877 - } 2878 - 2879 - static void intel_hdmi_connector_unregister(struct drm_connector *connector) 2880 - { 2881 2876 intel_hdmi_remove_i2c_symlink(connector); 2882 - 2883 2877 intel_connector_unregister(connector); 2884 2878 } 2885 2879 ··· 2885 2891 .atomic_set_property = intel_digital_connector_atomic_set_property, 2886 2892 .late_register = intel_hdmi_connector_register, 2887 2893 .early_unregister = intel_hdmi_connector_unregister, 2888 - .destroy = intel_hdmi_destroy, 2894 + .destroy = intel_connector_destroy, 2889 2895 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 2890 2896 .atomic_duplicate_state = intel_digital_connector_duplicate_state, 2891 2897 };
+5 -14
drivers/gpu/drm/i915/gt/intel_lrc.c
··· 5396 5396 * typically be the first we inspect for submission. 5397 5397 */ 5398 5398 swp = prandom_u32_max(ve->num_siblings); 5399 - if (!swp) 5400 - return; 5401 - 5402 - swap(ve->siblings[swp], ve->siblings[0]); 5403 - if (!intel_engine_has_relative_mmio(ve->siblings[0])) 5404 - virtual_update_register_offsets(ve->context.lrc_reg_state, 5405 - ve->siblings[0]); 5399 + if (swp) 5400 + swap(ve->siblings[swp], ve->siblings[0]); 5406 5401 } 5407 5402 5408 5403 static int virtual_context_alloc(struct intel_context *ce) ··· 5410 5415 static int virtual_context_pin(struct intel_context *ce) 5411 5416 { 5412 5417 struct virtual_engine *ve = container_of(ce, typeof(*ve), context); 5413 - int err; 5414 5418 5415 5419 /* Note: we must use a real engine class for setting up reg state */ 5416 - err = __execlists_context_pin(ce, ve->siblings[0]); 5417 - if (err) 5418 - return err; 5419 - 5420 - virtual_engine_initial_hint(ve); 5421 - return 0; 5420 + return __execlists_context_pin(ce, ve->siblings[0]); 5422 5421 } 5423 5422 5424 5423 static void virtual_context_enter(struct intel_context *ce) ··· 5677 5688 intel_engine_init_active(&ve->base, ENGINE_VIRTUAL); 5678 5689 intel_engine_init_breadcrumbs(&ve->base); 5679 5690 intel_engine_init_execlists(&ve->base); 5691 + ve->base.breadcrumbs.irq_armed = true; /* fake HW, used for irq_work */ 5680 5692 5681 5693 ve->base.cops = &virtual_context_ops; 5682 5694 ve->base.request_alloc = execlists_request_alloc; ··· 5759 5769 5760 5770 ve->base.flags |= I915_ENGINE_IS_VIRTUAL; 5761 5771 5772 + virtual_engine_initial_hint(ve); 5762 5773 return &ve->context; 5763 5774 5764 5775 err_put:
+4 -4
drivers/gpu/drm/i915/gt/selftest_rps.c
··· 44 44 { 45 45 const u64 *a = A, *b = B; 46 46 47 - if (a < b) 47 + if (*a < *b) 48 48 return -1; 49 - else if (a > b) 49 + else if (*a > *b) 50 50 return 1; 51 51 else 52 52 return 0; ··· 56 56 { 57 57 const u32 *a = A, *b = B; 58 58 59 - if (a < b) 59 + if (*a < *b) 60 60 return -1; 61 - else if (a > b) 61 + else if (*a > *b) 62 62 return 1; 63 63 else 64 64 return 0;
+1
drivers/gpu/drm/i915/i915_drv.h
··· 440 440 struct { 441 441 const struct drm_format_info *format; 442 442 unsigned int stride; 443 + u64 modifier; 443 444 } fb; 444 445 445 446 int cfb_size;
+1
drivers/gpu/drm/i915/i915_perf.c
··· 1592 1592 u32 d; 1593 1593 1594 1594 cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM; 1595 + cmd |= MI_SRM_LRM_GLOBAL_GTT; 1595 1596 if (INTEL_GEN(stream->perf->i915) >= 8) 1596 1597 cmd++; 1597 1598
+4 -4
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
··· 1069 1069 if (new_content_type != SAME_AS_DISPLAY) { 1070 1070 struct vmw_surface_metadata metadata = {0}; 1071 1071 1072 - metadata.base_size.width = hdisplay; 1073 - metadata.base_size.height = vdisplay; 1074 - metadata.base_size.depth = 1; 1075 - 1076 1072 /* 1077 1073 * If content buffer is a buffer object, then we have to 1078 1074 * construct surface info ··· 1099 1103 } else { 1100 1104 metadata = new_vfbs->surface->metadata; 1101 1105 } 1106 + 1107 + metadata.base_size.width = hdisplay; 1108 + metadata.base_size.height = vdisplay; 1109 + metadata.base_size.depth = 1; 1102 1110 1103 1111 if (vps->surf) { 1104 1112 struct drm_vmw_size cur_base_size =
+1
include/linux/dma-buf.h
··· 311 311 void *vmap_ptr; 312 312 const char *exp_name; 313 313 const char *name; 314 + spinlock_t name_lock; /* spinlock to protect name access */ 314 315 struct module *owner; 315 316 struct list_head list_node; 316 317 void *priv;