Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-fixes-for-v4.14-rc8' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:

- one nouveau regression fix

- some amdgpu fixes for stable to fix hangs on some harvested Polaris
GPUs

- a set of KASAN and regression fixes for i915, their CI system seems
to be working pretty well now.

* tag 'drm-fixes-for-v4.14-rc8' of git://people.freedesktop.org/~airlied/linux:
drm/amdgpu: allow harvesting check for Polaris VCE
drm/amdgpu: return -ENOENT from uvd 6.0 early init for harvesting
drm/i915: Check incoming alignment for unfenced buffers (on i915gm)
drm/nouveau/kms/nv50: use the correct state for base channel notifier setup
drm/i915: Hold rcu_read_lock when iterating over the radixtree (vma idr)
drm/i915: Hold rcu_read_lock when iterating over the radixtree (objects)
drm/i915/edp: read edp display control registers unconditionally
drm/i915: Do not rely on wm preservation for ILK watermarks
drm/i915: Cancel the modeset retry work during modeset cleanup

+70 -44
+4
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
··· 93 93 { 94 94 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 95 95 96 + if (!(adev->flags & AMD_IS_APU) && 97 + (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK)) 98 + return -ENOENT; 99 + 96 100 uvd_v6_0_set_ring_funcs(adev); 97 101 uvd_v6_0_set_irq_funcs(adev); 98 102
+6 -6
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
··· 365 365 { 366 366 u32 tmp; 367 367 368 - /* Fiji, Stoney, Polaris10, Polaris11, Polaris12 are single pipe */ 369 368 if ((adev->asic_type == CHIP_FIJI) || 370 - (adev->asic_type == CHIP_STONEY) || 371 - (adev->asic_type == CHIP_POLARIS10) || 372 - (adev->asic_type == CHIP_POLARIS11) || 373 - (adev->asic_type == CHIP_POLARIS12)) 369 + (adev->asic_type == CHIP_STONEY)) 374 370 return AMDGPU_VCE_HARVEST_VCE1; 375 371 376 - /* Tonga and CZ are dual or single pipe */ 377 372 if (adev->flags & AMD_IS_APU) 378 373 tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) & 379 374 VCE_HARVEST_FUSE_MACRO__MASK) >> ··· 386 391 case 3: 387 392 return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1; 388 393 default: 394 + if ((adev->asic_type == CHIP_POLARIS10) || 395 + (adev->asic_type == CHIP_POLARIS11) || 396 + (adev->asic_type == CHIP_POLARIS12)) 397 + return AMDGPU_VCE_HARVEST_VCE1; 398 + 389 399 return 0; 390 400 } 391 401 }
+2
drivers/gpu/drm/i915/i915_gem.c
··· 2214 2214 struct radix_tree_iter iter; 2215 2215 void __rcu **slot; 2216 2216 2217 + rcu_read_lock(); 2217 2218 radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0) 2218 2219 radix_tree_delete(&obj->mm.get_page.radix, iter.index); 2220 + rcu_read_unlock(); 2219 2221 } 2220 2222 2221 2223 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+2
drivers/gpu/drm/i915/i915_gem_context.c
··· 104 104 kmem_cache_free(ctx->i915->luts, lut); 105 105 } 106 106 107 + rcu_read_lock(); 107 108 radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) { 108 109 struct i915_vma *vma = rcu_dereference_raw(*slot); 109 110 struct drm_i915_gem_object *obj = vma->obj; ··· 116 115 117 116 __i915_gem_object_release_unless_active(obj); 118 117 } 118 + rcu_read_unlock(); 119 119 } 120 120 121 121 static void i915_gem_context_free(struct i915_gem_context *ctx)
+4
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 337 337 (vma->node.start + vma->node.size - 1) >> 32) 338 338 return true; 339 339 340 + if (flags & __EXEC_OBJECT_NEEDS_MAP && 341 + !i915_vma_is_map_and_fenceable(vma)) 342 + return true; 343 + 340 344 return false; 341 345 } 342 346
+18 -1
drivers/gpu/drm/i915/intel_display.c
··· 15227 15227 intel_panel_destroy_backlight(connector); 15228 15228 } 15229 15229 15230 + static void intel_hpd_poll_fini(struct drm_device *dev) 15231 + { 15232 + struct intel_connector *connector; 15233 + struct drm_connector_list_iter conn_iter; 15234 + 15235 + /* First disable polling... */ 15236 + drm_kms_helper_poll_fini(dev); 15237 + 15238 + /* Then kill the work that may have been queued by hpd. */ 15239 + drm_connector_list_iter_begin(dev, &conn_iter); 15240 + for_each_intel_connector_iter(connector, &conn_iter) { 15241 + if (connector->modeset_retry_work.func) 15242 + cancel_work_sync(&connector->modeset_retry_work); 15243 + } 15244 + drm_connector_list_iter_end(&conn_iter); 15245 + } 15246 + 15230 15247 void intel_modeset_cleanup(struct drm_device *dev) 15231 15248 { 15232 15249 struct drm_i915_private *dev_priv = to_i915(dev); ··· 15264 15247 * Due to the hpd irq storm handling the hotplug work can re-arm the 15265 15248 * poll handlers. Hence disable polling after hpd handling is shut down. 15266 15249 */ 15267 - drm_kms_helper_poll_fini(dev); 15250 + intel_hpd_poll_fini(dev); 15268 15251 15269 15252 /* poll work can call into fbdev, hence clean that up afterwards */ 15270 15253 intel_fbdev_fini(dev_priv);
+10 -3
drivers/gpu/drm/i915/intel_dp.c
··· 3731 3731 3732 3732 } 3733 3733 3734 - /* Read the eDP Display control capabilities registers */ 3735 - if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) && 3736 - drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, 3734 + /* 3735 + * Read the eDP display control registers. 3736 + * 3737 + * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in 3738 + * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it 3739 + * set, but require eDP 1.4+ detection (e.g. for supported link rates 3740 + * method). The display control registers should read zero if they're 3741 + * not supported anyway. 3742 + */ 3743 + if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, 3737 3744 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == 3738 3745 sizeof(intel_dp->edp_dpcd)) 3739 3746 DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
-1
drivers/gpu/drm/i915/intel_drv.h
··· 496 496 497 497 struct intel_pipe_wm { 498 498 struct intel_wm_level wm[5]; 499 - struct intel_wm_level raw_wm[5]; 500 499 uint32_t linetime; 501 500 bool fbc_wm_enabled; 502 501 bool pipe_enabled;
+21 -30
drivers/gpu/drm/i915/intel_pm.c
··· 2716 2716 const struct intel_crtc *intel_crtc, 2717 2717 int level, 2718 2718 struct intel_crtc_state *cstate, 2719 - struct intel_plane_state *pristate, 2720 - struct intel_plane_state *sprstate, 2721 - struct intel_plane_state *curstate, 2719 + const struct intel_plane_state *pristate, 2720 + const struct intel_plane_state *sprstate, 2721 + const struct intel_plane_state *curstate, 2722 2722 struct intel_wm_level *result) 2723 2723 { 2724 2724 uint16_t pri_latency = dev_priv->wm.pri_latency[level]; ··· 3038 3038 struct intel_pipe_wm *pipe_wm; 3039 3039 struct drm_device *dev = state->dev; 3040 3040 const struct drm_i915_private *dev_priv = to_i915(dev); 3041 - struct intel_plane *intel_plane; 3042 - struct intel_plane_state *pristate = NULL; 3043 - struct intel_plane_state *sprstate = NULL; 3044 - struct intel_plane_state *curstate = NULL; 3041 + struct drm_plane *plane; 3042 + const struct drm_plane_state *plane_state; 3043 + const struct intel_plane_state *pristate = NULL; 3044 + const struct intel_plane_state *sprstate = NULL; 3045 + const struct intel_plane_state *curstate = NULL; 3045 3046 int level, max_level = ilk_wm_max_level(dev_priv), usable_level; 3046 3047 struct ilk_wm_maximums max; 3047 3048 3048 3049 pipe_wm = &cstate->wm.ilk.optimal; 3049 3050 3050 - for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3051 - struct intel_plane_state *ps; 3051 + drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &cstate->base) { 3052 + const struct intel_plane_state *ps = to_intel_plane_state(plane_state); 3052 3053 3053 - ps = intel_atomic_get_existing_plane_state(state, 3054 - intel_plane); 3055 - if (!ps) 3056 - continue; 3057 - 3058 - if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY) 3054 + if (plane->type == DRM_PLANE_TYPE_PRIMARY) 3059 3055 pristate = ps; 3060 - else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY) 3056 + else if (plane->type == DRM_PLANE_TYPE_OVERLAY) 3061 3057 sprstate = ps; 3062 - else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR) 3058 + else if (plane->type == DRM_PLANE_TYPE_CURSOR) 3063 3059 curstate = ps; 3064 3060 } 3065 3061 ··· 3077 3081 if (pipe_wm->sprites_scaled) 3078 3082 usable_level = 0; 3079 3083 3080 - ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate, 3081 - pristate, sprstate, curstate, &pipe_wm->raw_wm[0]); 3082 - 3083 3084 memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm)); 3084 - pipe_wm->wm[0] = pipe_wm->raw_wm[0]; 3085 + ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate, 3086 + pristate, sprstate, curstate, &pipe_wm->wm[0]); 3085 3087 3086 3088 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 3087 3089 pipe_wm->linetime = hsw_compute_linetime_wm(cstate); ··· 3089 3095 3090 3096 ilk_compute_wm_reg_maximums(dev_priv, 1, &max); 3091 3097 3092 - for (level = 1; level <= max_level; level++) { 3093 - struct intel_wm_level *wm = &pipe_wm->raw_wm[level]; 3098 + for (level = 1; level <= usable_level; level++) { 3099 + struct intel_wm_level *wm = &pipe_wm->wm[level]; 3094 3100 3095 3101 ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate, 3096 3102 pristate, sprstate, curstate, wm); ··· 3100 3106 * register maximums since such watermarks are 3101 3107 * always invalid. 3102 3108 */ 3103 - if (level > usable_level) 3104 - continue; 3105 - 3106 - if (ilk_validate_wm_level(level, &max, wm)) 3107 - pipe_wm->wm[level] = *wm; 3108 - else 3109 - usable_level = level; 3109 + if (!ilk_validate_wm_level(level, &max, wm)) { 3110 + memset(wm, 0, sizeof(*wm)); 3111 + break; 3112 + } 3110 3113 3111 3114 return 0;
+3 -3
drivers/gpu/drm/nouveau/nv50_display.c
··· 4099 4099 { 4100 4100 struct nouveau_drm *drm = nouveau_drm(dev); 4101 4101 struct nv50_disp *disp = nv50_disp(dev); 4102 - struct drm_plane_state *old_plane_state; 4102 + struct drm_plane_state *new_plane_state; 4103 4103 struct drm_plane *plane; 4104 4104 struct drm_crtc *crtc; 4105 4105 bool active = false; ··· 4129 4129 if (ret) 4130 4130 goto err_cleanup; 4131 4131 4132 - for_each_old_plane_in_state(state, plane, old_plane_state, i) { 4133 - struct nv50_wndw_atom *asyw = nv50_wndw_atom(old_plane_state); 4132 + for_each_new_plane_in_state(state, plane, new_plane_state, i) { 4133 + struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state); 4134 4134 struct nv50_wndw *wndw = nv50_wndw(plane); 4135 4135 4136 4136 if (asyw->set.image) {