Merge tag 'drm-fixes-for-v4.10-rc6-part-two' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
"This is the main request for rc6, since really the one earlier was the
rc5 one :-)

The main things are the nouveau-specific race fixes for the connector
locking bug we fixed in -next and reverted here, as it has quite large
prereqs. These two fixes should solve the problem at that level, and
we can fix it properly in 4.11.

Otherwise i915 has a bunch of changes, one ABI change for GVT-related
stuff, some VC4 leak fixes, one core fence fix, and some AMD changes,
plus one ast hang-avoidance fix.

Hoping it calms down around now"

* tag 'drm-fixes-for-v4.10-rc6-part-two' of git://people.freedesktop.org/~airlied/linux: (25 commits)
drm/nouveau: Handle fbcon suspend/resume in seperate worker
drm/nouveau: Don't enabling polling twice on runtime resume
drm/ast: Fixed system hanged if disable P2A
Revert "drm/radeon: always apply pci shutdown callbacks"
drm/i915: reinstate call to trace_i915_vma_bind
drm/i915: Move atomic state free from out of fence release
drm/i915: Check for NULL atomic state in intel_crtc_disable_noatomic()
drm/i915: Fix calculation of rotated x and y offsets for planar formats
drm/i915: Don't init hpd polling for vlv and chv from runtime_suspend()
drm/i915: Don't leak edid in intel_crt_detect_ddc()
drm/i915: Release temporary load-detect state upon switching
drm/i915: prevent crash with .disable_display parameter
drm/i915: Avoid drm_atomic_state_put(NULL) in intel_display_resume
MAINTAINERS: update new mail list for intel gvt driver
drm/i915/gvt: Fix kmem_cache_create() name
drm/i915/gvt/kvmgt: mdev ABI is available_instances, not available_instance
drm/amdgpu: fix unload driver issue for virtual display
drm/amdgpu: check ring being ready before using
drm/vc4: Return -EINVAL on the overflow checks failing.
drm/vc4: Fix an integer overflow in temporary allocation layout.
...

+243 -176
+1 -1
MAINTAINERS
@@ -4153,7 +4153,7 @@
 INTEL GVT-g DRIVERS (Intel GPU Virtualization)
 M:      Zhenyu Wang <zhenyuw@linux.intel.com>
 M:      Zhi Wang <zhi.a.wang@intel.com>
-L:      igvt-g-dev@lists.01.org
+L:      intel-gvt-dev@lists.freedesktop.org
 L:      intel-gfx@lists.freedesktop.org
 W:      https://01.org/igvt-g
 T:      git https://github.com/01org/gvt-linux.git
+7
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -83,6 +83,13 @@
         }
         break;
     }
+
+    if (!(*out_ring && (*out_ring)->adev)) {
+        DRM_ERROR("Ring %d is not initialized on IP %d\n",
+                  ring, ip_type);
+        return -EINVAL;
+    }
+
     return 0;
 }
 
+1 -4
drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -627,11 +627,8 @@
 
 static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
 {
-    struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
-
-    kfree(amdgpu_encoder->enc_priv);
     drm_encoder_cleanup(encoder);
-    kfree(amdgpu_encoder);
+    kfree(encoder);
 }
 
 static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
+1
drivers/gpu/drm/ast/ast_drv.h
@@ -113,6 +113,7 @@
     struct ttm_bo_kmap_obj cache_kmap;
     int next_cursor;
     bool support_wide_screen;
+    bool DisableP2A;
 
     enum ast_tx_chip tx_chip_type;
     u8 dp501_maxclk;
+83 -74
drivers/gpu/drm/ast/ast_main.c
@@ -124,6 +124,12 @@
     } else
         *need_post = false;
 
+    /* Check P2A Access */
+    ast->DisableP2A = true;
+    data = ast_read32(ast, 0xf004);
+    if (data != 0xFFFFFFFF)
+        ast->DisableP2A = false;
+
     /* Check if we support wide screen */
     switch (ast->chip) {
     case AST1180:
@@ -146,15 +152,17 @@
             ast->support_wide_screen = true;
         else {
             ast->support_wide_screen = false;
-            /* Read SCU7c (silicon revision register) */
-            ast_write32(ast, 0xf004, 0x1e6e0000);
-            ast_write32(ast, 0xf000, 0x1);
-            data = ast_read32(ast, 0x1207c);
-            data &= 0x300;
-            if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
-                ast->support_wide_screen = true;
-            if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
-                ast->support_wide_screen = true;
+            if (ast->DisableP2A == false) {
+                /* Read SCU7c (silicon revision register) */
+                ast_write32(ast, 0xf004, 0x1e6e0000);
+                ast_write32(ast, 0xf000, 0x1);
+                data = ast_read32(ast, 0x1207c);
+                data &= 0x300;
+                if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
+                    ast->support_wide_screen = true;
+                if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
+                    ast->support_wide_screen = true;
+            }
         }
         break;
     }
@@ -224,80 +232,81 @@
     uint32_t data, data2;
     uint32_t denum, num, div, ref_pll;
 
-    ast_write32(ast, 0xf004, 0x1e6e0000);
-    ast_write32(ast, 0xf000, 0x1);
-
-
-    ast_write32(ast, 0x10000, 0xfc600309);
-
-    do {
-        if (pci_channel_offline(dev->pdev))
-            return -EIO;
-    } while (ast_read32(ast, 0x10000) != 0x01);
-    data = ast_read32(ast, 0x10004);
-
-    if (data & 0x40)
+    if (ast->DisableP2A)
+    {
         ast->dram_bus_width = 16;
+        ast->dram_type = AST_DRAM_1Gx16;
+        ast->mclk = 396;
+    }
     else
-        ast->dram_bus_width = 32;
+    {
+        ast_write32(ast, 0xf004, 0x1e6e0000);
+        ast_write32(ast, 0xf000, 0x1);
+        data = ast_read32(ast, 0x10004);
 
-    if (ast->chip == AST2300 || ast->chip == AST2400) {
-        switch (data & 0x03) {
-        case 0:
-            ast->dram_type = AST_DRAM_512Mx16;
-            break;
-        default:
-        case 1:
-            ast->dram_type = AST_DRAM_1Gx16;
+        if (data & 0x40)
+            ast->dram_bus_width = 16;
+        else
+            ast->dram_bus_width = 32;
+
+        if (ast->chip == AST2300 || ast->chip == AST2400) {
+            switch (data & 0x03) {
+            case 0:
+                ast->dram_type = AST_DRAM_512Mx16;
+                break;
+            default:
+            case 1:
+                ast->dram_type = AST_DRAM_1Gx16;
+                break;
+            case 2:
+                ast->dram_type = AST_DRAM_2Gx16;
+                break;
+            case 3:
+                ast->dram_type = AST_DRAM_4Gx16;
+                break;
+            }
+        } else {
+            switch (data & 0x0c) {
+            case 0:
+            case 4:
+                ast->dram_type = AST_DRAM_512Mx16;
+                break;
+            case 8:
+                if (data & 0x40)
+                    ast->dram_type = AST_DRAM_1Gx16;
+                else
+                    ast->dram_type = AST_DRAM_512Mx32;
+                break;
+            case 0xc:
+                ast->dram_type = AST_DRAM_1Gx32;
+                break;
+            }
+        }
+
+        data = ast_read32(ast, 0x10120);
+        data2 = ast_read32(ast, 0x10170);
+        if (data2 & 0x2000)
+            ref_pll = 14318;
+        else
+            ref_pll = 12000;
+
+        denum = data & 0x1f;
+        num = (data & 0x3fe0) >> 5;
+        data = (data & 0xc000) >> 14;
+        switch (data) {
+        case 3:
+            div = 0x4;
             break;
         case 2:
-            ast->dram_type = AST_DRAM_2Gx16;
+        case 1:
+            div = 0x2;
             break;
-        case 3:
-            ast->dram_type = AST_DRAM_4Gx16;
-            break;
-        }
-    } else {
-        switch (data & 0x0c) {
-        case 0:
-        case 4:
-            ast->dram_type = AST_DRAM_512Mx16;
-            break;
-        case 8:
-            if (data & 0x40)
-                ast->dram_type = AST_DRAM_1Gx16;
-            else
-                ast->dram_type = AST_DRAM_512Mx32;
-            break;
-        case 0xc:
-            ast->dram_type = AST_DRAM_1Gx32;
+        default:
+            div = 0x1;
             break;
         }
+        ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
     }
-
-    data = ast_read32(ast, 0x10120);
-    data2 = ast_read32(ast, 0x10170);
-    if (data2 & 0x2000)
-        ref_pll = 14318;
-    else
-        ref_pll = 12000;
-
-    denum = data & 0x1f;
-    num = (data & 0x3fe0) >> 5;
-    data = (data & 0xc000) >> 14;
-    switch (data) {
-    case 3:
-        div = 0x4;
-        break;
-    case 2:
-    case 1:
-        div = 0x2;
-        break;
-    default:
-        div = 0x1;
-        break;
-    }
-    ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
     return 0;
 }
 
+13 -5
drivers/gpu/drm/ast/ast_post.c
@@ -379,12 +379,20 @@
     ast_open_key(ast);
     ast_set_def_ext_reg(dev);
 
-    if (ast->chip == AST2300 || ast->chip == AST2400)
-        ast_init_dram_2300(dev);
-    else
-        ast_init_dram_reg(dev);
+    if (ast->DisableP2A == false)
+    {
+        if (ast->chip == AST2300 || ast->chip == AST2400)
+            ast_init_dram_2300(dev);
+        else
+            ast_init_dram_reg(dev);
 
-    ast_init_3rdtx(dev);
+        ast_init_3rdtx(dev);
+    }
+    else
+    {
+        if (ast->tx_chip_type != AST_TX_NONE)
+            ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */
+    }
 }
 
 /* AST 2300 DRAM settings */
+6 -6
drivers/gpu/drm/drm_atomic.c
@@ -291,15 +291,15 @@
 EXPORT_SYMBOL(drm_atomic_get_crtc_state);
 
 static void set_out_fence_for_crtc(struct drm_atomic_state *state,
-                                   struct drm_crtc *crtc, s64 __user *fence_ptr)
+                                   struct drm_crtc *crtc, s32 __user *fence_ptr)
 {
     state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
 }
 
-static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
+static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
                                           struct drm_crtc *crtc)
 {
-    s64 __user *fence_ptr;
+    s32 __user *fence_ptr;
 
     fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
     state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
@@ -512,7 +512,7 @@
         state->color_mgmt_changed |= replaced;
         return ret;
     } else if (property == config->prop_out_fence_ptr) {
-        s64 __user *fence_ptr = u64_to_user_ptr(val);
+        s32 __user *fence_ptr = u64_to_user_ptr(val);
 
         if (!fence_ptr)
             return 0;
@@ -1915,7 +1915,7 @@
  */
 
 struct drm_out_fence_state {
-    s64 __user *out_fence_ptr;
+    s32 __user *out_fence_ptr;
     struct sync_file *sync_file;
     int fd;
 };
@@ -1952,7 +1952,7 @@
         return 0;
 
     for_each_crtc_in_state(state, crtc, crtc_state, i) {
-        u64 __user *fence_ptr;
+        s32 __user *fence_ptr;
 
         fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
 
-4
drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -481,7 +481,6 @@
     (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
 
 static unsigned long bypass_scan_mask = 0;
-static bool bypass_batch_buffer_scan = true;
 
 /* ring ALL, type = 0 */
 static struct sub_op_bits sub_op_mi[] = {
@@ -1523,9 +1522,6 @@
 static int batch_buffer_needs_scan(struct parser_exec_state *s)
 {
     struct intel_gvt *gvt = s->vgpu->gvt;
-
-    if (bypass_batch_buffer_scan)
-        return 0;
 
     if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
         /* BDW decides privilege based on address space */
+19 -47
drivers/gpu/drm/i915/gvt/execlist.c
@@ -364,58 +364,30 @@
 #define get_desc_from_elsp_dwords(ed, i) \
     ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
 
-
-#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
-#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
-static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
-                             unsigned long add, int gmadr_bytes)
-{
-    if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
-        return -1;
-
-    *((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
-        BATCH_BUFFER_ADDR_MASK;
-    if (gmadr_bytes == 8) {
-        *((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
-            add & BATCH_BUFFER_ADDR_HIGH_MASK;
-    }
-
-    return 0;
-}
-
 static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 {
-    int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+    const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+    struct intel_shadow_bb_entry *entry_obj;
 
     /* pin the gem object to ggtt */
-    if (!list_empty(&workload->shadow_bb)) {
-        struct intel_shadow_bb_entry *entry_obj =
-            list_first_entry(&workload->shadow_bb,
-                             struct intel_shadow_bb_entry,
-                             list);
-        struct intel_shadow_bb_entry *temp;
+    list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
+        struct i915_vma *vma;
 
-        list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
-                                 list) {
-            struct i915_vma *vma;
-
-            vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
-                                           4, 0);
-            if (IS_ERR(vma)) {
-                gvt_err("Cannot pin\n");
-                return;
-            }
-
-            /* FIXME: we are not tracking our pinned VMA leaving it
-             * up to the core to fix up the stray pin_count upon
-             * free.
-             */
-
-            /* update the relocate gma with shadow batch buffer*/
-            set_gma_to_bb_cmd(entry_obj,
-                              i915_ggtt_offset(vma),
-                              gmadr_bytes);
+        vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
+        if (IS_ERR(vma)) {
+            gvt_err("Cannot pin\n");
+            return;
         }
+
+        /* FIXME: we are not tracking our pinned VMA leaving it
+         * up to the core to fix up the stray pin_count upon
+         * free.
+         */
+
+        /* update the relocate gma with shadow batch buffer*/
+        entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
+        if (gmadr_bytes == 8)
+            entry_obj->bb_start_cmd_va[2] = 0;
     }
 }
@@ -798,7 +770,7 @@
         INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
     }
 
-    vgpu->workloads = kmem_cache_create("gvt-g vgpu workload",
+    vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
             sizeof(struct intel_vgpu_workload), 0,
             SLAB_HWCACHE_ALIGN,
             NULL);
+4 -4
drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -230,8 +230,8 @@
     return NULL;
 }
 
-static ssize_t available_instance_show(struct kobject *kobj, struct device *dev,
-                                       char *buf)
+static ssize_t available_instances_show(struct kobject *kobj,
+                                        struct device *dev, char *buf)
 {
     struct intel_vgpu_type *type;
     unsigned int num = 0;
@@ -269,12 +269,12 @@
         type->fence);
 }
 
-static MDEV_TYPE_ATTR_RO(available_instance);
+static MDEV_TYPE_ATTR_RO(available_instances);
 static MDEV_TYPE_ATTR_RO(device_api);
 static MDEV_TYPE_ATTR_RO(description);
 
 static struct attribute *type_attrs[] = {
-    &mdev_type_attr_available_instance.attr,
+    &mdev_type_attr_available_instances.attr,
     &mdev_type_attr_device_api.attr,
     &mdev_type_attr_description.attr,
     NULL,
+1 -1
drivers/gpu/drm/i915/gvt/scheduler.h
@@ -113,7 +113,7 @@
     struct drm_i915_gem_object *obj;
     void *va;
     unsigned long len;
-    void *bb_start_cmd_va;
+    u32 *bb_start_cmd_va;
 };
 
 #define workload_q_head(vgpu, ring_id) \
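As I read it, this type change is what enables the execlist.c rewrite above: once bb_start_cmd_va is a u32 * instead of a void *, the dwords of the shadowed MI_BATCH_BUFFER_START command can be patched by array index rather than through cast-and-byte-offset arithmetic. A self-contained sketch of the equivalence (plain C for illustration only; the function and variable names here are made up, not kernel code):

#include <stdint.h>

typedef uint32_t u32;

void patch_bb_start(void *raw, u32 lower, u32 upper)
{
    /* Old style: byte-offset arithmetic through casts.
     * (1 << 2) == 4 bytes == one 32-bit dword past the command header.
     * (The kernel's old code relied on GCC's byte-based void * math.) */
    *((u32 *)((char *)raw + (1 << 2))) = lower;
    *((u32 *)((char *)raw + (2 << 2))) = upper;

    /* New style: the same two stores, written as indexing on a u32 *. */
    u32 *cmd = raw;
    cmd[1] = lower; /* dword 1: low 32 bits of the graphics address */
    cmd[2] = upper; /* dword 2: high bits, when the command carries 8 address bytes */
}

The indexed form also makes the old BATCH_BUFFER_ADDR_MASK juggling unnecessary, since a u32 store can only touch the 32 bits it is aimed at.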
+1 -1
drivers/gpu/drm/i915/i915_drv.c
@@ -2378,7 +2378,7 @@
 
     assert_forcewakes_inactive(dev_priv);
 
-    if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv))
+    if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
         intel_hpd_poll_init(dev_priv);
 
     DRM_DEBUG_KMS("Device suspended\n");
+5
drivers/gpu/drm/i915/i915_drv.h
@@ -1977,6 +1977,11 @@
 
     struct i915_frontbuffer_tracking fb_tracking;
 
+    struct intel_atomic_helper {
+        struct llist_head free_list;
+        struct work_struct free_work;
+    } atomic_helper;
+
     u16 orig_clock;
 
     bool mchbar_need_disable;
+1
drivers/gpu/drm/i915/i915_vma.c
@@ -185,6 +185,7 @@
         return ret;
     }
 
+    trace_i915_vma_bind(vma, bind_flags);
     ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
     if (ret)
         return ret;
+5 -4
drivers/gpu/drm/i915/intel_crt.c
@@ -499,5 +499,6 @@
     struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
     struct edid *edid;
     struct i2c_adapter *i2c;
+    bool ret = false;
 
     BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
@@ -516,15 +517,15 @@
          */
         if (!is_digital) {
             DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
-            return true;
+            ret = true;
+        } else {
+            DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
         }
-
-        DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
     } else {
         DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
     }
 
     kfree(edid);
 
-    return false;
+    return ret;
 }
+38 -5
drivers/gpu/drm/i915/intel_display.c
@@ -2585,8 +2585,9 @@
      * We only keep the x/y offsets, so push all of the
      * gtt offset into the x/y offsets.
      */
-    _intel_adjust_tile_offset(&x, &y, tile_size,
-                              tile_width, tile_height, pitch_tiles,
+    _intel_adjust_tile_offset(&x, &y,
+                              tile_width, tile_height,
+                              tile_size, pitch_tiles,
                               gtt_offset_rotated * tile_size, 0);
 
     gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
@@ -6850,6 +6851,12 @@
     }
 
     state = drm_atomic_state_alloc(crtc->dev);
+    if (!state) {
+        DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
+                      crtc->base.id, crtc->name);
+        return;
+    }
+
     state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
 
     /* Everything's already locked, -EDEADLK can't happen. */
@@ -11253,6 +11260,7 @@
     }
 
     old->restore_state = restore_state;
+    drm_atomic_state_put(state);
 
     /* let the connector get through one full cycle before testing */
     intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
@@ -14523,8 +14531,14 @@
         break;
 
     case FENCE_FREE:
-        drm_atomic_state_put(&state->base);
-        break;
+        {
+            struct intel_atomic_helper *helper =
+                &to_i915(state->base.dev)->atomic_helper;
+
+            if (llist_add(&state->freed, &helper->free_list))
+                schedule_work(&helper->free_work);
+            break;
+        }
     }
 
     return NOTIFY_DONE;
@@ -16409,6 +16423,18 @@
     drm_modeset_acquire_fini(&ctx);
 }
 
+static void intel_atomic_helper_free_state(struct work_struct *work)
+{
+    struct drm_i915_private *dev_priv =
+        container_of(work, typeof(*dev_priv), atomic_helper.free_work);
+    struct intel_atomic_state *state, *next;
+    struct llist_node *freed;
+
+    freed = llist_del_all(&dev_priv->atomic_helper.free_list);
+    llist_for_each_entry_safe(state, next, freed, freed)
+        drm_atomic_state_put(&state->base);
+}
+
 int intel_modeset_init(struct drm_device *dev)
 {
     struct drm_i915_private *dev_priv = to_i915(dev);
@@ -16439,5 +16465,8 @@
     dev->mode_config.allow_fb_modifiers = true;
 
     dev->mode_config.funcs = &intel_mode_funcs;
+
+    INIT_WORK(&dev_priv->atomic_helper.free_work,
+              intel_atomic_helper_free_state);
 
     intel_init_quirks(dev);
@@ -17056,7 +17085,8 @@
 
     if (ret)
         DRM_ERROR("Restoring old state failed with %i\n", ret);
-    drm_atomic_state_put(state);
+    if (state)
+        drm_atomic_state_put(state);
 }
 
 void intel_modeset_gem_init(struct drm_device *dev)
@@ -17126,5 +17156,8 @@
 void intel_modeset_cleanup(struct drm_device *dev)
 {
     struct drm_i915_private *dev_priv = to_i915(dev);
+
+    flush_work(&dev_priv->atomic_helper.free_work);
+    WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
 
     intel_disable_gt_powersave(dev_priv);
+2
drivers/gpu/drm/i915/intel_drv.h
@@ -370,6 +370,8 @@
     struct skl_wm_values wm_results;
 
     struct i915_sw_fence commit_ready;
+
+    struct llist_node freed;
 };
 
 struct intel_plane_state {
+3
drivers/gpu/drm/i915/intel_fbdev.c
@@ -742,6 +742,9 @@
 {
     struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
 
+    if (!ifbdev)
+        return;
+
     ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
 }
 
+2 -1
drivers/gpu/drm/nouveau/nouveau_display.c
@@ -411,7 +411,8 @@
         return ret;
 
     /* enable polling for external displays */
-    drm_kms_helper_poll_enable(dev);
+    if (!dev->mode_config.poll_enabled)
+        drm_kms_helper_poll_enable(dev);
 
     /* enable hotplug interrupts */
     list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+4 -1
drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -773,7 +773,10 @@
     pci_set_master(pdev);
 
     ret = nouveau_do_resume(drm_dev, true);
-    drm_kms_helper_poll_enable(drm_dev);
+
+    if (!drm_dev->mode_config.poll_enabled)
+        drm_kms_helper_poll_enable(drm_dev);
+
     /* do magic */
     nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
     vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
+2
drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -165,6 +165,8 @@
     struct backlight_device *backlight;
     struct list_head bl_connectors;
     struct work_struct hpd_work;
+    struct work_struct fbcon_work;
+    int fbcon_new_state;
 #ifdef CONFIG_ACPI
     struct notifier_block acpi_nb;
 #endif
+34 -9
drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -470,17 +470,41 @@
     .fb_probe = nouveau_fbcon_create,
 };
 
+static void
+nouveau_fbcon_set_suspend_work(struct work_struct *work)
+{
+    struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work);
+    int state = READ_ONCE(drm->fbcon_new_state);
+
+    if (state == FBINFO_STATE_RUNNING)
+        pm_runtime_get_sync(drm->dev->dev);
+
+    console_lock();
+    if (state == FBINFO_STATE_RUNNING)
+        nouveau_fbcon_accel_restore(drm->dev);
+    drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
+    if (state != FBINFO_STATE_RUNNING)
+        nouveau_fbcon_accel_save_disable(drm->dev);
+    console_unlock();
+
+    if (state == FBINFO_STATE_RUNNING) {
+        pm_runtime_mark_last_busy(drm->dev->dev);
+        pm_runtime_put_sync(drm->dev->dev);
+    }
+}
+
 void
 nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
 {
     struct nouveau_drm *drm = nouveau_drm(dev);
-    if (drm->fbcon) {
-        console_lock();
-        if (state == FBINFO_STATE_RUNNING)
-            nouveau_fbcon_accel_restore(dev);
-        drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
-        if (state != FBINFO_STATE_RUNNING)
-            nouveau_fbcon_accel_save_disable(dev);
-        console_unlock();
-    }
+
+    if (!drm->fbcon)
+        return;
+
+    drm->fbcon_new_state = state;
+    /* Since runtime resume can happen as a result of a sysfs operation,
+     * it's possible we already have the console locked. So handle fbcon
+     * init/deinit from a seperate work thread
+     */
+    schedule_work(&drm->fbcon_work);
 }
@@ -526,5 +550,6 @@
         return -ENOMEM;
 
     drm->fbcon = fbcon;
+    INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
 
     drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
+3 -4
drivers/gpu/drm/radeon/radeon_drv.c
@@ -366,11 +366,10 @@
 radeon_pci_shutdown(struct pci_dev *pdev)
 {
     /* if we are running in a VM, make sure the device
-     * torn down properly on reboot/shutdown.
-     * unfortunately we can't detect certain
-     * hypervisors so just do this all the time.
+     * torn down properly on reboot/shutdown
      */
-    radeon_pci_remove(pdev);
+    if (radeon_device_is_virtual())
+        radeon_pci_remove(pdev);
 }
 
 static int radeon_pmops_suspend(struct device *dev)
+1 -1
drivers/gpu/drm/vc4/vc4_crtc.c
@@ -839,7 +839,7 @@
 
     }
 
-    __drm_atomic_helper_crtc_destroy_state(state);
+    drm_atomic_helper_crtc_destroy_state(crtc, state);
 }
 
 static const struct drm_crtc_funcs vc4_crtc_funcs = {
+3 -1
drivers/gpu/drm/vc4/vc4_gem.c
@@ -594,11 +594,13 @@
                           args->shader_rec_count);
     struct vc4_bo *bo;
 
-    if (uniforms_offset < shader_rec_offset ||
+    if (shader_rec_offset < args->bin_cl_size ||
+        uniforms_offset < shader_rec_offset ||
         exec_size < uniforms_offset ||
         args->shader_rec_count >= (UINT_MAX /
                           sizeof(struct vc4_shader_state)) ||
         temp_size < exec_size) {
         DRM_ERROR("overflow in exec arguments\n");
+        ret = -EINVAL;
         goto fail;
     }
+1 -1
drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -461,7 +461,7 @@
     }
 
     ret = vc4_full_res_bounds_check(exec, *obj, surf);
-    if (!ret)
+    if (ret)
         return ret;
 
     return 0;
+1 -1
include/drm/drm_atomic.h
@@ -144,7 +144,7 @@
     struct drm_crtc *ptr;
     struct drm_crtc_state *state;
     struct drm_crtc_commit *commit;
-    s64 __user *out_fence_ptr;
+    s32 __user *out_fence_ptr;
 };
 
 struct __drm_connnectors_state {
+1 -1
include/drm/drm_mode_config.h
@@ -488,7 +488,7 @@
     /**
      * @prop_out_fence_ptr: Sync File fd pointer representing the
      * outgoing fences for a CRTC. Userspace should provide a pointer to a
-     * value of type s64, and then cast that pointer to u64.
+     * value of type s32, and then cast that pointer to u64.
      */
     struct drm_property *prop_out_fence_ptr;
     /**
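Since this is the uAPI side of the core fence fix, a minimal userspace sketch may help show what the s64 -> s32 change means in practice: the fd variable is an s32, and only its *address* is cast to u64 as the property value. This uses libdrm's atomic API (drmModeAtomicAddProperty()/drmModeAtomicCommit()); the function name is hypothetical, the lookup of the OUT_FENCE_PTR property id is elided, and the error handling is only a sketch:

#include <stdint.h>
#include <unistd.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

int commit_with_out_fence(int drm_fd, drmModeAtomicReqPtr req,
                          uint32_t crtc_id, uint32_t out_fence_prop_id)
{
    int32_t out_fence_fd = -1; /* s32: the kernel writes a sync_file fd here */

    /* The property value is the pointer to the s32, cast to u64. */
    if (drmModeAtomicAddProperty(req, crtc_id, out_fence_prop_id,
                                 (uint64_t)(uintptr_t)&out_fence_fd) < 0)
        return -1;

    if (drmModeAtomicCommit(drm_fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL) != 0)
        return -1;

    /* out_fence_fd now holds a sync_file fd that signals when the commit
     * completes; the caller should close() it when done. */
    return out_fence_fd;
}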