Merge tag 'drm-fixes-for-v4.10-rc6-part-two' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
"This is the main request for rc6, since really the one earlier was the
rc5 one :-)

The main things are the nouveau-specific race fixes for the connector
locking bug we fixed in -next and reverted here, as that fix has quite
large prerequisites. These two fixes should solve the problem at that
level, and we can fix it properly in 4.11.

Otherwise i915 has a bunch of changes, one ABI change for GVT-related
stuff, some VC4 leak fixes, one core fence fix and some AMD changes,
oh and one ast hang-avoidance fix.

Hoping it calms down around now"

* tag 'drm-fixes-for-v4.10-rc6-part-two' of git://people.freedesktop.org/~airlied/linux: (25 commits)
drm/nouveau: Handle fbcon suspend/resume in seperate worker
drm/nouveau: Don't enabling polling twice on runtime resume
drm/ast: Fixed system hanged if disable P2A
Revert "drm/radeon: always apply pci shutdown callbacks"
drm/i915: reinstate call to trace_i915_vma_bind
drm/i915: Move atomic state free from out of fence release
drm/i915: Check for NULL atomic state in intel_crtc_disable_noatomic()
drm/i915: Fix calculation of rotated x and y offsets for planar formats
drm/i915: Don't init hpd polling for vlv and chv from runtime_suspend()
drm/i915: Don't leak edid in intel_crt_detect_ddc()
drm/i915: Release temporary load-detect state upon switching
drm/i915: prevent crash with .disable_display parameter
drm/i915: Avoid drm_atomic_state_put(NULL) in intel_display_resume
MAINTAINERS: update new mail list for intel gvt driver
drm/i915/gvt: Fix kmem_cache_create() name
drm/i915/gvt/kvmgt: mdev ABI is available_instances, not available_instance
drm/amdgpu: fix unload driver issue for virtual display
drm/amdgpu: check ring being ready before using
drm/vc4: Return -EINVAL on the overflow checks failing.
drm/vc4: Fix an integer overflow in temporary allocation layout.
...

+243 -176
+1 -1
MAINTAINERS
···
 INTEL GVT-g DRIVERS (Intel GPU Virtualization)
 M: Zhenyu Wang <zhenyuw@linux.intel.com>
 M: Zhi Wang <zhi.a.wang@intel.com>
-L: igvt-g-dev@lists.01.org
+L: intel-gvt-dev@lists.freedesktop.org
 L: intel-gfx@lists.freedesktop.org
 W: https://01.org/igvt-g
 T: git https://github.com/01org/gvt-linux.git
+7
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
···
 		}
 		break;
 	}
+
+	if (!(*out_ring && (*out_ring)->adev)) {
+		DRM_ERROR("Ring %d is not initialized on IP %d\n",
+			  ring, ip_type);
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
+1 -4
drivers/gpu/drm/amd/amdgpu/dce_virtual.c
···
 
 static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
 {
-	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
-
-	kfree(amdgpu_encoder->enc_priv);
 	drm_encoder_cleanup(encoder);
-	kfree(amdgpu_encoder);
+	kfree(encoder);
 }
 
 static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
+1
drivers/gpu/drm/ast/ast_drv.h
···
 	struct ttm_bo_kmap_obj cache_kmap;
 	int next_cursor;
 	bool support_wide_screen;
+	bool DisableP2A;
 
 	enum ast_tx_chip tx_chip_type;
 	u8 dp501_maxclk;
+83 -74
drivers/gpu/drm/ast/ast_main.c
···
 	} else
 		*need_post = false;
 
+	/* Check P2A Access */
+	ast->DisableP2A = true;
+	data = ast_read32(ast, 0xf004);
+	if (data != 0xFFFFFFFF)
+		ast->DisableP2A = false;
+
 	/* Check if we support wide screen */
 	switch (ast->chip) {
 	case AST1180:
···
 		ast->support_wide_screen = true;
 	else {
 		ast->support_wide_screen = false;
-		/* Read SCU7c (silicon revision register) */
-		ast_write32(ast, 0xf004, 0x1e6e0000);
-		ast_write32(ast, 0xf000, 0x1);
-		data = ast_read32(ast, 0x1207c);
-		data &= 0x300;
-		if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
-			ast->support_wide_screen = true;
-		if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
-			ast->support_wide_screen = true;
+		if (ast->DisableP2A == false) {
+			/* Read SCU7c (silicon revision register) */
+			ast_write32(ast, 0xf004, 0x1e6e0000);
+			ast_write32(ast, 0xf000, 0x1);
+			data = ast_read32(ast, 0x1207c);
+			data &= 0x300;
+			if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
+				ast->support_wide_screen = true;
+			if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
+				ast->support_wide_screen = true;
+		}
 	}
 	break;
 }
···
 	uint32_t data, data2;
 	uint32_t denum, num, div, ref_pll;
 
-	ast_write32(ast, 0xf004, 0x1e6e0000);
-	ast_write32(ast, 0xf000, 0x1);
-
-
-	ast_write32(ast, 0x10000, 0xfc600309);
-
-	do {
-		if (pci_channel_offline(dev->pdev))
-			return -EIO;
-	} while (ast_read32(ast, 0x10000) != 0x01);
-	data = ast_read32(ast, 0x10004);
-
-	if (data & 0x40)
-		ast->dram_bus_width = 16;
-	else
-		ast->dram_bus_width = 32;
-
-	if (ast->chip == AST2300 || ast->chip == AST2400) {
-		switch (data & 0x03) {
-		case 0:
-			ast->dram_type = AST_DRAM_512Mx16;
-			break;
-		default:
-		case 1:
-			ast->dram_type = AST_DRAM_1Gx16;
-			break;
-		case 2:
-			ast->dram_type = AST_DRAM_2Gx16;
-			break;
-		case 3:
-			ast->dram_type = AST_DRAM_4Gx16;
-			break;
-		}
-	} else {
-		switch (data & 0x0c) {
-		case 0:
-		case 4:
-			ast->dram_type = AST_DRAM_512Mx16;
-			break;
-		case 8:
-			if (data & 0x40)
-				ast->dram_type = AST_DRAM_1Gx16;
-			else
-				ast->dram_type = AST_DRAM_512Mx32;
-			break;
-		case 0xc:
-			ast->dram_type = AST_DRAM_1Gx32;
-			break;
-		}
-	}
-
-	data = ast_read32(ast, 0x10120);
-	data2 = ast_read32(ast, 0x10170);
-	if (data2 & 0x2000)
-		ref_pll = 14318;
-	else
-		ref_pll = 12000;
-
-	denum = data & 0x1f;
-	num = (data & 0x3fe0) >> 5;
-	data = (data & 0xc000) >> 14;
-	switch (data) {
-	case 3:
-		div = 0x4;
-		break;
-	case 2:
-	case 1:
-		div = 0x2;
-		break;
-	default:
-		div = 0x1;
-		break;
-	}
-	ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
+	if (ast->DisableP2A)
+	{
+		ast->dram_bus_width = 16;
+		ast->dram_type = AST_DRAM_1Gx16;
+		ast->mclk = 396;
+	}
+	else
+	{
+		ast_write32(ast, 0xf004, 0x1e6e0000);
+		ast_write32(ast, 0xf000, 0x1);
+		data = ast_read32(ast, 0x10004);
+
+		if (data & 0x40)
+			ast->dram_bus_width = 16;
+		else
+			ast->dram_bus_width = 32;
+
+		if (ast->chip == AST2300 || ast->chip == AST2400) {
+			switch (data & 0x03) {
+			case 0:
+				ast->dram_type = AST_DRAM_512Mx16;
+				break;
+			default:
+			case 1:
+				ast->dram_type = AST_DRAM_1Gx16;
+				break;
+			case 2:
+				ast->dram_type = AST_DRAM_2Gx16;
+				break;
+			case 3:
+				ast->dram_type = AST_DRAM_4Gx16;
+				break;
+			}
+		} else {
+			switch (data & 0x0c) {
+			case 0:
+			case 4:
+				ast->dram_type = AST_DRAM_512Mx16;
+				break;
+			case 8:
+				if (data & 0x40)
+					ast->dram_type = AST_DRAM_1Gx16;
+				else
+					ast->dram_type = AST_DRAM_512Mx32;
+				break;
+			case 0xc:
+				ast->dram_type = AST_DRAM_1Gx32;
+				break;
+			}
+		}
+
+		data = ast_read32(ast, 0x10120);
+		data2 = ast_read32(ast, 0x10170);
+		if (data2 & 0x2000)
+			ref_pll = 14318;
+		else
+			ref_pll = 12000;
+
+		denum = data & 0x1f;
+		num = (data & 0x3fe0) >> 5;
+		data = (data & 0xc000) >> 14;
+		switch (data) {
+		case 3:
+			div = 0x4;
+			break;
+		case 2:
+		case 1:
+			div = 0x2;
+			break;
+		default:
+			div = 0x1;
+			break;
+		}
+		ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
+	}
 	return 0;
 }
 
+13 -5
drivers/gpu/drm/ast/ast_post.c
···
 	ast_open_key(ast);
 	ast_set_def_ext_reg(dev);
 
-	if (ast->chip == AST2300 || ast->chip == AST2400)
-		ast_init_dram_2300(dev);
-	else
-		ast_init_dram_reg(dev);
-
-	ast_init_3rdtx(dev);
+	if (ast->DisableP2A == false)
+	{
+		if (ast->chip == AST2300 || ast->chip == AST2400)
+			ast_init_dram_2300(dev);
+		else
+			ast_init_dram_reg(dev);
+
+		ast_init_3rdtx(dev);
+	}
+	else
+	{
+		if (ast->tx_chip_type != AST_TX_NONE)
+			ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80);	/* Enable DVO */
+	}
 }
 
 /* AST 2300 DRAM settings */
+6 -6
drivers/gpu/drm/drm_atomic.c
···
 EXPORT_SYMBOL(drm_atomic_get_crtc_state);
 
 static void set_out_fence_for_crtc(struct drm_atomic_state *state,
-				   struct drm_crtc *crtc, s64 __user *fence_ptr)
+				   struct drm_crtc *crtc, s32 __user *fence_ptr)
 {
 	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
 }
 
-static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
+static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
 					  struct drm_crtc *crtc)
 {
-	s64 __user *fence_ptr;
+	s32 __user *fence_ptr;
 
 	fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
 	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
···
 		state->color_mgmt_changed |= replaced;
 		return ret;
 	} else if (property == config->prop_out_fence_ptr) {
-		s64 __user *fence_ptr = u64_to_user_ptr(val);
+		s32 __user *fence_ptr = u64_to_user_ptr(val);
 
 		if (!fence_ptr)
 			return 0;
···
  */
 
 struct drm_out_fence_state {
-	s64 __user *out_fence_ptr;
+	s32 __user *out_fence_ptr;
 	struct sync_file *sync_file;
 	int fd;
 };
···
 		return 0;
 
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		u64 __user *fence_ptr;
+		s32 __user *fence_ptr;
 
 		fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
 
-4
drivers/gpu/drm/i915/gvt/cmd_parser.c
···
 	(s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
 
 static unsigned long bypass_scan_mask = 0;
-static bool bypass_batch_buffer_scan = true;
 
 /* ring ALL, type = 0 */
 static struct sub_op_bits sub_op_mi[] = {
···
 static int batch_buffer_needs_scan(struct parser_exec_state *s)
 {
 	struct intel_gvt *gvt = s->vgpu->gvt;
-
-	if (bypass_batch_buffer_scan)
-		return 0;
 
 	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
 		/* BDW decides privilege based on address space */
+19 -47
drivers/gpu/drm/i915/gvt/execlist.c
···
 #define get_desc_from_elsp_dwords(ed, i) \
 	((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
 
-
-#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
-#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
-static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
-			     unsigned long add, int gmadr_bytes)
-{
-	if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
-		return -1;
-
-	*((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
-		BATCH_BUFFER_ADDR_MASK;
-	if (gmadr_bytes == 8) {
-		*((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
-			add & BATCH_BUFFER_ADDR_HIGH_MASK;
-	}
-
-	return 0;
-}
-
 static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 {
-	int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+	const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+	struct intel_shadow_bb_entry *entry_obj;
 
 	/* pin the gem object to ggtt */
-	if (!list_empty(&workload->shadow_bb)) {
-		struct intel_shadow_bb_entry *entry_obj =
-			list_first_entry(&workload->shadow_bb,
-					 struct intel_shadow_bb_entry,
-					 list);
-		struct intel_shadow_bb_entry *temp;
+	list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
+		struct i915_vma *vma;
 
-		list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
-					 list) {
-			struct i915_vma *vma;
+		vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
+		if (IS_ERR(vma)) {
+			gvt_err("Cannot pin\n");
+			return;
+		}
 
-			vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
-						       4, 0);
-			if (IS_ERR(vma)) {
-				gvt_err("Cannot pin\n");
-				return;
-			}
+		/* FIXME: we are not tracking our pinned VMA leaving it
+		 * up to the core to fix up the stray pin_count upon
+		 * free.
+		 */
 
-			/* FIXME: we are not tracking our pinned VMA leaving it
-			 * up to the core to fix up the stray pin_count upon
-			 * free.
-			 */
-
-			/* update the relocate gma with shadow batch buffer*/
-			set_gma_to_bb_cmd(entry_obj,
-					  i915_ggtt_offset(vma),
-					  gmadr_bytes);
-		}
+		/* update the relocate gma with shadow batch buffer*/
+		entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
+		if (gmadr_bytes == 8)
+			entry_obj->bb_start_cmd_va[2] = 0;
 	}
 }
···
 		INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
 	}
 
-	vgpu->workloads = kmem_cache_create("gvt-g vgpu workload",
+	vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
 			sizeof(struct intel_vgpu_workload), 0,
 			SLAB_HWCACHE_ALIGN,
 			NULL);
+4 -4
drivers/gpu/drm/i915/gvt/kvmgt.c
···
 	return NULL;
 }
 
-static ssize_t available_instance_show(struct kobject *kobj, struct device *dev,
-		char *buf)
+static ssize_t available_instances_show(struct kobject *kobj,
+					struct device *dev, char *buf)
 {
 	struct intel_vgpu_type *type;
 	unsigned int num = 0;
···
 			type->fence);
 }
 
-static MDEV_TYPE_ATTR_RO(available_instance);
+static MDEV_TYPE_ATTR_RO(available_instances);
 static MDEV_TYPE_ATTR_RO(device_api);
 static MDEV_TYPE_ATTR_RO(description);
 
 static struct attribute *type_attrs[] = {
-	&mdev_type_attr_available_instance.attr,
+	&mdev_type_attr_available_instances.attr,
 	&mdev_type_attr_device_api.attr,
 	&mdev_type_attr_description.attr,
 	NULL,
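The renamed attribute is what management tools poll through the standard mdev sysfs layout before creating a vGPU. A minimal userspace sketch of that check is below; the PCI address and type name are placeholders, the real ones come from listing mdev_supported_types/ on the host:

    #include <stdio.h>

    /* Read how many more vGPUs of a given mdev type can still be created.
     * 0000:00:02.0 and i915-GVTg_V5_4 are illustrative placeholders.
     */
    int main(void)
    {
    	const char *path = "/sys/bus/pci/devices/0000:00:02.0/"
    			   "mdev_supported_types/i915-GVTg_V5_4/available_instances";
    	FILE *f = fopen(path, "r");
    	unsigned int n;

    	if (!f) {
    		perror("available_instances");
    		return 1;
    	}
    	if (fscanf(f, "%u", &n) != 1) {
    		fclose(f);
    		return 1;
    	}
    	fclose(f);
    	printf("available_instances: %u\n", n);
    	return 0;
    }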
+1 -1
drivers/gpu/drm/i915/gvt/scheduler.h
···
 	struct drm_i915_gem_object *obj;
 	void *va;
 	unsigned long len;
-	void *bb_start_cmd_va;
+	u32 *bb_start_cmd_va;
 };
 
 #define workload_q_head(vgpu, ring_id) \
+1 -1
drivers/gpu/drm/i915/i915_drv.c
···
 
 	assert_forcewakes_inactive(dev_priv);
 
-	if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv))
+	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
 		intel_hpd_poll_init(dev_priv);
 
 	DRM_DEBUG_KMS("Device suspended\n");
+5
drivers/gpu/drm/i915/i915_drv.h
···
 
 	struct i915_frontbuffer_tracking fb_tracking;
 
+	struct intel_atomic_helper {
+		struct llist_head free_list;
+		struct work_struct free_work;
+	} atomic_helper;
+
 	u16 orig_clock;
 
 	bool mchbar_need_disable;
+1
drivers/gpu/drm/i915/i915_vma.c
···
 		return ret;
 	}
 
+	trace_i915_vma_bind(vma, bind_flags);
 	ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
 	if (ret)
 		return ret;
+5 -4
drivers/gpu/drm/i915/intel_crt.c
···
 	struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
 	struct edid *edid;
 	struct i2c_adapter *i2c;
+	bool ret = false;
 
 	BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
 
···
 		 */
 		if (!is_digital) {
 			DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
-			return true;
+			ret = true;
+		} else {
+			DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
 		}
-
-		DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
 	} else {
 		DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
 	}
 
 	kfree(edid);
 
-	return false;
+	return ret;
 }
 
 static enum drm_connector_status
+38 -5
drivers/gpu/drm/i915/intel_display.c
···
 		 * We only keep the x/y offsets, so push all of the
 		 * gtt offset into the x/y offsets.
 		 */
-		_intel_adjust_tile_offset(&x, &y, tile_size,
-					  tile_width, tile_height, pitch_tiles,
+		_intel_adjust_tile_offset(&x, &y,
+					  tile_width, tile_height,
+					  tile_size, pitch_tiles,
 					  gtt_offset_rotated * tile_size, 0);
 
 		gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
···
 	}
 
 	state = drm_atomic_state_alloc(crtc->dev);
+	if (!state) {
+		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
+			      crtc->base.id, crtc->name);
+		return;
+	}
+
 	state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
 
 	/* Everything's already locked, -EDEADLK can't happen. */
···
 	}
 
 	old->restore_state = restore_state;
+	drm_atomic_state_put(state);
 
 	/* let the connector get through one full cycle before testing */
 	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
···
 		break;
 
 	case FENCE_FREE:
-		drm_atomic_state_put(&state->base);
-		break;
+		{
+			struct intel_atomic_helper *helper =
+				&to_i915(state->base.dev)->atomic_helper;
+
+			if (llist_add(&state->freed, &helper->free_list))
+				schedule_work(&helper->free_work);
+			break;
+		}
 	}
 
 	return NOTIFY_DONE;
···
 	drm_modeset_acquire_fini(&ctx);
 }
 
+static void intel_atomic_helper_free_state(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), atomic_helper.free_work);
+	struct intel_atomic_state *state, *next;
+	struct llist_node *freed;
+
+	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
+	llist_for_each_entry_safe(state, next, freed, freed)
+		drm_atomic_state_put(&state->base);
+}
+
 int intel_modeset_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
···
 	dev->mode_config.allow_fb_modifiers = true;
 
 	dev->mode_config.funcs = &intel_mode_funcs;
+
+	INIT_WORK(&dev_priv->atomic_helper.free_work,
+		  intel_atomic_helper_free_state);
 
 	intel_init_quirks(dev);
···
 
 	if (ret)
 		DRM_ERROR("Restoring old state failed with %i\n", ret);
-	drm_atomic_state_put(state);
+	if (state)
+		drm_atomic_state_put(state);
 }
 
 void intel_modeset_gem_init(struct drm_device *dev)
···
 void intel_modeset_cleanup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	flush_work(&dev_priv->atomic_helper.free_work);
+	WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
 
 	intel_disable_gt_powersave(dev_priv);
 
+2
drivers/gpu/drm/i915/intel_drv.h
···
 	struct skl_wm_values wm_results;
 
 	struct i915_sw_fence commit_ready;
+
+	struct llist_node freed;
 };
 
 struct intel_plane_state {
+3
drivers/gpu/drm/i915/intel_fbdev.c
···
 {
 	struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
 
+	if (!ifbdev)
+		return;
+
 	ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
 }
 
+2 -1
drivers/gpu/drm/nouveau/nouveau_display.c
···
 		return ret;
 
 	/* enable polling for external displays */
-	drm_kms_helper_poll_enable(dev);
+	if (!dev->mode_config.poll_enabled)
+		drm_kms_helper_poll_enable(dev);
 
 	/* enable hotplug interrupts */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+4 -1
drivers/gpu/drm/nouveau/nouveau_drm.c
···
 	pci_set_master(pdev);
 
 	ret = nouveau_do_resume(drm_dev, true);
-	drm_kms_helper_poll_enable(drm_dev);
+
+	if (!drm_dev->mode_config.poll_enabled)
+		drm_kms_helper_poll_enable(drm_dev);
+
 	/* do magic */
 	nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
 	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
+2
drivers/gpu/drm/nouveau/nouveau_drv.h
···
 	struct backlight_device *backlight;
 	struct list_head bl_connectors;
 	struct work_struct hpd_work;
+	struct work_struct fbcon_work;
+	int fbcon_new_state;
 #ifdef CONFIG_ACPI
 	struct notifier_block acpi_nb;
 #endif
+34 -9
drivers/gpu/drm/nouveau/nouveau_fbcon.c
···
 	.fb_probe = nouveau_fbcon_create,
 };
 
+static void
+nouveau_fbcon_set_suspend_work(struct work_struct *work)
+{
+	struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work);
+	int state = READ_ONCE(drm->fbcon_new_state);
+
+	if (state == FBINFO_STATE_RUNNING)
+		pm_runtime_get_sync(drm->dev->dev);
+
+	console_lock();
+	if (state == FBINFO_STATE_RUNNING)
+		nouveau_fbcon_accel_restore(drm->dev);
+	drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
+	if (state != FBINFO_STATE_RUNNING)
+		nouveau_fbcon_accel_save_disable(drm->dev);
+	console_unlock();
+
+	if (state == FBINFO_STATE_RUNNING) {
+		pm_runtime_mark_last_busy(drm->dev->dev);
+		pm_runtime_put_sync(drm->dev->dev);
+	}
+}
+
 void
 nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	if (drm->fbcon) {
-		console_lock();
-		if (state == FBINFO_STATE_RUNNING)
-			nouveau_fbcon_accel_restore(dev);
-		drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
-		if (state != FBINFO_STATE_RUNNING)
-			nouveau_fbcon_accel_save_disable(dev);
-		console_unlock();
-	}
+
+	if (!drm->fbcon)
+		return;
+
+	drm->fbcon_new_state = state;
+	/* Since runtime resume can happen as a result of a sysfs operation,
+	 * it's possible we already have the console locked. So handle fbcon
+	 * init/deinit from a seperate work thread
+	 */
+	schedule_work(&drm->fbcon_work);
 }
 
 int
···
 		return -ENOMEM;
 
 	drm->fbcon = fbcon;
+	INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
 
 	drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
 
+3 -4
drivers/gpu/drm/radeon/radeon_drv.c
···
 radeon_pci_shutdown(struct pci_dev *pdev)
 {
 	/* if we are running in a VM, make sure the device
-	 * torn down properly on reboot/shutdown.
-	 * unfortunately we can't detect certain
-	 * hypervisors so just do this all the time.
+	 * torn down properly on reboot/shutdown
 	 */
-	radeon_pci_remove(pdev);
+	if (radeon_device_is_virtual())
+		radeon_pci_remove(pdev);
 }
 
 static int radeon_pmops_suspend(struct device *dev)
+1 -1
drivers/gpu/drm/vc4/vc4_crtc.c
···
 
 	}
 
-	__drm_atomic_helper_crtc_destroy_state(state);
+	drm_atomic_helper_crtc_destroy_state(crtc, state);
 }
 
 static const struct drm_crtc_funcs vc4_crtc_funcs = {
+3 -1
drivers/gpu/drm/vc4/vc4_gem.c
···
 					  args->shader_rec_count);
 	struct vc4_bo *bo;
 
-	if (uniforms_offset < shader_rec_offset ||
+	if (shader_rec_offset < args->bin_cl_size ||
+	    uniforms_offset < shader_rec_offset ||
 	    exec_size < uniforms_offset ||
 	    args->shader_rec_count >= (UINT_MAX /
 					  sizeof(struct vc4_shader_state)) ||
 	    temp_size < exec_size) {
 		DRM_ERROR("overflow in exec arguments\n");
+		ret = -EINVAL;
 		goto fail;
 	}
 
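The extra comparison matters because each offset here is derived by adding a user-controlled size to the previous offset, and an unsigned addition that wraps can only be caught by noticing the result went backwards. A standalone sketch of that failure mode, not the driver code itself:

    #include <stdint.h>
    #include <stdio.h>

    /* Demonstrates why derived offsets are compared against what they
     * were derived from: a huge user-supplied size wraps the 32-bit
     * addition, leaving an offset smaller than its base.
     */
    int main(void)
    {
    	uint32_t bin_cl_size = 0x100;
    	uint32_t shader_rec_size = 0xfffffff0;	/* attacker-chosen */
    	uint32_t shader_rec_offset = bin_cl_size;
    	uint32_t uniforms_offset = shader_rec_offset + shader_rec_size; /* wraps */

    	if (shader_rec_offset < bin_cl_size ||
    	    uniforms_offset < shader_rec_offset)
    		printf("overflow in exec arguments\n");	/* triggers */

    	return 0;
    }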
+1 -1
drivers/gpu/drm/vc4/vc4_render_cl.c
···
 	}
 
 	ret = vc4_full_res_bounds_check(exec, *obj, surf);
-	if (!ret)
+	if (ret)
 		return ret;
 
 	return 0;
+1 -1
include/drm/drm_atomic.h
···
 	struct drm_crtc *ptr;
 	struct drm_crtc_state *state;
 	struct drm_crtc_commit *commit;
-	s64 __user *out_fence_ptr;
+	s32 __user *out_fence_ptr;
 };
 
 struct __drm_connnectors_state {
+1 -1
include/drm/drm_mode_config.h
···
 	/**
 	 * @prop_out_fence_ptr: Sync File fd pointer representing the
 	 * outgoing fences for a CRTC. Userspace should provide a pointer to a
-	 * value of type s64, and then cast that pointer to u64.
+	 * value of type s32, and then cast that pointer to u64.
 	 */
 	struct drm_property *prop_out_fence_ptr;
 	/**
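On the userspace side, the contract is exactly as the comment describes: the kernel writes a sync_file fd into a 32-bit signed integer whose address is passed as the property value. A minimal sketch using libdrm's atomic helpers, with crtc_id and out_fence_prop_id as placeholders a real client would look up via drmModeObjectGetProperties():

    #include <stdint.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    /* Request an out-fence for a CRTC in an atomic commit.  On success the
     * kernel has written a sync_file fd into out_fence_fd.
     */
    static int commit_with_out_fence(int drm_fd, uint32_t crtc_id,
    				 uint32_t out_fence_prop_id)
    {
    	int32_t out_fence_fd = -1;
    	drmModeAtomicReq *req = drmModeAtomicAlloc();
    	int ret;

    	if (!req)
    		return -1;

    	/* pointer to an s32, cast to u64 as the property value */
    	drmModeAtomicAddProperty(req, crtc_id, out_fence_prop_id,
    				 (uint64_t)(uintptr_t)&out_fence_fd);
    	ret = drmModeAtomicCommit(drm_fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
    	drmModeAtomicFree(req);

    	return ret ? ret : out_fence_fd;
    }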