Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'drm-fixes-2020-03-13' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
"It's a bit quieter, probably not as much as it could be.

There is one large regression fix in here from Lyude for displayport
bandwidth calculations; there have been reports of multi-monitor
setups in docks not working since -rc1, and this has been tested to
fix those.

Otherwise it's a bunch of i915 fixes (including some GVT fixes), a
set of amdgpu watermark + bios fixes, and an exynos iommu cleanup
fix.

core:
- DP MST bandwidth regression fix.

i915:
- hard lockup fix
- GVT fixes
- 32-bit alignment issue fix
- timeline wait fixes
- fix for a race between cacheline_retire and free

amdgpu:
- Update the display watermark bounding box for navi14
- Fix fetching vbios directly from rom on vega20/arcturus
- Navi and renoir watermark fixes

exynos:
- iommu object cleanup fix"


* tag 'drm-fixes-2020-03-13' of git://anongit.freedesktop.org/drm/drm:
drm/dp_mst: Rewrite and fix bandwidth limit checks
drm/dp_mst: Reprobe path resources in CSN handler
drm/dp_mst: Use full_pbn instead of available_pbn for bandwidth checks
drm/dp_mst: Rename drm_dp_mst_is_dp_mst_end_device() to be less redundant
drm/i915: Defer semaphore priority bumping to a workqueue
drm/i915/gt: Close race between cacheline_retire and free
drm/i915/execlists: Enable timeslice on partial virtual engine dequeue
drm/i915: be more solid in checking the alignment
drm/i915/gvt: Fix dma-buf display blur issue on CFL
drm/i915: Return early for await_start on same timeline
drm/i915: Actually emit the await_start
drm/amdgpu/powerplay: nv1x, renior copy dcn clock settings of watermark to smu during boot up
drm/exynos: Fix cleanup of IOMMU related objects
drm/amdgpu: correct ROM_INDEX/DATA offset for VEGA20
drm/amd/display: update soc bb for nv14
drm/i915/gvt: Fix emulated vbt size issue
drm/i915/gvt: Fix unnecessary schedule timer when no vGPU exits

+407 -135
+23 -2
drivers/gpu/drm/amd/amdgpu/soc15.c
··· 89 89 #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L 90 90 #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L 91 91 #define mmHDP_MEM_POWER_CTRL_BASE_IDX 0 92 + 93 + /* for Vega20/arcturus regiter offset change */ 94 + #define mmROM_INDEX_VG20 0x00e4 95 + #define mmROM_INDEX_VG20_BASE_IDX 0 96 + #define mmROM_DATA_VG20 0x00e5 97 + #define mmROM_DATA_VG20_BASE_IDX 0 98 + 92 99 /* 93 100 * Indirect registers accessor 94 101 */ ··· 316 309 { 317 310 u32 *dw_ptr; 318 311 u32 i, length_dw; 312 + uint32_t rom_index_offset; 313 + uint32_t rom_data_offset; 319 314 320 315 if (bios == NULL) 321 316 return false; ··· 330 321 dw_ptr = (u32 *)bios; 331 322 length_dw = ALIGN(length_bytes, 4) / 4; 332 323 324 + switch (adev->asic_type) { 325 + case CHIP_VEGA20: 326 + case CHIP_ARCTURUS: 327 + rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX_VG20); 328 + rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA_VG20); 329 + break; 330 + default: 331 + rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX); 332 + rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA); 333 + break; 334 + } 335 + 333 336 /* set rom index to 0 */ 334 - WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0); 337 + WREG32(rom_index_offset, 0); 335 338 /* read out the rom data */ 336 339 for (i = 0; i < length_dw; i++) 337 - dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA)); 340 + dw_ptr[i] = RREG32(rom_data_offset); 338 341 339 342 return true; 340 343 }
+114
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
··· 335 335 .use_urgent_burst_bw = 0 336 336 }; 337 337 338 + struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = { 339 + .clock_limits = { 340 + { 341 + .state = 0, 342 + .dcfclk_mhz = 560.0, 343 + .fabricclk_mhz = 560.0, 344 + .dispclk_mhz = 513.0, 345 + .dppclk_mhz = 513.0, 346 + .phyclk_mhz = 540.0, 347 + .socclk_mhz = 560.0, 348 + .dscclk_mhz = 171.0, 349 + .dram_speed_mts = 8960.0, 350 + }, 351 + { 352 + .state = 1, 353 + .dcfclk_mhz = 694.0, 354 + .fabricclk_mhz = 694.0, 355 + .dispclk_mhz = 642.0, 356 + .dppclk_mhz = 642.0, 357 + .phyclk_mhz = 600.0, 358 + .socclk_mhz = 694.0, 359 + .dscclk_mhz = 214.0, 360 + .dram_speed_mts = 11104.0, 361 + }, 362 + { 363 + .state = 2, 364 + .dcfclk_mhz = 875.0, 365 + .fabricclk_mhz = 875.0, 366 + .dispclk_mhz = 734.0, 367 + .dppclk_mhz = 734.0, 368 + .phyclk_mhz = 810.0, 369 + .socclk_mhz = 875.0, 370 + .dscclk_mhz = 245.0, 371 + .dram_speed_mts = 14000.0, 372 + }, 373 + { 374 + .state = 3, 375 + .dcfclk_mhz = 1000.0, 376 + .fabricclk_mhz = 1000.0, 377 + .dispclk_mhz = 1100.0, 378 + .dppclk_mhz = 1100.0, 379 + .phyclk_mhz = 810.0, 380 + .socclk_mhz = 1000.0, 381 + .dscclk_mhz = 367.0, 382 + .dram_speed_mts = 16000.0, 383 + }, 384 + { 385 + .state = 4, 386 + .dcfclk_mhz = 1200.0, 387 + .fabricclk_mhz = 1200.0, 388 + .dispclk_mhz = 1284.0, 389 + .dppclk_mhz = 1284.0, 390 + .phyclk_mhz = 810.0, 391 + .socclk_mhz = 1200.0, 392 + .dscclk_mhz = 428.0, 393 + .dram_speed_mts = 16000.0, 394 + }, 395 + /*Extra state, no dispclk ramping*/ 396 + { 397 + .state = 5, 398 + .dcfclk_mhz = 1200.0, 399 + .fabricclk_mhz = 1200.0, 400 + .dispclk_mhz = 1284.0, 401 + .dppclk_mhz = 1284.0, 402 + .phyclk_mhz = 810.0, 403 + .socclk_mhz = 1200.0, 404 + .dscclk_mhz = 428.0, 405 + .dram_speed_mts = 16000.0, 406 + }, 407 + }, 408 + .num_states = 5, 409 + .sr_exit_time_us = 8.6, 410 + .sr_enter_plus_exit_time_us = 10.9, 411 + .urgent_latency_us = 4.0, 412 + .urgent_latency_pixel_data_only_us = 4.0, 413 + .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, 414 + .urgent_latency_vm_data_only_us = 4.0, 415 + .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, 416 + .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, 417 + .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, 418 + .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0, 419 + .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0, 420 + .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, 421 + .max_avg_sdp_bw_use_normal_percent = 40.0, 422 + .max_avg_dram_bw_use_normal_percent = 40.0, 423 + .writeback_latency_us = 12.0, 424 + .ideal_dram_bw_after_urgent_percent = 40.0, 425 + .max_request_size_bytes = 256, 426 + .dram_channel_width_bytes = 2, 427 + .fabric_datapath_to_dcn_data_return_bytes = 64, 428 + .dcn_downspread_percent = 0.5, 429 + .downspread_percent = 0.38, 430 + .dram_page_open_time_ns = 50.0, 431 + .dram_rw_turnaround_time_ns = 17.5, 432 + .dram_return_buffer_per_channel_bytes = 8192, 433 + .round_trip_ping_latency_dcfclk_cycles = 131, 434 + .urgent_out_of_order_return_per_channel_bytes = 256, 435 + .channel_interleave_bytes = 256, 436 + .num_banks = 8, 437 + .num_chans = 8, 438 + .vmm_page_size_bytes = 4096, 439 + .dram_clock_change_latency_us = 404.0, 440 + .dummy_pstate_latency_us = 5.0, 441 + .writeback_dram_clock_change_latency_us = 23.0, 442 + .return_bus_width_bytes = 64, 443 + .dispclk_dppclk_vco_speed_mhz = 3850, 444 + .xfc_bus_transport_time_us = 20, 445 + .xfc_xbuf_latency_tolerance_us = 4, 446 + .use_urgent_burst_bw = 0 447 + }; 448 + 338 449 struct 
_vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 }; 339 450 340 451 #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL ··· 3402 3291 static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb( 3403 3292 uint32_t hw_internal_rev) 3404 3293 { 3294 + if (ASICREV_IS_NAVI14_M(hw_internal_rev)) 3295 + return &dcn2_0_nv14_soc; 3296 + 3405 3297 if (ASICREV_IS_NAVI12_P(hw_internal_rev)) 3406 3298 return &dcn2_0_nv12_soc; 3407 3299
+5 -2
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
··· 2006 2006 smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) && 2007 2007 smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { 2008 2008 smu_set_watermarks_table(smu, table, clock_ranges); 2009 - smu->watermarks_bitmap |= WATERMARKS_EXIST; 2010 - smu->watermarks_bitmap &= ~WATERMARKS_LOADED; 2009 + 2010 + if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) { 2011 + smu->watermarks_bitmap |= WATERMARKS_EXIST; 2012 + smu->watermarks_bitmap &= ~WATERMARKS_LOADED; 2013 + } 2011 2014 } 2012 2015 2013 2016 mutex_unlock(&smu->mutex);
+13 -9
drivers/gpu/drm/amd/powerplay/navi10_ppt.c
··· 1063 1063 int ret = 0; 1064 1064 1065 1065 if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && 1066 - !(smu->watermarks_bitmap & WATERMARKS_LOADED)) { 1067 - ret = smu_write_watermarks_table(smu); 1068 - if (ret) 1069 - return ret; 1070 - 1071 - smu->watermarks_bitmap |= WATERMARKS_LOADED; 1072 - } 1073 - 1074 - if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && 1075 1066 smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) && 1076 1067 smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { 1077 1068 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, ··· 1484 1493 *clock_ranges) 1485 1494 { 1486 1495 int i; 1496 + int ret = 0; 1487 1497 Watermarks_t *table = watermarks; 1488 1498 1489 1499 if (!table || !clock_ranges) ··· 1534 1542 1000)); 1535 1543 table->WatermarkRow[0][i].WmSetting = (uint8_t) 1536 1544 clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id; 1545 + } 1546 + 1547 + smu->watermarks_bitmap |= WATERMARKS_EXIST; 1548 + 1549 + /* pass data to smu controller */ 1550 + if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) { 1551 + ret = smu_write_watermarks_table(smu); 1552 + if (ret) { 1553 + pr_err("Failed to update WMTABLE!"); 1554 + return ret; 1555 + } 1556 + smu->watermarks_bitmap |= WATERMARKS_LOADED; 1537 1557 } 1538 1558 1539 1559 return 0;
+3 -2
drivers/gpu/drm/amd/powerplay/renoir_ppt.c
··· 806 806 clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id; 807 807 } 808 808 809 + smu->watermarks_bitmap |= WATERMARKS_EXIST; 810 + 809 811 /* pass data to smu controller */ 810 - if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && 811 - !(smu->watermarks_bitmap & WATERMARKS_LOADED)) { 812 + if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) { 812 813 ret = smu_write_watermarks_table(smu); 813 814 if (ret) { 814 815 pr_err("Failed to update WMTABLE!");
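The navi10 and renoir diffs above converge on one flag protocol: set WATERMARKS_EXIST as soon as display hands over clock ranges, and push the table to the SMU only while WATERMARKS_LOADED is still clear, so a table populated before the firmware was ready is still written exactly once later. A minimal standalone sketch of that protocol (plain C; smu_ctx and write_watermarks_table() are stand-ins for the real SMU context and smu_write_watermarks_table()):

#include <stdio.h>

#define WATERMARKS_EXIST   (1 << 0)
#define WATERMARKS_LOADED  (1 << 1)

struct smu_ctx { unsigned int watermarks_bitmap; };	/* stand-in for the SMU context */

/* stand-in for smu_write_watermarks_table(); returns 0 on success */
static int write_watermarks_table(struct smu_ctx *smu)
{
	printf("WMTABLE sent to SMU\n");
	return 0;
}

/* mirrors the fixed flow: mark the table as existing, then load it
 * into the SMU only if it has not already been loaded */
static int set_watermarks_table(struct smu_ctx *smu)
{
	smu->watermarks_bitmap |= WATERMARKS_EXIST;

	if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
		if (write_watermarks_table(smu))
			return -1;
		smu->watermarks_bitmap |= WATERMARKS_LOADED;
	}
	return 0;
}

int main(void)
{
	struct smu_ctx smu = { 0 };

	set_watermarks_table(&smu);	/* first call writes the table */
	set_watermarks_table(&smu);	/* already loaded: no second write */
	return 0;
}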
+128 -60
drivers/gpu/drm/drm_dp_mst_topology.c
··· 1935 1935 return parent_lct + 1; 1936 1936 } 1937 1937 1938 - static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs) 1938 + static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs) 1939 1939 { 1940 1940 switch (pdt) { 1941 1941 case DP_PEER_DEVICE_DP_LEGACY_CONV: ··· 1965 1965 1966 1966 /* Teardown the old pdt, if there is one */ 1967 1967 if (port->pdt != DP_PEER_DEVICE_NONE) { 1968 - if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) { 1968 + if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) { 1969 1969 /* 1970 1970 * If the new PDT would also have an i2c bus, 1971 1971 * don't bother with reregistering it 1972 1972 */ 1973 1973 if (new_pdt != DP_PEER_DEVICE_NONE && 1974 - drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) { 1974 + drm_dp_mst_is_end_device(new_pdt, new_mcs)) { 1975 1975 port->pdt = new_pdt; 1976 1976 port->mcs = new_mcs; 1977 1977 return 0; ··· 1991 1991 port->mcs = new_mcs; 1992 1992 1993 1993 if (port->pdt != DP_PEER_DEVICE_NONE) { 1994 - if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) { 1994 + if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) { 1995 1995 /* add i2c over sideband */ 1996 1996 ret = drm_dp_mst_register_i2c_bus(&port->aux); 1997 1997 } else { ··· 2172 2172 } 2173 2173 2174 2174 if (port->pdt != DP_PEER_DEVICE_NONE && 2175 - drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) { 2175 + drm_dp_mst_is_end_device(port->pdt, port->mcs)) { 2176 2176 port->cached_edid = drm_get_edid(port->connector, 2177 2177 &port->aux.ddc); 2178 2178 drm_connector_set_tile_property(port->connector); ··· 2302 2302 mutex_unlock(&mgr->lock); 2303 2303 } 2304 2304 2305 - if (old_ddps != port->ddps) { 2306 - if (port->ddps) { 2307 - if (!port->input) { 2308 - drm_dp_send_enum_path_resources(mgr, mstb, 2309 - port); 2310 - } 2305 + /* 2306 + * Reprobe PBN caps on both hotplug, and when re-probing the link 2307 + * for our parent mstb 2308 + */ 2309 + if (old_ddps != port->ddps || !created) { 2310 + if (port->ddps && !port->input) { 2311 + ret = drm_dp_send_enum_path_resources(mgr, mstb, 2312 + port); 2313 + if (ret == 1) 2314 + changed = true; 2311 2315 } else { 2312 - port->available_pbn = 0; 2316 + port->full_pbn = 0; 2313 2317 } 2314 2318 } 2315 2319 ··· 2405 2401 port->ddps = conn_stat->displayport_device_plug_status; 2406 2402 2407 2403 if (old_ddps != port->ddps) { 2408 - if (port->ddps) { 2409 - dowork = true; 2410 - } else { 2411 - port->available_pbn = 0; 2412 - } 2404 + if (port->ddps && !port->input) 2405 + drm_dp_send_enum_path_resources(mgr, mstb, port); 2406 + else 2407 + port->full_pbn = 0; 2413 2408 } 2414 2409 2415 2410 new_pdt = port->input ? 
DP_PEER_DEVICE_NONE : conn_stat->peer_device_type; ··· 2558 2555 2559 2556 if (port->input || !port->ddps) 2560 2557 continue; 2561 - 2562 - if (!port->available_pbn) { 2563 - drm_modeset_lock(&mgr->base.lock, NULL); 2564 - drm_dp_send_enum_path_resources(mgr, mstb, port); 2565 - drm_modeset_unlock(&mgr->base.lock); 2566 - changed = true; 2567 - } 2568 2558 2569 2559 if (port->mstb) 2570 2560 mstb_child = drm_dp_mst_topology_get_mstb_validated( ··· 2986 2990 2987 2991 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 2988 2992 if (ret > 0) { 2993 + ret = 0; 2989 2994 path_res = &txmsg->reply.u.path_resources; 2990 2995 2991 2996 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { ··· 2999 3002 path_res->port_number, 3000 3003 path_res->full_payload_bw_number, 3001 3004 path_res->avail_payload_bw_number); 3002 - port->available_pbn = 3003 - path_res->avail_payload_bw_number; 3005 + 3006 + /* 3007 + * If something changed, make sure we send a 3008 + * hotplug 3009 + */ 3010 + if (port->full_pbn != path_res->full_payload_bw_number || 3011 + port->fec_capable != path_res->fec_capable) 3012 + ret = 1; 3013 + 3014 + port->full_pbn = path_res->full_payload_bw_number; 3004 3015 port->fec_capable = path_res->fec_capable; 3005 3016 } 3006 3017 } 3007 3018 3008 3019 kfree(txmsg); 3009 - return 0; 3020 + return ret; 3010 3021 } 3011 3022 3012 3023 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb) ··· 3601 3596 /* The link address will need to be re-sent on resume */ 3602 3597 mstb->link_address_sent = false; 3603 3598 3604 - list_for_each_entry(port, &mstb->ports, next) { 3605 - /* The PBN for each port will also need to be re-probed */ 3606 - port->available_pbn = 0; 3607 - 3599 + list_for_each_entry(port, &mstb->ports, next) 3608 3600 if (port->mstb) 3609 3601 drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb); 3610 - } 3611 3602 } 3612 3603 3613 3604 /** ··· 4830 4829 return false; 4831 4830 } 4832 4831 4833 - static inline 4834 - int drm_dp_mst_atomic_check_bw_limit(struct drm_dp_mst_branch *branch, 4835 - struct drm_dp_mst_topology_state *mst_state) 4832 + static int 4833 + drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port, 4834 + struct drm_dp_mst_topology_state *state); 4835 + 4836 + static int 4837 + drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb, 4838 + struct drm_dp_mst_topology_state *state) 4836 4839 { 4837 - struct drm_dp_mst_port *port; 4838 4840 struct drm_dp_vcpi_allocation *vcpi; 4839 - int pbn_limit = 0, pbn_used = 0; 4841 + struct drm_dp_mst_port *port; 4842 + int pbn_used = 0, ret; 4843 + bool found = false; 4840 4844 4841 - list_for_each_entry(port, &branch->ports, next) { 4842 - if (port->mstb) 4843 - if (drm_dp_mst_atomic_check_bw_limit(port->mstb, mst_state)) 4844 - return -ENOSPC; 4845 - 4846 - if (port->available_pbn > 0) 4847 - pbn_limit = port->available_pbn; 4848 - } 4849 - DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch has %d PBN available\n", 4850 - branch, pbn_limit); 4851 - 4852 - list_for_each_entry(vcpi, &mst_state->vcpis, next) { 4853 - if (!vcpi->pbn) 4845 + /* Check that we have at least one port in our state that's downstream 4846 + * of this branch, otherwise we can skip this branch 4847 + */ 4848 + list_for_each_entry(vcpi, &state->vcpis, next) { 4849 + if (!vcpi->pbn || 4850 + !drm_dp_mst_port_downstream_of_branch(vcpi->port, mstb)) 4854 4851 continue; 4855 4852 4856 - if (drm_dp_mst_port_downstream_of_branch(vcpi->port, branch)) 4857 - pbn_used += vcpi->pbn; 4853 + found = 
true; 4854 + break; 4858 4855 } 4859 - DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch used %d PBN\n", 4860 - branch, pbn_used); 4856 + if (!found) 4857 + return 0; 4861 4858 4862 - if (pbn_used > pbn_limit) { 4863 - DRM_DEBUG_ATOMIC("[MST BRANCH:%p] No available bandwidth\n", 4864 - branch); 4859 + if (mstb->port_parent) 4860 + DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n", 4861 + mstb->port_parent->parent, mstb->port_parent, 4862 + mstb); 4863 + else 4864 + DRM_DEBUG_ATOMIC("[MSTB:%p] Checking bandwidth limits\n", 4865 + mstb); 4866 + 4867 + list_for_each_entry(port, &mstb->ports, next) { 4868 + ret = drm_dp_mst_atomic_check_port_bw_limit(port, state); 4869 + if (ret < 0) 4870 + return ret; 4871 + 4872 + pbn_used += ret; 4873 + } 4874 + 4875 + return pbn_used; 4876 + } 4877 + 4878 + static int 4879 + drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port, 4880 + struct drm_dp_mst_topology_state *state) 4881 + { 4882 + struct drm_dp_vcpi_allocation *vcpi; 4883 + int pbn_used = 0; 4884 + 4885 + if (port->pdt == DP_PEER_DEVICE_NONE) 4886 + return 0; 4887 + 4888 + if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) { 4889 + bool found = false; 4890 + 4891 + list_for_each_entry(vcpi, &state->vcpis, next) { 4892 + if (vcpi->port != port) 4893 + continue; 4894 + if (!vcpi->pbn) 4895 + return 0; 4896 + 4897 + found = true; 4898 + break; 4899 + } 4900 + if (!found) 4901 + return 0; 4902 + 4903 + /* This should never happen, as it means we tried to 4904 + * set a mode before querying the full_pbn 4905 + */ 4906 + if (WARN_ON(!port->full_pbn)) 4907 + return -EINVAL; 4908 + 4909 + pbn_used = vcpi->pbn; 4910 + } else { 4911 + pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb, 4912 + state); 4913 + if (pbn_used <= 0) 4914 + return pbn_used; 4915 + } 4916 + 4917 + if (pbn_used > port->full_pbn) { 4918 + DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n", 4919 + port->parent, port, pbn_used, 4920 + port->full_pbn); 4865 4921 return -ENOSPC; 4866 4922 } 4867 - return 0; 4923 + 4924 + DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n", 4925 + port->parent, port, pbn_used, port->full_pbn); 4926 + 4927 + return pbn_used; 4868 4928 } 4869 4929 4870 4930 static inline int ··· 5123 5061 ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state); 5124 5062 if (ret) 5125 5063 break; 5126 - ret = drm_dp_mst_atomic_check_bw_limit(mgr->mst_primary, mst_state); 5127 - if (ret) 5064 + 5065 + mutex_lock(&mgr->lock); 5066 + ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary, 5067 + mst_state); 5068 + mutex_unlock(&mgr->lock); 5069 + if (ret < 0) 5128 5070 break; 5071 + else 5072 + ret = 0; 5129 5073 } 5130 5074 5131 5075 return ret;
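The rewritten bandwidth check above recurses over the topology: an end-device port contributes the PBN of its own VCPI allocation, a branching port contributes the sum over its children, and every level is compared against the full_pbn reported for that link. A simplified userspace model of that recursion (the node struct below is hypothetical, not the kernel's drm_dp_mst_port/drm_dp_mst_branch types):

#include <stdio.h>

/* toy topology node: a full_pbn limit plus either a VCPI allocation
 * (leaf/end device) or an array of children (branching device) */
struct node {
	const char *name;
	int full_pbn;		/* bandwidth limit reported for this link */
	int vcpi_pbn;		/* PBN allocated on a leaf, 0 otherwise */
	struct node *children;	/* non-NULL for a branching device */
	int num_children;
};

/* returns the PBN used at/below this node, or -1 if a limit is exceeded */
static int check_bw(const struct node *n)
{
	int used = n->vcpi_pbn;

	for (int i = 0; i < n->num_children; i++) {
		int child = check_bw(&n->children[i]);

		if (child < 0)
			return child;
		used += child;
	}

	if (used > n->full_pbn) {
		printf("%s: %d PBN required exceeds limit of %d\n",
		       n->name, used, n->full_pbn);
		return -1;
	}
	return used;
}

int main(void)
{
	struct node leaves[] = {
		{ .name = "port1", .full_pbn = 500, .vcpi_pbn = 400 },
		{ .name = "port2", .full_pbn = 500, .vcpi_pbn = 400 },
	};
	struct node hub = {
		.name = "hub", .full_pbn = 700,
		.children = leaves, .num_children = 2,
	};

	check_bw(&hub);	/* 800 PBN over a 700 PBN link: rejected, like -ENOSPC */
	return 0;
}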
+3 -2
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
··· 55 55 struct decon_context { 56 56 struct device *dev; 57 57 struct drm_device *drm_dev; 58 + void *dma_priv; 58 59 struct exynos_drm_crtc *crtc; 59 60 struct exynos_drm_plane planes[WINDOWS_NR]; 60 61 struct exynos_drm_plane_config configs[WINDOWS_NR]; ··· 645 644 646 645 decon_clear_channels(ctx->crtc); 647 646 648 - return exynos_drm_register_dma(drm_dev, dev); 647 + return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv); 649 648 } 650 649 651 650 static void decon_unbind(struct device *dev, struct device *master, void *data) ··· 655 654 decon_atomic_disable(ctx->crtc); 656 655 657 656 /* detach this sub driver from iommu mapping if supported. */ 658 - exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev); 657 + exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv); 659 658 } 660 659 661 660 static const struct component_ops decon_component_ops = {
+3 -2
drivers/gpu/drm/exynos/exynos7_drm_decon.c
··· 40 40 struct decon_context { 41 41 struct device *dev; 42 42 struct drm_device *drm_dev; 43 + void *dma_priv; 43 44 struct exynos_drm_crtc *crtc; 44 45 struct exynos_drm_plane planes[WINDOWS_NR]; 45 46 struct exynos_drm_plane_config configs[WINDOWS_NR]; ··· 128 127 129 128 decon_clear_channels(ctx->crtc); 130 129 131 - return exynos_drm_register_dma(drm_dev, ctx->dev); 130 + return exynos_drm_register_dma(drm_dev, ctx->dev, &ctx->dma_priv); 132 131 } 133 132 134 133 static void decon_ctx_remove(struct decon_context *ctx) 135 134 { 136 135 /* detach this sub driver from iommu mapping if supported. */ 137 - exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev); 136 + exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv); 138 137 } 139 138 140 139 static u32 decon_calc_clkdiv(struct decon_context *ctx,
+19 -9
drivers/gpu/drm/exynos/exynos_drm_dma.c
··· 58 58 * mapping. 59 59 */ 60 60 static int drm_iommu_attach_device(struct drm_device *drm_dev, 61 - struct device *subdrv_dev) 61 + struct device *subdrv_dev, void **dma_priv) 62 62 { 63 63 struct exynos_drm_private *priv = drm_dev->dev_private; 64 64 int ret; ··· 74 74 return ret; 75 75 76 76 if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) { 77 - if (to_dma_iommu_mapping(subdrv_dev)) 77 + /* 78 + * Keep the original DMA mapping of the sub-device and 79 + * restore it on Exynos DRM detach, otherwise the DMA 80 + * framework considers it as IOMMU-less during the next 81 + * probe (in case of deferred probe or modular build) 82 + */ 83 + *dma_priv = to_dma_iommu_mapping(subdrv_dev); 84 + if (*dma_priv) 78 85 arm_iommu_detach_device(subdrv_dev); 79 86 80 87 ret = arm_iommu_attach_device(subdrv_dev, priv->mapping); ··· 105 98 * mapping 106 99 */ 107 100 static void drm_iommu_detach_device(struct drm_device *drm_dev, 108 - struct device *subdrv_dev) 101 + struct device *subdrv_dev, void **dma_priv) 109 102 { 110 103 struct exynos_drm_private *priv = drm_dev->dev_private; 111 104 112 - if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) 105 + if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) { 113 106 arm_iommu_detach_device(subdrv_dev); 114 - else if (IS_ENABLED(CONFIG_IOMMU_DMA)) 107 + arm_iommu_attach_device(subdrv_dev, *dma_priv); 108 + } else if (IS_ENABLED(CONFIG_IOMMU_DMA)) 115 109 iommu_detach_device(priv->mapping, subdrv_dev); 116 110 117 111 clear_dma_max_seg_size(subdrv_dev); 118 112 } 119 113 120 - int exynos_drm_register_dma(struct drm_device *drm, struct device *dev) 114 + int exynos_drm_register_dma(struct drm_device *drm, struct device *dev, 115 + void **dma_priv) 121 116 { 122 117 struct exynos_drm_private *priv = drm->dev_private; 123 118 ··· 146 137 priv->mapping = mapping; 147 138 } 148 139 149 - return drm_iommu_attach_device(drm, dev); 140 + return drm_iommu_attach_device(drm, dev, dma_priv); 150 141 } 151 142 152 - void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev) 143 + void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev, 144 + void **dma_priv) 153 145 { 154 146 if (IS_ENABLED(CONFIG_EXYNOS_IOMMU)) 155 - drm_iommu_detach_device(drm, dev); 147 + drm_iommu_detach_device(drm, dev, dma_priv); 156 148 } 157 149 158 150 void exynos_drm_cleanup_dma(struct drm_device *drm)
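The heart of the exynos series is the new dma_priv cookie in the diff above: before attaching a sub-device to the Exynos DRM IOMMU mapping, the driver stashes whatever ARM DMA mapping the device already had and puts it back on detach, so the DMA framework does not treat the device as IOMMU-less on a later re-probe. A minimal sketch of that save/attach/restore pattern (toy types and generic pointers instead of the real arm_iommu_* calls):

#include <stdio.h>

/* toy sub-device; "mapping" stands in for the ARM DMA-IOMMU mapping */
struct toy_device {
	const char *name;
	void *mapping;
};

static void toy_set_mapping(struct toy_device *dev, void *mapping)
{
	dev->mapping = mapping;
	printf("%s: mapping = %p\n", dev->name, mapping);
}

/* attach: remember whatever mapping the device came with, then install
 * the DRM-wide mapping (the shape of exynos_drm_register_dma) */
static void toy_attach(struct toy_device *dev, void *drm_mapping,
		       void **dma_priv)
{
	*dma_priv = dev->mapping;	/* save the original mapping */
	toy_set_mapping(dev, drm_mapping);
}

/* detach: put the saved mapping back (the shape of exynos_drm_unregister_dma) */
static void toy_detach(struct toy_device *dev, void **dma_priv)
{
	toy_set_mapping(dev, *dma_priv);
	*dma_priv = NULL;
}

int main(void)
{
	int original_mapping, drm_mapping;
	struct toy_device dev = { "decon", &original_mapping };
	void *dma_priv = NULL;

	toy_attach(&dev, &drm_mapping, &dma_priv);
	toy_detach(&dev, &dma_priv);	/* device is IOMMU-backed again */
	return 0;
}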
+4 -2
drivers/gpu/drm/exynos/exynos_drm_drv.h
··· 223 223 return priv->mapping ? true : false; 224 224 } 225 225 226 - int exynos_drm_register_dma(struct drm_device *drm, struct device *dev); 227 - void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev); 226 + int exynos_drm_register_dma(struct drm_device *drm, struct device *dev, 227 + void **dma_priv); 228 + void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev, 229 + void **dma_priv); 228 230 void exynos_drm_cleanup_dma(struct drm_device *drm); 229 231 230 232 #ifdef CONFIG_DRM_EXYNOS_DPI
+3 -2
drivers/gpu/drm/exynos/exynos_drm_fimc.c
··· 97 97 struct fimc_context { 98 98 struct exynos_drm_ipp ipp; 99 99 struct drm_device *drm_dev; 100 + void *dma_priv; 100 101 struct device *dev; 101 102 struct exynos_drm_ipp_task *task; 102 103 struct exynos_drm_ipp_formats *formats; ··· 1134 1133 1135 1134 ctx->drm_dev = drm_dev; 1136 1135 ipp->drm_dev = drm_dev; 1137 - exynos_drm_register_dma(drm_dev, dev); 1136 + exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv); 1138 1137 1139 1138 exynos_drm_ipp_register(dev, ipp, &ipp_funcs, 1140 1139 DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE | ··· 1154 1153 struct exynos_drm_ipp *ipp = &ctx->ipp; 1155 1154 1156 1155 exynos_drm_ipp_unregister(dev, ipp); 1157 - exynos_drm_unregister_dma(drm_dev, dev); 1156 + exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv); 1158 1157 } 1159 1158 1160 1159 static const struct component_ops fimc_component_ops = {
+3 -2
drivers/gpu/drm/exynos/exynos_drm_fimd.c
··· 167 167 struct fimd_context { 168 168 struct device *dev; 169 169 struct drm_device *drm_dev; 170 + void *dma_priv; 170 171 struct exynos_drm_crtc *crtc; 171 172 struct exynos_drm_plane planes[WINDOWS_NR]; 172 173 struct exynos_drm_plane_config configs[WINDOWS_NR]; ··· 1091 1090 if (is_drm_iommu_supported(drm_dev)) 1092 1091 fimd_clear_channels(ctx->crtc); 1093 1092 1094 - return exynos_drm_register_dma(drm_dev, dev); 1093 + return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv); 1095 1094 } 1096 1095 1097 1096 static void fimd_unbind(struct device *dev, struct device *master, ··· 1101 1100 1102 1101 fimd_atomic_disable(ctx->crtc); 1103 1102 1104 - exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev); 1103 + exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv); 1105 1104 1106 1105 if (ctx->encoder) 1107 1106 exynos_dpi_remove(ctx->encoder);
+3 -2
drivers/gpu/drm/exynos/exynos_drm_g2d.c
··· 232 232 233 233 struct g2d_data { 234 234 struct device *dev; 235 + void *dma_priv; 235 236 struct clk *gate_clk; 236 237 void __iomem *regs; 237 238 int irq; ··· 1410 1409 return ret; 1411 1410 } 1412 1411 1413 - ret = exynos_drm_register_dma(drm_dev, dev); 1412 + ret = exynos_drm_register_dma(drm_dev, dev, &g2d->dma_priv); 1414 1413 if (ret < 0) { 1415 1414 dev_err(dev, "failed to enable iommu.\n"); 1416 1415 g2d_fini_cmdlist(g2d); ··· 1435 1434 priv->g2d_dev = NULL; 1436 1435 1437 1436 cancel_work_sync(&g2d->runqueue_work); 1438 - exynos_drm_unregister_dma(g2d->drm_dev, dev); 1437 + exynos_drm_unregister_dma(g2d->drm_dev, dev, &g2d->dma_priv); 1439 1438 } 1440 1439 1441 1440 static const struct component_ops g2d_component_ops = {
+3 -2
drivers/gpu/drm/exynos/exynos_drm_gsc.c
··· 97 97 struct gsc_context { 98 98 struct exynos_drm_ipp ipp; 99 99 struct drm_device *drm_dev; 100 + void *dma_priv; 100 101 struct device *dev; 101 102 struct exynos_drm_ipp_task *task; 102 103 struct exynos_drm_ipp_formats *formats; ··· 1170 1169 1171 1170 ctx->drm_dev = drm_dev; 1172 1171 ctx->drm_dev = drm_dev; 1173 - exynos_drm_register_dma(drm_dev, dev); 1172 + exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv); 1174 1173 1175 1174 exynos_drm_ipp_register(dev, ipp, &ipp_funcs, 1176 1175 DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE | ··· 1190 1189 struct exynos_drm_ipp *ipp = &ctx->ipp; 1191 1190 1192 1191 exynos_drm_ipp_unregister(dev, ipp); 1193 - exynos_drm_unregister_dma(drm_dev, dev); 1192 + exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv); 1194 1193 } 1195 1194 1196 1195 static const struct component_ops gsc_component_ops = {
+3 -2
drivers/gpu/drm/exynos/exynos_drm_rotator.c
··· 56 56 struct rot_context { 57 57 struct exynos_drm_ipp ipp; 58 58 struct drm_device *drm_dev; 59 + void *dma_priv; 59 60 struct device *dev; 60 61 void __iomem *regs; 61 62 struct clk *clock; ··· 244 243 245 244 rot->drm_dev = drm_dev; 246 245 ipp->drm_dev = drm_dev; 247 - exynos_drm_register_dma(drm_dev, dev); 246 + exynos_drm_register_dma(drm_dev, dev, &rot->dma_priv); 248 247 249 248 exynos_drm_ipp_register(dev, ipp, &ipp_funcs, 250 249 DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE, ··· 262 261 struct exynos_drm_ipp *ipp = &rot->ipp; 263 262 264 263 exynos_drm_ipp_unregister(dev, ipp); 265 - exynos_drm_unregister_dma(rot->drm_dev, rot->dev); 264 + exynos_drm_unregister_dma(rot->drm_dev, rot->dev, &rot->dma_priv); 266 265 } 267 266 268 267 static const struct component_ops rotator_component_ops = {
+4 -2
drivers/gpu/drm/exynos/exynos_drm_scaler.c
··· 39 39 struct scaler_context { 40 40 struct exynos_drm_ipp ipp; 41 41 struct drm_device *drm_dev; 42 + void *dma_priv; 42 43 struct device *dev; 43 44 void __iomem *regs; 44 45 struct clk *clock[SCALER_MAX_CLK]; ··· 451 450 452 451 scaler->drm_dev = drm_dev; 453 452 ipp->drm_dev = drm_dev; 454 - exynos_drm_register_dma(drm_dev, dev); 453 + exynos_drm_register_dma(drm_dev, dev, &scaler->dma_priv); 455 454 456 455 exynos_drm_ipp_register(dev, ipp, &ipp_funcs, 457 456 DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE | ··· 471 470 struct exynos_drm_ipp *ipp = &scaler->ipp; 472 471 473 472 exynos_drm_ipp_unregister(dev, ipp); 474 - exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev); 473 + exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev, 474 + &scaler->dma_priv); 475 475 } 476 476 477 477 static const struct component_ops scaler_component_ops = {
+5 -2
drivers/gpu/drm/exynos/exynos_mixer.c
··· 94 94 struct platform_device *pdev; 95 95 struct device *dev; 96 96 struct drm_device *drm_dev; 97 + void *dma_priv; 97 98 struct exynos_drm_crtc *crtc; 98 99 struct exynos_drm_plane planes[MIXER_WIN_NR]; 99 100 unsigned long flags; ··· 895 894 } 896 895 } 897 896 898 - return exynos_drm_register_dma(drm_dev, mixer_ctx->dev); 897 + return exynos_drm_register_dma(drm_dev, mixer_ctx->dev, 898 + &mixer_ctx->dma_priv); 899 899 } 900 900 901 901 static void mixer_ctx_remove(struct mixer_context *mixer_ctx) 902 902 { 903 - exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev); 903 + exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev, 904 + &mixer_ctx->dma_priv); 904 905 } 905 906 906 907 static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
+2 -1
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
··· 423 423 if (unlikely(entry->flags & eb->invalid_flags)) 424 424 return -EINVAL; 425 425 426 - if (unlikely(entry->alignment && !is_power_of_2(entry->alignment))) 426 + if (unlikely(entry->alignment && 427 + !is_power_of_2_u64(entry->alignment))) 427 428 return -EINVAL; 428 429 429 430 /*
+18 -11
drivers/gpu/drm/i915/gt/intel_lrc.c
··· 1679 1679 if (!intel_engine_has_timeslices(engine)) 1680 1680 return false; 1681 1681 1682 - if (list_is_last(&rq->sched.link, &engine->active.requests)) 1683 - return false; 1684 - 1685 - hint = max(rq_prio(list_next_entry(rq, sched.link)), 1686 - engine->execlists.queue_priority_hint); 1682 + hint = engine->execlists.queue_priority_hint; 1683 + if (!list_is_last(&rq->sched.link, &engine->active.requests)) 1684 + hint = max(hint, rq_prio(list_next_entry(rq, sched.link))); 1687 1685 1688 1686 return hint >= effective_prio(rq); 1689 1687 } ··· 1721 1723 return; 1722 1724 1723 1725 set_timer_ms(&engine->execlists.timer, active_timeslice(engine)); 1726 + } 1727 + 1728 + static void start_timeslice(struct intel_engine_cs *engine) 1729 + { 1730 + struct intel_engine_execlists *execlists = &engine->execlists; 1731 + 1732 + execlists->switch_priority_hint = execlists->queue_priority_hint; 1733 + 1734 + if (timer_pending(&execlists->timer)) 1735 + return; 1736 + 1737 + set_timer_ms(&execlists->timer, timeslice(engine)); 1724 1738 } 1725 1739 1726 1740 static void record_preemption(struct intel_engine_execlists *execlists) ··· 1898 1888 * Even if ELSP[1] is occupied and not worthy 1899 1889 * of timeslices, our queue might be. 1900 1890 */ 1901 - if (!execlists->timer.expires && 1902 - need_timeslice(engine, last)) 1903 - set_timer_ms(&execlists->timer, 1904 - timeslice(engine)); 1905 - 1891 + start_timeslice(engine); 1906 1892 return; 1907 1893 } 1908 1894 } ··· 1933 1927 1934 1928 if (last && !can_merge_rq(last, rq)) { 1935 1929 spin_unlock(&ve->base.active.lock); 1936 - return; /* leave this for another */ 1930 + start_timeslice(engine); 1931 + return; /* leave this for another sibling */ 1937 1932 } 1938 1933 1939 1934 ENGINE_TRACE(engine,
+6 -2
drivers/gpu/drm/i915/gt/intel_timeline.c
··· 192 192 193 193 static void cacheline_free(struct intel_timeline_cacheline *cl) 194 194 { 195 + if (!i915_active_acquire_if_busy(&cl->active)) { 196 + __idle_cacheline_free(cl); 197 + return; 198 + } 199 + 195 200 GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE)); 196 201 cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE); 197 202 198 - if (i915_active_is_idle(&cl->active)) 199 - __idle_cacheline_free(cl); 203 + i915_active_release(&cl->active); 200 204 } 201 205 202 206 int intel_timeline_init(struct intel_timeline *timeline,
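The timeline fix above closes the race by replacing a check-then-free with a try-acquire: cacheline_free() first attempts i915_active_acquire_if_busy(); if that fails the cacheline is provably idle and freed on the spot, otherwise the CACHELINE_FREE bit is set and the last release performs the deferred free. A rough standalone model of that shape using C11 atomics for the busy count (this is not the real i915_active API, only the idea):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct cacheline {
	atomic_int busy;		/* stand-in for the i915_active use count */
	atomic_bool free_pending;	/* stand-in for the CACHELINE_FREE bit */
};

/* take a reference only if someone else already holds one */
static bool acquire_if_busy(struct cacheline *cl)
{
	int v = atomic_load(&cl->busy);

	while (v > 0) {
		if (atomic_compare_exchange_weak(&cl->busy, &v, v + 1))
			return true;
	}
	return false;
}

/* drop a reference; the last one performs any deferred free */
static void release(struct cacheline *cl)
{
	if (atomic_fetch_sub(&cl->busy, 1) == 1 &&
	    atomic_load(&cl->free_pending))
		free(cl);
}

static void cacheline_free(struct cacheline *cl)
{
	if (!acquire_if_busy(cl)) {
		free(cl);		/* provably idle: free right away */
		return;
	}
	atomic_store(&cl->free_pending, true);
	release(cl);			/* drop the temporary reference */
}

int main(void)
{
	struct cacheline *cl = calloc(1, sizeof(*cl));

	atomic_store(&cl->busy, 1);	/* pretend a request still uses it */
	cacheline_free(cl);		/* deferred: nothing freed yet */
	release(cl);			/* the request retires: now it frees */
	return 0;
}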
+2 -1
drivers/gpu/drm/i915/gvt/display.c
··· 457 457 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 458 458 459 459 /* TODO: add more platforms support */ 460 - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { 460 + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) || 461 + IS_COFFEELAKE(dev_priv)) { 461 462 if (connected) { 462 463 vgpu_vreg_t(vgpu, SFUSE_STRAP) |= 463 464 SFUSE_STRAP_DDID_DETECTED;
+2 -3
drivers/gpu/drm/i915/gvt/opregion.c
··· 147 147 /* there's features depending on version! */ 148 148 v->header.version = 155; 149 149 v->header.header_size = sizeof(v->header); 150 - v->header.vbt_size = sizeof(struct vbt) - sizeof(v->header); 150 + v->header.vbt_size = sizeof(struct vbt); 151 151 v->header.bdb_offset = offsetof(struct vbt, bdb_header); 152 152 153 153 strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK"); 154 154 v->bdb_header.version = 186; /* child_dev_size = 33 */ 155 155 v->bdb_header.header_size = sizeof(v->bdb_header); 156 156 157 - v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header) 158 - - sizeof(struct bdb_header); 157 + v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header); 159 158 160 159 /* general features */ 161 160 v->general_features_header.id = BDB_GENERAL_FEATURES;
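The opregion fix is purely about what each size field covers: vbt_size spans the whole virtual VBT including the VBT header itself, while bdb_size spans everything from the BDB header to the end of the blob. A tiny layout sketch (hypothetical, heavily trimmed struct definitions that only mirror the field names in the diff):

#include <stdint.h>
#include <stdio.h>

/* heavily trimmed, hypothetical layouts; only the size fields matter here */
struct vbt_header { uint16_t vbt_size; };
struct bdb_header { uint16_t bdb_size; };

struct vbt {
	struct vbt_header header;
	struct bdb_header bdb_header;
	uint8_t blocks[256];		/* the BDB data blocks */
};

int main(void)
{
	struct vbt v;

	/* whole VBT, its own header included */
	v.header.vbt_size = sizeof(struct vbt);
	/* BDB header plus its blocks, i.e. everything after the VBT header */
	v.bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header);

	printf("vbt_size=%d bdb_size=%d\n",
	       v.header.vbt_size, v.bdb_header.bdb_size);
	return 0;
}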
+9 -3
drivers/gpu/drm/i915/gvt/vgpu.c
··· 272 272 { 273 273 struct intel_gvt *gvt = vgpu->gvt; 274 274 275 - mutex_lock(&vgpu->vgpu_lock); 276 - 277 275 WARN(vgpu->active, "vGPU is still active!\n"); 278 276 277 + /* 278 + * remove idr first so later clean can judge if need to stop 279 + * service if no active vgpu. 280 + */ 281 + mutex_lock(&gvt->lock); 282 + idr_remove(&gvt->vgpu_idr, vgpu->id); 283 + mutex_unlock(&gvt->lock); 284 + 285 + mutex_lock(&vgpu->vgpu_lock); 279 286 intel_gvt_debugfs_remove_vgpu(vgpu); 280 287 intel_vgpu_clean_sched_policy(vgpu); 281 288 intel_vgpu_clean_submission(vgpu); ··· 297 290 mutex_unlock(&vgpu->vgpu_lock); 298 291 299 292 mutex_lock(&gvt->lock); 300 - idr_remove(&gvt->vgpu_idr, vgpu->id); 301 293 if (idr_is_empty(&gvt->vgpu_idr)) 302 294 intel_gvt_clean_irq(gvt); 303 295 intel_gvt_update_vgpu_types(gvt);
+20 -8
drivers/gpu/drm/i915/i915_request.c
··· 527 527 return NOTIFY_DONE; 528 528 } 529 529 530 + static void irq_semaphore_cb(struct irq_work *wrk) 531 + { 532 + struct i915_request *rq = 533 + container_of(wrk, typeof(*rq), semaphore_work); 534 + 535 + i915_schedule_bump_priority(rq, I915_PRIORITY_NOSEMAPHORE); 536 + i915_request_put(rq); 537 + } 538 + 530 539 static int __i915_sw_fence_call 531 540 semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) 532 541 { 533 - struct i915_request *request = 534 - container_of(fence, typeof(*request), semaphore); 542 + struct i915_request *rq = container_of(fence, typeof(*rq), semaphore); 535 543 536 544 switch (state) { 537 545 case FENCE_COMPLETE: 538 - i915_schedule_bump_priority(request, I915_PRIORITY_NOSEMAPHORE); 546 + if (!(READ_ONCE(rq->sched.attr.priority) & I915_PRIORITY_NOSEMAPHORE)) { 547 + i915_request_get(rq); 548 + init_irq_work(&rq->semaphore_work, irq_semaphore_cb); 549 + irq_work_queue(&rq->semaphore_work); 550 + } 539 551 break; 540 552 541 553 case FENCE_FREE: 542 - i915_request_put(request); 554 + i915_request_put(rq); 543 555 break; 544 556 } 545 557 ··· 788 776 struct dma_fence *fence; 789 777 int err; 790 778 791 - GEM_BUG_ON(i915_request_timeline(rq) == 792 - rcu_access_pointer(signal->timeline)); 779 + if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline)) 780 + return 0; 793 781 794 782 if (i915_request_started(signal)) 795 783 return 0; ··· 833 821 return 0; 834 822 835 823 err = 0; 836 - if (intel_timeline_sync_is_later(i915_request_timeline(rq), fence)) 824 + if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence)) 837 825 err = i915_sw_fence_await_dma_fence(&rq->submit, 838 826 fence, 0, 839 827 I915_FENCE_GFP); ··· 1330 1318 * decide whether to preempt the entire chain so that it is ready to 1331 1319 * run at the earliest possible convenience. 1332 1320 */ 1333 - i915_sw_fence_commit(&rq->semaphore); 1334 1321 if (attr && rq->engine->schedule) 1335 1322 rq->engine->schedule(rq, attr); 1323 + i915_sw_fence_commit(&rq->semaphore); 1336 1324 i915_sw_fence_commit(&rq->submit); 1337 1325 } 1338 1326
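The i915_request.c change moves the priority bump out of the semaphore fence callback, which can fire in a context where taking the scheduler locks directly is not safe, into an irq_work item; an extra request reference is taken when the work is queued and dropped when it runs. A self-contained sketch of that defer-with-a-reference pattern (a manual refcount and a plain function pointer instead of the kernel's irq_work and i915_request):

#include <stdio.h>
#include <stdlib.h>

struct request {
	int refcount;
	int priority;
};

/* work item: stands in for the kernel's struct irq_work */
struct work {
	void (*fn)(struct work *w);
	struct request *rq;
};

static struct request *request_get(struct request *rq)
{
	rq->refcount++;
	return rq;
}

static void request_put(struct request *rq)
{
	if (--rq->refcount == 0)
		free(rq);
}

/* deferred side: a context where bumping priority is safe */
static void semaphore_cb(struct work *w)
{
	w->rq->priority += 1;		/* the actual priority bump */
	request_put(w->rq);		/* drop the reference taken below */
}

/* "fence callback" side: only grabs a reference and queues the work */
static void semaphore_notify(struct request *rq, struct work *w)
{
	w->rq = request_get(rq);
	w->fn = semaphore_cb;
}

int main(void)
{
	struct request *rq = calloc(1, sizeof(*rq));
	struct work w;

	rq->refcount = 1;
	semaphore_notify(rq, &w);	/* queued from the callback */
	w.fn(&w);			/* later: the work item runs */

	printf("priority now %d\n", rq->priority);
	request_put(rq);
	return 0;
}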
+2
drivers/gpu/drm/i915/i915_request.h
··· 26 26 #define I915_REQUEST_H 27 27 28 28 #include <linux/dma-fence.h> 29 + #include <linux/irq_work.h> 29 30 #include <linux/lockdep.h> 30 31 31 32 #include "gem/i915_gem_context_types.h" ··· 209 208 }; 210 209 struct list_head execute_cb; 211 210 struct i915_sw_fence semaphore; 211 + struct irq_work semaphore_work; 212 212 213 213 /* 214 214 * A list of everyone we wait upon, and everyone who waits upon us.
+5
drivers/gpu/drm/i915/i915_utils.h
··· 234 234 __idx; \ 235 235 }) 236 236 237 + static inline bool is_power_of_2_u64(u64 n) 238 + { 239 + return (n != 0 && ((n & (n - 1)) == 0)); 240 + } 241 + 237 242 static inline void __list_del_many(struct list_head *head, 238 243 struct list_head *first) 239 244 {
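is_power_of_2_u64() is needed because the execbuffer alignment is a u64 while the generic is_power_of_2() takes an unsigned long, which is only 32 bits wide on 32-bit builds, so the high bits of the alignment can be silently dropped. A quick standalone illustration of that truncation (userspace C, with a deliberately 32-bit check for contrast):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* what a 32-bit unsigned long check effectively sees */
static bool is_power_of_2_u32(uint32_t n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/* same shape as the is_power_of_2_u64() added in the diff above */
static bool is_power_of_2_u64(uint64_t n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	uint64_t alignment = (1ULL << 32) | 4;	/* clearly not a power of two */

	/* truncated to 4, the narrow check wrongly accepts it ... */
	printf("u32 check: %d\n", is_power_of_2_u32((uint32_t)alignment));
	/* ... while the full-width check rejects it */
	printf("u64 check: %d\n", is_power_of_2_u64(alignment));
	return 0;
}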
+2 -2
include/drm/drm_dp_mst_helper.h
··· 81 81 * &drm_dp_mst_topology_mgr.base.lock. 82 82 * @num_sdp_stream_sinks: Number of stream sinks. Protected by 83 83 * &drm_dp_mst_topology_mgr.base.lock. 84 - * @available_pbn: Available bandwidth for this port. Protected by 84 + * @full_pbn: Max possible bandwidth for this port. Protected by 85 85 * &drm_dp_mst_topology_mgr.base.lock. 86 86 * @next: link to next port on this branch device 87 87 * @aux: i2c aux transport to talk to device connected to this port, protected ··· 126 126 u8 dpcd_rev; 127 127 u8 num_sdp_streams; 128 128 u8 num_sdp_stream_sinks; 129 - uint16_t available_pbn; 129 + uint16_t full_pbn; 130 130 struct list_head next; 131 131 /** 132 132 * @mstb: the branch device connected to this port, if there is one.