Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'drm-next-2019-03-15' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes and updates from Dave Airlie:
"A few various fixes pulls and one late etnaviv pull but it was nearly
all fixes anyways.

etnaviv:
- late next pull
- mmu mapping fix
- build non-ARM arches
- misc fixes

i915:
- HDCP state handling fix
- shrinker interaction fix
- atomic state leak fix

qxl:
- kick out framebuffers early fix

amdgpu:
- Powerplay fixes
- DC fixes
- BACO turned off for now on vega20
- Locking fix
- KFD MQD fix
- gfx9 golden register updates"

* tag 'drm-next-2019-03-15' of git://anongit.freedesktop.org/drm/drm: (43 commits)
drm/amdgpu: Update gc golden setting for vega family
drm/amd/powerplay: correct power reading on fiji
drm/amd/powerplay: set max fan target temperature as 105C
drm/i915: Relax mmap VMA check
drm/i915: Fix atomic state leak when resetting HDMI link
drm/i915: Acquire breadcrumb ref before cancelling
drm/i915/selftests: Always free spinner on __sseu_prepare error
drm/i915: Reacquire priolist cache after dropping the engine lock
drm/i915: Protect i915_active iterators from the shrinker
drm/i915: HDCP state handling in ddi_update_pipe
drm/qxl: remove conflicting framebuffers earlier
drm/fb-helper: call vga_remove_vgacon automatically.
drm: move i915_kick_out_vgacon to vgaarb
drm/amd/display: don't call dm_pp_ function from an fpu block
drm: add __user attribute to ptr_to_compat()
drm/amdgpu: clear PDs/PTs only after initializing them
drm/amd/display: Pass app_tf by value rather than by reference
Revert "drm/amdgpu: use BACO reset on vega20 if platform support"
drm/amd/powerplay: show the right override pcie parameters
drm/amd/powerplay: honor the OD settings
...

+451 -375
+1 -1
MAINTAINERS
··· 5278 5278 M: Lucas Stach <l.stach@pengutronix.de> 5279 5279 R: Russell King <linux+etnaviv@armlinux.org.uk> 5280 5280 R: Christian Gmeiner <christian.gmeiner@gmail.com> 5281 - L: etnaviv@lists.freedesktop.org 5281 + L: etnaviv@lists.freedesktop.org (moderated for non-subscribers) 5282 5282 L: dri-devel@lists.freedesktop.org 5283 5283 S: Maintained 5284 5284 F: drivers/gpu/drm/etnaviv/
+6 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 947 947 if (r) 948 948 return r; 949 949 950 - r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats); 951 - if (r) 952 - goto error_free_pt; 953 - 954 950 if (vm->use_cpu_for_update) { 955 951 r = amdgpu_bo_kmap(pt, NULL); 956 952 if (r) ··· 959 963 pt->parent = amdgpu_bo_ref(cursor.parent->base.bo); 960 964 961 965 amdgpu_vm_bo_base_init(&entry->base, vm, pt); 966 + 967 + r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats); 968 + if (r) 969 + goto error_free_pt; 962 970 } 963 971 964 972 return 0; ··· 3033 3033 if (r) 3034 3034 goto error_unreserve; 3035 3035 3036 + amdgpu_vm_bo_base_init(&vm->root.base, vm, root); 3037 + 3036 3038 r = amdgpu_vm_clear_bo(adev, vm, root, 3037 3039 adev->vm_manager.root_level, 3038 3040 vm->pte_support_ats); 3039 3041 if (r) 3040 3042 goto error_unreserve; 3041 3043 3042 - amdgpu_vm_bo_base_init(&vm->root.base, vm, root); 3043 3044 amdgpu_bo_unreserve(vm->root.base.bo); 3044 3045 3045 3046 if (pasid) {
+1
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
··· 220 220 221 221 static const struct soc15_reg_golden golden_settings_gc_9_x_common[] = 222 222 { 223 + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff), 223 224 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000), 224 225 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382) 225 226 };
+1 -3
drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
··· 500 500 struct amdgpu_device *adev = psp->adev; 501 501 uint32_t reg; 502 502 503 - reg = smnMP1_FIRMWARE_FLAGS | 0x03b00000; 504 - WREG32_SOC15(NBIO, 0, mmPCIE_INDEX2, reg); 505 - reg = RREG32_SOC15(NBIO, 0, mmPCIE_DATA2); 503 + reg = RREG32_PCIE(smnMP1_FIRMWARE_FLAGS | 0x03b00000); 506 504 return (reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) ? true : false; 507 505 } 508 506
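Note on the psp_v3_1 hunk above (the smu9 and vega20 smumgr hunks later in this series make the same change): the old code programmed a PCIE index register by hand and then read the data register, while the new code goes through the RREG32_PCIE() helper that wraps that indirect access. A minimal sketch of the index/data pattern being wrapped, using hypothetical register offsets and a hypothetical helper name, might look like this:

```c
#include <linux/io.h>     /* writel()/readl() */
#include <linux/types.h>

#define HYP_PCIE_INDEX2 0x0e  /* assumed: register selecting the target address */
#define HYP_PCIE_DATA2  0x0f  /* assumed: data port for the selected register   */

/* Hypothetical indirect read: select the register, then read it back. */
static u32 hyp_pcie_rreg(void __iomem *mmio, u32 reg_addr)
{
	writel(reg_addr, mmio + HYP_PCIE_INDEX2 * 4);  /* pick the register behind the window */
	return readl(mmio + HYP_PCIE_DATA2 * 4);       /* read its value through the data port */
}
```

Keeping this pairing behind a single helper (as RREG32_PCIE does in amdgpu) avoids open-coding the two-step access at every call site.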
-1
drivers/gpu/drm/amd/amdgpu/soc15.c
··· 461 461 462 462 switch (adev->asic_type) { 463 463 case CHIP_VEGA10: 464 - case CHIP_VEGA20: 465 464 soc15_asic_get_baco_capability(adev, &baco_reset); 466 465 break; 467 466 default:
+1 -51
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
··· 323 323 struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr, 324 324 struct queue_properties *q) 325 325 { 326 - uint64_t addr; 327 - struct cik_mqd *m; 328 - int retval; 329 - 330 - retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd), 331 - mqd_mem_obj); 332 - 333 - if (retval != 0) 334 - return -ENOMEM; 335 - 336 - m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr; 337 - addr = (*mqd_mem_obj)->gpu_addr; 338 - 339 - memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256)); 340 - 341 - m->header = 0xC0310800; 342 - m->compute_pipelinestat_enable = 1; 343 - m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF; 344 - m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF; 345 - m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF; 346 - m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF; 347 - 348 - m->cp_hqd_persistent_state = DEFAULT_CP_HQD_PERSISTENT_STATE | 349 - PRELOAD_REQ; 350 - m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS | 351 - QUANTUM_DURATION(10); 352 - 353 - m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN; 354 - m->cp_mqd_base_addr_lo = lower_32_bits(addr); 355 - m->cp_mqd_base_addr_hi = upper_32_bits(addr); 356 - 357 - m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE; 358 - 359 - /* 360 - * Pipe Priority 361 - * Identifies the pipe relative priority when this queue is connected 362 - * to the pipeline. The pipe priority is against the GFX pipe and HP3D. 363 - * In KFD we are using a fixed pipe priority set to CS_MEDIUM. 364 - * 0 = CS_LOW (typically below GFX) 365 - * 1 = CS_MEDIUM (typically between HP3D and GFX 366 - * 2 = CS_HIGH (typically above HP3D) 367 - */ 368 - m->cp_hqd_pipe_priority = 1; 369 - m->cp_hqd_queue_priority = 15; 370 - 371 - *mqd = m; 372 - if (gart_addr) 373 - *gart_addr = addr; 374 - retval = mm->update_mqd(mm, m, q); 375 - 376 - return retval; 326 + return init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q); 377 327 } 378 328 379 329 static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
+35 -8
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 886 886 return; 887 887 } 888 888 889 + /* dc_sink_create returns a new reference */ 889 890 link->local_sink = sink; 890 891 891 892 edid_status = dm_helpers_read_local_edid( ··· 953 952 if (aconnector->fake_enable && aconnector->dc_link->local_sink) 954 953 aconnector->fake_enable = false; 955 954 955 + if (aconnector->dc_sink) 956 + dc_sink_release(aconnector->dc_sink); 956 957 aconnector->dc_sink = NULL; 957 958 amdgpu_dm_update_connector_after_detect(aconnector); 958 959 mutex_unlock(&aconnector->hpd_lock); ··· 1064 1061 1065 1062 1066 1063 sink = aconnector->dc_link->local_sink; 1064 + if (sink) 1065 + dc_sink_retain(sink); 1067 1066 1068 1067 /* 1069 1068 * Edid mgmt connector gets first update only in mode_valid hook and then ··· 1090 1085 * to it anymore after disconnect, so on next crtc to connector 1091 1086 * reshuffle by UMD we will get into unwanted dc_sink release 1092 1087 */ 1093 - if (aconnector->dc_sink != aconnector->dc_em_sink) 1094 - dc_sink_release(aconnector->dc_sink); 1088 + dc_sink_release(aconnector->dc_sink); 1095 1089 } 1096 1090 aconnector->dc_sink = sink; 1091 + dc_sink_retain(aconnector->dc_sink); 1097 1092 amdgpu_dm_update_freesync_caps(connector, 1098 1093 aconnector->edid); 1099 1094 } else { 1100 1095 amdgpu_dm_update_freesync_caps(connector, NULL); 1101 - if (!aconnector->dc_sink) 1096 + if (!aconnector->dc_sink) { 1102 1097 aconnector->dc_sink = aconnector->dc_em_sink; 1103 - else if (aconnector->dc_sink != aconnector->dc_em_sink) 1104 1098 dc_sink_retain(aconnector->dc_sink); 1099 + } 1105 1100 } 1106 1101 1107 1102 mutex_unlock(&dev->mode_config.mutex); 1103 + 1104 + if (sink) 1105 + dc_sink_release(sink); 1108 1106 return; 1109 1107 } 1110 1108 ··· 1115 1107 * TODO: temporary guard to look for proper fix 1116 1108 * if this sink is MST sink, we should not do anything 1117 1109 */ 1118 - if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) 1110 + if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { 1111 + dc_sink_release(sink); 1119 1112 return; 1113 + } 1120 1114 1121 1115 if (aconnector->dc_sink == sink) { 1122 1116 /* ··· 1127 1117 */ 1128 1118 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n", 1129 1119 aconnector->connector_id); 1120 + if (sink) 1121 + dc_sink_release(sink); 1130 1122 return; 1131 1123 } 1132 1124 ··· 1150 1138 amdgpu_dm_update_freesync_caps(connector, NULL); 1151 1139 1152 1140 aconnector->dc_sink = sink; 1141 + dc_sink_retain(aconnector->dc_sink); 1153 1142 if (sink->dc_edid.length == 0) { 1154 1143 aconnector->edid = NULL; 1155 1144 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); ··· 1171 1158 amdgpu_dm_update_freesync_caps(connector, NULL); 1172 1159 drm_connector_update_edid_property(connector, NULL); 1173 1160 aconnector->num_modes = 0; 1161 + dc_sink_release(aconnector->dc_sink); 1174 1162 aconnector->dc_sink = NULL; 1175 1163 aconnector->edid = NULL; 1176 1164 } 1177 1165 1178 1166 mutex_unlock(&dev->mode_config.mutex); 1167 + 1168 + if (sink) 1169 + dc_sink_release(sink); 1179 1170 } 1180 1171 1181 1172 static void handle_hpd_irq(void *param) ··· 2994 2977 return stream; 2995 2978 } else { 2996 2979 sink = aconnector->dc_sink; 2980 + dc_sink_retain(sink); 2997 2981 } 2998 2982 2999 2983 stream = dc_create_stream_for_sink(sink); ··· 3060 3042 update_stream_signal(stream, sink); 3061 3043 3062 3044 finish: 3063 - if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON) 3064 - dc_sink_release(sink); 3045 + dc_sink_release(sink); 3065 3046 
3066 3047 return stream; 3067 3048 } ··· 3318 3301 dm->backlight_dev = NULL; 3319 3302 } 3320 3303 #endif 3304 + 3305 + if (aconnector->dc_em_sink) 3306 + dc_sink_release(aconnector->dc_em_sink); 3307 + aconnector->dc_em_sink = NULL; 3308 + if (aconnector->dc_sink) 3309 + dc_sink_release(aconnector->dc_sink); 3310 + aconnector->dc_sink = NULL; 3311 + 3321 3312 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux); 3322 3313 drm_connector_unregister(connector); 3323 3314 drm_connector_cleanup(connector); ··· 3423 3398 (edid->extensions + 1) * EDID_LENGTH, 3424 3399 &init_params); 3425 3400 3426 - if (aconnector->base.force == DRM_FORCE_ON) 3401 + if (aconnector->base.force == DRM_FORCE_ON) { 3427 3402 aconnector->dc_sink = aconnector->dc_link->local_sink ? 3428 3403 aconnector->dc_link->local_sink : 3429 3404 aconnector->dc_em_sink; 3405 + dc_sink_retain(aconnector->dc_sink); 3406 + } 3430 3407 } 3431 3408 3432 3409 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
+1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
··· 191 191 &init_params); 192 192 193 193 dc_sink->priv = aconnector; 194 + /* dc_link_add_remote_sink returns a new reference */ 194 195 aconnector->dc_sink = dc_sink; 195 196 196 197 if (aconnector->dc_sink)
+6 -2
drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
··· 1348 1348 struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0}; 1349 1349 bool res; 1350 1350 1351 - kernel_fpu_begin(); 1352 - 1353 1351 /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */ 1354 1352 res = dm_pp_get_clock_levels_by_type_with_voltage( 1355 1353 ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks); 1354 + 1355 + kernel_fpu_begin(); 1356 1356 1357 1357 if (res) 1358 1358 res = verify_clock_values(&fclks); ··· 1372 1372 } else 1373 1373 BREAK_TO_DEBUGGER(); 1374 1374 1375 + kernel_fpu_end(); 1376 + 1375 1377 res = dm_pp_get_clock_levels_by_type_with_voltage( 1376 1378 ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks); 1379 + 1380 + kernel_fpu_begin(); 1377 1381 1378 1382 if (res) 1379 1383 res = verify_clock_values(&dcfclks);
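The dcn_calcs change above narrows the kernel_fpu_begin()/kernel_fpu_end() sections so that the dm_pp_* queries, which may sleep, run outside them. As a rough illustration of the rule being applied, with hypothetical names (hyp_ctx, hyp_clock_levels, query_clock_levels are assumptions, not DC code), only the pure floating-point math sits inside the FPU section:

```c
#include <asm/fpu/api.h>   /* kernel_fpu_begin()/kernel_fpu_end() on x86 */

/* Hypothetical types for illustration only. */
struct hyp_clock_levels { unsigned int freq_khz; unsigned int width_bytes; };
struct hyp_ctx { unsigned long long bandwidth_bps; };

/* Assume query_clock_levels() may sleep (e.g. it messages firmware), so it
 * must not run between kernel_fpu_begin() and kernel_fpu_end(). */
int query_clock_levels(struct hyp_ctx *ctx, struct hyp_clock_levels *out);

static int hyp_compute_bandwidth(struct hyp_ctx *ctx)
{
	struct hyp_clock_levels levels;
	int res;

	res = query_clock_levels(ctx, &levels);  /* may sleep: outside the FPU section */
	if (res)
		return res;

	kernel_fpu_begin();                      /* preemption off, FPU usable from here */
	ctx->bandwidth_bps =
		(unsigned long long)((double)levels.freq_khz * 1000.0 * levels.width_bytes);
	kernel_fpu_end();                        /* end before anything that can sleep again */

	return 0;
}
```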
+12 -4
drivers/gpu/drm/amd/display/dc/core/dc_link.c
··· 794 794 sink->link->dongle_max_pix_clk = sink_caps.max_hdmi_pixel_clock; 795 795 sink->converter_disable_audio = converter_disable_audio; 796 796 797 + /* dc_sink_create returns a new reference */ 797 798 link->local_sink = sink; 798 799 799 800 edid_status = dm_helpers_read_local_edid( ··· 2038 2037 break; 2039 2038 } 2040 2039 2040 + if (status == DC_OK) 2041 + pipe_ctx->stream->link->link_status.link_active = true; 2042 + 2041 2043 return status; 2042 2044 } 2043 2045 ··· 2064 2060 dp_disable_link_phy_mst(link, signal); 2065 2061 } else 2066 2062 link->link_enc->funcs->disable_output(link->link_enc, signal); 2063 + 2064 + if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { 2065 + /* MST disable link only when no stream use the link */ 2066 + if (link->mst_stream_alloc_table.stream_count <= 0) 2067 + link->link_status.link_active = false; 2068 + } else { 2069 + link->link_status.link_active = false; 2070 + } 2067 2071 } 2068 2072 2069 2073 static bool dp_active_dongle_validate_timing( ··· 2635 2623 } 2636 2624 } 2637 2625 2638 - stream->link->link_status.link_active = true; 2639 - 2640 2626 core_dc->hwss.enable_audio_stream(pipe_ctx); 2641 2627 2642 2628 /* turn off otg test pattern if enable */ ··· 2669 2659 core_dc->hwss.disable_stream(pipe_ctx, option); 2670 2660 2671 2661 disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); 2672 - 2673 - pipe_ctx->stream->link->link_status.link_active = false; 2674 2662 } 2675 2663 2676 2664 void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
+3 -4
drivers/gpu/drm/amd/display/modules/freesync/freesync.c
··· 724 724 725 725 static void build_vrr_infopacket_v2(enum signal_type signal, 726 726 const struct mod_vrr_params *vrr, 727 - const enum color_transfer_func *app_tf, 727 + enum color_transfer_func app_tf, 728 728 struct dc_info_packet *infopacket) 729 729 { 730 730 unsigned int payload_size = 0; ··· 732 732 build_vrr_infopacket_header_v2(signal, infopacket, &payload_size); 733 733 build_vrr_infopacket_data(vrr, infopacket); 734 734 735 - if (app_tf != NULL) 736 - build_vrr_infopacket_fs2_data(*app_tf, infopacket); 735 + build_vrr_infopacket_fs2_data(app_tf, infopacket); 737 736 738 737 build_vrr_infopacket_checksum(&payload_size, infopacket); 739 738 ··· 756 757 const struct dc_stream_state *stream, 757 758 const struct mod_vrr_params *vrr, 758 759 enum vrr_packet_type packet_type, 759 - const enum color_transfer_func *app_tf, 760 + enum color_transfer_func app_tf, 760 761 struct dc_info_packet *infopacket) 761 762 { 762 763 /* SPD info packet for FreeSync
+1 -1
drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
··· 145 145 const struct dc_stream_state *stream, 146 146 const struct mod_vrr_params *vrr, 147 147 enum vrr_packet_type packet_type, 148 - const enum color_transfer_func *app_tf, 148 + enum color_transfer_func app_tf, 149 149 struct dc_info_packet *infopacket); 150 150 151 151 void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
+1 -2
drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
··· 277 277 if (!skip_display_settings) 278 278 phm_notify_smc_display_config_after_ps_adjustment(hwmgr); 279 279 280 - if ((hwmgr->request_dpm_level != hwmgr->dpm_level) && 281 - !phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level)) 280 + if (!phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level)) 282 281 hwmgr->dpm_level = hwmgr->request_dpm_level; 283 282 284 283 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+17 -13
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
··· 489 489 } 490 490 491 491 int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, 492 - uint8_t id, uint32_t *frequency) 492 + uint8_t clk_id, uint8_t syspll_id, 493 + uint32_t *frequency) 493 494 { 494 495 struct amdgpu_device *adev = hwmgr->adev; 495 496 struct atom_get_smu_clock_info_parameters_v3_1 parameters; 496 497 struct atom_get_smu_clock_info_output_parameters_v3_1 *output; 497 498 uint32_t ix; 498 499 499 - parameters.clk_id = id; 500 - parameters.syspll_id = 0; 500 + parameters.clk_id = clk_id; 501 + parameters.syspll_id = syspll_id; 501 502 parameters.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ; 502 503 parameters.dfsdid = 0; 503 504 ··· 531 530 boot_values->ulSocClk = 0; 532 531 boot_values->ulDCEFClk = 0; 533 532 534 - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, &frequency)) 533 + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, SMU11_SYSPLL0_ID, &frequency)) 535 534 boot_values->ulSocClk = frequency; 536 535 537 - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, &frequency)) 536 + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, SMU11_SYSPLL0_ID, &frequency)) 538 537 boot_values->ulDCEFClk = frequency; 539 538 540 - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, &frequency)) 539 + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, SMU11_SYSPLL0_ID, &frequency)) 541 540 boot_values->ulEClk = frequency; 542 541 543 - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, &frequency)) 542 + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, SMU11_SYSPLL0_ID, &frequency)) 544 543 boot_values->ulVClk = frequency; 545 544 546 - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, &frequency)) 545 + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, SMU11_SYSPLL0_ID, &frequency)) 547 546 boot_values->ulDClk = frequency; 547 + 548 + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL1_0_FCLK_ID, SMU11_SYSPLL1_2_ID, &frequency)) 549 + boot_values->ulFClk = frequency; 548 550 } 549 551 550 552 static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr, ··· 567 563 boot_values->ulSocClk = 0; 568 564 boot_values->ulDCEFClk = 0; 569 565 570 - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency)) 566 + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, 0, &frequency)) 571 567 boot_values->ulSocClk = frequency; 572 568 573 - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency)) 569 + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, 0, &frequency)) 574 570 boot_values->ulDCEFClk = frequency; 575 571 576 - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, &frequency)) 572 + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, 0, &frequency)) 577 573 boot_values->ulEClk = frequency; 578 574 579 - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, &frequency)) 575 + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, 0, &frequency)) 580 576 boot_values->ulVClk = frequency; 581 577 582 - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, &frequency)) 578 + if 
(!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, 0, &frequency)) 583 579 boot_values->ulDClk = frequency; 584 580 } 585 581
+3 -1
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
··· 139 139 uint32_t ulEClk; 140 140 uint32_t ulVClk; 141 141 uint32_t ulDClk; 142 + uint32_t ulFClk; 142 143 uint16_t usVddc; 143 144 uint16_t usVddci; 144 145 uint16_t usMvddc; ··· 237 236 int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr, 238 237 struct pp_atomfwctrl_smc_dpm_parameters *param); 239 238 int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, 240 - uint8_t id, uint32_t *frequency); 239 + uint8_t clk_id, uint8_t syspll_id, 240 + uint32_t *frequency); 241 241 242 242 #endif 243 243
+3 -3
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
··· 3491 3491 3492 3492 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart); 3493 3493 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 3494 - ixSMU_PM_STATUS_94, 0); 3494 + ixSMU_PM_STATUS_95, 0); 3495 3495 3496 3496 for (i = 0; i < 10; i++) { 3497 - mdelay(1); 3497 + mdelay(500); 3498 3498 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample); 3499 3499 tmp = cgs_read_ind_register(hwmgr->device, 3500 3500 CGS_IND_REG__SMC, 3501 - ixSMU_PM_STATUS_94); 3501 + ixSMU_PM_STATUS_95); 3502 3502 if (tmp != 0) 3503 3503 break; 3504 3504 }
+4 -4
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
··· 2575 2575 data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk; 2576 2576 data->vbios_boot_state.mem_clock = boot_up_values.ulUClk; 2577 2577 pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, 2578 - SMU9_SYSPLL0_SOCCLK_ID, &boot_up_values.ulSocClk); 2578 + SMU9_SYSPLL0_SOCCLK_ID, 0, &boot_up_values.ulSocClk); 2579 2579 2580 2580 pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, 2581 - SMU9_SYSPLL0_DCEFCLK_ID, &boot_up_values.ulDCEFClk); 2581 + SMU9_SYSPLL0_DCEFCLK_ID, 0, &boot_up_values.ulDCEFClk); 2582 2582 2583 2583 data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk; 2584 2584 data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk; ··· 4407 4407 return ret; 4408 4408 4409 4409 features_to_disable = 4410 - (features_enabled ^ new_ppfeature_masks) & features_enabled; 4410 + features_enabled & ~new_ppfeature_masks; 4411 4411 features_to_enable = 4412 - (features_enabled ^ new_ppfeature_masks) ^ features_to_disable; 4412 + ~features_enabled & new_ppfeature_masks; 4413 4413 4414 4414 pr_debug("features_to_disable 0x%llx\n", features_to_disable); 4415 4415 pr_debug("features_to_enable 0x%llx\n", features_to_enable);
+2 -2
drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
··· 2009 2009 return ret; 2010 2010 2011 2011 features_to_disable = 2012 - (features_enabled ^ new_ppfeature_masks) & features_enabled; 2012 + features_enabled & ~new_ppfeature_masks; 2013 2013 features_to_enable = 2014 - (features_enabled ^ new_ppfeature_masks) ^ features_to_disable; 2014 + ~features_enabled & new_ppfeature_masks; 2015 2015 2016 2016 pr_debug("features_to_disable 0x%llx\n", features_to_disable); 2017 2017 pr_debug("features_to_enable 0x%llx\n", features_to_enable);
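The vega10 and vega12 hunks above (and the matching vega20 hunk below) replace the XOR-based mask arithmetic with the direct form: bits that are set now but absent from the requested mask get disabled, and bits that are requested but not yet set get enabled. A small standalone illustration with made-up mask values:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t features_enabled    = 0x0000000000000f0bULL; /* example current mask   */
	uint64_t new_ppfeature_masks = 0x0000000000000e1dULL; /* example requested mask */

	/* Set now, not requested -> turn off. */
	uint64_t features_to_disable = features_enabled & ~new_ppfeature_masks;
	/* Requested, not yet set -> turn on. */
	uint64_t features_to_enable  = ~features_enabled & new_ppfeature_masks;

	printf("disable 0x%llx\n", (unsigned long long)features_to_disable);
	printf("enable  0x%llx\n", (unsigned long long)features_to_enable);
	return 0;
}
```

With these example masks the program prints disable 0x102 and enable 0x14, the same results the old `(a ^ b) & a` and `(a ^ b) ^ ((a ^ b) & a)` expressions produce; the new form just states the intent directly.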
+121 -101
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
··· 463 463 static void vega20_init_dpm_state(struct vega20_dpm_state *dpm_state) 464 464 { 465 465 dpm_state->soft_min_level = 0x0; 466 - dpm_state->soft_max_level = 0xffff; 466 + dpm_state->soft_max_level = VG20_CLOCK_MAX_DEFAULT; 467 467 dpm_state->hard_min_level = 0x0; 468 - dpm_state->hard_max_level = 0xffff; 468 + dpm_state->hard_max_level = VG20_CLOCK_MAX_DEFAULT; 469 469 } 470 470 471 471 static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr, ··· 711 711 PP_ASSERT_WITH_CODE(!ret, 712 712 "[SetupDefaultDpmTable] failed to get fclk dpm levels!", 713 713 return ret); 714 - } else 715 - dpm_table->count = 0; 714 + } else { 715 + dpm_table->count = 1; 716 + dpm_table->dpm_levels[0].value = data->vbios_boot_state.fclock / 100; 717 + } 716 718 vega20_init_dpm_state(&(dpm_table->dpm_state)); 717 719 718 720 /* save a copy of the default DPM table */ ··· 756 754 data->vbios_boot_state.eclock = boot_up_values.ulEClk; 757 755 data->vbios_boot_state.vclock = boot_up_values.ulVClk; 758 756 data->vbios_boot_state.dclock = boot_up_values.ulDClk; 757 + data->vbios_boot_state.fclock = boot_up_values.ulFClk; 759 758 data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID; 760 759 761 760 smum_send_msg_to_smc_with_parameter(hwmgr, ··· 783 780 static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr) 784 781 { 785 782 struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); 783 + struct vega20_hwmgr *data = 784 + (struct vega20_hwmgr *)(hwmgr->backend); 786 785 uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg; 787 786 int ret; 788 787 ··· 820 815 PP_ASSERT_WITH_CODE(!ret, 821 816 "[OverridePcieParameters] Attempt to override pcie params failed!", 822 817 return ret); 818 + 819 + data->pcie_parameters_override = 1; 820 + data->pcie_gen_level1 = pcie_gen; 821 + data->pcie_width_level1 = pcie_width; 823 822 824 823 return 0; 825 824 } ··· 988 979 } 989 980 990 981 if (data->smu_features[GNLD_DPM_UCLK].enabled) { 982 + pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] = 983 + data->dpm_table.mem_table.dpm_levels[data->dpm_table.mem_table.count - 2].value; 991 984 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_UCLK_MAX] && 992 985 pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] > 0 && 993 986 pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] > 0 && ··· 2325 2314 2326 2315 static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr) 2327 2316 { 2328 - struct vega20_hwmgr *data = 2329 - (struct vega20_hwmgr *)(hwmgr->backend); 2330 - uint32_t soft_min_level, soft_max_level; 2331 2317 int ret = 0; 2332 - 2333 - soft_min_level = vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); 2334 - soft_max_level = vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table)); 2335 - data->dpm_table.gfx_table.dpm_state.soft_min_level = 2336 - data->dpm_table.gfx_table.dpm_levels[soft_min_level].value; 2337 - data->dpm_table.gfx_table.dpm_state.soft_max_level = 2338 - data->dpm_table.gfx_table.dpm_levels[soft_max_level].value; 2339 - 2340 - soft_min_level = vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table)); 2341 - soft_max_level = vega20_find_highest_dpm_level(&(data->dpm_table.mem_table)); 2342 - data->dpm_table.mem_table.dpm_state.soft_min_level = 2343 - data->dpm_table.mem_table.dpm_levels[soft_min_level].value; 2344 - data->dpm_table.mem_table.dpm_state.soft_max_level = 2345 - data->dpm_table.mem_table.dpm_levels[soft_max_level].value; 2346 - 2347 - soft_min_level = 
vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table)); 2348 - soft_max_level = vega20_find_highest_dpm_level(&(data->dpm_table.soc_table)); 2349 - data->dpm_table.soc_table.dpm_state.soft_min_level = 2350 - data->dpm_table.soc_table.dpm_levels[soft_min_level].value; 2351 - data->dpm_table.soc_table.dpm_state.soft_max_level = 2352 - data->dpm_table.soc_table.dpm_levels[soft_max_level].value; 2353 2318 2354 2319 ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); 2355 2320 PP_ASSERT_WITH_CODE(!ret, ··· 2628 2641 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table); 2629 2642 int i, count; 2630 2643 2631 - PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_GFXCLK].enabled, 2632 - "[GetSclks]: gfxclk dpm not enabled!\n", 2633 - return -EPERM); 2644 + if (!data->smu_features[GNLD_DPM_GFXCLK].enabled) 2645 + return -1; 2634 2646 2635 2647 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; 2636 2648 clocks->num_levels = count; ··· 2656 2670 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.mem_table); 2657 2671 int i, count; 2658 2672 2659 - PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_UCLK].enabled, 2660 - "[GetMclks]: uclk dpm not enabled!\n", 2661 - return -EPERM); 2673 + if (!data->smu_features[GNLD_DPM_UCLK].enabled) 2674 + return -1; 2662 2675 2663 2676 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; 2664 2677 clocks->num_levels = data->mclk_latency_table.count = count; ··· 2681 2696 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.dcef_table); 2682 2697 int i, count; 2683 2698 2684 - PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_DCEFCLK].enabled, 2685 - "[GetDcfclocks]: dcefclk dpm not enabled!\n", 2686 - return -EPERM); 2699 + if (!data->smu_features[GNLD_DPM_DCEFCLK].enabled) 2700 + return -1; 2687 2701 2688 2702 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; 2689 2703 clocks->num_levels = count; ··· 2703 2719 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.soc_table); 2704 2720 int i, count; 2705 2721 2706 - PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_SOCCLK].enabled, 2707 - "[GetSocclks]: socclk dpm not enabled!\n", 2708 - return -EPERM); 2722 + if (!data->smu_features[GNLD_DPM_SOCCLK].enabled) 2723 + return -1; 2709 2724 2710 2725 count = (dpm_table->count > MAX_NUM_CLOCKS) ? 
MAX_NUM_CLOCKS : dpm_table->count; 2711 2726 clocks->num_levels = count; ··· 2782 2799 data->od8_settings.od8_settings_array; 2783 2800 OverDriveTable_t *od_table = 2784 2801 &(data->smc_state_table.overdrive_table); 2785 - struct pp_clock_levels_with_latency clocks; 2786 2802 int32_t input_index, input_clk, input_vol, i; 2787 2803 int od8_id; 2788 2804 int ret; ··· 2840 2858 return -EOPNOTSUPP; 2841 2859 } 2842 2860 2843 - ret = vega20_get_memclocks(hwmgr, &clocks); 2844 - PP_ASSERT_WITH_CODE(!ret, 2845 - "Attempt to get memory clk levels failed!", 2846 - return ret); 2847 - 2848 2861 for (i = 0; i < size; i += 2) { 2849 2862 if (i + 2 > size) { 2850 2863 pr_info("invalid number of input parameters %d\n", ··· 2856 2879 return -EINVAL; 2857 2880 } 2858 2881 2859 - if (input_clk < clocks.data[0].clocks_in_khz / 1000 || 2882 + if (input_clk < od8_settings[OD8_SETTING_UCLK_FMAX].min_value || 2860 2883 input_clk > od8_settings[OD8_SETTING_UCLK_FMAX].max_value) { 2861 2884 pr_info("clock freq %d is not within allowed range [%d - %d]\n", 2862 2885 input_clk, 2863 - clocks.data[0].clocks_in_khz / 1000, 2886 + od8_settings[OD8_SETTING_UCLK_FMAX].min_value, 2864 2887 od8_settings[OD8_SETTING_UCLK_FMAX].max_value); 2865 2888 return -EINVAL; 2866 2889 } ··· 3065 3088 return ret; 3066 3089 3067 3090 features_to_disable = 3068 - (features_enabled ^ new_ppfeature_masks) & features_enabled; 3091 + features_enabled & ~new_ppfeature_masks; 3069 3092 features_to_enable = 3070 - (features_enabled ^ new_ppfeature_masks) ^ features_to_disable; 3093 + ~features_enabled & new_ppfeature_masks; 3071 3094 3072 3095 pr_debug("features_to_disable 0x%llx\n", features_to_disable); 3073 3096 pr_debug("features_to_enable 0x%llx\n", features_to_enable); ··· 3105 3128 &(data->dpm_table.fclk_table); 3106 3129 int i, now, size = 0; 3107 3130 int ret = 0; 3108 - uint32_t gen_speed, lane_width; 3131 + uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width; 3109 3132 3110 3133 switch (type) { 3111 3134 case PP_SCLK: ··· 3114 3137 "Attempt to get current gfx clk Failed!", 3115 3138 return ret); 3116 3139 3117 - ret = vega20_get_sclks(hwmgr, &clocks); 3118 - PP_ASSERT_WITH_CODE(!ret, 3119 - "Attempt to get gfx clk levels Failed!", 3120 - return ret); 3140 + if (vega20_get_sclks(hwmgr, &clocks)) { 3141 + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", 3142 + now / 100); 3143 + break; 3144 + } 3121 3145 3122 3146 for (i = 0; i < clocks.num_levels; i++) 3123 3147 size += sprintf(buf + size, "%d: %uMhz %s\n", ··· 3132 3154 "Attempt to get current mclk freq Failed!", 3133 3155 return ret); 3134 3156 3135 - ret = vega20_get_memclocks(hwmgr, &clocks); 3136 - PP_ASSERT_WITH_CODE(!ret, 3137 - "Attempt to get memory clk levels Failed!", 3138 - return ret); 3157 + if (vega20_get_memclocks(hwmgr, &clocks)) { 3158 + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", 3159 + now / 100); 3160 + break; 3161 + } 3139 3162 3140 3163 for (i = 0; i < clocks.num_levels; i++) 3141 3164 size += sprintf(buf + size, "%d: %uMhz %s\n", ··· 3150 3171 "Attempt to get current socclk freq Failed!", 3151 3172 return ret); 3152 3173 3153 - ret = vega20_get_socclocks(hwmgr, &clocks); 3154 - PP_ASSERT_WITH_CODE(!ret, 3155 - "Attempt to get soc clk levels Failed!", 3156 - return ret); 3174 + if (vega20_get_socclocks(hwmgr, &clocks)) { 3175 + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", 3176 + now / 100); 3177 + break; 3178 + } 3157 3179 3158 3180 for (i = 0; i < clocks.num_levels; i++) 3159 3181 size += sprintf(buf + 
size, "%d: %uMhz %s\n", ··· 3180 3200 "Attempt to get current dcefclk freq Failed!", 3181 3201 return ret); 3182 3202 3183 - ret = vega20_get_dcefclocks(hwmgr, &clocks); 3184 - PP_ASSERT_WITH_CODE(!ret, 3185 - "Attempt to get dcefclk levels Failed!", 3186 - return ret); 3203 + if (vega20_get_dcefclocks(hwmgr, &clocks)) { 3204 + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", 3205 + now / 100); 3206 + break; 3207 + } 3187 3208 3188 3209 for (i = 0; i < clocks.num_levels; i++) 3189 3210 size += sprintf(buf + size, "%d: %uMhz %s\n", ··· 3193 3212 break; 3194 3213 3195 3214 case PP_PCIE: 3196 - gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & 3215 + current_gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & 3197 3216 PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) 3198 3217 >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; 3199 - lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & 3218 + current_lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & 3200 3219 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) 3201 3220 >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; 3202 - for (i = 0; i < NUM_LINK_LEVELS; i++) 3221 + for (i = 0; i < NUM_LINK_LEVELS; i++) { 3222 + if (i == 1 && data->pcie_parameters_override) { 3223 + gen_speed = data->pcie_gen_level1; 3224 + lane_width = data->pcie_width_level1; 3225 + } else { 3226 + gen_speed = pptable->PcieGenSpeed[i]; 3227 + lane_width = pptable->PcieLaneCount[i]; 3228 + } 3203 3229 size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i, 3204 - (pptable->PcieGenSpeed[i] == 0) ? "2.5GT/s," : 3205 - (pptable->PcieGenSpeed[i] == 1) ? "5.0GT/s," : 3206 - (pptable->PcieGenSpeed[i] == 2) ? "8.0GT/s," : 3207 - (pptable->PcieGenSpeed[i] == 3) ? "16.0GT/s," : "", 3208 - (pptable->PcieLaneCount[i] == 1) ? "x1" : 3209 - (pptable->PcieLaneCount[i] == 2) ? "x2" : 3210 - (pptable->PcieLaneCount[i] == 3) ? "x4" : 3211 - (pptable->PcieLaneCount[i] == 4) ? "x8" : 3212 - (pptable->PcieLaneCount[i] == 5) ? "x12" : 3213 - (pptable->PcieLaneCount[i] == 6) ? "x16" : "", 3230 + (gen_speed == 0) ? "2.5GT/s," : 3231 + (gen_speed == 1) ? "5.0GT/s," : 3232 + (gen_speed == 2) ? "8.0GT/s," : 3233 + (gen_speed == 3) ? "16.0GT/s," : "", 3234 + (lane_width == 1) ? "x1" : 3235 + (lane_width == 2) ? "x2" : 3236 + (lane_width == 3) ? "x4" : 3237 + (lane_width == 4) ? "x8" : 3238 + (lane_width == 5) ? "x12" : 3239 + (lane_width == 6) ? "x16" : "", 3214 3240 pptable->LclkFreq[i], 3215 - (gen_speed == pptable->PcieGenSpeed[i]) && 3216 - (lane_width == pptable->PcieLaneCount[i]) ? 3241 + (current_gen_speed == gen_speed) && 3242 + (current_lane_width == lane_width) ? 
3217 3243 "*" : ""); 3244 + } 3218 3245 break; 3219 3246 3220 3247 case OD_SCLK: ··· 3277 3288 } 3278 3289 3279 3290 if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { 3280 - ret = vega20_get_memclocks(hwmgr, &clocks); 3281 - PP_ASSERT_WITH_CODE(!ret, 3282 - "Fail to get memory clk levels!", 3283 - return ret); 3284 - 3285 3291 size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n", 3286 - clocks.data[0].clocks_in_khz / 1000, 3292 + od8_settings[OD8_SETTING_UCLK_FMAX].min_value, 3287 3293 od8_settings[OD8_SETTING_UCLK_FMAX].max_value); 3288 3294 } 3289 3295 ··· 3340 3356 return ret; 3341 3357 } 3342 3358 3359 + static int vega20_set_fclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr) 3360 + { 3361 + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 3362 + struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.fclk_table); 3363 + int ret = 0; 3364 + 3365 + if (data->smu_features[GNLD_DPM_FCLK].enabled) { 3366 + PP_ASSERT_WITH_CODE(dpm_table->count > 0, 3367 + "[SetFclkToHightestDpmLevel] Dpm table has no entry!", 3368 + return -EINVAL); 3369 + PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_FCLK_DPM_LEVELS, 3370 + "[SetFclkToHightestDpmLevel] Dpm table has too many entries!", 3371 + return -EINVAL); 3372 + 3373 + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3374 + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, 3375 + PPSMC_MSG_SetSoftMinByFreq, 3376 + (PPCLK_FCLK << 16 ) | dpm_table->dpm_state.soft_min_level)), 3377 + "[SetFclkToHightestDpmLevel] Set soft min fclk failed!", 3378 + return ret); 3379 + } 3380 + 3381 + return ret; 3382 + } 3383 + 3343 3384 static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr) 3344 3385 { 3345 3386 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); ··· 3375 3366 3376 3367 ret = vega20_set_uclk_to_highest_dpm_level(hwmgr, 3377 3368 &data->dpm_table.mem_table); 3369 + if (ret) 3370 + return ret; 3378 3371 3379 - return ret; 3372 + return vega20_set_fclk_to_highest_dpm_level(hwmgr); 3380 3373 } 3381 3374 3382 3375 static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr) ··· 3472 3461 /* gfxclk */ 3473 3462 dpm_table = &(data->dpm_table.gfx_table); 3474 3463 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3475 - dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3464 + dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; 3476 3465 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; 3477 - dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3466 + dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; 3478 3467 3479 3468 if (PP_CAP(PHM_PlatformCaps_UMDPState)) { 3480 3469 if (VEGA20_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) { ··· 3496 3485 /* memclk */ 3497 3486 dpm_table = &(data->dpm_table.mem_table); 3498 3487 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3499 - dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3488 + dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; 3500 3489 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; 3501 - dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3490 + dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; 3502 3491 3503 3492 if (PP_CAP(PHM_PlatformCaps_UMDPState)) { 3504 3493 if 
(VEGA20_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) { ··· 3537 3526 if (hwmgr->display_config->nb_pstate_switch_disable) 3538 3527 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3539 3528 3529 + /* fclk */ 3530 + dpm_table = &(data->dpm_table.fclk_table); 3531 + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3532 + dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; 3533 + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; 3534 + dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; 3535 + if (hwmgr->display_config->nb_pstate_switch_disable) 3536 + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3537 + 3540 3538 /* vclk */ 3541 3539 dpm_table = &(data->dpm_table.vclk_table); 3542 3540 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3543 - dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3541 + dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; 3544 3542 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; 3545 - dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3543 + dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; 3546 3544 3547 3545 if (PP_CAP(PHM_PlatformCaps_UMDPState)) { 3548 3546 if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) { ··· 3568 3548 /* dclk */ 3569 3549 dpm_table = &(data->dpm_table.dclk_table); 3570 3550 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3571 - dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3551 + dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; 3572 3552 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; 3573 - dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3553 + dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; 3574 3554 3575 3555 if (PP_CAP(PHM_PlatformCaps_UMDPState)) { 3576 3556 if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) { ··· 3587 3567 /* socclk */ 3588 3568 dpm_table = &(data->dpm_table.soc_table); 3589 3569 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3590 - dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3570 + dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; 3591 3571 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; 3592 - dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3572 + dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; 3593 3573 3594 3574 if (PP_CAP(PHM_PlatformCaps_UMDPState)) { 3595 3575 if (VEGA20_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) { ··· 3606 3586 /* eclk */ 3607 3587 dpm_table = &(data->dpm_table.eclk_table); 3608 3588 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3609 - dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3589 + dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; 3610 3590 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; 3611 - dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3591 + dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; 3612 3592 3613 3593 if (PP_CAP(PHM_PlatformCaps_UMDPState)) { 3614 3594 if (VEGA20_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
+7
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
··· 42 42 #define AVFS_CURVE 0 43 43 #define OD8_HOTCURVE_TEMPERATURE 85 44 44 45 + #define VG20_CLOCK_MAX_DEFAULT 0xFFFF 46 + 45 47 typedef uint32_t PP_Clock; 46 48 47 49 enum { ··· 221 219 uint32_t eclock; 222 220 uint32_t dclock; 223 221 uint32_t vclock; 222 + uint32_t fclock; 224 223 }; 225 224 226 225 #define DPMTABLE_OD_UPDATE_SCLK 0x00000001 ··· 526 523 527 524 unsigned long metrics_time; 528 525 SmuMetrics_t metrics_table; 526 + 527 + bool pcie_parameters_override; 528 + uint32_t pcie_gen_level1; 529 + uint32_t pcie_width_level1; 529 530 }; 530 531 531 532 #define VEGA20_DPM2_NEAR_TDP_DEC 10
+17
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
··· 32 32 #include "cgs_common.h" 33 33 #include "vega20_pptable.h" 34 34 35 + #define VEGA20_FAN_TARGET_TEMPERATURE_OVERRIDE 105 36 + 35 37 static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable, 36 38 enum phm_platform_caps cap) 37 39 { ··· 800 798 return 0; 801 799 } 802 800 801 + static int override_powerplay_table_fantargettemperature(struct pp_hwmgr *hwmgr) 802 + { 803 + struct phm_ppt_v3_information *pptable_information = 804 + (struct phm_ppt_v3_information *)hwmgr->pptable; 805 + PPTable_t *ppsmc_pptable = (PPTable_t *)(pptable_information->smc_pptable); 806 + 807 + ppsmc_pptable->FanTargetTemperature = VEGA20_FAN_TARGET_TEMPERATURE_OVERRIDE; 808 + 809 + return 0; 810 + } 811 + 803 812 #define VEGA20_ENGINECLOCK_HARDMAX 198000 804 813 static int init_powerplay_table_information( 805 814 struct pp_hwmgr *hwmgr, ··· 900 887 901 888 902 889 result = append_vbios_pptable(hwmgr, (pptable_information->smc_pptable)); 890 + if (result) 891 + return result; 892 + 893 + result = override_powerplay_table_fantargettemperature(hwmgr); 903 894 904 895 return result; 905 896 }
+2
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
··· 2330 2330 case DRAM_LOG_BUFF_SIZE: 2331 2331 return offsetof(SMU74_SoftRegisters, DRAM_LOG_BUFF_SIZE); 2332 2332 } 2333 + break; 2333 2334 case SMU_Discrete_DpmTable: 2334 2335 switch (member) { 2335 2336 case UvdBootLevel: ··· 2340 2339 case LowSclkInterruptThreshold: 2341 2340 return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold); 2342 2341 } 2342 + break; 2343 2343 } 2344 2344 pr_warn("can't get the offset of type %x member %x\n", type, member); 2345 2345 return 0;
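The polaris10_smumgr fix above adds the `break` statements that keep the outer switch from falling through: when `member` matched no inner case, control fell out of the inner switch and straight into the next outer case's lookup. A cut-down illustration of that bug shape, with hypothetical enum and offset values:

```c
#include <stdio.h>

enum hyp_type { HYP_SOFT_REGS, HYP_DPM_TABLE };

/* Returns a fake offset for (type, member); 0 means "unknown". */
static unsigned int hyp_get_offset(enum hyp_type type, int member)
{
	switch (type) {
	case HYP_SOFT_REGS:
		switch (member) {
		case 1: return 0x10;
		case 2: return 0x14;
		}
		break;   /* without this, member 5 falls into HYP_DPM_TABLE below */
	case HYP_DPM_TABLE:
		switch (member) {
		case 5: return 0x40;
		}
		break;
	}
	return 0;
}

int main(void)
{
	/* With the break in place this prints 0x0 (member 5 is unknown for
	 * HYP_SOFT_REGS); without it, the call wrongly reports 0x40 from the
	 * HYP_DPM_TABLE case. */
	printf("0x%x\n", hyp_get_offset(HYP_SOFT_REGS, 5));
	return 0;
}
```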
+2 -4
drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
··· 40 40 struct amdgpu_device *adev = hwmgr->adev; 41 41 uint32_t mp1_fw_flags; 42 42 43 - WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2, 44 - (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff))); 45 - 46 - mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2); 43 + mp1_fw_flags = RREG32_PCIE(MP1_Public | 44 + (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); 47 45 48 46 if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) 49 47 return true;
+2 -4
drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
··· 49 49 struct amdgpu_device *adev = hwmgr->adev; 50 50 uint32_t mp1_fw_flags; 51 51 52 - WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2, 53 - (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff))); 54 - 55 - mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2); 52 + mp1_fw_flags = RREG32_PCIE(MP1_Public | 53 + (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); 56 54 57 55 if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> 58 56 MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
+26 -33
drivers/gpu/drm/drm_atomic_helper.c
··· 3039 3039 return 0; 3040 3040 } 3041 3041 3042 - static int __drm_atomic_helper_disable_all(struct drm_device *dev, 3043 - struct drm_modeset_acquire_ctx *ctx, 3044 - bool clean_old_fbs) 3042 + /** 3043 + * drm_atomic_helper_disable_all - disable all currently active outputs 3044 + * @dev: DRM device 3045 + * @ctx: lock acquisition context 3046 + * 3047 + * Loops through all connectors, finding those that aren't turned off and then 3048 + * turns them off by setting their DPMS mode to OFF and deactivating the CRTC 3049 + * that they are connected to. 3050 + * 3051 + * This is used for example in suspend/resume to disable all currently active 3052 + * functions when suspending. If you just want to shut down everything at e.g. 3053 + * driver unload, look at drm_atomic_helper_shutdown(). 3054 + * 3055 + * Note that if callers haven't already acquired all modeset locks this might 3056 + * return -EDEADLK, which must be handled by calling drm_modeset_backoff(). 3057 + * 3058 + * Returns: 3059 + * 0 on success or a negative error code on failure. 3060 + * 3061 + * See also: 3062 + * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and 3063 + * drm_atomic_helper_shutdown(). 3064 + */ 3065 + int drm_atomic_helper_disable_all(struct drm_device *dev, 3066 + struct drm_modeset_acquire_ctx *ctx) 3045 3067 { 3046 3068 struct drm_atomic_state *state; 3047 3069 struct drm_connector_state *conn_state; ··· 3121 3099 drm_atomic_state_put(state); 3122 3100 return ret; 3123 3101 } 3124 - 3125 - /** 3126 - * drm_atomic_helper_disable_all - disable all currently active outputs 3127 - * @dev: DRM device 3128 - * @ctx: lock acquisition context 3129 - * 3130 - * Loops through all connectors, finding those that aren't turned off and then 3131 - * turns them off by setting their DPMS mode to OFF and deactivating the CRTC 3132 - * that they are connected to. 3133 - * 3134 - * This is used for example in suspend/resume to disable all currently active 3135 - * functions when suspending. If you just want to shut down everything at e.g. 3136 - * driver unload, look at drm_atomic_helper_shutdown(). 3137 - * 3138 - * Note that if callers haven't already acquired all modeset locks this might 3139 - * return -EDEADLK, which must be handled by calling drm_modeset_backoff(). 3140 - * 3141 - * Returns: 3142 - * 0 on success or a negative error code on failure. 3143 - * 3144 - * See also: 3145 - * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and 3146 - * drm_atomic_helper_shutdown(). 3147 - */ 3148 - int drm_atomic_helper_disable_all(struct drm_device *dev, 3149 - struct drm_modeset_acquire_ctx *ctx) 3150 - { 3151 - return __drm_atomic_helper_disable_all(dev, ctx, false); 3152 - } 3153 3102 EXPORT_SYMBOL(drm_atomic_helper_disable_all); 3154 3103 3155 3104 /** ··· 3141 3148 3142 3149 DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret); 3143 3150 3144 - ret = __drm_atomic_helper_disable_all(dev, &ctx, true); 3151 + ret = drm_atomic_helper_disable_all(dev, &ctx); 3145 3152 if (ret) 3146 3153 DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret); 3147 3154
+3 -3
drivers/gpu/drm/drm_ioc32.c
··· 185 185 m32.size = map.size; 186 186 m32.type = map.type; 187 187 m32.flags = map.flags; 188 - m32.handle = ptr_to_compat(map.handle); 188 + m32.handle = ptr_to_compat((void __user *)map.handle); 189 189 m32.mtrr = map.mtrr; 190 190 if (copy_to_user(argp, &m32, sizeof(m32))) 191 191 return -EFAULT; ··· 216 216 217 217 m32.offset = map.offset; 218 218 m32.mtrr = map.mtrr; 219 - m32.handle = ptr_to_compat(map.handle); 219 + m32.handle = ptr_to_compat((void __user *)map.handle); 220 220 if (map.handle != compat_ptr(m32.handle)) 221 221 pr_err_ratelimited("compat_drm_addmap truncated handle %p for type %d offset %x\n", 222 222 map.handle, m32.type, m32.offset); ··· 526 526 if (err) 527 527 return err; 528 528 529 - req32.handle = ptr_to_compat(req.handle); 529 + req32.handle = ptr_to_compat((void __user *)req.handle); 530 530 if (copy_to_user(argp, &req32, sizeof(req32))) 531 531 return -EFAULT; 532 532
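The drm_ioc32 hunk above casts `map.handle` to `void __user *` because `ptr_to_compat()` takes a `__user`-annotated pointer; `__user` is a sparse address-space annotation that compiles away to nothing, so passing a plain kernel pointer changes no generated code but does trigger a static-analysis warning. A hedged sketch of the idea, with hypothetical struct and function names:

```c
#include <linux/compat.h>   /* ptr_to_compat(), compat_uptr_t */

struct hyp_map32 {
	compat_uptr_t handle;   /* 32-bit user-visible token */
};

/* The handle is stored internally as a plain void *, so it is cast back into
 * the __user address space before being narrowed to a compat pointer. */
static void hyp_fill_compat(struct hyp_map32 *m32, void *handle)
{
	m32->handle = ptr_to_compat((void __user *)handle);
}
```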
-1
drivers/gpu/drm/etnaviv/Kconfig
··· 2 2 config DRM_ETNAVIV 3 3 tristate "ETNAVIV (DRM support for Vivante GPU IP cores)" 4 4 depends on DRM 5 - depends on ARCH_MXC || ARCH_DOVE || (ARM && COMPILE_TEST) 6 5 depends on MMU 7 6 select SHMEM 8 7 select SYNC_FILE
-2
drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
··· 15 15 struct etnaviv_cmdbuf { 16 16 /* suballocator this cmdbuf is allocated from */ 17 17 struct etnaviv_cmdbuf_suballoc *suballoc; 18 - /* user context key, must be unique between all active users */ 19 - struct etnaviv_file_private *ctx; 20 18 /* cmdbuf properties */ 21 19 int suballoc_offset; 22 20 void *vaddr;
+1 -1
drivers/gpu/drm/etnaviv/etnaviv_dump.c
··· 215 215 mutex_lock(&obj->lock); 216 216 pages = etnaviv_gem_get_pages(obj); 217 217 mutex_unlock(&obj->lock); 218 - if (pages) { 218 + if (!IS_ERR(pages)) { 219 219 int j; 220 220 221 221 iter.hdr->data[0] = bomap - bomap_start;
+1
drivers/gpu/drm/etnaviv/etnaviv_gem.h
··· 95 95 struct etnaviv_gem_submit { 96 96 struct drm_sched_job sched_job; 97 97 struct kref refcount; 98 + struct etnaviv_file_private *ctx; 98 99 struct etnaviv_gpu *gpu; 99 100 struct dma_fence *out_fence, *in_fence; 100 101 int out_fence_id;
+1 -1
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
··· 15 15 int npages = obj->size >> PAGE_SHIFT; 16 16 17 17 if (WARN_ON(!etnaviv_obj->pages)) /* should have already pinned! */ 18 - return NULL; 18 + return ERR_PTR(-EINVAL); 19 19 20 20 return drm_prime_pages_to_sg(etnaviv_obj->pages, npages); 21 21 }
+1 -1
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
··· 506 506 if (ret) 507 507 goto err_submit_objects; 508 508 509 - submit->cmdbuf.ctx = file->driver_priv; 509 + submit->ctx = file->driver_priv; 510 510 submit->exec_state = args->exec_state; 511 511 submit->flags = args->flags; 512 512
+2 -2
drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
··· 320 320 domain = &etnaviv_domain->base; 321 321 322 322 domain->dev = gpu->dev; 323 - domain->base = 0; 324 - domain->size = (u64)SZ_1G * 4; 323 + domain->base = SZ_4K; 324 + domain->size = (u64)SZ_1G * 4 - SZ_4K; 325 325 domain->ops = &etnaviv_iommuv2_ops; 326 326 327 327 ret = etnaviv_iommuv2_init(etnaviv_domain);
+3 -3
drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
··· 113 113 .name = "PE", 114 114 .profile_read = VIVS_MC_PROFILE_PE_READ, 115 115 .profile_config = VIVS_MC_PROFILE_CONFIG0, 116 - .nr_signals = 5, 116 + .nr_signals = 4, 117 117 .signal = (const struct etnaviv_pm_signal[]) { 118 118 { 119 119 "PIXEL_COUNT_KILLED_BY_COLOR_PIPE", ··· 435 435 436 436 dom = meta->domains + signal->domain; 437 437 438 - if (signal->iter > dom->nr_signals) 438 + if (signal->iter >= dom->nr_signals) 439 439 return -EINVAL; 440 440 441 441 sig = &dom->signal[signal->iter]; ··· 461 461 462 462 dom = meta->domains + r->domain; 463 463 464 - if (r->signal > dom->nr_signals) 464 + if (r->signal >= dom->nr_signals) 465 465 return -EINVAL; 466 466 467 467 return 0;
+1 -1
drivers/gpu/drm/etnaviv/etnaviv_sched.c
··· 153 153 mutex_lock(&submit->gpu->fence_lock); 154 154 155 155 ret = drm_sched_job_init(&submit->sched_job, sched_entity, 156 - submit->cmdbuf.ctx); 156 + submit->ctx); 157 157 if (ret) 158 158 goto out_unlock; 159 159
+25 -11
drivers/gpu/drm/i915/i915_active.c
··· 163 163 struct i915_request *rq) 164 164 { 165 165 struct i915_active_request *active; 166 + int err = 0; 167 + 168 + /* Prevent reaping in case we malloc/wait while building the tree */ 169 + i915_active_acquire(ref); 166 170 167 171 active = active_instance(ref, timeline); 168 - if (IS_ERR(active)) 169 - return PTR_ERR(active); 172 + if (IS_ERR(active)) { 173 + err = PTR_ERR(active); 174 + goto out; 175 + } 170 176 171 177 if (!i915_active_request_isset(active)) 172 178 ref->count++; 173 179 __i915_active_request_set(active, rq); 174 180 175 181 GEM_BUG_ON(!ref->count); 176 - return 0; 182 + out: 183 + i915_active_release(ref); 184 + return err; 177 185 } 178 186 179 187 bool i915_active_acquire(struct i915_active *ref) ··· 231 223 int i915_request_await_active(struct i915_request *rq, struct i915_active *ref) 232 224 { 233 225 struct active_node *it, *n; 234 - int ret; 226 + int err = 0; 235 227 236 - ret = i915_request_await_active_request(rq, &ref->last); 237 - if (ret) 238 - return ret; 228 + /* await allocates and so we need to avoid hitting the shrinker */ 229 + if (i915_active_acquire(ref)) 230 + goto out; /* was idle */ 231 + 232 + err = i915_request_await_active_request(rq, &ref->last); 233 + if (err) 234 + goto out; 239 235 240 236 rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { 241 - ret = i915_request_await_active_request(rq, &it->base); 242 - if (ret) 243 - return ret; 237 + err = i915_request_await_active_request(rq, &it->base); 238 + if (err) 239 + goto out; 244 240 } 245 241 246 - return 0; 242 + out: 243 + i915_active_release(ref); 244 + return err; 247 245 } 248 246 249 247 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+1 -34
drivers/gpu/drm/i915/i915_drv.c
··· 757 757 return ret; 758 758 } 759 759 760 - #if !defined(CONFIG_VGA_CONSOLE) 761 - static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) 762 - { 763 - return 0; 764 - } 765 - #elif !defined(CONFIG_DUMMY_CONSOLE) 766 - static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) 767 - { 768 - return -ENODEV; 769 - } 770 - #else 771 - static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) 772 - { 773 - int ret = 0; 774 - 775 - DRM_INFO("Replacing VGA console driver\n"); 776 - 777 - console_lock(); 778 - if (con_is_bound(&vga_con)) 779 - ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1); 780 - if (ret == 0) { 781 - ret = do_unregister_con_driver(&vga_con); 782 - 783 - /* Ignore "already unregistered". */ 784 - if (ret == -ENODEV) 785 - ret = 0; 786 - } 787 - console_unlock(); 788 - 789 - return ret; 790 - } 791 - #endif 792 - 793 760 static void intel_init_dpio(struct drm_i915_private *dev_priv) 794 761 { 795 762 /* ··· 1387 1420 goto err_ggtt; 1388 1421 } 1389 1422 1390 - ret = i915_kick_out_vgacon(dev_priv); 1423 + ret = vga_remove_vgacon(pdev); 1391 1424 if (ret) { 1392 1425 DRM_ERROR("failed to remove conflicting VGA console\n"); 1393 1426 goto err_ggtt;
+2 -1
drivers/gpu/drm/i915/i915_gem.c
··· 1688 1688 if (vma->vm_file != filp) 1689 1689 return false; 1690 1690 1691 - return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size; 1691 + return vma->vm_start == addr && 1692 + (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size); 1692 1693 } 1693 1694 1694 1695 /**
+17 -10
drivers/gpu/drm/i915/i915_scheduler.c
··· 223 223 return &p->requests[idx]; 224 224 } 225 225 226 + struct sched_cache { 227 + struct list_head *priolist; 228 + }; 229 + 226 230 static struct intel_engine_cs * 227 - sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked) 231 + sched_lock_engine(const struct i915_sched_node *node, 232 + struct intel_engine_cs *locked, 233 + struct sched_cache *cache) 228 234 { 229 235 struct intel_engine_cs *engine = node_to_request(node)->engine; 230 236 ··· 238 232 239 233 if (engine != locked) { 240 234 spin_unlock(&locked->timeline.lock); 235 + memset(cache, 0, sizeof(*cache)); 241 236 spin_lock(&engine->timeline.lock); 242 237 } 243 238 ··· 260 253 static void __i915_schedule(struct i915_request *rq, 261 254 const struct i915_sched_attr *attr) 262 255 { 263 - struct list_head *uninitialized_var(pl); 264 - struct intel_engine_cs *engine, *last; 256 + struct intel_engine_cs *engine; 265 257 struct i915_dependency *dep, *p; 266 258 struct i915_dependency stack; 267 259 const int prio = attr->priority; 260 + struct sched_cache cache; 268 261 LIST_HEAD(dfs); 269 262 270 263 /* Needed in order to use the temporary link inside i915_dependency */ ··· 335 328 __list_del_entry(&stack.dfs_link); 336 329 } 337 330 338 - last = NULL; 331 + memset(&cache, 0, sizeof(cache)); 339 332 engine = rq->engine; 340 333 spin_lock_irq(&engine->timeline.lock); 341 334 ··· 345 338 346 339 INIT_LIST_HEAD(&dep->dfs_link); 347 340 348 - engine = sched_lock_engine(node, engine); 341 + engine = sched_lock_engine(node, engine, &cache); 349 342 lockdep_assert_held(&engine->timeline.lock); 350 343 351 344 /* Recheck after acquiring the engine->timeline.lock */ ··· 354 347 355 348 node->attr.priority = prio; 356 349 if (!list_empty(&node->link)) { 357 - if (last != engine) { 358 - pl = i915_sched_lookup_priolist(engine, prio); 359 - last = engine; 360 - } 361 - list_move_tail(&node->link, pl); 350 + if (!cache.priolist) 351 + cache.priolist = 352 + i915_sched_lookup_priolist(engine, 353 + prio); 354 + list_move_tail(&node->link, cache.priolist); 362 355 } else { 363 356 /* 364 357 * If the request is not in the priolist queue because
+8 -10
drivers/gpu/drm/i915/intel_breadcrumbs.c
··· 106 106 107 107 GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL, 108 108 &rq->fence.flags)); 109 - clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); 110 - 111 - /* 112 - * We may race with direct invocation of 113 - * dma_fence_signal(), e.g. i915_request_retire(), 114 - * in which case we can skip processing it ourselves. 115 - */ 116 - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 117 - &rq->fence.flags)) 118 - continue; 119 109 120 110 /* 121 111 * Queue for execution after dropping the signaling ··· 113 123 * more signalers to the same context or engine. 114 124 */ 115 125 i915_request_get(rq); 126 + 127 + /* 128 + * We may race with direct invocation of 129 + * dma_fence_signal(), e.g. i915_request_retire(), 130 + * so we need to acquire our reference to the request 131 + * before we cancel the breadcrumb. 132 + */ 133 + clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); 116 134 list_add_tail(&rq->signal_link, &signal); 117 135 } 118 136
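The reordering closes a use-after-free window: once the SIGNAL bit is cleared, a concurrent dma_fence_signal()/retire may drop the last reference, so the breadcrumb code must take its own reference first. The general shape of the fix, as a sketch with hypothetical names:

/* Sketch: own a reference before withdrawing the object from the state
 * other CPUs use to find (and potentially free) it, then queue it for
 * processing after the locks are dropped. */
#include <linux/bitops.h>
#include <linux/kref.h>
#include <linux/list.h>

struct example_obj {
    struct kref ref;
    unsigned long flags;               /* bit 0: published for signaling */
    struct list_head link;
};

static void example_claim(struct example_obj *obj, struct list_head *out)
{
    kref_get(&obj->ref);               /* 1: cannot disappear under us now */
    clear_bit(0, &obj->flags);         /* 2: only then unpublish           */
    list_add_tail(&obj->link, out);    /* 3: handle later, at leisure      */
}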
+8 -6
drivers/gpu/drm/i915/intel_ddi.c
··· 3568 3568 { 3569 3569 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 3570 3570 intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state); 3571 + 3572 + if (conn_state->content_protection == 3573 + DRM_MODE_CONTENT_PROTECTION_DESIRED) 3574 + intel_hdcp_enable(to_intel_connector(conn_state->connector)); 3575 + else if (conn_state->content_protection == 3576 + DRM_MODE_CONTENT_PROTECTION_UNDESIRED) 3577 + intel_hdcp_disable(to_intel_connector(conn_state->connector)); 3571 3578 } 3572 3579 3573 3580 static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder, ··· 3969 3962 goto out; 3970 3963 3971 3964 ret = drm_atomic_commit(state); 3972 - if (ret) 3973 - goto out; 3974 - 3975 - return 0; 3976 - 3977 - out: 3965 + out: 3978 3966 drm_atomic_state_put(state); 3979 3967 3980 3968 return ret;
+36 -37
drivers/gpu/drm/i915/selftests/i915_gem_context.c
··· 710 710 unsigned int flags, 711 711 struct i915_gem_context *ctx, 712 712 struct intel_engine_cs *engine, 713 - struct igt_spinner **spin_out) 713 + struct igt_spinner **spin) 714 714 { 715 - int ret = 0; 715 + struct i915_request *rq; 716 + int ret; 716 717 717 - if (flags & (TEST_BUSY | TEST_RESET)) { 718 - struct igt_spinner *spin; 719 - struct i915_request *rq; 718 + *spin = NULL; 719 + if (!(flags & (TEST_BUSY | TEST_RESET))) 720 + return 0; 720 721 721 - spin = kzalloc(sizeof(*spin), GFP_KERNEL); 722 - if (!spin) { 723 - ret = -ENOMEM; 724 - goto out; 725 - } 722 + *spin = kzalloc(sizeof(**spin), GFP_KERNEL); 723 + if (!*spin) 724 + return -ENOMEM; 726 725 727 - ret = igt_spinner_init(spin, i915); 728 - if (ret) 729 - return ret; 726 + ret = igt_spinner_init(*spin, i915); 727 + if (ret) 728 + goto err_free; 730 729 731 - rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP); 732 - if (IS_ERR(rq)) { 733 - ret = PTR_ERR(rq); 734 - igt_spinner_fini(spin); 735 - kfree(spin); 736 - goto out; 737 - } 738 - 739 - i915_request_add(rq); 740 - 741 - if (!igt_wait_for_spinner(spin, rq)) { 742 - pr_err("%s: Spinner failed to start!\n", name); 743 - igt_spinner_end(spin); 744 - igt_spinner_fini(spin); 745 - kfree(spin); 746 - ret = -ETIMEDOUT; 747 - goto out; 748 - } 749 - 750 - *spin_out = spin; 730 + rq = igt_spinner_create_request(*spin, ctx, engine, MI_NOOP); 731 + if (IS_ERR(rq)) { 732 + ret = PTR_ERR(rq); 733 + goto err_fini; 751 734 } 752 735 753 - out: 736 + i915_request_add(rq); 737 + 738 + if (!igt_wait_for_spinner(*spin, rq)) { 739 + pr_err("%s: Spinner failed to start!\n", name); 740 + ret = -ETIMEDOUT; 741 + goto err_end; 742 + } 743 + 744 + return 0; 745 + 746 + err_end: 747 + igt_spinner_end(*spin); 748 + err_fini: 749 + igt_spinner_fini(*spin); 750 + err_free: 751 + kfree(fetch_and_zero(spin)); 754 752 return ret; 755 753 } 756 754 ··· 895 897 896 898 ret = __sseu_prepare(i915, name, flags, ctx, engine, &spin); 897 899 if (ret) 898 - goto out; 900 + goto out_context; 899 901 900 902 ret = __i915_gem_context_reconfigure_sseu(ctx, engine, sseu); 901 903 if (ret) 902 - goto out; 904 + goto out_spin; 903 905 904 906 ret = __sseu_finish(i915, name, flags, ctx, kctx, engine, obj, 905 907 hweight32(sseu.slice_mask), spin); 906 908 907 - out: 909 + out_spin: 908 910 if (spin) { 909 911 igt_spinner_end(spin); 910 912 igt_spinner_fini(spin); 911 913 kfree(spin); 912 914 } 913 915 916 + out_context: 914 917 kernel_context_close(kctx); 915 918 916 919 return ret;
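The rewrite replaces the single out: label with the usual goto ladder, so each failure undoes exactly what has been set up so far and the spinner pointer handed back to the caller is never left dangling. The idiom in miniature, with made-up names:

/* Sketch of the unwind ladder: labels undo the setup steps in reverse
 * order, and the out-parameter is cleared again on failure. */
#include <linux/slab.h>

struct example_ctx { bool started; };

static inline int example_init(struct example_ctx *ctx) { return 0; }
static inline int example_start(struct example_ctx *ctx) { ctx->started = true; return 0; }
static inline void example_fini(struct example_ctx *ctx) { }

static int example_prepare(struct example_ctx **out)
{
    int ret;

    *out = kzalloc(sizeof(**out), GFP_KERNEL);
    if (!*out)
        return -ENOMEM;

    ret = example_init(*out);
    if (ret)
        goto err_free;

    ret = example_start(*out);
    if (ret)
        goto err_fini;

    return 0;

err_fini:
    example_fini(*out);
err_free:
    kfree(*out);
    *out = NULL;
    return ret;
}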
+4 -1
drivers/gpu/drm/qxl/qxl_drv.c
··· 79 79 if (ret) 80 80 goto free_dev; 81 81 82 + ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "qxl"); 83 + if (ret) 84 + goto disable_pci; 85 + 82 86 ret = qxl_device_init(qdev, &qxl_driver, pdev); 83 87 if (ret) 84 88 goto disable_pci; ··· 98 94 if (ret) 99 95 goto modeset_cleanup; 100 96 101 - drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "qxl"); 102 97 drm_fbdev_generic_setup(&qdev->ddev, 32); 103 98 return 0; 104 99
+49
drivers/gpu/vga/vgaarb.c
··· 48 48 #include <linux/miscdevice.h>
49 49 #include <linux/slab.h>
50 50 #include <linux/screen_info.h>
51 + #include <linux/vt.h>
52 + #include <linux/console.h>
51 53 
52 54 #include <linux/uaccess.h>
53 55 
··· 169 167 pci_dev_put(vga_default);
170 168 vga_default = pci_dev_get(pdev);
171 169 }
170 + 
171 + /**
172 + * vga_remove_vgacon - deactivate vga console
173 + *
174 + * Unbind and unregister vgacon in case pdev is the default vga
175 + * device. Can be called by gpu drivers on initialization to make
176 + * sure vga register access done by vgacon will not disturb the
177 + * device.
178 + *
179 + * @pdev: pci device.
180 + */
181 + #if !defined(CONFIG_VGA_CONSOLE)
182 + int vga_remove_vgacon(struct pci_dev *pdev)
183 + {
184 + return 0;
185 + }
186 + #elif !defined(CONFIG_DUMMY_CONSOLE)
187 + int vga_remove_vgacon(struct pci_dev *pdev)
188 + {
189 + return -ENODEV;
190 + }
191 + #else
192 + int vga_remove_vgacon(struct pci_dev *pdev)
193 + {
194 + int ret = 0;
195 + 
196 + if (pdev != vga_default)
197 + return 0;
198 + vgaarb_info(&pdev->dev, "deactivate vga console\n");
199 + 
200 + console_lock();
201 + if (con_is_bound(&vga_con))
202 + ret = do_take_over_console(&dummy_con, 0,
203 + MAX_NR_CONSOLES - 1, 1);
204 + if (ret == 0) {
205 + ret = do_unregister_con_driver(&vga_con);
206 + 
207 + /* Ignore "already unregistered". */
208 + if (ret == -ENODEV)
209 + ret = 0;
210 + }
211 + console_unlock();
212 + 
213 + return ret;
214 + }
215 + #endif
216 + EXPORT_SYMBOL(vga_remove_vgacon);
172 217 
173 218 static inline void vga_irq_set_state(struct vga_device *vgadev, bool state)
174 219 {
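Stubbing the helper out per configuration keeps callers free of #ifdefs: without a VGA console there is trivially nothing to unbind, and without the dummy console there is nothing to hand the VT layer over to. The same pattern reduced to a sketch around a hypothetical helper (example_unbind_console() and CONFIG_EXAMPLE_* are not real symbols):

/* Sketch of the Kconfig stub pattern: one real implementation plus cheap
 * fallbacks, so every caller may invoke the function unconditionally. */
#include <linux/errno.h>

struct pci_dev;

#if !defined(CONFIG_EXAMPLE_CONSOLE)
static inline int example_unbind_console(struct pci_dev *pdev)
{
    return 0;                   /* feature absent: trivially "done" */
}
#elif !defined(CONFIG_EXAMPLE_FALLBACK_CONSOLE)
static inline int example_unbind_console(struct pci_dev *pdev)
{
    return -ENODEV;             /* nothing to take over the VT with */
}
#else
int example_unbind_console(struct pci_dev *pdev);   /* real version in a .c file */
#endif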
+11 -3
include/drm/drm_fb_helper.h
··· 36 36 #include <drm/drm_crtc.h> 37 37 #include <drm/drm_device.h> 38 38 #include <linux/kgdb.h> 39 + #include <linux/vgaarb.h> 39 40 40 41 enum mode_set_atomic { 41 42 LEAVE_ATOMIC_MODE_SET, ··· 643 642 int resource_id, 644 643 const char *name) 645 644 { 645 + int ret = 0; 646 + 647 + /* 648 + * WARNING: Apparently we must kick fbdev drivers before vgacon, 649 + * otherwise the vga fbdev driver falls over. 650 + */ 646 651 #if IS_REACHABLE(CONFIG_FB) 647 - return remove_conflicting_pci_framebuffers(pdev, resource_id, name); 648 - #else 649 - return 0; 652 + ret = remove_conflicting_pci_framebuffers(pdev, resource_id, name); 650 653 #endif 654 + if (ret == 0) 655 + ret = vga_remove_vgacon(pdev); 656 + return ret; 651 657 } 652 658 653 659 #endif
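With vga_remove_vgacon() folded into drm_fb_helper_remove_conflicting_pci_framebuffers(), a PCI DRM driver only needs the one early call, as the qxl hunk above now does. A minimal probe sketch; every name except the helper itself is illustrative:

/* Sketch: evict firmware fbdev drivers and vgacon before initializing
 * the device, so nothing else pokes at the hardware from here on. */
#include <linux/pci.h>
#include <drm/drm_fb_helper.h>

static int example_pci_probe(struct pci_dev *pdev,
                             const struct pci_device_id *ent)
{
    int ret;

    ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0,
                                                            "exampledrm");
    if (ret)
        return ret;

    ret = pci_enable_device(pdev);
    if (ret)
        return ret;

    /* ... device and modeset initialization would follow ... */
    return 0;
}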
+2
include/linux/vgaarb.h
··· 125 125 #ifdef CONFIG_VGA_ARB 126 126 extern struct pci_dev *vga_default_device(void); 127 127 extern void vga_set_default_device(struct pci_dev *pdev); 128 + extern int vga_remove_vgacon(struct pci_dev *pdev); 128 129 #else 129 130 static inline struct pci_dev *vga_default_device(void) { return NULL; }; 130 131 static inline void vga_set_default_device(struct pci_dev *pdev) { }; 132 + static inline int vga_remove_vgacon(struct pci_dev *pdev) { return 0; }; 131 133 #endif 132 134 133 135 /*