Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: support dpm level modification under virtualization v3

Under Vega10 virtualization, the SMU IP block will not be added.
Therefore, we need to add pp clk query and force dpm level functions
to amdgpu_virt_ops to support the feature.

v2: add get_pp_clk existence check and use kzalloc to allocate buf

v3: return -ENOMEM for allocation failure and correct the coding style

Signed-off-by: Yintian Tao <yttao@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Yintian Tao and committed by
Alex Deucher
bb5a2bdf b0960c35

+165
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 2471 2471 mutex_init(&adev->virt.vf_errors.lock); 2472 2472 hash_init(adev->mn_hash); 2473 2473 mutex_init(&adev->lock_reset); 2474 + mutex_init(&adev->virt.dpm_mutex); 2474 2475 2475 2476 amdgpu_device_check_arguments(adev); 2476 2477
+4
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
··· 696 696 if (adev->pm.dpm_enabled) { 697 697 dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10; 698 698 dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10; 699 + } else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) && 700 + adev->virt.ops->get_pp_clk) { 701 + dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10; 702 + dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10; 699 703 } else { 700 704 dev_info.max_engine_clock = adev->clock.default_sclk * 10; 701 705 dev_info.max_memory_clock = adev->clock.default_mclk * 10;
+16
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
··· 327 327 goto fail; 328 328 } 329 329 330 + if (amdgpu_sriov_vf(adev)) { 331 + if (amdgim_is_hwperf(adev) && 332 + adev->virt.ops->force_dpm_level) { 333 + mutex_lock(&adev->pm.mutex); 334 + adev->virt.ops->force_dpm_level(adev, level); 335 + mutex_unlock(&adev->pm.mutex); 336 + return count; 337 + } else { 338 + return -EINVAL; 339 + } 340 + } 341 + 330 342 if (current_level == level) 331 343 return count; 332 344 ··· 801 789 { 802 790 struct drm_device *ddev = dev_get_drvdata(dev); 803 791 struct amdgpu_device *adev = ddev->dev_private; 792 + 793 + if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) && 794 + adev->virt.ops->get_pp_clk) 795 + return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf); 804 796 805 797 if (is_support_sw_smu(adev)) 806 798 return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
+49
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
··· 375 375 } 376 376 } 377 377 378 + static uint32_t parse_clk(char *buf, bool min) 379 + { 380 + char *ptr = buf; 381 + uint32_t clk = 0; 382 + 383 + do { 384 + ptr = strchr(ptr, ':'); 385 + if (!ptr) 386 + break; 387 + ptr+=2; 388 + clk = simple_strtoul(ptr, NULL, 10); 389 + } while (!min); 390 + 391 + return clk * 100; 392 + } 393 + 394 + uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest) 395 + { 396 + char *buf = NULL; 397 + uint32_t clk = 0; 398 + 399 + buf = kzalloc(PAGE_SIZE, GFP_KERNEL); 400 + if (!buf) 401 + return -ENOMEM; 402 + 403 + adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf); 404 + clk = parse_clk(buf, lowest); 405 + 406 + kfree(buf); 407 + 408 + return clk; 409 + } 410 + 411 + uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest) 412 + { 413 + char *buf = NULL; 414 + uint32_t clk = 0; 415 + 416 + buf = kzalloc(PAGE_SIZE, GFP_KERNEL); 417 + if (!buf) 418 + return -ENOMEM; 419 + 420 + adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf); 421 + clk = parse_clk(buf, lowest); 422 + 423 + kfree(buf); 424 + 425 + return clk; 426 + } 378 427
+11
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
··· 57 57 int (*reset_gpu)(struct amdgpu_device *adev); 58 58 int (*wait_reset)(struct amdgpu_device *adev); 59 59 void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3); 60 + int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf); 61 + int (*force_dpm_level)(struct amdgpu_device *adev, u32 level); 60 62 }; 61 63 62 64 /* ··· 85 83 AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2, 86 84 /* VRAM LOST by GIM */ 87 85 AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4, 86 + /* HW PERF SIM in GIM */ 87 + AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3), 88 88 }; 89 89 90 90 struct amd_sriov_msg_pf2vf_info_header { ··· 256 252 struct amdgpu_vf_error_buffer vf_errors; 257 253 struct amdgpu_virt_fw_reserve fw_reserve; 258 254 uint32_t gim_feature; 255 + /* protect DPM events to GIM */ 256 + struct mutex dpm_mutex; 259 257 }; 260 258 261 259 #define amdgpu_sriov_enabled(adev) \ ··· 284 278 #endif 285 279 } 286 280 281 + #define amdgim_is_hwperf(adev) \ 282 + ((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION) 283 + 287 284 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); 288 285 void amdgpu_virt_init_setting(struct amdgpu_device *adev); 289 286 uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); ··· 304 295 unsigned int key, 305 296 unsigned int chksum); 306 297 void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev); 298 + uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest); 299 + uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest); 307 300 308 301 #endif
+78
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
··· 157 157 xgpu_ai_mailbox_set_valid(adev, false); 158 158 } 159 159 160 + static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, char *buf) 161 + { 162 + int r = 0; 163 + u32 req, val, size; 164 + 165 + if (!amdgim_is_hwperf(adev) || buf == NULL) 166 + return -EBADRQC; 167 + 168 + switch(type) { 169 + case PP_SCLK: 170 + req = IDH_IRQ_GET_PP_SCLK; 171 + break; 172 + case PP_MCLK: 173 + req = IDH_IRQ_GET_PP_MCLK; 174 + break; 175 + default: 176 + return -EBADRQC; 177 + } 178 + 179 + mutex_lock(&adev->virt.dpm_mutex); 180 + 181 + xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0); 182 + 183 + r = xgpu_ai_poll_msg(adev, IDH_SUCCESS); 184 + if (!r && adev->fw_vram_usage.va != NULL) { 185 + val = RREG32_NO_KIQ( 186 + SOC15_REG_OFFSET(NBIO, 0, 187 + mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1)); 188 + size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) + 189 + val), PAGE_SIZE); 190 + 191 + if (size < PAGE_SIZE) 192 + strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val)); 193 + else 194 + size = 0; 195 + 196 + r = size; 197 + goto out; 198 + } 199 + 200 + r = xgpu_ai_poll_msg(adev, IDH_FAIL); 201 + if(r) 202 + pr_info("%s DPM request failed", 203 + (type == PP_SCLK)? 
"SCLK" : "MCLK"); 204 + 205 + out: 206 + mutex_unlock(&adev->virt.dpm_mutex); 207 + return r; 208 + } 209 + 210 + static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 level) 211 + { 212 + int r = 0; 213 + u32 req = IDH_IRQ_FORCE_DPM_LEVEL; 214 + 215 + if (!amdgim_is_hwperf(adev)) 216 + return -EBADRQC; 217 + 218 + mutex_lock(&adev->virt.dpm_mutex); 219 + xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0); 220 + 221 + r = xgpu_ai_poll_msg(adev, IDH_SUCCESS); 222 + if (!r) 223 + goto out; 224 + 225 + r = xgpu_ai_poll_msg(adev, IDH_FAIL); 226 + if (!r) 227 + pr_info("DPM request failed"); 228 + else 229 + pr_info("Mailbox is broken"); 230 + 231 + out: 232 + mutex_unlock(&adev->virt.dpm_mutex); 233 + return r; 234 + } 235 + 160 236 static int xgpu_ai_send_access_requests(struct amdgpu_device *adev, 161 237 enum idh_request req) 162 238 { ··· 451 375 .reset_gpu = xgpu_ai_request_reset, 452 376 .wait_reset = NULL, 453 377 .trans_msg = xgpu_ai_mailbox_trans_msg, 378 + .get_pp_clk = xgpu_ai_get_pp_clk, 379 + .force_dpm_level = xgpu_ai_force_dpm_level, 454 380 };
+6
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
··· 35 35 IDH_REL_GPU_FINI_ACCESS, 36 36 IDH_REQ_GPU_RESET_ACCESS, 37 37 38 + IDH_IRQ_FORCE_DPM_LEVEL = 10, 39 + IDH_IRQ_GET_PP_SCLK, 40 + IDH_IRQ_GET_PP_MCLK, 41 + 38 42 IDH_LOG_VF_ERROR = 200, 39 43 }; 40 44 ··· 47 43 IDH_READY_TO_ACCESS_GPU, 48 44 IDH_FLR_NOTIFICATION, 49 45 IDH_FLR_NOTIFICATION_CMPL, 46 + IDH_SUCCESS, 47 + IDH_FAIL, 50 48 IDH_EVENT_MAX 51 49 }; 52 50