+14 -35  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
···
         smu->watermarks_bitmap = 0;
         smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
         smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
-        smu->user_dpm_profile.user_workload_mask = 0;

         atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
         atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
         atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
         atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);

-        smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
-        smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
-        smu->workload_priority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
-        smu->workload_priority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
-        smu->workload_priority[PP_SMC_POWER_PROFILE_VR] = 4;
-        smu->workload_priority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
-        smu->workload_priority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
+        smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
+        smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
+        smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
+        smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
+        smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
+        smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
+        smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

         if (smu->is_apu ||
-            !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D)) {
-                smu->driver_workload_mask =
-                        1 << smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
-        } else {
-                smu->driver_workload_mask =
-                        1 << smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
-                smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
-        }
+            !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
+                smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
+        else
+                smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];

-        smu->workload_mask = smu->driver_workload_mask |
-                             smu->user_dpm_profile.user_workload_mask;
         smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
         smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
         smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
···
                 return -EINVAL;

         if (!en) {
-                smu->driver_workload_mask &= ~(1 << smu->workload_priority[type]);
+                smu->workload_mask &= ~(1 << smu->workload_prority[type]);
                 index = fls(smu->workload_mask);
                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
                 workload[0] = smu->workload_setting[index];
         } else {
-                smu->driver_workload_mask |= (1 << smu->workload_priority[type]);
+                smu->workload_mask |= (1 << smu->workload_prority[type]);
                 index = fls(smu->workload_mask);
                 index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
                 workload[0] = smu->workload_setting[index];
         }
-
-        smu->workload_mask = smu->driver_workload_mask |
-                             smu->user_dpm_profile.user_workload_mask;

         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
             smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
···
                                          uint32_t param_size)
 {
         struct smu_context *smu = handle;
-        int ret;

         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
             !smu->ppt_funcs->set_power_profile_mode)
                 return -EOPNOTSUPP;

-        if (smu->user_dpm_profile.user_workload_mask &
-           (1 << smu->workload_priority[param[param_size]]))
-                return 0;
-
-        smu->user_dpm_profile.user_workload_mask =
-                (1 << smu->workload_priority[param[param_size]]);
-        smu->workload_mask = smu->user_dpm_profile.user_workload_mask |
-                             smu->driver_workload_mask;
-        ret = smu_bump_power_profile_mode(smu, param, param_size);
-
-        return ret;
+        return smu_bump_power_profile_mode(smu, param, param_size);
 }

 static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
+1 -3  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
···
         /* user clock state information */
         uint32_t clk_mask[SMU_CLK_COUNT];
         uint32_t clk_dependency;
-        uint32_t user_workload_mask;
 };

 #define SMU_TABLE_INIT(tables, table_id, s, a, d)        \
···
         bool disable_uclk_switch;

         uint32_t workload_mask;
-        uint32_t driver_workload_mask;
-        uint32_t workload_priority[WORKLOAD_POLICY_MAX];
+        uint32_t workload_prority[WORKLOAD_POLICY_MAX];
         uint32_t workload_setting[WORKLOAD_POLICY_MAX];
         uint32_t power_profile_mode;
         uint32_t default_power_profile_mode;
+3 -2  drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
···
                 return -EINVAL;
         }

+
         if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) &&
             (smu->smc_fw_version >= 0x360d00)) {
                 if (size != 10)
···

         ret = smu_cmn_send_smc_msg_with_param(smu,
                                               SMU_MSG_SetWorkloadMask,
-                                              smu->workload_mask,
+                                              1 << workload_type,
                                               NULL);
         if (ret) {
                 dev_err(smu->adev->dev, "Fail to set workload type %d\n", workload_type);
                 return ret;
         }

-        smu_cmn_assign_power_profile(smu);
+        smu->power_profile_mode = profile_mode;

         return 0;
 }
+1 -4  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
···
                                                        smu->power_profile_mode);
         if (workload_type < 0)
                 return -EINVAL;
-
         ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
-                                              smu->workload_mask, NULL);
+                                              1 << workload_type, NULL);
         if (ret)
                 dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
-        else
-                smu_cmn_assign_power_profile(smu);

         return ret;
 }
+2 -2  drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
···
         }

         ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
-                                              smu->workload_mask,
+                                              1 << workload_type,
                                               NULL);
         if (ret) {
                 dev_err_once(smu->adev->dev, "Fail to set workload type %d\n",
···
                 return ret;
         }

-        smu_cmn_assign_power_profile(smu);
+        smu->power_profile_mode = profile_mode;

         return 0;
 }
+2 -2  drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
···
         }

         ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
-                                              smu->workload_mask,
+                                              1 << workload_type,
                                               NULL);
         if (ret) {
                 dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", workload_type);
                 return ret;
         }

-        smu_cmn_assign_power_profile(smu);
+        smu->power_profile_mode = profile_mode;

         return 0;
 }
+5 -15  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
···
         DpmActivityMonitorCoeffInt_t *activity_monitor =
                 &(activity_monitor_external.DpmActivityMonitorCoeffInt);
         int workload_type, ret = 0;
-        u32 workload_mask;
+        u32 workload_mask, selected_workload_mask;

         smu->power_profile_mode = input[size];

···
         if (workload_type < 0)
                 return -EINVAL;

-        workload_mask = 1 << workload_type;
+        selected_workload_mask = workload_mask = 1 << workload_type;

         /* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */
         if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
···
                 workload_mask |= 1 << workload_type;
         }

-        smu->workload_mask |= workload_mask;
         ret = smu_cmn_send_smc_msg_with_param(smu,
                                               SMU_MSG_SetWorkloadMask,
-                                              smu->workload_mask,
+                                              workload_mask,
                                               NULL);
-        if (!ret) {
-                smu_cmn_assign_power_profile(smu);
-                if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) {
-                        workload_type = smu_cmn_to_asic_specific_index(smu,
-                                                               CMN2ASIC_MAPPING_WORKLOAD,
-                                                               PP_SMC_POWER_PROFILE_FULLSCREEN3D);
-                        smu->power_profile_mode = smu->workload_mask & (1 << workload_type)
-                                                        ? PP_SMC_POWER_PROFILE_FULLSCREEN3D
-                                                        : PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
-                }
-        }
+        if (!ret)
+                smu->workload_mask = selected_workload_mask;

         return ret;
 }
+2 -3  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
···
                                                        smu->power_profile_mode);
         if (workload_type < 0)
                 return -EINVAL;
-
         ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
-                                              smu->workload_mask, NULL);
+                                              1 << workload_type, NULL);

         if (ret)
                 dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
         else
-                smu_cmn_assign_power_profile(smu);
+                smu->workload_mask = (1 << workload_type);

         return ret;
 }
+5 -4  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
···
         if (workload_type < 0)
                 return -EINVAL;

-        ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
-                                              smu->workload_mask, NULL);
-
+        ret = smu_cmn_send_smc_msg_with_param(smu,
+                                              SMU_MSG_SetWorkloadMask,
+                                              1 << workload_type,
+                                              NULL);
         if (!ret)
-                smu_cmn_assign_power_profile(smu);
+                smu->workload_mask = 1 << workload_type;

         return ret;
 }
-8  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
···
         return ret;
 }

-void smu_cmn_assign_power_profile(struct smu_context *smu)
-{
-        uint32_t index;
-        index = fls(smu->workload_mask);
-        index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
-        smu->power_profile_mode = smu->workload_setting[index];
-}
-
 bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
 {
         struct pci_dev *p = NULL;
-2  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
···
 int smu_cmn_set_mp1_state(struct smu_context *smu,
                           enum pp_mp1_state mp1_state);

-void smu_cmn_assign_power_profile(struct smu_context *smu);
-
 /*
  * Helper function to make sysfs_emit_at() happy. Align buf to
  * the current page boundary and record the offset.