Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v4.7-rc1 3287 lines 92 kB view raw
1/* 2 * Copyright 2013 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24#include "drmP.h" 25#include "amdgpu.h" 26#include "amdgpu_pm.h" 27#include "cikd.h" 28#include "atom.h" 29#include "amdgpu_atombios.h" 30#include "amdgpu_dpm.h" 31#include "kv_dpm.h" 32#include "gfx_v7_0.h" 33#include <linux/seq_file.h> 34 35#include "smu/smu_7_0_0_d.h" 36#include "smu/smu_7_0_0_sh_mask.h" 37 38#include "gca/gfx_7_2_d.h" 39#include "gca/gfx_7_2_sh_mask.h" 40 41#define KV_MAX_DEEPSLEEP_DIVIDER_ID 5 42#define KV_MINIMUM_ENGINE_CLOCK 800 43#define SMC_RAM_END 0x40000 44 45static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev); 46static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev); 47static int kv_enable_nb_dpm(struct amdgpu_device *adev, 48 bool enable); 49static void kv_init_graphics_levels(struct amdgpu_device *adev); 50static int kv_calculate_ds_divider(struct amdgpu_device *adev); 51static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev); 52static int kv_calculate_dpm_settings(struct amdgpu_device *adev); 53static void kv_enable_new_levels(struct amdgpu_device *adev); 54static void kv_program_nbps_index_settings(struct amdgpu_device *adev, 55 struct amdgpu_ps *new_rps); 56static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level); 57static int kv_set_enabled_levels(struct amdgpu_device *adev); 58static int kv_force_dpm_highest(struct amdgpu_device *adev); 59static int kv_force_dpm_lowest(struct amdgpu_device *adev); 60static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, 61 struct amdgpu_ps *new_rps, 62 struct amdgpu_ps *old_rps); 63static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, 64 int min_temp, int max_temp); 65static int kv_init_fps_limits(struct amdgpu_device *adev); 66 67static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate); 68static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate); 69static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); 70static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); 71 72 73static u32 kv_convert_vid2_to_vid7(struct amdgpu_device *adev, 74 struct sumo_vid_mapping_table *vid_mapping_table, 75 u32 vid_2bit) 76{ 77 struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table = 78 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 79 u32 i; 80 81 if (vddc_sclk_table && vddc_sclk_table->count) { 82 if (vid_2bit < vddc_sclk_table->count) 83 return vddc_sclk_table->entries[vid_2bit].v; 84 else 85 return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v; 86 } else { 87 for (i = 0; i < vid_mapping_table->num_entries; i++) { 88 if (vid_mapping_table->entries[i].vid_2bit == vid_2bit) 89 return vid_mapping_table->entries[i].vid_7bit; 90 } 91 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit; 92 } 93} 94 95static u32 kv_convert_vid7_to_vid2(struct amdgpu_device *adev, 96 struct sumo_vid_mapping_table *vid_mapping_table, 97 u32 vid_7bit) 98{ 99 struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table = 100 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 101 u32 i; 102 103 if (vddc_sclk_table && vddc_sclk_table->count) { 104 for (i = 0; i < vddc_sclk_table->count; i++) { 105 if (vddc_sclk_table->entries[i].v == vid_7bit) 106 return i; 107 } 108 return vddc_sclk_table->count - 1; 109 } else { 110 for (i = 0; i < vid_mapping_table->num_entries; i++) { 111 if (vid_mapping_table->entries[i].vid_7bit == vid_7bit) 112 return vid_mapping_table->entries[i].vid_2bit; 113 } 114 115 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit; 116 } 117} 118 119static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable) 120{ 121/* This bit selects who handles display phy powergating. 122 * Clear the bit to let atom handle it. 123 * Set it to let the driver handle it. 124 * For now we just let atom handle it. 125 */ 126#if 0 127 u32 v = RREG32(mmDOUT_SCRATCH3); 128 129 if (enable) 130 v |= 0x4; 131 else 132 v &= 0xFFFFFFFB; 133 134 WREG32(mmDOUT_SCRATCH3, v); 135#endif 136} 137 138static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev, 139 struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table, 140 ATOM_AVAILABLE_SCLK_LIST *table) 141{ 142 u32 i; 143 u32 n = 0; 144 u32 prev_sclk = 0; 145 146 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { 147 if (table[i].ulSupportedSCLK > prev_sclk) { 148 sclk_voltage_mapping_table->entries[n].sclk_frequency = 149 table[i].ulSupportedSCLK; 150 sclk_voltage_mapping_table->entries[n].vid_2bit = 151 table[i].usVoltageIndex; 152 prev_sclk = table[i].ulSupportedSCLK; 153 n++; 154 } 155 } 156 157 sclk_voltage_mapping_table->num_max_dpm_entries = n; 158} 159 160static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev, 161 struct sumo_vid_mapping_table *vid_mapping_table, 162 ATOM_AVAILABLE_SCLK_LIST *table) 163{ 164 u32 i, j; 165 166 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { 167 if (table[i].ulSupportedSCLK != 0) { 168 vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit = 169 table[i].usVoltageID; 170 vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit = 171 table[i].usVoltageIndex; 172 } 173 } 174 175 for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) { 176 if (vid_mapping_table->entries[i].vid_7bit == 0) { 177 for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) { 178 if (vid_mapping_table->entries[j].vid_7bit != 0) { 179 vid_mapping_table->entries[i] = 180 vid_mapping_table->entries[j]; 181 vid_mapping_table->entries[j].vid_7bit = 0; 182 break; 183 } 184 } 185 186 if (j == SUMO_MAX_NUMBER_VOLTAGES) 187 break; 188 } 189 } 190 191 vid_mapping_table->num_entries = i; 192} 193 194static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] = 195{ 196 { 0, 4, 1 }, 197 { 1, 4, 1 }, 198 { 2, 5, 1 }, 199 { 3, 4, 2 }, 200 { 4, 1, 1 }, 201 { 5, 5, 2 }, 202 { 6, 6, 1 }, 203 { 7, 9, 2 }, 204 { 0xffffffff } 205}; 206 207static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] = 208{ 209 { 0, 4, 1 }, 210 { 0xffffffff } 211}; 212 213static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] = 214{ 215 { 0, 4, 1 }, 216 { 0xffffffff } 217}; 218 219static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] = 220{ 221 { 0, 4, 1 }, 222 { 0xffffffff } 223}; 224 225static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] = 226{ 227 { 0, 4, 1 }, 228 { 0xffffffff } 229}; 230 231static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] = 232{ 233 { 0, 4, 1 }, 234 { 1, 4, 1 }, 235 { 2, 5, 1 }, 236 { 3, 4, 1 }, 237 { 4, 1, 1 }, 238 { 5, 5, 1 }, 239 { 6, 6, 1 }, 240 { 7, 9, 1 }, 241 { 8, 4, 1 }, 242 { 9, 2, 1 }, 243 { 10, 3, 1 }, 244 { 11, 6, 1 }, 245 { 12, 8, 2 }, 246 { 13, 1, 1 }, 247 { 14, 2, 1 }, 248 { 15, 3, 1 }, 249 { 16, 1, 1 }, 250 { 17, 4, 1 }, 251 { 18, 3, 1 }, 252 { 19, 1, 1 }, 253 { 20, 8, 1 }, 254 { 21, 5, 1 }, 255 { 22, 1, 1 }, 256 { 23, 1, 1 }, 257 { 24, 4, 1 }, 258 { 27, 6, 1 }, 259 { 28, 1, 1 }, 260 { 0xffffffff } 261}; 262 263static const struct kv_lcac_config_reg sx0_cac_config_reg[] = 264{ 265 { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 266}; 267 268static const struct kv_lcac_config_reg mc0_cac_config_reg[] = 269{ 270 { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 271}; 272 273static const struct kv_lcac_config_reg mc1_cac_config_reg[] = 274{ 275 { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 276}; 277 278static const struct kv_lcac_config_reg mc2_cac_config_reg[] = 279{ 280 { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 281}; 282 283static const struct kv_lcac_config_reg mc3_cac_config_reg[] = 284{ 285 { 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 286}; 287 288static const struct kv_lcac_config_reg cpl_cac_config_reg[] = 289{ 290 { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 291}; 292 293static const struct kv_pt_config_reg didt_config_kv[] = 294{ 295 { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 296 { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 297 { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 298 { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 299 { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 300 { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 301 { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 302 { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 303 { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 304 { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 305 { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 306 { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 307 { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, 308 { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, 309 { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, 310 { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 311 { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 312 { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 313 { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 314 { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 315 { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 316 { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 317 { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 318 { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 319 { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 320 { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 321 { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 322 { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 323 { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 324 { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 325 { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, 326 { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, 327 { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, 328 { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 329 { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 330 { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 331 { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 332 { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 333 { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 334 { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 335 { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 336 { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 337 { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 338 { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 339 { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 340 { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 341 { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 342 { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 343 { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, 344 { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, 345 { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, 346 { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 347 { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 348 { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 349 { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 350 { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 351 { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 352 { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 353 { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 354 { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 355 { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 356 { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 357 { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 358 { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 359 { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 360 { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 361 { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, 362 { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, 363 { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, 364 { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 365 { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 366 { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 367 { 0xFFFFFFFF } 368}; 369 370static struct kv_ps *kv_get_ps(struct amdgpu_ps *rps) 371{ 372 struct kv_ps *ps = rps->ps_priv; 373 374 return ps; 375} 376 377static struct kv_power_info *kv_get_pi(struct amdgpu_device *adev) 378{ 379 struct kv_power_info *pi = adev->pm.dpm.priv; 380 381 return pi; 382} 383 384#if 0 385static void kv_program_local_cac_table(struct amdgpu_device *adev, 386 const struct kv_lcac_config_values *local_cac_table, 387 const struct kv_lcac_config_reg *local_cac_reg) 388{ 389 u32 i, count, data; 390 const struct kv_lcac_config_values *values = local_cac_table; 391 392 while (values->block_id != 0xffffffff) { 393 count = values->signal_id; 394 for (i = 0; i < count; i++) { 395 data = ((values->block_id << local_cac_reg->block_shift) & 396 local_cac_reg->block_mask); 397 data |= ((i << local_cac_reg->signal_shift) & 398 local_cac_reg->signal_mask); 399 data |= ((values->t << local_cac_reg->t_shift) & 400 local_cac_reg->t_mask); 401 data |= ((1 << local_cac_reg->enable_shift) & 402 local_cac_reg->enable_mask); 403 WREG32_SMC(local_cac_reg->cntl, data); 404 } 405 values++; 406 } 407} 408#endif 409 410static int kv_program_pt_config_registers(struct amdgpu_device *adev, 411 const struct kv_pt_config_reg *cac_config_regs) 412{ 413 const struct kv_pt_config_reg *config_regs = cac_config_regs; 414 u32 data; 415 u32 cache = 0; 416 417 if (config_regs == NULL) 418 return -EINVAL; 419 420 while (config_regs->offset != 0xFFFFFFFF) { 421 if (config_regs->type == KV_CONFIGREG_CACHE) { 422 cache |= ((config_regs->value << config_regs->shift) & config_regs->mask); 423 } else { 424 switch (config_regs->type) { 425 case KV_CONFIGREG_SMC_IND: 426 data = RREG32_SMC(config_regs->offset); 427 break; 428 case KV_CONFIGREG_DIDT_IND: 429 data = RREG32_DIDT(config_regs->offset); 430 break; 431 default: 432 data = RREG32(config_regs->offset); 433 break; 434 } 435 436 data &= ~config_regs->mask; 437 data |= ((config_regs->value << config_regs->shift) & config_regs->mask); 438 data |= cache; 439 cache = 0; 440 441 switch (config_regs->type) { 442 case KV_CONFIGREG_SMC_IND: 443 WREG32_SMC(config_regs->offset, data); 444 break; 445 case KV_CONFIGREG_DIDT_IND: 446 WREG32_DIDT(config_regs->offset, data); 447 break; 448 default: 449 WREG32(config_regs->offset, data); 450 break; 451 } 452 } 453 config_regs++; 454 } 455 456 return 0; 457} 458 459static void kv_do_enable_didt(struct amdgpu_device *adev, bool enable) 460{ 461 struct kv_power_info *pi = kv_get_pi(adev); 462 u32 data; 463 464 if (pi->caps_sq_ramping) { 465 data = RREG32_DIDT(ixDIDT_SQ_CTRL0); 466 if (enable) 467 data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; 468 else 469 data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; 470 WREG32_DIDT(ixDIDT_SQ_CTRL0, data); 471 } 472 473 if (pi->caps_db_ramping) { 474 data = RREG32_DIDT(ixDIDT_DB_CTRL0); 475 if (enable) 476 data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; 477 else 478 data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; 479 WREG32_DIDT(ixDIDT_DB_CTRL0, data); 480 } 481 482 if (pi->caps_td_ramping) { 483 data = RREG32_DIDT(ixDIDT_TD_CTRL0); 484 if (enable) 485 data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; 486 else 487 data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; 488 WREG32_DIDT(ixDIDT_TD_CTRL0, data); 489 } 490 491 if (pi->caps_tcp_ramping) { 492 data = RREG32_DIDT(ixDIDT_TCP_CTRL0); 493 if (enable) 494 data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; 495 else 496 data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; 497 WREG32_DIDT(ixDIDT_TCP_CTRL0, data); 498 } 499} 500 501static int kv_enable_didt(struct amdgpu_device *adev, bool enable) 502{ 503 struct kv_power_info *pi = kv_get_pi(adev); 504 int ret; 505 506 if (pi->caps_sq_ramping || 507 pi->caps_db_ramping || 508 pi->caps_td_ramping || 509 pi->caps_tcp_ramping) { 510 gfx_v7_0_enter_rlc_safe_mode(adev); 511 512 if (enable) { 513 ret = kv_program_pt_config_registers(adev, didt_config_kv); 514 if (ret) { 515 gfx_v7_0_exit_rlc_safe_mode(adev); 516 return ret; 517 } 518 } 519 520 kv_do_enable_didt(adev, enable); 521 522 gfx_v7_0_exit_rlc_safe_mode(adev); 523 } 524 525 return 0; 526} 527 528#if 0 529static void kv_initialize_hardware_cac_manager(struct amdgpu_device *adev) 530{ 531 struct kv_power_info *pi = kv_get_pi(adev); 532 533 if (pi->caps_cac) { 534 WREG32_SMC(ixLCAC_SX0_OVR_SEL, 0); 535 WREG32_SMC(ixLCAC_SX0_OVR_VAL, 0); 536 kv_program_local_cac_table(adev, sx_local_cac_cfg_kv, sx0_cac_config_reg); 537 538 WREG32_SMC(ixLCAC_MC0_OVR_SEL, 0); 539 WREG32_SMC(ixLCAC_MC0_OVR_VAL, 0); 540 kv_program_local_cac_table(adev, mc0_local_cac_cfg_kv, mc0_cac_config_reg); 541 542 WREG32_SMC(ixLCAC_MC1_OVR_SEL, 0); 543 WREG32_SMC(ixLCAC_MC1_OVR_VAL, 0); 544 kv_program_local_cac_table(adev, mc1_local_cac_cfg_kv, mc1_cac_config_reg); 545 546 WREG32_SMC(ixLCAC_MC2_OVR_SEL, 0); 547 WREG32_SMC(ixLCAC_MC2_OVR_VAL, 0); 548 kv_program_local_cac_table(adev, mc2_local_cac_cfg_kv, mc2_cac_config_reg); 549 550 WREG32_SMC(ixLCAC_MC3_OVR_SEL, 0); 551 WREG32_SMC(ixLCAC_MC3_OVR_VAL, 0); 552 kv_program_local_cac_table(adev, mc3_local_cac_cfg_kv, mc3_cac_config_reg); 553 554 WREG32_SMC(ixLCAC_CPL_OVR_SEL, 0); 555 WREG32_SMC(ixLCAC_CPL_OVR_VAL, 0); 556 kv_program_local_cac_table(adev, cpl_local_cac_cfg_kv, cpl_cac_config_reg); 557 } 558} 559#endif 560 561static int kv_enable_smc_cac(struct amdgpu_device *adev, bool enable) 562{ 563 struct kv_power_info *pi = kv_get_pi(adev); 564 int ret = 0; 565 566 if (pi->caps_cac) { 567 if (enable) { 568 ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableCac); 569 if (ret) 570 pi->cac_enabled = false; 571 else 572 pi->cac_enabled = true; 573 } else if (pi->cac_enabled) { 574 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableCac); 575 pi->cac_enabled = false; 576 } 577 } 578 579 return ret; 580} 581 582static int kv_process_firmware_header(struct amdgpu_device *adev) 583{ 584 struct kv_power_info *pi = kv_get_pi(adev); 585 u32 tmp; 586 int ret; 587 588 ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION + 589 offsetof(SMU7_Firmware_Header, DpmTable), 590 &tmp, pi->sram_end); 591 592 if (ret == 0) 593 pi->dpm_table_start = tmp; 594 595 ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION + 596 offsetof(SMU7_Firmware_Header, SoftRegisters), 597 &tmp, pi->sram_end); 598 599 if (ret == 0) 600 pi->soft_regs_start = tmp; 601 602 return ret; 603} 604 605static int kv_enable_dpm_voltage_scaling(struct amdgpu_device *adev) 606{ 607 struct kv_power_info *pi = kv_get_pi(adev); 608 int ret; 609 610 pi->graphics_voltage_change_enable = 1; 611 612 ret = amdgpu_kv_copy_bytes_to_smc(adev, 613 pi->dpm_table_start + 614 offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable), 615 &pi->graphics_voltage_change_enable, 616 sizeof(u8), pi->sram_end); 617 618 return ret; 619} 620 621static int kv_set_dpm_interval(struct amdgpu_device *adev) 622{ 623 struct kv_power_info *pi = kv_get_pi(adev); 624 int ret; 625 626 pi->graphics_interval = 1; 627 628 ret = amdgpu_kv_copy_bytes_to_smc(adev, 629 pi->dpm_table_start + 630 offsetof(SMU7_Fusion_DpmTable, GraphicsInterval), 631 &pi->graphics_interval, 632 sizeof(u8), pi->sram_end); 633 634 return ret; 635} 636 637static int kv_set_dpm_boot_state(struct amdgpu_device *adev) 638{ 639 struct kv_power_info *pi = kv_get_pi(adev); 640 int ret; 641 642 ret = amdgpu_kv_copy_bytes_to_smc(adev, 643 pi->dpm_table_start + 644 offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel), 645 &pi->graphics_boot_level, 646 sizeof(u8), pi->sram_end); 647 648 return ret; 649} 650 651static void kv_program_vc(struct amdgpu_device *adev) 652{ 653 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0x3FFFC100); 654} 655 656static void kv_clear_vc(struct amdgpu_device *adev) 657{ 658 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0); 659} 660 661static int kv_set_divider_value(struct amdgpu_device *adev, 662 u32 index, u32 sclk) 663{ 664 struct kv_power_info *pi = kv_get_pi(adev); 665 struct atom_clock_dividers dividers; 666 int ret; 667 668 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 669 sclk, false, &dividers); 670 if (ret) 671 return ret; 672 673 pi->graphics_level[index].SclkDid = (u8)dividers.post_div; 674 pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk); 675 676 return 0; 677} 678 679static u16 kv_convert_8bit_index_to_voltage(struct amdgpu_device *adev, 680 u16 voltage) 681{ 682 return 6200 - (voltage * 25); 683} 684 685static u16 kv_convert_2bit_index_to_voltage(struct amdgpu_device *adev, 686 u32 vid_2bit) 687{ 688 struct kv_power_info *pi = kv_get_pi(adev); 689 u32 vid_8bit = kv_convert_vid2_to_vid7(adev, 690 &pi->sys_info.vid_mapping_table, 691 vid_2bit); 692 693 return kv_convert_8bit_index_to_voltage(adev, (u16)vid_8bit); 694} 695 696 697static int kv_set_vid(struct amdgpu_device *adev, u32 index, u32 vid) 698{ 699 struct kv_power_info *pi = kv_get_pi(adev); 700 701 pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t; 702 pi->graphics_level[index].MinVddNb = 703 cpu_to_be32(kv_convert_2bit_index_to_voltage(adev, vid)); 704 705 return 0; 706} 707 708static int kv_set_at(struct amdgpu_device *adev, u32 index, u32 at) 709{ 710 struct kv_power_info *pi = kv_get_pi(adev); 711 712 pi->graphics_level[index].AT = cpu_to_be16((u16)at); 713 714 return 0; 715} 716 717static void kv_dpm_power_level_enable(struct amdgpu_device *adev, 718 u32 index, bool enable) 719{ 720 struct kv_power_info *pi = kv_get_pi(adev); 721 722 pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0; 723} 724 725static void kv_start_dpm(struct amdgpu_device *adev) 726{ 727 u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT); 728 729 tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK; 730 WREG32_SMC(ixGENERAL_PWRMGT, tmp); 731 732 amdgpu_kv_smc_dpm_enable(adev, true); 733} 734 735static void kv_stop_dpm(struct amdgpu_device *adev) 736{ 737 amdgpu_kv_smc_dpm_enable(adev, false); 738} 739 740static void kv_start_am(struct amdgpu_device *adev) 741{ 742 u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL); 743 744 sclk_pwrmgt_cntl &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | 745 SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); 746 sclk_pwrmgt_cntl |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK; 747 748 WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl); 749} 750 751static void kv_reset_am(struct amdgpu_device *adev) 752{ 753 u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL); 754 755 sclk_pwrmgt_cntl |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | 756 SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); 757 758 WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl); 759} 760 761static int kv_freeze_sclk_dpm(struct amdgpu_device *adev, bool freeze) 762{ 763 return amdgpu_kv_notify_message_to_smu(adev, freeze ? 764 PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel); 765} 766 767static int kv_force_lowest_valid(struct amdgpu_device *adev) 768{ 769 return kv_force_dpm_lowest(adev); 770} 771 772static int kv_unforce_levels(struct amdgpu_device *adev) 773{ 774 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 775 return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NoForcedLevel); 776 else 777 return kv_set_enabled_levels(adev); 778} 779 780static int kv_update_sclk_t(struct amdgpu_device *adev) 781{ 782 struct kv_power_info *pi = kv_get_pi(adev); 783 u32 low_sclk_interrupt_t = 0; 784 int ret = 0; 785 786 if (pi->caps_sclk_throttle_low_notification) { 787 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t); 788 789 ret = amdgpu_kv_copy_bytes_to_smc(adev, 790 pi->dpm_table_start + 791 offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT), 792 (u8 *)&low_sclk_interrupt_t, 793 sizeof(u32), pi->sram_end); 794 } 795 return ret; 796} 797 798static int kv_program_bootup_state(struct amdgpu_device *adev) 799{ 800 struct kv_power_info *pi = kv_get_pi(adev); 801 u32 i; 802 struct amdgpu_clock_voltage_dependency_table *table = 803 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 804 805 if (table && table->count) { 806 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 807 if (table->entries[i].clk == pi->boot_pl.sclk) 808 break; 809 } 810 811 pi->graphics_boot_level = (u8)i; 812 kv_dpm_power_level_enable(adev, i, true); 813 } else { 814 struct sumo_sclk_voltage_mapping_table *table = 815 &pi->sys_info.sclk_voltage_mapping_table; 816 817 if (table->num_max_dpm_entries == 0) 818 return -EINVAL; 819 820 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 821 if (table->entries[i].sclk_frequency == pi->boot_pl.sclk) 822 break; 823 } 824 825 pi->graphics_boot_level = (u8)i; 826 kv_dpm_power_level_enable(adev, i, true); 827 } 828 return 0; 829} 830 831static int kv_enable_auto_thermal_throttling(struct amdgpu_device *adev) 832{ 833 struct kv_power_info *pi = kv_get_pi(adev); 834 int ret; 835 836 pi->graphics_therm_throttle_enable = 1; 837 838 ret = amdgpu_kv_copy_bytes_to_smc(adev, 839 pi->dpm_table_start + 840 offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable), 841 &pi->graphics_therm_throttle_enable, 842 sizeof(u8), pi->sram_end); 843 844 return ret; 845} 846 847static int kv_upload_dpm_settings(struct amdgpu_device *adev) 848{ 849 struct kv_power_info *pi = kv_get_pi(adev); 850 int ret; 851 852 ret = amdgpu_kv_copy_bytes_to_smc(adev, 853 pi->dpm_table_start + 854 offsetof(SMU7_Fusion_DpmTable, GraphicsLevel), 855 (u8 *)&pi->graphics_level, 856 sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS, 857 pi->sram_end); 858 859 if (ret) 860 return ret; 861 862 ret = amdgpu_kv_copy_bytes_to_smc(adev, 863 pi->dpm_table_start + 864 offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount), 865 &pi->graphics_dpm_level_count, 866 sizeof(u8), pi->sram_end); 867 868 return ret; 869} 870 871static u32 kv_get_clock_difference(u32 a, u32 b) 872{ 873 return (a >= b) ? a - b : b - a; 874} 875 876static u32 kv_get_clk_bypass(struct amdgpu_device *adev, u32 clk) 877{ 878 struct kv_power_info *pi = kv_get_pi(adev); 879 u32 value; 880 881 if (pi->caps_enable_dfs_bypass) { 882 if (kv_get_clock_difference(clk, 40000) < 200) 883 value = 3; 884 else if (kv_get_clock_difference(clk, 30000) < 200) 885 value = 2; 886 else if (kv_get_clock_difference(clk, 20000) < 200) 887 value = 7; 888 else if (kv_get_clock_difference(clk, 15000) < 200) 889 value = 6; 890 else if (kv_get_clock_difference(clk, 10000) < 200) 891 value = 8; 892 else 893 value = 0; 894 } else { 895 value = 0; 896 } 897 898 return value; 899} 900 901static int kv_populate_uvd_table(struct amdgpu_device *adev) 902{ 903 struct kv_power_info *pi = kv_get_pi(adev); 904 struct amdgpu_uvd_clock_voltage_dependency_table *table = 905 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 906 struct atom_clock_dividers dividers; 907 int ret; 908 u32 i; 909 910 if (table == NULL || table->count == 0) 911 return 0; 912 913 pi->uvd_level_count = 0; 914 for (i = 0; i < table->count; i++) { 915 if (pi->high_voltage_t && 916 (pi->high_voltage_t < table->entries[i].v)) 917 break; 918 919 pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk); 920 pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk); 921 pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v); 922 923 pi->uvd_level[i].VClkBypassCntl = 924 (u8)kv_get_clk_bypass(adev, table->entries[i].vclk); 925 pi->uvd_level[i].DClkBypassCntl = 926 (u8)kv_get_clk_bypass(adev, table->entries[i].dclk); 927 928 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 929 table->entries[i].vclk, false, &dividers); 930 if (ret) 931 return ret; 932 pi->uvd_level[i].VclkDivider = (u8)dividers.post_div; 933 934 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 935 table->entries[i].dclk, false, &dividers); 936 if (ret) 937 return ret; 938 pi->uvd_level[i].DclkDivider = (u8)dividers.post_div; 939 940 pi->uvd_level_count++; 941 } 942 943 ret = amdgpu_kv_copy_bytes_to_smc(adev, 944 pi->dpm_table_start + 945 offsetof(SMU7_Fusion_DpmTable, UvdLevelCount), 946 (u8 *)&pi->uvd_level_count, 947 sizeof(u8), pi->sram_end); 948 if (ret) 949 return ret; 950 951 pi->uvd_interval = 1; 952 953 ret = amdgpu_kv_copy_bytes_to_smc(adev, 954 pi->dpm_table_start + 955 offsetof(SMU7_Fusion_DpmTable, UVDInterval), 956 &pi->uvd_interval, 957 sizeof(u8), pi->sram_end); 958 if (ret) 959 return ret; 960 961 ret = amdgpu_kv_copy_bytes_to_smc(adev, 962 pi->dpm_table_start + 963 offsetof(SMU7_Fusion_DpmTable, UvdLevel), 964 (u8 *)&pi->uvd_level, 965 sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD, 966 pi->sram_end); 967 968 return ret; 969 970} 971 972static int kv_populate_vce_table(struct amdgpu_device *adev) 973{ 974 struct kv_power_info *pi = kv_get_pi(adev); 975 int ret; 976 u32 i; 977 struct amdgpu_vce_clock_voltage_dependency_table *table = 978 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 979 struct atom_clock_dividers dividers; 980 981 if (table == NULL || table->count == 0) 982 return 0; 983 984 pi->vce_level_count = 0; 985 for (i = 0; i < table->count; i++) { 986 if (pi->high_voltage_t && 987 pi->high_voltage_t < table->entries[i].v) 988 break; 989 990 pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk); 991 pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); 992 993 pi->vce_level[i].ClkBypassCntl = 994 (u8)kv_get_clk_bypass(adev, table->entries[i].evclk); 995 996 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 997 table->entries[i].evclk, false, &dividers); 998 if (ret) 999 return ret; 1000 pi->vce_level[i].Divider = (u8)dividers.post_div; 1001 1002 pi->vce_level_count++; 1003 } 1004 1005 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1006 pi->dpm_table_start + 1007 offsetof(SMU7_Fusion_DpmTable, VceLevelCount), 1008 (u8 *)&pi->vce_level_count, 1009 sizeof(u8), 1010 pi->sram_end); 1011 if (ret) 1012 return ret; 1013 1014 pi->vce_interval = 1; 1015 1016 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1017 pi->dpm_table_start + 1018 offsetof(SMU7_Fusion_DpmTable, VCEInterval), 1019 (u8 *)&pi->vce_interval, 1020 sizeof(u8), 1021 pi->sram_end); 1022 if (ret) 1023 return ret; 1024 1025 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1026 pi->dpm_table_start + 1027 offsetof(SMU7_Fusion_DpmTable, VceLevel), 1028 (u8 *)&pi->vce_level, 1029 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE, 1030 pi->sram_end); 1031 1032 return ret; 1033} 1034 1035static int kv_populate_samu_table(struct amdgpu_device *adev) 1036{ 1037 struct kv_power_info *pi = kv_get_pi(adev); 1038 struct amdgpu_clock_voltage_dependency_table *table = 1039 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; 1040 struct atom_clock_dividers dividers; 1041 int ret; 1042 u32 i; 1043 1044 if (table == NULL || table->count == 0) 1045 return 0; 1046 1047 pi->samu_level_count = 0; 1048 for (i = 0; i < table->count; i++) { 1049 if (pi->high_voltage_t && 1050 pi->high_voltage_t < table->entries[i].v) 1051 break; 1052 1053 pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk); 1054 pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); 1055 1056 pi->samu_level[i].ClkBypassCntl = 1057 (u8)kv_get_clk_bypass(adev, table->entries[i].clk); 1058 1059 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 1060 table->entries[i].clk, false, &dividers); 1061 if (ret) 1062 return ret; 1063 pi->samu_level[i].Divider = (u8)dividers.post_div; 1064 1065 pi->samu_level_count++; 1066 } 1067 1068 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1069 pi->dpm_table_start + 1070 offsetof(SMU7_Fusion_DpmTable, SamuLevelCount), 1071 (u8 *)&pi->samu_level_count, 1072 sizeof(u8), 1073 pi->sram_end); 1074 if (ret) 1075 return ret; 1076 1077 pi->samu_interval = 1; 1078 1079 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1080 pi->dpm_table_start + 1081 offsetof(SMU7_Fusion_DpmTable, SAMUInterval), 1082 (u8 *)&pi->samu_interval, 1083 sizeof(u8), 1084 pi->sram_end); 1085 if (ret) 1086 return ret; 1087 1088 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1089 pi->dpm_table_start + 1090 offsetof(SMU7_Fusion_DpmTable, SamuLevel), 1091 (u8 *)&pi->samu_level, 1092 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU, 1093 pi->sram_end); 1094 if (ret) 1095 return ret; 1096 1097 return ret; 1098} 1099 1100 1101static int kv_populate_acp_table(struct amdgpu_device *adev) 1102{ 1103 struct kv_power_info *pi = kv_get_pi(adev); 1104 struct amdgpu_clock_voltage_dependency_table *table = 1105 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 1106 struct atom_clock_dividers dividers; 1107 int ret; 1108 u32 i; 1109 1110 if (table == NULL || table->count == 0) 1111 return 0; 1112 1113 pi->acp_level_count = 0; 1114 for (i = 0; i < table->count; i++) { 1115 pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk); 1116 pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); 1117 1118 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 1119 table->entries[i].clk, false, &dividers); 1120 if (ret) 1121 return ret; 1122 pi->acp_level[i].Divider = (u8)dividers.post_div; 1123 1124 pi->acp_level_count++; 1125 } 1126 1127 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1128 pi->dpm_table_start + 1129 offsetof(SMU7_Fusion_DpmTable, AcpLevelCount), 1130 (u8 *)&pi->acp_level_count, 1131 sizeof(u8), 1132 pi->sram_end); 1133 if (ret) 1134 return ret; 1135 1136 pi->acp_interval = 1; 1137 1138 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1139 pi->dpm_table_start + 1140 offsetof(SMU7_Fusion_DpmTable, ACPInterval), 1141 (u8 *)&pi->acp_interval, 1142 sizeof(u8), 1143 pi->sram_end); 1144 if (ret) 1145 return ret; 1146 1147 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1148 pi->dpm_table_start + 1149 offsetof(SMU7_Fusion_DpmTable, AcpLevel), 1150 (u8 *)&pi->acp_level, 1151 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP, 1152 pi->sram_end); 1153 if (ret) 1154 return ret; 1155 1156 return ret; 1157} 1158 1159static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev) 1160{ 1161 struct kv_power_info *pi = kv_get_pi(adev); 1162 u32 i; 1163 struct amdgpu_clock_voltage_dependency_table *table = 1164 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 1165 1166 if (table && table->count) { 1167 for (i = 0; i < pi->graphics_dpm_level_count; i++) { 1168 if (pi->caps_enable_dfs_bypass) { 1169 if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200) 1170 pi->graphics_level[i].ClkBypassCntl = 3; 1171 else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200) 1172 pi->graphics_level[i].ClkBypassCntl = 2; 1173 else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200) 1174 pi->graphics_level[i].ClkBypassCntl = 7; 1175 else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200) 1176 pi->graphics_level[i].ClkBypassCntl = 6; 1177 else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200) 1178 pi->graphics_level[i].ClkBypassCntl = 8; 1179 else 1180 pi->graphics_level[i].ClkBypassCntl = 0; 1181 } else { 1182 pi->graphics_level[i].ClkBypassCntl = 0; 1183 } 1184 } 1185 } else { 1186 struct sumo_sclk_voltage_mapping_table *table = 1187 &pi->sys_info.sclk_voltage_mapping_table; 1188 for (i = 0; i < pi->graphics_dpm_level_count; i++) { 1189 if (pi->caps_enable_dfs_bypass) { 1190 if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200) 1191 pi->graphics_level[i].ClkBypassCntl = 3; 1192 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200) 1193 pi->graphics_level[i].ClkBypassCntl = 2; 1194 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200) 1195 pi->graphics_level[i].ClkBypassCntl = 7; 1196 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200) 1197 pi->graphics_level[i].ClkBypassCntl = 6; 1198 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200) 1199 pi->graphics_level[i].ClkBypassCntl = 8; 1200 else 1201 pi->graphics_level[i].ClkBypassCntl = 0; 1202 } else { 1203 pi->graphics_level[i].ClkBypassCntl = 0; 1204 } 1205 } 1206 } 1207} 1208 1209static int kv_enable_ulv(struct amdgpu_device *adev, bool enable) 1210{ 1211 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1212 PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV); 1213} 1214 1215static void kv_reset_acp_boot_level(struct amdgpu_device *adev) 1216{ 1217 struct kv_power_info *pi = kv_get_pi(adev); 1218 1219 pi->acp_boot_level = 0xff; 1220} 1221 1222static void kv_update_current_ps(struct amdgpu_device *adev, 1223 struct amdgpu_ps *rps) 1224{ 1225 struct kv_ps *new_ps = kv_get_ps(rps); 1226 struct kv_power_info *pi = kv_get_pi(adev); 1227 1228 pi->current_rps = *rps; 1229 pi->current_ps = *new_ps; 1230 pi->current_rps.ps_priv = &pi->current_ps; 1231} 1232 1233static void kv_update_requested_ps(struct amdgpu_device *adev, 1234 struct amdgpu_ps *rps) 1235{ 1236 struct kv_ps *new_ps = kv_get_ps(rps); 1237 struct kv_power_info *pi = kv_get_pi(adev); 1238 1239 pi->requested_rps = *rps; 1240 pi->requested_ps = *new_ps; 1241 pi->requested_rps.ps_priv = &pi->requested_ps; 1242} 1243 1244static void kv_dpm_enable_bapm(struct amdgpu_device *adev, bool enable) 1245{ 1246 struct kv_power_info *pi = kv_get_pi(adev); 1247 int ret; 1248 1249 if (pi->bapm_enable) { 1250 ret = amdgpu_kv_smc_bapm_enable(adev, enable); 1251 if (ret) 1252 DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); 1253 } 1254} 1255 1256static int kv_dpm_enable(struct amdgpu_device *adev) 1257{ 1258 struct kv_power_info *pi = kv_get_pi(adev); 1259 int ret; 1260 1261 ret = kv_process_firmware_header(adev); 1262 if (ret) { 1263 DRM_ERROR("kv_process_firmware_header failed\n"); 1264 return ret; 1265 } 1266 kv_init_fps_limits(adev); 1267 kv_init_graphics_levels(adev); 1268 ret = kv_program_bootup_state(adev); 1269 if (ret) { 1270 DRM_ERROR("kv_program_bootup_state failed\n"); 1271 return ret; 1272 } 1273 kv_calculate_dfs_bypass_settings(adev); 1274 ret = kv_upload_dpm_settings(adev); 1275 if (ret) { 1276 DRM_ERROR("kv_upload_dpm_settings failed\n"); 1277 return ret; 1278 } 1279 ret = kv_populate_uvd_table(adev); 1280 if (ret) { 1281 DRM_ERROR("kv_populate_uvd_table failed\n"); 1282 return ret; 1283 } 1284 ret = kv_populate_vce_table(adev); 1285 if (ret) { 1286 DRM_ERROR("kv_populate_vce_table failed\n"); 1287 return ret; 1288 } 1289 ret = kv_populate_samu_table(adev); 1290 if (ret) { 1291 DRM_ERROR("kv_populate_samu_table failed\n"); 1292 return ret; 1293 } 1294 ret = kv_populate_acp_table(adev); 1295 if (ret) { 1296 DRM_ERROR("kv_populate_acp_table failed\n"); 1297 return ret; 1298 } 1299 kv_program_vc(adev); 1300#if 0 1301 kv_initialize_hardware_cac_manager(adev); 1302#endif 1303 kv_start_am(adev); 1304 if (pi->enable_auto_thermal_throttling) { 1305 ret = kv_enable_auto_thermal_throttling(adev); 1306 if (ret) { 1307 DRM_ERROR("kv_enable_auto_thermal_throttling failed\n"); 1308 return ret; 1309 } 1310 } 1311 ret = kv_enable_dpm_voltage_scaling(adev); 1312 if (ret) { 1313 DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n"); 1314 return ret; 1315 } 1316 ret = kv_set_dpm_interval(adev); 1317 if (ret) { 1318 DRM_ERROR("kv_set_dpm_interval failed\n"); 1319 return ret; 1320 } 1321 ret = kv_set_dpm_boot_state(adev); 1322 if (ret) { 1323 DRM_ERROR("kv_set_dpm_boot_state failed\n"); 1324 return ret; 1325 } 1326 ret = kv_enable_ulv(adev, true); 1327 if (ret) { 1328 DRM_ERROR("kv_enable_ulv failed\n"); 1329 return ret; 1330 } 1331 kv_start_dpm(adev); 1332 ret = kv_enable_didt(adev, true); 1333 if (ret) { 1334 DRM_ERROR("kv_enable_didt failed\n"); 1335 return ret; 1336 } 1337 ret = kv_enable_smc_cac(adev, true); 1338 if (ret) { 1339 DRM_ERROR("kv_enable_smc_cac failed\n"); 1340 return ret; 1341 } 1342 1343 kv_reset_acp_boot_level(adev); 1344 1345 ret = amdgpu_kv_smc_bapm_enable(adev, false); 1346 if (ret) { 1347 DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); 1348 return ret; 1349 } 1350 1351 kv_update_current_ps(adev, adev->pm.dpm.boot_ps); 1352 1353 if (adev->irq.installed && 1354 amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { 1355 ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); 1356 if (ret) { 1357 DRM_ERROR("kv_set_thermal_temperature_range failed\n"); 1358 return ret; 1359 } 1360 amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, 1361 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); 1362 amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, 1363 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); 1364 } 1365 1366 return ret; 1367} 1368 1369static void kv_dpm_disable(struct amdgpu_device *adev) 1370{ 1371 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, 1372 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); 1373 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, 1374 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); 1375 1376 amdgpu_kv_smc_bapm_enable(adev, false); 1377 1378 if (adev->asic_type == CHIP_MULLINS) 1379 kv_enable_nb_dpm(adev, false); 1380 1381 /* powerup blocks */ 1382 kv_dpm_powergate_acp(adev, false); 1383 kv_dpm_powergate_samu(adev, false); 1384 kv_dpm_powergate_vce(adev, false); 1385 kv_dpm_powergate_uvd(adev, false); 1386 1387 kv_enable_smc_cac(adev, false); 1388 kv_enable_didt(adev, false); 1389 kv_clear_vc(adev); 1390 kv_stop_dpm(adev); 1391 kv_enable_ulv(adev, false); 1392 kv_reset_am(adev); 1393 1394 kv_update_current_ps(adev, adev->pm.dpm.boot_ps); 1395} 1396 1397#if 0 1398static int kv_write_smc_soft_register(struct amdgpu_device *adev, 1399 u16 reg_offset, u32 value) 1400{ 1401 struct kv_power_info *pi = kv_get_pi(adev); 1402 1403 return amdgpu_kv_copy_bytes_to_smc(adev, pi->soft_regs_start + reg_offset, 1404 (u8 *)&value, sizeof(u16), pi->sram_end); 1405} 1406 1407static int kv_read_smc_soft_register(struct amdgpu_device *adev, 1408 u16 reg_offset, u32 *value) 1409{ 1410 struct kv_power_info *pi = kv_get_pi(adev); 1411 1412 return amdgpu_kv_read_smc_sram_dword(adev, pi->soft_regs_start + reg_offset, 1413 value, pi->sram_end); 1414} 1415#endif 1416 1417static void kv_init_sclk_t(struct amdgpu_device *adev) 1418{ 1419 struct kv_power_info *pi = kv_get_pi(adev); 1420 1421 pi->low_sclk_interrupt_t = 0; 1422} 1423 1424static int kv_init_fps_limits(struct amdgpu_device *adev) 1425{ 1426 struct kv_power_info *pi = kv_get_pi(adev); 1427 int ret = 0; 1428 1429 if (pi->caps_fps) { 1430 u16 tmp; 1431 1432 tmp = 45; 1433 pi->fps_high_t = cpu_to_be16(tmp); 1434 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1435 pi->dpm_table_start + 1436 offsetof(SMU7_Fusion_DpmTable, FpsHighT), 1437 (u8 *)&pi->fps_high_t, 1438 sizeof(u16), pi->sram_end); 1439 1440 tmp = 30; 1441 pi->fps_low_t = cpu_to_be16(tmp); 1442 1443 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1444 pi->dpm_table_start + 1445 offsetof(SMU7_Fusion_DpmTable, FpsLowT), 1446 (u8 *)&pi->fps_low_t, 1447 sizeof(u16), pi->sram_end); 1448 1449 } 1450 return ret; 1451} 1452 1453static void kv_init_powergate_state(struct amdgpu_device *adev) 1454{ 1455 struct kv_power_info *pi = kv_get_pi(adev); 1456 1457 pi->uvd_power_gated = false; 1458 pi->vce_power_gated = false; 1459 pi->samu_power_gated = false; 1460 pi->acp_power_gated = false; 1461 1462} 1463 1464static int kv_enable_uvd_dpm(struct amdgpu_device *adev, bool enable) 1465{ 1466 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1467 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable); 1468} 1469 1470static int kv_enable_vce_dpm(struct amdgpu_device *adev, bool enable) 1471{ 1472 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1473 PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable); 1474} 1475 1476static int kv_enable_samu_dpm(struct amdgpu_device *adev, bool enable) 1477{ 1478 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1479 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable); 1480} 1481 1482static int kv_enable_acp_dpm(struct amdgpu_device *adev, bool enable) 1483{ 1484 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1485 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable); 1486} 1487 1488static int kv_update_uvd_dpm(struct amdgpu_device *adev, bool gate) 1489{ 1490 struct kv_power_info *pi = kv_get_pi(adev); 1491 struct amdgpu_uvd_clock_voltage_dependency_table *table = 1492 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 1493 int ret; 1494 u32 mask; 1495 1496 if (!gate) { 1497 if (table->count) 1498 pi->uvd_boot_level = table->count - 1; 1499 else 1500 pi->uvd_boot_level = 0; 1501 1502 if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) { 1503 mask = 1 << pi->uvd_boot_level; 1504 } else { 1505 mask = 0x1f; 1506 } 1507 1508 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1509 pi->dpm_table_start + 1510 offsetof(SMU7_Fusion_DpmTable, UvdBootLevel), 1511 (uint8_t *)&pi->uvd_boot_level, 1512 sizeof(u8), pi->sram_end); 1513 if (ret) 1514 return ret; 1515 1516 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1517 PPSMC_MSG_UVDDPM_SetEnabledMask, 1518 mask); 1519 } 1520 1521 return kv_enable_uvd_dpm(adev, !gate); 1522} 1523 1524static u8 kv_get_vce_boot_level(struct amdgpu_device *adev, u32 evclk) 1525{ 1526 u8 i; 1527 struct amdgpu_vce_clock_voltage_dependency_table *table = 1528 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 1529 1530 for (i = 0; i < table->count; i++) { 1531 if (table->entries[i].evclk >= evclk) 1532 break; 1533 } 1534 1535 return i; 1536} 1537 1538static int kv_update_vce_dpm(struct amdgpu_device *adev, 1539 struct amdgpu_ps *amdgpu_new_state, 1540 struct amdgpu_ps *amdgpu_current_state) 1541{ 1542 struct kv_power_info *pi = kv_get_pi(adev); 1543 struct amdgpu_vce_clock_voltage_dependency_table *table = 1544 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 1545 int ret; 1546 1547 if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { 1548 kv_dpm_powergate_vce(adev, false); 1549 /* turn the clocks on when encoding */ 1550 ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, 1551 AMD_CG_STATE_UNGATE); 1552 if (ret) 1553 return ret; 1554 if (pi->caps_stable_p_state) 1555 pi->vce_boot_level = table->count - 1; 1556 else 1557 pi->vce_boot_level = kv_get_vce_boot_level(adev, amdgpu_new_state->evclk); 1558 1559 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1560 pi->dpm_table_start + 1561 offsetof(SMU7_Fusion_DpmTable, VceBootLevel), 1562 (u8 *)&pi->vce_boot_level, 1563 sizeof(u8), 1564 pi->sram_end); 1565 if (ret) 1566 return ret; 1567 1568 if (pi->caps_stable_p_state) 1569 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1570 PPSMC_MSG_VCEDPM_SetEnabledMask, 1571 (1 << pi->vce_boot_level)); 1572 1573 kv_enable_vce_dpm(adev, true); 1574 } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { 1575 kv_enable_vce_dpm(adev, false); 1576 /* turn the clocks off when not encoding */ 1577 ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, 1578 AMD_CG_STATE_GATE); 1579 if (ret) 1580 return ret; 1581 kv_dpm_powergate_vce(adev, true); 1582 } 1583 1584 return 0; 1585} 1586 1587static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate) 1588{ 1589 struct kv_power_info *pi = kv_get_pi(adev); 1590 struct amdgpu_clock_voltage_dependency_table *table = 1591 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; 1592 int ret; 1593 1594 if (!gate) { 1595 if (pi->caps_stable_p_state) 1596 pi->samu_boot_level = table->count - 1; 1597 else 1598 pi->samu_boot_level = 0; 1599 1600 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1601 pi->dpm_table_start + 1602 offsetof(SMU7_Fusion_DpmTable, SamuBootLevel), 1603 (u8 *)&pi->samu_boot_level, 1604 sizeof(u8), 1605 pi->sram_end); 1606 if (ret) 1607 return ret; 1608 1609 if (pi->caps_stable_p_state) 1610 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1611 PPSMC_MSG_SAMUDPM_SetEnabledMask, 1612 (1 << pi->samu_boot_level)); 1613 } 1614 1615 return kv_enable_samu_dpm(adev, !gate); 1616} 1617 1618static u8 kv_get_acp_boot_level(struct amdgpu_device *adev) 1619{ 1620 u8 i; 1621 struct amdgpu_clock_voltage_dependency_table *table = 1622 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 1623 1624 for (i = 0; i < table->count; i++) { 1625 if (table->entries[i].clk >= 0) /* XXX */ 1626 break; 1627 } 1628 1629 if (i >= table->count) 1630 i = table->count - 1; 1631 1632 return i; 1633} 1634 1635static void kv_update_acp_boot_level(struct amdgpu_device *adev) 1636{ 1637 struct kv_power_info *pi = kv_get_pi(adev); 1638 u8 acp_boot_level; 1639 1640 if (!pi->caps_stable_p_state) { 1641 acp_boot_level = kv_get_acp_boot_level(adev); 1642 if (acp_boot_level != pi->acp_boot_level) { 1643 pi->acp_boot_level = acp_boot_level; 1644 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1645 PPSMC_MSG_ACPDPM_SetEnabledMask, 1646 (1 << pi->acp_boot_level)); 1647 } 1648 } 1649} 1650 1651static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate) 1652{ 1653 struct kv_power_info *pi = kv_get_pi(adev); 1654 struct amdgpu_clock_voltage_dependency_table *table = 1655 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 1656 int ret; 1657 1658 if (!gate) { 1659 if (pi->caps_stable_p_state) 1660 pi->acp_boot_level = table->count - 1; 1661 else 1662 pi->acp_boot_level = kv_get_acp_boot_level(adev); 1663 1664 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1665 pi->dpm_table_start + 1666 offsetof(SMU7_Fusion_DpmTable, AcpBootLevel), 1667 (u8 *)&pi->acp_boot_level, 1668 sizeof(u8), 1669 pi->sram_end); 1670 if (ret) 1671 return ret; 1672 1673 if (pi->caps_stable_p_state) 1674 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1675 PPSMC_MSG_ACPDPM_SetEnabledMask, 1676 (1 << pi->acp_boot_level)); 1677 } 1678 1679 return kv_enable_acp_dpm(adev, !gate); 1680} 1681 1682static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate) 1683{ 1684 struct kv_power_info *pi = kv_get_pi(adev); 1685 int ret; 1686 1687 if (pi->uvd_power_gated == gate) 1688 return; 1689 1690 pi->uvd_power_gated = gate; 1691 1692 if (gate) { 1693 if (pi->caps_uvd_pg) { 1694 /* disable clockgating so we can properly shut down the block */ 1695 ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, 1696 AMD_CG_STATE_UNGATE); 1697 /* shutdown the UVD block */ 1698 ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, 1699 AMD_PG_STATE_GATE); 1700 /* XXX: check for errors */ 1701 } 1702 kv_update_uvd_dpm(adev, gate); 1703 if (pi->caps_uvd_pg) 1704 /* power off the UVD block */ 1705 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF); 1706 } else { 1707 if (pi->caps_uvd_pg) { 1708 /* power on the UVD block */ 1709 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); 1710 /* re-init the UVD block */ 1711 ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, 1712 AMD_PG_STATE_UNGATE); 1713 /* enable clockgating. hw will dynamically gate/ungate clocks on the fly */ 1714 ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, 1715 AMD_CG_STATE_GATE); 1716 /* XXX: check for errors */ 1717 } 1718 kv_update_uvd_dpm(adev, gate); 1719 } 1720} 1721 1722static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) 1723{ 1724 struct kv_power_info *pi = kv_get_pi(adev); 1725 int ret; 1726 1727 if (pi->vce_power_gated == gate) 1728 return; 1729 1730 pi->vce_power_gated = gate; 1731 1732 if (gate) { 1733 if (pi->caps_vce_pg) { 1734 /* shutdown the VCE block */ 1735 ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, 1736 AMD_PG_STATE_GATE); 1737 /* XXX: check for errors */ 1738 /* power off the VCE block */ 1739 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); 1740 } 1741 } else { 1742 if (pi->caps_vce_pg) { 1743 /* power on the VCE block */ 1744 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); 1745 /* re-init the VCE block */ 1746 ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, 1747 AMD_PG_STATE_UNGATE); 1748 /* XXX: check for errors */ 1749 } 1750 } 1751} 1752 1753static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) 1754{ 1755 struct kv_power_info *pi = kv_get_pi(adev); 1756 1757 if (pi->samu_power_gated == gate) 1758 return; 1759 1760 pi->samu_power_gated = gate; 1761 1762 if (gate) { 1763 kv_update_samu_dpm(adev, true); 1764 if (pi->caps_samu_pg) 1765 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerOFF); 1766 } else { 1767 if (pi->caps_samu_pg) 1768 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerON); 1769 kv_update_samu_dpm(adev, false); 1770 } 1771} 1772 1773static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate) 1774{ 1775 struct kv_power_info *pi = kv_get_pi(adev); 1776 1777 if (pi->acp_power_gated == gate) 1778 return; 1779 1780 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 1781 return; 1782 1783 pi->acp_power_gated = gate; 1784 1785 if (gate) { 1786 kv_update_acp_dpm(adev, true); 1787 if (pi->caps_acp_pg) 1788 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerOFF); 1789 } else { 1790 if (pi->caps_acp_pg) 1791 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerON); 1792 kv_update_acp_dpm(adev, false); 1793 } 1794} 1795 1796static void kv_set_valid_clock_range(struct amdgpu_device *adev, 1797 struct amdgpu_ps *new_rps) 1798{ 1799 struct kv_ps *new_ps = kv_get_ps(new_rps); 1800 struct kv_power_info *pi = kv_get_pi(adev); 1801 u32 i; 1802 struct amdgpu_clock_voltage_dependency_table *table = 1803 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 1804 1805 if (table && table->count) { 1806 for (i = 0; i < pi->graphics_dpm_level_count; i++) { 1807 if ((table->entries[i].clk >= new_ps->levels[0].sclk) || 1808 (i == (pi->graphics_dpm_level_count - 1))) { 1809 pi->lowest_valid = i; 1810 break; 1811 } 1812 } 1813 1814 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 1815 if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk) 1816 break; 1817 } 1818 pi->highest_valid = i; 1819 1820 if (pi->lowest_valid > pi->highest_valid) { 1821 if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) > 1822 (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk)) 1823 pi->highest_valid = pi->lowest_valid; 1824 else 1825 pi->lowest_valid = pi->highest_valid; 1826 } 1827 } else { 1828 struct sumo_sclk_voltage_mapping_table *table = 1829 &pi->sys_info.sclk_voltage_mapping_table; 1830 1831 for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) { 1832 if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk || 1833 i == (int)(pi->graphics_dpm_level_count - 1)) { 1834 pi->lowest_valid = i; 1835 break; 1836 } 1837 } 1838 1839 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 1840 if (table->entries[i].sclk_frequency <= 1841 new_ps->levels[new_ps->num_levels - 1].sclk) 1842 break; 1843 } 1844 pi->highest_valid = i; 1845 1846 if (pi->lowest_valid > pi->highest_valid) { 1847 if ((new_ps->levels[0].sclk - 1848 table->entries[pi->highest_valid].sclk_frequency) > 1849 (table->entries[pi->lowest_valid].sclk_frequency - 1850 new_ps->levels[new_ps->num_levels -1].sclk)) 1851 pi->highest_valid = pi->lowest_valid; 1852 else 1853 pi->lowest_valid = pi->highest_valid; 1854 } 1855 } 1856} 1857 1858static int kv_update_dfs_bypass_settings(struct amdgpu_device *adev, 1859 struct amdgpu_ps *new_rps) 1860{ 1861 struct kv_ps *new_ps = kv_get_ps(new_rps); 1862 struct kv_power_info *pi = kv_get_pi(adev); 1863 int ret = 0; 1864 u8 clk_bypass_cntl; 1865 1866 if (pi->caps_enable_dfs_bypass) { 1867 clk_bypass_cntl = new_ps->need_dfs_bypass ? 1868 pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0; 1869 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1870 (pi->dpm_table_start + 1871 offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) + 1872 (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) + 1873 offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)), 1874 &clk_bypass_cntl, 1875 sizeof(u8), pi->sram_end); 1876 } 1877 1878 return ret; 1879} 1880 1881static int kv_enable_nb_dpm(struct amdgpu_device *adev, 1882 bool enable) 1883{ 1884 struct kv_power_info *pi = kv_get_pi(adev); 1885 int ret = 0; 1886 1887 if (enable) { 1888 if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) { 1889 ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Enable); 1890 if (ret == 0) 1891 pi->nb_dpm_enabled = true; 1892 } 1893 } else { 1894 if (pi->enable_nb_dpm && pi->nb_dpm_enabled) { 1895 ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Disable); 1896 if (ret == 0) 1897 pi->nb_dpm_enabled = false; 1898 } 1899 } 1900 1901 return ret; 1902} 1903 1904static int kv_dpm_force_performance_level(struct amdgpu_device *adev, 1905 enum amdgpu_dpm_forced_level level) 1906{ 1907 int ret; 1908 1909 if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) { 1910 ret = kv_force_dpm_highest(adev); 1911 if (ret) 1912 return ret; 1913 } else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) { 1914 ret = kv_force_dpm_lowest(adev); 1915 if (ret) 1916 return ret; 1917 } else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) { 1918 ret = kv_unforce_levels(adev); 1919 if (ret) 1920 return ret; 1921 } 1922 1923 adev->pm.dpm.forced_level = level; 1924 1925 return 0; 1926} 1927 1928static int kv_dpm_pre_set_power_state(struct amdgpu_device *adev) 1929{ 1930 struct kv_power_info *pi = kv_get_pi(adev); 1931 struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps; 1932 struct amdgpu_ps *new_ps = &requested_ps; 1933 1934 kv_update_requested_ps(adev, new_ps); 1935 1936 kv_apply_state_adjust_rules(adev, 1937 &pi->requested_rps, 1938 &pi->current_rps); 1939 1940 return 0; 1941} 1942 1943static int kv_dpm_set_power_state(struct amdgpu_device *adev) 1944{ 1945 struct kv_power_info *pi = kv_get_pi(adev); 1946 struct amdgpu_ps *new_ps = &pi->requested_rps; 1947 struct amdgpu_ps *old_ps = &pi->current_rps; 1948 int ret; 1949 1950 if (pi->bapm_enable) { 1951 ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.dpm.ac_power); 1952 if (ret) { 1953 DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); 1954 return ret; 1955 } 1956 } 1957 1958 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { 1959 if (pi->enable_dpm) { 1960 kv_set_valid_clock_range(adev, new_ps); 1961 kv_update_dfs_bypass_settings(adev, new_ps); 1962 ret = kv_calculate_ds_divider(adev); 1963 if (ret) { 1964 DRM_ERROR("kv_calculate_ds_divider failed\n"); 1965 return ret; 1966 } 1967 kv_calculate_nbps_level_settings(adev); 1968 kv_calculate_dpm_settings(adev); 1969 kv_force_lowest_valid(adev); 1970 kv_enable_new_levels(adev); 1971 kv_upload_dpm_settings(adev); 1972 kv_program_nbps_index_settings(adev, new_ps); 1973 kv_unforce_levels(adev); 1974 kv_set_enabled_levels(adev); 1975 kv_force_lowest_valid(adev); 1976 kv_unforce_levels(adev); 1977 1978 ret = kv_update_vce_dpm(adev, new_ps, old_ps); 1979 if (ret) { 1980 DRM_ERROR("kv_update_vce_dpm failed\n"); 1981 return ret; 1982 } 1983 kv_update_sclk_t(adev); 1984 if (adev->asic_type == CHIP_MULLINS) 1985 kv_enable_nb_dpm(adev, true); 1986 } 1987 } else { 1988 if (pi->enable_dpm) { 1989 kv_set_valid_clock_range(adev, new_ps); 1990 kv_update_dfs_bypass_settings(adev, new_ps); 1991 ret = kv_calculate_ds_divider(adev); 1992 if (ret) { 1993 DRM_ERROR("kv_calculate_ds_divider failed\n"); 1994 return ret; 1995 } 1996 kv_calculate_nbps_level_settings(adev); 1997 kv_calculate_dpm_settings(adev); 1998 kv_freeze_sclk_dpm(adev, true); 1999 kv_upload_dpm_settings(adev); 2000 kv_program_nbps_index_settings(adev, new_ps); 2001 kv_freeze_sclk_dpm(adev, false); 2002 kv_set_enabled_levels(adev); 2003 ret = kv_update_vce_dpm(adev, new_ps, old_ps); 2004 if (ret) { 2005 DRM_ERROR("kv_update_vce_dpm failed\n"); 2006 return ret; 2007 } 2008 kv_update_acp_boot_level(adev); 2009 kv_update_sclk_t(adev); 2010 kv_enable_nb_dpm(adev, true); 2011 } 2012 } 2013 2014 return 0; 2015} 2016 2017static void kv_dpm_post_set_power_state(struct amdgpu_device *adev) 2018{ 2019 struct kv_power_info *pi = kv_get_pi(adev); 2020 struct amdgpu_ps *new_ps = &pi->requested_rps; 2021 2022 kv_update_current_ps(adev, new_ps); 2023} 2024 2025static void kv_dpm_setup_asic(struct amdgpu_device *adev) 2026{ 2027 sumo_take_smu_control(adev, true); 2028 kv_init_powergate_state(adev); 2029 kv_init_sclk_t(adev); 2030} 2031 2032#if 0 2033static void kv_dpm_reset_asic(struct amdgpu_device *adev) 2034{ 2035 struct kv_power_info *pi = kv_get_pi(adev); 2036 2037 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { 2038 kv_force_lowest_valid(adev); 2039 kv_init_graphics_levels(adev); 2040 kv_program_bootup_state(adev); 2041 kv_upload_dpm_settings(adev); 2042 kv_force_lowest_valid(adev); 2043 kv_unforce_levels(adev); 2044 } else { 2045 kv_init_graphics_levels(adev); 2046 kv_program_bootup_state(adev); 2047 kv_freeze_sclk_dpm(adev, true); 2048 kv_upload_dpm_settings(adev); 2049 kv_freeze_sclk_dpm(adev, false); 2050 kv_set_enabled_level(adev, pi->graphics_boot_level); 2051 } 2052} 2053#endif 2054 2055static void kv_construct_max_power_limits_table(struct amdgpu_device *adev, 2056 struct amdgpu_clock_and_voltage_limits *table) 2057{ 2058 struct kv_power_info *pi = kv_get_pi(adev); 2059 2060 if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) { 2061 int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1; 2062 table->sclk = 2063 pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency; 2064 table->vddc = 2065 kv_convert_2bit_index_to_voltage(adev, 2066 pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit); 2067 } 2068 2069 table->mclk = pi->sys_info.nbp_memory_clock[0]; 2070} 2071 2072static void kv_patch_voltage_values(struct amdgpu_device *adev) 2073{ 2074 int i; 2075 struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table = 2076 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 2077 struct amdgpu_vce_clock_voltage_dependency_table *vce_table = 2078 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 2079 struct amdgpu_clock_voltage_dependency_table *samu_table = 2080 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; 2081 struct amdgpu_clock_voltage_dependency_table *acp_table = 2082 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 2083 2084 if (uvd_table->count) { 2085 for (i = 0; i < uvd_table->count; i++) 2086 uvd_table->entries[i].v = 2087 kv_convert_8bit_index_to_voltage(adev, 2088 uvd_table->entries[i].v); 2089 } 2090 2091 if (vce_table->count) { 2092 for (i = 0; i < vce_table->count; i++) 2093 vce_table->entries[i].v = 2094 kv_convert_8bit_index_to_voltage(adev, 2095 vce_table->entries[i].v); 2096 } 2097 2098 if (samu_table->count) { 2099 for (i = 0; i < samu_table->count; i++) 2100 samu_table->entries[i].v = 2101 kv_convert_8bit_index_to_voltage(adev, 2102 samu_table->entries[i].v); 2103 } 2104 2105 if (acp_table->count) { 2106 for (i = 0; i < acp_table->count; i++) 2107 acp_table->entries[i].v = 2108 kv_convert_8bit_index_to_voltage(adev, 2109 acp_table->entries[i].v); 2110 } 2111 2112} 2113 2114static void kv_construct_boot_state(struct amdgpu_device *adev) 2115{ 2116 struct kv_power_info *pi = kv_get_pi(adev); 2117 2118 pi->boot_pl.sclk = pi->sys_info.bootup_sclk; 2119 pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index; 2120 pi->boot_pl.ds_divider_index = 0; 2121 pi->boot_pl.ss_divider_index = 0; 2122 pi->boot_pl.allow_gnb_slow = 1; 2123 pi->boot_pl.force_nbp_state = 0; 2124 pi->boot_pl.display_wm = 0; 2125 pi->boot_pl.vce_wm = 0; 2126} 2127 2128static int kv_force_dpm_highest(struct amdgpu_device *adev) 2129{ 2130 int ret; 2131 u32 enable_mask, i; 2132 2133 ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask); 2134 if (ret) 2135 return ret; 2136 2137 for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) { 2138 if (enable_mask & (1 << i)) 2139 break; 2140 } 2141 2142 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 2143 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i); 2144 else 2145 return kv_set_enabled_level(adev, i); 2146} 2147 2148static int kv_force_dpm_lowest(struct amdgpu_device *adev) 2149{ 2150 int ret; 2151 u32 enable_mask, i; 2152 2153 ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask); 2154 if (ret) 2155 return ret; 2156 2157 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) { 2158 if (enable_mask & (1 << i)) 2159 break; 2160 } 2161 2162 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 2163 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i); 2164 else 2165 return kv_set_enabled_level(adev, i); 2166} 2167 2168static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev, 2169 u32 sclk, u32 min_sclk_in_sr) 2170{ 2171 struct kv_power_info *pi = kv_get_pi(adev); 2172 u32 i; 2173 u32 temp; 2174 u32 min = max(min_sclk_in_sr, (u32)KV_MINIMUM_ENGINE_CLOCK); 2175 2176 if (sclk < min) 2177 return 0; 2178 2179 if (!pi->caps_sclk_ds) 2180 return 0; 2181 2182 for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) { 2183 temp = sclk >> i; 2184 if (temp >= min) 2185 break; 2186 } 2187 2188 return (u8)i; 2189} 2190 2191static int kv_get_high_voltage_limit(struct amdgpu_device *adev, int *limit) 2192{ 2193 struct kv_power_info *pi = kv_get_pi(adev); 2194 struct amdgpu_clock_voltage_dependency_table *table = 2195 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 2196 int i; 2197 2198 if (table && table->count) { 2199 for (i = table->count - 1; i >= 0; i--) { 2200 if (pi->high_voltage_t && 2201 (kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <= 2202 pi->high_voltage_t)) { 2203 *limit = i; 2204 return 0; 2205 } 2206 } 2207 } else { 2208 struct sumo_sclk_voltage_mapping_table *table = 2209 &pi->sys_info.sclk_voltage_mapping_table; 2210 2211 for (i = table->num_max_dpm_entries - 1; i >= 0; i--) { 2212 if (pi->high_voltage_t && 2213 (kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <= 2214 pi->high_voltage_t)) { 2215 *limit = i; 2216 return 0; 2217 } 2218 } 2219 } 2220 2221 *limit = 0; 2222 return 0; 2223} 2224 2225static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, 2226 struct amdgpu_ps *new_rps, 2227 struct amdgpu_ps *old_rps) 2228{ 2229 struct kv_ps *ps = kv_get_ps(new_rps); 2230 struct kv_power_info *pi = kv_get_pi(adev); 2231 u32 min_sclk = 10000; /* ??? */ 2232 u32 sclk, mclk = 0; 2233 int i, limit; 2234 bool force_high; 2235 struct amdgpu_clock_voltage_dependency_table *table = 2236 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 2237 u32 stable_p_state_sclk = 0; 2238 struct amdgpu_clock_and_voltage_limits *max_limits = 2239 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 2240 2241 if (new_rps->vce_active) { 2242 new_rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk; 2243 new_rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk; 2244 } else { 2245 new_rps->evclk = 0; 2246 new_rps->ecclk = 0; 2247 } 2248 2249 mclk = max_limits->mclk; 2250 sclk = min_sclk; 2251 2252 if (pi->caps_stable_p_state) { 2253 stable_p_state_sclk = (max_limits->sclk * 75) / 100; 2254 2255 for (i = table->count - 1; i >= 0; i--) { 2256 if (stable_p_state_sclk >= table->entries[i].clk) { 2257 stable_p_state_sclk = table->entries[i].clk; 2258 break; 2259 } 2260 } 2261 2262 if (i > 0) 2263 stable_p_state_sclk = table->entries[0].clk; 2264 2265 sclk = stable_p_state_sclk; 2266 } 2267 2268 if (new_rps->vce_active) { 2269 if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk) 2270 sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk; 2271 } 2272 2273 ps->need_dfs_bypass = true; 2274 2275 for (i = 0; i < ps->num_levels; i++) { 2276 if (ps->levels[i].sclk < sclk) 2277 ps->levels[i].sclk = sclk; 2278 } 2279 2280 if (table && table->count) { 2281 for (i = 0; i < ps->num_levels; i++) { 2282 if (pi->high_voltage_t && 2283 (pi->high_voltage_t < 2284 kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) { 2285 kv_get_high_voltage_limit(adev, &limit); 2286 ps->levels[i].sclk = table->entries[limit].clk; 2287 } 2288 } 2289 } else { 2290 struct sumo_sclk_voltage_mapping_table *table = 2291 &pi->sys_info.sclk_voltage_mapping_table; 2292 2293 for (i = 0; i < ps->num_levels; i++) { 2294 if (pi->high_voltage_t && 2295 (pi->high_voltage_t < 2296 kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) { 2297 kv_get_high_voltage_limit(adev, &limit); 2298 ps->levels[i].sclk = table->entries[limit].sclk_frequency; 2299 } 2300 } 2301 } 2302 2303 if (pi->caps_stable_p_state) { 2304 for (i = 0; i < ps->num_levels; i++) { 2305 ps->levels[i].sclk = stable_p_state_sclk; 2306 } 2307 } 2308 2309 pi->video_start = new_rps->dclk || new_rps->vclk || 2310 new_rps->evclk || new_rps->ecclk; 2311 2312 if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 2313 ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) 2314 pi->battery_state = true; 2315 else 2316 pi->battery_state = false; 2317 2318 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { 2319 ps->dpm0_pg_nb_ps_lo = 0x1; 2320 ps->dpm0_pg_nb_ps_hi = 0x0; 2321 ps->dpmx_nb_ps_lo = 0x1; 2322 ps->dpmx_nb_ps_hi = 0x0; 2323 } else { 2324 ps->dpm0_pg_nb_ps_lo = 0x3; 2325 ps->dpm0_pg_nb_ps_hi = 0x0; 2326 ps->dpmx_nb_ps_lo = 0x3; 2327 ps->dpmx_nb_ps_hi = 0x0; 2328 2329 if (pi->sys_info.nb_dpm_enable) { 2330 force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) || 2331 pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) || 2332 pi->disable_nb_ps3_in_battery; 2333 ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3; 2334 ps->dpm0_pg_nb_ps_hi = 0x2; 2335 ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3; 2336 ps->dpmx_nb_ps_hi = 0x2; 2337 } 2338 } 2339} 2340 2341static void kv_dpm_power_level_enabled_for_throttle(struct amdgpu_device *adev, 2342 u32 index, bool enable) 2343{ 2344 struct kv_power_info *pi = kv_get_pi(adev); 2345 2346 pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0; 2347} 2348 2349static int kv_calculate_ds_divider(struct amdgpu_device *adev) 2350{ 2351 struct kv_power_info *pi = kv_get_pi(adev); 2352 u32 sclk_in_sr = 10000; /* ??? */ 2353 u32 i; 2354 2355 if (pi->lowest_valid > pi->highest_valid) 2356 return -EINVAL; 2357 2358 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { 2359 pi->graphics_level[i].DeepSleepDivId = 2360 kv_get_sleep_divider_id_from_clock(adev, 2361 be32_to_cpu(pi->graphics_level[i].SclkFrequency), 2362 sclk_in_sr); 2363 } 2364 return 0; 2365} 2366 2367static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev) 2368{ 2369 struct kv_power_info *pi = kv_get_pi(adev); 2370 u32 i; 2371 bool force_high; 2372 struct amdgpu_clock_and_voltage_limits *max_limits = 2373 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 2374 u32 mclk = max_limits->mclk; 2375 2376 if (pi->lowest_valid > pi->highest_valid) 2377 return -EINVAL; 2378 2379 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { 2380 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { 2381 pi->graphics_level[i].GnbSlow = 1; 2382 pi->graphics_level[i].ForceNbPs1 = 0; 2383 pi->graphics_level[i].UpH = 0; 2384 } 2385 2386 if (!pi->sys_info.nb_dpm_enable) 2387 return 0; 2388 2389 force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) || 2390 (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start); 2391 2392 if (force_high) { 2393 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) 2394 pi->graphics_level[i].GnbSlow = 0; 2395 } else { 2396 if (pi->battery_state) 2397 pi->graphics_level[0].ForceNbPs1 = 1; 2398 2399 pi->graphics_level[1].GnbSlow = 0; 2400 pi->graphics_level[2].GnbSlow = 0; 2401 pi->graphics_level[3].GnbSlow = 0; 2402 pi->graphics_level[4].GnbSlow = 0; 2403 } 2404 } else { 2405 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { 2406 pi->graphics_level[i].GnbSlow = 1; 2407 pi->graphics_level[i].ForceNbPs1 = 0; 2408 pi->graphics_level[i].UpH = 0; 2409 } 2410 2411 if (pi->sys_info.nb_dpm_enable && pi->battery_state) { 2412 pi->graphics_level[pi->lowest_valid].UpH = 0x28; 2413 pi->graphics_level[pi->lowest_valid].GnbSlow = 0; 2414 if (pi->lowest_valid != pi->highest_valid) 2415 pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1; 2416 } 2417 } 2418 return 0; 2419} 2420 2421static int kv_calculate_dpm_settings(struct amdgpu_device *adev) 2422{ 2423 struct kv_power_info *pi = kv_get_pi(adev); 2424 u32 i; 2425 2426 if (pi->lowest_valid > pi->highest_valid) 2427 return -EINVAL; 2428 2429 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) 2430 pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0; 2431 2432 return 0; 2433} 2434 2435static void kv_init_graphics_levels(struct amdgpu_device *adev) 2436{ 2437 struct kv_power_info *pi = kv_get_pi(adev); 2438 u32 i; 2439 struct amdgpu_clock_voltage_dependency_table *table = 2440 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 2441 2442 if (table && table->count) { 2443 u32 vid_2bit; 2444 2445 pi->graphics_dpm_level_count = 0; 2446 for (i = 0; i < table->count; i++) { 2447 if (pi->high_voltage_t && 2448 (pi->high_voltage_t < 2449 kv_convert_8bit_index_to_voltage(adev, table->entries[i].v))) 2450 break; 2451 2452 kv_set_divider_value(adev, i, table->entries[i].clk); 2453 vid_2bit = kv_convert_vid7_to_vid2(adev, 2454 &pi->sys_info.vid_mapping_table, 2455 table->entries[i].v); 2456 kv_set_vid(adev, i, vid_2bit); 2457 kv_set_at(adev, i, pi->at[i]); 2458 kv_dpm_power_level_enabled_for_throttle(adev, i, true); 2459 pi->graphics_dpm_level_count++; 2460 } 2461 } else { 2462 struct sumo_sclk_voltage_mapping_table *table = 2463 &pi->sys_info.sclk_voltage_mapping_table; 2464 2465 pi->graphics_dpm_level_count = 0; 2466 for (i = 0; i < table->num_max_dpm_entries; i++) { 2467 if (pi->high_voltage_t && 2468 pi->high_voltage_t < 2469 kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit)) 2470 break; 2471 2472 kv_set_divider_value(adev, i, table->entries[i].sclk_frequency); 2473 kv_set_vid(adev, i, table->entries[i].vid_2bit); 2474 kv_set_at(adev, i, pi->at[i]); 2475 kv_dpm_power_level_enabled_for_throttle(adev, i, true); 2476 pi->graphics_dpm_level_count++; 2477 } 2478 } 2479 2480 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) 2481 kv_dpm_power_level_enable(adev, i, false); 2482} 2483 2484static void kv_enable_new_levels(struct amdgpu_device *adev) 2485{ 2486 struct kv_power_info *pi = kv_get_pi(adev); 2487 u32 i; 2488 2489 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) { 2490 if (i >= pi->lowest_valid && i <= pi->highest_valid) 2491 kv_dpm_power_level_enable(adev, i, true); 2492 } 2493} 2494 2495static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level) 2496{ 2497 u32 new_mask = (1 << level); 2498 2499 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, 2500 PPSMC_MSG_SCLKDPM_SetEnabledMask, 2501 new_mask); 2502} 2503 2504static int kv_set_enabled_levels(struct amdgpu_device *adev) 2505{ 2506 struct kv_power_info *pi = kv_get_pi(adev); 2507 u32 i, new_mask = 0; 2508 2509 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) 2510 new_mask |= (1 << i); 2511 2512 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, 2513 PPSMC_MSG_SCLKDPM_SetEnabledMask, 2514 new_mask); 2515} 2516 2517static void kv_program_nbps_index_settings(struct amdgpu_device *adev, 2518 struct amdgpu_ps *new_rps) 2519{ 2520 struct kv_ps *new_ps = kv_get_ps(new_rps); 2521 struct kv_power_info *pi = kv_get_pi(adev); 2522 u32 nbdpmconfig1; 2523 2524 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 2525 return; 2526 2527 if (pi->sys_info.nb_dpm_enable) { 2528 nbdpmconfig1 = RREG32_SMC(ixNB_DPM_CONFIG_1); 2529 nbdpmconfig1 &= ~(NB_DPM_CONFIG_1__Dpm0PgNbPsLo_MASK | 2530 NB_DPM_CONFIG_1__Dpm0PgNbPsHi_MASK | 2531 NB_DPM_CONFIG_1__DpmXNbPsLo_MASK | 2532 NB_DPM_CONFIG_1__DpmXNbPsHi_MASK); 2533 nbdpmconfig1 |= (new_ps->dpm0_pg_nb_ps_lo << NB_DPM_CONFIG_1__Dpm0PgNbPsLo__SHIFT) | 2534 (new_ps->dpm0_pg_nb_ps_hi << NB_DPM_CONFIG_1__Dpm0PgNbPsHi__SHIFT) | 2535 (new_ps->dpmx_nb_ps_lo << NB_DPM_CONFIG_1__DpmXNbPsLo__SHIFT) | 2536 (new_ps->dpmx_nb_ps_hi << NB_DPM_CONFIG_1__DpmXNbPsHi__SHIFT); 2537 WREG32_SMC(ixNB_DPM_CONFIG_1, nbdpmconfig1); 2538 } 2539} 2540 2541static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, 2542 int min_temp, int max_temp) 2543{ 2544 int low_temp = 0 * 1000; 2545 int high_temp = 255 * 1000; 2546 u32 tmp; 2547 2548 if (low_temp < min_temp) 2549 low_temp = min_temp; 2550 if (high_temp > max_temp) 2551 high_temp = max_temp; 2552 if (high_temp < low_temp) { 2553 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); 2554 return -EINVAL; 2555 } 2556 2557 tmp = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 2558 tmp &= ~(CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK | 2559 CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK); 2560 tmp |= ((49 + (high_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT) | 2561 ((49 + (low_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT); 2562 WREG32_SMC(ixCG_THERMAL_INT_CTRL, tmp); 2563 2564 adev->pm.dpm.thermal.min_temp = low_temp; 2565 adev->pm.dpm.thermal.max_temp = high_temp; 2566 2567 return 0; 2568} 2569 2570union igp_info { 2571 struct _ATOM_INTEGRATED_SYSTEM_INFO info; 2572 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; 2573 struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5; 2574 struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6; 2575 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; 2576 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8; 2577}; 2578 2579static int kv_parse_sys_info_table(struct amdgpu_device *adev) 2580{ 2581 struct kv_power_info *pi = kv_get_pi(adev); 2582 struct amdgpu_mode_info *mode_info = &adev->mode_info; 2583 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); 2584 union igp_info *igp_info; 2585 u8 frev, crev; 2586 u16 data_offset; 2587 int i; 2588 2589 if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 2590 &frev, &crev, &data_offset)) { 2591 igp_info = (union igp_info *)(mode_info->atom_context->bios + 2592 data_offset); 2593 2594 if (crev != 8) { 2595 DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); 2596 return -EINVAL; 2597 } 2598 pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock); 2599 pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock); 2600 pi->sys_info.bootup_nb_voltage_index = 2601 le16_to_cpu(igp_info->info_8.usBootUpNBVoltage); 2602 if (igp_info->info_8.ucHtcTmpLmt == 0) 2603 pi->sys_info.htc_tmp_lmt = 203; 2604 else 2605 pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt; 2606 if (igp_info->info_8.ucHtcHystLmt == 0) 2607 pi->sys_info.htc_hyst_lmt = 5; 2608 else 2609 pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt; 2610 if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) { 2611 DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n"); 2612 } 2613 2614 if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3)) 2615 pi->sys_info.nb_dpm_enable = true; 2616 else 2617 pi->sys_info.nb_dpm_enable = false; 2618 2619 for (i = 0; i < KV_NUM_NBPSTATES; i++) { 2620 pi->sys_info.nbp_memory_clock[i] = 2621 le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]); 2622 pi->sys_info.nbp_n_clock[i] = 2623 le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]); 2624 } 2625 if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) & 2626 SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) 2627 pi->caps_enable_dfs_bypass = true; 2628 2629 sumo_construct_sclk_voltage_mapping_table(adev, 2630 &pi->sys_info.sclk_voltage_mapping_table, 2631 igp_info->info_8.sAvail_SCLK); 2632 2633 sumo_construct_vid_mapping_table(adev, 2634 &pi->sys_info.vid_mapping_table, 2635 igp_info->info_8.sAvail_SCLK); 2636 2637 kv_construct_max_power_limits_table(adev, 2638 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac); 2639 } 2640 return 0; 2641} 2642 2643union power_info { 2644 struct _ATOM_POWERPLAY_INFO info; 2645 struct _ATOM_POWERPLAY_INFO_V2 info_2; 2646 struct _ATOM_POWERPLAY_INFO_V3 info_3; 2647 struct _ATOM_PPLIB_POWERPLAYTABLE pplib; 2648 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; 2649 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; 2650}; 2651 2652union pplib_clock_info { 2653 struct _ATOM_PPLIB_R600_CLOCK_INFO r600; 2654 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; 2655 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; 2656 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; 2657}; 2658 2659union pplib_power_state { 2660 struct _ATOM_PPLIB_STATE v1; 2661 struct _ATOM_PPLIB_STATE_V2 v2; 2662}; 2663 2664static void kv_patch_boot_state(struct amdgpu_device *adev, 2665 struct kv_ps *ps) 2666{ 2667 struct kv_power_info *pi = kv_get_pi(adev); 2668 2669 ps->num_levels = 1; 2670 ps->levels[0] = pi->boot_pl; 2671} 2672 2673static void kv_parse_pplib_non_clock_info(struct amdgpu_device *adev, 2674 struct amdgpu_ps *rps, 2675 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, 2676 u8 table_rev) 2677{ 2678 struct kv_ps *ps = kv_get_ps(rps); 2679 2680 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); 2681 rps->class = le16_to_cpu(non_clock_info->usClassification); 2682 rps->class2 = le16_to_cpu(non_clock_info->usClassification2); 2683 2684 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { 2685 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); 2686 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); 2687 } else { 2688 rps->vclk = 0; 2689 rps->dclk = 0; 2690 } 2691 2692 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { 2693 adev->pm.dpm.boot_ps = rps; 2694 kv_patch_boot_state(adev, ps); 2695 } 2696 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) 2697 adev->pm.dpm.uvd_ps = rps; 2698} 2699 2700static void kv_parse_pplib_clock_info(struct amdgpu_device *adev, 2701 struct amdgpu_ps *rps, int index, 2702 union pplib_clock_info *clock_info) 2703{ 2704 struct kv_power_info *pi = kv_get_pi(adev); 2705 struct kv_ps *ps = kv_get_ps(rps); 2706 struct kv_pl *pl = &ps->levels[index]; 2707 u32 sclk; 2708 2709 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); 2710 sclk |= clock_info->sumo.ucEngineClockHigh << 16; 2711 pl->sclk = sclk; 2712 pl->vddc_index = clock_info->sumo.vddcIndex; 2713 2714 ps->num_levels = index + 1; 2715 2716 if (pi->caps_sclk_ds) { 2717 pl->ds_divider_index = 5; 2718 pl->ss_divider_index = 5; 2719 } 2720} 2721 2722static int kv_parse_power_table(struct amdgpu_device *adev) 2723{ 2724 struct amdgpu_mode_info *mode_info = &adev->mode_info; 2725 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; 2726 union pplib_power_state *power_state; 2727 int i, j, k, non_clock_array_index, clock_array_index; 2728 union pplib_clock_info *clock_info; 2729 struct _StateArray *state_array; 2730 struct _ClockInfoArray *clock_info_array; 2731 struct _NonClockInfoArray *non_clock_info_array; 2732 union power_info *power_info; 2733 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); 2734 u16 data_offset; 2735 u8 frev, crev; 2736 u8 *power_state_offset; 2737 struct kv_ps *ps; 2738 2739 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 2740 &frev, &crev, &data_offset)) 2741 return -EINVAL; 2742 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 2743 2744 amdgpu_add_thermal_controller(adev); 2745 2746 state_array = (struct _StateArray *) 2747 (mode_info->atom_context->bios + data_offset + 2748 le16_to_cpu(power_info->pplib.usStateArrayOffset)); 2749 clock_info_array = (struct _ClockInfoArray *) 2750 (mode_info->atom_context->bios + data_offset + 2751 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); 2752 non_clock_info_array = (struct _NonClockInfoArray *) 2753 (mode_info->atom_context->bios + data_offset + 2754 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); 2755 2756 adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) * 2757 state_array->ucNumEntries, GFP_KERNEL); 2758 if (!adev->pm.dpm.ps) 2759 return -ENOMEM; 2760 power_state_offset = (u8 *)state_array->states; 2761 for (i = 0; i < state_array->ucNumEntries; i++) { 2762 u8 *idx; 2763 power_state = (union pplib_power_state *)power_state_offset; 2764 non_clock_array_index = power_state->v2.nonClockInfoIndex; 2765 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 2766 &non_clock_info_array->nonClockInfo[non_clock_array_index]; 2767 ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL); 2768 if (ps == NULL) { 2769 kfree(adev->pm.dpm.ps); 2770 return -ENOMEM; 2771 } 2772 adev->pm.dpm.ps[i].ps_priv = ps; 2773 k = 0; 2774 idx = (u8 *)&power_state->v2.clockInfoIndex[0]; 2775 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { 2776 clock_array_index = idx[j]; 2777 if (clock_array_index >= clock_info_array->ucNumEntries) 2778 continue; 2779 if (k >= SUMO_MAX_HARDWARE_POWERLEVELS) 2780 break; 2781 clock_info = (union pplib_clock_info *) 2782 ((u8 *)&clock_info_array->clockInfo[0] + 2783 (clock_array_index * clock_info_array->ucEntrySize)); 2784 kv_parse_pplib_clock_info(adev, 2785 &adev->pm.dpm.ps[i], k, 2786 clock_info); 2787 k++; 2788 } 2789 kv_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i], 2790 non_clock_info, 2791 non_clock_info_array->ucEntrySize); 2792 power_state_offset += 2 + power_state->v2.ucNumDPMLevels; 2793 } 2794 adev->pm.dpm.num_ps = state_array->ucNumEntries; 2795 2796 /* fill in the vce power states */ 2797 for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) { 2798 u32 sclk; 2799 clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; 2800 clock_info = (union pplib_clock_info *) 2801 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; 2802 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); 2803 sclk |= clock_info->sumo.ucEngineClockHigh << 16; 2804 adev->pm.dpm.vce_states[i].sclk = sclk; 2805 adev->pm.dpm.vce_states[i].mclk = 0; 2806 } 2807 2808 return 0; 2809} 2810 2811static int kv_dpm_init(struct amdgpu_device *adev) 2812{ 2813 struct kv_power_info *pi; 2814 int ret, i; 2815 2816 pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL); 2817 if (pi == NULL) 2818 return -ENOMEM; 2819 adev->pm.dpm.priv = pi; 2820 2821 ret = amdgpu_get_platform_caps(adev); 2822 if (ret) 2823 return ret; 2824 2825 ret = amdgpu_parse_extended_power_table(adev); 2826 if (ret) 2827 return ret; 2828 2829 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) 2830 pi->at[i] = TRINITY_AT_DFLT; 2831 2832 pi->sram_end = SMC_RAM_END; 2833 2834 pi->enable_nb_dpm = true; 2835 2836 pi->caps_power_containment = true; 2837 pi->caps_cac = true; 2838 pi->enable_didt = false; 2839 if (pi->enable_didt) { 2840 pi->caps_sq_ramping = true; 2841 pi->caps_db_ramping = true; 2842 pi->caps_td_ramping = true; 2843 pi->caps_tcp_ramping = true; 2844 } 2845 2846 pi->caps_sclk_ds = true; 2847 pi->enable_auto_thermal_throttling = true; 2848 pi->disable_nb_ps3_in_battery = false; 2849 if (amdgpu_bapm == 0) 2850 pi->bapm_enable = false; 2851 else 2852 pi->bapm_enable = true; 2853 pi->voltage_drop_t = 0; 2854 pi->caps_sclk_throttle_low_notification = false; 2855 pi->caps_fps = false; /* true? */ 2856 pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false; 2857 pi->caps_uvd_dpm = true; 2858 pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false; 2859 pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false; 2860 pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false; 2861 pi->caps_stable_p_state = false; 2862 2863 ret = kv_parse_sys_info_table(adev); 2864 if (ret) 2865 return ret; 2866 2867 kv_patch_voltage_values(adev); 2868 kv_construct_boot_state(adev); 2869 2870 ret = kv_parse_power_table(adev); 2871 if (ret) 2872 return ret; 2873 2874 pi->enable_dpm = true; 2875 2876 return 0; 2877} 2878 2879static void 2880kv_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, 2881 struct seq_file *m) 2882{ 2883 struct kv_power_info *pi = kv_get_pi(adev); 2884 u32 current_index = 2885 (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & 2886 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> 2887 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; 2888 u32 sclk, tmp; 2889 u16 vddc; 2890 2891 if (current_index >= SMU__NUM_SCLK_DPM_STATE) { 2892 seq_printf(m, "invalid dpm profile %d\n", current_index); 2893 } else { 2894 sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency); 2895 tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) & 2896 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >> 2897 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT; 2898 vddc = kv_convert_8bit_index_to_voltage(adev, (u16)tmp); 2899 seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en"); 2900 seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en"); 2901 seq_printf(m, "power level %d sclk: %u vddc: %u\n", 2902 current_index, sclk, vddc); 2903 } 2904} 2905 2906static void 2907kv_dpm_print_power_state(struct amdgpu_device *adev, 2908 struct amdgpu_ps *rps) 2909{ 2910 int i; 2911 struct kv_ps *ps = kv_get_ps(rps); 2912 2913 amdgpu_dpm_print_class_info(rps->class, rps->class2); 2914 amdgpu_dpm_print_cap_info(rps->caps); 2915 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); 2916 for (i = 0; i < ps->num_levels; i++) { 2917 struct kv_pl *pl = &ps->levels[i]; 2918 printk("\t\tpower level %d sclk: %u vddc: %u\n", 2919 i, pl->sclk, 2920 kv_convert_8bit_index_to_voltage(adev, pl->vddc_index)); 2921 } 2922 amdgpu_dpm_print_ps_status(adev, rps); 2923} 2924 2925static void kv_dpm_fini(struct amdgpu_device *adev) 2926{ 2927 int i; 2928 2929 for (i = 0; i < adev->pm.dpm.num_ps; i++) { 2930 kfree(adev->pm.dpm.ps[i].ps_priv); 2931 } 2932 kfree(adev->pm.dpm.ps); 2933 kfree(adev->pm.dpm.priv); 2934 amdgpu_free_extended_power_table(adev); 2935} 2936 2937static void kv_dpm_display_configuration_changed(struct amdgpu_device *adev) 2938{ 2939 2940} 2941 2942static u32 kv_dpm_get_sclk(struct amdgpu_device *adev, bool low) 2943{ 2944 struct kv_power_info *pi = kv_get_pi(adev); 2945 struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps); 2946 2947 if (low) 2948 return requested_state->levels[0].sclk; 2949 else 2950 return requested_state->levels[requested_state->num_levels - 1].sclk; 2951} 2952 2953static u32 kv_dpm_get_mclk(struct amdgpu_device *adev, bool low) 2954{ 2955 struct kv_power_info *pi = kv_get_pi(adev); 2956 2957 return pi->sys_info.bootup_uma_clk; 2958} 2959 2960/* get temperature in millidegrees */ 2961static int kv_dpm_get_temp(struct amdgpu_device *adev) 2962{ 2963 u32 temp; 2964 int actual_temp = 0; 2965 2966 temp = RREG32_SMC(0xC0300E0C); 2967 2968 if (temp) 2969 actual_temp = (temp / 8) - 49; 2970 else 2971 actual_temp = 0; 2972 2973 actual_temp = actual_temp * 1000; 2974 2975 return actual_temp; 2976} 2977 2978static int kv_dpm_early_init(void *handle) 2979{ 2980 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2981 2982 kv_dpm_set_dpm_funcs(adev); 2983 kv_dpm_set_irq_funcs(adev); 2984 2985 return 0; 2986} 2987 2988static int kv_dpm_late_init(void *handle) 2989{ 2990 /* powerdown unused blocks for now */ 2991 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2992 int ret; 2993 2994 if (!amdgpu_dpm) 2995 return 0; 2996 2997 /* init the sysfs and debugfs files late */ 2998 ret = amdgpu_pm_sysfs_init(adev); 2999 if (ret) 3000 return ret; 3001 3002 kv_dpm_powergate_acp(adev, true); 3003 kv_dpm_powergate_samu(adev, true); 3004 kv_dpm_powergate_vce(adev, true); 3005 kv_dpm_powergate_uvd(adev, true); 3006 3007 return 0; 3008} 3009 3010static int kv_dpm_sw_init(void *handle) 3011{ 3012 int ret; 3013 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3014 3015 ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq); 3016 if (ret) 3017 return ret; 3018 3019 ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq); 3020 if (ret) 3021 return ret; 3022 3023 /* default to balanced state */ 3024 adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; 3025 adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; 3026 adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO; 3027 adev->pm.default_sclk = adev->clock.default_sclk; 3028 adev->pm.default_mclk = adev->clock.default_mclk; 3029 adev->pm.current_sclk = adev->clock.default_sclk; 3030 adev->pm.current_mclk = adev->clock.default_mclk; 3031 adev->pm.int_thermal_type = THERMAL_TYPE_NONE; 3032 3033 if (amdgpu_dpm == 0) 3034 return 0; 3035 3036 INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler); 3037 mutex_lock(&adev->pm.mutex); 3038 ret = kv_dpm_init(adev); 3039 if (ret) 3040 goto dpm_failed; 3041 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; 3042 if (amdgpu_dpm == 1) 3043 amdgpu_pm_print_power_states(adev); 3044 mutex_unlock(&adev->pm.mutex); 3045 DRM_INFO("amdgpu: dpm initialized\n"); 3046 3047 return 0; 3048 3049dpm_failed: 3050 kv_dpm_fini(adev); 3051 mutex_unlock(&adev->pm.mutex); 3052 DRM_ERROR("amdgpu: dpm initialization failed\n"); 3053 return ret; 3054} 3055 3056static int kv_dpm_sw_fini(void *handle) 3057{ 3058 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3059 3060 mutex_lock(&adev->pm.mutex); 3061 amdgpu_pm_sysfs_fini(adev); 3062 kv_dpm_fini(adev); 3063 mutex_unlock(&adev->pm.mutex); 3064 3065 return 0; 3066} 3067 3068static int kv_dpm_hw_init(void *handle) 3069{ 3070 int ret; 3071 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3072 3073 mutex_lock(&adev->pm.mutex); 3074 kv_dpm_setup_asic(adev); 3075 ret = kv_dpm_enable(adev); 3076 if (ret) 3077 adev->pm.dpm_enabled = false; 3078 else 3079 adev->pm.dpm_enabled = true; 3080 mutex_unlock(&adev->pm.mutex); 3081 3082 return ret; 3083} 3084 3085static int kv_dpm_hw_fini(void *handle) 3086{ 3087 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3088 3089 if (adev->pm.dpm_enabled) { 3090 mutex_lock(&adev->pm.mutex); 3091 kv_dpm_disable(adev); 3092 mutex_unlock(&adev->pm.mutex); 3093 } 3094 3095 return 0; 3096} 3097 3098static int kv_dpm_suspend(void *handle) 3099{ 3100 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3101 3102 if (adev->pm.dpm_enabled) { 3103 mutex_lock(&adev->pm.mutex); 3104 /* disable dpm */ 3105 kv_dpm_disable(adev); 3106 /* reset the power state */ 3107 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; 3108 mutex_unlock(&adev->pm.mutex); 3109 } 3110 return 0; 3111} 3112 3113static int kv_dpm_resume(void *handle) 3114{ 3115 int ret; 3116 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3117 3118 if (adev->pm.dpm_enabled) { 3119 /* asic init will reset to the boot state */ 3120 mutex_lock(&adev->pm.mutex); 3121 kv_dpm_setup_asic(adev); 3122 ret = kv_dpm_enable(adev); 3123 if (ret) 3124 adev->pm.dpm_enabled = false; 3125 else 3126 adev->pm.dpm_enabled = true; 3127 mutex_unlock(&adev->pm.mutex); 3128 if (adev->pm.dpm_enabled) 3129 amdgpu_pm_compute_clocks(adev); 3130 } 3131 return 0; 3132} 3133 3134static bool kv_dpm_is_idle(void *handle) 3135{ 3136 return true; 3137} 3138 3139static int kv_dpm_wait_for_idle(void *handle) 3140{ 3141 return 0; 3142} 3143 3144 3145static int kv_dpm_soft_reset(void *handle) 3146{ 3147 return 0; 3148} 3149 3150static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev, 3151 struct amdgpu_irq_src *src, 3152 unsigned type, 3153 enum amdgpu_interrupt_state state) 3154{ 3155 u32 cg_thermal_int; 3156 3157 switch (type) { 3158 case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH: 3159 switch (state) { 3160 case AMDGPU_IRQ_STATE_DISABLE: 3161 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 3162 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; 3163 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); 3164 break; 3165 case AMDGPU_IRQ_STATE_ENABLE: 3166 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 3167 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; 3168 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); 3169 break; 3170 default: 3171 break; 3172 } 3173 break; 3174 3175 case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW: 3176 switch (state) { 3177 case AMDGPU_IRQ_STATE_DISABLE: 3178 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 3179 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; 3180 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); 3181 break; 3182 case AMDGPU_IRQ_STATE_ENABLE: 3183 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 3184 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; 3185 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); 3186 break; 3187 default: 3188 break; 3189 } 3190 break; 3191 3192 default: 3193 break; 3194 } 3195 return 0; 3196} 3197 3198static int kv_dpm_process_interrupt(struct amdgpu_device *adev, 3199 struct amdgpu_irq_src *source, 3200 struct amdgpu_iv_entry *entry) 3201{ 3202 bool queue_thermal = false; 3203 3204 if (entry == NULL) 3205 return -EINVAL; 3206 3207 switch (entry->src_id) { 3208 case 230: /* thermal low to high */ 3209 DRM_DEBUG("IH: thermal low to high\n"); 3210 adev->pm.dpm.thermal.high_to_low = false; 3211 queue_thermal = true; 3212 break; 3213 case 231: /* thermal high to low */ 3214 DRM_DEBUG("IH: thermal high to low\n"); 3215 adev->pm.dpm.thermal.high_to_low = true; 3216 queue_thermal = true; 3217 break; 3218 default: 3219 break; 3220 } 3221 3222 if (queue_thermal) 3223 schedule_work(&adev->pm.dpm.thermal.work); 3224 3225 return 0; 3226} 3227 3228static int kv_dpm_set_clockgating_state(void *handle, 3229 enum amd_clockgating_state state) 3230{ 3231 return 0; 3232} 3233 3234static int kv_dpm_set_powergating_state(void *handle, 3235 enum amd_powergating_state state) 3236{ 3237 return 0; 3238} 3239 3240const struct amd_ip_funcs kv_dpm_ip_funcs = { 3241 .name = "kv_dpm", 3242 .early_init = kv_dpm_early_init, 3243 .late_init = kv_dpm_late_init, 3244 .sw_init = kv_dpm_sw_init, 3245 .sw_fini = kv_dpm_sw_fini, 3246 .hw_init = kv_dpm_hw_init, 3247 .hw_fini = kv_dpm_hw_fini, 3248 .suspend = kv_dpm_suspend, 3249 .resume = kv_dpm_resume, 3250 .is_idle = kv_dpm_is_idle, 3251 .wait_for_idle = kv_dpm_wait_for_idle, 3252 .soft_reset = kv_dpm_soft_reset, 3253 .set_clockgating_state = kv_dpm_set_clockgating_state, 3254 .set_powergating_state = kv_dpm_set_powergating_state, 3255}; 3256 3257static const struct amdgpu_dpm_funcs kv_dpm_funcs = { 3258 .get_temperature = &kv_dpm_get_temp, 3259 .pre_set_power_state = &kv_dpm_pre_set_power_state, 3260 .set_power_state = &kv_dpm_set_power_state, 3261 .post_set_power_state = &kv_dpm_post_set_power_state, 3262 .display_configuration_changed = &kv_dpm_display_configuration_changed, 3263 .get_sclk = &kv_dpm_get_sclk, 3264 .get_mclk = &kv_dpm_get_mclk, 3265 .print_power_state = &kv_dpm_print_power_state, 3266 .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level, 3267 .force_performance_level = &kv_dpm_force_performance_level, 3268 .powergate_uvd = &kv_dpm_powergate_uvd, 3269 .enable_bapm = &kv_dpm_enable_bapm, 3270}; 3271 3272static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev) 3273{ 3274 if (adev->pm.funcs == NULL) 3275 adev->pm.funcs = &kv_dpm_funcs; 3276} 3277 3278static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = { 3279 .set = kv_dpm_set_interrupt_state, 3280 .process = kv_dpm_process_interrupt, 3281}; 3282 3283static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev) 3284{ 3285 adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; 3286 adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs; 3287}