Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v5.0-rc3 3383 lines 94 kB view raw
1/* 2 * Copyright 2013 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24#include <drm/drmP.h> 25#include "amdgpu.h" 26#include "amdgpu_pm.h" 27#include "cikd.h" 28#include "atom.h" 29#include "amdgpu_atombios.h" 30#include "amdgpu_dpm.h" 31#include "kv_dpm.h" 32#include "gfx_v7_0.h" 33#include <linux/seq_file.h> 34 35#include "smu/smu_7_0_0_d.h" 36#include "smu/smu_7_0_0_sh_mask.h" 37 38#include "gca/gfx_7_2_d.h" 39#include "gca/gfx_7_2_sh_mask.h" 40 41#define KV_MAX_DEEPSLEEP_DIVIDER_ID 5 42#define KV_MINIMUM_ENGINE_CLOCK 800 43#define SMC_RAM_END 0x40000 44 45static const struct amd_pm_funcs kv_dpm_funcs; 46 47static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev); 48static int kv_enable_nb_dpm(struct amdgpu_device *adev, 49 bool enable); 50static void kv_init_graphics_levels(struct amdgpu_device *adev); 51static int kv_calculate_ds_divider(struct amdgpu_device *adev); 52static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev); 53static int kv_calculate_dpm_settings(struct amdgpu_device *adev); 54static void kv_enable_new_levels(struct amdgpu_device *adev); 55static void kv_program_nbps_index_settings(struct amdgpu_device *adev, 56 struct amdgpu_ps *new_rps); 57static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level); 58static int kv_set_enabled_levels(struct amdgpu_device *adev); 59static int kv_force_dpm_highest(struct amdgpu_device *adev); 60static int kv_force_dpm_lowest(struct amdgpu_device *adev); 61static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, 62 struct amdgpu_ps *new_rps, 63 struct amdgpu_ps *old_rps); 64static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, 65 int min_temp, int max_temp); 66static int kv_init_fps_limits(struct amdgpu_device *adev); 67 68static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); 69static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); 70 71 72static u32 kv_convert_vid2_to_vid7(struct amdgpu_device *adev, 73 struct sumo_vid_mapping_table *vid_mapping_table, 74 u32 vid_2bit) 75{ 76 struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table = 77 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 78 u32 i; 79 80 if (vddc_sclk_table && vddc_sclk_table->count) { 81 if (vid_2bit < vddc_sclk_table->count) 82 return vddc_sclk_table->entries[vid_2bit].v; 83 else 84 return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v; 85 } else { 86 for (i = 0; i < 
vid_mapping_table->num_entries; i++) { 87 if (vid_mapping_table->entries[i].vid_2bit == vid_2bit) 88 return vid_mapping_table->entries[i].vid_7bit; 89 } 90 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit; 91 } 92} 93 94static u32 kv_convert_vid7_to_vid2(struct amdgpu_device *adev, 95 struct sumo_vid_mapping_table *vid_mapping_table, 96 u32 vid_7bit) 97{ 98 struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table = 99 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 100 u32 i; 101 102 if (vddc_sclk_table && vddc_sclk_table->count) { 103 for (i = 0; i < vddc_sclk_table->count; i++) { 104 if (vddc_sclk_table->entries[i].v == vid_7bit) 105 return i; 106 } 107 return vddc_sclk_table->count - 1; 108 } else { 109 for (i = 0; i < vid_mapping_table->num_entries; i++) { 110 if (vid_mapping_table->entries[i].vid_7bit == vid_7bit) 111 return vid_mapping_table->entries[i].vid_2bit; 112 } 113 114 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit; 115 } 116} 117 118static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable) 119{ 120/* This bit selects who handles display phy powergating. 121 * Clear the bit to let atom handle it. 122 * Set it to let the driver handle it. 123 * For now we just let atom handle it. 124 */ 125#if 0 126 u32 v = RREG32(mmDOUT_SCRATCH3); 127 128 if (enable) 129 v |= 0x4; 130 else 131 v &= 0xFFFFFFFB; 132 133 WREG32(mmDOUT_SCRATCH3, v); 134#endif 135} 136 137static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev, 138 struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table, 139 ATOM_AVAILABLE_SCLK_LIST *table) 140{ 141 u32 i; 142 u32 n = 0; 143 u32 prev_sclk = 0; 144 145 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { 146 if (table[i].ulSupportedSCLK > prev_sclk) { 147 sclk_voltage_mapping_table->entries[n].sclk_frequency = 148 table[i].ulSupportedSCLK; 149 sclk_voltage_mapping_table->entries[n].vid_2bit = 150 table[i].usVoltageIndex; 151 prev_sclk = table[i].ulSupportedSCLK; 152 n++; 153 } 154 } 155 156 sclk_voltage_mapping_table->num_max_dpm_entries = n; 157} 158 159static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev, 160 struct sumo_vid_mapping_table *vid_mapping_table, 161 ATOM_AVAILABLE_SCLK_LIST *table) 162{ 163 u32 i, j; 164 165 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { 166 if (table[i].ulSupportedSCLK != 0) { 167 vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit = 168 table[i].usVoltageID; 169 vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit = 170 table[i].usVoltageIndex; 171 } 172 } 173 174 for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) { 175 if (vid_mapping_table->entries[i].vid_7bit == 0) { 176 for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) { 177 if (vid_mapping_table->entries[j].vid_7bit != 0) { 178 vid_mapping_table->entries[i] = 179 vid_mapping_table->entries[j]; 180 vid_mapping_table->entries[j].vid_7bit = 0; 181 break; 182 } 183 } 184 185 if (j == SUMO_MAX_NUMBER_VOLTAGES) 186 break; 187 } 188 } 189 190 vid_mapping_table->num_entries = i; 191} 192 193#if 0 194static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] = 195{ 196 { 0, 4, 1 }, 197 { 1, 4, 1 }, 198 { 2, 5, 1 }, 199 { 3, 4, 2 }, 200 { 4, 1, 1 }, 201 { 5, 5, 2 }, 202 { 6, 6, 1 }, 203 { 7, 9, 2 }, 204 { 0xffffffff } 205}; 206 207static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] = 208{ 209 { 0, 4, 1 }, 210 { 0xffffffff } 211}; 212 213static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] = 214{ 
215 { 0, 4, 1 }, 216 { 0xffffffff } 217}; 218 219static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] = 220{ 221 { 0, 4, 1 }, 222 { 0xffffffff } 223}; 224 225static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] = 226{ 227 { 0, 4, 1 }, 228 { 0xffffffff } 229}; 230 231static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] = 232{ 233 { 0, 4, 1 }, 234 { 1, 4, 1 }, 235 { 2, 5, 1 }, 236 { 3, 4, 1 }, 237 { 4, 1, 1 }, 238 { 5, 5, 1 }, 239 { 6, 6, 1 }, 240 { 7, 9, 1 }, 241 { 8, 4, 1 }, 242 { 9, 2, 1 }, 243 { 10, 3, 1 }, 244 { 11, 6, 1 }, 245 { 12, 8, 2 }, 246 { 13, 1, 1 }, 247 { 14, 2, 1 }, 248 { 15, 3, 1 }, 249 { 16, 1, 1 }, 250 { 17, 4, 1 }, 251 { 18, 3, 1 }, 252 { 19, 1, 1 }, 253 { 20, 8, 1 }, 254 { 21, 5, 1 }, 255 { 22, 1, 1 }, 256 { 23, 1, 1 }, 257 { 24, 4, 1 }, 258 { 27, 6, 1 }, 259 { 28, 1, 1 }, 260 { 0xffffffff } 261}; 262 263static const struct kv_lcac_config_reg sx0_cac_config_reg[] = 264{ 265 { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 266}; 267 268static const struct kv_lcac_config_reg mc0_cac_config_reg[] = 269{ 270 { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 271}; 272 273static const struct kv_lcac_config_reg mc1_cac_config_reg[] = 274{ 275 { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 276}; 277 278static const struct kv_lcac_config_reg mc2_cac_config_reg[] = 279{ 280 { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 281}; 282 283static const struct kv_lcac_config_reg mc3_cac_config_reg[] = 284{ 285 { 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 286}; 287 288static const struct kv_lcac_config_reg cpl_cac_config_reg[] = 289{ 290 { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 291}; 292#endif 293 294static const struct kv_pt_config_reg didt_config_kv[] = 295{ 296 { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 297 { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 298 { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 299 { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 300 { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 301 { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 302 { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 303 { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 304 { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 305 { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 306 { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 307 { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 308 { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, 309 { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, 310 { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, 311 { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 312 { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 313 { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 314 { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 315 { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 316 { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 317 { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 318 { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 319 { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 320 { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 321 { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 322 { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 323 { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 324 { 0x32, 0x00ff0000, 16, 0x0, 
KV_CONFIGREG_DIDT_IND }, 325 { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 326 { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, 327 { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, 328 { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, 329 { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 330 { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 331 { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 332 { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 333 { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 334 { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 335 { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 336 { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 337 { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 338 { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 339 { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 340 { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 341 { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 342 { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 343 { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 344 { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, 345 { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, 346 { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, 347 { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 348 { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 349 { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 350 { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 351 { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 352 { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 353 { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 354 { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 355 { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 356 { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 357 { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 358 { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 359 { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 360 { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 361 { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 362 { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, 363 { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, 364 { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, 365 { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 366 { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 367 { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 368 { 0xFFFFFFFF } 369}; 370 371static struct kv_ps *kv_get_ps(struct amdgpu_ps *rps) 372{ 373 struct kv_ps *ps = rps->ps_priv; 374 375 return ps; 376} 377 378static struct kv_power_info *kv_get_pi(struct amdgpu_device *adev) 379{ 380 struct kv_power_info *pi = adev->pm.dpm.priv; 381 382 return pi; 383} 384 385#if 0 386static void kv_program_local_cac_table(struct amdgpu_device *adev, 387 const struct kv_lcac_config_values *local_cac_table, 388 const struct kv_lcac_config_reg *local_cac_reg) 389{ 390 u32 i, count, data; 391 const struct kv_lcac_config_values *values = local_cac_table; 392 393 while (values->block_id != 0xffffffff) { 394 count = values->signal_id; 395 for (i = 0; i < count; i++) { 396 data = ((values->block_id << local_cac_reg->block_shift) & 397 local_cac_reg->block_mask); 398 data |= ((i << local_cac_reg->signal_shift) & 399 local_cac_reg->signal_mask); 400 data |= ((values->t << local_cac_reg->t_shift) & 401 local_cac_reg->t_mask); 402 data |= ((1 << local_cac_reg->enable_shift) & 403 
local_cac_reg->enable_mask); 404 WREG32_SMC(local_cac_reg->cntl, data); 405 } 406 values++; 407 } 408} 409#endif 410 411static int kv_program_pt_config_registers(struct amdgpu_device *adev, 412 const struct kv_pt_config_reg *cac_config_regs) 413{ 414 const struct kv_pt_config_reg *config_regs = cac_config_regs; 415 u32 data; 416 u32 cache = 0; 417 418 if (config_regs == NULL) 419 return -EINVAL; 420 421 while (config_regs->offset != 0xFFFFFFFF) { 422 if (config_regs->type == KV_CONFIGREG_CACHE) { 423 cache |= ((config_regs->value << config_regs->shift) & config_regs->mask); 424 } else { 425 switch (config_regs->type) { 426 case KV_CONFIGREG_SMC_IND: 427 data = RREG32_SMC(config_regs->offset); 428 break; 429 case KV_CONFIGREG_DIDT_IND: 430 data = RREG32_DIDT(config_regs->offset); 431 break; 432 default: 433 data = RREG32(config_regs->offset); 434 break; 435 } 436 437 data &= ~config_regs->mask; 438 data |= ((config_regs->value << config_regs->shift) & config_regs->mask); 439 data |= cache; 440 cache = 0; 441 442 switch (config_regs->type) { 443 case KV_CONFIGREG_SMC_IND: 444 WREG32_SMC(config_regs->offset, data); 445 break; 446 case KV_CONFIGREG_DIDT_IND: 447 WREG32_DIDT(config_regs->offset, data); 448 break; 449 default: 450 WREG32(config_regs->offset, data); 451 break; 452 } 453 } 454 config_regs++; 455 } 456 457 return 0; 458} 459 460static void kv_do_enable_didt(struct amdgpu_device *adev, bool enable) 461{ 462 struct kv_power_info *pi = kv_get_pi(adev); 463 u32 data; 464 465 if (pi->caps_sq_ramping) { 466 data = RREG32_DIDT(ixDIDT_SQ_CTRL0); 467 if (enable) 468 data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; 469 else 470 data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; 471 WREG32_DIDT(ixDIDT_SQ_CTRL0, data); 472 } 473 474 if (pi->caps_db_ramping) { 475 data = RREG32_DIDT(ixDIDT_DB_CTRL0); 476 if (enable) 477 data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; 478 else 479 data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; 480 WREG32_DIDT(ixDIDT_DB_CTRL0, data); 481 } 482 483 if (pi->caps_td_ramping) { 484 data = RREG32_DIDT(ixDIDT_TD_CTRL0); 485 if (enable) 486 data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; 487 else 488 data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; 489 WREG32_DIDT(ixDIDT_TD_CTRL0, data); 490 } 491 492 if (pi->caps_tcp_ramping) { 493 data = RREG32_DIDT(ixDIDT_TCP_CTRL0); 494 if (enable) 495 data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; 496 else 497 data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; 498 WREG32_DIDT(ixDIDT_TCP_CTRL0, data); 499 } 500} 501 502static int kv_enable_didt(struct amdgpu_device *adev, bool enable) 503{ 504 struct kv_power_info *pi = kv_get_pi(adev); 505 int ret; 506 507 if (pi->caps_sq_ramping || 508 pi->caps_db_ramping || 509 pi->caps_td_ramping || 510 pi->caps_tcp_ramping) { 511 amdgpu_gfx_rlc_enter_safe_mode(adev); 512 513 if (enable) { 514 ret = kv_program_pt_config_registers(adev, didt_config_kv); 515 if (ret) { 516 amdgpu_gfx_rlc_exit_safe_mode(adev); 517 return ret; 518 } 519 } 520 521 kv_do_enable_didt(adev, enable); 522 523 amdgpu_gfx_rlc_exit_safe_mode(adev); 524 } 525 526 return 0; 527} 528 529#if 0 530static void kv_initialize_hardware_cac_manager(struct amdgpu_device *adev) 531{ 532 struct kv_power_info *pi = kv_get_pi(adev); 533 534 if (pi->caps_cac) { 535 WREG32_SMC(ixLCAC_SX0_OVR_SEL, 0); 536 WREG32_SMC(ixLCAC_SX0_OVR_VAL, 0); 537 kv_program_local_cac_table(adev, sx_local_cac_cfg_kv, sx0_cac_config_reg); 538 539 WREG32_SMC(ixLCAC_MC0_OVR_SEL, 0); 540 WREG32_SMC(ixLCAC_MC0_OVR_VAL, 0); 541 kv_program_local_cac_table(adev, mc0_local_cac_cfg_kv, mc0_cac_config_reg); 542 543 
WREG32_SMC(ixLCAC_MC1_OVR_SEL, 0); 544 WREG32_SMC(ixLCAC_MC1_OVR_VAL, 0); 545 kv_program_local_cac_table(adev, mc1_local_cac_cfg_kv, mc1_cac_config_reg); 546 547 WREG32_SMC(ixLCAC_MC2_OVR_SEL, 0); 548 WREG32_SMC(ixLCAC_MC2_OVR_VAL, 0); 549 kv_program_local_cac_table(adev, mc2_local_cac_cfg_kv, mc2_cac_config_reg); 550 551 WREG32_SMC(ixLCAC_MC3_OVR_SEL, 0); 552 WREG32_SMC(ixLCAC_MC3_OVR_VAL, 0); 553 kv_program_local_cac_table(adev, mc3_local_cac_cfg_kv, mc3_cac_config_reg); 554 555 WREG32_SMC(ixLCAC_CPL_OVR_SEL, 0); 556 WREG32_SMC(ixLCAC_CPL_OVR_VAL, 0); 557 kv_program_local_cac_table(adev, cpl_local_cac_cfg_kv, cpl_cac_config_reg); 558 } 559} 560#endif 561 562static int kv_enable_smc_cac(struct amdgpu_device *adev, bool enable) 563{ 564 struct kv_power_info *pi = kv_get_pi(adev); 565 int ret = 0; 566 567 if (pi->caps_cac) { 568 if (enable) { 569 ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableCac); 570 if (ret) 571 pi->cac_enabled = false; 572 else 573 pi->cac_enabled = true; 574 } else if (pi->cac_enabled) { 575 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableCac); 576 pi->cac_enabled = false; 577 } 578 } 579 580 return ret; 581} 582 583static int kv_process_firmware_header(struct amdgpu_device *adev) 584{ 585 struct kv_power_info *pi = kv_get_pi(adev); 586 u32 tmp; 587 int ret; 588 589 ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION + 590 offsetof(SMU7_Firmware_Header, DpmTable), 591 &tmp, pi->sram_end); 592 593 if (ret == 0) 594 pi->dpm_table_start = tmp; 595 596 ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION + 597 offsetof(SMU7_Firmware_Header, SoftRegisters), 598 &tmp, pi->sram_end); 599 600 if (ret == 0) 601 pi->soft_regs_start = tmp; 602 603 return ret; 604} 605 606static int kv_enable_dpm_voltage_scaling(struct amdgpu_device *adev) 607{ 608 struct kv_power_info *pi = kv_get_pi(adev); 609 int ret; 610 611 pi->graphics_voltage_change_enable = 1; 612 613 ret = amdgpu_kv_copy_bytes_to_smc(adev, 614 pi->dpm_table_start + 615 offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable), 616 &pi->graphics_voltage_change_enable, 617 sizeof(u8), pi->sram_end); 618 619 return ret; 620} 621 622static int kv_set_dpm_interval(struct amdgpu_device *adev) 623{ 624 struct kv_power_info *pi = kv_get_pi(adev); 625 int ret; 626 627 pi->graphics_interval = 1; 628 629 ret = amdgpu_kv_copy_bytes_to_smc(adev, 630 pi->dpm_table_start + 631 offsetof(SMU7_Fusion_DpmTable, GraphicsInterval), 632 &pi->graphics_interval, 633 sizeof(u8), pi->sram_end); 634 635 return ret; 636} 637 638static int kv_set_dpm_boot_state(struct amdgpu_device *adev) 639{ 640 struct kv_power_info *pi = kv_get_pi(adev); 641 int ret; 642 643 ret = amdgpu_kv_copy_bytes_to_smc(adev, 644 pi->dpm_table_start + 645 offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel), 646 &pi->graphics_boot_level, 647 sizeof(u8), pi->sram_end); 648 649 return ret; 650} 651 652static void kv_program_vc(struct amdgpu_device *adev) 653{ 654 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0x3FFFC100); 655} 656 657static void kv_clear_vc(struct amdgpu_device *adev) 658{ 659 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0); 660} 661 662static int kv_set_divider_value(struct amdgpu_device *adev, 663 u32 index, u32 sclk) 664{ 665 struct kv_power_info *pi = kv_get_pi(adev); 666 struct atom_clock_dividers dividers; 667 int ret; 668 669 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 670 sclk, false, &dividers); 671 if (ret) 672 return ret; 673 674 pi->graphics_level[index].SclkDid = 
(u8)dividers.post_div; 675 pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk); 676 677 return 0; 678} 679 680static u16 kv_convert_8bit_index_to_voltage(struct amdgpu_device *adev, 681 u16 voltage) 682{ 683 return 6200 - (voltage * 25); 684} 685 686static u16 kv_convert_2bit_index_to_voltage(struct amdgpu_device *adev, 687 u32 vid_2bit) 688{ 689 struct kv_power_info *pi = kv_get_pi(adev); 690 u32 vid_8bit = kv_convert_vid2_to_vid7(adev, 691 &pi->sys_info.vid_mapping_table, 692 vid_2bit); 693 694 return kv_convert_8bit_index_to_voltage(adev, (u16)vid_8bit); 695} 696 697 698static int kv_set_vid(struct amdgpu_device *adev, u32 index, u32 vid) 699{ 700 struct kv_power_info *pi = kv_get_pi(adev); 701 702 pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t; 703 pi->graphics_level[index].MinVddNb = 704 cpu_to_be32(kv_convert_2bit_index_to_voltage(adev, vid)); 705 706 return 0; 707} 708 709static int kv_set_at(struct amdgpu_device *adev, u32 index, u32 at) 710{ 711 struct kv_power_info *pi = kv_get_pi(adev); 712 713 pi->graphics_level[index].AT = cpu_to_be16((u16)at); 714 715 return 0; 716} 717 718static void kv_dpm_power_level_enable(struct amdgpu_device *adev, 719 u32 index, bool enable) 720{ 721 struct kv_power_info *pi = kv_get_pi(adev); 722 723 pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0; 724} 725 726static void kv_start_dpm(struct amdgpu_device *adev) 727{ 728 u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT); 729 730 tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK; 731 WREG32_SMC(ixGENERAL_PWRMGT, tmp); 732 733 amdgpu_kv_smc_dpm_enable(adev, true); 734} 735 736static void kv_stop_dpm(struct amdgpu_device *adev) 737{ 738 amdgpu_kv_smc_dpm_enable(adev, false); 739} 740 741static void kv_start_am(struct amdgpu_device *adev) 742{ 743 u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL); 744 745 sclk_pwrmgt_cntl &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | 746 SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); 747 sclk_pwrmgt_cntl |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK; 748 749 WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl); 750} 751 752static void kv_reset_am(struct amdgpu_device *adev) 753{ 754 u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL); 755 756 sclk_pwrmgt_cntl |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | 757 SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); 758 759 WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl); 760} 761 762static int kv_freeze_sclk_dpm(struct amdgpu_device *adev, bool freeze) 763{ 764 return amdgpu_kv_notify_message_to_smu(adev, freeze ? 
765 PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel); 766} 767 768static int kv_force_lowest_valid(struct amdgpu_device *adev) 769{ 770 return kv_force_dpm_lowest(adev); 771} 772 773static int kv_unforce_levels(struct amdgpu_device *adev) 774{ 775 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 776 return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NoForcedLevel); 777 else 778 return kv_set_enabled_levels(adev); 779} 780 781static int kv_update_sclk_t(struct amdgpu_device *adev) 782{ 783 struct kv_power_info *pi = kv_get_pi(adev); 784 u32 low_sclk_interrupt_t = 0; 785 int ret = 0; 786 787 if (pi->caps_sclk_throttle_low_notification) { 788 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t); 789 790 ret = amdgpu_kv_copy_bytes_to_smc(adev, 791 pi->dpm_table_start + 792 offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT), 793 (u8 *)&low_sclk_interrupt_t, 794 sizeof(u32), pi->sram_end); 795 } 796 return ret; 797} 798 799static int kv_program_bootup_state(struct amdgpu_device *adev) 800{ 801 struct kv_power_info *pi = kv_get_pi(adev); 802 u32 i; 803 struct amdgpu_clock_voltage_dependency_table *table = 804 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 805 806 if (table && table->count) { 807 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 808 if (table->entries[i].clk == pi->boot_pl.sclk) 809 break; 810 } 811 812 pi->graphics_boot_level = (u8)i; 813 kv_dpm_power_level_enable(adev, i, true); 814 } else { 815 struct sumo_sclk_voltage_mapping_table *table = 816 &pi->sys_info.sclk_voltage_mapping_table; 817 818 if (table->num_max_dpm_entries == 0) 819 return -EINVAL; 820 821 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 822 if (table->entries[i].sclk_frequency == pi->boot_pl.sclk) 823 break; 824 } 825 826 pi->graphics_boot_level = (u8)i; 827 kv_dpm_power_level_enable(adev, i, true); 828 } 829 return 0; 830} 831 832static int kv_enable_auto_thermal_throttling(struct amdgpu_device *adev) 833{ 834 struct kv_power_info *pi = kv_get_pi(adev); 835 int ret; 836 837 pi->graphics_therm_throttle_enable = 1; 838 839 ret = amdgpu_kv_copy_bytes_to_smc(adev, 840 pi->dpm_table_start + 841 offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable), 842 &pi->graphics_therm_throttle_enable, 843 sizeof(u8), pi->sram_end); 844 845 return ret; 846} 847 848static int kv_upload_dpm_settings(struct amdgpu_device *adev) 849{ 850 struct kv_power_info *pi = kv_get_pi(adev); 851 int ret; 852 853 ret = amdgpu_kv_copy_bytes_to_smc(adev, 854 pi->dpm_table_start + 855 offsetof(SMU7_Fusion_DpmTable, GraphicsLevel), 856 (u8 *)&pi->graphics_level, 857 sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS, 858 pi->sram_end); 859 860 if (ret) 861 return ret; 862 863 ret = amdgpu_kv_copy_bytes_to_smc(adev, 864 pi->dpm_table_start + 865 offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount), 866 &pi->graphics_dpm_level_count, 867 sizeof(u8), pi->sram_end); 868 869 return ret; 870} 871 872static u32 kv_get_clock_difference(u32 a, u32 b) 873{ 874 return (a >= b) ? 
a - b : b - a; 875} 876 877static u32 kv_get_clk_bypass(struct amdgpu_device *adev, u32 clk) 878{ 879 struct kv_power_info *pi = kv_get_pi(adev); 880 u32 value; 881 882 if (pi->caps_enable_dfs_bypass) { 883 if (kv_get_clock_difference(clk, 40000) < 200) 884 value = 3; 885 else if (kv_get_clock_difference(clk, 30000) < 200) 886 value = 2; 887 else if (kv_get_clock_difference(clk, 20000) < 200) 888 value = 7; 889 else if (kv_get_clock_difference(clk, 15000) < 200) 890 value = 6; 891 else if (kv_get_clock_difference(clk, 10000) < 200) 892 value = 8; 893 else 894 value = 0; 895 } else { 896 value = 0; 897 } 898 899 return value; 900} 901 902static int kv_populate_uvd_table(struct amdgpu_device *adev) 903{ 904 struct kv_power_info *pi = kv_get_pi(adev); 905 struct amdgpu_uvd_clock_voltage_dependency_table *table = 906 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 907 struct atom_clock_dividers dividers; 908 int ret; 909 u32 i; 910 911 if (table == NULL || table->count == 0) 912 return 0; 913 914 pi->uvd_level_count = 0; 915 for (i = 0; i < table->count; i++) { 916 if (pi->high_voltage_t && 917 (pi->high_voltage_t < table->entries[i].v)) 918 break; 919 920 pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk); 921 pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk); 922 pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v); 923 924 pi->uvd_level[i].VClkBypassCntl = 925 (u8)kv_get_clk_bypass(adev, table->entries[i].vclk); 926 pi->uvd_level[i].DClkBypassCntl = 927 (u8)kv_get_clk_bypass(adev, table->entries[i].dclk); 928 929 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 930 table->entries[i].vclk, false, &dividers); 931 if (ret) 932 return ret; 933 pi->uvd_level[i].VclkDivider = (u8)dividers.post_div; 934 935 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 936 table->entries[i].dclk, false, &dividers); 937 if (ret) 938 return ret; 939 pi->uvd_level[i].DclkDivider = (u8)dividers.post_div; 940 941 pi->uvd_level_count++; 942 } 943 944 ret = amdgpu_kv_copy_bytes_to_smc(adev, 945 pi->dpm_table_start + 946 offsetof(SMU7_Fusion_DpmTable, UvdLevelCount), 947 (u8 *)&pi->uvd_level_count, 948 sizeof(u8), pi->sram_end); 949 if (ret) 950 return ret; 951 952 pi->uvd_interval = 1; 953 954 ret = amdgpu_kv_copy_bytes_to_smc(adev, 955 pi->dpm_table_start + 956 offsetof(SMU7_Fusion_DpmTable, UVDInterval), 957 &pi->uvd_interval, 958 sizeof(u8), pi->sram_end); 959 if (ret) 960 return ret; 961 962 ret = amdgpu_kv_copy_bytes_to_smc(adev, 963 pi->dpm_table_start + 964 offsetof(SMU7_Fusion_DpmTable, UvdLevel), 965 (u8 *)&pi->uvd_level, 966 sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD, 967 pi->sram_end); 968 969 return ret; 970 971} 972 973static int kv_populate_vce_table(struct amdgpu_device *adev) 974{ 975 struct kv_power_info *pi = kv_get_pi(adev); 976 int ret; 977 u32 i; 978 struct amdgpu_vce_clock_voltage_dependency_table *table = 979 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 980 struct atom_clock_dividers dividers; 981 982 if (table == NULL || table->count == 0) 983 return 0; 984 985 pi->vce_level_count = 0; 986 for (i = 0; i < table->count; i++) { 987 if (pi->high_voltage_t && 988 pi->high_voltage_t < table->entries[i].v) 989 break; 990 991 pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk); 992 pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); 993 994 pi->vce_level[i].ClkBypassCntl = 995 (u8)kv_get_clk_bypass(adev, table->entries[i].evclk); 996 997 ret = 
amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 998 table->entries[i].evclk, false, &dividers); 999 if (ret) 1000 return ret; 1001 pi->vce_level[i].Divider = (u8)dividers.post_div; 1002 1003 pi->vce_level_count++; 1004 } 1005 1006 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1007 pi->dpm_table_start + 1008 offsetof(SMU7_Fusion_DpmTable, VceLevelCount), 1009 (u8 *)&pi->vce_level_count, 1010 sizeof(u8), 1011 pi->sram_end); 1012 if (ret) 1013 return ret; 1014 1015 pi->vce_interval = 1; 1016 1017 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1018 pi->dpm_table_start + 1019 offsetof(SMU7_Fusion_DpmTable, VCEInterval), 1020 (u8 *)&pi->vce_interval, 1021 sizeof(u8), 1022 pi->sram_end); 1023 if (ret) 1024 return ret; 1025 1026 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1027 pi->dpm_table_start + 1028 offsetof(SMU7_Fusion_DpmTable, VceLevel), 1029 (u8 *)&pi->vce_level, 1030 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE, 1031 pi->sram_end); 1032 1033 return ret; 1034} 1035 1036static int kv_populate_samu_table(struct amdgpu_device *adev) 1037{ 1038 struct kv_power_info *pi = kv_get_pi(adev); 1039 struct amdgpu_clock_voltage_dependency_table *table = 1040 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; 1041 struct atom_clock_dividers dividers; 1042 int ret; 1043 u32 i; 1044 1045 if (table == NULL || table->count == 0) 1046 return 0; 1047 1048 pi->samu_level_count = 0; 1049 for (i = 0; i < table->count; i++) { 1050 if (pi->high_voltage_t && 1051 pi->high_voltage_t < table->entries[i].v) 1052 break; 1053 1054 pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk); 1055 pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); 1056 1057 pi->samu_level[i].ClkBypassCntl = 1058 (u8)kv_get_clk_bypass(adev, table->entries[i].clk); 1059 1060 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 1061 table->entries[i].clk, false, &dividers); 1062 if (ret) 1063 return ret; 1064 pi->samu_level[i].Divider = (u8)dividers.post_div; 1065 1066 pi->samu_level_count++; 1067 } 1068 1069 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1070 pi->dpm_table_start + 1071 offsetof(SMU7_Fusion_DpmTable, SamuLevelCount), 1072 (u8 *)&pi->samu_level_count, 1073 sizeof(u8), 1074 pi->sram_end); 1075 if (ret) 1076 return ret; 1077 1078 pi->samu_interval = 1; 1079 1080 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1081 pi->dpm_table_start + 1082 offsetof(SMU7_Fusion_DpmTable, SAMUInterval), 1083 (u8 *)&pi->samu_interval, 1084 sizeof(u8), 1085 pi->sram_end); 1086 if (ret) 1087 return ret; 1088 1089 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1090 pi->dpm_table_start + 1091 offsetof(SMU7_Fusion_DpmTable, SamuLevel), 1092 (u8 *)&pi->samu_level, 1093 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU, 1094 pi->sram_end); 1095 if (ret) 1096 return ret; 1097 1098 return ret; 1099} 1100 1101 1102static int kv_populate_acp_table(struct amdgpu_device *adev) 1103{ 1104 struct kv_power_info *pi = kv_get_pi(adev); 1105 struct amdgpu_clock_voltage_dependency_table *table = 1106 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 1107 struct atom_clock_dividers dividers; 1108 int ret; 1109 u32 i; 1110 1111 if (table == NULL || table->count == 0) 1112 return 0; 1113 1114 pi->acp_level_count = 0; 1115 for (i = 0; i < table->count; i++) { 1116 pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk); 1117 pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); 1118 1119 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 1120 table->entries[i].clk, false, 
&dividers); 1121 if (ret) 1122 return ret; 1123 pi->acp_level[i].Divider = (u8)dividers.post_div; 1124 1125 pi->acp_level_count++; 1126 } 1127 1128 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1129 pi->dpm_table_start + 1130 offsetof(SMU7_Fusion_DpmTable, AcpLevelCount), 1131 (u8 *)&pi->acp_level_count, 1132 sizeof(u8), 1133 pi->sram_end); 1134 if (ret) 1135 return ret; 1136 1137 pi->acp_interval = 1; 1138 1139 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1140 pi->dpm_table_start + 1141 offsetof(SMU7_Fusion_DpmTable, ACPInterval), 1142 (u8 *)&pi->acp_interval, 1143 sizeof(u8), 1144 pi->sram_end); 1145 if (ret) 1146 return ret; 1147 1148 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1149 pi->dpm_table_start + 1150 offsetof(SMU7_Fusion_DpmTable, AcpLevel), 1151 (u8 *)&pi->acp_level, 1152 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP, 1153 pi->sram_end); 1154 if (ret) 1155 return ret; 1156 1157 return ret; 1158} 1159 1160static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev) 1161{ 1162 struct kv_power_info *pi = kv_get_pi(adev); 1163 u32 i; 1164 struct amdgpu_clock_voltage_dependency_table *table = 1165 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 1166 1167 if (table && table->count) { 1168 for (i = 0; i < pi->graphics_dpm_level_count; i++) { 1169 if (pi->caps_enable_dfs_bypass) { 1170 if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200) 1171 pi->graphics_level[i].ClkBypassCntl = 3; 1172 else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200) 1173 pi->graphics_level[i].ClkBypassCntl = 2; 1174 else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200) 1175 pi->graphics_level[i].ClkBypassCntl = 7; 1176 else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200) 1177 pi->graphics_level[i].ClkBypassCntl = 6; 1178 else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200) 1179 pi->graphics_level[i].ClkBypassCntl = 8; 1180 else 1181 pi->graphics_level[i].ClkBypassCntl = 0; 1182 } else { 1183 pi->graphics_level[i].ClkBypassCntl = 0; 1184 } 1185 } 1186 } else { 1187 struct sumo_sclk_voltage_mapping_table *table = 1188 &pi->sys_info.sclk_voltage_mapping_table; 1189 for (i = 0; i < pi->graphics_dpm_level_count; i++) { 1190 if (pi->caps_enable_dfs_bypass) { 1191 if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200) 1192 pi->graphics_level[i].ClkBypassCntl = 3; 1193 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200) 1194 pi->graphics_level[i].ClkBypassCntl = 2; 1195 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200) 1196 pi->graphics_level[i].ClkBypassCntl = 7; 1197 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200) 1198 pi->graphics_level[i].ClkBypassCntl = 6; 1199 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200) 1200 pi->graphics_level[i].ClkBypassCntl = 8; 1201 else 1202 pi->graphics_level[i].ClkBypassCntl = 0; 1203 } else { 1204 pi->graphics_level[i].ClkBypassCntl = 0; 1205 } 1206 } 1207 } 1208} 1209 1210static int kv_enable_ulv(struct amdgpu_device *adev, bool enable) 1211{ 1212 return amdgpu_kv_notify_message_to_smu(adev, enable ? 
1213 PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV); 1214} 1215 1216static void kv_reset_acp_boot_level(struct amdgpu_device *adev) 1217{ 1218 struct kv_power_info *pi = kv_get_pi(adev); 1219 1220 pi->acp_boot_level = 0xff; 1221} 1222 1223static void kv_update_current_ps(struct amdgpu_device *adev, 1224 struct amdgpu_ps *rps) 1225{ 1226 struct kv_ps *new_ps = kv_get_ps(rps); 1227 struct kv_power_info *pi = kv_get_pi(adev); 1228 1229 pi->current_rps = *rps; 1230 pi->current_ps = *new_ps; 1231 pi->current_rps.ps_priv = &pi->current_ps; 1232 adev->pm.dpm.current_ps = &pi->current_rps; 1233} 1234 1235static void kv_update_requested_ps(struct amdgpu_device *adev, 1236 struct amdgpu_ps *rps) 1237{ 1238 struct kv_ps *new_ps = kv_get_ps(rps); 1239 struct kv_power_info *pi = kv_get_pi(adev); 1240 1241 pi->requested_rps = *rps; 1242 pi->requested_ps = *new_ps; 1243 pi->requested_rps.ps_priv = &pi->requested_ps; 1244 adev->pm.dpm.requested_ps = &pi->requested_rps; 1245} 1246 1247static void kv_dpm_enable_bapm(void *handle, bool enable) 1248{ 1249 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1250 struct kv_power_info *pi = kv_get_pi(adev); 1251 int ret; 1252 1253 if (pi->bapm_enable) { 1254 ret = amdgpu_kv_smc_bapm_enable(adev, enable); 1255 if (ret) 1256 DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); 1257 } 1258} 1259 1260static int kv_dpm_enable(struct amdgpu_device *adev) 1261{ 1262 struct kv_power_info *pi = kv_get_pi(adev); 1263 int ret; 1264 1265 ret = kv_process_firmware_header(adev); 1266 if (ret) { 1267 DRM_ERROR("kv_process_firmware_header failed\n"); 1268 return ret; 1269 } 1270 kv_init_fps_limits(adev); 1271 kv_init_graphics_levels(adev); 1272 ret = kv_program_bootup_state(adev); 1273 if (ret) { 1274 DRM_ERROR("kv_program_bootup_state failed\n"); 1275 return ret; 1276 } 1277 kv_calculate_dfs_bypass_settings(adev); 1278 ret = kv_upload_dpm_settings(adev); 1279 if (ret) { 1280 DRM_ERROR("kv_upload_dpm_settings failed\n"); 1281 return ret; 1282 } 1283 ret = kv_populate_uvd_table(adev); 1284 if (ret) { 1285 DRM_ERROR("kv_populate_uvd_table failed\n"); 1286 return ret; 1287 } 1288 ret = kv_populate_vce_table(adev); 1289 if (ret) { 1290 DRM_ERROR("kv_populate_vce_table failed\n"); 1291 return ret; 1292 } 1293 ret = kv_populate_samu_table(adev); 1294 if (ret) { 1295 DRM_ERROR("kv_populate_samu_table failed\n"); 1296 return ret; 1297 } 1298 ret = kv_populate_acp_table(adev); 1299 if (ret) { 1300 DRM_ERROR("kv_populate_acp_table failed\n"); 1301 return ret; 1302 } 1303 kv_program_vc(adev); 1304#if 0 1305 kv_initialize_hardware_cac_manager(adev); 1306#endif 1307 kv_start_am(adev); 1308 if (pi->enable_auto_thermal_throttling) { 1309 ret = kv_enable_auto_thermal_throttling(adev); 1310 if (ret) { 1311 DRM_ERROR("kv_enable_auto_thermal_throttling failed\n"); 1312 return ret; 1313 } 1314 } 1315 ret = kv_enable_dpm_voltage_scaling(adev); 1316 if (ret) { 1317 DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n"); 1318 return ret; 1319 } 1320 ret = kv_set_dpm_interval(adev); 1321 if (ret) { 1322 DRM_ERROR("kv_set_dpm_interval failed\n"); 1323 return ret; 1324 } 1325 ret = kv_set_dpm_boot_state(adev); 1326 if (ret) { 1327 DRM_ERROR("kv_set_dpm_boot_state failed\n"); 1328 return ret; 1329 } 1330 ret = kv_enable_ulv(adev, true); 1331 if (ret) { 1332 DRM_ERROR("kv_enable_ulv failed\n"); 1333 return ret; 1334 } 1335 kv_start_dpm(adev); 1336 ret = kv_enable_didt(adev, true); 1337 if (ret) { 1338 DRM_ERROR("kv_enable_didt failed\n"); 1339 return ret; 1340 } 1341 ret = kv_enable_smc_cac(adev, true); 
1342 if (ret) { 1343 DRM_ERROR("kv_enable_smc_cac failed\n"); 1344 return ret; 1345 } 1346 1347 kv_reset_acp_boot_level(adev); 1348 1349 ret = amdgpu_kv_smc_bapm_enable(adev, false); 1350 if (ret) { 1351 DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); 1352 return ret; 1353 } 1354 1355 if (adev->irq.installed && 1356 amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { 1357 ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); 1358 if (ret) { 1359 DRM_ERROR("kv_set_thermal_temperature_range failed\n"); 1360 return ret; 1361 } 1362 amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, 1363 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); 1364 amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, 1365 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); 1366 } 1367 1368 return ret; 1369} 1370 1371static void kv_dpm_disable(struct amdgpu_device *adev) 1372{ 1373 struct kv_power_info *pi = kv_get_pi(adev); 1374 1375 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, 1376 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); 1377 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, 1378 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); 1379 1380 amdgpu_kv_smc_bapm_enable(adev, false); 1381 1382 if (adev->asic_type == CHIP_MULLINS) 1383 kv_enable_nb_dpm(adev, false); 1384 1385 /* powerup blocks */ 1386 kv_dpm_powergate_acp(adev, false); 1387 kv_dpm_powergate_samu(adev, false); 1388 if (pi->caps_vce_pg) /* power on the VCE block */ 1389 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); 1390 if (pi->caps_uvd_pg) /* power on the UVD block */ 1391 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); 1392 1393 kv_enable_smc_cac(adev, false); 1394 kv_enable_didt(adev, false); 1395 kv_clear_vc(adev); 1396 kv_stop_dpm(adev); 1397 kv_enable_ulv(adev, false); 1398 kv_reset_am(adev); 1399 1400 kv_update_current_ps(adev, adev->pm.dpm.boot_ps); 1401} 1402 1403#if 0 1404static int kv_write_smc_soft_register(struct amdgpu_device *adev, 1405 u16 reg_offset, u32 value) 1406{ 1407 struct kv_power_info *pi = kv_get_pi(adev); 1408 1409 return amdgpu_kv_copy_bytes_to_smc(adev, pi->soft_regs_start + reg_offset, 1410 (u8 *)&value, sizeof(u16), pi->sram_end); 1411} 1412 1413static int kv_read_smc_soft_register(struct amdgpu_device *adev, 1414 u16 reg_offset, u32 *value) 1415{ 1416 struct kv_power_info *pi = kv_get_pi(adev); 1417 1418 return amdgpu_kv_read_smc_sram_dword(adev, pi->soft_regs_start + reg_offset, 1419 value, pi->sram_end); 1420} 1421#endif 1422 1423static void kv_init_sclk_t(struct amdgpu_device *adev) 1424{ 1425 struct kv_power_info *pi = kv_get_pi(adev); 1426 1427 pi->low_sclk_interrupt_t = 0; 1428} 1429 1430static int kv_init_fps_limits(struct amdgpu_device *adev) 1431{ 1432 struct kv_power_info *pi = kv_get_pi(adev); 1433 int ret = 0; 1434 1435 if (pi->caps_fps) { 1436 u16 tmp; 1437 1438 tmp = 45; 1439 pi->fps_high_t = cpu_to_be16(tmp); 1440 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1441 pi->dpm_table_start + 1442 offsetof(SMU7_Fusion_DpmTable, FpsHighT), 1443 (u8 *)&pi->fps_high_t, 1444 sizeof(u16), pi->sram_end); 1445 1446 tmp = 30; 1447 pi->fps_low_t = cpu_to_be16(tmp); 1448 1449 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1450 pi->dpm_table_start + 1451 offsetof(SMU7_Fusion_DpmTable, FpsLowT), 1452 (u8 *)&pi->fps_low_t, 1453 sizeof(u16), pi->sram_end); 1454 1455 } 1456 return ret; 1457} 1458 1459static void kv_init_powergate_state(struct amdgpu_device *adev) 1460{ 1461 struct kv_power_info *pi = kv_get_pi(adev); 1462 1463 pi->uvd_power_gated = false; 1464 pi->vce_power_gated = false; 1465 pi->samu_power_gated = false; 1466 
pi->acp_power_gated = false; 1467 1468} 1469 1470static int kv_enable_uvd_dpm(struct amdgpu_device *adev, bool enable) 1471{ 1472 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1473 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable); 1474} 1475 1476static int kv_enable_vce_dpm(struct amdgpu_device *adev, bool enable) 1477{ 1478 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1479 PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable); 1480} 1481 1482static int kv_enable_samu_dpm(struct amdgpu_device *adev, bool enable) 1483{ 1484 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1485 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable); 1486} 1487 1488static int kv_enable_acp_dpm(struct amdgpu_device *adev, bool enable) 1489{ 1490 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1491 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable); 1492} 1493 1494static int kv_update_uvd_dpm(struct amdgpu_device *adev, bool gate) 1495{ 1496 struct kv_power_info *pi = kv_get_pi(adev); 1497 struct amdgpu_uvd_clock_voltage_dependency_table *table = 1498 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 1499 int ret; 1500 u32 mask; 1501 1502 if (!gate) { 1503 if (table->count) 1504 pi->uvd_boot_level = table->count - 1; 1505 else 1506 pi->uvd_boot_level = 0; 1507 1508 if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) { 1509 mask = 1 << pi->uvd_boot_level; 1510 } else { 1511 mask = 0x1f; 1512 } 1513 1514 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1515 pi->dpm_table_start + 1516 offsetof(SMU7_Fusion_DpmTable, UvdBootLevel), 1517 (uint8_t *)&pi->uvd_boot_level, 1518 sizeof(u8), pi->sram_end); 1519 if (ret) 1520 return ret; 1521 1522 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1523 PPSMC_MSG_UVDDPM_SetEnabledMask, 1524 mask); 1525 } 1526 1527 return kv_enable_uvd_dpm(adev, !gate); 1528} 1529 1530static u8 kv_get_vce_boot_level(struct amdgpu_device *adev, u32 evclk) 1531{ 1532 u8 i; 1533 struct amdgpu_vce_clock_voltage_dependency_table *table = 1534 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 1535 1536 for (i = 0; i < table->count; i++) { 1537 if (table->entries[i].evclk >= evclk) 1538 break; 1539 } 1540 1541 return i; 1542} 1543 1544static int kv_update_vce_dpm(struct amdgpu_device *adev, 1545 struct amdgpu_ps *amdgpu_new_state, 1546 struct amdgpu_ps *amdgpu_current_state) 1547{ 1548 struct kv_power_info *pi = kv_get_pi(adev); 1549 struct amdgpu_vce_clock_voltage_dependency_table *table = 1550 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 1551 int ret; 1552 1553 if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { 1554 if (pi->caps_stable_p_state) 1555 pi->vce_boot_level = table->count - 1; 1556 else 1557 pi->vce_boot_level = kv_get_vce_boot_level(adev, amdgpu_new_state->evclk); 1558 1559 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1560 pi->dpm_table_start + 1561 offsetof(SMU7_Fusion_DpmTable, VceBootLevel), 1562 (u8 *)&pi->vce_boot_level, 1563 sizeof(u8), 1564 pi->sram_end); 1565 if (ret) 1566 return ret; 1567 1568 if (pi->caps_stable_p_state) 1569 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1570 PPSMC_MSG_VCEDPM_SetEnabledMask, 1571 (1 << pi->vce_boot_level)); 1572 kv_enable_vce_dpm(adev, true); 1573 } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { 1574 kv_enable_vce_dpm(adev, false); 1575 } 1576 1577 return 0; 1578} 1579 1580static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate) 1581{ 1582 struct kv_power_info *pi = kv_get_pi(adev); 1583 struct amdgpu_clock_voltage_dependency_table *table 
= 1584 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; 1585 int ret; 1586 1587 if (!gate) { 1588 if (pi->caps_stable_p_state) 1589 pi->samu_boot_level = table->count - 1; 1590 else 1591 pi->samu_boot_level = 0; 1592 1593 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1594 pi->dpm_table_start + 1595 offsetof(SMU7_Fusion_DpmTable, SamuBootLevel), 1596 (u8 *)&pi->samu_boot_level, 1597 sizeof(u8), 1598 pi->sram_end); 1599 if (ret) 1600 return ret; 1601 1602 if (pi->caps_stable_p_state) 1603 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1604 PPSMC_MSG_SAMUDPM_SetEnabledMask, 1605 (1 << pi->samu_boot_level)); 1606 } 1607 1608 return kv_enable_samu_dpm(adev, !gate); 1609} 1610 1611static u8 kv_get_acp_boot_level(struct amdgpu_device *adev) 1612{ 1613 u8 i; 1614 struct amdgpu_clock_voltage_dependency_table *table = 1615 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 1616 1617 for (i = 0; i < table->count; i++) { 1618 if (table->entries[i].clk >= 0) /* XXX */ 1619 break; 1620 } 1621 1622 if (i >= table->count) 1623 i = table->count - 1; 1624 1625 return i; 1626} 1627 1628static void kv_update_acp_boot_level(struct amdgpu_device *adev) 1629{ 1630 struct kv_power_info *pi = kv_get_pi(adev); 1631 u8 acp_boot_level; 1632 1633 if (!pi->caps_stable_p_state) { 1634 acp_boot_level = kv_get_acp_boot_level(adev); 1635 if (acp_boot_level != pi->acp_boot_level) { 1636 pi->acp_boot_level = acp_boot_level; 1637 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1638 PPSMC_MSG_ACPDPM_SetEnabledMask, 1639 (1 << pi->acp_boot_level)); 1640 } 1641 } 1642} 1643 1644static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate) 1645{ 1646 struct kv_power_info *pi = kv_get_pi(adev); 1647 struct amdgpu_clock_voltage_dependency_table *table = 1648 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 1649 int ret; 1650 1651 if (!gate) { 1652 if (pi->caps_stable_p_state) 1653 pi->acp_boot_level = table->count - 1; 1654 else 1655 pi->acp_boot_level = kv_get_acp_boot_level(adev); 1656 1657 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1658 pi->dpm_table_start + 1659 offsetof(SMU7_Fusion_DpmTable, AcpBootLevel), 1660 (u8 *)&pi->acp_boot_level, 1661 sizeof(u8), 1662 pi->sram_end); 1663 if (ret) 1664 return ret; 1665 1666 if (pi->caps_stable_p_state) 1667 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1668 PPSMC_MSG_ACPDPM_SetEnabledMask, 1669 (1 << pi->acp_boot_level)); 1670 } 1671 1672 return kv_enable_acp_dpm(adev, !gate); 1673} 1674 1675static void kv_dpm_powergate_uvd(void *handle, bool gate) 1676{ 1677 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1678 struct kv_power_info *pi = kv_get_pi(adev); 1679 int ret; 1680 1681 pi->uvd_power_gated = gate; 1682 1683 if (gate) { 1684 /* stop the UVD block */ 1685 ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, 1686 AMD_PG_STATE_GATE); 1687 kv_update_uvd_dpm(adev, gate); 1688 if (pi->caps_uvd_pg) 1689 /* power off the UVD block */ 1690 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF); 1691 } else { 1692 if (pi->caps_uvd_pg) 1693 /* power on the UVD block */ 1694 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); 1695 /* re-init the UVD block */ 1696 kv_update_uvd_dpm(adev, gate); 1697 1698 ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, 1699 AMD_PG_STATE_UNGATE); 1700 } 1701} 1702 1703static void kv_dpm_powergate_vce(void *handle, bool gate) 1704{ 1705 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1706 struct kv_power_info *pi = kv_get_pi(adev); 1707 int ret; 
1708 1709 pi->vce_power_gated = gate; 1710 1711 if (gate) { 1712 /* stop the VCE block */ 1713 ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, 1714 AMD_PG_STATE_GATE); 1715 kv_enable_vce_dpm(adev, false); 1716 if (pi->caps_vce_pg) /* power off the VCE block */ 1717 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); 1718 } else { 1719 if (pi->caps_vce_pg) /* power on the VCE block */ 1720 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); 1721 kv_enable_vce_dpm(adev, true); 1722 /* re-init the VCE block */ 1723 ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, 1724 AMD_PG_STATE_UNGATE); 1725 } 1726} 1727 1728 1729static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) 1730{ 1731 struct kv_power_info *pi = kv_get_pi(adev); 1732 1733 if (pi->samu_power_gated == gate) 1734 return; 1735 1736 pi->samu_power_gated = gate; 1737 1738 if (gate) { 1739 kv_update_samu_dpm(adev, true); 1740 if (pi->caps_samu_pg) 1741 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerOFF); 1742 } else { 1743 if (pi->caps_samu_pg) 1744 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerON); 1745 kv_update_samu_dpm(adev, false); 1746 } 1747} 1748 1749static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate) 1750{ 1751 struct kv_power_info *pi = kv_get_pi(adev); 1752 1753 if (pi->acp_power_gated == gate) 1754 return; 1755 1756 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 1757 return; 1758 1759 pi->acp_power_gated = gate; 1760 1761 if (gate) { 1762 kv_update_acp_dpm(adev, true); 1763 if (pi->caps_acp_pg) 1764 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerOFF); 1765 } else { 1766 if (pi->caps_acp_pg) 1767 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerON); 1768 kv_update_acp_dpm(adev, false); 1769 } 1770} 1771 1772static void kv_set_valid_clock_range(struct amdgpu_device *adev, 1773 struct amdgpu_ps *new_rps) 1774{ 1775 struct kv_ps *new_ps = kv_get_ps(new_rps); 1776 struct kv_power_info *pi = kv_get_pi(adev); 1777 u32 i; 1778 struct amdgpu_clock_voltage_dependency_table *table = 1779 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 1780 1781 if (table && table->count) { 1782 for (i = 0; i < pi->graphics_dpm_level_count; i++) { 1783 if ((table->entries[i].clk >= new_ps->levels[0].sclk) || 1784 (i == (pi->graphics_dpm_level_count - 1))) { 1785 pi->lowest_valid = i; 1786 break; 1787 } 1788 } 1789 1790 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 1791 if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk) 1792 break; 1793 } 1794 pi->highest_valid = i; 1795 1796 if (pi->lowest_valid > pi->highest_valid) { 1797 if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) > 1798 (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk)) 1799 pi->highest_valid = pi->lowest_valid; 1800 else 1801 pi->lowest_valid = pi->highest_valid; 1802 } 1803 } else { 1804 struct sumo_sclk_voltage_mapping_table *table = 1805 &pi->sys_info.sclk_voltage_mapping_table; 1806 1807 for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) { 1808 if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk || 1809 i == (int)(pi->graphics_dpm_level_count - 1)) { 1810 pi->lowest_valid = i; 1811 break; 1812 } 1813 } 1814 1815 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 1816 if (table->entries[i].sclk_frequency <= 1817 new_ps->levels[new_ps->num_levels - 1].sclk) 1818 break; 1819 } 1820 pi->highest_valid = i; 1821 1822 if 
(pi->lowest_valid > pi->highest_valid) { 1823 if ((new_ps->levels[0].sclk - 1824 table->entries[pi->highest_valid].sclk_frequency) > 1825 (table->entries[pi->lowest_valid].sclk_frequency - 1826 new_ps->levels[new_ps->num_levels -1].sclk)) 1827 pi->highest_valid = pi->lowest_valid; 1828 else 1829 pi->lowest_valid = pi->highest_valid; 1830 } 1831 } 1832} 1833 1834static int kv_update_dfs_bypass_settings(struct amdgpu_device *adev, 1835 struct amdgpu_ps *new_rps) 1836{ 1837 struct kv_ps *new_ps = kv_get_ps(new_rps); 1838 struct kv_power_info *pi = kv_get_pi(adev); 1839 int ret = 0; 1840 u8 clk_bypass_cntl; 1841 1842 if (pi->caps_enable_dfs_bypass) { 1843 clk_bypass_cntl = new_ps->need_dfs_bypass ? 1844 pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0; 1845 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1846 (pi->dpm_table_start + 1847 offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) + 1848 (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) + 1849 offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)), 1850 &clk_bypass_cntl, 1851 sizeof(u8), pi->sram_end); 1852 } 1853 1854 return ret; 1855} 1856 1857static int kv_enable_nb_dpm(struct amdgpu_device *adev, 1858 bool enable) 1859{ 1860 struct kv_power_info *pi = kv_get_pi(adev); 1861 int ret = 0; 1862 1863 if (enable) { 1864 if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) { 1865 ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Enable); 1866 if (ret == 0) 1867 pi->nb_dpm_enabled = true; 1868 } 1869 } else { 1870 if (pi->enable_nb_dpm && pi->nb_dpm_enabled) { 1871 ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Disable); 1872 if (ret == 0) 1873 pi->nb_dpm_enabled = false; 1874 } 1875 } 1876 1877 return ret; 1878} 1879 1880static int kv_dpm_force_performance_level(void *handle, 1881 enum amd_dpm_forced_level level) 1882{ 1883 int ret; 1884 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1885 1886 if (level == AMD_DPM_FORCED_LEVEL_HIGH) { 1887 ret = kv_force_dpm_highest(adev); 1888 if (ret) 1889 return ret; 1890 } else if (level == AMD_DPM_FORCED_LEVEL_LOW) { 1891 ret = kv_force_dpm_lowest(adev); 1892 if (ret) 1893 return ret; 1894 } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) { 1895 ret = kv_unforce_levels(adev); 1896 if (ret) 1897 return ret; 1898 } 1899 1900 adev->pm.dpm.forced_level = level; 1901 1902 return 0; 1903} 1904 1905static int kv_dpm_pre_set_power_state(void *handle) 1906{ 1907 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1908 struct kv_power_info *pi = kv_get_pi(adev); 1909 struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps; 1910 struct amdgpu_ps *new_ps = &requested_ps; 1911 1912 kv_update_requested_ps(adev, new_ps); 1913 1914 kv_apply_state_adjust_rules(adev, 1915 &pi->requested_rps, 1916 &pi->current_rps); 1917 1918 return 0; 1919} 1920 1921static int kv_dpm_set_power_state(void *handle) 1922{ 1923 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1924 struct kv_power_info *pi = kv_get_pi(adev); 1925 struct amdgpu_ps *new_ps = &pi->requested_rps; 1926 struct amdgpu_ps *old_ps = &pi->current_rps; 1927 int ret; 1928 1929 if (pi->bapm_enable) { 1930 ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power); 1931 if (ret) { 1932 DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); 1933 return ret; 1934 } 1935 } 1936 1937 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { 1938 if (pi->enable_dpm) { 1939 kv_set_valid_clock_range(adev, new_ps); 1940 kv_update_dfs_bypass_settings(adev, new_ps); 1941 ret = kv_calculate_ds_divider(adev); 1942 if 
(ret) { 1943 DRM_ERROR("kv_calculate_ds_divider failed\n"); 1944 return ret; 1945 } 1946 kv_calculate_nbps_level_settings(adev); 1947 kv_calculate_dpm_settings(adev); 1948 kv_force_lowest_valid(adev); 1949 kv_enable_new_levels(adev); 1950 kv_upload_dpm_settings(adev); 1951 kv_program_nbps_index_settings(adev, new_ps); 1952 kv_unforce_levels(adev); 1953 kv_set_enabled_levels(adev); 1954 kv_force_lowest_valid(adev); 1955 kv_unforce_levels(adev); 1956 1957 ret = kv_update_vce_dpm(adev, new_ps, old_ps); 1958 if (ret) { 1959 DRM_ERROR("kv_update_vce_dpm failed\n"); 1960 return ret; 1961 } 1962 kv_update_sclk_t(adev); 1963 if (adev->asic_type == CHIP_MULLINS) 1964 kv_enable_nb_dpm(adev, true); 1965 } 1966 } else { 1967 if (pi->enable_dpm) { 1968 kv_set_valid_clock_range(adev, new_ps); 1969 kv_update_dfs_bypass_settings(adev, new_ps); 1970 ret = kv_calculate_ds_divider(adev); 1971 if (ret) { 1972 DRM_ERROR("kv_calculate_ds_divider failed\n"); 1973 return ret; 1974 } 1975 kv_calculate_nbps_level_settings(adev); 1976 kv_calculate_dpm_settings(adev); 1977 kv_freeze_sclk_dpm(adev, true); 1978 kv_upload_dpm_settings(adev); 1979 kv_program_nbps_index_settings(adev, new_ps); 1980 kv_freeze_sclk_dpm(adev, false); 1981 kv_set_enabled_levels(adev); 1982 ret = kv_update_vce_dpm(adev, new_ps, old_ps); 1983 if (ret) { 1984 DRM_ERROR("kv_update_vce_dpm failed\n"); 1985 return ret; 1986 } 1987 kv_update_acp_boot_level(adev); 1988 kv_update_sclk_t(adev); 1989 kv_enable_nb_dpm(adev, true); 1990 } 1991 } 1992 1993 return 0; 1994} 1995 1996static void kv_dpm_post_set_power_state(void *handle) 1997{ 1998 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1999 struct kv_power_info *pi = kv_get_pi(adev); 2000 struct amdgpu_ps *new_ps = &pi->requested_rps; 2001 2002 kv_update_current_ps(adev, new_ps); 2003} 2004 2005static void kv_dpm_setup_asic(struct amdgpu_device *adev) 2006{ 2007 sumo_take_smu_control(adev, true); 2008 kv_init_powergate_state(adev); 2009 kv_init_sclk_t(adev); 2010} 2011 2012#if 0 2013static void kv_dpm_reset_asic(struct amdgpu_device *adev) 2014{ 2015 struct kv_power_info *pi = kv_get_pi(adev); 2016 2017 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { 2018 kv_force_lowest_valid(adev); 2019 kv_init_graphics_levels(adev); 2020 kv_program_bootup_state(adev); 2021 kv_upload_dpm_settings(adev); 2022 kv_force_lowest_valid(adev); 2023 kv_unforce_levels(adev); 2024 } else { 2025 kv_init_graphics_levels(adev); 2026 kv_program_bootup_state(adev); 2027 kv_freeze_sclk_dpm(adev, true); 2028 kv_upload_dpm_settings(adev); 2029 kv_freeze_sclk_dpm(adev, false); 2030 kv_set_enabled_level(adev, pi->graphics_boot_level); 2031 } 2032} 2033#endif 2034 2035static void kv_construct_max_power_limits_table(struct amdgpu_device *adev, 2036 struct amdgpu_clock_and_voltage_limits *table) 2037{ 2038 struct kv_power_info *pi = kv_get_pi(adev); 2039 2040 if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) { 2041 int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1; 2042 table->sclk = 2043 pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency; 2044 table->vddc = 2045 kv_convert_2bit_index_to_voltage(adev, 2046 pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit); 2047 } 2048 2049 table->mclk = pi->sys_info.nbp_memory_clock[0]; 2050} 2051 2052static void kv_patch_voltage_values(struct amdgpu_device *adev) 2053{ 2054 int i; 2055 struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table = 2056 
&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 2057 struct amdgpu_vce_clock_voltage_dependency_table *vce_table = 2058 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 2059 struct amdgpu_clock_voltage_dependency_table *samu_table = 2060 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; 2061 struct amdgpu_clock_voltage_dependency_table *acp_table = 2062 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 2063 2064 if (uvd_table->count) { 2065 for (i = 0; i < uvd_table->count; i++) 2066 uvd_table->entries[i].v = 2067 kv_convert_8bit_index_to_voltage(adev, 2068 uvd_table->entries[i].v); 2069 } 2070 2071 if (vce_table->count) { 2072 for (i = 0; i < vce_table->count; i++) 2073 vce_table->entries[i].v = 2074 kv_convert_8bit_index_to_voltage(adev, 2075 vce_table->entries[i].v); 2076 } 2077 2078 if (samu_table->count) { 2079 for (i = 0; i < samu_table->count; i++) 2080 samu_table->entries[i].v = 2081 kv_convert_8bit_index_to_voltage(adev, 2082 samu_table->entries[i].v); 2083 } 2084 2085 if (acp_table->count) { 2086 for (i = 0; i < acp_table->count; i++) 2087 acp_table->entries[i].v = 2088 kv_convert_8bit_index_to_voltage(adev, 2089 acp_table->entries[i].v); 2090 } 2091 2092} 2093 2094static void kv_construct_boot_state(struct amdgpu_device *adev) 2095{ 2096 struct kv_power_info *pi = kv_get_pi(adev); 2097 2098 pi->boot_pl.sclk = pi->sys_info.bootup_sclk; 2099 pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index; 2100 pi->boot_pl.ds_divider_index = 0; 2101 pi->boot_pl.ss_divider_index = 0; 2102 pi->boot_pl.allow_gnb_slow = 1; 2103 pi->boot_pl.force_nbp_state = 0; 2104 pi->boot_pl.display_wm = 0; 2105 pi->boot_pl.vce_wm = 0; 2106} 2107 2108static int kv_force_dpm_highest(struct amdgpu_device *adev) 2109{ 2110 int ret; 2111 u32 enable_mask, i; 2112 2113 ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask); 2114 if (ret) 2115 return ret; 2116 2117 for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) { 2118 if (enable_mask & (1 << i)) 2119 break; 2120 } 2121 2122 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 2123 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i); 2124 else 2125 return kv_set_enabled_level(adev, i); 2126} 2127 2128static int kv_force_dpm_lowest(struct amdgpu_device *adev) 2129{ 2130 int ret; 2131 u32 enable_mask, i; 2132 2133 ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask); 2134 if (ret) 2135 return ret; 2136 2137 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) { 2138 if (enable_mask & (1 << i)) 2139 break; 2140 } 2141 2142 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 2143 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i); 2144 else 2145 return kv_set_enabled_level(adev, i); 2146} 2147 2148static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev, 2149 u32 sclk, u32 min_sclk_in_sr) 2150{ 2151 struct kv_power_info *pi = kv_get_pi(adev); 2152 u32 i; 2153 u32 temp; 2154 u32 min = max(min_sclk_in_sr, (u32)KV_MINIMUM_ENGINE_CLOCK); 2155 2156 if (sclk < min) 2157 return 0; 2158 2159 if (!pi->caps_sclk_ds) 2160 return 0; 2161 2162 for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) { 2163 temp = sclk >> i; 2164 if (temp >= min) 2165 break; 2166 } 2167 2168 return (u8)i; 2169} 2170 2171static int kv_get_high_voltage_limit(struct amdgpu_device *adev, int *limit) 2172{ 2173 struct kv_power_info *pi = kv_get_pi(adev); 2174 struct amdgpu_clock_voltage_dependency_table *table = 2175 
&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 2176 int i; 2177 2178 if (table && table->count) { 2179 for (i = table->count - 1; i >= 0; i--) { 2180 if (pi->high_voltage_t && 2181 (kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <= 2182 pi->high_voltage_t)) { 2183 *limit = i; 2184 return 0; 2185 } 2186 } 2187 } else { 2188 struct sumo_sclk_voltage_mapping_table *table = 2189 &pi->sys_info.sclk_voltage_mapping_table; 2190 2191 for (i = table->num_max_dpm_entries - 1; i >= 0; i--) { 2192 if (pi->high_voltage_t && 2193 (kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <= 2194 pi->high_voltage_t)) { 2195 *limit = i; 2196 return 0; 2197 } 2198 } 2199 } 2200 2201 *limit = 0; 2202 return 0; 2203} 2204 2205static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, 2206 struct amdgpu_ps *new_rps, 2207 struct amdgpu_ps *old_rps) 2208{ 2209 struct kv_ps *ps = kv_get_ps(new_rps); 2210 struct kv_power_info *pi = kv_get_pi(adev); 2211 u32 min_sclk = 10000; /* ??? */ 2212 u32 sclk, mclk = 0; 2213 int i, limit; 2214 bool force_high; 2215 struct amdgpu_clock_voltage_dependency_table *table = 2216 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 2217 u32 stable_p_state_sclk = 0; 2218 struct amdgpu_clock_and_voltage_limits *max_limits = 2219 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 2220 2221 if (new_rps->vce_active) { 2222 new_rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk; 2223 new_rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk; 2224 } else { 2225 new_rps->evclk = 0; 2226 new_rps->ecclk = 0; 2227 } 2228 2229 mclk = max_limits->mclk; 2230 sclk = min_sclk; 2231 2232 if (pi->caps_stable_p_state) { 2233 stable_p_state_sclk = (max_limits->sclk * 75) / 100; 2234 2235 for (i = table->count - 1; i >= 0; i--) { 2236 if (stable_p_state_sclk >= table->entries[i].clk) { 2237 stable_p_state_sclk = table->entries[i].clk; 2238 break; 2239 } 2240 } 2241 2242 if (i > 0) 2243 stable_p_state_sclk = table->entries[0].clk; 2244 2245 sclk = stable_p_state_sclk; 2246 } 2247 2248 if (new_rps->vce_active) { 2249 if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk) 2250 sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk; 2251 } 2252 2253 ps->need_dfs_bypass = true; 2254 2255 for (i = 0; i < ps->num_levels; i++) { 2256 if (ps->levels[i].sclk < sclk) 2257 ps->levels[i].sclk = sclk; 2258 } 2259 2260 if (table && table->count) { 2261 for (i = 0; i < ps->num_levels; i++) { 2262 if (pi->high_voltage_t && 2263 (pi->high_voltage_t < 2264 kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) { 2265 kv_get_high_voltage_limit(adev, &limit); 2266 ps->levels[i].sclk = table->entries[limit].clk; 2267 } 2268 } 2269 } else { 2270 struct sumo_sclk_voltage_mapping_table *table = 2271 &pi->sys_info.sclk_voltage_mapping_table; 2272 2273 for (i = 0; i < ps->num_levels; i++) { 2274 if (pi->high_voltage_t && 2275 (pi->high_voltage_t < 2276 kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) { 2277 kv_get_high_voltage_limit(adev, &limit); 2278 ps->levels[i].sclk = table->entries[limit].sclk_frequency; 2279 } 2280 } 2281 } 2282 2283 if (pi->caps_stable_p_state) { 2284 for (i = 0; i < ps->num_levels; i++) { 2285 ps->levels[i].sclk = stable_p_state_sclk; 2286 } 2287 } 2288 2289 pi->video_start = new_rps->dclk || new_rps->vclk || 2290 new_rps->evclk || new_rps->ecclk; 2291 2292 if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 2293 ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) 2294 pi->battery_state = true; 2295 else 2296 
pi->battery_state = false; 2297 2298 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { 2299 ps->dpm0_pg_nb_ps_lo = 0x1; 2300 ps->dpm0_pg_nb_ps_hi = 0x0; 2301 ps->dpmx_nb_ps_lo = 0x1; 2302 ps->dpmx_nb_ps_hi = 0x0; 2303 } else { 2304 ps->dpm0_pg_nb_ps_lo = 0x3; 2305 ps->dpm0_pg_nb_ps_hi = 0x0; 2306 ps->dpmx_nb_ps_lo = 0x3; 2307 ps->dpmx_nb_ps_hi = 0x0; 2308 2309 if (pi->sys_info.nb_dpm_enable) { 2310 force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) || 2311 pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) || 2312 pi->disable_nb_ps3_in_battery; 2313 ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3; 2314 ps->dpm0_pg_nb_ps_hi = 0x2; 2315 ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3; 2316 ps->dpmx_nb_ps_hi = 0x2; 2317 } 2318 } 2319} 2320 2321static void kv_dpm_power_level_enabled_for_throttle(struct amdgpu_device *adev, 2322 u32 index, bool enable) 2323{ 2324 struct kv_power_info *pi = kv_get_pi(adev); 2325 2326 pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0; 2327} 2328 2329static int kv_calculate_ds_divider(struct amdgpu_device *adev) 2330{ 2331 struct kv_power_info *pi = kv_get_pi(adev); 2332 u32 sclk_in_sr = 10000; /* ??? */ 2333 u32 i; 2334 2335 if (pi->lowest_valid > pi->highest_valid) 2336 return -EINVAL; 2337 2338 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { 2339 pi->graphics_level[i].DeepSleepDivId = 2340 kv_get_sleep_divider_id_from_clock(adev, 2341 be32_to_cpu(pi->graphics_level[i].SclkFrequency), 2342 sclk_in_sr); 2343 } 2344 return 0; 2345} 2346 2347static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev) 2348{ 2349 struct kv_power_info *pi = kv_get_pi(adev); 2350 u32 i; 2351 bool force_high; 2352 struct amdgpu_clock_and_voltage_limits *max_limits = 2353 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 2354 u32 mclk = max_limits->mclk; 2355 2356 if (pi->lowest_valid > pi->highest_valid) 2357 return -EINVAL; 2358 2359 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { 2360 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { 2361 pi->graphics_level[i].GnbSlow = 1; 2362 pi->graphics_level[i].ForceNbPs1 = 0; 2363 pi->graphics_level[i].UpH = 0; 2364 } 2365 2366 if (!pi->sys_info.nb_dpm_enable) 2367 return 0; 2368 2369 force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) || 2370 (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start); 2371 2372 if (force_high) { 2373 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) 2374 pi->graphics_level[i].GnbSlow = 0; 2375 } else { 2376 if (pi->battery_state) 2377 pi->graphics_level[0].ForceNbPs1 = 1; 2378 2379 pi->graphics_level[1].GnbSlow = 0; 2380 pi->graphics_level[2].GnbSlow = 0; 2381 pi->graphics_level[3].GnbSlow = 0; 2382 pi->graphics_level[4].GnbSlow = 0; 2383 } 2384 } else { 2385 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { 2386 pi->graphics_level[i].GnbSlow = 1; 2387 pi->graphics_level[i].ForceNbPs1 = 0; 2388 pi->graphics_level[i].UpH = 0; 2389 } 2390 2391 if (pi->sys_info.nb_dpm_enable && pi->battery_state) { 2392 pi->graphics_level[pi->lowest_valid].UpH = 0x28; 2393 pi->graphics_level[pi->lowest_valid].GnbSlow = 0; 2394 if (pi->lowest_valid != pi->highest_valid) 2395 pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1; 2396 } 2397 } 2398 return 0; 2399} 2400 2401static int kv_calculate_dpm_settings(struct amdgpu_device *adev) 2402{ 2403 struct kv_power_info *pi = kv_get_pi(adev); 2404 u32 i; 2405 2406 if (pi->lowest_valid > pi->highest_valid) 2407 return -EINVAL; 2408 2409 for (i = pi->lowest_valid; i <= 
pi->highest_valid; i++) 2410 pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0; 2411 2412 return 0; 2413} 2414 2415static void kv_init_graphics_levels(struct amdgpu_device *adev) 2416{ 2417 struct kv_power_info *pi = kv_get_pi(adev); 2418 u32 i; 2419 struct amdgpu_clock_voltage_dependency_table *table = 2420 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 2421 2422 if (table && table->count) { 2423 u32 vid_2bit; 2424 2425 pi->graphics_dpm_level_count = 0; 2426 for (i = 0; i < table->count; i++) { 2427 if (pi->high_voltage_t && 2428 (pi->high_voltage_t < 2429 kv_convert_8bit_index_to_voltage(adev, table->entries[i].v))) 2430 break; 2431 2432 kv_set_divider_value(adev, i, table->entries[i].clk); 2433 vid_2bit = kv_convert_vid7_to_vid2(adev, 2434 &pi->sys_info.vid_mapping_table, 2435 table->entries[i].v); 2436 kv_set_vid(adev, i, vid_2bit); 2437 kv_set_at(adev, i, pi->at[i]); 2438 kv_dpm_power_level_enabled_for_throttle(adev, i, true); 2439 pi->graphics_dpm_level_count++; 2440 } 2441 } else { 2442 struct sumo_sclk_voltage_mapping_table *table = 2443 &pi->sys_info.sclk_voltage_mapping_table; 2444 2445 pi->graphics_dpm_level_count = 0; 2446 for (i = 0; i < table->num_max_dpm_entries; i++) { 2447 if (pi->high_voltage_t && 2448 pi->high_voltage_t < 2449 kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit)) 2450 break; 2451 2452 kv_set_divider_value(adev, i, table->entries[i].sclk_frequency); 2453 kv_set_vid(adev, i, table->entries[i].vid_2bit); 2454 kv_set_at(adev, i, pi->at[i]); 2455 kv_dpm_power_level_enabled_for_throttle(adev, i, true); 2456 pi->graphics_dpm_level_count++; 2457 } 2458 } 2459 2460 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) 2461 kv_dpm_power_level_enable(adev, i, false); 2462} 2463 2464static void kv_enable_new_levels(struct amdgpu_device *adev) 2465{ 2466 struct kv_power_info *pi = kv_get_pi(adev); 2467 u32 i; 2468 2469 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) { 2470 if (i >= pi->lowest_valid && i <= pi->highest_valid) 2471 kv_dpm_power_level_enable(adev, i, true); 2472 } 2473} 2474 2475static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level) 2476{ 2477 u32 new_mask = (1 << level); 2478 2479 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, 2480 PPSMC_MSG_SCLKDPM_SetEnabledMask, 2481 new_mask); 2482} 2483 2484static int kv_set_enabled_levels(struct amdgpu_device *adev) 2485{ 2486 struct kv_power_info *pi = kv_get_pi(adev); 2487 u32 i, new_mask = 0; 2488 2489 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) 2490 new_mask |= (1 << i); 2491 2492 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, 2493 PPSMC_MSG_SCLKDPM_SetEnabledMask, 2494 new_mask); 2495} 2496 2497static void kv_program_nbps_index_settings(struct amdgpu_device *adev, 2498 struct amdgpu_ps *new_rps) 2499{ 2500 struct kv_ps *new_ps = kv_get_ps(new_rps); 2501 struct kv_power_info *pi = kv_get_pi(adev); 2502 u32 nbdpmconfig1; 2503 2504 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 2505 return; 2506 2507 if (pi->sys_info.nb_dpm_enable) { 2508 nbdpmconfig1 = RREG32_SMC(ixNB_DPM_CONFIG_1); 2509 nbdpmconfig1 &= ~(NB_DPM_CONFIG_1__Dpm0PgNbPsLo_MASK | 2510 NB_DPM_CONFIG_1__Dpm0PgNbPsHi_MASK | 2511 NB_DPM_CONFIG_1__DpmXNbPsLo_MASK | 2512 NB_DPM_CONFIG_1__DpmXNbPsHi_MASK); 2513 nbdpmconfig1 |= (new_ps->dpm0_pg_nb_ps_lo << NB_DPM_CONFIG_1__Dpm0PgNbPsLo__SHIFT) | 2514 (new_ps->dpm0_pg_nb_ps_hi << NB_DPM_CONFIG_1__Dpm0PgNbPsHi__SHIFT) | 2515 (new_ps->dpmx_nb_ps_lo << NB_DPM_CONFIG_1__DpmXNbPsLo__SHIFT) | 2516 
(new_ps->dpmx_nb_ps_hi << NB_DPM_CONFIG_1__DpmXNbPsHi__SHIFT); 2517 WREG32_SMC(ixNB_DPM_CONFIG_1, nbdpmconfig1); 2518 } 2519} 2520 2521static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, 2522 int min_temp, int max_temp) 2523{ 2524 int low_temp = 0 * 1000; 2525 int high_temp = 255 * 1000; 2526 u32 tmp; 2527 2528 if (low_temp < min_temp) 2529 low_temp = min_temp; 2530 if (high_temp > max_temp) 2531 high_temp = max_temp; 2532 if (high_temp < low_temp) { 2533 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); 2534 return -EINVAL; 2535 } 2536 2537 tmp = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 2538 tmp &= ~(CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK | 2539 CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK); 2540 tmp |= ((49 + (high_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT) | 2541 ((49 + (low_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT); 2542 WREG32_SMC(ixCG_THERMAL_INT_CTRL, tmp); 2543 2544 adev->pm.dpm.thermal.min_temp = low_temp; 2545 adev->pm.dpm.thermal.max_temp = high_temp; 2546 2547 return 0; 2548} 2549 2550union igp_info { 2551 struct _ATOM_INTEGRATED_SYSTEM_INFO info; 2552 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; 2553 struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5; 2554 struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6; 2555 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; 2556 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8; 2557}; 2558 2559static int kv_parse_sys_info_table(struct amdgpu_device *adev) 2560{ 2561 struct kv_power_info *pi = kv_get_pi(adev); 2562 struct amdgpu_mode_info *mode_info = &adev->mode_info; 2563 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); 2564 union igp_info *igp_info; 2565 u8 frev, crev; 2566 u16 data_offset; 2567 int i; 2568 2569 if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 2570 &frev, &crev, &data_offset)) { 2571 igp_info = (union igp_info *)(mode_info->atom_context->bios + 2572 data_offset); 2573 2574 if (crev != 8) { 2575 DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); 2576 return -EINVAL; 2577 } 2578 pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock); 2579 pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock); 2580 pi->sys_info.bootup_nb_voltage_index = 2581 le16_to_cpu(igp_info->info_8.usBootUpNBVoltage); 2582 if (igp_info->info_8.ucHtcTmpLmt == 0) 2583 pi->sys_info.htc_tmp_lmt = 203; 2584 else 2585 pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt; 2586 if (igp_info->info_8.ucHtcHystLmt == 0) 2587 pi->sys_info.htc_hyst_lmt = 5; 2588 else 2589 pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt; 2590 if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) { 2591 DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n"); 2592 } 2593 2594 if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3)) 2595 pi->sys_info.nb_dpm_enable = true; 2596 else 2597 pi->sys_info.nb_dpm_enable = false; 2598 2599 for (i = 0; i < KV_NUM_NBPSTATES; i++) { 2600 pi->sys_info.nbp_memory_clock[i] = 2601 le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]); 2602 pi->sys_info.nbp_n_clock[i] = 2603 le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]); 2604 } 2605 if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) & 2606 SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) 2607 pi->caps_enable_dfs_bypass = true; 2608 2609 sumo_construct_sclk_voltage_mapping_table(adev, 2610 &pi->sys_info.sclk_voltage_mapping_table, 2611 igp_info->info_8.sAvail_SCLK); 2612 2613 sumo_construct_vid_mapping_table(adev, 2614 
&pi->sys_info.vid_mapping_table, 2615 igp_info->info_8.sAvail_SCLK); 2616 2617 kv_construct_max_power_limits_table(adev, 2618 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac); 2619 } 2620 return 0; 2621} 2622 2623union power_info { 2624 struct _ATOM_POWERPLAY_INFO info; 2625 struct _ATOM_POWERPLAY_INFO_V2 info_2; 2626 struct _ATOM_POWERPLAY_INFO_V3 info_3; 2627 struct _ATOM_PPLIB_POWERPLAYTABLE pplib; 2628 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; 2629 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; 2630}; 2631 2632union pplib_clock_info { 2633 struct _ATOM_PPLIB_R600_CLOCK_INFO r600; 2634 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; 2635 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; 2636 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; 2637}; 2638 2639union pplib_power_state { 2640 struct _ATOM_PPLIB_STATE v1; 2641 struct _ATOM_PPLIB_STATE_V2 v2; 2642}; 2643 2644static void kv_patch_boot_state(struct amdgpu_device *adev, 2645 struct kv_ps *ps) 2646{ 2647 struct kv_power_info *pi = kv_get_pi(adev); 2648 2649 ps->num_levels = 1; 2650 ps->levels[0] = pi->boot_pl; 2651} 2652 2653static void kv_parse_pplib_non_clock_info(struct amdgpu_device *adev, 2654 struct amdgpu_ps *rps, 2655 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, 2656 u8 table_rev) 2657{ 2658 struct kv_ps *ps = kv_get_ps(rps); 2659 2660 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); 2661 rps->class = le16_to_cpu(non_clock_info->usClassification); 2662 rps->class2 = le16_to_cpu(non_clock_info->usClassification2); 2663 2664 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { 2665 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); 2666 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); 2667 } else { 2668 rps->vclk = 0; 2669 rps->dclk = 0; 2670 } 2671 2672 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { 2673 adev->pm.dpm.boot_ps = rps; 2674 kv_patch_boot_state(adev, ps); 2675 } 2676 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) 2677 adev->pm.dpm.uvd_ps = rps; 2678} 2679 2680static void kv_parse_pplib_clock_info(struct amdgpu_device *adev, 2681 struct amdgpu_ps *rps, int index, 2682 union pplib_clock_info *clock_info) 2683{ 2684 struct kv_power_info *pi = kv_get_pi(adev); 2685 struct kv_ps *ps = kv_get_ps(rps); 2686 struct kv_pl *pl = &ps->levels[index]; 2687 u32 sclk; 2688 2689 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); 2690 sclk |= clock_info->sumo.ucEngineClockHigh << 16; 2691 pl->sclk = sclk; 2692 pl->vddc_index = clock_info->sumo.vddcIndex; 2693 2694 ps->num_levels = index + 1; 2695 2696 if (pi->caps_sclk_ds) { 2697 pl->ds_divider_index = 5; 2698 pl->ss_divider_index = 5; 2699 } 2700} 2701 2702static int kv_parse_power_table(struct amdgpu_device *adev) 2703{ 2704 struct amdgpu_mode_info *mode_info = &adev->mode_info; 2705 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; 2706 union pplib_power_state *power_state; 2707 int i, j, k, non_clock_array_index, clock_array_index; 2708 union pplib_clock_info *clock_info; 2709 struct _StateArray *state_array; 2710 struct _ClockInfoArray *clock_info_array; 2711 struct _NonClockInfoArray *non_clock_info_array; 2712 union power_info *power_info; 2713 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); 2714 u16 data_offset; 2715 u8 frev, crev; 2716 u8 *power_state_offset; 2717 struct kv_ps *ps; 2718 2719 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 2720 &frev, &crev, &data_offset)) 2721 return -EINVAL; 2722 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 2723 2724 amdgpu_add_thermal_controller(adev); 2725 
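	/*
	 * The PPLib powerplay table is laid out as three arrays behind the
	 * header: a state array, a clock-info array and a non-clock-info
	 * array, each found via a 16-bit offset relative to data_offset.
	 * A v2 state entry stores one non-clock-info index plus
	 * ucNumDPMLevels clock-info indices, which is why the walk below
	 * advances power_state_offset by 2 + ucNumDPMLevels bytes per state.
	 */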
2726 state_array = (struct _StateArray *) 2727 (mode_info->atom_context->bios + data_offset + 2728 le16_to_cpu(power_info->pplib.usStateArrayOffset)); 2729 clock_info_array = (struct _ClockInfoArray *) 2730 (mode_info->atom_context->bios + data_offset + 2731 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); 2732 non_clock_info_array = (struct _NonClockInfoArray *) 2733 (mode_info->atom_context->bios + data_offset + 2734 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); 2735 2736 adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries, 2737 sizeof(struct amdgpu_ps), 2738 GFP_KERNEL); 2739 if (!adev->pm.dpm.ps) 2740 return -ENOMEM; 2741 power_state_offset = (u8 *)state_array->states; 2742 for (i = 0; i < state_array->ucNumEntries; i++) { 2743 u8 *idx; 2744 power_state = (union pplib_power_state *)power_state_offset; 2745 non_clock_array_index = power_state->v2.nonClockInfoIndex; 2746 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 2747 &non_clock_info_array->nonClockInfo[non_clock_array_index]; 2748 ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL); 2749 if (ps == NULL) { 2750 kfree(adev->pm.dpm.ps); 2751 return -ENOMEM; 2752 } 2753 adev->pm.dpm.ps[i].ps_priv = ps; 2754 k = 0; 2755 idx = (u8 *)&power_state->v2.clockInfoIndex[0]; 2756 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { 2757 clock_array_index = idx[j]; 2758 if (clock_array_index >= clock_info_array->ucNumEntries) 2759 continue; 2760 if (k >= SUMO_MAX_HARDWARE_POWERLEVELS) 2761 break; 2762 clock_info = (union pplib_clock_info *) 2763 ((u8 *)&clock_info_array->clockInfo[0] + 2764 (clock_array_index * clock_info_array->ucEntrySize)); 2765 kv_parse_pplib_clock_info(adev, 2766 &adev->pm.dpm.ps[i], k, 2767 clock_info); 2768 k++; 2769 } 2770 kv_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i], 2771 non_clock_info, 2772 non_clock_info_array->ucEntrySize); 2773 power_state_offset += 2 + power_state->v2.ucNumDPMLevels; 2774 } 2775 adev->pm.dpm.num_ps = state_array->ucNumEntries; 2776 2777 /* fill in the vce power states */ 2778 for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { 2779 u32 sclk; 2780 clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; 2781 clock_info = (union pplib_clock_info *) 2782 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; 2783 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); 2784 sclk |= clock_info->sumo.ucEngineClockHigh << 16; 2785 adev->pm.dpm.vce_states[i].sclk = sclk; 2786 adev->pm.dpm.vce_states[i].mclk = 0; 2787 } 2788 2789 return 0; 2790} 2791 2792static int kv_dpm_init(struct amdgpu_device *adev) 2793{ 2794 struct kv_power_info *pi; 2795 int ret, i; 2796 2797 pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL); 2798 if (pi == NULL) 2799 return -ENOMEM; 2800 adev->pm.dpm.priv = pi; 2801 2802 ret = amdgpu_get_platform_caps(adev); 2803 if (ret) 2804 return ret; 2805 2806 ret = amdgpu_parse_extended_power_table(adev); 2807 if (ret) 2808 return ret; 2809 2810 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) 2811 pi->at[i] = TRINITY_AT_DFLT; 2812 2813 pi->sram_end = SMC_RAM_END; 2814 2815 pi->enable_nb_dpm = true; 2816 2817 pi->caps_power_containment = true; 2818 pi->caps_cac = true; 2819 pi->enable_didt = false; 2820 if (pi->enable_didt) { 2821 pi->caps_sq_ramping = true; 2822 pi->caps_db_ramping = true; 2823 pi->caps_td_ramping = true; 2824 pi->caps_tcp_ramping = true; 2825 } 2826 2827 if (adev->powerplay.pp_feature & PP_SCLK_DEEP_SLEEP_MASK) 2828 pi->caps_sclk_ds = true; 2829 else 2830 pi->caps_sclk_ds = false; 2831 2832 
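	/*
	 * Most of the remaining capability flags are fixed defaults.  The
	 * exceptions are bapm_enable, which honours the amdgpu_bapm module
	 * parameter, and the UVD/VCE/SAMU/ACP powergating caps, which are
	 * only set when the matching AMD_PG_SUPPORT_* bit is present in
	 * adev->pg_flags.
	 */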
pi->enable_auto_thermal_throttling = true; 2833 pi->disable_nb_ps3_in_battery = false; 2834 if (amdgpu_bapm == 0) 2835 pi->bapm_enable = false; 2836 else 2837 pi->bapm_enable = true; 2838 pi->voltage_drop_t = 0; 2839 pi->caps_sclk_throttle_low_notification = false; 2840 pi->caps_fps = false; /* true? */ 2841 pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false; 2842 pi->caps_uvd_dpm = true; 2843 pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false; 2844 pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false; 2845 pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false; 2846 pi->caps_stable_p_state = false; 2847 2848 ret = kv_parse_sys_info_table(adev); 2849 if (ret) 2850 return ret; 2851 2852 kv_patch_voltage_values(adev); 2853 kv_construct_boot_state(adev); 2854 2855 ret = kv_parse_power_table(adev); 2856 if (ret) 2857 return ret; 2858 2859 pi->enable_dpm = true; 2860 2861 return 0; 2862} 2863 2864static void 2865kv_dpm_debugfs_print_current_performance_level(void *handle, 2866 struct seq_file *m) 2867{ 2868 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2869 struct kv_power_info *pi = kv_get_pi(adev); 2870 u32 current_index = 2871 (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & 2872 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> 2873 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; 2874 u32 sclk, tmp; 2875 u16 vddc; 2876 2877 if (current_index >= SMU__NUM_SCLK_DPM_STATE) { 2878 seq_printf(m, "invalid dpm profile %d\n", current_index); 2879 } else { 2880 sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency); 2881 tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) & 2882 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >> 2883 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT; 2884 vddc = kv_convert_8bit_index_to_voltage(adev, (u16)tmp); 2885 seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en"); 2886 seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? 
"dis" : "en"); 2887 seq_printf(m, "power level %d sclk: %u vddc: %u\n", 2888 current_index, sclk, vddc); 2889 } 2890} 2891 2892static void 2893kv_dpm_print_power_state(void *handle, void *request_ps) 2894{ 2895 int i; 2896 struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps; 2897 struct kv_ps *ps = kv_get_ps(rps); 2898 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2899 2900 amdgpu_dpm_print_class_info(rps->class, rps->class2); 2901 amdgpu_dpm_print_cap_info(rps->caps); 2902 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); 2903 for (i = 0; i < ps->num_levels; i++) { 2904 struct kv_pl *pl = &ps->levels[i]; 2905 printk("\t\tpower level %d sclk: %u vddc: %u\n", 2906 i, pl->sclk, 2907 kv_convert_8bit_index_to_voltage(adev, pl->vddc_index)); 2908 } 2909 amdgpu_dpm_print_ps_status(adev, rps); 2910} 2911 2912static void kv_dpm_fini(struct amdgpu_device *adev) 2913{ 2914 int i; 2915 2916 for (i = 0; i < adev->pm.dpm.num_ps; i++) { 2917 kfree(adev->pm.dpm.ps[i].ps_priv); 2918 } 2919 kfree(adev->pm.dpm.ps); 2920 kfree(adev->pm.dpm.priv); 2921 amdgpu_free_extended_power_table(adev); 2922} 2923 2924static void kv_dpm_display_configuration_changed(void *handle) 2925{ 2926 2927} 2928 2929static u32 kv_dpm_get_sclk(void *handle, bool low) 2930{ 2931 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2932 struct kv_power_info *pi = kv_get_pi(adev); 2933 struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps); 2934 2935 if (low) 2936 return requested_state->levels[0].sclk; 2937 else 2938 return requested_state->levels[requested_state->num_levels - 1].sclk; 2939} 2940 2941static u32 kv_dpm_get_mclk(void *handle, bool low) 2942{ 2943 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2944 struct kv_power_info *pi = kv_get_pi(adev); 2945 2946 return pi->sys_info.bootup_uma_clk; 2947} 2948 2949/* get temperature in millidegrees */ 2950static int kv_dpm_get_temp(void *handle) 2951{ 2952 u32 temp; 2953 int actual_temp = 0; 2954 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2955 2956 temp = RREG32_SMC(0xC0300E0C); 2957 2958 if (temp) 2959 actual_temp = (temp / 8) - 49; 2960 else 2961 actual_temp = 0; 2962 2963 actual_temp = actual_temp * 1000; 2964 2965 return actual_temp; 2966} 2967 2968static int kv_dpm_early_init(void *handle) 2969{ 2970 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2971 2972 adev->powerplay.pp_funcs = &kv_dpm_funcs; 2973 adev->powerplay.pp_handle = adev; 2974 kv_dpm_set_irq_funcs(adev); 2975 2976 return 0; 2977} 2978 2979static int kv_dpm_late_init(void *handle) 2980{ 2981 /* powerdown unused blocks for now */ 2982 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2983 2984 if (!adev->pm.dpm_enabled) 2985 return 0; 2986 2987 kv_dpm_powergate_acp(adev, true); 2988 kv_dpm_powergate_samu(adev, true); 2989 2990 return 0; 2991} 2992 2993static int kv_dpm_sw_init(void *handle) 2994{ 2995 int ret; 2996 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2997 2998 ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, 2999 &adev->pm.dpm.thermal.irq); 3000 if (ret) 3001 return ret; 3002 3003 ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231, 3004 &adev->pm.dpm.thermal.irq); 3005 if (ret) 3006 return ret; 3007 3008 /* default to balanced state */ 3009 adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; 3010 adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; 3011 adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO; 3012 adev->pm.default_sclk = adev->clock.default_sclk; 3013 
adev->pm.default_mclk = adev->clock.default_mclk; 3014 adev->pm.current_sclk = adev->clock.default_sclk; 3015 adev->pm.current_mclk = adev->clock.default_mclk; 3016 adev->pm.int_thermal_type = THERMAL_TYPE_NONE; 3017 3018 if (amdgpu_dpm == 0) 3019 return 0; 3020 3021 INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler); 3022 mutex_lock(&adev->pm.mutex); 3023 ret = kv_dpm_init(adev); 3024 if (ret) 3025 goto dpm_failed; 3026 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; 3027 if (amdgpu_dpm == 1) 3028 amdgpu_pm_print_power_states(adev); 3029 mutex_unlock(&adev->pm.mutex); 3030 DRM_INFO("amdgpu: dpm initialized\n"); 3031 3032 return 0; 3033 3034dpm_failed: 3035 kv_dpm_fini(adev); 3036 mutex_unlock(&adev->pm.mutex); 3037 DRM_ERROR("amdgpu: dpm initialization failed\n"); 3038 return ret; 3039} 3040 3041static int kv_dpm_sw_fini(void *handle) 3042{ 3043 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3044 3045 flush_work(&adev->pm.dpm.thermal.work); 3046 3047 mutex_lock(&adev->pm.mutex); 3048 kv_dpm_fini(adev); 3049 mutex_unlock(&adev->pm.mutex); 3050 3051 return 0; 3052} 3053 3054static int kv_dpm_hw_init(void *handle) 3055{ 3056 int ret; 3057 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3058 3059 if (!amdgpu_dpm) 3060 return 0; 3061 3062 mutex_lock(&adev->pm.mutex); 3063 kv_dpm_setup_asic(adev); 3064 ret = kv_dpm_enable(adev); 3065 if (ret) 3066 adev->pm.dpm_enabled = false; 3067 else 3068 adev->pm.dpm_enabled = true; 3069 mutex_unlock(&adev->pm.mutex); 3070 amdgpu_pm_compute_clocks(adev); 3071 return ret; 3072} 3073 3074static int kv_dpm_hw_fini(void *handle) 3075{ 3076 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3077 3078 if (adev->pm.dpm_enabled) { 3079 mutex_lock(&adev->pm.mutex); 3080 kv_dpm_disable(adev); 3081 mutex_unlock(&adev->pm.mutex); 3082 } 3083 3084 return 0; 3085} 3086 3087static int kv_dpm_suspend(void *handle) 3088{ 3089 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3090 3091 if (adev->pm.dpm_enabled) { 3092 mutex_lock(&adev->pm.mutex); 3093 /* disable dpm */ 3094 kv_dpm_disable(adev); 3095 /* reset the power state */ 3096 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; 3097 mutex_unlock(&adev->pm.mutex); 3098 } 3099 return 0; 3100} 3101 3102static int kv_dpm_resume(void *handle) 3103{ 3104 int ret; 3105 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3106 3107 if (adev->pm.dpm_enabled) { 3108 /* asic init will reset to the boot state */ 3109 mutex_lock(&adev->pm.mutex); 3110 kv_dpm_setup_asic(adev); 3111 ret = kv_dpm_enable(adev); 3112 if (ret) 3113 adev->pm.dpm_enabled = false; 3114 else 3115 adev->pm.dpm_enabled = true; 3116 mutex_unlock(&adev->pm.mutex); 3117 if (adev->pm.dpm_enabled) 3118 amdgpu_pm_compute_clocks(adev); 3119 } 3120 return 0; 3121} 3122 3123static bool kv_dpm_is_idle(void *handle) 3124{ 3125 return true; 3126} 3127 3128static int kv_dpm_wait_for_idle(void *handle) 3129{ 3130 return 0; 3131} 3132 3133 3134static int kv_dpm_soft_reset(void *handle) 3135{ 3136 return 0; 3137} 3138 3139static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev, 3140 struct amdgpu_irq_src *src, 3141 unsigned type, 3142 enum amdgpu_interrupt_state state) 3143{ 3144 u32 cg_thermal_int; 3145 3146 switch (type) { 3147 case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH: 3148 switch (state) { 3149 case AMDGPU_IRQ_STATE_DISABLE: 3150 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 3151 cg_thermal_int &= 
~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; 3152 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); 3153 break; 3154 case AMDGPU_IRQ_STATE_ENABLE: 3155 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 3156 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; 3157 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); 3158 break; 3159 default: 3160 break; 3161 } 3162 break; 3163 3164 case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW: 3165 switch (state) { 3166 case AMDGPU_IRQ_STATE_DISABLE: 3167 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 3168 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; 3169 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); 3170 break; 3171 case AMDGPU_IRQ_STATE_ENABLE: 3172 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 3173 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; 3174 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); 3175 break; 3176 default: 3177 break; 3178 } 3179 break; 3180 3181 default: 3182 break; 3183 } 3184 return 0; 3185} 3186 3187static int kv_dpm_process_interrupt(struct amdgpu_device *adev, 3188 struct amdgpu_irq_src *source, 3189 struct amdgpu_iv_entry *entry) 3190{ 3191 bool queue_thermal = false; 3192 3193 if (entry == NULL) 3194 return -EINVAL; 3195 3196 switch (entry->src_id) { 3197 case 230: /* thermal low to high */ 3198 DRM_DEBUG("IH: thermal low to high\n"); 3199 adev->pm.dpm.thermal.high_to_low = false; 3200 queue_thermal = true; 3201 break; 3202 case 231: /* thermal high to low */ 3203 DRM_DEBUG("IH: thermal high to low\n"); 3204 adev->pm.dpm.thermal.high_to_low = true; 3205 queue_thermal = true; 3206 break; 3207 default: 3208 break; 3209 } 3210 3211 if (queue_thermal) 3212 schedule_work(&adev->pm.dpm.thermal.work); 3213 3214 return 0; 3215} 3216 3217static int kv_dpm_set_clockgating_state(void *handle, 3218 enum amd_clockgating_state state) 3219{ 3220 return 0; 3221} 3222 3223static int kv_dpm_set_powergating_state(void *handle, 3224 enum amd_powergating_state state) 3225{ 3226 return 0; 3227} 3228 3229static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1, 3230 const struct kv_pl *kv_cpl2) 3231{ 3232 return ((kv_cpl1->sclk == kv_cpl2->sclk) && 3233 (kv_cpl1->vddc_index == kv_cpl2->vddc_index) && 3234 (kv_cpl1->ds_divider_index == kv_cpl2->ds_divider_index) && 3235 (kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state)); 3236} 3237 3238static int kv_check_state_equal(void *handle, 3239 void *current_ps, 3240 void *request_ps, 3241 bool *equal) 3242{ 3243 struct kv_ps *kv_cps; 3244 struct kv_ps *kv_rps; 3245 int i; 3246 struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps; 3247 struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps; 3248 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3249 3250 if (adev == NULL || cps == NULL || rps == NULL || equal == NULL) 3251 return -EINVAL; 3252 3253 kv_cps = kv_get_ps(cps); 3254 kv_rps = kv_get_ps(rps); 3255 3256 if (kv_cps == NULL) { 3257 *equal = false; 3258 return 0; 3259 } 3260 3261 if (kv_cps->num_levels != kv_rps->num_levels) { 3262 *equal = false; 3263 return 0; 3264 } 3265 3266 for (i = 0; i < kv_cps->num_levels; i++) { 3267 if (!kv_are_power_levels_equal(&(kv_cps->levels[i]), 3268 &(kv_rps->levels[i]))) { 3269 *equal = false; 3270 return 0; 3271 } 3272 } 3273 3274 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ 3275 *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk)); 3276 *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk)); 3277 3278 return 0; 3279} 3280 
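/*
 * The sensor read below decodes the SMU's current SCLK DPM index from the
 * TARGET_AND_CURRENT_PROFILE_INDEX register and looks the engine clock up
 * in the graphics_level table that was uploaded to SMU RAM; the table is
 * kept big-endian, hence the be32_to_cpu() on SclkFrequency.  Roughly
 * (register field names abbreviated):
 *
 *   idx  = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
 *           CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
 *   sclk = be32_to_cpu(pi->graphics_level[idx].SclkFrequency);
 */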
static int kv_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	uint32_t sclk;
	u32 pl_index =
		(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
		TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
		TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;

	/* size must be at least 4 bytes for all sensors */
	if (*size < 4)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		if (pl_index < SMU__NUM_SCLK_DPM_STATE) {
			sclk = be32_to_cpu(
				pi->graphics_level[pl_index].SclkFrequency);
			*((uint32_t *)value) = sclk;
			*size = 4;
			return 0;
		}
		return -EINVAL;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = kv_dpm_get_temp(adev);
		*size = 4;
		return 0;
	default:
		return -EINVAL;
	}
}

static int kv_set_powergating_by_smu(void *handle,
		uint32_t block_type, bool gate)
{
	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
		kv_dpm_powergate_uvd(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_VCE:
		kv_dpm_powergate_vce(handle, gate);
		break;
	default:
		break;
	}
	return 0;
}

static const struct amd_ip_funcs kv_dpm_ip_funcs = {
	.name = "kv_dpm",
	.early_init = kv_dpm_early_init,
	.late_init = kv_dpm_late_init,
	.sw_init = kv_dpm_sw_init,
	.sw_fini = kv_dpm_sw_fini,
	.hw_init = kv_dpm_hw_init,
	.hw_fini = kv_dpm_hw_fini,
	.suspend = kv_dpm_suspend,
	.resume = kv_dpm_resume,
	.is_idle = kv_dpm_is_idle,
	.wait_for_idle = kv_dpm_wait_for_idle,
	.soft_reset = kv_dpm_soft_reset,
	.set_clockgating_state = kv_dpm_set_clockgating_state,
	.set_powergating_state = kv_dpm_set_powergating_state,
};

const struct amdgpu_ip_block_version kv_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &kv_dpm_ip_funcs,
};

static const struct amd_pm_funcs kv_dpm_funcs = {
	.pre_set_power_state = &kv_dpm_pre_set_power_state,
	.set_power_state = &kv_dpm_set_power_state,
	.post_set_power_state = &kv_dpm_post_set_power_state,
	.display_configuration_changed = &kv_dpm_display_configuration_changed,
	.get_sclk = &kv_dpm_get_sclk,
	.get_mclk = &kv_dpm_get_mclk,
	.print_power_state = &kv_dpm_print_power_state,
	.debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
	.force_performance_level = &kv_dpm_force_performance_level,
	.set_powergating_by_smu = kv_set_powergating_by_smu,
	.enable_bapm = &kv_dpm_enable_bapm,
	.get_vce_clock_state = amdgpu_get_vce_clock_state,
	.check_state_equal = kv_check_state_equal,
	.read_sensor = &kv_dpm_read_sensor,
};

static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = {
	.set = kv_dpm_set_interrupt_state,
	.process = kv_dpm_process_interrupt,
};

static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
	adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs;
}
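/*
 * kv_smu_ip_block is the hook exported for the SoC setup code to add to the
 * device's IP block list; its amd_ip_funcs drive init/fini and
 * suspend/resume, while kv_dpm_funcs (installed as adev->powerplay.pp_funcs
 * in kv_dpm_early_init) provides the legacy dpm entry points used by the
 * amdgpu_pm layer.
 */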