Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
at v4.14-rc6 3348 lines 93 kB view raw
1/* 2 * Copyright 2013 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24#include <drm/drmP.h> 25#include "amdgpu.h" 26#include "amdgpu_pm.h" 27#include "cikd.h" 28#include "atom.h" 29#include "amdgpu_atombios.h" 30#include "amdgpu_dpm.h" 31#include "kv_dpm.h" 32#include "gfx_v7_0.h" 33#include <linux/seq_file.h> 34 35#include "smu/smu_7_0_0_d.h" 36#include "smu/smu_7_0_0_sh_mask.h" 37 38#include "gca/gfx_7_2_d.h" 39#include "gca/gfx_7_2_sh_mask.h" 40 41#define KV_MAX_DEEPSLEEP_DIVIDER_ID 5 42#define KV_MINIMUM_ENGINE_CLOCK 800 43#define SMC_RAM_END 0x40000 44 45static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev); 46static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev); 47static int kv_enable_nb_dpm(struct amdgpu_device *adev, 48 bool enable); 49static void kv_init_graphics_levels(struct amdgpu_device *adev); 50static int kv_calculate_ds_divider(struct amdgpu_device *adev); 51static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev); 52static int kv_calculate_dpm_settings(struct amdgpu_device *adev); 53static void kv_enable_new_levels(struct amdgpu_device *adev); 54static void kv_program_nbps_index_settings(struct amdgpu_device *adev, 55 struct amdgpu_ps *new_rps); 56static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level); 57static int kv_set_enabled_levels(struct amdgpu_device *adev); 58static int kv_force_dpm_highest(struct amdgpu_device *adev); 59static int kv_force_dpm_lowest(struct amdgpu_device *adev); 60static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, 61 struct amdgpu_ps *new_rps, 62 struct amdgpu_ps *old_rps); 63static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, 64 int min_temp, int max_temp); 65static int kv_init_fps_limits(struct amdgpu_device *adev); 66 67static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate); 68static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate); 69static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); 70static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); 71 72 73static u32 kv_convert_vid2_to_vid7(struct amdgpu_device *adev, 74 struct sumo_vid_mapping_table *vid_mapping_table, 75 u32 vid_2bit) 76{ 77 struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table = 78 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 79 u32 i; 80 81 if (vddc_sclk_table && vddc_sclk_table->count) { 82 if (vid_2bit < vddc_sclk_table->count) 83 return vddc_sclk_table->entries[vid_2bit].v; 84 else 85 return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v; 86 } else { 87 for (i = 0; i < vid_mapping_table->num_entries; i++) { 88 if (vid_mapping_table->entries[i].vid_2bit == vid_2bit) 89 return vid_mapping_table->entries[i].vid_7bit; 90 } 91 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit; 92 } 93} 94 95static u32 kv_convert_vid7_to_vid2(struct amdgpu_device *adev, 96 struct sumo_vid_mapping_table *vid_mapping_table, 97 u32 vid_7bit) 98{ 99 struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table = 100 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 101 u32 i; 102 103 if (vddc_sclk_table && vddc_sclk_table->count) { 104 for (i = 0; i < vddc_sclk_table->count; i++) { 105 if (vddc_sclk_table->entries[i].v == vid_7bit) 106 return i; 107 } 108 return vddc_sclk_table->count - 1; 109 } else { 110 for (i = 0; i < vid_mapping_table->num_entries; i++) { 111 if (vid_mapping_table->entries[i].vid_7bit == vid_7bit) 112 return vid_mapping_table->entries[i].vid_2bit; 113 } 114 115 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit; 116 } 117} 118 119static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable) 120{ 121/* This bit selects who handles display phy powergating. 122 * Clear the bit to let atom handle it. 123 * Set it to let the driver handle it. 124 * For now we just let atom handle it. 125 */ 126#if 0 127 u32 v = RREG32(mmDOUT_SCRATCH3); 128 129 if (enable) 130 v |= 0x4; 131 else 132 v &= 0xFFFFFFFB; 133 134 WREG32(mmDOUT_SCRATCH3, v); 135#endif 136} 137 138static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev, 139 struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table, 140 ATOM_AVAILABLE_SCLK_LIST *table) 141{ 142 u32 i; 143 u32 n = 0; 144 u32 prev_sclk = 0; 145 146 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { 147 if (table[i].ulSupportedSCLK > prev_sclk) { 148 sclk_voltage_mapping_table->entries[n].sclk_frequency = 149 table[i].ulSupportedSCLK; 150 sclk_voltage_mapping_table->entries[n].vid_2bit = 151 table[i].usVoltageIndex; 152 prev_sclk = table[i].ulSupportedSCLK; 153 n++; 154 } 155 } 156 157 sclk_voltage_mapping_table->num_max_dpm_entries = n; 158} 159 160static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev, 161 struct sumo_vid_mapping_table *vid_mapping_table, 162 ATOM_AVAILABLE_SCLK_LIST *table) 163{ 164 u32 i, j; 165 166 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { 167 if (table[i].ulSupportedSCLK != 0) { 168 vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit = 169 table[i].usVoltageID; 170 vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit = 171 table[i].usVoltageIndex; 172 } 173 } 174 175 for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) { 176 if (vid_mapping_table->entries[i].vid_7bit == 0) { 177 for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) { 178 if (vid_mapping_table->entries[j].vid_7bit != 0) { 179 vid_mapping_table->entries[i] = 180 vid_mapping_table->entries[j]; 181 vid_mapping_table->entries[j].vid_7bit = 0; 182 break; 183 } 184 } 185 186 if (j == SUMO_MAX_NUMBER_VOLTAGES) 187 break; 188 } 189 } 190 191 vid_mapping_table->num_entries = i; 192} 193 194#if 0 195static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] = 196{ 197 { 0, 4, 1 }, 198 { 1, 4, 1 }, 199 { 2, 5, 1 }, 200 { 3, 4, 2 }, 201 { 4, 1, 1 }, 202 { 5, 5, 2 }, 203 { 6, 6, 1 }, 204 { 7, 9, 2 }, 205 { 0xffffffff } 206}; 207 208static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] = 209{ 210 { 0, 4, 1 }, 211 { 0xffffffff } 212}; 213 214static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] = 215{ 216 { 0, 4, 1 }, 217 { 0xffffffff } 218}; 219 220static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] = 221{ 222 { 0, 4, 1 }, 223 { 0xffffffff } 224}; 225 226static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] = 227{ 228 { 0, 4, 1 }, 229 { 0xffffffff } 230}; 231 232static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] = 233{ 234 { 0, 4, 1 }, 235 { 1, 4, 1 }, 236 { 2, 5, 1 }, 237 { 3, 4, 1 }, 238 { 4, 1, 1 }, 239 { 5, 5, 1 }, 240 { 6, 6, 1 }, 241 { 7, 9, 1 }, 242 { 8, 4, 1 }, 243 { 9, 2, 1 }, 244 { 10, 3, 1 }, 245 { 11, 6, 1 }, 246 { 12, 8, 2 }, 247 { 13, 1, 1 }, 248 { 14, 2, 1 }, 249 { 15, 3, 1 }, 250 { 16, 1, 1 }, 251 { 17, 4, 1 }, 252 { 18, 3, 1 }, 253 { 19, 1, 1 }, 254 { 20, 8, 1 }, 255 { 21, 5, 1 }, 256 { 22, 1, 1 }, 257 { 23, 1, 1 }, 258 { 24, 4, 1 }, 259 { 27, 6, 1 }, 260 { 28, 1, 1 }, 261 { 0xffffffff } 262}; 263 264static const struct kv_lcac_config_reg sx0_cac_config_reg[] = 265{ 266 { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 267}; 268 269static const struct kv_lcac_config_reg mc0_cac_config_reg[] = 270{ 271 { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 272}; 273 274static const struct kv_lcac_config_reg mc1_cac_config_reg[] = 275{ 276 { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 277}; 278 279static const struct kv_lcac_config_reg mc2_cac_config_reg[] = 280{ 281 { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 282}; 283 284static const struct kv_lcac_config_reg mc3_cac_config_reg[] = 285{ 286 { 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 287}; 288 289static const struct kv_lcac_config_reg cpl_cac_config_reg[] = 290{ 291 { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 292}; 293#endif 294 295static const struct kv_pt_config_reg didt_config_kv[] = 296{ 297 { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 298 { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 299 { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 300 { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 301 { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 302 { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 303 { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 304 { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 305 { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 306 { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 307 { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 308 { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 309 { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, 310 { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, 311 { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, 312 { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 313 { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 314 { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 315 { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 316 { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 317 { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 318 { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 319 { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 320 { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 321 { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 322 { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 323 { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 324 { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 325 { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 326 { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 327 { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, 328 { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, 329 { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, 330 { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 331 { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 332 { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 333 { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 334 { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 335 { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 336 { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 337 { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 338 { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 339 { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 340 { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 341 { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 342 { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 343 { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 344 { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 345 { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, 346 { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, 347 { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, 348 { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 349 { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 350 { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 351 { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 352 { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 353 { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 354 { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 355 { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 356 { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 357 { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 358 { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 359 { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 360 { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 361 { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 362 { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 363 { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, 364 { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, 365 { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, 366 { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 367 { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 368 { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 369 { 0xFFFFFFFF } 370}; 371 372static struct kv_ps *kv_get_ps(struct amdgpu_ps *rps) 373{ 374 struct kv_ps *ps = rps->ps_priv; 375 376 return ps; 377} 378 379static struct kv_power_info *kv_get_pi(struct amdgpu_device *adev) 380{ 381 struct kv_power_info *pi = adev->pm.dpm.priv; 382 383 return pi; 384} 385 386#if 0 387static void kv_program_local_cac_table(struct amdgpu_device *adev, 388 const struct kv_lcac_config_values *local_cac_table, 389 const struct kv_lcac_config_reg *local_cac_reg) 390{ 391 u32 i, count, data; 392 const struct kv_lcac_config_values *values = local_cac_table; 393 394 while (values->block_id != 0xffffffff) { 395 count = values->signal_id; 396 for (i = 0; i < count; i++) { 397 data = ((values->block_id << local_cac_reg->block_shift) & 398 local_cac_reg->block_mask); 399 data |= ((i << local_cac_reg->signal_shift) & 400 local_cac_reg->signal_mask); 401 data |= ((values->t << local_cac_reg->t_shift) & 402 local_cac_reg->t_mask); 403 data |= ((1 << local_cac_reg->enable_shift) & 404 local_cac_reg->enable_mask); 405 WREG32_SMC(local_cac_reg->cntl, data); 406 } 407 values++; 408 } 409} 410#endif 411 412static int kv_program_pt_config_registers(struct amdgpu_device *adev, 413 const struct kv_pt_config_reg *cac_config_regs) 414{ 415 const struct kv_pt_config_reg *config_regs = cac_config_regs; 416 u32 data; 417 u32 cache = 0; 418 419 if (config_regs == NULL) 420 return -EINVAL; 421 422 while (config_regs->offset != 0xFFFFFFFF) { 423 if (config_regs->type == KV_CONFIGREG_CACHE) { 424 cache |= ((config_regs->value << config_regs->shift) & config_regs->mask); 425 } else { 426 switch (config_regs->type) { 427 case KV_CONFIGREG_SMC_IND: 428 data = RREG32_SMC(config_regs->offset); 429 break; 430 case KV_CONFIGREG_DIDT_IND: 431 data = RREG32_DIDT(config_regs->offset); 432 break; 433 default: 434 data = RREG32(config_regs->offset); 435 break; 436 } 437 438 data &= ~config_regs->mask; 439 data |= ((config_regs->value << config_regs->shift) & config_regs->mask); 440 data |= cache; 441 cache = 0; 442 443 switch (config_regs->type) { 444 case KV_CONFIGREG_SMC_IND: 445 WREG32_SMC(config_regs->offset, data); 446 break; 447 case KV_CONFIGREG_DIDT_IND: 448 WREG32_DIDT(config_regs->offset, data); 449 break; 450 default: 451 WREG32(config_regs->offset, data); 452 break; 453 } 454 } 455 config_regs++; 456 } 457 458 return 0; 459} 460 461static void kv_do_enable_didt(struct amdgpu_device *adev, bool enable) 462{ 463 struct kv_power_info *pi = kv_get_pi(adev); 464 u32 data; 465 466 if (pi->caps_sq_ramping) { 467 data = RREG32_DIDT(ixDIDT_SQ_CTRL0); 468 if (enable) 469 data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; 470 else 471 data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; 472 WREG32_DIDT(ixDIDT_SQ_CTRL0, data); 473 } 474 475 if (pi->caps_db_ramping) { 476 data = RREG32_DIDT(ixDIDT_DB_CTRL0); 477 if (enable) 478 data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; 479 else 480 data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; 481 WREG32_DIDT(ixDIDT_DB_CTRL0, data); 482 } 483 484 if (pi->caps_td_ramping) { 485 data = RREG32_DIDT(ixDIDT_TD_CTRL0); 486 if (enable) 487 data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; 488 else 489 data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; 490 WREG32_DIDT(ixDIDT_TD_CTRL0, data); 491 } 492 493 if (pi->caps_tcp_ramping) { 494 data = RREG32_DIDT(ixDIDT_TCP_CTRL0); 495 if (enable) 496 data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; 497 else 498 data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; 499 WREG32_DIDT(ixDIDT_TCP_CTRL0, data); 500 } 501} 502 503static int kv_enable_didt(struct amdgpu_device *adev, bool enable) 504{ 505 struct kv_power_info *pi = kv_get_pi(adev); 506 int ret; 507 508 if (pi->caps_sq_ramping || 509 pi->caps_db_ramping || 510 pi->caps_td_ramping || 511 pi->caps_tcp_ramping) { 512 adev->gfx.rlc.funcs->enter_safe_mode(adev); 513 514 if (enable) { 515 ret = kv_program_pt_config_registers(adev, didt_config_kv); 516 if (ret) { 517 adev->gfx.rlc.funcs->exit_safe_mode(adev); 518 return ret; 519 } 520 } 521 522 kv_do_enable_didt(adev, enable); 523 524 adev->gfx.rlc.funcs->exit_safe_mode(adev); 525 } 526 527 return 0; 528} 529 530#if 0 531static void kv_initialize_hardware_cac_manager(struct amdgpu_device *adev) 532{ 533 struct kv_power_info *pi = kv_get_pi(adev); 534 535 if (pi->caps_cac) { 536 WREG32_SMC(ixLCAC_SX0_OVR_SEL, 0); 537 WREG32_SMC(ixLCAC_SX0_OVR_VAL, 0); 538 kv_program_local_cac_table(adev, sx_local_cac_cfg_kv, sx0_cac_config_reg); 539 540 WREG32_SMC(ixLCAC_MC0_OVR_SEL, 0); 541 WREG32_SMC(ixLCAC_MC0_OVR_VAL, 0); 542 kv_program_local_cac_table(adev, mc0_local_cac_cfg_kv, mc0_cac_config_reg); 543 544 WREG32_SMC(ixLCAC_MC1_OVR_SEL, 0); 545 WREG32_SMC(ixLCAC_MC1_OVR_VAL, 0); 546 kv_program_local_cac_table(adev, mc1_local_cac_cfg_kv, mc1_cac_config_reg); 547 548 WREG32_SMC(ixLCAC_MC2_OVR_SEL, 0); 549 WREG32_SMC(ixLCAC_MC2_OVR_VAL, 0); 550 kv_program_local_cac_table(adev, mc2_local_cac_cfg_kv, mc2_cac_config_reg); 551 552 WREG32_SMC(ixLCAC_MC3_OVR_SEL, 0); 553 WREG32_SMC(ixLCAC_MC3_OVR_VAL, 0); 554 kv_program_local_cac_table(adev, mc3_local_cac_cfg_kv, mc3_cac_config_reg); 555 556 WREG32_SMC(ixLCAC_CPL_OVR_SEL, 0); 557 WREG32_SMC(ixLCAC_CPL_OVR_VAL, 0); 558 kv_program_local_cac_table(adev, cpl_local_cac_cfg_kv, cpl_cac_config_reg); 559 } 560} 561#endif 562 563static int kv_enable_smc_cac(struct amdgpu_device *adev, bool enable) 564{ 565 struct kv_power_info *pi = kv_get_pi(adev); 566 int ret = 0; 567 568 if (pi->caps_cac) { 569 if (enable) { 570 ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableCac); 571 if (ret) 572 pi->cac_enabled = false; 573 else 574 pi->cac_enabled = true; 575 } else if (pi->cac_enabled) { 576 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableCac); 577 pi->cac_enabled = false; 578 } 579 } 580 581 return ret; 582} 583 584static int kv_process_firmware_header(struct amdgpu_device *adev) 585{ 586 struct kv_power_info *pi = kv_get_pi(adev); 587 u32 tmp; 588 int ret; 589 590 ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION + 591 offsetof(SMU7_Firmware_Header, DpmTable), 592 &tmp, pi->sram_end); 593 594 if (ret == 0) 595 pi->dpm_table_start = tmp; 596 597 ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION + 598 offsetof(SMU7_Firmware_Header, SoftRegisters), 599 &tmp, pi->sram_end); 600 601 if (ret == 0) 602 pi->soft_regs_start = tmp; 603 604 return ret; 605} 606 607static int kv_enable_dpm_voltage_scaling(struct amdgpu_device *adev) 608{ 609 struct kv_power_info *pi = kv_get_pi(adev); 610 int ret; 611 612 pi->graphics_voltage_change_enable = 1; 613 614 ret = amdgpu_kv_copy_bytes_to_smc(adev, 615 pi->dpm_table_start + 616 offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable), 617 &pi->graphics_voltage_change_enable, 618 sizeof(u8), pi->sram_end); 619 620 return ret; 621} 622 623static int kv_set_dpm_interval(struct amdgpu_device *adev) 624{ 625 struct kv_power_info *pi = kv_get_pi(adev); 626 int ret; 627 628 pi->graphics_interval = 1; 629 630 ret = amdgpu_kv_copy_bytes_to_smc(adev, 631 pi->dpm_table_start + 632 offsetof(SMU7_Fusion_DpmTable, GraphicsInterval), 633 &pi->graphics_interval, 634 sizeof(u8), pi->sram_end); 635 636 return ret; 637} 638 639static int kv_set_dpm_boot_state(struct amdgpu_device *adev) 640{ 641 struct kv_power_info *pi = kv_get_pi(adev); 642 int ret; 643 644 ret = amdgpu_kv_copy_bytes_to_smc(adev, 645 pi->dpm_table_start + 646 offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel), 647 &pi->graphics_boot_level, 648 sizeof(u8), pi->sram_end); 649 650 return ret; 651} 652 653static void kv_program_vc(struct amdgpu_device *adev) 654{ 655 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0x3FFFC100); 656} 657 658static void kv_clear_vc(struct amdgpu_device *adev) 659{ 660 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0); 661} 662 663static int kv_set_divider_value(struct amdgpu_device *adev, 664 u32 index, u32 sclk) 665{ 666 struct kv_power_info *pi = kv_get_pi(adev); 667 struct atom_clock_dividers dividers; 668 int ret; 669 670 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 671 sclk, false, &dividers); 672 if (ret) 673 return ret; 674 675 pi->graphics_level[index].SclkDid = (u8)dividers.post_div; 676 pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk); 677 678 return 0; 679} 680 681static u16 kv_convert_8bit_index_to_voltage(struct amdgpu_device *adev, 682 u16 voltage) 683{ 684 return 6200 - (voltage * 25); 685} 686 687static u16 kv_convert_2bit_index_to_voltage(struct amdgpu_device *adev, 688 u32 vid_2bit) 689{ 690 struct kv_power_info *pi = kv_get_pi(adev); 691 u32 vid_8bit = kv_convert_vid2_to_vid7(adev, 692 &pi->sys_info.vid_mapping_table, 693 vid_2bit); 694 695 return kv_convert_8bit_index_to_voltage(adev, (u16)vid_8bit); 696} 697 698 699static int kv_set_vid(struct amdgpu_device *adev, u32 index, u32 vid) 700{ 701 struct kv_power_info *pi = kv_get_pi(adev); 702 703 pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t; 704 pi->graphics_level[index].MinVddNb = 705 cpu_to_be32(kv_convert_2bit_index_to_voltage(adev, vid)); 706 707 return 0; 708} 709 710static int kv_set_at(struct amdgpu_device *adev, u32 index, u32 at) 711{ 712 struct kv_power_info *pi = kv_get_pi(adev); 713 714 pi->graphics_level[index].AT = cpu_to_be16((u16)at); 715 716 return 0; 717} 718 719static void kv_dpm_power_level_enable(struct amdgpu_device *adev, 720 u32 index, bool enable) 721{ 722 struct kv_power_info *pi = kv_get_pi(adev); 723 724 pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0; 725} 726 727static void kv_start_dpm(struct amdgpu_device *adev) 728{ 729 u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT); 730 731 tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK; 732 WREG32_SMC(ixGENERAL_PWRMGT, tmp); 733 734 amdgpu_kv_smc_dpm_enable(adev, true); 735} 736 737static void kv_stop_dpm(struct amdgpu_device *adev) 738{ 739 amdgpu_kv_smc_dpm_enable(adev, false); 740} 741 742static void kv_start_am(struct amdgpu_device *adev) 743{ 744 u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL); 745 746 sclk_pwrmgt_cntl &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | 747 SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); 748 sclk_pwrmgt_cntl |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK; 749 750 WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl); 751} 752 753static void kv_reset_am(struct amdgpu_device *adev) 754{ 755 u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL); 756 757 sclk_pwrmgt_cntl |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | 758 SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); 759 760 WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl); 761} 762 763static int kv_freeze_sclk_dpm(struct amdgpu_device *adev, bool freeze) 764{ 765 return amdgpu_kv_notify_message_to_smu(adev, freeze ? 766 PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel); 767} 768 769static int kv_force_lowest_valid(struct amdgpu_device *adev) 770{ 771 return kv_force_dpm_lowest(adev); 772} 773 774static int kv_unforce_levels(struct amdgpu_device *adev) 775{ 776 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 777 return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NoForcedLevel); 778 else 779 return kv_set_enabled_levels(adev); 780} 781 782static int kv_update_sclk_t(struct amdgpu_device *adev) 783{ 784 struct kv_power_info *pi = kv_get_pi(adev); 785 u32 low_sclk_interrupt_t = 0; 786 int ret = 0; 787 788 if (pi->caps_sclk_throttle_low_notification) { 789 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t); 790 791 ret = amdgpu_kv_copy_bytes_to_smc(adev, 792 pi->dpm_table_start + 793 offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT), 794 (u8 *)&low_sclk_interrupt_t, 795 sizeof(u32), pi->sram_end); 796 } 797 return ret; 798} 799 800static int kv_program_bootup_state(struct amdgpu_device *adev) 801{ 802 struct kv_power_info *pi = kv_get_pi(adev); 803 u32 i; 804 struct amdgpu_clock_voltage_dependency_table *table = 805 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 806 807 if (table && table->count) { 808 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 809 if (table->entries[i].clk == pi->boot_pl.sclk) 810 break; 811 } 812 813 pi->graphics_boot_level = (u8)i; 814 kv_dpm_power_level_enable(adev, i, true); 815 } else { 816 struct sumo_sclk_voltage_mapping_table *table = 817 &pi->sys_info.sclk_voltage_mapping_table; 818 819 if (table->num_max_dpm_entries == 0) 820 return -EINVAL; 821 822 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 823 if (table->entries[i].sclk_frequency == pi->boot_pl.sclk) 824 break; 825 } 826 827 pi->graphics_boot_level = (u8)i; 828 kv_dpm_power_level_enable(adev, i, true); 829 } 830 return 0; 831} 832 833static int kv_enable_auto_thermal_throttling(struct amdgpu_device *adev) 834{ 835 struct kv_power_info *pi = kv_get_pi(adev); 836 int ret; 837 838 pi->graphics_therm_throttle_enable = 1; 839 840 ret = amdgpu_kv_copy_bytes_to_smc(adev, 841 pi->dpm_table_start + 842 offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable), 843 &pi->graphics_therm_throttle_enable, 844 sizeof(u8), pi->sram_end); 845 846 return ret; 847} 848 849static int kv_upload_dpm_settings(struct amdgpu_device *adev) 850{ 851 struct kv_power_info *pi = kv_get_pi(adev); 852 int ret; 853 854 ret = amdgpu_kv_copy_bytes_to_smc(adev, 855 pi->dpm_table_start + 856 offsetof(SMU7_Fusion_DpmTable, GraphicsLevel), 857 (u8 *)&pi->graphics_level, 858 sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS, 859 pi->sram_end); 860 861 if (ret) 862 return ret; 863 864 ret = amdgpu_kv_copy_bytes_to_smc(adev, 865 pi->dpm_table_start + 866 offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount), 867 &pi->graphics_dpm_level_count, 868 sizeof(u8), pi->sram_end); 869 870 return ret; 871} 872 873static u32 kv_get_clock_difference(u32 a, u32 b) 874{ 875 return (a >= b) ? a - b : b - a; 876} 877 878static u32 kv_get_clk_bypass(struct amdgpu_device *adev, u32 clk) 879{ 880 struct kv_power_info *pi = kv_get_pi(adev); 881 u32 value; 882 883 if (pi->caps_enable_dfs_bypass) { 884 if (kv_get_clock_difference(clk, 40000) < 200) 885 value = 3; 886 else if (kv_get_clock_difference(clk, 30000) < 200) 887 value = 2; 888 else if (kv_get_clock_difference(clk, 20000) < 200) 889 value = 7; 890 else if (kv_get_clock_difference(clk, 15000) < 200) 891 value = 6; 892 else if (kv_get_clock_difference(clk, 10000) < 200) 893 value = 8; 894 else 895 value = 0; 896 } else { 897 value = 0; 898 } 899 900 return value; 901} 902 903static int kv_populate_uvd_table(struct amdgpu_device *adev) 904{ 905 struct kv_power_info *pi = kv_get_pi(adev); 906 struct amdgpu_uvd_clock_voltage_dependency_table *table = 907 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 908 struct atom_clock_dividers dividers; 909 int ret; 910 u32 i; 911 912 if (table == NULL || table->count == 0) 913 return 0; 914 915 pi->uvd_level_count = 0; 916 for (i = 0; i < table->count; i++) { 917 if (pi->high_voltage_t && 918 (pi->high_voltage_t < table->entries[i].v)) 919 break; 920 921 pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk); 922 pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk); 923 pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v); 924 925 pi->uvd_level[i].VClkBypassCntl = 926 (u8)kv_get_clk_bypass(adev, table->entries[i].vclk); 927 pi->uvd_level[i].DClkBypassCntl = 928 (u8)kv_get_clk_bypass(adev, table->entries[i].dclk); 929 930 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 931 table->entries[i].vclk, false, &dividers); 932 if (ret) 933 return ret; 934 pi->uvd_level[i].VclkDivider = (u8)dividers.post_div; 935 936 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 937 table->entries[i].dclk, false, &dividers); 938 if (ret) 939 return ret; 940 pi->uvd_level[i].DclkDivider = (u8)dividers.post_div; 941 942 pi->uvd_level_count++; 943 } 944 945 ret = amdgpu_kv_copy_bytes_to_smc(adev, 946 pi->dpm_table_start + 947 offsetof(SMU7_Fusion_DpmTable, UvdLevelCount), 948 (u8 *)&pi->uvd_level_count, 949 sizeof(u8), pi->sram_end); 950 if (ret) 951 return ret; 952 953 pi->uvd_interval = 1; 954 955 ret = amdgpu_kv_copy_bytes_to_smc(adev, 956 pi->dpm_table_start + 957 offsetof(SMU7_Fusion_DpmTable, UVDInterval), 958 &pi->uvd_interval, 959 sizeof(u8), pi->sram_end); 960 if (ret) 961 return ret; 962 963 ret = amdgpu_kv_copy_bytes_to_smc(adev, 964 pi->dpm_table_start + 965 offsetof(SMU7_Fusion_DpmTable, UvdLevel), 966 (u8 *)&pi->uvd_level, 967 sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD, 968 pi->sram_end); 969 970 return ret; 971 972} 973 974static int kv_populate_vce_table(struct amdgpu_device *adev) 975{ 976 struct kv_power_info *pi = kv_get_pi(adev); 977 int ret; 978 u32 i; 979 struct amdgpu_vce_clock_voltage_dependency_table *table = 980 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 981 struct atom_clock_dividers dividers; 982 983 if (table == NULL || table->count == 0) 984 return 0; 985 986 pi->vce_level_count = 0; 987 for (i = 0; i < table->count; i++) { 988 if (pi->high_voltage_t && 989 pi->high_voltage_t < table->entries[i].v) 990 break; 991 992 pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk); 993 pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); 994 995 pi->vce_level[i].ClkBypassCntl = 996 (u8)kv_get_clk_bypass(adev, table->entries[i].evclk); 997 998 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 999 table->entries[i].evclk, false, &dividers); 1000 if (ret) 1001 return ret; 1002 pi->vce_level[i].Divider = (u8)dividers.post_div; 1003 1004 pi->vce_level_count++; 1005 } 1006 1007 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1008 pi->dpm_table_start + 1009 offsetof(SMU7_Fusion_DpmTable, VceLevelCount), 1010 (u8 *)&pi->vce_level_count, 1011 sizeof(u8), 1012 pi->sram_end); 1013 if (ret) 1014 return ret; 1015 1016 pi->vce_interval = 1; 1017 1018 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1019 pi->dpm_table_start + 1020 offsetof(SMU7_Fusion_DpmTable, VCEInterval), 1021 (u8 *)&pi->vce_interval, 1022 sizeof(u8), 1023 pi->sram_end); 1024 if (ret) 1025 return ret; 1026 1027 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1028 pi->dpm_table_start + 1029 offsetof(SMU7_Fusion_DpmTable, VceLevel), 1030 (u8 *)&pi->vce_level, 1031 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE, 1032 pi->sram_end); 1033 1034 return ret; 1035} 1036 1037static int kv_populate_samu_table(struct amdgpu_device *adev) 1038{ 1039 struct kv_power_info *pi = kv_get_pi(adev); 1040 struct amdgpu_clock_voltage_dependency_table *table = 1041 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; 1042 struct atom_clock_dividers dividers; 1043 int ret; 1044 u32 i; 1045 1046 if (table == NULL || table->count == 0) 1047 return 0; 1048 1049 pi->samu_level_count = 0; 1050 for (i = 0; i < table->count; i++) { 1051 if (pi->high_voltage_t && 1052 pi->high_voltage_t < table->entries[i].v) 1053 break; 1054 1055 pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk); 1056 pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); 1057 1058 pi->samu_level[i].ClkBypassCntl = 1059 (u8)kv_get_clk_bypass(adev, table->entries[i].clk); 1060 1061 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 1062 table->entries[i].clk, false, &dividers); 1063 if (ret) 1064 return ret; 1065 pi->samu_level[i].Divider = (u8)dividers.post_div; 1066 1067 pi->samu_level_count++; 1068 } 1069 1070 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1071 pi->dpm_table_start + 1072 offsetof(SMU7_Fusion_DpmTable, SamuLevelCount), 1073 (u8 *)&pi->samu_level_count, 1074 sizeof(u8), 1075 pi->sram_end); 1076 if (ret) 1077 return ret; 1078 1079 pi->samu_interval = 1; 1080 1081 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1082 pi->dpm_table_start + 1083 offsetof(SMU7_Fusion_DpmTable, SAMUInterval), 1084 (u8 *)&pi->samu_interval, 1085 sizeof(u8), 1086 pi->sram_end); 1087 if (ret) 1088 return ret; 1089 1090 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1091 pi->dpm_table_start + 1092 offsetof(SMU7_Fusion_DpmTable, SamuLevel), 1093 (u8 *)&pi->samu_level, 1094 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU, 1095 pi->sram_end); 1096 if (ret) 1097 return ret; 1098 1099 return ret; 1100} 1101 1102 1103static int kv_populate_acp_table(struct amdgpu_device *adev) 1104{ 1105 struct kv_power_info *pi = kv_get_pi(adev); 1106 struct amdgpu_clock_voltage_dependency_table *table = 1107 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 1108 struct atom_clock_dividers dividers; 1109 int ret; 1110 u32 i; 1111 1112 if (table == NULL || table->count == 0) 1113 return 0; 1114 1115 pi->acp_level_count = 0; 1116 for (i = 0; i < table->count; i++) { 1117 pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk); 1118 pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); 1119 1120 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 1121 table->entries[i].clk, false, &dividers); 1122 if (ret) 1123 return ret; 1124 pi->acp_level[i].Divider = (u8)dividers.post_div; 1125 1126 pi->acp_level_count++; 1127 } 1128 1129 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1130 pi->dpm_table_start + 1131 offsetof(SMU7_Fusion_DpmTable, AcpLevelCount), 1132 (u8 *)&pi->acp_level_count, 1133 sizeof(u8), 1134 pi->sram_end); 1135 if (ret) 1136 return ret; 1137 1138 pi->acp_interval = 1; 1139 1140 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1141 pi->dpm_table_start + 1142 offsetof(SMU7_Fusion_DpmTable, ACPInterval), 1143 (u8 *)&pi->acp_interval, 1144 sizeof(u8), 1145 pi->sram_end); 1146 if (ret) 1147 return ret; 1148 1149 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1150 pi->dpm_table_start + 1151 offsetof(SMU7_Fusion_DpmTable, AcpLevel), 1152 (u8 *)&pi->acp_level, 1153 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP, 1154 pi->sram_end); 1155 if (ret) 1156 return ret; 1157 1158 return ret; 1159} 1160 1161static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev) 1162{ 1163 struct kv_power_info *pi = kv_get_pi(adev); 1164 u32 i; 1165 struct amdgpu_clock_voltage_dependency_table *table = 1166 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 1167 1168 if (table && table->count) { 1169 for (i = 0; i < pi->graphics_dpm_level_count; i++) { 1170 if (pi->caps_enable_dfs_bypass) { 1171 if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200) 1172 pi->graphics_level[i].ClkBypassCntl = 3; 1173 else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200) 1174 pi->graphics_level[i].ClkBypassCntl = 2; 1175 else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200) 1176 pi->graphics_level[i].ClkBypassCntl = 7; 1177 else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200) 1178 pi->graphics_level[i].ClkBypassCntl = 6; 1179 else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200) 1180 pi->graphics_level[i].ClkBypassCntl = 8; 1181 else 1182 pi->graphics_level[i].ClkBypassCntl = 0; 1183 } else { 1184 pi->graphics_level[i].ClkBypassCntl = 0; 1185 } 1186 } 1187 } else { 1188 struct sumo_sclk_voltage_mapping_table *table = 1189 &pi->sys_info.sclk_voltage_mapping_table; 1190 for (i = 0; i < pi->graphics_dpm_level_count; i++) { 1191 if (pi->caps_enable_dfs_bypass) { 1192 if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200) 1193 pi->graphics_level[i].ClkBypassCntl = 3; 1194 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200) 1195 pi->graphics_level[i].ClkBypassCntl = 2; 1196 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200) 1197 pi->graphics_level[i].ClkBypassCntl = 7; 1198 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200) 1199 pi->graphics_level[i].ClkBypassCntl = 6; 1200 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200) 1201 pi->graphics_level[i].ClkBypassCntl = 8; 1202 else 1203 pi->graphics_level[i].ClkBypassCntl = 0; 1204 } else { 1205 pi->graphics_level[i].ClkBypassCntl = 0; 1206 } 1207 } 1208 } 1209} 1210 1211static int kv_enable_ulv(struct amdgpu_device *adev, bool enable) 1212{ 1213 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1214 PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV); 1215} 1216 1217static void kv_reset_acp_boot_level(struct amdgpu_device *adev) 1218{ 1219 struct kv_power_info *pi = kv_get_pi(adev); 1220 1221 pi->acp_boot_level = 0xff; 1222} 1223 1224static void kv_update_current_ps(struct amdgpu_device *adev, 1225 struct amdgpu_ps *rps) 1226{ 1227 struct kv_ps *new_ps = kv_get_ps(rps); 1228 struct kv_power_info *pi = kv_get_pi(adev); 1229 1230 pi->current_rps = *rps; 1231 pi->current_ps = *new_ps; 1232 pi->current_rps.ps_priv = &pi->current_ps; 1233 adev->pm.dpm.current_ps = &pi->current_rps; 1234} 1235 1236static void kv_update_requested_ps(struct amdgpu_device *adev, 1237 struct amdgpu_ps *rps) 1238{ 1239 struct kv_ps *new_ps = kv_get_ps(rps); 1240 struct kv_power_info *pi = kv_get_pi(adev); 1241 1242 pi->requested_rps = *rps; 1243 pi->requested_ps = *new_ps; 1244 pi->requested_rps.ps_priv = &pi->requested_ps; 1245 adev->pm.dpm.requested_ps = &pi->requested_rps; 1246} 1247 1248static void kv_dpm_enable_bapm(struct amdgpu_device *adev, bool enable) 1249{ 1250 struct kv_power_info *pi = kv_get_pi(adev); 1251 int ret; 1252 1253 if (pi->bapm_enable) { 1254 ret = amdgpu_kv_smc_bapm_enable(adev, enable); 1255 if (ret) 1256 DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); 1257 } 1258} 1259 1260static int kv_dpm_enable(struct amdgpu_device *adev) 1261{ 1262 struct kv_power_info *pi = kv_get_pi(adev); 1263 int ret; 1264 1265 ret = kv_process_firmware_header(adev); 1266 if (ret) { 1267 DRM_ERROR("kv_process_firmware_header failed\n"); 1268 return ret; 1269 } 1270 kv_init_fps_limits(adev); 1271 kv_init_graphics_levels(adev); 1272 ret = kv_program_bootup_state(adev); 1273 if (ret) { 1274 DRM_ERROR("kv_program_bootup_state failed\n"); 1275 return ret; 1276 } 1277 kv_calculate_dfs_bypass_settings(adev); 1278 ret = kv_upload_dpm_settings(adev); 1279 if (ret) { 1280 DRM_ERROR("kv_upload_dpm_settings failed\n"); 1281 return ret; 1282 } 1283 ret = kv_populate_uvd_table(adev); 1284 if (ret) { 1285 DRM_ERROR("kv_populate_uvd_table failed\n"); 1286 return ret; 1287 } 1288 ret = kv_populate_vce_table(adev); 1289 if (ret) { 1290 DRM_ERROR("kv_populate_vce_table failed\n"); 1291 return ret; 1292 } 1293 ret = kv_populate_samu_table(adev); 1294 if (ret) { 1295 DRM_ERROR("kv_populate_samu_table failed\n"); 1296 return ret; 1297 } 1298 ret = kv_populate_acp_table(adev); 1299 if (ret) { 1300 DRM_ERROR("kv_populate_acp_table failed\n"); 1301 return ret; 1302 } 1303 kv_program_vc(adev); 1304#if 0 1305 kv_initialize_hardware_cac_manager(adev); 1306#endif 1307 kv_start_am(adev); 1308 if (pi->enable_auto_thermal_throttling) { 1309 ret = kv_enable_auto_thermal_throttling(adev); 1310 if (ret) { 1311 DRM_ERROR("kv_enable_auto_thermal_throttling failed\n"); 1312 return ret; 1313 } 1314 } 1315 ret = kv_enable_dpm_voltage_scaling(adev); 1316 if (ret) { 1317 DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n"); 1318 return ret; 1319 } 1320 ret = kv_set_dpm_interval(adev); 1321 if (ret) { 1322 DRM_ERROR("kv_set_dpm_interval failed\n"); 1323 return ret; 1324 } 1325 ret = kv_set_dpm_boot_state(adev); 1326 if (ret) { 1327 DRM_ERROR("kv_set_dpm_boot_state failed\n"); 1328 return ret; 1329 } 1330 ret = kv_enable_ulv(adev, true); 1331 if (ret) { 1332 DRM_ERROR("kv_enable_ulv failed\n"); 1333 return ret; 1334 } 1335 kv_start_dpm(adev); 1336 ret = kv_enable_didt(adev, true); 1337 if (ret) { 1338 DRM_ERROR("kv_enable_didt failed\n"); 1339 return ret; 1340 } 1341 ret = kv_enable_smc_cac(adev, true); 1342 if (ret) { 1343 DRM_ERROR("kv_enable_smc_cac failed\n"); 1344 return ret; 1345 } 1346 1347 kv_reset_acp_boot_level(adev); 1348 1349 ret = amdgpu_kv_smc_bapm_enable(adev, false); 1350 if (ret) { 1351 DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); 1352 return ret; 1353 } 1354 1355 kv_update_current_ps(adev, adev->pm.dpm.boot_ps); 1356 1357 if (adev->irq.installed && 1358 amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { 1359 ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); 1360 if (ret) { 1361 DRM_ERROR("kv_set_thermal_temperature_range failed\n"); 1362 return ret; 1363 } 1364 amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, 1365 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); 1366 amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, 1367 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); 1368 } 1369 1370 return ret; 1371} 1372 1373static void kv_dpm_disable(struct amdgpu_device *adev) 1374{ 1375 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, 1376 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); 1377 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, 1378 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); 1379 1380 amdgpu_kv_smc_bapm_enable(adev, false); 1381 1382 if (adev->asic_type == CHIP_MULLINS) 1383 kv_enable_nb_dpm(adev, false); 1384 1385 /* powerup blocks */ 1386 kv_dpm_powergate_acp(adev, false); 1387 kv_dpm_powergate_samu(adev, false); 1388 kv_dpm_powergate_vce(adev, false); 1389 kv_dpm_powergate_uvd(adev, false); 1390 1391 kv_enable_smc_cac(adev, false); 1392 kv_enable_didt(adev, false); 1393 kv_clear_vc(adev); 1394 kv_stop_dpm(adev); 1395 kv_enable_ulv(adev, false); 1396 kv_reset_am(adev); 1397 1398 kv_update_current_ps(adev, adev->pm.dpm.boot_ps); 1399} 1400 1401#if 0 1402static int kv_write_smc_soft_register(struct amdgpu_device *adev, 1403 u16 reg_offset, u32 value) 1404{ 1405 struct kv_power_info *pi = kv_get_pi(adev); 1406 1407 return amdgpu_kv_copy_bytes_to_smc(adev, pi->soft_regs_start + reg_offset, 1408 (u8 *)&value, sizeof(u16), pi->sram_end); 1409} 1410 1411static int kv_read_smc_soft_register(struct amdgpu_device *adev, 1412 u16 reg_offset, u32 *value) 1413{ 1414 struct kv_power_info *pi = kv_get_pi(adev); 1415 1416 return amdgpu_kv_read_smc_sram_dword(adev, pi->soft_regs_start + reg_offset, 1417 value, pi->sram_end); 1418} 1419#endif 1420 1421static void kv_init_sclk_t(struct amdgpu_device *adev) 1422{ 1423 struct kv_power_info *pi = kv_get_pi(adev); 1424 1425 pi->low_sclk_interrupt_t = 0; 1426} 1427 1428static int kv_init_fps_limits(struct amdgpu_device *adev) 1429{ 1430 struct kv_power_info *pi = kv_get_pi(adev); 1431 int ret = 0; 1432 1433 if (pi->caps_fps) { 1434 u16 tmp; 1435 1436 tmp = 45; 1437 pi->fps_high_t = cpu_to_be16(tmp); 1438 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1439 pi->dpm_table_start + 1440 offsetof(SMU7_Fusion_DpmTable, FpsHighT), 1441 (u8 *)&pi->fps_high_t, 1442 sizeof(u16), pi->sram_end); 1443 1444 tmp = 30; 1445 pi->fps_low_t = cpu_to_be16(tmp); 1446 1447 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1448 pi->dpm_table_start + 1449 offsetof(SMU7_Fusion_DpmTable, FpsLowT), 1450 (u8 *)&pi->fps_low_t, 1451 sizeof(u16), pi->sram_end); 1452 1453 } 1454 return ret; 1455} 1456 1457static void kv_init_powergate_state(struct amdgpu_device *adev) 1458{ 1459 struct kv_power_info *pi = kv_get_pi(adev); 1460 1461 pi->uvd_power_gated = false; 1462 pi->vce_power_gated = false; 1463 pi->samu_power_gated = false; 1464 pi->acp_power_gated = false; 1465 1466} 1467 1468static int kv_enable_uvd_dpm(struct amdgpu_device *adev, bool enable) 1469{ 1470 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1471 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable); 1472} 1473 1474static int kv_enable_vce_dpm(struct amdgpu_device *adev, bool enable) 1475{ 1476 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1477 PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable); 1478} 1479 1480static int kv_enable_samu_dpm(struct amdgpu_device *adev, bool enable) 1481{ 1482 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1483 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable); 1484} 1485 1486static int kv_enable_acp_dpm(struct amdgpu_device *adev, bool enable) 1487{ 1488 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1489 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable); 1490} 1491 1492static int kv_update_uvd_dpm(struct amdgpu_device *adev, bool gate) 1493{ 1494 struct kv_power_info *pi = kv_get_pi(adev); 1495 struct amdgpu_uvd_clock_voltage_dependency_table *table = 1496 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 1497 int ret; 1498 u32 mask; 1499 1500 if (!gate) { 1501 if (table->count) 1502 pi->uvd_boot_level = table->count - 1; 1503 else 1504 pi->uvd_boot_level = 0; 1505 1506 if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) { 1507 mask = 1 << pi->uvd_boot_level; 1508 } else { 1509 mask = 0x1f; 1510 } 1511 1512 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1513 pi->dpm_table_start + 1514 offsetof(SMU7_Fusion_DpmTable, UvdBootLevel), 1515 (uint8_t *)&pi->uvd_boot_level, 1516 sizeof(u8), pi->sram_end); 1517 if (ret) 1518 return ret; 1519 1520 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1521 PPSMC_MSG_UVDDPM_SetEnabledMask, 1522 mask); 1523 } 1524 1525 return kv_enable_uvd_dpm(adev, !gate); 1526} 1527 1528static u8 kv_get_vce_boot_level(struct amdgpu_device *adev, u32 evclk) 1529{ 1530 u8 i; 1531 struct amdgpu_vce_clock_voltage_dependency_table *table = 1532 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 1533 1534 for (i = 0; i < table->count; i++) { 1535 if (table->entries[i].evclk >= evclk) 1536 break; 1537 } 1538 1539 return i; 1540} 1541 1542static int kv_update_vce_dpm(struct amdgpu_device *adev, 1543 struct amdgpu_ps *amdgpu_new_state, 1544 struct amdgpu_ps *amdgpu_current_state) 1545{ 1546 struct kv_power_info *pi = kv_get_pi(adev); 1547 struct amdgpu_vce_clock_voltage_dependency_table *table = 1548 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 1549 int ret; 1550 1551 if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { 1552 kv_dpm_powergate_vce(adev, false); 1553 if (pi->caps_stable_p_state) 1554 pi->vce_boot_level = table->count - 1; 1555 else 1556 pi->vce_boot_level = kv_get_vce_boot_level(adev, amdgpu_new_state->evclk); 1557 1558 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1559 pi->dpm_table_start + 1560 offsetof(SMU7_Fusion_DpmTable, VceBootLevel), 1561 (u8 *)&pi->vce_boot_level, 1562 sizeof(u8), 1563 pi->sram_end); 1564 if (ret) 1565 return ret; 1566 1567 if (pi->caps_stable_p_state) 1568 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1569 PPSMC_MSG_VCEDPM_SetEnabledMask, 1570 (1 << pi->vce_boot_level)); 1571 kv_enable_vce_dpm(adev, true); 1572 } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { 1573 kv_enable_vce_dpm(adev, false); 1574 kv_dpm_powergate_vce(adev, true); 1575 } 1576 1577 return 0; 1578} 1579 1580static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate) 1581{ 1582 struct kv_power_info *pi = kv_get_pi(adev); 1583 struct amdgpu_clock_voltage_dependency_table *table = 1584 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; 1585 int ret; 1586 1587 if (!gate) { 1588 if (pi->caps_stable_p_state) 1589 pi->samu_boot_level = table->count - 1; 1590 else 1591 pi->samu_boot_level = 0; 1592 1593 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1594 pi->dpm_table_start + 1595 offsetof(SMU7_Fusion_DpmTable, SamuBootLevel), 1596 (u8 *)&pi->samu_boot_level, 1597 sizeof(u8), 1598 pi->sram_end); 1599 if (ret) 1600 return ret; 1601 1602 if (pi->caps_stable_p_state) 1603 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1604 PPSMC_MSG_SAMUDPM_SetEnabledMask, 1605 (1 << pi->samu_boot_level)); 1606 } 1607 1608 return kv_enable_samu_dpm(adev, !gate); 1609} 1610 1611static u8 kv_get_acp_boot_level(struct amdgpu_device *adev) 1612{ 1613 u8 i; 1614 struct amdgpu_clock_voltage_dependency_table *table = 1615 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 1616 1617 for (i = 0; i < table->count; i++) { 1618 if (table->entries[i].clk >= 0) /* XXX */ 1619 break; 1620 } 1621 1622 if (i >= table->count) 1623 i = table->count - 1; 1624 1625 return i; 1626} 1627 1628static void kv_update_acp_boot_level(struct amdgpu_device *adev) 1629{ 1630 struct kv_power_info *pi = kv_get_pi(adev); 1631 u8 acp_boot_level; 1632 1633 if (!pi->caps_stable_p_state) { 1634 acp_boot_level = kv_get_acp_boot_level(adev); 1635 if (acp_boot_level != pi->acp_boot_level) { 1636 pi->acp_boot_level = acp_boot_level; 1637 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1638 PPSMC_MSG_ACPDPM_SetEnabledMask, 1639 (1 << pi->acp_boot_level)); 1640 } 1641 } 1642} 1643 1644static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate) 1645{ 1646 struct kv_power_info *pi = kv_get_pi(adev); 1647 struct amdgpu_clock_voltage_dependency_table *table = 1648 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 1649 int ret; 1650 1651 if (!gate) { 1652 if (pi->caps_stable_p_state) 1653 pi->acp_boot_level = table->count - 1; 1654 else 1655 pi->acp_boot_level = kv_get_acp_boot_level(adev); 1656 1657 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1658 pi->dpm_table_start + 1659 offsetof(SMU7_Fusion_DpmTable, AcpBootLevel), 1660 (u8 *)&pi->acp_boot_level, 1661 sizeof(u8), 1662 pi->sram_end); 1663 if (ret) 1664 return ret; 1665 1666 if (pi->caps_stable_p_state) 1667 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1668 PPSMC_MSG_ACPDPM_SetEnabledMask, 1669 (1 << pi->acp_boot_level)); 1670 } 1671 1672 return kv_enable_acp_dpm(adev, !gate); 1673} 1674 1675static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate) 1676{ 1677 struct kv_power_info *pi = kv_get_pi(adev); 1678 int ret; 1679 1680 pi->uvd_power_gated = gate; 1681 1682 if (gate) { 1683 /* stop the UVD block */ 1684 ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, 1685 AMD_PG_STATE_GATE); 1686 kv_update_uvd_dpm(adev, gate); 1687 if (pi->caps_uvd_pg) 1688 /* power off the UVD block */ 1689 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF); 1690 } else { 1691 if (pi->caps_uvd_pg) 1692 /* power on the UVD block */ 1693 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); 1694 /* re-init the UVD block */ 1695 kv_update_uvd_dpm(adev, gate); 1696 1697 ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, 1698 AMD_PG_STATE_UNGATE); 1699 } 1700} 1701 1702static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) 1703{ 1704 struct kv_power_info *pi = kv_get_pi(adev); 1705 1706 if (pi->vce_power_gated == gate) 1707 return; 1708 1709 pi->vce_power_gated = gate; 1710 1711 if (!pi->caps_vce_pg) 1712 return; 1713 1714 if (gate) 1715 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); 1716 else 1717 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); 1718} 1719 1720static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) 1721{ 1722 struct kv_power_info *pi = kv_get_pi(adev); 1723 1724 if (pi->samu_power_gated == gate) 1725 return; 1726 1727 pi->samu_power_gated = gate; 1728 1729 if (gate) { 1730 kv_update_samu_dpm(adev, true); 1731 if (pi->caps_samu_pg) 1732 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerOFF); 1733 } else { 1734 if (pi->caps_samu_pg) 1735 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerON); 1736 kv_update_samu_dpm(adev, false); 1737 } 1738} 1739 1740static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate) 1741{ 1742 struct kv_power_info *pi = kv_get_pi(adev); 1743 1744 if (pi->acp_power_gated == gate) 1745 return; 1746 1747 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 1748 return; 1749 1750 pi->acp_power_gated = gate; 1751 1752 if (gate) { 1753 kv_update_acp_dpm(adev, true); 1754 if (pi->caps_acp_pg) 1755 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerOFF); 1756 } else { 1757 if (pi->caps_acp_pg) 1758 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerON); 1759 kv_update_acp_dpm(adev, false); 1760 } 1761} 1762 1763static void kv_set_valid_clock_range(struct amdgpu_device *adev, 1764 struct amdgpu_ps *new_rps) 1765{ 1766 struct kv_ps *new_ps = kv_get_ps(new_rps); 1767 struct kv_power_info *pi = kv_get_pi(adev); 1768 u32 i; 1769 struct amdgpu_clock_voltage_dependency_table *table = 1770 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 1771 1772 if (table && table->count) { 1773 for (i = 0; i < pi->graphics_dpm_level_count; i++) { 1774 if ((table->entries[i].clk >= new_ps->levels[0].sclk) || 1775 (i == (pi->graphics_dpm_level_count - 1))) { 1776 pi->lowest_valid = i; 1777 break; 1778 } 1779 } 1780 1781 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 1782 if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk) 1783 break; 1784 } 1785 pi->highest_valid = i; 1786 1787 if (pi->lowest_valid > pi->highest_valid) { 1788 if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) > 1789 (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk)) 1790 pi->highest_valid = pi->lowest_valid; 1791 else 1792 pi->lowest_valid = pi->highest_valid; 1793 } 1794 } else { 1795 struct sumo_sclk_voltage_mapping_table *table = 1796 &pi->sys_info.sclk_voltage_mapping_table; 1797 1798 for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) { 1799 if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk || 1800 i == (int)(pi->graphics_dpm_level_count - 1)) { 1801 pi->lowest_valid = i; 1802 break; 1803 } 1804 } 1805 1806 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 1807 if (table->entries[i].sclk_frequency <= 1808 new_ps->levels[new_ps->num_levels - 1].sclk) 1809 break; 1810 } 1811 pi->highest_valid = i; 1812 1813 if (pi->lowest_valid > pi->highest_valid) { 1814 if ((new_ps->levels[0].sclk - 1815 table->entries[pi->highest_valid].sclk_frequency) > 1816 (table->entries[pi->lowest_valid].sclk_frequency - 1817 new_ps->levels[new_ps->num_levels -1].sclk)) 1818 pi->highest_valid = pi->lowest_valid; 1819 else 1820 pi->lowest_valid = pi->highest_valid; 1821 } 1822 } 1823} 1824 1825static int kv_update_dfs_bypass_settings(struct amdgpu_device *adev, 1826 struct amdgpu_ps *new_rps) 1827{ 1828 struct kv_ps *new_ps = kv_get_ps(new_rps); 1829 struct kv_power_info *pi = kv_get_pi(adev); 1830 int ret = 0; 1831 u8 clk_bypass_cntl; 1832 1833 if (pi->caps_enable_dfs_bypass) { 1834 clk_bypass_cntl = new_ps->need_dfs_bypass ? 1835 pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0; 1836 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1837 (pi->dpm_table_start + 1838 offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) + 1839 (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) + 1840 offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)), 1841 &clk_bypass_cntl, 1842 sizeof(u8), pi->sram_end); 1843 } 1844 1845 return ret; 1846} 1847 1848static int kv_enable_nb_dpm(struct amdgpu_device *adev, 1849 bool enable) 1850{ 1851 struct kv_power_info *pi = kv_get_pi(adev); 1852 int ret = 0; 1853 1854 if (enable) { 1855 if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) { 1856 ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Enable); 1857 if (ret == 0) 1858 pi->nb_dpm_enabled = true; 1859 } 1860 } else { 1861 if (pi->enable_nb_dpm && pi->nb_dpm_enabled) { 1862 ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Disable); 1863 if (ret == 0) 1864 pi->nb_dpm_enabled = false; 1865 } 1866 } 1867 1868 return ret; 1869} 1870 1871static int kv_dpm_force_performance_level(struct amdgpu_device *adev, 1872 enum amd_dpm_forced_level level) 1873{ 1874 int ret; 1875 1876 if (level == AMD_DPM_FORCED_LEVEL_HIGH) { 1877 ret = kv_force_dpm_highest(adev); 1878 if (ret) 1879 return ret; 1880 } else if (level == AMD_DPM_FORCED_LEVEL_LOW) { 1881 ret = kv_force_dpm_lowest(adev); 1882 if (ret) 1883 return ret; 1884 } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) { 1885 ret = kv_unforce_levels(adev); 1886 if (ret) 1887 return ret; 1888 } 1889 1890 adev->pm.dpm.forced_level = level; 1891 1892 return 0; 1893} 1894 1895static int kv_dpm_pre_set_power_state(struct amdgpu_device *adev) 1896{ 1897 struct kv_power_info *pi = kv_get_pi(adev); 1898 struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps; 1899 struct amdgpu_ps *new_ps = &requested_ps; 1900 1901 kv_update_requested_ps(adev, new_ps); 1902 1903 kv_apply_state_adjust_rules(adev, 1904 &pi->requested_rps, 1905 &pi->current_rps); 1906 1907 return 0; 1908} 1909 1910static int kv_dpm_set_power_state(struct amdgpu_device *adev) 1911{ 1912 struct kv_power_info *pi = kv_get_pi(adev); 1913 struct amdgpu_ps *new_ps = &pi->requested_rps; 1914 struct amdgpu_ps *old_ps = &pi->current_rps; 1915 int ret; 1916 1917 if (pi->bapm_enable) { 1918 ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.dpm.ac_power); 1919 if (ret) { 1920 DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); 1921 return ret; 1922 } 1923 } 1924 1925 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { 1926 if (pi->enable_dpm) { 1927 kv_set_valid_clock_range(adev, new_ps); 1928 kv_update_dfs_bypass_settings(adev, new_ps); 1929 ret = kv_calculate_ds_divider(adev); 1930 if (ret) { 1931 DRM_ERROR("kv_calculate_ds_divider failed\n"); 1932 return ret; 1933 } 1934 kv_calculate_nbps_level_settings(adev); 1935 kv_calculate_dpm_settings(adev); 1936 kv_force_lowest_valid(adev); 1937 kv_enable_new_levels(adev); 1938 kv_upload_dpm_settings(adev); 1939 kv_program_nbps_index_settings(adev, new_ps); 1940 kv_unforce_levels(adev); 1941 kv_set_enabled_levels(adev); 1942 kv_force_lowest_valid(adev); 1943 kv_unforce_levels(adev); 1944 1945 ret = kv_update_vce_dpm(adev, new_ps, old_ps); 1946 if (ret) { 1947 DRM_ERROR("kv_update_vce_dpm failed\n"); 1948 return ret; 1949 } 1950 kv_update_sclk_t(adev); 1951 if (adev->asic_type == CHIP_MULLINS) 1952 kv_enable_nb_dpm(adev, true); 1953 } 1954 } else { 1955 if (pi->enable_dpm) { 1956 kv_set_valid_clock_range(adev, new_ps); 1957 kv_update_dfs_bypass_settings(adev, new_ps); 1958 ret = kv_calculate_ds_divider(adev); 1959 if (ret) { 1960 DRM_ERROR("kv_calculate_ds_divider failed\n"); 1961 return ret; 1962 } 1963 kv_calculate_nbps_level_settings(adev); 1964 kv_calculate_dpm_settings(adev); 1965 kv_freeze_sclk_dpm(adev, true); 1966 kv_upload_dpm_settings(adev); 1967 kv_program_nbps_index_settings(adev, new_ps); 1968 kv_freeze_sclk_dpm(adev, false); 1969 kv_set_enabled_levels(adev); 1970 ret = kv_update_vce_dpm(adev, new_ps, old_ps); 1971 if (ret) { 1972 DRM_ERROR("kv_update_vce_dpm failed\n"); 1973 return ret; 1974 } 1975 kv_update_acp_boot_level(adev); 1976 kv_update_sclk_t(adev); 1977 kv_enable_nb_dpm(adev, true); 1978 } 1979 } 1980 1981 return 0; 1982} 1983 1984static void kv_dpm_post_set_power_state(struct amdgpu_device *adev) 1985{ 1986 struct kv_power_info *pi = kv_get_pi(adev); 1987 struct amdgpu_ps *new_ps = &pi->requested_rps; 1988 1989 kv_update_current_ps(adev, new_ps); 1990} 1991 1992static void kv_dpm_setup_asic(struct amdgpu_device *adev) 1993{ 1994 sumo_take_smu_control(adev, true); 1995 kv_init_powergate_state(adev); 1996 kv_init_sclk_t(adev); 1997} 1998 1999#if 0 2000static void kv_dpm_reset_asic(struct amdgpu_device *adev) 2001{ 2002 struct kv_power_info *pi = kv_get_pi(adev); 2003 2004 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { 2005 kv_force_lowest_valid(adev); 2006 kv_init_graphics_levels(adev); 2007 kv_program_bootup_state(adev); 2008 kv_upload_dpm_settings(adev); 2009 kv_force_lowest_valid(adev); 2010 kv_unforce_levels(adev); 2011 } else { 2012 kv_init_graphics_levels(adev); 2013 kv_program_bootup_state(adev); 2014 kv_freeze_sclk_dpm(adev, true); 2015 kv_upload_dpm_settings(adev); 2016 kv_freeze_sclk_dpm(adev, false); 2017 kv_set_enabled_level(adev, pi->graphics_boot_level); 2018 } 2019} 2020#endif 2021 2022static void kv_construct_max_power_limits_table(struct amdgpu_device *adev, 2023 struct amdgpu_clock_and_voltage_limits *table) 2024{ 2025 struct kv_power_info *pi = kv_get_pi(adev); 2026 2027 if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) { 2028 int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1; 2029 table->sclk = 2030 pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency; 2031 table->vddc = 2032 kv_convert_2bit_index_to_voltage(adev, 2033 pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit); 2034 } 2035 2036 table->mclk = pi->sys_info.nbp_memory_clock[0]; 2037} 2038 2039static void kv_patch_voltage_values(struct amdgpu_device *adev) 2040{ 2041 int i; 2042 struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table = 2043 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 2044 struct amdgpu_vce_clock_voltage_dependency_table *vce_table = 2045 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 2046 struct amdgpu_clock_voltage_dependency_table *samu_table = 2047 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; 2048 struct amdgpu_clock_voltage_dependency_table *acp_table = 2049 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 2050 2051 if (uvd_table->count) { 2052 for (i = 0; i < uvd_table->count; i++) 2053 uvd_table->entries[i].v = 2054 kv_convert_8bit_index_to_voltage(adev, 2055 uvd_table->entries[i].v); 2056 } 2057 2058 if (vce_table->count) { 2059 for (i = 0; i < vce_table->count; i++) 2060 vce_table->entries[i].v = 2061 kv_convert_8bit_index_to_voltage(adev, 2062 vce_table->entries[i].v); 2063 } 2064 2065 if (samu_table->count) { 2066 for (i = 0; i < samu_table->count; i++) 2067 samu_table->entries[i].v = 2068 kv_convert_8bit_index_to_voltage(adev, 2069 samu_table->entries[i].v); 2070 } 2071 2072 if (acp_table->count) { 2073 for (i = 0; i < acp_table->count; i++) 2074 acp_table->entries[i].v = 2075 kv_convert_8bit_index_to_voltage(adev, 2076 acp_table->entries[i].v); 2077 } 2078 2079} 2080 2081static void kv_construct_boot_state(struct amdgpu_device *adev) 2082{ 2083 struct kv_power_info *pi = kv_get_pi(adev); 2084 2085 pi->boot_pl.sclk = pi->sys_info.bootup_sclk; 2086 pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index; 2087 pi->boot_pl.ds_divider_index = 0; 2088 pi->boot_pl.ss_divider_index = 0; 2089 pi->boot_pl.allow_gnb_slow = 1; 2090 pi->boot_pl.force_nbp_state = 0; 2091 pi->boot_pl.display_wm = 0; 2092 pi->boot_pl.vce_wm = 0; 2093} 2094 2095static int kv_force_dpm_highest(struct amdgpu_device *adev) 2096{ 2097 int ret; 2098 u32 enable_mask, i; 2099 2100 ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask); 2101 if (ret) 2102 return ret; 2103 2104 for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) { 2105 if (enable_mask & (1 << i)) 2106 break; 2107 } 2108 2109 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 2110 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i); 2111 else 2112 return kv_set_enabled_level(adev, i); 2113} 2114 2115static int kv_force_dpm_lowest(struct amdgpu_device *adev) 2116{ 2117 int ret; 2118 u32 enable_mask, i; 2119 2120 ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask); 2121 if (ret) 2122 return ret; 2123 2124 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) { 2125 if (enable_mask & (1 << i)) 2126 break; 2127 } 2128 2129 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 2130 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i); 2131 else 2132 return kv_set_enabled_level(adev, i); 2133} 2134 2135static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev, 2136 u32 sclk, u32 min_sclk_in_sr) 2137{ 2138 struct kv_power_info *pi = kv_get_pi(adev); 2139 u32 i; 2140 u32 temp; 2141 u32 min = max(min_sclk_in_sr, (u32)KV_MINIMUM_ENGINE_CLOCK); 2142 2143 if (sclk < min) 2144 return 0; 2145 2146 if (!pi->caps_sclk_ds) 2147 return 0; 2148 2149 for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) { 2150 temp = sclk >> i; 2151 if (temp >= min) 2152 break; 2153 } 2154 2155 return (u8)i; 2156} 2157 2158static int kv_get_high_voltage_limit(struct amdgpu_device *adev, int *limit) 2159{ 2160 struct kv_power_info *pi = kv_get_pi(adev); 2161 struct amdgpu_clock_voltage_dependency_table *table = 2162 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 2163 int i; 2164 2165 if (table && table->count) { 2166 for (i = table->count - 1; i >= 0; i--) { 2167 if (pi->high_voltage_t && 2168 (kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <= 2169 pi->high_voltage_t)) { 2170 *limit = i; 2171 return 0; 2172 } 2173 } 2174 } else { 2175 struct sumo_sclk_voltage_mapping_table *table = 2176 &pi->sys_info.sclk_voltage_mapping_table; 2177 2178 for (i = table->num_max_dpm_entries - 1; i >= 0; i--) { 2179 if (pi->high_voltage_t && 2180 (kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <= 2181 pi->high_voltage_t)) { 2182 *limit = i; 2183 return 0; 2184 } 2185 } 2186 } 2187 2188 *limit = 0; 2189 return 0; 2190} 2191 2192static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, 2193 struct amdgpu_ps *new_rps, 2194 struct amdgpu_ps *old_rps) 2195{ 2196 struct kv_ps *ps = kv_get_ps(new_rps); 2197 struct kv_power_info *pi = kv_get_pi(adev); 2198 u32 min_sclk = 10000; /* ??? */ 2199 u32 sclk, mclk = 0; 2200 int i, limit; 2201 bool force_high; 2202 struct amdgpu_clock_voltage_dependency_table *table = 2203 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 2204 u32 stable_p_state_sclk = 0; 2205 struct amdgpu_clock_and_voltage_limits *max_limits = 2206 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 2207 2208 if (new_rps->vce_active) { 2209 new_rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk; 2210 new_rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk; 2211 } else { 2212 new_rps->evclk = 0; 2213 new_rps->ecclk = 0; 2214 } 2215 2216 mclk = max_limits->mclk; 2217 sclk = min_sclk; 2218 2219 if (pi->caps_stable_p_state) { 2220 stable_p_state_sclk = (max_limits->sclk * 75) / 100; 2221 2222 for (i = table->count - 1; i >= 0; i--) { 2223 if (stable_p_state_sclk >= table->entries[i].clk) { 2224 stable_p_state_sclk = table->entries[i].clk; 2225 break; 2226 } 2227 } 2228 2229 if (i > 0) 2230 stable_p_state_sclk = table->entries[0].clk; 2231 2232 sclk = stable_p_state_sclk; 2233 } 2234 2235 if (new_rps->vce_active) { 2236 if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk) 2237 sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk; 2238 } 2239 2240 ps->need_dfs_bypass = true; 2241 2242 for (i = 0; i < ps->num_levels; i++) { 2243 if (ps->levels[i].sclk < sclk) 2244 ps->levels[i].sclk = sclk; 2245 } 2246 2247 if (table && table->count) { 2248 for (i = 0; i < ps->num_levels; i++) { 2249 if (pi->high_voltage_t && 2250 (pi->high_voltage_t < 2251 kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) { 2252 kv_get_high_voltage_limit(adev, &limit); 2253 ps->levels[i].sclk = table->entries[limit].clk; 2254 } 2255 } 2256 } else { 2257 struct sumo_sclk_voltage_mapping_table *table = 2258 &pi->sys_info.sclk_voltage_mapping_table; 2259 2260 for (i = 0; i < ps->num_levels; i++) { 2261 if (pi->high_voltage_t && 2262 (pi->high_voltage_t < 2263 kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) { 2264 kv_get_high_voltage_limit(adev, &limit); 2265 ps->levels[i].sclk = table->entries[limit].sclk_frequency; 2266 } 2267 } 2268 } 2269 2270 if (pi->caps_stable_p_state) { 2271 for (i = 0; i < ps->num_levels; i++) { 2272 ps->levels[i].sclk = stable_p_state_sclk; 2273 } 2274 } 2275 2276 pi->video_start = new_rps->dclk || new_rps->vclk || 2277 new_rps->evclk || new_rps->ecclk; 2278 2279 if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 2280 ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) 2281 pi->battery_state = true; 2282 else 2283 pi->battery_state = false; 2284 2285 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { 2286 ps->dpm0_pg_nb_ps_lo = 0x1; 2287 ps->dpm0_pg_nb_ps_hi = 0x0; 2288 ps->dpmx_nb_ps_lo = 0x1; 2289 ps->dpmx_nb_ps_hi = 0x0; 2290 } else { 2291 ps->dpm0_pg_nb_ps_lo = 0x3; 2292 ps->dpm0_pg_nb_ps_hi = 0x0; 2293 ps->dpmx_nb_ps_lo = 0x3; 2294 ps->dpmx_nb_ps_hi = 0x0; 2295 2296 if (pi->sys_info.nb_dpm_enable) { 2297 force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) || 2298 pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) || 2299 pi->disable_nb_ps3_in_battery; 2300 ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3; 2301 ps->dpm0_pg_nb_ps_hi = 0x2; 2302 ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3; 2303 ps->dpmx_nb_ps_hi = 0x2; 2304 } 2305 } 2306} 2307 2308static void kv_dpm_power_level_enabled_for_throttle(struct amdgpu_device *adev, 2309 u32 index, bool enable) 2310{ 2311 struct kv_power_info *pi = kv_get_pi(adev); 2312 2313 pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0; 2314} 2315 2316static int kv_calculate_ds_divider(struct amdgpu_device *adev) 2317{ 2318 struct kv_power_info *pi = kv_get_pi(adev); 2319 u32 sclk_in_sr = 10000; /* ??? */ 2320 u32 i; 2321 2322 if (pi->lowest_valid > pi->highest_valid) 2323 return -EINVAL; 2324 2325 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { 2326 pi->graphics_level[i].DeepSleepDivId = 2327 kv_get_sleep_divider_id_from_clock(adev, 2328 be32_to_cpu(pi->graphics_level[i].SclkFrequency), 2329 sclk_in_sr); 2330 } 2331 return 0; 2332} 2333 2334static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev) 2335{ 2336 struct kv_power_info *pi = kv_get_pi(adev); 2337 u32 i; 2338 bool force_high; 2339 struct amdgpu_clock_and_voltage_limits *max_limits = 2340 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 2341 u32 mclk = max_limits->mclk; 2342 2343 if (pi->lowest_valid > pi->highest_valid) 2344 return -EINVAL; 2345 2346 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { 2347 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { 2348 pi->graphics_level[i].GnbSlow = 1; 2349 pi->graphics_level[i].ForceNbPs1 = 0; 2350 pi->graphics_level[i].UpH = 0; 2351 } 2352 2353 if (!pi->sys_info.nb_dpm_enable) 2354 return 0; 2355 2356 force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) || 2357 (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start); 2358 2359 if (force_high) { 2360 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) 2361 pi->graphics_level[i].GnbSlow = 0; 2362 } else { 2363 if (pi->battery_state) 2364 pi->graphics_level[0].ForceNbPs1 = 1; 2365 2366 pi->graphics_level[1].GnbSlow = 0; 2367 pi->graphics_level[2].GnbSlow = 0; 2368 pi->graphics_level[3].GnbSlow = 0; 2369 pi->graphics_level[4].GnbSlow = 0; 2370 } 2371 } else { 2372 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { 2373 pi->graphics_level[i].GnbSlow = 1; 2374 pi->graphics_level[i].ForceNbPs1 = 0; 2375 pi->graphics_level[i].UpH = 0; 2376 } 2377 2378 if (pi->sys_info.nb_dpm_enable && pi->battery_state) { 2379 pi->graphics_level[pi->lowest_valid].UpH = 0x28; 2380 pi->graphics_level[pi->lowest_valid].GnbSlow = 0; 2381 if (pi->lowest_valid != pi->highest_valid) 2382 pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1; 2383 } 2384 } 2385 return 0; 2386} 2387 2388static int kv_calculate_dpm_settings(struct amdgpu_device *adev) 2389{ 2390 struct kv_power_info *pi = kv_get_pi(adev); 2391 u32 i; 2392 2393 if (pi->lowest_valid > pi->highest_valid) 2394 return -EINVAL; 2395 2396 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) 2397 pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0; 2398 2399 return 0; 2400} 2401 2402static void kv_init_graphics_levels(struct amdgpu_device *adev) 2403{ 2404 struct kv_power_info *pi = kv_get_pi(adev); 2405 u32 i; 2406 struct amdgpu_clock_voltage_dependency_table *table = 2407 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 2408 2409 if (table && table->count) { 2410 u32 vid_2bit; 2411 2412 pi->graphics_dpm_level_count = 0; 2413 for (i = 0; i < table->count; i++) { 2414 if (pi->high_voltage_t && 2415 (pi->high_voltage_t < 2416 kv_convert_8bit_index_to_voltage(adev, table->entries[i].v))) 2417 break; 2418 2419 kv_set_divider_value(adev, i, table->entries[i].clk); 2420 vid_2bit = kv_convert_vid7_to_vid2(adev, 2421 &pi->sys_info.vid_mapping_table, 2422 table->entries[i].v); 2423 kv_set_vid(adev, i, vid_2bit); 2424 kv_set_at(adev, i, pi->at[i]); 2425 kv_dpm_power_level_enabled_for_throttle(adev, i, true); 2426 pi->graphics_dpm_level_count++; 2427 } 2428 } else { 2429 struct sumo_sclk_voltage_mapping_table *table = 2430 &pi->sys_info.sclk_voltage_mapping_table; 2431 2432 pi->graphics_dpm_level_count = 0; 2433 for (i = 0; i < table->num_max_dpm_entries; i++) { 2434 if (pi->high_voltage_t && 2435 pi->high_voltage_t < 2436 kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit)) 2437 break; 2438 2439 kv_set_divider_value(adev, i, table->entries[i].sclk_frequency); 2440 kv_set_vid(adev, i, table->entries[i].vid_2bit); 2441 kv_set_at(adev, i, pi->at[i]); 2442 kv_dpm_power_level_enabled_for_throttle(adev, i, true); 2443 pi->graphics_dpm_level_count++; 2444 } 2445 } 2446 2447 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) 2448 kv_dpm_power_level_enable(adev, i, false); 2449} 2450 2451static void kv_enable_new_levels(struct amdgpu_device *adev) 2452{ 2453 struct kv_power_info *pi = kv_get_pi(adev); 2454 u32 i; 2455 2456 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) { 2457 if (i >= pi->lowest_valid && i <= pi->highest_valid) 2458 kv_dpm_power_level_enable(adev, i, true); 2459 } 2460} 2461 2462static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level) 2463{ 2464 u32 new_mask = (1 << level); 2465 2466 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, 2467 PPSMC_MSG_SCLKDPM_SetEnabledMask, 2468 new_mask); 2469} 2470 2471static int kv_set_enabled_levels(struct amdgpu_device *adev) 2472{ 2473 struct kv_power_info *pi = kv_get_pi(adev); 2474 u32 i, new_mask = 0; 2475 2476 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) 2477 new_mask |= (1 << i); 2478 2479 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, 2480 PPSMC_MSG_SCLKDPM_SetEnabledMask, 2481 new_mask); 2482} 2483 2484static void kv_program_nbps_index_settings(struct amdgpu_device *adev, 2485 struct amdgpu_ps *new_rps) 2486{ 2487 struct kv_ps *new_ps = kv_get_ps(new_rps); 2488 struct kv_power_info *pi = kv_get_pi(adev); 2489 u32 nbdpmconfig1; 2490 2491 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 2492 return; 2493 2494 if (pi->sys_info.nb_dpm_enable) { 2495 nbdpmconfig1 = RREG32_SMC(ixNB_DPM_CONFIG_1); 2496 nbdpmconfig1 &= ~(NB_DPM_CONFIG_1__Dpm0PgNbPsLo_MASK | 2497 NB_DPM_CONFIG_1__Dpm0PgNbPsHi_MASK | 2498 NB_DPM_CONFIG_1__DpmXNbPsLo_MASK | 2499 NB_DPM_CONFIG_1__DpmXNbPsHi_MASK); 2500 nbdpmconfig1 |= (new_ps->dpm0_pg_nb_ps_lo << NB_DPM_CONFIG_1__Dpm0PgNbPsLo__SHIFT) | 2501 (new_ps->dpm0_pg_nb_ps_hi << NB_DPM_CONFIG_1__Dpm0PgNbPsHi__SHIFT) | 2502 (new_ps->dpmx_nb_ps_lo << NB_DPM_CONFIG_1__DpmXNbPsLo__SHIFT) | 2503 (new_ps->dpmx_nb_ps_hi << NB_DPM_CONFIG_1__DpmXNbPsHi__SHIFT); 2504 WREG32_SMC(ixNB_DPM_CONFIG_1, nbdpmconfig1); 2505 } 2506} 2507 2508static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, 2509 int min_temp, int max_temp) 2510{ 2511 int low_temp = 0 * 1000; 2512 int high_temp = 255 * 1000; 2513 u32 tmp; 2514 2515 if (low_temp < min_temp) 2516 low_temp = min_temp; 2517 if (high_temp > max_temp) 2518 high_temp = max_temp; 2519 if (high_temp < low_temp) { 2520 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); 2521 return -EINVAL; 2522 } 2523 2524 tmp = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 2525 tmp &= ~(CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK | 2526 CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK); 2527 tmp |= ((49 + (high_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT) | 2528 ((49 + (low_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT); 2529 WREG32_SMC(ixCG_THERMAL_INT_CTRL, tmp); 2530 2531 adev->pm.dpm.thermal.min_temp = low_temp; 2532 adev->pm.dpm.thermal.max_temp = high_temp; 2533 2534 return 0; 2535} 2536 2537union igp_info { 2538 struct _ATOM_INTEGRATED_SYSTEM_INFO info; 2539 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; 2540 struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5; 2541 struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6; 2542 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; 2543 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8; 2544}; 2545 2546static int kv_parse_sys_info_table(struct amdgpu_device *adev) 2547{ 2548 struct kv_power_info *pi = kv_get_pi(adev); 2549 struct amdgpu_mode_info *mode_info = &adev->mode_info; 2550 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); 2551 union igp_info *igp_info; 2552 u8 frev, crev; 2553 u16 data_offset; 2554 int i; 2555 2556 if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 2557 &frev, &crev, &data_offset)) { 2558 igp_info = (union igp_info *)(mode_info->atom_context->bios + 2559 data_offset); 2560 2561 if (crev != 8) { 2562 DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); 2563 return -EINVAL; 2564 } 2565 pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock); 2566 pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock); 2567 pi->sys_info.bootup_nb_voltage_index = 2568 le16_to_cpu(igp_info->info_8.usBootUpNBVoltage); 2569 if (igp_info->info_8.ucHtcTmpLmt == 0) 2570 pi->sys_info.htc_tmp_lmt = 203; 2571 else 2572 pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt; 2573 if (igp_info->info_8.ucHtcHystLmt == 0) 2574 pi->sys_info.htc_hyst_lmt = 5; 2575 else 2576 pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt; 2577 if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) { 2578 DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n"); 2579 } 2580 2581 if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3)) 2582 pi->sys_info.nb_dpm_enable = true; 2583 else 2584 pi->sys_info.nb_dpm_enable = false; 2585 2586 for (i = 0; i < KV_NUM_NBPSTATES; i++) { 2587 pi->sys_info.nbp_memory_clock[i] = 2588 le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]); 2589 pi->sys_info.nbp_n_clock[i] = 2590 le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]); 2591 } 2592 if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) & 2593 SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) 2594 pi->caps_enable_dfs_bypass = true; 2595 2596 sumo_construct_sclk_voltage_mapping_table(adev, 2597 &pi->sys_info.sclk_voltage_mapping_table, 2598 igp_info->info_8.sAvail_SCLK); 2599 2600 sumo_construct_vid_mapping_table(adev, 2601 &pi->sys_info.vid_mapping_table, 2602 igp_info->info_8.sAvail_SCLK); 2603 2604 kv_construct_max_power_limits_table(adev, 2605 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac); 2606 } 2607 return 0; 2608} 2609 2610union power_info { 2611 struct _ATOM_POWERPLAY_INFO info; 2612 struct _ATOM_POWERPLAY_INFO_V2 info_2; 2613 struct _ATOM_POWERPLAY_INFO_V3 info_3; 2614 struct _ATOM_PPLIB_POWERPLAYTABLE pplib; 2615 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; 2616 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; 2617}; 2618 2619union pplib_clock_info { 2620 struct _ATOM_PPLIB_R600_CLOCK_INFO r600; 2621 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; 2622 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; 2623 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; 2624}; 2625 2626union pplib_power_state { 2627 struct _ATOM_PPLIB_STATE v1; 2628 struct _ATOM_PPLIB_STATE_V2 v2; 2629}; 2630 2631static void kv_patch_boot_state(struct amdgpu_device *adev, 2632 struct kv_ps *ps) 2633{ 2634 struct kv_power_info *pi = kv_get_pi(adev); 2635 2636 ps->num_levels = 1; 2637 ps->levels[0] = pi->boot_pl; 2638} 2639 2640static void kv_parse_pplib_non_clock_info(struct amdgpu_device *adev, 2641 struct amdgpu_ps *rps, 2642 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, 2643 u8 table_rev) 2644{ 2645 struct kv_ps *ps = kv_get_ps(rps); 2646 2647 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); 2648 rps->class = le16_to_cpu(non_clock_info->usClassification); 2649 rps->class2 = le16_to_cpu(non_clock_info->usClassification2); 2650 2651 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { 2652 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); 2653 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); 2654 } else { 2655 rps->vclk = 0; 2656 rps->dclk = 0; 2657 } 2658 2659 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { 2660 adev->pm.dpm.boot_ps = rps; 2661 kv_patch_boot_state(adev, ps); 2662 } 2663 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) 2664 adev->pm.dpm.uvd_ps = rps; 2665} 2666 2667static void kv_parse_pplib_clock_info(struct amdgpu_device *adev, 2668 struct amdgpu_ps *rps, int index, 2669 union pplib_clock_info *clock_info) 2670{ 2671 struct kv_power_info *pi = kv_get_pi(adev); 2672 struct kv_ps *ps = kv_get_ps(rps); 2673 struct kv_pl *pl = &ps->levels[index]; 2674 u32 sclk; 2675 2676 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); 2677 sclk |= clock_info->sumo.ucEngineClockHigh << 16; 2678 pl->sclk = sclk; 2679 pl->vddc_index = clock_info->sumo.vddcIndex; 2680 2681 ps->num_levels = index + 1; 2682 2683 if (pi->caps_sclk_ds) { 2684 pl->ds_divider_index = 5; 2685 pl->ss_divider_index = 5; 2686 } 2687} 2688 2689static int kv_parse_power_table(struct amdgpu_device *adev) 2690{ 2691 struct amdgpu_mode_info *mode_info = &adev->mode_info; 2692 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; 2693 union pplib_power_state *power_state; 2694 int i, j, k, non_clock_array_index, clock_array_index; 2695 union pplib_clock_info *clock_info; 2696 struct _StateArray *state_array; 2697 struct _ClockInfoArray *clock_info_array; 2698 struct _NonClockInfoArray *non_clock_info_array; 2699 union power_info *power_info; 2700 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); 2701 u16 data_offset; 2702 u8 frev, crev; 2703 u8 *power_state_offset; 2704 struct kv_ps *ps; 2705 2706 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 2707 &frev, &crev, &data_offset)) 2708 return -EINVAL; 2709 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 2710 2711 amdgpu_add_thermal_controller(adev); 2712 2713 state_array = (struct _StateArray *) 2714 (mode_info->atom_context->bios + data_offset + 2715 le16_to_cpu(power_info->pplib.usStateArrayOffset)); 2716 clock_info_array = (struct _ClockInfoArray *) 2717 (mode_info->atom_context->bios + data_offset + 2718 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); 2719 non_clock_info_array = (struct _NonClockInfoArray *) 2720 (mode_info->atom_context->bios + data_offset + 2721 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); 2722 2723 adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) * 2724 state_array->ucNumEntries, GFP_KERNEL); 2725 if (!adev->pm.dpm.ps) 2726 return -ENOMEM; 2727 power_state_offset = (u8 *)state_array->states; 2728 for (i = 0; i < state_array->ucNumEntries; i++) { 2729 u8 *idx; 2730 power_state = (union pplib_power_state *)power_state_offset; 2731 non_clock_array_index = power_state->v2.nonClockInfoIndex; 2732 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 2733 &non_clock_info_array->nonClockInfo[non_clock_array_index]; 2734 ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL); 2735 if (ps == NULL) { 2736 kfree(adev->pm.dpm.ps); 2737 return -ENOMEM; 2738 } 2739 adev->pm.dpm.ps[i].ps_priv = ps; 2740 k = 0; 2741 idx = (u8 *)&power_state->v2.clockInfoIndex[0]; 2742 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { 2743 clock_array_index = idx[j]; 2744 if (clock_array_index >= clock_info_array->ucNumEntries) 2745 continue; 2746 if (k >= SUMO_MAX_HARDWARE_POWERLEVELS) 2747 break; 2748 clock_info = (union pplib_clock_info *) 2749 ((u8 *)&clock_info_array->clockInfo[0] + 2750 (clock_array_index * clock_info_array->ucEntrySize)); 2751 kv_parse_pplib_clock_info(adev, 2752 &adev->pm.dpm.ps[i], k, 2753 clock_info); 2754 k++; 2755 } 2756 kv_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i], 2757 non_clock_info, 2758 non_clock_info_array->ucEntrySize); 2759 power_state_offset += 2 + power_state->v2.ucNumDPMLevels; 2760 } 2761 adev->pm.dpm.num_ps = state_array->ucNumEntries; 2762 2763 /* fill in the vce power states */ 2764 for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { 2765 u32 sclk; 2766 clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; 2767 clock_info = (union pplib_clock_info *) 2768 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; 2769 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); 2770 sclk |= clock_info->sumo.ucEngineClockHigh << 16; 2771 adev->pm.dpm.vce_states[i].sclk = sclk; 2772 adev->pm.dpm.vce_states[i].mclk = 0; 2773 } 2774 2775 return 0; 2776} 2777 2778static int kv_dpm_init(struct amdgpu_device *adev) 2779{ 2780 struct kv_power_info *pi; 2781 int ret, i; 2782 2783 pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL); 2784 if (pi == NULL) 2785 return -ENOMEM; 2786 adev->pm.dpm.priv = pi; 2787 2788 ret = amdgpu_get_platform_caps(adev); 2789 if (ret) 2790 return ret; 2791 2792 ret = amdgpu_parse_extended_power_table(adev); 2793 if (ret) 2794 return ret; 2795 2796 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) 2797 pi->at[i] = TRINITY_AT_DFLT; 2798 2799 pi->sram_end = SMC_RAM_END; 2800 2801 pi->enable_nb_dpm = true; 2802 2803 pi->caps_power_containment = true; 2804 pi->caps_cac = true; 2805 pi->enable_didt = false; 2806 if (pi->enable_didt) { 2807 pi->caps_sq_ramping = true; 2808 pi->caps_db_ramping = true; 2809 pi->caps_td_ramping = true; 2810 pi->caps_tcp_ramping = true; 2811 } 2812 2813 if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK) 2814 pi->caps_sclk_ds = true; 2815 else 2816 pi->caps_sclk_ds = false; 2817 2818 pi->enable_auto_thermal_throttling = true; 2819 pi->disable_nb_ps3_in_battery = false; 2820 if (amdgpu_bapm == 0) 2821 pi->bapm_enable = false; 2822 else 2823 pi->bapm_enable = true; 2824 pi->voltage_drop_t = 0; 2825 pi->caps_sclk_throttle_low_notification = false; 2826 pi->caps_fps = false; /* true? */ 2827 pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false; 2828 pi->caps_uvd_dpm = true; 2829 pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false; 2830 pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false; 2831 pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false; 2832 pi->caps_stable_p_state = false; 2833 2834 ret = kv_parse_sys_info_table(adev); 2835 if (ret) 2836 return ret; 2837 2838 kv_patch_voltage_values(adev); 2839 kv_construct_boot_state(adev); 2840 2841 ret = kv_parse_power_table(adev); 2842 if (ret) 2843 return ret; 2844 2845 pi->enable_dpm = true; 2846 2847 return 0; 2848} 2849 2850static void 2851kv_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, 2852 struct seq_file *m) 2853{ 2854 struct kv_power_info *pi = kv_get_pi(adev); 2855 u32 current_index = 2856 (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & 2857 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> 2858 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; 2859 u32 sclk, tmp; 2860 u16 vddc; 2861 2862 if (current_index >= SMU__NUM_SCLK_DPM_STATE) { 2863 seq_printf(m, "invalid dpm profile %d\n", current_index); 2864 } else { 2865 sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency); 2866 tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) & 2867 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >> 2868 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT; 2869 vddc = kv_convert_8bit_index_to_voltage(adev, (u16)tmp); 2870 seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en"); 2871 seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en"); 2872 seq_printf(m, "power level %d sclk: %u vddc: %u\n", 2873 current_index, sclk, vddc); 2874 } 2875} 2876 2877static void 2878kv_dpm_print_power_state(struct amdgpu_device *adev, 2879 struct amdgpu_ps *rps) 2880{ 2881 int i; 2882 struct kv_ps *ps = kv_get_ps(rps); 2883 2884 amdgpu_dpm_print_class_info(rps->class, rps->class2); 2885 amdgpu_dpm_print_cap_info(rps->caps); 2886 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); 2887 for (i = 0; i < ps->num_levels; i++) { 2888 struct kv_pl *pl = &ps->levels[i]; 2889 printk("\t\tpower level %d sclk: %u vddc: %u\n", 2890 i, pl->sclk, 2891 kv_convert_8bit_index_to_voltage(adev, pl->vddc_index)); 2892 } 2893 amdgpu_dpm_print_ps_status(adev, rps); 2894} 2895 2896static void kv_dpm_fini(struct amdgpu_device *adev) 2897{ 2898 int i; 2899 2900 for (i = 0; i < adev->pm.dpm.num_ps; i++) { 2901 kfree(adev->pm.dpm.ps[i].ps_priv); 2902 } 2903 kfree(adev->pm.dpm.ps); 2904 kfree(adev->pm.dpm.priv); 2905 amdgpu_free_extended_power_table(adev); 2906} 2907 2908static void kv_dpm_display_configuration_changed(struct amdgpu_device *adev) 2909{ 2910 2911} 2912 2913static u32 kv_dpm_get_sclk(struct amdgpu_device *adev, bool low) 2914{ 2915 struct kv_power_info *pi = kv_get_pi(adev); 2916 struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps); 2917 2918 if (low) 2919 return requested_state->levels[0].sclk; 2920 else 2921 return requested_state->levels[requested_state->num_levels - 1].sclk; 2922} 2923 2924static u32 kv_dpm_get_mclk(struct amdgpu_device *adev, bool low) 2925{ 2926 struct kv_power_info *pi = kv_get_pi(adev); 2927 2928 return pi->sys_info.bootup_uma_clk; 2929} 2930 2931/* get temperature in millidegrees */ 2932static int kv_dpm_get_temp(struct amdgpu_device *adev) 2933{ 2934 u32 temp; 2935 int actual_temp = 0; 2936 2937 temp = RREG32_SMC(0xC0300E0C); 2938 2939 if (temp) 2940 actual_temp = (temp / 8) - 49; 2941 else 2942 actual_temp = 0; 2943 2944 actual_temp = actual_temp * 1000; 2945 2946 return actual_temp; 2947} 2948 2949static int kv_dpm_early_init(void *handle) 2950{ 2951 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2952 2953 kv_dpm_set_dpm_funcs(adev); 2954 kv_dpm_set_irq_funcs(adev); 2955 2956 return 0; 2957} 2958 2959static int kv_dpm_late_init(void *handle) 2960{ 2961 /* powerdown unused blocks for now */ 2962 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2963 int ret; 2964 2965 if (!amdgpu_dpm) 2966 return 0; 2967 2968 /* init the sysfs and debugfs files late */ 2969 ret = amdgpu_pm_sysfs_init(adev); 2970 if (ret) 2971 return ret; 2972 2973 kv_dpm_powergate_acp(adev, true); 2974 kv_dpm_powergate_samu(adev, true); 2975 2976 return 0; 2977} 2978 2979static int kv_dpm_sw_init(void *handle) 2980{ 2981 int ret; 2982 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2983 2984 ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230, 2985 &adev->pm.dpm.thermal.irq); 2986 if (ret) 2987 return ret; 2988 2989 ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231, 2990 &adev->pm.dpm.thermal.irq); 2991 if (ret) 2992 return ret; 2993 2994 /* default to balanced state */ 2995 adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; 2996 adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; 2997 adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO; 2998 adev->pm.default_sclk = adev->clock.default_sclk; 2999 adev->pm.default_mclk = adev->clock.default_mclk; 3000 adev->pm.current_sclk = adev->clock.default_sclk; 3001 adev->pm.current_mclk = adev->clock.default_mclk; 3002 adev->pm.int_thermal_type = THERMAL_TYPE_NONE; 3003 3004 if (amdgpu_dpm == 0) 3005 return 0; 3006 3007 INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler); 3008 mutex_lock(&adev->pm.mutex); 3009 ret = kv_dpm_init(adev); 3010 if (ret) 3011 goto dpm_failed; 3012 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; 3013 if (amdgpu_dpm == 1) 3014 amdgpu_pm_print_power_states(adev); 3015 mutex_unlock(&adev->pm.mutex); 3016 DRM_INFO("amdgpu: dpm initialized\n"); 3017 3018 return 0; 3019 3020dpm_failed: 3021 kv_dpm_fini(adev); 3022 mutex_unlock(&adev->pm.mutex); 3023 DRM_ERROR("amdgpu: dpm initialization failed\n"); 3024 return ret; 3025} 3026 3027static int kv_dpm_sw_fini(void *handle) 3028{ 3029 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3030 3031 flush_work(&adev->pm.dpm.thermal.work); 3032 3033 mutex_lock(&adev->pm.mutex); 3034 amdgpu_pm_sysfs_fini(adev); 3035 kv_dpm_fini(adev); 3036 mutex_unlock(&adev->pm.mutex); 3037 3038 return 0; 3039} 3040 3041static int kv_dpm_hw_init(void *handle) 3042{ 3043 int ret; 3044 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3045 3046 if (!amdgpu_dpm) 3047 return 0; 3048 3049 mutex_lock(&adev->pm.mutex); 3050 kv_dpm_setup_asic(adev); 3051 ret = kv_dpm_enable(adev); 3052 if (ret) 3053 adev->pm.dpm_enabled = false; 3054 else 3055 adev->pm.dpm_enabled = true; 3056 mutex_unlock(&adev->pm.mutex); 3057 3058 return ret; 3059} 3060 3061static int kv_dpm_hw_fini(void *handle) 3062{ 3063 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3064 3065 if (adev->pm.dpm_enabled) { 3066 mutex_lock(&adev->pm.mutex); 3067 kv_dpm_disable(adev); 3068 mutex_unlock(&adev->pm.mutex); 3069 } 3070 3071 return 0; 3072} 3073 3074static int kv_dpm_suspend(void *handle) 3075{ 3076 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3077 3078 if (adev->pm.dpm_enabled) { 3079 mutex_lock(&adev->pm.mutex); 3080 /* disable dpm */ 3081 kv_dpm_disable(adev); 3082 /* reset the power state */ 3083 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; 3084 mutex_unlock(&adev->pm.mutex); 3085 } 3086 return 0; 3087} 3088 3089static int kv_dpm_resume(void *handle) 3090{ 3091 int ret; 3092 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3093 3094 if (adev->pm.dpm_enabled) { 3095 /* asic init will reset to the boot state */ 3096 mutex_lock(&adev->pm.mutex); 3097 kv_dpm_setup_asic(adev); 3098 ret = kv_dpm_enable(adev); 3099 if (ret) 3100 adev->pm.dpm_enabled = false; 3101 else 3102 adev->pm.dpm_enabled = true; 3103 mutex_unlock(&adev->pm.mutex); 3104 if (adev->pm.dpm_enabled) 3105 amdgpu_pm_compute_clocks(adev); 3106 } 3107 return 0; 3108} 3109 3110static bool kv_dpm_is_idle(void *handle) 3111{ 3112 return true; 3113} 3114 3115static int kv_dpm_wait_for_idle(void *handle) 3116{ 3117 return 0; 3118} 3119 3120 3121static int kv_dpm_soft_reset(void *handle) 3122{ 3123 return 0; 3124} 3125 3126static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev, 3127 struct amdgpu_irq_src *src, 3128 unsigned type, 3129 enum amdgpu_interrupt_state state) 3130{ 3131 u32 cg_thermal_int; 3132 3133 switch (type) { 3134 case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH: 3135 switch (state) { 3136 case AMDGPU_IRQ_STATE_DISABLE: 3137 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 3138 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; 3139 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); 3140 break; 3141 case AMDGPU_IRQ_STATE_ENABLE: 3142 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 3143 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; 3144 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); 3145 break; 3146 default: 3147 break; 3148 } 3149 break; 3150 3151 case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW: 3152 switch (state) { 3153 case AMDGPU_IRQ_STATE_DISABLE: 3154 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 3155 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; 3156 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); 3157 break; 3158 case AMDGPU_IRQ_STATE_ENABLE: 3159 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 3160 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; 3161 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); 3162 break; 3163 default: 3164 break; 3165 } 3166 break; 3167 3168 default: 3169 break; 3170 } 3171 return 0; 3172} 3173 3174static int kv_dpm_process_interrupt(struct amdgpu_device *adev, 3175 struct amdgpu_irq_src *source, 3176 struct amdgpu_iv_entry *entry) 3177{ 3178 bool queue_thermal = false; 3179 3180 if (entry == NULL) 3181 return -EINVAL; 3182 3183 switch (entry->src_id) { 3184 case 230: /* thermal low to high */ 3185 DRM_DEBUG("IH: thermal low to high\n"); 3186 adev->pm.dpm.thermal.high_to_low = false; 3187 queue_thermal = true; 3188 break; 3189 case 231: /* thermal high to low */ 3190 DRM_DEBUG("IH: thermal high to low\n"); 3191 adev->pm.dpm.thermal.high_to_low = true; 3192 queue_thermal = true; 3193 break; 3194 default: 3195 break; 3196 } 3197 3198 if (queue_thermal) 3199 schedule_work(&adev->pm.dpm.thermal.work); 3200 3201 return 0; 3202} 3203 3204static int kv_dpm_set_clockgating_state(void *handle, 3205 enum amd_clockgating_state state) 3206{ 3207 return 0; 3208} 3209 3210static int kv_dpm_set_powergating_state(void *handle, 3211 enum amd_powergating_state state) 3212{ 3213 return 0; 3214} 3215 3216static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1, 3217 const struct kv_pl *kv_cpl2) 3218{ 3219 return ((kv_cpl1->sclk == kv_cpl2->sclk) && 3220 (kv_cpl1->vddc_index == kv_cpl2->vddc_index) && 3221 (kv_cpl1->ds_divider_index == kv_cpl2->ds_divider_index) && 3222 (kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state)); 3223} 3224 3225static int kv_check_state_equal(struct amdgpu_device *adev, 3226 struct amdgpu_ps *cps, 3227 struct amdgpu_ps *rps, 3228 bool *equal) 3229{ 3230 struct kv_ps *kv_cps; 3231 struct kv_ps *kv_rps; 3232 int i; 3233 3234 if (adev == NULL || cps == NULL || rps == NULL || equal == NULL) 3235 return -EINVAL; 3236 3237 kv_cps = kv_get_ps(cps); 3238 kv_rps = kv_get_ps(rps); 3239 3240 if (kv_cps == NULL) { 3241 *equal = false; 3242 return 0; 3243 } 3244 3245 if (kv_cps->num_levels != kv_rps->num_levels) { 3246 *equal = false; 3247 return 0; 3248 } 3249 3250 for (i = 0; i < kv_cps->num_levels; i++) { 3251 if (!kv_are_power_levels_equal(&(kv_cps->levels[i]), 3252 &(kv_rps->levels[i]))) { 3253 *equal = false; 3254 return 0; 3255 } 3256 } 3257 3258 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ 3259 *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk)); 3260 *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk)); 3261 3262 return 0; 3263} 3264 3265static int kv_dpm_read_sensor(struct amdgpu_device *adev, int idx, 3266 void *value, int *size) 3267{ 3268 struct kv_power_info *pi = kv_get_pi(adev); 3269 uint32_t sclk; 3270 u32 pl_index = 3271 (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & 3272 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> 3273 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; 3274 3275 /* size must be at least 4 bytes for all sensors */ 3276 if (*size < 4) 3277 return -EINVAL; 3278 3279 switch (idx) { 3280 case AMDGPU_PP_SENSOR_GFX_SCLK: 3281 if (pl_index < SMU__NUM_SCLK_DPM_STATE) { 3282 sclk = be32_to_cpu( 3283 pi->graphics_level[pl_index].SclkFrequency); 3284 *((uint32_t *)value) = sclk; 3285 *size = 4; 3286 return 0; 3287 } 3288 return -EINVAL; 3289 case AMDGPU_PP_SENSOR_GPU_TEMP: 3290 *((uint32_t *)value) = kv_dpm_get_temp(adev); 3291 *size = 4; 3292 return 0; 3293 default: 3294 return -EINVAL; 3295 } 3296} 3297 3298const struct amd_ip_funcs kv_dpm_ip_funcs = { 3299 .name = "kv_dpm", 3300 .early_init = kv_dpm_early_init, 3301 .late_init = kv_dpm_late_init, 3302 .sw_init = kv_dpm_sw_init, 3303 .sw_fini = kv_dpm_sw_fini, 3304 .hw_init = kv_dpm_hw_init, 3305 .hw_fini = kv_dpm_hw_fini, 3306 .suspend = kv_dpm_suspend, 3307 .resume = kv_dpm_resume, 3308 .is_idle = kv_dpm_is_idle, 3309 .wait_for_idle = kv_dpm_wait_for_idle, 3310 .soft_reset = kv_dpm_soft_reset, 3311 .set_clockgating_state = kv_dpm_set_clockgating_state, 3312 .set_powergating_state = kv_dpm_set_powergating_state, 3313}; 3314 3315static const struct amdgpu_dpm_funcs kv_dpm_funcs = { 3316 .get_temperature = &kv_dpm_get_temp, 3317 .pre_set_power_state = &kv_dpm_pre_set_power_state, 3318 .set_power_state = &kv_dpm_set_power_state, 3319 .post_set_power_state = &kv_dpm_post_set_power_state, 3320 .display_configuration_changed = &kv_dpm_display_configuration_changed, 3321 .get_sclk = &kv_dpm_get_sclk, 3322 .get_mclk = &kv_dpm_get_mclk, 3323 .print_power_state = &kv_dpm_print_power_state, 3324 .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level, 3325 .force_performance_level = &kv_dpm_force_performance_level, 3326 .powergate_uvd = &kv_dpm_powergate_uvd, 3327 .enable_bapm = &kv_dpm_enable_bapm, 3328 .get_vce_clock_state = amdgpu_get_vce_clock_state, 3329 .check_state_equal = kv_check_state_equal, 3330 .read_sensor = &kv_dpm_read_sensor, 3331}; 3332 3333static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev) 3334{ 3335 if (adev->pm.funcs == NULL) 3336 adev->pm.funcs = &kv_dpm_funcs; 3337} 3338 3339static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = { 3340 .set = kv_dpm_set_interrupt_state, 3341 .process = kv_dpm_process_interrupt, 3342}; 3343 3344static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev) 3345{ 3346 adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; 3347 adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs; 3348}