Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v4.16-rc1 3348 lines 93 kB view raw
1/* 2 * Copyright 2013 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24#include <drm/drmP.h> 25#include "amdgpu.h" 26#include "amdgpu_pm.h" 27#include "cikd.h" 28#include "atom.h" 29#include "amdgpu_atombios.h" 30#include "amdgpu_dpm.h" 31#include "kv_dpm.h" 32#include "gfx_v7_0.h" 33#include <linux/seq_file.h> 34 35#include "smu/smu_7_0_0_d.h" 36#include "smu/smu_7_0_0_sh_mask.h" 37 38#include "gca/gfx_7_2_d.h" 39#include "gca/gfx_7_2_sh_mask.h" 40 41#define KV_MAX_DEEPSLEEP_DIVIDER_ID 5 42#define KV_MINIMUM_ENGINE_CLOCK 800 43#define SMC_RAM_END 0x40000 44 45static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev); 46static int kv_enable_nb_dpm(struct amdgpu_device *adev, 47 bool enable); 48static void kv_init_graphics_levels(struct amdgpu_device *adev); 49static int kv_calculate_ds_divider(struct amdgpu_device *adev); 50static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev); 51static int kv_calculate_dpm_settings(struct amdgpu_device *adev); 52static void kv_enable_new_levels(struct amdgpu_device *adev); 53static void kv_program_nbps_index_settings(struct amdgpu_device *adev, 54 struct amdgpu_ps *new_rps); 55static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level); 56static int kv_set_enabled_levels(struct amdgpu_device *adev); 57static int kv_force_dpm_highest(struct amdgpu_device *adev); 58static int kv_force_dpm_lowest(struct amdgpu_device *adev); 59static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, 60 struct amdgpu_ps *new_rps, 61 struct amdgpu_ps *old_rps); 62static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, 63 int min_temp, int max_temp); 64static int kv_init_fps_limits(struct amdgpu_device *adev); 65 66static void kv_dpm_powergate_uvd(void *handle, bool gate); 67static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate); 68static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); 69static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); 70 71 72static u32 kv_convert_vid2_to_vid7(struct amdgpu_device *adev, 73 struct sumo_vid_mapping_table *vid_mapping_table, 74 u32 vid_2bit) 75{ 76 struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table = 77 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 78 u32 i; 79 80 if (vddc_sclk_table && vddc_sclk_table->count) { 81 if (vid_2bit < vddc_sclk_table->count) 82 return vddc_sclk_table->entries[vid_2bit].v; 83 else 84 return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v; 85 } else { 86 for (i = 0; i < vid_mapping_table->num_entries; i++) { 87 if (vid_mapping_table->entries[i].vid_2bit == vid_2bit) 88 return vid_mapping_table->entries[i].vid_7bit; 89 } 90 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit; 91 } 92} 93 94static u32 kv_convert_vid7_to_vid2(struct amdgpu_device *adev, 95 struct sumo_vid_mapping_table *vid_mapping_table, 96 u32 vid_7bit) 97{ 98 struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table = 99 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 100 u32 i; 101 102 if (vddc_sclk_table && vddc_sclk_table->count) { 103 for (i = 0; i < vddc_sclk_table->count; i++) { 104 if (vddc_sclk_table->entries[i].v == vid_7bit) 105 return i; 106 } 107 return vddc_sclk_table->count - 1; 108 } else { 109 for (i = 0; i < vid_mapping_table->num_entries; i++) { 110 if (vid_mapping_table->entries[i].vid_7bit == vid_7bit) 111 return vid_mapping_table->entries[i].vid_2bit; 112 } 113 114 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit; 115 } 116} 117 118static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable) 119{ 120/* This bit selects who handles display phy powergating. 121 * Clear the bit to let atom handle it. 122 * Set it to let the driver handle it. 123 * For now we just let atom handle it. 124 */ 125#if 0 126 u32 v = RREG32(mmDOUT_SCRATCH3); 127 128 if (enable) 129 v |= 0x4; 130 else 131 v &= 0xFFFFFFFB; 132 133 WREG32(mmDOUT_SCRATCH3, v); 134#endif 135} 136 137static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev, 138 struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table, 139 ATOM_AVAILABLE_SCLK_LIST *table) 140{ 141 u32 i; 142 u32 n = 0; 143 u32 prev_sclk = 0; 144 145 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { 146 if (table[i].ulSupportedSCLK > prev_sclk) { 147 sclk_voltage_mapping_table->entries[n].sclk_frequency = 148 table[i].ulSupportedSCLK; 149 sclk_voltage_mapping_table->entries[n].vid_2bit = 150 table[i].usVoltageIndex; 151 prev_sclk = table[i].ulSupportedSCLK; 152 n++; 153 } 154 } 155 156 sclk_voltage_mapping_table->num_max_dpm_entries = n; 157} 158 159static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev, 160 struct sumo_vid_mapping_table *vid_mapping_table, 161 ATOM_AVAILABLE_SCLK_LIST *table) 162{ 163 u32 i, j; 164 165 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { 166 if (table[i].ulSupportedSCLK != 0) { 167 vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit = 168 table[i].usVoltageID; 169 vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit = 170 table[i].usVoltageIndex; 171 } 172 } 173 174 for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) { 175 if (vid_mapping_table->entries[i].vid_7bit == 0) { 176 for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) { 177 if (vid_mapping_table->entries[j].vid_7bit != 0) { 178 vid_mapping_table->entries[i] = 179 vid_mapping_table->entries[j]; 180 vid_mapping_table->entries[j].vid_7bit = 0; 181 break; 182 } 183 } 184 185 if (j == SUMO_MAX_NUMBER_VOLTAGES) 186 break; 187 } 188 } 189 190 vid_mapping_table->num_entries = i; 191} 192 193#if 0 194static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] = 195{ 196 { 0, 4, 1 }, 197 { 1, 4, 1 }, 198 { 2, 5, 1 }, 199 { 3, 4, 2 }, 200 { 4, 1, 1 }, 201 { 5, 5, 2 }, 202 { 6, 6, 1 }, 203 { 7, 9, 2 }, 204 { 0xffffffff } 205}; 206 207static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] = 208{ 209 { 0, 4, 1 }, 210 { 0xffffffff } 211}; 212 213static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] = 214{ 215 { 0, 4, 1 }, 216 { 0xffffffff } 217}; 218 219static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] = 220{ 221 { 0, 4, 1 }, 222 { 0xffffffff } 223}; 224 225static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] = 226{ 227 { 0, 4, 1 }, 228 { 0xffffffff } 229}; 230 231static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] = 232{ 233 { 0, 4, 1 }, 234 { 1, 4, 1 }, 235 { 2, 5, 1 }, 236 { 3, 4, 1 }, 237 { 4, 1, 1 }, 238 { 5, 5, 1 }, 239 { 6, 6, 1 }, 240 { 7, 9, 1 }, 241 { 8, 4, 1 }, 242 { 9, 2, 1 }, 243 { 10, 3, 1 }, 244 { 11, 6, 1 }, 245 { 12, 8, 2 }, 246 { 13, 1, 1 }, 247 { 14, 2, 1 }, 248 { 15, 3, 1 }, 249 { 16, 1, 1 }, 250 { 17, 4, 1 }, 251 { 18, 3, 1 }, 252 { 19, 1, 1 }, 253 { 20, 8, 1 }, 254 { 21, 5, 1 }, 255 { 22, 1, 1 }, 256 { 23, 1, 1 }, 257 { 24, 4, 1 }, 258 { 27, 6, 1 }, 259 { 28, 1, 1 }, 260 { 0xffffffff } 261}; 262 263static const struct kv_lcac_config_reg sx0_cac_config_reg[] = 264{ 265 { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 266}; 267 268static const struct kv_lcac_config_reg mc0_cac_config_reg[] = 269{ 270 { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 271}; 272 273static const struct kv_lcac_config_reg mc1_cac_config_reg[] = 274{ 275 { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 276}; 277 278static const struct kv_lcac_config_reg mc2_cac_config_reg[] = 279{ 280 { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 281}; 282 283static const struct kv_lcac_config_reg mc3_cac_config_reg[] = 284{ 285 { 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 286}; 287 288static const struct kv_lcac_config_reg cpl_cac_config_reg[] = 289{ 290 { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 291}; 292#endif 293 294static const struct kv_pt_config_reg didt_config_kv[] = 295{ 296 { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 297 { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 298 { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 299 { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 300 { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 301 { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 302 { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 303 { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 304 { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 305 { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 306 { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 307 { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 308 { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, 309 { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, 310 { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, 311 { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 312 { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 313 { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 314 { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 315 { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 316 { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 317 { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 318 { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 319 { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 320 { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 321 { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 322 { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 323 { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 324 { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 325 { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 326 { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, 327 { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, 328 { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, 329 { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 330 { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 331 { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 332 { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 333 { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 334 { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 335 { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 336 { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 337 { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 338 { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 339 { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 340 { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 341 { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 342 { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 343 { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 344 { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, 345 { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, 346 { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, 347 { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 348 { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 349 { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 350 { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 351 { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 352 { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 353 { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 354 { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 355 { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 356 { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 357 { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 358 { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 359 { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 360 { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 361 { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 362 { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, 363 { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, 364 { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, 365 { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 366 { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 367 { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 368 { 0xFFFFFFFF } 369}; 370 371static struct kv_ps *kv_get_ps(struct amdgpu_ps *rps) 372{ 373 struct kv_ps *ps = rps->ps_priv; 374 375 return ps; 376} 377 378static struct kv_power_info *kv_get_pi(struct amdgpu_device *adev) 379{ 380 struct kv_power_info *pi = adev->pm.dpm.priv; 381 382 return pi; 383} 384 385#if 0 386static void kv_program_local_cac_table(struct amdgpu_device *adev, 387 const struct kv_lcac_config_values *local_cac_table, 388 const struct kv_lcac_config_reg *local_cac_reg) 389{ 390 u32 i, count, data; 391 const struct kv_lcac_config_values *values = local_cac_table; 392 393 while (values->block_id != 0xffffffff) { 394 count = values->signal_id; 395 for (i = 0; i < count; i++) { 396 data = ((values->block_id << local_cac_reg->block_shift) & 397 local_cac_reg->block_mask); 398 data |= ((i << local_cac_reg->signal_shift) & 399 local_cac_reg->signal_mask); 400 data |= ((values->t << local_cac_reg->t_shift) & 401 local_cac_reg->t_mask); 402 data |= ((1 << local_cac_reg->enable_shift) & 403 local_cac_reg->enable_mask); 404 WREG32_SMC(local_cac_reg->cntl, data); 405 } 406 values++; 407 } 408} 409#endif 410 411static int kv_program_pt_config_registers(struct amdgpu_device *adev, 412 const struct kv_pt_config_reg *cac_config_regs) 413{ 414 const struct kv_pt_config_reg *config_regs = cac_config_regs; 415 u32 data; 416 u32 cache = 0; 417 418 if (config_regs == NULL) 419 return -EINVAL; 420 421 while (config_regs->offset != 0xFFFFFFFF) { 422 if (config_regs->type == KV_CONFIGREG_CACHE) { 423 cache |= ((config_regs->value << config_regs->shift) & config_regs->mask); 424 } else { 425 switch (config_regs->type) { 426 case KV_CONFIGREG_SMC_IND: 427 data = RREG32_SMC(config_regs->offset); 428 break; 429 case KV_CONFIGREG_DIDT_IND: 430 data = RREG32_DIDT(config_regs->offset); 431 break; 432 default: 433 data = RREG32(config_regs->offset); 434 break; 435 } 436 437 data &= ~config_regs->mask; 438 data |= ((config_regs->value << config_regs->shift) & config_regs->mask); 439 data |= cache; 440 cache = 0; 441 442 switch (config_regs->type) { 443 case KV_CONFIGREG_SMC_IND: 444 WREG32_SMC(config_regs->offset, data); 445 break; 446 case KV_CONFIGREG_DIDT_IND: 447 WREG32_DIDT(config_regs->offset, data); 448 break; 449 default: 450 WREG32(config_regs->offset, data); 451 break; 452 } 453 } 454 config_regs++; 455 } 456 457 return 0; 458} 459 460static void kv_do_enable_didt(struct amdgpu_device *adev, bool enable) 461{ 462 struct kv_power_info *pi = kv_get_pi(adev); 463 u32 data; 464 465 if (pi->caps_sq_ramping) { 466 data = RREG32_DIDT(ixDIDT_SQ_CTRL0); 467 if (enable) 468 data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; 469 else 470 data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; 471 WREG32_DIDT(ixDIDT_SQ_CTRL0, data); 472 } 473 474 if (pi->caps_db_ramping) { 475 data = RREG32_DIDT(ixDIDT_DB_CTRL0); 476 if (enable) 477 data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; 478 else 479 data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; 480 WREG32_DIDT(ixDIDT_DB_CTRL0, data); 481 } 482 483 if (pi->caps_td_ramping) { 484 data = RREG32_DIDT(ixDIDT_TD_CTRL0); 485 if (enable) 486 data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; 487 else 488 data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; 489 WREG32_DIDT(ixDIDT_TD_CTRL0, data); 490 } 491 492 if (pi->caps_tcp_ramping) { 493 data = RREG32_DIDT(ixDIDT_TCP_CTRL0); 494 if (enable) 495 data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; 496 else 497 data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; 498 WREG32_DIDT(ixDIDT_TCP_CTRL0, data); 499 } 500} 501 502static int kv_enable_didt(struct amdgpu_device *adev, bool enable) 503{ 504 struct kv_power_info *pi = kv_get_pi(adev); 505 int ret; 506 507 if (pi->caps_sq_ramping || 508 pi->caps_db_ramping || 509 pi->caps_td_ramping || 510 pi->caps_tcp_ramping) { 511 adev->gfx.rlc.funcs->enter_safe_mode(adev); 512 513 if (enable) { 514 ret = kv_program_pt_config_registers(adev, didt_config_kv); 515 if (ret) { 516 adev->gfx.rlc.funcs->exit_safe_mode(adev); 517 return ret; 518 } 519 } 520 521 kv_do_enable_didt(adev, enable); 522 523 adev->gfx.rlc.funcs->exit_safe_mode(adev); 524 } 525 526 return 0; 527} 528 529#if 0 530static void kv_initialize_hardware_cac_manager(struct amdgpu_device *adev) 531{ 532 struct kv_power_info *pi = kv_get_pi(adev); 533 534 if (pi->caps_cac) { 535 WREG32_SMC(ixLCAC_SX0_OVR_SEL, 0); 536 WREG32_SMC(ixLCAC_SX0_OVR_VAL, 0); 537 kv_program_local_cac_table(adev, sx_local_cac_cfg_kv, sx0_cac_config_reg); 538 539 WREG32_SMC(ixLCAC_MC0_OVR_SEL, 0); 540 WREG32_SMC(ixLCAC_MC0_OVR_VAL, 0); 541 kv_program_local_cac_table(adev, mc0_local_cac_cfg_kv, mc0_cac_config_reg); 542 543 WREG32_SMC(ixLCAC_MC1_OVR_SEL, 0); 544 WREG32_SMC(ixLCAC_MC1_OVR_VAL, 0); 545 kv_program_local_cac_table(adev, mc1_local_cac_cfg_kv, mc1_cac_config_reg); 546 547 WREG32_SMC(ixLCAC_MC2_OVR_SEL, 0); 548 WREG32_SMC(ixLCAC_MC2_OVR_VAL, 0); 549 kv_program_local_cac_table(adev, mc2_local_cac_cfg_kv, mc2_cac_config_reg); 550 551 WREG32_SMC(ixLCAC_MC3_OVR_SEL, 0); 552 WREG32_SMC(ixLCAC_MC3_OVR_VAL, 0); 553 kv_program_local_cac_table(adev, mc3_local_cac_cfg_kv, mc3_cac_config_reg); 554 555 WREG32_SMC(ixLCAC_CPL_OVR_SEL, 0); 556 WREG32_SMC(ixLCAC_CPL_OVR_VAL, 0); 557 kv_program_local_cac_table(adev, cpl_local_cac_cfg_kv, cpl_cac_config_reg); 558 } 559} 560#endif 561 562static int kv_enable_smc_cac(struct amdgpu_device *adev, bool enable) 563{ 564 struct kv_power_info *pi = kv_get_pi(adev); 565 int ret = 0; 566 567 if (pi->caps_cac) { 568 if (enable) { 569 ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableCac); 570 if (ret) 571 pi->cac_enabled = false; 572 else 573 pi->cac_enabled = true; 574 } else if (pi->cac_enabled) { 575 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableCac); 576 pi->cac_enabled = false; 577 } 578 } 579 580 return ret; 581} 582 583static int kv_process_firmware_header(struct amdgpu_device *adev) 584{ 585 struct kv_power_info *pi = kv_get_pi(adev); 586 u32 tmp; 587 int ret; 588 589 ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION + 590 offsetof(SMU7_Firmware_Header, DpmTable), 591 &tmp, pi->sram_end); 592 593 if (ret == 0) 594 pi->dpm_table_start = tmp; 595 596 ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION + 597 offsetof(SMU7_Firmware_Header, SoftRegisters), 598 &tmp, pi->sram_end); 599 600 if (ret == 0) 601 pi->soft_regs_start = tmp; 602 603 return ret; 604} 605 606static int kv_enable_dpm_voltage_scaling(struct amdgpu_device *adev) 607{ 608 struct kv_power_info *pi = kv_get_pi(adev); 609 int ret; 610 611 pi->graphics_voltage_change_enable = 1; 612 613 ret = amdgpu_kv_copy_bytes_to_smc(adev, 614 pi->dpm_table_start + 615 offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable), 616 &pi->graphics_voltage_change_enable, 617 sizeof(u8), pi->sram_end); 618 619 return ret; 620} 621 622static int kv_set_dpm_interval(struct amdgpu_device *adev) 623{ 624 struct kv_power_info *pi = kv_get_pi(adev); 625 int ret; 626 627 pi->graphics_interval = 1; 628 629 ret = amdgpu_kv_copy_bytes_to_smc(adev, 630 pi->dpm_table_start + 631 offsetof(SMU7_Fusion_DpmTable, GraphicsInterval), 632 &pi->graphics_interval, 633 sizeof(u8), pi->sram_end); 634 635 return ret; 636} 637 638static int kv_set_dpm_boot_state(struct amdgpu_device *adev) 639{ 640 struct kv_power_info *pi = kv_get_pi(adev); 641 int ret; 642 643 ret = amdgpu_kv_copy_bytes_to_smc(adev, 644 pi->dpm_table_start + 645 offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel), 646 &pi->graphics_boot_level, 647 sizeof(u8), pi->sram_end); 648 649 return ret; 650} 651 652static void kv_program_vc(struct amdgpu_device *adev) 653{ 654 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0x3FFFC100); 655} 656 657static void kv_clear_vc(struct amdgpu_device *adev) 658{ 659 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0); 660} 661 662static int kv_set_divider_value(struct amdgpu_device *adev, 663 u32 index, u32 sclk) 664{ 665 struct kv_power_info *pi = kv_get_pi(adev); 666 struct atom_clock_dividers dividers; 667 int ret; 668 669 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 670 sclk, false, &dividers); 671 if (ret) 672 return ret; 673 674 pi->graphics_level[index].SclkDid = (u8)dividers.post_div; 675 pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk); 676 677 return 0; 678} 679 680static u16 kv_convert_8bit_index_to_voltage(struct amdgpu_device *adev, 681 u16 voltage) 682{ 683 return 6200 - (voltage * 25); 684} 685 686static u16 kv_convert_2bit_index_to_voltage(struct amdgpu_device *adev, 687 u32 vid_2bit) 688{ 689 struct kv_power_info *pi = kv_get_pi(adev); 690 u32 vid_8bit = kv_convert_vid2_to_vid7(adev, 691 &pi->sys_info.vid_mapping_table, 692 vid_2bit); 693 694 return kv_convert_8bit_index_to_voltage(adev, (u16)vid_8bit); 695} 696 697 698static int kv_set_vid(struct amdgpu_device *adev, u32 index, u32 vid) 699{ 700 struct kv_power_info *pi = kv_get_pi(adev); 701 702 pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t; 703 pi->graphics_level[index].MinVddNb = 704 cpu_to_be32(kv_convert_2bit_index_to_voltage(adev, vid)); 705 706 return 0; 707} 708 709static int kv_set_at(struct amdgpu_device *adev, u32 index, u32 at) 710{ 711 struct kv_power_info *pi = kv_get_pi(adev); 712 713 pi->graphics_level[index].AT = cpu_to_be16((u16)at); 714 715 return 0; 716} 717 718static void kv_dpm_power_level_enable(struct amdgpu_device *adev, 719 u32 index, bool enable) 720{ 721 struct kv_power_info *pi = kv_get_pi(adev); 722 723 pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0; 724} 725 726static void kv_start_dpm(struct amdgpu_device *adev) 727{ 728 u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT); 729 730 tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK; 731 WREG32_SMC(ixGENERAL_PWRMGT, tmp); 732 733 amdgpu_kv_smc_dpm_enable(adev, true); 734} 735 736static void kv_stop_dpm(struct amdgpu_device *adev) 737{ 738 amdgpu_kv_smc_dpm_enable(adev, false); 739} 740 741static void kv_start_am(struct amdgpu_device *adev) 742{ 743 u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL); 744 745 sclk_pwrmgt_cntl &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | 746 SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); 747 sclk_pwrmgt_cntl |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK; 748 749 WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl); 750} 751 752static void kv_reset_am(struct amdgpu_device *adev) 753{ 754 u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL); 755 756 sclk_pwrmgt_cntl |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | 757 SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); 758 759 WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl); 760} 761 762static int kv_freeze_sclk_dpm(struct amdgpu_device *adev, bool freeze) 763{ 764 return amdgpu_kv_notify_message_to_smu(adev, freeze ? 765 PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel); 766} 767 768static int kv_force_lowest_valid(struct amdgpu_device *adev) 769{ 770 return kv_force_dpm_lowest(adev); 771} 772 773static int kv_unforce_levels(struct amdgpu_device *adev) 774{ 775 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 776 return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NoForcedLevel); 777 else 778 return kv_set_enabled_levels(adev); 779} 780 781static int kv_update_sclk_t(struct amdgpu_device *adev) 782{ 783 struct kv_power_info *pi = kv_get_pi(adev); 784 u32 low_sclk_interrupt_t = 0; 785 int ret = 0; 786 787 if (pi->caps_sclk_throttle_low_notification) { 788 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t); 789 790 ret = amdgpu_kv_copy_bytes_to_smc(adev, 791 pi->dpm_table_start + 792 offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT), 793 (u8 *)&low_sclk_interrupt_t, 794 sizeof(u32), pi->sram_end); 795 } 796 return ret; 797} 798 799static int kv_program_bootup_state(struct amdgpu_device *adev) 800{ 801 struct kv_power_info *pi = kv_get_pi(adev); 802 u32 i; 803 struct amdgpu_clock_voltage_dependency_table *table = 804 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 805 806 if (table && table->count) { 807 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 808 if (table->entries[i].clk == pi->boot_pl.sclk) 809 break; 810 } 811 812 pi->graphics_boot_level = (u8)i; 813 kv_dpm_power_level_enable(adev, i, true); 814 } else { 815 struct sumo_sclk_voltage_mapping_table *table = 816 &pi->sys_info.sclk_voltage_mapping_table; 817 818 if (table->num_max_dpm_entries == 0) 819 return -EINVAL; 820 821 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 822 if (table->entries[i].sclk_frequency == pi->boot_pl.sclk) 823 break; 824 } 825 826 pi->graphics_boot_level = (u8)i; 827 kv_dpm_power_level_enable(adev, i, true); 828 } 829 return 0; 830} 831 832static int kv_enable_auto_thermal_throttling(struct amdgpu_device *adev) 833{ 834 struct kv_power_info *pi = kv_get_pi(adev); 835 int ret; 836 837 pi->graphics_therm_throttle_enable = 1; 838 839 ret = amdgpu_kv_copy_bytes_to_smc(adev, 840 pi->dpm_table_start + 841 offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable), 842 &pi->graphics_therm_throttle_enable, 843 sizeof(u8), pi->sram_end); 844 845 return ret; 846} 847 848static int kv_upload_dpm_settings(struct amdgpu_device *adev) 849{ 850 struct kv_power_info *pi = kv_get_pi(adev); 851 int ret; 852 853 ret = amdgpu_kv_copy_bytes_to_smc(adev, 854 pi->dpm_table_start + 855 offsetof(SMU7_Fusion_DpmTable, GraphicsLevel), 856 (u8 *)&pi->graphics_level, 857 sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS, 858 pi->sram_end); 859 860 if (ret) 861 return ret; 862 863 ret = amdgpu_kv_copy_bytes_to_smc(adev, 864 pi->dpm_table_start + 865 offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount), 866 &pi->graphics_dpm_level_count, 867 sizeof(u8), pi->sram_end); 868 869 return ret; 870} 871 872static u32 kv_get_clock_difference(u32 a, u32 b) 873{ 874 return (a >= b) ? a - b : b - a; 875} 876 877static u32 kv_get_clk_bypass(struct amdgpu_device *adev, u32 clk) 878{ 879 struct kv_power_info *pi = kv_get_pi(adev); 880 u32 value; 881 882 if (pi->caps_enable_dfs_bypass) { 883 if (kv_get_clock_difference(clk, 40000) < 200) 884 value = 3; 885 else if (kv_get_clock_difference(clk, 30000) < 200) 886 value = 2; 887 else if (kv_get_clock_difference(clk, 20000) < 200) 888 value = 7; 889 else if (kv_get_clock_difference(clk, 15000) < 200) 890 value = 6; 891 else if (kv_get_clock_difference(clk, 10000) < 200) 892 value = 8; 893 else 894 value = 0; 895 } else { 896 value = 0; 897 } 898 899 return value; 900} 901 902static int kv_populate_uvd_table(struct amdgpu_device *adev) 903{ 904 struct kv_power_info *pi = kv_get_pi(adev); 905 struct amdgpu_uvd_clock_voltage_dependency_table *table = 906 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 907 struct atom_clock_dividers dividers; 908 int ret; 909 u32 i; 910 911 if (table == NULL || table->count == 0) 912 return 0; 913 914 pi->uvd_level_count = 0; 915 for (i = 0; i < table->count; i++) { 916 if (pi->high_voltage_t && 917 (pi->high_voltage_t < table->entries[i].v)) 918 break; 919 920 pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk); 921 pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk); 922 pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v); 923 924 pi->uvd_level[i].VClkBypassCntl = 925 (u8)kv_get_clk_bypass(adev, table->entries[i].vclk); 926 pi->uvd_level[i].DClkBypassCntl = 927 (u8)kv_get_clk_bypass(adev, table->entries[i].dclk); 928 929 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 930 table->entries[i].vclk, false, &dividers); 931 if (ret) 932 return ret; 933 pi->uvd_level[i].VclkDivider = (u8)dividers.post_div; 934 935 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 936 table->entries[i].dclk, false, &dividers); 937 if (ret) 938 return ret; 939 pi->uvd_level[i].DclkDivider = (u8)dividers.post_div; 940 941 pi->uvd_level_count++; 942 } 943 944 ret = amdgpu_kv_copy_bytes_to_smc(adev, 945 pi->dpm_table_start + 946 offsetof(SMU7_Fusion_DpmTable, UvdLevelCount), 947 (u8 *)&pi->uvd_level_count, 948 sizeof(u8), pi->sram_end); 949 if (ret) 950 return ret; 951 952 pi->uvd_interval = 1; 953 954 ret = amdgpu_kv_copy_bytes_to_smc(adev, 955 pi->dpm_table_start + 956 offsetof(SMU7_Fusion_DpmTable, UVDInterval), 957 &pi->uvd_interval, 958 sizeof(u8), pi->sram_end); 959 if (ret) 960 return ret; 961 962 ret = amdgpu_kv_copy_bytes_to_smc(adev, 963 pi->dpm_table_start + 964 offsetof(SMU7_Fusion_DpmTable, UvdLevel), 965 (u8 *)&pi->uvd_level, 966 sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD, 967 pi->sram_end); 968 969 return ret; 970 971} 972 973static int kv_populate_vce_table(struct amdgpu_device *adev) 974{ 975 struct kv_power_info *pi = kv_get_pi(adev); 976 int ret; 977 u32 i; 978 struct amdgpu_vce_clock_voltage_dependency_table *table = 979 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 980 struct atom_clock_dividers dividers; 981 982 if (table == NULL || table->count == 0) 983 return 0; 984 985 pi->vce_level_count = 0; 986 for (i = 0; i < table->count; i++) { 987 if (pi->high_voltage_t && 988 pi->high_voltage_t < table->entries[i].v) 989 break; 990 991 pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk); 992 pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); 993 994 pi->vce_level[i].ClkBypassCntl = 995 (u8)kv_get_clk_bypass(adev, table->entries[i].evclk); 996 997 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 998 table->entries[i].evclk, false, &dividers); 999 if (ret) 1000 return ret; 1001 pi->vce_level[i].Divider = (u8)dividers.post_div; 1002 1003 pi->vce_level_count++; 1004 } 1005 1006 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1007 pi->dpm_table_start + 1008 offsetof(SMU7_Fusion_DpmTable, VceLevelCount), 1009 (u8 *)&pi->vce_level_count, 1010 sizeof(u8), 1011 pi->sram_end); 1012 if (ret) 1013 return ret; 1014 1015 pi->vce_interval = 1; 1016 1017 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1018 pi->dpm_table_start + 1019 offsetof(SMU7_Fusion_DpmTable, VCEInterval), 1020 (u8 *)&pi->vce_interval, 1021 sizeof(u8), 1022 pi->sram_end); 1023 if (ret) 1024 return ret; 1025 1026 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1027 pi->dpm_table_start + 1028 offsetof(SMU7_Fusion_DpmTable, VceLevel), 1029 (u8 *)&pi->vce_level, 1030 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE, 1031 pi->sram_end); 1032 1033 return ret; 1034} 1035 1036static int kv_populate_samu_table(struct amdgpu_device *adev) 1037{ 1038 struct kv_power_info *pi = kv_get_pi(adev); 1039 struct amdgpu_clock_voltage_dependency_table *table = 1040 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; 1041 struct atom_clock_dividers dividers; 1042 int ret; 1043 u32 i; 1044 1045 if (table == NULL || table->count == 0) 1046 return 0; 1047 1048 pi->samu_level_count = 0; 1049 for (i = 0; i < table->count; i++) { 1050 if (pi->high_voltage_t && 1051 pi->high_voltage_t < table->entries[i].v) 1052 break; 1053 1054 pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk); 1055 pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); 1056 1057 pi->samu_level[i].ClkBypassCntl = 1058 (u8)kv_get_clk_bypass(adev, table->entries[i].clk); 1059 1060 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 1061 table->entries[i].clk, false, &dividers); 1062 if (ret) 1063 return ret; 1064 pi->samu_level[i].Divider = (u8)dividers.post_div; 1065 1066 pi->samu_level_count++; 1067 } 1068 1069 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1070 pi->dpm_table_start + 1071 offsetof(SMU7_Fusion_DpmTable, SamuLevelCount), 1072 (u8 *)&pi->samu_level_count, 1073 sizeof(u8), 1074 pi->sram_end); 1075 if (ret) 1076 return ret; 1077 1078 pi->samu_interval = 1; 1079 1080 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1081 pi->dpm_table_start + 1082 offsetof(SMU7_Fusion_DpmTable, SAMUInterval), 1083 (u8 *)&pi->samu_interval, 1084 sizeof(u8), 1085 pi->sram_end); 1086 if (ret) 1087 return ret; 1088 1089 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1090 pi->dpm_table_start + 1091 offsetof(SMU7_Fusion_DpmTable, SamuLevel), 1092 (u8 *)&pi->samu_level, 1093 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU, 1094 pi->sram_end); 1095 if (ret) 1096 return ret; 1097 1098 return ret; 1099} 1100 1101 1102static int kv_populate_acp_table(struct amdgpu_device *adev) 1103{ 1104 struct kv_power_info *pi = kv_get_pi(adev); 1105 struct amdgpu_clock_voltage_dependency_table *table = 1106 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 1107 struct atom_clock_dividers dividers; 1108 int ret; 1109 u32 i; 1110 1111 if (table == NULL || table->count == 0) 1112 return 0; 1113 1114 pi->acp_level_count = 0; 1115 for (i = 0; i < table->count; i++) { 1116 pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk); 1117 pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); 1118 1119 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 1120 table->entries[i].clk, false, &dividers); 1121 if (ret) 1122 return ret; 1123 pi->acp_level[i].Divider = (u8)dividers.post_div; 1124 1125 pi->acp_level_count++; 1126 } 1127 1128 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1129 pi->dpm_table_start + 1130 offsetof(SMU7_Fusion_DpmTable, AcpLevelCount), 1131 (u8 *)&pi->acp_level_count, 1132 sizeof(u8), 1133 pi->sram_end); 1134 if (ret) 1135 return ret; 1136 1137 pi->acp_interval = 1; 1138 1139 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1140 pi->dpm_table_start + 1141 offsetof(SMU7_Fusion_DpmTable, ACPInterval), 1142 (u8 *)&pi->acp_interval, 1143 sizeof(u8), 1144 pi->sram_end); 1145 if (ret) 1146 return ret; 1147 1148 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1149 pi->dpm_table_start + 1150 offsetof(SMU7_Fusion_DpmTable, AcpLevel), 1151 (u8 *)&pi->acp_level, 1152 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP, 1153 pi->sram_end); 1154 if (ret) 1155 return ret; 1156 1157 return ret; 1158} 1159 1160static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev) 1161{ 1162 struct kv_power_info *pi = kv_get_pi(adev); 1163 u32 i; 1164 struct amdgpu_clock_voltage_dependency_table *table = 1165 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 1166 1167 if (table && table->count) { 1168 for (i = 0; i < pi->graphics_dpm_level_count; i++) { 1169 if (pi->caps_enable_dfs_bypass) { 1170 if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200) 1171 pi->graphics_level[i].ClkBypassCntl = 3; 1172 else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200) 1173 pi->graphics_level[i].ClkBypassCntl = 2; 1174 else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200) 1175 pi->graphics_level[i].ClkBypassCntl = 7; 1176 else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200) 1177 pi->graphics_level[i].ClkBypassCntl = 6; 1178 else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200) 1179 pi->graphics_level[i].ClkBypassCntl = 8; 1180 else 1181 pi->graphics_level[i].ClkBypassCntl = 0; 1182 } else { 1183 pi->graphics_level[i].ClkBypassCntl = 0; 1184 } 1185 } 1186 } else { 1187 struct sumo_sclk_voltage_mapping_table *table = 1188 &pi->sys_info.sclk_voltage_mapping_table; 1189 for (i = 0; i < pi->graphics_dpm_level_count; i++) { 1190 if (pi->caps_enable_dfs_bypass) { 1191 if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200) 1192 pi->graphics_level[i].ClkBypassCntl = 3; 1193 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200) 1194 pi->graphics_level[i].ClkBypassCntl = 2; 1195 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200) 1196 pi->graphics_level[i].ClkBypassCntl = 7; 1197 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200) 1198 pi->graphics_level[i].ClkBypassCntl = 6; 1199 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200) 1200 pi->graphics_level[i].ClkBypassCntl = 8; 1201 else 1202 pi->graphics_level[i].ClkBypassCntl = 0; 1203 } else { 1204 pi->graphics_level[i].ClkBypassCntl = 0; 1205 } 1206 } 1207 } 1208} 1209 1210static int kv_enable_ulv(struct amdgpu_device *adev, bool enable) 1211{ 1212 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1213 PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV); 1214} 1215 1216static void kv_reset_acp_boot_level(struct amdgpu_device *adev) 1217{ 1218 struct kv_power_info *pi = kv_get_pi(adev); 1219 1220 pi->acp_boot_level = 0xff; 1221} 1222 1223static void kv_update_current_ps(struct amdgpu_device *adev, 1224 struct amdgpu_ps *rps) 1225{ 1226 struct kv_ps *new_ps = kv_get_ps(rps); 1227 struct kv_power_info *pi = kv_get_pi(adev); 1228 1229 pi->current_rps = *rps; 1230 pi->current_ps = *new_ps; 1231 pi->current_rps.ps_priv = &pi->current_ps; 1232 adev->pm.dpm.current_ps = &pi->current_rps; 1233} 1234 1235static void kv_update_requested_ps(struct amdgpu_device *adev, 1236 struct amdgpu_ps *rps) 1237{ 1238 struct kv_ps *new_ps = kv_get_ps(rps); 1239 struct kv_power_info *pi = kv_get_pi(adev); 1240 1241 pi->requested_rps = *rps; 1242 pi->requested_ps = *new_ps; 1243 pi->requested_rps.ps_priv = &pi->requested_ps; 1244 adev->pm.dpm.requested_ps = &pi->requested_rps; 1245} 1246 1247static void kv_dpm_enable_bapm(void *handle, bool enable) 1248{ 1249 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1250 struct kv_power_info *pi = kv_get_pi(adev); 1251 int ret; 1252 1253 if (pi->bapm_enable) { 1254 ret = amdgpu_kv_smc_bapm_enable(adev, enable); 1255 if (ret) 1256 DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); 1257 } 1258} 1259 1260static int kv_dpm_enable(struct amdgpu_device *adev) 1261{ 1262 struct kv_power_info *pi = kv_get_pi(adev); 1263 int ret; 1264 1265 ret = kv_process_firmware_header(adev); 1266 if (ret) { 1267 DRM_ERROR("kv_process_firmware_header failed\n"); 1268 return ret; 1269 } 1270 kv_init_fps_limits(adev); 1271 kv_init_graphics_levels(adev); 1272 ret = kv_program_bootup_state(adev); 1273 if (ret) { 1274 DRM_ERROR("kv_program_bootup_state failed\n"); 1275 return ret; 1276 } 1277 kv_calculate_dfs_bypass_settings(adev); 1278 ret = kv_upload_dpm_settings(adev); 1279 if (ret) { 1280 DRM_ERROR("kv_upload_dpm_settings failed\n"); 1281 return ret; 1282 } 1283 ret = kv_populate_uvd_table(adev); 1284 if (ret) { 1285 DRM_ERROR("kv_populate_uvd_table failed\n"); 1286 return ret; 1287 } 1288 ret = kv_populate_vce_table(adev); 1289 if (ret) { 1290 DRM_ERROR("kv_populate_vce_table failed\n"); 1291 return ret; 1292 } 1293 ret = kv_populate_samu_table(adev); 1294 if (ret) { 1295 DRM_ERROR("kv_populate_samu_table failed\n"); 1296 return ret; 1297 } 1298 ret = kv_populate_acp_table(adev); 1299 if (ret) { 1300 DRM_ERROR("kv_populate_acp_table failed\n"); 1301 return ret; 1302 } 1303 kv_program_vc(adev); 1304#if 0 1305 kv_initialize_hardware_cac_manager(adev); 1306#endif 1307 kv_start_am(adev); 1308 if (pi->enable_auto_thermal_throttling) { 1309 ret = kv_enable_auto_thermal_throttling(adev); 1310 if (ret) { 1311 DRM_ERROR("kv_enable_auto_thermal_throttling failed\n"); 1312 return ret; 1313 } 1314 } 1315 ret = kv_enable_dpm_voltage_scaling(adev); 1316 if (ret) { 1317 DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n"); 1318 return ret; 1319 } 1320 ret = kv_set_dpm_interval(adev); 1321 if (ret) { 1322 DRM_ERROR("kv_set_dpm_interval failed\n"); 1323 return ret; 1324 } 1325 ret = kv_set_dpm_boot_state(adev); 1326 if (ret) { 1327 DRM_ERROR("kv_set_dpm_boot_state failed\n"); 1328 return ret; 1329 } 1330 ret = kv_enable_ulv(adev, true); 1331 if (ret) { 1332 DRM_ERROR("kv_enable_ulv failed\n"); 1333 return ret; 1334 } 1335 kv_start_dpm(adev); 1336 ret = kv_enable_didt(adev, true); 1337 if (ret) { 1338 DRM_ERROR("kv_enable_didt failed\n"); 1339 return ret; 1340 } 1341 ret = kv_enable_smc_cac(adev, true); 1342 if (ret) { 1343 DRM_ERROR("kv_enable_smc_cac failed\n"); 1344 return ret; 1345 } 1346 1347 kv_reset_acp_boot_level(adev); 1348 1349 ret = amdgpu_kv_smc_bapm_enable(adev, false); 1350 if (ret) { 1351 DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); 1352 return ret; 1353 } 1354 1355 kv_update_current_ps(adev, adev->pm.dpm.boot_ps); 1356 1357 if (adev->irq.installed && 1358 amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { 1359 ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); 1360 if (ret) { 1361 DRM_ERROR("kv_set_thermal_temperature_range failed\n"); 1362 return ret; 1363 } 1364 amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, 1365 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); 1366 amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, 1367 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); 1368 } 1369 1370 return ret; 1371} 1372 1373static void kv_dpm_disable(struct amdgpu_device *adev) 1374{ 1375 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, 1376 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); 1377 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, 1378 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); 1379 1380 amdgpu_kv_smc_bapm_enable(adev, false); 1381 1382 if (adev->asic_type == CHIP_MULLINS) 1383 kv_enable_nb_dpm(adev, false); 1384 1385 /* powerup blocks */ 1386 kv_dpm_powergate_acp(adev, false); 1387 kv_dpm_powergate_samu(adev, false); 1388 kv_dpm_powergate_vce(adev, false); 1389 kv_dpm_powergate_uvd(adev, false); 1390 1391 kv_enable_smc_cac(adev, false); 1392 kv_enable_didt(adev, false); 1393 kv_clear_vc(adev); 1394 kv_stop_dpm(adev); 1395 kv_enable_ulv(adev, false); 1396 kv_reset_am(adev); 1397 1398 kv_update_current_ps(adev, adev->pm.dpm.boot_ps); 1399} 1400 1401#if 0 1402static int kv_write_smc_soft_register(struct amdgpu_device *adev, 1403 u16 reg_offset, u32 value) 1404{ 1405 struct kv_power_info *pi = kv_get_pi(adev); 1406 1407 return amdgpu_kv_copy_bytes_to_smc(adev, pi->soft_regs_start + reg_offset, 1408 (u8 *)&value, sizeof(u16), pi->sram_end); 1409} 1410 1411static int kv_read_smc_soft_register(struct amdgpu_device *adev, 1412 u16 reg_offset, u32 *value) 1413{ 1414 struct kv_power_info *pi = kv_get_pi(adev); 1415 1416 return amdgpu_kv_read_smc_sram_dword(adev, pi->soft_regs_start + reg_offset, 1417 value, pi->sram_end); 1418} 1419#endif 1420 1421static void kv_init_sclk_t(struct amdgpu_device *adev) 1422{ 1423 struct kv_power_info *pi = kv_get_pi(adev); 1424 1425 pi->low_sclk_interrupt_t = 0; 1426} 1427 1428static int kv_init_fps_limits(struct amdgpu_device *adev) 1429{ 1430 struct kv_power_info *pi = kv_get_pi(adev); 1431 int ret = 0; 1432 1433 if (pi->caps_fps) { 1434 u16 tmp; 1435 1436 tmp = 45; 1437 pi->fps_high_t = cpu_to_be16(tmp); 1438 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1439 pi->dpm_table_start + 1440 offsetof(SMU7_Fusion_DpmTable, FpsHighT), 1441 (u8 *)&pi->fps_high_t, 1442 sizeof(u16), pi->sram_end); 1443 1444 tmp = 30; 1445 pi->fps_low_t = cpu_to_be16(tmp); 1446 1447 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1448 pi->dpm_table_start + 1449 offsetof(SMU7_Fusion_DpmTable, FpsLowT), 1450 (u8 *)&pi->fps_low_t, 1451 sizeof(u16), pi->sram_end); 1452 1453 } 1454 return ret; 1455} 1456 1457static void kv_init_powergate_state(struct amdgpu_device *adev) 1458{ 1459 struct kv_power_info *pi = kv_get_pi(adev); 1460 1461 pi->uvd_power_gated = false; 1462 pi->vce_power_gated = false; 1463 pi->samu_power_gated = false; 1464 pi->acp_power_gated = false; 1465 1466} 1467 1468static int kv_enable_uvd_dpm(struct amdgpu_device *adev, bool enable) 1469{ 1470 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1471 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable); 1472} 1473 1474static int kv_enable_vce_dpm(struct amdgpu_device *adev, bool enable) 1475{ 1476 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1477 PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable); 1478} 1479 1480static int kv_enable_samu_dpm(struct amdgpu_device *adev, bool enable) 1481{ 1482 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1483 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable); 1484} 1485 1486static int kv_enable_acp_dpm(struct amdgpu_device *adev, bool enable) 1487{ 1488 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1489 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable); 1490} 1491 1492static int kv_update_uvd_dpm(struct amdgpu_device *adev, bool gate) 1493{ 1494 struct kv_power_info *pi = kv_get_pi(adev); 1495 struct amdgpu_uvd_clock_voltage_dependency_table *table = 1496 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 1497 int ret; 1498 u32 mask; 1499 1500 if (!gate) { 1501 if (table->count) 1502 pi->uvd_boot_level = table->count - 1; 1503 else 1504 pi->uvd_boot_level = 0; 1505 1506 if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) { 1507 mask = 1 << pi->uvd_boot_level; 1508 } else { 1509 mask = 0x1f; 1510 } 1511 1512 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1513 pi->dpm_table_start + 1514 offsetof(SMU7_Fusion_DpmTable, UvdBootLevel), 1515 (uint8_t *)&pi->uvd_boot_level, 1516 sizeof(u8), pi->sram_end); 1517 if (ret) 1518 return ret; 1519 1520 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1521 PPSMC_MSG_UVDDPM_SetEnabledMask, 1522 mask); 1523 } 1524 1525 return kv_enable_uvd_dpm(adev, !gate); 1526} 1527 1528static u8 kv_get_vce_boot_level(struct amdgpu_device *adev, u32 evclk) 1529{ 1530 u8 i; 1531 struct amdgpu_vce_clock_voltage_dependency_table *table = 1532 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 1533 1534 for (i = 0; i < table->count; i++) { 1535 if (table->entries[i].evclk >= evclk) 1536 break; 1537 } 1538 1539 return i; 1540} 1541 1542static int kv_update_vce_dpm(struct amdgpu_device *adev, 1543 struct amdgpu_ps *amdgpu_new_state, 1544 struct amdgpu_ps *amdgpu_current_state) 1545{ 1546 struct kv_power_info *pi = kv_get_pi(adev); 1547 struct amdgpu_vce_clock_voltage_dependency_table *table = 1548 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 1549 int ret; 1550 1551 if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { 1552 kv_dpm_powergate_vce(adev, false); 1553 if (pi->caps_stable_p_state) 1554 pi->vce_boot_level = table->count - 1; 1555 else 1556 pi->vce_boot_level = kv_get_vce_boot_level(adev, amdgpu_new_state->evclk); 1557 1558 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1559 pi->dpm_table_start + 1560 offsetof(SMU7_Fusion_DpmTable, VceBootLevel), 1561 (u8 *)&pi->vce_boot_level, 1562 sizeof(u8), 1563 pi->sram_end); 1564 if (ret) 1565 return ret; 1566 1567 if (pi->caps_stable_p_state) 1568 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1569 PPSMC_MSG_VCEDPM_SetEnabledMask, 1570 (1 << pi->vce_boot_level)); 1571 kv_enable_vce_dpm(adev, true); 1572 } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { 1573 kv_enable_vce_dpm(adev, false); 1574 kv_dpm_powergate_vce(adev, true); 1575 } 1576 1577 return 0; 1578} 1579 1580static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate) 1581{ 1582 struct kv_power_info *pi = kv_get_pi(adev); 1583 struct amdgpu_clock_voltage_dependency_table *table = 1584 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; 1585 int ret; 1586 1587 if (!gate) { 1588 if (pi->caps_stable_p_state) 1589 pi->samu_boot_level = table->count - 1; 1590 else 1591 pi->samu_boot_level = 0; 1592 1593 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1594 pi->dpm_table_start + 1595 offsetof(SMU7_Fusion_DpmTable, SamuBootLevel), 1596 (u8 *)&pi->samu_boot_level, 1597 sizeof(u8), 1598 pi->sram_end); 1599 if (ret) 1600 return ret; 1601 1602 if (pi->caps_stable_p_state) 1603 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1604 PPSMC_MSG_SAMUDPM_SetEnabledMask, 1605 (1 << pi->samu_boot_level)); 1606 } 1607 1608 return kv_enable_samu_dpm(adev, !gate); 1609} 1610 1611static u8 kv_get_acp_boot_level(struct amdgpu_device *adev) 1612{ 1613 u8 i; 1614 struct amdgpu_clock_voltage_dependency_table *table = 1615 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 1616 1617 for (i = 0; i < table->count; i++) { 1618 if (table->entries[i].clk >= 0) /* XXX */ 1619 break; 1620 } 1621 1622 if (i >= table->count) 1623 i = table->count - 1; 1624 1625 return i; 1626} 1627 1628static void kv_update_acp_boot_level(struct amdgpu_device *adev) 1629{ 1630 struct kv_power_info *pi = kv_get_pi(adev); 1631 u8 acp_boot_level; 1632 1633 if (!pi->caps_stable_p_state) { 1634 acp_boot_level = kv_get_acp_boot_level(adev); 1635 if (acp_boot_level != pi->acp_boot_level) { 1636 pi->acp_boot_level = acp_boot_level; 1637 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1638 PPSMC_MSG_ACPDPM_SetEnabledMask, 1639 (1 << pi->acp_boot_level)); 1640 } 1641 } 1642} 1643 1644static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate) 1645{ 1646 struct kv_power_info *pi = kv_get_pi(adev); 1647 struct amdgpu_clock_voltage_dependency_table *table = 1648 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 1649 int ret; 1650 1651 if (!gate) { 1652 if (pi->caps_stable_p_state) 1653 pi->acp_boot_level = table->count - 1; 1654 else 1655 pi->acp_boot_level = kv_get_acp_boot_level(adev); 1656 1657 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1658 pi->dpm_table_start + 1659 offsetof(SMU7_Fusion_DpmTable, AcpBootLevel), 1660 (u8 *)&pi->acp_boot_level, 1661 sizeof(u8), 1662 pi->sram_end); 1663 if (ret) 1664 return ret; 1665 1666 if (pi->caps_stable_p_state) 1667 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1668 PPSMC_MSG_ACPDPM_SetEnabledMask, 1669 (1 << pi->acp_boot_level)); 1670 } 1671 1672 return kv_enable_acp_dpm(adev, !gate); 1673} 1674 1675static void kv_dpm_powergate_uvd(void *handle, bool gate) 1676{ 1677 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1678 struct kv_power_info *pi = kv_get_pi(adev); 1679 int ret; 1680 1681 pi->uvd_power_gated = gate; 1682 1683 if (gate) { 1684 /* stop the UVD block */ 1685 ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, 1686 AMD_PG_STATE_GATE); 1687 kv_update_uvd_dpm(adev, gate); 1688 if (pi->caps_uvd_pg) 1689 /* power off the UVD block */ 1690 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF); 1691 } else { 1692 if (pi->caps_uvd_pg) 1693 /* power on the UVD block */ 1694 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); 1695 /* re-init the UVD block */ 1696 kv_update_uvd_dpm(adev, gate); 1697 1698 ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, 1699 AMD_PG_STATE_UNGATE); 1700 } 1701} 1702 1703static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) 1704{ 1705 struct kv_power_info *pi = kv_get_pi(adev); 1706 1707 if (pi->vce_power_gated == gate) 1708 return; 1709 1710 pi->vce_power_gated = gate; 1711 1712 if (!pi->caps_vce_pg) 1713 return; 1714 1715 if (gate) 1716 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); 1717 else 1718 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); 1719} 1720 1721static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) 1722{ 1723 struct kv_power_info *pi = kv_get_pi(adev); 1724 1725 if (pi->samu_power_gated == gate) 1726 return; 1727 1728 pi->samu_power_gated = gate; 1729 1730 if (gate) { 1731 kv_update_samu_dpm(adev, true); 1732 if (pi->caps_samu_pg) 1733 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerOFF); 1734 } else { 1735 if (pi->caps_samu_pg) 1736 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerON); 1737 kv_update_samu_dpm(adev, false); 1738 } 1739} 1740 1741static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate) 1742{ 1743 struct kv_power_info *pi = kv_get_pi(adev); 1744 1745 if (pi->acp_power_gated == gate) 1746 return; 1747 1748 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 1749 return; 1750 1751 pi->acp_power_gated = gate; 1752 1753 if (gate) { 1754 kv_update_acp_dpm(adev, true); 1755 if (pi->caps_acp_pg) 1756 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerOFF); 1757 } else { 1758 if (pi->caps_acp_pg) 1759 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerON); 1760 kv_update_acp_dpm(adev, false); 1761 } 1762} 1763 1764static void kv_set_valid_clock_range(struct amdgpu_device *adev, 1765 struct amdgpu_ps *new_rps) 1766{ 1767 struct kv_ps *new_ps = kv_get_ps(new_rps); 1768 struct kv_power_info *pi = kv_get_pi(adev); 1769 u32 i; 1770 struct amdgpu_clock_voltage_dependency_table *table = 1771 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 1772 1773 if (table && table->count) { 1774 for (i = 0; i < pi->graphics_dpm_level_count; i++) { 1775 if ((table->entries[i].clk >= new_ps->levels[0].sclk) || 1776 (i == (pi->graphics_dpm_level_count - 1))) { 1777 pi->lowest_valid = i; 1778 break; 1779 } 1780 } 1781 1782 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 1783 if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk) 1784 break; 1785 } 1786 pi->highest_valid = i; 1787 1788 if (pi->lowest_valid > pi->highest_valid) { 1789 if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) > 1790 (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk)) 1791 pi->highest_valid = pi->lowest_valid; 1792 else 1793 pi->lowest_valid = pi->highest_valid; 1794 } 1795 } else { 1796 struct sumo_sclk_voltage_mapping_table *table = 1797 &pi->sys_info.sclk_voltage_mapping_table; 1798 1799 for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) { 1800 if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk || 1801 i == (int)(pi->graphics_dpm_level_count - 1)) { 1802 pi->lowest_valid = i; 1803 break; 1804 } 1805 } 1806 1807 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 1808 if (table->entries[i].sclk_frequency <= 1809 new_ps->levels[new_ps->num_levels - 1].sclk) 1810 break; 1811 } 1812 pi->highest_valid = i; 1813 1814 if (pi->lowest_valid > pi->highest_valid) { 1815 if ((new_ps->levels[0].sclk - 1816 table->entries[pi->highest_valid].sclk_frequency) > 1817 (table->entries[pi->lowest_valid].sclk_frequency - 1818 new_ps->levels[new_ps->num_levels -1].sclk)) 1819 pi->highest_valid = pi->lowest_valid; 1820 else 1821 pi->lowest_valid = pi->highest_valid; 1822 } 1823 } 1824} 1825 1826static int kv_update_dfs_bypass_settings(struct amdgpu_device *adev, 1827 struct amdgpu_ps *new_rps) 1828{ 1829 struct kv_ps *new_ps = kv_get_ps(new_rps); 1830 struct kv_power_info *pi = kv_get_pi(adev); 1831 int ret = 0; 1832 u8 clk_bypass_cntl; 1833 1834 if (pi->caps_enable_dfs_bypass) { 1835 clk_bypass_cntl = new_ps->need_dfs_bypass ? 1836 pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0; 1837 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1838 (pi->dpm_table_start + 1839 offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) + 1840 (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) + 1841 offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)), 1842 &clk_bypass_cntl, 1843 sizeof(u8), pi->sram_end); 1844 } 1845 1846 return ret; 1847} 1848 1849static int kv_enable_nb_dpm(struct amdgpu_device *adev, 1850 bool enable) 1851{ 1852 struct kv_power_info *pi = kv_get_pi(adev); 1853 int ret = 0; 1854 1855 if (enable) { 1856 if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) { 1857 ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Enable); 1858 if (ret == 0) 1859 pi->nb_dpm_enabled = true; 1860 } 1861 } else { 1862 if (pi->enable_nb_dpm && pi->nb_dpm_enabled) { 1863 ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Disable); 1864 if (ret == 0) 1865 pi->nb_dpm_enabled = false; 1866 } 1867 } 1868 1869 return ret; 1870} 1871 1872static int kv_dpm_force_performance_level(void *handle, 1873 enum amd_dpm_forced_level level) 1874{ 1875 int ret; 1876 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1877 1878 if (level == AMD_DPM_FORCED_LEVEL_HIGH) { 1879 ret = kv_force_dpm_highest(adev); 1880 if (ret) 1881 return ret; 1882 } else if (level == AMD_DPM_FORCED_LEVEL_LOW) { 1883 ret = kv_force_dpm_lowest(adev); 1884 if (ret) 1885 return ret; 1886 } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) { 1887 ret = kv_unforce_levels(adev); 1888 if (ret) 1889 return ret; 1890 } 1891 1892 adev->pm.dpm.forced_level = level; 1893 1894 return 0; 1895} 1896 1897static int kv_dpm_pre_set_power_state(void *handle) 1898{ 1899 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1900 struct kv_power_info *pi = kv_get_pi(adev); 1901 struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps; 1902 struct amdgpu_ps *new_ps = &requested_ps; 1903 1904 kv_update_requested_ps(adev, new_ps); 1905 1906 kv_apply_state_adjust_rules(adev, 1907 &pi->requested_rps, 1908 &pi->current_rps); 1909 1910 return 0; 1911} 1912 1913static int kv_dpm_set_power_state(void *handle) 1914{ 1915 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1916 struct kv_power_info *pi = kv_get_pi(adev); 1917 struct amdgpu_ps *new_ps = &pi->requested_rps; 1918 struct amdgpu_ps *old_ps = &pi->current_rps; 1919 int ret; 1920 1921 if (pi->bapm_enable) { 1922 ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.dpm.ac_power); 1923 if (ret) { 1924 DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); 1925 return ret; 1926 } 1927 } 1928 1929 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { 1930 if (pi->enable_dpm) { 1931 kv_set_valid_clock_range(adev, new_ps); 1932 kv_update_dfs_bypass_settings(adev, new_ps); 1933 ret = kv_calculate_ds_divider(adev); 1934 if (ret) { 1935 DRM_ERROR("kv_calculate_ds_divider failed\n"); 1936 return ret; 1937 } 1938 kv_calculate_nbps_level_settings(adev); 1939 kv_calculate_dpm_settings(adev); 1940 kv_force_lowest_valid(adev); 1941 kv_enable_new_levels(adev); 1942 kv_upload_dpm_settings(adev); 1943 kv_program_nbps_index_settings(adev, new_ps); 1944 kv_unforce_levels(adev); 1945 kv_set_enabled_levels(adev); 1946 kv_force_lowest_valid(adev); 1947 kv_unforce_levels(adev); 1948 1949 ret = kv_update_vce_dpm(adev, new_ps, old_ps); 1950 if (ret) { 1951 DRM_ERROR("kv_update_vce_dpm failed\n"); 1952 return ret; 1953 } 1954 kv_update_sclk_t(adev); 1955 if (adev->asic_type == CHIP_MULLINS) 1956 kv_enable_nb_dpm(adev, true); 1957 } 1958 } else { 1959 if (pi->enable_dpm) { 1960 kv_set_valid_clock_range(adev, new_ps); 1961 kv_update_dfs_bypass_settings(adev, new_ps); 1962 ret = kv_calculate_ds_divider(adev); 1963 if (ret) { 1964 DRM_ERROR("kv_calculate_ds_divider failed\n"); 1965 return ret; 1966 } 1967 kv_calculate_nbps_level_settings(adev); 1968 kv_calculate_dpm_settings(adev); 1969 kv_freeze_sclk_dpm(adev, true); 1970 kv_upload_dpm_settings(adev); 1971 kv_program_nbps_index_settings(adev, new_ps); 1972 kv_freeze_sclk_dpm(adev, false); 1973 kv_set_enabled_levels(adev); 1974 ret = kv_update_vce_dpm(adev, new_ps, old_ps); 1975 if (ret) { 1976 DRM_ERROR("kv_update_vce_dpm failed\n"); 1977 return ret; 1978 } 1979 kv_update_acp_boot_level(adev); 1980 kv_update_sclk_t(adev); 1981 kv_enable_nb_dpm(adev, true); 1982 } 1983 } 1984 1985 return 0; 1986} 1987 1988static void kv_dpm_post_set_power_state(void *handle) 1989{ 1990 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1991 struct kv_power_info *pi = kv_get_pi(adev); 1992 struct amdgpu_ps *new_ps = &pi->requested_rps; 1993 1994 kv_update_current_ps(adev, new_ps); 1995} 1996 1997static void kv_dpm_setup_asic(struct amdgpu_device *adev) 1998{ 1999 sumo_take_smu_control(adev, true); 2000 kv_init_powergate_state(adev); 2001 kv_init_sclk_t(adev); 2002} 2003 2004#if 0 2005static void kv_dpm_reset_asic(struct amdgpu_device *adev) 2006{ 2007 struct kv_power_info *pi = kv_get_pi(adev); 2008 2009 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { 2010 kv_force_lowest_valid(adev); 2011 kv_init_graphics_levels(adev); 2012 kv_program_bootup_state(adev); 2013 kv_upload_dpm_settings(adev); 2014 kv_force_lowest_valid(adev); 2015 kv_unforce_levels(adev); 2016 } else { 2017 kv_init_graphics_levels(adev); 2018 kv_program_bootup_state(adev); 2019 kv_freeze_sclk_dpm(adev, true); 2020 kv_upload_dpm_settings(adev); 2021 kv_freeze_sclk_dpm(adev, false); 2022 kv_set_enabled_level(adev, pi->graphics_boot_level); 2023 } 2024} 2025#endif 2026 2027static void kv_construct_max_power_limits_table(struct amdgpu_device *adev, 2028 struct amdgpu_clock_and_voltage_limits *table) 2029{ 2030 struct kv_power_info *pi = kv_get_pi(adev); 2031 2032 if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) { 2033 int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1; 2034 table->sclk = 2035 pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency; 2036 table->vddc = 2037 kv_convert_2bit_index_to_voltage(adev, 2038 pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit); 2039 } 2040 2041 table->mclk = pi->sys_info.nbp_memory_clock[0]; 2042} 2043 2044static void kv_patch_voltage_values(struct amdgpu_device *adev) 2045{ 2046 int i; 2047 struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table = 2048 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 2049 struct amdgpu_vce_clock_voltage_dependency_table *vce_table = 2050 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 2051 struct amdgpu_clock_voltage_dependency_table *samu_table = 2052 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; 2053 struct amdgpu_clock_voltage_dependency_table *acp_table = 2054 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 2055 2056 if (uvd_table->count) { 2057 for (i = 0; i < uvd_table->count; i++) 2058 uvd_table->entries[i].v = 2059 kv_convert_8bit_index_to_voltage(adev, 2060 uvd_table->entries[i].v); 2061 } 2062 2063 if (vce_table->count) { 2064 for (i = 0; i < vce_table->count; i++) 2065 vce_table->entries[i].v = 2066 kv_convert_8bit_index_to_voltage(adev, 2067 vce_table->entries[i].v); 2068 } 2069 2070 if (samu_table->count) { 2071 for (i = 0; i < samu_table->count; i++) 2072 samu_table->entries[i].v = 2073 kv_convert_8bit_index_to_voltage(adev, 2074 samu_table->entries[i].v); 2075 } 2076 2077 if (acp_table->count) { 2078 for (i = 0; i < acp_table->count; i++) 2079 acp_table->entries[i].v = 2080 kv_convert_8bit_index_to_voltage(adev, 2081 acp_table->entries[i].v); 2082 } 2083 2084} 2085 2086static void kv_construct_boot_state(struct amdgpu_device *adev) 2087{ 2088 struct kv_power_info *pi = kv_get_pi(adev); 2089 2090 pi->boot_pl.sclk = pi->sys_info.bootup_sclk; 2091 pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index; 2092 pi->boot_pl.ds_divider_index = 0; 2093 pi->boot_pl.ss_divider_index = 0; 2094 pi->boot_pl.allow_gnb_slow = 1; 2095 pi->boot_pl.force_nbp_state = 0; 2096 pi->boot_pl.display_wm = 0; 2097 pi->boot_pl.vce_wm = 0; 2098} 2099 2100static int kv_force_dpm_highest(struct amdgpu_device *adev) 2101{ 2102 int ret; 2103 u32 enable_mask, i; 2104 2105 ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask); 2106 if (ret) 2107 return ret; 2108 2109 for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) { 2110 if (enable_mask & (1 << i)) 2111 break; 2112 } 2113 2114 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 2115 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i); 2116 else 2117 return kv_set_enabled_level(adev, i); 2118} 2119 2120static int kv_force_dpm_lowest(struct amdgpu_device *adev) 2121{ 2122 int ret; 2123 u32 enable_mask, i; 2124 2125 ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask); 2126 if (ret) 2127 return ret; 2128 2129 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) { 2130 if (enable_mask & (1 << i)) 2131 break; 2132 } 2133 2134 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 2135 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i); 2136 else 2137 return kv_set_enabled_level(adev, i); 2138} 2139 2140static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev, 2141 u32 sclk, u32 min_sclk_in_sr) 2142{ 2143 struct kv_power_info *pi = kv_get_pi(adev); 2144 u32 i; 2145 u32 temp; 2146 u32 min = max(min_sclk_in_sr, (u32)KV_MINIMUM_ENGINE_CLOCK); 2147 2148 if (sclk < min) 2149 return 0; 2150 2151 if (!pi->caps_sclk_ds) 2152 return 0; 2153 2154 for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) { 2155 temp = sclk >> i; 2156 if (temp >= min) 2157 break; 2158 } 2159 2160 return (u8)i; 2161} 2162 2163static int kv_get_high_voltage_limit(struct amdgpu_device *adev, int *limit) 2164{ 2165 struct kv_power_info *pi = kv_get_pi(adev); 2166 struct amdgpu_clock_voltage_dependency_table *table = 2167 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 2168 int i; 2169 2170 if (table && table->count) { 2171 for (i = table->count - 1; i >= 0; i--) { 2172 if (pi->high_voltage_t && 2173 (kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <= 2174 pi->high_voltage_t)) { 2175 *limit = i; 2176 return 0; 2177 } 2178 } 2179 } else { 2180 struct sumo_sclk_voltage_mapping_table *table = 2181 &pi->sys_info.sclk_voltage_mapping_table; 2182 2183 for (i = table->num_max_dpm_entries - 1; i >= 0; i--) { 2184 if (pi->high_voltage_t && 2185 (kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <= 2186 pi->high_voltage_t)) { 2187 *limit = i; 2188 return 0; 2189 } 2190 } 2191 } 2192 2193 *limit = 0; 2194 return 0; 2195} 2196 2197static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, 2198 struct amdgpu_ps *new_rps, 2199 struct amdgpu_ps *old_rps) 2200{ 2201 struct kv_ps *ps = kv_get_ps(new_rps); 2202 struct kv_power_info *pi = kv_get_pi(adev); 2203 u32 min_sclk = 10000; /* ??? */ 2204 u32 sclk, mclk = 0; 2205 int i, limit; 2206 bool force_high; 2207 struct amdgpu_clock_voltage_dependency_table *table = 2208 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 2209 u32 stable_p_state_sclk = 0; 2210 struct amdgpu_clock_and_voltage_limits *max_limits = 2211 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 2212 2213 if (new_rps->vce_active) { 2214 new_rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk; 2215 new_rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk; 2216 } else { 2217 new_rps->evclk = 0; 2218 new_rps->ecclk = 0; 2219 } 2220 2221 mclk = max_limits->mclk; 2222 sclk = min_sclk; 2223 2224 if (pi->caps_stable_p_state) { 2225 stable_p_state_sclk = (max_limits->sclk * 75) / 100; 2226 2227 for (i = table->count - 1; i >= 0; i--) { 2228 if (stable_p_state_sclk >= table->entries[i].clk) { 2229 stable_p_state_sclk = table->entries[i].clk; 2230 break; 2231 } 2232 } 2233 2234 if (i > 0) 2235 stable_p_state_sclk = table->entries[0].clk; 2236 2237 sclk = stable_p_state_sclk; 2238 } 2239 2240 if (new_rps->vce_active) { 2241 if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk) 2242 sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk; 2243 } 2244 2245 ps->need_dfs_bypass = true; 2246 2247 for (i = 0; i < ps->num_levels; i++) { 2248 if (ps->levels[i].sclk < sclk) 2249 ps->levels[i].sclk = sclk; 2250 } 2251 2252 if (table && table->count) { 2253 for (i = 0; i < ps->num_levels; i++) { 2254 if (pi->high_voltage_t && 2255 (pi->high_voltage_t < 2256 kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) { 2257 kv_get_high_voltage_limit(adev, &limit); 2258 ps->levels[i].sclk = table->entries[limit].clk; 2259 } 2260 } 2261 } else { 2262 struct sumo_sclk_voltage_mapping_table *table = 2263 &pi->sys_info.sclk_voltage_mapping_table; 2264 2265 for (i = 0; i < ps->num_levels; i++) { 2266 if (pi->high_voltage_t && 2267 (pi->high_voltage_t < 2268 kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) { 2269 kv_get_high_voltage_limit(adev, &limit); 2270 ps->levels[i].sclk = table->entries[limit].sclk_frequency; 2271 } 2272 } 2273 } 2274 2275 if (pi->caps_stable_p_state) { 2276 for (i = 0; i < ps->num_levels; i++) { 2277 ps->levels[i].sclk = stable_p_state_sclk; 2278 } 2279 } 2280 2281 pi->video_start = new_rps->dclk || new_rps->vclk || 2282 new_rps->evclk || new_rps->ecclk; 2283 2284 if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 2285 ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) 2286 pi->battery_state = true; 2287 else 2288 pi->battery_state = false; 2289 2290 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { 2291 ps->dpm0_pg_nb_ps_lo = 0x1; 2292 ps->dpm0_pg_nb_ps_hi = 0x0; 2293 ps->dpmx_nb_ps_lo = 0x1; 2294 ps->dpmx_nb_ps_hi = 0x0; 2295 } else { 2296 ps->dpm0_pg_nb_ps_lo = 0x3; 2297 ps->dpm0_pg_nb_ps_hi = 0x0; 2298 ps->dpmx_nb_ps_lo = 0x3; 2299 ps->dpmx_nb_ps_hi = 0x0; 2300 2301 if (pi->sys_info.nb_dpm_enable) { 2302 force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) || 2303 pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) || 2304 pi->disable_nb_ps3_in_battery; 2305 ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3; 2306 ps->dpm0_pg_nb_ps_hi = 0x2; 2307 ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3; 2308 ps->dpmx_nb_ps_hi = 0x2; 2309 } 2310 } 2311} 2312 2313static void kv_dpm_power_level_enabled_for_throttle(struct amdgpu_device *adev, 2314 u32 index, bool enable) 2315{ 2316 struct kv_power_info *pi = kv_get_pi(adev); 2317 2318 pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0; 2319} 2320 2321static int kv_calculate_ds_divider(struct amdgpu_device *adev) 2322{ 2323 struct kv_power_info *pi = kv_get_pi(adev); 2324 u32 sclk_in_sr = 10000; /* ??? */ 2325 u32 i; 2326 2327 if (pi->lowest_valid > pi->highest_valid) 2328 return -EINVAL; 2329 2330 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { 2331 pi->graphics_level[i].DeepSleepDivId = 2332 kv_get_sleep_divider_id_from_clock(adev, 2333 be32_to_cpu(pi->graphics_level[i].SclkFrequency), 2334 sclk_in_sr); 2335 } 2336 return 0; 2337} 2338 2339static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev) 2340{ 2341 struct kv_power_info *pi = kv_get_pi(adev); 2342 u32 i; 2343 bool force_high; 2344 struct amdgpu_clock_and_voltage_limits *max_limits = 2345 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 2346 u32 mclk = max_limits->mclk; 2347 2348 if (pi->lowest_valid > pi->highest_valid) 2349 return -EINVAL; 2350 2351 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { 2352 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { 2353 pi->graphics_level[i].GnbSlow = 1; 2354 pi->graphics_level[i].ForceNbPs1 = 0; 2355 pi->graphics_level[i].UpH = 0; 2356 } 2357 2358 if (!pi->sys_info.nb_dpm_enable) 2359 return 0; 2360 2361 force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) || 2362 (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start); 2363 2364 if (force_high) { 2365 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) 2366 pi->graphics_level[i].GnbSlow = 0; 2367 } else { 2368 if (pi->battery_state) 2369 pi->graphics_level[0].ForceNbPs1 = 1; 2370 2371 pi->graphics_level[1].GnbSlow = 0; 2372 pi->graphics_level[2].GnbSlow = 0; 2373 pi->graphics_level[3].GnbSlow = 0; 2374 pi->graphics_level[4].GnbSlow = 0; 2375 } 2376 } else { 2377 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { 2378 pi->graphics_level[i].GnbSlow = 1; 2379 pi->graphics_level[i].ForceNbPs1 = 0; 2380 pi->graphics_level[i].UpH = 0; 2381 } 2382 2383 if (pi->sys_info.nb_dpm_enable && pi->battery_state) { 2384 pi->graphics_level[pi->lowest_valid].UpH = 0x28; 2385 pi->graphics_level[pi->lowest_valid].GnbSlow = 0; 2386 if (pi->lowest_valid != pi->highest_valid) 2387 pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1; 2388 } 2389 } 2390 return 0; 2391} 2392 2393static int kv_calculate_dpm_settings(struct amdgpu_device *adev) 2394{ 2395 struct kv_power_info *pi = kv_get_pi(adev); 2396 u32 i; 2397 2398 if (pi->lowest_valid > pi->highest_valid) 2399 return -EINVAL; 2400 2401 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) 2402 pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0; 2403 2404 return 0; 2405} 2406 2407static void kv_init_graphics_levels(struct amdgpu_device *adev) 2408{ 2409 struct kv_power_info *pi = kv_get_pi(adev); 2410 u32 i; 2411 struct amdgpu_clock_voltage_dependency_table *table = 2412 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 2413 2414 if (table && table->count) { 2415 u32 vid_2bit; 2416 2417 pi->graphics_dpm_level_count = 0; 2418 for (i = 0; i < table->count; i++) { 2419 if (pi->high_voltage_t && 2420 (pi->high_voltage_t < 2421 kv_convert_8bit_index_to_voltage(adev, table->entries[i].v))) 2422 break; 2423 2424 kv_set_divider_value(adev, i, table->entries[i].clk); 2425 vid_2bit = kv_convert_vid7_to_vid2(adev, 2426 &pi->sys_info.vid_mapping_table, 2427 table->entries[i].v); 2428 kv_set_vid(adev, i, vid_2bit); 2429 kv_set_at(adev, i, pi->at[i]); 2430 kv_dpm_power_level_enabled_for_throttle(adev, i, true); 2431 pi->graphics_dpm_level_count++; 2432 } 2433 } else { 2434 struct sumo_sclk_voltage_mapping_table *table = 2435 &pi->sys_info.sclk_voltage_mapping_table; 2436 2437 pi->graphics_dpm_level_count = 0; 2438 for (i = 0; i < table->num_max_dpm_entries; i++) { 2439 if (pi->high_voltage_t && 2440 pi->high_voltage_t < 2441 kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit)) 2442 break; 2443 2444 kv_set_divider_value(adev, i, table->entries[i].sclk_frequency); 2445 kv_set_vid(adev, i, table->entries[i].vid_2bit); 2446 kv_set_at(adev, i, pi->at[i]); 2447 kv_dpm_power_level_enabled_for_throttle(adev, i, true); 2448 pi->graphics_dpm_level_count++; 2449 } 2450 } 2451 2452 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) 2453 kv_dpm_power_level_enable(adev, i, false); 2454} 2455 2456static void kv_enable_new_levels(struct amdgpu_device *adev) 2457{ 2458 struct kv_power_info *pi = kv_get_pi(adev); 2459 u32 i; 2460 2461 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) { 2462 if (i >= pi->lowest_valid && i <= pi->highest_valid) 2463 kv_dpm_power_level_enable(adev, i, true); 2464 } 2465} 2466 2467static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level) 2468{ 2469 u32 new_mask = (1 << level); 2470 2471 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, 2472 PPSMC_MSG_SCLKDPM_SetEnabledMask, 2473 new_mask); 2474} 2475 2476static int kv_set_enabled_levels(struct amdgpu_device *adev) 2477{ 2478 struct kv_power_info *pi = kv_get_pi(adev); 2479 u32 i, new_mask = 0; 2480 2481 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) 2482 new_mask |= (1 << i); 2483 2484 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, 2485 PPSMC_MSG_SCLKDPM_SetEnabledMask, 2486 new_mask); 2487} 2488 2489static void kv_program_nbps_index_settings(struct amdgpu_device *adev, 2490 struct amdgpu_ps *new_rps) 2491{ 2492 struct kv_ps *new_ps = kv_get_ps(new_rps); 2493 struct kv_power_info *pi = kv_get_pi(adev); 2494 u32 nbdpmconfig1; 2495 2496 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 2497 return; 2498 2499 if (pi->sys_info.nb_dpm_enable) { 2500 nbdpmconfig1 = RREG32_SMC(ixNB_DPM_CONFIG_1); 2501 nbdpmconfig1 &= ~(NB_DPM_CONFIG_1__Dpm0PgNbPsLo_MASK | 2502 NB_DPM_CONFIG_1__Dpm0PgNbPsHi_MASK | 2503 NB_DPM_CONFIG_1__DpmXNbPsLo_MASK | 2504 NB_DPM_CONFIG_1__DpmXNbPsHi_MASK); 2505 nbdpmconfig1 |= (new_ps->dpm0_pg_nb_ps_lo << NB_DPM_CONFIG_1__Dpm0PgNbPsLo__SHIFT) | 2506 (new_ps->dpm0_pg_nb_ps_hi << NB_DPM_CONFIG_1__Dpm0PgNbPsHi__SHIFT) | 2507 (new_ps->dpmx_nb_ps_lo << NB_DPM_CONFIG_1__DpmXNbPsLo__SHIFT) | 2508 (new_ps->dpmx_nb_ps_hi << NB_DPM_CONFIG_1__DpmXNbPsHi__SHIFT); 2509 WREG32_SMC(ixNB_DPM_CONFIG_1, nbdpmconfig1); 2510 } 2511} 2512 2513static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, 2514 int min_temp, int max_temp) 2515{ 2516 int low_temp = 0 * 1000; 2517 int high_temp = 255 * 1000; 2518 u32 tmp; 2519 2520 if (low_temp < min_temp) 2521 low_temp = min_temp; 2522 if (high_temp > max_temp) 2523 high_temp = max_temp; 2524 if (high_temp < low_temp) { 2525 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); 2526 return -EINVAL; 2527 } 2528 2529 tmp = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 2530 tmp &= ~(CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK | 2531 CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK); 2532 tmp |= ((49 + (high_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT) | 2533 ((49 + (low_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT); 2534 WREG32_SMC(ixCG_THERMAL_INT_CTRL, tmp); 2535 2536 adev->pm.dpm.thermal.min_temp = low_temp; 2537 adev->pm.dpm.thermal.max_temp = high_temp; 2538 2539 return 0; 2540} 2541 2542union igp_info { 2543 struct _ATOM_INTEGRATED_SYSTEM_INFO info; 2544 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; 2545 struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5; 2546 struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6; 2547 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; 2548 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8; 2549}; 2550 2551static int kv_parse_sys_info_table(struct amdgpu_device *adev) 2552{ 2553 struct kv_power_info *pi = kv_get_pi(adev); 2554 struct amdgpu_mode_info *mode_info = &adev->mode_info; 2555 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); 2556 union igp_info *igp_info; 2557 u8 frev, crev; 2558 u16 data_offset; 2559 int i; 2560 2561 if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 2562 &frev, &crev, &data_offset)) { 2563 igp_info = (union igp_info *)(mode_info->atom_context->bios + 2564 data_offset); 2565 2566 if (crev != 8) { 2567 DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); 2568 return -EINVAL; 2569 } 2570 pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock); 2571 pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock); 2572 pi->sys_info.bootup_nb_voltage_index = 2573 le16_to_cpu(igp_info->info_8.usBootUpNBVoltage); 2574 if (igp_info->info_8.ucHtcTmpLmt == 0) 2575 pi->sys_info.htc_tmp_lmt = 203; 2576 else 2577 pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt; 2578 if (igp_info->info_8.ucHtcHystLmt == 0) 2579 pi->sys_info.htc_hyst_lmt = 5; 2580 else 2581 pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt; 2582 if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) { 2583 DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n"); 2584 } 2585 2586 if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3)) 2587 pi->sys_info.nb_dpm_enable = true; 2588 else 2589 pi->sys_info.nb_dpm_enable = false; 2590 2591 for (i = 0; i < KV_NUM_NBPSTATES; i++) { 2592 pi->sys_info.nbp_memory_clock[i] = 2593 le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]); 2594 pi->sys_info.nbp_n_clock[i] = 2595 le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]); 2596 } 2597 if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) & 2598 SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) 2599 pi->caps_enable_dfs_bypass = true; 2600 2601 sumo_construct_sclk_voltage_mapping_table(adev, 2602 &pi->sys_info.sclk_voltage_mapping_table, 2603 igp_info->info_8.sAvail_SCLK); 2604 2605 sumo_construct_vid_mapping_table(adev, 2606 &pi->sys_info.vid_mapping_table, 2607 igp_info->info_8.sAvail_SCLK); 2608 2609 kv_construct_max_power_limits_table(adev, 2610 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac); 2611 } 2612 return 0; 2613} 2614 2615union power_info { 2616 struct _ATOM_POWERPLAY_INFO info; 2617 struct _ATOM_POWERPLAY_INFO_V2 info_2; 2618 struct _ATOM_POWERPLAY_INFO_V3 info_3; 2619 struct _ATOM_PPLIB_POWERPLAYTABLE pplib; 2620 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; 2621 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; 2622}; 2623 2624union pplib_clock_info { 2625 struct _ATOM_PPLIB_R600_CLOCK_INFO r600; 2626 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; 2627 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; 2628 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; 2629}; 2630 2631union pplib_power_state { 2632 struct _ATOM_PPLIB_STATE v1; 2633 struct _ATOM_PPLIB_STATE_V2 v2; 2634}; 2635 2636static void kv_patch_boot_state(struct amdgpu_device *adev, 2637 struct kv_ps *ps) 2638{ 2639 struct kv_power_info *pi = kv_get_pi(adev); 2640 2641 ps->num_levels = 1; 2642 ps->levels[0] = pi->boot_pl; 2643} 2644 2645static void kv_parse_pplib_non_clock_info(struct amdgpu_device *adev, 2646 struct amdgpu_ps *rps, 2647 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, 2648 u8 table_rev) 2649{ 2650 struct kv_ps *ps = kv_get_ps(rps); 2651 2652 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); 2653 rps->class = le16_to_cpu(non_clock_info->usClassification); 2654 rps->class2 = le16_to_cpu(non_clock_info->usClassification2); 2655 2656 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { 2657 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); 2658 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); 2659 } else { 2660 rps->vclk = 0; 2661 rps->dclk = 0; 2662 } 2663 2664 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { 2665 adev->pm.dpm.boot_ps = rps; 2666 kv_patch_boot_state(adev, ps); 2667 } 2668 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) 2669 adev->pm.dpm.uvd_ps = rps; 2670} 2671 2672static void kv_parse_pplib_clock_info(struct amdgpu_device *adev, 2673 struct amdgpu_ps *rps, int index, 2674 union pplib_clock_info *clock_info) 2675{ 2676 struct kv_power_info *pi = kv_get_pi(adev); 2677 struct kv_ps *ps = kv_get_ps(rps); 2678 struct kv_pl *pl = &ps->levels[index]; 2679 u32 sclk; 2680 2681 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); 2682 sclk |= clock_info->sumo.ucEngineClockHigh << 16; 2683 pl->sclk = sclk; 2684 pl->vddc_index = clock_info->sumo.vddcIndex; 2685 2686 ps->num_levels = index + 1; 2687 2688 if (pi->caps_sclk_ds) { 2689 pl->ds_divider_index = 5; 2690 pl->ss_divider_index = 5; 2691 } 2692} 2693 2694static int kv_parse_power_table(struct amdgpu_device *adev) 2695{ 2696 struct amdgpu_mode_info *mode_info = &adev->mode_info; 2697 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; 2698 union pplib_power_state *power_state; 2699 int i, j, k, non_clock_array_index, clock_array_index; 2700 union pplib_clock_info *clock_info; 2701 struct _StateArray *state_array; 2702 struct _ClockInfoArray *clock_info_array; 2703 struct _NonClockInfoArray *non_clock_info_array; 2704 union power_info *power_info; 2705 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); 2706 u16 data_offset; 2707 u8 frev, crev; 2708 u8 *power_state_offset; 2709 struct kv_ps *ps; 2710 2711 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 2712 &frev, &crev, &data_offset)) 2713 return -EINVAL; 2714 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 2715 2716 amdgpu_add_thermal_controller(adev); 2717 2718 state_array = (struct _StateArray *) 2719 (mode_info->atom_context->bios + data_offset + 2720 le16_to_cpu(power_info->pplib.usStateArrayOffset)); 2721 clock_info_array = (struct _ClockInfoArray *) 2722 (mode_info->atom_context->bios + data_offset + 2723 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); 2724 non_clock_info_array = (struct _NonClockInfoArray *) 2725 (mode_info->atom_context->bios + data_offset + 2726 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); 2727 2728 adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) * 2729 state_array->ucNumEntries, GFP_KERNEL); 2730 if (!adev->pm.dpm.ps) 2731 return -ENOMEM; 2732 power_state_offset = (u8 *)state_array->states; 2733 for (i = 0; i < state_array->ucNumEntries; i++) { 2734 u8 *idx; 2735 power_state = (union pplib_power_state *)power_state_offset; 2736 non_clock_array_index = power_state->v2.nonClockInfoIndex; 2737 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 2738 &non_clock_info_array->nonClockInfo[non_clock_array_index]; 2739 ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL); 2740 if (ps == NULL) { 2741 kfree(adev->pm.dpm.ps); 2742 return -ENOMEM; 2743 } 2744 adev->pm.dpm.ps[i].ps_priv = ps; 2745 k = 0; 2746 idx = (u8 *)&power_state->v2.clockInfoIndex[0]; 2747 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { 2748 clock_array_index = idx[j]; 2749 if (clock_array_index >= clock_info_array->ucNumEntries) 2750 continue; 2751 if (k >= SUMO_MAX_HARDWARE_POWERLEVELS) 2752 break; 2753 clock_info = (union pplib_clock_info *) 2754 ((u8 *)&clock_info_array->clockInfo[0] + 2755 (clock_array_index * clock_info_array->ucEntrySize)); 2756 kv_parse_pplib_clock_info(adev, 2757 &adev->pm.dpm.ps[i], k, 2758 clock_info); 2759 k++; 2760 } 2761 kv_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i], 2762 non_clock_info, 2763 non_clock_info_array->ucEntrySize); 2764 power_state_offset += 2 + power_state->v2.ucNumDPMLevels; 2765 } 2766 adev->pm.dpm.num_ps = state_array->ucNumEntries; 2767 2768 /* fill in the vce power states */ 2769 for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { 2770 u32 sclk; 2771 clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; 2772 clock_info = (union pplib_clock_info *) 2773 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; 2774 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); 2775 sclk |= clock_info->sumo.ucEngineClockHigh << 16; 2776 adev->pm.dpm.vce_states[i].sclk = sclk; 2777 adev->pm.dpm.vce_states[i].mclk = 0; 2778 } 2779 2780 return 0; 2781} 2782 2783static int kv_dpm_init(struct amdgpu_device *adev) 2784{ 2785 struct kv_power_info *pi; 2786 int ret, i; 2787 2788 pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL); 2789 if (pi == NULL) 2790 return -ENOMEM; 2791 adev->pm.dpm.priv = pi; 2792 2793 ret = amdgpu_get_platform_caps(adev); 2794 if (ret) 2795 return ret; 2796 2797 ret = amdgpu_parse_extended_power_table(adev); 2798 if (ret) 2799 return ret; 2800 2801 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) 2802 pi->at[i] = TRINITY_AT_DFLT; 2803 2804 pi->sram_end = SMC_RAM_END; 2805 2806 pi->enable_nb_dpm = true; 2807 2808 pi->caps_power_containment = true; 2809 pi->caps_cac = true; 2810 pi->enable_didt = false; 2811 if (pi->enable_didt) { 2812 pi->caps_sq_ramping = true; 2813 pi->caps_db_ramping = true; 2814 pi->caps_td_ramping = true; 2815 pi->caps_tcp_ramping = true; 2816 } 2817 2818 if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK) 2819 pi->caps_sclk_ds = true; 2820 else 2821 pi->caps_sclk_ds = false; 2822 2823 pi->enable_auto_thermal_throttling = true; 2824 pi->disable_nb_ps3_in_battery = false; 2825 if (amdgpu_bapm == 0) 2826 pi->bapm_enable = false; 2827 else 2828 pi->bapm_enable = true; 2829 pi->voltage_drop_t = 0; 2830 pi->caps_sclk_throttle_low_notification = false; 2831 pi->caps_fps = false; /* true? */ 2832 pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false; 2833 pi->caps_uvd_dpm = true; 2834 pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false; 2835 pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false; 2836 pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false; 2837 pi->caps_stable_p_state = false; 2838 2839 ret = kv_parse_sys_info_table(adev); 2840 if (ret) 2841 return ret; 2842 2843 kv_patch_voltage_values(adev); 2844 kv_construct_boot_state(adev); 2845 2846 ret = kv_parse_power_table(adev); 2847 if (ret) 2848 return ret; 2849 2850 pi->enable_dpm = true; 2851 2852 return 0; 2853} 2854 2855static void 2856kv_dpm_debugfs_print_current_performance_level(void *handle, 2857 struct seq_file *m) 2858{ 2859 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2860 struct kv_power_info *pi = kv_get_pi(adev); 2861 u32 current_index = 2862 (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & 2863 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> 2864 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; 2865 u32 sclk, tmp; 2866 u16 vddc; 2867 2868 if (current_index >= SMU__NUM_SCLK_DPM_STATE) { 2869 seq_printf(m, "invalid dpm profile %d\n", current_index); 2870 } else { 2871 sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency); 2872 tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) & 2873 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >> 2874 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT; 2875 vddc = kv_convert_8bit_index_to_voltage(adev, (u16)tmp); 2876 seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en"); 2877 seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en"); 2878 seq_printf(m, "power level %d sclk: %u vddc: %u\n", 2879 current_index, sclk, vddc); 2880 } 2881} 2882 2883static void 2884kv_dpm_print_power_state(void *handle, void *request_ps) 2885{ 2886 int i; 2887 struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps; 2888 struct kv_ps *ps = kv_get_ps(rps); 2889 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2890 2891 amdgpu_dpm_print_class_info(rps->class, rps->class2); 2892 amdgpu_dpm_print_cap_info(rps->caps); 2893 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); 2894 for (i = 0; i < ps->num_levels; i++) { 2895 struct kv_pl *pl = &ps->levels[i]; 2896 printk("\t\tpower level %d sclk: %u vddc: %u\n", 2897 i, pl->sclk, 2898 kv_convert_8bit_index_to_voltage(adev, pl->vddc_index)); 2899 } 2900 amdgpu_dpm_print_ps_status(adev, rps); 2901} 2902 2903static void kv_dpm_fini(struct amdgpu_device *adev) 2904{ 2905 int i; 2906 2907 for (i = 0; i < adev->pm.dpm.num_ps; i++) { 2908 kfree(adev->pm.dpm.ps[i].ps_priv); 2909 } 2910 kfree(adev->pm.dpm.ps); 2911 kfree(adev->pm.dpm.priv); 2912 amdgpu_free_extended_power_table(adev); 2913} 2914 2915static void kv_dpm_display_configuration_changed(void *handle) 2916{ 2917 2918} 2919 2920static u32 kv_dpm_get_sclk(void *handle, bool low) 2921{ 2922 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2923 struct kv_power_info *pi = kv_get_pi(adev); 2924 struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps); 2925 2926 if (low) 2927 return requested_state->levels[0].sclk; 2928 else 2929 return requested_state->levels[requested_state->num_levels - 1].sclk; 2930} 2931 2932static u32 kv_dpm_get_mclk(void *handle, bool low) 2933{ 2934 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2935 struct kv_power_info *pi = kv_get_pi(adev); 2936 2937 return pi->sys_info.bootup_uma_clk; 2938} 2939 2940/* get temperature in millidegrees */ 2941static int kv_dpm_get_temp(void *handle) 2942{ 2943 u32 temp; 2944 int actual_temp = 0; 2945 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2946 2947 temp = RREG32_SMC(0xC0300E0C); 2948 2949 if (temp) 2950 actual_temp = (temp / 8) - 49; 2951 else 2952 actual_temp = 0; 2953 2954 actual_temp = actual_temp * 1000; 2955 2956 return actual_temp; 2957} 2958 2959static int kv_dpm_early_init(void *handle) 2960{ 2961 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2962 2963 kv_dpm_set_irq_funcs(adev); 2964 2965 return 0; 2966} 2967 2968static int kv_dpm_late_init(void *handle) 2969{ 2970 /* powerdown unused blocks for now */ 2971 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2972 2973 if (!amdgpu_dpm) 2974 return 0; 2975 2976 kv_dpm_powergate_acp(adev, true); 2977 kv_dpm_powergate_samu(adev, true); 2978 2979 return 0; 2980} 2981 2982static int kv_dpm_sw_init(void *handle) 2983{ 2984 int ret; 2985 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2986 2987 ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230, 2988 &adev->pm.dpm.thermal.irq); 2989 if (ret) 2990 return ret; 2991 2992 ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231, 2993 &adev->pm.dpm.thermal.irq); 2994 if (ret) 2995 return ret; 2996 2997 /* default to balanced state */ 2998 adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; 2999 adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; 3000 adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO; 3001 adev->pm.default_sclk = adev->clock.default_sclk; 3002 adev->pm.default_mclk = adev->clock.default_mclk; 3003 adev->pm.current_sclk = adev->clock.default_sclk; 3004 adev->pm.current_mclk = adev->clock.default_mclk; 3005 adev->pm.int_thermal_type = THERMAL_TYPE_NONE; 3006 3007 if (amdgpu_dpm == 0) 3008 return 0; 3009 3010 INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler); 3011 mutex_lock(&adev->pm.mutex); 3012 ret = kv_dpm_init(adev); 3013 if (ret) 3014 goto dpm_failed; 3015 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; 3016 if (amdgpu_dpm == 1) 3017 amdgpu_pm_print_power_states(adev); 3018 mutex_unlock(&adev->pm.mutex); 3019 DRM_INFO("amdgpu: dpm initialized\n"); 3020 3021 return 0; 3022 3023dpm_failed: 3024 kv_dpm_fini(adev); 3025 mutex_unlock(&adev->pm.mutex); 3026 DRM_ERROR("amdgpu: dpm initialization failed\n"); 3027 return ret; 3028} 3029 3030static int kv_dpm_sw_fini(void *handle) 3031{ 3032 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3033 3034 flush_work(&adev->pm.dpm.thermal.work); 3035 3036 mutex_lock(&adev->pm.mutex); 3037 kv_dpm_fini(adev); 3038 mutex_unlock(&adev->pm.mutex); 3039 3040 return 0; 3041} 3042 3043static int kv_dpm_hw_init(void *handle) 3044{ 3045 int ret; 3046 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3047 3048 if (!amdgpu_dpm) 3049 return 0; 3050 3051 mutex_lock(&adev->pm.mutex); 3052 kv_dpm_setup_asic(adev); 3053 ret = kv_dpm_enable(adev); 3054 if (ret) 3055 adev->pm.dpm_enabled = false; 3056 else 3057 adev->pm.dpm_enabled = true; 3058 mutex_unlock(&adev->pm.mutex); 3059 3060 return ret; 3061} 3062 3063static int kv_dpm_hw_fini(void *handle) 3064{ 3065 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3066 3067 if (adev->pm.dpm_enabled) { 3068 mutex_lock(&adev->pm.mutex); 3069 kv_dpm_disable(adev); 3070 mutex_unlock(&adev->pm.mutex); 3071 } 3072 3073 return 0; 3074} 3075 3076static int kv_dpm_suspend(void *handle) 3077{ 3078 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3079 3080 if (adev->pm.dpm_enabled) { 3081 mutex_lock(&adev->pm.mutex); 3082 /* disable dpm */ 3083 kv_dpm_disable(adev); 3084 /* reset the power state */ 3085 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; 3086 mutex_unlock(&adev->pm.mutex); 3087 } 3088 return 0; 3089} 3090 3091static int kv_dpm_resume(void *handle) 3092{ 3093 int ret; 3094 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3095 3096 if (adev->pm.dpm_enabled) { 3097 /* asic init will reset to the boot state */ 3098 mutex_lock(&adev->pm.mutex); 3099 kv_dpm_setup_asic(adev); 3100 ret = kv_dpm_enable(adev); 3101 if (ret) 3102 adev->pm.dpm_enabled = false; 3103 else 3104 adev->pm.dpm_enabled = true; 3105 mutex_unlock(&adev->pm.mutex); 3106 if (adev->pm.dpm_enabled) 3107 amdgpu_pm_compute_clocks(adev); 3108 } 3109 return 0; 3110} 3111 3112static bool kv_dpm_is_idle(void *handle) 3113{ 3114 return true; 3115} 3116 3117static int kv_dpm_wait_for_idle(void *handle) 3118{ 3119 return 0; 3120} 3121 3122 3123static int kv_dpm_soft_reset(void *handle) 3124{ 3125 return 0; 3126} 3127 3128static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev, 3129 struct amdgpu_irq_src *src, 3130 unsigned type, 3131 enum amdgpu_interrupt_state state) 3132{ 3133 u32 cg_thermal_int; 3134 3135 switch (type) { 3136 case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH: 3137 switch (state) { 3138 case AMDGPU_IRQ_STATE_DISABLE: 3139 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 3140 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; 3141 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); 3142 break; 3143 case AMDGPU_IRQ_STATE_ENABLE: 3144 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 3145 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; 3146 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); 3147 break; 3148 default: 3149 break; 3150 } 3151 break; 3152 3153 case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW: 3154 switch (state) { 3155 case AMDGPU_IRQ_STATE_DISABLE: 3156 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 3157 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; 3158 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); 3159 break; 3160 case AMDGPU_IRQ_STATE_ENABLE: 3161 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 3162 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; 3163 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); 3164 break; 3165 default: 3166 break; 3167 } 3168 break; 3169 3170 default: 3171 break; 3172 } 3173 return 0; 3174} 3175 3176static int kv_dpm_process_interrupt(struct amdgpu_device *adev, 3177 struct amdgpu_irq_src *source, 3178 struct amdgpu_iv_entry *entry) 3179{ 3180 bool queue_thermal = false; 3181 3182 if (entry == NULL) 3183 return -EINVAL; 3184 3185 switch (entry->src_id) { 3186 case 230: /* thermal low to high */ 3187 DRM_DEBUG("IH: thermal low to high\n"); 3188 adev->pm.dpm.thermal.high_to_low = false; 3189 queue_thermal = true; 3190 break; 3191 case 231: /* thermal high to low */ 3192 DRM_DEBUG("IH: thermal high to low\n"); 3193 adev->pm.dpm.thermal.high_to_low = true; 3194 queue_thermal = true; 3195 break; 3196 default: 3197 break; 3198 } 3199 3200 if (queue_thermal) 3201 schedule_work(&adev->pm.dpm.thermal.work); 3202 3203 return 0; 3204} 3205 3206static int kv_dpm_set_clockgating_state(void *handle, 3207 enum amd_clockgating_state state) 3208{ 3209 return 0; 3210} 3211 3212static int kv_dpm_set_powergating_state(void *handle, 3213 enum amd_powergating_state state) 3214{ 3215 return 0; 3216} 3217 3218static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1, 3219 const struct kv_pl *kv_cpl2) 3220{ 3221 return ((kv_cpl1->sclk == kv_cpl2->sclk) && 3222 (kv_cpl1->vddc_index == kv_cpl2->vddc_index) && 3223 (kv_cpl1->ds_divider_index == kv_cpl2->ds_divider_index) && 3224 (kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state)); 3225} 3226 3227static int kv_check_state_equal(void *handle, 3228 void *current_ps, 3229 void *request_ps, 3230 bool *equal) 3231{ 3232 struct kv_ps *kv_cps; 3233 struct kv_ps *kv_rps; 3234 int i; 3235 struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps; 3236 struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps; 3237 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3238 3239 if (adev == NULL || cps == NULL || rps == NULL || equal == NULL) 3240 return -EINVAL; 3241 3242 kv_cps = kv_get_ps(cps); 3243 kv_rps = kv_get_ps(rps); 3244 3245 if (kv_cps == NULL) { 3246 *equal = false; 3247 return 0; 3248 } 3249 3250 if (kv_cps->num_levels != kv_rps->num_levels) { 3251 *equal = false; 3252 return 0; 3253 } 3254 3255 for (i = 0; i < kv_cps->num_levels; i++) { 3256 if (!kv_are_power_levels_equal(&(kv_cps->levels[i]), 3257 &(kv_rps->levels[i]))) { 3258 *equal = false; 3259 return 0; 3260 } 3261 } 3262 3263 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ 3264 *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk)); 3265 *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk)); 3266 3267 return 0; 3268} 3269 3270static int kv_dpm_read_sensor(void *handle, int idx, 3271 void *value, int *size) 3272{ 3273 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3274 struct kv_power_info *pi = kv_get_pi(adev); 3275 uint32_t sclk; 3276 u32 pl_index = 3277 (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & 3278 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> 3279 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; 3280 3281 /* size must be at least 4 bytes for all sensors */ 3282 if (*size < 4) 3283 return -EINVAL; 3284 3285 switch (idx) { 3286 case AMDGPU_PP_SENSOR_GFX_SCLK: 3287 if (pl_index < SMU__NUM_SCLK_DPM_STATE) { 3288 sclk = be32_to_cpu( 3289 pi->graphics_level[pl_index].SclkFrequency); 3290 *((uint32_t *)value) = sclk; 3291 *size = 4; 3292 return 0; 3293 } 3294 return -EINVAL; 3295 case AMDGPU_PP_SENSOR_GPU_TEMP: 3296 *((uint32_t *)value) = kv_dpm_get_temp(adev); 3297 *size = 4; 3298 return 0; 3299 default: 3300 return -EINVAL; 3301 } 3302} 3303 3304const struct amd_ip_funcs kv_dpm_ip_funcs = { 3305 .name = "kv_dpm", 3306 .early_init = kv_dpm_early_init, 3307 .late_init = kv_dpm_late_init, 3308 .sw_init = kv_dpm_sw_init, 3309 .sw_fini = kv_dpm_sw_fini, 3310 .hw_init = kv_dpm_hw_init, 3311 .hw_fini = kv_dpm_hw_fini, 3312 .suspend = kv_dpm_suspend, 3313 .resume = kv_dpm_resume, 3314 .is_idle = kv_dpm_is_idle, 3315 .wait_for_idle = kv_dpm_wait_for_idle, 3316 .soft_reset = kv_dpm_soft_reset, 3317 .set_clockgating_state = kv_dpm_set_clockgating_state, 3318 .set_powergating_state = kv_dpm_set_powergating_state, 3319}; 3320 3321const struct amd_pm_funcs kv_dpm_funcs = { 3322 .get_temperature = &kv_dpm_get_temp, 3323 .pre_set_power_state = &kv_dpm_pre_set_power_state, 3324 .set_power_state = &kv_dpm_set_power_state, 3325 .post_set_power_state = &kv_dpm_post_set_power_state, 3326 .display_configuration_changed = &kv_dpm_display_configuration_changed, 3327 .get_sclk = &kv_dpm_get_sclk, 3328 .get_mclk = &kv_dpm_get_mclk, 3329 .print_power_state = &kv_dpm_print_power_state, 3330 .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level, 3331 .force_performance_level = &kv_dpm_force_performance_level, 3332 .powergate_uvd = &kv_dpm_powergate_uvd, 3333 .enable_bapm = &kv_dpm_enable_bapm, 3334 .get_vce_clock_state = amdgpu_get_vce_clock_state, 3335 .check_state_equal = kv_check_state_equal, 3336 .read_sensor = &kv_dpm_read_sensor, 3337}; 3338 3339static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = { 3340 .set = kv_dpm_set_interrupt_state, 3341 .process = kv_dpm_process_interrupt, 3342}; 3343 3344static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev) 3345{ 3346 adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; 3347 adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs; 3348}