Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v4.6 3349 lines 94 kB view raw
1/* 2 * Copyright 2013 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24#include "drmP.h" 25#include "amdgpu.h" 26#include "amdgpu_pm.h" 27#include "cikd.h" 28#include "atom.h" 29#include "amdgpu_atombios.h" 30#include "amdgpu_dpm.h" 31#include "kv_dpm.h" 32#include "gfx_v7_0.h" 33#include <linux/seq_file.h> 34 35#include "smu/smu_7_0_0_d.h" 36#include "smu/smu_7_0_0_sh_mask.h" 37 38#include "gca/gfx_7_2_d.h" 39#include "gca/gfx_7_2_sh_mask.h" 40 41#define KV_MAX_DEEPSLEEP_DIVIDER_ID 5 42#define KV_MINIMUM_ENGINE_CLOCK 800 43#define SMC_RAM_END 0x40000 44 45static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev); 46static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev); 47static int kv_enable_nb_dpm(struct amdgpu_device *adev, 48 bool enable); 49static void kv_init_graphics_levels(struct amdgpu_device *adev); 50static int kv_calculate_ds_divider(struct amdgpu_device *adev); 51static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev); 52static int kv_calculate_dpm_settings(struct amdgpu_device *adev); 53static void kv_enable_new_levels(struct amdgpu_device *adev); 54static void kv_program_nbps_index_settings(struct amdgpu_device *adev, 55 struct amdgpu_ps *new_rps); 56static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level); 57static int kv_set_enabled_levels(struct amdgpu_device *adev); 58static int kv_force_dpm_highest(struct amdgpu_device *adev); 59static int kv_force_dpm_lowest(struct amdgpu_device *adev); 60static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, 61 struct amdgpu_ps *new_rps, 62 struct amdgpu_ps *old_rps); 63static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, 64 int min_temp, int max_temp); 65static int kv_init_fps_limits(struct amdgpu_device *adev); 66 67static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate); 68static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate); 69static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); 70static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); 71 72 73static u32 kv_convert_vid2_to_vid7(struct amdgpu_device *adev, 74 struct sumo_vid_mapping_table *vid_mapping_table, 75 u32 vid_2bit) 76{ 77 struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table = 78 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 79 u32 i; 80 81 if (vddc_sclk_table && vddc_sclk_table->count) { 82 if (vid_2bit < vddc_sclk_table->count) 83 
return vddc_sclk_table->entries[vid_2bit].v; 84 else 85 return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v; 86 } else { 87 for (i = 0; i < vid_mapping_table->num_entries; i++) { 88 if (vid_mapping_table->entries[i].vid_2bit == vid_2bit) 89 return vid_mapping_table->entries[i].vid_7bit; 90 } 91 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit; 92 } 93} 94 95static u32 kv_convert_vid7_to_vid2(struct amdgpu_device *adev, 96 struct sumo_vid_mapping_table *vid_mapping_table, 97 u32 vid_7bit) 98{ 99 struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table = 100 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 101 u32 i; 102 103 if (vddc_sclk_table && vddc_sclk_table->count) { 104 for (i = 0; i < vddc_sclk_table->count; i++) { 105 if (vddc_sclk_table->entries[i].v == vid_7bit) 106 return i; 107 } 108 return vddc_sclk_table->count - 1; 109 } else { 110 for (i = 0; i < vid_mapping_table->num_entries; i++) { 111 if (vid_mapping_table->entries[i].vid_7bit == vid_7bit) 112 return vid_mapping_table->entries[i].vid_2bit; 113 } 114 115 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit; 116 } 117} 118 119static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable) 120{ 121/* This bit selects who handles display phy powergating. 122 * Clear the bit to let atom handle it. 123 * Set it to let the driver handle it. 124 * For now we just let atom handle it. 125 */ 126#if 0 127 u32 v = RREG32(mmDOUT_SCRATCH3); 128 129 if (enable) 130 v |= 0x4; 131 else 132 v &= 0xFFFFFFFB; 133 134 WREG32(mmDOUT_SCRATCH3, v); 135#endif 136} 137 138static u32 sumo_get_sleep_divider_from_id(u32 id) 139{ 140 return 1 << id; 141} 142 143static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev, 144 struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table, 145 ATOM_AVAILABLE_SCLK_LIST *table) 146{ 147 u32 i; 148 u32 n = 0; 149 u32 prev_sclk = 0; 150 151 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { 152 if (table[i].ulSupportedSCLK > prev_sclk) { 153 sclk_voltage_mapping_table->entries[n].sclk_frequency = 154 table[i].ulSupportedSCLK; 155 sclk_voltage_mapping_table->entries[n].vid_2bit = 156 table[i].usVoltageIndex; 157 prev_sclk = table[i].ulSupportedSCLK; 158 n++; 159 } 160 } 161 162 sclk_voltage_mapping_table->num_max_dpm_entries = n; 163} 164 165static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev, 166 struct sumo_vid_mapping_table *vid_mapping_table, 167 ATOM_AVAILABLE_SCLK_LIST *table) 168{ 169 u32 i, j; 170 171 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { 172 if (table[i].ulSupportedSCLK != 0) { 173 vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit = 174 table[i].usVoltageID; 175 vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit = 176 table[i].usVoltageIndex; 177 } 178 } 179 180 for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) { 181 if (vid_mapping_table->entries[i].vid_7bit == 0) { 182 for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) { 183 if (vid_mapping_table->entries[j].vid_7bit != 0) { 184 vid_mapping_table->entries[i] = 185 vid_mapping_table->entries[j]; 186 vid_mapping_table->entries[j].vid_7bit = 0; 187 break; 188 } 189 } 190 191 if (j == SUMO_MAX_NUMBER_VOLTAGES) 192 break; 193 } 194 } 195 196 vid_mapping_table->num_entries = i; 197} 198 199static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] = 200{ 201 { 0, 4, 1 }, 202 { 1, 4, 1 }, 203 { 2, 5, 1 }, 204 { 3, 4, 2 }, 205 { 4, 1, 1 }, 206 { 5, 5, 2 }, 207 { 6, 6, 1 }, 208 { 7, 9, 2 
}, 209 { 0xffffffff } 210}; 211 212static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] = 213{ 214 { 0, 4, 1 }, 215 { 0xffffffff } 216}; 217 218static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] = 219{ 220 { 0, 4, 1 }, 221 { 0xffffffff } 222}; 223 224static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] = 225{ 226 { 0, 4, 1 }, 227 { 0xffffffff } 228}; 229 230static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] = 231{ 232 { 0, 4, 1 }, 233 { 0xffffffff } 234}; 235 236static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] = 237{ 238 { 0, 4, 1 }, 239 { 1, 4, 1 }, 240 { 2, 5, 1 }, 241 { 3, 4, 1 }, 242 { 4, 1, 1 }, 243 { 5, 5, 1 }, 244 { 6, 6, 1 }, 245 { 7, 9, 1 }, 246 { 8, 4, 1 }, 247 { 9, 2, 1 }, 248 { 10, 3, 1 }, 249 { 11, 6, 1 }, 250 { 12, 8, 2 }, 251 { 13, 1, 1 }, 252 { 14, 2, 1 }, 253 { 15, 3, 1 }, 254 { 16, 1, 1 }, 255 { 17, 4, 1 }, 256 { 18, 3, 1 }, 257 { 19, 1, 1 }, 258 { 20, 8, 1 }, 259 { 21, 5, 1 }, 260 { 22, 1, 1 }, 261 { 23, 1, 1 }, 262 { 24, 4, 1 }, 263 { 27, 6, 1 }, 264 { 28, 1, 1 }, 265 { 0xffffffff } 266}; 267 268static const struct kv_lcac_config_reg sx0_cac_config_reg[] = 269{ 270 { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 271}; 272 273static const struct kv_lcac_config_reg mc0_cac_config_reg[] = 274{ 275 { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 276}; 277 278static const struct kv_lcac_config_reg mc1_cac_config_reg[] = 279{ 280 { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 281}; 282 283static const struct kv_lcac_config_reg mc2_cac_config_reg[] = 284{ 285 { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 286}; 287 288static const struct kv_lcac_config_reg mc3_cac_config_reg[] = 289{ 290 { 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 291}; 292 293static const struct kv_lcac_config_reg cpl_cac_config_reg[] = 294{ 295 { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 296}; 297 298static const struct kv_pt_config_reg didt_config_kv[] = 299{ 300 { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 301 { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 302 { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 303 { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 304 { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 305 { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 306 { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 307 { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 308 { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 309 { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 310 { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 311 { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 312 { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, 313 { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, 314 { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, 315 { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 316 { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 317 { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 318 { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 319 { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 320 { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 321 { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 322 { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 323 { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 324 { 0x31, 0x00ff0000, 16, 0x0, 
KV_CONFIGREG_DIDT_IND }, 325 { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 326 { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 327 { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 328 { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 329 { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 330 { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, 331 { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, 332 { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, 333 { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 334 { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 335 { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 336 { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 337 { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 338 { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 339 { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 340 { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 341 { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 342 { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 343 { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 344 { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 345 { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 346 { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 347 { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 348 { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, 349 { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, 350 { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, 351 { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 352 { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 353 { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 354 { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 355 { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 356 { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 357 { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 358 { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 359 { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 360 { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 361 { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 362 { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 363 { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 364 { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 365 { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 366 { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, 367 { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, 368 { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, 369 { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 370 { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 371 { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 372 { 0xFFFFFFFF } 373}; 374 375static struct kv_ps *kv_get_ps(struct amdgpu_ps *rps) 376{ 377 struct kv_ps *ps = rps->ps_priv; 378 379 return ps; 380} 381 382static struct kv_power_info *kv_get_pi(struct amdgpu_device *adev) 383{ 384 struct kv_power_info *pi = adev->pm.dpm.priv; 385 386 return pi; 387} 388 389#if 0 390static void kv_program_local_cac_table(struct amdgpu_device *adev, 391 const struct kv_lcac_config_values *local_cac_table, 392 const struct kv_lcac_config_reg *local_cac_reg) 393{ 394 u32 i, count, data; 395 const struct kv_lcac_config_values *values = local_cac_table; 396 397 while (values->block_id != 0xffffffff) { 398 count = values->signal_id; 399 for (i = 0; i < count; i++) { 400 data = ((values->block_id << local_cac_reg->block_shift) & 401 local_cac_reg->block_mask); 402 
data |= ((i << local_cac_reg->signal_shift) & 403 local_cac_reg->signal_mask); 404 data |= ((values->t << local_cac_reg->t_shift) & 405 local_cac_reg->t_mask); 406 data |= ((1 << local_cac_reg->enable_shift) & 407 local_cac_reg->enable_mask); 408 WREG32_SMC(local_cac_reg->cntl, data); 409 } 410 values++; 411 } 412} 413#endif 414 415static int kv_program_pt_config_registers(struct amdgpu_device *adev, 416 const struct kv_pt_config_reg *cac_config_regs) 417{ 418 const struct kv_pt_config_reg *config_regs = cac_config_regs; 419 u32 data; 420 u32 cache = 0; 421 422 if (config_regs == NULL) 423 return -EINVAL; 424 425 while (config_regs->offset != 0xFFFFFFFF) { 426 if (config_regs->type == KV_CONFIGREG_CACHE) { 427 cache |= ((config_regs->value << config_regs->shift) & config_regs->mask); 428 } else { 429 switch (config_regs->type) { 430 case KV_CONFIGREG_SMC_IND: 431 data = RREG32_SMC(config_regs->offset); 432 break; 433 case KV_CONFIGREG_DIDT_IND: 434 data = RREG32_DIDT(config_regs->offset); 435 break; 436 default: 437 data = RREG32(config_regs->offset); 438 break; 439 } 440 441 data &= ~config_regs->mask; 442 data |= ((config_regs->value << config_regs->shift) & config_regs->mask); 443 data |= cache; 444 cache = 0; 445 446 switch (config_regs->type) { 447 case KV_CONFIGREG_SMC_IND: 448 WREG32_SMC(config_regs->offset, data); 449 break; 450 case KV_CONFIGREG_DIDT_IND: 451 WREG32_DIDT(config_regs->offset, data); 452 break; 453 default: 454 WREG32(config_regs->offset, data); 455 break; 456 } 457 } 458 config_regs++; 459 } 460 461 return 0; 462} 463 464static void kv_do_enable_didt(struct amdgpu_device *adev, bool enable) 465{ 466 struct kv_power_info *pi = kv_get_pi(adev); 467 u32 data; 468 469 if (pi->caps_sq_ramping) { 470 data = RREG32_DIDT(ixDIDT_SQ_CTRL0); 471 if (enable) 472 data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; 473 else 474 data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; 475 WREG32_DIDT(ixDIDT_SQ_CTRL0, data); 476 } 477 478 if (pi->caps_db_ramping) { 479 data = RREG32_DIDT(ixDIDT_DB_CTRL0); 480 if (enable) 481 data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; 482 else 483 data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; 484 WREG32_DIDT(ixDIDT_DB_CTRL0, data); 485 } 486 487 if (pi->caps_td_ramping) { 488 data = RREG32_DIDT(ixDIDT_TD_CTRL0); 489 if (enable) 490 data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; 491 else 492 data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; 493 WREG32_DIDT(ixDIDT_TD_CTRL0, data); 494 } 495 496 if (pi->caps_tcp_ramping) { 497 data = RREG32_DIDT(ixDIDT_TCP_CTRL0); 498 if (enable) 499 data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; 500 else 501 data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; 502 WREG32_DIDT(ixDIDT_TCP_CTRL0, data); 503 } 504} 505 506static int kv_enable_didt(struct amdgpu_device *adev, bool enable) 507{ 508 struct kv_power_info *pi = kv_get_pi(adev); 509 int ret; 510 511 if (pi->caps_sq_ramping || 512 pi->caps_db_ramping || 513 pi->caps_td_ramping || 514 pi->caps_tcp_ramping) { 515 gfx_v7_0_enter_rlc_safe_mode(adev); 516 517 if (enable) { 518 ret = kv_program_pt_config_registers(adev, didt_config_kv); 519 if (ret) { 520 gfx_v7_0_exit_rlc_safe_mode(adev); 521 return ret; 522 } 523 } 524 525 kv_do_enable_didt(adev, enable); 526 527 gfx_v7_0_exit_rlc_safe_mode(adev); 528 } 529 530 return 0; 531} 532 533#if 0 534static void kv_initialize_hardware_cac_manager(struct amdgpu_device *adev) 535{ 536 struct kv_power_info *pi = kv_get_pi(adev); 537 538 if (pi->caps_cac) { 539 WREG32_SMC(ixLCAC_SX0_OVR_SEL, 0); 540 WREG32_SMC(ixLCAC_SX0_OVR_VAL, 0); 541 kv_program_local_cac_table(adev, 
sx_local_cac_cfg_kv, sx0_cac_config_reg); 542 543 WREG32_SMC(ixLCAC_MC0_OVR_SEL, 0); 544 WREG32_SMC(ixLCAC_MC0_OVR_VAL, 0); 545 kv_program_local_cac_table(adev, mc0_local_cac_cfg_kv, mc0_cac_config_reg); 546 547 WREG32_SMC(ixLCAC_MC1_OVR_SEL, 0); 548 WREG32_SMC(ixLCAC_MC1_OVR_VAL, 0); 549 kv_program_local_cac_table(adev, mc1_local_cac_cfg_kv, mc1_cac_config_reg); 550 551 WREG32_SMC(ixLCAC_MC2_OVR_SEL, 0); 552 WREG32_SMC(ixLCAC_MC2_OVR_VAL, 0); 553 kv_program_local_cac_table(adev, mc2_local_cac_cfg_kv, mc2_cac_config_reg); 554 555 WREG32_SMC(ixLCAC_MC3_OVR_SEL, 0); 556 WREG32_SMC(ixLCAC_MC3_OVR_VAL, 0); 557 kv_program_local_cac_table(adev, mc3_local_cac_cfg_kv, mc3_cac_config_reg); 558 559 WREG32_SMC(ixLCAC_CPL_OVR_SEL, 0); 560 WREG32_SMC(ixLCAC_CPL_OVR_VAL, 0); 561 kv_program_local_cac_table(adev, cpl_local_cac_cfg_kv, cpl_cac_config_reg); 562 } 563} 564#endif 565 566static int kv_enable_smc_cac(struct amdgpu_device *adev, bool enable) 567{ 568 struct kv_power_info *pi = kv_get_pi(adev); 569 int ret = 0; 570 571 if (pi->caps_cac) { 572 if (enable) { 573 ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableCac); 574 if (ret) 575 pi->cac_enabled = false; 576 else 577 pi->cac_enabled = true; 578 } else if (pi->cac_enabled) { 579 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableCac); 580 pi->cac_enabled = false; 581 } 582 } 583 584 return ret; 585} 586 587static int kv_process_firmware_header(struct amdgpu_device *adev) 588{ 589 struct kv_power_info *pi = kv_get_pi(adev); 590 u32 tmp; 591 int ret; 592 593 ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION + 594 offsetof(SMU7_Firmware_Header, DpmTable), 595 &tmp, pi->sram_end); 596 597 if (ret == 0) 598 pi->dpm_table_start = tmp; 599 600 ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION + 601 offsetof(SMU7_Firmware_Header, SoftRegisters), 602 &tmp, pi->sram_end); 603 604 if (ret == 0) 605 pi->soft_regs_start = tmp; 606 607 return ret; 608} 609 610static int kv_enable_dpm_voltage_scaling(struct amdgpu_device *adev) 611{ 612 struct kv_power_info *pi = kv_get_pi(adev); 613 int ret; 614 615 pi->graphics_voltage_change_enable = 1; 616 617 ret = amdgpu_kv_copy_bytes_to_smc(adev, 618 pi->dpm_table_start + 619 offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable), 620 &pi->graphics_voltage_change_enable, 621 sizeof(u8), pi->sram_end); 622 623 return ret; 624} 625 626static int kv_set_dpm_interval(struct amdgpu_device *adev) 627{ 628 struct kv_power_info *pi = kv_get_pi(adev); 629 int ret; 630 631 pi->graphics_interval = 1; 632 633 ret = amdgpu_kv_copy_bytes_to_smc(adev, 634 pi->dpm_table_start + 635 offsetof(SMU7_Fusion_DpmTable, GraphicsInterval), 636 &pi->graphics_interval, 637 sizeof(u8), pi->sram_end); 638 639 return ret; 640} 641 642static int kv_set_dpm_boot_state(struct amdgpu_device *adev) 643{ 644 struct kv_power_info *pi = kv_get_pi(adev); 645 int ret; 646 647 ret = amdgpu_kv_copy_bytes_to_smc(adev, 648 pi->dpm_table_start + 649 offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel), 650 &pi->graphics_boot_level, 651 sizeof(u8), pi->sram_end); 652 653 return ret; 654} 655 656static void kv_program_vc(struct amdgpu_device *adev) 657{ 658 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0x3FFFC100); 659} 660 661static void kv_clear_vc(struct amdgpu_device *adev) 662{ 663 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0); 664} 665 666static int kv_set_divider_value(struct amdgpu_device *adev, 667 u32 index, u32 sclk) 668{ 669 struct kv_power_info *pi = kv_get_pi(adev); 670 struct atom_clock_dividers dividers; 671 
int ret; 672 673 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 674 sclk, false, &dividers); 675 if (ret) 676 return ret; 677 678 pi->graphics_level[index].SclkDid = (u8)dividers.post_div; 679 pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk); 680 681 return 0; 682} 683 684static u16 kv_convert_8bit_index_to_voltage(struct amdgpu_device *adev, 685 u16 voltage) 686{ 687 return 6200 - (voltage * 25); 688} 689 690static u16 kv_convert_2bit_index_to_voltage(struct amdgpu_device *adev, 691 u32 vid_2bit) 692{ 693 struct kv_power_info *pi = kv_get_pi(adev); 694 u32 vid_8bit = kv_convert_vid2_to_vid7(adev, 695 &pi->sys_info.vid_mapping_table, 696 vid_2bit); 697 698 return kv_convert_8bit_index_to_voltage(adev, (u16)vid_8bit); 699} 700 701 702static int kv_set_vid(struct amdgpu_device *adev, u32 index, u32 vid) 703{ 704 struct kv_power_info *pi = kv_get_pi(adev); 705 706 pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t; 707 pi->graphics_level[index].MinVddNb = 708 cpu_to_be32(kv_convert_2bit_index_to_voltage(adev, vid)); 709 710 return 0; 711} 712 713static int kv_set_at(struct amdgpu_device *adev, u32 index, u32 at) 714{ 715 struct kv_power_info *pi = kv_get_pi(adev); 716 717 pi->graphics_level[index].AT = cpu_to_be16((u16)at); 718 719 return 0; 720} 721 722static void kv_dpm_power_level_enable(struct amdgpu_device *adev, 723 u32 index, bool enable) 724{ 725 struct kv_power_info *pi = kv_get_pi(adev); 726 727 pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0; 728} 729 730static void kv_start_dpm(struct amdgpu_device *adev) 731{ 732 u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT); 733 734 tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK; 735 WREG32_SMC(ixGENERAL_PWRMGT, tmp); 736 737 amdgpu_kv_smc_dpm_enable(adev, true); 738} 739 740static void kv_stop_dpm(struct amdgpu_device *adev) 741{ 742 amdgpu_kv_smc_dpm_enable(adev, false); 743} 744 745static void kv_start_am(struct amdgpu_device *adev) 746{ 747 u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL); 748 749 sclk_pwrmgt_cntl &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | 750 SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); 751 sclk_pwrmgt_cntl |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK; 752 753 WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl); 754} 755 756static void kv_reset_am(struct amdgpu_device *adev) 757{ 758 u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL); 759 760 sclk_pwrmgt_cntl |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | 761 SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); 762 763 WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl); 764} 765 766static int kv_freeze_sclk_dpm(struct amdgpu_device *adev, bool freeze) 767{ 768 return amdgpu_kv_notify_message_to_smu(adev, freeze ? 
769 PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel); 770} 771 772static int kv_force_lowest_valid(struct amdgpu_device *adev) 773{ 774 return kv_force_dpm_lowest(adev); 775} 776 777static int kv_unforce_levels(struct amdgpu_device *adev) 778{ 779 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 780 return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NoForcedLevel); 781 else 782 return kv_set_enabled_levels(adev); 783} 784 785static int kv_update_sclk_t(struct amdgpu_device *adev) 786{ 787 struct kv_power_info *pi = kv_get_pi(adev); 788 u32 low_sclk_interrupt_t = 0; 789 int ret = 0; 790 791 if (pi->caps_sclk_throttle_low_notification) { 792 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t); 793 794 ret = amdgpu_kv_copy_bytes_to_smc(adev, 795 pi->dpm_table_start + 796 offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT), 797 (u8 *)&low_sclk_interrupt_t, 798 sizeof(u32), pi->sram_end); 799 } 800 return ret; 801} 802 803static int kv_program_bootup_state(struct amdgpu_device *adev) 804{ 805 struct kv_power_info *pi = kv_get_pi(adev); 806 u32 i; 807 struct amdgpu_clock_voltage_dependency_table *table = 808 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 809 810 if (table && table->count) { 811 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 812 if (table->entries[i].clk == pi->boot_pl.sclk) 813 break; 814 } 815 816 pi->graphics_boot_level = (u8)i; 817 kv_dpm_power_level_enable(adev, i, true); 818 } else { 819 struct sumo_sclk_voltage_mapping_table *table = 820 &pi->sys_info.sclk_voltage_mapping_table; 821 822 if (table->num_max_dpm_entries == 0) 823 return -EINVAL; 824 825 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 826 if (table->entries[i].sclk_frequency == pi->boot_pl.sclk) 827 break; 828 } 829 830 pi->graphics_boot_level = (u8)i; 831 kv_dpm_power_level_enable(adev, i, true); 832 } 833 return 0; 834} 835 836static int kv_enable_auto_thermal_throttling(struct amdgpu_device *adev) 837{ 838 struct kv_power_info *pi = kv_get_pi(adev); 839 int ret; 840 841 pi->graphics_therm_throttle_enable = 1; 842 843 ret = amdgpu_kv_copy_bytes_to_smc(adev, 844 pi->dpm_table_start + 845 offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable), 846 &pi->graphics_therm_throttle_enable, 847 sizeof(u8), pi->sram_end); 848 849 return ret; 850} 851 852static int kv_upload_dpm_settings(struct amdgpu_device *adev) 853{ 854 struct kv_power_info *pi = kv_get_pi(adev); 855 int ret; 856 857 ret = amdgpu_kv_copy_bytes_to_smc(adev, 858 pi->dpm_table_start + 859 offsetof(SMU7_Fusion_DpmTable, GraphicsLevel), 860 (u8 *)&pi->graphics_level, 861 sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS, 862 pi->sram_end); 863 864 if (ret) 865 return ret; 866 867 ret = amdgpu_kv_copy_bytes_to_smc(adev, 868 pi->dpm_table_start + 869 offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount), 870 &pi->graphics_dpm_level_count, 871 sizeof(u8), pi->sram_end); 872 873 return ret; 874} 875 876static u32 kv_get_clock_difference(u32 a, u32 b) 877{ 878 return (a >= b) ? 
a - b : b - a; 879} 880 881static u32 kv_get_clk_bypass(struct amdgpu_device *adev, u32 clk) 882{ 883 struct kv_power_info *pi = kv_get_pi(adev); 884 u32 value; 885 886 if (pi->caps_enable_dfs_bypass) { 887 if (kv_get_clock_difference(clk, 40000) < 200) 888 value = 3; 889 else if (kv_get_clock_difference(clk, 30000) < 200) 890 value = 2; 891 else if (kv_get_clock_difference(clk, 20000) < 200) 892 value = 7; 893 else if (kv_get_clock_difference(clk, 15000) < 200) 894 value = 6; 895 else if (kv_get_clock_difference(clk, 10000) < 200) 896 value = 8; 897 else 898 value = 0; 899 } else { 900 value = 0; 901 } 902 903 return value; 904} 905 906static int kv_populate_uvd_table(struct amdgpu_device *adev) 907{ 908 struct kv_power_info *pi = kv_get_pi(adev); 909 struct amdgpu_uvd_clock_voltage_dependency_table *table = 910 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 911 struct atom_clock_dividers dividers; 912 int ret; 913 u32 i; 914 915 if (table == NULL || table->count == 0) 916 return 0; 917 918 pi->uvd_level_count = 0; 919 for (i = 0; i < table->count; i++) { 920 if (pi->high_voltage_t && 921 (pi->high_voltage_t < table->entries[i].v)) 922 break; 923 924 pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk); 925 pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk); 926 pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v); 927 928 pi->uvd_level[i].VClkBypassCntl = 929 (u8)kv_get_clk_bypass(adev, table->entries[i].vclk); 930 pi->uvd_level[i].DClkBypassCntl = 931 (u8)kv_get_clk_bypass(adev, table->entries[i].dclk); 932 933 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 934 table->entries[i].vclk, false, &dividers); 935 if (ret) 936 return ret; 937 pi->uvd_level[i].VclkDivider = (u8)dividers.post_div; 938 939 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 940 table->entries[i].dclk, false, &dividers); 941 if (ret) 942 return ret; 943 pi->uvd_level[i].DclkDivider = (u8)dividers.post_div; 944 945 pi->uvd_level_count++; 946 } 947 948 ret = amdgpu_kv_copy_bytes_to_smc(adev, 949 pi->dpm_table_start + 950 offsetof(SMU7_Fusion_DpmTable, UvdLevelCount), 951 (u8 *)&pi->uvd_level_count, 952 sizeof(u8), pi->sram_end); 953 if (ret) 954 return ret; 955 956 pi->uvd_interval = 1; 957 958 ret = amdgpu_kv_copy_bytes_to_smc(adev, 959 pi->dpm_table_start + 960 offsetof(SMU7_Fusion_DpmTable, UVDInterval), 961 &pi->uvd_interval, 962 sizeof(u8), pi->sram_end); 963 if (ret) 964 return ret; 965 966 ret = amdgpu_kv_copy_bytes_to_smc(adev, 967 pi->dpm_table_start + 968 offsetof(SMU7_Fusion_DpmTable, UvdLevel), 969 (u8 *)&pi->uvd_level, 970 sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD, 971 pi->sram_end); 972 973 return ret; 974 975} 976 977static int kv_populate_vce_table(struct amdgpu_device *adev) 978{ 979 struct kv_power_info *pi = kv_get_pi(adev); 980 int ret; 981 u32 i; 982 struct amdgpu_vce_clock_voltage_dependency_table *table = 983 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 984 struct atom_clock_dividers dividers; 985 986 if (table == NULL || table->count == 0) 987 return 0; 988 989 pi->vce_level_count = 0; 990 for (i = 0; i < table->count; i++) { 991 if (pi->high_voltage_t && 992 pi->high_voltage_t < table->entries[i].v) 993 break; 994 995 pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk); 996 pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); 997 998 pi->vce_level[i].ClkBypassCntl = 999 (u8)kv_get_clk_bypass(adev, table->entries[i].evclk); 1000 1001 ret = 
amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 1002 table->entries[i].evclk, false, &dividers); 1003 if (ret) 1004 return ret; 1005 pi->vce_level[i].Divider = (u8)dividers.post_div; 1006 1007 pi->vce_level_count++; 1008 } 1009 1010 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1011 pi->dpm_table_start + 1012 offsetof(SMU7_Fusion_DpmTable, VceLevelCount), 1013 (u8 *)&pi->vce_level_count, 1014 sizeof(u8), 1015 pi->sram_end); 1016 if (ret) 1017 return ret; 1018 1019 pi->vce_interval = 1; 1020 1021 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1022 pi->dpm_table_start + 1023 offsetof(SMU7_Fusion_DpmTable, VCEInterval), 1024 (u8 *)&pi->vce_interval, 1025 sizeof(u8), 1026 pi->sram_end); 1027 if (ret) 1028 return ret; 1029 1030 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1031 pi->dpm_table_start + 1032 offsetof(SMU7_Fusion_DpmTable, VceLevel), 1033 (u8 *)&pi->vce_level, 1034 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE, 1035 pi->sram_end); 1036 1037 return ret; 1038} 1039 1040static int kv_populate_samu_table(struct amdgpu_device *adev) 1041{ 1042 struct kv_power_info *pi = kv_get_pi(adev); 1043 struct amdgpu_clock_voltage_dependency_table *table = 1044 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; 1045 struct atom_clock_dividers dividers; 1046 int ret; 1047 u32 i; 1048 1049 if (table == NULL || table->count == 0) 1050 return 0; 1051 1052 pi->samu_level_count = 0; 1053 for (i = 0; i < table->count; i++) { 1054 if (pi->high_voltage_t && 1055 pi->high_voltage_t < table->entries[i].v) 1056 break; 1057 1058 pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk); 1059 pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); 1060 1061 pi->samu_level[i].ClkBypassCntl = 1062 (u8)kv_get_clk_bypass(adev, table->entries[i].clk); 1063 1064 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 1065 table->entries[i].clk, false, &dividers); 1066 if (ret) 1067 return ret; 1068 pi->samu_level[i].Divider = (u8)dividers.post_div; 1069 1070 pi->samu_level_count++; 1071 } 1072 1073 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1074 pi->dpm_table_start + 1075 offsetof(SMU7_Fusion_DpmTable, SamuLevelCount), 1076 (u8 *)&pi->samu_level_count, 1077 sizeof(u8), 1078 pi->sram_end); 1079 if (ret) 1080 return ret; 1081 1082 pi->samu_interval = 1; 1083 1084 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1085 pi->dpm_table_start + 1086 offsetof(SMU7_Fusion_DpmTable, SAMUInterval), 1087 (u8 *)&pi->samu_interval, 1088 sizeof(u8), 1089 pi->sram_end); 1090 if (ret) 1091 return ret; 1092 1093 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1094 pi->dpm_table_start + 1095 offsetof(SMU7_Fusion_DpmTable, SamuLevel), 1096 (u8 *)&pi->samu_level, 1097 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU, 1098 pi->sram_end); 1099 if (ret) 1100 return ret; 1101 1102 return ret; 1103} 1104 1105 1106static int kv_populate_acp_table(struct amdgpu_device *adev) 1107{ 1108 struct kv_power_info *pi = kv_get_pi(adev); 1109 struct amdgpu_clock_voltage_dependency_table *table = 1110 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 1111 struct atom_clock_dividers dividers; 1112 int ret; 1113 u32 i; 1114 1115 if (table == NULL || table->count == 0) 1116 return 0; 1117 1118 pi->acp_level_count = 0; 1119 for (i = 0; i < table->count; i++) { 1120 pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk); 1121 pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); 1122 1123 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 1124 table->entries[i].clk, false, 
&dividers); 1125 if (ret) 1126 return ret; 1127 pi->acp_level[i].Divider = (u8)dividers.post_div; 1128 1129 pi->acp_level_count++; 1130 } 1131 1132 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1133 pi->dpm_table_start + 1134 offsetof(SMU7_Fusion_DpmTable, AcpLevelCount), 1135 (u8 *)&pi->acp_level_count, 1136 sizeof(u8), 1137 pi->sram_end); 1138 if (ret) 1139 return ret; 1140 1141 pi->acp_interval = 1; 1142 1143 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1144 pi->dpm_table_start + 1145 offsetof(SMU7_Fusion_DpmTable, ACPInterval), 1146 (u8 *)&pi->acp_interval, 1147 sizeof(u8), 1148 pi->sram_end); 1149 if (ret) 1150 return ret; 1151 1152 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1153 pi->dpm_table_start + 1154 offsetof(SMU7_Fusion_DpmTable, AcpLevel), 1155 (u8 *)&pi->acp_level, 1156 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP, 1157 pi->sram_end); 1158 if (ret) 1159 return ret; 1160 1161 return ret; 1162} 1163 1164static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev) 1165{ 1166 struct kv_power_info *pi = kv_get_pi(adev); 1167 u32 i; 1168 struct amdgpu_clock_voltage_dependency_table *table = 1169 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 1170 1171 if (table && table->count) { 1172 for (i = 0; i < pi->graphics_dpm_level_count; i++) { 1173 if (pi->caps_enable_dfs_bypass) { 1174 if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200) 1175 pi->graphics_level[i].ClkBypassCntl = 3; 1176 else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200) 1177 pi->graphics_level[i].ClkBypassCntl = 2; 1178 else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200) 1179 pi->graphics_level[i].ClkBypassCntl = 7; 1180 else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200) 1181 pi->graphics_level[i].ClkBypassCntl = 6; 1182 else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200) 1183 pi->graphics_level[i].ClkBypassCntl = 8; 1184 else 1185 pi->graphics_level[i].ClkBypassCntl = 0; 1186 } else { 1187 pi->graphics_level[i].ClkBypassCntl = 0; 1188 } 1189 } 1190 } else { 1191 struct sumo_sclk_voltage_mapping_table *table = 1192 &pi->sys_info.sclk_voltage_mapping_table; 1193 for (i = 0; i < pi->graphics_dpm_level_count; i++) { 1194 if (pi->caps_enable_dfs_bypass) { 1195 if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200) 1196 pi->graphics_level[i].ClkBypassCntl = 3; 1197 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200) 1198 pi->graphics_level[i].ClkBypassCntl = 2; 1199 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200) 1200 pi->graphics_level[i].ClkBypassCntl = 7; 1201 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200) 1202 pi->graphics_level[i].ClkBypassCntl = 6; 1203 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200) 1204 pi->graphics_level[i].ClkBypassCntl = 8; 1205 else 1206 pi->graphics_level[i].ClkBypassCntl = 0; 1207 } else { 1208 pi->graphics_level[i].ClkBypassCntl = 0; 1209 } 1210 } 1211 } 1212} 1213 1214static int kv_enable_ulv(struct amdgpu_device *adev, bool enable) 1215{ 1216 return amdgpu_kv_notify_message_to_smu(adev, enable ? 
1217 PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV); 1218} 1219 1220static void kv_reset_acp_boot_level(struct amdgpu_device *adev) 1221{ 1222 struct kv_power_info *pi = kv_get_pi(adev); 1223 1224 pi->acp_boot_level = 0xff; 1225} 1226 1227static void kv_update_current_ps(struct amdgpu_device *adev, 1228 struct amdgpu_ps *rps) 1229{ 1230 struct kv_ps *new_ps = kv_get_ps(rps); 1231 struct kv_power_info *pi = kv_get_pi(adev); 1232 1233 pi->current_rps = *rps; 1234 pi->current_ps = *new_ps; 1235 pi->current_rps.ps_priv = &pi->current_ps; 1236} 1237 1238static void kv_update_requested_ps(struct amdgpu_device *adev, 1239 struct amdgpu_ps *rps) 1240{ 1241 struct kv_ps *new_ps = kv_get_ps(rps); 1242 struct kv_power_info *pi = kv_get_pi(adev); 1243 1244 pi->requested_rps = *rps; 1245 pi->requested_ps = *new_ps; 1246 pi->requested_rps.ps_priv = &pi->requested_ps; 1247} 1248 1249static void kv_dpm_enable_bapm(struct amdgpu_device *adev, bool enable) 1250{ 1251 struct kv_power_info *pi = kv_get_pi(adev); 1252 int ret; 1253 1254 if (pi->bapm_enable) { 1255 ret = amdgpu_kv_smc_bapm_enable(adev, enable); 1256 if (ret) 1257 DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); 1258 } 1259} 1260 1261static int kv_dpm_enable(struct amdgpu_device *adev) 1262{ 1263 struct kv_power_info *pi = kv_get_pi(adev); 1264 int ret; 1265 1266 ret = kv_process_firmware_header(adev); 1267 if (ret) { 1268 DRM_ERROR("kv_process_firmware_header failed\n"); 1269 return ret; 1270 } 1271 kv_init_fps_limits(adev); 1272 kv_init_graphics_levels(adev); 1273 ret = kv_program_bootup_state(adev); 1274 if (ret) { 1275 DRM_ERROR("kv_program_bootup_state failed\n"); 1276 return ret; 1277 } 1278 kv_calculate_dfs_bypass_settings(adev); 1279 ret = kv_upload_dpm_settings(adev); 1280 if (ret) { 1281 DRM_ERROR("kv_upload_dpm_settings failed\n"); 1282 return ret; 1283 } 1284 ret = kv_populate_uvd_table(adev); 1285 if (ret) { 1286 DRM_ERROR("kv_populate_uvd_table failed\n"); 1287 return ret; 1288 } 1289 ret = kv_populate_vce_table(adev); 1290 if (ret) { 1291 DRM_ERROR("kv_populate_vce_table failed\n"); 1292 return ret; 1293 } 1294 ret = kv_populate_samu_table(adev); 1295 if (ret) { 1296 DRM_ERROR("kv_populate_samu_table failed\n"); 1297 return ret; 1298 } 1299 ret = kv_populate_acp_table(adev); 1300 if (ret) { 1301 DRM_ERROR("kv_populate_acp_table failed\n"); 1302 return ret; 1303 } 1304 kv_program_vc(adev); 1305#if 0 1306 kv_initialize_hardware_cac_manager(adev); 1307#endif 1308 kv_start_am(adev); 1309 if (pi->enable_auto_thermal_throttling) { 1310 ret = kv_enable_auto_thermal_throttling(adev); 1311 if (ret) { 1312 DRM_ERROR("kv_enable_auto_thermal_throttling failed\n"); 1313 return ret; 1314 } 1315 } 1316 ret = kv_enable_dpm_voltage_scaling(adev); 1317 if (ret) { 1318 DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n"); 1319 return ret; 1320 } 1321 ret = kv_set_dpm_interval(adev); 1322 if (ret) { 1323 DRM_ERROR("kv_set_dpm_interval failed\n"); 1324 return ret; 1325 } 1326 ret = kv_set_dpm_boot_state(adev); 1327 if (ret) { 1328 DRM_ERROR("kv_set_dpm_boot_state failed\n"); 1329 return ret; 1330 } 1331 ret = kv_enable_ulv(adev, true); 1332 if (ret) { 1333 DRM_ERROR("kv_enable_ulv failed\n"); 1334 return ret; 1335 } 1336 kv_start_dpm(adev); 1337 ret = kv_enable_didt(adev, true); 1338 if (ret) { 1339 DRM_ERROR("kv_enable_didt failed\n"); 1340 return ret; 1341 } 1342 ret = kv_enable_smc_cac(adev, true); 1343 if (ret) { 1344 DRM_ERROR("kv_enable_smc_cac failed\n"); 1345 return ret; 1346 } 1347 1348 kv_reset_acp_boot_level(adev); 1349 1350 ret = 
amdgpu_kv_smc_bapm_enable(adev, false); 1351 if (ret) { 1352 DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); 1353 return ret; 1354 } 1355 1356 kv_update_current_ps(adev, adev->pm.dpm.boot_ps); 1357 1358 if (adev->irq.installed && 1359 amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { 1360 ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); 1361 if (ret) { 1362 DRM_ERROR("kv_set_thermal_temperature_range failed\n"); 1363 return ret; 1364 } 1365 amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, 1366 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); 1367 amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, 1368 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); 1369 } 1370 1371 return ret; 1372} 1373 1374static void kv_dpm_disable(struct amdgpu_device *adev) 1375{ 1376 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, 1377 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); 1378 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, 1379 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); 1380 1381 amdgpu_kv_smc_bapm_enable(adev, false); 1382 1383 if (adev->asic_type == CHIP_MULLINS) 1384 kv_enable_nb_dpm(adev, false); 1385 1386 /* powerup blocks */ 1387 kv_dpm_powergate_acp(adev, false); 1388 kv_dpm_powergate_samu(adev, false); 1389 kv_dpm_powergate_vce(adev, false); 1390 kv_dpm_powergate_uvd(adev, false); 1391 1392 kv_enable_smc_cac(adev, false); 1393 kv_enable_didt(adev, false); 1394 kv_clear_vc(adev); 1395 kv_stop_dpm(adev); 1396 kv_enable_ulv(adev, false); 1397 kv_reset_am(adev); 1398 1399 kv_update_current_ps(adev, adev->pm.dpm.boot_ps); 1400} 1401 1402#if 0 1403static int kv_write_smc_soft_register(struct amdgpu_device *adev, 1404 u16 reg_offset, u32 value) 1405{ 1406 struct kv_power_info *pi = kv_get_pi(adev); 1407 1408 return amdgpu_kv_copy_bytes_to_smc(adev, pi->soft_regs_start + reg_offset, 1409 (u8 *)&value, sizeof(u16), pi->sram_end); 1410} 1411 1412static int kv_read_smc_soft_register(struct amdgpu_device *adev, 1413 u16 reg_offset, u32 *value) 1414{ 1415 struct kv_power_info *pi = kv_get_pi(adev); 1416 1417 return amdgpu_kv_read_smc_sram_dword(adev, pi->soft_regs_start + reg_offset, 1418 value, pi->sram_end); 1419} 1420#endif 1421 1422static void kv_init_sclk_t(struct amdgpu_device *adev) 1423{ 1424 struct kv_power_info *pi = kv_get_pi(adev); 1425 1426 pi->low_sclk_interrupt_t = 0; 1427} 1428 1429static int kv_init_fps_limits(struct amdgpu_device *adev) 1430{ 1431 struct kv_power_info *pi = kv_get_pi(adev); 1432 int ret = 0; 1433 1434 if (pi->caps_fps) { 1435 u16 tmp; 1436 1437 tmp = 45; 1438 pi->fps_high_t = cpu_to_be16(tmp); 1439 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1440 pi->dpm_table_start + 1441 offsetof(SMU7_Fusion_DpmTable, FpsHighT), 1442 (u8 *)&pi->fps_high_t, 1443 sizeof(u16), pi->sram_end); 1444 1445 tmp = 30; 1446 pi->fps_low_t = cpu_to_be16(tmp); 1447 1448 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1449 pi->dpm_table_start + 1450 offsetof(SMU7_Fusion_DpmTable, FpsLowT), 1451 (u8 *)&pi->fps_low_t, 1452 sizeof(u16), pi->sram_end); 1453 1454 } 1455 return ret; 1456} 1457 1458static void kv_init_powergate_state(struct amdgpu_device *adev) 1459{ 1460 struct kv_power_info *pi = kv_get_pi(adev); 1461 1462 pi->uvd_power_gated = false; 1463 pi->vce_power_gated = false; 1464 pi->samu_power_gated = false; 1465 pi->acp_power_gated = false; 1466 1467} 1468 1469static int kv_enable_uvd_dpm(struct amdgpu_device *adev, bool enable) 1470{ 1471 return amdgpu_kv_notify_message_to_smu(adev, enable ? 
1472 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable); 1473} 1474 1475static int kv_enable_vce_dpm(struct amdgpu_device *adev, bool enable) 1476{ 1477 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1478 PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable); 1479} 1480 1481static int kv_enable_samu_dpm(struct amdgpu_device *adev, bool enable) 1482{ 1483 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1484 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable); 1485} 1486 1487static int kv_enable_acp_dpm(struct amdgpu_device *adev, bool enable) 1488{ 1489 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1490 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable); 1491} 1492 1493static int kv_update_uvd_dpm(struct amdgpu_device *adev, bool gate) 1494{ 1495 struct kv_power_info *pi = kv_get_pi(adev); 1496 struct amdgpu_uvd_clock_voltage_dependency_table *table = 1497 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 1498 int ret; 1499 u32 mask; 1500 1501 if (!gate) { 1502 if (table->count) 1503 pi->uvd_boot_level = table->count - 1; 1504 else 1505 pi->uvd_boot_level = 0; 1506 1507 if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) { 1508 mask = 1 << pi->uvd_boot_level; 1509 } else { 1510 mask = 0x1f; 1511 } 1512 1513 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1514 pi->dpm_table_start + 1515 offsetof(SMU7_Fusion_DpmTable, UvdBootLevel), 1516 (uint8_t *)&pi->uvd_boot_level, 1517 sizeof(u8), pi->sram_end); 1518 if (ret) 1519 return ret; 1520 1521 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1522 PPSMC_MSG_UVDDPM_SetEnabledMask, 1523 mask); 1524 } 1525 1526 return kv_enable_uvd_dpm(adev, !gate); 1527} 1528 1529static u8 kv_get_vce_boot_level(struct amdgpu_device *adev, u32 evclk) 1530{ 1531 u8 i; 1532 struct amdgpu_vce_clock_voltage_dependency_table *table = 1533 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 1534 1535 for (i = 0; i < table->count; i++) { 1536 if (table->entries[i].evclk >= evclk) 1537 break; 1538 } 1539 1540 return i; 1541} 1542 1543static int kv_update_vce_dpm(struct amdgpu_device *adev, 1544 struct amdgpu_ps *amdgpu_new_state, 1545 struct amdgpu_ps *amdgpu_current_state) 1546{ 1547 struct kv_power_info *pi = kv_get_pi(adev); 1548 struct amdgpu_vce_clock_voltage_dependency_table *table = 1549 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 1550 int ret; 1551 1552 if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { 1553 kv_dpm_powergate_vce(adev, false); 1554 /* turn the clocks on when encoding */ 1555 ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, 1556 AMD_CG_STATE_UNGATE); 1557 if (ret) 1558 return ret; 1559 if (pi->caps_stable_p_state) 1560 pi->vce_boot_level = table->count - 1; 1561 else 1562 pi->vce_boot_level = kv_get_vce_boot_level(adev, amdgpu_new_state->evclk); 1563 1564 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1565 pi->dpm_table_start + 1566 offsetof(SMU7_Fusion_DpmTable, VceBootLevel), 1567 (u8 *)&pi->vce_boot_level, 1568 sizeof(u8), 1569 pi->sram_end); 1570 if (ret) 1571 return ret; 1572 1573 if (pi->caps_stable_p_state) 1574 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1575 PPSMC_MSG_VCEDPM_SetEnabledMask, 1576 (1 << pi->vce_boot_level)); 1577 1578 kv_enable_vce_dpm(adev, true); 1579 } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { 1580 kv_enable_vce_dpm(adev, false); 1581 /* turn the clocks off when not encoding */ 1582 ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, 1583 AMD_CG_STATE_GATE); 1584 if (ret) 1585 return ret; 1586 
kv_dpm_powergate_vce(adev, true); 1587 } 1588 1589 return 0; 1590} 1591 1592static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate) 1593{ 1594 struct kv_power_info *pi = kv_get_pi(adev); 1595 struct amdgpu_clock_voltage_dependency_table *table = 1596 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; 1597 int ret; 1598 1599 if (!gate) { 1600 if (pi->caps_stable_p_state) 1601 pi->samu_boot_level = table->count - 1; 1602 else 1603 pi->samu_boot_level = 0; 1604 1605 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1606 pi->dpm_table_start + 1607 offsetof(SMU7_Fusion_DpmTable, SamuBootLevel), 1608 (u8 *)&pi->samu_boot_level, 1609 sizeof(u8), 1610 pi->sram_end); 1611 if (ret) 1612 return ret; 1613 1614 if (pi->caps_stable_p_state) 1615 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1616 PPSMC_MSG_SAMUDPM_SetEnabledMask, 1617 (1 << pi->samu_boot_level)); 1618 } 1619 1620 return kv_enable_samu_dpm(adev, !gate); 1621} 1622 1623static u8 kv_get_acp_boot_level(struct amdgpu_device *adev) 1624{ 1625 u8 i; 1626 struct amdgpu_clock_voltage_dependency_table *table = 1627 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 1628 1629 for (i = 0; i < table->count; i++) { 1630 if (table->entries[i].clk >= 0) /* XXX */ 1631 break; 1632 } 1633 1634 if (i >= table->count) 1635 i = table->count - 1; 1636 1637 return i; 1638} 1639 1640static void kv_update_acp_boot_level(struct amdgpu_device *adev) 1641{ 1642 struct kv_power_info *pi = kv_get_pi(adev); 1643 u8 acp_boot_level; 1644 1645 if (!pi->caps_stable_p_state) { 1646 acp_boot_level = kv_get_acp_boot_level(adev); 1647 if (acp_boot_level != pi->acp_boot_level) { 1648 pi->acp_boot_level = acp_boot_level; 1649 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1650 PPSMC_MSG_ACPDPM_SetEnabledMask, 1651 (1 << pi->acp_boot_level)); 1652 } 1653 } 1654} 1655 1656static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate) 1657{ 1658 struct kv_power_info *pi = kv_get_pi(adev); 1659 struct amdgpu_clock_voltage_dependency_table *table = 1660 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 1661 int ret; 1662 1663 if (!gate) { 1664 if (pi->caps_stable_p_state) 1665 pi->acp_boot_level = table->count - 1; 1666 else 1667 pi->acp_boot_level = kv_get_acp_boot_level(adev); 1668 1669 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1670 pi->dpm_table_start + 1671 offsetof(SMU7_Fusion_DpmTable, AcpBootLevel), 1672 (u8 *)&pi->acp_boot_level, 1673 sizeof(u8), 1674 pi->sram_end); 1675 if (ret) 1676 return ret; 1677 1678 if (pi->caps_stable_p_state) 1679 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1680 PPSMC_MSG_ACPDPM_SetEnabledMask, 1681 (1 << pi->acp_boot_level)); 1682 } 1683 1684 return kv_enable_acp_dpm(adev, !gate); 1685} 1686 1687static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate) 1688{ 1689 struct kv_power_info *pi = kv_get_pi(adev); 1690 int ret; 1691 1692 if (pi->uvd_power_gated == gate) 1693 return; 1694 1695 pi->uvd_power_gated = gate; 1696 1697 if (gate) { 1698 if (pi->caps_uvd_pg) { 1699 /* disable clockgating so we can properly shut down the block */ 1700 ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, 1701 AMD_CG_STATE_UNGATE); 1702 /* shutdown the UVD block */ 1703 ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, 1704 AMD_PG_STATE_GATE); 1705 /* XXX: check for errors */ 1706 } 1707 kv_update_uvd_dpm(adev, gate); 1708 if (pi->caps_uvd_pg) 1709 /* power off the UVD block */ 1710 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF); 1711 } else { 1712 if (pi->caps_uvd_pg) { 1713 
/* power on the UVD block */ 1714 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); 1715 /* re-init the UVD block */ 1716 ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, 1717 AMD_PG_STATE_UNGATE); 1718 /* enable clockgating. hw will dynamically gate/ungate clocks on the fly */ 1719 ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, 1720 AMD_CG_STATE_GATE); 1721 /* XXX: check for errors */ 1722 } 1723 kv_update_uvd_dpm(adev, gate); 1724 } 1725} 1726 1727static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) 1728{ 1729 struct kv_power_info *pi = kv_get_pi(adev); 1730 int ret; 1731 1732 if (pi->vce_power_gated == gate) 1733 return; 1734 1735 pi->vce_power_gated = gate; 1736 1737 if (gate) { 1738 if (pi->caps_vce_pg) { 1739 /* shutdown the VCE block */ 1740 ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, 1741 AMD_PG_STATE_GATE); 1742 /* XXX: check for errors */ 1743 /* power off the VCE block */ 1744 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); 1745 } 1746 } else { 1747 if (pi->caps_vce_pg) { 1748 /* power on the VCE block */ 1749 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); 1750 /* re-init the VCE block */ 1751 ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, 1752 AMD_PG_STATE_UNGATE); 1753 /* XXX: check for errors */ 1754 } 1755 } 1756} 1757 1758static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) 1759{ 1760 struct kv_power_info *pi = kv_get_pi(adev); 1761 1762 if (pi->samu_power_gated == gate) 1763 return; 1764 1765 pi->samu_power_gated = gate; 1766 1767 if (gate) { 1768 kv_update_samu_dpm(adev, true); 1769 if (pi->caps_samu_pg) 1770 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerOFF); 1771 } else { 1772 if (pi->caps_samu_pg) 1773 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerON); 1774 kv_update_samu_dpm(adev, false); 1775 } 1776} 1777 1778static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate) 1779{ 1780 struct kv_power_info *pi = kv_get_pi(adev); 1781 1782 if (pi->acp_power_gated == gate) 1783 return; 1784 1785 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 1786 return; 1787 1788 pi->acp_power_gated = gate; 1789 1790 if (gate) { 1791 kv_update_acp_dpm(adev, true); 1792 if (pi->caps_acp_pg) 1793 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerOFF); 1794 } else { 1795 if (pi->caps_acp_pg) 1796 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerON); 1797 kv_update_acp_dpm(adev, false); 1798 } 1799} 1800 1801static void kv_set_valid_clock_range(struct amdgpu_device *adev, 1802 struct amdgpu_ps *new_rps) 1803{ 1804 struct kv_ps *new_ps = kv_get_ps(new_rps); 1805 struct kv_power_info *pi = kv_get_pi(adev); 1806 u32 i; 1807 struct amdgpu_clock_voltage_dependency_table *table = 1808 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 1809 1810 if (table && table->count) { 1811 for (i = 0; i < pi->graphics_dpm_level_count; i++) { 1812 if ((table->entries[i].clk >= new_ps->levels[0].sclk) || 1813 (i == (pi->graphics_dpm_level_count - 1))) { 1814 pi->lowest_valid = i; 1815 break; 1816 } 1817 } 1818 1819 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 1820 if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk) 1821 break; 1822 } 1823 pi->highest_valid = i; 1824 1825 if (pi->lowest_valid > pi->highest_valid) { 1826 if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) > 1827 (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 
1].sclk)) 1828 pi->highest_valid = pi->lowest_valid; 1829 else 1830 pi->lowest_valid = pi->highest_valid; 1831 } 1832 } else { 1833 struct sumo_sclk_voltage_mapping_table *table = 1834 &pi->sys_info.sclk_voltage_mapping_table; 1835 1836 for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) { 1837 if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk || 1838 i == (int)(pi->graphics_dpm_level_count - 1)) { 1839 pi->lowest_valid = i; 1840 break; 1841 } 1842 } 1843 1844 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 1845 if (table->entries[i].sclk_frequency <= 1846 new_ps->levels[new_ps->num_levels - 1].sclk) 1847 break; 1848 } 1849 pi->highest_valid = i; 1850 1851 if (pi->lowest_valid > pi->highest_valid) { 1852 if ((new_ps->levels[0].sclk - 1853 table->entries[pi->highest_valid].sclk_frequency) > 1854 (table->entries[pi->lowest_valid].sclk_frequency - 1855 new_ps->levels[new_ps->num_levels -1].sclk)) 1856 pi->highest_valid = pi->lowest_valid; 1857 else 1858 pi->lowest_valid = pi->highest_valid; 1859 } 1860 } 1861} 1862 1863static int kv_update_dfs_bypass_settings(struct amdgpu_device *adev, 1864 struct amdgpu_ps *new_rps) 1865{ 1866 struct kv_ps *new_ps = kv_get_ps(new_rps); 1867 struct kv_power_info *pi = kv_get_pi(adev); 1868 int ret = 0; 1869 u8 clk_bypass_cntl; 1870 1871 if (pi->caps_enable_dfs_bypass) { 1872 clk_bypass_cntl = new_ps->need_dfs_bypass ? 1873 pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0; 1874 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1875 (pi->dpm_table_start + 1876 offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) + 1877 (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) + 1878 offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)), 1879 &clk_bypass_cntl, 1880 sizeof(u8), pi->sram_end); 1881 } 1882 1883 return ret; 1884} 1885 1886static int kv_enable_nb_dpm(struct amdgpu_device *adev, 1887 bool enable) 1888{ 1889 struct kv_power_info *pi = kv_get_pi(adev); 1890 int ret = 0; 1891 1892 if (enable) { 1893 if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) { 1894 ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Enable); 1895 if (ret == 0) 1896 pi->nb_dpm_enabled = true; 1897 } 1898 } else { 1899 if (pi->enable_nb_dpm && pi->nb_dpm_enabled) { 1900 ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Disable); 1901 if (ret == 0) 1902 pi->nb_dpm_enabled = false; 1903 } 1904 } 1905 1906 return ret; 1907} 1908 1909static int kv_dpm_force_performance_level(struct amdgpu_device *adev, 1910 enum amdgpu_dpm_forced_level level) 1911{ 1912 int ret; 1913 1914 if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) { 1915 ret = kv_force_dpm_highest(adev); 1916 if (ret) 1917 return ret; 1918 } else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) { 1919 ret = kv_force_dpm_lowest(adev); 1920 if (ret) 1921 return ret; 1922 } else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) { 1923 ret = kv_unforce_levels(adev); 1924 if (ret) 1925 return ret; 1926 } 1927 1928 adev->pm.dpm.forced_level = level; 1929 1930 return 0; 1931} 1932 1933static int kv_dpm_pre_set_power_state(struct amdgpu_device *adev) 1934{ 1935 struct kv_power_info *pi = kv_get_pi(adev); 1936 struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps; 1937 struct amdgpu_ps *new_ps = &requested_ps; 1938 1939 kv_update_requested_ps(adev, new_ps); 1940 1941 kv_apply_state_adjust_rules(adev, 1942 &pi->requested_rps, 1943 &pi->current_rps); 1944 1945 return 0; 1946} 1947 1948static int kv_dpm_set_power_state(struct amdgpu_device *adev) 1949{ 1950 struct kv_power_info *pi = kv_get_pi(adev); 1951 struct 
amdgpu_ps *new_ps = &pi->requested_rps; 1952 struct amdgpu_ps *old_ps = &pi->current_rps; 1953 int ret; 1954 1955 if (pi->bapm_enable) { 1956 ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.dpm.ac_power); 1957 if (ret) { 1958 DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); 1959 return ret; 1960 } 1961 } 1962 1963 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { 1964 if (pi->enable_dpm) { 1965 kv_set_valid_clock_range(adev, new_ps); 1966 kv_update_dfs_bypass_settings(adev, new_ps); 1967 ret = kv_calculate_ds_divider(adev); 1968 if (ret) { 1969 DRM_ERROR("kv_calculate_ds_divider failed\n"); 1970 return ret; 1971 } 1972 kv_calculate_nbps_level_settings(adev); 1973 kv_calculate_dpm_settings(adev); 1974 kv_force_lowest_valid(adev); 1975 kv_enable_new_levels(adev); 1976 kv_upload_dpm_settings(adev); 1977 kv_program_nbps_index_settings(adev, new_ps); 1978 kv_unforce_levels(adev); 1979 kv_set_enabled_levels(adev); 1980 kv_force_lowest_valid(adev); 1981 kv_unforce_levels(adev); 1982 1983 ret = kv_update_vce_dpm(adev, new_ps, old_ps); 1984 if (ret) { 1985 DRM_ERROR("kv_update_vce_dpm failed\n"); 1986 return ret; 1987 } 1988 kv_update_sclk_t(adev); 1989 if (adev->asic_type == CHIP_MULLINS) 1990 kv_enable_nb_dpm(adev, true); 1991 } 1992 } else { 1993 if (pi->enable_dpm) { 1994 kv_set_valid_clock_range(adev, new_ps); 1995 kv_update_dfs_bypass_settings(adev, new_ps); 1996 ret = kv_calculate_ds_divider(adev); 1997 if (ret) { 1998 DRM_ERROR("kv_calculate_ds_divider failed\n"); 1999 return ret; 2000 } 2001 kv_calculate_nbps_level_settings(adev); 2002 kv_calculate_dpm_settings(adev); 2003 kv_freeze_sclk_dpm(adev, true); 2004 kv_upload_dpm_settings(adev); 2005 kv_program_nbps_index_settings(adev, new_ps); 2006 kv_freeze_sclk_dpm(adev, false); 2007 kv_set_enabled_levels(adev); 2008 ret = kv_update_vce_dpm(adev, new_ps, old_ps); 2009 if (ret) { 2010 DRM_ERROR("kv_update_vce_dpm failed\n"); 2011 return ret; 2012 } 2013 kv_update_acp_boot_level(adev); 2014 kv_update_sclk_t(adev); 2015 kv_enable_nb_dpm(adev, true); 2016 } 2017 } 2018 2019 return 0; 2020} 2021 2022static void kv_dpm_post_set_power_state(struct amdgpu_device *adev) 2023{ 2024 struct kv_power_info *pi = kv_get_pi(adev); 2025 struct amdgpu_ps *new_ps = &pi->requested_rps; 2026 2027 kv_update_current_ps(adev, new_ps); 2028} 2029 2030static void kv_dpm_setup_asic(struct amdgpu_device *adev) 2031{ 2032 sumo_take_smu_control(adev, true); 2033 kv_init_powergate_state(adev); 2034 kv_init_sclk_t(adev); 2035} 2036 2037#if 0 2038static void kv_dpm_reset_asic(struct amdgpu_device *adev) 2039{ 2040 struct kv_power_info *pi = kv_get_pi(adev); 2041 2042 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { 2043 kv_force_lowest_valid(adev); 2044 kv_init_graphics_levels(adev); 2045 kv_program_bootup_state(adev); 2046 kv_upload_dpm_settings(adev); 2047 kv_force_lowest_valid(adev); 2048 kv_unforce_levels(adev); 2049 } else { 2050 kv_init_graphics_levels(adev); 2051 kv_program_bootup_state(adev); 2052 kv_freeze_sclk_dpm(adev, true); 2053 kv_upload_dpm_settings(adev); 2054 kv_freeze_sclk_dpm(adev, false); 2055 kv_set_enabled_level(adev, pi->graphics_boot_level); 2056 } 2057} 2058#endif 2059 2060static void kv_construct_max_power_limits_table(struct amdgpu_device *adev, 2061 struct amdgpu_clock_and_voltage_limits *table) 2062{ 2063 struct kv_power_info *pi = kv_get_pi(adev); 2064 2065 if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) { 2066 int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries 
- 1; 2067 table->sclk = 2068 pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency; 2069 table->vddc = 2070 kv_convert_2bit_index_to_voltage(adev, 2071 pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit); 2072 } 2073 2074 table->mclk = pi->sys_info.nbp_memory_clock[0]; 2075} 2076 2077static void kv_patch_voltage_values(struct amdgpu_device *adev) 2078{ 2079 int i; 2080 struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table = 2081 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 2082 struct amdgpu_vce_clock_voltage_dependency_table *vce_table = 2083 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 2084 struct amdgpu_clock_voltage_dependency_table *samu_table = 2085 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; 2086 struct amdgpu_clock_voltage_dependency_table *acp_table = 2087 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 2088 2089 if (uvd_table->count) { 2090 for (i = 0; i < uvd_table->count; i++) 2091 uvd_table->entries[i].v = 2092 kv_convert_8bit_index_to_voltage(adev, 2093 uvd_table->entries[i].v); 2094 } 2095 2096 if (vce_table->count) { 2097 for (i = 0; i < vce_table->count; i++) 2098 vce_table->entries[i].v = 2099 kv_convert_8bit_index_to_voltage(adev, 2100 vce_table->entries[i].v); 2101 } 2102 2103 if (samu_table->count) { 2104 for (i = 0; i < samu_table->count; i++) 2105 samu_table->entries[i].v = 2106 kv_convert_8bit_index_to_voltage(adev, 2107 samu_table->entries[i].v); 2108 } 2109 2110 if (acp_table->count) { 2111 for (i = 0; i < acp_table->count; i++) 2112 acp_table->entries[i].v = 2113 kv_convert_8bit_index_to_voltage(adev, 2114 acp_table->entries[i].v); 2115 } 2116 2117} 2118 2119static void kv_construct_boot_state(struct amdgpu_device *adev) 2120{ 2121 struct kv_power_info *pi = kv_get_pi(adev); 2122 2123 pi->boot_pl.sclk = pi->sys_info.bootup_sclk; 2124 pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index; 2125 pi->boot_pl.ds_divider_index = 0; 2126 pi->boot_pl.ss_divider_index = 0; 2127 pi->boot_pl.allow_gnb_slow = 1; 2128 pi->boot_pl.force_nbp_state = 0; 2129 pi->boot_pl.display_wm = 0; 2130 pi->boot_pl.vce_wm = 0; 2131} 2132 2133static int kv_force_dpm_highest(struct amdgpu_device *adev) 2134{ 2135 int ret; 2136 u32 enable_mask, i; 2137 2138 ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask); 2139 if (ret) 2140 return ret; 2141 2142 for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) { 2143 if (enable_mask & (1 << i)) 2144 break; 2145 } 2146 2147 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 2148 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i); 2149 else 2150 return kv_set_enabled_level(adev, i); 2151} 2152 2153static int kv_force_dpm_lowest(struct amdgpu_device *adev) 2154{ 2155 int ret; 2156 u32 enable_mask, i; 2157 2158 ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask); 2159 if (ret) 2160 return ret; 2161 2162 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) { 2163 if (enable_mask & (1 << i)) 2164 break; 2165 } 2166 2167 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 2168 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i); 2169 else 2170 return kv_set_enabled_level(adev, i); 2171} 2172 2173static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev, 2174 u32 sclk, u32 min_sclk_in_sr) 2175{ 2176 struct kv_power_info *pi = kv_get_pi(adev); 2177 u32 i; 2178 u32 temp; 2179 u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ? 
2180 min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK; 2181 2182 if (sclk < min) 2183 return 0; 2184 2185 if (!pi->caps_sclk_ds) 2186 return 0; 2187 2188 for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) { 2189 temp = sclk / sumo_get_sleep_divider_from_id(i); 2190 if (temp >= min) 2191 break; 2192 } 2193 2194 return (u8)i; 2195} 2196 2197static int kv_get_high_voltage_limit(struct amdgpu_device *adev, int *limit) 2198{ 2199 struct kv_power_info *pi = kv_get_pi(adev); 2200 struct amdgpu_clock_voltage_dependency_table *table = 2201 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 2202 int i; 2203 2204 if (table && table->count) { 2205 for (i = table->count - 1; i >= 0; i--) { 2206 if (pi->high_voltage_t && 2207 (kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <= 2208 pi->high_voltage_t)) { 2209 *limit = i; 2210 return 0; 2211 } 2212 } 2213 } else { 2214 struct sumo_sclk_voltage_mapping_table *table = 2215 &pi->sys_info.sclk_voltage_mapping_table; 2216 2217 for (i = table->num_max_dpm_entries - 1; i >= 0; i--) { 2218 if (pi->high_voltage_t && 2219 (kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <= 2220 pi->high_voltage_t)) { 2221 *limit = i; 2222 return 0; 2223 } 2224 } 2225 } 2226 2227 *limit = 0; 2228 return 0; 2229} 2230 2231static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, 2232 struct amdgpu_ps *new_rps, 2233 struct amdgpu_ps *old_rps) 2234{ 2235 struct kv_ps *ps = kv_get_ps(new_rps); 2236 struct kv_power_info *pi = kv_get_pi(adev); 2237 u32 min_sclk = 10000; /* ??? */ 2238 u32 sclk, mclk = 0; 2239 int i, limit; 2240 bool force_high; 2241 struct amdgpu_clock_voltage_dependency_table *table = 2242 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 2243 u32 stable_p_state_sclk = 0; 2244 struct amdgpu_clock_and_voltage_limits *max_limits = 2245 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 2246 2247 if (new_rps->vce_active) { 2248 new_rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk; 2249 new_rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk; 2250 } else { 2251 new_rps->evclk = 0; 2252 new_rps->ecclk = 0; 2253 } 2254 2255 mclk = max_limits->mclk; 2256 sclk = min_sclk; 2257 2258 if (pi->caps_stable_p_state) { 2259 stable_p_state_sclk = (max_limits->sclk * 75) / 100; 2260 2261 for (i = table->count - 1; i >= 0; i--) { 2262 if (stable_p_state_sclk >= table->entries[i].clk) { 2263 stable_p_state_sclk = table->entries[i].clk; 2264 break; 2265 } 2266 } 2267 2268 if (i > 0) 2269 stable_p_state_sclk = table->entries[0].clk; 2270 2271 sclk = stable_p_state_sclk; 2272 } 2273 2274 if (new_rps->vce_active) { 2275 if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk) 2276 sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk; 2277 } 2278 2279 ps->need_dfs_bypass = true; 2280 2281 for (i = 0; i < ps->num_levels; i++) { 2282 if (ps->levels[i].sclk < sclk) 2283 ps->levels[i].sclk = sclk; 2284 } 2285 2286 if (table && table->count) { 2287 for (i = 0; i < ps->num_levels; i++) { 2288 if (pi->high_voltage_t && 2289 (pi->high_voltage_t < 2290 kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) { 2291 kv_get_high_voltage_limit(adev, &limit); 2292 ps->levels[i].sclk = table->entries[limit].clk; 2293 } 2294 } 2295 } else { 2296 struct sumo_sclk_voltage_mapping_table *table = 2297 &pi->sys_info.sclk_voltage_mapping_table; 2298 2299 for (i = 0; i < ps->num_levels; i++) { 2300 if (pi->high_voltage_t && 2301 (pi->high_voltage_t < 2302 kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) { 2303 
kv_get_high_voltage_limit(adev, &limit); 2304 ps->levels[i].sclk = table->entries[limit].sclk_frequency; 2305 } 2306 } 2307 } 2308 2309 if (pi->caps_stable_p_state) { 2310 for (i = 0; i < ps->num_levels; i++) { 2311 ps->levels[i].sclk = stable_p_state_sclk; 2312 } 2313 } 2314 2315 pi->video_start = new_rps->dclk || new_rps->vclk || 2316 new_rps->evclk || new_rps->ecclk; 2317 2318 if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 2319 ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) 2320 pi->battery_state = true; 2321 else 2322 pi->battery_state = false; 2323 2324 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { 2325 ps->dpm0_pg_nb_ps_lo = 0x1; 2326 ps->dpm0_pg_nb_ps_hi = 0x0; 2327 ps->dpmx_nb_ps_lo = 0x1; 2328 ps->dpmx_nb_ps_hi = 0x0; 2329 } else { 2330 ps->dpm0_pg_nb_ps_lo = 0x3; 2331 ps->dpm0_pg_nb_ps_hi = 0x0; 2332 ps->dpmx_nb_ps_lo = 0x3; 2333 ps->dpmx_nb_ps_hi = 0x0; 2334 2335 if (pi->sys_info.nb_dpm_enable) { 2336 force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) || 2337 pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) || 2338 pi->disable_nb_ps3_in_battery; 2339 ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3; 2340 ps->dpm0_pg_nb_ps_hi = 0x2; 2341 ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3; 2342 ps->dpmx_nb_ps_hi = 0x2; 2343 } 2344 } 2345} 2346 2347static void kv_dpm_power_level_enabled_for_throttle(struct amdgpu_device *adev, 2348 u32 index, bool enable) 2349{ 2350 struct kv_power_info *pi = kv_get_pi(adev); 2351 2352 pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0; 2353} 2354 2355static int kv_calculate_ds_divider(struct amdgpu_device *adev) 2356{ 2357 struct kv_power_info *pi = kv_get_pi(adev); 2358 u32 sclk_in_sr = 10000; /* ??? */ 2359 u32 i; 2360 2361 if (pi->lowest_valid > pi->highest_valid) 2362 return -EINVAL; 2363 2364 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { 2365 pi->graphics_level[i].DeepSleepDivId = 2366 kv_get_sleep_divider_id_from_clock(adev, 2367 be32_to_cpu(pi->graphics_level[i].SclkFrequency), 2368 sclk_in_sr); 2369 } 2370 return 0; 2371} 2372 2373static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev) 2374{ 2375 struct kv_power_info *pi = kv_get_pi(adev); 2376 u32 i; 2377 bool force_high; 2378 struct amdgpu_clock_and_voltage_limits *max_limits = 2379 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 2380 u32 mclk = max_limits->mclk; 2381 2382 if (pi->lowest_valid > pi->highest_valid) 2383 return -EINVAL; 2384 2385 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { 2386 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { 2387 pi->graphics_level[i].GnbSlow = 1; 2388 pi->graphics_level[i].ForceNbPs1 = 0; 2389 pi->graphics_level[i].UpH = 0; 2390 } 2391 2392 if (!pi->sys_info.nb_dpm_enable) 2393 return 0; 2394 2395 force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) || 2396 (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start); 2397 2398 if (force_high) { 2399 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) 2400 pi->graphics_level[i].GnbSlow = 0; 2401 } else { 2402 if (pi->battery_state) 2403 pi->graphics_level[0].ForceNbPs1 = 1; 2404 2405 pi->graphics_level[1].GnbSlow = 0; 2406 pi->graphics_level[2].GnbSlow = 0; 2407 pi->graphics_level[3].GnbSlow = 0; 2408 pi->graphics_level[4].GnbSlow = 0; 2409 } 2410 } else { 2411 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { 2412 pi->graphics_level[i].GnbSlow = 1; 2413 pi->graphics_level[i].ForceNbPs1 = 0; 2414 pi->graphics_level[i].UpH = 0; 2415 } 2416 2417 if (pi->sys_info.nb_dpm_enable 
&& pi->battery_state) { 2418 pi->graphics_level[pi->lowest_valid].UpH = 0x28; 2419 pi->graphics_level[pi->lowest_valid].GnbSlow = 0; 2420 if (pi->lowest_valid != pi->highest_valid) 2421 pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1; 2422 } 2423 } 2424 return 0; 2425} 2426 2427static int kv_calculate_dpm_settings(struct amdgpu_device *adev) 2428{ 2429 struct kv_power_info *pi = kv_get_pi(adev); 2430 u32 i; 2431 2432 if (pi->lowest_valid > pi->highest_valid) 2433 return -EINVAL; 2434 2435 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) 2436 pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0; 2437 2438 return 0; 2439} 2440 2441static void kv_init_graphics_levels(struct amdgpu_device *adev) 2442{ 2443 struct kv_power_info *pi = kv_get_pi(adev); 2444 u32 i; 2445 struct amdgpu_clock_voltage_dependency_table *table = 2446 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 2447 2448 if (table && table->count) { 2449 u32 vid_2bit; 2450 2451 pi->graphics_dpm_level_count = 0; 2452 for (i = 0; i < table->count; i++) { 2453 if (pi->high_voltage_t && 2454 (pi->high_voltage_t < 2455 kv_convert_8bit_index_to_voltage(adev, table->entries[i].v))) 2456 break; 2457 2458 kv_set_divider_value(adev, i, table->entries[i].clk); 2459 vid_2bit = kv_convert_vid7_to_vid2(adev, 2460 &pi->sys_info.vid_mapping_table, 2461 table->entries[i].v); 2462 kv_set_vid(adev, i, vid_2bit); 2463 kv_set_at(adev, i, pi->at[i]); 2464 kv_dpm_power_level_enabled_for_throttle(adev, i, true); 2465 pi->graphics_dpm_level_count++; 2466 } 2467 } else { 2468 struct sumo_sclk_voltage_mapping_table *table = 2469 &pi->sys_info.sclk_voltage_mapping_table; 2470 2471 pi->graphics_dpm_level_count = 0; 2472 for (i = 0; i < table->num_max_dpm_entries; i++) { 2473 if (pi->high_voltage_t && 2474 pi->high_voltage_t < 2475 kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit)) 2476 break; 2477 2478 kv_set_divider_value(adev, i, table->entries[i].sclk_frequency); 2479 kv_set_vid(adev, i, table->entries[i].vid_2bit); 2480 kv_set_at(adev, i, pi->at[i]); 2481 kv_dpm_power_level_enabled_for_throttle(adev, i, true); 2482 pi->graphics_dpm_level_count++; 2483 } 2484 } 2485 2486 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) 2487 kv_dpm_power_level_enable(adev, i, false); 2488} 2489 2490static void kv_enable_new_levels(struct amdgpu_device *adev) 2491{ 2492 struct kv_power_info *pi = kv_get_pi(adev); 2493 u32 i; 2494 2495 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) { 2496 if (i >= pi->lowest_valid && i <= pi->highest_valid) 2497 kv_dpm_power_level_enable(adev, i, true); 2498 } 2499} 2500 2501static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level) 2502{ 2503 u32 new_mask = (1 << level); 2504 2505 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, 2506 PPSMC_MSG_SCLKDPM_SetEnabledMask, 2507 new_mask); 2508} 2509 2510static int kv_set_enabled_levels(struct amdgpu_device *adev) 2511{ 2512 struct kv_power_info *pi = kv_get_pi(adev); 2513 u32 i, new_mask = 0; 2514 2515 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) 2516 new_mask |= (1 << i); 2517 2518 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, 2519 PPSMC_MSG_SCLKDPM_SetEnabledMask, 2520 new_mask); 2521} 2522 2523static void kv_program_nbps_index_settings(struct amdgpu_device *adev, 2524 struct amdgpu_ps *new_rps) 2525{ 2526 struct kv_ps *new_ps = kv_get_ps(new_rps); 2527 struct kv_power_info *pi = kv_get_pi(adev); 2528 u32 nbdpmconfig1; 2529 2530 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 2531 return; 2532 
2533 if (pi->sys_info.nb_dpm_enable) { 2534 nbdpmconfig1 = RREG32_SMC(ixNB_DPM_CONFIG_1); 2535 nbdpmconfig1 &= ~(NB_DPM_CONFIG_1__Dpm0PgNbPsLo_MASK | 2536 NB_DPM_CONFIG_1__Dpm0PgNbPsHi_MASK | 2537 NB_DPM_CONFIG_1__DpmXNbPsLo_MASK | 2538 NB_DPM_CONFIG_1__DpmXNbPsHi_MASK); 2539 nbdpmconfig1 |= (new_ps->dpm0_pg_nb_ps_lo << NB_DPM_CONFIG_1__Dpm0PgNbPsLo__SHIFT) | 2540 (new_ps->dpm0_pg_nb_ps_hi << NB_DPM_CONFIG_1__Dpm0PgNbPsHi__SHIFT) | 2541 (new_ps->dpmx_nb_ps_lo << NB_DPM_CONFIG_1__DpmXNbPsLo__SHIFT) | 2542 (new_ps->dpmx_nb_ps_hi << NB_DPM_CONFIG_1__DpmXNbPsHi__SHIFT); 2543 WREG32_SMC(ixNB_DPM_CONFIG_1, nbdpmconfig1); 2544 } 2545} 2546 2547static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, 2548 int min_temp, int max_temp) 2549{ 2550 int low_temp = 0 * 1000; 2551 int high_temp = 255 * 1000; 2552 u32 tmp; 2553 2554 if (low_temp < min_temp) 2555 low_temp = min_temp; 2556 if (high_temp > max_temp) 2557 high_temp = max_temp; 2558 if (high_temp < low_temp) { 2559 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); 2560 return -EINVAL; 2561 } 2562 2563 tmp = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 2564 tmp &= ~(CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK | 2565 CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK); 2566 tmp |= ((49 + (high_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT) | 2567 ((49 + (low_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT); 2568 WREG32_SMC(ixCG_THERMAL_INT_CTRL, tmp); 2569 2570 adev->pm.dpm.thermal.min_temp = low_temp; 2571 adev->pm.dpm.thermal.max_temp = high_temp; 2572 2573 return 0; 2574} 2575 2576union igp_info { 2577 struct _ATOM_INTEGRATED_SYSTEM_INFO info; 2578 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; 2579 struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5; 2580 struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6; 2581 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; 2582 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8; 2583}; 2584 2585static int kv_parse_sys_info_table(struct amdgpu_device *adev) 2586{ 2587 struct kv_power_info *pi = kv_get_pi(adev); 2588 struct amdgpu_mode_info *mode_info = &adev->mode_info; 2589 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); 2590 union igp_info *igp_info; 2591 u8 frev, crev; 2592 u16 data_offset; 2593 int i; 2594 2595 if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 2596 &frev, &crev, &data_offset)) { 2597 igp_info = (union igp_info *)(mode_info->atom_context->bios + 2598 data_offset); 2599 2600 if (crev != 8) { 2601 DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); 2602 return -EINVAL; 2603 } 2604 pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock); 2605 pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock); 2606 pi->sys_info.bootup_nb_voltage_index = 2607 le16_to_cpu(igp_info->info_8.usBootUpNBVoltage); 2608 if (igp_info->info_8.ucHtcTmpLmt == 0) 2609 pi->sys_info.htc_tmp_lmt = 203; 2610 else 2611 pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt; 2612 if (igp_info->info_8.ucHtcHystLmt == 0) 2613 pi->sys_info.htc_hyst_lmt = 5; 2614 else 2615 pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt; 2616 if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) { 2617 DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n"); 2618 } 2619 2620 if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3)) 2621 pi->sys_info.nb_dpm_enable = true; 2622 else 2623 pi->sys_info.nb_dpm_enable = false; 2624 2625 for (i = 0; i < KV_NUM_NBPSTATES; i++) { 2626 pi->sys_info.nbp_memory_clock[i] = 
2627 le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]); 2628 pi->sys_info.nbp_n_clock[i] = 2629 le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]); 2630 } 2631 if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) & 2632 SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) 2633 pi->caps_enable_dfs_bypass = true; 2634 2635 sumo_construct_sclk_voltage_mapping_table(adev, 2636 &pi->sys_info.sclk_voltage_mapping_table, 2637 igp_info->info_8.sAvail_SCLK); 2638 2639 sumo_construct_vid_mapping_table(adev, 2640 &pi->sys_info.vid_mapping_table, 2641 igp_info->info_8.sAvail_SCLK); 2642 2643 kv_construct_max_power_limits_table(adev, 2644 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac); 2645 } 2646 return 0; 2647} 2648 2649union power_info { 2650 struct _ATOM_POWERPLAY_INFO info; 2651 struct _ATOM_POWERPLAY_INFO_V2 info_2; 2652 struct _ATOM_POWERPLAY_INFO_V3 info_3; 2653 struct _ATOM_PPLIB_POWERPLAYTABLE pplib; 2654 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; 2655 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; 2656}; 2657 2658union pplib_clock_info { 2659 struct _ATOM_PPLIB_R600_CLOCK_INFO r600; 2660 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; 2661 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; 2662 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; 2663}; 2664 2665union pplib_power_state { 2666 struct _ATOM_PPLIB_STATE v1; 2667 struct _ATOM_PPLIB_STATE_V2 v2; 2668}; 2669 2670static void kv_patch_boot_state(struct amdgpu_device *adev, 2671 struct kv_ps *ps) 2672{ 2673 struct kv_power_info *pi = kv_get_pi(adev); 2674 2675 ps->num_levels = 1; 2676 ps->levels[0] = pi->boot_pl; 2677} 2678 2679static void kv_parse_pplib_non_clock_info(struct amdgpu_device *adev, 2680 struct amdgpu_ps *rps, 2681 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, 2682 u8 table_rev) 2683{ 2684 struct kv_ps *ps = kv_get_ps(rps); 2685 2686 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); 2687 rps->class = le16_to_cpu(non_clock_info->usClassification); 2688 rps->class2 = le16_to_cpu(non_clock_info->usClassification2); 2689 2690 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { 2691 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); 2692 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); 2693 } else { 2694 rps->vclk = 0; 2695 rps->dclk = 0; 2696 } 2697 2698 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { 2699 adev->pm.dpm.boot_ps = rps; 2700 kv_patch_boot_state(adev, ps); 2701 } 2702 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) 2703 adev->pm.dpm.uvd_ps = rps; 2704} 2705 2706static void kv_parse_pplib_clock_info(struct amdgpu_device *adev, 2707 struct amdgpu_ps *rps, int index, 2708 union pplib_clock_info *clock_info) 2709{ 2710 struct kv_power_info *pi = kv_get_pi(adev); 2711 struct kv_ps *ps = kv_get_ps(rps); 2712 struct kv_pl *pl = &ps->levels[index]; 2713 u32 sclk; 2714 2715 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); 2716 sclk |= clock_info->sumo.ucEngineClockHigh << 16; 2717 pl->sclk = sclk; 2718 pl->vddc_index = clock_info->sumo.vddcIndex; 2719 2720 ps->num_levels = index + 1; 2721 2722 if (pi->caps_sclk_ds) { 2723 pl->ds_divider_index = 5; 2724 pl->ss_divider_index = 5; 2725 } 2726} 2727 2728static int kv_parse_power_table(struct amdgpu_device *adev) 2729{ 2730 struct amdgpu_mode_info *mode_info = &adev->mode_info; 2731 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; 2732 union pplib_power_state *power_state; 2733 int i, j, k, non_clock_array_index, clock_array_index; 2734 union pplib_clock_info *clock_info; 2735 struct _StateArray *state_array; 2736 struct _ClockInfoArray *clock_info_array; 2737 struct _NonClockInfoArray 
*non_clock_info_array; 2738 union power_info *power_info; 2739 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); 2740 u16 data_offset; 2741 u8 frev, crev; 2742 u8 *power_state_offset; 2743 struct kv_ps *ps; 2744 2745 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 2746 &frev, &crev, &data_offset)) 2747 return -EINVAL; 2748 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 2749 2750 amdgpu_add_thermal_controller(adev); 2751 2752 state_array = (struct _StateArray *) 2753 (mode_info->atom_context->bios + data_offset + 2754 le16_to_cpu(power_info->pplib.usStateArrayOffset)); 2755 clock_info_array = (struct _ClockInfoArray *) 2756 (mode_info->atom_context->bios + data_offset + 2757 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); 2758 non_clock_info_array = (struct _NonClockInfoArray *) 2759 (mode_info->atom_context->bios + data_offset + 2760 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); 2761 2762 adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) * 2763 state_array->ucNumEntries, GFP_KERNEL); 2764 if (!adev->pm.dpm.ps) 2765 return -ENOMEM; 2766 power_state_offset = (u8 *)state_array->states; 2767 for (i = 0; i < state_array->ucNumEntries; i++) { 2768 u8 *idx; 2769 power_state = (union pplib_power_state *)power_state_offset; 2770 non_clock_array_index = power_state->v2.nonClockInfoIndex; 2771 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 2772 &non_clock_info_array->nonClockInfo[non_clock_array_index]; 2773 ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL); 2774 if (ps == NULL) { 2775 kfree(adev->pm.dpm.ps); 2776 return -ENOMEM; 2777 } 2778 adev->pm.dpm.ps[i].ps_priv = ps; 2779 k = 0; 2780 idx = (u8 *)&power_state->v2.clockInfoIndex[0]; 2781 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { 2782 clock_array_index = idx[j]; 2783 if (clock_array_index >= clock_info_array->ucNumEntries) 2784 continue; 2785 if (k >= SUMO_MAX_HARDWARE_POWERLEVELS) 2786 break; 2787 clock_info = (union pplib_clock_info *) 2788 ((u8 *)&clock_info_array->clockInfo[0] + 2789 (clock_array_index * clock_info_array->ucEntrySize)); 2790 kv_parse_pplib_clock_info(adev, 2791 &adev->pm.dpm.ps[i], k, 2792 clock_info); 2793 k++; 2794 } 2795 kv_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i], 2796 non_clock_info, 2797 non_clock_info_array->ucEntrySize); 2798 power_state_offset += 2 + power_state->v2.ucNumDPMLevels; 2799 } 2800 adev->pm.dpm.num_ps = state_array->ucNumEntries; 2801 2802 /* fill in the vce power states */ 2803 for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) { 2804 u32 sclk; 2805 clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; 2806 clock_info = (union pplib_clock_info *) 2807 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; 2808 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); 2809 sclk |= clock_info->sumo.ucEngineClockHigh << 16; 2810 adev->pm.dpm.vce_states[i].sclk = sclk; 2811 adev->pm.dpm.vce_states[i].mclk = 0; 2812 } 2813 2814 return 0; 2815} 2816 2817static int kv_dpm_init(struct amdgpu_device *adev) 2818{ 2819 struct kv_power_info *pi; 2820 int ret, i; 2821 2822 pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL); 2823 if (pi == NULL) 2824 return -ENOMEM; 2825 adev->pm.dpm.priv = pi; 2826 2827 ret = amdgpu_get_platform_caps(adev); 2828 if (ret) 2829 return ret; 2830 2831 ret = amdgpu_parse_extended_power_table(adev); 2832 if (ret) 2833 return ret; 2834 2835 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) 2836 pi->at[i] = TRINITY_AT_DFLT; 2837 2838 pi->sram_end = 
SMC_RAM_END; 2839 2840 pi->enable_nb_dpm = true; 2841 2842 pi->caps_power_containment = true; 2843 pi->caps_cac = true; 2844 pi->enable_didt = false; 2845 if (pi->enable_didt) { 2846 pi->caps_sq_ramping = true; 2847 pi->caps_db_ramping = true; 2848 pi->caps_td_ramping = true; 2849 pi->caps_tcp_ramping = true; 2850 } 2851 2852 pi->caps_sclk_ds = true; 2853 pi->enable_auto_thermal_throttling = true; 2854 pi->disable_nb_ps3_in_battery = false; 2855 if (amdgpu_bapm == 0) 2856 pi->bapm_enable = false; 2857 else 2858 pi->bapm_enable = true; 2859 pi->voltage_drop_t = 0; 2860 pi->caps_sclk_throttle_low_notification = false; 2861 pi->caps_fps = false; /* true? */ 2862 pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false; 2863 pi->caps_uvd_dpm = true; 2864 pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false; 2865 pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false; 2866 pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false; 2867 pi->caps_stable_p_state = false; 2868 2869 ret = kv_parse_sys_info_table(adev); 2870 if (ret) 2871 return ret; 2872 2873 kv_patch_voltage_values(adev); 2874 kv_construct_boot_state(adev); 2875 2876 ret = kv_parse_power_table(adev); 2877 if (ret) 2878 return ret; 2879 2880 pi->enable_dpm = true; 2881 2882 return 0; 2883} 2884 2885static void 2886kv_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, 2887 struct seq_file *m) 2888{ 2889 struct kv_power_info *pi = kv_get_pi(adev); 2890 u32 current_index = 2891 (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & 2892 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> 2893 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; 2894 u32 sclk, tmp; 2895 u16 vddc; 2896 2897 if (current_index >= SMU__NUM_SCLK_DPM_STATE) { 2898 seq_printf(m, "invalid dpm profile %d\n", current_index); 2899 } else { 2900 sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency); 2901 tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) & 2902 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >> 2903 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT; 2904 vddc = kv_convert_8bit_index_to_voltage(adev, (u16)tmp); 2905 seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en"); 2906 seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? 
"dis" : "en"); 2907 seq_printf(m, "power level %d sclk: %u vddc: %u\n", 2908 current_index, sclk, vddc); 2909 } 2910} 2911 2912static void 2913kv_dpm_print_power_state(struct amdgpu_device *adev, 2914 struct amdgpu_ps *rps) 2915{ 2916 int i; 2917 struct kv_ps *ps = kv_get_ps(rps); 2918 2919 amdgpu_dpm_print_class_info(rps->class, rps->class2); 2920 amdgpu_dpm_print_cap_info(rps->caps); 2921 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); 2922 for (i = 0; i < ps->num_levels; i++) { 2923 struct kv_pl *pl = &ps->levels[i]; 2924 printk("\t\tpower level %d sclk: %u vddc: %u\n", 2925 i, pl->sclk, 2926 kv_convert_8bit_index_to_voltage(adev, pl->vddc_index)); 2927 } 2928 amdgpu_dpm_print_ps_status(adev, rps); 2929} 2930 2931static void kv_dpm_fini(struct amdgpu_device *adev) 2932{ 2933 int i; 2934 2935 for (i = 0; i < adev->pm.dpm.num_ps; i++) { 2936 kfree(adev->pm.dpm.ps[i].ps_priv); 2937 } 2938 kfree(adev->pm.dpm.ps); 2939 kfree(adev->pm.dpm.priv); 2940 amdgpu_free_extended_power_table(adev); 2941} 2942 2943static void kv_dpm_display_configuration_changed(struct amdgpu_device *adev) 2944{ 2945 2946} 2947 2948static u32 kv_dpm_get_sclk(struct amdgpu_device *adev, bool low) 2949{ 2950 struct kv_power_info *pi = kv_get_pi(adev); 2951 struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps); 2952 2953 if (low) 2954 return requested_state->levels[0].sclk; 2955 else 2956 return requested_state->levels[requested_state->num_levels - 1].sclk; 2957} 2958 2959static u32 kv_dpm_get_mclk(struct amdgpu_device *adev, bool low) 2960{ 2961 struct kv_power_info *pi = kv_get_pi(adev); 2962 2963 return pi->sys_info.bootup_uma_clk; 2964} 2965 2966/* get temperature in millidegrees */ 2967static int kv_dpm_get_temp(struct amdgpu_device *adev) 2968{ 2969 u32 temp; 2970 int actual_temp = 0; 2971 2972 temp = RREG32_SMC(0xC0300E0C); 2973 2974 if (temp) 2975 actual_temp = (temp / 8) - 49; 2976 else 2977 actual_temp = 0; 2978 2979 actual_temp = actual_temp * 1000; 2980 2981 return actual_temp; 2982} 2983 2984static int kv_dpm_early_init(void *handle) 2985{ 2986 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2987 2988 kv_dpm_set_dpm_funcs(adev); 2989 kv_dpm_set_irq_funcs(adev); 2990 2991 return 0; 2992} 2993 2994static int kv_dpm_late_init(void *handle) 2995{ 2996 /* powerdown unused blocks for now */ 2997 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2998 int ret; 2999 3000 if (!amdgpu_dpm) 3001 return 0; 3002 3003 /* init the sysfs and debugfs files late */ 3004 ret = amdgpu_pm_sysfs_init(adev); 3005 if (ret) 3006 return ret; 3007 3008 kv_dpm_powergate_acp(adev, true); 3009 kv_dpm_powergate_samu(adev, true); 3010 kv_dpm_powergate_vce(adev, true); 3011 kv_dpm_powergate_uvd(adev, true); 3012 3013 return 0; 3014} 3015 3016static int kv_dpm_sw_init(void *handle) 3017{ 3018 int ret; 3019 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3020 3021 ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq); 3022 if (ret) 3023 return ret; 3024 3025 ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq); 3026 if (ret) 3027 return ret; 3028 3029 /* default to balanced state */ 3030 adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; 3031 adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; 3032 adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO; 3033 adev->pm.default_sclk = adev->clock.default_sclk; 3034 adev->pm.default_mclk = adev->clock.default_mclk; 3035 adev->pm.current_sclk = adev->clock.default_sclk; 3036 adev->pm.current_mclk = adev->clock.default_mclk; 
3037 adev->pm.int_thermal_type = THERMAL_TYPE_NONE; 3038 3039 if (amdgpu_dpm == 0) 3040 return 0; 3041 3042 INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler); 3043 mutex_lock(&adev->pm.mutex); 3044 ret = kv_dpm_init(adev); 3045 if (ret) 3046 goto dpm_failed; 3047 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; 3048 if (amdgpu_dpm == 1) 3049 amdgpu_pm_print_power_states(adev); 3050 mutex_unlock(&adev->pm.mutex); 3051 DRM_INFO("amdgpu: dpm initialized\n"); 3052 3053 return 0; 3054 3055dpm_failed: 3056 kv_dpm_fini(adev); 3057 mutex_unlock(&adev->pm.mutex); 3058 DRM_ERROR("amdgpu: dpm initialization failed\n"); 3059 return ret; 3060} 3061 3062static int kv_dpm_sw_fini(void *handle) 3063{ 3064 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3065 3066 mutex_lock(&adev->pm.mutex); 3067 amdgpu_pm_sysfs_fini(adev); 3068 kv_dpm_fini(adev); 3069 mutex_unlock(&adev->pm.mutex); 3070 3071 return 0; 3072} 3073 3074static int kv_dpm_hw_init(void *handle) 3075{ 3076 int ret; 3077 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3078 3079 mutex_lock(&adev->pm.mutex); 3080 kv_dpm_setup_asic(adev); 3081 ret = kv_dpm_enable(adev); 3082 if (ret) 3083 adev->pm.dpm_enabled = false; 3084 else 3085 adev->pm.dpm_enabled = true; 3086 mutex_unlock(&adev->pm.mutex); 3087 3088 return ret; 3089} 3090 3091static int kv_dpm_hw_fini(void *handle) 3092{ 3093 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3094 3095 if (adev->pm.dpm_enabled) { 3096 mutex_lock(&adev->pm.mutex); 3097 kv_dpm_disable(adev); 3098 mutex_unlock(&adev->pm.mutex); 3099 } 3100 3101 return 0; 3102} 3103 3104static int kv_dpm_suspend(void *handle) 3105{ 3106 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3107 3108 if (adev->pm.dpm_enabled) { 3109 mutex_lock(&adev->pm.mutex); 3110 /* disable dpm */ 3111 kv_dpm_disable(adev); 3112 /* reset the power state */ 3113 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; 3114 mutex_unlock(&adev->pm.mutex); 3115 } 3116 return 0; 3117} 3118 3119static int kv_dpm_resume(void *handle) 3120{ 3121 int ret; 3122 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3123 3124 if (adev->pm.dpm_enabled) { 3125 /* asic init will reset to the boot state */ 3126 mutex_lock(&adev->pm.mutex); 3127 kv_dpm_setup_asic(adev); 3128 ret = kv_dpm_enable(adev); 3129 if (ret) 3130 adev->pm.dpm_enabled = false; 3131 else 3132 adev->pm.dpm_enabled = true; 3133 mutex_unlock(&adev->pm.mutex); 3134 if (adev->pm.dpm_enabled) 3135 amdgpu_pm_compute_clocks(adev); 3136 } 3137 return 0; 3138} 3139 3140static bool kv_dpm_is_idle(void *handle) 3141{ 3142 return true; 3143} 3144 3145static int kv_dpm_wait_for_idle(void *handle) 3146{ 3147 return 0; 3148} 3149 3150static void kv_dpm_print_status(void *handle) 3151{ 3152 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3153 3154 dev_info(adev->dev, "KV/KB DPM registers\n"); 3155 dev_info(adev->dev, " DIDT_SQ_CTRL0=0x%08X\n", 3156 RREG32_DIDT(ixDIDT_SQ_CTRL0)); 3157 dev_info(adev->dev, " DIDT_DB_CTRL0=0x%08X\n", 3158 RREG32_DIDT(ixDIDT_DB_CTRL0)); 3159 dev_info(adev->dev, " DIDT_TD_CTRL0=0x%08X\n", 3160 RREG32_DIDT(ixDIDT_TD_CTRL0)); 3161 dev_info(adev->dev, " DIDT_TCP_CTRL0=0x%08X\n", 3162 RREG32_DIDT(ixDIDT_TCP_CTRL0)); 3163 dev_info(adev->dev, " LCAC_SX0_OVR_SEL=0x%08X\n", 3164 RREG32_SMC(ixLCAC_SX0_OVR_SEL)); 3165 dev_info(adev->dev, " LCAC_SX0_OVR_VAL=0x%08X\n", 3166 RREG32_SMC(ixLCAC_SX0_OVR_VAL)); 3167 dev_info(adev->dev, " LCAC_MC0_OVR_SEL=0x%08X\n", 
3168 RREG32_SMC(ixLCAC_MC0_OVR_SEL)); 3169 dev_info(adev->dev, " LCAC_MC0_OVR_VAL=0x%08X\n", 3170 RREG32_SMC(ixLCAC_MC0_OVR_VAL)); 3171 dev_info(adev->dev, " LCAC_MC1_OVR_SEL=0x%08X\n", 3172 RREG32_SMC(ixLCAC_MC1_OVR_SEL)); 3173 dev_info(adev->dev, " LCAC_MC1_OVR_VAL=0x%08X\n", 3174 RREG32_SMC(ixLCAC_MC1_OVR_VAL)); 3175 dev_info(adev->dev, " LCAC_MC2_OVR_SEL=0x%08X\n", 3176 RREG32_SMC(ixLCAC_MC2_OVR_SEL)); 3177 dev_info(adev->dev, " LCAC_MC2_OVR_VAL=0x%08X\n", 3178 RREG32_SMC(ixLCAC_MC2_OVR_VAL)); 3179 dev_info(adev->dev, " LCAC_MC3_OVR_SEL=0x%08X\n", 3180 RREG32_SMC(ixLCAC_MC3_OVR_SEL)); 3181 dev_info(adev->dev, " LCAC_MC3_OVR_VAL=0x%08X\n", 3182 RREG32_SMC(ixLCAC_MC3_OVR_VAL)); 3183 dev_info(adev->dev, " LCAC_CPL_OVR_SEL=0x%08X\n", 3184 RREG32_SMC(ixLCAC_CPL_OVR_SEL)); 3185 dev_info(adev->dev, " LCAC_CPL_OVR_VAL=0x%08X\n", 3186 RREG32_SMC(ixLCAC_CPL_OVR_VAL)); 3187 dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_0=0x%08X\n", 3188 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_0)); 3189 dev_info(adev->dev, " GENERAL_PWRMGT=0x%08X\n", 3190 RREG32_SMC(ixGENERAL_PWRMGT)); 3191 dev_info(adev->dev, " SCLK_PWRMGT_CNTL=0x%08X\n", 3192 RREG32_SMC(ixSCLK_PWRMGT_CNTL)); 3193 dev_info(adev->dev, " SMC_MESSAGE_0=0x%08X\n", 3194 RREG32(mmSMC_MESSAGE_0)); 3195 dev_info(adev->dev, " SMC_RESP_0=0x%08X\n", 3196 RREG32(mmSMC_RESP_0)); 3197 dev_info(adev->dev, " SMC_MSG_ARG_0=0x%08X\n", 3198 RREG32(mmSMC_MSG_ARG_0)); 3199 dev_info(adev->dev, " SMC_IND_INDEX_0=0x%08X\n", 3200 RREG32(mmSMC_IND_INDEX_0)); 3201 dev_info(adev->dev, " SMC_IND_DATA_0=0x%08X\n", 3202 RREG32(mmSMC_IND_DATA_0)); 3203 dev_info(adev->dev, " SMC_IND_ACCESS_CNTL=0x%08X\n", 3204 RREG32(mmSMC_IND_ACCESS_CNTL)); 3205} 3206 3207static int kv_dpm_soft_reset(void *handle) 3208{ 3209 return 0; 3210} 3211 3212static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev, 3213 struct amdgpu_irq_src *src, 3214 unsigned type, 3215 enum amdgpu_interrupt_state state) 3216{ 3217 u32 cg_thermal_int; 3218 3219 switch (type) { 3220 case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH: 3221 switch (state) { 3222 case AMDGPU_IRQ_STATE_DISABLE: 3223 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 3224 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; 3225 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); 3226 break; 3227 case AMDGPU_IRQ_STATE_ENABLE: 3228 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 3229 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; 3230 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); 3231 break; 3232 default: 3233 break; 3234 } 3235 break; 3236 3237 case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW: 3238 switch (state) { 3239 case AMDGPU_IRQ_STATE_DISABLE: 3240 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 3241 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; 3242 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); 3243 break; 3244 case AMDGPU_IRQ_STATE_ENABLE: 3245 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 3246 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; 3247 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); 3248 break; 3249 default: 3250 break; 3251 } 3252 break; 3253 3254 default: 3255 break; 3256 } 3257 return 0; 3258} 3259 3260static int kv_dpm_process_interrupt(struct amdgpu_device *adev, 3261 struct amdgpu_irq_src *source, 3262 struct amdgpu_iv_entry *entry) 3263{ 3264 bool queue_thermal = false; 3265 3266 if (entry == NULL) 3267 return -EINVAL; 3268 3269 switch (entry->src_id) { 3270 case 230: /* thermal low to high */ 3271 DRM_DEBUG("IH: thermal low to high\n"); 3272 
adev->pm.dpm.thermal.high_to_low = false; 3273 queue_thermal = true; 3274 break; 3275 case 231: /* thermal high to low */ 3276 DRM_DEBUG("IH: thermal high to low\n"); 3277 adev->pm.dpm.thermal.high_to_low = true; 3278 queue_thermal = true; 3279 break; 3280 default: 3281 break; 3282 } 3283 3284 if (queue_thermal) 3285 schedule_work(&adev->pm.dpm.thermal.work); 3286 3287 return 0; 3288} 3289 3290static int kv_dpm_set_clockgating_state(void *handle, 3291 enum amd_clockgating_state state) 3292{ 3293 return 0; 3294} 3295 3296static int kv_dpm_set_powergating_state(void *handle, 3297 enum amd_powergating_state state) 3298{ 3299 return 0; 3300} 3301 3302const struct amd_ip_funcs kv_dpm_ip_funcs = { 3303 .early_init = kv_dpm_early_init, 3304 .late_init = kv_dpm_late_init, 3305 .sw_init = kv_dpm_sw_init, 3306 .sw_fini = kv_dpm_sw_fini, 3307 .hw_init = kv_dpm_hw_init, 3308 .hw_fini = kv_dpm_hw_fini, 3309 .suspend = kv_dpm_suspend, 3310 .resume = kv_dpm_resume, 3311 .is_idle = kv_dpm_is_idle, 3312 .wait_for_idle = kv_dpm_wait_for_idle, 3313 .soft_reset = kv_dpm_soft_reset, 3314 .print_status = kv_dpm_print_status, 3315 .set_clockgating_state = kv_dpm_set_clockgating_state, 3316 .set_powergating_state = kv_dpm_set_powergating_state, 3317}; 3318 3319static const struct amdgpu_dpm_funcs kv_dpm_funcs = { 3320 .get_temperature = &kv_dpm_get_temp, 3321 .pre_set_power_state = &kv_dpm_pre_set_power_state, 3322 .set_power_state = &kv_dpm_set_power_state, 3323 .post_set_power_state = &kv_dpm_post_set_power_state, 3324 .display_configuration_changed = &kv_dpm_display_configuration_changed, 3325 .get_sclk = &kv_dpm_get_sclk, 3326 .get_mclk = &kv_dpm_get_mclk, 3327 .print_power_state = &kv_dpm_print_power_state, 3328 .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level, 3329 .force_performance_level = &kv_dpm_force_performance_level, 3330 .powergate_uvd = &kv_dpm_powergate_uvd, 3331 .enable_bapm = &kv_dpm_enable_bapm, 3332}; 3333 3334static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev) 3335{ 3336 if (adev->pm.funcs == NULL) 3337 adev->pm.funcs = &kv_dpm_funcs; 3338} 3339 3340static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = { 3341 .set = kv_dpm_set_interrupt_state, 3342 .process = kv_dpm_process_interrupt, 3343}; 3344 3345static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev) 3346{ 3347 adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; 3348 adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs; 3349}
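
The following standalone sketches illustrate the arithmetic in this listing; they are editorial illustrations, not part of the driver. First, the thermal programming: kv_dpm_get_temp() converts the raw SMC reading with (temp / 8) - 49, and kv_set_thermal_temperature_range() adds the same 49-degree offset back when it programs the DIG_THERM_INTH/INTL fields, with the register fields in whole degrees Celsius while the DPM core works in millidegrees. A minimal sketch of that encoding, using an assumed 90/120 degree C trip range and an illustrative helper name:

/* Illustrative sketch of the DIG_THERM_INTH/INTL encoding used by
 * kv_set_thermal_temperature_range(); not part of the driver. */
#include <stdio.h>

/* The threshold fields hold (49 + degrees C); the read path undoes the
 * same offset with (raw / 8) - 49 in kv_dpm_get_temp(). */
static unsigned int encode_dig_therm(int millidegrees)
{
	return 49 + (millidegrees / 1000);
}

int main(void)
{
	int low_temp = 90000;	/* example trip points, in millidegrees */
	int high_temp = 120000;

	printf("DIG_THERM_INTL = %u\n", encode_dig_therm(low_temp));	/* 139 */
	printf("DIG_THERM_INTH = %u\n", encode_dig_therm(high_temp));	/* 169 */
	return 0;
}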
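
Second, the forced-performance paths reduce to bitmask arithmetic: kv_force_dpm_highest()/kv_force_dpm_lowest() scan the mask returned by amdgpu_kv_dpm_get_enable_mask() for the highest/lowest set bit, and kv_set_enabled_levels() rebuilds a contiguous mask from lowest_valid to highest_valid before sending PPSMC_MSG_SCLKDPM_SetEnabledMask. A minimal sketch of those three computations, detached from the SMU plumbing (the function names and example mask are illustrative):

/* Illustrative sketch of the enable-mask arithmetic used by
 * kv_force_dpm_highest/lowest() and kv_set_enabled_levels(). */
#include <stdio.h>

#define MAX_LEVELS_GRAPHICS 8	/* stand-in for SMU7_MAX_LEVELS_GRAPHICS */

static unsigned int highest_enabled_level(unsigned int enable_mask)
{
	unsigned int i;

	for (i = MAX_LEVELS_GRAPHICS - 1; i > 0; i--)
		if (enable_mask & (1u << i))
			break;
	return i;
}

static unsigned int lowest_enabled_level(unsigned int enable_mask)
{
	unsigned int i;

	for (i = 0; i < MAX_LEVELS_GRAPHICS; i++)
		if (enable_mask & (1u << i))
			break;
	return i;
}

static unsigned int contiguous_mask(unsigned int lowest, unsigned int highest)
{
	unsigned int i, mask = 0;

	for (i = lowest; i <= highest; i++)
		mask |= 1u << i;
	return mask;
}

int main(void)
{
	unsigned int mask = 0x16;	/* levels 1, 2 and 4 enabled */

	printf("highest: %u\n", highest_enabled_level(mask));		/* 4 */
	printf("lowest:  %u\n", lowest_enabled_level(mask));		/* 1 */
	printf("levels 1..4 mask: 0x%x\n", contiguous_mask(1, 4));	/* 0x1e */
	return 0;
}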
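
Finally, kv_get_sleep_divider_id_from_clock() picks the deepest sleep-divider ID whose divided clock still stays at or above the engine-clock floor. Assuming sumo_get_sleep_divider_from_id() maps an ID to a power-of-two divider (1 << id), which is how the Sumo helper is commonly implemented but is not shown in this file, the selection reduces to the sketch below (clocks are in the driver's 10 kHz units; the numbers are illustrative):

/* Illustrative sketch of kv_get_sleep_divider_id_from_clock(); assumes the
 * divider for a given ID is (1 << id), which is not shown in this file. */
#include <stdio.h>

#define MAX_DEEPSLEEP_DIVIDER_ID 5	/* stand-in for KV_MAX_DEEPSLEEP_DIVIDER_ID */
#define MINIMUM_ENGINE_CLOCK	 800	/* stand-in for KV_MINIMUM_ENGINE_CLOCK */

static unsigned int sleep_divider_id(unsigned int sclk, unsigned int min_sclk_in_sr)
{
	unsigned int min = (min_sclk_in_sr > MINIMUM_ENGINE_CLOCK) ?
		min_sclk_in_sr : MINIMUM_ENGINE_CLOCK;
	unsigned int i;

	if (sclk < min)
		return 0;

	/* walk from the deepest divider down until the divided clock
	 * is still at or above the floor */
	for (i = MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--)
		if (sclk / (1u << i) >= min)
			break;

	return i;
}

int main(void)
{
	/* 300 MHz engine clock (30000 * 10 kHz), 100 MHz floor request */
	printf("divider id: %u\n", sleep_divider_id(30000, 10000));	/* 1 */
	return 0;
}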