Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/gpu/drm/amd/amdgpu/kv_dpm.c at v4.9-rc6 (3295 lines, 92 kB)
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "cikd.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_dpm.h"
#include "kv_dpm.h"
#include "gfx_v7_0.h"
#include <linux/seq_file.h>

#include "smu/smu_7_0_0_d.h"
#include "smu/smu_7_0_0_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"

#define KV_MAX_DEEPSLEEP_DIVIDER_ID	5
#define KV_MINIMUM_ENGINE_CLOCK		800
#define SMC_RAM_END			0x40000

static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev);
static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
static int kv_enable_nb_dpm(struct amdgpu_device *adev,
			    bool enable);
static void kv_init_graphics_levels(struct amdgpu_device *adev);
static int kv_calculate_ds_divider(struct amdgpu_device *adev);
static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev);
static int kv_calculate_dpm_settings(struct amdgpu_device *adev);
static void kv_enable_new_levels(struct amdgpu_device *adev);
static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
					   struct amdgpu_ps *new_rps);
static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level);
static int kv_set_enabled_levels(struct amdgpu_device *adev);
static int kv_force_dpm_highest(struct amdgpu_device *adev);
static int kv_force_dpm_lowest(struct amdgpu_device *adev);
static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
					struct amdgpu_ps *new_rps,
					struct amdgpu_ps *old_rps);
static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
					    int min_temp, int max_temp);
static int kv_init_fps_limits(struct amdgpu_device *adev);

static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);


static u32 kv_convert_vid2_to_vid7(struct amdgpu_device *adev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_2bit)
{
	struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		if (vid_2bit < vddc_sclk_table->count)
			return vddc_sclk_table->entries[vid_2bit].v;
		else
			return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
				return vid_mapping_table->entries[i].vid_7bit;
		}
		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
	}
}

static u32 kv_convert_vid7_to_vid2(struct amdgpu_device *adev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_7bit)
{
	struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		for (i = 0; i < vddc_sclk_table->count; i++) {
			if (vddc_sclk_table->entries[i].v == vid_7bit)
				return i;
		}
		return vddc_sclk_table->count - 1;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
				return vid_mapping_table->entries[i].vid_2bit;
		}

		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
	}
}

static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable)
{
/* This bit selects who handles display phy powergating.
 * Clear the bit to let atom handle it.
 * Set it to let the driver handle it.
 * For now we just let atom handle it.
 */
#if 0
	u32 v = RREG32(mmDOUT_SCRATCH3);

	if (enable)
		v |= 0x4;
	else
		v &= 0xFFFFFFFB;

	WREG32(mmDOUT_SCRATCH3, v);
#endif
}

static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev,
						      struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table,
						      ATOM_AVAILABLE_SCLK_LIST *table)
{
	u32 i;
	u32 n = 0;
	u32 prev_sclk = 0;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
		if (table[i].ulSupportedSCLK > prev_sclk) {
			sclk_voltage_mapping_table->entries[n].sclk_frequency =
				table[i].ulSupportedSCLK;
			sclk_voltage_mapping_table->entries[n].vid_2bit =
				table[i].usVoltageIndex;
			prev_sclk = table[i].ulSupportedSCLK;
			n++;
		}
	}

	sclk_voltage_mapping_table->num_max_dpm_entries = n;
}

static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
					     struct sumo_vid_mapping_table *vid_mapping_table,
					     ATOM_AVAILABLE_SCLK_LIST *table)
{
	u32 i, j;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
		if (table[i].ulSupportedSCLK != 0) {
			vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
				table[i].usVoltageID;
			vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
				table[i].usVoltageIndex;
		}
	}

	for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) {
		if (vid_mapping_table->entries[i].vid_7bit == 0) {
			for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) {
				if (vid_mapping_table->entries[j].vid_7bit != 0) {
					vid_mapping_table->entries[i] =
						vid_mapping_table->entries[j];
					vid_mapping_table->entries[j].vid_7bit = 0;
					break;
				}
			}

			if (j == SUMO_MAX_NUMBER_VOLTAGES)
				break;
		}
	}

	vid_mapping_table->num_entries = i;
}

#if 0
static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 2 },
	{ 4, 1, 1 },
	{ 5, 5, 2 },
	{ 6, 6, 1 },
	{ 7, 9, 2 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values
mc0_local_cac_cfg_kv[] = 209{ 210 { 0, 4, 1 }, 211 { 0xffffffff } 212}; 213 214static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] = 215{ 216 { 0, 4, 1 }, 217 { 0xffffffff } 218}; 219 220static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] = 221{ 222 { 0, 4, 1 }, 223 { 0xffffffff } 224}; 225 226static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] = 227{ 228 { 0, 4, 1 }, 229 { 0xffffffff } 230}; 231 232static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] = 233{ 234 { 0, 4, 1 }, 235 { 1, 4, 1 }, 236 { 2, 5, 1 }, 237 { 3, 4, 1 }, 238 { 4, 1, 1 }, 239 { 5, 5, 1 }, 240 { 6, 6, 1 }, 241 { 7, 9, 1 }, 242 { 8, 4, 1 }, 243 { 9, 2, 1 }, 244 { 10, 3, 1 }, 245 { 11, 6, 1 }, 246 { 12, 8, 2 }, 247 { 13, 1, 1 }, 248 { 14, 2, 1 }, 249 { 15, 3, 1 }, 250 { 16, 1, 1 }, 251 { 17, 4, 1 }, 252 { 18, 3, 1 }, 253 { 19, 1, 1 }, 254 { 20, 8, 1 }, 255 { 21, 5, 1 }, 256 { 22, 1, 1 }, 257 { 23, 1, 1 }, 258 { 24, 4, 1 }, 259 { 27, 6, 1 }, 260 { 28, 1, 1 }, 261 { 0xffffffff } 262}; 263 264static const struct kv_lcac_config_reg sx0_cac_config_reg[] = 265{ 266 { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 267}; 268 269static const struct kv_lcac_config_reg mc0_cac_config_reg[] = 270{ 271 { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 272}; 273 274static const struct kv_lcac_config_reg mc1_cac_config_reg[] = 275{ 276 { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 277}; 278 279static const struct kv_lcac_config_reg mc2_cac_config_reg[] = 280{ 281 { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 282}; 283 284static const struct kv_lcac_config_reg mc3_cac_config_reg[] = 285{ 286 { 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 287}; 288 289static const struct kv_lcac_config_reg cpl_cac_config_reg[] = 290{ 291 { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 292}; 293#endif 294 295static const struct kv_pt_config_reg didt_config_kv[] = 296{ 297 { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 298 { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 299 { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 300 { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 301 { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 302 { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 303 { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 304 { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 305 { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 306 { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 307 { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 308 { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 309 { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, 310 { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, 311 { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, 312 { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 313 { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 314 { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 315 { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 316 { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 317 { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 318 { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 319 { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 320 { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 321 { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 322 { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 
323 { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 324 { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 325 { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 326 { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 327 { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, 328 { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, 329 { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, 330 { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 331 { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 332 { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 333 { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 334 { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 335 { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 336 { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 337 { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 338 { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 339 { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 340 { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 341 { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 342 { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 343 { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 344 { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 345 { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, 346 { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, 347 { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, 348 { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 349 { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 350 { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 351 { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 352 { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 353 { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 354 { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 355 { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 356 { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 357 { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 358 { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 359 { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 360 { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 361 { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 362 { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 363 { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, 364 { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, 365 { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, 366 { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 367 { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 368 { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 369 { 0xFFFFFFFF } 370}; 371 372static struct kv_ps *kv_get_ps(struct amdgpu_ps *rps) 373{ 374 struct kv_ps *ps = rps->ps_priv; 375 376 return ps; 377} 378 379static struct kv_power_info *kv_get_pi(struct amdgpu_device *adev) 380{ 381 struct kv_power_info *pi = adev->pm.dpm.priv; 382 383 return pi; 384} 385 386#if 0 387static void kv_program_local_cac_table(struct amdgpu_device *adev, 388 const struct kv_lcac_config_values *local_cac_table, 389 const struct kv_lcac_config_reg *local_cac_reg) 390{ 391 u32 i, count, data; 392 const struct kv_lcac_config_values *values = local_cac_table; 393 394 while (values->block_id != 0xffffffff) { 395 count = values->signal_id; 396 for (i = 0; i < count; i++) { 397 data = ((values->block_id << local_cac_reg->block_shift) & 398 local_cac_reg->block_mask); 399 data |= ((i << local_cac_reg->signal_shift) & 400 local_cac_reg->signal_mask); 401 
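			/* fold the per-signal threshold (t) and the enable bit into
			 * the same CNTL value before it is written below */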
data |= ((values->t << local_cac_reg->t_shift) & 402 local_cac_reg->t_mask); 403 data |= ((1 << local_cac_reg->enable_shift) & 404 local_cac_reg->enable_mask); 405 WREG32_SMC(local_cac_reg->cntl, data); 406 } 407 values++; 408 } 409} 410#endif 411 412static int kv_program_pt_config_registers(struct amdgpu_device *adev, 413 const struct kv_pt_config_reg *cac_config_regs) 414{ 415 const struct kv_pt_config_reg *config_regs = cac_config_regs; 416 u32 data; 417 u32 cache = 0; 418 419 if (config_regs == NULL) 420 return -EINVAL; 421 422 while (config_regs->offset != 0xFFFFFFFF) { 423 if (config_regs->type == KV_CONFIGREG_CACHE) { 424 cache |= ((config_regs->value << config_regs->shift) & config_regs->mask); 425 } else { 426 switch (config_regs->type) { 427 case KV_CONFIGREG_SMC_IND: 428 data = RREG32_SMC(config_regs->offset); 429 break; 430 case KV_CONFIGREG_DIDT_IND: 431 data = RREG32_DIDT(config_regs->offset); 432 break; 433 default: 434 data = RREG32(config_regs->offset); 435 break; 436 } 437 438 data &= ~config_regs->mask; 439 data |= ((config_regs->value << config_regs->shift) & config_regs->mask); 440 data |= cache; 441 cache = 0; 442 443 switch (config_regs->type) { 444 case KV_CONFIGREG_SMC_IND: 445 WREG32_SMC(config_regs->offset, data); 446 break; 447 case KV_CONFIGREG_DIDT_IND: 448 WREG32_DIDT(config_regs->offset, data); 449 break; 450 default: 451 WREG32(config_regs->offset, data); 452 break; 453 } 454 } 455 config_regs++; 456 } 457 458 return 0; 459} 460 461static void kv_do_enable_didt(struct amdgpu_device *adev, bool enable) 462{ 463 struct kv_power_info *pi = kv_get_pi(adev); 464 u32 data; 465 466 if (pi->caps_sq_ramping) { 467 data = RREG32_DIDT(ixDIDT_SQ_CTRL0); 468 if (enable) 469 data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; 470 else 471 data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; 472 WREG32_DIDT(ixDIDT_SQ_CTRL0, data); 473 } 474 475 if (pi->caps_db_ramping) { 476 data = RREG32_DIDT(ixDIDT_DB_CTRL0); 477 if (enable) 478 data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; 479 else 480 data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; 481 WREG32_DIDT(ixDIDT_DB_CTRL0, data); 482 } 483 484 if (pi->caps_td_ramping) { 485 data = RREG32_DIDT(ixDIDT_TD_CTRL0); 486 if (enable) 487 data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; 488 else 489 data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; 490 WREG32_DIDT(ixDIDT_TD_CTRL0, data); 491 } 492 493 if (pi->caps_tcp_ramping) { 494 data = RREG32_DIDT(ixDIDT_TCP_CTRL0); 495 if (enable) 496 data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; 497 else 498 data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; 499 WREG32_DIDT(ixDIDT_TCP_CTRL0, data); 500 } 501} 502 503static int kv_enable_didt(struct amdgpu_device *adev, bool enable) 504{ 505 struct kv_power_info *pi = kv_get_pi(adev); 506 int ret; 507 508 if (pi->caps_sq_ramping || 509 pi->caps_db_ramping || 510 pi->caps_td_ramping || 511 pi->caps_tcp_ramping) { 512 adev->gfx.rlc.funcs->enter_safe_mode(adev); 513 514 if (enable) { 515 ret = kv_program_pt_config_registers(adev, didt_config_kv); 516 if (ret) { 517 adev->gfx.rlc.funcs->exit_safe_mode(adev); 518 return ret; 519 } 520 } 521 522 kv_do_enable_didt(adev, enable); 523 524 adev->gfx.rlc.funcs->exit_safe_mode(adev); 525 } 526 527 return 0; 528} 529 530#if 0 531static void kv_initialize_hardware_cac_manager(struct amdgpu_device *adev) 532{ 533 struct kv_power_info *pi = kv_get_pi(adev); 534 535 if (pi->caps_cac) { 536 WREG32_SMC(ixLCAC_SX0_OVR_SEL, 0); 537 WREG32_SMC(ixLCAC_SX0_OVR_VAL, 0); 538 kv_program_local_cac_table(adev, sx_local_cac_cfg_kv, sx0_cac_config_reg); 539 540 
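		/* repeat the same sequence for each remaining CAC domain (MC0-MC3
		 * and CPL): clear the override select/value registers, then load
		 * that domain's local CAC table */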
WREG32_SMC(ixLCAC_MC0_OVR_SEL, 0); 541 WREG32_SMC(ixLCAC_MC0_OVR_VAL, 0); 542 kv_program_local_cac_table(adev, mc0_local_cac_cfg_kv, mc0_cac_config_reg); 543 544 WREG32_SMC(ixLCAC_MC1_OVR_SEL, 0); 545 WREG32_SMC(ixLCAC_MC1_OVR_VAL, 0); 546 kv_program_local_cac_table(adev, mc1_local_cac_cfg_kv, mc1_cac_config_reg); 547 548 WREG32_SMC(ixLCAC_MC2_OVR_SEL, 0); 549 WREG32_SMC(ixLCAC_MC2_OVR_VAL, 0); 550 kv_program_local_cac_table(adev, mc2_local_cac_cfg_kv, mc2_cac_config_reg); 551 552 WREG32_SMC(ixLCAC_MC3_OVR_SEL, 0); 553 WREG32_SMC(ixLCAC_MC3_OVR_VAL, 0); 554 kv_program_local_cac_table(adev, mc3_local_cac_cfg_kv, mc3_cac_config_reg); 555 556 WREG32_SMC(ixLCAC_CPL_OVR_SEL, 0); 557 WREG32_SMC(ixLCAC_CPL_OVR_VAL, 0); 558 kv_program_local_cac_table(adev, cpl_local_cac_cfg_kv, cpl_cac_config_reg); 559 } 560} 561#endif 562 563static int kv_enable_smc_cac(struct amdgpu_device *adev, bool enable) 564{ 565 struct kv_power_info *pi = kv_get_pi(adev); 566 int ret = 0; 567 568 if (pi->caps_cac) { 569 if (enable) { 570 ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableCac); 571 if (ret) 572 pi->cac_enabled = false; 573 else 574 pi->cac_enabled = true; 575 } else if (pi->cac_enabled) { 576 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableCac); 577 pi->cac_enabled = false; 578 } 579 } 580 581 return ret; 582} 583 584static int kv_process_firmware_header(struct amdgpu_device *adev) 585{ 586 struct kv_power_info *pi = kv_get_pi(adev); 587 u32 tmp; 588 int ret; 589 590 ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION + 591 offsetof(SMU7_Firmware_Header, DpmTable), 592 &tmp, pi->sram_end); 593 594 if (ret == 0) 595 pi->dpm_table_start = tmp; 596 597 ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION + 598 offsetof(SMU7_Firmware_Header, SoftRegisters), 599 &tmp, pi->sram_end); 600 601 if (ret == 0) 602 pi->soft_regs_start = tmp; 603 604 return ret; 605} 606 607static int kv_enable_dpm_voltage_scaling(struct amdgpu_device *adev) 608{ 609 struct kv_power_info *pi = kv_get_pi(adev); 610 int ret; 611 612 pi->graphics_voltage_change_enable = 1; 613 614 ret = amdgpu_kv_copy_bytes_to_smc(adev, 615 pi->dpm_table_start + 616 offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable), 617 &pi->graphics_voltage_change_enable, 618 sizeof(u8), pi->sram_end); 619 620 return ret; 621} 622 623static int kv_set_dpm_interval(struct amdgpu_device *adev) 624{ 625 struct kv_power_info *pi = kv_get_pi(adev); 626 int ret; 627 628 pi->graphics_interval = 1; 629 630 ret = amdgpu_kv_copy_bytes_to_smc(adev, 631 pi->dpm_table_start + 632 offsetof(SMU7_Fusion_DpmTable, GraphicsInterval), 633 &pi->graphics_interval, 634 sizeof(u8), pi->sram_end); 635 636 return ret; 637} 638 639static int kv_set_dpm_boot_state(struct amdgpu_device *adev) 640{ 641 struct kv_power_info *pi = kv_get_pi(adev); 642 int ret; 643 644 ret = amdgpu_kv_copy_bytes_to_smc(adev, 645 pi->dpm_table_start + 646 offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel), 647 &pi->graphics_boot_level, 648 sizeof(u8), pi->sram_end); 649 650 return ret; 651} 652 653static void kv_program_vc(struct amdgpu_device *adev) 654{ 655 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0x3FFFC100); 656} 657 658static void kv_clear_vc(struct amdgpu_device *adev) 659{ 660 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0); 661} 662 663static int kv_set_divider_value(struct amdgpu_device *adev, 664 u32 index, u32 sclk) 665{ 666 struct kv_power_info *pi = kv_get_pi(adev); 667 struct atom_clock_dividers dividers; 668 int ret; 669 670 ret = 
amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 671 sclk, false, &dividers); 672 if (ret) 673 return ret; 674 675 pi->graphics_level[index].SclkDid = (u8)dividers.post_div; 676 pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk); 677 678 return 0; 679} 680 681static u16 kv_convert_8bit_index_to_voltage(struct amdgpu_device *adev, 682 u16 voltage) 683{ 684 return 6200 - (voltage * 25); 685} 686 687static u16 kv_convert_2bit_index_to_voltage(struct amdgpu_device *adev, 688 u32 vid_2bit) 689{ 690 struct kv_power_info *pi = kv_get_pi(adev); 691 u32 vid_8bit = kv_convert_vid2_to_vid7(adev, 692 &pi->sys_info.vid_mapping_table, 693 vid_2bit); 694 695 return kv_convert_8bit_index_to_voltage(adev, (u16)vid_8bit); 696} 697 698 699static int kv_set_vid(struct amdgpu_device *adev, u32 index, u32 vid) 700{ 701 struct kv_power_info *pi = kv_get_pi(adev); 702 703 pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t; 704 pi->graphics_level[index].MinVddNb = 705 cpu_to_be32(kv_convert_2bit_index_to_voltage(adev, vid)); 706 707 return 0; 708} 709 710static int kv_set_at(struct amdgpu_device *adev, u32 index, u32 at) 711{ 712 struct kv_power_info *pi = kv_get_pi(adev); 713 714 pi->graphics_level[index].AT = cpu_to_be16((u16)at); 715 716 return 0; 717} 718 719static void kv_dpm_power_level_enable(struct amdgpu_device *adev, 720 u32 index, bool enable) 721{ 722 struct kv_power_info *pi = kv_get_pi(adev); 723 724 pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0; 725} 726 727static void kv_start_dpm(struct amdgpu_device *adev) 728{ 729 u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT); 730 731 tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK; 732 WREG32_SMC(ixGENERAL_PWRMGT, tmp); 733 734 amdgpu_kv_smc_dpm_enable(adev, true); 735} 736 737static void kv_stop_dpm(struct amdgpu_device *adev) 738{ 739 amdgpu_kv_smc_dpm_enable(adev, false); 740} 741 742static void kv_start_am(struct amdgpu_device *adev) 743{ 744 u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL); 745 746 sclk_pwrmgt_cntl &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | 747 SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); 748 sclk_pwrmgt_cntl |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK; 749 750 WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl); 751} 752 753static void kv_reset_am(struct amdgpu_device *adev) 754{ 755 u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL); 756 757 sclk_pwrmgt_cntl |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | 758 SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); 759 760 WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl); 761} 762 763static int kv_freeze_sclk_dpm(struct amdgpu_device *adev, bool freeze) 764{ 765 return amdgpu_kv_notify_message_to_smu(adev, freeze ? 
766 PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel); 767} 768 769static int kv_force_lowest_valid(struct amdgpu_device *adev) 770{ 771 return kv_force_dpm_lowest(adev); 772} 773 774static int kv_unforce_levels(struct amdgpu_device *adev) 775{ 776 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 777 return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NoForcedLevel); 778 else 779 return kv_set_enabled_levels(adev); 780} 781 782static int kv_update_sclk_t(struct amdgpu_device *adev) 783{ 784 struct kv_power_info *pi = kv_get_pi(adev); 785 u32 low_sclk_interrupt_t = 0; 786 int ret = 0; 787 788 if (pi->caps_sclk_throttle_low_notification) { 789 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t); 790 791 ret = amdgpu_kv_copy_bytes_to_smc(adev, 792 pi->dpm_table_start + 793 offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT), 794 (u8 *)&low_sclk_interrupt_t, 795 sizeof(u32), pi->sram_end); 796 } 797 return ret; 798} 799 800static int kv_program_bootup_state(struct amdgpu_device *adev) 801{ 802 struct kv_power_info *pi = kv_get_pi(adev); 803 u32 i; 804 struct amdgpu_clock_voltage_dependency_table *table = 805 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 806 807 if (table && table->count) { 808 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 809 if (table->entries[i].clk == pi->boot_pl.sclk) 810 break; 811 } 812 813 pi->graphics_boot_level = (u8)i; 814 kv_dpm_power_level_enable(adev, i, true); 815 } else { 816 struct sumo_sclk_voltage_mapping_table *table = 817 &pi->sys_info.sclk_voltage_mapping_table; 818 819 if (table->num_max_dpm_entries == 0) 820 return -EINVAL; 821 822 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 823 if (table->entries[i].sclk_frequency == pi->boot_pl.sclk) 824 break; 825 } 826 827 pi->graphics_boot_level = (u8)i; 828 kv_dpm_power_level_enable(adev, i, true); 829 } 830 return 0; 831} 832 833static int kv_enable_auto_thermal_throttling(struct amdgpu_device *adev) 834{ 835 struct kv_power_info *pi = kv_get_pi(adev); 836 int ret; 837 838 pi->graphics_therm_throttle_enable = 1; 839 840 ret = amdgpu_kv_copy_bytes_to_smc(adev, 841 pi->dpm_table_start + 842 offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable), 843 &pi->graphics_therm_throttle_enable, 844 sizeof(u8), pi->sram_end); 845 846 return ret; 847} 848 849static int kv_upload_dpm_settings(struct amdgpu_device *adev) 850{ 851 struct kv_power_info *pi = kv_get_pi(adev); 852 int ret; 853 854 ret = amdgpu_kv_copy_bytes_to_smc(adev, 855 pi->dpm_table_start + 856 offsetof(SMU7_Fusion_DpmTable, GraphicsLevel), 857 (u8 *)&pi->graphics_level, 858 sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS, 859 pi->sram_end); 860 861 if (ret) 862 return ret; 863 864 ret = amdgpu_kv_copy_bytes_to_smc(adev, 865 pi->dpm_table_start + 866 offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount), 867 &pi->graphics_dpm_level_count, 868 sizeof(u8), pi->sram_end); 869 870 return ret; 871} 872 873static u32 kv_get_clock_difference(u32 a, u32 b) 874{ 875 return (a >= b) ? 
a - b : b - a; 876} 877 878static u32 kv_get_clk_bypass(struct amdgpu_device *adev, u32 clk) 879{ 880 struct kv_power_info *pi = kv_get_pi(adev); 881 u32 value; 882 883 if (pi->caps_enable_dfs_bypass) { 884 if (kv_get_clock_difference(clk, 40000) < 200) 885 value = 3; 886 else if (kv_get_clock_difference(clk, 30000) < 200) 887 value = 2; 888 else if (kv_get_clock_difference(clk, 20000) < 200) 889 value = 7; 890 else if (kv_get_clock_difference(clk, 15000) < 200) 891 value = 6; 892 else if (kv_get_clock_difference(clk, 10000) < 200) 893 value = 8; 894 else 895 value = 0; 896 } else { 897 value = 0; 898 } 899 900 return value; 901} 902 903static int kv_populate_uvd_table(struct amdgpu_device *adev) 904{ 905 struct kv_power_info *pi = kv_get_pi(adev); 906 struct amdgpu_uvd_clock_voltage_dependency_table *table = 907 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 908 struct atom_clock_dividers dividers; 909 int ret; 910 u32 i; 911 912 if (table == NULL || table->count == 0) 913 return 0; 914 915 pi->uvd_level_count = 0; 916 for (i = 0; i < table->count; i++) { 917 if (pi->high_voltage_t && 918 (pi->high_voltage_t < table->entries[i].v)) 919 break; 920 921 pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk); 922 pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk); 923 pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v); 924 925 pi->uvd_level[i].VClkBypassCntl = 926 (u8)kv_get_clk_bypass(adev, table->entries[i].vclk); 927 pi->uvd_level[i].DClkBypassCntl = 928 (u8)kv_get_clk_bypass(adev, table->entries[i].dclk); 929 930 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 931 table->entries[i].vclk, false, &dividers); 932 if (ret) 933 return ret; 934 pi->uvd_level[i].VclkDivider = (u8)dividers.post_div; 935 936 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 937 table->entries[i].dclk, false, &dividers); 938 if (ret) 939 return ret; 940 pi->uvd_level[i].DclkDivider = (u8)dividers.post_div; 941 942 pi->uvd_level_count++; 943 } 944 945 ret = amdgpu_kv_copy_bytes_to_smc(adev, 946 pi->dpm_table_start + 947 offsetof(SMU7_Fusion_DpmTable, UvdLevelCount), 948 (u8 *)&pi->uvd_level_count, 949 sizeof(u8), pi->sram_end); 950 if (ret) 951 return ret; 952 953 pi->uvd_interval = 1; 954 955 ret = amdgpu_kv_copy_bytes_to_smc(adev, 956 pi->dpm_table_start + 957 offsetof(SMU7_Fusion_DpmTable, UVDInterval), 958 &pi->uvd_interval, 959 sizeof(u8), pi->sram_end); 960 if (ret) 961 return ret; 962 963 ret = amdgpu_kv_copy_bytes_to_smc(adev, 964 pi->dpm_table_start + 965 offsetof(SMU7_Fusion_DpmTable, UvdLevel), 966 (u8 *)&pi->uvd_level, 967 sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD, 968 pi->sram_end); 969 970 return ret; 971 972} 973 974static int kv_populate_vce_table(struct amdgpu_device *adev) 975{ 976 struct kv_power_info *pi = kv_get_pi(adev); 977 int ret; 978 u32 i; 979 struct amdgpu_vce_clock_voltage_dependency_table *table = 980 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 981 struct atom_clock_dividers dividers; 982 983 if (table == NULL || table->count == 0) 984 return 0; 985 986 pi->vce_level_count = 0; 987 for (i = 0; i < table->count; i++) { 988 if (pi->high_voltage_t && 989 pi->high_voltage_t < table->entries[i].v) 990 break; 991 992 pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk); 993 pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); 994 995 pi->vce_level[i].ClkBypassCntl = 996 (u8)kv_get_clk_bypass(adev, table->entries[i].evclk); 997 998 ret = 
amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 999 table->entries[i].evclk, false, &dividers); 1000 if (ret) 1001 return ret; 1002 pi->vce_level[i].Divider = (u8)dividers.post_div; 1003 1004 pi->vce_level_count++; 1005 } 1006 1007 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1008 pi->dpm_table_start + 1009 offsetof(SMU7_Fusion_DpmTable, VceLevelCount), 1010 (u8 *)&pi->vce_level_count, 1011 sizeof(u8), 1012 pi->sram_end); 1013 if (ret) 1014 return ret; 1015 1016 pi->vce_interval = 1; 1017 1018 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1019 pi->dpm_table_start + 1020 offsetof(SMU7_Fusion_DpmTable, VCEInterval), 1021 (u8 *)&pi->vce_interval, 1022 sizeof(u8), 1023 pi->sram_end); 1024 if (ret) 1025 return ret; 1026 1027 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1028 pi->dpm_table_start + 1029 offsetof(SMU7_Fusion_DpmTable, VceLevel), 1030 (u8 *)&pi->vce_level, 1031 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE, 1032 pi->sram_end); 1033 1034 return ret; 1035} 1036 1037static int kv_populate_samu_table(struct amdgpu_device *adev) 1038{ 1039 struct kv_power_info *pi = kv_get_pi(adev); 1040 struct amdgpu_clock_voltage_dependency_table *table = 1041 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; 1042 struct atom_clock_dividers dividers; 1043 int ret; 1044 u32 i; 1045 1046 if (table == NULL || table->count == 0) 1047 return 0; 1048 1049 pi->samu_level_count = 0; 1050 for (i = 0; i < table->count; i++) { 1051 if (pi->high_voltage_t && 1052 pi->high_voltage_t < table->entries[i].v) 1053 break; 1054 1055 pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk); 1056 pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); 1057 1058 pi->samu_level[i].ClkBypassCntl = 1059 (u8)kv_get_clk_bypass(adev, table->entries[i].clk); 1060 1061 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 1062 table->entries[i].clk, false, &dividers); 1063 if (ret) 1064 return ret; 1065 pi->samu_level[i].Divider = (u8)dividers.post_div; 1066 1067 pi->samu_level_count++; 1068 } 1069 1070 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1071 pi->dpm_table_start + 1072 offsetof(SMU7_Fusion_DpmTable, SamuLevelCount), 1073 (u8 *)&pi->samu_level_count, 1074 sizeof(u8), 1075 pi->sram_end); 1076 if (ret) 1077 return ret; 1078 1079 pi->samu_interval = 1; 1080 1081 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1082 pi->dpm_table_start + 1083 offsetof(SMU7_Fusion_DpmTable, SAMUInterval), 1084 (u8 *)&pi->samu_interval, 1085 sizeof(u8), 1086 pi->sram_end); 1087 if (ret) 1088 return ret; 1089 1090 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1091 pi->dpm_table_start + 1092 offsetof(SMU7_Fusion_DpmTable, SamuLevel), 1093 (u8 *)&pi->samu_level, 1094 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU, 1095 pi->sram_end); 1096 if (ret) 1097 return ret; 1098 1099 return ret; 1100} 1101 1102 1103static int kv_populate_acp_table(struct amdgpu_device *adev) 1104{ 1105 struct kv_power_info *pi = kv_get_pi(adev); 1106 struct amdgpu_clock_voltage_dependency_table *table = 1107 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 1108 struct atom_clock_dividers dividers; 1109 int ret; 1110 u32 i; 1111 1112 if (table == NULL || table->count == 0) 1113 return 0; 1114 1115 pi->acp_level_count = 0; 1116 for (i = 0; i < table->count; i++) { 1117 pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk); 1118 pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); 1119 1120 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 1121 table->entries[i].clk, false, 
&dividers); 1122 if (ret) 1123 return ret; 1124 pi->acp_level[i].Divider = (u8)dividers.post_div; 1125 1126 pi->acp_level_count++; 1127 } 1128 1129 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1130 pi->dpm_table_start + 1131 offsetof(SMU7_Fusion_DpmTable, AcpLevelCount), 1132 (u8 *)&pi->acp_level_count, 1133 sizeof(u8), 1134 pi->sram_end); 1135 if (ret) 1136 return ret; 1137 1138 pi->acp_interval = 1; 1139 1140 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1141 pi->dpm_table_start + 1142 offsetof(SMU7_Fusion_DpmTable, ACPInterval), 1143 (u8 *)&pi->acp_interval, 1144 sizeof(u8), 1145 pi->sram_end); 1146 if (ret) 1147 return ret; 1148 1149 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1150 pi->dpm_table_start + 1151 offsetof(SMU7_Fusion_DpmTable, AcpLevel), 1152 (u8 *)&pi->acp_level, 1153 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP, 1154 pi->sram_end); 1155 if (ret) 1156 return ret; 1157 1158 return ret; 1159} 1160 1161static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev) 1162{ 1163 struct kv_power_info *pi = kv_get_pi(adev); 1164 u32 i; 1165 struct amdgpu_clock_voltage_dependency_table *table = 1166 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 1167 1168 if (table && table->count) { 1169 for (i = 0; i < pi->graphics_dpm_level_count; i++) { 1170 if (pi->caps_enable_dfs_bypass) { 1171 if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200) 1172 pi->graphics_level[i].ClkBypassCntl = 3; 1173 else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200) 1174 pi->graphics_level[i].ClkBypassCntl = 2; 1175 else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200) 1176 pi->graphics_level[i].ClkBypassCntl = 7; 1177 else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200) 1178 pi->graphics_level[i].ClkBypassCntl = 6; 1179 else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200) 1180 pi->graphics_level[i].ClkBypassCntl = 8; 1181 else 1182 pi->graphics_level[i].ClkBypassCntl = 0; 1183 } else { 1184 pi->graphics_level[i].ClkBypassCntl = 0; 1185 } 1186 } 1187 } else { 1188 struct sumo_sclk_voltage_mapping_table *table = 1189 &pi->sys_info.sclk_voltage_mapping_table; 1190 for (i = 0; i < pi->graphics_dpm_level_count; i++) { 1191 if (pi->caps_enable_dfs_bypass) { 1192 if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200) 1193 pi->graphics_level[i].ClkBypassCntl = 3; 1194 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200) 1195 pi->graphics_level[i].ClkBypassCntl = 2; 1196 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200) 1197 pi->graphics_level[i].ClkBypassCntl = 7; 1198 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200) 1199 pi->graphics_level[i].ClkBypassCntl = 6; 1200 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200) 1201 pi->graphics_level[i].ClkBypassCntl = 8; 1202 else 1203 pi->graphics_level[i].ClkBypassCntl = 0; 1204 } else { 1205 pi->graphics_level[i].ClkBypassCntl = 0; 1206 } 1207 } 1208 } 1209} 1210 1211static int kv_enable_ulv(struct amdgpu_device *adev, bool enable) 1212{ 1213 return amdgpu_kv_notify_message_to_smu(adev, enable ? 
1214 PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV); 1215} 1216 1217static void kv_reset_acp_boot_level(struct amdgpu_device *adev) 1218{ 1219 struct kv_power_info *pi = kv_get_pi(adev); 1220 1221 pi->acp_boot_level = 0xff; 1222} 1223 1224static void kv_update_current_ps(struct amdgpu_device *adev, 1225 struct amdgpu_ps *rps) 1226{ 1227 struct kv_ps *new_ps = kv_get_ps(rps); 1228 struct kv_power_info *pi = kv_get_pi(adev); 1229 1230 pi->current_rps = *rps; 1231 pi->current_ps = *new_ps; 1232 pi->current_rps.ps_priv = &pi->current_ps; 1233} 1234 1235static void kv_update_requested_ps(struct amdgpu_device *adev, 1236 struct amdgpu_ps *rps) 1237{ 1238 struct kv_ps *new_ps = kv_get_ps(rps); 1239 struct kv_power_info *pi = kv_get_pi(adev); 1240 1241 pi->requested_rps = *rps; 1242 pi->requested_ps = *new_ps; 1243 pi->requested_rps.ps_priv = &pi->requested_ps; 1244} 1245 1246static void kv_dpm_enable_bapm(struct amdgpu_device *adev, bool enable) 1247{ 1248 struct kv_power_info *pi = kv_get_pi(adev); 1249 int ret; 1250 1251 if (pi->bapm_enable) { 1252 ret = amdgpu_kv_smc_bapm_enable(adev, enable); 1253 if (ret) 1254 DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); 1255 } 1256} 1257 1258static int kv_dpm_enable(struct amdgpu_device *adev) 1259{ 1260 struct kv_power_info *pi = kv_get_pi(adev); 1261 int ret; 1262 1263 ret = kv_process_firmware_header(adev); 1264 if (ret) { 1265 DRM_ERROR("kv_process_firmware_header failed\n"); 1266 return ret; 1267 } 1268 kv_init_fps_limits(adev); 1269 kv_init_graphics_levels(adev); 1270 ret = kv_program_bootup_state(adev); 1271 if (ret) { 1272 DRM_ERROR("kv_program_bootup_state failed\n"); 1273 return ret; 1274 } 1275 kv_calculate_dfs_bypass_settings(adev); 1276 ret = kv_upload_dpm_settings(adev); 1277 if (ret) { 1278 DRM_ERROR("kv_upload_dpm_settings failed\n"); 1279 return ret; 1280 } 1281 ret = kv_populate_uvd_table(adev); 1282 if (ret) { 1283 DRM_ERROR("kv_populate_uvd_table failed\n"); 1284 return ret; 1285 } 1286 ret = kv_populate_vce_table(adev); 1287 if (ret) { 1288 DRM_ERROR("kv_populate_vce_table failed\n"); 1289 return ret; 1290 } 1291 ret = kv_populate_samu_table(adev); 1292 if (ret) { 1293 DRM_ERROR("kv_populate_samu_table failed\n"); 1294 return ret; 1295 } 1296 ret = kv_populate_acp_table(adev); 1297 if (ret) { 1298 DRM_ERROR("kv_populate_acp_table failed\n"); 1299 return ret; 1300 } 1301 kv_program_vc(adev); 1302#if 0 1303 kv_initialize_hardware_cac_manager(adev); 1304#endif 1305 kv_start_am(adev); 1306 if (pi->enable_auto_thermal_throttling) { 1307 ret = kv_enable_auto_thermal_throttling(adev); 1308 if (ret) { 1309 DRM_ERROR("kv_enable_auto_thermal_throttling failed\n"); 1310 return ret; 1311 } 1312 } 1313 ret = kv_enable_dpm_voltage_scaling(adev); 1314 if (ret) { 1315 DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n"); 1316 return ret; 1317 } 1318 ret = kv_set_dpm_interval(adev); 1319 if (ret) { 1320 DRM_ERROR("kv_set_dpm_interval failed\n"); 1321 return ret; 1322 } 1323 ret = kv_set_dpm_boot_state(adev); 1324 if (ret) { 1325 DRM_ERROR("kv_set_dpm_boot_state failed\n"); 1326 return ret; 1327 } 1328 ret = kv_enable_ulv(adev, true); 1329 if (ret) { 1330 DRM_ERROR("kv_enable_ulv failed\n"); 1331 return ret; 1332 } 1333 kv_start_dpm(adev); 1334 ret = kv_enable_didt(adev, true); 1335 if (ret) { 1336 DRM_ERROR("kv_enable_didt failed\n"); 1337 return ret; 1338 } 1339 ret = kv_enable_smc_cac(adev, true); 1340 if (ret) { 1341 DRM_ERROR("kv_enable_smc_cac failed\n"); 1342 return ret; 1343 } 1344 1345 kv_reset_acp_boot_level(adev); 1346 1347 ret = 
amdgpu_kv_smc_bapm_enable(adev, false); 1348 if (ret) { 1349 DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); 1350 return ret; 1351 } 1352 1353 kv_update_current_ps(adev, adev->pm.dpm.boot_ps); 1354 1355 if (adev->irq.installed && 1356 amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { 1357 ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); 1358 if (ret) { 1359 DRM_ERROR("kv_set_thermal_temperature_range failed\n"); 1360 return ret; 1361 } 1362 amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, 1363 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); 1364 amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, 1365 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); 1366 } 1367 1368 return ret; 1369} 1370 1371static void kv_dpm_disable(struct amdgpu_device *adev) 1372{ 1373 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, 1374 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); 1375 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, 1376 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); 1377 1378 amdgpu_kv_smc_bapm_enable(adev, false); 1379 1380 if (adev->asic_type == CHIP_MULLINS) 1381 kv_enable_nb_dpm(adev, false); 1382 1383 /* powerup blocks */ 1384 kv_dpm_powergate_acp(adev, false); 1385 kv_dpm_powergate_samu(adev, false); 1386 kv_dpm_powergate_vce(adev, false); 1387 kv_dpm_powergate_uvd(adev, false); 1388 1389 kv_enable_smc_cac(adev, false); 1390 kv_enable_didt(adev, false); 1391 kv_clear_vc(adev); 1392 kv_stop_dpm(adev); 1393 kv_enable_ulv(adev, false); 1394 kv_reset_am(adev); 1395 1396 kv_update_current_ps(adev, adev->pm.dpm.boot_ps); 1397} 1398 1399#if 0 1400static int kv_write_smc_soft_register(struct amdgpu_device *adev, 1401 u16 reg_offset, u32 value) 1402{ 1403 struct kv_power_info *pi = kv_get_pi(adev); 1404 1405 return amdgpu_kv_copy_bytes_to_smc(adev, pi->soft_regs_start + reg_offset, 1406 (u8 *)&value, sizeof(u16), pi->sram_end); 1407} 1408 1409static int kv_read_smc_soft_register(struct amdgpu_device *adev, 1410 u16 reg_offset, u32 *value) 1411{ 1412 struct kv_power_info *pi = kv_get_pi(adev); 1413 1414 return amdgpu_kv_read_smc_sram_dword(adev, pi->soft_regs_start + reg_offset, 1415 value, pi->sram_end); 1416} 1417#endif 1418 1419static void kv_init_sclk_t(struct amdgpu_device *adev) 1420{ 1421 struct kv_power_info *pi = kv_get_pi(adev); 1422 1423 pi->low_sclk_interrupt_t = 0; 1424} 1425 1426static int kv_init_fps_limits(struct amdgpu_device *adev) 1427{ 1428 struct kv_power_info *pi = kv_get_pi(adev); 1429 int ret = 0; 1430 1431 if (pi->caps_fps) { 1432 u16 tmp; 1433 1434 tmp = 45; 1435 pi->fps_high_t = cpu_to_be16(tmp); 1436 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1437 pi->dpm_table_start + 1438 offsetof(SMU7_Fusion_DpmTable, FpsHighT), 1439 (u8 *)&pi->fps_high_t, 1440 sizeof(u16), pi->sram_end); 1441 1442 tmp = 30; 1443 pi->fps_low_t = cpu_to_be16(tmp); 1444 1445 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1446 pi->dpm_table_start + 1447 offsetof(SMU7_Fusion_DpmTable, FpsLowT), 1448 (u8 *)&pi->fps_low_t, 1449 sizeof(u16), pi->sram_end); 1450 1451 } 1452 return ret; 1453} 1454 1455static void kv_init_powergate_state(struct amdgpu_device *adev) 1456{ 1457 struct kv_power_info *pi = kv_get_pi(adev); 1458 1459 pi->uvd_power_gated = false; 1460 pi->vce_power_gated = false; 1461 pi->samu_power_gated = false; 1462 pi->acp_power_gated = false; 1463 1464} 1465 1466static int kv_enable_uvd_dpm(struct amdgpu_device *adev, bool enable) 1467{ 1468 return amdgpu_kv_notify_message_to_smu(adev, enable ? 
1469 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable); 1470} 1471 1472static int kv_enable_vce_dpm(struct amdgpu_device *adev, bool enable) 1473{ 1474 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1475 PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable); 1476} 1477 1478static int kv_enable_samu_dpm(struct amdgpu_device *adev, bool enable) 1479{ 1480 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1481 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable); 1482} 1483 1484static int kv_enable_acp_dpm(struct amdgpu_device *adev, bool enable) 1485{ 1486 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1487 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable); 1488} 1489 1490static int kv_update_uvd_dpm(struct amdgpu_device *adev, bool gate) 1491{ 1492 struct kv_power_info *pi = kv_get_pi(adev); 1493 struct amdgpu_uvd_clock_voltage_dependency_table *table = 1494 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 1495 int ret; 1496 u32 mask; 1497 1498 if (!gate) { 1499 if (table->count) 1500 pi->uvd_boot_level = table->count - 1; 1501 else 1502 pi->uvd_boot_level = 0; 1503 1504 if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) { 1505 mask = 1 << pi->uvd_boot_level; 1506 } else { 1507 mask = 0x1f; 1508 } 1509 1510 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1511 pi->dpm_table_start + 1512 offsetof(SMU7_Fusion_DpmTable, UvdBootLevel), 1513 (uint8_t *)&pi->uvd_boot_level, 1514 sizeof(u8), pi->sram_end); 1515 if (ret) 1516 return ret; 1517 1518 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1519 PPSMC_MSG_UVDDPM_SetEnabledMask, 1520 mask); 1521 } 1522 1523 return kv_enable_uvd_dpm(adev, !gate); 1524} 1525 1526static u8 kv_get_vce_boot_level(struct amdgpu_device *adev, u32 evclk) 1527{ 1528 u8 i; 1529 struct amdgpu_vce_clock_voltage_dependency_table *table = 1530 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 1531 1532 for (i = 0; i < table->count; i++) { 1533 if (table->entries[i].evclk >= evclk) 1534 break; 1535 } 1536 1537 return i; 1538} 1539 1540static int kv_update_vce_dpm(struct amdgpu_device *adev, 1541 struct amdgpu_ps *amdgpu_new_state, 1542 struct amdgpu_ps *amdgpu_current_state) 1543{ 1544 struct kv_power_info *pi = kv_get_pi(adev); 1545 struct amdgpu_vce_clock_voltage_dependency_table *table = 1546 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 1547 int ret; 1548 1549 if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { 1550 kv_dpm_powergate_vce(adev, false); 1551 /* turn the clocks on when encoding */ 1552 ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, 1553 AMD_CG_STATE_UNGATE); 1554 if (ret) 1555 return ret; 1556 if (pi->caps_stable_p_state) 1557 pi->vce_boot_level = table->count - 1; 1558 else 1559 pi->vce_boot_level = kv_get_vce_boot_level(adev, amdgpu_new_state->evclk); 1560 1561 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1562 pi->dpm_table_start + 1563 offsetof(SMU7_Fusion_DpmTable, VceBootLevel), 1564 (u8 *)&pi->vce_boot_level, 1565 sizeof(u8), 1566 pi->sram_end); 1567 if (ret) 1568 return ret; 1569 1570 if (pi->caps_stable_p_state) 1571 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1572 PPSMC_MSG_VCEDPM_SetEnabledMask, 1573 (1 << pi->vce_boot_level)); 1574 1575 kv_enable_vce_dpm(adev, true); 1576 } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { 1577 kv_enable_vce_dpm(adev, false); 1578 /* turn the clocks off when not encoding */ 1579 ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, 1580 AMD_CG_STATE_GATE); 1581 if (ret) 1582 return ret; 1583 
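		/* VCE DPM is disabled and the clocks are gated, so the block
		 * can now be power gated as well */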
kv_dpm_powergate_vce(adev, true); 1584 } 1585 1586 return 0; 1587} 1588 1589static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate) 1590{ 1591 struct kv_power_info *pi = kv_get_pi(adev); 1592 struct amdgpu_clock_voltage_dependency_table *table = 1593 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; 1594 int ret; 1595 1596 if (!gate) { 1597 if (pi->caps_stable_p_state) 1598 pi->samu_boot_level = table->count - 1; 1599 else 1600 pi->samu_boot_level = 0; 1601 1602 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1603 pi->dpm_table_start + 1604 offsetof(SMU7_Fusion_DpmTable, SamuBootLevel), 1605 (u8 *)&pi->samu_boot_level, 1606 sizeof(u8), 1607 pi->sram_end); 1608 if (ret) 1609 return ret; 1610 1611 if (pi->caps_stable_p_state) 1612 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1613 PPSMC_MSG_SAMUDPM_SetEnabledMask, 1614 (1 << pi->samu_boot_level)); 1615 } 1616 1617 return kv_enable_samu_dpm(adev, !gate); 1618} 1619 1620static u8 kv_get_acp_boot_level(struct amdgpu_device *adev) 1621{ 1622 u8 i; 1623 struct amdgpu_clock_voltage_dependency_table *table = 1624 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 1625 1626 for (i = 0; i < table->count; i++) { 1627 if (table->entries[i].clk >= 0) /* XXX */ 1628 break; 1629 } 1630 1631 if (i >= table->count) 1632 i = table->count - 1; 1633 1634 return i; 1635} 1636 1637static void kv_update_acp_boot_level(struct amdgpu_device *adev) 1638{ 1639 struct kv_power_info *pi = kv_get_pi(adev); 1640 u8 acp_boot_level; 1641 1642 if (!pi->caps_stable_p_state) { 1643 acp_boot_level = kv_get_acp_boot_level(adev); 1644 if (acp_boot_level != pi->acp_boot_level) { 1645 pi->acp_boot_level = acp_boot_level; 1646 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1647 PPSMC_MSG_ACPDPM_SetEnabledMask, 1648 (1 << pi->acp_boot_level)); 1649 } 1650 } 1651} 1652 1653static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate) 1654{ 1655 struct kv_power_info *pi = kv_get_pi(adev); 1656 struct amdgpu_clock_voltage_dependency_table *table = 1657 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 1658 int ret; 1659 1660 if (!gate) { 1661 if (pi->caps_stable_p_state) 1662 pi->acp_boot_level = table->count - 1; 1663 else 1664 pi->acp_boot_level = kv_get_acp_boot_level(adev); 1665 1666 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1667 pi->dpm_table_start + 1668 offsetof(SMU7_Fusion_DpmTable, AcpBootLevel), 1669 (u8 *)&pi->acp_boot_level, 1670 sizeof(u8), 1671 pi->sram_end); 1672 if (ret) 1673 return ret; 1674 1675 if (pi->caps_stable_p_state) 1676 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1677 PPSMC_MSG_ACPDPM_SetEnabledMask, 1678 (1 << pi->acp_boot_level)); 1679 } 1680 1681 return kv_enable_acp_dpm(adev, !gate); 1682} 1683 1684static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate) 1685{ 1686 struct kv_power_info *pi = kv_get_pi(adev); 1687 int ret; 1688 1689 if (pi->uvd_power_gated == gate) 1690 return; 1691 1692 pi->uvd_power_gated = gate; 1693 1694 if (gate) { 1695 if (pi->caps_uvd_pg) { 1696 /* disable clockgating so we can properly shut down the block */ 1697 ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, 1698 AMD_CG_STATE_UNGATE); 1699 /* shutdown the UVD block */ 1700 ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, 1701 AMD_PG_STATE_GATE); 1702 /* XXX: check for errors */ 1703 } 1704 kv_update_uvd_dpm(adev, gate); 1705 if (pi->caps_uvd_pg) 1706 /* power off the UVD block */ 1707 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF); 1708 } else { 1709 if (pi->caps_uvd_pg) { 1710 
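			/* un-gating reverses the shutdown order: power the block
			 * back up via the SMU first, re-initialize it, then re-enable
			 * dynamic clockgating */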
/* power on the UVD block */ 1711 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); 1712 /* re-init the UVD block */ 1713 ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, 1714 AMD_PG_STATE_UNGATE); 1715 /* enable clockgating. hw will dynamically gate/ungate clocks on the fly */ 1716 ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, 1717 AMD_CG_STATE_GATE); 1718 /* XXX: check for errors */ 1719 } 1720 kv_update_uvd_dpm(adev, gate); 1721 } 1722} 1723 1724static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) 1725{ 1726 struct kv_power_info *pi = kv_get_pi(adev); 1727 int ret; 1728 1729 if (pi->vce_power_gated == gate) 1730 return; 1731 1732 pi->vce_power_gated = gate; 1733 1734 if (gate) { 1735 if (pi->caps_vce_pg) { 1736 /* shutdown the VCE block */ 1737 ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, 1738 AMD_PG_STATE_GATE); 1739 /* XXX: check for errors */ 1740 /* power off the VCE block */ 1741 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); 1742 } 1743 } else { 1744 if (pi->caps_vce_pg) { 1745 /* power on the VCE block */ 1746 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); 1747 /* re-init the VCE block */ 1748 ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, 1749 AMD_PG_STATE_UNGATE); 1750 /* XXX: check for errors */ 1751 } 1752 } 1753} 1754 1755static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) 1756{ 1757 struct kv_power_info *pi = kv_get_pi(adev); 1758 1759 if (pi->samu_power_gated == gate) 1760 return; 1761 1762 pi->samu_power_gated = gate; 1763 1764 if (gate) { 1765 kv_update_samu_dpm(adev, true); 1766 if (pi->caps_samu_pg) 1767 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerOFF); 1768 } else { 1769 if (pi->caps_samu_pg) 1770 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerON); 1771 kv_update_samu_dpm(adev, false); 1772 } 1773} 1774 1775static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate) 1776{ 1777 struct kv_power_info *pi = kv_get_pi(adev); 1778 1779 if (pi->acp_power_gated == gate) 1780 return; 1781 1782 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 1783 return; 1784 1785 pi->acp_power_gated = gate; 1786 1787 if (gate) { 1788 kv_update_acp_dpm(adev, true); 1789 if (pi->caps_acp_pg) 1790 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerOFF); 1791 } else { 1792 if (pi->caps_acp_pg) 1793 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerON); 1794 kv_update_acp_dpm(adev, false); 1795 } 1796} 1797 1798static void kv_set_valid_clock_range(struct amdgpu_device *adev, 1799 struct amdgpu_ps *new_rps) 1800{ 1801 struct kv_ps *new_ps = kv_get_ps(new_rps); 1802 struct kv_power_info *pi = kv_get_pi(adev); 1803 u32 i; 1804 struct amdgpu_clock_voltage_dependency_table *table = 1805 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 1806 1807 if (table && table->count) { 1808 for (i = 0; i < pi->graphics_dpm_level_count; i++) { 1809 if ((table->entries[i].clk >= new_ps->levels[0].sclk) || 1810 (i == (pi->graphics_dpm_level_count - 1))) { 1811 pi->lowest_valid = i; 1812 break; 1813 } 1814 } 1815 1816 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 1817 if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk) 1818 break; 1819 } 1820 pi->highest_valid = i; 1821 1822 if (pi->lowest_valid > pi->highest_valid) { 1823 if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) > 1824 (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 
1].sclk)) 1825 pi->highest_valid = pi->lowest_valid; 1826 else 1827 pi->lowest_valid = pi->highest_valid; 1828 } 1829 } else { 1830 struct sumo_sclk_voltage_mapping_table *table = 1831 &pi->sys_info.sclk_voltage_mapping_table; 1832 1833 for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) { 1834 if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk || 1835 i == (int)(pi->graphics_dpm_level_count - 1)) { 1836 pi->lowest_valid = i; 1837 break; 1838 } 1839 } 1840 1841 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 1842 if (table->entries[i].sclk_frequency <= 1843 new_ps->levels[new_ps->num_levels - 1].sclk) 1844 break; 1845 } 1846 pi->highest_valid = i; 1847 1848 if (pi->lowest_valid > pi->highest_valid) { 1849 if ((new_ps->levels[0].sclk - 1850 table->entries[pi->highest_valid].sclk_frequency) > 1851 (table->entries[pi->lowest_valid].sclk_frequency - 1852 new_ps->levels[new_ps->num_levels -1].sclk)) 1853 pi->highest_valid = pi->lowest_valid; 1854 else 1855 pi->lowest_valid = pi->highest_valid; 1856 } 1857 } 1858} 1859 1860static int kv_update_dfs_bypass_settings(struct amdgpu_device *adev, 1861 struct amdgpu_ps *new_rps) 1862{ 1863 struct kv_ps *new_ps = kv_get_ps(new_rps); 1864 struct kv_power_info *pi = kv_get_pi(adev); 1865 int ret = 0; 1866 u8 clk_bypass_cntl; 1867 1868 if (pi->caps_enable_dfs_bypass) { 1869 clk_bypass_cntl = new_ps->need_dfs_bypass ? 1870 pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0; 1871 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1872 (pi->dpm_table_start + 1873 offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) + 1874 (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) + 1875 offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)), 1876 &clk_bypass_cntl, 1877 sizeof(u8), pi->sram_end); 1878 } 1879 1880 return ret; 1881} 1882 1883static int kv_enable_nb_dpm(struct amdgpu_device *adev, 1884 bool enable) 1885{ 1886 struct kv_power_info *pi = kv_get_pi(adev); 1887 int ret = 0; 1888 1889 if (enable) { 1890 if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) { 1891 ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Enable); 1892 if (ret == 0) 1893 pi->nb_dpm_enabled = true; 1894 } 1895 } else { 1896 if (pi->enable_nb_dpm && pi->nb_dpm_enabled) { 1897 ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Disable); 1898 if (ret == 0) 1899 pi->nb_dpm_enabled = false; 1900 } 1901 } 1902 1903 return ret; 1904} 1905 1906static int kv_dpm_force_performance_level(struct amdgpu_device *adev, 1907 enum amdgpu_dpm_forced_level level) 1908{ 1909 int ret; 1910 1911 if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) { 1912 ret = kv_force_dpm_highest(adev); 1913 if (ret) 1914 return ret; 1915 } else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) { 1916 ret = kv_force_dpm_lowest(adev); 1917 if (ret) 1918 return ret; 1919 } else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) { 1920 ret = kv_unforce_levels(adev); 1921 if (ret) 1922 return ret; 1923 } 1924 1925 adev->pm.dpm.forced_level = level; 1926 1927 return 0; 1928} 1929 1930static int kv_dpm_pre_set_power_state(struct amdgpu_device *adev) 1931{ 1932 struct kv_power_info *pi = kv_get_pi(adev); 1933 struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps; 1934 struct amdgpu_ps *new_ps = &requested_ps; 1935 1936 kv_update_requested_ps(adev, new_ps); 1937 1938 kv_apply_state_adjust_rules(adev, 1939 &pi->requested_rps, 1940 &pi->current_rps); 1941 1942 return 0; 1943} 1944 1945static int kv_dpm_set_power_state(struct amdgpu_device *adev) 1946{ 1947 struct kv_power_info *pi = kv_get_pi(adev); 1948 struct 
amdgpu_ps *new_ps = &pi->requested_rps; 1949 struct amdgpu_ps *old_ps = &pi->current_rps; 1950 int ret; 1951 1952 if (pi->bapm_enable) { 1953 ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.dpm.ac_power); 1954 if (ret) { 1955 DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); 1956 return ret; 1957 } 1958 } 1959 1960 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { 1961 if (pi->enable_dpm) { 1962 kv_set_valid_clock_range(adev, new_ps); 1963 kv_update_dfs_bypass_settings(adev, new_ps); 1964 ret = kv_calculate_ds_divider(adev); 1965 if (ret) { 1966 DRM_ERROR("kv_calculate_ds_divider failed\n"); 1967 return ret; 1968 } 1969 kv_calculate_nbps_level_settings(adev); 1970 kv_calculate_dpm_settings(adev); 1971 kv_force_lowest_valid(adev); 1972 kv_enable_new_levels(adev); 1973 kv_upload_dpm_settings(adev); 1974 kv_program_nbps_index_settings(adev, new_ps); 1975 kv_unforce_levels(adev); 1976 kv_set_enabled_levels(adev); 1977 kv_force_lowest_valid(adev); 1978 kv_unforce_levels(adev); 1979 1980 ret = kv_update_vce_dpm(adev, new_ps, old_ps); 1981 if (ret) { 1982 DRM_ERROR("kv_update_vce_dpm failed\n"); 1983 return ret; 1984 } 1985 kv_update_sclk_t(adev); 1986 if (adev->asic_type == CHIP_MULLINS) 1987 kv_enable_nb_dpm(adev, true); 1988 } 1989 } else { 1990 if (pi->enable_dpm) { 1991 kv_set_valid_clock_range(adev, new_ps); 1992 kv_update_dfs_bypass_settings(adev, new_ps); 1993 ret = kv_calculate_ds_divider(adev); 1994 if (ret) { 1995 DRM_ERROR("kv_calculate_ds_divider failed\n"); 1996 return ret; 1997 } 1998 kv_calculate_nbps_level_settings(adev); 1999 kv_calculate_dpm_settings(adev); 2000 kv_freeze_sclk_dpm(adev, true); 2001 kv_upload_dpm_settings(adev); 2002 kv_program_nbps_index_settings(adev, new_ps); 2003 kv_freeze_sclk_dpm(adev, false); 2004 kv_set_enabled_levels(adev); 2005 ret = kv_update_vce_dpm(adev, new_ps, old_ps); 2006 if (ret) { 2007 DRM_ERROR("kv_update_vce_dpm failed\n"); 2008 return ret; 2009 } 2010 kv_update_acp_boot_level(adev); 2011 kv_update_sclk_t(adev); 2012 kv_enable_nb_dpm(adev, true); 2013 } 2014 } 2015 2016 return 0; 2017} 2018 2019static void kv_dpm_post_set_power_state(struct amdgpu_device *adev) 2020{ 2021 struct kv_power_info *pi = kv_get_pi(adev); 2022 struct amdgpu_ps *new_ps = &pi->requested_rps; 2023 2024 kv_update_current_ps(adev, new_ps); 2025} 2026 2027static void kv_dpm_setup_asic(struct amdgpu_device *adev) 2028{ 2029 sumo_take_smu_control(adev, true); 2030 kv_init_powergate_state(adev); 2031 kv_init_sclk_t(adev); 2032} 2033 2034#if 0 2035static void kv_dpm_reset_asic(struct amdgpu_device *adev) 2036{ 2037 struct kv_power_info *pi = kv_get_pi(adev); 2038 2039 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { 2040 kv_force_lowest_valid(adev); 2041 kv_init_graphics_levels(adev); 2042 kv_program_bootup_state(adev); 2043 kv_upload_dpm_settings(adev); 2044 kv_force_lowest_valid(adev); 2045 kv_unforce_levels(adev); 2046 } else { 2047 kv_init_graphics_levels(adev); 2048 kv_program_bootup_state(adev); 2049 kv_freeze_sclk_dpm(adev, true); 2050 kv_upload_dpm_settings(adev); 2051 kv_freeze_sclk_dpm(adev, false); 2052 kv_set_enabled_level(adev, pi->graphics_boot_level); 2053 } 2054} 2055#endif 2056 2057static void kv_construct_max_power_limits_table(struct amdgpu_device *adev, 2058 struct amdgpu_clock_and_voltage_limits *table) 2059{ 2060 struct kv_power_info *pi = kv_get_pi(adev); 2061 2062 if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) { 2063 int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries 
- 1; 2064 table->sclk = 2065 pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency; 2066 table->vddc = 2067 kv_convert_2bit_index_to_voltage(adev, 2068 pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit); 2069 } 2070 2071 table->mclk = pi->sys_info.nbp_memory_clock[0]; 2072} 2073 2074static void kv_patch_voltage_values(struct amdgpu_device *adev) 2075{ 2076 int i; 2077 struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table = 2078 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 2079 struct amdgpu_vce_clock_voltage_dependency_table *vce_table = 2080 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 2081 struct amdgpu_clock_voltage_dependency_table *samu_table = 2082 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; 2083 struct amdgpu_clock_voltage_dependency_table *acp_table = 2084 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 2085 2086 if (uvd_table->count) { 2087 for (i = 0; i < uvd_table->count; i++) 2088 uvd_table->entries[i].v = 2089 kv_convert_8bit_index_to_voltage(adev, 2090 uvd_table->entries[i].v); 2091 } 2092 2093 if (vce_table->count) { 2094 for (i = 0; i < vce_table->count; i++) 2095 vce_table->entries[i].v = 2096 kv_convert_8bit_index_to_voltage(adev, 2097 vce_table->entries[i].v); 2098 } 2099 2100 if (samu_table->count) { 2101 for (i = 0; i < samu_table->count; i++) 2102 samu_table->entries[i].v = 2103 kv_convert_8bit_index_to_voltage(adev, 2104 samu_table->entries[i].v); 2105 } 2106 2107 if (acp_table->count) { 2108 for (i = 0; i < acp_table->count; i++) 2109 acp_table->entries[i].v = 2110 kv_convert_8bit_index_to_voltage(adev, 2111 acp_table->entries[i].v); 2112 } 2113 2114} 2115 2116static void kv_construct_boot_state(struct amdgpu_device *adev) 2117{ 2118 struct kv_power_info *pi = kv_get_pi(adev); 2119 2120 pi->boot_pl.sclk = pi->sys_info.bootup_sclk; 2121 pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index; 2122 pi->boot_pl.ds_divider_index = 0; 2123 pi->boot_pl.ss_divider_index = 0; 2124 pi->boot_pl.allow_gnb_slow = 1; 2125 pi->boot_pl.force_nbp_state = 0; 2126 pi->boot_pl.display_wm = 0; 2127 pi->boot_pl.vce_wm = 0; 2128} 2129 2130static int kv_force_dpm_highest(struct amdgpu_device *adev) 2131{ 2132 int ret; 2133 u32 enable_mask, i; 2134 2135 ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask); 2136 if (ret) 2137 return ret; 2138 2139 for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) { 2140 if (enable_mask & (1 << i)) 2141 break; 2142 } 2143 2144 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 2145 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i); 2146 else 2147 return kv_set_enabled_level(adev, i); 2148} 2149 2150static int kv_force_dpm_lowest(struct amdgpu_device *adev) 2151{ 2152 int ret; 2153 u32 enable_mask, i; 2154 2155 ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask); 2156 if (ret) 2157 return ret; 2158 2159 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) { 2160 if (enable_mask & (1 << i)) 2161 break; 2162 } 2163 2164 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 2165 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i); 2166 else 2167 return kv_set_enabled_level(adev, i); 2168} 2169 2170static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev, 2171 u32 sclk, u32 min_sclk_in_sr) 2172{ 2173 struct kv_power_info *pi = kv_get_pi(adev); 2174 u32 i; 2175 u32 temp; 2176 u32 min = max(min_sclk_in_sr, (u32)KV_MINIMUM_ENGINE_CLOCK); 2177 2178 if 
(sclk < min) 2179 return 0; 2180 2181 if (!pi->caps_sclk_ds) 2182 return 0; 2183 2184 for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) { 2185 temp = sclk >> i; 2186 if (temp >= min) 2187 break; 2188 } 2189 2190 return (u8)i; 2191} 2192 2193static int kv_get_high_voltage_limit(struct amdgpu_device *adev, int *limit) 2194{ 2195 struct kv_power_info *pi = kv_get_pi(adev); 2196 struct amdgpu_clock_voltage_dependency_table *table = 2197 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 2198 int i; 2199 2200 if (table && table->count) { 2201 for (i = table->count - 1; i >= 0; i--) { 2202 if (pi->high_voltage_t && 2203 (kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <= 2204 pi->high_voltage_t)) { 2205 *limit = i; 2206 return 0; 2207 } 2208 } 2209 } else { 2210 struct sumo_sclk_voltage_mapping_table *table = 2211 &pi->sys_info.sclk_voltage_mapping_table; 2212 2213 for (i = table->num_max_dpm_entries - 1; i >= 0; i--) { 2214 if (pi->high_voltage_t && 2215 (kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <= 2216 pi->high_voltage_t)) { 2217 *limit = i; 2218 return 0; 2219 } 2220 } 2221 } 2222 2223 *limit = 0; 2224 return 0; 2225} 2226 2227static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, 2228 struct amdgpu_ps *new_rps, 2229 struct amdgpu_ps *old_rps) 2230{ 2231 struct kv_ps *ps = kv_get_ps(new_rps); 2232 struct kv_power_info *pi = kv_get_pi(adev); 2233 u32 min_sclk = 10000; /* ??? */ 2234 u32 sclk, mclk = 0; 2235 int i, limit; 2236 bool force_high; 2237 struct amdgpu_clock_voltage_dependency_table *table = 2238 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 2239 u32 stable_p_state_sclk = 0; 2240 struct amdgpu_clock_and_voltage_limits *max_limits = 2241 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 2242 2243 if (new_rps->vce_active) { 2244 new_rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk; 2245 new_rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk; 2246 } else { 2247 new_rps->evclk = 0; 2248 new_rps->ecclk = 0; 2249 } 2250 2251 mclk = max_limits->mclk; 2252 sclk = min_sclk; 2253 2254 if (pi->caps_stable_p_state) { 2255 stable_p_state_sclk = (max_limits->sclk * 75) / 100; 2256 2257 for (i = table->count - 1; i >= 0; i--) { 2258 if (stable_p_state_sclk >= table->entries[i].clk) { 2259 stable_p_state_sclk = table->entries[i].clk; 2260 break; 2261 } 2262 } 2263 2264 if (i > 0) 2265 stable_p_state_sclk = table->entries[0].clk; 2266 2267 sclk = stable_p_state_sclk; 2268 } 2269 2270 if (new_rps->vce_active) { 2271 if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk) 2272 sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk; 2273 } 2274 2275 ps->need_dfs_bypass = true; 2276 2277 for (i = 0; i < ps->num_levels; i++) { 2278 if (ps->levels[i].sclk < sclk) 2279 ps->levels[i].sclk = sclk; 2280 } 2281 2282 if (table && table->count) { 2283 for (i = 0; i < ps->num_levels; i++) { 2284 if (pi->high_voltage_t && 2285 (pi->high_voltage_t < 2286 kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) { 2287 kv_get_high_voltage_limit(adev, &limit); 2288 ps->levels[i].sclk = table->entries[limit].clk; 2289 } 2290 } 2291 } else { 2292 struct sumo_sclk_voltage_mapping_table *table = 2293 &pi->sys_info.sclk_voltage_mapping_table; 2294 2295 for (i = 0; i < ps->num_levels; i++) { 2296 if (pi->high_voltage_t && 2297 (pi->high_voltage_t < 2298 kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) { 2299 kv_get_high_voltage_limit(adev, &limit); 2300 ps->levels[i].sclk = 
table->entries[limit].sclk_frequency; 2301 } 2302 } 2303 } 2304 2305 if (pi->caps_stable_p_state) { 2306 for (i = 0; i < ps->num_levels; i++) { 2307 ps->levels[i].sclk = stable_p_state_sclk; 2308 } 2309 } 2310 2311 pi->video_start = new_rps->dclk || new_rps->vclk || 2312 new_rps->evclk || new_rps->ecclk; 2313 2314 if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 2315 ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) 2316 pi->battery_state = true; 2317 else 2318 pi->battery_state = false; 2319 2320 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { 2321 ps->dpm0_pg_nb_ps_lo = 0x1; 2322 ps->dpm0_pg_nb_ps_hi = 0x0; 2323 ps->dpmx_nb_ps_lo = 0x1; 2324 ps->dpmx_nb_ps_hi = 0x0; 2325 } else { 2326 ps->dpm0_pg_nb_ps_lo = 0x3; 2327 ps->dpm0_pg_nb_ps_hi = 0x0; 2328 ps->dpmx_nb_ps_lo = 0x3; 2329 ps->dpmx_nb_ps_hi = 0x0; 2330 2331 if (pi->sys_info.nb_dpm_enable) { 2332 force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) || 2333 pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) || 2334 pi->disable_nb_ps3_in_battery; 2335 ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3; 2336 ps->dpm0_pg_nb_ps_hi = 0x2; 2337 ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3; 2338 ps->dpmx_nb_ps_hi = 0x2; 2339 } 2340 } 2341} 2342 2343static void kv_dpm_power_level_enabled_for_throttle(struct amdgpu_device *adev, 2344 u32 index, bool enable) 2345{ 2346 struct kv_power_info *pi = kv_get_pi(adev); 2347 2348 pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0; 2349} 2350 2351static int kv_calculate_ds_divider(struct amdgpu_device *adev) 2352{ 2353 struct kv_power_info *pi = kv_get_pi(adev); 2354 u32 sclk_in_sr = 10000; /* ??? */ 2355 u32 i; 2356 2357 if (pi->lowest_valid > pi->highest_valid) 2358 return -EINVAL; 2359 2360 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { 2361 pi->graphics_level[i].DeepSleepDivId = 2362 kv_get_sleep_divider_id_from_clock(adev, 2363 be32_to_cpu(pi->graphics_level[i].SclkFrequency), 2364 sclk_in_sr); 2365 } 2366 return 0; 2367} 2368 2369static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev) 2370{ 2371 struct kv_power_info *pi = kv_get_pi(adev); 2372 u32 i; 2373 bool force_high; 2374 struct amdgpu_clock_and_voltage_limits *max_limits = 2375 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 2376 u32 mclk = max_limits->mclk; 2377 2378 if (pi->lowest_valid > pi->highest_valid) 2379 return -EINVAL; 2380 2381 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { 2382 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { 2383 pi->graphics_level[i].GnbSlow = 1; 2384 pi->graphics_level[i].ForceNbPs1 = 0; 2385 pi->graphics_level[i].UpH = 0; 2386 } 2387 2388 if (!pi->sys_info.nb_dpm_enable) 2389 return 0; 2390 2391 force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) || 2392 (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start); 2393 2394 if (force_high) { 2395 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) 2396 pi->graphics_level[i].GnbSlow = 0; 2397 } else { 2398 if (pi->battery_state) 2399 pi->graphics_level[0].ForceNbPs1 = 1; 2400 2401 pi->graphics_level[1].GnbSlow = 0; 2402 pi->graphics_level[2].GnbSlow = 0; 2403 pi->graphics_level[3].GnbSlow = 0; 2404 pi->graphics_level[4].GnbSlow = 0; 2405 } 2406 } else { 2407 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { 2408 pi->graphics_level[i].GnbSlow = 1; 2409 pi->graphics_level[i].ForceNbPs1 = 0; 2410 pi->graphics_level[i].UpH = 0; 2411 } 2412 2413 if (pi->sys_info.nb_dpm_enable && pi->battery_state) { 2414 
pi->graphics_level[pi->lowest_valid].UpH = 0x28; 2415 pi->graphics_level[pi->lowest_valid].GnbSlow = 0; 2416 if (pi->lowest_valid != pi->highest_valid) 2417 pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1; 2418 } 2419 } 2420 return 0; 2421} 2422 2423static int kv_calculate_dpm_settings(struct amdgpu_device *adev) 2424{ 2425 struct kv_power_info *pi = kv_get_pi(adev); 2426 u32 i; 2427 2428 if (pi->lowest_valid > pi->highest_valid) 2429 return -EINVAL; 2430 2431 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) 2432 pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0; 2433 2434 return 0; 2435} 2436 2437static void kv_init_graphics_levels(struct amdgpu_device *adev) 2438{ 2439 struct kv_power_info *pi = kv_get_pi(adev); 2440 u32 i; 2441 struct amdgpu_clock_voltage_dependency_table *table = 2442 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 2443 2444 if (table && table->count) { 2445 u32 vid_2bit; 2446 2447 pi->graphics_dpm_level_count = 0; 2448 for (i = 0; i < table->count; i++) { 2449 if (pi->high_voltage_t && 2450 (pi->high_voltage_t < 2451 kv_convert_8bit_index_to_voltage(adev, table->entries[i].v))) 2452 break; 2453 2454 kv_set_divider_value(adev, i, table->entries[i].clk); 2455 vid_2bit = kv_convert_vid7_to_vid2(adev, 2456 &pi->sys_info.vid_mapping_table, 2457 table->entries[i].v); 2458 kv_set_vid(adev, i, vid_2bit); 2459 kv_set_at(adev, i, pi->at[i]); 2460 kv_dpm_power_level_enabled_for_throttle(adev, i, true); 2461 pi->graphics_dpm_level_count++; 2462 } 2463 } else { 2464 struct sumo_sclk_voltage_mapping_table *table = 2465 &pi->sys_info.sclk_voltage_mapping_table; 2466 2467 pi->graphics_dpm_level_count = 0; 2468 for (i = 0; i < table->num_max_dpm_entries; i++) { 2469 if (pi->high_voltage_t && 2470 pi->high_voltage_t < 2471 kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit)) 2472 break; 2473 2474 kv_set_divider_value(adev, i, table->entries[i].sclk_frequency); 2475 kv_set_vid(adev, i, table->entries[i].vid_2bit); 2476 kv_set_at(adev, i, pi->at[i]); 2477 kv_dpm_power_level_enabled_for_throttle(adev, i, true); 2478 pi->graphics_dpm_level_count++; 2479 } 2480 } 2481 2482 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) 2483 kv_dpm_power_level_enable(adev, i, false); 2484} 2485 2486static void kv_enable_new_levels(struct amdgpu_device *adev) 2487{ 2488 struct kv_power_info *pi = kv_get_pi(adev); 2489 u32 i; 2490 2491 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) { 2492 if (i >= pi->lowest_valid && i <= pi->highest_valid) 2493 kv_dpm_power_level_enable(adev, i, true); 2494 } 2495} 2496 2497static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level) 2498{ 2499 u32 new_mask = (1 << level); 2500 2501 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, 2502 PPSMC_MSG_SCLKDPM_SetEnabledMask, 2503 new_mask); 2504} 2505 2506static int kv_set_enabled_levels(struct amdgpu_device *adev) 2507{ 2508 struct kv_power_info *pi = kv_get_pi(adev); 2509 u32 i, new_mask = 0; 2510 2511 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) 2512 new_mask |= (1 << i); 2513 2514 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, 2515 PPSMC_MSG_SCLKDPM_SetEnabledMask, 2516 new_mask); 2517} 2518 2519static void kv_program_nbps_index_settings(struct amdgpu_device *adev, 2520 struct amdgpu_ps *new_rps) 2521{ 2522 struct kv_ps *new_ps = kv_get_ps(new_rps); 2523 struct kv_power_info *pi = kv_get_pi(adev); 2524 u32 nbdpmconfig1; 2525 2526 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 2527 return; 2528 2529 if 
(pi->sys_info.nb_dpm_enable) { 2530 nbdpmconfig1 = RREG32_SMC(ixNB_DPM_CONFIG_1); 2531 nbdpmconfig1 &= ~(NB_DPM_CONFIG_1__Dpm0PgNbPsLo_MASK | 2532 NB_DPM_CONFIG_1__Dpm0PgNbPsHi_MASK | 2533 NB_DPM_CONFIG_1__DpmXNbPsLo_MASK | 2534 NB_DPM_CONFIG_1__DpmXNbPsHi_MASK); 2535 nbdpmconfig1 |= (new_ps->dpm0_pg_nb_ps_lo << NB_DPM_CONFIG_1__Dpm0PgNbPsLo__SHIFT) | 2536 (new_ps->dpm0_pg_nb_ps_hi << NB_DPM_CONFIG_1__Dpm0PgNbPsHi__SHIFT) | 2537 (new_ps->dpmx_nb_ps_lo << NB_DPM_CONFIG_1__DpmXNbPsLo__SHIFT) | 2538 (new_ps->dpmx_nb_ps_hi << NB_DPM_CONFIG_1__DpmXNbPsHi__SHIFT); 2539 WREG32_SMC(ixNB_DPM_CONFIG_1, nbdpmconfig1); 2540 } 2541} 2542 2543static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, 2544 int min_temp, int max_temp) 2545{ 2546 int low_temp = 0 * 1000; 2547 int high_temp = 255 * 1000; 2548 u32 tmp; 2549 2550 if (low_temp < min_temp) 2551 low_temp = min_temp; 2552 if (high_temp > max_temp) 2553 high_temp = max_temp; 2554 if (high_temp < low_temp) { 2555 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); 2556 return -EINVAL; 2557 } 2558 2559 tmp = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 2560 tmp &= ~(CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK | 2561 CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK); 2562 tmp |= ((49 + (high_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT) | 2563 ((49 + (low_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT); 2564 WREG32_SMC(ixCG_THERMAL_INT_CTRL, tmp); 2565 2566 adev->pm.dpm.thermal.min_temp = low_temp; 2567 adev->pm.dpm.thermal.max_temp = high_temp; 2568 2569 return 0; 2570} 2571 2572union igp_info { 2573 struct _ATOM_INTEGRATED_SYSTEM_INFO info; 2574 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; 2575 struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5; 2576 struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6; 2577 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; 2578 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8; 2579}; 2580 2581static int kv_parse_sys_info_table(struct amdgpu_device *adev) 2582{ 2583 struct kv_power_info *pi = kv_get_pi(adev); 2584 struct amdgpu_mode_info *mode_info = &adev->mode_info; 2585 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); 2586 union igp_info *igp_info; 2587 u8 frev, crev; 2588 u16 data_offset; 2589 int i; 2590 2591 if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 2592 &frev, &crev, &data_offset)) { 2593 igp_info = (union igp_info *)(mode_info->atom_context->bios + 2594 data_offset); 2595 2596 if (crev != 8) { 2597 DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); 2598 return -EINVAL; 2599 } 2600 pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock); 2601 pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock); 2602 pi->sys_info.bootup_nb_voltage_index = 2603 le16_to_cpu(igp_info->info_8.usBootUpNBVoltage); 2604 if (igp_info->info_8.ucHtcTmpLmt == 0) 2605 pi->sys_info.htc_tmp_lmt = 203; 2606 else 2607 pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt; 2608 if (igp_info->info_8.ucHtcHystLmt == 0) 2609 pi->sys_info.htc_hyst_lmt = 5; 2610 else 2611 pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt; 2612 if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) { 2613 DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n"); 2614 } 2615 2616 if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3)) 2617 pi->sys_info.nb_dpm_enable = true; 2618 else 2619 pi->sys_info.nb_dpm_enable = false; 2620 2621 for (i = 0; i < KV_NUM_NBPSTATES; i++) { 2622 pi->sys_info.nbp_memory_clock[i] = 2623 
le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]); 2624 pi->sys_info.nbp_n_clock[i] = 2625 le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]); 2626 } 2627 if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) & 2628 SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) 2629 pi->caps_enable_dfs_bypass = true; 2630 2631 sumo_construct_sclk_voltage_mapping_table(adev, 2632 &pi->sys_info.sclk_voltage_mapping_table, 2633 igp_info->info_8.sAvail_SCLK); 2634 2635 sumo_construct_vid_mapping_table(adev, 2636 &pi->sys_info.vid_mapping_table, 2637 igp_info->info_8.sAvail_SCLK); 2638 2639 kv_construct_max_power_limits_table(adev, 2640 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac); 2641 } 2642 return 0; 2643} 2644 2645union power_info { 2646 struct _ATOM_POWERPLAY_INFO info; 2647 struct _ATOM_POWERPLAY_INFO_V2 info_2; 2648 struct _ATOM_POWERPLAY_INFO_V3 info_3; 2649 struct _ATOM_PPLIB_POWERPLAYTABLE pplib; 2650 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; 2651 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; 2652}; 2653 2654union pplib_clock_info { 2655 struct _ATOM_PPLIB_R600_CLOCK_INFO r600; 2656 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; 2657 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; 2658 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; 2659}; 2660 2661union pplib_power_state { 2662 struct _ATOM_PPLIB_STATE v1; 2663 struct _ATOM_PPLIB_STATE_V2 v2; 2664}; 2665 2666static void kv_patch_boot_state(struct amdgpu_device *adev, 2667 struct kv_ps *ps) 2668{ 2669 struct kv_power_info *pi = kv_get_pi(adev); 2670 2671 ps->num_levels = 1; 2672 ps->levels[0] = pi->boot_pl; 2673} 2674 2675static void kv_parse_pplib_non_clock_info(struct amdgpu_device *adev, 2676 struct amdgpu_ps *rps, 2677 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, 2678 u8 table_rev) 2679{ 2680 struct kv_ps *ps = kv_get_ps(rps); 2681 2682 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); 2683 rps->class = le16_to_cpu(non_clock_info->usClassification); 2684 rps->class2 = le16_to_cpu(non_clock_info->usClassification2); 2685 2686 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { 2687 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); 2688 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); 2689 } else { 2690 rps->vclk = 0; 2691 rps->dclk = 0; 2692 } 2693 2694 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { 2695 adev->pm.dpm.boot_ps = rps; 2696 kv_patch_boot_state(adev, ps); 2697 } 2698 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) 2699 adev->pm.dpm.uvd_ps = rps; 2700} 2701 2702static void kv_parse_pplib_clock_info(struct amdgpu_device *adev, 2703 struct amdgpu_ps *rps, int index, 2704 union pplib_clock_info *clock_info) 2705{ 2706 struct kv_power_info *pi = kv_get_pi(adev); 2707 struct kv_ps *ps = kv_get_ps(rps); 2708 struct kv_pl *pl = &ps->levels[index]; 2709 u32 sclk; 2710 2711 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); 2712 sclk |= clock_info->sumo.ucEngineClockHigh << 16; 2713 pl->sclk = sclk; 2714 pl->vddc_index = clock_info->sumo.vddcIndex; 2715 2716 ps->num_levels = index + 1; 2717 2718 if (pi->caps_sclk_ds) { 2719 pl->ds_divider_index = 5; 2720 pl->ss_divider_index = 5; 2721 } 2722} 2723 2724static int kv_parse_power_table(struct amdgpu_device *adev) 2725{ 2726 struct amdgpu_mode_info *mode_info = &adev->mode_info; 2727 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; 2728 union pplib_power_state *power_state; 2729 int i, j, k, non_clock_array_index, clock_array_index; 2730 union pplib_clock_info *clock_info; 2731 struct _StateArray *state_array; 2732 struct _ClockInfoArray *clock_info_array; 2733 struct _NonClockInfoArray 
*non_clock_info_array; 2734 union power_info *power_info; 2735 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); 2736 u16 data_offset; 2737 u8 frev, crev; 2738 u8 *power_state_offset; 2739 struct kv_ps *ps; 2740 2741 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 2742 &frev, &crev, &data_offset)) 2743 return -EINVAL; 2744 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 2745 2746 amdgpu_add_thermal_controller(adev); 2747 2748 state_array = (struct _StateArray *) 2749 (mode_info->atom_context->bios + data_offset + 2750 le16_to_cpu(power_info->pplib.usStateArrayOffset)); 2751 clock_info_array = (struct _ClockInfoArray *) 2752 (mode_info->atom_context->bios + data_offset + 2753 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); 2754 non_clock_info_array = (struct _NonClockInfoArray *) 2755 (mode_info->atom_context->bios + data_offset + 2756 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); 2757 2758 adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) * 2759 state_array->ucNumEntries, GFP_KERNEL); 2760 if (!adev->pm.dpm.ps) 2761 return -ENOMEM; 2762 power_state_offset = (u8 *)state_array->states; 2763 for (i = 0; i < state_array->ucNumEntries; i++) { 2764 u8 *idx; 2765 power_state = (union pplib_power_state *)power_state_offset; 2766 non_clock_array_index = power_state->v2.nonClockInfoIndex; 2767 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 2768 &non_clock_info_array->nonClockInfo[non_clock_array_index]; 2769 ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL); 2770 if (ps == NULL) { 2771 kfree(adev->pm.dpm.ps); 2772 return -ENOMEM; 2773 } 2774 adev->pm.dpm.ps[i].ps_priv = ps; 2775 k = 0; 2776 idx = (u8 *)&power_state->v2.clockInfoIndex[0]; 2777 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { 2778 clock_array_index = idx[j]; 2779 if (clock_array_index >= clock_info_array->ucNumEntries) 2780 continue; 2781 if (k >= SUMO_MAX_HARDWARE_POWERLEVELS) 2782 break; 2783 clock_info = (union pplib_clock_info *) 2784 ((u8 *)&clock_info_array->clockInfo[0] + 2785 (clock_array_index * clock_info_array->ucEntrySize)); 2786 kv_parse_pplib_clock_info(adev, 2787 &adev->pm.dpm.ps[i], k, 2788 clock_info); 2789 k++; 2790 } 2791 kv_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i], 2792 non_clock_info, 2793 non_clock_info_array->ucEntrySize); 2794 power_state_offset += 2 + power_state->v2.ucNumDPMLevels; 2795 } 2796 adev->pm.dpm.num_ps = state_array->ucNumEntries; 2797 2798 /* fill in the vce power states */ 2799 for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) { 2800 u32 sclk; 2801 clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; 2802 clock_info = (union pplib_clock_info *) 2803 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; 2804 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); 2805 sclk |= clock_info->sumo.ucEngineClockHigh << 16; 2806 adev->pm.dpm.vce_states[i].sclk = sclk; 2807 adev->pm.dpm.vce_states[i].mclk = 0; 2808 } 2809 2810 return 0; 2811} 2812 2813static int kv_dpm_init(struct amdgpu_device *adev) 2814{ 2815 struct kv_power_info *pi; 2816 int ret, i; 2817 2818 pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL); 2819 if (pi == NULL) 2820 return -ENOMEM; 2821 adev->pm.dpm.priv = pi; 2822 2823 ret = amdgpu_get_platform_caps(adev); 2824 if (ret) 2825 return ret; 2826 2827 ret = amdgpu_parse_extended_power_table(adev); 2828 if (ret) 2829 return ret; 2830 2831 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) 2832 pi->at[i] = TRINITY_AT_DFLT; 2833 2834 pi->sram_end = 
SMC_RAM_END; 2835 2836 pi->enable_nb_dpm = true; 2837 2838 pi->caps_power_containment = true; 2839 pi->caps_cac = true; 2840 pi->enable_didt = false; 2841 if (pi->enable_didt) { 2842 pi->caps_sq_ramping = true; 2843 pi->caps_db_ramping = true; 2844 pi->caps_td_ramping = true; 2845 pi->caps_tcp_ramping = true; 2846 } 2847 2848 if (amdgpu_sclk_deep_sleep_en) 2849 pi->caps_sclk_ds = true; 2850 else 2851 pi->caps_sclk_ds = false; 2852 2853 pi->enable_auto_thermal_throttling = true; 2854 pi->disable_nb_ps3_in_battery = false; 2855 if (amdgpu_bapm == 0) 2856 pi->bapm_enable = false; 2857 else 2858 pi->bapm_enable = true; 2859 pi->voltage_drop_t = 0; 2860 pi->caps_sclk_throttle_low_notification = false; 2861 pi->caps_fps = false; /* true? */ 2862 pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false; 2863 pi->caps_uvd_dpm = true; 2864 pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false; 2865 pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false; 2866 pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false; 2867 pi->caps_stable_p_state = false; 2868 2869 ret = kv_parse_sys_info_table(adev); 2870 if (ret) 2871 return ret; 2872 2873 kv_patch_voltage_values(adev); 2874 kv_construct_boot_state(adev); 2875 2876 ret = kv_parse_power_table(adev); 2877 if (ret) 2878 return ret; 2879 2880 pi->enable_dpm = true; 2881 2882 return 0; 2883} 2884 2885static void 2886kv_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, 2887 struct seq_file *m) 2888{ 2889 struct kv_power_info *pi = kv_get_pi(adev); 2890 u32 current_index = 2891 (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & 2892 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> 2893 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; 2894 u32 sclk, tmp; 2895 u16 vddc; 2896 2897 if (current_index >= SMU__NUM_SCLK_DPM_STATE) { 2898 seq_printf(m, "invalid dpm profile %d\n", current_index); 2899 } else { 2900 sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency); 2901 tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) & 2902 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >> 2903 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT; 2904 vddc = kv_convert_8bit_index_to_voltage(adev, (u16)tmp); 2905 seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en"); 2906 seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? 
"dis" : "en"); 2907 seq_printf(m, "power level %d sclk: %u vddc: %u\n", 2908 current_index, sclk, vddc); 2909 } 2910} 2911 2912static void 2913kv_dpm_print_power_state(struct amdgpu_device *adev, 2914 struct amdgpu_ps *rps) 2915{ 2916 int i; 2917 struct kv_ps *ps = kv_get_ps(rps); 2918 2919 amdgpu_dpm_print_class_info(rps->class, rps->class2); 2920 amdgpu_dpm_print_cap_info(rps->caps); 2921 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); 2922 for (i = 0; i < ps->num_levels; i++) { 2923 struct kv_pl *pl = &ps->levels[i]; 2924 printk("\t\tpower level %d sclk: %u vddc: %u\n", 2925 i, pl->sclk, 2926 kv_convert_8bit_index_to_voltage(adev, pl->vddc_index)); 2927 } 2928 amdgpu_dpm_print_ps_status(adev, rps); 2929} 2930 2931static void kv_dpm_fini(struct amdgpu_device *adev) 2932{ 2933 int i; 2934 2935 for (i = 0; i < adev->pm.dpm.num_ps; i++) { 2936 kfree(adev->pm.dpm.ps[i].ps_priv); 2937 } 2938 kfree(adev->pm.dpm.ps); 2939 kfree(adev->pm.dpm.priv); 2940 amdgpu_free_extended_power_table(adev); 2941} 2942 2943static void kv_dpm_display_configuration_changed(struct amdgpu_device *adev) 2944{ 2945 2946} 2947 2948static u32 kv_dpm_get_sclk(struct amdgpu_device *adev, bool low) 2949{ 2950 struct kv_power_info *pi = kv_get_pi(adev); 2951 struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps); 2952 2953 if (low) 2954 return requested_state->levels[0].sclk; 2955 else 2956 return requested_state->levels[requested_state->num_levels - 1].sclk; 2957} 2958 2959static u32 kv_dpm_get_mclk(struct amdgpu_device *adev, bool low) 2960{ 2961 struct kv_power_info *pi = kv_get_pi(adev); 2962 2963 return pi->sys_info.bootup_uma_clk; 2964} 2965 2966/* get temperature in millidegrees */ 2967static int kv_dpm_get_temp(struct amdgpu_device *adev) 2968{ 2969 u32 temp; 2970 int actual_temp = 0; 2971 2972 temp = RREG32_SMC(0xC0300E0C); 2973 2974 if (temp) 2975 actual_temp = (temp / 8) - 49; 2976 else 2977 actual_temp = 0; 2978 2979 actual_temp = actual_temp * 1000; 2980 2981 return actual_temp; 2982} 2983 2984static int kv_dpm_early_init(void *handle) 2985{ 2986 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2987 2988 kv_dpm_set_dpm_funcs(adev); 2989 kv_dpm_set_irq_funcs(adev); 2990 2991 return 0; 2992} 2993 2994static int kv_dpm_late_init(void *handle) 2995{ 2996 /* powerdown unused blocks for now */ 2997 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2998 int ret; 2999 3000 if (!amdgpu_dpm) 3001 return 0; 3002 3003 /* init the sysfs and debugfs files late */ 3004 ret = amdgpu_pm_sysfs_init(adev); 3005 if (ret) 3006 return ret; 3007 3008 kv_dpm_powergate_acp(adev, true); 3009 kv_dpm_powergate_samu(adev, true); 3010 kv_dpm_powergate_vce(adev, true); 3011 kv_dpm_powergate_uvd(adev, true); 3012 3013 return 0; 3014} 3015 3016static int kv_dpm_sw_init(void *handle) 3017{ 3018 int ret; 3019 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3020 3021 ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq); 3022 if (ret) 3023 return ret; 3024 3025 ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq); 3026 if (ret) 3027 return ret; 3028 3029 /* default to balanced state */ 3030 adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; 3031 adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; 3032 adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO; 3033 adev->pm.default_sclk = adev->clock.default_sclk; 3034 adev->pm.default_mclk = adev->clock.default_mclk; 3035 adev->pm.current_sclk = adev->clock.default_sclk; 3036 adev->pm.current_mclk = adev->clock.default_mclk; 
	adev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (amdgpu_dpm == 0)
		return 0;

	INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
	mutex_lock(&adev->pm.mutex);
	ret = kv_dpm_init(adev);
	if (ret)
		goto dpm_failed;
	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
	if (amdgpu_dpm == 1)
		amdgpu_pm_print_power_states(adev);
	mutex_unlock(&adev->pm.mutex);
	DRM_INFO("amdgpu: dpm initialized\n");

	return 0;

dpm_failed:
	kv_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);
	DRM_ERROR("amdgpu: dpm initialization failed\n");
	return ret;
}

static int kv_dpm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	flush_work(&adev->pm.dpm.thermal.work);

	mutex_lock(&adev->pm.mutex);
	amdgpu_pm_sysfs_fini(adev);
	kv_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

static int kv_dpm_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mutex_lock(&adev->pm.mutex);
	kv_dpm_setup_asic(adev);
	ret = kv_dpm_enable(adev);
	if (ret)
		adev->pm.dpm_enabled = false;
	else
		adev->pm.dpm_enabled = true;
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

static int kv_dpm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		kv_dpm_disable(adev);
		mutex_unlock(&adev->pm.mutex);
	}

	return 0;
}

static int kv_dpm_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		/* disable dpm */
		kv_dpm_disable(adev);
		/* reset the power state */
		adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
		mutex_unlock(&adev->pm.mutex);
	}
	return 0;
}

static int kv_dpm_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		/* asic init will reset to the boot state */
		mutex_lock(&adev->pm.mutex);
		kv_dpm_setup_asic(adev);
		ret = kv_dpm_enable(adev);
		if (ret)
			adev->pm.dpm_enabled = false;
		else
			adev->pm.dpm_enabled = true;
		mutex_unlock(&adev->pm.mutex);
		if (adev->pm.dpm_enabled)
			amdgpu_pm_compute_clocks(adev);
	}
	return 0;
}

static bool kv_dpm_is_idle(void *handle)
{
	return true;
}

static int kv_dpm_wait_for_idle(void *handle)
{
	return 0;
}


static int kv_dpm_soft_reset(void *handle)
{
	return 0;
}

static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *src,
				      unsigned type,
				      enum amdgpu_interrupt_state state)
{
	u32 cg_thermal_int;

	switch (type) {
	case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
			break;
		default:
			break;
		}
		break;

	case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
			break;
		default:
			break;
		}
		break;

	default:
		break;
	}
	return 0;
}

static int kv_dpm_process_interrupt(struct amdgpu_device *adev,
				    struct amdgpu_irq_src *source,
				    struct amdgpu_iv_entry *entry)
{
	bool queue_thermal = false;

	if (entry == NULL)
		return -EINVAL;

	switch (entry->src_id) {
	case 230: /* thermal low to high */
		DRM_DEBUG("IH: thermal low to high\n");
		adev->pm.dpm.thermal.high_to_low = false;
		queue_thermal = true;
		break;
	case 231: /* thermal high to low */
		DRM_DEBUG("IH: thermal high to low\n");
		adev->pm.dpm.thermal.high_to_low = true;
		queue_thermal = true;
		break;
	default:
		break;
	}

	if (queue_thermal)
		schedule_work(&adev->pm.dpm.thermal.work);

	return 0;
}

static int kv_dpm_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	return 0;
}

static int kv_dpm_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs kv_dpm_ip_funcs = {
	.name = "kv_dpm",
	.early_init = kv_dpm_early_init,
	.late_init = kv_dpm_late_init,
	.sw_init = kv_dpm_sw_init,
	.sw_fini = kv_dpm_sw_fini,
	.hw_init = kv_dpm_hw_init,
	.hw_fini = kv_dpm_hw_fini,
	.suspend = kv_dpm_suspend,
	.resume = kv_dpm_resume,
	.is_idle = kv_dpm_is_idle,
	.wait_for_idle = kv_dpm_wait_for_idle,
	.soft_reset = kv_dpm_soft_reset,
	.set_clockgating_state = kv_dpm_set_clockgating_state,
	.set_powergating_state = kv_dpm_set_powergating_state,
};

static const struct amdgpu_dpm_funcs kv_dpm_funcs = {
	.get_temperature = &kv_dpm_get_temp,
	.pre_set_power_state = &kv_dpm_pre_set_power_state,
	.set_power_state = &kv_dpm_set_power_state,
	.post_set_power_state = &kv_dpm_post_set_power_state,
	.display_configuration_changed = &kv_dpm_display_configuration_changed,
	.get_sclk = &kv_dpm_get_sclk,
	.get_mclk = &kv_dpm_get_mclk,
	.print_power_state = &kv_dpm_print_power_state,
	.debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
	.force_performance_level = &kv_dpm_force_performance_level,
	.powergate_uvd = &kv_dpm_powergate_uvd,
	.enable_bapm = &kv_dpm_enable_bapm,
};

static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev)
{
	if (adev->pm.funcs == NULL)
		adev->pm.funcs = &kv_dpm_funcs;
}

static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = {
	.set = kv_dpm_set_interrupt_state,
	.process = kv_dpm_process_interrupt,
};

static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
	adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs;
}
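
/*
 * A minimal usage sketch, not part of the original file; it assumes only
 * the tables and setters declared above.  Once kv_dpm_early_init() has run,
 * kv_dpm_set_dpm_funcs() has pointed adev->pm.funcs at kv_dpm_funcs, so
 * generic power-management code reaches this driver through that table,
 * e.g. to read the current temperature in millidegrees Celsius.  The helper
 * name below is illustrative only.
 */
#if 0
static int example_query_temp(struct amdgpu_device *adev)
{
	/* dispatches to kv_dpm_get_temp() via the table installed above */
	if (adev->pm.funcs && adev->pm.funcs->get_temperature)
		return adev->pm.funcs->get_temperature(adev);

	return 0;
}
#endif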