Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kv_dpm.c at v4.17-rc1 (3360 lines, 94 kB)
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "cikd.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_dpm.h"
#include "kv_dpm.h"
#include "gfx_v7_0.h"
#include <linux/seq_file.h>

#include "smu/smu_7_0_0_d.h"
#include "smu/smu_7_0_0_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"

#define KV_MAX_DEEPSLEEP_DIVIDER_ID	5
#define KV_MINIMUM_ENGINE_CLOCK		800
#define SMC_RAM_END			0x40000

static const struct amd_pm_funcs kv_dpm_funcs;

static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
static int kv_enable_nb_dpm(struct amdgpu_device *adev,
			    bool enable);
static void kv_init_graphics_levels(struct amdgpu_device *adev);
static int kv_calculate_ds_divider(struct amdgpu_device *adev);
static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev);
static int kv_calculate_dpm_settings(struct amdgpu_device *adev);
static void kv_enable_new_levels(struct amdgpu_device *adev);
static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
					   struct amdgpu_ps *new_rps);
static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level);
static int kv_set_enabled_levels(struct amdgpu_device *adev);
static int kv_force_dpm_highest(struct amdgpu_device *adev);
static int kv_force_dpm_lowest(struct amdgpu_device *adev);
static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
					struct amdgpu_ps *new_rps,
					struct amdgpu_ps *old_rps);
static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
					    int min_temp, int max_temp);
static int kv_init_fps_limits(struct amdgpu_device *adev);

static void kv_dpm_powergate_uvd(void *handle, bool gate);
static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);

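/* Helpers that translate between the 2-bit voltage indices used by the
 * BIOS clock/voltage tables and 7-bit voltage IDs. When a vddc-vs-sclk
 * dependency table is present it takes precedence over the VID mapping
 * table built from the ATOM available-sclk list.
 */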
static u32 kv_convert_vid2_to_vid7(struct amdgpu_device *adev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_2bit)
{
	struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		if (vid_2bit < vddc_sclk_table->count)
			return vddc_sclk_table->entries[vid_2bit].v;
		else
			return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
				return vid_mapping_table->entries[i].vid_7bit;
		}
		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
	}
}

static u32 kv_convert_vid7_to_vid2(struct amdgpu_device *adev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_7bit)
{
	struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		for (i = 0; i < vddc_sclk_table->count; i++) {
			if (vddc_sclk_table->entries[i].v == vid_7bit)
				return i;
		}
		return vddc_sclk_table->count - 1;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
				return vid_mapping_table->entries[i].vid_2bit;
		}

		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
	}
}

static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable)
{
/* This bit selects who handles display phy powergating.
 * Clear the bit to let atom handle it.
 * Set it to let the driver handle it.
 * For now we just let atom handle it.
 */
#if 0
	u32 v = RREG32(mmDOUT_SCRATCH3);

	if (enable)
		v |= 0x4;
	else
		v &= 0xFFFFFFFB;

	WREG32(mmDOUT_SCRATCH3, v);
#endif
}

static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev,
						      struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table,
						      ATOM_AVAILABLE_SCLK_LIST *table)
{
	u32 i;
	u32 n = 0;
	u32 prev_sclk = 0;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
		if (table[i].ulSupportedSCLK > prev_sclk) {
			sclk_voltage_mapping_table->entries[n].sclk_frequency =
				table[i].ulSupportedSCLK;
			sclk_voltage_mapping_table->entries[n].vid_2bit =
				table[i].usVoltageIndex;
			prev_sclk = table[i].ulSupportedSCLK;
			n++;
		}
	}

	sclk_voltage_mapping_table->num_max_dpm_entries = n;
}

static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
					     struct sumo_vid_mapping_table *vid_mapping_table,
					     ATOM_AVAILABLE_SCLK_LIST *table)
{
	u32 i, j;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
		if (table[i].ulSupportedSCLK != 0) {
			vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
				table[i].usVoltageID;
			vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
				table[i].usVoltageIndex;
		}
	}

	for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) {
		if (vid_mapping_table->entries[i].vid_7bit == 0) {
			for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) {
				if (vid_mapping_table->entries[j].vid_7bit != 0) {
					vid_mapping_table->entries[i] =
						vid_mapping_table->entries[j];
					vid_mapping_table->entries[j].vid_7bit = 0;
					break;
				}
			}

			if (j == SUMO_MAX_NUMBER_VOLTAGES)
				break;
		}
	}

	vid_mapping_table->num_entries = i;
}

#if 0
static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 2 },
	{ 4, 1, 1 },
	{ 5, 5, 2 },
	{ 6, 6, 1 },
	{ 7, 9, 2 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 1 },
	{ 4, 1, 1 },
	{ 5, 5, 1 },
	{ 6, 6, 1 },
	{ 7, 9, 1 },
	{ 8, 4, 1 },
	{ 9, 2, 1 },
	{ 10, 3, 1 },
	{ 11, 6, 1 },
	{ 12, 8, 2 },
	{ 13, 1, 1 },
	{ 14, 2, 1 },
	{ 15, 3, 1 },
	{ 16, 1, 1 },
	{ 17, 4, 1 },
	{ 18, 3, 1 },
	{ 19, 1, 1 },
	{ 20, 8, 1 },
	{ 21, 5, 1 },
	{ 22, 1, 1 },
	{ 23, 1, 1 },
	{ 24, 4, 1 },
	{ 27, 6, 1 },
	{ 28, 1, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
{
	{ 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
{
	{ 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
{
	{ 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
{
	{ 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
{
	{ 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
{
	{ 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};
#endif

static const struct kv_pt_config_reg didt_config_kv[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

static struct kv_ps *kv_get_ps(struct amdgpu_ps *rps)
{
	struct kv_ps *ps = rps->ps_priv;

	return ps;
}

static struct kv_power_info *kv_get_pi(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = adev->pm.dpm.priv;

	return pi;
}

#if 0
static void kv_program_local_cac_table(struct amdgpu_device *adev,
				       const struct kv_lcac_config_values *local_cac_table,
				       const struct kv_lcac_config_reg *local_cac_reg)
{
	u32 i, count, data;
	const struct kv_lcac_config_values *values = local_cac_table;

	while (values->block_id != 0xffffffff) {
		count = values->signal_id;
		for (i = 0; i < count; i++) {
			data = ((values->block_id << local_cac_reg->block_shift) &
				local_cac_reg->block_mask);
			data |= ((i << local_cac_reg->signal_shift) &
				 local_cac_reg->signal_mask);
			data |= ((values->t << local_cac_reg->t_shift) &
				 local_cac_reg->t_mask);
			data |= ((1 << local_cac_reg->enable_shift) &
				 local_cac_reg->enable_mask);
			WREG32_SMC(local_cac_reg->cntl, data);
		}
		values++;
	}
}
#endif

static int kv_program_pt_config_registers(struct amdgpu_device *adev,
					  const struct kv_pt_config_reg *cac_config_regs)
{
	const struct kv_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == KV_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case KV_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;
			cache = 0;

			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case KV_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset, data);
				break;
			}
		}
		config_regs++;
	}

	return 0;
}

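/* Toggle the DIDT (di/dt current-ramping limiter) enable bit in the
 * per-block control registers, for each of SQ/DB/TD/TCP whose ramping
 * capability flag is set in the power info.
 */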
static void kv_do_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(ixDIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(ixDIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
	}
}

static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	if (pi->caps_sq_ramping ||
	    pi->caps_db_ramping ||
	    pi->caps_td_ramping ||
	    pi->caps_tcp_ramping) {
		adev->gfx.rlc.funcs->enter_safe_mode(adev);

		if (enable) {
			ret = kv_program_pt_config_registers(adev, didt_config_kv);
			if (ret) {
				adev->gfx.rlc.funcs->exit_safe_mode(adev);
				return ret;
			}
		}

		kv_do_enable_didt(adev, enable);

		adev->gfx.rlc.funcs->exit_safe_mode(adev);
	}

	return 0;
}

#if 0
static void kv_initialize_hardware_cac_manager(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->caps_cac) {
		WREG32_SMC(ixLCAC_SX0_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_SX0_OVR_VAL, 0);
		kv_program_local_cac_table(adev, sx_local_cac_cfg_kv, sx0_cac_config_reg);

		WREG32_SMC(ixLCAC_MC0_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC0_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);

		WREG32_SMC(ixLCAC_MC1_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC1_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);

		WREG32_SMC(ixLCAC_MC2_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC2_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);

		WREG32_SMC(ixLCAC_MC3_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC3_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);

		WREG32_SMC(ixLCAC_CPL_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_CPL_OVR_VAL, 0);
		kv_program_local_cac_table(adev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
	}
}
#endif

static int kv_enable_smc_cac(struct amdgpu_device *adev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableCac);
			if (ret)
				pi->cac_enabled = false;
			else
				pi->cac_enabled = true;
		} else if (pi->cac_enabled) {
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

static int kv_process_firmware_header(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 tmp;
	int ret;

	ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
					    offsetof(SMU7_Firmware_Header, DpmTable),
					    &tmp, pi->sram_end);

	if (ret == 0)
		pi->dpm_table_start = tmp;

	ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
					    offsetof(SMU7_Firmware_Header, SoftRegisters),
					    &tmp, pi->sram_end);

	if (ret == 0)
		pi->soft_regs_start = tmp;

	return ret;
}

static int kv_enable_dpm_voltage_scaling(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->graphics_voltage_change_enable = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
					  &pi->graphics_voltage_change_enable,
					  sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_interval(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->graphics_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
					  &pi->graphics_interval,
					  sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_boot_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
					  &pi->graphics_boot_level,
					  sizeof(u8), pi->sram_end);

	return ret;
}

static void kv_program_vc(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0x3FFFC100);
}

static void kv_clear_vc(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
}

static int kv_set_divider_value(struct amdgpu_device *adev,
				u32 index, u32 sclk)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct atom_clock_dividers dividers;
	int ret;

	ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
						 sclk, false, &dividers);
	if (ret)
		return ret;

	pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
	pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);

	return 0;
}

static u16 kv_convert_8bit_index_to_voltage(struct amdgpu_device *adev,
					    u16 voltage)
{
	return 6200 - (voltage * 25);
}

static u16 kv_convert_2bit_index_to_voltage(struct amdgpu_device *adev,
					    u32 vid_2bit)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 vid_8bit = kv_convert_vid2_to_vid7(adev,
					       &pi->sys_info.vid_mapping_table,
					       vid_2bit);

	return kv_convert_8bit_index_to_voltage(adev, (u16)vid_8bit);
}

static int kv_set_vid(struct amdgpu_device *adev, u32 index, u32 vid)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
	pi->graphics_level[index].MinVddNb =
		cpu_to_be32(kv_convert_2bit_index_to_voltage(adev, vid));

	return 0;
}

static int kv_set_at(struct amdgpu_device *adev, u32 index, u32 at)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].AT = cpu_to_be16((u16)at);

	return 0;
}

static void kv_dpm_power_level_enable(struct amdgpu_device *adev,
				      u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
}

static void kv_start_dpm(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);

	tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
	WREG32_SMC(ixGENERAL_PWRMGT, tmp);

	amdgpu_kv_smc_dpm_enable(adev, true);
}

static void kv_stop_dpm(struct amdgpu_device *adev)
{
	amdgpu_kv_smc_dpm_enable(adev, false);
}

static void kv_start_am(struct amdgpu_device *adev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
			      SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
	sclk_pwrmgt_cntl |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;

	WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static void kv_reset_am(struct amdgpu_device *adev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
			     SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);

	WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static int kv_freeze_sclk_dpm(struct amdgpu_device *adev, bool freeze)
{
	return amdgpu_kv_notify_message_to_smu(adev, freeze ?
					       PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
}

static int kv_force_lowest_valid(struct amdgpu_device *adev)
{
	return kv_force_dpm_lowest(adev);
}

static int kv_unforce_levels(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NoForcedLevel);
	else
		return kv_set_enabled_levels(adev);
}

static int kv_update_sclk_t(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 low_sclk_interrupt_t = 0;
	int ret = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
						  (u8 *)&low_sclk_interrupt_t,
						  sizeof(u32), pi->sram_end);
	}
	return ret;
}

static int kv_program_bootup_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk == pi->boot_pl.sclk)
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(adev, i, true);
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		if (table->num_max_dpm_entries == 0)
			return -EINVAL;

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(adev, i, true);
	}
	return 0;
}

static int kv_enable_auto_thermal_throttling(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->graphics_therm_throttle_enable = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
					  &pi->graphics_therm_throttle_enable,
					  sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_upload_dpm_settings(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
					  (u8 *)&pi->graphics_level,
					  sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
					  pi->sram_end);

	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
					  &pi->graphics_dpm_level_count,
					  sizeof(u8), pi->sram_end);

	return ret;
}

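/* DFS bypass divider selection: a non-zero bypass code is chosen when the
 * clock falls within 200 units of a handful of known frequencies (clocks
 * here appear to be in 10 kHz units, so e.g. 40000 is 400 MHz).
 */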
static u32 kv_get_clock_difference(u32 a, u32 b)
{
	return (a >= b) ? a - b : b - a;
}

static u32 kv_get_clk_bypass(struct amdgpu_device *adev, u32 clk)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 value;

	if (pi->caps_enable_dfs_bypass) {
		if (kv_get_clock_difference(clk, 40000) < 200)
			value = 3;
		else if (kv_get_clock_difference(clk, 30000) < 200)
			value = 2;
		else if (kv_get_clock_difference(clk, 20000) < 200)
			value = 7;
		else if (kv_get_clock_difference(clk, 15000) < 200)
			value = 6;
		else if (kv_get_clock_difference(clk, 10000) < 200)
			value = 8;
		else
			value = 0;
	} else {
		value = 0;
	}

	return value;
}

static int kv_populate_uvd_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_uvd_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->uvd_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    (pi->high_voltage_t < table->entries[i].v))
			break;

		pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
		pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
		pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);

		pi->uvd_level[i].VClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].vclk);
		pi->uvd_level[i].DClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].dclk);

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].vclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].dclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;

		pi->uvd_level_count++;
	}

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
					  (u8 *)&pi->uvd_level_count,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->uvd_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, UVDInterval),
					  &pi->uvd_interval,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, UvdLevel),
					  (u8 *)&pi->uvd_level,
					  sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
					  pi->sram_end);

	return ret;

}

static int kv_populate_vce_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;
	u32 i;
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;

	if (table == NULL || table->count == 0)
		return 0;

	pi->vce_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
		pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->vce_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].evclk);

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].evclk, false, &dividers);
		if (ret)
			return ret;
		pi->vce_level[i].Divider = (u8)dividers.post_div;

		pi->vce_level_count++;
	}

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
					  (u8 *)&pi->vce_level_count,
					  sizeof(u8),
					  pi->sram_end);
	if (ret)
		return ret;

	pi->vce_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, VCEInterval),
					  (u8 *)&pi->vce_interval,
					  sizeof(u8),
					  pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, VceLevel),
					  (u8 *)&pi->vce_level,
					  sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
					  pi->sram_end);

	return ret;
}

static int kv_populate_samu_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->samu_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->samu_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].clk);

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->samu_level[i].Divider = (u8)dividers.post_div;

		pi->samu_level_count++;
	}

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
					  (u8 *)&pi->samu_level_count,
					  sizeof(u8),
					  pi->sram_end);
	if (ret)
		return ret;

	pi->samu_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
					  (u8 *)&pi->samu_interval,
					  sizeof(u8),
					  pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, SamuLevel),
					  (u8 *)&pi->samu_level,
					  sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
					  pi->sram_end);
	if (ret)
		return ret;

	return ret;
}

static int kv_populate_acp_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->acp_level_count = 0;
	for (i = 0; i < table->count; i++) {
		pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->acp_level[i].Divider = (u8)dividers.post_div;

		pi->acp_level_count++;
	}

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
					  (u8 *)&pi->acp_level_count,
					  sizeof(u8),
					  pi->sram_end);
	if (ret)
		return ret;

	pi->acp_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, ACPInterval),
					  (u8 *)&pi->acp_interval,
					  sizeof(u8),
					  pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, AcpLevel),
					  (u8 *)&pi->acp_level,
					  sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
					  pi->sram_end);
	if (ret)
		return ret;

	return ret;
}

static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	}
}

static int kv_enable_ulv(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					       PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
}

static void kv_reset_acp_boot_level(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->acp_boot_level = 0xff;
}

static void kv_update_current_ps(struct amdgpu_device *adev,
				 struct amdgpu_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
	adev->pm.dpm.current_ps = &pi->current_rps;
}

static void kv_update_requested_ps(struct amdgpu_device *adev,
				   struct amdgpu_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
	adev->pm.dpm.requested_ps = &pi->requested_rps;
}

static void kv_dpm_enable_bapm(void *handle, bool enable)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	if (pi->bapm_enable) {
		ret = amdgpu_kv_smc_bapm_enable(adev, enable);
		if (ret)
			DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
	}
}

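/* Main DPM bring-up: upload the SMU tables (graphics, UVD, VCE, SAMU,
 * ACP), program the bootup state, then turn on voltage scaling, ULV,
 * DIDT, CAC and the internal thermal interrupt range.
 */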
static int kv_dpm_enable(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	ret = kv_process_firmware_header(adev);
	if (ret) {
		DRM_ERROR("kv_process_firmware_header failed\n");
		return ret;
	}
	kv_init_fps_limits(adev);
	kv_init_graphics_levels(adev);
	ret = kv_program_bootup_state(adev);
	if (ret) {
		DRM_ERROR("kv_program_bootup_state failed\n");
		return ret;
	}
	kv_calculate_dfs_bypass_settings(adev);
	ret = kv_upload_dpm_settings(adev);
	if (ret) {
		DRM_ERROR("kv_upload_dpm_settings failed\n");
		return ret;
	}
	ret = kv_populate_uvd_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_uvd_table failed\n");
		return ret;
	}
	ret = kv_populate_vce_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_vce_table failed\n");
		return ret;
	}
	ret = kv_populate_samu_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_samu_table failed\n");
		return ret;
	}
	ret = kv_populate_acp_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_acp_table failed\n");
		return ret;
	}
	kv_program_vc(adev);
#if 0
	kv_initialize_hardware_cac_manager(adev);
#endif
	kv_start_am(adev);
	if (pi->enable_auto_thermal_throttling) {
		ret = kv_enable_auto_thermal_throttling(adev);
		if (ret) {
			DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
			return ret;
		}
	}
	ret = kv_enable_dpm_voltage_scaling(adev);
	if (ret) {
		DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
		return ret;
	}
	ret = kv_set_dpm_interval(adev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_interval failed\n");
		return ret;
	}
	ret = kv_set_dpm_boot_state(adev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_boot_state failed\n");
		return ret;
	}
	ret = kv_enable_ulv(adev, true);
	if (ret) {
		DRM_ERROR("kv_enable_ulv failed\n");
		return ret;
	}
	kv_start_dpm(adev);
	ret = kv_enable_didt(adev, true);
	if (ret) {
		DRM_ERROR("kv_enable_didt failed\n");
		return ret;
	}
	ret = kv_enable_smc_cac(adev, true);
	if (ret) {
		DRM_ERROR("kv_enable_smc_cac failed\n");
		return ret;
	}

	kv_reset_acp_boot_level(adev);

	ret = amdgpu_kv_smc_bapm_enable(adev, false);
	if (ret) {
		DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
		return ret;
	}

	kv_update_current_ps(adev, adev->pm.dpm.boot_ps);

	if (adev->irq.installed &&
	    amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
		ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
		if (ret) {
			DRM_ERROR("kv_set_thermal_temperature_range failed\n");
			return ret;
		}
		amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
			       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
		amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
			       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
	}

	return ret;
}

static void kv_dpm_disable(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
		       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
		       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);

	amdgpu_kv_smc_bapm_enable(adev, false);

	if (adev->asic_type == CHIP_MULLINS)
		kv_enable_nb_dpm(adev, false);

	/* powerup blocks */
	kv_dpm_powergate_acp(adev, false);
	kv_dpm_powergate_samu(adev, false);
	kv_dpm_powergate_vce(adev, false);
	kv_dpm_powergate_uvd(adev, false);

	kv_enable_smc_cac(adev, false);
	kv_enable_didt(adev, false);
	kv_clear_vc(adev);
	kv_stop_dpm(adev);
	kv_enable_ulv(adev, false);
	kv_reset_am(adev);

	kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
}

#if 0
static int kv_write_smc_soft_register(struct amdgpu_device *adev,
				      u16 reg_offset, u32 value)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	return amdgpu_kv_copy_bytes_to_smc(adev, pi->soft_regs_start + reg_offset,
					   (u8 *)&value, sizeof(u16), pi->sram_end);
}

static int kv_read_smc_soft_register(struct amdgpu_device *adev,
				     u16 reg_offset, u32 *value)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	return amdgpu_kv_read_smc_sram_dword(adev, pi->soft_regs_start + reg_offset,
					     value, pi->sram_end);
}
#endif

static void kv_init_sclk_t(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->low_sclk_interrupt_t = 0;
}

static int kv_init_fps_limits(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		pi->fps_high_t = cpu_to_be16(tmp);
		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, FpsHighT),
						  (u8 *)&pi->fps_high_t,
						  sizeof(u16), pi->sram_end);

		tmp = 30;
		pi->fps_low_t = cpu_to_be16(tmp);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, FpsLowT),
						  (u8 *)&pi->fps_low_t,
						  sizeof(u16), pi->sram_end);

	}
	return ret;
}

static void kv_init_powergate_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->uvd_power_gated = false;
	pi->vce_power_gated = false;
	pi->samu_power_gated = false;
	pi->acp_power_gated = false;
}

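/* Per-IP DPM on/off switches, each implemented as a single SMU message. */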
static int kv_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					       PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
}

static int kv_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					       PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
}

static int kv_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					       PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
}

static int kv_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					       PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
}

static int kv_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_uvd_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	int ret;
	u32 mask;

	if (!gate) {
		if (table->count)
			pi->uvd_boot_level = table->count - 1;
		else
			pi->uvd_boot_level = 0;

		if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
			mask = 1 << pi->uvd_boot_level;
		} else {
			mask = 0x1f;
		}

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
						  (uint8_t *)&pi->uvd_boot_level,
						  sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		amdgpu_kv_send_msg_to_smc_with_parameter(adev,
							 PPSMC_MSG_UVDDPM_SetEnabledMask,
							 mask);
	}

	return kv_enable_uvd_dpm(adev, !gate);
}

static u8 kv_get_vce_boot_level(struct amdgpu_device *adev, u32 evclk)
{
	u8 i;
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].evclk >= evclk)
			break;
	}

	return i;
}

static int kv_update_vce_dpm(struct amdgpu_device *adev,
			     struct amdgpu_ps *amdgpu_new_state,
			     struct amdgpu_ps *amdgpu_current_state)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	int ret;

	if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
		kv_dpm_powergate_vce(adev, false);
		if (pi->caps_stable_p_state)
			pi->vce_boot_level = table->count - 1;
		else
			pi->vce_boot_level = kv_get_vce_boot_level(adev, amdgpu_new_state->evclk);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
						  (u8 *)&pi->vce_boot_level,
						  sizeof(u8),
						  pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_VCEDPM_SetEnabledMask,
								 (1 << pi->vce_boot_level));
		kv_enable_vce_dpm(adev, true);
	} else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
		kv_enable_vce_dpm(adev, false);
		kv_dpm_powergate_vce(adev, true);
	}

	return 0;
}

static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->samu_boot_level = table->count - 1;
		else
			pi->samu_boot_level = 0;

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
						  (u8 *)&pi->samu_boot_level,
						  sizeof(u8),
						  pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_SAMUDPM_SetEnabledMask,
								 (1 << pi->samu_boot_level));
	}

	return kv_enable_samu_dpm(adev, !gate);
}

static u8 kv_get_acp_boot_level(struct amdgpu_device *adev)
{
	u8 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].clk >= 0) /* XXX */
			break;
	}

	if (i >= table->count)
		i = table->count - 1;

	return i;
}

static void kv_update_acp_boot_level(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u8 acp_boot_level;

	if (!pi->caps_stable_p_state) {
		acp_boot_level = kv_get_acp_boot_level(adev);
		if (acp_boot_level != pi->acp_boot_level) {
			pi->acp_boot_level = acp_boot_level;
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_ACPDPM_SetEnabledMask,
								 (1 << pi->acp_boot_level));
		}
	}
}

static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->acp_boot_level = table->count - 1;
		else
			pi->acp_boot_level = kv_get_acp_boot_level(adev);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
						  (u8 *)&pi->acp_boot_level,
						  sizeof(u8),
						  pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_ACPDPM_SetEnabledMask,
								 (1 << pi->acp_boot_level));
	}

	return kv_enable_acp_dpm(adev, !gate);
}

static void kv_dpm_powergate_uvd(void *handle, bool gate)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->uvd_power_gated = gate;

	if (gate) {
		/* stop the UVD block */
		ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							     AMD_PG_STATE_GATE);
		kv_update_uvd_dpm(adev, gate);
		if (pi->caps_uvd_pg)
			/* power off the UVD block */
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF);
	} else {
		if (pi->caps_uvd_pg)
			/* power on the UVD block */
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
		/* re-init the UVD block */
		kv_update_uvd_dpm(adev, gate);

		ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							     AMD_PG_STATE_UNGATE);
	}
}

static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->vce_power_gated == gate)
		return;

	pi->vce_power_gated = gate;

	if (!pi->caps_vce_pg)
		return;

	if (gate)
		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
	else
		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
}

static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->samu_power_gated == gate)
		return;

	pi->samu_power_gated = gate;

	if (gate) {
		kv_update_samu_dpm(adev, true);
		if (pi->caps_samu_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerOFF);
	} else {
		if (pi->caps_samu_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerON);
		kv_update_samu_dpm(adev, false);
	}
}

static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->acp_power_gated == gate)
		return;

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return;

	pi->acp_power_gated = gate;

	if (gate) {
		kv_update_acp_dpm(adev, true);
		if (pi->caps_acp_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerOFF);
	} else {
		if (pi->caps_acp_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerON);
		kv_update_acp_dpm(adev, false);
	}
}

static void kv_set_valid_clock_range(struct amdgpu_device *adev,
				     struct amdgpu_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
			    (i == (pi->graphics_dpm_level_count - 1))) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
			    (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
			if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
			    i == (int)(pi->graphics_dpm_level_count - 1)) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency <=
			    new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk -
			     table->entries[pi->highest_valid].sclk_frequency) >
			    (table->entries[pi->lowest_valid].sclk_frequency -
			     new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	}
}

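/* Write the boot level's ClkBypassCntl byte into the SMU copy of the
 * graphics level table when DFS bypass is supported.
 */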
static int kv_update_dfs_bypass_settings(struct amdgpu_device *adev,
					 struct amdgpu_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;
	u8 clk_bypass_cntl;

	if (pi->caps_enable_dfs_bypass) {
		clk_bypass_cntl = new_ps->need_dfs_bypass ?
			pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  (pi->dpm_table_start +
						   offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
						   (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
						   offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
						  &clk_bypass_cntl,
						  sizeof(u8), pi->sram_end);
	}

	return ret;
}

static int kv_enable_nb_dpm(struct amdgpu_device *adev,
			    bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;

	if (enable) {
		if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
			ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Enable);
			if (ret == 0)
				pi->nb_dpm_enabled = true;
		}
	} else {
		if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
			ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Disable);
			if (ret == 0)
				pi->nb_dpm_enabled = false;
		}
	}

	return ret;
}

static int kv_dpm_force_performance_level(void *handle,
					  enum amd_dpm_forced_level level)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
		ret = kv_force_dpm_highest(adev);
		if (ret)
			return ret;
	} else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
		ret = kv_force_dpm_lowest(adev);
		if (ret)
			return ret;
	} else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
		ret = kv_unforce_levels(adev);
		if (ret)
			return ret;
	}

	adev->pm.dpm.forced_level = level;

	return 0;
}

static int kv_dpm_pre_set_power_state(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
	struct amdgpu_ps *new_ps = &requested_ps;

	kv_update_requested_ps(adev, new_ps);

	kv_apply_state_adjust_rules(adev,
				    &pi->requested_rps,
				    &pi->current_rps);

	return 0;
}

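/* Apply the requested power state. Kabini/Mullins force the lowest level
 * around the table reprogramming, while the other parts (Kaveri) freeze
 * SCLK DPM instead.
 */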
static int kv_dpm_set_power_state(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_ps *new_ps = &pi->requested_rps;
	struct amdgpu_ps *old_ps = &pi->current_rps;
	int ret;

	if (pi->bapm_enable) {
		ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.dpm.ac_power);
		if (ret) {
			DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
			return ret;
		}
	}

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(adev, new_ps);
			kv_update_dfs_bypass_settings(adev, new_ps);
			ret = kv_calculate_ds_divider(adev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(adev);
			kv_calculate_dpm_settings(adev);
			kv_force_lowest_valid(adev);
			kv_enable_new_levels(adev);
			kv_upload_dpm_settings(adev);
			kv_program_nbps_index_settings(adev, new_ps);
			kv_unforce_levels(adev);
			kv_set_enabled_levels(adev);
			kv_force_lowest_valid(adev);
			kv_unforce_levels(adev);

			ret = kv_update_vce_dpm(adev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_sclk_t(adev);
			if (adev->asic_type == CHIP_MULLINS)
				kv_enable_nb_dpm(adev, true);
		}
	} else {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(adev, new_ps);
			kv_update_dfs_bypass_settings(adev, new_ps);
			ret = kv_calculate_ds_divider(adev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(adev);
			kv_calculate_dpm_settings(adev);
			kv_freeze_sclk_dpm(adev, true);
			kv_upload_dpm_settings(adev);
			kv_program_nbps_index_settings(adev, new_ps);
			kv_freeze_sclk_dpm(adev, false);
			kv_set_enabled_levels(adev);
			ret = kv_update_vce_dpm(adev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_acp_boot_level(adev);
			kv_update_sclk_t(adev);
			kv_enable_nb_dpm(adev, true);
		}
	}

	return 0;
}

static void kv_dpm_post_set_power_state(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_ps *new_ps = &pi->requested_rps;

	kv_update_current_ps(adev, new_ps);
}

static void kv_dpm_setup_asic(struct amdgpu_device *adev)
{
	sumo_take_smu_control(adev, true);
	kv_init_powergate_state(adev);
	kv_init_sclk_t(adev);
}

#if 0
static void kv_dpm_reset_asic(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
		kv_force_lowest_valid(adev);
		kv_init_graphics_levels(adev);
		kv_program_bootup_state(adev);
		kv_upload_dpm_settings(adev);
		kv_force_lowest_valid(adev);
		kv_unforce_levels(adev);
	} else {
		kv_init_graphics_levels(adev);
		kv_program_bootup_state(adev);
		kv_freeze_sclk_dpm(adev, true);
		kv_upload_dpm_settings(adev);
		kv_freeze_sclk_dpm(adev, false);
		kv_set_enabled_level(adev, pi->graphics_boot_level);
	}
}
#endif

static void kv_construct_max_power_limits_table(struct amdgpu_device *adev,
						struct amdgpu_clock_and_voltage_limits *table)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
		int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
		table->sclk =
			pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
		table->vddc =
			kv_convert_2bit_index_to_voltage(adev,
				pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
	}

	table->mclk = pi->sys_info.nbp_memory_clock[0];
}

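/*
 * The UVD/VCE/SAMU/ACP dependency tables from the VBIOS carry 8-bit
 * voltage indices; convert them to real voltage values up front.
 */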
static void kv_patch_voltage_values(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table *samu_table =
		&adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table *acp_table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	if (uvd_table->count) {
		for (i = 0; i < uvd_table->count; i++)
			uvd_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(adev,
								 uvd_table->entries[i].v);
	}

	if (vce_table->count) {
		for (i = 0; i < vce_table->count; i++)
			vce_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(adev,
								 vce_table->entries[i].v);
	}

	if (samu_table->count) {
		for (i = 0; i < samu_table->count; i++)
			samu_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(adev,
								 samu_table->entries[i].v);
	}

	if (acp_table->count) {
		for (i = 0; i < acp_table->count; i++)
			acp_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(adev,
								 acp_table->entries[i].v);
	}
}

static void kv_construct_boot_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
	pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
	pi->boot_pl.ds_divider_index = 0;
	pi->boot_pl.ss_divider_index = 0;
	pi->boot_pl.allow_gnb_slow = 1;
	pi->boot_pl.force_nbp_state = 0;
	pi->boot_pl.display_wm = 0;
	pi->boot_pl.vce_wm = 0;
}

static int kv_force_dpm_highest(struct amdgpu_device *adev)
{
	int ret;
	u32 enable_mask, i;

	ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask);
	if (ret)
		return ret;

	for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
		if (enable_mask & (1 << i))
			break;
	}

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(adev, i);
}

static int kv_force_dpm_lowest(struct amdgpu_device *adev)
{
	int ret;
	u32 enable_mask, i;

	ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask);
	if (ret)
		return ret;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (enable_mask & (1 << i))
			break;
	}

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(adev, i);
}

static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev,
					     u32 sclk, u32 min_sclk_in_sr)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	u32 temp;
	u32 min = max(min_sclk_in_sr, (u32)KV_MINIMUM_ENGINE_CLOCK);

	if (sclk < min)
		return 0;

	if (!pi->caps_sclk_ds)
		return 0;

	for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
		temp = sclk >> i;
		if (temp >= min)
			break;
	}

	return (u8)i;
}

static int kv_get_high_voltage_limit(struct amdgpu_device *adev, int *limit)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	int i;

	if (table && table->count) {
		for (i = table->count - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	}

	*limit = 0;
	return 0;
}

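/*
 * Adjust the requested state to the platform constraints: pin the VCE
 * clocks, raise per-level sclks to the minimum (or stable-p-state) sclk,
 * clamp levels that exceed the high voltage limit, and pick the NB
 * p-state masks based on chip, display count and video playback.
 */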
static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
					struct amdgpu_ps *new_rps,
					struct amdgpu_ps *old_rps)
{
	struct kv_ps *ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 min_sclk = 10000; /* ??? */
	u32 sclk, mclk = 0;
	int i, limit;
	bool force_high;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 stable_p_state_sclk = 0;
	struct amdgpu_clock_and_voltage_limits *max_limits =
		&adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	if (new_rps->vce_active) {
		new_rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
		new_rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
	} else {
		new_rps->evclk = 0;
		new_rps->ecclk = 0;
	}

	mclk = max_limits->mclk;
	sclk = min_sclk;

	if (pi->caps_stable_p_state) {
		stable_p_state_sclk = (max_limits->sclk * 75) / 100;

		for (i = table->count - 1; i >= 0; i--) {
			if (stable_p_state_sclk >= table->entries[i].clk) {
				stable_p_state_sclk = table->entries[i].clk;
				break;
			}
		}

		if (i > 0)
			stable_p_state_sclk = table->entries[0].clk;

		sclk = stable_p_state_sclk;
	}

	if (new_rps->vce_active) {
		if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
			sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
	}

	ps->need_dfs_bypass = true;

	for (i = 0; i < ps->num_levels; i++) {
		if (ps->levels[i].sclk < sclk)
			ps->levels[i].sclk = sclk;
	}

	if (table && table->count) {
		for (i = 0; i < ps->num_levels; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) {
				kv_get_high_voltage_limit(adev, &limit);
				ps->levels[i].sclk = table->entries[limit].clk;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < ps->num_levels; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) {
				kv_get_high_voltage_limit(adev, &limit);
				ps->levels[i].sclk = table->entries[limit].sclk_frequency;
			}
		}
	}

	if (pi->caps_stable_p_state) {
		for (i = 0; i < ps->num_levels; i++) {
			ps->levels[i].sclk = stable_p_state_sclk;
		}
	}

	pi->video_start = new_rps->dclk || new_rps->vclk ||
		new_rps->evclk || new_rps->ecclk;

	if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
	    ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
		ps->dpm0_pg_nb_ps_lo = 0x1;
		ps->dpm0_pg_nb_ps_hi = 0x0;
		ps->dpmx_nb_ps_lo = 0x1;
		ps->dpmx_nb_ps_hi = 0x0;
	} else {
		ps->dpm0_pg_nb_ps_lo = 0x3;
		ps->dpm0_pg_nb_ps_hi = 0x0;
		ps->dpmx_nb_ps_lo = 0x3;
		ps->dpmx_nb_ps_hi = 0x0;

		if (pi->sys_info.nb_dpm_enable) {
			force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
				pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) ||
				pi->disable_nb_ps3_in_battery;
			ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
			ps->dpm0_pg_nb_ps_hi = 0x2;
			ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
			ps->dpmx_nb_ps_hi = 0x2;
		}
	}
}

static void kv_dpm_power_level_enabled_for_throttle(struct amdgpu_device *adev,
						    u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
}

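/*
 * Pick a deep sleep divider for every valid graphics level from its SCLK
 * and the minimum engine clock.
 */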
static int kv_calculate_ds_divider(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 sclk_in_sr = 10000; /* ??? */
	u32 i;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
		pi->graphics_level[i].DeepSleepDivId =
			kv_get_sleep_divider_id_from_clock(adev,
							   be32_to_cpu(pi->graphics_level[i].SclkFrequency),
							   sclk_in_sr);
	}
	return 0;
}

static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	bool force_high;
	struct amdgpu_clock_and_voltage_limits *max_limits =
		&adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	u32 mclk = max_limits->mclk;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
			pi->graphics_level[i].GnbSlow = 1;
			pi->graphics_level[i].ForceNbPs1 = 0;
			pi->graphics_level[i].UpH = 0;
		}

		if (!pi->sys_info.nb_dpm_enable)
			return 0;

		force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
			      (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);

		if (force_high) {
			for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
				pi->graphics_level[i].GnbSlow = 0;
		} else {
			if (pi->battery_state)
				pi->graphics_level[0].ForceNbPs1 = 1;

			pi->graphics_level[1].GnbSlow = 0;
			pi->graphics_level[2].GnbSlow = 0;
			pi->graphics_level[3].GnbSlow = 0;
			pi->graphics_level[4].GnbSlow = 0;
		}
	} else {
		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
			pi->graphics_level[i].GnbSlow = 1;
			pi->graphics_level[i].ForceNbPs1 = 0;
			pi->graphics_level[i].UpH = 0;
		}

		if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
			pi->graphics_level[pi->lowest_valid].UpH = 0x28;
			pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
			if (pi->lowest_valid != pi->highest_valid)
				pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
		}
	}
	return 0;
}

static int kv_calculate_dpm_settings(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
		pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;

	return 0;
}

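/*
 * Build the graphics DPM levels from the VBIOS voltage dependency table
 * (or the sclk/voltage mapping table as a fallback), stopping at the
 * high voltage limit, then leave all levels disabled until the new
 * valid range is known.
 */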
static void kv_init_graphics_levels(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		u32 vid_2bit;

		pi->graphics_dpm_level_count = 0;
		for (i = 0; i < table->count; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(adev, table->entries[i].v)))
				break;

			kv_set_divider_value(adev, i, table->entries[i].clk);
			vid_2bit = kv_convert_vid7_to_vid2(adev,
							   &pi->sys_info.vid_mapping_table,
							   table->entries[i].v);
			kv_set_vid(adev, i, vid_2bit);
			kv_set_at(adev, i, pi->at[i]);
			kv_dpm_power_level_enabled_for_throttle(adev, i, true);
			pi->graphics_dpm_level_count++;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		pi->graphics_dpm_level_count = 0;
		for (i = 0; i < table->num_max_dpm_entries; i++) {
			if (pi->high_voltage_t &&
			    pi->high_voltage_t <
			    kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit))
				break;

			kv_set_divider_value(adev, i, table->entries[i].sclk_frequency);
			kv_set_vid(adev, i, table->entries[i].vid_2bit);
			kv_set_at(adev, i, pi->at[i]);
			kv_dpm_power_level_enabled_for_throttle(adev, i, true);
			pi->graphics_dpm_level_count++;
		}
	}

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
		kv_dpm_power_level_enable(adev, i, false);
}

static void kv_enable_new_levels(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (i >= pi->lowest_valid && i <= pi->highest_valid)
			kv_dpm_power_level_enable(adev, i, true);
	}
}

static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level)
{
	u32 new_mask = (1 << level);

	return amdgpu_kv_send_msg_to_smc_with_parameter(adev,
							PPSMC_MSG_SCLKDPM_SetEnabledMask,
							new_mask);
}

static int kv_set_enabled_levels(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i, new_mask = 0;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
		new_mask |= (1 << i);

	return amdgpu_kv_send_msg_to_smc_with_parameter(adev,
							PPSMC_MSG_SCLKDPM_SetEnabledMask,
							new_mask);
}

static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
					   struct amdgpu_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 nbdpmconfig1;

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return;

	if (pi->sys_info.nb_dpm_enable) {
		nbdpmconfig1 = RREG32_SMC(ixNB_DPM_CONFIG_1);
		nbdpmconfig1 &= ~(NB_DPM_CONFIG_1__Dpm0PgNbPsLo_MASK |
				  NB_DPM_CONFIG_1__Dpm0PgNbPsHi_MASK |
				  NB_DPM_CONFIG_1__DpmXNbPsLo_MASK |
				  NB_DPM_CONFIG_1__DpmXNbPsHi_MASK);
		nbdpmconfig1 |= (new_ps->dpm0_pg_nb_ps_lo << NB_DPM_CONFIG_1__Dpm0PgNbPsLo__SHIFT) |
			(new_ps->dpm0_pg_nb_ps_hi << NB_DPM_CONFIG_1__Dpm0PgNbPsHi__SHIFT) |
			(new_ps->dpmx_nb_ps_lo << NB_DPM_CONFIG_1__DpmXNbPsLo__SHIFT) |
			(new_ps->dpmx_nb_ps_hi << NB_DPM_CONFIG_1__DpmXNbPsHi__SHIFT);
		WREG32_SMC(ixNB_DPM_CONFIG_1, nbdpmconfig1);
	}
}

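/*
 * Program the thermal interrupt thresholds; the DIG_THERM fields are
 * biased by 49 degrees C, matching the conversion in kv_dpm_get_temp().
 */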
static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
	tmp &= ~(CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK |
		 CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK);
	tmp |= ((49 + (high_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT) |
		((49 + (low_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT);
	WREG32_SMC(ixCG_THERMAL_INT_CTRL, tmp);

	adev->pm.dpm.thermal.min_temp = low_temp;
	adev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
};

static int kv_parse_sys_info_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	union igp_info *igp_info;
	u8 frev, crev;
	u16 data_offset;
	int i;

	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)(mode_info->atom_context->bios +
					      data_offset);

		if (crev != 8) {
			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
			return -EINVAL;
		}
		pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
		pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
		pi->sys_info.bootup_nb_voltage_index =
			le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
		if (igp_info->info_8.ucHtcTmpLmt == 0)
			pi->sys_info.htc_tmp_lmt = 203;
		else
			pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
		if (igp_info->info_8.ucHtcHystLmt == 0)
			pi->sys_info.htc_hyst_lmt = 5;
		else
			pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
		if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
			DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
		}

		if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
			pi->sys_info.nb_dpm_enable = true;
		else
			pi->sys_info.nb_dpm_enable = false;

		for (i = 0; i < KV_NUM_NBPSTATES; i++) {
			pi->sys_info.nbp_memory_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
			pi->sys_info.nbp_n_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
		}
		if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
		    SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
			pi->caps_enable_dfs_bypass = true;

		sumo_construct_sclk_voltage_mapping_table(adev,
							  &pi->sys_info.sclk_voltage_mapping_table,
							  igp_info->info_8.sAvail_SCLK);

		sumo_construct_vid_mapping_table(adev,
						 &pi->sys_info.vid_mapping_table,
						 igp_info->info_8.sAvail_SCLK);

		kv_construct_max_power_limits_table(adev,
						    &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	}
	return 0;
}

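/*
 * The unions below overlay the different revisions of the ATOM powerplay
 * tables found in the VBIOS so the parser can pick the matching layout.
 */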
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};

union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};

static void kv_patch_boot_state(struct amdgpu_device *adev,
				struct kv_ps *ps)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	ps->num_levels = 1;
	ps->levels[0] = pi->boot_pl;
}

static void kv_parse_pplib_non_clock_info(struct amdgpu_device *adev,
					  struct amdgpu_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	struct kv_ps *ps = kv_get_ps(rps);

	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		adev->pm.dpm.boot_ps = rps;
		kv_patch_boot_state(adev, ps);
	}
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		adev->pm.dpm.uvd_ps = rps;
}

static void kv_parse_pplib_clock_info(struct amdgpu_device *adev,
				      struct amdgpu_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct kv_ps *ps = kv_get_ps(rps);
	struct kv_pl *pl = &ps->levels[index];
	u32 sclk;

	sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
	sclk |= clock_info->sumo.ucEngineClockHigh << 16;
	pl->sclk = sclk;
	pl->vddc_index = clock_info->sumo.vddcIndex;

	ps->num_levels = index + 1;

	if (pi->caps_sclk_ds) {
		pl->ds_divider_index = 5;
		pl->ss_divider_index = 5;
	}
}

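/*
 * Walk the PPLib state array from the VBIOS, building an amdgpu_ps entry
 * (with up to SUMO_MAX_HARDWARE_POWERLEVELS levels) per state, then fill
 * in the sclk for each VCE state from the clock info array.
 */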
static int kv_parse_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j, k, non_clock_array_index, clock_array_index;
	union pplib_clock_info *clock_info;
	struct _StateArray *state_array;
	struct _ClockInfoArray *clock_info_array;
	struct _NonClockInfoArray *non_clock_info_array;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	u8 *power_state_offset;
	struct kv_ps *ps;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	amdgpu_add_thermal_controller(adev);

	state_array = (struct _StateArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
	clock_info_array = (struct _ClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
	non_clock_info_array = (struct _NonClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

	adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
				  state_array->ucNumEntries, GFP_KERNEL);
	if (!adev->pm.dpm.ps)
		return -ENOMEM;
	power_state_offset = (u8 *)state_array->states;
	for (i = 0; i < state_array->ucNumEntries; i++) {
		u8 *idx;
		power_state = (union pplib_power_state *)power_state_offset;
		non_clock_array_index = power_state->v2.nonClockInfoIndex;
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
		if (ps == NULL) {
			kfree(adev->pm.dpm.ps);
			return -ENOMEM;
		}
		adev->pm.dpm.ps[i].ps_priv = ps;
		k = 0;
		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
			clock_array_index = idx[j];
			if (clock_array_index >= clock_info_array->ucNumEntries)
				continue;
			if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
				break;
			clock_info = (union pplib_clock_info *)
				((u8 *)&clock_info_array->clockInfo[0] +
				 (clock_array_index * clock_info_array->ucEntrySize));
			kv_parse_pplib_clock_info(adev,
						  &adev->pm.dpm.ps[i], k,
						  clock_info);
			k++;
		}
		kv_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
					      non_clock_info,
					      non_clock_info_array->ucEntrySize);
		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
	}
	adev->pm.dpm.num_ps = state_array->ucNumEntries;

	/* fill in the vce power states */
	for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
		u32 sclk;
		clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
		clock_info = (union pplib_clock_info *)
			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
		sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
		sclk |= clock_info->sumo.ucEngineClockHigh << 16;
		adev->pm.dpm.vce_states[i].sclk = sclk;
		adev->pm.dpm.vce_states[i].mclk = 0;
	}

	return 0;
}

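/*
 * One-time DPM software setup: allocate the private power info, set the
 * default caps and parse the system and power tables from the VBIOS.
 */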
static int kv_dpm_init(struct amdgpu_device *adev)
{
	struct kv_power_info *pi;
	int ret, i;

	pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	adev->pm.dpm.priv = pi;

	ret = amdgpu_get_platform_caps(adev);
	if (ret)
		return ret;

	ret = amdgpu_parse_extended_power_table(adev);
	if (ret)
		return ret;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
		pi->at[i] = TRINITY_AT_DFLT;

	pi->sram_end = SMC_RAM_END;

	pi->enable_nb_dpm = true;

	pi->caps_power_containment = true;
	pi->caps_cac = true;
	pi->enable_didt = false;
	if (pi->enable_didt) {
		pi->caps_sq_ramping = true;
		pi->caps_db_ramping = true;
		pi->caps_td_ramping = true;
		pi->caps_tcp_ramping = true;
	}

	if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
		pi->caps_sclk_ds = true;
	else
		pi->caps_sclk_ds = false;

	pi->enable_auto_thermal_throttling = true;
	pi->disable_nb_ps3_in_battery = false;
	if (amdgpu_bapm == 0)
		pi->bapm_enable = false;
	else
		pi->bapm_enable = true;
	pi->voltage_drop_t = 0;
	pi->caps_sclk_throttle_low_notification = false;
	pi->caps_fps = false; /* true? */
	pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
	pi->caps_uvd_dpm = true;
	pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
	pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false;
	pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
	pi->caps_stable_p_state = false;

	ret = kv_parse_sys_info_table(adev);
	if (ret)
		return ret;

	kv_patch_voltage_values(adev);
	kv_construct_boot_state(adev);

	ret = kv_parse_power_table(adev);
	if (ret)
		return ret;

	pi->enable_dpm = true;

	return 0;
}

static void
kv_dpm_debugfs_print_current_performance_level(void *handle,
					       struct seq_file *m)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 current_index =
		(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
		 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
		TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
	u32 sclk, tmp;
	u16 vddc;

	if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
		seq_printf(m, "invalid dpm profile %d\n", current_index);
	} else {
		sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
		tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
		       SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
			SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
		vddc = kv_convert_8bit_index_to_voltage(adev, (u16)tmp);
		seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
		seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
		seq_printf(m, "power level %d sclk: %u vddc: %u\n",
			   current_index, sclk, vddc);
	}
}

static void
kv_dpm_print_power_state(void *handle, void *request_ps)
{
	int i;
	struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
	struct kv_ps *ps = kv_get_ps(rps);
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dpm_print_class_info(rps->class, rps->class2);
	amdgpu_dpm_print_cap_info(rps->caps);
	printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->num_levels; i++) {
		struct kv_pl *pl = &ps->levels[i];
		printk("\t\tpower level %d sclk: %u vddc: %u\n",
		       i, pl->sclk,
		       kv_convert_8bit_index_to_voltage(adev, pl->vddc_index));
	}
	amdgpu_dpm_print_ps_status(adev, rps);
}

static void kv_dpm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		kfree(adev->pm.dpm.ps[i].ps_priv);
	}
	kfree(adev->pm.dpm.ps);
	kfree(adev->pm.dpm.priv);
	amdgpu_free_extended_power_table(adev);
}

static void kv_dpm_display_configuration_changed(void *handle)
{

}

static u32 kv_dpm_get_sclk(void *handle, bool low)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->levels[0].sclk;
	else
		return requested_state->levels[requested_state->num_levels - 1].sclk;
}

static u32 kv_dpm_get_mclk(void *handle, bool low)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);

	return pi->sys_info.bootup_uma_clk;
}

/* get temperature in millidegrees */
static int kv_dpm_get_temp(void *handle)
{
	u32 temp;
	int actual_temp = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	temp = RREG32_SMC(0xC0300E0C);

	if (temp)
		actual_temp = (temp / 8) - 49;
	else
		actual_temp = 0;

	actual_temp = actual_temp * 1000;

	return actual_temp;
}

static int kv_dpm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->powerplay.pp_funcs = &kv_dpm_funcs;
	adev->powerplay.pp_handle = adev;
	kv_dpm_set_irq_funcs(adev);

	return 0;
}

static int kv_dpm_late_init(void *handle)
{
	/* powerdown unused blocks for now */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_dpm)
		return 0;

	kv_dpm_powergate_acp(adev, true);
	kv_dpm_powergate_samu(adev, true);

	return 0;
}

static int kv_dpm_sw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230,
				&adev->pm.dpm.thermal.irq);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231,
				&adev->pm.dpm.thermal.irq);
	if (ret)
		return ret;

	/* default to balanced state */
	adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
	adev->pm.default_sclk = adev->clock.default_sclk;
	adev->pm.default_mclk = adev->clock.default_mclk;
	adev->pm.current_sclk = adev->clock.default_sclk;
	adev->pm.current_mclk = adev->clock.default_mclk;
	adev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (amdgpu_dpm == 0)
		return 0;

	INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
	mutex_lock(&adev->pm.mutex);
	ret = kv_dpm_init(adev);
	if (ret)
		goto dpm_failed;
	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
	if (amdgpu_dpm == 1)
		amdgpu_pm_print_power_states(adev);
	mutex_unlock(&adev->pm.mutex);
	DRM_INFO("amdgpu: dpm initialized\n");

	return 0;

dpm_failed:
	kv_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);
	DRM_ERROR("amdgpu: dpm initialization failed\n");
	return ret;
}

static int kv_dpm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	flush_work(&adev->pm.dpm.thermal.work);

	mutex_lock(&adev->pm.mutex);
	kv_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

static int kv_dpm_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_dpm)
		return 0;

	mutex_lock(&adev->pm.mutex);
	kv_dpm_setup_asic(adev);
	ret = kv_dpm_enable(adev);
	if (ret)
		adev->pm.dpm_enabled = false;
	else
		adev->pm.dpm_enabled = true;
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

static int kv_dpm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		kv_dpm_disable(adev);
		mutex_unlock(&adev->pm.mutex);
	}

	return 0;
}

static int kv_dpm_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		/* disable dpm */
		kv_dpm_disable(adev);
		/* reset the power state */
		adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
		mutex_unlock(&adev->pm.mutex);
	}
	return 0;
}

static int kv_dpm_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		/* asic init will reset to the boot state */
		mutex_lock(&adev->pm.mutex);
		kv_dpm_setup_asic(adev);
		ret = kv_dpm_enable(adev);
		if (ret)
			adev->pm.dpm_enabled = false;
		else
			adev->pm.dpm_enabled = true;
		mutex_unlock(&adev->pm.mutex);
		if (adev->pm.dpm_enabled)
			amdgpu_pm_compute_clocks(adev);
	}
	return 0;
}

static bool kv_dpm_is_idle(void *handle)
{
	return true;
}

static int kv_dpm_wait_for_idle(void *handle)
{
	return 0;
}

static int kv_dpm_soft_reset(void *handle)
{
	return 0;
}

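/*
 * Enable or disable the two thermal interrupt directions by toggling
 * their mask bits in CG_THERMAL_INT_CTRL.
 */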
static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *src,
				      unsigned type,
				      enum amdgpu_interrupt_state state)
{
	u32 cg_thermal_int;

	switch (type) {
	case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
			break;
		default:
			break;
		}
		break;

	case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
			break;
		default:
			break;
		}
		break;

	default:
		break;
	}
	return 0;
}

static int kv_dpm_process_interrupt(struct amdgpu_device *adev,
				    struct amdgpu_irq_src *source,
				    struct amdgpu_iv_entry *entry)
{
	bool queue_thermal = false;

	if (entry == NULL)
		return -EINVAL;

	switch (entry->src_id) {
	case 230: /* thermal low to high */
		DRM_DEBUG("IH: thermal low to high\n");
		adev->pm.dpm.thermal.high_to_low = false;
		queue_thermal = true;
		break;
	case 231: /* thermal high to low */
		DRM_DEBUG("IH: thermal high to low\n");
		adev->pm.dpm.thermal.high_to_low = true;
		queue_thermal = true;
		break;
	default:
		break;
	}

	if (queue_thermal)
		schedule_work(&adev->pm.dpm.thermal.work);

	return 0;
}

static int kv_dpm_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	return 0;
}

static int kv_dpm_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1,
					     const struct kv_pl *kv_cpl2)
{
	return ((kv_cpl1->sclk == kv_cpl2->sclk) &&
		(kv_cpl1->vddc_index == kv_cpl2->vddc_index) &&
		(kv_cpl1->ds_divider_index == kv_cpl2->ds_divider_index) &&
		(kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state));
}

static int kv_check_state_equal(void *handle,
				void *current_ps,
				void *request_ps,
				bool *equal)
{
	struct kv_ps *kv_cps;
	struct kv_ps *kv_rps;
	int i;
	struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
	struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
		return -EINVAL;

	kv_cps = kv_get_ps(cps);
	kv_rps = kv_get_ps(rps);

	if (kv_cps == NULL) {
		*equal = false;
		return 0;
	}

	if (kv_cps->num_levels != kv_rps->num_levels) {
		*equal = false;
		return 0;
	}

	for (i = 0; i < kv_cps->num_levels; i++) {
		if (!kv_are_power_levels_equal(&(kv_cps->levels[i]),
					       &(kv_rps->levels[i]))) {
			*equal = false;
			return 0;
		}
	}

	/* If all performance levels are the same try to use the UVD clocks to break the tie. */
	*equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
	*equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));

	return 0;
}

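/*
 * Sensor readback: GFX_SCLK comes from the SMU's current graphics level,
 * GPU_TEMP from kv_dpm_get_temp().  Every result is 4 bytes.
 */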
static int kv_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	uint32_t sclk;
	u32 pl_index =
		(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
		 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
		TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;

	/* size must be at least 4 bytes for all sensors */
	if (*size < 4)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		if (pl_index < SMU__NUM_SCLK_DPM_STATE) {
			sclk = be32_to_cpu(
				pi->graphics_level[pl_index].SclkFrequency);
			*((uint32_t *)value) = sclk;
			*size = 4;
			return 0;
		}
		return -EINVAL;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = kv_dpm_get_temp(adev);
		*size = 4;
		return 0;
	default:
		return -EINVAL;
	}
}

static const struct amd_ip_funcs kv_dpm_ip_funcs = {
	.name = "kv_dpm",
	.early_init = kv_dpm_early_init,
	.late_init = kv_dpm_late_init,
	.sw_init = kv_dpm_sw_init,
	.sw_fini = kv_dpm_sw_fini,
	.hw_init = kv_dpm_hw_init,
	.hw_fini = kv_dpm_hw_fini,
	.suspend = kv_dpm_suspend,
	.resume = kv_dpm_resume,
	.is_idle = kv_dpm_is_idle,
	.wait_for_idle = kv_dpm_wait_for_idle,
	.soft_reset = kv_dpm_soft_reset,
	.set_clockgating_state = kv_dpm_set_clockgating_state,
	.set_powergating_state = kv_dpm_set_powergating_state,
};

const struct amdgpu_ip_block_version kv_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &kv_dpm_ip_funcs,
};

static const struct amd_pm_funcs kv_dpm_funcs = {
	.pre_set_power_state = &kv_dpm_pre_set_power_state,
	.set_power_state = &kv_dpm_set_power_state,
	.post_set_power_state = &kv_dpm_post_set_power_state,
	.display_configuration_changed = &kv_dpm_display_configuration_changed,
	.get_sclk = &kv_dpm_get_sclk,
	.get_mclk = &kv_dpm_get_mclk,
	.print_power_state = &kv_dpm_print_power_state,
	.debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
	.force_performance_level = &kv_dpm_force_performance_level,
	.powergate_uvd = &kv_dpm_powergate_uvd,
	.enable_bapm = &kv_dpm_enable_bapm,
	.get_vce_clock_state = amdgpu_get_vce_clock_state,
	.check_state_equal = kv_check_state_equal,
	.read_sensor = &kv_dpm_read_sensor,
};

static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = {
	.set = kv_dpm_set_interrupt_state,
	.process = kv_dpm_process_interrupt,
};

static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
	adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs;
}