Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/radeon/kms: add dpm support for cayman (v5)

This adds DPM (dynamic power management) support for Cayman ASICs. This includes:
- clockgating
- dynamic engine clock scaling
- dynamic memory clock scaling
- dynamic voltage scaling
- dynamic PCIe gen1/gen2 switching (requires additional ACPI support)
- power containment
- shader power scaling

Set radeon.dpm=1 to enable.

v2: fold in TDP (thermal design power) fix
v3: fix indentation
v4: fix 64 bit div
v5: attempt to fix state enable

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>

+5344 -28
+1 -1
drivers/gpu/drm/radeon/Makefile
··· 79 79 si_blit_shaders.o radeon_prime.o radeon_uvd.o cik.o cik_blit_shaders.o \ 80 80 r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \ 81 81 rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \ 82 - trinity_smc.o 82 + trinity_smc.o ni_dpm.o 83 83 84 84 radeon-$(CONFIG_COMPAT) += radeon_ioc32.o 85 85 radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
+18 -18
drivers/gpu/drm/radeon/btc_dpm.c
··· 1152 1152 1153 1153 #endif 1154 1154 1155 - u32 btc_valid_sclk[] = 1155 + u32 btc_valid_sclk[40] = 1156 1156 { 1157 1157 5000, 10000, 15000, 20000, 25000, 30000, 35000, 40000, 45000, 50000, 1158 1158 55000, 60000, 65000, 70000, 75000, 80000, 85000, 90000, 95000, 100000, ··· 1168 1168 { 25000, 30000, RADEON_SCLK_UP } 1169 1169 }; 1170 1170 1171 - static void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, 1172 - u32 clock, u16 max_voltage, u16 *voltage) 1171 + void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, 1172 + u32 clock, u16 max_voltage, u16 *voltage) 1173 1173 { 1174 1174 u32 i; 1175 1175 ··· 1219 1219 max_sclk, requested_sclk); 1220 1220 } 1221 1221 1222 - static void btc_skip_blacklist_clocks(struct radeon_device *rdev, 1223 - const u32 max_sclk, const u32 max_mclk, 1224 - u32 *sclk, u32 *mclk) 1222 + void btc_skip_blacklist_clocks(struct radeon_device *rdev, 1223 + const u32 max_sclk, const u32 max_mclk, 1224 + u32 *sclk, u32 *mclk) 1225 1225 { 1226 1226 int i, num_blacklist_clocks; 1227 1227 ··· 1246 1246 } 1247 1247 } 1248 1248 1249 - static void btc_adjust_clock_combinations(struct radeon_device *rdev, 1250 - const struct radeon_clock_and_voltage_limits *max_limits, 1251 - struct rv7xx_pl *pl) 1249 + void btc_adjust_clock_combinations(struct radeon_device *rdev, 1250 + const struct radeon_clock_and_voltage_limits *max_limits, 1251 + struct rv7xx_pl *pl) 1252 1252 { 1253 1253 1254 1254 if ((pl->mclk == 0) || (pl->sclk == 0)) ··· 1285 1285 return table->entries[table->count - 1].value; 1286 1286 } 1287 1287 1288 - static void btc_apply_voltage_delta_rules(struct radeon_device *rdev, 1289 - u16 max_vddc, u16 max_vddci, 1290 - u16 *vddc, u16 *vddci) 1288 + void btc_apply_voltage_delta_rules(struct radeon_device *rdev, 1289 + u16 max_vddc, u16 max_vddci, 1290 + u16 *vddc, u16 *vddci) 1291 1291 { 1292 1292 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); 1293 
1293 u16 new_voltage; ··· 1417 1417 return ret; 1418 1418 } 1419 1419 1420 - static void btc_program_mgcg_hw_sequence(struct radeon_device *rdev, 1421 - const u32 *sequence, u32 count) 1420 + void btc_program_mgcg_hw_sequence(struct radeon_device *rdev, 1421 + const u32 *sequence, u32 count) 1422 1422 { 1423 1423 u32 i, length = count * 3; 1424 1424 u32 tmp; ··· 1596 1596 btc_program_mgcg_hw_sequence(rdev, p, count); 1597 1597 } 1598 1598 1599 - static bool btc_dpm_enabled(struct radeon_device *rdev) 1599 + bool btc_dpm_enabled(struct radeon_device *rdev) 1600 1600 { 1601 1601 if (rv770_is_smc_running(rdev)) 1602 1602 return true; ··· 1692 1692 1693 1693 } 1694 1694 1695 - static void btc_notify_uvd_to_smc(struct radeon_device *rdev) 1695 + void btc_notify_uvd_to_smc(struct radeon_device *rdev) 1696 1696 { 1697 1697 struct radeon_ps *radeon_new_state = rdev->pm.dpm.requested_ps; 1698 1698 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); ··· 1708 1708 } 1709 1709 } 1710 1710 1711 - static int btc_reset_to_default(struct radeon_device *rdev) 1711 + int btc_reset_to_default(struct radeon_device *rdev) 1712 1712 { 1713 1713 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) != PPSMC_Result_OK) 1714 1714 return -EINVAL; ··· 1730 1730 r7xx_stop_smc(rdev); 1731 1731 } 1732 1732 1733 - static void btc_read_arb_registers(struct radeon_device *rdev) 1733 + void btc_read_arb_registers(struct radeon_device *rdev) 1734 1734 { 1735 1735 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); 1736 1736 struct evergreen_arb_registers *arb_registers =
+19 -1
drivers/gpu/drm/radeon/btc_dpm.h
··· 33 33 #define BTC_CGULVPARAMETER_DFLT 0x00040035 34 34 #define BTC_CGULVCONTROL_DFLT 0x00001450 35 35 36 - extern u32 btc_valid_sclk[]; 36 + extern u32 btc_valid_sclk[40]; 37 + 38 + void btc_read_arb_registers(struct radeon_device *rdev); 39 + void btc_program_mgcg_hw_sequence(struct radeon_device *rdev, 40 + const u32 *sequence, u32 count); 41 + void btc_skip_blacklist_clocks(struct radeon_device *rdev, 42 + const u32 max_sclk, const u32 max_mclk, 43 + u32 *sclk, u32 *mclk); 44 + void btc_adjust_clock_combinations(struct radeon_device *rdev, 45 + const struct radeon_clock_and_voltage_limits *max_limits, 46 + struct rv7xx_pl *pl); 47 + void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, 48 + u32 clock, u16 max_voltage, u16 *voltage); 49 + void btc_apply_voltage_delta_rules(struct radeon_device *rdev, 50 + u16 max_vddc, u16 max_vddci, 51 + u16 *vddc, u16 *vddci); 52 + bool btc_dpm_enabled(struct radeon_device *rdev); 53 + int btc_reset_to_default(struct radeon_device *rdev); 54 + void btc_notify_uvd_to_smc(struct radeon_device *rdev); 37 55 38 56 #endif
+4 -7
drivers/gpu/drm/radeon/cypress_dpm.c
··· 45 45 struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev); 46 46 struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev); 47 47 48 - static u8 cypress_get_mclk_frequency_ratio(struct radeon_device *rdev, 49 - u32 memory_clock, bool strobe_mode); 50 - 51 48 static void cypress_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev, 52 49 bool enable) 53 50 { ··· 413 416 return 0; 414 417 } 415 418 416 - static u8 cypress_get_strobe_mode_settings(struct radeon_device *rdev, u32 mclk) 419 + u8 cypress_get_strobe_mode_settings(struct radeon_device *rdev, u32 mclk) 417 420 { 418 421 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 419 422 u8 result = 0; ··· 431 434 return result; 432 435 } 433 436 434 - static u32 cypress_map_clkf_to_ibias(struct radeon_device *rdev, u32 clkf) 437 + u32 cypress_map_clkf_to_ibias(struct radeon_device *rdev, u32 clkf) 435 438 { 436 439 u32 ref_clk = rdev->clock.mpll.reference_freq; 437 440 u32 vco = clkf * ref_clk; ··· 600 603 return 0; 601 604 } 602 605 603 - static u8 cypress_get_mclk_frequency_ratio(struct radeon_device *rdev, 604 - u32 memory_clock, bool strobe_mode) 606 + u8 cypress_get_mclk_frequency_ratio(struct radeon_device *rdev, 607 + u32 memory_clock, bool strobe_mode) 605 608 { 606 609 u8 mc_para_index; 607 610
+4
drivers/gpu/drm/radeon/cypress_dpm.h
··· 141 141 bool enable); 142 142 void cypress_start_dpm(struct radeon_device *rdev); 143 143 void cypress_advertise_gen2_capability(struct radeon_device *rdev); 144 + u32 cypress_map_clkf_to_ibias(struct radeon_device *rdev, u32 clkf); 145 + u8 cypress_get_mclk_frequency_ratio(struct radeon_device *rdev, 146 + u32 memory_clock, bool strobe_mode); 147 + u8 cypress_get_strobe_mode_settings(struct radeon_device *rdev, u32 mclk); 144 148 145 149 #endif
+3 -1
drivers/gpu/drm/radeon/ni.c
··· 194 194 MODULE_FIRMWARE("radeon/CAYMAN_me.bin"); 195 195 MODULE_FIRMWARE("radeon/CAYMAN_mc.bin"); 196 196 MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin"); 197 + MODULE_FIRMWARE("radeon/CAYMAN_smc.bin"); 197 198 MODULE_FIRMWARE("radeon/ARUBA_pfp.bin"); 198 199 MODULE_FIRMWARE("radeon/ARUBA_me.bin"); 199 200 MODULE_FIRMWARE("radeon/ARUBA_rlc.bin"); ··· 735 734 me_req_size = CAYMAN_PM4_UCODE_SIZE * 4; 736 735 rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4; 737 736 mc_req_size = CAYMAN_MC_UCODE_SIZE * 4; 737 + smc_req_size = ALIGN(CAYMAN_SMC_UCODE_SIZE, 4); 738 738 break; 739 739 case CHIP_ARUBA: 740 740 chip_name = "ARUBA"; ··· 799 797 } 800 798 } 801 799 802 - if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAICOS)) { 800 + if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) { 803 801 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); 804 802 err = request_firmware(&rdev->smc_fw, fw_name, &pdev->dev); 805 803 if (err)
+4113
drivers/gpu/drm/radeon/ni_dpm.c
··· 1 + /* 2 + * Copyright 2012 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + * 22 + */ 23 + 24 + #include "drmP.h" 25 + #include "radeon.h" 26 + #include "nid.h" 27 + #include "r600_dpm.h" 28 + #include "ni_dpm.h" 29 + #include "atom.h" 30 + 31 + #define MC_CG_ARB_FREQ_F0 0x0a 32 + #define MC_CG_ARB_FREQ_F1 0x0b 33 + #define MC_CG_ARB_FREQ_F2 0x0c 34 + #define MC_CG_ARB_FREQ_F3 0x0d 35 + 36 + #define SMC_RAM_END 0xC000 37 + 38 + static const struct ni_cac_weights cac_weights_cayman_xt = 39 + { 40 + 0x15, 41 + 0x2, 42 + 0x19, 43 + 0x2, 44 + 0x8, 45 + 0x14, 46 + 0x2, 47 + 0x16, 48 + 0xE, 49 + 0x17, 50 + 0x13, 51 + 0x2B, 52 + 0x10, 53 + 0x7, 54 + 0x5, 55 + 0x5, 56 + 0x5, 57 + 0x2, 58 + 0x3, 59 + 0x9, 60 + 0x10, 61 + 0x10, 62 + 0x2B, 63 + 0xA, 64 + 0x9, 65 + 0x4, 66 + 0xD, 67 + 0xD, 68 + 0x3E, 69 + 0x18, 70 + 0x14, 71 + 0, 72 + 0x3, 73 + 0x3, 74 + 0x5, 75 + 0, 76 + 0x2, 77 + 0, 78 + 0, 79 + 0, 80 + 0, 81 + 0, 82 + 0, 83 + 0, 84 + 0, 85 + 0, 86 + 0x1CC, 87 + 0, 88 + 0x164, 89 + 1, 90 + 1, 91 + 1, 92 + 1, 93 + 12, 94 + 12, 95 + 12, 96 + 0x12, 97 + 0x1F, 98 + 132, 99 + 5, 100 + 7, 101 + 0, 102 + { 0, 0, 0, 0, 0, 0, 0, 0 }, 103 + { 0, 0, 0, 0 }, 104 + true 105 + }; 106 + 107 + static const struct ni_cac_weights cac_weights_cayman_pro = 108 + { 109 + 0x16, 110 + 0x4, 111 + 0x10, 112 + 0x2, 113 + 0xA, 114 + 0x16, 115 + 0x2, 116 + 0x18, 117 + 0x10, 118 + 0x1A, 119 + 0x16, 120 + 0x2D, 121 + 0x12, 122 + 0xA, 123 + 0x6, 124 + 0x6, 125 + 0x6, 126 + 0x2, 127 + 0x4, 128 + 0xB, 129 + 0x11, 130 + 0x11, 131 + 0x2D, 132 + 0xC, 133 + 0xC, 134 + 0x7, 135 + 0x10, 136 + 0x10, 137 + 0x3F, 138 + 0x1A, 139 + 0x16, 140 + 0, 141 + 0x7, 142 + 0x4, 143 + 0x6, 144 + 1, 145 + 0x2, 146 + 0x1, 147 + 0, 148 + 0, 149 + 0, 150 + 0, 151 + 0, 152 + 0, 153 + 0x30, 154 + 0, 155 + 0x1CF, 156 + 0, 157 + 0x166, 158 + 1, 159 + 1, 160 + 1, 161 + 1, 162 + 12, 163 + 12, 164 + 12, 165 + 0x15, 166 + 0x1F, 167 + 132, 168 + 6, 169 + 6, 170 + 0, 171 + { 0, 0, 0, 0, 0, 0, 0, 0 }, 172 + { 0, 0, 0, 0 }, 173 + true 174 + }; 175 + 176 + static const struct ni_cac_weights cac_weights_cayman_le = 
177 + { 178 + 0x7, 179 + 0xE, 180 + 0x1, 181 + 0xA, 182 + 0x1, 183 + 0x3F, 184 + 0x2, 185 + 0x18, 186 + 0x10, 187 + 0x1A, 188 + 0x1, 189 + 0x3F, 190 + 0x1, 191 + 0xE, 192 + 0x6, 193 + 0x6, 194 + 0x6, 195 + 0x2, 196 + 0x4, 197 + 0x9, 198 + 0x1A, 199 + 0x1A, 200 + 0x2C, 201 + 0xA, 202 + 0x11, 203 + 0x8, 204 + 0x19, 205 + 0x19, 206 + 0x1, 207 + 0x1, 208 + 0x1A, 209 + 0, 210 + 0x8, 211 + 0x5, 212 + 0x8, 213 + 0x1, 214 + 0x3, 215 + 0x1, 216 + 0, 217 + 0, 218 + 0, 219 + 0, 220 + 0, 221 + 0, 222 + 0x38, 223 + 0x38, 224 + 0x239, 225 + 0x3, 226 + 0x18A, 227 + 1, 228 + 1, 229 + 1, 230 + 1, 231 + 12, 232 + 12, 233 + 12, 234 + 0x15, 235 + 0x22, 236 + 132, 237 + 6, 238 + 6, 239 + 0, 240 + { 0, 0, 0, 0, 0, 0, 0, 0 }, 241 + { 0, 0, 0, 0 }, 242 + true 243 + }; 244 + 245 + #define NISLANDS_MGCG_SEQUENCE 300 246 + 247 + static const u32 cayman_cgcg_cgls_default[] = 248 + { 249 + 0x000008f8, 0x00000010, 0xffffffff, 250 + 0x000008fc, 0x00000000, 0xffffffff, 251 + 0x000008f8, 0x00000011, 0xffffffff, 252 + 0x000008fc, 0x00000000, 0xffffffff, 253 + 0x000008f8, 0x00000012, 0xffffffff, 254 + 0x000008fc, 0x00000000, 0xffffffff, 255 + 0x000008f8, 0x00000013, 0xffffffff, 256 + 0x000008fc, 0x00000000, 0xffffffff, 257 + 0x000008f8, 0x00000014, 0xffffffff, 258 + 0x000008fc, 0x00000000, 0xffffffff, 259 + 0x000008f8, 0x00000015, 0xffffffff, 260 + 0x000008fc, 0x00000000, 0xffffffff, 261 + 0x000008f8, 0x00000016, 0xffffffff, 262 + 0x000008fc, 0x00000000, 0xffffffff, 263 + 0x000008f8, 0x00000017, 0xffffffff, 264 + 0x000008fc, 0x00000000, 0xffffffff, 265 + 0x000008f8, 0x00000018, 0xffffffff, 266 + 0x000008fc, 0x00000000, 0xffffffff, 267 + 0x000008f8, 0x00000019, 0xffffffff, 268 + 0x000008fc, 0x00000000, 0xffffffff, 269 + 0x000008f8, 0x0000001a, 0xffffffff, 270 + 0x000008fc, 0x00000000, 0xffffffff, 271 + 0x000008f8, 0x0000001b, 0xffffffff, 272 + 0x000008fc, 0x00000000, 0xffffffff, 273 + 0x000008f8, 0x00000020, 0xffffffff, 274 + 0x000008fc, 0x00000000, 0xffffffff, 275 + 0x000008f8, 0x00000021, 
0xffffffff, 276 + 0x000008fc, 0x00000000, 0xffffffff, 277 + 0x000008f8, 0x00000022, 0xffffffff, 278 + 0x000008fc, 0x00000000, 0xffffffff, 279 + 0x000008f8, 0x00000023, 0xffffffff, 280 + 0x000008fc, 0x00000000, 0xffffffff, 281 + 0x000008f8, 0x00000024, 0xffffffff, 282 + 0x000008fc, 0x00000000, 0xffffffff, 283 + 0x000008f8, 0x00000025, 0xffffffff, 284 + 0x000008fc, 0x00000000, 0xffffffff, 285 + 0x000008f8, 0x00000026, 0xffffffff, 286 + 0x000008fc, 0x00000000, 0xffffffff, 287 + 0x000008f8, 0x00000027, 0xffffffff, 288 + 0x000008fc, 0x00000000, 0xffffffff, 289 + 0x000008f8, 0x00000028, 0xffffffff, 290 + 0x000008fc, 0x00000000, 0xffffffff, 291 + 0x000008f8, 0x00000029, 0xffffffff, 292 + 0x000008fc, 0x00000000, 0xffffffff, 293 + 0x000008f8, 0x0000002a, 0xffffffff, 294 + 0x000008fc, 0x00000000, 0xffffffff, 295 + 0x000008f8, 0x0000002b, 0xffffffff, 296 + 0x000008fc, 0x00000000, 0xffffffff 297 + }; 298 + #define CAYMAN_CGCG_CGLS_DEFAULT_LENGTH sizeof(cayman_cgcg_cgls_default) / (3 * sizeof(u32)) 299 + 300 + static const u32 cayman_cgcg_cgls_disable[] = 301 + { 302 + 0x000008f8, 0x00000010, 0xffffffff, 303 + 0x000008fc, 0xffffffff, 0xffffffff, 304 + 0x000008f8, 0x00000011, 0xffffffff, 305 + 0x000008fc, 0xffffffff, 0xffffffff, 306 + 0x000008f8, 0x00000012, 0xffffffff, 307 + 0x000008fc, 0xffffffff, 0xffffffff, 308 + 0x000008f8, 0x00000013, 0xffffffff, 309 + 0x000008fc, 0xffffffff, 0xffffffff, 310 + 0x000008f8, 0x00000014, 0xffffffff, 311 + 0x000008fc, 0xffffffff, 0xffffffff, 312 + 0x000008f8, 0x00000015, 0xffffffff, 313 + 0x000008fc, 0xffffffff, 0xffffffff, 314 + 0x000008f8, 0x00000016, 0xffffffff, 315 + 0x000008fc, 0xffffffff, 0xffffffff, 316 + 0x000008f8, 0x00000017, 0xffffffff, 317 + 0x000008fc, 0xffffffff, 0xffffffff, 318 + 0x000008f8, 0x00000018, 0xffffffff, 319 + 0x000008fc, 0xffffffff, 0xffffffff, 320 + 0x000008f8, 0x00000019, 0xffffffff, 321 + 0x000008fc, 0xffffffff, 0xffffffff, 322 + 0x000008f8, 0x0000001a, 0xffffffff, 323 + 0x000008fc, 0xffffffff, 0xffffffff, 324 + 
0x000008f8, 0x0000001b, 0xffffffff, 325 + 0x000008fc, 0xffffffff, 0xffffffff, 326 + 0x000008f8, 0x00000020, 0xffffffff, 327 + 0x000008fc, 0x00000000, 0xffffffff, 328 + 0x000008f8, 0x00000021, 0xffffffff, 329 + 0x000008fc, 0x00000000, 0xffffffff, 330 + 0x000008f8, 0x00000022, 0xffffffff, 331 + 0x000008fc, 0x00000000, 0xffffffff, 332 + 0x000008f8, 0x00000023, 0xffffffff, 333 + 0x000008fc, 0x00000000, 0xffffffff, 334 + 0x000008f8, 0x00000024, 0xffffffff, 335 + 0x000008fc, 0x00000000, 0xffffffff, 336 + 0x000008f8, 0x00000025, 0xffffffff, 337 + 0x000008fc, 0x00000000, 0xffffffff, 338 + 0x000008f8, 0x00000026, 0xffffffff, 339 + 0x000008fc, 0x00000000, 0xffffffff, 340 + 0x000008f8, 0x00000027, 0xffffffff, 341 + 0x000008fc, 0x00000000, 0xffffffff, 342 + 0x000008f8, 0x00000028, 0xffffffff, 343 + 0x000008fc, 0x00000000, 0xffffffff, 344 + 0x000008f8, 0x00000029, 0xffffffff, 345 + 0x000008fc, 0x00000000, 0xffffffff, 346 + 0x000008f8, 0x0000002a, 0xffffffff, 347 + 0x000008fc, 0x00000000, 0xffffffff, 348 + 0x000008f8, 0x0000002b, 0xffffffff, 349 + 0x000008fc, 0x00000000, 0xffffffff, 350 + 0x00000644, 0x000f7902, 0x001f4180, 351 + 0x00000644, 0x000f3802, 0x001f4180 352 + }; 353 + #define CAYMAN_CGCG_CGLS_DISABLE_LENGTH sizeof(cayman_cgcg_cgls_disable) / (3 * sizeof(u32)) 354 + 355 + static const u32 cayman_cgcg_cgls_enable[] = 356 + { 357 + 0x00000644, 0x000f7882, 0x001f4080, 358 + 0x000008f8, 0x00000010, 0xffffffff, 359 + 0x000008fc, 0x00000000, 0xffffffff, 360 + 0x000008f8, 0x00000011, 0xffffffff, 361 + 0x000008fc, 0x00000000, 0xffffffff, 362 + 0x000008f8, 0x00000012, 0xffffffff, 363 + 0x000008fc, 0x00000000, 0xffffffff, 364 + 0x000008f8, 0x00000013, 0xffffffff, 365 + 0x000008fc, 0x00000000, 0xffffffff, 366 + 0x000008f8, 0x00000014, 0xffffffff, 367 + 0x000008fc, 0x00000000, 0xffffffff, 368 + 0x000008f8, 0x00000015, 0xffffffff, 369 + 0x000008fc, 0x00000000, 0xffffffff, 370 + 0x000008f8, 0x00000016, 0xffffffff, 371 + 0x000008fc, 0x00000000, 0xffffffff, 372 + 0x000008f8, 
0x00000017, 0xffffffff, 373 + 0x000008fc, 0x00000000, 0xffffffff, 374 + 0x000008f8, 0x00000018, 0xffffffff, 375 + 0x000008fc, 0x00000000, 0xffffffff, 376 + 0x000008f8, 0x00000019, 0xffffffff, 377 + 0x000008fc, 0x00000000, 0xffffffff, 378 + 0x000008f8, 0x0000001a, 0xffffffff, 379 + 0x000008fc, 0x00000000, 0xffffffff, 380 + 0x000008f8, 0x0000001b, 0xffffffff, 381 + 0x000008fc, 0x00000000, 0xffffffff, 382 + 0x000008f8, 0x00000020, 0xffffffff, 383 + 0x000008fc, 0xffffffff, 0xffffffff, 384 + 0x000008f8, 0x00000021, 0xffffffff, 385 + 0x000008fc, 0xffffffff, 0xffffffff, 386 + 0x000008f8, 0x00000022, 0xffffffff, 387 + 0x000008fc, 0xffffffff, 0xffffffff, 388 + 0x000008f8, 0x00000023, 0xffffffff, 389 + 0x000008fc, 0xffffffff, 0xffffffff, 390 + 0x000008f8, 0x00000024, 0xffffffff, 391 + 0x000008fc, 0xffffffff, 0xffffffff, 392 + 0x000008f8, 0x00000025, 0xffffffff, 393 + 0x000008fc, 0xffffffff, 0xffffffff, 394 + 0x000008f8, 0x00000026, 0xffffffff, 395 + 0x000008fc, 0xffffffff, 0xffffffff, 396 + 0x000008f8, 0x00000027, 0xffffffff, 397 + 0x000008fc, 0xffffffff, 0xffffffff, 398 + 0x000008f8, 0x00000028, 0xffffffff, 399 + 0x000008fc, 0xffffffff, 0xffffffff, 400 + 0x000008f8, 0x00000029, 0xffffffff, 401 + 0x000008fc, 0xffffffff, 0xffffffff, 402 + 0x000008f8, 0x0000002a, 0xffffffff, 403 + 0x000008fc, 0xffffffff, 0xffffffff, 404 + 0x000008f8, 0x0000002b, 0xffffffff, 405 + 0x000008fc, 0xffffffff, 0xffffffff 406 + }; 407 + #define CAYMAN_CGCG_CGLS_ENABLE_LENGTH sizeof(cayman_cgcg_cgls_enable) / (3 * sizeof(u32)) 408 + 409 + static const u32 cayman_mgcg_default[] = 410 + { 411 + 0x0000802c, 0xc0000000, 0xffffffff, 412 + 0x00003fc4, 0xc0000000, 0xffffffff, 413 + 0x00005448, 0x00000100, 0xffffffff, 414 + 0x000055e4, 0x00000100, 0xffffffff, 415 + 0x0000160c, 0x00000100, 0xffffffff, 416 + 0x00008984, 0x06000100, 0xffffffff, 417 + 0x0000c164, 0x00000100, 0xffffffff, 418 + 0x00008a18, 0x00000100, 0xffffffff, 419 + 0x0000897c, 0x06000100, 0xffffffff, 420 + 0x00008b28, 0x00000100, 0xffffffff, 421 
+ 0x00009144, 0x00800200, 0xffffffff, 422 + 0x00009a60, 0x00000100, 0xffffffff, 423 + 0x00009868, 0x00000100, 0xffffffff, 424 + 0x00008d58, 0x00000100, 0xffffffff, 425 + 0x00009510, 0x00000100, 0xffffffff, 426 + 0x0000949c, 0x00000100, 0xffffffff, 427 + 0x00009654, 0x00000100, 0xffffffff, 428 + 0x00009030, 0x00000100, 0xffffffff, 429 + 0x00009034, 0x00000100, 0xffffffff, 430 + 0x00009038, 0x00000100, 0xffffffff, 431 + 0x0000903c, 0x00000100, 0xffffffff, 432 + 0x00009040, 0x00000100, 0xffffffff, 433 + 0x0000a200, 0x00000100, 0xffffffff, 434 + 0x0000a204, 0x00000100, 0xffffffff, 435 + 0x0000a208, 0x00000100, 0xffffffff, 436 + 0x0000a20c, 0x00000100, 0xffffffff, 437 + 0x00009744, 0x00000100, 0xffffffff, 438 + 0x00003f80, 0x00000100, 0xffffffff, 439 + 0x0000a210, 0x00000100, 0xffffffff, 440 + 0x0000a214, 0x00000100, 0xffffffff, 441 + 0x000004d8, 0x00000100, 0xffffffff, 442 + 0x00009664, 0x00000100, 0xffffffff, 443 + 0x00009698, 0x00000100, 0xffffffff, 444 + 0x000004d4, 0x00000200, 0xffffffff, 445 + 0x000004d0, 0x00000000, 0xffffffff, 446 + 0x000030cc, 0x00000104, 0xffffffff, 447 + 0x0000d0c0, 0x00000100, 0xffffffff, 448 + 0x0000d8c0, 0x00000100, 0xffffffff, 449 + 0x0000802c, 0x40000000, 0xffffffff, 450 + 0x00003fc4, 0x40000000, 0xffffffff, 451 + 0x0000915c, 0x00010000, 0xffffffff, 452 + 0x00009160, 0x00030002, 0xffffffff, 453 + 0x00009164, 0x00050004, 0xffffffff, 454 + 0x00009168, 0x00070006, 0xffffffff, 455 + 0x00009178, 0x00070000, 0xffffffff, 456 + 0x0000917c, 0x00030002, 0xffffffff, 457 + 0x00009180, 0x00050004, 0xffffffff, 458 + 0x0000918c, 0x00010006, 0xffffffff, 459 + 0x00009190, 0x00090008, 0xffffffff, 460 + 0x00009194, 0x00070000, 0xffffffff, 461 + 0x00009198, 0x00030002, 0xffffffff, 462 + 0x0000919c, 0x00050004, 0xffffffff, 463 + 0x000091a8, 0x00010006, 0xffffffff, 464 + 0x000091ac, 0x00090008, 0xffffffff, 465 + 0x000091b0, 0x00070000, 0xffffffff, 466 + 0x000091b4, 0x00030002, 0xffffffff, 467 + 0x000091b8, 0x00050004, 0xffffffff, 468 + 0x000091c4, 0x00010006, 
0xffffffff, 469 + 0x000091c8, 0x00090008, 0xffffffff, 470 + 0x000091cc, 0x00070000, 0xffffffff, 471 + 0x000091d0, 0x00030002, 0xffffffff, 472 + 0x000091d4, 0x00050004, 0xffffffff, 473 + 0x000091e0, 0x00010006, 0xffffffff, 474 + 0x000091e4, 0x00090008, 0xffffffff, 475 + 0x000091e8, 0x00000000, 0xffffffff, 476 + 0x000091ec, 0x00070000, 0xffffffff, 477 + 0x000091f0, 0x00030002, 0xffffffff, 478 + 0x000091f4, 0x00050004, 0xffffffff, 479 + 0x00009200, 0x00010006, 0xffffffff, 480 + 0x00009204, 0x00090008, 0xffffffff, 481 + 0x00009208, 0x00070000, 0xffffffff, 482 + 0x0000920c, 0x00030002, 0xffffffff, 483 + 0x00009210, 0x00050004, 0xffffffff, 484 + 0x0000921c, 0x00010006, 0xffffffff, 485 + 0x00009220, 0x00090008, 0xffffffff, 486 + 0x00009224, 0x00070000, 0xffffffff, 487 + 0x00009228, 0x00030002, 0xffffffff, 488 + 0x0000922c, 0x00050004, 0xffffffff, 489 + 0x00009238, 0x00010006, 0xffffffff, 490 + 0x0000923c, 0x00090008, 0xffffffff, 491 + 0x00009240, 0x00070000, 0xffffffff, 492 + 0x00009244, 0x00030002, 0xffffffff, 493 + 0x00009248, 0x00050004, 0xffffffff, 494 + 0x00009254, 0x00010006, 0xffffffff, 495 + 0x00009258, 0x00090008, 0xffffffff, 496 + 0x0000925c, 0x00070000, 0xffffffff, 497 + 0x00009260, 0x00030002, 0xffffffff, 498 + 0x00009264, 0x00050004, 0xffffffff, 499 + 0x00009270, 0x00010006, 0xffffffff, 500 + 0x00009274, 0x00090008, 0xffffffff, 501 + 0x00009278, 0x00070000, 0xffffffff, 502 + 0x0000927c, 0x00030002, 0xffffffff, 503 + 0x00009280, 0x00050004, 0xffffffff, 504 + 0x0000928c, 0x00010006, 0xffffffff, 505 + 0x00009290, 0x00090008, 0xffffffff, 506 + 0x000092a8, 0x00070000, 0xffffffff, 507 + 0x000092ac, 0x00030002, 0xffffffff, 508 + 0x000092b0, 0x00050004, 0xffffffff, 509 + 0x000092bc, 0x00010006, 0xffffffff, 510 + 0x000092c0, 0x00090008, 0xffffffff, 511 + 0x000092c4, 0x00070000, 0xffffffff, 512 + 0x000092c8, 0x00030002, 0xffffffff, 513 + 0x000092cc, 0x00050004, 0xffffffff, 514 + 0x000092d8, 0x00010006, 0xffffffff, 515 + 0x000092dc, 0x00090008, 0xffffffff, 516 + 
0x00009294, 0x00000000, 0xffffffff, 517 + 0x0000802c, 0x40010000, 0xffffffff, 518 + 0x00003fc4, 0x40010000, 0xffffffff, 519 + 0x0000915c, 0x00010000, 0xffffffff, 520 + 0x00009160, 0x00030002, 0xffffffff, 521 + 0x00009164, 0x00050004, 0xffffffff, 522 + 0x00009168, 0x00070006, 0xffffffff, 523 + 0x00009178, 0x00070000, 0xffffffff, 524 + 0x0000917c, 0x00030002, 0xffffffff, 525 + 0x00009180, 0x00050004, 0xffffffff, 526 + 0x0000918c, 0x00010006, 0xffffffff, 527 + 0x00009190, 0x00090008, 0xffffffff, 528 + 0x00009194, 0x00070000, 0xffffffff, 529 + 0x00009198, 0x00030002, 0xffffffff, 530 + 0x0000919c, 0x00050004, 0xffffffff, 531 + 0x000091a8, 0x00010006, 0xffffffff, 532 + 0x000091ac, 0x00090008, 0xffffffff, 533 + 0x000091b0, 0x00070000, 0xffffffff, 534 + 0x000091b4, 0x00030002, 0xffffffff, 535 + 0x000091b8, 0x00050004, 0xffffffff, 536 + 0x000091c4, 0x00010006, 0xffffffff, 537 + 0x000091c8, 0x00090008, 0xffffffff, 538 + 0x000091cc, 0x00070000, 0xffffffff, 539 + 0x000091d0, 0x00030002, 0xffffffff, 540 + 0x000091d4, 0x00050004, 0xffffffff, 541 + 0x000091e0, 0x00010006, 0xffffffff, 542 + 0x000091e4, 0x00090008, 0xffffffff, 543 + 0x000091e8, 0x00000000, 0xffffffff, 544 + 0x000091ec, 0x00070000, 0xffffffff, 545 + 0x000091f0, 0x00030002, 0xffffffff, 546 + 0x000091f4, 0x00050004, 0xffffffff, 547 + 0x00009200, 0x00010006, 0xffffffff, 548 + 0x00009204, 0x00090008, 0xffffffff, 549 + 0x00009208, 0x00070000, 0xffffffff, 550 + 0x0000920c, 0x00030002, 0xffffffff, 551 + 0x00009210, 0x00050004, 0xffffffff, 552 + 0x0000921c, 0x00010006, 0xffffffff, 553 + 0x00009220, 0x00090008, 0xffffffff, 554 + 0x00009224, 0x00070000, 0xffffffff, 555 + 0x00009228, 0x00030002, 0xffffffff, 556 + 0x0000922c, 0x00050004, 0xffffffff, 557 + 0x00009238, 0x00010006, 0xffffffff, 558 + 0x0000923c, 0x00090008, 0xffffffff, 559 + 0x00009240, 0x00070000, 0xffffffff, 560 + 0x00009244, 0x00030002, 0xffffffff, 561 + 0x00009248, 0x00050004, 0xffffffff, 562 + 0x00009254, 0x00010006, 0xffffffff, 563 + 0x00009258, 0x00090008, 
0xffffffff, 564 + 0x0000925c, 0x00070000, 0xffffffff, 565 + 0x00009260, 0x00030002, 0xffffffff, 566 + 0x00009264, 0x00050004, 0xffffffff, 567 + 0x00009270, 0x00010006, 0xffffffff, 568 + 0x00009274, 0x00090008, 0xffffffff, 569 + 0x00009278, 0x00070000, 0xffffffff, 570 + 0x0000927c, 0x00030002, 0xffffffff, 571 + 0x00009280, 0x00050004, 0xffffffff, 572 + 0x0000928c, 0x00010006, 0xffffffff, 573 + 0x00009290, 0x00090008, 0xffffffff, 574 + 0x000092a8, 0x00070000, 0xffffffff, 575 + 0x000092ac, 0x00030002, 0xffffffff, 576 + 0x000092b0, 0x00050004, 0xffffffff, 577 + 0x000092bc, 0x00010006, 0xffffffff, 578 + 0x000092c0, 0x00090008, 0xffffffff, 579 + 0x000092c4, 0x00070000, 0xffffffff, 580 + 0x000092c8, 0x00030002, 0xffffffff, 581 + 0x000092cc, 0x00050004, 0xffffffff, 582 + 0x000092d8, 0x00010006, 0xffffffff, 583 + 0x000092dc, 0x00090008, 0xffffffff, 584 + 0x00009294, 0x00000000, 0xffffffff, 585 + 0x0000802c, 0xc0000000, 0xffffffff, 586 + 0x00003fc4, 0xc0000000, 0xffffffff, 587 + 0x000008f8, 0x00000010, 0xffffffff, 588 + 0x000008fc, 0x00000000, 0xffffffff, 589 + 0x000008f8, 0x00000011, 0xffffffff, 590 + 0x000008fc, 0x00000000, 0xffffffff, 591 + 0x000008f8, 0x00000012, 0xffffffff, 592 + 0x000008fc, 0x00000000, 0xffffffff, 593 + 0x000008f8, 0x00000013, 0xffffffff, 594 + 0x000008fc, 0x00000000, 0xffffffff, 595 + 0x000008f8, 0x00000014, 0xffffffff, 596 + 0x000008fc, 0x00000000, 0xffffffff, 597 + 0x000008f8, 0x00000015, 0xffffffff, 598 + 0x000008fc, 0x00000000, 0xffffffff, 599 + 0x000008f8, 0x00000016, 0xffffffff, 600 + 0x000008fc, 0x00000000, 0xffffffff, 601 + 0x000008f8, 0x00000017, 0xffffffff, 602 + 0x000008fc, 0x00000000, 0xffffffff, 603 + 0x000008f8, 0x00000018, 0xffffffff, 604 + 0x000008fc, 0x00000000, 0xffffffff, 605 + 0x000008f8, 0x00000019, 0xffffffff, 606 + 0x000008fc, 0x00000000, 0xffffffff, 607 + 0x000008f8, 0x0000001a, 0xffffffff, 608 + 0x000008fc, 0x00000000, 0xffffffff, 609 + 0x000008f8, 0x0000001b, 0xffffffff, 610 + 0x000008fc, 0x00000000, 0xffffffff 611 + }; 612 + 
#define CAYMAN_MGCG_DEFAULT_LENGTH sizeof(cayman_mgcg_default) / (3 * sizeof(u32))

/* Medium-grain clock gating disable sequence.  Each row is a
 * (register, value, mask) triplet consumed by
 * btc_program_mgcg_hw_sequence().  0x8f8/0x8fc appear to be an
 * index/data register pair — NOTE(review): confirm against the
 * Cayman register spec.
 */
static const u32 cayman_mgcg_disable[] =
{
	0x0000802c, 0xc0000000, 0xffffffff,
	0x000008f8, 0x00000000, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000001, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000002, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000003, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x00009150, 0x00600000, 0xffffffff
};
#define CAYMAN_MGCG_DISABLE_LENGTH sizeof(cayman_mgcg_disable) / (3 * sizeof(u32))

/* Medium-grain clock gating enable sequence (same triplet format). */
static const u32 cayman_mgcg_enable[] =
{
	0x0000802c, 0xc0000000, 0xffffffff,
	0x000008f8, 0x00000000, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000001, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000002, 0xffffffff,
	0x000008fc, 0x00600000, 0xffffffff,
	0x000008f8, 0x00000003, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x00009150, 0x96944200, 0xffffffff
};

/* NOTE(review): these LENGTH macros expand without surrounding
 * parentheses; safe in current uses but fragile in expressions. */
#define CAYMAN_MGCG_ENABLE_LENGTH sizeof(cayman_mgcg_enable) / (3 * sizeof(u32))

#define NISLANDS_SYSLS_SEQUENCE 100

/* System light-sleep (LS) default register values. */
static const u32 cayman_sysls_default[] =
{
	/* Register, Value, Mask bits */
	0x000055e8, 0x00000000, 0xffffffff,
	0x0000d0bc, 0x00000000, 0xffffffff,
	0x0000d8bc, 0x00000000, 0xffffffff,
	0x000015c0, 0x000c1401, 0xffffffff,
	0x0000264c, 0x000c0400, 0xffffffff,
	0x00002648, 0x000c0400, 0xffffffff,
	0x00002650, 0x000c0400, 0xffffffff,
	0x000020b8, 0x000c0400, 0xffffffff,
	0x000020bc, 0x000c0400, 0xffffffff,
	0x000020c0, 0x000c0c80, 0xffffffff,
	0x0000f4a0, 0x000000c0, 0xffffffff,
	0x0000f4a4, 0x00680fff, 0xffffffff,
	0x00002f50, 0x00000404, 0xffffffff,
	0x000004c8, 0x00000001, 0xffffffff,
	0x000064ec, 0x00000000, 0xffffffff,
	0x00000c7c, 0x00000000, 0xffffffff,
	0x00008dfc, 0x00000000, 0xffffffff
};
#define CAYMAN_SYSLS_DEFAULT_LENGTH sizeof(cayman_sysls_default) / (3 * sizeof(u32))

/* System light-sleep disable sequence. */
static const u32 cayman_sysls_disable[] =
{
	/* Register, Value, Mask bits */
	0x0000d0c0, 0x00000000, 0xffffffff,
	0x0000d8c0, 0x00000000, 0xffffffff,
	0x000055e8, 0x00000000, 0xffffffff,
	0x0000d0bc, 0x00000000, 0xffffffff,
	0x0000d8bc, 0x00000000, 0xffffffff,
	0x000015c0, 0x00041401, 0xffffffff,
	0x0000264c, 0x00040400, 0xffffffff,
	0x00002648, 0x00040400, 0xffffffff,
	0x00002650, 0x00040400, 0xffffffff,
	0x000020b8, 0x00040400, 0xffffffff,
	0x000020bc, 0x00040400, 0xffffffff,
	0x000020c0, 0x00040c80, 0xffffffff,
	0x0000f4a0, 0x000000c0, 0xffffffff,
	0x0000f4a4, 0x00680000, 0xffffffff,
	0x00002f50, 0x00000404, 0xffffffff,
	0x000004c8, 0x00000001, 0xffffffff,
	0x000064ec, 0x00007ffd, 0xffffffff,
	0x00000c7c, 0x0000ff00, 0xffffffff,
	0x00008dfc, 0x0000007f, 0xffffffff
};
#define CAYMAN_SYSLS_DISABLE_LENGTH sizeof(cayman_sysls_disable) / (3 * sizeof(u32))

/* System light-sleep enable sequence. */
static const u32 cayman_sysls_enable[] =
{
	/* Register, Value, Mask bits */
	0x000055e8, 0x00000001, 0xffffffff,
	0x0000d0bc, 0x00000100, 0xffffffff,
	0x0000d8bc, 0x00000100, 0xffffffff,
	0x000015c0, 0x000c1401, 0xffffffff,
	0x0000264c, 0x000c0400, 0xffffffff,
	0x00002648, 0x000c0400, 0xffffffff,
	0x00002650, 0x000c0400, 0xffffffff,
	0x000020b8, 0x000c0400, 0xffffffff,
	0x000020bc, 0x000c0400, 0xffffffff,
	0x000020c0, 0x000c0c80, 0xffffffff,
	0x0000f4a0, 0x000000c0, 0xffffffff,
	0x0000f4a4, 0x00680fff, 0xffffffff,
	0x00002f50, 0x00000903, 0xffffffff,
	0x000004c8, 0x00000000, 0xffffffff,
	0x000064ec, 0x00000000, 0xffffffff,
	0x00000c7c, 0x00000000, 0xffffffff,
	0x00008dfc, 0x00000000, 0xffffffff
};
#define CAYMAN_SYSLS_ENABLE_LENGTH sizeof(cayman_sysls_enable) / (3 * sizeof(u32))

struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);

/* Accessor for the NI-specific power info hung off rdev->pm.dpm.priv. */
static struct ni_power_info *ni_get_pi(struct radeon_device *rdev)
{
	struct ni_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

/* Accessor for the NI-specific power state hung off a radeon_ps. */
struct ni_ps *ni_get_ps(struct radeon_ps *rps)
{
	struct ni_ps *ps = rps->ps_priv;

	return ps;
}

/* XXX: fix for kernel use */
#if 0
/* Taylor-series exp(x); disabled because the kernel cannot use
 * floating point in normal context. */
static double ni_exp(double x)
{
	int count = 1;
	double sum = 1.0, term, tolerance = 0.000000001, y = x;

	if (x < 0)
		y = -1 * x;
	term = y;

	while (term >= tolerance) {
		sum = sum + term;
		count = count + 1;
		term = term * (y / count);
	}

	if (x < 0)
		sum = 1.0 / sum;

	return sum;
}
#endif

/* Leakage power model: leakage_w = i_leakage * kt * kv * vddc, with
 * kt/kv exponential in temperature/voltage.
 * NOTE(review): the body is compiled out, so *leakage is currently
 * left UNWRITTEN in kernel builds — callers must pre-initialize it.
 */
static void ni_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
						     u16 v, s32 t,
						     u32 ileakage,
						     u32 *leakage)
{
/* XXX: fix for kernel use */
#if 0
	double kt, kv, leakage_w, i_leakage, vddc, temperature;

	i_leakage = ((double)ileakage) / 1000;
	vddc = ((double)v) / 1000;
	temperature = ((double)t) / 1000;

	kt = (((double)(coeff->at)) / 1000) * ni_exp((((double)(coeff->bt)) / 1000) * temperature);
	kv = (((double)(coeff->av)) / 1000) * ni_exp((((double)(coeff->bv)) / 1000) * vddc);

	leakage_w = i_leakage * kt * kv * vddc;

	*leakage = (u32)(leakage_w * 1000);
#endif
}

/* Thin wrapper around the leakage formula (v in mV, t in millidegrees). */
static void ni_calculate_leakage_for_v_and_t(struct radeon_device *rdev,
					     const struct ni_leakage_coeffients *coeff,
					     u16 v,
					     s32 t,
					     u32 i_leakage,
					     u32 *leakage)
{
	ni_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage);
}

/* Clamp and normalize the requested power state before programming it:
 * - work on a hw copy so the requested ps itself is not modified
 * - disable mclk switching when more than one crtc is active
 * - clamp clocks/voltages to the AC or DC limits
 * - force clocks/voltages to be monotonically non-decreasing across levels
 * - apply the btc blacklist/combination/dependency/delta rules
 * - compute dc compatibility and strip PCIe gen2 below the min vddc
 */
static void ni_apply_state_adjust_rules(struct radeon_device *rdev)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct radeon_ps *rps = rdev->pm.dpm.requested_ps;
	struct ni_ps *ps = ni_get_ps(rps);
	struct radeon_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 mclk, sclk;
	u16 vddc, vddci;
	int i;

	/* point to the hw copy since this function will modify the ps */
	ni_pi->hw_ps = *ps;
	rdev->pm.dpm.hw_ps.ps_priv = &ni_pi->hw_ps;
	ps = &ni_pi->hw_ps;

	/* mclk switching glitches with multiple displays scanning out */
	if (rdev->pm.dpm.new_active_crtc_count > 1)
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	/* on battery, clamp every level to the DC limits */
	if (rdev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
			if (ps->performance_levels[i].vddc > max_limits->vddc)
				ps->performance_levels[i].vddc = max_limits->vddc;
			if (ps->performance_levels[i].vddci > max_limits->vddci)
				ps->performance_levels[i].vddci = max_limits->vddci;
		}
	}

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		/* pin mclk/vddci to the highest level's values */
		mclk  = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
		vddc = ps->performance_levels[0].vddc;
		vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
	} else {
		sclk = ps->performance_levels[0].sclk;
		mclk = ps->performance_levels[0].mclk;
		vddc = ps->performance_levels[0].vddc;
		vddci = ps->performance_levels[0].vddci;
	}

	/* adjusted low state */
	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;
	ps->performance_levels[0].vddc = vddc;
	ps->performance_levels[0].vddci = vddci;

	btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
				  &ps->performance_levels[0].sclk,
				  &ps->performance_levels[0].mclk);

	/* sclk/vddc must be non-decreasing from level to level */
	for (i = 1; i < ps->performance_level_count; i++) {
		if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
			ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
		if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
			ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
	}

	if (disable_mclk_switching) {
		/* use the highest mclk of any level for all levels */
		mclk = ps->performance_levels[0].mclk;
		for (i = 1; i < ps->performance_level_count; i++) {
			if (mclk < ps->performance_levels[i].mclk)
				mclk = ps->performance_levels[i].mclk;
		}
		for (i = 0; i < ps->performance_level_count; i++) {
			ps->performance_levels[i].mclk = mclk;
			ps->performance_levels[i].vddci = vddci;
		}
	} else {
		/* mclk/vddci must be non-decreasing from level to level */
		for (i = 1; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk)
				ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk;
			if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci)
				ps->performance_levels[i].vddci = ps->performance_levels[i - 1].vddci;
		}
	}

	for (i = 1; i < ps->performance_level_count; i++)
		btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
					  &ps->performance_levels[i].sclk,
					  &ps->performance_levels[i].mclk);

	for (i = 0; i < ps->performance_level_count; i++)
		btc_adjust_clock_combinations(rdev, max_limits,
					      &ps->performance_levels[i]);

	/* raise voltages to satisfy the clock dependency tables */
	for (i = 0; i < ps->performance_level_count; i++) {
		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
						   ps->performance_levels[i].sclk,
						   max_limits->vddc, &ps->performance_levels[i].vddc);
		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						   ps->performance_levels[i].mclk,
						   max_limits->vddci, &ps->performance_levels[i].vddci);
		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						   ps->performance_levels[i].mclk,
						   max_limits->vddc, &ps->performance_levels[i].vddc);
		/* XXX validate the voltage required for display */
	}

	for (i = 0; i < ps->performance_level_count; i++) {
		btc_apply_voltage_delta_rules(rdev,
					      max_limits->vddc, max_limits->vddci,
					      &ps->performance_levels[i].vddc,
					      &ps->performance_levels[i].vddci);
	}

	/* state is DC-compatible only if every level fits the DC vddc limit */
	ps->dc_compatible = true;
	for (i = 0; i < ps->performance_level_count; i++) {
		if (ps->performance_levels[i].vddc > rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc)
			ps->dc_compatible = false;

		/* pcie gen2 needs a minimum vddc */
		if (ps->performance_levels[i].vddc < rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2)
			ps->performance_levels[i].flags &= ~ATOM_PPLIB_R600_FLAGS_PCIEGEN2;
	}
}

/* Program the default coarse-grain clock gating register sequence. */
static void ni_cg_clockgating_default(struct radeon_device *rdev)
{
	u32 count;
	const u32 *ps = NULL;

	ps = (const u32 *)&cayman_cgcg_cgls_default;
	count = CAYMAN_CGCG_CGLS_DEFAULT_LENGTH;

	btc_program_mgcg_hw_sequence(rdev, ps, count);
}

/* Enable/disable graphics coarse-grain clock gating. */
static void ni_gfx_clockgating_enable(struct radeon_device *rdev,
				      bool enable)
{
	u32 count;
	const u32 *ps = NULL;

	if (enable) {
		ps = (const u32 *)&cayman_cgcg_cgls_enable;
		count = CAYMAN_CGCG_CGLS_ENABLE_LENGTH;
	} else {
		ps = (const u32 *)&cayman_cgcg_cgls_disable;
		count = CAYMAN_CGCG_CGLS_DISABLE_LENGTH;
	}

	btc_program_mgcg_hw_sequence(rdev, ps, count);
}

/* Program the default medium-grain clock gating register sequence. */
static void ni_mg_clockgating_default(struct radeon_device *rdev)
{
	u32 count;
	const u32 *ps = NULL;

	ps = (const u32 *)&cayman_mgcg_default;
	count = CAYMAN_MGCG_DEFAULT_LENGTH;

	btc_program_mgcg_hw_sequence(rdev, ps, count);
}

/* Enable/disable medium-grain clock gating. */
static void ni_mg_clockgating_enable(struct radeon_device *rdev,
				     bool enable)
{
	u32 count;
	const u32 *ps = NULL;

	if (enable) {
		ps = (const u32 *)&cayman_mgcg_enable;
		count = CAYMAN_MGCG_ENABLE_LENGTH;
	} else {
		ps = (const u32 *)&cayman_mgcg_disable;
		count = CAYMAN_MGCG_DISABLE_LENGTH;
	}

	btc_program_mgcg_hw_sequence(rdev, ps, count);
}

/* Program the default system light-sleep register sequence. */
static void ni_ls_clockgating_default(struct radeon_device *rdev)
{
	u32 count;
	const u32 *ps = NULL;

	ps = (const u32 *)&cayman_sysls_default;
	count = CAYMAN_SYSLS_DEFAULT_LENGTH;

	btc_program_mgcg_hw_sequence(rdev, ps, count);
}

/* Enable/disable system light-sleep. */
static void ni_ls_clockgating_enable(struct radeon_device *rdev,
				     bool enable)
{
	u32 count;
	const u32 *ps = NULL;

	if (enable) {
		ps = (const u32 *)&cayman_sysls_enable;
		count = CAYMAN_SYSLS_ENABLE_LENGTH;
	} else {
		ps = (const u32 *)&cayman_sysls_disable;
		count = CAYMAN_SYSLS_DISABLE_LENGTH;
	}

	btc_program_mgcg_hw_sequence(rdev, ps, count);

}

/* Replace the 0xff01 leakage-placeholder voltage entries in a
 * clock/voltage dependency table with the real max vddc.
 * Returns -EINVAL if a placeholder is found but max_vddc is unknown.
 */
static int ni_patch_single_dependency_table_based_on_leakage(struct radeon_device *rdev,
							     struct radeon_clock_voltage_dependency_table *table)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++) {
			if (0xff01 == table->entries[i].v) {
				if (pi->max_vddc == 0)
					return -EINVAL;
				table->entries[i].v = pi->max_vddc;
			}
		}
	}
	return 0;
}

/* Patch both vddc dependency tables (on sclk and on mclk).
 * NOTE(review): an error from the first call is overwritten by the
 * second — only the last status is returned.
 */
static int ni_patch_dependency_tables_based_on_leakage(struct radeon_device *rdev)
{
	int ret = 0;

	ret = ni_patch_single_dependency_table_based_on_leakage(rdev,
								&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);

	ret = ni_patch_single_dependency_table_based_on_leakage(rdev,
								&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
	return ret;
}

/* Clear the global power-management enable bit. */
static void ni_stop_dpm(struct radeon_device *rdev)
{
	WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
}

#if 0
/* Tell the SMC we are running on AC power (unused for now). */
static int ni_notify_hw_of_power_source(struct radeon_device *rdev,
					bool ac_power)
{
	if (ac_power)
		return (rv770_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ?
			0 : -EINVAL;

	return 0;
}
#endif

/* Send an SMC message with its parameter staged in SMC_SCRATCH0. */
static PPSMC_Result ni_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
						      PPSMC_Msg msg, u32 parameter)
{
	WREG32(SMC_SCRATCH0, parameter);
	return rv770_send_msg_to_smc(rdev, msg);
}

/* Drop any forced level and restrict the SMC to the lowest
 * performance level before a state switch. */
static int ni_restrict_performance_levels_before_switch(struct radeon_device *rdev)
{
	if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
		return -EINVAL;

	return (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

#if 0
/* Re-enable all performance levels after a state switch (unused). */
static int ni_unrestrict_performance_levels_after_switch(struct radeon_device *rdev)
{
	if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
		return -EINVAL;

	return (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
#endif

/* Stop the SMC: wait (bounded by usec_timeout) for LB_SYNC_RESET_SEL
 * to leave state 1, settle, then halt the r7xx SMC core. */
static void ni_stop_smc(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(LB_SYNC_RESET_SEL) & LB_SYNC_RESET_SEL_MASK;
		if (tmp != 1)
			break;
		udelay(1);
	}

	udelay(100);

	r7xx_stop_smc(rdev);
}

/* Read the SMC firmware header and cache the SRAM offsets of the
 * state/soft-register/mc-register/fan/arb/cac/spll tables. */
static int ni_process_firmware_header(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 tmp;
	int ret;

	ret = rv770_read_smc_sram_dword(rdev,
					NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					NISLANDS_SMC_FIRMWARE_HEADER_stateTable,
					&tmp, pi->sram_end);

	if (ret)
		return ret;

	pi->state_table_start = (u16)tmp;

	ret = rv770_read_smc_sram_dword(rdev,
					NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					NISLANDS_SMC_FIRMWARE_HEADER_softRegisters,
					&tmp, pi->sram_end);

	if (ret)
		return ret;

	pi->soft_regs_start = (u16)tmp;

	ret = rv770_read_smc_sram_dword(rdev,
					NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					NISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable,
					&tmp, pi->sram_end);

	if (ret)
		return ret;

	eg_pi->mc_reg_table_start = (u16)tmp;

	ret = rv770_read_smc_sram_dword(rdev,
					NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					NISLANDS_SMC_FIRMWARE_HEADER_fanTable,
					&tmp, pi->sram_end);

	if (ret)
		return ret;

	ni_pi->fan_table_start = (u16)tmp;

	ret = rv770_read_smc_sram_dword(rdev,
					NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					NISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
					&tmp, pi->sram_end);

	if (ret)
		return ret;

	ni_pi->arb_table_start = (u16)tmp;

	ret = rv770_read_smc_sram_dword(rdev,
					NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					NISLANDS_SMC_FIRMWARE_HEADER_cacTable,
					&tmp, pi->sram_end);

	if (ret)
		return ret;

	ni_pi->cac_table_start = (u16)tmp;

	ret = rv770_read_smc_sram_dword(rdev,
					NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					NISLANDS_SMC_FIRMWARE_HEADER_spllTable,
					&tmp, pi->sram_end);

	if (ret)
		return ret;

	ni_pi->spll_table_start = (u16)tmp;


	return ret;
}

/* Snapshot the boot-time SPLL/MPLL/MCLK clock registers so they can
 * be restored or used as templates later. */
static void ni_read_clock_registers(struct radeon_device *rdev)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);

	ni_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
	ni_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);
	ni_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);
	ni_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);
	ni_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);
	ni_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
	ni_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
	ni_pi->clock_registers.mpll_ad_func_cntl_2 = RREG32(MPLL_AD_FUNC_CNTL_2);
	ni_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
	ni_pi->clock_registers.mpll_dq_func_cntl_2 = RREG32(MPLL_DQ_FUNC_CNTL_2);
	ni_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
	ni_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
	ni_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
	ni_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
}

#if 0
/* Drop into the ultra-low-power state via an SMC message (unused). */
static int ni_enter_ulp_state(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);

	if (pi->gfx_clock_gating) {
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
		WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
		RREG32(GB_ADDR_CONFIG);
	}

	WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_SwitchToMinimumPower),
		 ~HOST_SMC_MSG_MASK);

	udelay(25000);

	return 0;
}
#endif

/* Convert voltage/backbias/acpi/vbi response times into reference-clock
 * ticks and program them into the SMC soft registers.
 * NOTE(review): the /1600 divisor converts the time to SMC delay
 * units — presumably 16 us granularity at a clock in 10 kHz units;
 * confirm against the SMC firmware spec.
 */
static void ni_program_response_times(struct radeon_device *rdev)
{
	u32 voltage_response_time, backbias_response_time, acpi_delay_time, vbi_time_out;
	u32 vddc_dly, bb_dly, acpi_dly, vbi_dly, mclk_switch_limit;
	u32 reference_clock;

	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);

	voltage_response_time = (u32)rdev->pm.dpm.voltage_response_time;
	backbias_response_time = (u32)rdev->pm.dpm.backbias_response_time;

	/* fall back to sane defaults when the vbios supplies none */
	if (voltage_response_time == 0)
		voltage_response_time = 1000;

	if (backbias_response_time == 0)
		backbias_response_time = 1000;

	acpi_delay_time = 15000;
	vbi_time_out = 100000;

	reference_clock = radeon_get_xclk(rdev);

	vddc_dly = (voltage_response_time * reference_clock) / 1600;
	bb_dly = (backbias_response_time * reference_clock) / 1600;
	acpi_dly = (acpi_delay_time * reference_clock) / 1600;
	vbi_dly = (vbi_time_out * reference_clock) / 1600;

	mclk_switch_limit = (460 * reference_clock) / 100;

	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_delay_vreg, vddc_dly);
	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_delay_bbias, bb_dly);
	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_delay_acpi, acpi_dly);
	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly);
	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);
	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_mclk_switch_lim, mclk_switch_limit);
}

/* Copy one voltage table's SMIO values into the SMC state table
 * (low SMIO is OR-ed in, stored big-endian for the SMC). */
static void ni_populate_smc_voltage_table(struct radeon_device *rdev,
					  struct atom_voltage_table *voltage_table,
					  NISLANDS_SMC_STATETABLE *table)
{
	unsigned int i;

	for (i = 0; i < voltage_table->count; i++) {
		table->highSMIO[i] = 0;
		table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
	}
}

/* Fill the SMC state table's vddc/vddci voltage tables and masks,
 * and record the index of the highest usable vddc entry.
 * NOTE(review): the VDDCI low mask is taken from
 * vddc_voltage_table.mask_low rather than vddci_voltage_table —
 * looks like a copy-paste slip; verify against the SMC layout.
 */
static void ni_populate_smc_voltage_tables(struct radeon_device *rdev,
					   NISLANDS_SMC_STATETABLE *table)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	unsigned char i;

	if (eg_pi->vddc_voltage_table.count) {
		ni_populate_smc_voltage_table(rdev, &eg_pi->vddc_voltage_table, table);
		table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDC] = 0;
		table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDC] =
			cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);

		for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
			if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
				table->maxVDDCIndexInPPTable = i;
				break;
			}
		}
	}

	if (eg_pi->vddci_voltage_table.count) {
		ni_populate_smc_voltage_table(rdev, &eg_pi->vddci_voltage_table, table);

		table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = 0;
		table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] =
			cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
	}
}

/* Map a requested voltage to the first table entry >= value; fills
 * the SMC voltage struct.  Returns -EINVAL if no entry is high enough. */
static int ni_populate_voltage_value(struct radeon_device *rdev,
				     struct atom_voltage_table *table,
				     u16 value,
				     NISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
	unsigned int i;

	for (i = 0; i < table->count; i++) {
		if (value <= table->entries[i].value) {
			voltage->index = (u8)i;
			voltage->value = cpu_to_be16(table->entries[i].value);
			break;
		}
	}

	if (i >= table->count)
		return -EINVAL;

	return 0;
}

/* Pick the mvdd level for a given mclk: low below the split frequency,
 * high otherwise; always high when mvdd control is unavailable. */
static void ni_populate_mvdd_value(struct radeon_device *rdev,
				   u32 mclk,
				   NISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);

	if (!pi->mvdd_control) {
		voltage->index = eg_pi->mvdd_high_index;
		voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
		return;
	}

	if (mclk <= pi->mvdd_split_frequency) {
		voltage->index = eg_pi->mvdd_low_index;
		voltage->value = cpu_to_be16(MVDD_LOW_VALUE);
	} else {
		voltage->index = eg_pi->mvdd_high_index;
		voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
	}
}

/* Look up the "standard" vddc for a voltage index from the cac
 * leakage table; falls back to the raw (big-endian) value. */
static int ni_get_std_voltage_value(struct radeon_device *rdev,
				    NISLANDS_SMC_VOLTAGE_VALUE *voltage,
				    u16 *std_voltage)
{
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries &&
	    ((u32)voltage->index < rdev->pm.dpm.dyn_state.cac_leakage_table.count))
		*std_voltage = rdev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc;
	else
		*std_voltage = be16_to_cpu(voltage->value);

	return 0;
}

/* Fill an SMC voltage struct from an explicit index/value pair. */
static void ni_populate_std_voltage_value(struct radeon_device *rdev,
					  u16 value, u8 index,
					  NISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
	voltage->index = index;
	voltage->value = cpu_to_be16(value);
}

/* Power scaling factor = CAC TID count * xclk period, with the
 * period computed in units of 100 ns (1e9/xclk then /10000). */
static u32 ni_get_smc_power_scaling_factor(struct radeon_device *rdev)
{
	u32 xclk_period;
	u32 xclk = radeon_get_xclk(rdev);
	u32 tmp = RREG32(CG_CAC_CTRL) & TID_CNT_MASK;

	xclk_period = (1000000000UL / xclk);
	xclk_period /= 10000UL;

	return tmp * xclk_period;
}

/* Convert a power value in watts to SMC units: (W * factor) * 4. */
static u32 ni_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor)
{
	return (power_in_watts * scaling_factor) << 2;
}

/* Boost limit = near_tdp * (std_vddc_med^2 / std_vddc_high^2) * 0.9,
 * using the standard voltages of the two highest performance levels.
 * Returns 0 (no boost) when power containment/boost is disabled, the
 * state has fewer than 3 levels, any lookup fails, or the 64-bit
 * result would overflow u32.
 */
static u32 ni_calculate_power_boost_limit(struct radeon_device *rdev,
					  struct radeon_ps *radeon_state,
					  u32 near_tdp_limit)
{
	struct ni_ps *state = ni_get_ps(radeon_state);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 power_boost_limit = 0;
	int ret;

	if (ni_pi->enable_power_containment &&
	    ni_pi->use_power_boost_limit) {
		NISLANDS_SMC_VOLTAGE_VALUE vddc;
		u16 std_vddc_med;
		u16 std_vddc_high;
		u64 tmp, n, d;

		if (state->performance_level_count < 3)
			return 0;

		ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
						state->performance_levels[state->performance_level_count - 2].vddc,
						&vddc);
		if (ret)
			return 0;

		ret = ni_get_std_voltage_value(rdev, &vddc, &std_vddc_med);
		if (ret)
			return 0;

		ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
						state->performance_levels[state->performance_level_count - 1].vddc,
						&vddc);
		if (ret)
			return 0;

		ret = ni_get_std_voltage_value(rdev, &vddc, &std_vddc_high);
		if (ret)
			return 0;

		/* 64-bit math: voltages squared can exceed 32 bits */
		n = ((u64)near_tdp_limit * ((u64)std_vddc_med * (u64)std_vddc_med) * 90);
		d = ((u64)std_vddc_high * (u64)std_vddc_high * 100);
		tmp = div64_u64(n, d);

		if (tmp >> 32)
			return 0;
		power_boost_limit = (u32)tmp;
	}

	return power_boost_limit;
}

/* Scale the vbios tdp/near-tdp limits by the user's percentage
 * adjustment; polarity selects whether the adjustment raises or
 * lowers the limits.  Fails if the adjustment exceeds the OD limit. */
static int ni_calculate_adjusted_tdp_limits(struct radeon_device *rdev,
					    bool adjust_polarity,
					    u32 tdp_adjustment,
					    u32 *tdp_limit,
					    u32 *near_tdp_limit)
{
	if (tdp_adjustment > (u32)rdev->pm.dpm.tdp_od_limit)
		return -EINVAL;

	if (adjust_polarity) {
		*tdp_limit = ((100 + tdp_adjustment) * rdev->pm.dpm.tdp_limit) / 100;
		*near_tdp_limit = rdev->pm.dpm.near_tdp_limit + (*tdp_limit - rdev->pm.dpm.tdp_limit);
	} else {
		*tdp_limit = ((100 - tdp_adjustment) * rdev->pm.dpm.tdp_limit) / 100;
		*near_tdp_limit = rdev->pm.dpm.near_tdp_limit - (rdev->pm.dpm.tdp_limit - *tdp_limit);
	}

	return 0;
}

/* Compute the adjusted tdp/near-tdp/safe/boost power limits, scale
 * them to SMC units, and upload all four consecutive u32 dpm2
 * parameters (TDPLimit..PowerBoostLimit) to SMC SRAM in one copy.
 * No-op unless power containment is enabled.
 */
static int ni_populate_smc_tdp_limits(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);

	if (ni_pi->enable_power_containment) {
		struct radeon_ps *radeon_state = rdev->pm.dpm.requested_ps;
		NISLANDS_SMC_STATETABLE *smc_table = &ni_pi->smc_statetable;
		u32 scaling_factor = ni_get_smc_power_scaling_factor(rdev);
		u32 tdp_limit;
		u32 near_tdp_limit;
		u32 power_boost_limit;
		int ret;

		if (scaling_factor == 0)
			return -EINVAL;

		memset(smc_table, 0, sizeof(NISLANDS_SMC_STATETABLE));

		ret = ni_calculate_adjusted_tdp_limits(rdev,
						       false, /* ??? */
						       rdev->pm.dpm.tdp_adjustment,
						       &tdp_limit,
						       &near_tdp_limit);
		if (ret)
			return ret;

		power_boost_limit = ni_calculate_power_boost_limit(rdev, radeon_state,
								   near_tdp_limit);

		smc_table->dpm2Params.TDPLimit =
			cpu_to_be32(ni_scale_power_for_smc(tdp_limit, scaling_factor));
		smc_table->dpm2Params.NearTDPLimit =
			cpu_to_be32(ni_scale_power_for_smc(near_tdp_limit, scaling_factor));
		smc_table->dpm2Params.SafePowerLimit =
			cpu_to_be32(ni_scale_power_for_smc((near_tdp_limit * NISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100,
							   scaling_factor));
		smc_table->dpm2Params.PowerBoostLimit =
			cpu_to_be32(ni_scale_power_for_smc(power_boost_limit, scaling_factor));

		/* copies TDPLimit, NearTDPLimit, SafePowerLimit and
		 * PowerBoostLimit in one go (4 consecutive u32s) */
		ret = rv770_copy_bytes_to_smc(rdev,
					      (u16)(pi->state_table_start + offsetof(NISLANDS_SMC_STATETABLE, dpm2Params) +
						    offsetof(PP_NIslands_DPM2Parameters, TDPLimit)),
					      (u8 *)(&smc_table->dpm2Params.TDPLimit),
					      sizeof(u32) * 4, pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}

/* Copy the MC arbiter dram timing/burst-time registers from one
 * frequency slot to another and switch the active set.
 * (definition continues past this chunk)
 */
static int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
				       u32 arb_freq_src, u32 arb_freq_dest)
{
	u32 mc_arb_dram_timing;
	u32 mc_arb_dram_timing2;
	u32 burst_time;
	u32 mc_cg_config;

	switch (arb_freq_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT;
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1);
		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT;
		break;
	case MC_CG_ARB_FREQ_F2:
		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_2);
mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2); 1524 + burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT; 1525 + break; 1526 + case MC_CG_ARB_FREQ_F3: 1527 + mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_3); 1528 + mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3); 1529 + burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT; 1530 + break; 1531 + default: 1532 + return -EINVAL; 1533 + } 1534 + 1535 + switch (arb_freq_dest) { 1536 + case MC_CG_ARB_FREQ_F0: 1537 + WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing); 1538 + WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2); 1539 + WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK); 1540 + break; 1541 + case MC_CG_ARB_FREQ_F1: 1542 + WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing); 1543 + WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2); 1544 + WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK); 1545 + break; 1546 + case MC_CG_ARB_FREQ_F2: 1547 + WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing); 1548 + WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2); 1549 + WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK); 1550 + break; 1551 + case MC_CG_ARB_FREQ_F3: 1552 + WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing); 1553 + WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2); 1554 + WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK); 1555 + break; 1556 + default: 1557 + return -EINVAL; 1558 + } 1559 + 1560 + mc_cg_config = RREG32(MC_CG_CONFIG) | 0x0000000F; 1561 + WREG32(MC_CG_CONFIG, mc_cg_config); 1562 + WREG32_P(MC_ARB_CG, CG_ARB_REQ(arb_freq_dest), ~CG_ARB_REQ_MASK); 1563 + 1564 + return 0; 1565 + } 1566 + 1567 + static int ni_init_arb_table_index(struct radeon_device *rdev) 1568 + { 1569 + struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1570 + struct ni_power_info *ni_pi = ni_get_pi(rdev); 1571 + u32 tmp; 1572 + int ret; 1573 + 1574 + ret = rv770_read_smc_sram_dword(rdev, ni_pi->arb_table_start, 1575 + &tmp, pi->sram_end); 1576 + if 
(ret) 1577 + return ret; 1578 + 1579 + tmp &= 0x00FFFFFF; 1580 + tmp |= ((u32)MC_CG_ARB_FREQ_F1) << 24; 1581 + 1582 + return rv770_write_smc_sram_dword(rdev, ni_pi->arb_table_start, 1583 + tmp, pi->sram_end); 1584 + } 1585 + 1586 + static int ni_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev) 1587 + { 1588 + return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); 1589 + } 1590 + 1591 + static int ni_force_switch_to_arb_f0(struct radeon_device *rdev) 1592 + { 1593 + struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1594 + struct ni_power_info *ni_pi = ni_get_pi(rdev); 1595 + u32 tmp; 1596 + int ret; 1597 + 1598 + ret = rv770_read_smc_sram_dword(rdev, ni_pi->arb_table_start, 1599 + &tmp, pi->sram_end); 1600 + if (ret) 1601 + return ret; 1602 + 1603 + tmp = (tmp >> 24) & 0xff; 1604 + 1605 + if (tmp == MC_CG_ARB_FREQ_F0) 1606 + return 0; 1607 + 1608 + return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0); 1609 + } 1610 + 1611 + static int ni_populate_memory_timing_parameters(struct radeon_device *rdev, 1612 + struct rv7xx_pl *pl, 1613 + SMC_NIslands_MCArbDramTimingRegisterSet *arb_regs) 1614 + { 1615 + u32 dram_timing; 1616 + u32 dram_timing2; 1617 + 1618 + arb_regs->mc_arb_rfsh_rate = 1619 + (u8)rv770_calculate_memory_refresh_rate(rdev, pl->sclk); 1620 + 1621 + 1622 + radeon_atom_set_engine_dram_timings(rdev, 1623 + pl->sclk, 1624 + pl->mclk); 1625 + 1626 + dram_timing = RREG32(MC_ARB_DRAM_TIMING); 1627 + dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); 1628 + 1629 + arb_regs->mc_arb_dram_timing = cpu_to_be32(dram_timing); 1630 + arb_regs->mc_arb_dram_timing2 = cpu_to_be32(dram_timing2); 1631 + 1632 + return 0; 1633 + } 1634 + 1635 + static int ni_do_program_memory_timing_parameters(struct radeon_device *rdev, 1636 + struct radeon_ps *radeon_state, 1637 + unsigned int first_arb_set) 1638 + { 1639 + struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1640 + struct ni_power_info *ni_pi = ni_get_pi(rdev); 1641 + struct ni_ps 
*state = ni_get_ps(radeon_state); 1642 + SMC_NIslands_MCArbDramTimingRegisterSet arb_regs = { 0 }; 1643 + int i, ret = 0; 1644 + 1645 + for (i = 0; i < state->performance_level_count; i++) { 1646 + ret = ni_populate_memory_timing_parameters(rdev, &state->performance_levels[i], &arb_regs); 1647 + if (ret) 1648 + break; 1649 + 1650 + ret = rv770_copy_bytes_to_smc(rdev, 1651 + (u16)(ni_pi->arb_table_start + 1652 + offsetof(SMC_NIslands_MCArbDramTimingRegisters, data) + 1653 + sizeof(SMC_NIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i)), 1654 + (u8 *)&arb_regs, 1655 + (u16)sizeof(SMC_NIslands_MCArbDramTimingRegisterSet), 1656 + pi->sram_end); 1657 + if (ret) 1658 + break; 1659 + } 1660 + return ret; 1661 + } 1662 + 1663 + static int ni_program_memory_timing_parameters(struct radeon_device *rdev) 1664 + { 1665 + struct radeon_ps *radeon_new_state = rdev->pm.dpm.requested_ps; 1666 + 1667 + return ni_do_program_memory_timing_parameters(rdev, radeon_new_state, 1668 + NISLANDS_DRIVER_STATE_ARB_INDEX); 1669 + } 1670 + 1671 + static void ni_populate_initial_mvdd_value(struct radeon_device *rdev, 1672 + struct NISLANDS_SMC_VOLTAGE_VALUE *voltage) 1673 + { 1674 + struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); 1675 + 1676 + voltage->index = eg_pi->mvdd_high_index; 1677 + voltage->value = cpu_to_be16(MVDD_HIGH_VALUE); 1678 + } 1679 + 1680 + static int ni_populate_smc_initial_state(struct radeon_device *rdev, 1681 + struct radeon_ps *radeon_initial_state, 1682 + NISLANDS_SMC_STATETABLE *table) 1683 + { 1684 + struct ni_ps *initial_state = ni_get_ps(radeon_initial_state); 1685 + struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1686 + struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); 1687 + struct ni_power_info *ni_pi = ni_get_pi(rdev); 1688 + u32 reg; 1689 + int ret; 1690 + 1691 + table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = 1692 + cpu_to_be32(ni_pi->clock_registers.mpll_ad_func_cntl); 1693 + 
table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL_2 = 1694 + cpu_to_be32(ni_pi->clock_registers.mpll_ad_func_cntl_2); 1695 + table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL = 1696 + cpu_to_be32(ni_pi->clock_registers.mpll_dq_func_cntl); 1697 + table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL_2 = 1698 + cpu_to_be32(ni_pi->clock_registers.mpll_dq_func_cntl_2); 1699 + table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL = 1700 + cpu_to_be32(ni_pi->clock_registers.mclk_pwrmgt_cntl); 1701 + table->initialState.levels[0].mclk.vDLL_CNTL = 1702 + cpu_to_be32(ni_pi->clock_registers.dll_cntl); 1703 + table->initialState.levels[0].mclk.vMPLL_SS = 1704 + cpu_to_be32(ni_pi->clock_registers.mpll_ss1); 1705 + table->initialState.levels[0].mclk.vMPLL_SS2 = 1706 + cpu_to_be32(ni_pi->clock_registers.mpll_ss2); 1707 + table->initialState.levels[0].mclk.mclk_value = 1708 + cpu_to_be32(initial_state->performance_levels[0].mclk); 1709 + 1710 + table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = 1711 + cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl); 1712 + table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = 1713 + cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_2); 1714 + table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = 1715 + cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_3); 1716 + table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 = 1717 + cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_4); 1718 + table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM = 1719 + cpu_to_be32(ni_pi->clock_registers.cg_spll_spread_spectrum); 1720 + table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 = 1721 + cpu_to_be32(ni_pi->clock_registers.cg_spll_spread_spectrum_2); 1722 + table->initialState.levels[0].sclk.sclk_value = 1723 + cpu_to_be32(initial_state->performance_levels[0].sclk); 1724 + table->initialState.levels[0].arbRefreshState = 1725 + NISLANDS_INITIAL_STATE_ARB_INDEX; 1726 + 1727 + table->initialState.levels[0].ACIndex 
= 0; 1728 + 1729 + ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table, 1730 + initial_state->performance_levels[0].vddc, 1731 + &table->initialState.levels[0].vddc); 1732 + if (!ret) { 1733 + u16 std_vddc; 1734 + 1735 + ret = ni_get_std_voltage_value(rdev, 1736 + &table->initialState.levels[0].vddc, 1737 + &std_vddc); 1738 + if (!ret) 1739 + ni_populate_std_voltage_value(rdev, std_vddc, 1740 + table->initialState.levels[0].vddc.index, 1741 + &table->initialState.levels[0].std_vddc); 1742 + } 1743 + 1744 + if (eg_pi->vddci_control) 1745 + ni_populate_voltage_value(rdev, 1746 + &eg_pi->vddci_voltage_table, 1747 + initial_state->performance_levels[0].vddci, 1748 + &table->initialState.levels[0].vddci); 1749 + 1750 + ni_populate_initial_mvdd_value(rdev, &table->initialState.levels[0].mvdd); 1751 + 1752 + reg = CG_R(0xffff) | CG_L(0); 1753 + table->initialState.levels[0].aT = cpu_to_be32(reg); 1754 + 1755 + table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp); 1756 + 1757 + if (pi->boot_in_gen2) 1758 + table->initialState.levels[0].gen2PCIE = 1; 1759 + else 1760 + table->initialState.levels[0].gen2PCIE = 0; 1761 + 1762 + if (pi->mem_gddr5) { 1763 + table->initialState.levels[0].strobeMode = 1764 + cypress_get_strobe_mode_settings(rdev, 1765 + initial_state->performance_levels[0].mclk); 1766 + 1767 + if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold) 1768 + table->initialState.levels[0].mcFlags = NISLANDS_SMC_MC_EDC_RD_FLAG | NISLANDS_SMC_MC_EDC_WR_FLAG; 1769 + else 1770 + table->initialState.levels[0].mcFlags = 0; 1771 + } 1772 + 1773 + table->initialState.levelCount = 1; 1774 + 1775 + table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC; 1776 + 1777 + table->initialState.levels[0].dpm2.MaxPS = 0; 1778 + table->initialState.levels[0].dpm2.NearTDPDec = 0; 1779 + table->initialState.levels[0].dpm2.AboveSafeInc = 0; 1780 + table->initialState.levels[0].dpm2.BelowSafeInc = 0; 1781 + 1782 + reg = MIN_POWER_MASK | MAX_POWER_MASK; 
1783 + table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg); 1784 + 1785 + reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK; 1786 + table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg); 1787 + 1788 + return 0; 1789 + } 1790 + 1791 + static int ni_populate_smc_acpi_state(struct radeon_device *rdev, 1792 + NISLANDS_SMC_STATETABLE *table) 1793 + { 1794 + struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1795 + struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); 1796 + struct ni_power_info *ni_pi = ni_get_pi(rdev); 1797 + u32 mpll_ad_func_cntl = ni_pi->clock_registers.mpll_ad_func_cntl; 1798 + u32 mpll_ad_func_cntl_2 = ni_pi->clock_registers.mpll_ad_func_cntl_2; 1799 + u32 mpll_dq_func_cntl = ni_pi->clock_registers.mpll_dq_func_cntl; 1800 + u32 mpll_dq_func_cntl_2 = ni_pi->clock_registers.mpll_dq_func_cntl_2; 1801 + u32 spll_func_cntl = ni_pi->clock_registers.cg_spll_func_cntl; 1802 + u32 spll_func_cntl_2 = ni_pi->clock_registers.cg_spll_func_cntl_2; 1803 + u32 spll_func_cntl_3 = ni_pi->clock_registers.cg_spll_func_cntl_3; 1804 + u32 spll_func_cntl_4 = ni_pi->clock_registers.cg_spll_func_cntl_4; 1805 + u32 mclk_pwrmgt_cntl = ni_pi->clock_registers.mclk_pwrmgt_cntl; 1806 + u32 dll_cntl = ni_pi->clock_registers.dll_cntl; 1807 + u32 reg; 1808 + int ret; 1809 + 1810 + table->ACPIState = table->initialState; 1811 + 1812 + table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC; 1813 + 1814 + if (pi->acpi_vddc) { 1815 + ret = ni_populate_voltage_value(rdev, 1816 + &eg_pi->vddc_voltage_table, 1817 + pi->acpi_vddc, &table->ACPIState.levels[0].vddc); 1818 + if (!ret) { 1819 + u16 std_vddc; 1820 + 1821 + ret = ni_get_std_voltage_value(rdev, 1822 + &table->ACPIState.levels[0].vddc, &std_vddc); 1823 + if (!ret) 1824 + ni_populate_std_voltage_value(rdev, std_vddc, 1825 + table->ACPIState.levels[0].vddc.index, 1826 + &table->ACPIState.levels[0].std_vddc); 1827 + } 1828 + 1829 + if (pi->pcie_gen2) { 1830 + if (pi->acpi_pcie_gen2) 1831 + 
table->ACPIState.levels[0].gen2PCIE = 1; 1832 + else 1833 + table->ACPIState.levels[0].gen2PCIE = 0; 1834 + } else { 1835 + table->ACPIState.levels[0].gen2PCIE = 0; 1836 + } 1837 + } else { 1838 + ret = ni_populate_voltage_value(rdev, 1839 + &eg_pi->vddc_voltage_table, 1840 + pi->min_vddc_in_table, 1841 + &table->ACPIState.levels[0].vddc); 1842 + if (!ret) { 1843 + u16 std_vddc; 1844 + 1845 + ret = ni_get_std_voltage_value(rdev, 1846 + &table->ACPIState.levels[0].vddc, 1847 + &std_vddc); 1848 + if (!ret) 1849 + ni_populate_std_voltage_value(rdev, std_vddc, 1850 + table->ACPIState.levels[0].vddc.index, 1851 + &table->ACPIState.levels[0].std_vddc); 1852 + } 1853 + table->ACPIState.levels[0].gen2PCIE = 0; 1854 + } 1855 + 1856 + if (eg_pi->acpi_vddci) { 1857 + if (eg_pi->vddci_control) 1858 + ni_populate_voltage_value(rdev, 1859 + &eg_pi->vddci_voltage_table, 1860 + eg_pi->acpi_vddci, 1861 + &table->ACPIState.levels[0].vddci); 1862 + } 1863 + 1864 + 1865 + mpll_ad_func_cntl &= ~PDNB; 1866 + 1867 + mpll_ad_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN; 1868 + 1869 + if (pi->mem_gddr5) 1870 + mpll_dq_func_cntl &= ~PDNB; 1871 + mpll_dq_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN | BYPASS; 1872 + 1873 + 1874 + mclk_pwrmgt_cntl |= (MRDCKA0_RESET | 1875 + MRDCKA1_RESET | 1876 + MRDCKB0_RESET | 1877 + MRDCKB1_RESET | 1878 + MRDCKC0_RESET | 1879 + MRDCKC1_RESET | 1880 + MRDCKD0_RESET | 1881 + MRDCKD1_RESET); 1882 + 1883 + mclk_pwrmgt_cntl &= ~(MRDCKA0_PDNB | 1884 + MRDCKA1_PDNB | 1885 + MRDCKB0_PDNB | 1886 + MRDCKB1_PDNB | 1887 + MRDCKC0_PDNB | 1888 + MRDCKC1_PDNB | 1889 + MRDCKD0_PDNB | 1890 + MRDCKD1_PDNB); 1891 + 1892 + dll_cntl |= (MRDCKA0_BYPASS | 1893 + MRDCKA1_BYPASS | 1894 + MRDCKB0_BYPASS | 1895 + MRDCKB1_BYPASS | 1896 + MRDCKC0_BYPASS | 1897 + MRDCKC1_BYPASS | 1898 + MRDCKD0_BYPASS | 1899 + MRDCKD1_BYPASS); 1900 + 1901 + spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK; 1902 + spll_func_cntl_2 |= SCLK_MUX_SEL(4); 1903 + 1904 + table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = 
cpu_to_be32(mpll_ad_func_cntl); 1905 + table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2); 1906 + table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl); 1907 + table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2); 1908 + table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl); 1909 + table->ACPIState.levels[0].mclk.vDLL_CNTL = cpu_to_be32(dll_cntl); 1910 + 1911 + table->ACPIState.levels[0].mclk.mclk_value = 0; 1912 + 1913 + table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl); 1914 + table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2); 1915 + table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3); 1916 + table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(spll_func_cntl_4); 1917 + 1918 + table->ACPIState.levels[0].sclk.sclk_value = 0; 1919 + 1920 + ni_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd); 1921 + 1922 + if (eg_pi->dynamic_ac_timing) 1923 + table->ACPIState.levels[0].ACIndex = 1; 1924 + 1925 + table->ACPIState.levels[0].dpm2.MaxPS = 0; 1926 + table->ACPIState.levels[0].dpm2.NearTDPDec = 0; 1927 + table->ACPIState.levels[0].dpm2.AboveSafeInc = 0; 1928 + table->ACPIState.levels[0].dpm2.BelowSafeInc = 0; 1929 + 1930 + reg = MIN_POWER_MASK | MAX_POWER_MASK; 1931 + table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg); 1932 + 1933 + reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK; 1934 + table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg); 1935 + 1936 + return 0; 1937 + } 1938 + 1939 + static int ni_init_smc_table(struct radeon_device *rdev) 1940 + { 1941 + struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1942 + struct ni_power_info *ni_pi = ni_get_pi(rdev); 1943 + int ret; 1944 + struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps; 1945 + NISLANDS_SMC_STATETABLE *table = 
&ni_pi->smc_statetable; 1946 + 1947 + memset(table, 0, sizeof(NISLANDS_SMC_STATETABLE)); 1948 + 1949 + ni_populate_smc_voltage_tables(rdev, table); 1950 + 1951 + switch (rdev->pm.int_thermal_type) { 1952 + case THERMAL_TYPE_NI: 1953 + case THERMAL_TYPE_EMC2103_WITH_INTERNAL: 1954 + table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL; 1955 + break; 1956 + case THERMAL_TYPE_NONE: 1957 + table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE; 1958 + break; 1959 + default: 1960 + table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL; 1961 + break; 1962 + } 1963 + 1964 + if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC) 1965 + table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; 1966 + 1967 + if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) 1968 + table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT; 1969 + 1970 + if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC) 1971 + table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; 1972 + 1973 + if (pi->mem_gddr5) 1974 + table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5; 1975 + 1976 + ret = ni_populate_smc_initial_state(rdev, radeon_boot_state, table); 1977 + if (ret) 1978 + return ret; 1979 + 1980 + ret = ni_populate_smc_acpi_state(rdev, table); 1981 + if (ret) 1982 + return ret; 1983 + 1984 + table->driverState = table->initialState; 1985 + 1986 + table->ULVState = table->initialState; 1987 + 1988 + ret = ni_do_program_memory_timing_parameters(rdev, radeon_boot_state, 1989 + NISLANDS_INITIAL_STATE_ARB_INDEX); 1990 + if (ret) 1991 + return ret; 1992 + 1993 + return rv770_copy_bytes_to_smc(rdev, pi->state_table_start, (u8 *)table, 1994 + sizeof(NISLANDS_SMC_STATETABLE), pi->sram_end); 1995 + } 1996 + 1997 + static int ni_calculate_sclk_params(struct radeon_device *rdev, 1998 + u32 engine_clock, 1999 + NISLANDS_SMC_SCLK_VALUE *sclk) 2000 + { 2001 + struct rv7xx_power_info *pi = rv770_get_pi(rdev); 2002 + struct ni_power_info *ni_pi = ni_get_pi(rdev); 2003 + struct 
atom_clock_dividers dividers; 2004 + u32 spll_func_cntl = ni_pi->clock_registers.cg_spll_func_cntl; 2005 + u32 spll_func_cntl_2 = ni_pi->clock_registers.cg_spll_func_cntl_2; 2006 + u32 spll_func_cntl_3 = ni_pi->clock_registers.cg_spll_func_cntl_3; 2007 + u32 spll_func_cntl_4 = ni_pi->clock_registers.cg_spll_func_cntl_4; 2008 + u32 cg_spll_spread_spectrum = ni_pi->clock_registers.cg_spll_spread_spectrum; 2009 + u32 cg_spll_spread_spectrum_2 = ni_pi->clock_registers.cg_spll_spread_spectrum_2; 2010 + u64 tmp; 2011 + u32 reference_clock = rdev->clock.spll.reference_freq; 2012 + u32 reference_divider; 2013 + u32 fbdiv; 2014 + int ret; 2015 + 2016 + ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, 2017 + engine_clock, false, &dividers); 2018 + if (ret) 2019 + return ret; 2020 + 2021 + reference_divider = 1 + dividers.ref_div; 2022 + 2023 + 2024 + tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16834; 2025 + do_div(tmp, reference_clock); 2026 + fbdiv = (u32) tmp; 2027 + 2028 + spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK); 2029 + spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div); 2030 + spll_func_cntl |= SPLL_PDIV_A(dividers.post_div); 2031 + 2032 + spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK; 2033 + spll_func_cntl_2 |= SCLK_MUX_SEL(2); 2034 + 2035 + spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK; 2036 + spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv); 2037 + spll_func_cntl_3 |= SPLL_DITHEN; 2038 + 2039 + if (pi->sclk_ss) { 2040 + struct radeon_atom_ss ss; 2041 + u32 vco_freq = engine_clock * dividers.post_div; 2042 + 2043 + if (radeon_atombios_get_asic_ss_info(rdev, &ss, 2044 + ASIC_INTERNAL_ENGINE_SS, vco_freq)) { 2045 + u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate); 2046 + u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000); 2047 + 2048 + cg_spll_spread_spectrum &= ~CLK_S_MASK; 2049 + cg_spll_spread_spectrum |= CLK_S(clk_s); 2050 + cg_spll_spread_spectrum |= SSEN; 2051 + 2052 + cg_spll_spread_spectrum_2 &= ~CLK_V_MASK; 
2053 + cg_spll_spread_spectrum_2 |= CLK_V(clk_v); 2054 + } 2055 + } 2056 + 2057 + sclk->sclk_value = engine_clock; 2058 + sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl; 2059 + sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2; 2060 + sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3; 2061 + sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4; 2062 + sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum; 2063 + sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2; 2064 + 2065 + return 0; 2066 + } 2067 + 2068 + static int ni_populate_sclk_value(struct radeon_device *rdev, 2069 + u32 engine_clock, 2070 + NISLANDS_SMC_SCLK_VALUE *sclk) 2071 + { 2072 + NISLANDS_SMC_SCLK_VALUE sclk_tmp; 2073 + int ret; 2074 + 2075 + ret = ni_calculate_sclk_params(rdev, engine_clock, &sclk_tmp); 2076 + if (!ret) { 2077 + sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value); 2078 + sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL); 2079 + sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2); 2080 + sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3); 2081 + sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4); 2082 + sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM); 2083 + sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2); 2084 + } 2085 + 2086 + return ret; 2087 + } 2088 + 2089 + static int ni_init_smc_spll_table(struct radeon_device *rdev) 2090 + { 2091 + struct rv7xx_power_info *pi = rv770_get_pi(rdev); 2092 + struct ni_power_info *ni_pi = ni_get_pi(rdev); 2093 + SMC_NISLANDS_SPLL_DIV_TABLE *spll_table; 2094 + NISLANDS_SMC_SCLK_VALUE sclk_params; 2095 + u32 fb_div; 2096 + u32 p_div; 2097 + u32 clk_s; 2098 + u32 clk_v; 2099 + u32 sclk = 0; 2100 + int i, ret; 2101 + u32 tmp; 2102 + 2103 + if (ni_pi->spll_table_start == 0) 2104 + return -EINVAL; 2105 + 2106 + spll_table = kzalloc(sizeof(SMC_NISLANDS_SPLL_DIV_TABLE), GFP_KERNEL); 2107 + if (spll_table == NULL) 
2108 + return -ENOMEM; 2109 + 2110 + for (i = 0; i < 256; i++) { 2111 + ret = ni_calculate_sclk_params(rdev, sclk, &sclk_params); 2112 + if (ret) 2113 + break; 2114 + 2115 + p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT; 2116 + fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT; 2117 + clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT; 2118 + clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT; 2119 + 2120 + fb_div &= ~0x00001FFF; 2121 + fb_div >>= 1; 2122 + clk_v >>= 6; 2123 + 2124 + if (p_div & ~(SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT)) 2125 + ret = -EINVAL; 2126 + 2127 + if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT)) 2128 + ret = -EINVAL; 2129 + 2130 + if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT)) 2131 + ret = -EINVAL; 2132 + 2133 + if (clk_v & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT)) 2134 + ret = -EINVAL; 2135 + 2136 + if (ret) 2137 + break; 2138 + 2139 + tmp = ((fb_div << SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT) & SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) | 2140 + ((p_div << SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) & SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_MASK); 2141 + spll_table->freq[i] = cpu_to_be32(tmp); 2142 + 2143 + tmp = ((clk_v << SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT) & SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK) | 2144 + ((clk_s << SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT) & SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK); 2145 + spll_table->ss[i] = cpu_to_be32(tmp); 2146 + 2147 + sclk += 512; 2148 + } 2149 + 2150 + if (!ret) 2151 + ret = rv770_copy_bytes_to_smc(rdev, ni_pi->spll_table_start, (u8 *)spll_table, 2152 + sizeof(SMC_NISLANDS_SPLL_DIV_TABLE), pi->sram_end); 2153 + 2154 + kfree(spll_table); 2155 + 2156 + return ret; 2157 + } 2158 + 2159 + static int 
ni_populate_mclk_value(struct radeon_device *rdev, 2160 + u32 engine_clock, 2161 + u32 memory_clock, 2162 + NISLANDS_SMC_MCLK_VALUE *mclk, 2163 + bool strobe_mode, 2164 + bool dll_state_on) 2165 + { 2166 + struct rv7xx_power_info *pi = rv770_get_pi(rdev); 2167 + struct ni_power_info *ni_pi = ni_get_pi(rdev); 2168 + u32 mpll_ad_func_cntl = ni_pi->clock_registers.mpll_ad_func_cntl; 2169 + u32 mpll_ad_func_cntl_2 = ni_pi->clock_registers.mpll_ad_func_cntl_2; 2170 + u32 mpll_dq_func_cntl = ni_pi->clock_registers.mpll_dq_func_cntl; 2171 + u32 mpll_dq_func_cntl_2 = ni_pi->clock_registers.mpll_dq_func_cntl_2; 2172 + u32 mclk_pwrmgt_cntl = ni_pi->clock_registers.mclk_pwrmgt_cntl; 2173 + u32 dll_cntl = ni_pi->clock_registers.dll_cntl; 2174 + u32 mpll_ss1 = ni_pi->clock_registers.mpll_ss1; 2175 + u32 mpll_ss2 = ni_pi->clock_registers.mpll_ss2; 2176 + struct atom_clock_dividers dividers; 2177 + u32 ibias; 2178 + u32 dll_speed; 2179 + int ret; 2180 + u32 mc_seq_misc7; 2181 + 2182 + ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM, 2183 + memory_clock, strobe_mode, &dividers); 2184 + if (ret) 2185 + return ret; 2186 + 2187 + if (!strobe_mode) { 2188 + mc_seq_misc7 = RREG32(MC_SEQ_MISC7); 2189 + 2190 + if (mc_seq_misc7 & 0x8000000) 2191 + dividers.post_div = 1; 2192 + } 2193 + 2194 + ibias = cypress_map_clkf_to_ibias(rdev, dividers.whole_fb_div); 2195 + 2196 + mpll_ad_func_cntl &= ~(CLKR_MASK | 2197 + YCLK_POST_DIV_MASK | 2198 + CLKF_MASK | 2199 + CLKFRAC_MASK | 2200 + IBIAS_MASK); 2201 + mpll_ad_func_cntl |= CLKR(dividers.ref_div); 2202 + mpll_ad_func_cntl |= YCLK_POST_DIV(dividers.post_div); 2203 + mpll_ad_func_cntl |= CLKF(dividers.whole_fb_div); 2204 + mpll_ad_func_cntl |= CLKFRAC(dividers.frac_fb_div); 2205 + mpll_ad_func_cntl |= IBIAS(ibias); 2206 + 2207 + if (dividers.vco_mode) 2208 + mpll_ad_func_cntl_2 |= VCO_MODE; 2209 + else 2210 + mpll_ad_func_cntl_2 &= ~VCO_MODE; 2211 + 2212 + if (pi->mem_gddr5) { 2213 + mpll_dq_func_cntl &= ~(CLKR_MASK | 2214 + 
YCLK_POST_DIV_MASK | 2215 + CLKF_MASK | 2216 + CLKFRAC_MASK | 2217 + IBIAS_MASK); 2218 + mpll_dq_func_cntl |= CLKR(dividers.ref_div); 2219 + mpll_dq_func_cntl |= YCLK_POST_DIV(dividers.post_div); 2220 + mpll_dq_func_cntl |= CLKF(dividers.whole_fb_div); 2221 + mpll_dq_func_cntl |= CLKFRAC(dividers.frac_fb_div); 2222 + mpll_dq_func_cntl |= IBIAS(ibias); 2223 + 2224 + if (strobe_mode) 2225 + mpll_dq_func_cntl &= ~PDNB; 2226 + else 2227 + mpll_dq_func_cntl |= PDNB; 2228 + 2229 + if (dividers.vco_mode) 2230 + mpll_dq_func_cntl_2 |= VCO_MODE; 2231 + else 2232 + mpll_dq_func_cntl_2 &= ~VCO_MODE; 2233 + } 2234 + 2235 + if (pi->mclk_ss) { 2236 + struct radeon_atom_ss ss; 2237 + u32 vco_freq = memory_clock * dividers.post_div; 2238 + 2239 + if (radeon_atombios_get_asic_ss_info(rdev, &ss, 2240 + ASIC_INTERNAL_MEMORY_SS, vco_freq)) { 2241 + u32 reference_clock = rdev->clock.mpll.reference_freq; 2242 + u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div); 2243 + u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate); 2244 + u32 clk_v = ss.percentage * 2245 + (0x4000 * dividers.whole_fb_div + 0x800 * dividers.frac_fb_div) / (clk_s * 625); 2246 + 2247 + mpll_ss1 &= ~CLKV_MASK; 2248 + mpll_ss1 |= CLKV(clk_v); 2249 + 2250 + mpll_ss2 &= ~CLKS_MASK; 2251 + mpll_ss2 |= CLKS(clk_s); 2252 + } 2253 + } 2254 + 2255 + dll_speed = rv740_get_dll_speed(pi->mem_gddr5, 2256 + memory_clock); 2257 + 2258 + mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK; 2259 + mclk_pwrmgt_cntl |= DLL_SPEED(dll_speed); 2260 + if (dll_state_on) 2261 + mclk_pwrmgt_cntl |= (MRDCKA0_PDNB | 2262 + MRDCKA1_PDNB | 2263 + MRDCKB0_PDNB | 2264 + MRDCKB1_PDNB | 2265 + MRDCKC0_PDNB | 2266 + MRDCKC1_PDNB | 2267 + MRDCKD0_PDNB | 2268 + MRDCKD1_PDNB); 2269 + else 2270 + mclk_pwrmgt_cntl &= ~(MRDCKA0_PDNB | 2271 + MRDCKA1_PDNB | 2272 + MRDCKB0_PDNB | 2273 + MRDCKB1_PDNB | 2274 + MRDCKC0_PDNB | 2275 + MRDCKC1_PDNB | 2276 + MRDCKD0_PDNB | 2277 + MRDCKD1_PDNB); 2278 + 2279 + 2280 + mclk->mclk_value = 
/*
 * Fill the per-level bSP words of an SMC software state.
 * Every level except the highest uses pi->dsp; the highest level uses
 * pi->psp.  (Both values are precomputed elsewhere in rv7xx_power_info;
 * their derivation is not visible here.)
 */
static void ni_populate_smc_sp(struct radeon_device *rdev,
			       struct radeon_ps *radeon_state,
			       NISLANDS_SMC_SWSTATE *smc_state)
{
	struct ni_ps *ps = ni_get_ps(radeon_state);
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	int i;

	for (i = 0; i < ps->performance_level_count - 1; i++)
		smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);

	smc_state->levels[ps->performance_level_count - 1].bSP =
		cpu_to_be32(pi->psp);
}

/*
 * Translate one driver performance level (rv7xx_pl) into the SMC
 * hardware level structure: PCIE gen selection, engine clock, memory
 * clock with its stutter/EDC/strobe/RTT flags, and the
 * VDDC/VDDCI/MVDD voltage entries.
 *
 * Returns 0 on success, negative error code on failure.
 */
static int ni_convert_power_level_to_smc(struct radeon_device *rdev,
					 struct rv7xx_pl *pl,
					 NISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	int ret;
	bool dll_state_on;
	u16 std_vddc;
	u32 tmp = RREG32(DC_STUTTER_CNTL);

	/* request PCIE gen2 only if the asic supports it and the state asks for it */
	level->gen2PCIE = pi->pcie_gen2 ?
		((pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0) : 0;

	ret = ni_populate_sclk_value(rdev, pl->sclk, &level->sclk);
	if (ret)
		return ret;

	level->mcFlags =  0;
	/* memory stutter only below the threshold, with UVD idle and both
	 * display stutter enables set in DC_STUTTER_CNTL */
	if (pi->mclk_stutter_mode_threshold &&
	    (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
	    !eg_pi->uvd_enabled &&
	    (tmp & DC_STUTTER_ENABLE_A) &&
	    (tmp & DC_STUTTER_ENABLE_B))
		level->mcFlags |= NISLANDS_SMC_MC_STUTTER_EN;

	if (pi->mem_gddr5) {
		/* EDC read/write above their respective mclk thresholds */
		if (pl->mclk > pi->mclk_edc_enable_threshold)
			level->mcFlags |= NISLANDS_SMC_MC_EDC_RD_FLAG;
		if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
			level->mcFlags |= NISLANDS_SMC_MC_EDC_WR_FLAG;

		level->strobeMode = cypress_get_strobe_mode_settings(rdev, pl->mclk);

		if (level->strobeMode & NISLANDS_SMC_STROBE_ENABLE) {
			/* DLL on/off comes from MC_SEQ_MISC5/6 depending on how
			 * the mclk ratio compares to the fuse value in MISC7 */
			if (cypress_get_mclk_frequency_ratio(rdev, pl->mclk, true) >=
			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
			else
				dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
		} else {
			dll_state_on = false;
			if (pl->mclk > ni_pi->mclk_rtt_mode_threshold)
				level->mcFlags |= NISLANDS_SMC_MC_RTT_ENABLE;
		}

		ret = ni_populate_mclk_value(rdev, pl->sclk, pl->mclk,
					     &level->mclk,
					     (level->strobeMode & NISLANDS_SMC_STROBE_ENABLE) != 0,
					     dll_state_on);
	} else
		ret = ni_populate_mclk_value(rdev, pl->sclk, pl->mclk, &level->mclk, 1, 1);

	if (ret)
		return ret;

	ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
					pl->vddc, &level->vddc);
	if (ret)
		return ret;

	ret = ni_get_std_voltage_value(rdev, &level->vddc, &std_vddc);
	if (ret)
		return ret;

	ni_populate_std_voltage_value(rdev, std_vddc,
				      level->vddc.index, &level->std_vddc);

	if (eg_pi->vddci_control) {
		ret = ni_populate_voltage_value(rdev, &eg_pi->vddci_voltage_table,
						pl->vddci, &level->vddci);
		if (ret)
			return ret;
	}

	ni_populate_mvdd_value(rdev, pl->mclk, &level->mvdd);

	return ret;
}

/*
 * Fill the per-level aT (arbitration time) words of a software state.
 * For each pair of adjacent levels r600_calculate_at() yields low/high
 * transition times which are scaled by bsp/pbsp into CG_R (this level)
 * and CG_L (next level).  On calculate failure, fallback times are
 * derived from R600_AH_DFLT.
 */
static int ni_populate_smc_t(struct radeon_device *rdev,
			     struct radeon_ps *radeon_state,
			     NISLANDS_SMC_SWSTATE *smc_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_ps *state = ni_get_ps(radeon_state);
	u32 a_t;
	u32 t_l, t_h;
	u32 high_bsp;
	int i, ret;

	/* at most 8 levels are representable here */
	if (state->performance_level_count >= 9)
		return -EINVAL;

	if (state->performance_level_count < 2) {
		/* single level: no transitions, full R, zero L */
		a_t = CG_R(0xffff) | CG_L(0);
		smc_state->levels[0].aT = cpu_to_be32(a_t);
		return 0;
	}

	smc_state->levels[0].aT = cpu_to_be32(0);

	for (i = 0; i <= state->performance_level_count - 2; i++) {
		if (eg_pi->uvd_enabled)
			ret = r600_calculate_at(
				1000 * (i * (eg_pi->smu_uvd_hs ? 2 : 8) + 2),
				100 * R600_AH_DFLT,
				state->performance_levels[i + 1].sclk,
				state->performance_levels[i].sclk,
				&t_l,
				&t_h);
		else
			ret = r600_calculate_at(
				1000 * (i + 1),
				100 * R600_AH_DFLT,
				state->performance_levels[i + 1].sclk,
				state->performance_levels[i].sclk,
				&t_l,
				&t_h);

		if (ret) {
			/* fallback transition times around (i+1) ms */
			t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT;
			t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
		}

		/* merge CG_R for this level into its existing aT word */
		a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
		a_t |= CG_R(t_l * pi->bsp / 20000);
		smc_state->levels[i].aT = cpu_to_be32(a_t);

		/* the highest level uses pbsp, all others bsp */
		high_bsp = (i == state->performance_level_count - 2) ?
			pi->pbsp : pi->bsp;

		a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
		smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
	}

	return 0;
}
/*
 * Program the DPM2 power containment (pulse-skip) parameters for each
 * level of a software state, plus the global PowerBoostLimit in SMC
 * SRAM.  Level 0 never skips; higher levels get a MaxPS derived from
 * the sclk gap to the previous level.
 *
 * Returns 0 on success (or when containment is disabled), negative
 * error code on invalid state.
 */
static int ni_populate_power_containment_values(struct radeon_device *rdev,
						struct radeon_ps *radeon_state,
						NISLANDS_SMC_SWSTATE *smc_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct ni_ps *state = ni_get_ps(radeon_state);
	u32 prev_sclk;
	u32 max_sclk;
	u32 min_sclk;
	int i, ret;
	u32 tdp_limit;
	u32 near_tdp_limit;
	u32 power_boost_limit;
	u8 max_ps_percent;

	if (ni_pi->enable_power_containment == false)
		return 0;

	if (state->performance_level_count == 0)
		return -EINVAL;

	if (smc_state->levelCount != state->performance_level_count)
		return -EINVAL;

	ret = ni_calculate_adjusted_tdp_limits(rdev,
					       false, /* ??? second argument's meaning not visible here — TODO confirm */
					       rdev->pm.dpm.tdp_adjustment,
					       &tdp_limit,
					       &near_tdp_limit);
	if (ret)
		return ret;

	power_boost_limit = ni_calculate_power_boost_limit(rdev, radeon_state, near_tdp_limit);

	ret = rv770_write_smc_sram_dword(rdev,
					 pi->state_table_start +
					 offsetof(NISLANDS_SMC_STATETABLE, dpm2Params) +
					 offsetof(PP_NIslands_DPM2Parameters, PowerBoostLimit),
					 ni_scale_power_for_smc(power_boost_limit, ni_get_smc_power_scaling_factor(rdev)),
					 pi->sram_end);
	/* failure to program the limit just disables boost, it is not fatal */
	if (ret)
		power_boost_limit = 0;

	/* level 0: no pulse skipping, no TDP steps */
	smc_state->levels[0].dpm2.MaxPS = 0;
	smc_state->levels[0].dpm2.NearTDPDec = 0;
	smc_state->levels[0].dpm2.AboveSafeInc = 0;
	smc_state->levels[0].dpm2.BelowSafeInc = 0;
	smc_state->levels[0].stateFlags |= power_boost_limit ? PPSMC_STATEFLAG_POWERBOOST : 0;

	for (i = 1; i < state->performance_level_count; i++) {
		prev_sclk = state->performance_levels[i-1].sclk;
		max_sclk = state->performance_levels[i].sclk;
		/* middle levels use the M percentage, the top level H */
		max_ps_percent = (i != (state->performance_level_count - 1)) ?
			NISLANDS_DPM2_MAXPS_PERCENT_M : NISLANDS_DPM2_MAXPS_PERCENT_H;

		if (max_sclk < prev_sclk)
			return -EINVAL;

		if ((max_ps_percent == 0) || (prev_sclk == max_sclk) || eg_pi->uvd_enabled)
			min_sclk = max_sclk;
		else if (1 == i)
			min_sclk = prev_sclk;
		else
			min_sclk = (prev_sclk * (u32)max_ps_percent) / 100;

		/* never skip below the lowest level's sclk */
		if (min_sclk < state->performance_levels[0].sclk)
			min_sclk = state->performance_levels[0].sclk;

		if (min_sclk == 0)
			return -EINVAL;

		smc_state->levels[i].dpm2.MaxPS =
			(u8)((NISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk)) / max_sclk);
		smc_state->levels[i].dpm2.NearTDPDec = NISLANDS_DPM2_NEAR_TDP_DEC;
		smc_state->levels[i].dpm2.AboveSafeInc = NISLANDS_DPM2_ABOVE_SAFE_INC;
		smc_state->levels[i].dpm2.BelowSafeInc = NISLANDS_DPM2_BELOW_SAFE_INC;
		smc_state->levels[i].stateFlags |=
			((i != (state->performance_level_count - 1)) && power_boost_limit) ?
			PPSMC_STATEFLAG_POWERBOOST : 0;
	}

	return 0;
}

/*
 * Program SQ (shader) power ramping for each level of a software state.
 * The NISLANDS_DPM2_SQ_RAMP_* constants are validated against the
 * SQ_POWER_THROTTLE register field widths; if any does not fit, ramping
 * is disabled and the throttle fields are saturated instead.
 */
static int ni_populate_sq_ramping_values(struct radeon_device *rdev,
					 struct radeon_ps *radeon_state,
					 NISLANDS_SMC_SWSTATE *smc_state)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct ni_ps *state = ni_get_ps(radeon_state);
	u32 sq_power_throttle;
	u32 sq_power_throttle2;
	bool enable_sq_ramping = ni_pi->enable_sq_ramping;
	int i;

	if (state->performance_level_count == 0)
		return -EINVAL;

	if (smc_state->levelCount != state->performance_level_count)
		return -EINVAL;

	if (rdev->pm.dpm.sq_ramping_threshold == 0)
		return -EINVAL;

	if (NISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT))
		enable_sq_ramping = false;

	if (NISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT))
		enable_sq_ramping = false;

	if (NISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT))
		enable_sq_ramping = false;

	if (NISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
		enable_sq_ramping = false;

	/* NOTE(review): unlike the ">" range checks above, this uses "<=",
	 * which disables ramping whenever the ratio FITS the field — looks
	 * inverted; confirm against the register spec before changing */
	if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
		enable_sq_ramping = false;

	for (i = 0; i < state->performance_level_count; i++) {
		sq_power_throttle = 0;
		sq_power_throttle2 = 0;

		if ((state->performance_levels[i].sclk >= rdev->pm.dpm.sq_ramping_threshold) &&
		    enable_sq_ramping) {
			sq_power_throttle |= MAX_POWER(NISLANDS_DPM2_SQ_RAMP_MAX_POWER);
			sq_power_throttle |= MIN_POWER(NISLANDS_DPM2_SQ_RAMP_MIN_POWER);
			sq_power_throttle2 |= MAX_POWER_DELTA(NISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA);
			sq_power_throttle2 |= STI_SIZE(NISLANDS_DPM2_SQ_RAMP_STI_SIZE);
			sq_power_throttle2 |= LTI_RATIO(NISLANDS_DPM2_SQ_RAMP_LTI_RATIO);
		} else {
			/* ramping off: saturate all throttle fields */
			sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK;
			sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
		}

		smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle);
		smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2);
	}

	return 0;
}
LTI_RATIO(NISLANDS_DPM2_SQ_RAMP_LTI_RATIO); 2581 + } else { 2582 + sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK; 2583 + sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK; 2584 + } 2585 + 2586 + smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle); 2587 + smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2); 2588 + } 2589 + 2590 + return 0; 2591 + } 2592 + 2593 + static int ni_enable_power_containment(struct radeon_device *rdev, bool enable) 2594 + { 2595 + struct ni_power_info *ni_pi = ni_get_pi(rdev); 2596 + PPSMC_Result smc_result; 2597 + int ret = 0; 2598 + 2599 + if (ni_pi->enable_power_containment) { 2600 + if (enable) { 2601 + struct radeon_ps *radeon_new_state = rdev->pm.dpm.requested_ps; 2602 + 2603 + if (!r600_is_uvd_state(radeon_new_state->class, radeon_new_state->class2)) { 2604 + smc_result = rv770_send_msg_to_smc(rdev, PPSMC_TDPClampingActive); 2605 + if (smc_result != PPSMC_Result_OK) { 2606 + ret = -EINVAL; 2607 + ni_pi->pc_enabled = false; 2608 + } else { 2609 + ni_pi->pc_enabled = true; 2610 + } 2611 + } 2612 + } else { 2613 + smc_result = rv770_send_msg_to_smc(rdev, PPSMC_TDPClampingInactive); 2614 + if (smc_result != PPSMC_Result_OK) 2615 + ret = -EINVAL; 2616 + ni_pi->pc_enabled = false; 2617 + } 2618 + } 2619 + 2620 + return ret; 2621 + } 2622 + 2623 + static int ni_convert_power_state_to_smc(struct radeon_device *rdev, 2624 + struct radeon_ps *radeon_state, 2625 + NISLANDS_SMC_SWSTATE *smc_state) 2626 + { 2627 + struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); 2628 + struct ni_power_info *ni_pi = ni_get_pi(rdev); 2629 + struct ni_ps *state = ni_get_ps(radeon_state); 2630 + int i, ret; 2631 + u32 threshold = state->performance_levels[state->performance_level_count - 1].sclk * 100 / 100; 2632 + 2633 + if (!(radeon_state->caps & ATOM_PPLIB_DISALLOW_ON_DC)) 2634 + smc_state->flags |= PPSMC_SWSTATE_FLAG_DC; 2635 + 2636 + smc_state->levelCount = 0; 2637 + 2638 + if 
(state->performance_level_count > NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE) 2639 + return -EINVAL; 2640 + 2641 + for (i = 0; i < state->performance_level_count; i++) { 2642 + ret = ni_convert_power_level_to_smc(rdev, &state->performance_levels[i], 2643 + &smc_state->levels[i]); 2644 + smc_state->levels[i].arbRefreshState = 2645 + (u8)(NISLANDS_DRIVER_STATE_ARB_INDEX + i); 2646 + 2647 + if (ret) 2648 + return ret; 2649 + 2650 + if (ni_pi->enable_power_containment) 2651 + smc_state->levels[i].displayWatermark = 2652 + (state->performance_levels[i].sclk < threshold) ? 2653 + PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH; 2654 + else 2655 + smc_state->levels[i].displayWatermark = (i < 2) ? 2656 + PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH; 2657 + 2658 + if (eg_pi->dynamic_ac_timing) 2659 + smc_state->levels[i].ACIndex = NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i; 2660 + else 2661 + smc_state->levels[i].ACIndex = 0; 2662 + 2663 + smc_state->levelCount++; 2664 + } 2665 + 2666 + rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_watermark_threshold, 2667 + cpu_to_be32(threshold / 512)); 2668 + 2669 + ni_populate_smc_sp(rdev, radeon_state, smc_state); 2670 + 2671 + ret = ni_populate_power_containment_values(rdev, radeon_state, smc_state); 2672 + if (ret) 2673 + ni_pi->enable_power_containment = false; 2674 + 2675 + ret = ni_populate_sq_ramping_values(rdev, radeon_state, smc_state); 2676 + if (ret) 2677 + ni_pi->enable_sq_ramping = false; 2678 + 2679 + return ni_populate_smc_t(rdev, radeon_state, smc_state); 2680 + } 2681 + 2682 + static int ni_upload_sw_state(struct radeon_device *rdev) 2683 + { 2684 + struct rv7xx_power_info *pi = rv770_get_pi(rdev); 2685 + struct radeon_ps *radeon_new_state = rdev->pm.dpm.requested_ps; 2686 + u16 address = pi->state_table_start + 2687 + offsetof(NISLANDS_SMC_STATETABLE, driverState); 2688 + u16 state_size = sizeof(NISLANDS_SMC_SWSTATE) + 2689 + 
((NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1) * sizeof(NISLANDS_SMC_HW_PERFORMANCE_LEVEL)); 2690 + int ret; 2691 + NISLANDS_SMC_SWSTATE *smc_state = kzalloc(state_size, GFP_KERNEL); 2692 + 2693 + if (smc_state == NULL) 2694 + return -ENOMEM; 2695 + 2696 + ret = ni_convert_power_state_to_smc(rdev, radeon_new_state, smc_state); 2697 + if (ret) 2698 + goto done; 2699 + 2700 + ret = rv770_copy_bytes_to_smc(rdev, address, (u8 *)smc_state, state_size, pi->sram_end); 2701 + 2702 + done: 2703 + kfree(smc_state); 2704 + 2705 + return ret; 2706 + } 2707 + 2708 + static int ni_set_mc_special_registers(struct radeon_device *rdev, 2709 + struct ni_mc_reg_table *table) 2710 + { 2711 + struct rv7xx_power_info *pi = rv770_get_pi(rdev); 2712 + u8 i, j, k; 2713 + u32 temp_reg; 2714 + 2715 + for (i = 0, j = table->last; i < table->last; i++) { 2716 + switch (table->mc_reg_address[i].s1) { 2717 + case MC_SEQ_MISC1 >> 2: 2718 + if (j >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE) 2719 + return -EINVAL; 2720 + temp_reg = RREG32(MC_PMG_CMD_EMRS); 2721 + table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2; 2722 + table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2; 2723 + for (k = 0; k < table->num_entries; k++) 2724 + table->mc_reg_table_entry[k].mc_data[j] = 2725 + ((temp_reg & 0xffff0000)) | 2726 + ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); 2727 + j++; 2728 + if (j >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE) 2729 + return -EINVAL; 2730 + 2731 + temp_reg = RREG32(MC_PMG_CMD_MRS); 2732 + table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2; 2733 + table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2; 2734 + for(k = 0; k < table->num_entries; k++) { 2735 + table->mc_reg_table_entry[k].mc_data[j] = 2736 + (temp_reg & 0xffff0000) | 2737 + (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); 2738 + if (!pi->mem_gddr5) 2739 + table->mc_reg_table_entry[k].mc_data[j] |= 0x100; 2740 + } 2741 + j++; 2742 + if (j > SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE) 2743 + return 
-EINVAL; 2744 + break; 2745 + case MC_SEQ_RESERVE_M >> 2: 2746 + temp_reg = RREG32(MC_PMG_CMD_MRS1); 2747 + table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2; 2748 + table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2; 2749 + for (k = 0; k < table->num_entries; k++) 2750 + table->mc_reg_table_entry[k].mc_data[j] = 2751 + (temp_reg & 0xffff0000) | 2752 + (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); 2753 + j++; 2754 + if (j > SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE) 2755 + return -EINVAL; 2756 + break; 2757 + default: 2758 + break; 2759 + } 2760 + } 2761 + 2762 + table->last = j; 2763 + 2764 + return 0; 2765 + } 2766 + 2767 + static bool ni_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg) 2768 + { 2769 + bool result = true; 2770 + 2771 + switch (in_reg) { 2772 + case MC_SEQ_RAS_TIMING >> 2: 2773 + *out_reg = MC_SEQ_RAS_TIMING_LP >> 2; 2774 + break; 2775 + case MC_SEQ_CAS_TIMING >> 2: 2776 + *out_reg = MC_SEQ_CAS_TIMING_LP >> 2; 2777 + break; 2778 + case MC_SEQ_MISC_TIMING >> 2: 2779 + *out_reg = MC_SEQ_MISC_TIMING_LP >> 2; 2780 + break; 2781 + case MC_SEQ_MISC_TIMING2 >> 2: 2782 + *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2; 2783 + break; 2784 + case MC_SEQ_RD_CTL_D0 >> 2: 2785 + *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2; 2786 + break; 2787 + case MC_SEQ_RD_CTL_D1 >> 2: 2788 + *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2; 2789 + break; 2790 + case MC_SEQ_WR_CTL_D0 >> 2: 2791 + *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2; 2792 + break; 2793 + case MC_SEQ_WR_CTL_D1 >> 2: 2794 + *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2; 2795 + break; 2796 + case MC_PMG_CMD_EMRS >> 2: 2797 + *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2; 2798 + break; 2799 + case MC_PMG_CMD_MRS >> 2: 2800 + *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2; 2801 + break; 2802 + case MC_PMG_CMD_MRS1 >> 2: 2803 + *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2; 2804 + break; 2805 + case MC_SEQ_PMG_TIMING >> 2: 2806 + *out_reg = MC_SEQ_PMG_TIMING_LP >> 2; 2807 + break; 2808 + case MC_PMG_CMD_MRS2 >> 2: 2809 + *out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2; 2810 
+ break; 2811 + default: 2812 + result = false; 2813 + break; 2814 + } 2815 + 2816 + return result; 2817 + } 2818 + 2819 + static void ni_set_valid_flag(struct ni_mc_reg_table *table) 2820 + { 2821 + u8 i, j; 2822 + 2823 + for (i = 0; i < table->last; i++) { 2824 + for (j = 1; j < table->num_entries; j++) { 2825 + if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) { 2826 + table->valid_flag |= 1 << i; 2827 + break; 2828 + } 2829 + } 2830 + } 2831 + } 2832 + 2833 + static void ni_set_s0_mc_reg_index(struct ni_mc_reg_table *table) 2834 + { 2835 + u32 i; 2836 + u16 address; 2837 + 2838 + for (i = 0; i < table->last; i++) 2839 + table->mc_reg_address[i].s0 = 2840 + ni_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ? 2841 + address : table->mc_reg_address[i].s1; 2842 + } 2843 + 2844 + static int ni_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table, 2845 + struct ni_mc_reg_table *ni_table) 2846 + { 2847 + u8 i, j; 2848 + 2849 + if (table->last > SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE) 2850 + return -EINVAL; 2851 + if (table->num_entries > MAX_AC_TIMING_ENTRIES) 2852 + return -EINVAL; 2853 + 2854 + for (i = 0; i < table->last; i++) 2855 + ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; 2856 + ni_table->last = table->last; 2857 + 2858 + for (i = 0; i < table->num_entries; i++) { 2859 + ni_table->mc_reg_table_entry[i].mclk_max = 2860 + table->mc_reg_table_entry[i].mclk_max; 2861 + for (j = 0; j < table->last; j++) 2862 + ni_table->mc_reg_table_entry[i].mc_data[j] = 2863 + table->mc_reg_table_entry[i].mc_data[j]; 2864 + } 2865 + ni_table->num_entries = table->num_entries; 2866 + 2867 + return 0; 2868 + } 2869 + 2870 + static int ni_initialize_mc_reg_table(struct radeon_device *rdev) 2871 + { 2872 + struct ni_power_info *ni_pi = ni_get_pi(rdev); 2873 + int ret; 2874 + struct atom_mc_reg_table *table; 2875 + struct ni_mc_reg_table *ni_table = &ni_pi->mc_reg_table; 2876 + u8 module_index = 
rv770_get_memory_module_index(rdev); 2877 + 2878 + table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL); 2879 + if (!table) 2880 + return -ENOMEM; 2881 + 2882 + WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING)); 2883 + WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING)); 2884 + WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING)); 2885 + WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2)); 2886 + WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS)); 2887 + WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS)); 2888 + WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1)); 2889 + WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0)); 2890 + WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1)); 2891 + WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0)); 2892 + WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1)); 2893 + WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING)); 2894 + WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2)); 2895 + 2896 + ret = radeon_atom_init_mc_reg_table(rdev, module_index, table); 2897 + 2898 + if (ret) 2899 + goto init_mc_done; 2900 + 2901 + ret = ni_copy_vbios_mc_reg_table(table, ni_table); 2902 + 2903 + if (ret) 2904 + goto init_mc_done; 2905 + 2906 + ni_set_s0_mc_reg_index(ni_table); 2907 + 2908 + ret = ni_set_mc_special_registers(rdev, ni_table); 2909 + 2910 + if (ret) 2911 + goto init_mc_done; 2912 + 2913 + ni_set_valid_flag(ni_table); 2914 + 2915 + init_mc_done: 2916 + kfree(table); 2917 + 2918 + return ret; 2919 + } 2920 + 2921 + static void ni_populate_mc_reg_addresses(struct radeon_device *rdev, 2922 + SMC_NIslands_MCRegisters *mc_reg_table) 2923 + { 2924 + struct ni_power_info *ni_pi = ni_get_pi(rdev); 2925 + u32 i, j; 2926 + 2927 + for (i = 0, j = 0; j < ni_pi->mc_reg_table.last; j++) { 2928 + if (ni_pi->mc_reg_table.valid_flag & (1 << j)) { 2929 + if (i >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE) 2930 + break; 2931 + mc_reg_table->address[i].s0 = 2932 + 
/*
 * Copy the valid MC register values of one table entry into an SMC
 * register set, big-endian, packed in valid-flag order.
 */
static void ni_convert_mc_registers(struct ni_mc_reg_entry *entry,
				    SMC_NIslands_MCRegisterSet *data,
				    u32 num_entries, u32 valid_flag)
{
	u32 i, j;

	for (i = 0, j = 0; j < num_entries; j++) {
		if (valid_flag & (1 << j)) {
			data->value[i] = cpu_to_be32(entry->mc_data[j]);
			i++;
		}
	}
}

/*
 * Pick the MC register table entry matching a level's mclk (first entry
 * whose mclk_max covers it, else the last entry) and convert it to SMC
 * format.
 */
static void ni_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
						 struct rv7xx_pl *pl,
						 SMC_NIslands_MCRegisterSet *mc_reg_table_data)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 i = 0;

	for (i = 0; i < ni_pi->mc_reg_table.num_entries; i++) {
		if (pl->mclk <= ni_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
			break;
	}

	/* no entry covers this mclk: fall back to the last one */
	if ((i == ni_pi->mc_reg_table.num_entries) && (i > 0))
		--i;

	ni_convert_mc_registers(&ni_pi->mc_reg_table.mc_reg_table_entry[i],
				mc_reg_table_data,
				ni_pi->mc_reg_table.last,
				ni_pi->mc_reg_table.valid_flag);
}

/*
 * Fill the driver-state slots of the SMC MC register table, one set per
 * performance level of the given state.
 */
static void ni_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
					   struct radeon_ps *radeon_state,
					   SMC_NIslands_MCRegisters *mc_reg_table)
{
	struct ni_ps *state = ni_get_ps(radeon_state);
	int i;

	for (i = 0; i < state->performance_level_count; i++) {
		ni_convert_mc_reg_table_entry_to_smc(rdev,
						     &state->performance_levels[i],
						     &mc_reg_table->data[NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i]);
	}
}

/*
 * Build and upload the complete SMC MC register table for the boot
 * state: addresses, slot 0 from the boot level, slot 1 from the first
 * raw table entry, and the driver-state slots.
 *
 * Returns 0 on success, negative error code from the SMC copy.
 */
static int ni_populate_mc_reg_table(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
	struct ni_ps *boot_state = ni_get_ps(radeon_boot_state);
	SMC_NIslands_MCRegisters *mc_reg_table = &ni_pi->smc_mc_reg_table;

	memset(mc_reg_table, 0, sizeof(SMC_NIslands_MCRegisters));

	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_seq_index, 1);

	ni_populate_mc_reg_addresses(rdev, mc_reg_table);

	ni_convert_mc_reg_table_entry_to_smc(rdev, &boot_state->performance_levels[0],
					     &mc_reg_table->data[0]);

	ni_convert_mc_registers(&ni_pi->mc_reg_table.mc_reg_table_entry[0],
				&mc_reg_table->data[1],
				ni_pi->mc_reg_table.last,
				ni_pi->mc_reg_table.valid_flag);

	ni_convert_mc_reg_table_to_smc(rdev, radeon_boot_state, mc_reg_table);

	return rv770_copy_bytes_to_smc(rdev, eg_pi->mc_reg_table_start,
				       (u8 *)mc_reg_table,
				       sizeof(SMC_NIslands_MCRegisters),
				       pi->sram_end);
}

/*
 * Upload only the driver-state slots of the MC register table for the
 * requested state (one SMC_NIslands_MCRegisterSet per level).
 *
 * Returns 0 on success, negative error code from the SMC copy.
 */
static int ni_upload_mc_reg_table(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct radeon_ps *radeon_new_state = rdev->pm.dpm.requested_ps;
	struct ni_ps *ni_new_state = ni_get_ps(radeon_new_state);
	SMC_NIslands_MCRegisters *mc_reg_table = &ni_pi->smc_mc_reg_table;
	u16 address;

	memset(mc_reg_table, 0, sizeof(SMC_NIslands_MCRegisters));

	ni_convert_mc_reg_table_to_smc(rdev, radeon_new_state, mc_reg_table);

	address = eg_pi->mc_reg_table_start +
		(u16)offsetof(SMC_NIslands_MCRegisters, data[NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]);

	return rv770_copy_bytes_to_smc(rdev, address,
				       (u8 *)&mc_reg_table->data[NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT],
				       sizeof(SMC_NIslands_MCRegisterSet) * ni_new_state->performance_level_count,
				       pi->sram_end);
}
/*
 * Build the CAC leakage lookup table (temperature x voltage) from the
 * driver's leakage coefficients.  Temperatures step in 8 degC units
 * (stored as millidegrees), clamped to leakage_minimum_temperature.
 * Unused voltage columns are filled with the maximum observed leakage.
 *
 * Returns 0 (no failure paths).
 */
static int ni_init_driver_calculated_leakage_table(struct radeon_device *rdev,
						   PP_NIslands_CACTABLES *cac_tables)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	u32 leakage = 0;
	unsigned int i, j, table_size;
	s32 t;
	u32 smc_leakage, max_leakage = 0;
	u32 scaling_factor;

	table_size = eg_pi->vddc_voltage_table.count;

	if (SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES < table_size)
		table_size = SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;

	scaling_factor = ni_get_smc_power_scaling_factor(rdev);

	for (i = 0; i < SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++) {
		for (j = 0; j < table_size; j++) {
			/* temperature for this row: (i+1)*8 degC in milli-units */
			t = (1000 * ((i + 1) * 8));

			if (t < ni_pi->cac_data.leakage_minimum_temperature)
				t = ni_pi->cac_data.leakage_minimum_temperature;

			ni_calculate_leakage_for_v_and_t(rdev,
							 &ni_pi->cac_data.leakage_coefficients,
							 eg_pi->vddc_voltage_table.entries[j].value,
							 t,
							 ni_pi->cac_data.i_leakage,
							 &leakage);

			smc_leakage = ni_scale_power_for_smc(leakage, scaling_factor) / 1000;
			if (smc_leakage > max_leakage)
				max_leakage = smc_leakage;

			cac_tables->cac_lkge_lut[i][j] = cpu_to_be32(smc_leakage);
		}
	}

	/* pad unused voltage columns with the worst-case leakage */
	for (j = table_size; j < SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
		for (i = 0; i < SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
			cac_tables->cac_lkge_lut[i][j] = cpu_to_be32(max_leakage);
	}
	return 0;
}

/*
 * Build the CAC leakage lookup table from vbios-provided per-voltage
 * leakage values (same value replicated down every temperature row).
 * Unused voltage columns are filled with the maximum observed leakage.
 *
 * Returns 0 on success, -EINVAL on an empty/unusable leakage table.
 */
static int ni_init_simplified_leakage_table(struct radeon_device *rdev,
					    PP_NIslands_CACTABLES *cac_tables)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct radeon_cac_leakage_table *leakage_table =
		&rdev->pm.dpm.dyn_state.cac_leakage_table;
	u32 i, j, table_size;
	u32 smc_leakage, max_leakage = 0;
	u32 scaling_factor;

	/* NOTE(review): leakage_table is the address of an embedded member,
	 * so it can never be NULL — this check is dead code */
	if (!leakage_table)
		return -EINVAL;

	table_size = leakage_table->count;

	/* use the smaller of voltage-table and leakage-table counts */
	if (eg_pi->vddc_voltage_table.count != table_size)
		table_size = (eg_pi->vddc_voltage_table.count < leakage_table->count) ?
			eg_pi->vddc_voltage_table.count : leakage_table->count;

	if (SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES < table_size)
		table_size = SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;

	if (table_size == 0)
		return -EINVAL;

	scaling_factor = ni_get_smc_power_scaling_factor(rdev);

	for (j = 0; j < table_size; j++) {
		smc_leakage = leakage_table->entries[j].leakage;

		if (smc_leakage > max_leakage)
			max_leakage = smc_leakage;

		for (i = 0; i < SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
			cac_tables->cac_lkge_lut[i][j] =
				cpu_to_be32(ni_scale_power_for_smc(smc_leakage, scaling_factor));
	}

	for (j = table_size; j < SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
		for (i = 0; i < SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
			cac_tables->cac_lkge_lut[i][j] =
				cpu_to_be32(ni_scale_power_for_smc(max_leakage, scaling_factor));
	}
	return 0;
}

/*
 * Program CG_CAC_CTRL, build the CAC data/leakage tables (driver
 * calculated or vbios simplified) and upload them to SMC SRAM.
 *
 * Always returns 0 (or -ENOMEM): on any table/copy failure CAC and
 * power containment are simply disabled rather than failing dpm init.
 */
static int ni_initialize_smc_cac_tables(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	PP_NIslands_CACTABLES *cac_tables = NULL;
	int i, ret;
	u32 reg;

	if (ni_pi->enable_cac == false)
		return 0;

	cac_tables = kzalloc(sizeof(PP_NIslands_CACTABLES), GFP_KERNEL);
	if (!cac_tables)
		return -ENOMEM;

	reg = RREG32(CG_CAC_CTRL) & ~(TID_CNT_MASK | TID_UNIT_MASK);
	reg |= (TID_CNT(ni_pi->cac_weights->tid_cnt) |
		TID_UNIT(ni_pi->cac_weights->tid_unit));
	WREG32(CG_CAC_CTRL, reg);

	for (i = 0; i < NISLANDS_DCCAC_MAX_LEVELS; i++)
		ni_pi->dc_cac_table[i] = ni_pi->cac_weights->dc_cac[i];

	for (i = 0; i < SMC_NISLANDS_BIF_LUT_NUM_OF_ENTRIES; i++)
		cac_tables->cac_bif_lut[i] = ni_pi->cac_weights->pcie_cac[i];

	ni_pi->cac_data.i_leakage = rdev->pm.dpm.cac_leakage;
	ni_pi->cac_data.pwr_const = 0;
	ni_pi->cac_data.dc_cac_value = ni_pi->dc_cac_table[NISLANDS_DCCAC_LEVEL_0];
	ni_pi->cac_data.bif_cac_value = 0;
	ni_pi->cac_data.mc_wr_weight = ni_pi->cac_weights->mc_write_weight;
	ni_pi->cac_data.mc_rd_weight = ni_pi->cac_weights->mc_read_weight;
	ni_pi->cac_data.allow_ovrflw = 0;
	ni_pi->cac_data.l2num_win_tdp = ni_pi->lta_window_size;
	ni_pi->cac_data.num_win_tdp = 0;
	ni_pi->cac_data.lts_truncate_n = ni_pi->lts_truncate;

	if (ni_pi->driver_calculate_cac_leakage)
		ret = ni_init_driver_calculated_leakage_table(rdev, cac_tables);
	else
		ret = ni_init_simplified_leakage_table(rdev, cac_tables);

	if (ret)
		goto done_free;

	cac_tables->pwr_const = cpu_to_be32(ni_pi->cac_data.pwr_const);
	cac_tables->dc_cacValue = cpu_to_be32(ni_pi->cac_data.dc_cac_value);
	cac_tables->bif_cacValue = cpu_to_be32(ni_pi->cac_data.bif_cac_value);
	cac_tables->AllowOvrflw = ni_pi->cac_data.allow_ovrflw;
	cac_tables->MCWrWeight = ni_pi->cac_data.mc_wr_weight;
	cac_tables->MCRdWeight = ni_pi->cac_data.mc_rd_weight;
	cac_tables->numWin_TDP = ni_pi->cac_data.num_win_tdp;
	cac_tables->l2numWin_TDP = ni_pi->cac_data.l2num_win_tdp;
	cac_tables->lts_truncate_n = ni_pi->cac_data.lts_truncate_n;

	ret = rv770_copy_bytes_to_smc(rdev, ni_pi->cac_table_start, (u8 *)cac_tables,
				      sizeof(PP_NIslands_CACTABLES), pi->sram_end);

done_free:
	if (ret) {
		/* degrade gracefully: run without CAC/power containment */
		ni_pi->enable_cac = false;
		ni_pi->enable_power_containment = false;
	}

	kfree(cac_tables);

	return 0;
}
ni_pi->cac_table_start, (u8 *)cac_tables, 3194 + sizeof(PP_NIslands_CACTABLES), pi->sram_end); 3195 + 3196 + done_free: 3197 + if (ret) { 3198 + ni_pi->enable_cac = false; 3199 + ni_pi->enable_power_containment = false; 3200 + } 3201 + 3202 + kfree(cac_tables); 3203 + 3204 + return 0; 3205 + } 3206 + 3207 + static int ni_initialize_hardware_cac_manager(struct radeon_device *rdev) 3208 + { 3209 + struct ni_power_info *ni_pi = ni_get_pi(rdev); 3210 + u32 reg; 3211 + 3212 + if (!ni_pi->enable_cac || 3213 + !ni_pi->cac_configuration_required) 3214 + return 0; 3215 + 3216 + if (ni_pi->cac_weights == NULL) 3217 + return -EINVAL; 3218 + 3219 + reg = RREG32_CG(CG_CAC_REGION_1_WEIGHT_0) & ~(WEIGHT_TCP_SIG0_MASK | 3220 + WEIGHT_TCP_SIG1_MASK | 3221 + WEIGHT_TA_SIG_MASK); 3222 + reg |= (WEIGHT_TCP_SIG0(ni_pi->cac_weights->weight_tcp_sig0) | 3223 + WEIGHT_TCP_SIG1(ni_pi->cac_weights->weight_tcp_sig1) | 3224 + WEIGHT_TA_SIG(ni_pi->cac_weights->weight_ta_sig)); 3225 + WREG32_CG(CG_CAC_REGION_1_WEIGHT_0, reg); 3226 + 3227 + reg = RREG32_CG(CG_CAC_REGION_1_WEIGHT_1) & ~(WEIGHT_TCC_EN0_MASK | 3228 + WEIGHT_TCC_EN1_MASK | 3229 + WEIGHT_TCC_EN2_MASK); 3230 + reg |= (WEIGHT_TCC_EN0(ni_pi->cac_weights->weight_tcc_en0) | 3231 + WEIGHT_TCC_EN1(ni_pi->cac_weights->weight_tcc_en1) | 3232 + WEIGHT_TCC_EN2(ni_pi->cac_weights->weight_tcc_en2)); 3233 + WREG32_CG(CG_CAC_REGION_1_WEIGHT_1, reg); 3234 + 3235 + reg = RREG32_CG(CG_CAC_REGION_2_WEIGHT_0) & ~(WEIGHT_CB_EN0_MASK | 3236 + WEIGHT_CB_EN1_MASK | 3237 + WEIGHT_CB_EN2_MASK | 3238 + WEIGHT_CB_EN3_MASK); 3239 + reg |= (WEIGHT_CB_EN0(ni_pi->cac_weights->weight_cb_en0) | 3240 + WEIGHT_CB_EN1(ni_pi->cac_weights->weight_cb_en1) | 3241 + WEIGHT_CB_EN2(ni_pi->cac_weights->weight_cb_en2) | 3242 + WEIGHT_CB_EN3(ni_pi->cac_weights->weight_cb_en3)); 3243 + WREG32_CG(CG_CAC_REGION_2_WEIGHT_0, reg); 3244 + 3245 + reg = RREG32_CG(CG_CAC_REGION_2_WEIGHT_1) & ~(WEIGHT_DB_SIG0_MASK | 3246 + WEIGHT_DB_SIG1_MASK | 3247 + WEIGHT_DB_SIG2_MASK | 3248 + 
WEIGHT_DB_SIG3_MASK); 3249 + reg |= (WEIGHT_DB_SIG0(ni_pi->cac_weights->weight_db_sig0) | 3250 + WEIGHT_DB_SIG1(ni_pi->cac_weights->weight_db_sig1) | 3251 + WEIGHT_DB_SIG2(ni_pi->cac_weights->weight_db_sig2) | 3252 + WEIGHT_DB_SIG3(ni_pi->cac_weights->weight_db_sig3)); 3253 + WREG32_CG(CG_CAC_REGION_2_WEIGHT_1, reg); 3254 + 3255 + reg = RREG32_CG(CG_CAC_REGION_2_WEIGHT_2) & ~(WEIGHT_SXM_SIG0_MASK | 3256 + WEIGHT_SXM_SIG1_MASK | 3257 + WEIGHT_SXM_SIG2_MASK | 3258 + WEIGHT_SXS_SIG0_MASK | 3259 + WEIGHT_SXS_SIG1_MASK); 3260 + reg |= (WEIGHT_SXM_SIG0(ni_pi->cac_weights->weight_sxm_sig0) | 3261 + WEIGHT_SXM_SIG1(ni_pi->cac_weights->weight_sxm_sig1) | 3262 + WEIGHT_SXM_SIG2(ni_pi->cac_weights->weight_sxm_sig2) | 3263 + WEIGHT_SXS_SIG0(ni_pi->cac_weights->weight_sxs_sig0) | 3264 + WEIGHT_SXS_SIG1(ni_pi->cac_weights->weight_sxs_sig1)); 3265 + WREG32_CG(CG_CAC_REGION_2_WEIGHT_2, reg); 3266 + 3267 + reg = RREG32_CG(CG_CAC_REGION_3_WEIGHT_0) & ~(WEIGHT_XBR_0_MASK | 3268 + WEIGHT_XBR_1_MASK | 3269 + WEIGHT_XBR_2_MASK | 3270 + WEIGHT_SPI_SIG0_MASK); 3271 + reg |= (WEIGHT_XBR_0(ni_pi->cac_weights->weight_xbr_0) | 3272 + WEIGHT_XBR_1(ni_pi->cac_weights->weight_xbr_1) | 3273 + WEIGHT_XBR_2(ni_pi->cac_weights->weight_xbr_2) | 3274 + WEIGHT_SPI_SIG0(ni_pi->cac_weights->weight_spi_sig0)); 3275 + WREG32_CG(CG_CAC_REGION_3_WEIGHT_0, reg); 3276 + 3277 + reg = RREG32_CG(CG_CAC_REGION_3_WEIGHT_1) & ~(WEIGHT_SPI_SIG1_MASK | 3278 + WEIGHT_SPI_SIG2_MASK | 3279 + WEIGHT_SPI_SIG3_MASK | 3280 + WEIGHT_SPI_SIG4_MASK | 3281 + WEIGHT_SPI_SIG5_MASK); 3282 + reg |= (WEIGHT_SPI_SIG1(ni_pi->cac_weights->weight_spi_sig1) | 3283 + WEIGHT_SPI_SIG2(ni_pi->cac_weights->weight_spi_sig2) | 3284 + WEIGHT_SPI_SIG3(ni_pi->cac_weights->weight_spi_sig3) | 3285 + WEIGHT_SPI_SIG4(ni_pi->cac_weights->weight_spi_sig4) | 3286 + WEIGHT_SPI_SIG5(ni_pi->cac_weights->weight_spi_sig5)); 3287 + WREG32_CG(CG_CAC_REGION_3_WEIGHT_1, reg); 3288 + 3289 + reg = RREG32_CG(CG_CAC_REGION_4_WEIGHT_0) & ~(WEIGHT_LDS_SIG0_MASK | 3290 + 
WEIGHT_LDS_SIG1_MASK | 3291 + WEIGHT_SC_MASK); 3292 + reg |= (WEIGHT_LDS_SIG0(ni_pi->cac_weights->weight_lds_sig0) | 3293 + WEIGHT_LDS_SIG1(ni_pi->cac_weights->weight_lds_sig1) | 3294 + WEIGHT_SC(ni_pi->cac_weights->weight_sc)); 3295 + WREG32_CG(CG_CAC_REGION_4_WEIGHT_0, reg); 3296 + 3297 + reg = RREG32_CG(CG_CAC_REGION_4_WEIGHT_1) & ~(WEIGHT_BIF_MASK | 3298 + WEIGHT_CP_MASK | 3299 + WEIGHT_PA_SIG0_MASK | 3300 + WEIGHT_PA_SIG1_MASK | 3301 + WEIGHT_VGT_SIG0_MASK); 3302 + reg |= (WEIGHT_BIF(ni_pi->cac_weights->weight_bif) | 3303 + WEIGHT_CP(ni_pi->cac_weights->weight_cp) | 3304 + WEIGHT_PA_SIG0(ni_pi->cac_weights->weight_pa_sig0) | 3305 + WEIGHT_PA_SIG1(ni_pi->cac_weights->weight_pa_sig1) | 3306 + WEIGHT_VGT_SIG0(ni_pi->cac_weights->weight_vgt_sig0)); 3307 + WREG32_CG(CG_CAC_REGION_4_WEIGHT_1, reg); 3308 + 3309 + reg = RREG32_CG(CG_CAC_REGION_4_WEIGHT_2) & ~(WEIGHT_VGT_SIG1_MASK | 3310 + WEIGHT_VGT_SIG2_MASK | 3311 + WEIGHT_DC_SIG0_MASK | 3312 + WEIGHT_DC_SIG1_MASK | 3313 + WEIGHT_DC_SIG2_MASK); 3314 + reg |= (WEIGHT_VGT_SIG1(ni_pi->cac_weights->weight_vgt_sig1) | 3315 + WEIGHT_VGT_SIG2(ni_pi->cac_weights->weight_vgt_sig2) | 3316 + WEIGHT_DC_SIG0(ni_pi->cac_weights->weight_dc_sig0) | 3317 + WEIGHT_DC_SIG1(ni_pi->cac_weights->weight_dc_sig1) | 3318 + WEIGHT_DC_SIG2(ni_pi->cac_weights->weight_dc_sig2)); 3319 + WREG32_CG(CG_CAC_REGION_4_WEIGHT_2, reg); 3320 + 3321 + reg = RREG32_CG(CG_CAC_REGION_4_WEIGHT_3) & ~(WEIGHT_DC_SIG3_MASK | 3322 + WEIGHT_UVD_SIG0_MASK | 3323 + WEIGHT_UVD_SIG1_MASK | 3324 + WEIGHT_SPARE0_MASK | 3325 + WEIGHT_SPARE1_MASK); 3326 + reg |= (WEIGHT_DC_SIG3(ni_pi->cac_weights->weight_dc_sig3) | 3327 + WEIGHT_UVD_SIG0(ni_pi->cac_weights->weight_uvd_sig0) | 3328 + WEIGHT_UVD_SIG1(ni_pi->cac_weights->weight_uvd_sig1) | 3329 + WEIGHT_SPARE0(ni_pi->cac_weights->weight_spare0) | 3330 + WEIGHT_SPARE1(ni_pi->cac_weights->weight_spare1)); 3331 + WREG32_CG(CG_CAC_REGION_4_WEIGHT_3, reg); 3332 + 3333 + reg = RREG32_CG(CG_CAC_REGION_5_WEIGHT_0) & 
~(WEIGHT_SQ_VSP_MASK | 3334 + WEIGHT_SQ_VSP0_MASK); 3335 + reg |= (WEIGHT_SQ_VSP(ni_pi->cac_weights->weight_sq_vsp) | 3336 + WEIGHT_SQ_VSP0(ni_pi->cac_weights->weight_sq_vsp0)); 3337 + WREG32_CG(CG_CAC_REGION_5_WEIGHT_0, reg); 3338 + 3339 + reg = RREG32_CG(CG_CAC_REGION_5_WEIGHT_1) & ~(WEIGHT_SQ_GPR_MASK); 3340 + reg |= WEIGHT_SQ_GPR(ni_pi->cac_weights->weight_sq_gpr); 3341 + WREG32_CG(CG_CAC_REGION_5_WEIGHT_1, reg); 3342 + 3343 + reg = RREG32_CG(CG_CAC_REGION_4_OVERRIDE_4) & ~(OVR_MODE_SPARE_0_MASK | 3344 + OVR_VAL_SPARE_0_MASK | 3345 + OVR_MODE_SPARE_1_MASK | 3346 + OVR_VAL_SPARE_1_MASK); 3347 + reg |= (OVR_MODE_SPARE_0(ni_pi->cac_weights->ovr_mode_spare_0) | 3348 + OVR_VAL_SPARE_0(ni_pi->cac_weights->ovr_val_spare_0) | 3349 + OVR_MODE_SPARE_1(ni_pi->cac_weights->ovr_mode_spare_1) | 3350 + OVR_VAL_SPARE_1(ni_pi->cac_weights->ovr_val_spare_1)); 3351 + WREG32_CG(CG_CAC_REGION_4_OVERRIDE_4, reg); 3352 + 3353 + reg = RREG32(SQ_CAC_THRESHOLD) & ~(VSP_MASK | 3354 + VSP0_MASK | 3355 + GPR_MASK); 3356 + reg |= (VSP(ni_pi->cac_weights->vsp) | 3357 + VSP0(ni_pi->cac_weights->vsp0) | 3358 + GPR(ni_pi->cac_weights->gpr)); 3359 + WREG32(SQ_CAC_THRESHOLD, reg); 3360 + 3361 + reg = (MCDW_WR_ENABLE | 3362 + MCDX_WR_ENABLE | 3363 + MCDY_WR_ENABLE | 3364 + MCDZ_WR_ENABLE | 3365 + INDEX(0x09D4)); 3366 + WREG32(MC_CG_CONFIG, reg); 3367 + 3368 + reg = (READ_WEIGHT(ni_pi->cac_weights->mc_read_weight) | 3369 + WRITE_WEIGHT(ni_pi->cac_weights->mc_write_weight) | 3370 + ALLOW_OVERFLOW); 3371 + WREG32(MC_CG_DATAPORT, reg); 3372 + 3373 + return 0; 3374 + } 3375 + 3376 + static int ni_enable_smc_cac(struct radeon_device *rdev, bool enable) 3377 + { 3378 + struct ni_power_info *ni_pi = ni_get_pi(rdev); 3379 + int ret = 0; 3380 + PPSMC_Result smc_result; 3381 + 3382 + if (ni_pi->enable_cac) { 3383 + if (enable) { 3384 + struct radeon_ps *radeon_new_state = rdev->pm.dpm.requested_ps; 3385 + 3386 + if (!r600_is_uvd_state(radeon_new_state->class, radeon_new_state->class2)) { 3387 + smc_result = 
rv770_send_msg_to_smc(rdev, PPSMC_MSG_CollectCAC_PowerCorreln); 3388 + 3389 + if (ni_pi->support_cac_long_term_average) { 3390 + smc_result = rv770_send_msg_to_smc(rdev, PPSMC_CACLongTermAvgEnable); 3391 + if (PPSMC_Result_OK != smc_result) 3392 + ni_pi->support_cac_long_term_average = false; 3393 + } 3394 + 3395 + smc_result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac); 3396 + if (PPSMC_Result_OK != smc_result) 3397 + ret = -EINVAL; 3398 + 3399 + ni_pi->cac_enabled = (PPSMC_Result_OK == smc_result) ? true : false; 3400 + } 3401 + } else if (ni_pi->cac_enabled) { 3402 + smc_result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac); 3403 + 3404 + ni_pi->cac_enabled = false; 3405 + 3406 + if (ni_pi->support_cac_long_term_average) { 3407 + smc_result = rv770_send_msg_to_smc(rdev, PPSMC_CACLongTermAvgDisable); 3408 + if (PPSMC_Result_OK != smc_result) 3409 + ni_pi->support_cac_long_term_average = false; 3410 + } 3411 + } 3412 + } 3413 + 3414 + return ret; 3415 + } 3416 + 3417 + static int ni_pcie_performance_request(struct radeon_device *rdev, 3418 + u8 perf_req, bool advertise) 3419 + { 3420 + struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); 3421 + 3422 + #if defined(CONFIG_ACPI) 3423 + if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) || 3424 + (perf_req == PCIE_PERF_REQ_PECI_GEN2)) { 3425 + if (eg_pi->pcie_performance_request_registered == false) 3426 + radeon_acpi_pcie_notify_device_ready(rdev); 3427 + eg_pi->pcie_performance_request_registered = true; 3428 + return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise); 3429 + } else if ((perf_req == PCIE_PERF_REQ_REMOVE_REGISTRY) && 3430 + eg_pi->pcie_performance_request_registered) { 3431 + eg_pi->pcie_performance_request_registered = false; 3432 + return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise); 3433 + } 3434 + #endif 3435 + return 0; 3436 + } 3437 + 3438 + static int ni_advertise_gen2_capability(struct radeon_device *rdev) 3439 + { 3440 + struct rv7xx_power_info *pi = 
rv770_get_pi(rdev); 3441 + u32 tmp; 3442 + 3443 + tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); 3444 + 3445 + if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) && 3446 + (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) 3447 + pi->pcie_gen2 = true; 3448 + else 3449 + pi->pcie_gen2 = false; 3450 + 3451 + if (!pi->pcie_gen2) 3452 + ni_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, true); 3453 + 3454 + return 0; 3455 + } 3456 + 3457 + static void ni_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev, 3458 + bool enable) 3459 + { 3460 + struct rv7xx_power_info *pi = rv770_get_pi(rdev); 3461 + u32 tmp, bif; 3462 + 3463 + tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); 3464 + 3465 + if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) && 3466 + (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) { 3467 + if (enable) { 3468 + if (!pi->boot_in_gen2) { 3469 + bif = RREG32(CG_BIF_REQ_AND_RSP) & ~CG_CLIENT_REQ_MASK; 3470 + bif |= CG_CLIENT_REQ(0xd); 3471 + WREG32(CG_BIF_REQ_AND_RSP, bif); 3472 + } 3473 + tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK; 3474 + tmp |= LC_HW_VOLTAGE_IF_CONTROL(1); 3475 + tmp |= LC_GEN2_EN_STRAP; 3476 + 3477 + tmp |= LC_CLR_FAILED_SPD_CHANGE_CNT; 3478 + WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp); 3479 + udelay(10); 3480 + tmp &= ~LC_CLR_FAILED_SPD_CHANGE_CNT; 3481 + WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp); 3482 + } else { 3483 + if (!pi->boot_in_gen2) { 3484 + bif = RREG32(CG_BIF_REQ_AND_RSP) & ~CG_CLIENT_REQ_MASK; 3485 + bif |= CG_CLIENT_REQ(0xd); 3486 + WREG32(CG_BIF_REQ_AND_RSP, bif); 3487 + 3488 + tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK; 3489 + tmp &= ~LC_GEN2_EN_STRAP; 3490 + } 3491 + WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp); 3492 + } 3493 + } 3494 + } 3495 + 3496 + static void ni_enable_dynamic_pcie_gen2(struct radeon_device *rdev, 3497 + bool enable) 3498 + { 3499 + ni_enable_bif_dynamic_pcie_gen2(rdev, enable); 3500 + 3501 + if (enable) 3502 + WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE); 3503 + else 3504 + WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE); 3505 + } 3506 + 3507 
+ void ni_dpm_setup_asic(struct radeon_device *rdev) 3508 + { 3509 + struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); 3510 + 3511 + ni_read_clock_registers(rdev); 3512 + btc_read_arb_registers(rdev); 3513 + rv770_get_memory_type(rdev); 3514 + if (eg_pi->pcie_performance_request) 3515 + ni_advertise_gen2_capability(rdev); 3516 + rv770_get_pcie_gen2_status(rdev); 3517 + rv770_enable_acpi_pm(rdev); 3518 + } 3519 + 3520 + int ni_dpm_enable(struct radeon_device *rdev) 3521 + { 3522 + struct rv7xx_power_info *pi = rv770_get_pi(rdev); 3523 + struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); 3524 + 3525 + if (pi->gfx_clock_gating) 3526 + ni_cg_clockgating_default(rdev); 3527 + if (btc_dpm_enabled(rdev)) 3528 + return -EINVAL; 3529 + if (pi->mg_clock_gating) 3530 + ni_mg_clockgating_default(rdev); 3531 + if (eg_pi->ls_clock_gating) 3532 + ni_ls_clockgating_default(rdev); 3533 + if (pi->voltage_control) { 3534 + rv770_enable_voltage_control(rdev, true); 3535 + cypress_construct_voltage_tables(rdev); 3536 + } 3537 + if (eg_pi->dynamic_ac_timing) 3538 + ni_initialize_mc_reg_table(rdev); 3539 + if (pi->dynamic_ss) 3540 + cypress_enable_spread_spectrum(rdev, true); 3541 + if (pi->thermal_protection) 3542 + rv770_enable_thermal_protection(rdev, true); 3543 + rv770_setup_bsp(rdev); 3544 + rv770_program_git(rdev); 3545 + rv770_program_tp(rdev); 3546 + rv770_program_tpp(rdev); 3547 + rv770_program_sstp(rdev); 3548 + cypress_enable_display_gap(rdev); 3549 + rv770_program_vc(rdev); 3550 + if (pi->dynamic_pcie_gen2) 3551 + ni_enable_dynamic_pcie_gen2(rdev, true); 3552 + if (rv770_upload_firmware(rdev)) 3553 + return -EINVAL; 3554 + ni_process_firmware_header(rdev); 3555 + ni_initial_switch_from_arb_f0_to_f1(rdev); 3556 + ni_init_smc_table(rdev); 3557 + ni_init_smc_spll_table(rdev); 3558 + ni_init_arb_table_index(rdev); 3559 + if (eg_pi->dynamic_ac_timing) 3560 + ni_populate_mc_reg_table(rdev); 3561 + ni_initialize_smc_cac_tables(rdev); 3562 + 
ni_initialize_hardware_cac_manager(rdev); 3563 + ni_populate_smc_tdp_limits(rdev); 3564 + ni_program_response_times(rdev); 3565 + r7xx_start_smc(rdev); 3566 + cypress_notify_smc_display_change(rdev, false); 3567 + cypress_enable_sclk_control(rdev, true); 3568 + if (eg_pi->memory_transition) 3569 + cypress_enable_mclk_control(rdev, true); 3570 + cypress_start_dpm(rdev); 3571 + if (pi->gfx_clock_gating) 3572 + ni_gfx_clockgating_enable(rdev, true); 3573 + if (pi->mg_clock_gating) 3574 + ni_mg_clockgating_enable(rdev, true); 3575 + if (eg_pi->ls_clock_gating) 3576 + ni_ls_clockgating_enable(rdev, true); 3577 + 3578 + if (rdev->irq.installed && 3579 + r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { 3580 + PPSMC_Result result; 3581 + 3582 + rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, 0xff * 1000); 3583 + rdev->irq.dpm_thermal = true; 3584 + radeon_irq_set(rdev); 3585 + result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt); 3586 + 3587 + if (result != PPSMC_Result_OK) 3588 + DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); 3589 + } 3590 + 3591 + rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); 3592 + 3593 + return 0; 3594 + } 3595 + 3596 + void ni_dpm_disable(struct radeon_device *rdev) 3597 + { 3598 + struct rv7xx_power_info *pi = rv770_get_pi(rdev); 3599 + struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); 3600 + 3601 + if (!btc_dpm_enabled(rdev)) 3602 + return; 3603 + rv770_clear_vc(rdev); 3604 + if (pi->thermal_protection) 3605 + rv770_enable_thermal_protection(rdev, false); 3606 + ni_enable_power_containment(rdev, false); 3607 + ni_enable_smc_cac(rdev, false); 3608 + cypress_enable_spread_spectrum(rdev, false); 3609 + rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false); 3610 + if (pi->dynamic_pcie_gen2) 3611 + ni_enable_dynamic_pcie_gen2(rdev, false); 3612 + 3613 + if (rdev->irq.installed && 3614 + 
r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { 3615 + rdev->irq.dpm_thermal = false; 3616 + radeon_irq_set(rdev); 3617 + } 3618 + 3619 + if (pi->gfx_clock_gating) 3620 + ni_gfx_clockgating_enable(rdev, false); 3621 + if (pi->mg_clock_gating) 3622 + ni_mg_clockgating_enable(rdev, false); 3623 + if (eg_pi->ls_clock_gating) 3624 + ni_ls_clockgating_enable(rdev, false); 3625 + ni_stop_dpm(rdev); 3626 + btc_reset_to_default(rdev); 3627 + ni_stop_smc(rdev); 3628 + ni_force_switch_to_arb_f0(rdev); 3629 + } 3630 + 3631 + int ni_power_control_set_level(struct radeon_device *rdev) 3632 + { 3633 + ni_restrict_performance_levels_before_switch(rdev); 3634 + rv770_halt_smc(rdev); 3635 + ni_populate_smc_tdp_limits(rdev); 3636 + rv770_resume_smc(rdev); 3637 + rv770_set_sw_state(rdev); 3638 + 3639 + return 0; 3640 + } 3641 + 3642 + int ni_dpm_set_power_state(struct radeon_device *rdev) 3643 + { 3644 + struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); 3645 + int ret; 3646 + 3647 + ni_apply_state_adjust_rules(rdev); 3648 + 3649 + ni_restrict_performance_levels_before_switch(rdev); 3650 + ni_enable_power_containment(rdev, false); 3651 + ni_enable_smc_cac(rdev, false); 3652 + rv770_halt_smc(rdev); 3653 + if (eg_pi->smu_uvd_hs) 3654 + btc_notify_uvd_to_smc(rdev); 3655 + ni_upload_sw_state(rdev); 3656 + if (eg_pi->dynamic_ac_timing) 3657 + ni_upload_mc_reg_table(rdev); 3658 + ret = ni_program_memory_timing_parameters(rdev); 3659 + if (ret) 3660 + return ret; 3661 + ni_populate_smc_tdp_limits(rdev); 3662 + rv770_resume_smc(rdev); 3663 + rv770_set_sw_state(rdev); 3664 + ni_enable_smc_cac(rdev, true); 3665 + ni_enable_power_containment(rdev, true); 3666 + 3667 + #if 0 3668 + /* XXX */ 3669 + ni_unrestrict_performance_levels_after_switch(rdev); 3670 + #endif 3671 + 3672 + return 0; 3673 + } 3674 + 3675 + void ni_dpm_reset_asic(struct radeon_device *rdev) 3676 + { 3677 + ni_restrict_performance_levels_before_switch(rdev); 3678 + rv770_set_boot_state(rdev); 3679 + } 
3680 + 3681 + union power_info { 3682 + struct _ATOM_POWERPLAY_INFO info; 3683 + struct _ATOM_POWERPLAY_INFO_V2 info_2; 3684 + struct _ATOM_POWERPLAY_INFO_V3 info_3; 3685 + struct _ATOM_PPLIB_POWERPLAYTABLE pplib; 3686 + struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; 3687 + struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; 3688 + }; 3689 + 3690 + union pplib_clock_info { 3691 + struct _ATOM_PPLIB_R600_CLOCK_INFO r600; 3692 + struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; 3693 + struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; 3694 + struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; 3695 + }; 3696 + 3697 + union pplib_power_state { 3698 + struct _ATOM_PPLIB_STATE v1; 3699 + struct _ATOM_PPLIB_STATE_V2 v2; 3700 + }; 3701 + 3702 + static void ni_parse_pplib_non_clock_info(struct radeon_device *rdev, 3703 + struct radeon_ps *rps, 3704 + struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, 3705 + u8 table_rev) 3706 + { 3707 + rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); 3708 + rps->class = le16_to_cpu(non_clock_info->usClassification); 3709 + rps->class2 = le16_to_cpu(non_clock_info->usClassification2); 3710 + 3711 + if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { 3712 + rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); 3713 + rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); 3714 + } else if (r600_is_uvd_state(rps->class, rps->class2)) { 3715 + rps->vclk = RV770_DEFAULT_VCLK_FREQ; 3716 + rps->dclk = RV770_DEFAULT_DCLK_FREQ; 3717 + } else { 3718 + rps->vclk = 0; 3719 + rps->dclk = 0; 3720 + } 3721 + 3722 + if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) 3723 + rdev->pm.dpm.boot_ps = rps; 3724 + if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) 3725 + rdev->pm.dpm.uvd_ps = rps; 3726 + } 3727 + 3728 + static void ni_parse_pplib_clock_info(struct radeon_device *rdev, 3729 + struct radeon_ps *rps, int index, 3730 + union pplib_clock_info *clock_info) 3731 + { 3732 + struct rv7xx_power_info *pi = rv770_get_pi(rdev); 3733 + struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); 
3734 + struct ni_ps *ps = ni_get_ps(rps); 3735 + u16 vddc; 3736 + struct rv7xx_pl *pl = &ps->performance_levels[index]; 3737 + 3738 + ps->performance_level_count = index + 1; 3739 + 3740 + pl->sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow); 3741 + pl->sclk |= clock_info->evergreen.ucEngineClockHigh << 16; 3742 + pl->mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow); 3743 + pl->mclk |= clock_info->evergreen.ucMemoryClockHigh << 16; 3744 + 3745 + pl->vddc = le16_to_cpu(clock_info->evergreen.usVDDC); 3746 + pl->vddci = le16_to_cpu(clock_info->evergreen.usVDDCI); 3747 + pl->flags = le32_to_cpu(clock_info->evergreen.ulFlags); 3748 + 3749 + /* patch up vddc if necessary */ 3750 + if (pl->vddc == 0xff01) { 3751 + if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0) 3752 + pl->vddc = vddc; 3753 + } 3754 + 3755 + if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { 3756 + pi->acpi_vddc = pl->vddc; 3757 + eg_pi->acpi_vddci = pl->vddci; 3758 + if (ps->performance_levels[0].flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) 3759 + pi->acpi_pcie_gen2 = true; 3760 + else 3761 + pi->acpi_pcie_gen2 = false; 3762 + } 3763 + 3764 + if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) { 3765 + eg_pi->ulv.supported = true; 3766 + eg_pi->ulv.pl = pl; 3767 + } 3768 + 3769 + if (pi->min_vddc_in_table > pl->vddc) 3770 + pi->min_vddc_in_table = pl->vddc; 3771 + 3772 + if (pi->max_vddc_in_table < pl->vddc) 3773 + pi->max_vddc_in_table = pl->vddc; 3774 + 3775 + /* patch up boot state */ 3776 + if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { 3777 + u16 vddc, vddci; 3778 + radeon_atombios_get_default_voltages(rdev, &vddc, &vddci); 3779 + pl->mclk = rdev->clock.default_mclk; 3780 + pl->sclk = rdev->clock.default_sclk; 3781 + pl->vddc = vddc; 3782 + pl->vddci = vddci; 3783 + } 3784 + 3785 + if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 3786 + ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) { 3787 + rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk; 3788 + 
rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk; 3789 + rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc; 3790 + rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci; 3791 + } 3792 + } 3793 + 3794 + static int ni_parse_power_table(struct radeon_device *rdev) 3795 + { 3796 + struct radeon_mode_info *mode_info = &rdev->mode_info; 3797 + struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; 3798 + union pplib_power_state *power_state; 3799 + int i, j; 3800 + union pplib_clock_info *clock_info; 3801 + union power_info *power_info; 3802 + int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); 3803 + u16 data_offset; 3804 + u8 frev, crev; 3805 + struct ni_ps *ps; 3806 + 3807 + if (!atom_parse_data_header(mode_info->atom_context, index, NULL, 3808 + &frev, &crev, &data_offset)) 3809 + return -EINVAL; 3810 + power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 3811 + 3812 + rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) * 3813 + power_info->pplib.ucNumStates, GFP_KERNEL); 3814 + if (!rdev->pm.dpm.ps) 3815 + return -ENOMEM; 3816 + rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); 3817 + rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); 3818 + rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); 3819 + 3820 + for (i = 0; i < power_info->pplib.ucNumStates; i++) { 3821 + power_state = (union pplib_power_state *) 3822 + (mode_info->atom_context->bios + data_offset + 3823 + le16_to_cpu(power_info->pplib.usStateArrayOffset) + 3824 + i * power_info->pplib.ucStateEntrySize); 3825 + non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 3826 + (mode_info->atom_context->bios + data_offset + 3827 + le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) + 3828 + (power_state->v1.ucNonClockStateIndex * 3829 + power_info->pplib.ucNonClockSize)); 3830 + if (power_info->pplib.ucStateEntrySize - 1) { 3831 + ps = 
kzalloc(sizeof(struct ni_ps), GFP_KERNEL); 3832 + if (ps == NULL) { 3833 + kfree(rdev->pm.dpm.ps); 3834 + return -ENOMEM; 3835 + } 3836 + rdev->pm.dpm.ps[i].ps_priv = ps; 3837 + ni_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i], 3838 + non_clock_info, 3839 + power_info->pplib.ucNonClockSize); 3840 + for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) { 3841 + clock_info = (union pplib_clock_info *) 3842 + (mode_info->atom_context->bios + data_offset + 3843 + le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) + 3844 + (power_state->v1.ucClockStateIndices[j] * 3845 + power_info->pplib.ucClockInfoSize)); 3846 + ni_parse_pplib_clock_info(rdev, 3847 + &rdev->pm.dpm.ps[i], j, 3848 + clock_info); 3849 + } 3850 + } 3851 + } 3852 + rdev->pm.dpm.num_ps = power_info->pplib.ucNumStates; 3853 + return 0; 3854 + } 3855 + 3856 + int ni_dpm_init(struct radeon_device *rdev) 3857 + { 3858 + struct rv7xx_power_info *pi; 3859 + struct evergreen_power_info *eg_pi; 3860 + struct ni_power_info *ni_pi; 3861 + int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); 3862 + u16 data_offset, size; 3863 + u8 frev, crev; 3864 + struct atom_clock_dividers dividers; 3865 + int ret; 3866 + 3867 + ni_pi = kzalloc(sizeof(struct ni_power_info), GFP_KERNEL); 3868 + if (ni_pi == NULL) 3869 + return -ENOMEM; 3870 + rdev->pm.dpm.priv = ni_pi; 3871 + eg_pi = &ni_pi->eg; 3872 + pi = &eg_pi->rv7xx; 3873 + 3874 + rv770_get_max_vddc(rdev); 3875 + 3876 + eg_pi->ulv.supported = false; 3877 + pi->acpi_vddc = 0; 3878 + eg_pi->acpi_vddci = 0; 3879 + pi->min_vddc_in_table = 0; 3880 + pi->max_vddc_in_table = 0; 3881 + 3882 + ret = ni_parse_power_table(rdev); 3883 + if (ret) 3884 + return ret; 3885 + ret = r600_parse_extended_power_table(rdev); 3886 + if (ret) 3887 + return ret; 3888 + 3889 + ni_patch_dependency_tables_based_on_leakage(rdev); 3890 + 3891 + if (rdev->pm.dpm.voltage_response_time == 0) 3892 + rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT; 3893 + if 
(rdev->pm.dpm.backbias_response_time == 0) 3894 + rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT; 3895 + 3896 + ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, 3897 + 0, false, &dividers); 3898 + if (ret) 3899 + pi->ref_div = dividers.ref_div + 1; 3900 + else 3901 + pi->ref_div = R600_REFERENCEDIVIDER_DFLT; 3902 + 3903 + pi->rlp = RV770_RLP_DFLT; 3904 + pi->rmp = RV770_RMP_DFLT; 3905 + pi->lhp = RV770_LHP_DFLT; 3906 + pi->lmp = RV770_LMP_DFLT; 3907 + 3908 + eg_pi->ats[0].rlp = RV770_RLP_DFLT; 3909 + eg_pi->ats[0].rmp = RV770_RMP_DFLT; 3910 + eg_pi->ats[0].lhp = RV770_LHP_DFLT; 3911 + eg_pi->ats[0].lmp = RV770_LMP_DFLT; 3912 + 3913 + eg_pi->ats[1].rlp = BTC_RLP_UVD_DFLT; 3914 + eg_pi->ats[1].rmp = BTC_RMP_UVD_DFLT; 3915 + eg_pi->ats[1].lhp = BTC_LHP_UVD_DFLT; 3916 + eg_pi->ats[1].lmp = BTC_LMP_UVD_DFLT; 3917 + 3918 + eg_pi->smu_uvd_hs = true; 3919 + 3920 + if (rdev->pdev->device == 0x6707) { 3921 + pi->mclk_strobe_mode_threshold = 55000; 3922 + pi->mclk_edc_enable_threshold = 55000; 3923 + eg_pi->mclk_edc_wr_enable_threshold = 55000; 3924 + } else { 3925 + pi->mclk_strobe_mode_threshold = 40000; 3926 + pi->mclk_edc_enable_threshold = 40000; 3927 + eg_pi->mclk_edc_wr_enable_threshold = 40000; 3928 + } 3929 + ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold; 3930 + 3931 + pi->voltage_control = 3932 + radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC); 3933 + 3934 + pi->mvdd_control = 3935 + radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC); 3936 + 3937 + eg_pi->vddci_control = 3938 + radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI); 3939 + 3940 + if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, 3941 + &frev, &crev, &data_offset)) { 3942 + pi->sclk_ss = true; 3943 + pi->mclk_ss = true; 3944 + pi->dynamic_ss = true; 3945 + } else { 3946 + pi->sclk_ss = false; 3947 + pi->mclk_ss = false; 3948 + pi->dynamic_ss = true; 3949 + } 3950 + 3951 + pi->asi = 
RV770_ASI_DFLT; 3952 + pi->pasi = CYPRESS_HASI_DFLT; 3953 + pi->vrc = CYPRESS_VRC_DFLT; 3954 + 3955 + pi->power_gating = false; 3956 + 3957 + pi->gfx_clock_gating = true; 3958 + 3959 + pi->mg_clock_gating = true; 3960 + pi->mgcgtssm = true; 3961 + eg_pi->ls_clock_gating = false; 3962 + eg_pi->sclk_deep_sleep = false; 3963 + 3964 + pi->dynamic_pcie_gen2 = true; 3965 + 3966 + if (pi->gfx_clock_gating && 3967 + (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) 3968 + pi->thermal_protection = true; 3969 + else 3970 + pi->thermal_protection = false; 3971 + 3972 + pi->display_gap = true; 3973 + 3974 + pi->dcodt = true; 3975 + 3976 + pi->ulps = true; 3977 + 3978 + eg_pi->dynamic_ac_timing = true; 3979 + eg_pi->abm = true; 3980 + eg_pi->mcls = true; 3981 + eg_pi->light_sleep = true; 3982 + eg_pi->memory_transition = true; 3983 + #if defined(CONFIG_ACPI) 3984 + eg_pi->pcie_performance_request = 3985 + radeon_acpi_is_pcie_performance_request_supported(rdev); 3986 + #else 3987 + eg_pi->pcie_performance_request = false; 3988 + #endif 3989 + 3990 + eg_pi->dll_default_on = false; 3991 + 3992 + eg_pi->sclk_deep_sleep = false; 3993 + 3994 + pi->mclk_stutter_mode_threshold = 0; 3995 + 3996 + pi->sram_end = SMC_RAM_END; 3997 + 3998 + rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 3; 3999 + rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200; 4000 + rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2 = 900; 4001 + rdev->pm.dpm.dyn_state.valid_sclk_values.count = ARRAY_SIZE(btc_valid_sclk); 4002 + rdev->pm.dpm.dyn_state.valid_sclk_values.values = btc_valid_sclk; 4003 + rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0; 4004 + rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL; 4005 + rdev->pm.dpm.dyn_state.sclk_mclk_delta = 12500; 4006 + 4007 + ni_pi->cac_data.leakage_coefficients.at = 516; 4008 + ni_pi->cac_data.leakage_coefficients.bt = 18; 4009 + ni_pi->cac_data.leakage_coefficients.av = 51; 4010 + ni_pi->cac_data.leakage_coefficients.bv = 2957; 4011 + 4012 + switch (rdev->pdev->device) { 4013 
+ case 0x6700: 4014 + case 0x6701: 4015 + case 0x6702: 4016 + case 0x6703: 4017 + case 0x6718: 4018 + ni_pi->cac_weights = &cac_weights_cayman_xt; 4019 + break; 4020 + case 0x6705: 4021 + case 0x6719: 4022 + case 0x671D: 4023 + case 0x671C: 4024 + default: 4025 + ni_pi->cac_weights = &cac_weights_cayman_pro; 4026 + break; 4027 + case 0x6704: 4028 + case 0x6706: 4029 + case 0x6707: 4030 + case 0x6708: 4031 + case 0x6709: 4032 + ni_pi->cac_weights = &cac_weights_cayman_le; 4033 + break; 4034 + } 4035 + 4036 + if (ni_pi->cac_weights->enable_power_containment_by_default) { 4037 + ni_pi->enable_power_containment = true; 4038 + ni_pi->enable_cac = true; 4039 + ni_pi->enable_sq_ramping = true; 4040 + } else { 4041 + ni_pi->enable_power_containment = false; 4042 + ni_pi->enable_cac = false; 4043 + ni_pi->enable_sq_ramping = false; 4044 + } 4045 + 4046 + ni_pi->driver_calculate_cac_leakage = false; 4047 + ni_pi->cac_configuration_required = true; 4048 + 4049 + if (ni_pi->cac_configuration_required) { 4050 + ni_pi->support_cac_long_term_average = true; 4051 + ni_pi->lta_window_size = ni_pi->cac_weights->l2_lta_window_size; 4052 + ni_pi->lts_truncate = ni_pi->cac_weights->lts_truncate; 4053 + } else { 4054 + ni_pi->support_cac_long_term_average = false; 4055 + ni_pi->lta_window_size = 0; 4056 + ni_pi->lts_truncate = 0; 4057 + } 4058 + 4059 + ni_pi->use_power_boost_limit = true; 4060 + 4061 + return 0; 4062 + } 4063 + 4064 + void ni_dpm_fini(struct radeon_device *rdev) 4065 + { 4066 + int i; 4067 + 4068 + for (i = 0; i < rdev->pm.dpm.num_ps; i++) { 4069 + kfree(rdev->pm.dpm.ps[i].ps_priv); 4070 + } 4071 + kfree(rdev->pm.dpm.ps); 4072 + kfree(rdev->pm.dpm.priv); 4073 + r600_free_extended_power_table(rdev); 4074 + } 4075 + 4076 + void ni_dpm_print_power_state(struct radeon_device *rdev, 4077 + struct radeon_ps *rps) 4078 + { 4079 + struct ni_ps *ps = ni_get_ps(rps); 4080 + struct rv7xx_pl *pl; 4081 + int i; 4082 + 4083 + r600_dpm_print_class_info(rps->class, rps->class2); 4084 + 
r600_dpm_print_cap_info(rps->caps); 4085 + printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); 4086 + for (i = 0; i < ps->performance_level_count; i++) { 4087 + pl = &ps->performance_levels[i]; 4088 + printk("\t\tpower level 0 sclk: %u mclk: %u vddc: %u vddci: %u\n", 4089 + pl->sclk, pl->mclk, pl->vddc, pl->vddci); 4090 + } 4091 + r600_dpm_print_ps_status(rdev, rps); 4092 + } 4093 + 4094 + u32 ni_dpm_get_sclk(struct radeon_device *rdev, bool low) 4095 + { 4096 + struct ni_ps *requested_state = ni_get_ps(rdev->pm.dpm.requested_ps); 4097 + 4098 + if (low) 4099 + return requested_state->performance_levels[0].sclk; 4100 + else 4101 + return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk; 4102 + } 4103 + 4104 + u32 ni_dpm_get_mclk(struct radeon_device *rdev, bool low) 4105 + { 4106 + struct ni_ps *requested_state = ni_get_ps(rdev->pm.dpm.requested_ps); 4107 + 4108 + if (low) 4109 + return requested_state->performance_levels[0].mclk; 4110 + else 4111 + return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk; 4112 + } 4113 +
+233
drivers/gpu/drm/radeon/ni_dpm.h
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#ifndef __NI_DPM_H__
#define __NI_DPM_H__

#include "cypress_dpm.h"
#include "btc_dpm.h"
#include "nislands_smc.h"

/* Driver-side cache of the clock-related register values; field names
 * mirror the CG_SPLL_*/MPLL_*/MCLK_PWRMGT_CNTL/DLL_CNTL registers in nid.h. */
struct ni_clock_registers {
	u32 cg_spll_func_cntl;
	u32 cg_spll_func_cntl_2;
	u32 cg_spll_func_cntl_3;
	u32 cg_spll_func_cntl_4;
	u32 cg_spll_spread_spectrum;
	u32 cg_spll_spread_spectrum_2;
	u32 mclk_pwrmgt_cntl;
	u32 dll_cntl;
	u32 mpll_ad_func_cntl;
	u32 mpll_ad_func_cntl_2;
	u32 mpll_dq_func_cntl;
	u32 mpll_dq_func_cntl_2;
	u32 mpll_ss1;
	u32 mpll_ss2;
};

/* One MC (memory controller) register set, valid up to mclk_max. */
struct ni_mc_reg_entry {
	u32 mclk_max;
	u32 mc_data[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
};

/* Table of MC register sets uploaded to the SMC.
 * NOTE(review): valid_flag is presumably a bitmask of valid mc_reg_address
 * slots — confirm against the code that fills this table. */
struct ni_mc_reg_table {
	u8 last;
	u8 num_entries;
	u16 valid_flag;
	struct ni_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
	SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
};

#define NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT 2

/* Digital CAC (capacitance/power estimation) levels, 8 levels total. */
enum ni_dc_cac_level
{
	NISLANDS_DCCAC_LEVEL_0 = 0,
	NISLANDS_DCCAC_LEVEL_1,
	NISLANDS_DCCAC_LEVEL_2,
	NISLANDS_DCCAC_LEVEL_3,
	NISLANDS_DCCAC_LEVEL_4,
	NISLANDS_DCCAC_LEVEL_5,
	NISLANDS_DCCAC_LEVEL_6,
	NISLANDS_DCCAC_LEVEL_7,
	NISLANDS_DCCAC_MAX_LEVELS
};

/* Coefficients for the leakage model (note: upstream spelling of
 * "coeffients" is kept; it is part of the existing ABI of this file).
 * t_slope/t_intercept are signed — the temperature fit can be negative. */
struct ni_leakage_coeffients
{
	u32 at;
	u32 bt;
	u32 av;
	u32 bv;
	s32 t_slope;
	s32 t_intercept;
	u32 t_ref;
};

/* Per-ASIC CAC (power estimation) parameters. */
struct ni_cac_data
{
	struct ni_leakage_coeffients leakage_coefficients;
	u32 i_leakage;
	s32 leakage_minimum_temperature;
	u32 pwr_const;
	u32 dc_cac_value;
	u32 bif_cac_value;
	u32 lkge_pwr;
	u8 mc_wr_weight;
	u8 mc_rd_weight;
	u8 allow_ovrflw;
	u8 num_win_tdp;
	u8 l2num_win_tdp;
	u8 lts_truncate_n;
};

/* Per-board CAC weight table; one weight per functional-block activity
 * signal (TCP/TA/TCC/CB/DB/SXM/SXS/XBR/SPI/LDS/SC/BIF/CP/PA/VGT/DC/UVD...),
 * written into the CG_CAC_REGION_* registers defined in nid.h. */
struct ni_cac_weights
{
	u32 weight_tcp_sig0;
	u32 weight_tcp_sig1;
	u32 weight_ta_sig;
	u32 weight_tcc_en0;
	u32 weight_tcc_en1;
	u32 weight_tcc_en2;
	u32 weight_cb_en0;
	u32 weight_cb_en1;
	u32 weight_cb_en2;
	u32 weight_cb_en3;
	u32 weight_db_sig0;
	u32 weight_db_sig1;
	u32 weight_db_sig2;
	u32 weight_db_sig3;
	u32 weight_sxm_sig0;
	u32 weight_sxm_sig1;
	u32 weight_sxm_sig2;
	u32 weight_sxs_sig0;
	u32 weight_sxs_sig1;
	u32 weight_xbr_0;
	u32 weight_xbr_1;
	u32 weight_xbr_2;
	u32 weight_spi_sig0;
	u32 weight_spi_sig1;
	u32 weight_spi_sig2;
	u32 weight_spi_sig3;
	u32 weight_spi_sig4;
	u32 weight_spi_sig5;
	u32 weight_lds_sig0;
	u32 weight_lds_sig1;
	u32 weight_sc;
	u32 weight_bif;
	u32 weight_cp;
	u32 weight_pa_sig0;
	u32 weight_pa_sig1;
	u32 weight_vgt_sig0;
	u32 weight_vgt_sig1;
	u32 weight_vgt_sig2;
	u32 weight_dc_sig0;
	u32 weight_dc_sig1;
	u32 weight_dc_sig2;
	u32 weight_dc_sig3;
	u32 weight_uvd_sig0;
	u32 weight_uvd_sig1;
	u32 weight_spare0;
	u32 weight_spare1;
	u32 weight_sq_vsp;
	u32 weight_sq_vsp0;
	u32 weight_sq_gpr;
	u32 ovr_mode_spare_0;
	u32 ovr_val_spare_0;
	u32 ovr_mode_spare_1;
	u32 ovr_val_spare_1;
	u32 vsp;
	u32 vsp0;
	u32 gpr;
	u8 mc_read_weight;
	u8 mc_write_weight;
	u32 tid_cnt;
	u32 tid_unit;
	u32 l2_lta_window_size;
	u32 lts_truncate;
	u32 dc_cac[NISLANDS_DCCAC_MAX_LEVELS];
	u32 pcie_cac[SMC_NISLANDS_BIF_LUT_NUM_OF_ENTRIES];
	bool enable_power_containment_by_default;
};

/* Driver power state: an ordered set of performance levels. */
struct ni_ps {
	u16 performance_level_count;
	bool dc_compatible;	/* usable while on DC (battery) power */
	struct rv7xx_pl performance_levels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
};

struct ni_power_info {
	/* must be first! — code elsewhere casts ni_power_info* to
	 * evergreen_power_info* (the common base), so 'eg' must sit at
	 * offset 0 */
	struct evergreen_power_info eg;
	struct ni_clock_registers clock_registers;
	struct ni_mc_reg_table mc_reg_table;
	u32 mclk_rtt_mode_threshold;
	/* flags */
	bool use_power_boost_limit;
	bool support_cac_long_term_average;
	bool cac_enabled;
	bool cac_configuration_required;
	bool driver_calculate_cac_leakage;
	bool pc_enabled;
	bool enable_power_containment;
	bool enable_cac;
	bool enable_sq_ramping;
	/* smc offsets */
	u16 arb_table_start;
	u16 fan_table_start;
	u16 cac_table_start;
	u16 spll_table_start;
	/* CAC stuff */
	struct ni_cac_data cac_data;
	u32 dc_cac_table[NISLANDS_DCCAC_MAX_LEVELS];
	const struct ni_cac_weights *cac_weights;
	u8 lta_window_size;
	u8 lts_truncate;
	struct ni_ps hw_ps;
	/* scratch structs — reused across state programming to avoid
	 * re-allocating the (large) SMC upload images */
	SMC_NIslands_MCRegisters smc_mc_reg_table;
	NISLANDS_SMC_STATETABLE smc_statetable;
};

/* Fixed MC arbitration table slots (see NISLANDS_*_ARB_INDEX usage). */
#define NISLANDS_INITIAL_STATE_ARB_INDEX    0
#define NISLANDS_ACPI_STATE_ARB_INDEX       1
#define NISLANDS_ULV_STATE_ARB_INDEX        2
#define NISLANDS_DRIVER_STATE_ARB_INDEX     3

#define NISLANDS_DPM2_MAX_PULSE_SKIP        256

#define NISLANDS_DPM2_NEAR_TDP_DEC          10
#define NISLANDS_DPM2_ABOVE_SAFE_INC        5
#define NISLANDS_DPM2_BELOW_SAFE_INC        20

/* Safe power limit expressed as a percentage of the TDP limit. */
#define NISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT            80

#define NISLANDS_DPM2_MAXPS_PERCENT_H                   90
#define NISLANDS_DPM2_MAXPS_PERCENT_M                   0

/* SQ (shader) power-ramping parameters, programmed into
 * SQ_POWER_THROTTLE/SQ_POWER_THROTTLE2 (see nid.h bit layouts). */
#define NISLANDS_DPM2_SQ_RAMP_MAX_POWER                 0x3FFF
#define NISLANDS_DPM2_SQ_RAMP_MIN_POWER                 0x12
#define NISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA           0x15
#define NISLANDS_DPM2_SQ_RAMP_STI_SIZE                  0x1E
#define NISLANDS_DPM2_SQ_RAMP_LTI_RATIO                 0xF

#endif
+552
drivers/gpu/drm/radeon/nid.h
··· 492 492 /* TN SMU registers */ 493 493 #define TN_CURRENT_GNB_TEMP 0x1F390 494 494 495 + /* pm registers */ 496 + #define SMC_MSG 0x20c 497 + #define HOST_SMC_MSG(x) ((x) << 0) 498 + #define HOST_SMC_MSG_MASK (0xff << 0) 499 + #define HOST_SMC_MSG_SHIFT 0 500 + #define HOST_SMC_RESP(x) ((x) << 8) 501 + #define HOST_SMC_RESP_MASK (0xff << 8) 502 + #define HOST_SMC_RESP_SHIFT 8 503 + #define SMC_HOST_MSG(x) ((x) << 16) 504 + #define SMC_HOST_MSG_MASK (0xff << 16) 505 + #define SMC_HOST_MSG_SHIFT 16 506 + #define SMC_HOST_RESP(x) ((x) << 24) 507 + #define SMC_HOST_RESP_MASK (0xff << 24) 508 + #define SMC_HOST_RESP_SHIFT 24 509 + 510 + #define CG_SPLL_FUNC_CNTL 0x600 511 + #define SPLL_RESET (1 << 0) 512 + #define SPLL_SLEEP (1 << 1) 513 + #define SPLL_BYPASS_EN (1 << 3) 514 + #define SPLL_REF_DIV(x) ((x) << 4) 515 + #define SPLL_REF_DIV_MASK (0x3f << 4) 516 + #define SPLL_PDIV_A(x) ((x) << 20) 517 + #define SPLL_PDIV_A_MASK (0x7f << 20) 518 + #define SPLL_PDIV_A_SHIFT 20 519 + #define CG_SPLL_FUNC_CNTL_2 0x604 520 + #define SCLK_MUX_SEL(x) ((x) << 0) 521 + #define SCLK_MUX_SEL_MASK (0x1ff << 0) 522 + #define CG_SPLL_FUNC_CNTL_3 0x608 523 + #define SPLL_FB_DIV(x) ((x) << 0) 524 + #define SPLL_FB_DIV_MASK (0x3ffffff << 0) 525 + #define SPLL_FB_DIV_SHIFT 0 526 + #define SPLL_DITHEN (1 << 28) 527 + 528 + #define MPLL_CNTL_MODE 0x61c 529 + # define SS_SSEN (1 << 24) 530 + # define SS_DSMODE_EN (1 << 25) 531 + 532 + #define MPLL_AD_FUNC_CNTL 0x624 533 + #define CLKF(x) ((x) << 0) 534 + #define CLKF_MASK (0x7f << 0) 535 + #define CLKR(x) ((x) << 7) 536 + #define CLKR_MASK (0x1f << 7) 537 + #define CLKFRAC(x) ((x) << 12) 538 + #define CLKFRAC_MASK (0x1f << 12) 539 + #define YCLK_POST_DIV(x) ((x) << 17) 540 + #define YCLK_POST_DIV_MASK (3 << 17) 541 + #define IBIAS(x) ((x) << 20) 542 + #define IBIAS_MASK (0x3ff << 20) 543 + #define RESET (1 << 30) 544 + #define PDNB (1 << 31) 545 + #define MPLL_AD_FUNC_CNTL_2 0x628 546 + #define BYPASS (1 << 19) 547 + #define BIAS_GEN_PDNB 
(1 << 24) 548 + #define RESET_EN (1 << 25) 549 + #define VCO_MODE (1 << 29) 550 + #define MPLL_DQ_FUNC_CNTL 0x62c 551 + #define MPLL_DQ_FUNC_CNTL_2 0x630 552 + 553 + #define GENERAL_PWRMGT 0x63c 554 + # define GLOBAL_PWRMGT_EN (1 << 0) 555 + # define STATIC_PM_EN (1 << 1) 556 + # define THERMAL_PROTECTION_DIS (1 << 2) 557 + # define THERMAL_PROTECTION_TYPE (1 << 3) 558 + # define ENABLE_GEN2PCIE (1 << 4) 559 + # define ENABLE_GEN2XSP (1 << 5) 560 + # define SW_SMIO_INDEX(x) ((x) << 6) 561 + # define SW_SMIO_INDEX_MASK (3 << 6) 562 + # define SW_SMIO_INDEX_SHIFT 6 563 + # define LOW_VOLT_D2_ACPI (1 << 8) 564 + # define LOW_VOLT_D3_ACPI (1 << 9) 565 + # define VOLT_PWRMGT_EN (1 << 10) 566 + # define BACKBIAS_PAD_EN (1 << 18) 567 + # define BACKBIAS_VALUE (1 << 19) 568 + # define DYN_SPREAD_SPECTRUM_EN (1 << 23) 569 + # define AC_DC_SW (1 << 24) 570 + 571 + #define SCLK_PWRMGT_CNTL 0x644 572 + # define SCLK_PWRMGT_OFF (1 << 0) 573 + # define SCLK_LOW_D1 (1 << 1) 574 + # define FIR_RESET (1 << 4) 575 + # define FIR_FORCE_TREND_SEL (1 << 5) 576 + # define FIR_TREND_MODE (1 << 6) 577 + # define DYN_GFX_CLK_OFF_EN (1 << 7) 578 + # define GFX_CLK_FORCE_ON (1 << 8) 579 + # define GFX_CLK_REQUEST_OFF (1 << 9) 580 + # define GFX_CLK_FORCE_OFF (1 << 10) 581 + # define GFX_CLK_OFF_ACPI_D1 (1 << 11) 582 + # define GFX_CLK_OFF_ACPI_D2 (1 << 12) 583 + # define GFX_CLK_OFF_ACPI_D3 (1 << 13) 584 + # define DYN_LIGHT_SLEEP_EN (1 << 14) 585 + #define MCLK_PWRMGT_CNTL 0x648 586 + # define DLL_SPEED(x) ((x) << 0) 587 + # define DLL_SPEED_MASK (0x1f << 0) 588 + # define MPLL_PWRMGT_OFF (1 << 5) 589 + # define DLL_READY (1 << 6) 590 + # define MC_INT_CNTL (1 << 7) 591 + # define MRDCKA0_PDNB (1 << 8) 592 + # define MRDCKA1_PDNB (1 << 9) 593 + # define MRDCKB0_PDNB (1 << 10) 594 + # define MRDCKB1_PDNB (1 << 11) 595 + # define MRDCKC0_PDNB (1 << 12) 596 + # define MRDCKC1_PDNB (1 << 13) 597 + # define MRDCKD0_PDNB (1 << 14) 598 + # define MRDCKD1_PDNB (1 << 15) 599 + # define MRDCKA0_RESET 
(1 << 16) 600 + # define MRDCKA1_RESET (1 << 17) 601 + # define MRDCKB0_RESET (1 << 18) 602 + # define MRDCKB1_RESET (1 << 19) 603 + # define MRDCKC0_RESET (1 << 20) 604 + # define MRDCKC1_RESET (1 << 21) 605 + # define MRDCKD0_RESET (1 << 22) 606 + # define MRDCKD1_RESET (1 << 23) 607 + # define DLL_READY_READ (1 << 24) 608 + # define USE_DISPLAY_GAP (1 << 25) 609 + # define USE_DISPLAY_URGENT_NORMAL (1 << 26) 610 + # define MPLL_TURNOFF_D2 (1 << 28) 611 + #define DLL_CNTL 0x64c 612 + # define MRDCKA0_BYPASS (1 << 24) 613 + # define MRDCKA1_BYPASS (1 << 25) 614 + # define MRDCKB0_BYPASS (1 << 26) 615 + # define MRDCKB1_BYPASS (1 << 27) 616 + # define MRDCKC0_BYPASS (1 << 28) 617 + # define MRDCKC1_BYPASS (1 << 29) 618 + # define MRDCKD0_BYPASS (1 << 30) 619 + # define MRDCKD1_BYPASS (1 << 31) 620 + 621 + #define CG_AT 0x6d4 622 + # define CG_R(x) ((x) << 0) 623 + # define CG_R_MASK (0xffff << 0) 624 + # define CG_L(x) ((x) << 16) 625 + # define CG_L_MASK (0xffff << 16) 626 + 627 + #define CG_BIF_REQ_AND_RSP 0x7f4 628 + #define CG_CLIENT_REQ(x) ((x) << 0) 629 + #define CG_CLIENT_REQ_MASK (0xff << 0) 630 + #define CG_CLIENT_REQ_SHIFT 0 631 + #define CG_CLIENT_RESP(x) ((x) << 8) 632 + #define CG_CLIENT_RESP_MASK (0xff << 8) 633 + #define CG_CLIENT_RESP_SHIFT 8 634 + #define CLIENT_CG_REQ(x) ((x) << 16) 635 + #define CLIENT_CG_REQ_MASK (0xff << 16) 636 + #define CLIENT_CG_REQ_SHIFT 16 637 + #define CLIENT_CG_RESP(x) ((x) << 24) 638 + #define CLIENT_CG_RESP_MASK (0xff << 24) 639 + #define CLIENT_CG_RESP_SHIFT 24 640 + 641 + #define CG_SPLL_SPREAD_SPECTRUM 0x790 642 + #define SSEN (1 << 0) 643 + #define CLK_S(x) ((x) << 4) 644 + #define CLK_S_MASK (0xfff << 4) 645 + #define CLK_S_SHIFT 4 646 + #define CG_SPLL_SPREAD_SPECTRUM_2 0x794 647 + #define CLK_V(x) ((x) << 0) 648 + #define CLK_V_MASK (0x3ffffff << 0) 649 + #define CLK_V_SHIFT 0 650 + 651 + #define SMC_SCRATCH0 0x81c 652 + 653 + #define CG_SPLL_FUNC_CNTL_4 0x850 654 + 655 + #define MPLL_SS1 0x85c 656 + #define 
CLKV(x) ((x) << 0) 657 + #define CLKV_MASK (0x3ffffff << 0) 658 + #define MPLL_SS2 0x860 659 + #define CLKS(x) ((x) << 0) 660 + #define CLKS_MASK (0xfff << 0) 661 + 662 + #define CG_CAC_CTRL 0x88c 663 + #define TID_CNT(x) ((x) << 0) 664 + #define TID_CNT_MASK (0x3fff << 0) 665 + #define TID_UNIT(x) ((x) << 14) 666 + #define TID_UNIT_MASK (0xf << 14) 667 + 668 + #define MC_CG_CONFIG 0x25bc 669 + #define MCDW_WR_ENABLE (1 << 0) 670 + #define MCDX_WR_ENABLE (1 << 1) 671 + #define MCDY_WR_ENABLE (1 << 2) 672 + #define MCDZ_WR_ENABLE (1 << 3) 673 + #define MC_RD_ENABLE(x) ((x) << 4) 674 + #define MC_RD_ENABLE_MASK (3 << 4) 675 + #define INDEX(x) ((x) << 6) 676 + #define INDEX_MASK (0xfff << 6) 677 + #define INDEX_SHIFT 6 678 + 679 + #define MC_ARB_CAC_CNTL 0x2750 680 + #define ENABLE (1 << 0) 681 + #define READ_WEIGHT(x) ((x) << 1) 682 + #define READ_WEIGHT_MASK (0x3f << 1) 683 + #define READ_WEIGHT_SHIFT 1 684 + #define WRITE_WEIGHT(x) ((x) << 7) 685 + #define WRITE_WEIGHT_MASK (0x3f << 7) 686 + #define WRITE_WEIGHT_SHIFT 7 687 + #define ALLOW_OVERFLOW (1 << 13) 688 + 689 + #define MC_ARB_DRAM_TIMING 0x2774 690 + #define MC_ARB_DRAM_TIMING2 0x2778 691 + 692 + #define MC_ARB_RFSH_RATE 0x27b0 693 + #define POWERMODE0(x) ((x) << 0) 694 + #define POWERMODE0_MASK (0xff << 0) 695 + #define POWERMODE0_SHIFT 0 696 + #define POWERMODE1(x) ((x) << 8) 697 + #define POWERMODE1_MASK (0xff << 8) 698 + #define POWERMODE1_SHIFT 8 699 + #define POWERMODE2(x) ((x) << 16) 700 + #define POWERMODE2_MASK (0xff << 16) 701 + #define POWERMODE2_SHIFT 16 702 + #define POWERMODE3(x) ((x) << 24) 703 + #define POWERMODE3_MASK (0xff << 24) 704 + #define POWERMODE3_SHIFT 24 705 + 706 + #define MC_ARB_CG 0x27e8 707 + #define CG_ARB_REQ(x) ((x) << 0) 708 + #define CG_ARB_REQ_MASK (0xff << 0) 709 + #define CG_ARB_REQ_SHIFT 0 710 + #define CG_ARB_RESP(x) ((x) << 8) 711 + #define CG_ARB_RESP_MASK (0xff << 8) 712 + #define CG_ARB_RESP_SHIFT 8 713 + #define ARB_CG_REQ(x) ((x) << 16) 714 + #define 
ARB_CG_REQ_MASK (0xff << 16) 715 + #define ARB_CG_REQ_SHIFT 16 716 + #define ARB_CG_RESP(x) ((x) << 24) 717 + #define ARB_CG_RESP_MASK (0xff << 24) 718 + #define ARB_CG_RESP_SHIFT 24 719 + 720 + #define MC_ARB_DRAM_TIMING_1 0x27f0 721 + #define MC_ARB_DRAM_TIMING_2 0x27f4 722 + #define MC_ARB_DRAM_TIMING_3 0x27f8 723 + #define MC_ARB_DRAM_TIMING2_1 0x27fc 724 + #define MC_ARB_DRAM_TIMING2_2 0x2800 725 + #define MC_ARB_DRAM_TIMING2_3 0x2804 726 + #define MC_ARB_BURST_TIME 0x2808 727 + #define STATE0(x) ((x) << 0) 728 + #define STATE0_MASK (0x1f << 0) 729 + #define STATE0_SHIFT 0 730 + #define STATE1(x) ((x) << 5) 731 + #define STATE1_MASK (0x1f << 5) 732 + #define STATE1_SHIFT 5 733 + #define STATE2(x) ((x) << 10) 734 + #define STATE2_MASK (0x1f << 10) 735 + #define STATE2_SHIFT 10 736 + #define STATE3(x) ((x) << 15) 737 + #define STATE3_MASK (0x1f << 15) 738 + #define STATE3_SHIFT 15 739 + 740 + #define MC_CG_DATAPORT 0x2884 741 + 742 + #define MC_SEQ_RAS_TIMING 0x28a0 743 + #define MC_SEQ_CAS_TIMING 0x28a4 744 + #define MC_SEQ_MISC_TIMING 0x28a8 745 + #define MC_SEQ_MISC_TIMING2 0x28ac 746 + #define MC_SEQ_PMG_TIMING 0x28b0 747 + #define MC_SEQ_RD_CTL_D0 0x28b4 748 + #define MC_SEQ_RD_CTL_D1 0x28b8 749 + #define MC_SEQ_WR_CTL_D0 0x28bc 750 + #define MC_SEQ_WR_CTL_D1 0x28c0 751 + 752 + #define MC_SEQ_MISC0 0x2a00 753 + #define MC_SEQ_MISC0_GDDR5_SHIFT 28 754 + #define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000 755 + #define MC_SEQ_MISC0_GDDR5_VALUE 5 756 + #define MC_SEQ_MISC1 0x2a04 757 + #define MC_SEQ_RESERVE_M 0x2a08 758 + #define MC_PMG_CMD_EMRS 0x2a0c 759 + 760 + #define MC_SEQ_MISC3 0x2a2c 761 + 762 + #define MC_SEQ_MISC5 0x2a54 763 + #define MC_SEQ_MISC6 0x2a58 764 + 765 + #define MC_SEQ_MISC7 0x2a64 766 + 767 + #define MC_SEQ_RAS_TIMING_LP 0x2a6c 768 + #define MC_SEQ_CAS_TIMING_LP 0x2a70 769 + #define MC_SEQ_MISC_TIMING_LP 0x2a74 770 + #define MC_SEQ_MISC_TIMING2_LP 0x2a78 771 + #define MC_SEQ_WR_CTL_D0_LP 0x2a7c 772 + #define MC_SEQ_WR_CTL_D1_LP 0x2a80 773 + 
#define MC_SEQ_PMG_CMD_EMRS_LP 0x2a84 774 + #define MC_SEQ_PMG_CMD_MRS_LP 0x2a88 775 + 776 + #define MC_PMG_CMD_MRS 0x2aac 777 + 778 + #define MC_SEQ_RD_CTL_D0_LP 0x2b1c 779 + #define MC_SEQ_RD_CTL_D1_LP 0x2b20 780 + 781 + #define MC_PMG_CMD_MRS1 0x2b44 782 + #define MC_SEQ_PMG_CMD_MRS1_LP 0x2b48 783 + #define MC_SEQ_PMG_TIMING_LP 0x2b4c 784 + 785 + #define MC_PMG_CMD_MRS2 0x2b5c 786 + #define MC_SEQ_PMG_CMD_MRS2_LP 0x2b60 787 + 788 + #define LB_SYNC_RESET_SEL 0x6b28 789 + #define LB_SYNC_RESET_SEL_MASK (3 << 0) 790 + #define LB_SYNC_RESET_SEL_SHIFT 0 791 + 792 + #define DC_STUTTER_CNTL 0x6b30 793 + #define DC_STUTTER_ENABLE_A (1 << 0) 794 + #define DC_STUTTER_ENABLE_B (1 << 1) 795 + 796 + #define SQ_CAC_THRESHOLD 0x8e4c 797 + #define VSP(x) ((x) << 0) 798 + #define VSP_MASK (0xff << 0) 799 + #define VSP_SHIFT 0 800 + #define VSP0(x) ((x) << 8) 801 + #define VSP0_MASK (0xff << 8) 802 + #define VSP0_SHIFT 8 803 + #define GPR(x) ((x) << 16) 804 + #define GPR_MASK (0xff << 16) 805 + #define GPR_SHIFT 16 806 + 807 + #define SQ_POWER_THROTTLE 0x8e58 808 + #define MIN_POWER(x) ((x) << 0) 809 + #define MIN_POWER_MASK (0x3fff << 0) 810 + #define MIN_POWER_SHIFT 0 811 + #define MAX_POWER(x) ((x) << 16) 812 + #define MAX_POWER_MASK (0x3fff << 16) 813 + #define MAX_POWER_SHIFT 0 814 + #define SQ_POWER_THROTTLE2 0x8e5c 815 + #define MAX_POWER_DELTA(x) ((x) << 0) 816 + #define MAX_POWER_DELTA_MASK (0x3fff << 0) 817 + #define MAX_POWER_DELTA_SHIFT 0 818 + #define STI_SIZE(x) ((x) << 16) 819 + #define STI_SIZE_MASK (0x3ff << 16) 820 + #define STI_SIZE_SHIFT 16 821 + #define LTI_RATIO(x) ((x) << 27) 822 + #define LTI_RATIO_MASK (0xf << 27) 823 + #define LTI_RATIO_SHIFT 27 824 + 825 + /* CG indirect registers */ 826 + #define CG_CAC_REGION_1_WEIGHT_0 0x83 827 + #define WEIGHT_TCP_SIG0(x) ((x) << 0) 828 + #define WEIGHT_TCP_SIG0_MASK (0x3f << 0) 829 + #define WEIGHT_TCP_SIG0_SHIFT 0 830 + #define WEIGHT_TCP_SIG1(x) ((x) << 6) 831 + #define WEIGHT_TCP_SIG1_MASK (0x3f << 6) 832 + 
#define WEIGHT_TCP_SIG1_SHIFT 6 833 + #define WEIGHT_TA_SIG(x) ((x) << 12) 834 + #define WEIGHT_TA_SIG_MASK (0x3f << 12) 835 + #define WEIGHT_TA_SIG_SHIFT 12 836 + #define CG_CAC_REGION_1_WEIGHT_1 0x84 837 + #define WEIGHT_TCC_EN0(x) ((x) << 0) 838 + #define WEIGHT_TCC_EN0_MASK (0x3f << 0) 839 + #define WEIGHT_TCC_EN0_SHIFT 0 840 + #define WEIGHT_TCC_EN1(x) ((x) << 6) 841 + #define WEIGHT_TCC_EN1_MASK (0x3f << 6) 842 + #define WEIGHT_TCC_EN1_SHIFT 6 843 + #define WEIGHT_TCC_EN2(x) ((x) << 12) 844 + #define WEIGHT_TCC_EN2_MASK (0x3f << 12) 845 + #define WEIGHT_TCC_EN2_SHIFT 12 846 + #define WEIGHT_TCC_EN3(x) ((x) << 18) 847 + #define WEIGHT_TCC_EN3_MASK (0x3f << 18) 848 + #define WEIGHT_TCC_EN3_SHIFT 18 849 + #define CG_CAC_REGION_2_WEIGHT_0 0x85 850 + #define WEIGHT_CB_EN0(x) ((x) << 0) 851 + #define WEIGHT_CB_EN0_MASK (0x3f << 0) 852 + #define WEIGHT_CB_EN0_SHIFT 0 853 + #define WEIGHT_CB_EN1(x) ((x) << 6) 854 + #define WEIGHT_CB_EN1_MASK (0x3f << 6) 855 + #define WEIGHT_CB_EN1_SHIFT 6 856 + #define WEIGHT_CB_EN2(x) ((x) << 12) 857 + #define WEIGHT_CB_EN2_MASK (0x3f << 12) 858 + #define WEIGHT_CB_EN2_SHIFT 12 859 + #define WEIGHT_CB_EN3(x) ((x) << 18) 860 + #define WEIGHT_CB_EN3_MASK (0x3f << 18) 861 + #define WEIGHT_CB_EN3_SHIFT 18 862 + #define CG_CAC_REGION_2_WEIGHT_1 0x86 863 + #define WEIGHT_DB_SIG0(x) ((x) << 0) 864 + #define WEIGHT_DB_SIG0_MASK (0x3f << 0) 865 + #define WEIGHT_DB_SIG0_SHIFT 0 866 + #define WEIGHT_DB_SIG1(x) ((x) << 6) 867 + #define WEIGHT_DB_SIG1_MASK (0x3f << 6) 868 + #define WEIGHT_DB_SIG1_SHIFT 6 869 + #define WEIGHT_DB_SIG2(x) ((x) << 12) 870 + #define WEIGHT_DB_SIG2_MASK (0x3f << 12) 871 + #define WEIGHT_DB_SIG2_SHIFT 12 872 + #define WEIGHT_DB_SIG3(x) ((x) << 18) 873 + #define WEIGHT_DB_SIG3_MASK (0x3f << 18) 874 + #define WEIGHT_DB_SIG3_SHIFT 18 875 + #define CG_CAC_REGION_2_WEIGHT_2 0x87 876 + #define WEIGHT_SXM_SIG0(x) ((x) << 0) 877 + #define WEIGHT_SXM_SIG0_MASK (0x3f << 0) 878 + #define WEIGHT_SXM_SIG0_SHIFT 0 879 + #define 
WEIGHT_SXM_SIG1(x) ((x) << 6) 880 + #define WEIGHT_SXM_SIG1_MASK (0x3f << 6) 881 + #define WEIGHT_SXM_SIG1_SHIFT 6 882 + #define WEIGHT_SXM_SIG2(x) ((x) << 12) 883 + #define WEIGHT_SXM_SIG2_MASK (0x3f << 12) 884 + #define WEIGHT_SXM_SIG2_SHIFT 12 885 + #define WEIGHT_SXS_SIG0(x) ((x) << 18) 886 + #define WEIGHT_SXS_SIG0_MASK (0x3f << 18) 887 + #define WEIGHT_SXS_SIG0_SHIFT 18 888 + #define WEIGHT_SXS_SIG1(x) ((x) << 24) 889 + #define WEIGHT_SXS_SIG1_MASK (0x3f << 24) 890 + #define WEIGHT_SXS_SIG1_SHIFT 24 891 + #define CG_CAC_REGION_3_WEIGHT_0 0x88 892 + #define WEIGHT_XBR_0(x) ((x) << 0) 893 + #define WEIGHT_XBR_0_MASK (0x3f << 0) 894 + #define WEIGHT_XBR_0_SHIFT 0 895 + #define WEIGHT_XBR_1(x) ((x) << 6) 896 + #define WEIGHT_XBR_1_MASK (0x3f << 6) 897 + #define WEIGHT_XBR_1_SHIFT 6 898 + #define WEIGHT_XBR_2(x) ((x) << 12) 899 + #define WEIGHT_XBR_2_MASK (0x3f << 12) 900 + #define WEIGHT_XBR_2_SHIFT 12 901 + #define WEIGHT_SPI_SIG0(x) ((x) << 18) 902 + #define WEIGHT_SPI_SIG0_MASK (0x3f << 18) 903 + #define WEIGHT_SPI_SIG0_SHIFT 18 904 + #define CG_CAC_REGION_3_WEIGHT_1 0x89 905 + #define WEIGHT_SPI_SIG1(x) ((x) << 0) 906 + #define WEIGHT_SPI_SIG1_MASK (0x3f << 0) 907 + #define WEIGHT_SPI_SIG1_SHIFT 0 908 + #define WEIGHT_SPI_SIG2(x) ((x) << 6) 909 + #define WEIGHT_SPI_SIG2_MASK (0x3f << 6) 910 + #define WEIGHT_SPI_SIG2_SHIFT 6 911 + #define WEIGHT_SPI_SIG3(x) ((x) << 12) 912 + #define WEIGHT_SPI_SIG3_MASK (0x3f << 12) 913 + #define WEIGHT_SPI_SIG3_SHIFT 12 914 + #define WEIGHT_SPI_SIG4(x) ((x) << 18) 915 + #define WEIGHT_SPI_SIG4_MASK (0x3f << 18) 916 + #define WEIGHT_SPI_SIG4_SHIFT 18 917 + #define WEIGHT_SPI_SIG5(x) ((x) << 24) 918 + #define WEIGHT_SPI_SIG5_MASK (0x3f << 24) 919 + #define WEIGHT_SPI_SIG5_SHIFT 24 920 + #define CG_CAC_REGION_4_WEIGHT_0 0x8a 921 + #define WEIGHT_LDS_SIG0(x) ((x) << 0) 922 + #define WEIGHT_LDS_SIG0_MASK (0x3f << 0) 923 + #define WEIGHT_LDS_SIG0_SHIFT 0 924 + #define WEIGHT_LDS_SIG1(x) ((x) << 6) 925 + #define WEIGHT_LDS_SIG1_MASK 
(0x3f << 6) 926 + #define WEIGHT_LDS_SIG1_SHIFT 6 927 + #define WEIGHT_SC(x) ((x) << 24) 928 + #define WEIGHT_SC_MASK (0x3f << 24) 929 + #define WEIGHT_SC_SHIFT 24 930 + #define CG_CAC_REGION_4_WEIGHT_1 0x8b 931 + #define WEIGHT_BIF(x) ((x) << 0) 932 + #define WEIGHT_BIF_MASK (0x3f << 0) 933 + #define WEIGHT_BIF_SHIFT 0 934 + #define WEIGHT_CP(x) ((x) << 6) 935 + #define WEIGHT_CP_MASK (0x3f << 6) 936 + #define WEIGHT_CP_SHIFT 6 937 + #define WEIGHT_PA_SIG0(x) ((x) << 12) 938 + #define WEIGHT_PA_SIG0_MASK (0x3f << 12) 939 + #define WEIGHT_PA_SIG0_SHIFT 12 940 + #define WEIGHT_PA_SIG1(x) ((x) << 18) 941 + #define WEIGHT_PA_SIG1_MASK (0x3f << 18) 942 + #define WEIGHT_PA_SIG1_SHIFT 18 943 + #define WEIGHT_VGT_SIG0(x) ((x) << 24) 944 + #define WEIGHT_VGT_SIG0_MASK (0x3f << 24) 945 + #define WEIGHT_VGT_SIG0_SHIFT 24 946 + #define CG_CAC_REGION_4_WEIGHT_2 0x8c 947 + #define WEIGHT_VGT_SIG1(x) ((x) << 0) 948 + #define WEIGHT_VGT_SIG1_MASK (0x3f << 0) 949 + #define WEIGHT_VGT_SIG1_SHIFT 0 950 + #define WEIGHT_VGT_SIG2(x) ((x) << 6) 951 + #define WEIGHT_VGT_SIG2_MASK (0x3f << 6) 952 + #define WEIGHT_VGT_SIG2_SHIFT 6 953 + #define WEIGHT_DC_SIG0(x) ((x) << 12) 954 + #define WEIGHT_DC_SIG0_MASK (0x3f << 12) 955 + #define WEIGHT_DC_SIG0_SHIFT 12 956 + #define WEIGHT_DC_SIG1(x) ((x) << 18) 957 + #define WEIGHT_DC_SIG1_MASK (0x3f << 18) 958 + #define WEIGHT_DC_SIG1_SHIFT 18 959 + #define WEIGHT_DC_SIG2(x) ((x) << 24) 960 + #define WEIGHT_DC_SIG2_MASK (0x3f << 24) 961 + #define WEIGHT_DC_SIG2_SHIFT 24 962 + #define CG_CAC_REGION_4_WEIGHT_3 0x8d 963 + #define WEIGHT_DC_SIG3(x) ((x) << 0) 964 + #define WEIGHT_DC_SIG3_MASK (0x3f << 0) 965 + #define WEIGHT_DC_SIG3_SHIFT 0 966 + #define WEIGHT_UVD_SIG0(x) ((x) << 6) 967 + #define WEIGHT_UVD_SIG0_MASK (0x3f << 6) 968 + #define WEIGHT_UVD_SIG0_SHIFT 6 969 + #define WEIGHT_UVD_SIG1(x) ((x) << 12) 970 + #define WEIGHT_UVD_SIG1_MASK (0x3f << 12) 971 + #define WEIGHT_UVD_SIG1_SHIFT 12 972 + #define WEIGHT_SPARE0(x) ((x) << 18) 973 + #define 
WEIGHT_SPARE0_MASK (0x3f << 18) 974 + #define WEIGHT_SPARE0_SHIFT 18 975 + #define WEIGHT_SPARE1(x) ((x) << 24) 976 + #define WEIGHT_SPARE1_MASK (0x3f << 24) 977 + #define WEIGHT_SPARE1_SHIFT 24 978 + #define CG_CAC_REGION_5_WEIGHT_0 0x8e 979 + #define WEIGHT_SQ_VSP(x) ((x) << 0) 980 + #define WEIGHT_SQ_VSP_MASK (0x3fff << 0) 981 + #define WEIGHT_SQ_VSP_SHIFT 0 982 + #define WEIGHT_SQ_VSP0(x) ((x) << 14) 983 + #define WEIGHT_SQ_VSP0_MASK (0x3fff << 14) 984 + #define WEIGHT_SQ_VSP0_SHIFT 14 985 + #define CG_CAC_REGION_4_OVERRIDE_4 0xab 986 + #define OVR_MODE_SPARE_0(x) ((x) << 16) 987 + #define OVR_MODE_SPARE_0_MASK (0x1 << 16) 988 + #define OVR_MODE_SPARE_0_SHIFT 16 989 + #define OVR_VAL_SPARE_0(x) ((x) << 17) 990 + #define OVR_VAL_SPARE_0_MASK (0x1 << 17) 991 + #define OVR_VAL_SPARE_0_SHIFT 17 992 + #define OVR_MODE_SPARE_1(x) ((x) << 18) 993 + #define OVR_MODE_SPARE_1_MASK (0x3f << 18) 994 + #define OVR_MODE_SPARE_1_SHIFT 18 995 + #define OVR_VAL_SPARE_1(x) ((x) << 19) 996 + #define OVR_VAL_SPARE_1_MASK (0x3f << 19) 997 + #define OVR_VAL_SPARE_1_SHIFT 19 998 + #define CG_CAC_REGION_5_WEIGHT_1 0xb7 999 + #define WEIGHT_SQ_GPR(x) ((x) << 0) 1000 + #define WEIGHT_SQ_GPR_MASK (0x3fff << 0) 1001 + #define WEIGHT_SQ_GPR_SHIFT 0 1002 + #define WEIGHT_SQ_LDS(x) ((x) << 14) 1003 + #define WEIGHT_SQ_LDS_MASK (0x3fff << 14) 1004 + #define WEIGHT_SQ_LDS_SHIFT 14 1005 + 1006 + /* PCIE link stuff */ 1007 + #define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */ 1008 + #define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */ 1009 + # define LC_LINK_WIDTH_SHIFT 0 1010 + # define LC_LINK_WIDTH_MASK 0x7 1011 + # define LC_LINK_WIDTH_X0 0 1012 + # define LC_LINK_WIDTH_X1 1 1013 + # define LC_LINK_WIDTH_X2 2 1014 + # define LC_LINK_WIDTH_X4 3 1015 + # define LC_LINK_WIDTH_X8 4 1016 + # define LC_LINK_WIDTH_X16 6 1017 + # define LC_LINK_WIDTH_RD_SHIFT 4 1018 + # define LC_LINK_WIDTH_RD_MASK 0x70 1019 + # define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7) 1020 + # define LC_RECONFIG_NOW (1 << 8) 1021 + 
# define LC_RENEGOTIATION_SUPPORT (1 << 9) 1022 + # define LC_RENEGOTIATE_EN (1 << 10) 1023 + # define LC_SHORT_RECONFIG_EN (1 << 11) 1024 + # define LC_UPCONFIGURE_SUPPORT (1 << 12) 1025 + # define LC_UPCONFIGURE_DIS (1 << 13) 1026 + #define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */ 1027 + # define LC_GEN2_EN_STRAP (1 << 0) 1028 + # define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1) 1029 + # define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5) 1030 + # define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6) 1031 + # define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8) 1032 + # define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3 1033 + # define LC_CURRENT_DATA_RATE (1 << 11) 1034 + # define LC_HW_VOLTAGE_IF_CONTROL(x) ((x) << 12) 1035 + # define LC_HW_VOLTAGE_IF_CONTROL_MASK (3 << 12) 1036 + # define LC_HW_VOLTAGE_IF_CONTROL_SHIFT 12 1037 + # define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14) 1038 + # define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21) 1039 + # define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23) 1040 + # define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24) 1041 + #define MM_CFGREGS_CNTL 0x544c 1042 + # define MM_WR_TO_CFG_EN (1 << 3) 1043 + #define LINK_CNTL2 0x88 /* F0 */ 1044 + # define TARGET_LINK_SPEED_MASK (0xf << 0) 1045 + # define SELECTABLE_DEEMPHASIS (1 << 6) 1046 + 495 1047 /* 496 1048 * UVD 497 1049 */
+329
drivers/gpu/drm/radeon/nislands_smc.h
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#ifndef __NISLANDS_SMC_H__
#define __NISLANDS_SMC_H__

/* These structs are uploaded verbatim into SMC memory, so they must be
 * byte-packed — do not let the compiler insert padding. */
#pragma pack(push, 1)

#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16

/* DPM2 (power containment) parameters for one performance level. */
struct PP_NIslands_Dpm2PerfLevel
{
    uint8_t     MaxPS;
    uint8_t     TgtAct;
    uint8_t     MaxPS_StepInc;
    uint8_t     MaxPS_StepDec;
    uint8_t     PSST;
    uint8_t     NearTDPDec;
    uint8_t     AboveSafeInc;
    uint8_t     BelowSafeInc;
    uint8_t     PSDeltaLimit;
    uint8_t     PSDeltaWin;
    uint8_t     Reserved[6];
};

typedef struct PP_NIslands_Dpm2PerfLevel PP_NIslands_Dpm2PerfLevel;

/* Global DPM2 power limits (in SMC units). */
struct PP_NIslands_DPM2Parameters
{
    uint32_t    TDPLimit;
    uint32_t    NearTDPLimit;
    uint32_t    SafePowerLimit;
    uint32_t    PowerBoostLimit;
};
typedef struct PP_NIslands_DPM2Parameters PP_NIslands_DPM2Parameters;

/* Engine-clock programming: SPLL register images plus the target sclk.
 * Field names with a leading 'v' hold the value to be written to the
 * correspondingly named register (see nid.h). */
struct NISLANDS_SMC_SCLK_VALUE
{
    uint32_t        vCG_SPLL_FUNC_CNTL;
    uint32_t        vCG_SPLL_FUNC_CNTL_2;
    uint32_t        vCG_SPLL_FUNC_CNTL_3;
    uint32_t        vCG_SPLL_FUNC_CNTL_4;
    uint32_t        vCG_SPLL_SPREAD_SPECTRUM;
    uint32_t        vCG_SPLL_SPREAD_SPECTRUM_2;
    uint32_t        sclk_value;
};

typedef struct NISLANDS_SMC_SCLK_VALUE NISLANDS_SMC_SCLK_VALUE;

/* Memory-clock programming: MPLL/MCLK register images plus target mclk. */
struct NISLANDS_SMC_MCLK_VALUE
{
    uint32_t        vMPLL_FUNC_CNTL;
    uint32_t        vMPLL_FUNC_CNTL_1;
    uint32_t        vMPLL_FUNC_CNTL_2;
    uint32_t        vMPLL_AD_FUNC_CNTL;
    uint32_t        vMPLL_AD_FUNC_CNTL_2;
    uint32_t        vMPLL_DQ_FUNC_CNTL;
    uint32_t        vMPLL_DQ_FUNC_CNTL_2;
    uint32_t        vMCLK_PWRMGT_CNTL;
    uint32_t        vDLL_CNTL;
    uint32_t        vMPLL_SS;
    uint32_t        vMPLL_SS2;
    uint32_t        mclk_value;
};

typedef struct NISLANDS_SMC_MCLK_VALUE NISLANDS_SMC_MCLK_VALUE;

/* A voltage: raw value plus its index into the SMIO voltage table. */
struct NISLANDS_SMC_VOLTAGE_VALUE
{
    uint16_t        value;
    uint8_t         index;
    uint8_t         padding;
};

typedef struct NISLANDS_SMC_VOLTAGE_VALUE NISLANDS_SMC_VOLTAGE_VALUE;

/* One hardware performance level as consumed by the SMC firmware. */
struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL
{
    uint8_t                     arbValue;
    uint8_t                     ACIndex;
    uint8_t                     displayWatermark;
    uint8_t                     gen2PCIE;       /* nonzero = run link at PCIE gen2 */
    uint8_t                     reserved1;
    uint8_t                     reserved2;
    uint8_t                     strobeMode;     /* NISLANDS_SMC_STROBE_* flags */
    uint8_t                     mcFlags;        /* NISLANDS_SMC_MC_* flags */
    uint32_t                    aT;
    uint32_t                    bSP;
    NISLANDS_SMC_SCLK_VALUE     sclk;
    NISLANDS_SMC_MCLK_VALUE     mclk;
    NISLANDS_SMC_VOLTAGE_VALUE  vddc;
    NISLANDS_SMC_VOLTAGE_VALUE  mvdd;
    NISLANDS_SMC_VOLTAGE_VALUE  vddci;
    NISLANDS_SMC_VOLTAGE_VALUE  std_vddc;
    uint32_t                    powergate_en;
    uint8_t                     hUp;
    uint8_t                     hDown;
    uint8_t                     stateFlags;
    uint8_t                     arbRefreshState;
    uint32_t                    SQPowerThrottle;
    uint32_t                    SQPowerThrottle_2;
    uint32_t                    reserved[2];
    PP_NIslands_Dpm2PerfLevel   dpm2;
};

/* strobeMode bits */
#define NISLANDS_SMC_STROBE_RATIO    0x0F
#define NISLANDS_SMC_STROBE_ENABLE   0x10

/* mcFlags bits */
#define NISLANDS_SMC_MC_EDC_RD_FLAG  0x01
#define NISLANDS_SMC_MC_EDC_WR_FLAG  0x02
#define NISLANDS_SMC_MC_RTT_ENABLE   0x04
#define NISLANDS_SMC_MC_STUTTER_EN   0x08

typedef struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL NISLANDS_SMC_HW_PERFORMANCE_LEVEL;

/* A software state: levelCount performance levels follow 'levels[0]'.
 * NOTE(review): levels is declared [1] but appears to be a variable-length
 * trailer (levelCount entries) in SMC memory — confirm against the upload
 * code before relying on sizeof(). */
struct NISLANDS_SMC_SWSTATE
{
    uint8_t                             flags;
    uint8_t                             levelCount;
    uint8_t                             padding2;
    uint8_t                             padding3;
    NISLANDS_SMC_HW_PERFORMANCE_LEVEL   levels[1];
};

typedef struct NISLANDS_SMC_SWSTATE NISLANDS_SMC_SWSTATE;

#define NISLANDS_SMC_VOLTAGEMASK_VDDC  0
#define NISLANDS_SMC_VOLTAGEMASK_MVDD  1
#define NISLANDS_SMC_VOLTAGEMASK_VDDCI 2
#define NISLANDS_SMC_VOLTAGEMASK_MAX   4

struct NISLANDS_SMC_VOLTAGEMASKTABLE
{
    uint8_t  highMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
    uint32_t lowMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
};

typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE;

#define NISLANDS_MAX_NO_VREG_STEPS 32

/* Full state table image uploaded to the SMC: fixed initial/ACPI/ULV
 * states plus the driver state with its extra dpmLevels tail. */
struct NISLANDS_SMC_STATETABLE
{
    uint8_t                             thermalProtectType;
    uint8_t                             systemFlags;
    uint8_t                             maxVDDCIndexInPPTable;
    uint8_t                             extraFlags;
    uint8_t                             highSMIO[NISLANDS_MAX_NO_VREG_STEPS];
    uint32_t                            lowSMIO[NISLANDS_MAX_NO_VREG_STEPS];
    NISLANDS_SMC_VOLTAGEMASKTABLE       voltageMaskTable;
    PP_NIslands_DPM2Parameters          dpm2Params;
    NISLANDS_SMC_SWSTATE                initialState;
    NISLANDS_SMC_SWSTATE                ACPIState;
    NISLANDS_SMC_SWSTATE                ULVState;
    NISLANDS_SMC_SWSTATE                driverState;
    /* MAX-1 because driverState already embeds the first level */
    NISLANDS_SMC_HW_PERFORMANCE_LEVEL   dpmLevels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
};

typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE;

/* Byte offsets of the SMC "soft registers" (driver/firmware mailbox
 * scratch area) relative to NI_SMC_SOFT_REGISTERS_START. */
#define NI_SMC_SOFT_REGISTERS_START        0x108

#define NI_SMC_SOFT_REGISTER_mclk_chg_timeout        0x0
#define NI_SMC_SOFT_REGISTER_delay_bbias             0xC
#define NI_SMC_SOFT_REGISTER_delay_vreg              0x10
#define NI_SMC_SOFT_REGISTER_delay_acpi              0x2C
#define NI_SMC_SOFT_REGISTER_seq_index               0x64
#define NI_SMC_SOFT_REGISTER_mvdd_chg_time           0x68
#define NI_SMC_SOFT_REGISTER_mclk_switch_lim         0x78
#define NI_SMC_SOFT_REGISTER_watermark_threshold     0x80
#define NI_SMC_SOFT_REGISTER_mc_block_delay          0x84
#define NI_SMC_SOFT_REGISTER_uvd_enabled             0x98

#define SMC_NISLANDS_MC_TPP_CAC_NUM_OF_ENTRIES 16
#define SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
#define SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 16
#define SMC_NISLANDS_BIF_LUT_NUM_OF_ENTRIES 4

/* Memory-controller throughput-vs-power CAC lookup table. */
struct SMC_NISLANDS_MC_TPP_CAC_TABLE
{
    uint32_t    tpp[SMC_NISLANDS_MC_TPP_CAC_NUM_OF_ENTRIES];
    uint32_t    cacValue[SMC_NISLANDS_MC_TPP_CAC_NUM_OF_ENTRIES];
};

typedef struct SMC_NISLANDS_MC_TPP_CAC_TABLE SMC_NISLANDS_MC_TPP_CAC_TABLE;


/* (declaration continues past the end of this chunk) */
struct PP_NIslands_CACTABLES
{
    uint32_t    cac_bif_lut[SMC_NISLANDS_BIF_LUT_NUM_OF_ENTRIES];
    uint32_t    cac_lkge_lut[SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES];

    uint32_t    pwr_const;

    uint32_t
dc_cacValue; 215 + uint32_t bif_cacValue; 216 + uint32_t lkge_pwr; 217 + 218 + uint8_t cac_width; 219 + uint8_t window_size_p2; 220 + 221 + uint8_t num_drop_lsb; 222 + uint8_t padding_0; 223 + 224 + uint32_t last_power; 225 + 226 + uint8_t AllowOvrflw; 227 + uint8_t MCWrWeight; 228 + uint8_t MCRdWeight; 229 + uint8_t padding_1[9]; 230 + 231 + uint8_t enableWinAvg; 232 + uint8_t numWin_TDP; 233 + uint8_t l2numWin_TDP; 234 + uint8_t WinIndex; 235 + 236 + uint32_t dynPwr_TDP[4]; 237 + uint32_t lkgePwr_TDP[4]; 238 + uint32_t power_TDP[4]; 239 + uint32_t avg_dynPwr_TDP; 240 + uint32_t avg_lkgePwr_TDP; 241 + uint32_t avg_power_TDP; 242 + uint32_t lts_power_TDP; 243 + uint8_t lts_truncate_n; 244 + uint8_t padding_2[7]; 245 + }; 246 + 247 + typedef struct PP_NIslands_CACTABLES PP_NIslands_CACTABLES; 248 + 249 + #define SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE 32 250 + #define SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20 251 + 252 + struct SMC_NIslands_MCRegisterAddress 253 + { 254 + uint16_t s0; 255 + uint16_t s1; 256 + }; 257 + 258 + typedef struct SMC_NIslands_MCRegisterAddress SMC_NIslands_MCRegisterAddress; 259 + 260 + 261 + struct SMC_NIslands_MCRegisterSet 262 + { 263 + uint32_t value[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; 264 + }; 265 + 266 + typedef struct SMC_NIslands_MCRegisterSet SMC_NIslands_MCRegisterSet; 267 + 268 + struct SMC_NIslands_MCRegisters 269 + { 270 + uint8_t last; 271 + uint8_t reserved[3]; 272 + SMC_NIslands_MCRegisterAddress address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; 273 + SMC_NIslands_MCRegisterSet data[SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT]; 274 + }; 275 + 276 + typedef struct SMC_NIslands_MCRegisters SMC_NIslands_MCRegisters; 277 + 278 + struct SMC_NIslands_MCArbDramTimingRegisterSet 279 + { 280 + uint32_t mc_arb_dram_timing; 281 + uint32_t mc_arb_dram_timing2; 282 + uint8_t mc_arb_rfsh_rate; 283 + uint8_t padding[3]; 284 + }; 285 + 286 + typedef struct SMC_NIslands_MCArbDramTimingRegisterSet SMC_NIslands_MCArbDramTimingRegisterSet; 287 + 288 + 
struct SMC_NIslands_MCArbDramTimingRegisters 289 + { 290 + uint8_t arb_current; 291 + uint8_t reserved[3]; 292 + SMC_NIslands_MCArbDramTimingRegisterSet data[20]; 293 + }; 294 + 295 + typedef struct SMC_NIslands_MCArbDramTimingRegisters SMC_NIslands_MCArbDramTimingRegisters; 296 + 297 + struct SMC_NISLANDS_SPLL_DIV_TABLE 298 + { 299 + uint32_t freq[256]; 300 + uint32_t ss[256]; 301 + }; 302 + 303 + #define SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK 0x01ffffff 304 + #define SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT 0 305 + #define SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_MASK 0xfe000000 306 + #define SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT 25 307 + #define SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK 0x000fffff 308 + #define SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT 0 309 + #define SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK 0xfff00000 310 + #define SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT 20 311 + 312 + typedef struct SMC_NISLANDS_SPLL_DIV_TABLE SMC_NISLANDS_SPLL_DIV_TABLE; 313 + 314 + #define NISLANDS_SMC_FIRMWARE_HEADER_LOCATION 0x100 315 + 316 + #define NISLANDS_SMC_FIRMWARE_HEADER_version 0x0 317 + #define NISLANDS_SMC_FIRMWARE_HEADER_flags 0x4 318 + #define NISLANDS_SMC_FIRMWARE_HEADER_softRegisters 0x8 319 + #define NISLANDS_SMC_FIRMWARE_HEADER_stateTable 0xC 320 + #define NISLANDS_SMC_FIRMWARE_HEADER_fanTable 0x10 321 + #define NISLANDS_SMC_FIRMWARE_HEADER_cacTable 0x14 322 + #define NISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable 0x20 323 + #define NISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable 0x2C 324 + #define NISLANDS_SMC_FIRMWARE_HEADER_spllTable 0x30 325 + 326 + #pragma pack(pop) 327 + 328 + #endif 329 +
+13
drivers/gpu/drm/radeon/ppsmc.h
··· 46 46 #define PPSMC_DISPLAY_WATERMARK_HIGH 1 47 47 48 48 #define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01 49 + #define PPSMC_STATEFLAG_POWERBOOST 0x02 49 50 50 51 #define PPSMC_Result_OK ((uint8_t)0x01) 51 52 #define PPSMC_Result_Failed ((uint8_t)0xFF) ··· 59 58 #define PPSMC_MSG_OneLevelsDisabled ((uint8_t)0x14) 60 59 #define PPSMC_MSG_TwoLevelsDisabled ((uint8_t)0x15) 61 60 #define PPSMC_MSG_EnableThermalInterrupt ((uint8_t)0x16) 61 + #define PPSMC_MSG_RunningOnAC ((uint8_t)0x17) 62 62 #define PPSMC_MSG_SwitchToSwState ((uint8_t)0x20) 63 63 #define PPSMC_MSG_SwitchToInitialState ((uint8_t)0x40) 64 64 #define PPSMC_MSG_NoForcedLevel ((uint8_t)0x41) 65 65 #define PPSMC_MSG_SwitchToMinimumPower ((uint8_t)0x51) 66 66 #define PPSMC_MSG_ResumeFromMinimumPower ((uint8_t)0x52) 67 + #define PPSMC_MSG_EnableCac ((uint8_t)0x53) 68 + #define PPSMC_MSG_DisableCac ((uint8_t)0x54) 69 + #define PPSMC_TDPClampingActive ((uint8_t)0x59) 70 + #define PPSMC_TDPClampingInactive ((uint8_t)0x5A) 67 71 #define PPSMC_MSG_NoDisplay ((uint8_t)0x5D) 68 72 #define PPSMC_MSG_HasDisplay ((uint8_t)0x5E) 73 + #define PPSMC_MSG_UVDPowerOFF ((uint8_t)0x60) 74 + #define PPSMC_MSG_UVDPowerON ((uint8_t)0x61) 69 75 #define PPSMC_MSG_EnableULV ((uint8_t)0x62) 70 76 #define PPSMC_MSG_DisableULV ((uint8_t)0x63) 71 77 #define PPSMC_MSG_EnterULV ((uint8_t)0x64) 72 78 #define PPSMC_MSG_ExitULV ((uint8_t)0x65) 79 + #define PPSMC_CACLongTermAvgEnable ((uint8_t)0x6E) 80 + #define PPSMC_CACLongTermAvgDisable ((uint8_t)0x6F) 81 + #define PPSMC_MSG_CollectCAC_PowerCorreln ((uint8_t)0x7A) 82 + #define PPSMC_MSG_SetEnabledLevels ((uint8_t)0x82) 83 + #define PPSMC_MSG_SetForcedLevels ((uint8_t)0x83) 73 84 #define PPSMC_MSG_ResetToDefaults ((uint8_t)0x84) 74 85 75 86 /* TN */
+12
drivers/gpu/drm/radeon/radeon_asic.c
··· 1906 1906 .set_uvd_clocks = &evergreen_set_uvd_clocks, 1907 1907 .get_temperature = &evergreen_get_temp, 1908 1908 }, 1909 + .dpm = { 1910 + .init = &ni_dpm_init, 1911 + .setup_asic = &ni_dpm_setup_asic, 1912 + .enable = &ni_dpm_enable, 1913 + .disable = &ni_dpm_disable, 1914 + .set_power_state = &ni_dpm_set_power_state, 1915 + .display_configuration_changed = &cypress_dpm_display_configuration_changed, 1916 + .fini = &ni_dpm_fini, 1917 + .get_sclk = &ni_dpm_get_sclk, 1918 + .get_mclk = &ni_dpm_get_mclk, 1919 + .print_power_state = &ni_dpm_print_power_state, 1920 + }, 1909 1921 .pflip = { 1910 1922 .pre_page_flip = &evergreen_pre_page_flip, 1911 1923 .page_flip = &evergreen_page_flip,
+10
drivers/gpu/drm/radeon/radeon_asic.h
··· 587 587 bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring); 588 588 void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); 589 589 590 + int ni_dpm_init(struct radeon_device *rdev); 591 + void ni_dpm_setup_asic(struct radeon_device *rdev); 592 + int ni_dpm_enable(struct radeon_device *rdev); 593 + void ni_dpm_disable(struct radeon_device *rdev); 594 + int ni_dpm_set_power_state(struct radeon_device *rdev); 595 + void ni_dpm_fini(struct radeon_device *rdev); 596 + u32 ni_dpm_get_sclk(struct radeon_device *rdev, bool low); 597 + u32 ni_dpm_get_mclk(struct radeon_device *rdev, bool low); 598 + void ni_dpm_print_power_state(struct radeon_device *rdev, 599 + struct radeon_ps *ps); 590 600 int trinity_dpm_init(struct radeon_device *rdev); 591 601 int trinity_dpm_enable(struct radeon_device *rdev); 592 602 void trinity_dpm_disable(struct radeon_device *rdev);
+1
drivers/gpu/drm/radeon/radeon_pm.c
··· 1097 1097 case CHIP_BARTS: 1098 1098 case CHIP_TURKS: 1099 1099 case CHIP_CAICOS: 1100 + case CHIP_CAYMAN: 1100 1101 case CHIP_ARUBA: 1101 1102 if (radeon_dpm == 1) 1102 1103 rdev->pm.pm_method = PM_METHOD_DPM;
+5
drivers/gpu/drm/radeon/radeon_ucode.h
··· 100 100 #define CAICOS_SMC_INT_VECTOR_START 0xffc0 101 101 #define CAICOS_SMC_INT_VECTOR_SIZE 0x0040 102 102 103 + #define CAYMAN_SMC_UCODE_START 0x0100 104 + #define CAYMAN_SMC_UCODE_SIZE 0x79ec 105 + #define CAYMAN_SMC_INT_VECTOR_START 0xffc0 106 + #define CAYMAN_SMC_INT_VECTOR_SIZE 0x0040 107 + 103 108 #endif
+27
drivers/gpu/drm/radeon/rv770_smc.c
··· 254 254 0x05, 0x0A, 0x05, 0x0A 255 255 }; 256 256 257 + static const u8 cayman_smc_int_vectors[] = 258 + { 259 + 0x12, 0x05, 0x12, 0x05, 260 + 0x12, 0x05, 0x12, 0x05, 261 + 0x12, 0x05, 0x12, 0x05, 262 + 0x12, 0x05, 0x12, 0x05, 263 + 0x12, 0x05, 0x12, 0x05, 264 + 0x12, 0x05, 0x12, 0x05, 265 + 0x12, 0x05, 0x12, 0x05, 266 + 0x12, 0x05, 0x12, 0x05, 267 + 0x12, 0x05, 0x12, 0x05, 268 + 0x12, 0x05, 0x12, 0x05, 269 + 0x12, 0x05, 0x12, 0x05, 270 + 0x12, 0x05, 0x12, 0x05, 271 + 0x12, 0x05, 0x18, 0xEA, 272 + 0x12, 0x20, 0x1C, 0x34, 273 + 0x1C, 0x34, 0x08, 0x72, 274 + 0x08, 0x72, 0x08, 0x72 275 + }; 276 + 257 277 int rv770_set_smc_sram_address(struct radeon_device *rdev, 258 278 u16 smc_address, u16 limit) 259 279 { ··· 563 543 int_vect = (const u8 *)&caicos_smc_int_vectors; 564 544 int_vect_start_address = CAICOS_SMC_INT_VECTOR_START; 565 545 int_vect_size = CAICOS_SMC_INT_VECTOR_SIZE; 546 + break; 547 + case CHIP_CAYMAN: 548 + ucode_start_address = CAYMAN_SMC_UCODE_START; 549 + ucode_size = CAYMAN_SMC_UCODE_SIZE; 550 + int_vect = (const u8 *)&cayman_smc_int_vectors; 551 + int_vect_start_address = CAYMAN_SMC_INT_VECTOR_START; 552 + int_vect_size = CAYMAN_SMC_INT_VECTOR_SIZE; 566 553 break; 567 554 default: 568 555 DRM_ERROR("unknown asic in smc ucode loader\n");