Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: add SI DPM support (v4)

v2: corrected register offset shift
v3: rebase fixes
v4: fix firmware paths
add SI smc firmware versions for sysfs dump
remove unused function forward declaration
fix the tahiti specific value of DEEP_SLEEP_CLK_SEL field
add the missing thermal controller registration
use vram_type instead of checking mem_gddr5 flag
fix incorrect index of CG_FFCT_0 register
fix incorrect register read in si_get_current_pcie_speed

Signed-off-by: Maruthi Bayyavarapu <maruthi.bayyavarapu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Maruthi Bayyavarapu and committed by
Alex Deucher
841686df 0c34f453

+9135
+6
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 1826 1826 int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk); 1827 1827 /* query virtual capabilities */ 1828 1828 u32 (*get_virtual_caps)(struct amdgpu_device *adev); 1829 + /* static power management */ 1830 + int (*get_pcie_lanes)(struct amdgpu_device *adev); 1831 + void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes); 1829 1832 }; 1830 1833 1831 1834 /* ··· 2245 2242 #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d)) 2246 2243 #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) 2247 2244 #define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev))) 2245 + #define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev)) 2246 + #define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l)) 2247 + #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev)) 2248 2248 #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) 2249 2249 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) 2250 2250 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
+127
drivers/gpu/drm/amd/amdgpu/r600_dpm.h
··· 1 + /* 2 + * Copyright 2011 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + * 22 + */ 23 + #ifndef __R600_DPM_H__ 24 + #define __R600_DPM_H__ 25 + 26 + #define R600_ASI_DFLT 10000 27 + #define R600_BSP_DFLT 0x41EB 28 + #define R600_BSU_DFLT 0x2 29 + #define R600_AH_DFLT 5 30 + #define R600_RLP_DFLT 25 31 + #define R600_RMP_DFLT 65 32 + #define R600_LHP_DFLT 40 33 + #define R600_LMP_DFLT 15 34 + #define R600_TD_DFLT 0 35 + #define R600_UTC_DFLT_00 0x24 36 + #define R600_UTC_DFLT_01 0x22 37 + #define R600_UTC_DFLT_02 0x22 38 + #define R600_UTC_DFLT_03 0x22 39 + #define R600_UTC_DFLT_04 0x22 40 + #define R600_UTC_DFLT_05 0x22 41 + #define R600_UTC_DFLT_06 0x22 42 + #define R600_UTC_DFLT_07 0x22 43 + #define R600_UTC_DFLT_08 0x22 44 + #define R600_UTC_DFLT_09 0x22 45 + #define R600_UTC_DFLT_10 0x22 46 + #define R600_UTC_DFLT_11 0x22 47 + #define R600_UTC_DFLT_12 0x22 48 + #define R600_UTC_DFLT_13 0x22 49 + #define R600_UTC_DFLT_14 0x22 50 + #define R600_DTC_DFLT_00 0x24 51 + #define R600_DTC_DFLT_01 0x22 52 + #define R600_DTC_DFLT_02 0x22 53 + #define R600_DTC_DFLT_03 0x22 54 + #define R600_DTC_DFLT_04 0x22 55 + #define R600_DTC_DFLT_05 0x22 56 + #define R600_DTC_DFLT_06 0x22 57 + #define R600_DTC_DFLT_07 0x22 58 + #define R600_DTC_DFLT_08 0x22 59 + #define R600_DTC_DFLT_09 0x22 60 + #define R600_DTC_DFLT_10 0x22 61 + #define R600_DTC_DFLT_11 0x22 62 + #define R600_DTC_DFLT_12 0x22 63 + #define R600_DTC_DFLT_13 0x22 64 + #define R600_DTC_DFLT_14 0x22 65 + #define R600_VRC_DFLT 0x0000C003 66 + #define R600_VOLTAGERESPONSETIME_DFLT 1000 67 + #define R600_BACKBIASRESPONSETIME_DFLT 1000 68 + #define R600_VRU_DFLT 0x3 69 + #define R600_SPLLSTEPTIME_DFLT 0x1000 70 + #define R600_SPLLSTEPUNIT_DFLT 0x3 71 + #define R600_TPU_DFLT 0 72 + #define R600_TPC_DFLT 0x200 73 + #define R600_SSTU_DFLT 0 74 + #define R600_SST_DFLT 0x00C8 75 + #define R600_GICST_DFLT 0x200 76 + #define R600_FCT_DFLT 0x0400 77 + #define R600_FCTU_DFLT 0 78 + #define R600_CTXCGTT3DRPHC_DFLT 0x20 79 + #define R600_CTXCGTT3DRSDC_DFLT 0x40 80 + #define R600_VDDC3DOORPHC_DFLT 0x100 
81 + #define R600_VDDC3DOORSDC_DFLT 0x7 82 + #define R600_VDDC3DOORSU_DFLT 0 83 + #define R600_MPLLLOCKTIME_DFLT 100 84 + #define R600_MPLLRESETTIME_DFLT 150 85 + #define R600_VCOSTEPPCT_DFLT 20 86 + #define R600_ENDINGVCOSTEPPCT_DFLT 5 87 + #define R600_REFERENCEDIVIDER_DFLT 4 88 + 89 + #define R600_PM_NUMBER_OF_TC 15 90 + #define R600_PM_NUMBER_OF_SCLKS 20 91 + #define R600_PM_NUMBER_OF_MCLKS 4 92 + #define R600_PM_NUMBER_OF_VOLTAGE_LEVELS 4 93 + #define R600_PM_NUMBER_OF_ACTIVITY_LEVELS 3 94 + 95 + /* XXX are these ok? */ 96 + #define R600_TEMP_RANGE_MIN (90 * 1000) 97 + #define R600_TEMP_RANGE_MAX (120 * 1000) 98 + 99 + #define FDO_PWM_MODE_STATIC 1 100 + #define FDO_PWM_MODE_STATIC_RPM 5 101 + 102 + enum r600_power_level { 103 + R600_POWER_LEVEL_LOW = 0, 104 + R600_POWER_LEVEL_MEDIUM = 1, 105 + R600_POWER_LEVEL_HIGH = 2, 106 + R600_POWER_LEVEL_CTXSW = 3, 107 + }; 108 + 109 + enum r600_td { 110 + R600_TD_AUTO, 111 + R600_TD_UP, 112 + R600_TD_DOWN, 113 + }; 114 + 115 + enum r600_display_watermark { 116 + R600_DISPLAY_WATERMARK_LOW = 0, 117 + R600_DISPLAY_WATERMARK_HIGH = 1, 118 + }; 119 + 120 + enum r600_display_gap 121 + { 122 + R600_PM_DISPLAY_GAP_VBLANK_OR_WM = 0, 123 + R600_PM_DISPLAY_GAP_VBLANK = 1, 124 + R600_PM_DISPLAY_GAP_WATERMARK = 2, 125 + R600_PM_DISPLAY_GAP_IGNORE = 3, 126 + }; 127 + #endif
+7986
drivers/gpu/drm/amd/amdgpu/si_dpm.c
··· 1 + /* 2 + * Copyright 2013 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + * 22 + */ 23 + 24 + #include "drmP.h" 25 + #include "amdgpu.h" 26 + #include "amdgpu_pm.h" 27 + #include "amdgpu_dpm.h" 28 + #include "amdgpu_atombios.h" 29 + #include "si/sid.h" 30 + #include "r600_dpm.h" 31 + #include "si_dpm.h" 32 + #include "atom.h" 33 + #include "../include/pptable.h" 34 + #include <linux/math64.h> 35 + #include <linux/seq_file.h> 36 + #include <linux/firmware.h> 37 + 38 + #define MC_CG_ARB_FREQ_F0 0x0a 39 + #define MC_CG_ARB_FREQ_F1 0x0b 40 + #define MC_CG_ARB_FREQ_F2 0x0c 41 + #define MC_CG_ARB_FREQ_F3 0x0d 42 + 43 + #define SMC_RAM_END 0x20000 44 + 45 + #define SCLK_MIN_DEEPSLEEP_FREQ 1350 46 + 47 + 48 + /* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */ 49 + #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12 50 + #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14 51 + #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16 52 + #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18 53 + #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20 54 + #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22 55 + 56 + #define BIOS_SCRATCH_4 0x5cd 57 + 58 + MODULE_FIRMWARE("radeon/tahiti_smc.bin"); 59 + MODULE_FIRMWARE("radeon/pitcairn_smc.bin"); 60 + MODULE_FIRMWARE("radeon/verde_smc.bin"); 61 + MODULE_FIRMWARE("radeon/oland_smc.bin"); 62 + MODULE_FIRMWARE("radeon/hainan_smc.bin"); 63 + 64 + union power_info { 65 + struct _ATOM_POWERPLAY_INFO info; 66 + struct _ATOM_POWERPLAY_INFO_V2 info_2; 67 + struct _ATOM_POWERPLAY_INFO_V3 info_3; 68 + struct _ATOM_PPLIB_POWERPLAYTABLE pplib; 69 + struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; 70 + struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; 71 + struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4; 72 + struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5; 73 + }; 74 + 75 + union fan_info { 76 + struct _ATOM_PPLIB_FANTABLE fan; 77 + struct _ATOM_PPLIB_FANTABLE2 fan2; 78 + struct _ATOM_PPLIB_FANTABLE3 fan3; 79 + }; 80 + 81 + union pplib_clock_info { 82 + struct _ATOM_PPLIB_R600_CLOCK_INFO r600; 83 + struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; 84 + struct 
_ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; 85 + struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; 86 + struct _ATOM_PPLIB_SI_CLOCK_INFO si; 87 + }; 88 + 89 + const u32 r600_utc[R600_PM_NUMBER_OF_TC] = 90 + { 91 + R600_UTC_DFLT_00, 92 + R600_UTC_DFLT_01, 93 + R600_UTC_DFLT_02, 94 + R600_UTC_DFLT_03, 95 + R600_UTC_DFLT_04, 96 + R600_UTC_DFLT_05, 97 + R600_UTC_DFLT_06, 98 + R600_UTC_DFLT_07, 99 + R600_UTC_DFLT_08, 100 + R600_UTC_DFLT_09, 101 + R600_UTC_DFLT_10, 102 + R600_UTC_DFLT_11, 103 + R600_UTC_DFLT_12, 104 + R600_UTC_DFLT_13, 105 + R600_UTC_DFLT_14, 106 + }; 107 + 108 + const u32 r600_dtc[R600_PM_NUMBER_OF_TC] = 109 + { 110 + R600_DTC_DFLT_00, 111 + R600_DTC_DFLT_01, 112 + R600_DTC_DFLT_02, 113 + R600_DTC_DFLT_03, 114 + R600_DTC_DFLT_04, 115 + R600_DTC_DFLT_05, 116 + R600_DTC_DFLT_06, 117 + R600_DTC_DFLT_07, 118 + R600_DTC_DFLT_08, 119 + R600_DTC_DFLT_09, 120 + R600_DTC_DFLT_10, 121 + R600_DTC_DFLT_11, 122 + R600_DTC_DFLT_12, 123 + R600_DTC_DFLT_13, 124 + R600_DTC_DFLT_14, 125 + }; 126 + 127 + static const struct si_cac_config_reg cac_weights_tahiti[] = 128 + { 129 + { 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND }, 130 + { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 131 + { 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND }, 132 + { 0x1, 0xffff0000, 16, 0xc, SISLANDS_CACCONFIG_CGIND }, 133 + { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 134 + { 0x3, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 135 + { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 136 + { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 137 + { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 138 + { 0x5, 0x0000ffff, 0, 0x8fc, SISLANDS_CACCONFIG_CGIND }, 139 + { 0x5, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 140 + { 0x6, 0x0000ffff, 0, 0x95, SISLANDS_CACCONFIG_CGIND }, 141 + { 0x6, 0xffff0000, 16, 0x34e, SISLANDS_CACCONFIG_CGIND }, 142 + { 0x18f, 0x0000ffff, 0, 0x1a1, SISLANDS_CACCONFIG_CGIND }, 143 + { 0x7, 0x0000ffff, 0, 0xda, 
SISLANDS_CACCONFIG_CGIND }, 144 + { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 145 + { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 146 + { 0x8, 0xffff0000, 16, 0x46, SISLANDS_CACCONFIG_CGIND }, 147 + { 0x9, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 148 + { 0xa, 0x0000ffff, 0, 0x208, SISLANDS_CACCONFIG_CGIND }, 149 + { 0xb, 0x0000ffff, 0, 0xe7, SISLANDS_CACCONFIG_CGIND }, 150 + { 0xb, 0xffff0000, 16, 0x948, SISLANDS_CACCONFIG_CGIND }, 151 + { 0xc, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 152 + { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 153 + { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 154 + { 0xe, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 155 + { 0xf, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 156 + { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 157 + { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 158 + { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 159 + { 0x11, 0x0000ffff, 0, 0x167, SISLANDS_CACCONFIG_CGIND }, 160 + { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 161 + { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 162 + { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 163 + { 0x13, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, 164 + { 0x14, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 165 + { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 166 + { 0x15, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND }, 167 + { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 168 + { 0x16, 0x0000ffff, 0, 0x31, SISLANDS_CACCONFIG_CGIND }, 169 + { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 170 + { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 171 + { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 172 + { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 173 + { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 174 + { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 175 + { 0x1a, 0x0000ffff, 0, 0x0, 
SISLANDS_CACCONFIG_CGIND }, 176 + { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 177 + { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 178 + { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 179 + { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 180 + { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 181 + { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 182 + { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 183 + { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 184 + { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 185 + { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 186 + { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 187 + { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 188 + { 0x6d, 0x0000ffff, 0, 0x18e, SISLANDS_CACCONFIG_CGIND }, 189 + { 0xFFFFFFFF } 190 + }; 191 + 192 + static const struct si_cac_config_reg lcac_tahiti[] = 193 + { 194 + { 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND }, 195 + { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 196 + { 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND }, 197 + { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 198 + { 0x149, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND }, 199 + { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 200 + { 0x14c, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND }, 201 + { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 202 + { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, 203 + { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 204 + { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, 205 + { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 206 + { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, 207 + { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 208 + { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, 209 + { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 210 + { 0x104, 0x0001fffe, 1, 0x2, 
SISLANDS_CACCONFIG_CGIND }, 211 + { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 212 + { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, 213 + { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 214 + { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, 215 + { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 216 + { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, 217 + { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 218 + { 0x8c, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, 219 + { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 220 + { 0x8f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, 221 + { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 222 + { 0x92, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, 223 + { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 224 + { 0x95, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, 225 + { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 226 + { 0x14f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, 227 + { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 228 + { 0x152, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, 229 + { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 230 + { 0x155, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, 231 + { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 232 + { 0x158, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, 233 + { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 234 + { 0x110, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, 235 + { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 236 + { 0x113, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, 237 + { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 238 + { 0x116, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, 239 + { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 240 + { 0x119, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, 241 + { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 242 + { 0x11c, 0x0001fffe, 
1, 0x2, SISLANDS_CACCONFIG_CGIND }, 243 + { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 244 + { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, 245 + { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 246 + { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, 247 + { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 248 + { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, 249 + { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 250 + { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, 251 + { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 252 + { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, 253 + { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 254 + { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, 255 + { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 256 + { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, 257 + { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 258 + { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, 259 + { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 260 + { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, 261 + { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 262 + { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, 263 + { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 264 + { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, 265 + { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 266 + { 0x16d, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, 267 + { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 268 + { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, 269 + { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 270 + { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, 271 + { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 272 + { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, 273 + { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 274 + { 
0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, 275 + { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 276 + { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, 277 + { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 278 + { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, 279 + { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 280 + { 0xFFFFFFFF } 281 + 282 + }; 283 + 284 + static const struct si_cac_config_reg cac_override_tahiti[] = 285 + { 286 + { 0xFFFFFFFF } 287 + }; 288 + 289 + static const struct si_powertune_data powertune_data_tahiti = 290 + { 291 + ((1 << 16) | 27027), 292 + 6, 293 + 0, 294 + 4, 295 + 95, 296 + { 297 + 0UL, 298 + 0UL, 299 + 4521550UL, 300 + 309631529UL, 301 + -1270850L, 302 + 4513710L, 303 + 40 304 + }, 305 + 595000000UL, 306 + 12, 307 + { 308 + 0, 309 + 0, 310 + 0, 311 + 0, 312 + 0, 313 + 0, 314 + 0, 315 + 0 316 + }, 317 + true 318 + }; 319 + 320 + static const struct si_dte_data dte_data_tahiti = 321 + { 322 + { 1159409, 0, 0, 0, 0 }, 323 + { 777, 0, 0, 0, 0 }, 324 + 2, 325 + 54000, 326 + 127000, 327 + 25, 328 + 2, 329 + 10, 330 + 13, 331 + { 27, 31, 35, 39, 43, 47, 54, 61, 67, 74, 81, 88, 95, 0, 0, 0 }, 332 + { 240888759, 221057860, 235370597, 162287531, 158510299, 131423027, 116673180, 103067515, 87941937, 76209048, 68209175, 64090048, 58301890, 0, 0, 0 }, 333 + { 12024, 11189, 11451, 8411, 7939, 6666, 5681, 4905, 4241, 3720, 3354, 3122, 2890, 0, 0, 0 }, 334 + 85, 335 + false 336 + }; 337 + 338 + static const struct si_dte_data dte_data_tahiti_le = 339 + { 340 + { 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 }, 341 + { 0x7D, 0x7D, 0x4E4, 0xB00, 0 }, 342 + 0x5, 343 + 0xAFC8, 344 + 0x64, 345 + 0x32, 346 + 1, 347 + 0, 348 + 0x10, 349 + { 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 }, 350 + { 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 
0x3938700, 0x3938700 }, 351 + { 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 }, 352 + 85, 353 + true 354 + }; 355 + 356 + static const struct si_dte_data dte_data_tahiti_pro = 357 + { 358 + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, 359 + { 0x0, 0x0, 0x0, 0x0, 0x0 }, 360 + 5, 361 + 45000, 362 + 100, 363 + 0xA, 364 + 1, 365 + 0, 366 + 0x10, 367 + { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, 368 + { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, 369 + { 0x7D0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, 370 + 90, 371 + true 372 + }; 373 + 374 + static const struct si_dte_data dte_data_new_zealand = 375 + { 376 + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 }, 377 + { 0x29B, 0x3E9, 0x537, 0x7D2, 0 }, 378 + 0x5, 379 + 0xAFC8, 380 + 0x69, 381 + 0x32, 382 + 1, 383 + 0, 384 + 0x10, 385 + { 0x82, 0xA0, 0xB4, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE }, 386 + { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, 387 + { 0xDAC, 0x1388, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685 }, 388 + 85, 389 + true 390 + }; 391 + 392 + static const struct si_dte_data dte_data_aruba_pro = 393 + { 394 + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, 395 + { 0x0, 0x0, 0x0, 0x0, 0x0 }, 396 + 5, 397 + 45000, 398 + 100, 399 + 0xA, 400 + 1, 401 + 0, 402 + 0x10, 403 + { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, 404 + { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 
0x989680, 0x989680 }, 405 + { 0x1000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, 406 + 90, 407 + true 408 + }; 409 + 410 + static const struct si_dte_data dte_data_malta = 411 + { 412 + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, 413 + { 0x0, 0x0, 0x0, 0x0, 0x0 }, 414 + 5, 415 + 45000, 416 + 100, 417 + 0xA, 418 + 1, 419 + 0, 420 + 0x10, 421 + { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, 422 + { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, 423 + { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, 424 + 90, 425 + true 426 + }; 427 + 428 + struct si_cac_config_reg cac_weights_pitcairn[] = 429 + { 430 + { 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND }, 431 + { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 432 + { 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 433 + { 0x1, 0xffff0000, 16, 0x24d, SISLANDS_CACCONFIG_CGIND }, 434 + { 0x2, 0x0000ffff, 0, 0x19, SISLANDS_CACCONFIG_CGIND }, 435 + { 0x3, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, 436 + { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 437 + { 0x4, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND }, 438 + { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 439 + { 0x5, 0x0000ffff, 0, 0xc11, SISLANDS_CACCONFIG_CGIND }, 440 + { 0x5, 0xffff0000, 16, 0x7f3, SISLANDS_CACCONFIG_CGIND }, 441 + { 0x6, 0x0000ffff, 0, 0x403, SISLANDS_CACCONFIG_CGIND }, 442 + { 0x6, 0xffff0000, 16, 0x367, SISLANDS_CACCONFIG_CGIND }, 443 + { 0x18f, 0x0000ffff, 0, 0x4c9, SISLANDS_CACCONFIG_CGIND }, 444 + { 0x7, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 445 + { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 446 + { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 447 + { 0x8, 0xffff0000, 16, 0x45d, SISLANDS_CACCONFIG_CGIND }, 448 + { 0x9, 0x0000ffff, 0, 
0x36d, SISLANDS_CACCONFIG_CGIND }, 449 + { 0xa, 0x0000ffff, 0, 0x534, SISLANDS_CACCONFIG_CGIND }, 450 + { 0xb, 0x0000ffff, 0, 0x5da, SISLANDS_CACCONFIG_CGIND }, 451 + { 0xb, 0xffff0000, 16, 0x880, SISLANDS_CACCONFIG_CGIND }, 452 + { 0xc, 0x0000ffff, 0, 0x201, SISLANDS_CACCONFIG_CGIND }, 453 + { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 454 + { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 455 + { 0xe, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND }, 456 + { 0xf, 0x0000ffff, 0, 0x1f, SISLANDS_CACCONFIG_CGIND }, 457 + { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 458 + { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 459 + { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 460 + { 0x11, 0x0000ffff, 0, 0x5de, SISLANDS_CACCONFIG_CGIND }, 461 + { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 462 + { 0x12, 0x0000ffff, 0, 0x7b, SISLANDS_CACCONFIG_CGIND }, 463 + { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 464 + { 0x13, 0xffff0000, 16, 0x13, SISLANDS_CACCONFIG_CGIND }, 465 + { 0x14, 0x0000ffff, 0, 0xf9, SISLANDS_CACCONFIG_CGIND }, 466 + { 0x15, 0x0000ffff, 0, 0x66, SISLANDS_CACCONFIG_CGIND }, 467 + { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 468 + { 0x4e, 0x0000ffff, 0, 0x13, SISLANDS_CACCONFIG_CGIND }, 469 + { 0x16, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 470 + { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 471 + { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 472 + { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 473 + { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 474 + { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 475 + { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 476 + { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 477 + { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 478 + { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 479 + { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 480 + { 0x1c, 
0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 481 + { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 482 + { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 483 + { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 484 + { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 485 + { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 486 + { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 487 + { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, 488 + { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 489 + { 0x6d, 0x0000ffff, 0, 0x186, SISLANDS_CACCONFIG_CGIND }, 490 + { 0xFFFFFFFF } 491 + }; 492 + 493 + static const struct si_cac_config_reg lcac_pitcairn[] = 494 + { 495 + { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, 496 + { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 497 + { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, 498 + { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 499 + { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, 500 + { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 501 + { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, 502 + { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 503 + { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, 504 + { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 505 + { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, 506 + { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 507 + { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, 508 + { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 509 + { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, 510 + { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 511 + { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, 512 + { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 513 + { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, 514 + { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, 515 + { 0x8f, 0x0001fffe, 
1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x146, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x116, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x155, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x92, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x149, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x119, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x158, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x95, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x14c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};

/* Pitcairn has no per-board CAC register overrides; sentinel-only table. */
static const struct si_cac_config_reg cac_override_pitcairn[] =
{
	{ 0xFFFFFFFF }
};

/*
 * PowerTune coefficients for Pitcairn.
 * NOTE(review): field meanings (tdp/leakage coefficients, min/max limits)
 * come from struct si_powertune_data — confirm against its definition.
 */
static const struct si_powertune_data powertune_data_pitcairn =
{
	((1 << 16) | 27027),
	5,
	0,
	6,
	100,
	{
		51600000UL,
		1800000UL,
		7194395UL,
		309631529UL,
		-1270850L,
		4513710L,
		100
	},
	117830498UL,
	12,
	{
		0,
		0,
		0,
		0,
		0,
		0,
		0,
		0
	},
	true
};

/* All-zero DTE table with 'enabled' false: DTE is disabled on Pitcairn. */
static const struct si_dte_data dte_data_pitcairn =
{
	{ 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0, 0 },
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	0,
	false
};

/* Digital Temperature Estimation parameters, Curacao XT variant. */
static const struct si_dte_data dte_data_curacao_xt =
{
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
	5,
	45000,
	100,
	0xA,
	1,
	0,
	0x10,
	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
	{ 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
	90,
	true
};

/* Curacao PRO: identical DTE parameters to Curacao XT in this table. */
static const struct si_dte_data dte_data_curacao_pro =
{
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
	5,
	45000,
	100,
	0xA,
	1,
	0,
	0x10,
	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
	{ 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
	90,
	true
};

/* Neptune XT: differs from Curacao only in the final coefficient array. */
static const struct si_dte_data dte_data_neptune_xt =
{
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
	5,
	45000,
	100,
	0xA,
	1,
	0,
	0x10,
	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
	{ 0x3A2F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
	90,
	true
};

/*
 * CAC weight tables for the Cape Verde family boards below differ only in
 * the entry for register 0x14 (Chelsea PRO 0x2BD, Chelsea XT 0x30A,
 * Heathrow 0x362, Cape Verde PRO 0x315, Cape Verde 0x3BA).
 * Fields are presumably { reg index, mask, shift, weight, type } — confirm
 * against struct si_cac_config_reg.
 */
static const struct si_cac_config_reg cac_weights_chelsea_pro[] =
{
	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x2BD, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};

static const struct si_cac_config_reg cac_weights_chelsea_xt[] =
{
	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x30A, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};

static const struct si_cac_config_reg cac_weights_heathrow[] =
{
	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x362, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};

static const struct si_cac_config_reg cac_weights_cape_verde_pro[] =
{
	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x315, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};

static const struct si_cac_config_reg cac_weights_cape_verde[] =
{
	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};

/* Local CAC (per-block leakage) configuration for Cape Verde. */
static const struct si_cac_config_reg lcac_cape_verde[] =
{
	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x143, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x8f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x146, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};

/* Cape Verde has no per-board CAC register overrides; sentinel-only table. */
static const struct si_cac_config_reg cac_override_cape_verde[] =
{
	{ 0xFFFFFFFF }
};

1081 + static const struct si_powertune_data powertune_data_cape_verde = 1082 + { 1083 + ((1 << 16) | 0x6993), 1084 + 5, 1085 + 0, 1086 + 7, 1087 + 105, 1088 + { 1089 + 0UL, 1090 + 0UL, 1091 + 7194395UL, 1092 + 309631529UL, 1093 + -1270850L, 1094 + 4513710L, 1095 + 100 1096 + }, 1097 + 117830498UL, 1098 + 12, 1099 + { 1100 + 0, 1101 + 0, 1102 + 0, 1103 + 0, 1104 + 0, 1105 + 0, 1106 + 0, 1107 + 0 1108 + }, 1109 + true 1110 + }; 1111 + 1112 + static const struct si_dte_data dte_data_cape_verde = 1113 + { 1114 + { 0, 0, 0, 0, 0 }, 1115 + { 0, 0, 0, 0, 0 }, 1116 + 0, 1117 + 0, 1118 + 0, 1119 + 0, 1120 + 0, 1121 + 0, 1122 + 0, 1123 + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, 1124 + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, 1125 + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, 1126 + 0, 1127 + false 1128 + }; 1129 + 1130 + static const struct si_dte_data dte_data_venus_xtx = 1131 + { 1132 + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, 1133 + { 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 }, 1134 + 5, 1135 + 55000, 1136 + 0x69, 1137 + 0xA, 1138 + 1, 1139 + 0, 1140 + 0x3, 1141 + { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, 1142 + { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, 1143 + { 0xD6D8, 0x88B8, 0x1555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, 1144 + 90, 1145 + true 1146 + }; 1147 + 1148 + static const struct si_dte_data dte_data_venus_xt = 1149 + { 1150 + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, 1151 + { 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 }, 1152 + 5, 1153 + 55000, 1154 + 0x69, 1155 + 0xA, 1156 + 1, 1157 + 0, 1158 + 0x3, 1159 + { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, 1160 + { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, 1161 + { 0xAFC8, 0x88B8, 0x238E, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, 1162 + 
90, 1163 + true 1164 + }; 1165 + 1166 + static const struct si_dte_data dte_data_venus_pro = 1167 + { 1168 + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, 1169 + { 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 }, 1170 + 5, 1171 + 55000, 1172 + 0x69, 1173 + 0xA, 1174 + 1, 1175 + 0, 1176 + 0x3, 1177 + { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, 1178 + { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, 1179 + { 0x88B8, 0x88B8, 0x3555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, 1180 + 90, 1181 + true 1182 + }; 1183 + 1184 + struct si_cac_config_reg cac_weights_oland[] = 1185 + { 1186 + { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, 1187 + { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, 1188 + { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, 1189 + { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, 1190 + { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 1191 + { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, 1192 + { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, 1193 + { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, 1194 + { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, 1195 + { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, 1196 + { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, 1197 + { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, 1198 + { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, 1199 + { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, 1200 + { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, 1201 + { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND }, 1202 + { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, 1203 + { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, 1204 + { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, 1205 + { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, 1206 + { 0xb, 0x0000ffff, 0, 
0x3B2, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};

/*
 * Per-SKU CAC (capacitance * activity) weight tables for Oland-family parts.
 * Each row is { register index, field mask, field shift, weight value, flag };
 * the 0x0000ffff/0 and 0xffff0000/16 pairs pack two 16-bit weights into one
 * 32-bit register.  SISLANDS_CACCONFIG_CGIND presumably selects the indirect
 * CG register space -- TODO confirm against sislands_smc.h.  Values are
 * chip-specific calibration data; a { 0xFFFFFFFF } row terminates each table.
 */
static const struct si_cac_config_reg cac_weights_mars_pro[] =
{
	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};

/* Identical to cac_weights_mars_pro except for the entry at index 0x14. */
static const struct si_cac_config_reg cac_weights_mars_xt[] =
{
	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x60, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};

/* Identical to cac_weights_mars_pro except for the entry at index 0x14. */
static const struct si_cac_config_reg cac_weights_oland_pro[] =
{
	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x90, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};

/* Identical to cac_weights_mars_pro except for the entry at index 0x14. */
static const struct si_cac_config_reg cac_weights_oland_xt[] =
{
	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x120, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};

/*
 * Local CAC (per-block leakage) configuration for Oland.  Rows come in pairs
 * per register: the 0x0001fffe/1 field carries the block weight, the
 * 0x00000001/0 field an enable bit (always 0x1 here).
 */
static const struct si_cac_config_reg lcac_oland[] =
{
	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x143, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};

/* Identical to lcac_oland except for the weight of register 0x143. */
static const struct si_cac_config_reg lcac_mars_pro[] =
{
	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};

/* No CAC register overrides for Oland: terminator-only table. */
static const struct si_cac_config_reg cac_override_oland[] =
{
	{ 0xFFFFFFFF }
};

/*
 * PowerTune calibration for Oland.  Positional initializer for
 * struct si_powertune_data -- field names are declared in the driver header;
 * the nested block is presumably the leakage-coefficient set fed to the
 * si_calculate_leakage_* helpers (t_slope, t_intercept, av, bv, t_ref order
 * matches ni_leakage_coeffients usage below) -- TODO confirm against header.
 */
static const struct si_powertune_data powertune_data_oland =
{
	((1 << 16) | 0x6993),
	5,
	0,
	7,
	105,
	{
		0UL,
		0UL,
		7194395UL,
		309631529UL,
		-1270850L,
		4513710L,
		100
	},
	117830498UL,
	12,
	{
		0,
		0,
		0,
		0,
		0,
		0,
		0,
		0
	},
	true
};

/* Mars Pro uses the same PowerTune calibration values as Oland. */
static const struct si_powertune_data powertune_data_mars_pro =
{
	((1 << 16) | 0x6993),
	5,
	0,
	7,
	105,
	{
		0UL,
		0UL,
		7194395UL,
		309631529UL,
		-1270850L,
		4513710L,
		100
	},
	117830498UL,
	12,
	{
		0,
		0,
		0,
		0,
		0,
		0,
		0,
		0
	},
	true
};

/* DTE (digital temperature estimation) disabled on Oland: all-zero config. */
static const struct si_dte_data dte_data_oland =
{
	{ 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0, 0 },
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	0,
	false
};

/* DTE enabled for Mars Pro; see si_update_dte_from_pl2() for how the r[]
 * table may be rescaled from the board TDP limits at init time. */
static const struct si_dte_data dte_data_mars_pro =
{
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
	5,
	55000,
	105,
	0xA,
	1,
	0,
	0x10,
	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
	{ 0xF627, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
	90,
	true
};

/* Identical to dte_data_mars_pro except for the first tdep_r entry. */
static const struct si_dte_data dte_data_sun_xt =
{
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
	5,
	55000,
	105,
	0xA,
	1,
	0,
	0x10,
	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
	{ 0xD555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
	90,
	true
};


/* CAC weight table for Hainan; same row format as the tables above. */
static const struct si_cac_config_reg cac_weights_hainan[] =
{
	{ 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x1dc, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0x24e, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x35e, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0x1143, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0xe17, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x441, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x28b, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0xabe, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0xf11, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x907, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0xb45, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0xd1e, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xa2c, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0x62, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0x1f3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x42, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x709, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x3a, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x357, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x314, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x6d, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0x1b9, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};

/* Same PowerTune values as Oland/Mars Pro apart from the fourth field. */
static const struct si_powertune_data powertune_data_hainan =
{
	((1 << 16) | 0x6993),
	5,
	0,
	9,
	105,
	{
		0UL,
		0UL,
		7194395UL,
		309631529UL,
		-1270850L,
		4513710L,
		100
	},
	117830498UL,
	12,
	{
		0,
		0,
		0,
		0,
		0,
		0,
		0,
		0
	},
	true
};

/* Accessors shared with the other DPM translation units. */
struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev);
struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev);
struct ni_power_info *ni_get_pi(struct amdgpu_device *adev);
struct si_ps *si_get_ps(struct amdgpu_ps *rps);

/* Forward declarations for helpers defined later in this file. */
static int si_populate_voltage_value(struct amdgpu_device *adev,
				     const struct atom_voltage_table *table,
				     u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage);
static int si_get_std_voltage_value(struct amdgpu_device *adev,
				    SISLANDS_SMC_VOLTAGE_VALUE *voltage,
				    u16 *std_voltage);
static int si_write_smc_soft_register(struct amdgpu_device *adev,
				      u16 reg_offset, u32 value);
static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
					 struct rv7xx_pl *pl,
					 SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level);
static int si_calculate_sclk_params(struct amdgpu_device *adev,
				    u32 engine_clock,
				    SISLANDS_SMC_SCLK_VALUE *sclk);

static void
si_thermal_start_smc_fan_control(struct amdgpu_device *adev); 1842 + static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev); 1843 + static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev); 1844 + static void si_dpm_set_irq_funcs(struct amdgpu_device *adev); 1845 + 1846 + extern u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg); 1847 + 1848 + static struct si_power_info *si_get_pi(struct amdgpu_device *adev) 1849 + { 1850 + struct si_power_info *pi = adev->pm.dpm.priv; 1851 + return pi; 1852 + } 1853 + 1854 + static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff, 1855 + u16 v, s32 t, u32 ileakage, u32 *leakage) 1856 + { 1857 + s64 kt, kv, leakage_w, i_leakage, vddc; 1858 + s64 temperature, t_slope, t_intercept, av, bv, t_ref; 1859 + s64 tmp; 1860 + 1861 + i_leakage = div64_s64(drm_int2fixp(ileakage), 100); 1862 + vddc = div64_s64(drm_int2fixp(v), 1000); 1863 + temperature = div64_s64(drm_int2fixp(t), 1000); 1864 + 1865 + t_slope = div64_s64(drm_int2fixp(coeff->t_slope), 100000000); 1866 + t_intercept = div64_s64(drm_int2fixp(coeff->t_intercept), 100000000); 1867 + av = div64_s64(drm_int2fixp(coeff->av), 100000000); 1868 + bv = div64_s64(drm_int2fixp(coeff->bv), 100000000); 1869 + t_ref = drm_int2fixp(coeff->t_ref); 1870 + 1871 + tmp = drm_fixp_mul(t_slope, vddc) + t_intercept; 1872 + kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature)); 1873 + kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref))); 1874 + kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc))); 1875 + 1876 + leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc); 1877 + 1878 + *leakage = drm_fixp2int(leakage_w * 1000); 1879 + } 1880 + 1881 + static void si_calculate_leakage_for_v_and_t(struct amdgpu_device *adev, 1882 + const struct ni_leakage_coeffients *coeff, 1883 + u16 v, 1884 + s32 t, 1885 + u32 i_leakage, 1886 + u32 *leakage) 1887 + { 1888 + si_calculate_leakage_for_v_and_t_formula(coeff, v, t, 
i_leakage, leakage); 1889 + } 1890 + 1891 + static void si_calculate_leakage_for_v_formula(const struct ni_leakage_coeffients *coeff, 1892 + const u32 fixed_kt, u16 v, 1893 + u32 ileakage, u32 *leakage) 1894 + { 1895 + s64 kt, kv, leakage_w, i_leakage, vddc; 1896 + 1897 + i_leakage = div64_s64(drm_int2fixp(ileakage), 100); 1898 + vddc = div64_s64(drm_int2fixp(v), 1000); 1899 + 1900 + kt = div64_s64(drm_int2fixp(fixed_kt), 100000000); 1901 + kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 100000000), 1902 + drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 100000000), vddc))); 1903 + 1904 + leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc); 1905 + 1906 + *leakage = drm_fixp2int(leakage_w * 1000); 1907 + } 1908 + 1909 + static void si_calculate_leakage_for_v(struct amdgpu_device *adev, 1910 + const struct ni_leakage_coeffients *coeff, 1911 + const u32 fixed_kt, 1912 + u16 v, 1913 + u32 i_leakage, 1914 + u32 *leakage) 1915 + { 1916 + si_calculate_leakage_for_v_formula(coeff, fixed_kt, v, i_leakage, leakage); 1917 + } 1918 + 1919 + 1920 + static void si_update_dte_from_pl2(struct amdgpu_device *adev, 1921 + struct si_dte_data *dte_data) 1922 + { 1923 + u32 p_limit1 = adev->pm.dpm.tdp_limit; 1924 + u32 p_limit2 = adev->pm.dpm.near_tdp_limit; 1925 + u32 k = dte_data->k; 1926 + u32 t_max = dte_data->max_t; 1927 + u32 t_split[5] = { 10, 15, 20, 25, 30 }; 1928 + u32 t_0 = dte_data->t0; 1929 + u32 i; 1930 + 1931 + if (p_limit2 != 0 && p_limit2 <= p_limit1) { 1932 + dte_data->tdep_count = 3; 1933 + 1934 + for (i = 0; i < k; i++) { 1935 + dte_data->r[i] = 1936 + (t_split[i] * (t_max - t_0/(u32)1000) * (1 << 14)) / 1937 + (p_limit2 * (u32)100); 1938 + } 1939 + 1940 + dte_data->tdep_r[1] = dte_data->r[4] * 2; 1941 + 1942 + for (i = 2; i < SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE; i++) { 1943 + dte_data->tdep_r[i] = dte_data->r[4]; 1944 + } 1945 + } else { 1946 + DRM_ERROR("Invalid PL2! 
DTE will not be updated.\n"); 1947 + } 1948 + } 1949 + 1950 + struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev) 1951 + { 1952 + struct rv7xx_power_info *pi = adev->pm.dpm.priv; 1953 + 1954 + return pi; 1955 + } 1956 + 1957 + struct ni_power_info *ni_get_pi(struct amdgpu_device *adev) 1958 + { 1959 + struct ni_power_info *pi = adev->pm.dpm.priv; 1960 + 1961 + return pi; 1962 + } 1963 + 1964 + struct si_ps *si_get_ps(struct amdgpu_ps *aps) 1965 + { 1966 + struct si_ps *ps = aps->ps_priv; 1967 + 1968 + return ps; 1969 + } 1970 + 1971 + static void si_initialize_powertune_defaults(struct amdgpu_device *adev) 1972 + { 1973 + struct ni_power_info *ni_pi = ni_get_pi(adev); 1974 + struct si_power_info *si_pi = si_get_pi(adev); 1975 + bool update_dte_from_pl2 = false; 1976 + 1977 + if (adev->asic_type == CHIP_TAHITI) { 1978 + si_pi->cac_weights = cac_weights_tahiti; 1979 + si_pi->lcac_config = lcac_tahiti; 1980 + si_pi->cac_override = cac_override_tahiti; 1981 + si_pi->powertune_data = &powertune_data_tahiti; 1982 + si_pi->dte_data = dte_data_tahiti; 1983 + 1984 + switch (adev->pdev->device) { 1985 + case 0x6798: 1986 + si_pi->dte_data.enable_dte_by_default = true; 1987 + break; 1988 + case 0x6799: 1989 + si_pi->dte_data = dte_data_new_zealand; 1990 + break; 1991 + case 0x6790: 1992 + case 0x6791: 1993 + case 0x6792: 1994 + case 0x679E: 1995 + si_pi->dte_data = dte_data_aruba_pro; 1996 + update_dte_from_pl2 = true; 1997 + break; 1998 + case 0x679B: 1999 + si_pi->dte_data = dte_data_malta; 2000 + update_dte_from_pl2 = true; 2001 + break; 2002 + case 0x679A: 2003 + si_pi->dte_data = dte_data_tahiti_pro; 2004 + update_dte_from_pl2 = true; 2005 + break; 2006 + default: 2007 + if (si_pi->dte_data.enable_dte_by_default == true) 2008 + DRM_ERROR("DTE is not enabled!\n"); 2009 + break; 2010 + } 2011 + } else if (adev->asic_type == CHIP_PITCAIRN) { 2012 + switch (adev->pdev->device) { 2013 + case 0x6810: 2014 + case 0x6818: 2015 + si_pi->cac_weights = 
cac_weights_pitcairn;
			si_pi->lcac_config = lcac_pitcairn;
			si_pi->cac_override = cac_override_pitcairn;
			si_pi->powertune_data = &powertune_data_pitcairn;
			si_pi->dte_data = dte_data_curacao_xt;
			update_dte_from_pl2 = true;
			break;
		case 0x6819:
		case 0x6811:
			si_pi->cac_weights = cac_weights_pitcairn;
			si_pi->lcac_config = lcac_pitcairn;
			si_pi->cac_override = cac_override_pitcairn;
			si_pi->powertune_data = &powertune_data_pitcairn;
			si_pi->dte_data = dte_data_curacao_pro;
			update_dte_from_pl2 = true;
			break;
		case 0x6800:
		case 0x6806:
			si_pi->cac_weights = cac_weights_pitcairn;
			si_pi->lcac_config = lcac_pitcairn;
			si_pi->cac_override = cac_override_pitcairn;
			si_pi->powertune_data = &powertune_data_pitcairn;
			si_pi->dte_data = dte_data_neptune_xt;
			update_dte_from_pl2 = true;
			break;
		default:
			si_pi->cac_weights = cac_weights_pitcairn;
			si_pi->lcac_config = lcac_pitcairn;
			si_pi->cac_override = cac_override_pitcairn;
			si_pi->powertune_data = &powertune_data_pitcairn;
			si_pi->dte_data = dte_data_pitcairn;
			break;
		}
	} else if (adev->asic_type == CHIP_VERDE) {
		si_pi->lcac_config = lcac_cape_verde;
		si_pi->cac_override = cac_override_cape_verde;
		si_pi->powertune_data = &powertune_data_cape_verde;

		switch (adev->pdev->device) {
		case 0x683B:
		case 0x683F:
		case 0x6829:
		case 0x6835:
			si_pi->cac_weights = cac_weights_cape_verde_pro;
			si_pi->dte_data = dte_data_cape_verde;
			break;
		case 0x682C:
			si_pi->cac_weights = cac_weights_cape_verde_pro;
			si_pi->dte_data = dte_data_sun_xt;
			break;
		case 0x6825:
		case 0x6827:
			si_pi->cac_weights = cac_weights_heathrow;
			si_pi->dte_data = dte_data_cape_verde;
			break;
		case 0x6824:
		case 0x682D:
			si_pi->cac_weights = cac_weights_chelsea_xt;
			si_pi->dte_data = dte_data_cape_verde;
			break;
		case 0x682F:
			si_pi->cac_weights = cac_weights_chelsea_pro;
			si_pi->dte_data = dte_data_cape_verde;
			break;
		case 0x6820:
			si_pi->cac_weights = cac_weights_heathrow;
			si_pi->dte_data = dte_data_venus_xtx;
			break;
		case 0x6821:
			si_pi->cac_weights = cac_weights_heathrow;
			si_pi->dte_data = dte_data_venus_xt;
			break;
		case 0x6823:
		case 0x682B:
		case 0x6822:
		case 0x682A:
			si_pi->cac_weights = cac_weights_chelsea_pro;
			si_pi->dte_data = dte_data_venus_pro;
			break;
		default:
			si_pi->cac_weights = cac_weights_cape_verde;
			si_pi->dte_data = dte_data_cape_verde;
			break;
		}
	} else if (adev->asic_type == CHIP_OLAND) {
		switch (adev->pdev->device) {
		case 0x6601:
		case 0x6621:
		case 0x6603:
		case 0x6605:
			si_pi->cac_weights = cac_weights_mars_pro;
			si_pi->lcac_config = lcac_mars_pro;
			si_pi->cac_override = cac_override_oland;
			si_pi->powertune_data = &powertune_data_mars_pro;
			si_pi->dte_data = dte_data_mars_pro;
			update_dte_from_pl2 = true;
			break;
		case 0x6600:
		case 0x6606:
		case 0x6620:
		case 0x6604:
			si_pi->cac_weights = cac_weights_mars_xt;
			si_pi->lcac_config = lcac_mars_pro;
			si_pi->cac_override = cac_override_oland;
			si_pi->powertune_data = &powertune_data_mars_pro;
			si_pi->dte_data = dte_data_mars_pro;
			update_dte_from_pl2 = true;
			break;
		case 0x6611:
		case 0x6613:
		case 0x6608:
			si_pi->cac_weights = cac_weights_oland_pro;
			si_pi->lcac_config = lcac_mars_pro;
			si_pi->cac_override = cac_override_oland;
			si_pi->powertune_data = &powertune_data_mars_pro;
			si_pi->dte_data = dte_data_mars_pro;
			update_dte_from_pl2 = true;
			break;
		case 0x6610:
			si_pi->cac_weights = cac_weights_oland_xt;
			si_pi->lcac_config = lcac_mars_pro;
			si_pi->cac_override = cac_override_oland;
			si_pi->powertune_data = &powertune_data_mars_pro;
			si_pi->dte_data = dte_data_mars_pro;
			update_dte_from_pl2 = true;
			break;
		default:
			si_pi->cac_weights = cac_weights_oland;
			si_pi->lcac_config = lcac_oland;
			si_pi->cac_override = cac_override_oland;
			si_pi->powertune_data = &powertune_data_oland;
			si_pi->dte_data = dte_data_oland;
			break;
		}
	} else if (adev->asic_type == CHIP_HAINAN) {
		si_pi->cac_weights = cac_weights_hainan;
		si_pi->lcac_config = lcac_oland;
		si_pi->cac_override = cac_override_oland;
		si_pi->powertune_data = &powertune_data_hainan;
		si_pi->dte_data = dte_data_sun_xt;
		update_dte_from_pl2 = true;
	} else {
		DRM_ERROR("Unknown SI asic revision, failed to initialize PowerTune!\n");
		return;
	}

	/* Derive the feature-enable flags from the tables chosen above. */
	ni_pi->enable_power_containment = false;
	ni_pi->enable_cac = false;
	ni_pi->enable_sq_ramping = false;
	si_pi->enable_dte = false;

	if (si_pi->powertune_data->enable_powertune_by_default) {
		ni_pi->enable_power_containment= true;
		ni_pi->enable_cac = true;
		if (si_pi->dte_data.enable_dte_by_default) {
			si_pi->enable_dte = true;
			if (update_dte_from_pl2)
				si_update_dte_from_pl2(adev, &si_pi->dte_data);

		}
		ni_pi->enable_sq_ramping = true;
	}

	ni_pi->driver_calculate_cac_leakage = true;
	ni_pi->cac_configuration_required = true;

	if (ni_pi->cac_configuration_required) {
		ni_pi->support_cac_long_term_average = true;
		si_pi->dyn_powertune_data.l2_lta_window_size =
			si_pi->powertune_data->l2_lta_window_size_default;
		si_pi->dyn_powertune_data.lts_truncate =
			si_pi->powertune_data->lts_truncate_default;
	} else {
		ni_pi->support_cac_long_term_average = false;
		si_pi->dyn_powertune_data.l2_lta_window_size = 0;
		si_pi->dyn_powertune_data.lts_truncate = 0;
	}

	si_pi->dyn_powertune_data.disable_uvd_powertune = false;
}

/* SI reports SMC power values unscaled; factor is always 1 (0 would mean error). */
static u32 si_get_smc_power_scaling_factor(struct amdgpu_device *adev)
{
	return 1;
}

/*
 * Compute the CAC averaging window time from the CG_CAC_CTRL window field
 * and the reference clock (xclk).  Returns 0 if xclk is unavailable.
 */
static u32 si_calculate_cac_wintime(struct amdgpu_device *adev)
{
	u32 xclk;
	u32 wintime;
	u32 cac_window;
	u32 cac_window_size;

	xclk = amdgpu_asic_get_xclk(adev);

	if (xclk == 0)
		return 0;

	cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK;
	/* window size = high halfword * low halfword of the CAC_WINDOW field */
	cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF);

	wintime = (cac_window_size * 100) / xclk;

	return wintime;
}

/* Identity on SI (scaling factor is 1); kept for parity with other ASICs. */
static u32 si_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor)
{
	return power_in_watts;
}

/*
 * Apply the user's TDP adjustment (percent) to the board TDP/near-TDP limits.
 * adjust_polarity selects raising (true) vs. lowering (false) the limits.
 * Results are range-checked against 2x the board TDP; returns -EINVAL on
 * out-of-range inputs or results.
 */
static int si_calculate_adjusted_tdp_limits(struct amdgpu_device *adev,
					    bool adjust_polarity,
					    u32 tdp_adjustment,
					    u32 *tdp_limit,
					    u32 *near_tdp_limit)
{
	u32 adjustment_delta, max_tdp_limit;

	if (tdp_adjustment > (u32)adev->pm.dpm.tdp_od_limit)
		return -EINVAL;

	/* hard ceiling: 200% of the board TDP */
	max_tdp_limit = ((100 + 100) * adev->pm.dpm.tdp_limit) / 100;

	if (adjust_polarity) {
		*tdp_limit = ((100 + tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100;
		*near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted + (*tdp_limit - adev->pm.dpm.tdp_limit);
	} else {
		*tdp_limit = ((100 - tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100;
		adjustment_delta = adev->pm.dpm.tdp_limit - *tdp_limit;
		if (adjustment_delta < adev->pm.dpm.near_tdp_limit_adjusted)
			*near_tdp_limit =
adev->pm.dpm.near_tdp_limit_adjusted - adjustment_delta;
		else
			*near_tdp_limit = 0;
	}

	if ((*tdp_limit <= 0) || (*tdp_limit > max_tdp_limit))
		return -EINVAL;
	if ((*near_tdp_limit <= 0) || (*near_tdp_limit > *tdp_limit))
		return -EINVAL;

	return 0;
}

/*
 * Write the DPM2 TDP/near-TDP/safe power limits (big-endian milliwatts)
 * into SMC SRAM, and the PAPM parameters when platform power management
 * is enabled.  No-op unless power containment is enabled.
 */
static int si_populate_smc_tdp_limits(struct amdgpu_device *adev,
				      struct amdgpu_ps *amdgpu_state)
{
	struct ni_power_info *ni_pi = ni_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);

	if (ni_pi->enable_power_containment) {
		SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
		PP_SIslands_PAPMParameters *papm_parm;
		struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
		u32 scaling_factor = si_get_smc_power_scaling_factor(adev);
		u32 tdp_limit;
		u32 near_tdp_limit;
		int ret;

		if (scaling_factor == 0)
			return -EINVAL;

		memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));

		/* polarity false = lower the limits; the "???" is inherited
		 * from the original port and remains unresolved */
		ret = si_calculate_adjusted_tdp_limits(adev,
						       false, /* ??? */
						       adev->pm.dpm.tdp_adjustment,
						       &tdp_limit,
						       &near_tdp_limit);
		if (ret)
			return ret;

		/* SMC consumes big-endian milliwatts */
		smc_table->dpm2Params.TDPLimit =
			cpu_to_be32(si_scale_power_for_smc(tdp_limit, scaling_factor) * 1000);
		smc_table->dpm2Params.NearTDPLimit =
			cpu_to_be32(si_scale_power_for_smc(near_tdp_limit, scaling_factor) * 1000);
		smc_table->dpm2Params.SafePowerLimit =
			cpu_to_be32(si_scale_power_for_smc((near_tdp_limit * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);

		/* copy the 3 consecutive u32 fields starting at TDPLimit */
		ret = si_copy_bytes_to_smc(adev,
					   (si_pi->state_table_start + offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
					    offsetof(PP_SIslands_DPM2Parameters, TDPLimit)),
					   (u8 *)(&(smc_table->dpm2Params.TDPLimit)),
					   sizeof(u32) * 3,
					   si_pi->sram_end);
		if (ret)
			return ret;

		if (si_pi->enable_ppm) {
			papm_parm = &si_pi->papm_parm;
			memset(papm_parm, 0, sizeof(PP_SIslands_PAPMParameters));
			papm_parm->NearTDPLimitTherm = cpu_to_be32(ppm->dgpu_tdp);
			papm_parm->dGPU_T_Limit = cpu_to_be32(ppm->tj_max);
			papm_parm->dGPU_T_Warning = cpu_to_be32(95);
			papm_parm->dGPU_T_Hysteresis = cpu_to_be32(5);
			papm_parm->PlatformPowerLimit = 0xffffffff;
			papm_parm->NearTDPLimitPAPM = 0xffffffff;

			ret = si_copy_bytes_to_smc(adev, si_pi->papm_cfg_table_start,
						   (u8 *)papm_parm,
						   sizeof(PP_SIslands_PAPMParameters),
						   si_pi->sram_end);
			if (ret)
				return ret;
		}
	}
	return 0;
}

/*
 * Reduced variant used on state changes: refreshes only the NearTDPLimit
 * and SafePowerLimit pair (2 consecutive u32s) from the already-adjusted
 * near-TDP limit.  No-op unless power containment is enabled.
 */
static int si_populate_smc_tdp_limits_2(struct amdgpu_device *adev,
					struct amdgpu_ps *amdgpu_state)
{
	struct ni_power_info *ni_pi = ni_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);

	if (ni_pi->enable_power_containment) {
		SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
		u32 scaling_factor =
si_get_smc_power_scaling_factor(adev);
		int ret;

		memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));

		smc_table->dpm2Params.NearTDPLimit =
			cpu_to_be32(si_scale_power_for_smc(adev->pm.dpm.near_tdp_limit_adjusted, scaling_factor) * 1000);
		smc_table->dpm2Params.SafePowerLimit =
			cpu_to_be32(si_scale_power_for_smc((adev->pm.dpm.near_tdp_limit_adjusted * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);

		ret = si_copy_bytes_to_smc(adev,
					   (si_pi->state_table_start +
					    offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
					    offsetof(PP_SIslands_DPM2Parameters, NearTDPLimit)),
					   (u8 *)(&(smc_table->dpm2Params.NearTDPLimit)),
					   sizeof(u32) * 2,
					   si_pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Ratio of power at curr_std_vddc vs. prev_std_vddc (power ~ V^2), in
 * 1/1024 units with a fixed margin added.  Returns 0 when either voltage
 * is zero or the ratio would overflow 16 bits.
 */
static u16 si_calculate_power_efficiency_ratio(struct amdgpu_device *adev,
					       const u16 prev_std_vddc,
					       const u16 curr_std_vddc)
{
	u64 margin = (u64)SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN;
	u64 prev_vddc = (u64)prev_std_vddc;
	u64 curr_vddc = (u64)curr_std_vddc;
	u64 pwr_efficiency_ratio, n, d;

	if ((prev_vddc == 0) || (curr_vddc == 0))
		return 0;

	/* ratio = 1024 * curr^2 * (1 + margin/1000) / prev^2 */
	n = div64_u64((u64)1024 * curr_vddc * curr_vddc * ((u64)1000 + margin), (u64)1000);
	d = prev_vddc * prev_vddc;
	pwr_efficiency_ratio = div64_u64(n, d);

	if (pwr_efficiency_ratio > (u64)0xFFFF)
		return 0;

	return (u16)pwr_efficiency_ratio;
}

/* PowerTune is skipped for states with active UVD clocks when the
 * disable_uvd_powertune policy flag is set. */
static bool si_should_disable_uvd_powertune(struct amdgpu_device *adev,
					    struct amdgpu_ps *amdgpu_state)
{
	struct si_power_info *si_pi = si_get_pi(adev);

	if (si_pi->dyn_powertune_data.disable_uvd_powertune &&
	    amdgpu_state->vclk && amdgpu_state->dclk)
		return true;

	return false;
}

/* Evergreen-layer view of the shared power-info blob (see rv770_get_pi). */
struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev)
{
	struct evergreen_power_info *pi = adev->pm.dpm.priv;

	return pi;
}

/*
 * Fill the per-level DPM2 power-containment fields of an SMC software
 * state: pulse-skip limits derived from the sclk span between levels and
 * the V^2 power-efficiency ratio between consecutive std VDDC values.
 * Level 0 is always zeroed.  No-op unless power containment is enabled.
 */
static int si_populate_power_containment_values(struct amdgpu_device *adev,
						struct amdgpu_ps *amdgpu_state,
						SISLANDS_SMC_SWSTATE *smc_state)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct ni_power_info *ni_pi = ni_get_pi(adev);
	struct si_ps *state = si_get_ps(amdgpu_state);
	SISLANDS_SMC_VOLTAGE_VALUE vddc;
	u32 prev_sclk;
	u32 max_sclk;
	u32 min_sclk;
	u16 prev_std_vddc;
	u16 curr_std_vddc;
	int i;
	u16 pwr_efficiency_ratio;
	u8 max_ps_percent;
	bool disable_uvd_power_tune;
	int ret;

	if (ni_pi->enable_power_containment == false)
		return 0;

	if (state->performance_level_count == 0)
		return -EINVAL;

	if (smc_state->levelCount != state->performance_level_count)
		return -EINVAL;

	disable_uvd_power_tune = si_should_disable_uvd_powertune(adev, amdgpu_state);

	smc_state->levels[0].dpm2.MaxPS = 0;
	smc_state->levels[0].dpm2.NearTDPDec = 0;
	smc_state->levels[0].dpm2.AboveSafeInc = 0;
	smc_state->levels[0].dpm2.BelowSafeInc = 0;
	smc_state->levels[0].dpm2.PwrEfficiencyRatio = 0;

	for (i = 1; i < state->performance_level_count; i++) {
		prev_sclk = state->performance_levels[i-1].sclk;
		max_sclk = state->performance_levels[i].sclk;
		if (i == 1)
			max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_M;
		else
			max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_H;

		/* levels must be monotonically non-decreasing in sclk */
		if (prev_sclk > max_sclk)
			return -EINVAL;

		if ((max_ps_percent == 0) ||
		    (prev_sclk == max_sclk) ||
		    disable_uvd_power_tune) {
			min_sclk = max_sclk;
		} else if (i == 1) {
			min_sclk = prev_sclk;
		} else {
			min_sclk =
(prev_sclk * (u32)max_ps_percent) / 100;
		}

		if (min_sclk < state->performance_levels[0].sclk)
			min_sclk = state->performance_levels[0].sclk;

		if (min_sclk == 0)
			return -EINVAL;

		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
						state->performance_levels[i-1].vddc, &vddc);
		if (ret)
			return ret;

		ret = si_get_std_voltage_value(adev, &vddc, &prev_std_vddc);
		if (ret)
			return ret;

		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
						state->performance_levels[i].vddc, &vddc);
		if (ret)
			return ret;

		ret = si_get_std_voltage_value(adev, &vddc, &curr_std_vddc);
		if (ret)
			return ret;

		pwr_efficiency_ratio = si_calculate_power_efficiency_ratio(adev,
									   prev_std_vddc, curr_std_vddc);

		/* MaxPS: fraction of the level's sclk span that may be pulse-skipped */
		smc_state->levels[i].dpm2.MaxPS = (u8)((SISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk)) / max_sclk);
		smc_state->levels[i].dpm2.NearTDPDec = SISLANDS_DPM2_NEAR_TDP_DEC;
		smc_state->levels[i].dpm2.AboveSafeInc = SISLANDS_DPM2_ABOVE_SAFE_INC;
		smc_state->levels[i].dpm2.BelowSafeInc = SISLANDS_DPM2_BELOW_SAFE_INC;
		smc_state->levels[i].dpm2.PwrEfficiencyRatio = cpu_to_be16(pwr_efficiency_ratio);
	}

	return 0;
}

/*
 * Program per-level SQ power-ramping throttles.  Ramping is only applied
 * to levels at or above the sq_ramping_threshold sclk, and is disabled
 * entirely (mask-only values written) if any of the compile-time ramp
 * parameters would not fit its register field.
 */
static int si_populate_sq_ramping_values(struct amdgpu_device *adev,
					 struct amdgpu_ps *amdgpu_state,
					 SISLANDS_SMC_SWSTATE *smc_state)
{
	struct ni_power_info *ni_pi = ni_get_pi(adev);
	struct si_ps *state = si_get_ps(amdgpu_state);
	u32 sq_power_throttle, sq_power_throttle2;
	bool enable_sq_ramping = ni_pi->enable_sq_ramping;
	int i;

	if (state->performance_level_count == 0)
		return -EINVAL;

	if (smc_state->levelCount != state->performance_level_count)
		return -EINVAL;

	if (adev->pm.dpm.sq_ramping_threshold == 0)
		return -EINVAL;

	/* sanity-check each constant against the width of its register field */
	if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT))
		enable_sq_ramping = false;

	if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT))
		enable_sq_ramping = false;

	if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT))
		enable_sq_ramping = false;

	if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
		enable_sq_ramping = false;

	if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
		enable_sq_ramping = false;

	for (i = 0; i < state->performance_level_count; i++) {
		sq_power_throttle = 0;
		sq_power_throttle2 = 0;

		if ((state->performance_levels[i].sclk >= adev->pm.dpm.sq_ramping_threshold) &&
		    enable_sq_ramping) {
			sq_power_throttle |= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER);
			sq_power_throttle |= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER);
			sq_power_throttle2 |= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA);
			sq_power_throttle2 |= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE);
			sq_power_throttle2 |= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO);
		} else {
			/* all-ones field values effectively disable ramping for this level */
			sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK;
			sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
		}

		smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle);
		smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2);
	}

	return 0;
}

/*
 * Toggle SMC TDP clamping.  When enabling, the message is skipped for
 * UVD states covered by the disable_uvd_powertune policy; pc_enabled
 * tracks the SMC's acknowledged state.
 */
static int si_enable_power_containment(struct amdgpu_device *adev,
				       struct amdgpu_ps *amdgpu_new_state,
				       bool enable)
{
	struct ni_power_info *ni_pi = ni_get_pi(adev);
	PPSMC_Result smc_result;
	int ret = 0;

	if
(ni_pi->enable_power_containment) {
		if (enable) {
			if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
				smc_result = si_send_msg_to_smc(adev, PPSMC_TDPClampingActive);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
					ni_pi->pc_enabled = false;
				} else {
					ni_pi->pc_enabled = true;
				}
			}
		} else {
			smc_result = si_send_msg_to_smc(adev, PPSMC_TDPClampingInactive);
			if (smc_result != PPSMC_Result_OK)
				ret = -EINVAL;
			ni_pi->pc_enabled = false;
		}
	}

	return ret;
}

/*
 * Build the Digital Temperature Estimation configuration (big-endian)
 * from si_pi->dte_data and upload it to SMC SRAM.  Disables DTE and
 * returns early (0) when DTE is off; allocation failure also clears
 * enable_dte before returning -ENOMEM.
 */
static int si_initialize_smc_dte_tables(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	int ret = 0;
	struct si_dte_data *dte_data = &si_pi->dte_data;
	Smc_SIslands_DTE_Configuration *dte_tables = NULL;
	u32 table_size;
	u8 tdep_count;
	u32 i;

	if (dte_data == NULL)
		si_pi->enable_dte = false;

	if (si_pi->enable_dte == false)
		return 0;

	if (dte_data->k <= 0)
		return -EINVAL;

	dte_tables = kzalloc(sizeof(Smc_SIslands_DTE_Configuration), GFP_KERNEL);
	if (dte_tables == NULL) {
		si_pi->enable_dte = false;
		return -ENOMEM;
	}

	/* clamp filter-stage and temperature-dependent entry counts to the
	 * firmware table capacities */
	table_size = dte_data->k;

	if (table_size > SMC_SISLANDS_DTE_MAX_FILTER_STAGES)
		table_size = SMC_SISLANDS_DTE_MAX_FILTER_STAGES;

	tdep_count = dte_data->tdep_count;
	if (tdep_count > SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE)
		tdep_count = SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE;

	dte_tables->K = cpu_to_be32(table_size);
	dte_tables->T0 = cpu_to_be32(dte_data->t0);
	dte_tables->MaxT = cpu_to_be32(dte_data->max_t);
	dte_tables->WindowSize = dte_data->window_size;
	dte_tables->temp_select = dte_data->temp_select;
	dte_tables->DTE_mode = dte_data->dte_mode;
	dte_tables->Tthreshold = cpu_to_be32(dte_data->t_threshold);

	/* the last filter stage is replaced by the T-dependent array when present */
	if (tdep_count > 0)
		table_size--;

	for (i = 0; i < table_size; i++) {
		dte_tables->tau[i] = cpu_to_be32(dte_data->tau[i]);
		dte_tables->R[i] = cpu_to_be32(dte_data->r[i]);
	}

	dte_tables->Tdep_count = tdep_count;

	for (i = 0; i < (u32)tdep_count; i++) {
		dte_tables->T_limits[i] = dte_data->t_limits[i];
		dte_tables->Tdep_tau[i] = cpu_to_be32(dte_data->tdep_tau[i]);
		dte_tables->Tdep_R[i] = cpu_to_be32(dte_data->tdep_r[i]);
	}

	ret = si_copy_bytes_to_smc(adev, si_pi->dte_table_start, (u8 *)dte_tables,
				   sizeof(Smc_SIslands_DTE_Configuration), si_pi->sram_end);
	kfree(dte_tables);

	return ret;
}

/*
 * Find the max and min VDDC across the CAC leakage table, then lower the
 * reported min by the load-line percentage from the powertune data.
 * Returns -EINVAL on an empty/degenerate range or bad percentage.
 */
static int si_get_cac_std_voltage_max_min(struct amdgpu_device *adev,
					  u16 *max, u16 *min)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	struct amdgpu_cac_leakage_table *table =
		&adev->pm.dpm.dyn_state.cac_leakage_table;
	u32 i;
	u32 v0_loadline;


	if (table == NULL)
		return -EINVAL;

	*max = 0;
	*min = 0xFFFF;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].vddc > *max)
			*max = table->entries[i].vddc;
		if (table->entries[i].vddc < *min)
			*min = table->entries[i].vddc;
	}

	if (si_pi->powertune_data->lkge_lut_v0_percent > 100)
		return -EINVAL;

	v0_loadline = (*min) * (100 - si_pi->powertune_data->lkge_lut_v0_percent) / 100;

	if (v0_loadline > 0xFFFFUL)
		return -EINVAL;

	*min = (u16)v0_loadline;

	if ((*min > *max) || (*max == 0) || (*min == 0))
		return -EINVAL;

	return 0;
}

/* Voltage step that spans [min, max] across the leakage LUT entries,
 * rounded up so the last entry still covers max. */
static u16 si_get_cac_std_voltage_step(u16 max, u16 min)
{
	return ((max - min) +
(SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1)) /
		SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;
}

/*
 * Fill the 2-D leakage LUT (temperature x voltage) using the full
 * voltage-and-temperature leakage model.  Voltages are stored from low
 * to high (hence the reversed j index); each entry is a big-endian u16
 * clamped to 0xFFFF, in units of scaled-power / 4.
 */
static int si_init_dte_leakage_table(struct amdgpu_device *adev,
				     PP_SIslands_CacConfig *cac_tables,
				     u16 vddc_max, u16 vddc_min, u16 vddc_step,
				     u16 t0, u16 t_step)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 leakage;
	unsigned int i, j;
	s32 t;
	u32 smc_leakage;
	u32 scaling_factor;
	u16 voltage;

	scaling_factor = si_get_smc_power_scaling_factor(adev);

	for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES ; i++) {
		/* temperature in millidegrees for the model */
		t = (1000 * (i * t_step + t0));

		for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
			voltage = vddc_max - (vddc_step * j);

			si_calculate_leakage_for_v_and_t(adev,
							 &si_pi->powertune_data->leakage_coefficients,
							 voltage,
							 t,
							 si_pi->dyn_powertune_data.cac_leakage,
							 &leakage);

			smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;

			if (smc_leakage > 0xFFFF)
				smc_leakage = 0xFFFF;

			cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
				cpu_to_be16((u16)smc_leakage);
		}
	}
	return 0;
}

/*
 * Temperature-independent variant: one leakage value per voltage (fixed
 * kt), replicated across every temperature row of the LUT.
 */
static int si_init_simplified_leakage_table(struct amdgpu_device *adev,
					    PP_SIslands_CacConfig *cac_tables,
					    u16 vddc_max, u16 vddc_min, u16 vddc_step)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 leakage;
	unsigned int i, j;
	u32 smc_leakage;
	u32 scaling_factor;
	u16 voltage;

	scaling_factor = si_get_smc_power_scaling_factor(adev);

	for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
		voltage = vddc_max - (vddc_step * j);

		si_calculate_leakage_for_v(adev,
					   &si_pi->powertune_data->leakage_coefficients,
					   si_pi->powertune_data->fixed_kt,
					   voltage,
					   si_pi->dyn_powertune_data.cac_leakage,
					   &leakage);

		smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;

		if (smc_leakage > 0xFFFF)
			smc_leakage = 0xFFFF;

		for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES ; i++)
			cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
				cpu_to_be16((u16)smc_leakage);
	}
	return 0;
}

/*
 * Build and upload the full SMC CAC configuration: program the CAC
 * window register, derive the leakage LUT voltage/temperature grid, and
 * write the big-endian config block to SMC SRAM.  On any failure CAC and
 * power containment are disabled rather than failing driver init.
 */
static int si_initialize_smc_cac_tables(struct amdgpu_device *adev)
{
	struct ni_power_info *ni_pi = ni_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	PP_SIslands_CacConfig *cac_tables = NULL;
	u16 vddc_max, vddc_min, vddc_step;
	u16 t0, t_step;
	u32 load_line_slope, reg;
	int ret = 0;
	u32 ticks_per_us = amdgpu_asic_get_xclk(adev) / 100;

	if (ni_pi->enable_cac == false)
		return 0;

	cac_tables = kzalloc(sizeof(PP_SIslands_CacConfig), GFP_KERNEL);
	if (!cac_tables)
		return -ENOMEM;

	/* read-modify-write the CAC window field only */
	reg = RREG32(CG_CAC_CTRL) & ~CAC_WINDOW_MASK;
	reg |= CAC_WINDOW(si_pi->powertune_data->cac_window);
	WREG32(CG_CAC_CTRL, reg);

	si_pi->dyn_powertune_data.cac_leakage = adev->pm.dpm.cac_leakage;
	si_pi->dyn_powertune_data.dc_pwr_value =
		si_pi->powertune_data->dc_cac[NISLANDS_DCCAC_LEVEL_0];
	si_pi->dyn_powertune_data.wintime = si_calculate_cac_wintime(adev);
	si_pi->dyn_powertune_data.shift_n = si_pi->powertune_data->shift_n_default;

	si_pi->dyn_powertune_data.leakage_minimum_temperature = 80 * 1000;

	ret = si_get_cac_std_voltage_max_min(adev, &vddc_max, &vddc_min);
	if (ret)
		goto done_free;

	vddc_step = si_get_cac_std_voltage_step(vddc_max, vddc_min);
	/* re-anchor min so the LUT's top entry lands exactly on vddc_max */
	vddc_min = vddc_max - (vddc_step * (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1));
	t_step =
4;
	t0 = 60;

	if (si_pi->enable_dte || ni_pi->driver_calculate_cac_leakage)
		ret = si_init_dte_leakage_table(adev, cac_tables,
						vddc_max, vddc_min, vddc_step,
						t0, t_step);
	else
		ret = si_init_simplified_leakage_table(adev, cac_tables,
						       vddc_max, vddc_min, vddc_step);
	if (ret)
		goto done_free;

	load_line_slope = ((u32)adev->pm.dpm.load_line_slope << SMC_SISLANDS_SCALE_R) / 100;

	cac_tables->l2numWin_TDP = cpu_to_be32(si_pi->dyn_powertune_data.l2_lta_window_size);
	cac_tables->lts_truncate_n = si_pi->dyn_powertune_data.lts_truncate;
	cac_tables->SHIFT_N = si_pi->dyn_powertune_data.shift_n;
	cac_tables->lkge_lut_V0 = cpu_to_be32((u32)vddc_min);
	cac_tables->lkge_lut_Vstep = cpu_to_be32((u32)vddc_step);
	cac_tables->R_LL = cpu_to_be32(load_line_slope);
	cac_tables->WinTime = cpu_to_be32(si_pi->dyn_powertune_data.wintime);
	cac_tables->calculation_repeats = cpu_to_be32(2);
	cac_tables->dc_cac = cpu_to_be32(0);
	cac_tables->log2_PG_LKG_SCALE = 12;
	cac_tables->cac_temp = si_pi->powertune_data->operating_temp;
	cac_tables->lkge_lut_T0 = cpu_to_be32((u32)t0);
	cac_tables->lkge_lut_Tstep = cpu_to_be32((u32)t_step);

	ret = si_copy_bytes_to_smc(adev, si_pi->cac_table_start, (u8 *)cac_tables,
				   sizeof(PP_SIslands_CacConfig), si_pi->sram_end);

	if (ret)
		goto done_free;

	ret = si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ticks_per_us, ticks_per_us);

done_free:
	if (ret) {
		ni_pi->enable_cac = false;
		ni_pi->enable_power_containment = false;
	}

	kfree(cac_tables);

	/* NOTE(review): deliberately returns 0 even on failure — errors only
	 * disable CAC/containment instead of failing dpm init. */
	return 0;
}

/*
 * Apply a 0xFFFFFFFF-terminated list of masked register updates.  CGIND
 * entries address the SMC clock-gating indirect space (bounds-checked
 * against SMC_CG_IND_END); everything else is a direct MMIO RMW.
 */
static int si_program_cac_config_registers(struct amdgpu_device *adev,
					   const struct si_cac_config_reg *cac_config_regs)
{
	const struct si_cac_config_reg *config_regs = cac_config_regs;
	u32 data = 0, offset;

	if (!config_regs)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		switch (config_regs->type) {
		case SISLANDS_CACCONFIG_CGIND:
			offset = SMC_CG_IND_START + config_regs->offset;
			if (offset < SMC_CG_IND_END)
				data = RREG32_SMC(offset);
			break;
		default:
			data = RREG32(config_regs->offset);
			break;
		}

		data &= ~config_regs->mask;
		data |= ((config_regs->value << config_regs->shift) & config_regs->mask);

		switch (config_regs->type) {
		case SISLANDS_CACCONFIG_CGIND:
			offset = SMC_CG_IND_START + config_regs->offset;
			if (offset < SMC_CG_IND_END)
				WREG32_SMC(offset, data);
			break;
		default:
			WREG32(config_regs->offset, data);
			break;
		}
		config_regs++;
	}
	return 0;
}

/* Program the lcac, override, and weight register lists — in that order —
 * when CAC is enabled and requires driver-side configuration. */
static int si_initialize_hardware_cac_manager(struct amdgpu_device *adev)
{
	struct ni_power_info *ni_pi = ni_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	int ret;

	if ((ni_pi->enable_cac == false) ||
	    (ni_pi->cac_configuration_required == false))
		return 0;

	ret = si_program_cac_config_registers(adev, si_pi->lcac_config);
	if (ret)
		return ret;
	ret = si_program_cac_config_registers(adev, si_pi->cac_override);
	if (ret)
		return ret;
	ret = si_program_cac_config_registers(adev, si_pi->cac_weights);
	if (ret)
		return ret;

	return 0;
}

/*
 * Enable/disable CAC (and DTE / long-term averaging, when configured) via
 * SMC messages; tracks the result in ni_pi->cac_enabled.  Message order
 * matters to the firmware — do not reorder.
 */
static int si_enable_smc_cac(struct amdgpu_device *adev,
			     struct amdgpu_ps *amdgpu_new_state,
			     bool enable)
{
	struct ni_power_info *ni_pi = ni_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (ni_pi->enable_cac) {
		if (enable)
{
			if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
				if (ni_pi->support_cac_long_term_average) {
					smc_result = si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgEnable);
					/* best-effort: fall back without averaging */
					if (smc_result != PPSMC_Result_OK)
						ni_pi->support_cac_long_term_average = false;
				}

				smc_result = si_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
					ni_pi->cac_enabled = false;
				} else {
					ni_pi->cac_enabled = true;
				}

				if (si_pi->enable_dte) {
					smc_result = si_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
					if (smc_result != PPSMC_Result_OK)
						ret = -EINVAL;
				}
			}
		} else if (ni_pi->cac_enabled) {
			/* disable in reverse order; results intentionally unchecked */
			if (si_pi->enable_dte)
				smc_result = si_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);

			smc_result = si_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);

			ni_pi->cac_enabled = false;

			if (ni_pi->support_cac_long_term_average)
				smc_result = si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgDisable);
		}
	}
	return ret;
}

/*
 * Precompute the 256-entry SPLL divider table (one entry per 512 * 10 kHz
 * sclk step), packing PDIV/FBDIV into freq[] and CLKS/CLKV into ss[] in
 * big-endian, and upload it to SMC SRAM.  Any field that would overflow
 * its packed width aborts the table build.
 */
static int si_init_smc_spll_table(struct amdgpu_device *adev)
{
	struct ni_power_info *ni_pi = ni_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	SMC_SISLANDS_SPLL_DIV_TABLE *spll_table;
	SISLANDS_SMC_SCLK_VALUE sclk_params;
	u32 fb_div, p_div;
	u32 clk_s, clk_v;
	u32 sclk = 0;
	int ret = 0;
	u32 tmp;
	int i;

	if (si_pi->spll_table_start == 0)
		return -EINVAL;

	spll_table = kzalloc(sizeof(SMC_SISLANDS_SPLL_DIV_TABLE), GFP_KERNEL);
	if (spll_table == NULL)
		return -ENOMEM;

	for (i = 0; i < 256; i++) {
		ret = si_calculate_sclk_params(adev, sclk, &sclk_params);
		if (ret)
			break;
		p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT;
		fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
		clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT;
		clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT;

		/* repack to the narrower SMC table encoding */
		fb_div &= ~0x00001FFF;
		fb_div >>= 1;
		clk_v >>= 6;

		if (p_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT))
			ret = -EINVAL;
		if (fb_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
			ret = -EINVAL;
		if (clk_s & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
			ret = -EINVAL;
		if (clk_v & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
			ret = -EINVAL;

		if (ret)
			break;

		tmp = ((fb_div << SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) |
			((p_div << SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK);
		spll_table->freq[i] = cpu_to_be32(tmp);

		tmp = ((clk_v << SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK) |
			((clk_s << SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK);
		spll_table->ss[i] = cpu_to_be32(tmp);

		sclk += 512;
	}


	if (!ret)
		ret = si_copy_bytes_to_smc(adev, si_pi->spll_table_start,
					   (u8 *)spll_table, sizeof(SMC_SISLANDS_SPLL_DIV_TABLE),
					   si_pi->sram_end);

	if (ret)
		ni_pi->enable_power_containment = false;

	kfree(spll_table);

	return ret;
}

/* Quirk entry: match by PCI vendor/device/subsystem, then clamp sclk/mclk
 * (a 0 limit means "no clamp" for that clock). */
struct si_dpm_quirk {
	u32 chip_vendor;
	u32 chip_device;
	u32 subsys_vendor;
	u32 subsys_device;
	u32 max_sclk;
	u32 max_mclk;
};

/* cards with dpm stability
problems */ 3036 + static struct si_dpm_quirk si_dpm_quirk_list[] = { 3037 + /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */ 3038 + { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, 3039 + { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, 3040 + { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 }, 3041 + { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 }, 3042 + { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 }, 3043 + { 0, 0, 0, 0 }, 3044 + }; 3045 + 3046 + static u16 si_get_lower_of_leakage_and_vce_voltage(struct amdgpu_device *adev, 3047 + u16 vce_voltage) 3048 + { 3049 + u16 highest_leakage = 0; 3050 + struct si_power_info *si_pi = si_get_pi(adev); 3051 + int i; 3052 + 3053 + for (i = 0; i < si_pi->leakage_voltage.count; i++){ 3054 + if (highest_leakage < si_pi->leakage_voltage.entries[i].voltage) 3055 + highest_leakage = si_pi->leakage_voltage.entries[i].voltage; 3056 + } 3057 + 3058 + if (si_pi->leakage_voltage.count && (highest_leakage < vce_voltage)) 3059 + return highest_leakage; 3060 + 3061 + return vce_voltage; 3062 + } 3063 + 3064 + static int si_get_vce_clock_voltage(struct amdgpu_device *adev, 3065 + u32 evclk, u32 ecclk, u16 *voltage) 3066 + { 3067 + u32 i; 3068 + int ret = -EINVAL; 3069 + struct amdgpu_vce_clock_voltage_dependency_table *table = 3070 + &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 3071 + 3072 + if (((evclk == 0) && (ecclk == 0)) || 3073 + (table && (table->count == 0))) { 3074 + *voltage = 0; 3075 + return 0; 3076 + } 3077 + 3078 + for (i = 0; i < table->count; i++) { 3079 + if ((evclk <= table->entries[i].evclk) && 3080 + (ecclk <= table->entries[i].ecclk)) { 3081 + *voltage = table->entries[i].v; 3082 + ret = 0; 3083 + break; 3084 + } 3085 + } 3086 + 3087 + /* if no match return the highest voltage */ 3088 + if (ret) 3089 + *voltage = table->entries[table->count - 1].v; 3090 + 3091 + *voltage = si_get_lower_of_leakage_and_vce_voltage(adev, *voltage); 3092 + 
3093 + return ret; 3094 + } 3095 + 3096 + static bool si_dpm_vblank_too_short(struct amdgpu_device *adev) 3097 + { 3098 + 3099 + u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); 3100 + /* we never hit the non-gddr5 limit so disable it */ 3101 + u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0; 3102 + 3103 + if (vblank_time < switch_limit) 3104 + return true; 3105 + else 3106 + return false; 3107 + 3108 + } 3109 + 3110 + static int ni_copy_and_switch_arb_sets(struct amdgpu_device *adev, 3111 + u32 arb_freq_src, u32 arb_freq_dest) 3112 + { 3113 + u32 mc_arb_dram_timing; 3114 + u32 mc_arb_dram_timing2; 3115 + u32 burst_time; 3116 + u32 mc_cg_config; 3117 + 3118 + switch (arb_freq_src) { 3119 + case MC_CG_ARB_FREQ_F0: 3120 + mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING); 3121 + mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); 3122 + burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT; 3123 + break; 3124 + case MC_CG_ARB_FREQ_F1: 3125 + mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_1); 3126 + mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1); 3127 + burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT; 3128 + break; 3129 + case MC_CG_ARB_FREQ_F2: 3130 + mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_2); 3131 + mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2); 3132 + burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT; 3133 + break; 3134 + case MC_CG_ARB_FREQ_F3: 3135 + mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_3); 3136 + mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3); 3137 + burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT; 3138 + break; 3139 + default: 3140 + return -EINVAL; 3141 + } 3142 + 3143 + switch (arb_freq_dest) { 3144 + case MC_CG_ARB_FREQ_F0: 3145 + WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing); 3146 + WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2); 3147 + WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK); 3148 + break; 
	case MC_CG_ARB_FREQ_F1:
		WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK);
		break;
	case MC_CG_ARB_FREQ_F2:
		WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing);
		WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2);
		WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK);
		break;
	case MC_CG_ARB_FREQ_F3:
		WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing);
		WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2);
		WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK);
		break;
	default:
		return -EINVAL;
	}

	/* enable arbiter clock gating requests, then select the dest set */
	mc_cg_config = RREG32(MC_CG_CONFIG) | 0x0000000F;
	WREG32(MC_CG_CONFIG, mc_cg_config);
	WREG32_P(MC_ARB_CG, CG_ARB_REQ(arb_freq_dest), ~CG_ARB_REQ_MASK);

	return 0;
}

/*
 * Record rps as the current power state; current_rps keeps a private
 * copy of the SI state via ps_priv so later mutation of rps is safe.
 */
static void ni_update_current_ps(struct amdgpu_device *adev,
				 struct amdgpu_ps *rps)
{
	struct si_ps *new_ps = si_get_ps(rps);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct ni_power_info *ni_pi = ni_get_pi(adev);

	eg_pi->current_rps = *rps;
	ni_pi->current_ps = *new_ps;
	eg_pi->current_rps.ps_priv = &ni_pi->current_ps;
}

/* Same as ni_update_current_ps() but for the requested (target) state. */
static void ni_update_requested_ps(struct amdgpu_device *adev,
				   struct amdgpu_ps *rps)
{
	struct si_ps *new_ps = si_get_ps(rps);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct ni_power_info *ni_pi = ni_get_pi(adev);

	eg_pi->requested_rps = *rps;
	ni_pi->requested_ps = *new_ps;
	eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps;
}

/*
 * If the UVD clocks change and the engine clock is about to go DOWN,
 * reprogram vclk/dclk before the sclk change (the other ordering is
 * handled by ni_set_uvd_clock_after_set_eng_clock()).
 */
static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev,
						  struct amdgpu_ps *new_ps,
						  struct amdgpu_ps *old_ps)
{
	struct si_ps *new_state = si_get_ps(new_ps);
	struct si_ps *current_state = si_get_ps(old_ps);

	if ((new_ps->vclk == old_ps->vclk) &&
	    (new_ps->dclk == old_ps->dclk))
		return;

	if (new_state->performance_levels[new_state->performance_level_count - 1].sclk >=
	    current_state->performance_levels[current_state->performance_level_count - 1].sclk)
		return;

	amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);
}

/* Counterpart of the above: set UVD clocks after an sclk increase. */
static void ni_set_uvd_clock_after_set_eng_clock(struct amdgpu_device *adev,
						 struct amdgpu_ps *new_ps,
						 struct amdgpu_ps *old_ps)
{
	struct si_ps *new_state = si_get_ps(new_ps);
	struct si_ps *current_state = si_get_ps(old_ps);

	if ((new_ps->vclk == old_ps->vclk) &&
	    (new_ps->dclk == old_ps->dclk))
		return;

	if (new_state->performance_levels[new_state->performance_level_count - 1].sclk <
	    current_state->performance_levels[current_state->performance_level_count - 1].sclk)
		return;

	amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);
}

/*
 * Return the smallest table voltage >= the request; falls back to the
 * last entry when the request exceeds every entry.
 * NOTE(review): assumes table->count >= 1 — an empty table would read
 * entries[-1]; verify callers never pass an empty voltage table.
 */
static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage)
{
	unsigned int i;

	for (i = 0; i < table->count; i++) {
		if (voltage <= table->entries[i].value)
			return table->entries[i].value;
	}

	return table->entries[table->count - 1].value;
}

/*
 * Pick the smallest valid clock >= requested_clock, clamped to max_clock.
 * With no valid-clock list, just clamp the request to max_clock.
 */
static u32 btc_find_valid_clock(struct amdgpu_clock_array *clocks,
				u32 max_clock, u32 requested_clock)
{
	unsigned int i;

	if ((clocks == NULL) || (clocks->count == 0))
		return (requested_clock < max_clock) ? requested_clock : max_clock;

	for (i = 0; i < clocks->count; i++) {
		if (clocks->values[i] >= requested_clock)
			return (clocks->values[i] < max_clock) ?
				clocks->values[i] : max_clock;
	}

	return (clocks->values[clocks->count - 1] < max_clock) ?
		clocks->values[clocks->count - 1] : max_clock;
}

/* Validate a requested mclk against the board's valid-mclk list. */
static u32 btc_get_valid_mclk(struct amdgpu_device *adev,
			      u32 max_mclk, u32 requested_mclk)
{
	return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_mclk_values,
				    max_mclk, requested_mclk);
}

/* Validate a requested sclk against the board's valid-sclk list. */
static u32 btc_get_valid_sclk(struct amdgpu_device *adev,
			      u32 max_sclk, u32 requested_sclk)
{
	return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_sclk_values,
				    max_sclk, requested_sclk);
}

/*
 * Report the largest clock in a clock/voltage dependency table
 * (0 when the table is absent or empty).
 */
void btc_get_max_clock_from_voltage_dependency_table(struct amdgpu_clock_voltage_dependency_table *table,
						     u32 *max_clock)
{
	u32 i, clock = 0;

	if ((table == NULL) || (table->count == 0)) {
		*max_clock = clock;
		return;
	}

	for (i = 0; i < table->count; i++) {
		if (clock < table->entries[i].clk)
			clock = table->entries[i].clk;
	}
	*max_clock = clock;
}

/*
 * Raise *voltage to the table minimum for the given clock, capped at
 * max_voltage.  When the clock exceeds every table entry, *voltage is
 * raised to max_voltage.
 */
static void btc_apply_voltage_dependency_rules(struct amdgpu_clock_voltage_dependency_table *table,
					       u32 clock, u16 max_voltage, u16 *voltage)
{
	u32 i;

	if ((table == NULL) || (table->count == 0))
		return;

	for (i = 0; i < table->count; i++) {
		if (clock <= table->entries[i].clk) {
			if (*voltage < table->entries[i].v)
				*voltage = (u16)((table->entries[i].v < max_voltage) ?
						 table->entries[i].v : max_voltage);
			return;
		}
	}

	*voltage = (*voltage > max_voltage) ? *voltage : max_voltage;
}

/*
 * Keep the mclk/sclk pair inside the platform's allowed ratio/delta:
 * mclk may not exceed sclk by more than mclk_sclk_ratio, and sclk may
 * not exceed mclk by more than sclk_mclk_delta.
 */
static void btc_adjust_clock_combinations(struct amdgpu_device *adev,
					  const struct amdgpu_clock_and_voltage_limits *max_limits,
					  struct rv7xx_pl *pl)
{

	if ((pl->mclk == 0) || (pl->sclk == 0))
		return;

	if (pl->mclk == pl->sclk)
		return;

	if (pl->mclk > pl->sclk) {
		if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > adev->pm.dpm.dyn_state.mclk_sclk_ratio)
			pl->sclk = btc_get_valid_sclk(adev,
						      max_limits->sclk,
						      (pl->mclk +
						       (adev->pm.dpm.dyn_state.mclk_sclk_ratio - 1)) /
						      adev->pm.dpm.dyn_state.mclk_sclk_ratio);
	} else {
		if ((pl->sclk - pl->mclk) > adev->pm.dpm.dyn_state.sclk_mclk_delta)
			pl->mclk = btc_get_valid_mclk(adev,
						      max_limits->mclk,
						      pl->sclk -
						      adev->pm.dpm.dyn_state.sclk_mclk_delta);
	}
}

/*
 * Keep vddc and vddci within vddc_vddci_delta of each other by raising
 * the lower rail to the nearest valid table voltage (capped at its max).
 */
static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
					  u16 max_vddc, u16 max_vddci,
					  u16 *vddc, u16 *vddci)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	u16 new_voltage;

	if ((0 == *vddc) || (0 == *vddci))
		return;

	if (*vddc > *vddci) {
		if ((*vddc - *vddci) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
			new_voltage = btc_find_voltage(&eg_pi->vddci_voltage_table,
						       (*vddc - adev->pm.dpm.dyn_state.vddc_vddci_delta));
			*vddci = (new_voltage < max_vddci) ? new_voltage : max_vddci;
		}
	} else {
		if ((*vddci - *vddc) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
			new_voltage = btc_find_voltage(&eg_pi->vddc_voltage_table,
						       (*vddci - adev->pm.dpm.dyn_state.vddc_vddci_delta));
			*vddc = (new_voltage < max_vddc) ?
new_voltage : max_vddc; 3363 + } 3364 + } 3365 + } 3366 + 3367 + static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev, 3368 + u32 sys_mask, 3369 + enum amdgpu_pcie_gen asic_gen, 3370 + enum amdgpu_pcie_gen default_gen) 3371 + { 3372 + switch (asic_gen) { 3373 + case AMDGPU_PCIE_GEN1: 3374 + return AMDGPU_PCIE_GEN1; 3375 + case AMDGPU_PCIE_GEN2: 3376 + return AMDGPU_PCIE_GEN2; 3377 + case AMDGPU_PCIE_GEN3: 3378 + return AMDGPU_PCIE_GEN3; 3379 + default: 3380 + if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3)) 3381 + return AMDGPU_PCIE_GEN3; 3382 + else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2)) 3383 + return AMDGPU_PCIE_GEN2; 3384 + else 3385 + return AMDGPU_PCIE_GEN1; 3386 + } 3387 + return AMDGPU_PCIE_GEN1; 3388 + } 3389 + 3390 + static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b, 3391 + u32 *p, u32 *u) 3392 + { 3393 + u32 b_c = 0; 3394 + u32 i_c; 3395 + u32 tmp; 3396 + 3397 + i_c = (i * r_c) / 100; 3398 + tmp = i_c >> p_b; 3399 + 3400 + while (tmp) { 3401 + b_c++; 3402 + tmp >>= 1; 3403 + } 3404 + 3405 + *u = (b_c + 1) / 2; 3406 + *p = i_c / (1 << (2 * (*u))); 3407 + } 3408 + 3409 + static int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th) 3410 + { 3411 + u32 k, a, ah, al; 3412 + u32 t1; 3413 + 3414 + if ((fl == 0) || (fh == 0) || (fl > fh)) 3415 + return -EINVAL; 3416 + 3417 + k = (100 * fh) / fl; 3418 + t1 = (t * (k - 100)); 3419 + a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100)); 3420 + a = (a + 5) / 10; 3421 + ah = ((a * t) + 5000) / 10000; 3422 + al = a - ah; 3423 + 3424 + *th = t - ah; 3425 + *tl = t + al; 3426 + 3427 + return 0; 3428 + } 3429 + 3430 + static bool r600_is_uvd_state(u32 class, u32 class2) 3431 + { 3432 + if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) 3433 + return true; 3434 + if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) 3435 + return true; 3436 + if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) 3437 + return true; 3438 + if (class 
& ATOM_PPLIB_CLASSIFICATION_SDSTATE) 3439 + return true; 3440 + if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) 3441 + return true; 3442 + return false; 3443 + } 3444 + 3445 + static u8 rv770_get_memory_module_index(struct amdgpu_device *adev) 3446 + { 3447 + return (u8) ((RREG32(BIOS_SCRATCH_4) >> 16) & 0xff); 3448 + } 3449 + 3450 + static void rv770_get_max_vddc(struct amdgpu_device *adev) 3451 + { 3452 + struct rv7xx_power_info *pi = rv770_get_pi(adev); 3453 + u16 vddc; 3454 + 3455 + if (amdgpu_atombios_get_max_vddc(adev, 0, 0, &vddc)) 3456 + pi->max_vddc = 0; 3457 + else 3458 + pi->max_vddc = vddc; 3459 + } 3460 + 3461 + static void rv770_get_engine_memory_ss(struct amdgpu_device *adev) 3462 + { 3463 + struct rv7xx_power_info *pi = rv770_get_pi(adev); 3464 + struct amdgpu_atom_ss ss; 3465 + 3466 + pi->sclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss, 3467 + ASIC_INTERNAL_ENGINE_SS, 0); 3468 + pi->mclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss, 3469 + ASIC_INTERNAL_MEMORY_SS, 0); 3470 + 3471 + if (pi->sclk_ss || pi->mclk_ss) 3472 + pi->dynamic_ss = true; 3473 + else 3474 + pi->dynamic_ss = false; 3475 + } 3476 + 3477 + 3478 + static void si_apply_state_adjust_rules(struct amdgpu_device *adev, 3479 + struct amdgpu_ps *rps) 3480 + { 3481 + struct si_ps *ps = si_get_ps(rps); 3482 + struct amdgpu_clock_and_voltage_limits *max_limits; 3483 + bool disable_mclk_switching = false; 3484 + bool disable_sclk_switching = false; 3485 + u32 mclk, sclk; 3486 + u16 vddc, vddci, min_vce_voltage = 0; 3487 + u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; 3488 + u32 max_sclk = 0, max_mclk = 0; 3489 + int i; 3490 + struct si_dpm_quirk *p = si_dpm_quirk_list; 3491 + 3492 + /* Apply dpm quirks */ 3493 + while (p && p->chip_device != 0) { 3494 + if (adev->pdev->vendor == p->chip_vendor && 3495 + adev->pdev->device == p->chip_device && 3496 + adev->pdev->subsystem_vendor == p->subsys_vendor && 3497 + adev->pdev->subsystem_device == p->subsys_device) { 3498 + max_sclk = 
 p->max_sclk;
			max_mclk = p->max_mclk;
			break;
		}
		++p;
	}

	/* VCE active: force the per-level VCE clocks and minimum voltage */
	if (rps->vce_active) {
		rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
		rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
		si_get_vce_clock_voltage(adev, rps->evclk, rps->ecclk,
					 &min_vce_voltage);
	} else {
		rps->evclk = 0;
		rps->ecclk = 0;
	}

	/* multiple heads or short vblank: mclk switching would glitch scanout */
	if ((adev->pm.dpm.new_active_crtc_count > 1) ||
	    si_dpm_vblank_too_short(adev))
		disable_mclk_switching = true;

	/* UVD playback: pin both clock domains */
	if (rps->vclk || rps->dclk) {
		disable_mclk_switching = true;
		disable_sclk_switching = true;
	}

	if (adev->pm.dpm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	/* make vddc monotonically non-decreasing across levels */
	for (i = ps->performance_level_count - 2; i >= 0; i--) {
		if (ps->performance_levels[i].vddc > ps->performance_levels[i+1].vddc)
			ps->performance_levels[i].vddc = ps->performance_levels[i+1].vddc;
	}
	/* on battery, clamp every level to the DC limits */
	if (adev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
			if (ps->performance_levels[i].vddc > max_limits->vddc)
				ps->performance_levels[i].vddc = max_limits->vddc;
			if (ps->performance_levels[i].vddci > max_limits->vddci)
				ps->performance_levels[i].vddci = max_limits->vddci;
		}
	}

	/* limit clocks to max supported clocks based on voltage dependency tables */
	btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
							&max_sclk_vddc);
	btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
							&max_mclk_vddci);
	btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
							&max_mclk_vddc);

	for (i = 0; i < ps->performance_level_count; i++) {
		if (max_sclk_vddc) {
			if (ps->performance_levels[i].sclk > max_sclk_vddc)
				ps->performance_levels[i].sclk = max_sclk_vddc;
		}
		if (max_mclk_vddci) {
			if (ps->performance_levels[i].mclk > max_mclk_vddci)
				ps->performance_levels[i].mclk = max_mclk_vddci;
		}
		if (max_mclk_vddc) {
			if (ps->performance_levels[i].mclk > max_mclk_vddc)
				ps->performance_levels[i].mclk = max_mclk_vddc;
		}
		/* per-board quirk caps last */
		if (max_mclk) {
			if (ps->performance_levels[i].mclk > max_mclk)
				ps->performance_levels[i].mclk = max_mclk;
		}
		if (max_sclk) {
			if (ps->performance_levels[i].sclk > max_sclk)
				ps->performance_levels[i].sclk = max_sclk;
		}
	}

	/* XXX validate the min clocks required for display */

	/* when switching is disabled, run the highest level's clock everywhere */
	if (disable_mclk_switching) {
		mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
		vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
	} else {
		mclk = ps->performance_levels[0].mclk;
		vddci = ps->performance_levels[0].vddci;
	}

	if (disable_sclk_switching) {
		sclk = ps->performance_levels[ps->performance_level_count - 1].sclk;
		vddc = ps->performance_levels[ps->performance_level_count - 1].vddc;
	} else {
		sclk = ps->performance_levels[0].sclk;
		vddc = ps->performance_levels[0].vddc;
	}

	if (rps->vce_active) {
		if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
			sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
		if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
			mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
	}

	/* adjusted low state */
	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;
	ps->performance_levels[0].vddc = vddc;
	ps->performance_levels[0].vddci = vddci;

	if (disable_sclk_switching) {
		/* flatten sclk/vddc to the state's maximum across all levels */
		sclk = ps->performance_levels[0].sclk;
		for (i = 1; i < ps->performance_level_count; i++) {
			if (sclk < ps->performance_levels[i].sclk)
				sclk = ps->performance_levels[i].sclk;
		}
		for (i = 0; i < ps->performance_level_count; i++) {
			ps->performance_levels[i].sclk = sclk;
			ps->performance_levels[i].vddc = vddc;
		}
	} else {
		/* otherwise just keep sclk/vddc monotonically non-decreasing */
		for (i = 1; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
				ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
			if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
				ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
		}
	}

	if (disable_mclk_switching) {
		/* same flattening for mclk/vddci */
		mclk = ps->performance_levels[0].mclk;
		for (i = 1; i < ps->performance_level_count; i++) {
			if (mclk < ps->performance_levels[i].mclk)
				mclk = ps->performance_levels[i].mclk;
		}
		for (i = 0; i < ps->performance_level_count; i++) {
			ps->performance_levels[i].mclk = mclk;
			ps->performance_levels[i].vddci = vddci;
		}
	} else {
		for (i = 1; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk)
				ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk;
			if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci)
				ps->performance_levels[i].vddci = ps->performance_levels[i - 1].vddci;
		}
	}

	for (i = 0; i < ps->performance_level_count; i++)
		btc_adjust_clock_combinations(adev, max_limits,
					      &ps->performance_levels[i]);

	for (i = 0; i < ps->performance_level_count; i++) {
		if (ps->performance_levels[i].vddc < min_vce_voltage)
			ps->performance_levels[i].vddc = min_vce_voltage;
		btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
						   ps->performance_levels[i].sclk,
						   max_limits->vddc, &ps->performance_levels[i].vddc);
		btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						   ps->performance_levels[i].mclk,
						   max_limits->vddci, &ps->performance_levels[i].vddci);
		btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						   ps->performance_levels[i].mclk,
						   max_limits->vddc, &ps->performance_levels[i].vddc);
		btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
						   adev->clock.current_dispclk,
						   max_limits->vddc, &ps->performance_levels[i].vddc);
	}

	for (i = 0; i < ps->performance_level_count; i++) {
		btc_apply_voltage_delta_rules(adev,
					      max_limits->vddc, max_limits->vddci,
					      &ps->performance_levels[i].vddc,
					      &ps->performance_levels[i].vddci);
	}

	/* the state is DC-compatible only if no level exceeds the DC vddc limit */
	ps->dc_compatible = true;
	for (i = 0; i < ps->performance_level_count; i++) {
		if (ps->performance_levels[i].vddc > adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc)
			ps->dc_compatible = false;
	}
}

#if 0
/* Read one dword from the SMC soft-register area (currently unused). */
static int si_read_smc_soft_register(struct amdgpu_device *adev,
				     u16 reg_offset, u32 *value)
{
	struct si_power_info *si_pi = si_get_pi(adev);

	return si_read_smc_sram_dword(adev,
				      si_pi->soft_regs_start + reg_offset, value,
				      si_pi->sram_end);
}
#endif

/* Write one dword into the SMC soft-register area. */
static int si_write_smc_soft_register(struct
 amdgpu_device *adev,
				      u16 reg_offset, u32 value)
{
	struct si_power_info *si_pi = si_get_pi(adev);

	return si_write_smc_sram_dword(adev,
				       si_pi->soft_regs_start + reg_offset,
				       value, si_pi->sram_end);
}

/*
 * Detect a specific 1GB GDDR5 Tahiti board (device 0x6819, particular
 * memory vendor/revision, 1GB density) that needs special handling.
 */
static bool si_is_special_1gb_platform(struct amdgpu_device *adev)
{
	bool ret = false;
	u32 tmp, width, row, column, bank, density;
	bool is_memory_gddr5, is_special;

	tmp = RREG32(MC_SEQ_MISC0);
	is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == ((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT));
	/* bitwise & of two bool comparisons — equivalent to && here since both are 0/1 */
	is_special = (MC_SEQ_MISC0_REV_ID_VALUE == ((tmp & MC_SEQ_MISC0_REV_ID_MASK) >> MC_SEQ_MISC0_REV_ID_SHIFT))
		& (MC_SEQ_MISC0_VEN_ID_VALUE == ((tmp & MC_SEQ_MISC0_VEN_ID_MASK) >> MC_SEQ_MISC0_VEN_ID_SHIFT));

	WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb);
	width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 16 : 32;

	tmp = RREG32(MC_ARB_RAMCFG);
	row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10;
	column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8;
	bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2;

	density = (1 << (row + column - 20 + bank)) * width;

	if ((adev->pdev->device == 0x6819) &&
	    is_memory_gddr5 && is_special && (density == 0x400))
		ret = true;

	return ret;
}

/*
 * Build the leakage-voltage table by probing each leakage index through
 * atombios; entries equal to their own index are placeholders and skipped.
 */
static void si_get_leakage_vddc(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	u16 vddc, count = 0;
	int i, ret;

	for (i = 0; i < SISLANDS_MAX_LEAKAGE_COUNT; i++) {
		ret = amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(adev, &vddc, SISLANDS_LEAKAGE_INDEX0 + i);

		if (!ret && (vddc > 0) && (vddc != (SISLANDS_LEAKAGE_INDEX0 + i))) {
			si_pi->leakage_voltage.entries[count].voltage = vddc;
			si_pi->leakage_voltage.entries[count].leakage_index =
				SISLANDS_LEAKAGE_INDEX0 + i;
			count++;
		}
	}
	si_pi->leakage_voltage.count = count;
}

/*
 * Translate a leakage index (0xff01..) into its real voltage.
 * Returns 0 on success, -EINVAL for a malformed index, -EAGAIN when the
 * index is valid but not present in the cached table.
 */
static int si_get_leakage_voltage_from_leakage_index(struct amdgpu_device *adev,
						     u32 index, u16 *leakage_voltage)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	int i;

	if (leakage_voltage == NULL)
		return -EINVAL;

	/* leakage indices all live in the 0xffXX range */
	if ((index & 0xff00) != 0xff00)
		return -EINVAL;

	if ((index & 0xff) > SISLANDS_MAX_LEAKAGE_COUNT + 1)
		return -EINVAL;

	if (index < SISLANDS_LEAKAGE_INDEX0)
		return -EINVAL;

	for (i = 0; i < si_pi->leakage_voltage.count; i++) {
		if (si_pi->leakage_voltage.entries[i].leakage_index == index) {
			*leakage_voltage = si_pi->leakage_voltage.entries[i].voltage;
			return 0;
		}
	}
	return -EAGAIN;
}

/*
 * Program the thermal-throttle event source (digital, external, or both)
 * and enable/disable hardware thermal protection accordingly.
 */
static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	bool want_thermal_protection;
	enum amdgpu_dpm_event_src dpm_event_src;

	switch (sources) {
	case 0:
	default:
		want_thermal_protection = false;
		break;
	case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
		break;
	case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
		break;
	}

	if (want_thermal_protection) {
		WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
		if (pi->thermal_protection)
			WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
	} else {
		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
	}
}

/*
 * Add or remove one auto-throttle source bit and reprogram the DPM
 * event sources when the active set actually changes.
 */
static void si_enable_auto_throttle_source(struct amdgpu_device *adev,
					   enum amdgpu_dpm_auto_throttle_src source,
					   bool enable)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);

	if (enable) {
		if (!(pi->active_auto_throttle_sources & (1 << source))) {
			pi->active_auto_throttle_sources |= 1 << source;
			si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
		}
	} else {
		if (pi->active_auto_throttle_sources & (1 << source)) {
			pi->active_auto_throttle_sources &= ~(1 << source);
			si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
		}
	}
}

/* Set the global power-management enable bit. */
static void si_start_dpm(struct amdgpu_device *adev)
{
	WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
}

/* Clear the global power-management enable bit. */
static void si_stop_dpm(struct amdgpu_device *adev)
{
	WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
}

/* Enable/disable dynamic engine clock control (SCLK_PWRMGT_OFF is active-low enable). */
static void si_enable_sclk_control(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
	else
		WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);

}

#if 0
/* Ask the SMC to enable the thermal interrupt (currently unused). */
static int si_notify_hardware_of_thermal_state(struct amdgpu_device *adev,
					       u32 thermal_level)
{
	PPSMC_Result ret;

	if (thermal_level == 0) {
		ret = si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
		if (ret == PPSMC_Result_OK)
			return 0;
		else
			return -EINVAL;
	}
	return 0;
}

/* Warn the SMC that a TDR (GPU reset) is imminent (currently unused). */
static void si_notify_hardware_vpu_recovery_event(struct amdgpu_device *adev)
{
	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen, true);
}
#endif

#if 0
/* Tell the SMC we are on AC power (currently unused). */
static int si_notify_hw_of_powersource(struct amdgpu_device *adev, bool ac_power)
{
	if (ac_power)
		return (si_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ?
			0 : -EINVAL;

	return 0;
}
#endif

/* Send an SMC message whose argument is passed via the SMC_SCRATCH0 register. */
static PPSMC_Result si_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
						      PPSMC_Msg msg, u32 parameter)
{
	WREG32(SMC_SCRATCH0, parameter);
	return si_send_msg_to_smc(adev, msg);
}

/* Drop any forced level and restrict the SMC to the lowest performance level. */
static int si_restrict_performance_levels_before_switch(struct amdgpu_device *adev)
{
	if (si_send_msg_to_smc(adev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
		return -EINVAL;

	return (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

/*
 * Force the SMC to high / low / auto performance level selection for the
 * current power state.  Returns 0 on success, -EINVAL on SMC failure.
 */
static int si_dpm_force_performance_level(struct amdgpu_device *adev,
					  enum amdgpu_dpm_forced_level level)
{
	struct amdgpu_ps *rps = adev->pm.dpm.current_ps;
	struct si_ps *ps = si_get_ps(rps);
	u32 levels = ps->performance_level_count;

	if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) {
		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
			return -EINVAL;

		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK)
			return -EINVAL;
	} else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
			return -EINVAL;

		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
			return -EINVAL;
	} else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) {
		if (si_send_msg_to_smc_with_parameter(adev,
PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) 3920 + return -EINVAL; 3921 + 3922 + if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK) 3923 + return -EINVAL; 3924 + } 3925 + 3926 + adev->pm.dpm.forced_level = level; 3927 + 3928 + return 0; 3929 + } 3930 + 3931 + #if 0 3932 + static int si_set_boot_state(struct amdgpu_device *adev) 3933 + { 3934 + return (si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToInitialState) == PPSMC_Result_OK) ? 3935 + 0 : -EINVAL; 3936 + } 3937 + #endif 3938 + 3939 + static int si_set_sw_state(struct amdgpu_device *adev) 3940 + { 3941 + return (si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ? 3942 + 0 : -EINVAL; 3943 + } 3944 + 3945 + static int si_halt_smc(struct amdgpu_device *adev) 3946 + { 3947 + if (si_send_msg_to_smc(adev, PPSMC_MSG_Halt) != PPSMC_Result_OK) 3948 + return -EINVAL; 3949 + 3950 + return (si_wait_for_smc_inactive(adev) == PPSMC_Result_OK) ? 3951 + 0 : -EINVAL; 3952 + } 3953 + 3954 + static int si_resume_smc(struct amdgpu_device *adev) 3955 + { 3956 + if (si_send_msg_to_smc(adev, PPSMC_FlushDataCache) != PPSMC_Result_OK) 3957 + return -EINVAL; 3958 + 3959 + return (si_send_msg_to_smc(adev, PPSMC_MSG_Resume) == PPSMC_Result_OK) ? 
3960 + 0 : -EINVAL; 3961 + } 3962 + 3963 + static void si_dpm_start_smc(struct amdgpu_device *adev) 3964 + { 3965 + si_program_jump_on_start(adev); 3966 + si_start_smc(adev); 3967 + si_start_smc_clock(adev); 3968 + } 3969 + 3970 + static void si_dpm_stop_smc(struct amdgpu_device *adev) 3971 + { 3972 + si_reset_smc(adev); 3973 + si_stop_smc_clock(adev); 3974 + } 3975 + 3976 + static int si_process_firmware_header(struct amdgpu_device *adev) 3977 + { 3978 + struct si_power_info *si_pi = si_get_pi(adev); 3979 + u32 tmp; 3980 + int ret; 3981 + 3982 + ret = si_read_smc_sram_dword(adev, 3983 + SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + 3984 + SISLANDS_SMC_FIRMWARE_HEADER_stateTable, 3985 + &tmp, si_pi->sram_end); 3986 + if (ret) 3987 + return ret; 3988 + 3989 + si_pi->state_table_start = tmp; 3990 + 3991 + ret = si_read_smc_sram_dword(adev, 3992 + SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + 3993 + SISLANDS_SMC_FIRMWARE_HEADER_softRegisters, 3994 + &tmp, si_pi->sram_end); 3995 + if (ret) 3996 + return ret; 3997 + 3998 + si_pi->soft_regs_start = tmp; 3999 + 4000 + ret = si_read_smc_sram_dword(adev, 4001 + SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + 4002 + SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable, 4003 + &tmp, si_pi->sram_end); 4004 + if (ret) 4005 + return ret; 4006 + 4007 + si_pi->mc_reg_table_start = tmp; 4008 + 4009 + ret = si_read_smc_sram_dword(adev, 4010 + SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + 4011 + SISLANDS_SMC_FIRMWARE_HEADER_fanTable, 4012 + &tmp, si_pi->sram_end); 4013 + if (ret) 4014 + return ret; 4015 + 4016 + si_pi->fan_table_start = tmp; 4017 + 4018 + ret = si_read_smc_sram_dword(adev, 4019 + SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + 4020 + SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable, 4021 + &tmp, si_pi->sram_end); 4022 + if (ret) 4023 + return ret; 4024 + 4025 + si_pi->arb_table_start = tmp; 4026 + 4027 + ret = si_read_smc_sram_dword(adev, 4028 + SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + 4029 + SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable, 4030 + &tmp, 
si_pi->sram_end); 4031 + if (ret) 4032 + return ret; 4033 + 4034 + si_pi->cac_table_start = tmp; 4035 + 4036 + ret = si_read_smc_sram_dword(adev, 4037 + SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + 4038 + SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration, 4039 + &tmp, si_pi->sram_end); 4040 + if (ret) 4041 + return ret; 4042 + 4043 + si_pi->dte_table_start = tmp; 4044 + 4045 + ret = si_read_smc_sram_dword(adev, 4046 + SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + 4047 + SISLANDS_SMC_FIRMWARE_HEADER_spllTable, 4048 + &tmp, si_pi->sram_end); 4049 + if (ret) 4050 + return ret; 4051 + 4052 + si_pi->spll_table_start = tmp; 4053 + 4054 + ret = si_read_smc_sram_dword(adev, 4055 + SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + 4056 + SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters, 4057 + &tmp, si_pi->sram_end); 4058 + if (ret) 4059 + return ret; 4060 + 4061 + si_pi->papm_cfg_table_start = tmp; 4062 + 4063 + return ret; 4064 + } 4065 + 4066 + static void si_read_clock_registers(struct amdgpu_device *adev) 4067 + { 4068 + struct si_power_info *si_pi = si_get_pi(adev); 4069 + 4070 + si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL); 4071 + si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2); 4072 + si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3); 4073 + si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4); 4074 + si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM); 4075 + si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2); 4076 + si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL); 4077 + si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL); 4078 + si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL); 4079 + si_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL); 4080 + si_pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL); 4081 + si_pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1); 4082 + 
si_pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2); 4083 + si_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1); 4084 + si_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2); 4085 + } 4086 + 4087 + static void si_enable_thermal_protection(struct amdgpu_device *adev, 4088 + bool enable) 4089 + { 4090 + if (enable) 4091 + WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS); 4092 + else 4093 + WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS); 4094 + } 4095 + 4096 + static void si_enable_acpi_power_management(struct amdgpu_device *adev) 4097 + { 4098 + WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN); 4099 + } 4100 + 4101 + #if 0 4102 + static int si_enter_ulp_state(struct amdgpu_device *adev) 4103 + { 4104 + WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower); 4105 + 4106 + udelay(25000); 4107 + 4108 + return 0; 4109 + } 4110 + 4111 + static int si_exit_ulp_state(struct amdgpu_device *adev) 4112 + { 4113 + int i; 4114 + 4115 + WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower); 4116 + 4117 + udelay(7000); 4118 + 4119 + for (i = 0; i < adev->usec_timeout; i++) { 4120 + if (RREG32(SMC_RESP_0) == 1) 4121 + break; 4122 + udelay(1000); 4123 + } 4124 + 4125 + return 0; 4126 + } 4127 + #endif 4128 + 4129 + static int si_notify_smc_display_change(struct amdgpu_device *adev, 4130 + bool has_display) 4131 + { 4132 + PPSMC_Msg msg = has_display ? 4133 + PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay; 4134 + 4135 + return (si_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ? 
4136 + 0 : -EINVAL; 4137 + } 4138 + 4139 + static void si_program_response_times(struct amdgpu_device *adev) 4140 + { 4141 + u32 voltage_response_time, backbias_response_time, acpi_delay_time, vbi_time_out; 4142 + u32 vddc_dly, acpi_dly, vbi_dly; 4143 + u32 reference_clock; 4144 + 4145 + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1); 4146 + 4147 + voltage_response_time = (u32)adev->pm.dpm.voltage_response_time; 4148 + backbias_response_time = (u32)adev->pm.dpm.backbias_response_time; 4149 + 4150 + if (voltage_response_time == 0) 4151 + voltage_response_time = 1000; 4152 + 4153 + acpi_delay_time = 15000; 4154 + vbi_time_out = 100000; 4155 + 4156 + reference_clock = amdgpu_asic_get_xclk(adev); 4157 + 4158 + vddc_dly = (voltage_response_time * reference_clock) / 100; 4159 + acpi_dly = (acpi_delay_time * reference_clock) / 100; 4160 + vbi_dly = (vbi_time_out * reference_clock) / 100; 4161 + 4162 + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_vreg, vddc_dly); 4163 + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_acpi, acpi_dly); 4164 + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly); 4165 + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA); 4166 + } 4167 + 4168 + static void si_program_ds_registers(struct amdgpu_device *adev) 4169 + { 4170 + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); 4171 + u32 tmp; 4172 + 4173 + /* DEEP_SLEEP_CLK_SEL field should be 0x10 on tahiti A0 */ 4174 + if (adev->asic_type == CHIP_TAHITI && adev->rev_id == 0x0) 4175 + tmp = 0x10; 4176 + else 4177 + tmp = 0x1; 4178 + 4179 + if (eg_pi->sclk_deep_sleep) { 4180 + WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK); 4181 + WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR, 4182 + ~AUTOSCALE_ON_SS_CLEAR); 4183 + } 4184 + } 4185 + 4186 + static void si_program_display_gap(struct amdgpu_device *adev) 4187 + { 4188 + u32 tmp, pipe; 4189 + int i; 
4190 + 4191 + tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK); 4192 + if (adev->pm.dpm.new_active_crtc_count > 0) 4193 + tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM); 4194 + else 4195 + tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE); 4196 + 4197 + if (adev->pm.dpm.new_active_crtc_count > 1) 4198 + tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM); 4199 + else 4200 + tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE); 4201 + 4202 + WREG32(CG_DISPLAY_GAP_CNTL, tmp); 4203 + 4204 + tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG); 4205 + pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT; 4206 + 4207 + if ((adev->pm.dpm.new_active_crtc_count > 0) && 4208 + (!(adev->pm.dpm.new_active_crtcs & (1 << pipe)))) { 4209 + /* find the first active crtc */ 4210 + for (i = 0; i < adev->mode_info.num_crtc; i++) { 4211 + if (adev->pm.dpm.new_active_crtcs & (1 << i)) 4212 + break; 4213 + } 4214 + if (i == adev->mode_info.num_crtc) 4215 + pipe = 0; 4216 + else 4217 + pipe = i; 4218 + 4219 + tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK; 4220 + tmp |= DCCG_DISP1_SLOW_SELECT(pipe); 4221 + WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp); 4222 + } 4223 + 4224 + /* Setting this to false forces the performance state to low if the crtcs are disabled. 4225 + * This can be a problem on PowerXpress systems or if you want to use the card 4226 + * for offscreen rendering or compute if there are no crtcs enabled. 
4227 + */ 4228 + si_notify_smc_display_change(adev, adev->pm.dpm.new_active_crtc_count > 0); 4229 + } 4230 + 4231 + static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable) 4232 + { 4233 + struct rv7xx_power_info *pi = rv770_get_pi(adev); 4234 + 4235 + if (enable) { 4236 + if (pi->sclk_ss) 4237 + WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN); 4238 + } else { 4239 + WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN); 4240 + WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN); 4241 + } 4242 + } 4243 + 4244 + static void si_setup_bsp(struct amdgpu_device *adev) 4245 + { 4246 + struct rv7xx_power_info *pi = rv770_get_pi(adev); 4247 + u32 xclk = amdgpu_asic_get_xclk(adev); 4248 + 4249 + r600_calculate_u_and_p(pi->asi, 4250 + xclk, 4251 + 16, 4252 + &pi->bsp, 4253 + &pi->bsu); 4254 + 4255 + r600_calculate_u_and_p(pi->pasi, 4256 + xclk, 4257 + 16, 4258 + &pi->pbsp, 4259 + &pi->pbsu); 4260 + 4261 + 4262 + pi->dsp = BSP(pi->bsp) | BSU(pi->bsu); 4263 + pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu); 4264 + 4265 + WREG32(CG_BSP, pi->dsp); 4266 + } 4267 + 4268 + static void si_program_git(struct amdgpu_device *adev) 4269 + { 4270 + WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK); 4271 + } 4272 + 4273 + static void si_program_tp(struct amdgpu_device *adev) 4274 + { 4275 + int i; 4276 + enum r600_td td = R600_TD_DFLT; 4277 + 4278 + for (i = 0; i < R600_PM_NUMBER_OF_TC; i++) 4279 + WREG32(CG_FFCT_0 + i, (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i]))); 4280 + 4281 + if (td == R600_TD_AUTO) 4282 + WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL); 4283 + else 4284 + WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL); 4285 + 4286 + if (td == R600_TD_UP) 4287 + WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE); 4288 + 4289 + if (td == R600_TD_DOWN) 4290 + WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE); 4291 + } 4292 + 4293 + static void si_program_tpp(struct amdgpu_device *adev) 4294 + { 4295 + WREG32(CG_TPC, 
R600_TPC_DFLT); 4296 + } 4297 + 4298 + static void si_program_sstp(struct amdgpu_device *adev) 4299 + { 4300 + WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT))); 4301 + } 4302 + 4303 + static void si_enable_display_gap(struct amdgpu_device *adev) 4304 + { 4305 + u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL); 4306 + 4307 + tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK); 4308 + tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) | 4309 + DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE)); 4310 + 4311 + tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK); 4312 + tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) | 4313 + DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE)); 4314 + WREG32(CG_DISPLAY_GAP_CNTL, tmp); 4315 + } 4316 + 4317 + static void si_program_vc(struct amdgpu_device *adev) 4318 + { 4319 + struct rv7xx_power_info *pi = rv770_get_pi(adev); 4320 + 4321 + WREG32(CG_FTV, pi->vrc); 4322 + } 4323 + 4324 + static void si_clear_vc(struct amdgpu_device *adev) 4325 + { 4326 + WREG32(CG_FTV, 0); 4327 + } 4328 + 4329 + u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock) 4330 + { 4331 + u8 mc_para_index; 4332 + 4333 + if (memory_clock < 10000) 4334 + mc_para_index = 0; 4335 + else if (memory_clock >= 80000) 4336 + mc_para_index = 0x0f; 4337 + else 4338 + mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1); 4339 + return mc_para_index; 4340 + } 4341 + 4342 + u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode) 4343 + { 4344 + u8 mc_para_index; 4345 + 4346 + if (strobe_mode) { 4347 + if (memory_clock < 12500) 4348 + mc_para_index = 0x00; 4349 + else if (memory_clock > 47500) 4350 + mc_para_index = 0x0f; 4351 + else 4352 + mc_para_index = (u8)((memory_clock - 10000) / 2500); 4353 + } else { 4354 + if (memory_clock < 65000) 4355 + mc_para_index = 0x00; 4356 + else if (memory_clock > 135000) 4357 + mc_para_index = 0x0f; 4358 + else 4359 + mc_para_index = (u8)((memory_clock - 60000) / 5000); 4360 + } 4361 + return mc_para_index; 4362 + } 4363 + 4364 + static u8 
si_get_strobe_mode_settings(struct amdgpu_device *adev, u32 mclk) 4365 + { 4366 + struct rv7xx_power_info *pi = rv770_get_pi(adev); 4367 + bool strobe_mode = false; 4368 + u8 result = 0; 4369 + 4370 + if (mclk <= pi->mclk_strobe_mode_threshold) 4371 + strobe_mode = true; 4372 + 4373 + if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) 4374 + result = si_get_mclk_frequency_ratio(mclk, strobe_mode); 4375 + else 4376 + result = si_get_ddr3_mclk_frequency_ratio(mclk); 4377 + 4378 + if (strobe_mode) 4379 + result |= SISLANDS_SMC_STROBE_ENABLE; 4380 + 4381 + return result; 4382 + } 4383 + 4384 + static int si_upload_firmware(struct amdgpu_device *adev) 4385 + { 4386 + struct si_power_info *si_pi = si_get_pi(adev); 4387 + int ret; 4388 + 4389 + si_reset_smc(adev); 4390 + si_stop_smc_clock(adev); 4391 + 4392 + ret = si_load_smc_ucode(adev, si_pi->sram_end); 4393 + 4394 + return ret; 4395 + } 4396 + 4397 + static bool si_validate_phase_shedding_tables(struct amdgpu_device *adev, 4398 + const struct atom_voltage_table *table, 4399 + const struct amdgpu_phase_shedding_limits_table *limits) 4400 + { 4401 + u32 data, num_bits, num_levels; 4402 + 4403 + if ((table == NULL) || (limits == NULL)) 4404 + return false; 4405 + 4406 + data = table->mask_low; 4407 + 4408 + num_bits = hweight32(data); 4409 + 4410 + if (num_bits == 0) 4411 + return false; 4412 + 4413 + num_levels = (1 << num_bits); 4414 + 4415 + if (table->count != num_levels) 4416 + return false; 4417 + 4418 + if (limits->count != (num_levels - 1)) 4419 + return false; 4420 + 4421 + return true; 4422 + } 4423 + 4424 + static void si_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev, 4425 + u32 max_voltage_steps, 4426 + struct atom_voltage_table *voltage_table) 4427 + { 4428 + unsigned int i, diff; 4429 + 4430 + if (voltage_table->count <= max_voltage_steps) 4431 + return; 4432 + 4433 + diff = voltage_table->count - max_voltage_steps; 4434 + 4435 + for (i= 0; i < max_voltage_steps; i++) 4436 + 
voltage_table->entries[i] = voltage_table->entries[i + diff]; 4437 + 4438 + voltage_table->count = max_voltage_steps; 4439 + } 4440 + 4441 + static int si_get_svi2_voltage_table(struct amdgpu_device *adev, 4442 + struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table, 4443 + struct atom_voltage_table *voltage_table) 4444 + { 4445 + u32 i; 4446 + 4447 + if (voltage_dependency_table == NULL) 4448 + return -EINVAL; 4449 + 4450 + voltage_table->mask_low = 0; 4451 + voltage_table->phase_delay = 0; 4452 + 4453 + voltage_table->count = voltage_dependency_table->count; 4454 + for (i = 0; i < voltage_table->count; i++) { 4455 + voltage_table->entries[i].value = voltage_dependency_table->entries[i].v; 4456 + voltage_table->entries[i].smio_low = 0; 4457 + } 4458 + 4459 + return 0; 4460 + } 4461 + 4462 + static int si_construct_voltage_tables(struct amdgpu_device *adev) 4463 + { 4464 + struct rv7xx_power_info *pi = rv770_get_pi(adev); 4465 + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); 4466 + struct si_power_info *si_pi = si_get_pi(adev); 4467 + int ret; 4468 + 4469 + if (pi->voltage_control) { 4470 + ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC, 4471 + VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table); 4472 + if (ret) 4473 + return ret; 4474 + 4475 + if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS) 4476 + si_trim_voltage_table_to_fit_state_table(adev, 4477 + SISLANDS_MAX_NO_VREG_STEPS, 4478 + &eg_pi->vddc_voltage_table); 4479 + } else if (si_pi->voltage_control_svi2) { 4480 + ret = si_get_svi2_voltage_table(adev, 4481 + &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, 4482 + &eg_pi->vddc_voltage_table); 4483 + if (ret) 4484 + return ret; 4485 + } else { 4486 + return -EINVAL; 4487 + } 4488 + 4489 + if (eg_pi->vddci_control) { 4490 + ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI, 4491 + VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddci_voltage_table); 4492 + if (ret) 4493 + return ret; 4494 + 4495 + if 
(eg_pi->vddci_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS) 4496 + si_trim_voltage_table_to_fit_state_table(adev, 4497 + SISLANDS_MAX_NO_VREG_STEPS, 4498 + &eg_pi->vddci_voltage_table); 4499 + } 4500 + if (si_pi->vddci_control_svi2) { 4501 + ret = si_get_svi2_voltage_table(adev, 4502 + &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, 4503 + &eg_pi->vddci_voltage_table); 4504 + if (ret) 4505 + return ret; 4506 + } 4507 + 4508 + if (pi->mvdd_control) { 4509 + ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC, 4510 + VOLTAGE_OBJ_GPIO_LUT, &si_pi->mvdd_voltage_table); 4511 + 4512 + if (ret) { 4513 + pi->mvdd_control = false; 4514 + return ret; 4515 + } 4516 + 4517 + if (si_pi->mvdd_voltage_table.count == 0) { 4518 + pi->mvdd_control = false; 4519 + return -EINVAL; 4520 + } 4521 + 4522 + if (si_pi->mvdd_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS) 4523 + si_trim_voltage_table_to_fit_state_table(adev, 4524 + SISLANDS_MAX_NO_VREG_STEPS, 4525 + &si_pi->mvdd_voltage_table); 4526 + } 4527 + 4528 + if (si_pi->vddc_phase_shed_control) { 4529 + ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC, 4530 + VOLTAGE_OBJ_PHASE_LUT, &si_pi->vddc_phase_shed_table); 4531 + if (ret) 4532 + si_pi->vddc_phase_shed_control = false; 4533 + 4534 + if ((si_pi->vddc_phase_shed_table.count == 0) || 4535 + (si_pi->vddc_phase_shed_table.count > SISLANDS_MAX_NO_VREG_STEPS)) 4536 + si_pi->vddc_phase_shed_control = false; 4537 + } 4538 + 4539 + return 0; 4540 + } 4541 + 4542 + static void si_populate_smc_voltage_table(struct amdgpu_device *adev, 4543 + const struct atom_voltage_table *voltage_table, 4544 + SISLANDS_SMC_STATETABLE *table) 4545 + { 4546 + unsigned int i; 4547 + 4548 + for (i = 0; i < voltage_table->count; i++) 4549 + table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low); 4550 + } 4551 + 4552 + static int si_populate_smc_voltage_tables(struct amdgpu_device *adev, 4553 + SISLANDS_SMC_STATETABLE *table) 4554 + { 4555 + struct 
rv7xx_power_info *pi = rv770_get_pi(adev); 4556 + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); 4557 + struct si_power_info *si_pi = si_get_pi(adev); 4558 + u8 i; 4559 + 4560 + if (si_pi->voltage_control_svi2) { 4561 + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc, 4562 + si_pi->svc_gpio_id); 4563 + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd, 4564 + si_pi->svd_gpio_id); 4565 + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_plat_type, 4566 + 2); 4567 + } else { 4568 + if (eg_pi->vddc_voltage_table.count) { 4569 + si_populate_smc_voltage_table(adev, &eg_pi->vddc_voltage_table, table); 4570 + table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] = 4571 + cpu_to_be32(eg_pi->vddc_voltage_table.mask_low); 4572 + 4573 + for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) { 4574 + if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) { 4575 + table->maxVDDCIndexInPPTable = i; 4576 + break; 4577 + } 4578 + } 4579 + } 4580 + 4581 + if (eg_pi->vddci_voltage_table.count) { 4582 + si_populate_smc_voltage_table(adev, &eg_pi->vddci_voltage_table, table); 4583 + 4584 + table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] = 4585 + cpu_to_be32(eg_pi->vddci_voltage_table.mask_low); 4586 + } 4587 + 4588 + 4589 + if (si_pi->mvdd_voltage_table.count) { 4590 + si_populate_smc_voltage_table(adev, &si_pi->mvdd_voltage_table, table); 4591 + 4592 + table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] = 4593 + cpu_to_be32(si_pi->mvdd_voltage_table.mask_low); 4594 + } 4595 + 4596 + if (si_pi->vddc_phase_shed_control) { 4597 + if (si_validate_phase_shedding_tables(adev, &si_pi->vddc_phase_shed_table, 4598 + &adev->pm.dpm.dyn_state.phase_shedding_limits_table)) { 4599 + si_populate_smc_voltage_table(adev, &si_pi->vddc_phase_shed_table, table); 4600 + 4601 + table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] = 4602 + 
cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low); 4603 + 4604 + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_phase_shedding_delay, 4605 + (u32)si_pi->vddc_phase_shed_table.phase_delay); 4606 + } else { 4607 + si_pi->vddc_phase_shed_control = false; 4608 + } 4609 + } 4610 + } 4611 + 4612 + return 0; 4613 + } 4614 + 4615 + static int si_populate_voltage_value(struct amdgpu_device *adev, 4616 + const struct atom_voltage_table *table, 4617 + u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage) 4618 + { 4619 + unsigned int i; 4620 + 4621 + for (i = 0; i < table->count; i++) { 4622 + if (value <= table->entries[i].value) { 4623 + voltage->index = (u8)i; 4624 + voltage->value = cpu_to_be16(table->entries[i].value); 4625 + break; 4626 + } 4627 + } 4628 + 4629 + if (i >= table->count) 4630 + return -EINVAL; 4631 + 4632 + return 0; 4633 + } 4634 + 4635 + static int si_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk, 4636 + SISLANDS_SMC_VOLTAGE_VALUE *voltage) 4637 + { 4638 + struct rv7xx_power_info *pi = rv770_get_pi(adev); 4639 + struct si_power_info *si_pi = si_get_pi(adev); 4640 + 4641 + if (pi->mvdd_control) { 4642 + if (mclk <= pi->mvdd_split_frequency) 4643 + voltage->index = 0; 4644 + else 4645 + voltage->index = (u8)(si_pi->mvdd_voltage_table.count) - 1; 4646 + 4647 + voltage->value = cpu_to_be16(si_pi->mvdd_voltage_table.entries[voltage->index].value); 4648 + } 4649 + return 0; 4650 + } 4651 + 4652 + static int si_get_std_voltage_value(struct amdgpu_device *adev, 4653 + SISLANDS_SMC_VOLTAGE_VALUE *voltage, 4654 + u16 *std_voltage) 4655 + { 4656 + u16 v_index; 4657 + bool voltage_found = false; 4658 + *std_voltage = be16_to_cpu(voltage->value); 4659 + 4660 + if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) { 4661 + if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE) { 4662 + if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL) 4663 + return -EINVAL; 4664 + 4665 + for (v_index = 0; (u32)v_index < 
adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) { 4666 + if (be16_to_cpu(voltage->value) == 4667 + (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) { 4668 + voltage_found = true; 4669 + if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count) 4670 + *std_voltage = 4671 + adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc; 4672 + else 4673 + *std_voltage = 4674 + adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc; 4675 + break; 4676 + } 4677 + } 4678 + 4679 + if (!voltage_found) { 4680 + for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) { 4681 + if (be16_to_cpu(voltage->value) <= 4682 + (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) { 4683 + voltage_found = true; 4684 + if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count) 4685 + *std_voltage = 4686 + adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc; 4687 + else 4688 + *std_voltage = 4689 + adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc; 4690 + break; 4691 + } 4692 + } 4693 + } 4694 + } else { 4695 + if ((u32)voltage->index < adev->pm.dpm.dyn_state.cac_leakage_table.count) 4696 + *std_voltage = adev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc; 4697 + } 4698 + } 4699 + 4700 + return 0; 4701 + } 4702 + 4703 + static int si_populate_std_voltage_value(struct amdgpu_device *adev, 4704 + u16 value, u8 index, 4705 + SISLANDS_SMC_VOLTAGE_VALUE *voltage) 4706 + { 4707 + voltage->index = index; 4708 + voltage->value = cpu_to_be16(value); 4709 + 4710 + return 0; 4711 + } 4712 + 4713 + static int si_populate_phase_shedding_value(struct amdgpu_device *adev, 4714 + const struct amdgpu_phase_shedding_limits_table *limits, 4715 + u16 voltage, u32 sclk, u32 mclk, 4716 + SISLANDS_SMC_VOLTAGE_VALUE *smc_voltage) 4717 + { 4718 + unsigned int i; 
4719 + 4720 + for (i = 0; i < limits->count; i++) { 4721 + if ((voltage <= limits->entries[i].voltage) && 4722 + (sclk <= limits->entries[i].sclk) && 4723 + (mclk <= limits->entries[i].mclk)) 4724 + break; 4725 + } 4726 + 4727 + smc_voltage->phase_settings = (u8)i; 4728 + 4729 + return 0; 4730 + } 4731 + 4732 + static int si_init_arb_table_index(struct amdgpu_device *adev) 4733 + { 4734 + struct si_power_info *si_pi = si_get_pi(adev); 4735 + u32 tmp; 4736 + int ret; 4737 + 4738 + ret = si_read_smc_sram_dword(adev, si_pi->arb_table_start, &tmp, si_pi->sram_end); 4739 + if (ret) 4740 + return ret; 4741 + 4742 + tmp &= 0x00FFFFFF; 4743 + tmp |= MC_CG_ARB_FREQ_F1 << 24; 4744 + 4745 + return si_write_smc_sram_dword(adev, si_pi->arb_table_start, tmp, si_pi->sram_end); 4746 + } 4747 + 4748 + static int si_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev) 4749 + { 4750 + return ni_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); 4751 + } 4752 + 4753 + static int si_reset_to_default(struct amdgpu_device *adev) 4754 + { 4755 + return (si_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ? 
4756 + 0 : -EINVAL; 4757 + } 4758 + 4759 + static int si_force_switch_to_arb_f0(struct amdgpu_device *adev) 4760 + { 4761 + struct si_power_info *si_pi = si_get_pi(adev); 4762 + u32 tmp; 4763 + int ret; 4764 + 4765 + ret = si_read_smc_sram_dword(adev, si_pi->arb_table_start, 4766 + &tmp, si_pi->sram_end); 4767 + if (ret) 4768 + return ret; 4769 + 4770 + tmp = (tmp >> 24) & 0xff; 4771 + 4772 + if (tmp == MC_CG_ARB_FREQ_F0) 4773 + return 0; 4774 + 4775 + return ni_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0); 4776 + } 4777 + 4778 + static u32 si_calculate_memory_refresh_rate(struct amdgpu_device *adev, 4779 + u32 engine_clock) 4780 + { 4781 + u32 dram_rows; 4782 + u32 dram_refresh_rate; 4783 + u32 mc_arb_rfsh_rate; 4784 + u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT; 4785 + 4786 + if (tmp >= 4) 4787 + dram_rows = 16384; 4788 + else 4789 + dram_rows = 1 << (tmp + 10); 4790 + 4791 + dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3); 4792 + mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64; 4793 + 4794 + return mc_arb_rfsh_rate; 4795 + } 4796 + 4797 + static int si_populate_memory_timing_parameters(struct amdgpu_device *adev, 4798 + struct rv7xx_pl *pl, 4799 + SMC_SIslands_MCArbDramTimingRegisterSet *arb_regs) 4800 + { 4801 + u32 dram_timing; 4802 + u32 dram_timing2; 4803 + u32 burst_time; 4804 + 4805 + arb_regs->mc_arb_rfsh_rate = 4806 + (u8)si_calculate_memory_refresh_rate(adev, pl->sclk); 4807 + 4808 + amdgpu_atombios_set_engine_dram_timings(adev, 4809 + pl->sclk, 4810 + pl->mclk); 4811 + 4812 + dram_timing = RREG32(MC_ARB_DRAM_TIMING); 4813 + dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); 4814 + burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK; 4815 + 4816 + arb_regs->mc_arb_dram_timing = cpu_to_be32(dram_timing); 4817 + arb_regs->mc_arb_dram_timing2 = cpu_to_be32(dram_timing2); 4818 + arb_regs->mc_arb_burst_time = (u8)burst_time; 4819 + 4820 + return 0; 4821 + } 4822 + 4823 + static int 
si_do_program_memory_timing_parameters(struct amdgpu_device *adev, 4824 + struct amdgpu_ps *amdgpu_state, 4825 + unsigned int first_arb_set) 4826 + { 4827 + struct si_power_info *si_pi = si_get_pi(adev); 4828 + struct si_ps *state = si_get_ps(amdgpu_state); 4829 + SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 }; 4830 + int i, ret = 0; 4831 + 4832 + for (i = 0; i < state->performance_level_count; i++) { 4833 + ret = si_populate_memory_timing_parameters(adev, &state->performance_levels[i], &arb_regs); 4834 + if (ret) 4835 + break; 4836 + ret = si_copy_bytes_to_smc(adev, 4837 + si_pi->arb_table_start + 4838 + offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) + 4839 + sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i), 4840 + (u8 *)&arb_regs, 4841 + sizeof(SMC_SIslands_MCArbDramTimingRegisterSet), 4842 + si_pi->sram_end); 4843 + if (ret) 4844 + break; 4845 + } 4846 + 4847 + return ret; 4848 + } 4849 + 4850 + static int si_program_memory_timing_parameters(struct amdgpu_device *adev, 4851 + struct amdgpu_ps *amdgpu_new_state) 4852 + { 4853 + return si_do_program_memory_timing_parameters(adev, amdgpu_new_state, 4854 + SISLANDS_DRIVER_STATE_ARB_INDEX); 4855 + } 4856 + 4857 + static int si_populate_initial_mvdd_value(struct amdgpu_device *adev, 4858 + struct SISLANDS_SMC_VOLTAGE_VALUE *voltage) 4859 + { 4860 + struct rv7xx_power_info *pi = rv770_get_pi(adev); 4861 + struct si_power_info *si_pi = si_get_pi(adev); 4862 + 4863 + if (pi->mvdd_control) 4864 + return si_populate_voltage_value(adev, &si_pi->mvdd_voltage_table, 4865 + si_pi->mvdd_bootup_value, voltage); 4866 + 4867 + return 0; 4868 + } 4869 + 4870 + static int si_populate_smc_initial_state(struct amdgpu_device *adev, 4871 + struct amdgpu_ps *amdgpu_initial_state, 4872 + SISLANDS_SMC_STATETABLE *table) 4873 + { 4874 + struct si_ps *initial_state = si_get_ps(amdgpu_initial_state); 4875 + struct rv7xx_power_info *pi = rv770_get_pi(adev); 4876 + struct evergreen_power_info *eg_pi = 
evergreen_get_pi(adev); 4877 + struct si_power_info *si_pi = si_get_pi(adev); 4878 + u32 reg; 4879 + int ret; 4880 + 4881 + table->initialState.levels[0].mclk.vDLL_CNTL = 4882 + cpu_to_be32(si_pi->clock_registers.dll_cntl); 4883 + table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL = 4884 + cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl); 4885 + table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = 4886 + cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl); 4887 + table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL = 4888 + cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl); 4889 + table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL = 4890 + cpu_to_be32(si_pi->clock_registers.mpll_func_cntl); 4891 + table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_1 = 4892 + cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1); 4893 + table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_2 = 4894 + cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2); 4895 + table->initialState.levels[0].mclk.vMPLL_SS = 4896 + cpu_to_be32(si_pi->clock_registers.mpll_ss1); 4897 + table->initialState.levels[0].mclk.vMPLL_SS2 = 4898 + cpu_to_be32(si_pi->clock_registers.mpll_ss2); 4899 + 4900 + table->initialState.levels[0].mclk.mclk_value = 4901 + cpu_to_be32(initial_state->performance_levels[0].mclk); 4902 + 4903 + table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = 4904 + cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl); 4905 + table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = 4906 + cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2); 4907 + table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = 4908 + cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3); 4909 + table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 = 4910 + cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4); 4911 + table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM = 4912 + cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum); 4913 + 
table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 = 4914 + cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2); 4915 + 4916 + table->initialState.levels[0].sclk.sclk_value = 4917 + cpu_to_be32(initial_state->performance_levels[0].sclk); 4918 + 4919 + table->initialState.levels[0].arbRefreshState = 4920 + SISLANDS_INITIAL_STATE_ARB_INDEX; 4921 + 4922 + table->initialState.levels[0].ACIndex = 0; 4923 + 4924 + ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table, 4925 + initial_state->performance_levels[0].vddc, 4926 + &table->initialState.levels[0].vddc); 4927 + 4928 + if (!ret) { 4929 + u16 std_vddc; 4930 + 4931 + ret = si_get_std_voltage_value(adev, 4932 + &table->initialState.levels[0].vddc, 4933 + &std_vddc); 4934 + if (!ret) 4935 + si_populate_std_voltage_value(adev, std_vddc, 4936 + table->initialState.levels[0].vddc.index, 4937 + &table->initialState.levels[0].std_vddc); 4938 + } 4939 + 4940 + if (eg_pi->vddci_control) 4941 + si_populate_voltage_value(adev, 4942 + &eg_pi->vddci_voltage_table, 4943 + initial_state->performance_levels[0].vddci, 4944 + &table->initialState.levels[0].vddci); 4945 + 4946 + if (si_pi->vddc_phase_shed_control) 4947 + si_populate_phase_shedding_value(adev, 4948 + &adev->pm.dpm.dyn_state.phase_shedding_limits_table, 4949 + initial_state->performance_levels[0].vddc, 4950 + initial_state->performance_levels[0].sclk, 4951 + initial_state->performance_levels[0].mclk, 4952 + &table->initialState.levels[0].vddc); 4953 + 4954 + si_populate_initial_mvdd_value(adev, &table->initialState.levels[0].mvdd); 4955 + 4956 + reg = CG_R(0xffff) | CG_L(0); 4957 + table->initialState.levels[0].aT = cpu_to_be32(reg); 4958 + 4959 + table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp); 4960 + 4961 + table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen; 4962 + 4963 + if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) { 4964 + table->initialState.levels[0].strobeMode = 4965 + si_get_strobe_mode_settings(adev, 
4966 + initial_state->performance_levels[0].mclk); 4967 + 4968 + if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold) 4969 + table->initialState.levels[0].mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG; 4970 + else 4971 + table->initialState.levels[0].mcFlags = 0; 4972 + } 4973 + 4974 + table->initialState.levelCount = 1; 4975 + 4976 + table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC; 4977 + 4978 + table->initialState.levels[0].dpm2.MaxPS = 0; 4979 + table->initialState.levels[0].dpm2.NearTDPDec = 0; 4980 + table->initialState.levels[0].dpm2.AboveSafeInc = 0; 4981 + table->initialState.levels[0].dpm2.BelowSafeInc = 0; 4982 + table->initialState.levels[0].dpm2.PwrEfficiencyRatio = 0; 4983 + 4984 + reg = MIN_POWER_MASK | MAX_POWER_MASK; 4985 + table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg); 4986 + 4987 + reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK; 4988 + table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg); 4989 + 4990 + return 0; 4991 + } 4992 + 4993 + static int si_populate_smc_acpi_state(struct amdgpu_device *adev, 4994 + SISLANDS_SMC_STATETABLE *table) 4995 + { 4996 + struct rv7xx_power_info *pi = rv770_get_pi(adev); 4997 + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); 4998 + struct si_power_info *si_pi = si_get_pi(adev); 4999 + u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl; 5000 + u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2; 5001 + u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3; 5002 + u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4; 5003 + u32 dll_cntl = si_pi->clock_registers.dll_cntl; 5004 + u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl; 5005 + u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl; 5006 + u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl; 5007 + u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl; 5008 + u32 
mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1; 5009 + u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2; 5010 + u32 reg; 5011 + int ret; 5012 + 5013 + table->ACPIState = table->initialState; 5014 + 5015 + table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC; 5016 + 5017 + if (pi->acpi_vddc) { 5018 + ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table, 5019 + pi->acpi_vddc, &table->ACPIState.levels[0].vddc); 5020 + if (!ret) { 5021 + u16 std_vddc; 5022 + 5023 + ret = si_get_std_voltage_value(adev, 5024 + &table->ACPIState.levels[0].vddc, &std_vddc); 5025 + if (!ret) 5026 + si_populate_std_voltage_value(adev, std_vddc, 5027 + table->ACPIState.levels[0].vddc.index, 5028 + &table->ACPIState.levels[0].std_vddc); 5029 + } 5030 + table->ACPIState.levels[0].gen2PCIE = si_pi->acpi_pcie_gen; 5031 + 5032 + if (si_pi->vddc_phase_shed_control) { 5033 + si_populate_phase_shedding_value(adev, 5034 + &adev->pm.dpm.dyn_state.phase_shedding_limits_table, 5035 + pi->acpi_vddc, 5036 + 0, 5037 + 0, 5038 + &table->ACPIState.levels[0].vddc); 5039 + } 5040 + } else { 5041 + ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table, 5042 + pi->min_vddc_in_table, &table->ACPIState.levels[0].vddc); 5043 + if (!ret) { 5044 + u16 std_vddc; 5045 + 5046 + ret = si_get_std_voltage_value(adev, 5047 + &table->ACPIState.levels[0].vddc, &std_vddc); 5048 + 5049 + if (!ret) 5050 + si_populate_std_voltage_value(adev, std_vddc, 5051 + table->ACPIState.levels[0].vddc.index, 5052 + &table->ACPIState.levels[0].std_vddc); 5053 + } 5054 + table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(adev, 5055 + si_pi->sys_pcie_mask, 5056 + si_pi->boot_pcie_gen, 5057 + AMDGPU_PCIE_GEN1); 5058 + 5059 + if (si_pi->vddc_phase_shed_control) 5060 + si_populate_phase_shedding_value(adev, 5061 + &adev->pm.dpm.dyn_state.phase_shedding_limits_table, 5062 + pi->min_vddc_in_table, 5063 + 0, 5064 + 0, 5065 + &table->ACPIState.levels[0].vddc); 5066 + } 5067 + 5068 + if 
(pi->acpi_vddc) { 5069 + if (eg_pi->acpi_vddci) 5070 + si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table, 5071 + eg_pi->acpi_vddci, 5072 + &table->ACPIState.levels[0].vddci); 5073 + } 5074 + 5075 + mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET; 5076 + mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB); 5077 + 5078 + dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS); 5079 + 5080 + spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK; 5081 + spll_func_cntl_2 |= SCLK_MUX_SEL(4); 5082 + 5083 + table->ACPIState.levels[0].mclk.vDLL_CNTL = 5084 + cpu_to_be32(dll_cntl); 5085 + table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL = 5086 + cpu_to_be32(mclk_pwrmgt_cntl); 5087 + table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = 5088 + cpu_to_be32(mpll_ad_func_cntl); 5089 + table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL = 5090 + cpu_to_be32(mpll_dq_func_cntl); 5091 + table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL = 5092 + cpu_to_be32(mpll_func_cntl); 5093 + table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_1 = 5094 + cpu_to_be32(mpll_func_cntl_1); 5095 + table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_2 = 5096 + cpu_to_be32(mpll_func_cntl_2); 5097 + table->ACPIState.levels[0].mclk.vMPLL_SS = 5098 + cpu_to_be32(si_pi->clock_registers.mpll_ss1); 5099 + table->ACPIState.levels[0].mclk.vMPLL_SS2 = 5100 + cpu_to_be32(si_pi->clock_registers.mpll_ss2); 5101 + 5102 + table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = 5103 + cpu_to_be32(spll_func_cntl); 5104 + table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = 5105 + cpu_to_be32(spll_func_cntl_2); 5106 + table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = 5107 + cpu_to_be32(spll_func_cntl_3); 5108 + table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 = 5109 + cpu_to_be32(spll_func_cntl_4); 5110 + 5111 + table->ACPIState.levels[0].mclk.mclk_value = 0; 5112 + table->ACPIState.levels[0].sclk.sclk_value = 0; 5113 + 5114 + si_populate_mvdd_value(adev, 0, &table->ACPIState.levels[0].mvdd); 5115 + 5116 + if (eg_pi->dynamic_ac_timing) 5117 
+ table->ACPIState.levels[0].ACIndex = 0; 5118 + 5119 + table->ACPIState.levels[0].dpm2.MaxPS = 0; 5120 + table->ACPIState.levels[0].dpm2.NearTDPDec = 0; 5121 + table->ACPIState.levels[0].dpm2.AboveSafeInc = 0; 5122 + table->ACPIState.levels[0].dpm2.BelowSafeInc = 0; 5123 + table->ACPIState.levels[0].dpm2.PwrEfficiencyRatio = 0; 5124 + 5125 + reg = MIN_POWER_MASK | MAX_POWER_MASK; 5126 + table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg); 5127 + 5128 + reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK; 5129 + table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg); 5130 + 5131 + return 0; 5132 + } 5133 + 5134 + static int si_populate_ulv_state(struct amdgpu_device *adev, 5135 + SISLANDS_SMC_SWSTATE *state) 5136 + { 5137 + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); 5138 + struct si_power_info *si_pi = si_get_pi(adev); 5139 + struct si_ulv_param *ulv = &si_pi->ulv; 5140 + u32 sclk_in_sr = 1350; /* ??? */ 5141 + int ret; 5142 + 5143 + ret = si_convert_power_level_to_smc(adev, &ulv->pl, 5144 + &state->levels[0]); 5145 + if (!ret) { 5146 + if (eg_pi->sclk_deep_sleep) { 5147 + if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ) 5148 + state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS; 5149 + else 5150 + state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE; 5151 + } 5152 + if (ulv->one_pcie_lane_in_ulv) 5153 + state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1; 5154 + state->levels[0].arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX); 5155 + state->levels[0].ACIndex = 1; 5156 + state->levels[0].std_vddc = state->levels[0].vddc; 5157 + state->levelCount = 1; 5158 + 5159 + state->flags |= PPSMC_SWSTATE_FLAG_DC; 5160 + } 5161 + 5162 + return ret; 5163 + } 5164 + 5165 + static int si_program_ulv_memory_timing_parameters(struct amdgpu_device *adev) 5166 + { 5167 + struct si_power_info *si_pi = si_get_pi(adev); 5168 + struct si_ulv_param *ulv = &si_pi->ulv; 5169 + SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 
}; 5170 + int ret; 5171 + 5172 + ret = si_populate_memory_timing_parameters(adev, &ulv->pl, 5173 + &arb_regs); 5174 + if (ret) 5175 + return ret; 5176 + 5177 + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ulv_volt_change_delay, 5178 + ulv->volt_change_delay); 5179 + 5180 + ret = si_copy_bytes_to_smc(adev, 5181 + si_pi->arb_table_start + 5182 + offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) + 5183 + sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * SISLANDS_ULV_STATE_ARB_INDEX, 5184 + (u8 *)&arb_regs, 5185 + sizeof(SMC_SIslands_MCArbDramTimingRegisterSet), 5186 + si_pi->sram_end); 5187 + 5188 + return ret; 5189 + } 5190 + 5191 + static void si_get_mvdd_configuration(struct amdgpu_device *adev) 5192 + { 5193 + struct rv7xx_power_info *pi = rv770_get_pi(adev); 5194 + 5195 + pi->mvdd_split_frequency = 30000; 5196 + } 5197 + 5198 + static int si_init_smc_table(struct amdgpu_device *adev) 5199 + { 5200 + struct si_power_info *si_pi = si_get_pi(adev); 5201 + struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps; 5202 + const struct si_ulv_param *ulv = &si_pi->ulv; 5203 + SISLANDS_SMC_STATETABLE *table = &si_pi->smc_statetable; 5204 + int ret; 5205 + u32 lane_width; 5206 + u32 vr_hot_gpio; 5207 + 5208 + si_populate_smc_voltage_tables(adev, table); 5209 + 5210 + switch (adev->pm.int_thermal_type) { 5211 + case THERMAL_TYPE_SI: 5212 + case THERMAL_TYPE_EMC2103_WITH_INTERNAL: 5213 + table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL; 5214 + break; 5215 + case THERMAL_TYPE_NONE: 5216 + table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE; 5217 + break; 5218 + default: 5219 + table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL; 5220 + break; 5221 + } 5222 + 5223 + if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC) 5224 + table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; 5225 + 5226 + if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) { 5227 + if ((adev->pdev->device != 0x6818) && 
(adev->pdev->device != 0x6819)) 5228 + table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT; 5229 + } 5230 + 5231 + if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC) 5232 + table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; 5233 + 5234 + if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) 5235 + table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5; 5236 + 5237 + if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY) 5238 + table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH; 5239 + 5240 + if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) { 5241 + table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO; 5242 + vr_hot_gpio = adev->pm.dpm.backbias_response_time; 5243 + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_vr_hot_gpio, 5244 + vr_hot_gpio); 5245 + } 5246 + 5247 + ret = si_populate_smc_initial_state(adev, amdgpu_boot_state, table); 5248 + if (ret) 5249 + return ret; 5250 + 5251 + ret = si_populate_smc_acpi_state(adev, table); 5252 + if (ret) 5253 + return ret; 5254 + 5255 + table->driverState = table->initialState; 5256 + 5257 + ret = si_do_program_memory_timing_parameters(adev, amdgpu_boot_state, 5258 + SISLANDS_INITIAL_STATE_ARB_INDEX); 5259 + if (ret) 5260 + return ret; 5261 + 5262 + if (ulv->supported && ulv->pl.vddc) { 5263 + ret = si_populate_ulv_state(adev, &table->ULVState); 5264 + if (ret) 5265 + return ret; 5266 + 5267 + ret = si_program_ulv_memory_timing_parameters(adev); 5268 + if (ret) 5269 + return ret; 5270 + 5271 + WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control); 5272 + WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter); 5273 + 5274 + lane_width = amdgpu_get_pcie_lanes(adev); 5275 + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width); 5276 + } else { 5277 + table->ULVState = table->initialState; 5278 + } 5279 + 5280 + return si_copy_bytes_to_smc(adev, si_pi->state_table_start, 5281 + (u8 *)table, sizeof(SISLANDS_SMC_STATETABLE), 5282 + 
si_pi->sram_end); 5283 + } 5284 + 5285 + static int si_calculate_sclk_params(struct amdgpu_device *adev, 5286 + u32 engine_clock, 5287 + SISLANDS_SMC_SCLK_VALUE *sclk) 5288 + { 5289 + struct rv7xx_power_info *pi = rv770_get_pi(adev); 5290 + struct si_power_info *si_pi = si_get_pi(adev); 5291 + struct atom_clock_dividers dividers; 5292 + u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl; 5293 + u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2; 5294 + u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3; 5295 + u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4; 5296 + u32 cg_spll_spread_spectrum = si_pi->clock_registers.cg_spll_spread_spectrum; 5297 + u32 cg_spll_spread_spectrum_2 = si_pi->clock_registers.cg_spll_spread_spectrum_2; 5298 + u64 tmp; 5299 + u32 reference_clock = adev->clock.spll.reference_freq; 5300 + u32 reference_divider; 5301 + u32 fbdiv; 5302 + int ret; 5303 + 5304 + ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 5305 + engine_clock, false, &dividers); 5306 + if (ret) 5307 + return ret; 5308 + 5309 + reference_divider = 1 + dividers.ref_div; 5310 + 5311 + tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384; 5312 + do_div(tmp, reference_clock); 5313 + fbdiv = (u32) tmp; 5314 + 5315 + spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK); 5316 + spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div); 5317 + spll_func_cntl |= SPLL_PDIV_A(dividers.post_div); 5318 + 5319 + spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK; 5320 + spll_func_cntl_2 |= SCLK_MUX_SEL(2); 5321 + 5322 + spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK; 5323 + spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv); 5324 + spll_func_cntl_3 |= SPLL_DITHEN; 5325 + 5326 + if (pi->sclk_ss) { 5327 + struct amdgpu_atom_ss ss; 5328 + u32 vco_freq = engine_clock * dividers.post_div; 5329 + 5330 + if (amdgpu_atombios_get_asic_ss_info(adev, &ss, 5331 + ASIC_INTERNAL_ENGINE_SS, vco_freq)) { 5332 + u32 clk_s = reference_clock * 
5 / (reference_divider * ss.rate); 5333 + u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000); 5334 + 5335 + cg_spll_spread_spectrum &= ~CLK_S_MASK; 5336 + cg_spll_spread_spectrum |= CLK_S(clk_s); 5337 + cg_spll_spread_spectrum |= SSEN; 5338 + 5339 + cg_spll_spread_spectrum_2 &= ~CLK_V_MASK; 5340 + cg_spll_spread_spectrum_2 |= CLK_V(clk_v); 5341 + } 5342 + } 5343 + 5344 + sclk->sclk_value = engine_clock; 5345 + sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl; 5346 + sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2; 5347 + sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3; 5348 + sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4; 5349 + sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum; 5350 + sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2; 5351 + 5352 + return 0; 5353 + } 5354 + 5355 + static int si_populate_sclk_value(struct amdgpu_device *adev, 5356 + u32 engine_clock, 5357 + SISLANDS_SMC_SCLK_VALUE *sclk) 5358 + { 5359 + SISLANDS_SMC_SCLK_VALUE sclk_tmp; 5360 + int ret; 5361 + 5362 + ret = si_calculate_sclk_params(adev, engine_clock, &sclk_tmp); 5363 + if (!ret) { 5364 + sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value); 5365 + sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL); 5366 + sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2); 5367 + sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3); 5368 + sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4); 5369 + sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM); 5370 + sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2); 5371 + } 5372 + 5373 + return ret; 5374 + } 5375 + 5376 + static int si_populate_mclk_value(struct amdgpu_device *adev, 5377 + u32 engine_clock, 5378 + u32 memory_clock, 5379 + SISLANDS_SMC_MCLK_VALUE *mclk, 5380 + bool strobe_mode, 5381 + bool dll_state_on) 5382 + { 5383 + struct rv7xx_power_info *pi = rv770_get_pi(adev); 5384 + struct 
si_power_info *si_pi = si_get_pi(adev); 5385 + u32 dll_cntl = si_pi->clock_registers.dll_cntl; 5386 + u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl; 5387 + u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl; 5388 + u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl; 5389 + u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl; 5390 + u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1; 5391 + u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2; 5392 + u32 mpll_ss1 = si_pi->clock_registers.mpll_ss1; 5393 + u32 mpll_ss2 = si_pi->clock_registers.mpll_ss2; 5394 + struct atom_mpll_param mpll_param; 5395 + int ret; 5396 + 5397 + ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param); 5398 + if (ret) 5399 + return ret; 5400 + 5401 + mpll_func_cntl &= ~BWCTRL_MASK; 5402 + mpll_func_cntl |= BWCTRL(mpll_param.bwcntl); 5403 + 5404 + mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK); 5405 + mpll_func_cntl_1 |= CLKF(mpll_param.clkf) | 5406 + CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode); 5407 + 5408 + mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK; 5409 + mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div); 5410 + 5411 + if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) { 5412 + mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK); 5413 + mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) | 5414 + YCLK_POST_DIV(mpll_param.post_div); 5415 + } 5416 + 5417 + if (pi->mclk_ss) { 5418 + struct amdgpu_atom_ss ss; 5419 + u32 freq_nom; 5420 + u32 tmp; 5421 + u32 reference_clock = adev->clock.mpll.reference_freq; 5422 + 5423 + if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) 5424 + freq_nom = memory_clock * 4; 5425 + else 5426 + freq_nom = memory_clock * 2; 5427 + 5428 + tmp = freq_nom / reference_clock; 5429 + tmp = tmp * tmp; 5430 + if (amdgpu_atombios_get_asic_ss_info(adev, &ss, 5431 + ASIC_INTERNAL_MEMORY_SS, freq_nom)) { 5432 + u32 clks = 
reference_clock * 5 / ss.rate; 5433 + u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom); 5434 + 5435 + mpll_ss1 &= ~CLKV_MASK; 5436 + mpll_ss1 |= CLKV(clkv); 5437 + 5438 + mpll_ss2 &= ~CLKS_MASK; 5439 + mpll_ss2 |= CLKS(clks); 5440 + } 5441 + } 5442 + 5443 + mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK; 5444 + mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed); 5445 + 5446 + if (dll_state_on) 5447 + mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB; 5448 + else 5449 + mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB); 5450 + 5451 + mclk->mclk_value = cpu_to_be32(memory_clock); 5452 + mclk->vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl); 5453 + mclk->vMPLL_FUNC_CNTL_1 = cpu_to_be32(mpll_func_cntl_1); 5454 + mclk->vMPLL_FUNC_CNTL_2 = cpu_to_be32(mpll_func_cntl_2); 5455 + mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl); 5456 + mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl); 5457 + mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl); 5458 + mclk->vDLL_CNTL = cpu_to_be32(dll_cntl); 5459 + mclk->vMPLL_SS = cpu_to_be32(mpll_ss1); 5460 + mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2); 5461 + 5462 + return 0; 5463 + } 5464 + 5465 + static void si_populate_smc_sp(struct amdgpu_device *adev, 5466 + struct amdgpu_ps *amdgpu_state, 5467 + SISLANDS_SMC_SWSTATE *smc_state) 5468 + { 5469 + struct si_ps *ps = si_get_ps(amdgpu_state); 5470 + struct rv7xx_power_info *pi = rv770_get_pi(adev); 5471 + int i; 5472 + 5473 + for (i = 0; i < ps->performance_level_count - 1; i++) 5474 + smc_state->levels[i].bSP = cpu_to_be32(pi->dsp); 5475 + 5476 + smc_state->levels[ps->performance_level_count - 1].bSP = 5477 + cpu_to_be32(pi->psp); 5478 + } 5479 + 5480 + static int si_convert_power_level_to_smc(struct amdgpu_device *adev, 5481 + struct rv7xx_pl *pl, 5482 + SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level) 5483 + { 5484 + struct rv7xx_power_info *pi = rv770_get_pi(adev); 5485 + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); 5486 + struct si_power_info 
*si_pi = si_get_pi(adev); 5487 + int ret; 5488 + bool dll_state_on; 5489 + u16 std_vddc; 5490 + bool gmc_pg = false; 5491 + 5492 + if (eg_pi->pcie_performance_request && 5493 + (si_pi->force_pcie_gen != AMDGPU_PCIE_GEN_INVALID)) 5494 + level->gen2PCIE = (u8)si_pi->force_pcie_gen; 5495 + else 5496 + level->gen2PCIE = (u8)pl->pcie_gen; 5497 + 5498 + ret = si_populate_sclk_value(adev, pl->sclk, &level->sclk); 5499 + if (ret) 5500 + return ret; 5501 + 5502 + level->mcFlags = 0; 5503 + 5504 + if (pi->mclk_stutter_mode_threshold && 5505 + (pl->mclk <= pi->mclk_stutter_mode_threshold) && 5506 + !eg_pi->uvd_enabled && 5507 + (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) && 5508 + (adev->pm.dpm.new_active_crtc_count <= 2)) { 5509 + level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN; 5510 + 5511 + if (gmc_pg) 5512 + level->mcFlags |= SISLANDS_SMC_MC_PG_EN; 5513 + } 5514 + 5515 + if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) { 5516 + if (pl->mclk > pi->mclk_edc_enable_threshold) 5517 + level->mcFlags |= SISLANDS_SMC_MC_EDC_RD_FLAG; 5518 + 5519 + if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold) 5520 + level->mcFlags |= SISLANDS_SMC_MC_EDC_WR_FLAG; 5521 + 5522 + level->strobeMode = si_get_strobe_mode_settings(adev, pl->mclk); 5523 + 5524 + if (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) { 5525 + if (si_get_mclk_frequency_ratio(pl->mclk, true) >= 5526 + ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf)) 5527 + dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false; 5528 + else 5529 + dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false; 5530 + } else { 5531 + dll_state_on = false; 5532 + } 5533 + } else { 5534 + level->strobeMode = si_get_strobe_mode_settings(adev, 5535 + pl->mclk); 5536 + 5537 + dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? 
true : false; 5538 + } 5539 + 5540 + ret = si_populate_mclk_value(adev, 5541 + pl->sclk, 5542 + pl->mclk, 5543 + &level->mclk, 5544 + (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) != 0, dll_state_on); 5545 + if (ret) 5546 + return ret; 5547 + 5548 + ret = si_populate_voltage_value(adev, 5549 + &eg_pi->vddc_voltage_table, 5550 + pl->vddc, &level->vddc); 5551 + if (ret) 5552 + return ret; 5553 + 5554 + 5555 + ret = si_get_std_voltage_value(adev, &level->vddc, &std_vddc); 5556 + if (ret) 5557 + return ret; 5558 + 5559 + ret = si_populate_std_voltage_value(adev, std_vddc, 5560 + level->vddc.index, &level->std_vddc); 5561 + if (ret) 5562 + return ret; 5563 + 5564 + if (eg_pi->vddci_control) { 5565 + ret = si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table, 5566 + pl->vddci, &level->vddci); 5567 + if (ret) 5568 + return ret; 5569 + } 5570 + 5571 + if (si_pi->vddc_phase_shed_control) { 5572 + ret = si_populate_phase_shedding_value(adev, 5573 + &adev->pm.dpm.dyn_state.phase_shedding_limits_table, 5574 + pl->vddc, 5575 + pl->sclk, 5576 + pl->mclk, 5577 + &level->vddc); 5578 + if (ret) 5579 + return ret; 5580 + } 5581 + 5582 + level->MaxPoweredUpCU = si_pi->max_cu; 5583 + 5584 + ret = si_populate_mvdd_value(adev, pl->mclk, &level->mvdd); 5585 + 5586 + return ret; 5587 + } 5588 + 5589 + static int si_populate_smc_t(struct amdgpu_device *adev, 5590 + struct amdgpu_ps *amdgpu_state, 5591 + SISLANDS_SMC_SWSTATE *smc_state) 5592 + { 5593 + struct rv7xx_power_info *pi = rv770_get_pi(adev); 5594 + struct si_ps *state = si_get_ps(amdgpu_state); 5595 + u32 a_t; 5596 + u32 t_l, t_h; 5597 + u32 high_bsp; 5598 + int i, ret; 5599 + 5600 + if (state->performance_level_count >= 9) 5601 + return -EINVAL; 5602 + 5603 + if (state->performance_level_count < 2) { 5604 + a_t = CG_R(0xffff) | CG_L(0); 5605 + smc_state->levels[0].aT = cpu_to_be32(a_t); 5606 + return 0; 5607 + } 5608 + 5609 + smc_state->levels[0].aT = cpu_to_be32(0); 5610 + 5611 + for (i = 0; i <= 
state->performance_level_count - 2; i++) { 5612 + ret = r600_calculate_at( 5613 + (50 / SISLANDS_MAX_HARDWARE_POWERLEVELS) * 100 * (i + 1), 5614 + 100 * R600_AH_DFLT, 5615 + state->performance_levels[i + 1].sclk, 5616 + state->performance_levels[i].sclk, 5617 + &t_l, 5618 + &t_h); 5619 + 5620 + if (ret) { 5621 + t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT; 5622 + t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT; 5623 + } 5624 + 5625 + a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK; 5626 + a_t |= CG_R(t_l * pi->bsp / 20000); 5627 + smc_state->levels[i].aT = cpu_to_be32(a_t); 5628 + 5629 + high_bsp = (i == state->performance_level_count - 2) ? 5630 + pi->pbsp : pi->bsp; 5631 + a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000); 5632 + smc_state->levels[i + 1].aT = cpu_to_be32(a_t); 5633 + } 5634 + 5635 + return 0; 5636 + } 5637 + 5638 + static int si_disable_ulv(struct amdgpu_device *adev) 5639 + { 5640 + struct si_power_info *si_pi = si_get_pi(adev); 5641 + struct si_ulv_param *ulv = &si_pi->ulv; 5642 + 5643 + if (ulv->supported) 5644 + return (si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ? 5645 + 0 : -EINVAL; 5646 + 5647 + return 0; 5648 + } 5649 + 5650 + static bool si_is_state_ulv_compatible(struct amdgpu_device *adev, 5651 + struct amdgpu_ps *amdgpu_state) 5652 + { 5653 + const struct si_power_info *si_pi = si_get_pi(adev); 5654 + const struct si_ulv_param *ulv = &si_pi->ulv; 5655 + const struct si_ps *state = si_get_ps(amdgpu_state); 5656 + int i; 5657 + 5658 + if (state->performance_levels[0].mclk != ulv->pl.mclk) 5659 + return false; 5660 + 5661 + /* XXX validate against display requirements! 
*/ 5662 + 5663 + for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) { 5664 + if (adev->clock.current_dispclk <= 5665 + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) { 5666 + if (ulv->pl.vddc < 5667 + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v) 5668 + return false; 5669 + } 5670 + } 5671 + 5672 + if ((amdgpu_state->vclk != 0) || (amdgpu_state->dclk != 0)) 5673 + return false; 5674 + 5675 + return true; 5676 + } 5677 + 5678 + static int si_set_power_state_conditionally_enable_ulv(struct amdgpu_device *adev, 5679 + struct amdgpu_ps *amdgpu_new_state) 5680 + { 5681 + const struct si_power_info *si_pi = si_get_pi(adev); 5682 + const struct si_ulv_param *ulv = &si_pi->ulv; 5683 + 5684 + if (ulv->supported) { 5685 + if (si_is_state_ulv_compatible(adev, amdgpu_new_state)) 5686 + return (si_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ? 5687 + 0 : -EINVAL; 5688 + } 5689 + return 0; 5690 + } 5691 + 5692 + static int si_convert_power_state_to_smc(struct amdgpu_device *adev, 5693 + struct amdgpu_ps *amdgpu_state, 5694 + SISLANDS_SMC_SWSTATE *smc_state) 5695 + { 5696 + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); 5697 + struct ni_power_info *ni_pi = ni_get_pi(adev); 5698 + struct si_power_info *si_pi = si_get_pi(adev); 5699 + struct si_ps *state = si_get_ps(amdgpu_state); 5700 + int i, ret; 5701 + u32 threshold; 5702 + u32 sclk_in_sr = 1350; /* ??? 
*/

	if (state->performance_level_count > SISLANDS_MAX_HARDWARE_POWERLEVELS)
		return -EINVAL;

	/* NOTE(review): "* 100 / 100" is a no-op on the top-level sclk;
	 * presumably a leftover from a percentage scale factor — confirm. */
	threshold = state->performance_levels[state->performance_level_count-1].sclk * 100 / 100;

	/* A state with both UVD clocks set is a UVD state. */
	if (amdgpu_state->vclk && amdgpu_state->dclk) {
		eg_pi->uvd_enabled = true;
		if (eg_pi->smu_uvd_hs)
			smc_state->flags |= PPSMC_SWSTATE_FLAG_UVD;
	} else {
		eg_pi->uvd_enabled = false;
	}

	if (state->dc_compatible)
		smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;

	smc_state->levelCount = 0;
	for (i = 0; i < state->performance_level_count; i++) {
		/* Deep sleep is only honored on level 0 unless explicitly
		 * allowed above the low level. */
		if (eg_pi->sclk_deep_sleep) {
			if ((i == 0) || si_pi->sclk_deep_sleep_above_low) {
				if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
					smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
				else
					smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
			}
		}

		ret = si_convert_power_level_to_smc(adev, &state->performance_levels[i],
						    &smc_state->levels[i]);
		smc_state->levels[i].arbRefreshState =
			(u8)(SISLANDS_DRIVER_STATE_ARB_INDEX + i);

		/* NOTE(review): ret is only checked after arbRefreshState is
		 * written; the level data may be partially populated on error. */
		if (ret)
			return ret;

		if (ni_pi->enable_power_containment)
			smc_state->levels[i].displayWatermark =
				(state->performance_levels[i].sclk < threshold) ?
				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
		else
			smc_state->levels[i].displayWatermark = (i < 2) ?
				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;

		if (eg_pi->dynamic_ac_timing)
			smc_state->levels[i].ACIndex = SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i;
		else
			smc_state->levels[i].ACIndex = 0;

		smc_state->levelCount++;
	}

	si_write_smc_soft_register(adev,
				   SI_SMC_SOFT_REGISTER_watermark_threshold,
				   threshold / 512);

	si_populate_smc_sp(adev, amdgpu_state, smc_state);

	/* Failures below only disable the corresponding feature; the state
	 * upload itself still proceeds. */
	ret = si_populate_power_containment_values(adev, amdgpu_state, smc_state);
	if (ret)
		ni_pi->enable_power_containment = false;

	ret = si_populate_sq_ramping_values(adev, amdgpu_state, smc_state);
	if (ret)
		ni_pi->enable_sq_ramping = false;

	return si_populate_smc_t(adev, amdgpu_state, smc_state);
}

/* Convert the new driver power state to SMC format and copy it into the
 * driverState slot of the SMC state table in SMC RAM. */
static int si_upload_sw_state(struct amdgpu_device *adev,
			      struct amdgpu_ps *amdgpu_new_state)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ps *new_state = si_get_ps(amdgpu_new_state);
	int ret;
	u32 address = si_pi->state_table_start +
		offsetof(SISLANDS_SMC_STATETABLE, driverState);
	/* SISLANDS_SMC_SWSTATE already contains one level; add the rest. */
	u32 state_size = sizeof(SISLANDS_SMC_SWSTATE) +
		((new_state->performance_level_count - 1) *
		 sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL));
	SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.driverState;

	memset(smc_state, 0, state_size);

	ret = si_convert_power_state_to_smc(adev, amdgpu_new_state, smc_state);
	if (ret)
		return ret;

	ret = si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
				   state_size, si_pi->sram_end);

	return ret;
}

/* Populate and upload the ULV (ultra-low voltage) state, if supported. */
static int si_upload_ulv_state(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ulv_param *ulv = &si_pi->ulv;
	int ret = 0;

	if (ulv->supported && ulv->pl.vddc) {
		u32 address = si_pi->state_table_start +
			offsetof(SISLANDS_SMC_STATETABLE, ULVState);
		SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.ULVState;
		u32 state_size = sizeof(SISLANDS_SMC_SWSTATE);

		memset(smc_state, 0, state_size);

		ret = si_populate_ulv_state(adev, smc_state);
		if (!ret)
			ret = si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
						   state_size, si_pi->sram_end);
	}

	return ret;
}

/* Tell the SMC which CRTC to use for mclk-change timing windows.
 * All failures are deliberately swallowed (best effort). */
static int si_upload_smc_data(struct amdgpu_device *adev)
{
	struct amdgpu_crtc *amdgpu_crtc = NULL;
	int i;

	if (adev->pm.dpm.new_active_crtc_count == 0)
		return 0;

	/* Pick the first active CRTC. */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->pm.dpm.new_active_crtcs & (1 << i)) {
			amdgpu_crtc = adev->mode_info.crtcs[i];
			break;
		}
	}

	if (amdgpu_crtc == NULL)
		return 0;

	if (amdgpu_crtc->line_time <= 0)
		return 0;

	if (si_write_smc_soft_register(adev,
				       SI_SMC_SOFT_REGISTER_crtc_index,
				       amdgpu_crtc->crtc_id) != PPSMC_Result_OK)
		return 0;

	if (si_write_smc_soft_register(adev,
				       SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
				       amdgpu_crtc->wm_high / amdgpu_crtc->line_time) != PPSMC_Result_OK)
		return 0;

	if (si_write_smc_soft_register(adev,
				       SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
				       amdgpu_crtc->wm_low / amdgpu_crtc->line_time) != PPSMC_Result_OK)
		return 0;

	return 0;
}

/* Append derived MC registers (EMRS/MRS/MRS1, and MC_PMG_AUTO_CMD for
 * non-GDDR5) to the MC register table, synthesizing their per-entry values
 * from the source registers captured in the table. New entries are appended
 * at index j (starting at table->last). */
static int si_set_mc_special_registers(struct amdgpu_device *adev,
				       struct si_mc_reg_table *table)
{
	u8 i, j, k;
	u32 temp_reg;

	for (i = 0, j = table->last; i < table->last; i++) {
		if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
			return -EINVAL;
		switch (table->mc_reg_address[i].s1) {
		case MC_SEQ_MISC1:
			/* EMRS: keep hw high half, take high half of MISC1. */
			temp_reg = RREG32(MC_PMG_CMD_EMRS);
			table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS;
			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP;
			for (k = 0; k < table->num_entries; k++)
				table->mc_reg_table_entry[k].mc_data[j] =
					((temp_reg & 0xffff0000)) |
					((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
			j++;
			if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;

			/* MRS: keep hw high half, take low half of MISC1. */
			temp_reg = RREG32(MC_PMG_CMD_MRS);
			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS;
			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) |
					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
				if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
			}
			j++;
			if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;

			if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
				table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD;
				table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD;
				for (k = 0; k < table->num_entries; k++)
					table->mc_reg_table_entry[k].mc_data[j] =
						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
				j++;
				if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
					return -EINVAL;
			}
			break;
		case MC_SEQ_RESERVE_M:
			/* MRS1: keep hw high half, take low half of RESERVE_M. */
			temp_reg = RREG32(MC_PMG_CMD_MRS1);
			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1;
			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP;
			for(k = 0; k < table->num_entries; k++)
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) |
					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
			j++;
			if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			break;
		default:
			break;
		}
	}

	table->last = j;

	return 0;
}

/* Map an MC register to its shadow (_LP) register used for the low-power
 * sequence. Returns false if the register has no shadow. */
static bool si_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
{
	bool result = true;
	switch (in_reg) {
	case MC_SEQ_RAS_TIMING:
		*out_reg = MC_SEQ_RAS_TIMING_LP;
		break;
	case MC_SEQ_CAS_TIMING:
		*out_reg = MC_SEQ_CAS_TIMING_LP;
		break;
	case MC_SEQ_MISC_TIMING:
		*out_reg = MC_SEQ_MISC_TIMING_LP;
		break;
	case MC_SEQ_MISC_TIMING2:
		*out_reg = MC_SEQ_MISC_TIMING2_LP;
		break;
	case MC_SEQ_RD_CTL_D0:
		*out_reg = MC_SEQ_RD_CTL_D0_LP;
		break;
	case MC_SEQ_RD_CTL_D1:
		*out_reg = MC_SEQ_RD_CTL_D1_LP;
		break;
	case MC_SEQ_WR_CTL_D0:
		*out_reg = MC_SEQ_WR_CTL_D0_LP;
		break;
	case MC_SEQ_WR_CTL_D1:
		*out_reg = MC_SEQ_WR_CTL_D1_LP;
		break;
	case MC_PMG_CMD_EMRS:
		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP;
		break;
	case MC_PMG_CMD_MRS:
		*out_reg = MC_SEQ_PMG_CMD_MRS_LP;
		break;
	case MC_PMG_CMD_MRS1:
		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP;
		break;
	case MC_SEQ_PMG_TIMING:
		*out_reg = MC_SEQ_PMG_TIMING_LP;
		break;
	case MC_PMG_CMD_MRS2:
		*out_reg = MC_SEQ_PMG_CMD_MRS2_LP;
		break;
	case MC_SEQ_WR_CTL_2:
		*out_reg = MC_SEQ_WR_CTL_2_LP;
		break;
	default:
		result = false;
		break;
	}

	return result;
}

/* Mark registers whose value differs between any two table entries; only
 * those need to be reprogrammed on a state change. */
static void si_set_valid_flag(struct si_mc_reg_table *table)
{
	u8 i, j;

	for (i = 0; i < table->last; i++) {
		for (j = 1; j < table->num_entries; j++) {
			if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) {
				table->valid_flag |= 1 << i;
				break;
			}
		}
	}
}

/* Fill in the s0 (shadow) address for every register in the table,
 * falling back to the s1 address when no shadow exists. */
static void si_set_s0_mc_reg_index(struct si_mc_reg_table *table)
{
	u32 i;
	u16 address;

	for (i = 0; i < table->last; i++)
		table->mc_reg_address[i].s0 = si_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
			address : table->mc_reg_address[i].s1;

}

/* Copy the vbios-format MC register table into the driver's format,
 * bounds-checking both dimensions. */
static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
				      struct si_mc_reg_table *si_table)
{
	u8 i, j;

	if (table->last > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
		return -EINVAL;
	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
		return -EINVAL;

	for (i = 0; i < table->last; i++)
		si_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
	si_table->last = table->last;

	for (i = 0; i < table->num_entries; i++) {
		si_table->mc_reg_table_entry[i].mclk_max =
			table->mc_reg_table_entry[i].mclk_max;
		for (j = 0; j < table->last; j++) {
			si_table->mc_reg_table_entry[i].mc_data[j] =
				table->mc_reg_table_entry[i].mc_data[j];
		}
	}
	si_table->num_entries = table->num_entries;

	return 0;
}

/* Build the driver MC register table: mirror the live MC registers into
 * their _LP shadows, pull the vbios table, derive the special registers,
 * and compute the valid-register mask. */
static int si_initialize_mc_reg_table(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	struct atom_mc_reg_table *table;
	struct si_mc_reg_table *si_table = &si_pi->mc_reg_table;
	u8 module_index = rv770_get_memory_module_index(adev);
	int ret;

	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* Seed the _LP shadow registers from the current hardware values. */
	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1)); 6053 + WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0)); 6054 + WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1)); 6055 + WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0)); 6056 + WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1)); 6057 + WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING)); 6058 + WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2)); 6059 + WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2)); 6060 + 6061 + ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table); 6062 + if (ret) 6063 + goto init_mc_done; 6064 + 6065 + ret = si_copy_vbios_mc_reg_table(table, si_table); 6066 + if (ret) 6067 + goto init_mc_done; 6068 + 6069 + si_set_s0_mc_reg_index(si_table); 6070 + 6071 + ret = si_set_mc_special_registers(adev, si_table); 6072 + if (ret) 6073 + goto init_mc_done; 6074 + 6075 + si_set_valid_flag(si_table); 6076 + 6077 + init_mc_done: 6078 + kfree(table); 6079 + 6080 + return ret; 6081 + 6082 + } 6083 + 6084 + static void si_populate_mc_reg_addresses(struct amdgpu_device *adev, 6085 + SMC_SIslands_MCRegisters *mc_reg_table) 6086 + { 6087 + struct si_power_info *si_pi = si_get_pi(adev); 6088 + u32 i, j; 6089 + 6090 + for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) { 6091 + if (si_pi->mc_reg_table.valid_flag & (1 << j)) { 6092 + if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) 6093 + break; 6094 + mc_reg_table->address[i].s0 = 6095 + cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0); 6096 + mc_reg_table->address[i].s1 = 6097 + cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s1); 6098 + i++; 6099 + } 6100 + } 6101 + mc_reg_table->last = (u8)i; 6102 + } 6103 + 6104 + static void si_convert_mc_registers(const struct si_mc_reg_entry *entry, 6105 + SMC_SIslands_MCRegisterSet *data, 6106 + u32 num_entries, u32 valid_flag) 6107 + { 6108 + u32 i, j; 6109 + 6110 + for(i = 0, j = 0; j < num_entries; j++) { 6111 + if (valid_flag & (1 << j)) { 6112 + data->value[i] = 
cpu_to_be32(entry->mc_data[j]); 6113 + i++; 6114 + } 6115 + } 6116 + } 6117 + 6118 + static void si_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev, 6119 + struct rv7xx_pl *pl, 6120 + SMC_SIslands_MCRegisterSet *mc_reg_table_data) 6121 + { 6122 + struct si_power_info *si_pi = si_get_pi(adev); 6123 + u32 i = 0; 6124 + 6125 + for (i = 0; i < si_pi->mc_reg_table.num_entries; i++) { 6126 + if (pl->mclk <= si_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max) 6127 + break; 6128 + } 6129 + 6130 + if ((i == si_pi->mc_reg_table.num_entries) && (i > 0)) 6131 + --i; 6132 + 6133 + si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[i], 6134 + mc_reg_table_data, si_pi->mc_reg_table.last, 6135 + si_pi->mc_reg_table.valid_flag); 6136 + } 6137 + 6138 + static void si_convert_mc_reg_table_to_smc(struct amdgpu_device *adev, 6139 + struct amdgpu_ps *amdgpu_state, 6140 + SMC_SIslands_MCRegisters *mc_reg_table) 6141 + { 6142 + struct si_ps *state = si_get_ps(amdgpu_state); 6143 + int i; 6144 + 6145 + for (i = 0; i < state->performance_level_count; i++) { 6146 + si_convert_mc_reg_table_entry_to_smc(adev, 6147 + &state->performance_levels[i], 6148 + &mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i]); 6149 + } 6150 + } 6151 + 6152 + static int si_populate_mc_reg_table(struct amdgpu_device *adev, 6153 + struct amdgpu_ps *amdgpu_boot_state) 6154 + { 6155 + struct si_ps *boot_state = si_get_ps(amdgpu_boot_state); 6156 + struct si_power_info *si_pi = si_get_pi(adev); 6157 + struct si_ulv_param *ulv = &si_pi->ulv; 6158 + SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table; 6159 + 6160 + memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters)); 6161 + 6162 + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_seq_index, 1); 6163 + 6164 + si_populate_mc_reg_addresses(adev, smc_mc_reg_table); 6165 + 6166 + si_convert_mc_reg_table_entry_to_smc(adev, &boot_state->performance_levels[0], 6167 + 
&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_INITIAL_SLOT]); 6168 + 6169 + si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0], 6170 + &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ACPI_SLOT], 6171 + si_pi->mc_reg_table.last, 6172 + si_pi->mc_reg_table.valid_flag); 6173 + 6174 + if (ulv->supported && ulv->pl.vddc != 0) 6175 + si_convert_mc_reg_table_entry_to_smc(adev, &ulv->pl, 6176 + &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT]); 6177 + else 6178 + si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0], 6179 + &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT], 6180 + si_pi->mc_reg_table.last, 6181 + si_pi->mc_reg_table.valid_flag); 6182 + 6183 + si_convert_mc_reg_table_to_smc(adev, amdgpu_boot_state, smc_mc_reg_table); 6184 + 6185 + return si_copy_bytes_to_smc(adev, si_pi->mc_reg_table_start, 6186 + (u8 *)smc_mc_reg_table, 6187 + sizeof(SMC_SIslands_MCRegisters), si_pi->sram_end); 6188 + } 6189 + 6190 + static int si_upload_mc_reg_table(struct amdgpu_device *adev, 6191 + struct amdgpu_ps *amdgpu_new_state) 6192 + { 6193 + struct si_ps *new_state = si_get_ps(amdgpu_new_state); 6194 + struct si_power_info *si_pi = si_get_pi(adev); 6195 + u32 address = si_pi->mc_reg_table_start + 6196 + offsetof(SMC_SIslands_MCRegisters, 6197 + data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]); 6198 + SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table; 6199 + 6200 + memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters)); 6201 + 6202 + si_convert_mc_reg_table_to_smc(adev, amdgpu_new_state, smc_mc_reg_table); 6203 + 6204 + 6205 + return si_copy_bytes_to_smc(adev, address, 6206 + (u8 *)&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT], 6207 + sizeof(SMC_SIslands_MCRegisterSet) * new_state->performance_level_count, 6208 + si_pi->sram_end); 6209 + 6210 + } 6211 + 6212 + static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable) 6213 + { 6214 + if (enable) 6215 
+ WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN); 6216 + else 6217 + WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN); 6218 + } 6219 + 6220 + static enum amdgpu_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev, 6221 + struct amdgpu_ps *amdgpu_state) 6222 + { 6223 + struct si_ps *state = si_get_ps(amdgpu_state); 6224 + int i; 6225 + u16 pcie_speed, max_speed = 0; 6226 + 6227 + for (i = 0; i < state->performance_level_count; i++) { 6228 + pcie_speed = state->performance_levels[i].pcie_gen; 6229 + if (max_speed < pcie_speed) 6230 + max_speed = pcie_speed; 6231 + } 6232 + return max_speed; 6233 + } 6234 + 6235 + static u16 si_get_current_pcie_speed(struct amdgpu_device *adev) 6236 + { 6237 + u32 speed_cntl; 6238 + 6239 + speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK; 6240 + speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT; 6241 + 6242 + return (u16)speed_cntl; 6243 + } 6244 + 6245 + static void si_request_link_speed_change_before_state_change(struct amdgpu_device *adev, 6246 + struct amdgpu_ps *amdgpu_new_state, 6247 + struct amdgpu_ps *amdgpu_current_state) 6248 + { 6249 + struct si_power_info *si_pi = si_get_pi(adev); 6250 + enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state); 6251 + enum amdgpu_pcie_gen current_link_speed; 6252 + 6253 + if (si_pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID) 6254 + current_link_speed = si_get_maximum_link_speed(adev, amdgpu_current_state); 6255 + else 6256 + current_link_speed = si_pi->force_pcie_gen; 6257 + 6258 + si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID; 6259 + si_pi->pspp_notify_required = false; 6260 + if (target_link_speed > current_link_speed) { 6261 + switch (target_link_speed) { 6262 + #if defined(CONFIG_ACPI) 6263 + case AMDGPU_PCIE_GEN3: 6264 + if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0) 6265 + break; 6266 + si_pi->force_pcie_gen = AMDGPU_PCIE_GEN2; 6267 + if (current_link_speed == 
AMDGPU_PCIE_GEN2) 6268 + break; 6269 + case AMDGPU_PCIE_GEN2: 6270 + if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0) 6271 + break; 6272 + #endif 6273 + default: 6274 + si_pi->force_pcie_gen = si_get_current_pcie_speed(adev); 6275 + break; 6276 + } 6277 + } else { 6278 + if (target_link_speed < current_link_speed) 6279 + si_pi->pspp_notify_required = true; 6280 + } 6281 + } 6282 + 6283 + static void si_notify_link_speed_change_after_state_change(struct amdgpu_device *adev, 6284 + struct amdgpu_ps *amdgpu_new_state, 6285 + struct amdgpu_ps *amdgpu_current_state) 6286 + { 6287 + struct si_power_info *si_pi = si_get_pi(adev); 6288 + enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state); 6289 + u8 request; 6290 + 6291 + if (si_pi->pspp_notify_required) { 6292 + if (target_link_speed == AMDGPU_PCIE_GEN3) 6293 + request = PCIE_PERF_REQ_PECI_GEN3; 6294 + else if (target_link_speed == AMDGPU_PCIE_GEN2) 6295 + request = PCIE_PERF_REQ_PECI_GEN2; 6296 + else 6297 + request = PCIE_PERF_REQ_PECI_GEN1; 6298 + 6299 + if ((request == PCIE_PERF_REQ_PECI_GEN1) && 6300 + (si_get_current_pcie_speed(adev) > 0)) 6301 + return; 6302 + 6303 + #if defined(CONFIG_ACPI) 6304 + amdgpu_acpi_pcie_performance_request(adev, request, false); 6305 + #endif 6306 + } 6307 + } 6308 + 6309 + #if 0 6310 + static int si_ds_request(struct amdgpu_device *adev, 6311 + bool ds_status_on, u32 count_write) 6312 + { 6313 + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); 6314 + 6315 + if (eg_pi->sclk_deep_sleep) { 6316 + if (ds_status_on) 6317 + return (si_send_msg_to_smc(adev, PPSMC_MSG_CancelThrottleOVRDSCLKDS) == 6318 + PPSMC_Result_OK) ? 6319 + 0 : -EINVAL; 6320 + else 6321 + return (si_send_msg_to_smc(adev, PPSMC_MSG_ThrottleOVRDSCLKDS) == 6322 + PPSMC_Result_OK) ? 
0 : -EINVAL; 6323 + } 6324 + return 0; 6325 + } 6326 + #endif 6327 + 6328 + static void si_set_max_cu_value(struct amdgpu_device *adev) 6329 + { 6330 + struct si_power_info *si_pi = si_get_pi(adev); 6331 + 6332 + if (adev->asic_type == CHIP_VERDE) { 6333 + switch (adev->pdev->device) { 6334 + case 0x6820: 6335 + case 0x6825: 6336 + case 0x6821: 6337 + case 0x6823: 6338 + case 0x6827: 6339 + si_pi->max_cu = 10; 6340 + break; 6341 + case 0x682D: 6342 + case 0x6824: 6343 + case 0x682F: 6344 + case 0x6826: 6345 + si_pi->max_cu = 8; 6346 + break; 6347 + case 0x6828: 6348 + case 0x6830: 6349 + case 0x6831: 6350 + case 0x6838: 6351 + case 0x6839: 6352 + case 0x683D: 6353 + si_pi->max_cu = 10; 6354 + break; 6355 + case 0x683B: 6356 + case 0x683F: 6357 + case 0x6829: 6358 + si_pi->max_cu = 8; 6359 + break; 6360 + default: 6361 + si_pi->max_cu = 0; 6362 + break; 6363 + } 6364 + } else { 6365 + si_pi->max_cu = 0; 6366 + } 6367 + } 6368 + 6369 + static int si_patch_single_dependency_table_based_on_leakage(struct amdgpu_device *adev, 6370 + struct amdgpu_clock_voltage_dependency_table *table) 6371 + { 6372 + u32 i; 6373 + int j; 6374 + u16 leakage_voltage; 6375 + 6376 + if (table) { 6377 + for (i = 0; i < table->count; i++) { 6378 + switch (si_get_leakage_voltage_from_leakage_index(adev, 6379 + table->entries[i].v, 6380 + &leakage_voltage)) { 6381 + case 0: 6382 + table->entries[i].v = leakage_voltage; 6383 + break; 6384 + case -EAGAIN: 6385 + return -EINVAL; 6386 + case -EINVAL: 6387 + default: 6388 + break; 6389 + } 6390 + } 6391 + 6392 + for (j = (table->count - 2); j >= 0; j--) { 6393 + table->entries[j].v = (table->entries[j].v <= table->entries[j + 1].v) ? 
6394 + table->entries[j].v : table->entries[j + 1].v; 6395 + } 6396 + } 6397 + return 0; 6398 + } 6399 + 6400 + static int si_patch_dependency_tables_based_on_leakage(struct amdgpu_device *adev) 6401 + { 6402 + int ret = 0; 6403 + 6404 + ret = si_patch_single_dependency_table_based_on_leakage(adev, 6405 + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk); 6406 + ret = si_patch_single_dependency_table_based_on_leakage(adev, 6407 + &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk); 6408 + ret = si_patch_single_dependency_table_based_on_leakage(adev, 6409 + &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk); 6410 + return ret; 6411 + } 6412 + 6413 + static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev, 6414 + struct amdgpu_ps *amdgpu_new_state, 6415 + struct amdgpu_ps *amdgpu_current_state) 6416 + { 6417 + u32 lane_width; 6418 + u32 new_lane_width = 6419 + (amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT; 6420 + u32 current_lane_width = 6421 + (amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT; 6422 + 6423 + if (new_lane_width != current_lane_width) { 6424 + amdgpu_set_pcie_lanes(adev, new_lane_width); 6425 + lane_width = amdgpu_get_pcie_lanes(adev); 6426 + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width); 6427 + } 6428 + } 6429 + 6430 + static void si_dpm_setup_asic(struct amdgpu_device *adev) 6431 + { 6432 + si_read_clock_registers(adev); 6433 + si_enable_acpi_power_management(adev); 6434 + } 6435 + 6436 + static int si_thermal_enable_alert(struct amdgpu_device *adev, 6437 + bool enable) 6438 + { 6439 + u32 thermal_int = RREG32(CG_THERMAL_INT); 6440 + 6441 + if (enable) { 6442 + PPSMC_Result result; 6443 + 6444 + thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); 6445 + WREG32(CG_THERMAL_INT, thermal_int); 6446 + result = si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt); 6447 + if (result != 
PPSMC_Result_OK) { 6448 + DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); 6449 + return -EINVAL; 6450 + } 6451 + } else { 6452 + thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW; 6453 + WREG32(CG_THERMAL_INT, thermal_int); 6454 + } 6455 + 6456 + return 0; 6457 + } 6458 + 6459 + static int si_thermal_set_temperature_range(struct amdgpu_device *adev, 6460 + int min_temp, int max_temp) 6461 + { 6462 + int low_temp = 0 * 1000; 6463 + int high_temp = 255 * 1000; 6464 + 6465 + if (low_temp < min_temp) 6466 + low_temp = min_temp; 6467 + if (high_temp > max_temp) 6468 + high_temp = max_temp; 6469 + if (high_temp < low_temp) { 6470 + DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); 6471 + return -EINVAL; 6472 + } 6473 + 6474 + WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK); 6475 + WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK); 6476 + WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK); 6477 + 6478 + adev->pm.dpm.thermal.min_temp = low_temp; 6479 + adev->pm.dpm.thermal.max_temp = high_temp; 6480 + 6481 + return 0; 6482 + } 6483 + 6484 + static void si_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode) 6485 + { 6486 + struct si_power_info *si_pi = si_get_pi(adev); 6487 + u32 tmp; 6488 + 6489 + if (si_pi->fan_ctrl_is_in_default_mode) { 6490 + tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT; 6491 + si_pi->fan_ctrl_default_mode = tmp; 6492 + tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT; 6493 + si_pi->t_min = tmp; 6494 + si_pi->fan_ctrl_is_in_default_mode = false; 6495 + } 6496 + 6497 + tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK; 6498 + tmp |= TMIN(0); 6499 + WREG32(CG_FDO_CTRL2, tmp); 6500 + 6501 + tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK; 6502 + tmp |= FDO_PWM_MODE(mode); 6503 + WREG32(CG_FDO_CTRL2, tmp); 6504 + } 6505 + 6506 + static int si_thermal_setup_fan_table(struct amdgpu_device *adev) 6507 + { 
	struct si_power_info *si_pi = si_get_pi(adev);
	PP_SIslands_FanTable fan_table = { FDO_MODE_HARDWARE };
	u32 duty100;
	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	u16 fdo_min, slope1, slope2;
	u32 reference_clock, tmp;
	int ret;
	u64 tmp64;

	/* No fan table location in the firmware header: fall back to
	 * driver/manual fan control. */
	if (!si_pi->fan_table_start) {
		adev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;

	if (duty100 == 0) {
		adev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	/* pwm_min is in hundredths of a percent; scale to duty counts. */
	tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
	do_div(tmp64, 10000);
	fdo_min = (u16)tmp64;

	t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
	t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;

	pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
	pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;

	/* Piecewise-linear PWM-vs-temperature slopes (rounded). */
	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	fan_table.temp_min = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
	fan_table.temp_med = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
	fan_table.temp_max = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);

	fan_table.slope1 = cpu_to_be16(slope1);
	fan_table.slope2 = cpu_to_be16(slope2);

	fan_table.fdo_min = cpu_to_be16(fdo_min);

	fan_table.hys_down = cpu_to_be16(adev->pm.dpm.fan.t_hyst);

	fan_table.hys_up = cpu_to_be16(1);

	fan_table.hys_slope = cpu_to_be16(1);

	fan_table.temp_resp_lim = cpu_to_be16(5);

	reference_clock = amdgpu_asic_get_xclk(adev);

	fan_table.refresh_period = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
						reference_clock) / 1600);

	fan_table.fdo_max = cpu_to_be16((u16)duty100);

	tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
	fan_table.temp_src = (uint8_t)tmp;

	ret = si_copy_bytes_to_smc(adev,
				   si_pi->fan_table_start,
				   (u8 *)(&fan_table),
				   sizeof(fan_table),
				   si_pi->sram_end);

	/* Upload failure downgrades to manual fan control but is not
	 * propagated as an error. */
	if (ret) {
		DRM_ERROR("Failed to load fan table to the SMC.");
		adev->pm.dpm.fan.ucode_fan_control = false;
	}

	return 0;
}

/* Hand fan control over to the SMC firmware. */
static int si_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	PPSMC_Result ret;

	ret = si_send_msg_to_smc(adev, PPSMC_StartFanControl);
	if (ret == PPSMC_Result_OK) {
		si_pi->fan_is_controlled_by_smc = true;
		return 0;
	} else {
		return -EINVAL;
	}
}

/* Take fan control back from the SMC firmware. */
static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	PPSMC_Result ret;

	ret = si_send_msg_to_smc(adev, PPSMC_StopFanControl);

	if (ret == PPSMC_Result_OK) {
		si_pi->fan_is_controlled_by_smc = false;
		return 0;
	} else {
		return -EINVAL;
	}
}

/* Report the current fan PWM duty as a percentage (clamped to 100). */
static int si_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
					u32 *speed)
{
	u32 duty, duty100;
	u64 tmp64;

	if (adev->pm.no_fan)
		return -ENOENT;

	duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
	duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)duty * 100;
	do_div(tmp64, duty100);
	*speed = (u32)tmp64;

	if (*speed > 100)
		*speed = 100;

	return 0;
}

/* Set a static fan PWM duty as a percentage; refused while the SMC owns
 * the fan. */
static int si_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
					u32 speed)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 tmp;
	u32 duty, duty100;
	u64 tmp64;

	if (adev->pm.no_fan)
		return -ENOENT;

	if (si_pi->fan_is_controlled_by_smc)
		return -EINVAL;

	if (speed > 100)
		return -EINVAL;

	duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)speed * duty100;
	do_div(tmp64, 100);
	duty = (u32)tmp64;

	tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
	tmp |= FDO_STATIC_DUTY(duty);
	WREG32(CG_FDO_CTRL0, tmp);

	return 0;
}

/* Switch between manual (static) and automatic (SMC) fan control. */
static void si_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
{
	if (mode) {
		/* stop auto-manage */
		if (adev->pm.dpm.fan.ucode_fan_control)
			si_fan_ctrl_stop_smc_fan_control(adev);
		si_fan_ctrl_set_static_mode(adev, mode);
	} else {
		/* restart auto-manage */
		if (adev->pm.dpm.fan.ucode_fan_control)
			si_thermal_start_smc_fan_control(adev);
		else
			si_fan_ctrl_set_default_mode(adev);
	}
}

/* Return the current fan PWM mode; 0 when the SMC owns the fan. */
static u32 si_dpm_get_fan_control_mode(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 tmp;

	if (si_pi->fan_is_controlled_by_smc)
		return 0;

	tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
	return (tmp >> FDO_PWM_MODE_SHIFT);
}

#if 0
/* Derive the fan speed in RPM from the tachometer period (unused). */
static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
					 u32 *speed)
{
	u32 tach_period;
	u32 xclk = amdgpu_asic_get_xclk(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (adev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
	if (tach_period == 0)
		return -ENOENT;

	*speed = 60 * xclk * 10000 / tach_period;

	return 0;
}

/* Program a target RPM via the tachometer period (unused). */
static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
					 u32 speed)
{
	u32 tach_period, tmp;
	u32 xclk = amdgpu_asic_get_xclk(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (adev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	if ((speed < adev->pm.fan_min_rpm) ||
	    (speed > adev->pm.fan_max_rpm))
		return -EINVAL;

	if (adev->pm.dpm.fan.ucode_fan_control)
		si_fan_ctrl_stop_smc_fan_control(adev);

	tach_period = 60 * xclk * 10000 / (8 * speed);
	tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
	tmp |= TARGET_PERIOD(tach_period);
	WREG32(CG_TACH_CTRL, tmp);

	si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);

	return 0;
}
#endif

/* Restore the fan PWM mode and TMIN saved by
 * si_fan_ctrl_set_static_mode(). */
static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 tmp;

	if (!si_pi->fan_ctrl_is_in_default_mode) {
		tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
		tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
		WREG32(CG_FDO_CTRL2, tmp);

		tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
		tmp |= TMIN(si_pi->t_min);
		WREG32(CG_FDO_CTRL2, tmp);
		si_pi->fan_ctrl_is_in_default_mode = true;
	}
}

/* Start SMC fan control if the pptable enables it. */
static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev)
{
	if (adev->pm.dpm.fan.ucode_fan_control) {
		si_fan_ctrl_start_smc_fan_control(adev);
		si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
	}
}

/* Program tach edges-per-revolution and the PWM response rate. */
static void si_thermal_initialize(struct amdgpu_device *adev)
{
	u32 tmp;

	if (adev->pm.fan_pulses_per_revolution) {
		tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
		tmp |= EDGE_PER_REV(adev->pm.fan_pulses_per_revolution -1);
		WREG32(CG_TACH_CTRL, tmp);
	}

	tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
	tmp |= TACH_PWM_RESP_RATE(0x28);
	WREG32(CG_FDO_CTRL2, tmp);
}

/* Bring up the thermal controller: trip points, alerts, and (with the
 * SMC halted during table upload) the firmware fan table. */
static int si_thermal_start_thermal_controller(struct amdgpu_device *adev)
{
	int ret;

	si_thermal_initialize(adev);
	ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
	if (ret)
		return ret;
	ret = si_thermal_enable_alert(adev, true);
	if (ret)
		return ret;
	if (adev->pm.dpm.fan.ucode_fan_control) {
		ret = si_halt_smc(adev);
		if (ret)
			return ret;
		ret = si_thermal_setup_fan_table(adev);
		if (ret)
			return ret;
		ret = si_resume_smc(adev);
		if (ret)
			return ret;
		si_thermal_start_smc_fan_control(adev);
	}

	return 0;
}

/* Return the fan to default control on teardown. */
static void si_thermal_stop_thermal_controller(struct amdgpu_device *adev)
{
	if (!adev->pm.no_fan) {
		si_fan_ctrl_set_default_mode(adev);
		si_fan_ctrl_stop_smc_fan_control(adev);
	}
}

/* Full DPM bring-up sequence: voltage control, MC tables, display gap,
 * firmware upload, SMC tables (state/SPLL/arb/MC/CAC/DTE/TDP), then start
 * the SMC, DPM and the thermal controller. Ordering is hardware-mandated. */
static int si_dpm_enable(struct amdgpu_device *adev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
	int ret;

	if (si_is_smc_running(adev))
		return -EINVAL;
	if (pi->voltage_control || si_pi->voltage_control_svi2)
		si_enable_voltage_control(adev, true);
	if (pi->mvdd_control)
		si_get_mvdd_configuration(adev);
	if (pi->voltage_control || si_pi->voltage_control_svi2) {
		ret = si_construct_voltage_tables(adev);
		if (ret) {
			DRM_ERROR("si_construct_voltage_tables failed\n");
			return ret;
		}
	}
	/* MC reg table failure only disables dynamic AC timing. */
	if (eg_pi->dynamic_ac_timing) {
		ret = si_initialize_mc_reg_table(adev);
		if (ret)
			eg_pi->dynamic_ac_timing = false;
	}
	if (pi->dynamic_ss)
		si_enable_spread_spectrum(adev, true);
	if (pi->thermal_protection)
		si_enable_thermal_protection(adev, true);
	si_setup_bsp(adev);
	si_program_git(adev);
	si_program_tp(adev);
	si_program_tpp(adev);
	si_program_sstp(adev);
	si_enable_display_gap(adev);
	si_program_vc(adev);
	ret = si_upload_firmware(adev);
	if (ret) {
		DRM_ERROR("si_upload_firmware failed\n");
		return ret;
	}
	ret = si_process_firmware_header(adev);
	if (ret) {
		DRM_ERROR("si_process_firmware_header failed\n");
		return ret;
	}
	ret = si_initial_switch_from_arb_f0_to_f1(adev);
	if (ret) {
		DRM_ERROR("si_initial_switch_from_arb_f0_to_f1 failed\n");
		return ret;
	}
	ret = si_init_smc_table(adev);
	if (ret) {
		DRM_ERROR("si_init_smc_table failed\n");
		return ret;
	}
	ret = si_init_smc_spll_table(adev);
	if (ret) {
		DRM_ERROR("si_init_smc_spll_table failed\n");
		return ret;
	}
	ret = si_init_arb_table_index(adev);
	if (ret) {
		DRM_ERROR("si_init_arb_table_index failed\n");
		return ret;
	}
	if (eg_pi->dynamic_ac_timing) {
		ret = si_populate_mc_reg_table(adev, boot_ps);
		if (ret) {
			DRM_ERROR("si_populate_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = si_initialize_smc_cac_tables(adev);
	if (ret) {
		DRM_ERROR("si_initialize_smc_cac_tables failed\n");
		return ret;
	}
	ret = si_initialize_hardware_cac_manager(adev);
	if (ret) {
DRM_ERROR("si_initialize_hardware_cac_manager failed\n"); 6907 + return ret; 6908 + } 6909 + ret = si_initialize_smc_dte_tables(adev); 6910 + if (ret) { 6911 + DRM_ERROR("si_initialize_smc_dte_tables failed\n"); 6912 + return ret; 6913 + } 6914 + ret = si_populate_smc_tdp_limits(adev, boot_ps); 6915 + if (ret) { 6916 + DRM_ERROR("si_populate_smc_tdp_limits failed\n"); 6917 + return ret; 6918 + } 6919 + ret = si_populate_smc_tdp_limits_2(adev, boot_ps); 6920 + if (ret) { 6921 + DRM_ERROR("si_populate_smc_tdp_limits_2 failed\n"); 6922 + return ret; 6923 + } 6924 + si_program_response_times(adev); 6925 + si_program_ds_registers(adev); 6926 + si_dpm_start_smc(adev); 6927 + ret = si_notify_smc_display_change(adev, false); 6928 + if (ret) { 6929 + DRM_ERROR("si_notify_smc_display_change failed\n"); 6930 + return ret; 6931 + } 6932 + si_enable_sclk_control(adev, true); 6933 + si_start_dpm(adev); 6934 + 6935 + si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); 6936 + 6937 + si_thermal_start_thermal_controller(adev); 6938 + 6939 + ni_update_current_ps(adev, boot_ps); 6940 + 6941 + return 0; 6942 + } 6943 + 6944 + static int si_set_temperature_range(struct amdgpu_device *adev) 6945 + { 6946 + int ret; 6947 + 6948 + ret = si_thermal_enable_alert(adev, false); 6949 + if (ret) 6950 + return ret; 6951 + ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); 6952 + if (ret) 6953 + return ret; 6954 + ret = si_thermal_enable_alert(adev, true); 6955 + if (ret) 6956 + return ret; 6957 + 6958 + return ret; 6959 + } 6960 + 6961 + static void si_dpm_disable(struct amdgpu_device *adev) 6962 + { 6963 + struct rv7xx_power_info *pi = rv770_get_pi(adev); 6964 + struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps; 6965 + 6966 + if (!si_is_smc_running(adev)) 6967 + return; 6968 + si_thermal_stop_thermal_controller(adev); 6969 + si_disable_ulv(adev); 6970 + si_clear_vc(adev); 6971 + if (pi->thermal_protection) 6972 + 
/* Copy the requested power state into eg_pi->requested_rps and apply the
 * software clamping/adjustment rules before the actual switch.
 */
static int si_dpm_pre_set_power_state(struct amdgpu_device *adev)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
	struct amdgpu_ps *new_ps = &requested_ps;

	ni_update_requested_ps(adev, new_ps);

	si_apply_state_adjust_rules(adev, &eg_pi->requested_rps);

	return 0;
}

/* Re-upload the TDP limits for the requested state; the SMC must be
 * halted around the table writes.
 */
static int si_power_control_set_level(struct amdgpu_device *adev)
{
	struct amdgpu_ps *new_ps = adev->pm.dpm.requested_ps;
	int ret;

	ret = si_restrict_performance_levels_before_switch(adev);
	if (ret)
		return ret;
	ret = si_halt_smc(adev);
	if (ret)
		return ret;
	ret = si_populate_smc_tdp_limits(adev, new_ps);
	if (ret)
		return ret;
	ret = si_populate_smc_tdp_limits_2(adev, new_ps);
	if (ret)
		return ret;
	ret = si_resume_smc(adev);
	if (ret)
		return ret;
	ret = si_set_sw_state(adev);
	if (ret)
		return ret;
	return 0;
}

/* Perform the actual power-state switch from current_rps to requested_rps:
 * disable ULV and CAC/containment, upload the new state tables to the
 * halted SMC, reprogram memory timing and PCIe lanes, then resume the SMC
 * and re-enable CAC/containment for the new state.  Call order matters.
 */
static int si_dpm_set_power_state(struct amdgpu_device *adev)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
	struct amdgpu_ps *old_ps = &eg_pi->current_rps;
	int ret;

	ret = si_disable_ulv(adev);
	if (ret) {
		DRM_ERROR("si_disable_ulv failed\n");
		return ret;
	}
	ret = si_restrict_performance_levels_before_switch(adev);
	if (ret) {
		DRM_ERROR("si_restrict_performance_levels_before_switch failed\n");
		return ret;
	}
	if (eg_pi->pcie_performance_request)
		si_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
	ni_set_uvd_clock_before_set_eng_clock(adev, new_ps, old_ps);
	/* CAC and power containment must be off while the state changes. */
	ret = si_enable_power_containment(adev, new_ps, false);
	if (ret) {
		DRM_ERROR("si_enable_power_containment failed\n");
		return ret;
	}
	ret = si_enable_smc_cac(adev, new_ps, false);
	if (ret) {
		DRM_ERROR("si_enable_smc_cac failed\n");
		return ret;
	}
	ret = si_halt_smc(adev);
	if (ret) {
		DRM_ERROR("si_halt_smc failed\n");
		return ret;
	}
	ret = si_upload_sw_state(adev, new_ps);
	if (ret) {
		DRM_ERROR("si_upload_sw_state failed\n");
		return ret;
	}
	ret = si_upload_smc_data(adev);
	if (ret) {
		DRM_ERROR("si_upload_smc_data failed\n");
		return ret;
	}
	ret = si_upload_ulv_state(adev);
	if (ret) {
		DRM_ERROR("si_upload_ulv_state failed\n");
		return ret;
	}
	if (eg_pi->dynamic_ac_timing) {
		ret = si_upload_mc_reg_table(adev, new_ps);
		if (ret) {
			DRM_ERROR("si_upload_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = si_program_memory_timing_parameters(adev, new_ps);
	if (ret) {
		DRM_ERROR("si_program_memory_timing_parameters failed\n");
		return ret;
	}
	si_set_pcie_lane_width_in_smc(adev, new_ps, old_ps);

	ret = si_resume_smc(adev);
	if (ret) {
		DRM_ERROR("si_resume_smc failed\n");
		return ret;
	}
	ret = si_set_sw_state(adev);
	if (ret) {
		DRM_ERROR("si_set_sw_state failed\n");
		return ret;
	}
	ni_set_uvd_clock_after_set_eng_clock(adev, new_ps, old_ps);
	if (eg_pi->pcie_performance_request)
		si_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
	ret = si_set_power_state_conditionally_enable_ulv(adev, new_ps);
	if (ret) {
		DRM_ERROR("si_set_power_state_conditionally_enable_ulv failed\n");
		return ret;
	}
	ret = si_enable_smc_cac(adev, new_ps, true);
	if (ret) {
		DRM_ERROR("si_enable_smc_cac failed\n");
		return ret;
	}
	ret = si_enable_power_containment(adev, new_ps, true);
	if (ret) {
		DRM_ERROR("si_enable_power_containment failed\n");
		return ret;
	}

	ret = si_power_control_set_level(adev);
	if (ret) {
		DRM_ERROR("si_power_control_set_level failed\n");
		return ret;
	}

	return 0;
}

/* After a successful switch, the requested state becomes the current one. */
static void si_dpm_post_set_power_state(struct amdgpu_device *adev)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct amdgpu_ps *new_ps = &eg_pi->requested_rps;

	ni_update_current_ps(adev, new_ps);
}

#if 0
void si_dpm_reset_asic(struct amdgpu_device *adev)
{
	si_restrict_performance_levels_before_switch(adev);
	si_disable_ulv(adev);
	si_set_boot_state(adev);
}
#endif

/* Re-program the display gap when the display configuration changes. */
static void si_dpm_display_configuration_changed(struct amdgpu_device *adev)
{
	si_program_display_gap(adev);
}


/* Fill in the non-clock portion of an amdgpu_ps from the ATOM PPLIB
 * table: class flags, caps, and UVD vclk/dclk (explicit on VER1+ tables,
 * defaults for older UVD states).  Also records the boot and UVD states.
 */
static void si_parse_pplib_non_clock_info(struct amdgpu_device *adev,
					  struct amdgpu_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else if (r600_is_uvd_state(rps->class, rps->class2)) {
		rps->vclk = RV770_DEFAULT_VCLK_FREQ;
		rps->dclk = RV770_DEFAULT_DCLK_FREQ;
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
		adev->pm.dpm.boot_ps = rps;
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		adev->pm.dpm.uvd_ps = rps;
}
rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); 7161 + } else if (r600_is_uvd_state(rps->class, rps->class2)) { 7162 + rps->vclk = RV770_DEFAULT_VCLK_FREQ; 7163 + rps->dclk = RV770_DEFAULT_DCLK_FREQ; 7164 + } else { 7165 + rps->vclk = 0; 7166 + rps->dclk = 0; 7167 + } 7168 + 7169 + if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) 7170 + adev->pm.dpm.boot_ps = rps; 7171 + if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) 7172 + adev->pm.dpm.uvd_ps = rps; 7173 + } 7174 + 7175 + static void si_parse_pplib_clock_info(struct amdgpu_device *adev, 7176 + struct amdgpu_ps *rps, int index, 7177 + union pplib_clock_info *clock_info) 7178 + { 7179 + struct rv7xx_power_info *pi = rv770_get_pi(adev); 7180 + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); 7181 + struct si_power_info *si_pi = si_get_pi(adev); 7182 + struct si_ps *ps = si_get_ps(rps); 7183 + u16 leakage_voltage; 7184 + struct rv7xx_pl *pl = &ps->performance_levels[index]; 7185 + int ret; 7186 + 7187 + ps->performance_level_count = index + 1; 7188 + 7189 + pl->sclk = le16_to_cpu(clock_info->si.usEngineClockLow); 7190 + pl->sclk |= clock_info->si.ucEngineClockHigh << 16; 7191 + pl->mclk = le16_to_cpu(clock_info->si.usMemoryClockLow); 7192 + pl->mclk |= clock_info->si.ucMemoryClockHigh << 16; 7193 + 7194 + pl->vddc = le16_to_cpu(clock_info->si.usVDDC); 7195 + pl->vddci = le16_to_cpu(clock_info->si.usVDDCI); 7196 + pl->flags = le32_to_cpu(clock_info->si.ulFlags); 7197 + pl->pcie_gen = r600_get_pcie_gen_support(adev, 7198 + si_pi->sys_pcie_mask, 7199 + si_pi->boot_pcie_gen, 7200 + clock_info->si.ucPCIEGen); 7201 + 7202 + /* patch up vddc if necessary */ 7203 + ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc, 7204 + &leakage_voltage); 7205 + if (ret == 0) 7206 + pl->vddc = leakage_voltage; 7207 + 7208 + if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { 7209 + pi->acpi_vddc = pl->vddc; 7210 + eg_pi->acpi_vddci = pl->vddci; 7211 + si_pi->acpi_pcie_gen = pl->pcie_gen; 7212 + } 7213 + 7214 + 
if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) && 7215 + index == 0) { 7216 + /* XXX disable for A0 tahiti */ 7217 + si_pi->ulv.supported = false; 7218 + si_pi->ulv.pl = *pl; 7219 + si_pi->ulv.one_pcie_lane_in_ulv = false; 7220 + si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT; 7221 + si_pi->ulv.cg_ulv_parameter = SISLANDS_CGULVPARAMETER_DFLT; 7222 + si_pi->ulv.cg_ulv_control = SISLANDS_CGULVCONTROL_DFLT; 7223 + } 7224 + 7225 + if (pi->min_vddc_in_table > pl->vddc) 7226 + pi->min_vddc_in_table = pl->vddc; 7227 + 7228 + if (pi->max_vddc_in_table < pl->vddc) 7229 + pi->max_vddc_in_table = pl->vddc; 7230 + 7231 + /* patch up boot state */ 7232 + if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { 7233 + u16 vddc, vddci, mvdd; 7234 + amdgpu_atombios_get_default_voltages(adev, &vddc, &vddci, &mvdd); 7235 + pl->mclk = adev->clock.default_mclk; 7236 + pl->sclk = adev->clock.default_sclk; 7237 + pl->vddc = vddc; 7238 + pl->vddci = vddci; 7239 + si_pi->mvdd_bootup_value = mvdd; 7240 + } 7241 + 7242 + if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 7243 + ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) { 7244 + adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk; 7245 + adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk; 7246 + adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc; 7247 + adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci; 7248 + } 7249 + } 7250 + 7251 + union pplib_power_state { 7252 + struct _ATOM_PPLIB_STATE v1; 7253 + struct _ATOM_PPLIB_STATE_V2 v2; 7254 + }; 7255 + 7256 + static int si_parse_power_table(struct amdgpu_device *adev) 7257 + { 7258 + struct amdgpu_mode_info *mode_info = &adev->mode_info; 7259 + struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; 7260 + union pplib_power_state *power_state; 7261 + int i, j, k, non_clock_array_index, clock_array_index; 7262 + union pplib_clock_info *clock_info; 7263 + struct _StateArray *state_array; 7264 + struct _ClockInfoArray 
*clock_info_array; 7265 + struct _NonClockInfoArray *non_clock_info_array; 7266 + union power_info *power_info; 7267 + int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); 7268 + u16 data_offset; 7269 + u8 frev, crev; 7270 + u8 *power_state_offset; 7271 + struct si_ps *ps; 7272 + 7273 + if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 7274 + &frev, &crev, &data_offset)) 7275 + return -EINVAL; 7276 + power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 7277 + 7278 + amdgpu_add_thermal_controller(adev); 7279 + 7280 + state_array = (struct _StateArray *) 7281 + (mode_info->atom_context->bios + data_offset + 7282 + le16_to_cpu(power_info->pplib.usStateArrayOffset)); 7283 + clock_info_array = (struct _ClockInfoArray *) 7284 + (mode_info->atom_context->bios + data_offset + 7285 + le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); 7286 + non_clock_info_array = (struct _NonClockInfoArray *) 7287 + (mode_info->atom_context->bios + data_offset + 7288 + le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); 7289 + 7290 + adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) * 7291 + state_array->ucNumEntries, GFP_KERNEL); 7292 + if (!adev->pm.dpm.ps) 7293 + return -ENOMEM; 7294 + power_state_offset = (u8 *)state_array->states; 7295 + for (i = 0; i < state_array->ucNumEntries; i++) { 7296 + u8 *idx; 7297 + power_state = (union pplib_power_state *)power_state_offset; 7298 + non_clock_array_index = power_state->v2.nonClockInfoIndex; 7299 + non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 7300 + &non_clock_info_array->nonClockInfo[non_clock_array_index]; 7301 + ps = kzalloc(sizeof(struct si_ps), GFP_KERNEL); 7302 + if (ps == NULL) { 7303 + kfree(adev->pm.dpm.ps); 7304 + return -ENOMEM; 7305 + } 7306 + adev->pm.dpm.ps[i].ps_priv = ps; 7307 + si_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i], 7308 + non_clock_info, 7309 + non_clock_info_array->ucEntrySize); 7310 + k = 0; 7311 + idx = (u8 
/* Allocate and initialize the SI dpm software state: power-info structs,
 * PCIe capabilities, leakage tables, the ATOM power table, display-clock
 * voltage dependencies, voltage-control capabilities and powertune
 * defaults.  Error paths rely on the caller (si_dpm_sw_init) invoking
 * si_dpm_fini() to release partial allocations.
 */
static int si_dpm_init(struct amdgpu_device *adev)
{
	struct rv7xx_power_info *pi;
	struct evergreen_power_info *eg_pi;
	struct ni_power_info *ni_pi;
	struct si_power_info *si_pi;
	struct atom_clock_dividers dividers;
	int ret;
	u32 mask;

	si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
	if (si_pi == NULL)
		return -ENOMEM;
	adev->pm.dpm.priv = si_pi;
	/* si_power_info embeds ni -> eg -> rv7xx info, nested by generation. */
	ni_pi = &si_pi->ni;
	eg_pi = &ni_pi->eg;
	pi = &eg_pi->rv7xx;

	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
	if (ret)
		si_pi->sys_pcie_mask = 0;
	else
		si_pi->sys_pcie_mask = mask;
	si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
	si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);

	si_set_max_cu_value(adev);

	rv770_get_max_vddc(adev);
	si_get_leakage_vddc(adev);
	si_patch_dependency_tables_based_on_leakage(adev);

	pi->acpi_vddc = 0;
	eg_pi->acpi_vddci = 0;
	pi->min_vddc_in_table = 0;
	pi->max_vddc_in_table = 0;

	ret = amdgpu_get_platform_caps(adev);
	if (ret)
		return ret;

	ret = amdgpu_parse_extended_power_table(adev);
	if (ret)
		return ret;

	ret = si_parse_power_table(adev);
	if (ret)
		return ret;

	/* Fixed 4-entry vddc-vs-dispclk table (clk in 10 kHz, v in mV). */
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
		kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
	if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
		amdgpu_free_extended_power_table(adev);
		return -ENOMEM;
	}
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;

	if (adev->pm.dpm.voltage_response_time == 0)
		adev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
	if (adev->pm.dpm.backbias_response_time == 0)
		adev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;

	/* NOTE(review): the condition looks inverted (dividers are used on
	 * failure, the default on success) but matches the long-standing
	 * radeon si_dpm code — confirm before changing.
	 */
	ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
						 0, false, &dividers);
	if (ret)
		pi->ref_div = dividers.ref_div + 1;
	else
		pi->ref_div = R600_REFERENCEDIVIDER_DFLT;

	eg_pi->smu_uvd_hs = false;

	pi->mclk_strobe_mode_threshold = 40000;
	if (si_is_special_1gb_platform(adev))
		pi->mclk_stutter_mode_threshold = 0;
	else
		pi->mclk_stutter_mode_threshold = pi->mclk_strobe_mode_threshold;
	pi->mclk_edc_enable_threshold = 40000;
	eg_pi->mclk_edc_wr_enable_threshold = 40000;

	ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;

	/* Probe voltage control method: GPIO LUT first, then SVI2. */
	pi->voltage_control =
		amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
						VOLTAGE_OBJ_GPIO_LUT);
	if (!pi->voltage_control) {
		si_pi->voltage_control_svi2 =
			amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
							VOLTAGE_OBJ_SVID2);
		if (si_pi->voltage_control_svi2)
			amdgpu_atombios_get_svi2_info(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
						      &si_pi->svd_gpio_id, &si_pi->svc_gpio_id);
	}

	pi->mvdd_control =
		amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_MVDDC,
						VOLTAGE_OBJ_GPIO_LUT);

	eg_pi->vddci_control =
		amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
						VOLTAGE_OBJ_GPIO_LUT);
	if (!eg_pi->vddci_control)
		si_pi->vddci_control_svi2 =
			amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
							VOLTAGE_OBJ_SVID2);

	si_pi->vddc_phase_shed_control =
		amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
						VOLTAGE_OBJ_PHASE_LUT);

	rv770_get_engine_memory_ss(adev);

	pi->asi = RV770_ASI_DFLT;
	pi->pasi = CYPRESS_HASI_DFLT;
	pi->vrc = SISLANDS_VRC_DFLT;

	pi->gfx_clock_gating = true;

	eg_pi->sclk_deep_sleep = true;
	si_pi->sclk_deep_sleep_above_low = false;

	if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
		pi->thermal_protection = true;
	else
		pi->thermal_protection = false;

	eg_pi->dynamic_ac_timing = true;

	eg_pi->light_sleep = true;
#if defined(CONFIG_ACPI)
	eg_pi->pcie_performance_request =
		amdgpu_acpi_is_pcie_performance_request_supported(adev);
#else
	eg_pi->pcie_performance_request = false;
#endif

	si_pi->sram_end = SMC_RAM_END;

	adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
	adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
	adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
	adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
	adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
	adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
	adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;

	si_initialize_powertune_defaults(adev);

	/* make sure dc limits are valid */
	if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
	    (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
		adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
			adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	si_pi->fan_ctrl_is_in_default_mode = true;

	return 0;
}

/* Release everything si_dpm_init() allocated (per-state si_ps structs,
 * the state array, the power-info struct and the extended power tables).
 */
static void si_dpm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		kfree(adev->pm.dpm.ps[i].ps_priv);
	}
	kfree(adev->pm.dpm.ps);
	kfree(adev->pm.dpm.priv);
	kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
	amdgpu_free_extended_power_table(adev);
}

/* debugfs: print the performance level the hardware currently reports. */
static void si_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
						    struct seq_file *m)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct amdgpu_ps *rps = &eg_pi->current_rps;
	struct si_ps *ps = si_get_ps(rps);
	struct rv7xx_pl *pl;
	u32 current_index =
		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
		CURRENT_STATE_INDEX_SHIFT;

	if (current_index >= ps->performance_level_count) {
		seq_printf(m, "invalid dpm profile %d\n", current_index);
	} else {
		pl = &ps->performance_levels[current_index];
		seq_printf(m, "uvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
		seq_printf(m, "power level %d    sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
			   current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
	}
}
/* Mask/unmask the thermal interrupt sources in CG_THERMAL_INT.
 * "LOW_TO_HIGH" corresponds to the high-threshold crossing (MASK_HIGH),
 * "HIGH_TO_LOW" to the low-threshold crossing (MASK_LOW); setting the
 * mask bit disables the interrupt.
 */
static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      unsigned type,
				      enum amdgpu_interrupt_state state)
{
	u32 cg_thermal_int;

	switch (type) {
	case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
			cg_thermal_int |= THERM_INT_MASK_HIGH;
			WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
			cg_thermal_int &= ~THERM_INT_MASK_HIGH;
			WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
			break;
		default:
			break;
		}
		break;

	case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
			cg_thermal_int |= THERM_INT_MASK_LOW;
			WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
			cg_thermal_int &= ~THERM_INT_MASK_LOW;
			WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
			break;
		default:
			break;
		}
		break;

	default:
		break;
	}
	return 0;
}

/* Thermal interrupt handler: record the crossing direction (src_id 230 =
 * low-to-high, 231 = high-to-low) and kick the thermal worker.
 */
static int si_dpm_process_interrupt(struct amdgpu_device *adev,
				    struct amdgpu_irq_src *source,
				    struct amdgpu_iv_entry *entry)
{
	bool queue_thermal = false;

	if (entry == NULL)
		return -EINVAL;

	switch (entry->src_id) {
	case 230: /* thermal low to high */
		DRM_DEBUG("IH: thermal low to high\n");
		adev->pm.dpm.thermal.high_to_low = false;
		queue_thermal = true;
		break;
	case 231: /* thermal high to low */
		DRM_DEBUG("IH: thermal high to low\n");
		adev->pm.dpm.thermal.high_to_low = true;
		queue_thermal = true;
		break;
	default:
		break;
	}

	if (queue_thermal)
		schedule_work(&adev->pm.dpm.thermal.work);

	return 0;
}

/* Late init: register the pm sysfs/debugfs files and arm the default
 * thermal alert range.  No-op when dpm is disabled via module parameter.
 */
static int si_dpm_late_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_dpm)
		return 0;

	/* init the sysfs and debugfs files late */
	ret = amdgpu_pm_sysfs_init(adev);
	if (ret)
		return ret;

	ret = si_set_temperature_range(adev);
	if (ret)
		return ret;
#if 0 //TODO ?
	si_dpm_powergate_uvd(adev, true);
#endif
	return 0;
}
7655 + */ 7656 + static int si_dpm_init_microcode(struct amdgpu_device *adev) 7657 + { 7658 + const char *chip_name; 7659 + char fw_name[30]; 7660 + int err; 7661 + 7662 + DRM_DEBUG("\n"); 7663 + switch (adev->asic_type) { 7664 + case CHIP_TAHITI: 7665 + chip_name = "tahiti"; 7666 + break; 7667 + case CHIP_PITCAIRN: 7668 + chip_name = "pitcairn"; 7669 + break; 7670 + case CHIP_VERDE: 7671 + chip_name = "verde"; 7672 + break; 7673 + case CHIP_OLAND: 7674 + chip_name = "oland"; 7675 + break; 7676 + case CHIP_HAINAN: 7677 + chip_name = "hainan"; 7678 + break; 7679 + default: BUG(); 7680 + } 7681 + 7682 + snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); 7683 + err = request_firmware(&adev->pm.fw, fw_name, adev->dev); 7684 + if (err) 7685 + goto out; 7686 + err = amdgpu_ucode_validate(adev->pm.fw); 7687 + 7688 + out: 7689 + if (err) { 7690 + printk(KERN_ERR 7691 + "si_smc: Failed to load firmware. err = %d\"%s\"\n", 7692 + err, fw_name); 7693 + release_firmware(adev->pm.fw); 7694 + adev->pm.fw = NULL; 7695 + } 7696 + return err; 7697 + 7698 + } 7699 + 7700 + static int si_dpm_sw_init(void *handle) 7701 + { 7702 + int ret; 7703 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 7704 + 7705 + ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq); 7706 + if (ret) 7707 + return ret; 7708 + 7709 + ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq); 7710 + if (ret) 7711 + return ret; 7712 + 7713 + /* default to balanced state */ 7714 + adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; 7715 + adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; 7716 + adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO; 7717 + adev->pm.default_sclk = adev->clock.default_sclk; 7718 + adev->pm.default_mclk = adev->clock.default_mclk; 7719 + adev->pm.current_sclk = adev->clock.default_sclk; 7720 + adev->pm.current_mclk = adev->clock.default_mclk; 7721 + adev->pm.int_thermal_type = THERMAL_TYPE_NONE; 7722 + 7723 + if (amdgpu_dpm == 0) 7724 + 
return 0; 7725 + 7726 + ret = si_dpm_init_microcode(adev); 7727 + if (ret) 7728 + return ret; 7729 + 7730 + INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler); 7731 + mutex_lock(&adev->pm.mutex); 7732 + ret = si_dpm_init(adev); 7733 + if (ret) 7734 + goto dpm_failed; 7735 + adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; 7736 + if (amdgpu_dpm == 1) 7737 + amdgpu_pm_print_power_states(adev); 7738 + mutex_unlock(&adev->pm.mutex); 7739 + DRM_INFO("amdgpu: dpm initialized\n"); 7740 + 7741 + return 0; 7742 + 7743 + dpm_failed: 7744 + si_dpm_fini(adev); 7745 + mutex_unlock(&adev->pm.mutex); 7746 + DRM_ERROR("amdgpu: dpm initialization failed\n"); 7747 + return ret; 7748 + } 7749 + 7750 + static int si_dpm_sw_fini(void *handle) 7751 + { 7752 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 7753 + 7754 + mutex_lock(&adev->pm.mutex); 7755 + amdgpu_pm_sysfs_fini(adev); 7756 + si_dpm_fini(adev); 7757 + mutex_unlock(&adev->pm.mutex); 7758 + 7759 + return 0; 7760 + } 7761 + 7762 + static int si_dpm_hw_init(void *handle) 7763 + { 7764 + int ret; 7765 + 7766 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 7767 + 7768 + if (!amdgpu_dpm) 7769 + return 0; 7770 + 7771 + mutex_lock(&adev->pm.mutex); 7772 + si_dpm_setup_asic(adev); 7773 + ret = si_dpm_enable(adev); 7774 + if (ret) 7775 + adev->pm.dpm_enabled = false; 7776 + else 7777 + adev->pm.dpm_enabled = true; 7778 + mutex_unlock(&adev->pm.mutex); 7779 + 7780 + return ret; 7781 + } 7782 + 7783 + static int si_dpm_hw_fini(void *handle) 7784 + { 7785 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 7786 + 7787 + if (adev->pm.dpm_enabled) { 7788 + mutex_lock(&adev->pm.mutex); 7789 + si_dpm_disable(adev); 7790 + mutex_unlock(&adev->pm.mutex); 7791 + } 7792 + 7793 + return 0; 7794 + } 7795 + 7796 + static int si_dpm_suspend(void *handle) 7797 + { 7798 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 7799 + 7800 + if 
/* resume: asic init has reset the hw to the boot state, so re-program the
 * asic, re-enable dpm and recompute clocks if that succeeded.
 */
static int si_dpm_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		/* asic init will reset to the boot state */
		mutex_lock(&adev->pm.mutex);
		si_dpm_setup_asic(adev);
		ret = si_dpm_enable(adev);
		if (ret)
			adev->pm.dpm_enabled = false;
		else
			adev->pm.dpm_enabled = true;
		mutex_unlock(&adev->pm.mutex);
		if (adev->pm.dpm_enabled)
			amdgpu_pm_compute_clocks(adev);
	}
	return 0;
}

/* dpm has no meaningful idle notion; always report idle. */
static bool si_dpm_is_idle(void *handle)
{
	/* XXX */
	return true;
}

static int si_dpm_wait_for_idle(void *handle)
{
	/* XXX */
	return 0;
}

/* No soft reset support for the dpm block. */
static int si_dpm_soft_reset(void *handle)
{
	return 0;
}

static int si_dpm_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	return 0;
}

static int si_dpm_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

/* get temperature in millidegrees */
static int si_dpm_get_temp(struct amdgpu_device *adev)
{
	u32 temp;
	int actual_temp = 0;

	temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
		CTF_TEMP_SHIFT;

	/* 9-bit field; clamp to 255 C when bit 9 is set. */
	if (temp & 0x200)
		actual_temp = 255;
	else
		actual_temp = temp & 0x1ff;

	actual_temp = (actual_temp * 1000);

	return actual_temp;
}

/* Return the lowest or highest sclk of the requested state. */
static u32 si_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].sclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
}

/* Return the lowest or highest mclk of the requested state. */
static u32 si_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].mclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
}

/* Dump a power state (class, caps, UVD clocks and each performance
 * level) to the kernel log; SI parts include the PCIe gen per level.
 */
static void si_dpm_print_power_state(struct amdgpu_device *adev,
				     struct amdgpu_ps *rps)
{
	struct si_ps *ps = si_get_ps(rps);
	struct rv7xx_pl *pl;
	int i;

	amdgpu_dpm_print_class_info(rps->class, rps->class2);
	amdgpu_dpm_print_cap_info(rps->caps);
	printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->performance_level_count; i++) {
		pl = &ps->performance_levels[i];
		if (adev->asic_type >= CHIP_TAHITI)
			printk("\t\tpower level %d    sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
			       i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
		else
			printk("\t\tpower level %d    sclk: %u mclk: %u vddc: %u vddci: %u\n",
			       i, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
	}
	amdgpu_dpm_print_ps_status(adev, rps);
}

/* early init: install the dpm and irq function tables. */
static int si_dpm_early_init(void *handle)
{

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_dpm_set_dpm_funcs(adev);
	si_dpm_set_irq_funcs(adev);
	return 0;
}
struct amd_ip_funcs si_dpm_ip_funcs = { 7936 + .name = "si_dpm", 7937 + .early_init = si_dpm_early_init, 7938 + .late_init = si_dpm_late_init, 7939 + .sw_init = si_dpm_sw_init, 7940 + .sw_fini = si_dpm_sw_fini, 7941 + .hw_init = si_dpm_hw_init, 7942 + .hw_fini = si_dpm_hw_fini, 7943 + .suspend = si_dpm_suspend, 7944 + .resume = si_dpm_resume, 7945 + .is_idle = si_dpm_is_idle, 7946 + .wait_for_idle = si_dpm_wait_for_idle, 7947 + .soft_reset = si_dpm_soft_reset, 7948 + .set_clockgating_state = si_dpm_set_clockgating_state, 7949 + .set_powergating_state = si_dpm_set_powergating_state, 7950 + }; 7951 + 7952 + static const struct amdgpu_dpm_funcs si_dpm_funcs = { 7953 + .get_temperature = &si_dpm_get_temp, 7954 + .pre_set_power_state = &si_dpm_pre_set_power_state, 7955 + .set_power_state = &si_dpm_set_power_state, 7956 + .post_set_power_state = &si_dpm_post_set_power_state, 7957 + .display_configuration_changed = &si_dpm_display_configuration_changed, 7958 + .get_sclk = &si_dpm_get_sclk, 7959 + .get_mclk = &si_dpm_get_mclk, 7960 + .print_power_state = &si_dpm_print_power_state, 7961 + .debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level, 7962 + .force_performance_level = &si_dpm_force_performance_level, 7963 + .vblank_too_short = &si_dpm_vblank_too_short, 7964 + .set_fan_control_mode = &si_dpm_set_fan_control_mode, 7965 + .get_fan_control_mode = &si_dpm_get_fan_control_mode, 7966 + .set_fan_speed_percent = &si_dpm_set_fan_speed_percent, 7967 + .get_fan_speed_percent = &si_dpm_get_fan_speed_percent, 7968 + }; 7969 + 7970 + static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev) 7971 + { 7972 + if (adev->pm.funcs == NULL) 7973 + adev->pm.funcs = &si_dpm_funcs; 7974 + } 7975 + 7976 + static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = { 7977 + .set = si_dpm_set_interrupt_state, 7978 + .process = si_dpm_process_interrupt, 7979 + }; 7980 + 7981 + static void si_dpm_set_irq_funcs(struct amdgpu_device *adev) 7982 + { 7983 + 
adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; 7984 + adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs; 7985 + } 7986 +
+1015
drivers/gpu/drm/amd/amdgpu/si_dpm.h
··· 1 + /* 2 + * Copyright 2012 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + * 22 + */ 23 + #ifndef __SI_DPM_H__ 24 + #define __SI_DPM_H__ 25 + 26 + #include "amdgpu_atombios.h" 27 + #include "sislands_smc.h" 28 + 29 + #define MC_CG_CONFIG 0x96f 30 + #define MC_ARB_CG 0x9fa 31 + #define CG_ARB_REQ(x) ((x) << 0) 32 + #define CG_ARB_REQ_MASK (0xff << 0) 33 + 34 + #define MC_ARB_DRAM_TIMING_1 0x9fc 35 + #define MC_ARB_DRAM_TIMING_2 0x9fd 36 + #define MC_ARB_DRAM_TIMING_3 0x9fe 37 + #define MC_ARB_DRAM_TIMING2_1 0x9ff 38 + #define MC_ARB_DRAM_TIMING2_2 0xa00 39 + #define MC_ARB_DRAM_TIMING2_3 0xa01 40 + 41 + #define MAX_NO_OF_MVDD_VALUES 2 42 + #define MAX_NO_VREG_STEPS 32 43 + #define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16 44 + #define SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE 32 45 + #define SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20 46 + #define RV770_ASI_DFLT 1000 47 + #define CYPRESS_HASI_DFLT 400000 48 + #define PCIE_PERF_REQ_PECI_GEN1 2 49 + #define PCIE_PERF_REQ_PECI_GEN2 3 50 + #define PCIE_PERF_REQ_PECI_GEN3 4 51 + #define RV770_DEFAULT_VCLK_FREQ 53300 /* 10 khz */ 52 + #define RV770_DEFAULT_DCLK_FREQ 40000 /* 10 khz */ 53 + 54 + #define SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE 16 55 + 56 + #define RV770_SMC_TABLE_ADDRESS 0xB000 57 + #define RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 3 58 + 59 + #define SMC_STROBE_RATIO 0x0F 60 + #define SMC_STROBE_ENABLE 0x10 61 + 62 + #define SMC_MC_EDC_RD_FLAG 0x01 63 + #define SMC_MC_EDC_WR_FLAG 0x02 64 + #define SMC_MC_RTT_ENABLE 0x04 65 + #define SMC_MC_STUTTER_EN 0x08 66 + 67 + #define RV770_SMC_VOLTAGEMASK_VDDC 0 68 + #define RV770_SMC_VOLTAGEMASK_MVDD 1 69 + #define RV770_SMC_VOLTAGEMASK_VDDCI 2 70 + #define RV770_SMC_VOLTAGEMASK_MAX 4 71 + 72 + #define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16 73 + #define NISLANDS_SMC_STROBE_RATIO 0x0F 74 + #define NISLANDS_SMC_STROBE_ENABLE 0x10 75 + 76 + #define NISLANDS_SMC_MC_EDC_RD_FLAG 0x01 77 + #define NISLANDS_SMC_MC_EDC_WR_FLAG 0x02 78 + #define NISLANDS_SMC_MC_RTT_ENABLE 0x04 79 + #define NISLANDS_SMC_MC_STUTTER_EN 0x08 80 + 81 + 
#define MAX_NO_VREG_STEPS 32 82 + 83 + #define NISLANDS_SMC_VOLTAGEMASK_VDDC 0 84 + #define NISLANDS_SMC_VOLTAGEMASK_MVDD 1 85 + #define NISLANDS_SMC_VOLTAGEMASK_VDDCI 2 86 + #define NISLANDS_SMC_VOLTAGEMASK_MAX 4 87 + 88 + #define SISLANDS_MCREGISTERTABLE_INITIAL_SLOT 0 89 + #define SISLANDS_MCREGISTERTABLE_ACPI_SLOT 1 90 + #define SISLANDS_MCREGISTERTABLE_ULV_SLOT 2 91 + #define SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT 3 92 + 93 + #define SISLANDS_LEAKAGE_INDEX0 0xff01 94 + #define SISLANDS_MAX_LEAKAGE_COUNT 4 95 + 96 + #define SISLANDS_MAX_HARDWARE_POWERLEVELS 5 97 + #define SISLANDS_INITIAL_STATE_ARB_INDEX 0 98 + #define SISLANDS_ACPI_STATE_ARB_INDEX 1 99 + #define SISLANDS_ULV_STATE_ARB_INDEX 2 100 + #define SISLANDS_DRIVER_STATE_ARB_INDEX 3 101 + 102 + #define SISLANDS_DPM2_MAX_PULSE_SKIP 256 103 + 104 + #define SISLANDS_DPM2_NEAR_TDP_DEC 10 105 + #define SISLANDS_DPM2_ABOVE_SAFE_INC 5 106 + #define SISLANDS_DPM2_BELOW_SAFE_INC 20 107 + 108 + #define SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT 80 109 + 110 + #define SISLANDS_DPM2_MAXPS_PERCENT_H 99 111 + #define SISLANDS_DPM2_MAXPS_PERCENT_M 99 112 + 113 + #define SISLANDS_DPM2_SQ_RAMP_MAX_POWER 0x3FFF 114 + #define SISLANDS_DPM2_SQ_RAMP_MIN_POWER 0x12 115 + #define SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15 116 + #define SISLANDS_DPM2_SQ_RAMP_STI_SIZE 0x1E 117 + #define SISLANDS_DPM2_SQ_RAMP_LTI_RATIO 0xF 118 + 119 + #define SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN 10 120 + 121 + #define SISLANDS_VRC_DFLT 0xC000B3 122 + #define SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT 1687 123 + #define SISLANDS_CGULVPARAMETER_DFLT 0x00040035 124 + #define SISLANDS_CGULVCONTROL_DFLT 0x1f007550 125 + 126 + #define SI_ASI_DFLT 10000 127 + #define SI_BSP_DFLT 0x41EB 128 + #define SI_BSU_DFLT 0x2 129 + #define SI_AH_DFLT 5 130 + #define SI_RLP_DFLT 25 131 + #define SI_RMP_DFLT 65 132 + #define SI_LHP_DFLT 40 133 + #define SI_LMP_DFLT 15 134 + #define SI_TD_DFLT 0 135 + #define SI_UTC_DFLT_00 0x24 136 + #define SI_UTC_DFLT_01 0x22 137 
+ #define SI_UTC_DFLT_02 0x22 138 + #define SI_UTC_DFLT_03 0x22 139 + #define SI_UTC_DFLT_04 0x22 140 + #define SI_UTC_DFLT_05 0x22 141 + #define SI_UTC_DFLT_06 0x22 142 + #define SI_UTC_DFLT_07 0x22 143 + #define SI_UTC_DFLT_08 0x22 144 + #define SI_UTC_DFLT_09 0x22 145 + #define SI_UTC_DFLT_10 0x22 146 + #define SI_UTC_DFLT_11 0x22 147 + #define SI_UTC_DFLT_12 0x22 148 + #define SI_UTC_DFLT_13 0x22 149 + #define SI_UTC_DFLT_14 0x22 150 + #define SI_DTC_DFLT_00 0x24 151 + #define SI_DTC_DFLT_01 0x22 152 + #define SI_DTC_DFLT_02 0x22 153 + #define SI_DTC_DFLT_03 0x22 154 + #define SI_DTC_DFLT_04 0x22 155 + #define SI_DTC_DFLT_05 0x22 156 + #define SI_DTC_DFLT_06 0x22 157 + #define SI_DTC_DFLT_07 0x22 158 + #define SI_DTC_DFLT_08 0x22 159 + #define SI_DTC_DFLT_09 0x22 160 + #define SI_DTC_DFLT_10 0x22 161 + #define SI_DTC_DFLT_11 0x22 162 + #define SI_DTC_DFLT_12 0x22 163 + #define SI_DTC_DFLT_13 0x22 164 + #define SI_DTC_DFLT_14 0x22 165 + #define SI_VRC_DFLT 0x0000C003 166 + #define SI_VOLTAGERESPONSETIME_DFLT 1000 167 + #define SI_BACKBIASRESPONSETIME_DFLT 1000 168 + #define SI_VRU_DFLT 0x3 169 + #define SI_SPLLSTEPTIME_DFLT 0x1000 170 + #define SI_SPLLSTEPUNIT_DFLT 0x3 171 + #define SI_TPU_DFLT 0 172 + #define SI_TPC_DFLT 0x200 173 + #define SI_SSTU_DFLT 0 174 + #define SI_SST_DFLT 0x00C8 175 + #define SI_GICST_DFLT 0x200 176 + #define SI_FCT_DFLT 0x0400 177 + #define SI_FCTU_DFLT 0 178 + #define SI_CTXCGTT3DRPHC_DFLT 0x20 179 + #define SI_CTXCGTT3DRSDC_DFLT 0x40 180 + #define SI_VDDC3DOORPHC_DFLT 0x100 181 + #define SI_VDDC3DOORSDC_DFLT 0x7 182 + #define SI_VDDC3DOORSU_DFLT 0 183 + #define SI_MPLLLOCKTIME_DFLT 100 184 + #define SI_MPLLRESETTIME_DFLT 150 185 + #define SI_VCOSTEPPCT_DFLT 20 186 + #define SI_ENDINGVCOSTEPPCT_DFLT 5 187 + #define SI_REFERENCEDIVIDER_DFLT 4 188 + 189 + #define SI_PM_NUMBER_OF_TC 15 190 + #define SI_PM_NUMBER_OF_SCLKS 20 191 + #define SI_PM_NUMBER_OF_MCLKS 4 192 + #define SI_PM_NUMBER_OF_VOLTAGE_LEVELS 4 193 + #define 
SI_PM_NUMBER_OF_ACTIVITY_LEVELS 3 194 + 195 + /* XXX are these ok? */ 196 + #define SI_TEMP_RANGE_MIN (90 * 1000) 197 + #define SI_TEMP_RANGE_MAX (120 * 1000) 198 + 199 + #define FDO_PWM_MODE_STATIC 1 200 + #define FDO_PWM_MODE_STATIC_RPM 5 201 + 202 + enum ni_dc_cac_level 203 + { 204 + NISLANDS_DCCAC_LEVEL_0 = 0, 205 + NISLANDS_DCCAC_LEVEL_1, 206 + NISLANDS_DCCAC_LEVEL_2, 207 + NISLANDS_DCCAC_LEVEL_3, 208 + NISLANDS_DCCAC_LEVEL_4, 209 + NISLANDS_DCCAC_LEVEL_5, 210 + NISLANDS_DCCAC_LEVEL_6, 211 + NISLANDS_DCCAC_LEVEL_7, 212 + NISLANDS_DCCAC_MAX_LEVELS 213 + }; 214 + 215 + enum si_cac_config_reg_type 216 + { 217 + SISLANDS_CACCONFIG_MMR = 0, 218 + SISLANDS_CACCONFIG_CGIND, 219 + SISLANDS_CACCONFIG_MAX 220 + }; 221 + 222 + enum si_power_level { 223 + SI_POWER_LEVEL_LOW = 0, 224 + SI_POWER_LEVEL_MEDIUM = 1, 225 + SI_POWER_LEVEL_HIGH = 2, 226 + SI_POWER_LEVEL_CTXSW = 3, 227 + }; 228 + 229 + enum si_td { 230 + SI_TD_AUTO, 231 + SI_TD_UP, 232 + SI_TD_DOWN, 233 + }; 234 + 235 + enum si_display_watermark { 236 + SI_DISPLAY_WATERMARK_LOW = 0, 237 + SI_DISPLAY_WATERMARK_HIGH = 1, 238 + }; 239 + 240 + enum si_display_gap 241 + { 242 + SI_PM_DISPLAY_GAP_VBLANK_OR_WM = 0, 243 + SI_PM_DISPLAY_GAP_VBLANK = 1, 244 + SI_PM_DISPLAY_GAP_WATERMARK = 2, 245 + SI_PM_DISPLAY_GAP_IGNORE = 3, 246 + }; 247 + 248 + extern const struct amd_ip_funcs si_dpm_ip_funcs; 249 + 250 + struct ni_leakage_coeffients 251 + { 252 + u32 at; 253 + u32 bt; 254 + u32 av; 255 + u32 bv; 256 + s32 t_slope; 257 + s32 t_intercept; 258 + u32 t_ref; 259 + }; 260 + 261 + struct SMC_Evergreen_MCRegisterAddress 262 + { 263 + uint16_t s0; 264 + uint16_t s1; 265 + }; 266 + 267 + typedef struct SMC_Evergreen_MCRegisterAddress SMC_Evergreen_MCRegisterAddress; 268 + 269 + struct evergreen_mc_reg_entry { 270 + u32 mclk_max; 271 + u32 mc_data[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; 272 + }; 273 + 274 + struct evergreen_mc_reg_table { 275 + u8 last; 276 + u8 num_entries; 277 + u16 valid_flag; 278 + struct evergreen_mc_reg_entry 
mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; 279 + SMC_Evergreen_MCRegisterAddress mc_reg_address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; 280 + }; 281 + 282 + struct SMC_Evergreen_MCRegisterSet 283 + { 284 + uint32_t value[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; 285 + }; 286 + 287 + typedef struct SMC_Evergreen_MCRegisterSet SMC_Evergreen_MCRegisterSet; 288 + 289 + struct SMC_Evergreen_MCRegisters 290 + { 291 + uint8_t last; 292 + uint8_t reserved[3]; 293 + SMC_Evergreen_MCRegisterAddress address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; 294 + SMC_Evergreen_MCRegisterSet data[5]; 295 + }; 296 + 297 + typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters; 298 + 299 + struct SMC_NIslands_MCRegisterSet 300 + { 301 + uint32_t value[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; 302 + }; 303 + 304 + typedef struct SMC_NIslands_MCRegisterSet SMC_NIslands_MCRegisterSet; 305 + 306 + struct ni_mc_reg_entry { 307 + u32 mclk_max; 308 + u32 mc_data[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; 309 + }; 310 + 311 + struct SMC_NIslands_MCRegisterAddress 312 + { 313 + uint16_t s0; 314 + uint16_t s1; 315 + }; 316 + 317 + typedef struct SMC_NIslands_MCRegisterAddress SMC_NIslands_MCRegisterAddress; 318 + 319 + struct SMC_NIslands_MCRegisters 320 + { 321 + uint8_t last; 322 + uint8_t reserved[3]; 323 + SMC_NIslands_MCRegisterAddress address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; 324 + SMC_NIslands_MCRegisterSet data[SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT]; 325 + }; 326 + 327 + typedef struct SMC_NIslands_MCRegisters SMC_NIslands_MCRegisters; 328 + 329 + struct evergreen_ulv_param { 330 + bool supported; 331 + struct rv7xx_pl *pl; 332 + }; 333 + 334 + struct evergreen_arb_registers { 335 + u32 mc_arb_dram_timing; 336 + u32 mc_arb_dram_timing2; 337 + u32 mc_arb_rfsh_rate; 338 + u32 mc_arb_burst_time; 339 + }; 340 + 341 + struct at { 342 + u32 rlp; 343 + u32 rmp; 344 + u32 lhp; 345 + u32 lmp; 346 + }; 347 + 348 + struct ni_clock_registers { 349 + u32 cg_spll_func_cntl; 350 + u32 
cg_spll_func_cntl_2; 351 + u32 cg_spll_func_cntl_3; 352 + u32 cg_spll_func_cntl_4; 353 + u32 cg_spll_spread_spectrum; 354 + u32 cg_spll_spread_spectrum_2; 355 + u32 mclk_pwrmgt_cntl; 356 + u32 dll_cntl; 357 + u32 mpll_ad_func_cntl; 358 + u32 mpll_ad_func_cntl_2; 359 + u32 mpll_dq_func_cntl; 360 + u32 mpll_dq_func_cntl_2; 361 + u32 mpll_ss1; 362 + u32 mpll_ss2; 363 + }; 364 + 365 + struct RV770_SMC_SCLK_VALUE 366 + { 367 + uint32_t vCG_SPLL_FUNC_CNTL; 368 + uint32_t vCG_SPLL_FUNC_CNTL_2; 369 + uint32_t vCG_SPLL_FUNC_CNTL_3; 370 + uint32_t vCG_SPLL_SPREAD_SPECTRUM; 371 + uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; 372 + uint32_t sclk_value; 373 + }; 374 + 375 + typedef struct RV770_SMC_SCLK_VALUE RV770_SMC_SCLK_VALUE; 376 + 377 + struct RV770_SMC_MCLK_VALUE 378 + { 379 + uint32_t vMPLL_AD_FUNC_CNTL; 380 + uint32_t vMPLL_AD_FUNC_CNTL_2; 381 + uint32_t vMPLL_DQ_FUNC_CNTL; 382 + uint32_t vMPLL_DQ_FUNC_CNTL_2; 383 + uint32_t vMCLK_PWRMGT_CNTL; 384 + uint32_t vDLL_CNTL; 385 + uint32_t vMPLL_SS; 386 + uint32_t vMPLL_SS2; 387 + uint32_t mclk_value; 388 + }; 389 + 390 + typedef struct RV770_SMC_MCLK_VALUE RV770_SMC_MCLK_VALUE; 391 + 392 + 393 + struct RV730_SMC_MCLK_VALUE 394 + { 395 + uint32_t vMCLK_PWRMGT_CNTL; 396 + uint32_t vDLL_CNTL; 397 + uint32_t vMPLL_FUNC_CNTL; 398 + uint32_t vMPLL_FUNC_CNTL2; 399 + uint32_t vMPLL_FUNC_CNTL3; 400 + uint32_t vMPLL_SS; 401 + uint32_t vMPLL_SS2; 402 + uint32_t mclk_value; 403 + }; 404 + 405 + typedef struct RV730_SMC_MCLK_VALUE RV730_SMC_MCLK_VALUE; 406 + 407 + struct RV770_SMC_VOLTAGE_VALUE 408 + { 409 + uint16_t value; 410 + uint8_t index; 411 + uint8_t padding; 412 + }; 413 + 414 + typedef struct RV770_SMC_VOLTAGE_VALUE RV770_SMC_VOLTAGE_VALUE; 415 + 416 + union RV7XX_SMC_MCLK_VALUE 417 + { 418 + RV770_SMC_MCLK_VALUE mclk770; 419 + RV730_SMC_MCLK_VALUE mclk730; 420 + }; 421 + 422 + typedef union RV7XX_SMC_MCLK_VALUE RV7XX_SMC_MCLK_VALUE, *LPRV7XX_SMC_MCLK_VALUE; 423 + 424 + struct RV770_SMC_HW_PERFORMANCE_LEVEL 425 + { 426 + uint8_t 
arbValue; 427 + union{ 428 + uint8_t seqValue; 429 + uint8_t ACIndex; 430 + }; 431 + uint8_t displayWatermark; 432 + uint8_t gen2PCIE; 433 + uint8_t gen2XSP; 434 + uint8_t backbias; 435 + uint8_t strobeMode; 436 + uint8_t mcFlags; 437 + uint32_t aT; 438 + uint32_t bSP; 439 + RV770_SMC_SCLK_VALUE sclk; 440 + RV7XX_SMC_MCLK_VALUE mclk; 441 + RV770_SMC_VOLTAGE_VALUE vddc; 442 + RV770_SMC_VOLTAGE_VALUE mvdd; 443 + RV770_SMC_VOLTAGE_VALUE vddci; 444 + uint8_t reserved1; 445 + uint8_t reserved2; 446 + uint8_t stateFlags; 447 + uint8_t padding; 448 + }; 449 + 450 + typedef struct RV770_SMC_HW_PERFORMANCE_LEVEL RV770_SMC_HW_PERFORMANCE_LEVEL; 451 + 452 + struct RV770_SMC_SWSTATE 453 + { 454 + uint8_t flags; 455 + uint8_t padding1; 456 + uint8_t padding2; 457 + uint8_t padding3; 458 + RV770_SMC_HW_PERFORMANCE_LEVEL levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE]; 459 + }; 460 + 461 + typedef struct RV770_SMC_SWSTATE RV770_SMC_SWSTATE; 462 + 463 + struct RV770_SMC_VOLTAGEMASKTABLE 464 + { 465 + uint8_t highMask[RV770_SMC_VOLTAGEMASK_MAX]; 466 + uint32_t lowMask[RV770_SMC_VOLTAGEMASK_MAX]; 467 + }; 468 + 469 + typedef struct RV770_SMC_VOLTAGEMASKTABLE RV770_SMC_VOLTAGEMASKTABLE; 470 + 471 + struct RV770_SMC_STATETABLE 472 + { 473 + uint8_t thermalProtectType; 474 + uint8_t systemFlags; 475 + uint8_t maxVDDCIndexInPPTable; 476 + uint8_t extraFlags; 477 + uint8_t highSMIO[MAX_NO_VREG_STEPS]; 478 + uint32_t lowSMIO[MAX_NO_VREG_STEPS]; 479 + RV770_SMC_VOLTAGEMASKTABLE voltageMaskTable; 480 + RV770_SMC_SWSTATE initialState; 481 + RV770_SMC_SWSTATE ACPIState; 482 + RV770_SMC_SWSTATE driverState; 483 + RV770_SMC_SWSTATE ULVState; 484 + }; 485 + 486 + typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE; 487 + 488 + struct vddc_table_entry { 489 + u16 vddc; 490 + u8 vddc_index; 491 + u8 high_smio; 492 + u32 low_smio; 493 + }; 494 + 495 + struct rv770_clock_registers { 496 + u32 cg_spll_func_cntl; 497 + u32 cg_spll_func_cntl_2; 498 + u32 cg_spll_func_cntl_3; 499 + u32 
cg_spll_spread_spectrum; 500 + u32 cg_spll_spread_spectrum_2; 501 + u32 mpll_ad_func_cntl; 502 + u32 mpll_ad_func_cntl_2; 503 + u32 mpll_dq_func_cntl; 504 + u32 mpll_dq_func_cntl_2; 505 + u32 mclk_pwrmgt_cntl; 506 + u32 dll_cntl; 507 + u32 mpll_ss1; 508 + u32 mpll_ss2; 509 + }; 510 + 511 + struct rv730_clock_registers { 512 + u32 cg_spll_func_cntl; 513 + u32 cg_spll_func_cntl_2; 514 + u32 cg_spll_func_cntl_3; 515 + u32 cg_spll_spread_spectrum; 516 + u32 cg_spll_spread_spectrum_2; 517 + u32 mclk_pwrmgt_cntl; 518 + u32 dll_cntl; 519 + u32 mpll_func_cntl; 520 + u32 mpll_func_cntl2; 521 + u32 mpll_func_cntl3; 522 + u32 mpll_ss; 523 + u32 mpll_ss2; 524 + }; 525 + 526 + union r7xx_clock_registers { 527 + struct rv770_clock_registers rv770; 528 + struct rv730_clock_registers rv730; 529 + }; 530 + 531 + struct rv7xx_power_info { 532 + /* flags */ 533 + bool mem_gddr5; 534 + bool pcie_gen2; 535 + bool dynamic_pcie_gen2; 536 + bool acpi_pcie_gen2; 537 + bool boot_in_gen2; 538 + bool voltage_control; /* vddc */ 539 + bool mvdd_control; 540 + bool sclk_ss; 541 + bool mclk_ss; 542 + bool dynamic_ss; 543 + bool gfx_clock_gating; 544 + bool mg_clock_gating; 545 + bool mgcgtssm; 546 + bool power_gating; 547 + bool thermal_protection; 548 + bool display_gap; 549 + bool dcodt; 550 + bool ulps; 551 + /* registers */ 552 + union r7xx_clock_registers clk_regs; 553 + u32 s0_vid_lower_smio_cntl; 554 + /* voltage */ 555 + u32 vddc_mask_low; 556 + u32 mvdd_mask_low; 557 + u32 mvdd_split_frequency; 558 + u32 mvdd_low_smio[MAX_NO_OF_MVDD_VALUES]; 559 + u16 max_vddc; 560 + u16 max_vddc_in_table; 561 + u16 min_vddc_in_table; 562 + struct vddc_table_entry vddc_table[MAX_NO_VREG_STEPS]; 563 + u8 valid_vddc_entries; 564 + /* dc odt */ 565 + u32 mclk_odt_threshold; 566 + u8 odt_value_0[2]; 567 + u8 odt_value_1[2]; 568 + /* stored values */ 569 + u32 boot_sclk; 570 + u16 acpi_vddc; 571 + u32 ref_div; 572 + u32 active_auto_throttle_sources; 573 + u32 mclk_stutter_mode_threshold; 574 + u32 
mclk_strobe_mode_threshold; 575 + u32 mclk_edc_enable_threshold; 576 + u32 bsp; 577 + u32 bsu; 578 + u32 pbsp; 579 + u32 pbsu; 580 + u32 dsp; 581 + u32 psp; 582 + u32 asi; 583 + u32 pasi; 584 + u32 vrc; 585 + u32 restricted_levels; 586 + u32 rlp; 587 + u32 rmp; 588 + u32 lhp; 589 + u32 lmp; 590 + /* smc offsets */ 591 + u16 state_table_start; 592 + u16 soft_regs_start; 593 + u16 sram_end; 594 + /* scratch structs */ 595 + RV770_SMC_STATETABLE smc_statetable; 596 + }; 597 + 598 + struct rv7xx_pl { 599 + u32 sclk; 600 + u32 mclk; 601 + u16 vddc; 602 + u16 vddci; /* eg+ only */ 603 + u32 flags; 604 + enum amdgpu_pcie_gen pcie_gen; /* si+ only */ 605 + }; 606 + 607 + struct rv7xx_ps { 608 + struct rv7xx_pl high; 609 + struct rv7xx_pl medium; 610 + struct rv7xx_pl low; 611 + bool dc_compatible; 612 + }; 613 + 614 + struct si_ps { 615 + u16 performance_level_count; 616 + bool dc_compatible; 617 + struct rv7xx_pl performance_levels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE]; 618 + }; 619 + 620 + struct ni_mc_reg_table { 621 + u8 last; 622 + u8 num_entries; 623 + u16 valid_flag; 624 + struct ni_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; 625 + SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; 626 + }; 627 + 628 + struct ni_cac_data 629 + { 630 + struct ni_leakage_coeffients leakage_coefficients; 631 + u32 i_leakage; 632 + s32 leakage_minimum_temperature; 633 + u32 pwr_const; 634 + u32 dc_cac_value; 635 + u32 bif_cac_value; 636 + u32 lkge_pwr; 637 + u8 mc_wr_weight; 638 + u8 mc_rd_weight; 639 + u8 allow_ovrflw; 640 + u8 num_win_tdp; 641 + u8 l2num_win_tdp; 642 + u8 lts_truncate_n; 643 + }; 644 + 645 + struct evergreen_power_info { 646 + /* must be first! 
*/ 647 + struct rv7xx_power_info rv7xx; 648 + /* flags */ 649 + bool vddci_control; 650 + bool dynamic_ac_timing; 651 + bool abm; 652 + bool mcls; 653 + bool light_sleep; 654 + bool memory_transition; 655 + bool pcie_performance_request; 656 + bool pcie_performance_request_registered; 657 + bool sclk_deep_sleep; 658 + bool dll_default_on; 659 + bool ls_clock_gating; 660 + bool smu_uvd_hs; 661 + bool uvd_enabled; 662 + /* stored values */ 663 + u16 acpi_vddci; 664 + u8 mvdd_high_index; 665 + u8 mvdd_low_index; 666 + u32 mclk_edc_wr_enable_threshold; 667 + struct evergreen_mc_reg_table mc_reg_table; 668 + struct atom_voltage_table vddc_voltage_table; 669 + struct atom_voltage_table vddci_voltage_table; 670 + struct evergreen_arb_registers bootup_arb_registers; 671 + struct evergreen_ulv_param ulv; 672 + struct at ats[2]; 673 + /* smc offsets */ 674 + u16 mc_reg_table_start; 675 + struct amdgpu_ps current_rps; 676 + struct rv7xx_ps current_ps; 677 + struct amdgpu_ps requested_rps; 678 + struct rv7xx_ps requested_ps; 679 + }; 680 + 681 + struct PP_NIslands_Dpm2PerfLevel 682 + { 683 + uint8_t MaxPS; 684 + uint8_t TgtAct; 685 + uint8_t MaxPS_StepInc; 686 + uint8_t MaxPS_StepDec; 687 + uint8_t PSST; 688 + uint8_t NearTDPDec; 689 + uint8_t AboveSafeInc; 690 + uint8_t BelowSafeInc; 691 + uint8_t PSDeltaLimit; 692 + uint8_t PSDeltaWin; 693 + uint8_t Reserved[6]; 694 + }; 695 + 696 + typedef struct PP_NIslands_Dpm2PerfLevel PP_NIslands_Dpm2PerfLevel; 697 + 698 + struct PP_NIslands_DPM2Parameters 699 + { 700 + uint32_t TDPLimit; 701 + uint32_t NearTDPLimit; 702 + uint32_t SafePowerLimit; 703 + uint32_t PowerBoostLimit; 704 + }; 705 + typedef struct PP_NIslands_DPM2Parameters PP_NIslands_DPM2Parameters; 706 + 707 + struct NISLANDS_SMC_SCLK_VALUE 708 + { 709 + uint32_t vCG_SPLL_FUNC_CNTL; 710 + uint32_t vCG_SPLL_FUNC_CNTL_2; 711 + uint32_t vCG_SPLL_FUNC_CNTL_3; 712 + uint32_t vCG_SPLL_FUNC_CNTL_4; 713 + uint32_t vCG_SPLL_SPREAD_SPECTRUM; 714 + uint32_t 
vCG_SPLL_SPREAD_SPECTRUM_2; 715 + uint32_t sclk_value; 716 + }; 717 + 718 + typedef struct NISLANDS_SMC_SCLK_VALUE NISLANDS_SMC_SCLK_VALUE; 719 + 720 + struct NISLANDS_SMC_MCLK_VALUE 721 + { 722 + uint32_t vMPLL_FUNC_CNTL; 723 + uint32_t vMPLL_FUNC_CNTL_1; 724 + uint32_t vMPLL_FUNC_CNTL_2; 725 + uint32_t vMPLL_AD_FUNC_CNTL; 726 + uint32_t vMPLL_AD_FUNC_CNTL_2; 727 + uint32_t vMPLL_DQ_FUNC_CNTL; 728 + uint32_t vMPLL_DQ_FUNC_CNTL_2; 729 + uint32_t vMCLK_PWRMGT_CNTL; 730 + uint32_t vDLL_CNTL; 731 + uint32_t vMPLL_SS; 732 + uint32_t vMPLL_SS2; 733 + uint32_t mclk_value; 734 + }; 735 + 736 + typedef struct NISLANDS_SMC_MCLK_VALUE NISLANDS_SMC_MCLK_VALUE; 737 + 738 + struct NISLANDS_SMC_VOLTAGE_VALUE 739 + { 740 + uint16_t value; 741 + uint8_t index; 742 + uint8_t padding; 743 + }; 744 + 745 + typedef struct NISLANDS_SMC_VOLTAGE_VALUE NISLANDS_SMC_VOLTAGE_VALUE; 746 + 747 + struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL 748 + { 749 + uint8_t arbValue; 750 + uint8_t ACIndex; 751 + uint8_t displayWatermark; 752 + uint8_t gen2PCIE; 753 + uint8_t reserved1; 754 + uint8_t reserved2; 755 + uint8_t strobeMode; 756 + uint8_t mcFlags; 757 + uint32_t aT; 758 + uint32_t bSP; 759 + NISLANDS_SMC_SCLK_VALUE sclk; 760 + NISLANDS_SMC_MCLK_VALUE mclk; 761 + NISLANDS_SMC_VOLTAGE_VALUE vddc; 762 + NISLANDS_SMC_VOLTAGE_VALUE mvdd; 763 + NISLANDS_SMC_VOLTAGE_VALUE vddci; 764 + NISLANDS_SMC_VOLTAGE_VALUE std_vddc; 765 + uint32_t powergate_en; 766 + uint8_t hUp; 767 + uint8_t hDown; 768 + uint8_t stateFlags; 769 + uint8_t arbRefreshState; 770 + uint32_t SQPowerThrottle; 771 + uint32_t SQPowerThrottle_2; 772 + uint32_t reserved[2]; 773 + PP_NIslands_Dpm2PerfLevel dpm2; 774 + }; 775 + 776 + typedef struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL NISLANDS_SMC_HW_PERFORMANCE_LEVEL; 777 + 778 + struct NISLANDS_SMC_SWSTATE 779 + { 780 + uint8_t flags; 781 + uint8_t levelCount; 782 + uint8_t padding2; 783 + uint8_t padding3; 784 + NISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[1]; 785 + }; 786 + 787 + typedef struct 
NISLANDS_SMC_SWSTATE NISLANDS_SMC_SWSTATE; 788 + 789 + struct NISLANDS_SMC_VOLTAGEMASKTABLE 790 + { 791 + uint8_t highMask[NISLANDS_SMC_VOLTAGEMASK_MAX]; 792 + uint32_t lowMask[NISLANDS_SMC_VOLTAGEMASK_MAX]; 793 + }; 794 + 795 + typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE; 796 + 797 + #define NISLANDS_MAX_NO_VREG_STEPS 32 798 + 799 + struct NISLANDS_SMC_STATETABLE 800 + { 801 + uint8_t thermalProtectType; 802 + uint8_t systemFlags; 803 + uint8_t maxVDDCIndexInPPTable; 804 + uint8_t extraFlags; 805 + uint8_t highSMIO[NISLANDS_MAX_NO_VREG_STEPS]; 806 + uint32_t lowSMIO[NISLANDS_MAX_NO_VREG_STEPS]; 807 + NISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable; 808 + PP_NIslands_DPM2Parameters dpm2Params; 809 + NISLANDS_SMC_SWSTATE initialState; 810 + NISLANDS_SMC_SWSTATE ACPIState; 811 + NISLANDS_SMC_SWSTATE ULVState; 812 + NISLANDS_SMC_SWSTATE driverState; 813 + NISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1]; 814 + }; 815 + 816 + typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE; 817 + 818 + struct ni_power_info { 819 + /* must be first! 
*/ 820 + struct evergreen_power_info eg; 821 + struct ni_clock_registers clock_registers; 822 + struct ni_mc_reg_table mc_reg_table; 823 + u32 mclk_rtt_mode_threshold; 824 + /* flags */ 825 + bool use_power_boost_limit; 826 + bool support_cac_long_term_average; 827 + bool cac_enabled; 828 + bool cac_configuration_required; 829 + bool driver_calculate_cac_leakage; 830 + bool pc_enabled; 831 + bool enable_power_containment; 832 + bool enable_cac; 833 + bool enable_sq_ramping; 834 + /* smc offsets */ 835 + u16 arb_table_start; 836 + u16 fan_table_start; 837 + u16 cac_table_start; 838 + u16 spll_table_start; 839 + /* CAC stuff */ 840 + struct ni_cac_data cac_data; 841 + u32 dc_cac_table[NISLANDS_DCCAC_MAX_LEVELS]; 842 + const struct ni_cac_weights *cac_weights; 843 + u8 lta_window_size; 844 + u8 lts_truncate; 845 + struct si_ps current_ps; 846 + struct si_ps requested_ps; 847 + /* scratch structs */ 848 + SMC_NIslands_MCRegisters smc_mc_reg_table; 849 + NISLANDS_SMC_STATETABLE smc_statetable; 850 + }; 851 + 852 + struct si_cac_config_reg 853 + { 854 + u32 offset; 855 + u32 mask; 856 + u32 shift; 857 + u32 value; 858 + enum si_cac_config_reg_type type; 859 + }; 860 + 861 + struct si_powertune_data 862 + { 863 + u32 cac_window; 864 + u32 l2_lta_window_size_default; 865 + u8 lts_truncate_default; 866 + u8 shift_n_default; 867 + u8 operating_temp; 868 + struct ni_leakage_coeffients leakage_coefficients; 869 + u32 fixed_kt; 870 + u32 lkge_lut_v0_percent; 871 + u8 dc_cac[NISLANDS_DCCAC_MAX_LEVELS]; 872 + bool enable_powertune_by_default; 873 + }; 874 + 875 + struct si_dyn_powertune_data 876 + { 877 + u32 cac_leakage; 878 + s32 leakage_minimum_temperature; 879 + u32 wintime; 880 + u32 l2_lta_window_size; 881 + u8 lts_truncate; 882 + u8 shift_n; 883 + u8 dc_pwr_value; 884 + bool disable_uvd_powertune; 885 + }; 886 + 887 + struct si_dte_data 888 + { 889 + u32 tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES]; 890 + u32 r[SMC_SISLANDS_DTE_MAX_FILTER_STAGES]; 891 + u32 k; 892 + u32 t0; 893 
+ u32 max_t; 894 + u8 window_size; 895 + u8 temp_select; 896 + u8 dte_mode; 897 + u8 tdep_count; 898 + u8 t_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE]; 899 + u32 tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE]; 900 + u32 tdep_r[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE]; 901 + u32 t_threshold; 902 + bool enable_dte_by_default; 903 + }; 904 + 905 + struct si_clock_registers { 906 + u32 cg_spll_func_cntl; 907 + u32 cg_spll_func_cntl_2; 908 + u32 cg_spll_func_cntl_3; 909 + u32 cg_spll_func_cntl_4; 910 + u32 cg_spll_spread_spectrum; 911 + u32 cg_spll_spread_spectrum_2; 912 + u32 dll_cntl; 913 + u32 mclk_pwrmgt_cntl; 914 + u32 mpll_ad_func_cntl; 915 + u32 mpll_dq_func_cntl; 916 + u32 mpll_func_cntl; 917 + u32 mpll_func_cntl_1; 918 + u32 mpll_func_cntl_2; 919 + u32 mpll_ss1; 920 + u32 mpll_ss2; 921 + }; 922 + 923 + struct si_mc_reg_entry { 924 + u32 mclk_max; 925 + u32 mc_data[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE]; 926 + }; 927 + 928 + struct si_mc_reg_table { 929 + u8 last; 930 + u8 num_entries; 931 + u16 valid_flag; 932 + struct si_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; 933 + SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE]; 934 + }; 935 + 936 + struct si_leakage_voltage_entry 937 + { 938 + u16 voltage; 939 + u16 leakage_index; 940 + }; 941 + 942 + struct si_leakage_voltage 943 + { 944 + u16 count; 945 + struct si_leakage_voltage_entry entries[SISLANDS_MAX_LEAKAGE_COUNT]; 946 + }; 947 + 948 + 949 + struct si_ulv_param { 950 + bool supported; 951 + u32 cg_ulv_control; 952 + u32 cg_ulv_parameter; 953 + u32 volt_change_delay; 954 + struct rv7xx_pl pl; 955 + bool one_pcie_lane_in_ulv; 956 + }; 957 + 958 + struct si_power_info { 959 + /* must be first! 
*/ 960 + struct ni_power_info ni; 961 + struct si_clock_registers clock_registers; 962 + struct si_mc_reg_table mc_reg_table; 963 + struct atom_voltage_table mvdd_voltage_table; 964 + struct atom_voltage_table vddc_phase_shed_table; 965 + struct si_leakage_voltage leakage_voltage; 966 + u16 mvdd_bootup_value; 967 + struct si_ulv_param ulv; 968 + u32 max_cu; 969 + /* pcie gen */ 970 + enum amdgpu_pcie_gen force_pcie_gen; 971 + enum amdgpu_pcie_gen boot_pcie_gen; 972 + enum amdgpu_pcie_gen acpi_pcie_gen; 973 + u32 sys_pcie_mask; 974 + /* flags */ 975 + bool enable_dte; 976 + bool enable_ppm; 977 + bool vddc_phase_shed_control; 978 + bool pspp_notify_required; 979 + bool sclk_deep_sleep_above_low; 980 + bool voltage_control_svi2; 981 + bool vddci_control_svi2; 982 + /* smc offsets */ 983 + u32 sram_end; 984 + u32 state_table_start; 985 + u32 soft_regs_start; 986 + u32 mc_reg_table_start; 987 + u32 arb_table_start; 988 + u32 cac_table_start; 989 + u32 dte_table_start; 990 + u32 spll_table_start; 991 + u32 papm_cfg_table_start; 992 + u32 fan_table_start; 993 + /* CAC stuff */ 994 + const struct si_cac_config_reg *cac_weights; 995 + const struct si_cac_config_reg *lcac_config; 996 + const struct si_cac_config_reg *cac_override; 997 + const struct si_powertune_data *powertune_data; 998 + struct si_dyn_powertune_data dyn_powertune_data; 999 + /* DTE stuff */ 1000 + struct si_dte_data dte_data; 1001 + /* scratch structs */ 1002 + SMC_SIslands_MCRegisters smc_mc_reg_table; 1003 + SISLANDS_SMC_STATETABLE smc_statetable; 1004 + PP_SIslands_PAPMParameters papm_parm; 1005 + /* SVI2 */ 1006 + u8 svd_gpio_id; 1007 + u8 svc_gpio_id; 1008 + /* fan control */ 1009 + bool fan_ctrl_is_in_default_mode; 1010 + u32 t_min; 1011 + u32 fan_ctrl_default_mode; 1012 + bool fan_is_controlled_by_smc; 1013 + }; 1014 + 1015 + #endif
+1
drivers/gpu/drm/amd/amdgpu/si_smc.c
··· 224 224 225 225 amdgpu_ucode_print_smc_hdr(&hdr->header); 226 226 227 + adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); 227 228 ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); 228 229 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); 229 230 src = (const u8 *)