Merge branch 'drm-fixes-3.12' of git://people.freedesktop.org/~agd5f/linux into drm-fixes

Radeon drm fixes for 3.12. The fixes are all over the place (display, dpm, UVD, etc.).
Also adds a couple more berlin PCI ids.

* 'drm-fixes-3.12' of git://people.freedesktop.org/~agd5f/linux: (25 commits)
drm/radeon/dpm: add bapm callback for kb/kv
drm/radeon/dpm: add bapm callback for trinity
drm/radeon/dpm: add infrastructure to properly handle bapm
drm/radeon/dpm: handle bapm on kb/kv
drm/radeon/dpm: handle bapm on trinity
drm/radeon: expose DPM thermal thresholds through sysfs
drm/radeon: simplify driver data retrieval
drm/radeon/atom: workaround vbios bug in transmitter table on rs880 (v2)
drm/radeon/dpm: fix fallback for empty UVD clocks
drm/radeon: add command submission tracepoint
drm/radeon: remove stale radeon_fence_retire tracepoint
drm/radeon/r6xx: add a stubbed out set_uvd_clocks callback
drm/radeon: fix typo in PG flags
drm/radeon: add some additional berlin pci ids
drm/radeon/cik: update gpu_init for an additional berlin gpu
drm/radeon: add spinlocks for indirect register access
drm/radeon: protect concurrent smc register access with a spinlock
drm/radeon: dpm updates for KV
drm/radeon: signedness bug in kv_dpm.c
drm/radeon: clean up r600_free_extended_power_table()
...
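
Several of the locking commits above close the same race: many radeon register
blocks are reached through an INDEX/DATA pair, where one MMIO write selects the
register and a second MMIO access transfers the value. Two concurrent callers
can interleave the two steps and read or clobber the wrong register. A minimal
sketch of the pattern (illustrative names only, not the driver's actual
helpers):

    /* An indexed register pair is a two-step MMIO transaction, so it
     * must be serialized against other CPUs and against IRQ context,
     * hence spin_lock_irqsave() in every accessor below.
     */
    static u32 indexed_rreg(void __iomem *mmio, spinlock_t *lock,
                            u32 index_off, u32 data_off, u32 reg)
    {
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(lock, flags);
        writel(reg, mmio + index_off);  /* step 1: select the register */
        val = readl(mmio + data_off);   /* step 2: move the data */
        spin_unlock_irqrestore(lock, flags);
        return val;
    }

The per-block accessors in the diffs below all follow this shape, each with its
own spinlock in struct radeon_device so unrelated blocks do not contend.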

+729 -170
+15 -8
drivers/gpu/drm/radeon/atombios_encoders.c
···
     switch (connector->connector_type) {
     case DRM_MODE_CONNECTOR_DVII:
     case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
-        if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
-            radeon_audio)
+        if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
+            (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+             (radeon_connector->audio == RADEON_AUDIO_AUTO)))
             return ATOM_ENCODER_MODE_HDMI;
         else if (radeon_connector->use_digital)
             return ATOM_ENCODER_MODE_DVI;
···
     case DRM_MODE_CONNECTOR_DVID:
     case DRM_MODE_CONNECTOR_HDMIA:
     default:
-        if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
-            radeon_audio)
+        if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
+            (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+             (radeon_connector->audio == RADEON_AUDIO_AUTO)))
             return ATOM_ENCODER_MODE_HDMI;
         else
             return ATOM_ENCODER_MODE_DVI;
···
     if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
         (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
         return ATOM_ENCODER_MODE_DP;
-    else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
-             radeon_audio)
+    else if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
+             (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+              (radeon_connector->audio == RADEON_AUDIO_AUTO)))
         return ATOM_ENCODER_MODE_HDMI;
     else
         return ATOM_ENCODER_MODE_DVI;
···
         atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
         atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
         atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
-        /* some early dce3.2 boards have a bug in their transmitter control table */
-        if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730))
+        /* some dce3.x boards have a bug in their transmitter control table.
+         * ACTION_ENABLE_OUTPUT can probably be dropped since ACTION_ENABLE
+         * does the same thing and more.
+         */
+        if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
+            (rdev->family != CHIP_RS880))
             atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
     }
     if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
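
The change above replaces the global radeon_audio module-parameter check with a
per-connector tri-state, so HDMI audio can be forced on, forced off, or derived
from the monitor's EDID. A hedged restatement of the new policy (the
RADEON_AUDIO_* values are the driver's; the helper itself is hypothetical):

    static bool wants_hdmi_mode(struct radeon_connector *conn)
    {
        if (conn->audio == RADEON_AUDIO_ENABLE)
            return true;                                /* forced on */
        if (conn->audio == RADEON_AUDIO_AUTO)
            return drm_detect_hdmi_monitor(conn->edid); /* follow the EDID */
        return false;                                   /* RADEON_AUDIO_DISABLE */
    }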
+26 -13
drivers/gpu/drm/radeon/ci_smc.c
···
               u32 smc_start_address,
               const u8 *src, u32 byte_count, u32 limit)
 {
+    unsigned long flags;
     u32 data, original_data;
     u32 addr;
     u32 extra_shift;
-    int ret;
+    int ret = 0;
 
     if (smc_start_address & 3)
         return -EINVAL;
···
     addr = smc_start_address;
 
+    spin_lock_irqsave(&rdev->smc_idx_lock, flags);
     while (byte_count >= 4) {
         /* SMC address space is BE */
         data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
 
         ret = ci_set_smc_sram_address(rdev, addr, limit);
         if (ret)
-            return ret;
+            goto done;
 
         WREG32(SMC_IND_DATA_0, data);
···
         ret = ci_set_smc_sram_address(rdev, addr, limit);
         if (ret)
-            return ret;
+            goto done;
 
         original_data = RREG32(SMC_IND_DATA_0);
···
         ret = ci_set_smc_sram_address(rdev, addr, limit);
         if (ret)
-            return ret;
+            goto done;
 
         WREG32(SMC_IND_DATA_0, data);
     }
-    return 0;
+
+done:
+    spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+    return ret;
 }
 
 void ci_start_smc(struct radeon_device *rdev)
···
 int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
 {
+    unsigned long flags;
     u32 ucode_start_address;
     u32 ucode_size;
     const u8 *src;
···
         return -EINVAL;
 
     src = (const u8 *)rdev->smc_fw->data;
+    spin_lock_irqsave(&rdev->smc_idx_lock, flags);
     WREG32(SMC_IND_INDEX_0, ucode_start_address);
     WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
     while (ucode_size >= 4) {
···
         ucode_size -= 4;
     }
     WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+    spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 
     return 0;
 }
···
 int ci_read_smc_sram_dword(struct radeon_device *rdev,
                u32 smc_address, u32 *value, u32 limit)
 {
+    unsigned long flags;
     int ret;
 
+    spin_lock_irqsave(&rdev->smc_idx_lock, flags);
     ret = ci_set_smc_sram_address(rdev, smc_address, limit);
-    if (ret)
-        return ret;
+    if (ret == 0)
+        *value = RREG32(SMC_IND_DATA_0);
+    spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 
-    *value = RREG32(SMC_IND_DATA_0);
-    return 0;
+    return ret;
 }
 
 int ci_write_smc_sram_dword(struct radeon_device *rdev,
                 u32 smc_address, u32 value, u32 limit)
 {
+    unsigned long flags;
     int ret;
 
+    spin_lock_irqsave(&rdev->smc_idx_lock, flags);
     ret = ci_set_smc_sram_address(rdev, smc_address, limit);
-    if (ret)
-        return ret;
+    if (ret == 0)
+        WREG32(SMC_IND_DATA_0, value);
+    spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 
-    WREG32(SMC_IND_DATA_0, value);
-    return 0;
+    return ret;
 }
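
Note the control-flow change that comes with the lock: ci_copy_bytes_to_smc()
now holds rdev->smc_idx_lock across the whole transfer, so every early
"return ret" becomes "goto done" to guarantee the unlock runs on error paths.
The generic single-exit shape (a sketch with placeholder names):

    int locked_operation(struct foo *st)
    {
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&st->lock, flags);
        ret = step_one(st);
        if (ret)
            goto done;      /* never return with the lock held */
        ret = step_two(st);
    done:
        spin_unlock_irqrestore(&st->lock, flags);
        return ret;
    }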
+29 -7
drivers/gpu/drm/radeon/cik.c
···
 static void cik_program_aspm(struct radeon_device *rdev);
 static void cik_init_pg(struct radeon_device *rdev);
 static void cik_init_cg(struct radeon_device *rdev);
+static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
+                      bool enable);
 
 /* get temperature in millidegrees */
 int ci_get_temp(struct radeon_device *rdev)
···
 u32 cik_pciep_rreg(struct radeon_device *rdev, u32 reg)
 {
+    unsigned long flags;
     u32 r;
 
+    spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
     WREG32(PCIE_INDEX, reg);
     (void)RREG32(PCIE_INDEX);
     r = RREG32(PCIE_DATA);
+    spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
     return r;
 }
 
 void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+    unsigned long flags;
+
+    spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
     WREG32(PCIE_INDEX, reg);
     (void)RREG32(PCIE_INDEX);
     WREG32(PCIE_DATA, v);
     (void)RREG32(PCIE_DATA);
+    spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
 }
 
 static const u32 spectre_rlc_save_restore_register_list[] =
···
     } else if ((rdev->pdev->device == 0x1309) ||
            (rdev->pdev->device == 0x130A) ||
            (rdev->pdev->device == 0x130D) ||
-           (rdev->pdev->device == 0x1313)) {
+           (rdev->pdev->device == 0x1313) ||
+           (rdev->pdev->device == 0x131D)) {
         rdev->config.cik.max_cu_per_sh = 6;
         rdev->config.cik.max_backends_per_se = 2;
     } else if ((rdev->pdev->device == 0x1306) ||
···
 {
     int r;
 
+    cik_enable_gui_idle_interrupt(rdev, false);
+
     r = cik_cp_load_microcode(rdev);
     if (r)
         return r;
···
     r = cik_cp_compute_resume(rdev);
     if (r)
         return r;
+
+    cik_enable_gui_idle_interrupt(rdev, true);
 
     return 0;
 }
···
 void cik_update_cg(struct radeon_device *rdev,
            u32 block, bool enable)
 {
+
     if (block & RADEON_CG_BLOCK_GFX) {
+        cik_enable_gui_idle_interrupt(rdev, false);
         /* order matters! */
         if (enable) {
             cik_enable_mgcg(rdev, true);
···
             cik_enable_cgcg(rdev, false);
             cik_enable_mgcg(rdev, false);
         }
+        cik_enable_gui_idle_interrupt(rdev, true);
     }
 
     if (block & RADEON_CG_BLOCK_MC) {
···
 {
     u32 data, orig;
 
-    if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) {
+    if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
         orig = data = RREG32(RLC_PG_CNTL);
         data |= GFX_PG_ENABLE;
         if (orig != data)
···
     if (rdev->pg_flags) {
         cik_enable_sck_slowdown_on_pu(rdev, true);
         cik_enable_sck_slowdown_on_pd(rdev, true);
-        if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
+        if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
             cik_init_gfx_cgpg(rdev);
             cik_enable_cp_pg(rdev, true);
             cik_enable_gds_pg(rdev, true);
···
 {
     if (rdev->pg_flags) {
         cik_update_gfx_pg(rdev, false);
-        if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
+        if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
             cik_enable_cp_pg(rdev, false);
             cik_enable_gds_pg(rdev, false);
         }
···
     u32 tmp;
 
     /* gfx ring */
-    WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+    tmp = RREG32(CP_INT_CNTL_RING0) &
+        (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+    WREG32(CP_INT_CNTL_RING0, tmp);
     /* sdma */
     tmp = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
     WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, tmp);
···
 int cik_irq_set(struct radeon_device *rdev)
 {
-    u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE |
-        PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
+    u32 cp_int_cntl;
     u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3;
     u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3;
     u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
···
         cik_disable_interrupt_state(rdev);
         return 0;
     }
+
+    cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
+        (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+    cp_int_cntl |= PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
 
     hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
     hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+10 -2
drivers/gpu/drm/radeon/dce6_afmt.c
···
 static u32 dce6_endpoint_rreg(struct radeon_device *rdev,
                   u32 block_offset, u32 reg)
 {
+    unsigned long flags;
     u32 r;
 
+    spin_lock_irqsave(&rdev->end_idx_lock, flags);
     WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
     r = RREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset);
+    spin_unlock_irqrestore(&rdev->end_idx_lock, flags);
+
     return r;
 }
 
 static void dce6_endpoint_wreg(struct radeon_device *rdev,
                    u32 block_offset, u32 reg, u32 v)
 {
+    unsigned long flags;
+
+    spin_lock_irqsave(&rdev->end_idx_lock, flags);
     if (ASIC_IS_DCE8(rdev))
         WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
     else
         WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset,
                AZ_ENDPOINT_REG_WRITE_EN | AZ_ENDPOINT_REG_INDEX(reg));
     WREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset, v);
+    spin_unlock_irqrestore(&rdev->end_idx_lock, flags);
 }
 
 #define RREG32_ENDPOINT(block, reg) dce6_endpoint_rreg(rdev, (block), (reg))
···
     struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
     struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
     u32 offset = dig->afmt->offset;
-    u32 id = dig->afmt->pin->id;
 
     if (!dig->afmt->pin)
         return;
 
-    WREG32(AFMT_AUDIO_SRC_CONTROL + offset, AFMT_AUDIO_SRC_SELECT(id));
+    WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
+           AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
 }
 
 void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
+129 -34
drivers/gpu/drm/radeon/kv_dpm.c
···
 static void kv_enable_new_levels(struct radeon_device *rdev);
 static void kv_program_nbps_index_settings(struct radeon_device *rdev,
                        struct radeon_ps *new_rps);
+static int kv_set_enabled_level(struct radeon_device *rdev, u32 level);
 static int kv_set_enabled_levels(struct radeon_device *rdev);
 static int kv_force_dpm_highest(struct radeon_device *rdev);
 static int kv_force_dpm_lowest(struct radeon_device *rdev);
···
 static void kv_program_vc(struct radeon_device *rdev)
 {
-    WREG32_SMC(CG_FTV_0, 0x3FFFC000);
+    WREG32_SMC(CG_FTV_0, 0x3FFFC100);
 }
 
 static void kv_clear_vc(struct radeon_device *rdev)
···
 static int kv_unforce_levels(struct radeon_device *rdev)
 {
-    return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
+    if (rdev->family == CHIP_KABINI)
+        return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
+    else
+        return kv_set_enabled_levels(rdev);
 }
 
 static int kv_update_sclk_t(struct radeon_device *rdev)
···
         &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
 
     if (table && table->count) {
-        for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
-            if ((table->entries[i].clk == pi->boot_pl.sclk) ||
-                (i == 0))
+        for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+            if (table->entries[i].clk == pi->boot_pl.sclk)
                 break;
         }
···
         if (table->num_max_dpm_entries == 0)
             return -EINVAL;
 
-        for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
-            if ((table->entries[i].sclk_frequency == pi->boot_pl.sclk) ||
-                (i == 0))
+        for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+            if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
                 break;
         }
···
         PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
 }
 
+static void kv_reset_acp_boot_level(struct radeon_device *rdev)
+{
+    struct kv_power_info *pi = kv_get_pi(rdev);
+
+    pi->acp_boot_level = 0xff;
+}
+
 static void kv_update_current_ps(struct radeon_device *rdev,
                  struct radeon_ps *rps)
 {
···
     pi->requested_rps = *rps;
     pi->requested_ps = *new_ps;
     pi->requested_rps.ps_priv = &pi->requested_ps;
+}
+
+void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
+{
+    struct kv_power_info *pi = kv_get_pi(rdev);
+    int ret;
+
+    if (pi->bapm_enable) {
+        ret = kv_smc_bapm_enable(rdev, enable);
+        if (ret)
+            DRM_ERROR("kv_smc_bapm_enable failed\n");
+    }
 }
 
 int kv_dpm_enable(struct radeon_device *rdev)
···
         return ret;
     }
 
+    kv_reset_acp_boot_level(rdev);
+
     if (rdev->irq.installed &&
         r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
         ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
···
         }
         rdev->irq.dpm_thermal = true;
         radeon_irq_set(rdev);
     }
+
+    ret = kv_smc_bapm_enable(rdev, false);
+    if (ret) {
+        DRM_ERROR("kv_smc_bapm_enable failed\n");
+        return ret;
+    }
 
     /* powerdown unused blocks for now */
···
              RADEON_CG_BLOCK_SDMA |
              RADEON_CG_BLOCK_BIF |
              RADEON_CG_BLOCK_HDP), false);
+
+    kv_smc_bapm_enable(rdev, false);
 
     /* powerup blocks */
     kv_dpm_powergate_acp(rdev, false);
···
     return kv_enable_samu_dpm(rdev, !gate);
 }
 
+static u8 kv_get_acp_boot_level(struct radeon_device *rdev)
+{
+    u8 i;
+    struct radeon_clock_voltage_dependency_table *table =
+        &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
+
+    for (i = 0; i < table->count; i++) {
+        if (table->entries[i].clk >= 0) /* XXX */
+            break;
+    }
+
+    if (i >= table->count)
+        i = table->count - 1;
+
+    return i;
+}
+
+static void kv_update_acp_boot_level(struct radeon_device *rdev)
+{
+    struct kv_power_info *pi = kv_get_pi(rdev);
+    u8 acp_boot_level;
+
+    if (!pi->caps_stable_p_state) {
+        acp_boot_level = kv_get_acp_boot_level(rdev);
+        if (acp_boot_level != pi->acp_boot_level) {
+            pi->acp_boot_level = acp_boot_level;
+            kv_send_msg_to_smc_with_parameter(rdev,
+                              PPSMC_MSG_ACPDPM_SetEnabledMask,
+                              (1 << pi->acp_boot_level));
+        }
+    }
+}
+
 static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
 {
     struct kv_power_info *pi = kv_get_pi(rdev);
···
     if (pi->caps_stable_p_state)
         pi->acp_boot_level = table->count - 1;
     else
-        pi->acp_boot_level = 0;
+        pi->acp_boot_level = kv_get_acp_boot_level(rdev);
 
     ret = kv_copy_bytes_to_smc(rdev,
                    pi->dpm_table_start +
···
         }
     }
 
-    for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
-        if ((table->entries[i].clk <= new_ps->levels[new_ps->num_levels -1].sclk) ||
-            (i == 0)) {
-            pi->highest_valid = i;
+    for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+        if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
             break;
-        }
     }
+    pi->highest_valid = i;
 
     if (pi->lowest_valid > pi->highest_valid) {
         if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
···
         }
     }
 
-    for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
+    for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
         if (table->entries[i].sclk_frequency <=
-            new_ps->levels[new_ps->num_levels - 1].sclk ||
-            i == 0) {
-            pi->highest_valid = i;
+            new_ps->levels[new_ps->num_levels - 1].sclk)
             break;
-        }
     }
+    pi->highest_valid = i;
 
     if (pi->lowest_valid > pi->highest_valid) {
         if ((new_ps->levels[0].sclk -
···
              RADEON_CG_BLOCK_BIF |
              RADEON_CG_BLOCK_HDP), false);
 
+    if (pi->bapm_enable) {
+        ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power);
+        if (ret) {
+            DRM_ERROR("kv_smc_bapm_enable failed\n");
+            return ret;
+        }
+    }
+
     if (rdev->family == CHIP_KABINI) {
         if (pi->enable_dpm) {
             kv_set_valid_clock_range(rdev, new_ps);
···
             return ret;
         }
 #endif
+        kv_update_acp_boot_level(rdev);
         kv_update_sclk_t(rdev);
         kv_enable_nb_dpm(rdev);
     }
···
 void kv_dpm_reset_asic(struct radeon_device *rdev)
 {
-    kv_force_lowest_valid(rdev);
-    kv_init_graphics_levels(rdev);
-    kv_program_bootup_state(rdev);
-    kv_upload_dpm_settings(rdev);
-    kv_force_lowest_valid(rdev);
-    kv_unforce_levels(rdev);
+    struct kv_power_info *pi = kv_get_pi(rdev);
+
+    if (rdev->family == CHIP_KABINI) {
+        kv_force_lowest_valid(rdev);
+        kv_init_graphics_levels(rdev);
+        kv_program_bootup_state(rdev);
+        kv_upload_dpm_settings(rdev);
+        kv_force_lowest_valid(rdev);
+        kv_unforce_levels(rdev);
+    } else {
+        kv_init_graphics_levels(rdev);
+        kv_program_bootup_state(rdev);
+        kv_freeze_sclk_dpm(rdev, true);
+        kv_upload_dpm_settings(rdev);
+        kv_freeze_sclk_dpm(rdev, false);
+        kv_set_enabled_level(rdev, pi->graphics_boot_level);
+    }
 }
 
 //XXX use sumo_dpm_display_configuration_changed
···
     if (ret)
         return ret;
 
-    for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i >= 0; i--) {
+    for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
         if (enable_mask & (1 << i))
             break;
     }
 
-    return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+    if (rdev->family == CHIP_KABINI)
+        return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+    else
+        return kv_set_enabled_level(rdev, i);
 }
 
 static int kv_force_dpm_lowest(struct radeon_device *rdev)
···
             break;
     }
 
-    return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+    if (rdev->family == CHIP_KABINI)
+        return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+    else
+        return kv_set_enabled_level(rdev, i);
 }
 
 static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
···
     if (!pi->caps_sclk_ds)
         return 0;
 
-    for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i <= 0; i--) {
+    for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
         temp = sclk / sumo_get_sleep_divider_from_id(i);
-        if ((temp >= min) || (i == 0))
+        if (temp >= min)
             break;
     }
···
         ps->dpmx_nb_ps_lo = 0x1;
         ps->dpmx_nb_ps_hi = 0x0;
     } else {
-        ps->dpm0_pg_nb_ps_lo = 0x1;
+        ps->dpm0_pg_nb_ps_lo = 0x3;
         ps->dpm0_pg_nb_ps_hi = 0x0;
-        ps->dpmx_nb_ps_lo = 0x2;
-        ps->dpmx_nb_ps_hi = 0x1;
+        ps->dpmx_nb_ps_lo = 0x3;
+        ps->dpmx_nb_ps_hi = 0x0;
 
-        if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
+        if (pi->sys_info.nb_dpm_enable) {
             force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
                 pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
                 pi->disable_nb_ps3_in_battery;
···
         if (i >= pi->lowest_valid && i <= pi->highest_valid)
             kv_dpm_power_level_enable(rdev, i, true);
     }
+}
+
+static int kv_set_enabled_level(struct radeon_device *rdev, u32 level)
+{
+    u32 new_mask = (1 << level);
+
+    return kv_send_msg_to_smc_with_parameter(rdev,
+                         PPSMC_MSG_SCLKDPM_SetEnabledMask,
+                         new_mask);
 }
 
 static int kv_set_enabled_levels(struct radeon_device *rdev)
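
Most of the loop changes above are the "signedness bug in kv_dpm.c" fix: the
counters here are unsigned, so a descending `i >= 0` bound can never go false
and the old code leaned on an `(i == 0)` escape inside the condition (one loop
even had the bound inverted to `i <= 0`, so it never ran at all). With `i > 0`
the loop terminates on its own and index 0 becomes the natural fallback. A
standalone illustration of the pitfall (hypothetical code, not from the
driver):

    u32 i, count = 8;

    for (i = count - 1; i >= 0; i--)    /* BUG: a u32 is never < 0 */
        ;                               /* wraps to 0xffffffff and spins */

    for (i = count - 1; i > 0; i--)     /* OK: stops at i == 0 */
        if (entry_matches(i))
            break;
    /* here i is the best match, or 0 if nothing matched */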
+1
drivers/gpu/drm/radeon/kv_dpm.h
···
 int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
                u32 *value, u32 limit);
 int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable);
+int kv_smc_bapm_enable(struct radeon_device *rdev, bool enable);
 int kv_copy_bytes_to_smc(struct radeon_device *rdev,
              u32 smc_start_address,
              const u8 *src, u32 byte_count, u32 limit);
+8
drivers/gpu/drm/radeon/kv_smc.c
···
     return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Disable);
 }
 
+int kv_smc_bapm_enable(struct radeon_device *rdev, bool enable)
+{
+    if (enable)
+        return kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM);
+    else
+        return kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM);
+}
+
 int kv_copy_bytes_to_smc(struct radeon_device *rdev,
              u32 smc_start_address,
              const u8 *src, u32 byte_count, u32 limit)
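
kv_smc_bapm_enable() itself only forwards one of the two new SMU messages; the
point of the series is when it fires. Per the "handle bapm" commits, the driver
disables BAPM during dpm bring-up and re-enables it according to AC/DC state
around power-state changes. A hedged sketch of one possible caller, using the
radeon_dpm_enable_bapm() wrapper added in radeon.h below (the event function
here is hypothetical):

    static void example_ac_dc_event(struct radeon_device *rdev, bool on_ac)
    {
        rdev->pm.dpm.ac_power = on_ac;
        /* only asics that wired up .enable_bapm get the call */
        if (rdev->asic->dpm.enable_bapm)
            radeon_dpm_enable_bapm(rdev, on_ac);
    }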
+2
drivers/gpu/drm/radeon/ppsmc.h
···
 #define PPSMC_MSG_VCEPowerON                  ((uint32_t) 0x10f)
 #define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d)
 #define PPSMC_MSG_DCE_AllowVoltageAdjustment  ((uint32_t) 0x11e)
+#define PPSMC_MSG_EnableBAPM                  ((uint32_t) 0x120)
+#define PPSMC_MSG_DisableBAPM                 ((uint32_t) 0x121)
 #define PPSMC_MSG_UVD_DPM_Config              ((uint32_t) 0x124)
+7
drivers/gpu/drm/radeon/r100.c
···
 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
 {
+    unsigned long flags;
     uint32_t data;
 
+    spin_lock_irqsave(&rdev->pll_idx_lock, flags);
     WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
     r100_pll_errata_after_index(rdev);
     data = RREG32(RADEON_CLOCK_CNTL_DATA);
     r100_pll_errata_after_data(rdev);
+    spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
     return data;
 }
 
 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
+    unsigned long flags;
+
+    spin_lock_irqsave(&rdev->pll_idx_lock, flags);
     WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
     r100_pll_errata_after_index(rdev);
     WREG32(RADEON_CLOCK_CNTL_DATA, v);
     r100_pll_errata_after_data(rdev);
+    spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
 }
 
 static void r100_set_safe_registers(struct radeon_device *rdev)
+7
drivers/gpu/drm/radeon/r420.c
···
 u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg)
 {
+    unsigned long flags;
     u32 r;
 
+    spin_lock_irqsave(&rdev->mc_idx_lock, flags);
     WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg));
     r = RREG32(R_0001FC_MC_IND_DATA);
+    spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
     return r;
 }
 
 void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+    unsigned long flags;
+
+    spin_lock_irqsave(&rdev->mc_idx_lock, flags);
     WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) |
         S_0001F8_MC_IND_WR_EN(1));
     WREG32(R_0001FC_MC_IND_DATA, v);
+    spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
 }
 
 static void r420_debugfs(struct radeon_device *rdev)
+19
drivers/gpu/drm/radeon/r600.c
···
     return rdev->clock.spll.reference_freq;
 }
 
+int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+    return 0;
+}
+
 /* get temperature in millidegrees */
 int rv6xx_get_temp(struct radeon_device *rdev)
 {
···
 uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
 {
+    unsigned long flags;
     uint32_t r;
 
+    spin_lock_irqsave(&rdev->mc_idx_lock, flags);
     WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
     r = RREG32(R_0028FC_MC_DATA);
     WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
+    spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
     return r;
 }
 
 void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
+    unsigned long flags;
+
+    spin_lock_irqsave(&rdev->mc_idx_lock, flags);
     WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
         S_0028F8_MC_IND_WR_EN(1));
     WREG32(R_0028FC_MC_DATA, v);
     WREG32(R_0028F8_MC_INDEX, 0x7F);
+    spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
 }
 
 static void r600_mc_program(struct radeon_device *rdev)
···
 u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
 {
+    unsigned long flags;
     u32 r;
 
+    spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
     WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
     (void)RREG32(PCIE_PORT_INDEX);
     r = RREG32(PCIE_PORT_DATA);
+    spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
     return r;
 }
 
 void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+    unsigned long flags;
+
+    spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
     WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
     (void)RREG32(PCIE_PORT_INDEX);
     WREG32(PCIE_PORT_DATA, (v));
     (void)RREG32(PCIE_PORT_DATA);
+    spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
 }
 
 /*
+14 -24
drivers/gpu/drm/radeon/r600_dpm.c
···
 void r600_free_extended_power_table(struct radeon_device *rdev)
 {
-    if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries)
-        kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
-    if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries)
-        kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
-    if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries)
-        kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
-    if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries)
-        kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries);
-    if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries)
-        kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries);
-    if (rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries)
-        kfree(rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries);
-    if (rdev->pm.dpm.dyn_state.ppm_table)
-        kfree(rdev->pm.dpm.dyn_state.ppm_table);
-    if (rdev->pm.dpm.dyn_state.cac_tdp_table)
-        kfree(rdev->pm.dpm.dyn_state.cac_tdp_table);
-    if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries)
-        kfree(rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries);
-    if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries)
-        kfree(rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries);
-    if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries)
-        kfree(rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries);
-    if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries)
-        kfree(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries);
+    struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state;
+
+    kfree(dyn_state->vddc_dependency_on_sclk.entries);
+    kfree(dyn_state->vddci_dependency_on_mclk.entries);
+    kfree(dyn_state->vddc_dependency_on_mclk.entries);
+    kfree(dyn_state->mvdd_dependency_on_mclk.entries);
+    kfree(dyn_state->cac_leakage_table.entries);
+    kfree(dyn_state->phase_shedding_limits_table.entries);
+    kfree(dyn_state->ppm_table);
+    kfree(dyn_state->cac_tdp_table);
+    kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
+    kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
+    kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
+    kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
 }
 
 enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
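
The cleanup works because kfree(NULL) is defined to be a no-op, so every
guarded pair collapses to a bare kfree(), and hoisting the long
rdev->pm.dpm.dyn_state expression into a local removes the remaining noise:

    /* before and after are behaviorally identical */
    if (p)
        kfree(p);

    kfree(p);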
+81 -1
drivers/gpu/drm/radeon/radeon.h
···
 #define RADEON_CG_SUPPORT_HDP_MGCG	(1 << 16)
 
 /* PG flags */
-#define RADEON_PG_SUPPORT_GFX_CG	(1 << 0)
+#define RADEON_PG_SUPPORT_GFX_PG	(1 << 0)
 #define RADEON_PG_SUPPORT_GFX_SMG	(1 << 1)
 #define RADEON_PG_SUPPORT_GFX_DMG	(1 << 2)
 #define RADEON_PG_SUPPORT_UVD	(1 << 3)
···
         int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level);
         bool (*vblank_too_short)(struct radeon_device *rdev);
         void (*powergate_uvd)(struct radeon_device *rdev, bool gate);
+        void (*enable_bapm)(struct radeon_device *rdev, bool enable);
     } dpm;
     /* pageflipping */
     struct {
···
     resource_size_t rmmio_size;
     /* protects concurrent MM_INDEX/DATA based register access */
     spinlock_t mmio_idx_lock;
+    /* protects concurrent SMC based register access */
+    spinlock_t smc_idx_lock;
+    /* protects concurrent PLL register access */
+    spinlock_t pll_idx_lock;
+    /* protects concurrent MC register access */
+    spinlock_t mc_idx_lock;
+    /* protects concurrent PCIE register access */
+    spinlock_t pcie_idx_lock;
+    /* protects concurrent PCIE_PORT register access */
+    spinlock_t pciep_idx_lock;
+    /* protects concurrent PIF register access */
+    spinlock_t pif_idx_lock;
+    /* protects concurrent CG register access */
+    spinlock_t cg_idx_lock;
+    /* protects concurrent UVD register access */
+    spinlock_t uvd_idx_lock;
+    /* protects concurrent RCU register access */
+    spinlock_t rcu_idx_lock;
+    /* protects concurrent DIDT register access */
+    spinlock_t didt_idx_lock;
+    /* protects concurrent ENDPOINT (audio) register access */
+    spinlock_t end_idx_lock;
     void __iomem *rmmio;
     radeon_rreg_t mc_rreg;
     radeon_wreg_t mc_wreg;
···
 static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
 {
+    unsigned long flags;
     uint32_t r;
 
+    spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
     WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
     r = RREG32(RADEON_PCIE_DATA);
+    spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
     return r;
 }
 
 static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
+    unsigned long flags;
+
+    spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
     WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
     WREG32(RADEON_PCIE_DATA, (v));
+    spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
 }
 
 static inline u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
 {
+    unsigned long flags;
     u32 r;
 
+    spin_lock_irqsave(&rdev->smc_idx_lock, flags);
     WREG32(TN_SMC_IND_INDEX_0, (reg));
     r = RREG32(TN_SMC_IND_DATA_0);
+    spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
     return r;
 }
 
 static inline void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+    unsigned long flags;
+
+    spin_lock_irqsave(&rdev->smc_idx_lock, flags);
     WREG32(TN_SMC_IND_INDEX_0, (reg));
     WREG32(TN_SMC_IND_DATA_0, (v));
+    spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 }
 
 static inline u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg)
 {
+    unsigned long flags;
     u32 r;
 
+    spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
     WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
     r = RREG32(R600_RCU_DATA);
+    spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
     return r;
 }
 
 static inline void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+    unsigned long flags;
+
+    spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
     WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
     WREG32(R600_RCU_DATA, (v));
+    spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
 }
 
 static inline u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg)
 {
+    unsigned long flags;
     u32 r;
 
+    spin_lock_irqsave(&rdev->cg_idx_lock, flags);
     WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
     r = RREG32(EVERGREEN_CG_IND_DATA);
+    spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
     return r;
 }
 
 static inline void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+    unsigned long flags;
+
+    spin_lock_irqsave(&rdev->cg_idx_lock, flags);
     WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
     WREG32(EVERGREEN_CG_IND_DATA, (v));
+    spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
 }
 
 static inline u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg)
 {
+    unsigned long flags;
     u32 r;
 
+    spin_lock_irqsave(&rdev->pif_idx_lock, flags);
     WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
     r = RREG32(EVERGREEN_PIF_PHY0_DATA);
+    spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
     return r;
 }
 
 static inline void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+    unsigned long flags;
+
+    spin_lock_irqsave(&rdev->pif_idx_lock, flags);
     WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
     WREG32(EVERGREEN_PIF_PHY0_DATA, (v));
+    spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
 }
 
 static inline u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg)
 {
+    unsigned long flags;
     u32 r;
 
+    spin_lock_irqsave(&rdev->pif_idx_lock, flags);
     WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
     r = RREG32(EVERGREEN_PIF_PHY1_DATA);
+    spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
     return r;
 }
 
 static inline void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+    unsigned long flags;
+
+    spin_lock_irqsave(&rdev->pif_idx_lock, flags);
     WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
     WREG32(EVERGREEN_PIF_PHY1_DATA, (v));
+    spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
 }
 
 static inline u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg)
 {
+    unsigned long flags;
     u32 r;
 
+    spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
     WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
     r = RREG32(R600_UVD_CTX_DATA);
+    spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
     return r;
 }
 
 static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+    unsigned long flags;
+
+    spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
     WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
     WREG32(R600_UVD_CTX_DATA, (v));
+    spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
 }
 
 static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg)
 {
+    unsigned long flags;
     u32 r;
 
+    spin_lock_irqsave(&rdev->didt_idx_lock, flags);
     WREG32(CIK_DIDT_IND_INDEX, (reg));
     r = RREG32(CIK_DIDT_IND_DATA);
+    spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
     return r;
 }
 
 static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+    unsigned long flags;
+
+    spin_lock_irqsave(&rdev->didt_idx_lock, flags);
     WREG32(CIK_DIDT_IND_INDEX, (reg));
     WREG32(CIK_DIDT_IND_DATA, (v));
+    spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
 }
 
 void r100_pll_errata_after_index(struct radeon_device *rdev);
···
 #define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l))
 #define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev))
 #define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g))
+#define radeon_dpm_enable_bapm(rdev, e) rdev->asic->dpm.enable_bapm((rdev), (e))
 
 /* Common functions */
 /* AGP */
+8 -3
drivers/gpu/drm/radeon/radeon_asic.c
··· 1037 .set_pcie_lanes = &r600_set_pcie_lanes, 1038 .set_clock_gating = NULL, 1039 .get_temperature = &rv6xx_get_temp, 1040 }, 1041 .dpm = { 1042 .init = &rv6xx_dpm_init, ··· 1127 .set_pcie_lanes = NULL, 1128 .set_clock_gating = NULL, 1129 .get_temperature = &rv6xx_get_temp, 1130 }, 1131 .dpm = { 1132 .init = &rs780_dpm_init, ··· 1143 .get_mclk = &rs780_dpm_get_mclk, 1144 .print_power_state = &rs780_dpm_print_power_state, 1145 .debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level, 1146 }, 1147 .pflip = { 1148 .pre_page_flip = &rs600_pre_page_flip, ··· 1794 .print_power_state = &trinity_dpm_print_power_state, 1795 .debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level, 1796 .force_performance_level = &trinity_dpm_force_performance_level, 1797 }, 1798 .pflip = { 1799 .pre_page_flip = &evergreen_pre_page_flip, ··· 2170 .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level, 2171 .force_performance_level = &kv_dpm_force_performance_level, 2172 .powergate_uvd = &kv_dpm_powergate_uvd, 2173 }, 2174 .pflip = { 2175 .pre_page_flip = &evergreen_pre_page_flip, ··· 2395 RADEON_CG_SUPPORT_HDP_LS | 2396 RADEON_CG_SUPPORT_HDP_MGCG; 2397 rdev->pg_flags = 0 | 2398 - /*RADEON_PG_SUPPORT_GFX_CG | */ 2399 RADEON_PG_SUPPORT_SDMA; 2400 break; 2401 case CHIP_OLAND: ··· 2484 RADEON_CG_SUPPORT_HDP_LS | 2485 RADEON_CG_SUPPORT_HDP_MGCG; 2486 rdev->pg_flags = 0; 2487 - /*RADEON_PG_SUPPORT_GFX_CG | 2488 RADEON_PG_SUPPORT_GFX_SMG | 2489 RADEON_PG_SUPPORT_GFX_DMG | 2490 RADEON_PG_SUPPORT_UVD | ··· 2512 RADEON_CG_SUPPORT_HDP_LS | 2513 RADEON_CG_SUPPORT_HDP_MGCG; 2514 rdev->pg_flags = 0; 2515 - /*RADEON_PG_SUPPORT_GFX_CG | 2516 RADEON_PG_SUPPORT_GFX_SMG | 2517 RADEON_PG_SUPPORT_UVD | 2518 RADEON_PG_SUPPORT_VCE |
··· 1037 .set_pcie_lanes = &r600_set_pcie_lanes, 1038 .set_clock_gating = NULL, 1039 .get_temperature = &rv6xx_get_temp, 1040 + .set_uvd_clocks = &r600_set_uvd_clocks, 1041 }, 1042 .dpm = { 1043 .init = &rv6xx_dpm_init, ··· 1126 .set_pcie_lanes = NULL, 1127 .set_clock_gating = NULL, 1128 .get_temperature = &rv6xx_get_temp, 1129 + .set_uvd_clocks = &r600_set_uvd_clocks, 1130 }, 1131 .dpm = { 1132 .init = &rs780_dpm_init, ··· 1141 .get_mclk = &rs780_dpm_get_mclk, 1142 .print_power_state = &rs780_dpm_print_power_state, 1143 .debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level, 1144 + .force_performance_level = &rs780_dpm_force_performance_level, 1145 }, 1146 .pflip = { 1147 .pre_page_flip = &rs600_pre_page_flip, ··· 1791 .print_power_state = &trinity_dpm_print_power_state, 1792 .debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level, 1793 .force_performance_level = &trinity_dpm_force_performance_level, 1794 + .enable_bapm = &trinity_dpm_enable_bapm, 1795 }, 1796 .pflip = { 1797 .pre_page_flip = &evergreen_pre_page_flip, ··· 2166 .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level, 2167 .force_performance_level = &kv_dpm_force_performance_level, 2168 .powergate_uvd = &kv_dpm_powergate_uvd, 2169 + .enable_bapm = &kv_dpm_enable_bapm, 2170 }, 2171 .pflip = { 2172 .pre_page_flip = &evergreen_pre_page_flip, ··· 2390 RADEON_CG_SUPPORT_HDP_LS | 2391 RADEON_CG_SUPPORT_HDP_MGCG; 2392 rdev->pg_flags = 0 | 2393 + /*RADEON_PG_SUPPORT_GFX_PG | */ 2394 RADEON_PG_SUPPORT_SDMA; 2395 break; 2396 case CHIP_OLAND: ··· 2479 RADEON_CG_SUPPORT_HDP_LS | 2480 RADEON_CG_SUPPORT_HDP_MGCG; 2481 rdev->pg_flags = 0; 2482 + /*RADEON_PG_SUPPORT_GFX_PG | 2483 RADEON_PG_SUPPORT_GFX_SMG | 2484 RADEON_PG_SUPPORT_GFX_DMG | 2485 RADEON_PG_SUPPORT_UVD | ··· 2507 RADEON_CG_SUPPORT_HDP_LS | 2508 RADEON_CG_SUPPORT_HDP_MGCG; 2509 rdev->pg_flags = 0; 2510 + /*RADEON_PG_SUPPORT_GFX_PG | 2511 RADEON_PG_SUPPORT_GFX_SMG | 2512 RADEON_PG_SUPPORT_UVD | 2513 RADEON_PG_SUPPORT_VCE |
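The radeon_asic.c changes are pure table wiring: the new callbacks (set_uvd_clocks, force_performance_level, enable_bapm) are slotted into the per-ASIC function tables so shared code can dispatch through rdev->asic without chip-specific branches. A stripped-down sketch of that dispatch idiom (hypothetical struct names, not the driver's real types):

#include <stdbool.h>
#include <stdio.h>

struct dev;

/* Per-ASIC callback table; entries may be NULL when unsupported. */
struct dpm_funcs {
	void (*enable_bapm)(struct dev *d, bool enable);
};

struct dev {
	const struct dpm_funcs *dpm;
};

static void trinity_enable_bapm(struct dev *d, bool e)
{
	printf("trinity: bapm %s\n", e ? "on" : "off");
}

static const struct dpm_funcs trinity_dpm = {
	.enable_bapm = trinity_enable_bapm,
};

int main(void)
{
	struct dev d = { .dpm = &trinity_dpm };

	/* Common code checks for the callback before dispatching,
	 * like the rdev->asic->dpm.enable_bapm test in radeon_pm.c. */
	if (d.dpm->enable_bapm)
		d.dpm->enable_bapm(&d, true);
	return 0;
}

Leaving an entry NULL is how a chip opts out of a feature, which is why callers test the pointer before calling through it.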
+5
drivers/gpu/drm/radeon/radeon_asic.h
··· 389 u32 r600_get_xclk(struct radeon_device *rdev); 390 uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev); 391 int rv6xx_get_temp(struct radeon_device *rdev); 392 int r600_dpm_pre_set_power_state(struct radeon_device *rdev); 393 void r600_dpm_post_set_power_state(struct radeon_device *rdev); 394 /* r600 dma */ ··· 429 struct radeon_ps *ps); 430 void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, 431 struct seq_file *m); 432 433 /* 434 * rv770,rv730,rv710,rv740 ··· 628 struct seq_file *m); 629 int trinity_dpm_force_performance_level(struct radeon_device *rdev, 630 enum radeon_dpm_forced_level level); 631 632 /* DCE6 - SI */ 633 void dce6_bandwidth_update(struct radeon_device *rdev); ··· 785 int kv_dpm_force_performance_level(struct radeon_device *rdev, 786 enum radeon_dpm_forced_level level); 787 void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate); 788 789 /* uvd v1.0 */ 790 uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
··· 389 u32 r600_get_xclk(struct radeon_device *rdev); 390 uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev); 391 int rv6xx_get_temp(struct radeon_device *rdev); 392 + int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); 393 int r600_dpm_pre_set_power_state(struct radeon_device *rdev); 394 void r600_dpm_post_set_power_state(struct radeon_device *rdev); 395 /* r600 dma */ ··· 428 struct radeon_ps *ps); 429 void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, 430 struct seq_file *m); 431 + int rs780_dpm_force_performance_level(struct radeon_device *rdev, 432 + enum radeon_dpm_forced_level level); 433 434 /* 435 * rv770,rv730,rv710,rv740 ··· 625 struct seq_file *m); 626 int trinity_dpm_force_performance_level(struct radeon_device *rdev, 627 enum radeon_dpm_forced_level level); 628 + void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable); 629 630 /* DCE6 - SI */ 631 void dce6_bandwidth_update(struct radeon_device *rdev); ··· 781 int kv_dpm_force_performance_level(struct radeon_device *rdev, 782 enum radeon_dpm_forced_level level); 783 void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate); 784 + void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable); 785 786 /* uvd v1.0 */ 787 uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
+33
drivers/gpu/drm/radeon/radeon_connectors.c
··· 396 } 397 } 398 399 if (property == rdev->mode_info.underscan_property) { 400 /* need to find digital encoder on connector */ 401 encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); ··· 1634 drm_object_attach_property(&radeon_connector->base.base, 1635 rdev->mode_info.underscan_vborder_property, 1636 0); 1637 subpixel_order = SubPixelHorizontalRGB; 1638 connector->interlace_allowed = true; 1639 if (connector_type == DRM_MODE_CONNECTOR_HDMIB) ··· 1726 rdev->mode_info.underscan_vborder_property, 1727 0); 1728 } 1729 if (connector_type == DRM_MODE_CONNECTOR_DVII) { 1730 radeon_connector->dac_load_detect = true; 1731 drm_object_attach_property(&radeon_connector->base.base, ··· 1771 rdev->mode_info.underscan_vborder_property, 1772 0); 1773 } 1774 subpixel_order = SubPixelHorizontalRGB; 1775 connector->interlace_allowed = true; 1776 if (connector_type == DRM_MODE_CONNECTOR_HDMIB) ··· 1814 drm_object_attach_property(&radeon_connector->base.base, 1815 rdev->mode_info.underscan_vborder_property, 1816 0); 1817 } 1818 connector->interlace_allowed = true; 1819 /* in theory with a DP to VGA converter... */
··· 396 } 397 } 398 399 + if (property == rdev->mode_info.audio_property) { 400 + struct radeon_connector *radeon_connector = to_radeon_connector(connector); 401 + /* need to find digital encoder on connector */ 402 + encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); 403 + if (!encoder) 404 + return 0; 405 + 406 + radeon_encoder = to_radeon_encoder(encoder); 407 + 408 + if (radeon_connector->audio != val) { 409 + radeon_connector->audio = val; 410 + radeon_property_change_mode(&radeon_encoder->base); 411 + } 412 + } 413 + 414 if (property == rdev->mode_info.underscan_property) { 415 /* need to find digital encoder on connector */ 416 encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); ··· 1619 drm_object_attach_property(&radeon_connector->base.base, 1620 rdev->mode_info.underscan_vborder_property, 1621 0); 1622 + drm_object_attach_property(&radeon_connector->base.base, 1623 + rdev->mode_info.audio_property, 1624 + RADEON_AUDIO_DISABLE); 1625 subpixel_order = SubPixelHorizontalRGB; 1626 connector->interlace_allowed = true; 1627 if (connector_type == DRM_MODE_CONNECTOR_HDMIB) ··· 1708 rdev->mode_info.underscan_vborder_property, 1709 0); 1710 } 1711 + if (ASIC_IS_DCE2(rdev)) { 1712 + drm_object_attach_property(&radeon_connector->base.base, 1713 + rdev->mode_info.audio_property, 1714 + RADEON_AUDIO_DISABLE); 1715 + } 1716 if (connector_type == DRM_MODE_CONNECTOR_DVII) { 1717 radeon_connector->dac_load_detect = true; 1718 drm_object_attach_property(&radeon_connector->base.base, ··· 1748 rdev->mode_info.underscan_vborder_property, 1749 0); 1750 } 1751 + if (ASIC_IS_DCE2(rdev)) { 1752 + drm_object_attach_property(&radeon_connector->base.base, 1753 + rdev->mode_info.audio_property, 1754 + RADEON_AUDIO_DISABLE); 1755 + } 1756 subpixel_order = SubPixelHorizontalRGB; 1757 connector->interlace_allowed = true; 1758 if (connector_type == DRM_MODE_CONNECTOR_HDMIB) ··· 1786 drm_object_attach_property(&radeon_connector->base.base, 1787 rdev->mode_info.underscan_vborder_property, 1788 0); 1789 + } 1790 + if (ASIC_IS_DCE2(rdev)) { 1791 + drm_object_attach_property(&radeon_connector->base.base, 1792 + rdev->mode_info.audio_property, 1793 + RADEON_AUDIO_DISABLE); 1794 } 1795 connector->interlace_allowed = true; 1796 /* in theory with a DP to VGA converter... */
+3
drivers/gpu/drm/radeon/radeon_cs.c
··· 28 #include <drm/radeon_drm.h> 29 #include "radeon_reg.h" 30 #include "radeon.h" 31 32 static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) 33 { ··· 559 r = radeon_cs_handle_lockup(rdev, r); 560 return r; 561 } 562 563 r = radeon_cs_ib_chunk(rdev, &parser); 564 if (r) {
··· 28 #include <drm/radeon_drm.h> 29 #include "radeon_reg.h" 30 #include "radeon.h" 31 + #include "radeon_trace.h" 32 33 static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) 34 { ··· 558 r = radeon_cs_handle_lockup(rdev, r); 559 return r; 560 } 561 + 562 + trace_radeon_cs(&parser); 563 564 r = radeon_cs_ib_chunk(rdev, &parser); 565 if (r) {
+11
drivers/gpu/drm/radeon/radeon_device.c
··· 1249 /* Registers mapping */ 1250 /* TODO: block userspace mapping of io register */ 1251 spin_lock_init(&rdev->mmio_idx_lock); 1252 if (rdev->family >= CHIP_BONAIRE) { 1253 rdev->rmmio_base = pci_resource_start(rdev->pdev, 5); 1254 rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
··· 1249 /* Registers mapping */ 1250 /* TODO: block userspace mapping of io register */ 1251 spin_lock_init(&rdev->mmio_idx_lock); 1252 + spin_lock_init(&rdev->smc_idx_lock); 1253 + spin_lock_init(&rdev->pll_idx_lock); 1254 + spin_lock_init(&rdev->mc_idx_lock); 1255 + spin_lock_init(&rdev->pcie_idx_lock); 1256 + spin_lock_init(&rdev->pciep_idx_lock); 1257 + spin_lock_init(&rdev->pif_idx_lock); 1258 + spin_lock_init(&rdev->cg_idx_lock); 1259 + spin_lock_init(&rdev->uvd_idx_lock); 1260 + spin_lock_init(&rdev->rcu_idx_lock); 1261 + spin_lock_init(&rdev->didt_idx_lock); 1262 + spin_lock_init(&rdev->end_idx_lock); 1263 if (rdev->family >= CHIP_BONAIRE) { 1264 rdev->rmmio_base = pci_resource_start(rdev->pdev, 5); 1265 rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
+12
drivers/gpu/drm/radeon/radeon_display.c
··· 1172 { UNDERSCAN_AUTO, "auto" }, 1173 }; 1174 1175 static int radeon_modeset_create_props(struct radeon_device *rdev) 1176 { 1177 int sz; ··· 1227 "underscan vborder", 0, 128); 1228 if (!rdev->mode_info.underscan_vborder_property) 1229 return -ENOMEM; 1230 1231 return 0; 1232 }
··· 1172 { UNDERSCAN_AUTO, "auto" }, 1173 }; 1174 1175 + static struct drm_prop_enum_list radeon_audio_enum_list[] = 1176 + { { RADEON_AUDIO_DISABLE, "off" }, 1177 + { RADEON_AUDIO_ENABLE, "on" }, 1178 + { RADEON_AUDIO_AUTO, "auto" }, 1179 + }; 1180 + 1181 static int radeon_modeset_create_props(struct radeon_device *rdev) 1182 { 1183 int sz; ··· 1221 "underscan vborder", 0, 128); 1222 if (!rdev->mode_info.underscan_vborder_property) 1223 return -ENOMEM; 1224 + 1225 + sz = ARRAY_SIZE(radeon_audio_enum_list); 1226 + rdev->mode_info.audio_property = 1227 + drm_property_create_enum(rdev->ddev, 0, 1228 + "audio", 1229 + radeon_audio_enum_list, sz); 1230 1231 return 0; 1232 }
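radeon_display.c creates the new "audio" connector property as a DRM enum; drm_property_create_enum() takes the (value, name) pairs straight from radeon_audio_enum_list, and the connectors code above attaches it with RADEON_AUDIO_DISABLE as the initial value. A small sketch of the same value/name table idiom in plain C (no DRM dependency, names invented for illustration):

#include <stddef.h>
#include <stdio.h>

enum audio_mode { AUDIO_DISABLE, AUDIO_ENABLE, AUDIO_AUTO };

/* Value/name pairs, mirroring radeon_audio_enum_list above. */
static const struct {
	enum audio_mode val;
	const char *name;
} audio_enum[] = {
	{ AUDIO_DISABLE, "off" },
	{ AUDIO_ENABLE,  "on" },
	{ AUDIO_AUTO,    "auto" },
};

static const char *audio_name(enum audio_mode m)
{
	for (size_t i = 0; i < sizeof(audio_enum) / sizeof(audio_enum[0]); i++)
		if (audio_enum[i].val == m)
			return audio_enum[i].name;
	return "unknown";
}

int main(void)
{
	printf("default audio mode: %s\n", audio_name(AUDIO_DISABLE));
	return 0;
}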
+1 -1
drivers/gpu/drm/radeon/radeon_drv.c
··· 153 int radeon_testing = 0; 154 int radeon_connector_table = 0; 155 int radeon_tv = 1; 156 - int radeon_audio = 0; 157 int radeon_disp_priority = 0; 158 int radeon_hw_i2c = 0; 159 int radeon_pcie_gen2 = -1;
··· 153 int radeon_testing = 0; 154 int radeon_connector_table = 0; 155 int radeon_tv = 1; 156 + int radeon_audio = 1; 157 int radeon_disp_priority = 0; 158 int radeon_hw_i2c = 0; 159 int radeon_pcie_gen2 = -1;
+9
drivers/gpu/drm/radeon/radeon_mode.h
··· 247 struct drm_property *underscan_property; 248 struct drm_property *underscan_hborder_property; 249 struct drm_property *underscan_vborder_property; 250 /* hardcoded DFP edid from BIOS */ 251 struct edid *bios_hardcoded_edid; 252 int bios_hardcoded_edid_size; ··· 473 u8 cd_mux_state; 474 }; 475 476 struct radeon_connector { 477 struct drm_connector base; 478 uint32_t connector_id; ··· 497 struct radeon_hpd hpd; 498 struct radeon_router router; 499 struct radeon_i2c_chan *router_bus; 500 }; 501 502 struct radeon_framebuffer {
··· 247 struct drm_property *underscan_property; 248 struct drm_property *underscan_hborder_property; 249 struct drm_property *underscan_vborder_property; 250 + /* audio */ 251 + struct drm_property *audio_property; 252 /* hardcoded DFP edid from BIOS */ 253 struct edid *bios_hardcoded_edid; 254 int bios_hardcoded_edid_size; ··· 471 u8 cd_mux_state; 472 }; 473 474 + enum radeon_connector_audio { 475 + RADEON_AUDIO_DISABLE = 0, 476 + RADEON_AUDIO_ENABLE = 1, 477 + RADEON_AUDIO_AUTO = 2 478 + }; 479 + 480 struct radeon_connector { 481 struct drm_connector base; 482 uint32_t connector_id; ··· 489 struct radeon_hpd hpd; 490 struct radeon_router router; 491 struct radeon_i2c_chan *router_bus; 492 + enum radeon_connector_audio audio; 493 }; 494 495 struct radeon_framebuffer {
+57 -10
drivers/gpu/drm/radeon/radeon_pm.c
··· 67 68 void radeon_pm_acpi_event_handler(struct radeon_device *rdev) 69 { 70 - if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 71 if (rdev->pm.profile == PM_PROFILE_AUTO) { 72 mutex_lock(&rdev->pm.mutex); 73 radeon_pm_update_profile(rdev); ··· 342 struct device_attribute *attr, 343 char *buf) 344 { 345 - struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 346 struct radeon_device *rdev = ddev->dev_private; 347 int cp = rdev->pm.profile; 348 ··· 358 const char *buf, 359 size_t count) 360 { 361 - struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 362 struct radeon_device *rdev = ddev->dev_private; 363 364 mutex_lock(&rdev->pm.mutex); ··· 392 struct device_attribute *attr, 393 char *buf) 394 { 395 - struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 396 struct radeon_device *rdev = ddev->dev_private; 397 int pm = rdev->pm.pm_method; 398 ··· 406 const char *buf, 407 size_t count) 408 { 409 - struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 410 struct radeon_device *rdev = ddev->dev_private; 411 412 /* we don't support the legacy modes with dpm */ ··· 442 struct device_attribute *attr, 443 char *buf) 444 { 445 - struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 446 struct radeon_device *rdev = ddev->dev_private; 447 enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; 448 ··· 456 const char *buf, 457 size_t count) 458 { 459 - struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 460 struct radeon_device *rdev = ddev->dev_private; 461 462 mutex_lock(&rdev->pm.mutex); ··· 481 struct device_attribute *attr, 482 char *buf) 483 { 484 - struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 485 struct radeon_device *rdev = ddev->dev_private; 486 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; 487 ··· 495 const char *buf, 496 size_t count) 497 { 498 - struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 499 struct radeon_device *rdev = ddev->dev_private; 500 enum radeon_dpm_forced_level level; 501 int ret = 0; ··· 533 struct device_attribute *attr, 534 char *buf) 535 { 536 - struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 537 struct radeon_device *rdev = ddev->dev_private; 538 int temp; 539 ··· 541 temp = radeon_get_temperature(rdev); 542 else 543 temp = 0; 544 545 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 546 } ··· 570 } 571 572 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0); 573 static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0); 574 575 static struct attribute *hwmon_attributes[] = { 576 &sensor_dev_attr_temp1_input.dev_attr.attr, 577 &sensor_dev_attr_name.dev_attr.attr, 578 NULL 579 }; 580 581 static const struct attribute_group hwmon_attrgroup = { 582 .attrs = hwmon_attributes, 583 }; 584 585 static int radeon_hwmon_init(struct radeon_device *rdev)
··· 67 68 void radeon_pm_acpi_event_handler(struct radeon_device *rdev) 69 { 70 + if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { 71 + mutex_lock(&rdev->pm.mutex); 72 + if (power_supply_is_system_supplied() > 0) 73 + rdev->pm.dpm.ac_power = true; 74 + else 75 + rdev->pm.dpm.ac_power = false; 76 + if (rdev->asic->dpm.enable_bapm) 77 + radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power); 78 + mutex_unlock(&rdev->pm.mutex); 79 + } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 80 if (rdev->pm.profile == PM_PROFILE_AUTO) { 81 mutex_lock(&rdev->pm.mutex); 82 radeon_pm_update_profile(rdev); ··· 333 struct device_attribute *attr, 334 char *buf) 335 { 336 + struct drm_device *ddev = dev_get_drvdata(dev); 337 struct radeon_device *rdev = ddev->dev_private; 338 int cp = rdev->pm.profile; 339 ··· 349 const char *buf, 350 size_t count) 351 { 352 + struct drm_device *ddev = dev_get_drvdata(dev); 353 struct radeon_device *rdev = ddev->dev_private; 354 355 mutex_lock(&rdev->pm.mutex); ··· 383 struct device_attribute *attr, 384 char *buf) 385 { 386 + struct drm_device *ddev = dev_get_drvdata(dev); 387 struct radeon_device *rdev = ddev->dev_private; 388 int pm = rdev->pm.pm_method; 389 ··· 397 const char *buf, 398 size_t count) 399 { 400 + struct drm_device *ddev = dev_get_drvdata(dev); 401 struct radeon_device *rdev = ddev->dev_private; 402 403 /* we don't support the legacy modes with dpm */ ··· 433 struct device_attribute *attr, 434 char *buf) 435 { 436 + struct drm_device *ddev = dev_get_drvdata(dev); 437 struct radeon_device *rdev = ddev->dev_private; 438 enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; 439 ··· 447 const char *buf, 448 size_t count) 449 { 450 + struct drm_device *ddev = dev_get_drvdata(dev); 451 struct radeon_device *rdev = ddev->dev_private; 452 453 mutex_lock(&rdev->pm.mutex); ··· 472 struct device_attribute *attr, 473 char *buf) 474 { 475 + struct drm_device *ddev = dev_get_drvdata(dev); 476 struct radeon_device *rdev = ddev->dev_private; 477 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; 478 ··· 486 const char *buf, 487 size_t count) 488 { 489 + struct drm_device *ddev = dev_get_drvdata(dev); 490 struct radeon_device *rdev = ddev->dev_private; 491 enum radeon_dpm_forced_level level; 492 int ret = 0; ··· 524 struct device_attribute *attr, 525 char *buf) 526 { 527 + struct drm_device *ddev = dev_get_drvdata(dev); 528 struct radeon_device *rdev = ddev->dev_private; 529 int temp; 530 ··· 532 temp = radeon_get_temperature(rdev); 533 else 534 temp = 0; 535 + 536 + return snprintf(buf, PAGE_SIZE, "%d\n", temp); 537 + } 538 + 539 + static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev, 540 + struct device_attribute *attr, 541 + char *buf) 542 + { 543 + struct drm_device *ddev = dev_get_drvdata(dev); 544 + struct radeon_device *rdev = ddev->dev_private; 545 + int hyst = to_sensor_dev_attr(attr)->index; 546 + int temp; 547 + 548 + if (hyst) 549 + temp = rdev->pm.dpm.thermal.min_temp; 550 + else 551 + temp = rdev->pm.dpm.thermal.max_temp; 552 553 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 554 } ··· 544 } 545 546 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0); 547 + static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0); 548 + static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1); 549 static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0); 550 551 static struct attribute *hwmon_attributes[] = { 552 
&sensor_dev_attr_temp1_input.dev_attr.attr, 553 + &sensor_dev_attr_temp1_crit.dev_attr.attr, 554 + &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, 555 &sensor_dev_attr_name.dev_attr.attr, 556 NULL 557 }; 558 559 + static umode_t hwmon_attributes_visible(struct kobject *kobj, 560 + struct attribute *attr, int index) 561 + { 562 + struct device *dev = container_of(kobj, struct device, kobj); 563 + struct drm_device *ddev = dev_get_drvdata(dev); 564 + struct radeon_device *rdev = ddev->dev_private; 565 + 566 + /* Skip limit attributes if DPM is not enabled */ 567 + if (rdev->pm.pm_method != PM_METHOD_DPM && 568 + (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || 569 + attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr)) 570 + return 0; 571 + 572 + return attr->mode; 573 + } 574 + 575 static const struct attribute_group hwmon_attrgroup = { 576 .attrs = hwmon_attributes, 577 + .is_visible = hwmon_attributes_visible, 578 }; 579 580 static int radeon_hwmon_init(struct radeon_device *rdev)
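The radeon_pm.c hunk does three things: it routes ACPI AC/DC events into the new enable_bapm callback, switches the sysfs handlers to dev_get_drvdata() (the drvdata here is the drm_device, not a PCI lookup), and exposes the DPM thermal thresholds as temp1_crit/temp1_crit_hyst, hidden through the attribute group's .is_visible hook when DPM is not in use. A userspace analogue of that visibility filter (hypothetical fields; the kernel hook receives a kobject and an attribute index instead):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for a sysfs attribute table plus .is_visible. */
struct attr {
	const char *name;
	unsigned int mode;
	bool needs_dpm;
};

static const struct attr attrs[] = {
	{ "temp1_input",     0444, false },
	{ "temp1_crit",      0444, true  },
	{ "temp1_crit_hyst", 0444, true  },
};

/* Mirror of hwmon_attributes_visible(): hide the limit files
 * unless the driver is actually using DPM. */
static unsigned int is_visible(const struct attr *a, bool dpm_enabled)
{
	if (a->needs_dpm && !dpm_enabled)
		return 0;
	return a->mode;
}

int main(void)
{
	for (size_t i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
		printf("%-16s mode=%o\n", attrs[i].name,
		       is_visible(&attrs[i], false));
	return 0;
}

Returning 0 from is_visible makes sysfs skip the file entirely, so userspace never sees meaningless limit values on non-DPM configurations.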
+20 -7
drivers/gpu/drm/radeon/radeon_trace.h
··· 27 TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages) 28 ); 29 30 DECLARE_EVENT_CLASS(radeon_fence_request, 31 32 TP_PROTO(struct drm_device *dev, u32 seqno), ··· 67 ); 68 69 DEFINE_EVENT(radeon_fence_request, radeon_fence_emit, 70 - 71 - TP_PROTO(struct drm_device *dev, u32 seqno), 72 - 73 - TP_ARGS(dev, seqno) 74 - ); 75 - 76 - DEFINE_EVENT(radeon_fence_request, radeon_fence_retire, 77 78 TP_PROTO(struct drm_device *dev, u32 seqno), 79
··· 27 TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages) 28 ); 29 30 + TRACE_EVENT(radeon_cs, 31 + TP_PROTO(struct radeon_cs_parser *p), 32 + TP_ARGS(p), 33 + TP_STRUCT__entry( 34 + __field(u32, ring) 35 + __field(u32, dw) 36 + __field(u32, fences) 37 + ), 38 + 39 + TP_fast_assign( 40 + __entry->ring = p->ring; 41 + __entry->dw = p->chunks[p->chunk_ib_idx].length_dw; 42 + __entry->fences = radeon_fence_count_emitted( 43 + p->rdev, p->ring); 44 + ), 45 + TP_printk("ring=%u, dw=%u, fences=%u", 46 + __entry->ring, __entry->dw, 47 + __entry->fences) 48 + ); 49 + 50 DECLARE_EVENT_CLASS(radeon_fence_request, 51 52 TP_PROTO(struct drm_device *dev, u32 seqno), ··· 47 ); 48 49 DEFINE_EVENT(radeon_fence_request, radeon_fence_emit, 50 51 TP_PROTO(struct drm_device *dev, u32 seqno), 52
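The stale radeon_fence_retire event is dropped and a radeon_cs event is added; its fast-assign stage captures the target ring, the IB length in dwords, and how many fences are still outstanding at submission time, and radeon_cs.c above fires it once per command submission. Assuming debugfs is mounted at the usual location and the header's TRACE_SYSTEM is radeon (as in the mainline file), the event can be watched with:

  echo 1 > /sys/kernel/debug/tracing/events/radeon/radeon_cs/enable
  cat /sys/kernel/debug/tracing/trace_pipe

Each hit is printed in the TP_printk format above, e.g. "radeon_cs: ring=0, dw=64, fences=1" (values illustrative).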
+7
drivers/gpu/drm/radeon/rs400.c
··· 274 275 uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) 276 { 277 uint32_t r; 278 279 WREG32(RS480_NB_MC_INDEX, reg & 0xff); 280 r = RREG32(RS480_NB_MC_DATA); 281 WREG32(RS480_NB_MC_INDEX, 0xff); 282 return r; 283 } 284 285 void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 286 { 287 WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN); 288 WREG32(RS480_NB_MC_DATA, (v)); 289 WREG32(RS480_NB_MC_INDEX, 0xff); 290 } 291 292 #if defined(CONFIG_DEBUG_FS)
··· 274 275 uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) 276 { 277 + unsigned long flags; 278 uint32_t r; 279 280 + spin_lock_irqsave(&rdev->mc_idx_lock, flags); 281 WREG32(RS480_NB_MC_INDEX, reg & 0xff); 282 r = RREG32(RS480_NB_MC_DATA); 283 WREG32(RS480_NB_MC_INDEX, 0xff); 284 + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); 285 return r; 286 } 287 288 void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 289 { 290 + unsigned long flags; 291 + 292 + spin_lock_irqsave(&rdev->mc_idx_lock, flags); 293 WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN); 294 WREG32(RS480_NB_MC_DATA, (v)); 295 WREG32(RS480_NB_MC_INDEX, 0xff); 296 + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); 297 } 298 299 #if defined(CONFIG_DEBUG_FS)
+11 -1
drivers/gpu/drm/radeon/rs600.c
··· 847 848 uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) 849 { 850 WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | 851 S_000070_MC_IND_CITF_ARB0(1)); 852 - return RREG32(R_000074_MC_IND_DATA); 853 } 854 855 void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 856 { 857 WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | 858 S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1)); 859 WREG32(R_000074_MC_IND_DATA, v); 860 } 861 862 static void rs600_debugfs(struct radeon_device *rdev)
··· 847 848 uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) 849 { 850 + unsigned long flags; 851 + u32 r; 852 + 853 + spin_lock_irqsave(&rdev->mc_idx_lock, flags); 854 WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | 855 S_000070_MC_IND_CITF_ARB0(1)); 856 + r = RREG32(R_000074_MC_IND_DATA); 857 + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); 858 + return r; 859 } 860 861 void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 862 { 863 + unsigned long flags; 864 + 865 + spin_lock_irqsave(&rdev->mc_idx_lock, flags); 866 WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | 867 S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1)); 868 WREG32(R_000074_MC_IND_DATA, v); 869 + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); 870 } 871 872 static void rs600_debugfs(struct radeon_device *rdev)
+7
drivers/gpu/drm/radeon/rs690.c
··· 631 632 uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg) 633 { 634 uint32_t r; 635 636 WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg)); 637 r = RREG32(R_00007C_MC_DATA); 638 WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR); 639 return r; 640 } 641 642 void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 643 { 644 WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) | 645 S_000078_MC_IND_WR_EN(1)); 646 WREG32(R_00007C_MC_DATA, v); 647 WREG32(R_000078_MC_INDEX, 0x7F); 648 } 649 650 static void rs690_mc_program(struct radeon_device *rdev)
··· 631 632 uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg) 633 { 634 + unsigned long flags; 635 uint32_t r; 636 637 + spin_lock_irqsave(&rdev->mc_idx_lock, flags); 638 WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg)); 639 r = RREG32(R_00007C_MC_DATA); 640 WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR); 641 + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); 642 return r; 643 } 644 645 void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 646 { 647 + unsigned long flags; 648 + 649 + spin_lock_irqsave(&rdev->mc_idx_lock, flags); 650 WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) | 651 S_000078_MC_IND_WR_EN(1)); 652 WREG32(R_00007C_MC_DATA, v); 653 WREG32(R_000078_MC_INDEX, 0x7F); 654 + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); 655 } 656 657 static void rs690_mc_program(struct radeon_device *rdev)
+81 -18
drivers/gpu/drm/radeon/rs780_dpm.c
··· 376 WREG32_P(CG_INTGFX_MISC, 0, ~0xFFF00000); 377 } 378 379 - static void rs780_force_voltage_to_high(struct radeon_device *rdev) 380 { 381 - struct igp_power_info *pi = rs780_get_pi(rdev); 382 struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps); 383 384 if ((current_state->max_voltage == RS780_VDDC_LEVEL_HIGH) && ··· 389 udelay(1); 390 391 WREG32_P(FVTHROT_PWM_CTRL_REG0, 392 - STARTING_PWM_HIGHTIME(pi->max_voltage), 393 ~STARTING_PWM_HIGHTIME_MASK); 394 395 WREG32_P(FVTHROT_PWM_CTRL_REG0, ··· 399 ~RANGE_PWM_FEEDBACK_DIV_EN); 400 401 udelay(1); 402 403 WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL); 404 } ··· 451 if (ret) 452 return ret; 453 454 - WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL); 455 - 456 - WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(max_dividers.fb_div), 457 - ~FORCED_FEEDBACK_DIV_MASK); 458 - WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(max_dividers.fb_div), 459 - ~STARTING_FEEDBACK_DIV_MASK); 460 - WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV); 461 - 462 - udelay(100); 463 - 464 - WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL); 465 466 if (max_dividers.fb_div > min_dividers.fb_div) { 467 WREG32_P(FVTHROT_FBDIV_REG0, ··· 658 rs780_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); 659 660 if (pi->voltage_control) { 661 - rs780_force_voltage_to_high(rdev); 662 mdelay(5); 663 } 664 ··· 726 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { 727 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); 728 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); 729 - } else if (r600_is_uvd_state(rps->class, rps->class2)) { 730 - rps->vclk = RS780_DEFAULT_VCLK_FREQ; 731 - rps->dclk = RS780_DEFAULT_DCLK_FREQ; 732 } else { 733 rps->vclk = 0; 734 rps->dclk = 0; 735 } 736 737 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) ··· 998 else 999 seq_printf(m, "power level 1 sclk: %u vddc_index: %d\n", 1000 ps->sclk_high, ps->max_voltage); 1001 }
··· 376 WREG32_P(CG_INTGFX_MISC, 0, ~0xFFF00000); 377 } 378 379 + static void rs780_force_voltage(struct radeon_device *rdev, u16 voltage) 380 { 381 struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps); 382 383 if ((current_state->max_voltage == RS780_VDDC_LEVEL_HIGH) && ··· 390 udelay(1); 391 392 WREG32_P(FVTHROT_PWM_CTRL_REG0, 393 + STARTING_PWM_HIGHTIME(voltage), 394 ~STARTING_PWM_HIGHTIME_MASK); 395 396 WREG32_P(FVTHROT_PWM_CTRL_REG0, ··· 400 ~RANGE_PWM_FEEDBACK_DIV_EN); 401 402 udelay(1); 403 + 404 + WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL); 405 + } 406 + 407 + static void rs780_force_fbdiv(struct radeon_device *rdev, u32 fb_div) 408 + { 409 + struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps); 410 + 411 + if (current_state->sclk_low == current_state->sclk_high) 412 + return; 413 + 414 + WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL); 415 + 416 + WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(fb_div), 417 + ~FORCED_FEEDBACK_DIV_MASK); 418 + WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(fb_div), 419 + ~STARTING_FEEDBACK_DIV_MASK); 420 + WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV); 421 + 422 + udelay(100); 423 424 WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL); 425 } ··· 432 if (ret) 433 return ret; 434 435 + rs780_force_fbdiv(rdev, max_dividers.fb_div); 436 437 if (max_dividers.fb_div > min_dividers.fb_div) { 438 WREG32_P(FVTHROT_FBDIV_REG0, ··· 649 rs780_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); 650 651 if (pi->voltage_control) { 652 + rs780_force_voltage(rdev, pi->max_voltage); 653 mdelay(5); 654 } 655 ··· 717 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { 718 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); 719 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); 720 } else { 721 rps->vclk = 0; 722 rps->dclk = 0; 723 + } 724 + 725 + if (r600_is_uvd_state(rps->class, rps->class2)) { 726 + if ((rps->vclk == 0) || (rps->dclk == 0)) { 727 + rps->vclk = RS780_DEFAULT_VCLK_FREQ; 728 + rps->dclk = RS780_DEFAULT_DCLK_FREQ; 729 + } 730 } 731 732 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) ··· 985 else 986 seq_printf(m, "power level 1 sclk: %u vddc_index: %d\n", 987 ps->sclk_high, ps->max_voltage); 988 + } 989 + 990 + int rs780_dpm_force_performance_level(struct radeon_device *rdev, 991 + enum radeon_dpm_forced_level level) 992 + { 993 + struct igp_power_info *pi = rs780_get_pi(rdev); 994 + struct radeon_ps *rps = rdev->pm.dpm.current_ps; 995 + struct igp_ps *ps = rs780_get_ps(rps); 996 + struct atom_clock_dividers dividers; 997 + int ret; 998 + 999 + rs780_clk_scaling_enable(rdev, false); 1000 + rs780_voltage_scaling_enable(rdev, false); 1001 + 1002 + if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { 1003 + if (pi->voltage_control) 1004 + rs780_force_voltage(rdev, pi->max_voltage); 1005 + 1006 + ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, 1007 + ps->sclk_high, false, &dividers); 1008 + if (ret) 1009 + return ret; 1010 + 1011 + rs780_force_fbdiv(rdev, dividers.fb_div); 1012 + } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) { 1013 + ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, 1014 + ps->sclk_low, false, &dividers); 1015 + if (ret) 1016 + return ret; 1017 + 1018 + rs780_force_fbdiv(rdev, dividers.fb_div); 1019 + 1020 + if (pi->voltage_control) 1021 + rs780_force_voltage(rdev, pi->min_voltage); 1022 + } else { 1023 + if (pi->voltage_control) 1024 + rs780_force_voltage(rdev, pi->max_voltage); 1025 + 1026 + 
WREG32_P(FVTHROT_FBDIV_REG1, 0, ~FORCE_FEEDBACK_DIV); 1027 + rs780_clk_scaling_enable(rdev, true); 1028 + 1029 + if (pi->voltage_control) { 1030 + rs780_voltage_scaling_enable(rdev, true); 1031 + rs780_enable_voltage_scaling(rdev, rps); 1032 + } 1033 + } 1034 + 1035 + rdev->pm.dpm.forced_level = level; 1036 + 1037 + return 0; 1038 }
+8
drivers/gpu/drm/radeon/rv515.c
··· 209 210 uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg) 211 { 212 uint32_t r; 213 214 WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff)); 215 r = RREG32(MC_IND_DATA); 216 WREG32(MC_IND_INDEX, 0); 217 return r; 218 } 219 220 void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 221 { 222 WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff)); 223 WREG32(MC_IND_DATA, (v)); 224 WREG32(MC_IND_INDEX, 0); 225 } 226 227 #if defined(CONFIG_DEBUG_FS)
··· 209 210 uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg) 211 { 212 + unsigned long flags; 213 uint32_t r; 214 215 + spin_lock_irqsave(&rdev->mc_idx_lock, flags); 216 WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff)); 217 r = RREG32(MC_IND_DATA); 218 WREG32(MC_IND_INDEX, 0); 219 + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); 220 + 221 return r; 222 } 223 224 void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 225 { 226 + unsigned long flags; 227 + 228 + spin_lock_irqsave(&rdev->mc_idx_lock, flags); 229 WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff)); 230 WREG32(MC_IND_DATA, (v)); 231 WREG32(MC_IND_INDEX, 0); 232 + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); 233 } 234 235 #if defined(CONFIG_DEBUG_FS)
+7 -3
drivers/gpu/drm/radeon/rv770_dpm.c
··· 2147 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { 2148 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); 2149 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); 2150 - } else if (r600_is_uvd_state(rps->class, rps->class2)) { 2151 - rps->vclk = RV770_DEFAULT_VCLK_FREQ; 2152 - rps->dclk = RV770_DEFAULT_DCLK_FREQ; 2153 } else { 2154 rps->vclk = 0; 2155 rps->dclk = 0; 2156 } 2157 2158 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
··· 2147 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { 2148 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); 2149 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); 2150 } else { 2151 rps->vclk = 0; 2152 rps->dclk = 0; 2153 + } 2154 + 2155 + if (r600_is_uvd_state(rps->class, rps->class2)) { 2156 + if ((rps->vclk == 0) || (rps->dclk == 0)) { 2157 + rps->vclk = RV770_DEFAULT_VCLK_FREQ; 2158 + rps->dclk = RV770_DEFAULT_DCLK_FREQ; 2159 + } 2160 } 2161 2162 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
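Same UVD-clock fallback fix as in rs780_dpm.c above: the power-table values are parsed first, and the hardcoded defaults are applied to a UVD state only when the parse leaves vclk or dclk empty, instead of unconditionally overriding what newer tables report. The resulting decision is small enough to show standalone (the default values below are stand-ins, not the real RV770_DEFAULT_*_FREQ numbers):

#include <stdint.h>
#include <stdio.h>

#define DEFAULT_VCLK 53300	/* stand-in for RV770_DEFAULT_VCLK_FREQ */
#define DEFAULT_DCLK 40000	/* stand-in for RV770_DEFAULT_DCLK_FREQ */

struct ps { uint32_t vclk, dclk; int is_uvd; };

/* The fixed ordering: trust whatever the power table provided, and only
 * substitute defaults for a UVD state whose clocks came back empty. */
static void fixup_uvd_clocks(struct ps *p)
{
	if (p->is_uvd && (p->vclk == 0 || p->dclk == 0)) {
		p->vclk = DEFAULT_VCLK;
		p->dclk = DEFAULT_DCLK;
	}
}

int main(void)
{
	struct ps empty  = { .vclk = 0,     .dclk = 0,     .is_uvd = 1 };
	struct ps parsed = { .vclk = 60000, .dclk = 45000, .is_uvd = 1 };

	fixup_uvd_clocks(&empty);
	fixup_uvd_clocks(&parsed);	/* untouched: real values win */
	printf("empty -> %u/%u, parsed -> %u/%u\n",
	       (unsigned)empty.vclk, (unsigned)empty.dclk,
	       (unsigned)parsed.vclk, (unsigned)parsed.dclk);
	return 0;
}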
+27 -17
drivers/gpu/drm/radeon/rv770_smc.c
··· 274 0x08, 0x72, 0x08, 0x72 275 }; 276 277 - int rv770_set_smc_sram_address(struct radeon_device *rdev, 278 - u16 smc_address, u16 limit) 279 { 280 u32 addr; 281 ··· 296 u16 smc_start_address, const u8 *src, 297 u16 byte_count, u16 limit) 298 { 299 u32 data, original_data, extra_shift; 300 u16 addr; 301 - int ret; 302 303 if (smc_start_address & 3) 304 return -EINVAL; ··· 308 309 addr = smc_start_address; 310 311 while (byte_count >= 4) { 312 /* SMC address space is BE */ 313 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; 314 315 ret = rv770_set_smc_sram_address(rdev, addr, limit); 316 if (ret) 317 - return ret; 318 319 WREG32(SMC_SRAM_DATA, data); 320 ··· 330 331 ret = rv770_set_smc_sram_address(rdev, addr, limit); 332 if (ret) 333 - return ret; 334 335 original_data = RREG32(SMC_SRAM_DATA); 336 ··· 348 349 ret = rv770_set_smc_sram_address(rdev, addr, limit); 350 if (ret) 351 - return ret; 352 353 WREG32(SMC_SRAM_DATA, data); 354 } 355 356 - return 0; 357 } 358 359 static int rv770_program_interrupt_vectors(struct radeon_device *rdev, ··· 466 467 static void rv770_clear_smc_sram(struct radeon_device *rdev, u16 limit) 468 { 469 u16 i; 470 471 for (i = 0; i < limit; i += 4) { 472 rv770_set_smc_sram_address(rdev, i, limit); 473 WREG32(SMC_SRAM_DATA, 0); 474 } 475 } 476 477 int rv770_load_smc_ucode(struct radeon_device *rdev, ··· 603 int rv770_read_smc_sram_dword(struct radeon_device *rdev, 604 u16 smc_address, u32 *value, u16 limit) 605 { 606 int ret; 607 608 ret = rv770_set_smc_sram_address(rdev, smc_address, limit); 609 - if (ret) 610 - return ret; 611 612 - *value = RREG32(SMC_SRAM_DATA); 613 - 614 - return 0; 615 } 616 617 int rv770_write_smc_sram_dword(struct radeon_device *rdev, 618 u16 smc_address, u32 value, u16 limit) 619 { 620 int ret; 621 622 ret = rv770_set_smc_sram_address(rdev, smc_address, limit); 623 - if (ret) 624 - return ret; 625 626 - WREG32(SMC_SRAM_DATA, value); 627 - 628 - return 0; 629 }
··· 274 0x08, 0x72, 0x08, 0x72 275 }; 276 277 + static int rv770_set_smc_sram_address(struct radeon_device *rdev, 278 + u16 smc_address, u16 limit) 279 { 280 u32 addr; 281 ··· 296 u16 smc_start_address, const u8 *src, 297 u16 byte_count, u16 limit) 298 { 299 + unsigned long flags; 300 u32 data, original_data, extra_shift; 301 u16 addr; 302 + int ret = 0; 303 304 if (smc_start_address & 3) 305 return -EINVAL; ··· 307 308 addr = smc_start_address; 309 310 + spin_lock_irqsave(&rdev->smc_idx_lock, flags); 311 while (byte_count >= 4) { 312 /* SMC address space is BE */ 313 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; 314 315 ret = rv770_set_smc_sram_address(rdev, addr, limit); 316 if (ret) 317 + goto done; 318 319 WREG32(SMC_SRAM_DATA, data); 320 ··· 328 329 ret = rv770_set_smc_sram_address(rdev, addr, limit); 330 if (ret) 331 + goto done; 332 333 original_data = RREG32(SMC_SRAM_DATA); 334 ··· 346 347 ret = rv770_set_smc_sram_address(rdev, addr, limit); 348 if (ret) 349 + goto done; 350 351 WREG32(SMC_SRAM_DATA, data); 352 } 353 354 + done: 355 + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); 356 + 357 + return ret; 358 } 359 360 static int rv770_program_interrupt_vectors(struct radeon_device *rdev, ··· 461 462 static void rv770_clear_smc_sram(struct radeon_device *rdev, u16 limit) 463 { 464 + unsigned long flags; 465 u16 i; 466 467 + spin_lock_irqsave(&rdev->smc_idx_lock, flags); 468 for (i = 0; i < limit; i += 4) { 469 rv770_set_smc_sram_address(rdev, i, limit); 470 WREG32(SMC_SRAM_DATA, 0); 471 } 472 + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); 473 } 474 475 int rv770_load_smc_ucode(struct radeon_device *rdev, ··· 595 int rv770_read_smc_sram_dword(struct radeon_device *rdev, 596 u16 smc_address, u32 *value, u16 limit) 597 { 598 + unsigned long flags; 599 int ret; 600 601 + spin_lock_irqsave(&rdev->smc_idx_lock, flags); 602 ret = rv770_set_smc_sram_address(rdev, smc_address, limit); 603 + if (ret == 0) 604 + *value = RREG32(SMC_SRAM_DATA); 605 + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); 606 607 + return ret; 608 } 609 610 int rv770_write_smc_sram_dword(struct radeon_device *rdev, 611 u16 smc_address, u32 value, u16 limit) 612 { 613 + unsigned long flags; 614 int ret; 615 616 + spin_lock_irqsave(&rdev->smc_idx_lock, flags); 617 ret = rv770_set_smc_sram_address(rdev, smc_address, limit); 618 + if (ret == 0) 619 + WREG32(SMC_SRAM_DATA, value); 620 + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); 621 622 + return ret; 623 }
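rv770_smc.c now holds smc_idx_lock across entire multi-register SMC transactions and funnels every early failure through a single done: label, so the lock is released exactly once on all paths. A runnable sketch of that lock-plus-goto-cleanup shape (a pthread mutex standing in for spin_lock_irqsave):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;

static int set_address(unsigned addr, unsigned limit)
{
	return addr >= limit ? -EINVAL : 0;
}

/* Single exit point: every failure jumps to done so the lock is
 * always dropped exactly once, as in rv770_copy_bytes_to_smc(). */
static int copy_words(const unsigned *src, unsigned n, unsigned limit)
{
	int ret = 0;

	pthread_mutex_lock(&idx_lock);
	for (unsigned i = 0; i < n; i++) {
		ret = set_address(i, limit);
		if (ret)
			goto done;
		/* ...write src[i] through the data register here... */
	}
done:
	pthread_mutex_unlock(&idx_lock);
	return ret;
}

int main(void)
{
	unsigned buf[4] = { 1, 2, 3, 4 };

	printf("ok=%d, overflow=%d\n",
	       copy_words(buf, 4, 8), copy_words(buf, 4, 2));
	return 0;
}

The same restructuring lands in si_smc.c below, and rv770_set_smc_sram_address() becomes static since callers must now go through the locked wrappers, hence the prototype removal from rv770_smc.h.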
-2
drivers/gpu/drm/radeon/rv770_smc.h
··· 187 #define RV770_SMC_SOFT_REGISTER_uvd_enabled 0x9C 188 #define RV770_SMC_SOFT_REGISTER_is_asic_lombok 0xA0 189 190 - int rv770_set_smc_sram_address(struct radeon_device *rdev, 191 - u16 smc_address, u16 limit); 192 int rv770_copy_bytes_to_smc(struct radeon_device *rdev, 193 u16 smc_start_address, const u8 *src, 194 u16 byte_count, u16 limit);
··· 187 #define RV770_SMC_SOFT_REGISTER_uvd_enabled 0x9C 188 #define RV770_SMC_SOFT_REGISTER_is_asic_lombok 0xA0 189 190 int rv770_copy_bytes_to_smc(struct radeon_device *rdev, 191 u16 smc_start_address, const u8 *src, 192 u16 byte_count, u16 limit);
+17 -4
drivers/gpu/drm/radeon/si.c
··· 83 uint64_t pe, 84 uint64_t addr, unsigned count, 85 uint32_t incr, uint32_t flags); 86 87 static const u32 verde_rlc_save_restore_register_list[] = 88 { ··· 3388 u32 rb_bufsz; 3389 int r; 3390 3391 WREG32(CP_SEM_WAIT_TIMER, 0x0); 3392 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0); 3393 ··· 3504 if (r) { 3505 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; 3506 } 3507 3508 return 0; 3509 } ··· 4894 { 4895 u32 tmp; 4896 4897 - if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) { 4898 tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10); 4899 WREG32(RLC_TTOP_D, tmp); 4900 ··· 5256 u32 block, bool enable) 5257 { 5258 if (block & RADEON_CG_BLOCK_GFX) { 5259 /* order matters! */ 5260 if (enable) { 5261 si_enable_mgcg(rdev, true); ··· 5265 si_enable_cgcg(rdev, false); 5266 si_enable_mgcg(rdev, false); 5267 } 5268 } 5269 5270 if (block & RADEON_CG_BLOCK_MC) { ··· 5416 si_init_dma_pg(rdev); 5417 } 5418 si_init_ao_cu_mask(rdev); 5419 - if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) { 5420 si_init_gfx_cgpg(rdev); 5421 } 5422 si_enable_dma_pg(rdev, true); ··· 5568 { 5569 u32 tmp; 5570 5571 - WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 5572 WREG32(CP_INT_CNTL_RING1, 0); 5573 WREG32(CP_INT_CNTL_RING2, 0); 5574 tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; ··· 5695 5696 int si_irq_set(struct radeon_device *rdev) 5697 { 5698 - u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; 5699 u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0; 5700 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; 5701 u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0; ··· 5715 si_disable_interrupt_state(rdev); 5716 return 0; 5717 } 5718 5719 if (!ASIC_IS_NODCE(rdev)) { 5720 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
··· 83 uint64_t pe, 84 uint64_t addr, unsigned count, 85 uint32_t incr, uint32_t flags); 86 + static void si_enable_gui_idle_interrupt(struct radeon_device *rdev, 87 + bool enable); 88 89 static const u32 verde_rlc_save_restore_register_list[] = 90 { ··· 3386 u32 rb_bufsz; 3387 int r; 3388 3389 + si_enable_gui_idle_interrupt(rdev, false); 3390 + 3391 WREG32(CP_SEM_WAIT_TIMER, 0x0); 3392 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0); 3393 ··· 3500 if (r) { 3501 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; 3502 } 3503 + 3504 + si_enable_gui_idle_interrupt(rdev, true); 3505 3506 return 0; 3507 } ··· 4888 { 4889 u32 tmp; 4890 4891 + if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) { 4892 tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10); 4893 WREG32(RLC_TTOP_D, tmp); 4894 ··· 5250 u32 block, bool enable) 5251 { 5252 if (block & RADEON_CG_BLOCK_GFX) { 5253 + si_enable_gui_idle_interrupt(rdev, false); 5254 /* order matters! */ 5255 if (enable) { 5256 si_enable_mgcg(rdev, true); ··· 5258 si_enable_cgcg(rdev, false); 5259 si_enable_mgcg(rdev, false); 5260 } 5261 + si_enable_gui_idle_interrupt(rdev, true); 5262 } 5263 5264 if (block & RADEON_CG_BLOCK_MC) { ··· 5408 si_init_dma_pg(rdev); 5409 } 5410 si_init_ao_cu_mask(rdev); 5411 + if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) { 5412 si_init_gfx_cgpg(rdev); 5413 } 5414 si_enable_dma_pg(rdev, true); ··· 5560 { 5561 u32 tmp; 5562 5563 + tmp = RREG32(CP_INT_CNTL_RING0) & 5564 + (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 5565 + WREG32(CP_INT_CNTL_RING0, tmp); 5566 WREG32(CP_INT_CNTL_RING1, 0); 5567 WREG32(CP_INT_CNTL_RING2, 0); 5568 tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; ··· 5685 5686 int si_irq_set(struct radeon_device *rdev) 5687 { 5688 + u32 cp_int_cntl; 5689 u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0; 5690 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; 5691 u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0; ··· 5705 si_disable_interrupt_state(rdev); 5706 return 0; 5707 } 5708 + 5709 + cp_int_cntl = RREG32(CP_INT_CNTL_RING0) & 5710 + (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 5711 5712 if (!ASIC_IS_NODCE(rdev)) { 5713 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
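Besides renaming the PG flag to RADEON_PG_SUPPORT_GFX_PG and gating the GUI-idle interrupt around CP setup and clockgating transitions, si.c stops writing CP_INT_CNTL_RING0 with a constant: both si_disable_interrupt_state() and si_irq_set() now read the register and preserve the CNTX_BUSY/CNTX_EMPTY enables that the clockgating path manages. A tiny read-modify-write illustration (bit positions invented for the demo):

#include <stdint.h>
#include <stdio.h>

#define CNTX_BUSY_INT_ENABLE  (1u << 1)	/* illustrative bit positions */
#define CNTX_EMPTY_INT_ENABLE (1u << 0)
#define TIME_STAMP_INT_ENABLE (1u << 26)

static uint32_t cp_int_cntl_ring0 = CNTX_BUSY_INT_ENABLE | (1u << 5);

/* Keep only the context-busy/empty enables from the current value,
 * then OR in what this path wants, as si_irq_set() now does. */
static void irq_set(int want_timestamp)
{
	uint32_t v = cp_int_cntl_ring0 &
		     (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);

	if (want_timestamp)
		v |= TIME_STAMP_INT_ENABLE;
	cp_int_cntl_ring0 = v;
}

int main(void)
{
	irq_set(1);
	printf("CP_INT_CNTL_RING0 = 0x%08x\n", (unsigned)cp_int_cntl_ring0);
	return 0;
}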
+28 -15
drivers/gpu/drm/radeon/si_smc.c
··· 29 #include "ppsmc.h" 30 #include "radeon_ucode.h" 31 32 - int si_set_smc_sram_address(struct radeon_device *rdev, 33 - u32 smc_address, u32 limit) 34 { 35 if (smc_address & 3) 36 return -EINVAL; ··· 47 u32 smc_start_address, 48 const u8 *src, u32 byte_count, u32 limit) 49 { 50 - int ret; 51 u32 data, original_data, addr, extra_shift; 52 53 if (smc_start_address & 3) ··· 58 59 addr = smc_start_address; 60 61 while (byte_count >= 4) { 62 /* SMC address space is BE */ 63 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; 64 65 ret = si_set_smc_sram_address(rdev, addr, limit); 66 if (ret) 67 - return ret; 68 69 WREG32(SMC_IND_DATA_0, data); 70 ··· 80 81 ret = si_set_smc_sram_address(rdev, addr, limit); 82 if (ret) 83 - return ret; 84 85 original_data = RREG32(SMC_IND_DATA_0); 86 ··· 98 99 ret = si_set_smc_sram_address(rdev, addr, limit); 100 if (ret) 101 - return ret; 102 103 WREG32(SMC_IND_DATA_0, data); 104 } 105 - return 0; 106 } 107 108 void si_start_smc(struct radeon_device *rdev) ··· 209 210 int si_load_smc_ucode(struct radeon_device *rdev, u32 limit) 211 { 212 u32 ucode_start_address; 213 u32 ucode_size; 214 const u8 *src; ··· 248 return -EINVAL; 249 250 src = (const u8 *)rdev->smc_fw->data; 251 WREG32(SMC_IND_INDEX_0, ucode_start_address); 252 WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0); 253 while (ucode_size >= 4) { ··· 261 ucode_size -= 4; 262 } 263 WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); 264 265 return 0; 266 } ··· 269 int si_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, 270 u32 *value, u32 limit) 271 { 272 int ret; 273 274 ret = si_set_smc_sram_address(rdev, smc_address, limit); 275 - if (ret) 276 - return ret; 277 278 - *value = RREG32(SMC_IND_DATA_0); 279 - return 0; 280 } 281 282 int si_write_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, 283 u32 value, u32 limit) 284 { 285 int ret; 286 287 ret = si_set_smc_sram_address(rdev, smc_address, limit); 288 - if (ret) 289 - return ret; 290 291 - WREG32(SMC_IND_DATA_0, value); 292 - return 0; 293 }
··· 29 #include "ppsmc.h" 30 #include "radeon_ucode.h" 31 32 + static int si_set_smc_sram_address(struct radeon_device *rdev, 33 + u32 smc_address, u32 limit) 34 { 35 if (smc_address & 3) 36 return -EINVAL; ··· 47 u32 smc_start_address, 48 const u8 *src, u32 byte_count, u32 limit) 49 { 50 + unsigned long flags; 51 + int ret = 0; 52 u32 data, original_data, addr, extra_shift; 53 54 if (smc_start_address & 3) ··· 57 58 addr = smc_start_address; 59 60 + spin_lock_irqsave(&rdev->smc_idx_lock, flags); 61 while (byte_count >= 4) { 62 /* SMC address space is BE */ 63 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; 64 65 ret = si_set_smc_sram_address(rdev, addr, limit); 66 if (ret) 67 + goto done; 68 69 WREG32(SMC_IND_DATA_0, data); 70 ··· 78 79 ret = si_set_smc_sram_address(rdev, addr, limit); 80 if (ret) 81 + goto done; 82 83 original_data = RREG32(SMC_IND_DATA_0); 84 ··· 96 97 ret = si_set_smc_sram_address(rdev, addr, limit); 98 if (ret) 99 + goto done; 100 101 WREG32(SMC_IND_DATA_0, data); 102 } 103 + 104 + done: 105 + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); 106 + 107 + return ret; 108 } 109 110 void si_start_smc(struct radeon_device *rdev) ··· 203 204 int si_load_smc_ucode(struct radeon_device *rdev, u32 limit) 205 { 206 + unsigned long flags; 207 u32 ucode_start_address; 208 u32 ucode_size; 209 const u8 *src; ··· 241 return -EINVAL; 242 243 src = (const u8 *)rdev->smc_fw->data; 244 + spin_lock_irqsave(&rdev->smc_idx_lock, flags); 245 WREG32(SMC_IND_INDEX_0, ucode_start_address); 246 WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0); 247 while (ucode_size >= 4) { ··· 253 ucode_size -= 4; 254 } 255 WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); 256 + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); 257 258 return 0; 259 } ··· 260 int si_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, 261 u32 *value, u32 limit) 262 { 263 + unsigned long flags; 264 int ret; 265 266 + spin_lock_irqsave(&rdev->smc_idx_lock, flags); 267 ret = si_set_smc_sram_address(rdev, smc_address, limit); 268 + if (ret == 0) 269 + *value = RREG32(SMC_IND_DATA_0); 270 + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); 271 272 + return ret; 273 } 274 275 int si_write_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, 276 u32 value, u32 limit) 277 { 278 + unsigned long flags; 279 int ret; 280 281 + spin_lock_irqsave(&rdev->smc_idx_lock, flags); 282 ret = si_set_smc_sram_address(rdev, smc_address, limit); 283 + if (ret == 0) 284 + WREG32(SMC_IND_DATA_0, value); 285 + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); 286 287 + return ret; 288 }
+16
drivers/gpu/drm/radeon/trinity_dpm.c
··· 1068 pi->requested_rps.ps_priv = &pi->requested_ps; 1069 } 1070 1071 int trinity_dpm_enable(struct radeon_device *rdev) 1072 { 1073 struct trinity_power_info *pi = trinity_get_pi(rdev); ··· 1102 trinity_program_sclk_dpm(rdev); 1103 trinity_start_dpm(rdev); 1104 trinity_wait_for_dpm_enabled(rdev); 1105 trinity_release_mutex(rdev); 1106 1107 if (rdev->irq.installed && ··· 1128 trinity_release_mutex(rdev); 1129 return; 1130 } 1131 trinity_disable_clock_power_gating(rdev); 1132 sumo_clear_vc(rdev); 1133 trinity_wait_for_level_0(rdev); ··· 1225 1226 trinity_acquire_mutex(rdev); 1227 if (pi->enable_dpm) { 1228 trinity_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); 1229 trinity_enable_power_level_0(rdev); 1230 trinity_force_level_0(rdev); ··· 1869 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) 1870 pi->at[i] = TRINITY_AT_DFLT; 1871 1872 pi->enable_nbps_policy = true; 1873 pi->enable_sclk_ds = true; 1874 pi->enable_gfx_power_gating = true;
··· 1068 pi->requested_rps.ps_priv = &pi->requested_ps; 1069 } 1070 1071 + void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable) 1072 + { 1073 + struct trinity_power_info *pi = trinity_get_pi(rdev); 1074 + 1075 + if (pi->enable_bapm) { 1076 + trinity_acquire_mutex(rdev); 1077 + trinity_dpm_bapm_enable(rdev, enable); 1078 + trinity_release_mutex(rdev); 1079 + } 1080 + } 1081 + 1082 int trinity_dpm_enable(struct radeon_device *rdev) 1083 { 1084 struct trinity_power_info *pi = trinity_get_pi(rdev); ··· 1091 trinity_program_sclk_dpm(rdev); 1092 trinity_start_dpm(rdev); 1093 trinity_wait_for_dpm_enabled(rdev); 1094 + trinity_dpm_bapm_enable(rdev, false); 1095 trinity_release_mutex(rdev); 1096 1097 if (rdev->irq.installed && ··· 1116 trinity_release_mutex(rdev); 1117 return; 1118 } 1119 + trinity_dpm_bapm_enable(rdev, false); 1120 trinity_disable_clock_power_gating(rdev); 1121 sumo_clear_vc(rdev); 1122 trinity_wait_for_level_0(rdev); ··· 1212 1213 trinity_acquire_mutex(rdev); 1214 if (pi->enable_dpm) { 1215 + if (pi->enable_bapm) 1216 + trinity_dpm_bapm_enable(rdev, rdev->pm.dpm.ac_power); 1217 trinity_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); 1218 trinity_enable_power_level_0(rdev); 1219 trinity_force_level_0(rdev); ··· 1854 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) 1855 pi->at[i] = TRINITY_AT_DFLT; 1856 1857 + pi->enable_bapm = true; 1858 pi->enable_nbps_policy = true; 1859 pi->enable_sclk_ds = true; 1860 pi->enable_gfx_power_gating = true;
+2
drivers/gpu/drm/radeon/trinity_dpm.h
··· 108 bool enable_auto_thermal_throttling; 109 bool enable_dpm; 110 bool enable_sclk_ds; 111 bool uvd_dpm; 112 struct radeon_ps current_rps; 113 struct trinity_ps current_ps; ··· 119 #define TRINITY_AT_DFLT 30 120 121 /* trinity_smc.c */ 122 int trinity_dpm_config(struct radeon_device *rdev, bool enable); 123 int trinity_uvd_dpm_config(struct radeon_device *rdev); 124 int trinity_dpm_force_state(struct radeon_device *rdev, u32 n);
··· 108 bool enable_auto_thermal_throttling; 109 bool enable_dpm; 110 bool enable_sclk_ds; 111 + bool enable_bapm; 112 bool uvd_dpm; 113 struct radeon_ps current_rps; 114 struct trinity_ps current_ps; ··· 118 #define TRINITY_AT_DFLT 30 119 120 /* trinity_smc.c */ 121 + int trinity_dpm_bapm_enable(struct radeon_device *rdev, bool enable); 122 int trinity_dpm_config(struct radeon_device *rdev, bool enable); 123 int trinity_uvd_dpm_config(struct radeon_device *rdev); 124 int trinity_dpm_force_state(struct radeon_device *rdev, u32 n);
+8
drivers/gpu/drm/radeon/trinity_smc.c
··· 56 return 0; 57 } 58 59 int trinity_dpm_config(struct radeon_device *rdev, bool enable) 60 { 61 if (enable)
··· 56 return 0; 57 } 58 59 + int trinity_dpm_bapm_enable(struct radeon_device *rdev, bool enable) 60 + { 61 + if (enable) 62 + return trinity_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM); 63 + else 64 + return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM); 65 + } 66 + 67 int trinity_dpm_config(struct radeon_device *rdev, bool enable) 68 { 69 if (enable)
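trinity_dpm_bapm_enable() is a thin wrapper that posts one of two PPSMC opcodes through the existing trinity_notify_message_to_smu() helper; every SMU command in this driver goes through that single transport. A minimal sketch of the idea (opcode values and transport are made up for illustration):

#include <stdio.h>

/* Made-up opcode values; the real ones live in ppsmc.h. */
enum ppsmc_msg { MSG_ENABLE_BAPM = 0x120, MSG_DISABLE_BAPM = 0x121 };

/* Stand-in for trinity_notify_message_to_smu(): post one opcode,
 * wait for the firmware to acknowledge, return 0 on success. */
static int notify_smu(enum ppsmc_msg msg)
{
	printf("SMU <- 0x%x\n", msg);
	return 0;
}

static int bapm_enable(int enable)
{
	return notify_smu(enable ? MSG_ENABLE_BAPM : MSG_DISABLE_BAPM);
}

int main(void)
{
	return bapm_enable(1);
}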
+3
include/drm/drm_pciids.h
··· 12 {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 13 {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 14 {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 15 {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 16 {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 17 {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 18 {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 19 {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 20 {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 21 {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 22 {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
··· 12 {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 13 {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 14 {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 15 + {0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 16 {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 17 {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 18 {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 19 + {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 20 {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 21 {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 22 + {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 23 {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 24 {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 25 {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \