Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'drm-fixes-4.7' of git://people.freedesktop.org/~agd5f/linux into drm-fixes

radeon and amdgpu fixes for 4.7. Highlights:
- fixes for GPU VM passthrough
- fixes for powerplay on Polaris GPUs
- pll fixes for rs780/880

* 'drm-fixes-4.7' of git://people.freedesktop.org/~agd5f/linux:
drm/amd/powerplay: select samu dpm 0 as boot level on polaris.
drm/amd/powerplay: update powerplay table parsing
Revert "drm/amdgpu: add pipeline sync while vmid switch in same ctx"
drm/amdgpu/gfx7: fix broken condition check
drm/radeon: fix asic initialization for virtualized environments
amdgpu: fix asic initialization for virtualized environments (v2)
drm/radeon: don't use fractional dividers on RS[78]80 if SS is enabled
drm/radeon: do not hard reset GPU while freezing on r600/r700 family

+173 -54
+8 -3
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 799 799 unsigned cond_exe_offs; 800 800 u64 cond_exe_gpu_addr; 801 801 volatile u32 *cond_exe_cpu_addr; 802 - int vmid; 803 802 }; 804 803 805 804 /* ··· 936 937 unsigned vm_id, uint64_t pd_addr, 937 938 uint32_t gds_base, uint32_t gds_size, 938 939 uint32_t gws_base, uint32_t gws_size, 939 - uint32_t oa_base, uint32_t oa_size, 940 - bool vmid_switch); 940 + uint32_t oa_base, uint32_t oa_size); 941 941 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id); 942 942 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr); 943 943 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, ··· 1820 1822 /* MM block clocks */ 1821 1823 int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk); 1822 1824 int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk); 1825 + /* query virtual capabilities */ 1826 + u32 (*get_virtual_caps)(struct amdgpu_device *adev); 1823 1827 }; 1824 1828 1825 1829 /* ··· 1916 1916 1917 1917 1918 1918 /* GPU virtualization */ 1919 + #define AMDGPU_VIRT_CAPS_SRIOV_EN (1 << 0) 1920 + #define AMDGPU_VIRT_CAPS_IS_VF (1 << 1) 1919 1921 struct amdgpu_virtualization { 1920 1922 bool supports_sr_iov; 1923 + bool is_virtual; 1924 + u32 caps; 1921 1925 }; 1922 1926 1923 1927 /* ··· 2210 2206 #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev)) 2211 2207 #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d)) 2212 2208 #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) 2209 + #define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev))) 2213 2210 #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev)) 2214 2211 #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) 2215 2212 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), 
(b), (l))
+16 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 1385 1385 return 0; 1386 1386 } 1387 1387 1388 + static bool amdgpu_device_is_virtual(void) 1389 + { 1390 + #ifdef CONFIG_X86 1391 + return boot_cpu_has(X86_FEATURE_HYPERVISOR); 1392 + #else 1393 + return false; 1394 + #endif 1395 + } 1396 + 1388 1397 /** 1389 1398 * amdgpu_device_init - initialize the driver 1390 1399 * ··· 1528 1519 adev->virtualization.supports_sr_iov = 1529 1520 amdgpu_atombios_has_gpu_virtualization_table(adev); 1530 1521 1522 + /* Check if we are executing in a virtualized environment */ 1523 + adev->virtualization.is_virtual = amdgpu_device_is_virtual(); 1524 + adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev); 1525 + 1531 1526 /* Post card if necessary */ 1532 - if (!amdgpu_card_posted(adev)) { 1527 + if (!amdgpu_card_posted(adev) || 1528 + (adev->virtualization.is_virtual && 1529 + !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) { 1533 1530 if (!adev->bios) { 1534 1531 dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); 1535 1532 return -EINVAL;
+2 -7
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
··· 122 122 bool skip_preamble, need_ctx_switch; 123 123 unsigned patch_offset = ~0; 124 124 struct amdgpu_vm *vm; 125 - int vmid = 0, old_vmid = ring->vmid; 126 125 struct fence *hwf; 127 126 uint64_t ctx; 128 127 ··· 135 136 if (job) { 136 137 vm = job->vm; 137 138 ctx = job->ctx; 138 - vmid = job->vm_id; 139 139 } else { 140 140 vm = NULL; 141 141 ctx = 0; 142 - vmid = 0; 143 142 } 144 143 145 144 if (!ring->ready) { ··· 163 166 r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr, 164 167 job->gds_base, job->gds_size, 165 168 job->gws_base, job->gws_size, 166 - job->oa_base, job->oa_size, 167 - (ring->current_ctx == ctx) && (old_vmid != vmid)); 169 + job->oa_base, job->oa_size); 168 170 if (r) { 169 171 amdgpu_ring_undo(ring); 170 172 return r; ··· 180 184 need_ctx_switch = ring->current_ctx != ctx; 181 185 for (i = 0; i < num_ibs; ++i) { 182 186 ib = &ibs[i]; 187 + 183 188 /* drop preamble IBs if we don't have a context switch */ 184 189 if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble) 185 190 continue; ··· 188 191 amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0, 189 192 need_ctx_switch); 190 193 need_ctx_switch = false; 191 - ring->vmid = vmid; 192 194 } 193 195 194 196 if (ring->funcs->emit_hdp_invalidate) ··· 198 202 dev_err(adev->dev, "failed to emit fence (%d)\n", r); 199 203 if (job && job->vm_id) 200 204 amdgpu_vm_reset_id(adev, job->vm_id); 201 - ring->vmid = old_vmid; 202 205 amdgpu_ring_undo(ring); 203 206 return r; 204 207 }
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 298 298 unsigned vm_id, uint64_t pd_addr, 299 299 uint32_t gds_base, uint32_t gds_size, 300 300 uint32_t gws_base, uint32_t gws_size, 301 - uint32_t oa_base, uint32_t oa_size, 302 - bool vmid_switch) 301 + uint32_t oa_base, uint32_t oa_size) 303 302 { 304 303 struct amdgpu_device *adev = ring->adev; 305 304 struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id]; ··· 312 313 int r; 313 314 314 315 if (ring->funcs->emit_pipeline_sync && ( 315 - pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed || vmid_switch)) 316 + pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed || 317 + ring->type == AMDGPU_RING_TYPE_COMPUTE)) 316 318 amdgpu_ring_emit_pipeline_sync(ring); 317 319 318 320 if (ring->funcs->emit_vm_flush &&
+7
drivers/gpu/drm/amd/amdgpu/cik.c
··· 962 962 return true; 963 963 } 964 964 965 + static u32 cik_get_virtual_caps(struct amdgpu_device *adev) 966 + { 967 + /* CIK does not support SR-IOV */ 968 + return 0; 969 + } 970 + 965 971 static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = { 966 972 {mmGRBM_STATUS, false}, 967 973 {mmGB_ADDR_CONFIG, false}, ··· 2013 2007 .get_xclk = &cik_get_xclk, 2014 2008 .set_uvd_clocks = &cik_set_uvd_clocks, 2015 2009 .set_vce_clocks = &cik_set_vce_clocks, 2010 + .get_virtual_caps = &cik_get_virtual_caps, 2016 2011 /* these should be moved to their own ip modules */ 2017 2012 .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter, 2018 2013 .wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle,
+1 -1
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
··· 4833 4833 case 2: 4834 4834 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4835 4835 ring = &adev->gfx.compute_ring[i]; 4836 - if ((ring->me == me_id) & (ring->pipe == pipe_id)) 4836 + if ((ring->me == me_id) && (ring->pipe == pipe_id)) 4837 4837 amdgpu_fence_process(ring); 4838 4838 } 4839 4839 break;
+15
drivers/gpu/drm/amd/amdgpu/vi.c
··· 421 421 return true; 422 422 } 423 423 424 + static u32 vi_get_virtual_caps(struct amdgpu_device *adev) 425 + { 426 + u32 caps = 0; 427 + u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER); 428 + 429 + if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE)) 430 + caps |= AMDGPU_VIRT_CAPS_SRIOV_EN; 431 + 432 + if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER)) 433 + caps |= AMDGPU_VIRT_CAPS_IS_VF; 434 + 435 + return caps; 436 + } 437 + 424 438 static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = { 425 439 {mmGB_MACROTILE_MODE7, true}, 426 440 }; ··· 1132 1118 .get_xclk = &vi_get_xclk, 1133 1119 .set_uvd_clocks = &vi_set_uvd_clocks, 1134 1120 .set_vce_clocks = &vi_set_vce_clocks, 1121 + .get_virtual_caps = &vi_get_virtual_caps, 1135 1122 /* these should be moved to their own ip modules */ 1136 1123 .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter, 1137 1124 .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
+1
drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h
··· 39 39 uint8_t phases; 40 40 uint8_t cks_enable; 41 41 uint8_t cks_voffset; 42 + uint32_t sclk_offset; 42 43 }; 43 44 44 45 typedef struct phm_ppt_v1_clock_voltage_dependency_record phm_ppt_v1_clock_voltage_dependency_record;
+17 -11
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
··· 999 999 vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), 1000 1000 (dep_table->entries[i].vddc - 1001 1001 (uint16_t)data->vddc_vddci_delta)); 1002 - *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; 1002 + *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; 1003 1003 } 1004 1004 1005 1005 if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) ··· 3520 3520 ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state; 3521 3521 ATOM_Tonga_POWERPLAYTABLE *powerplay_table = 3522 3522 (ATOM_Tonga_POWERPLAYTABLE *)pp_table; 3523 - ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = 3524 - (ATOM_Tonga_SCLK_Dependency_Table *) 3523 + PPTable_Generic_SubTable_Header *sclk_dep_table = 3524 + (PPTable_Generic_SubTable_Header *) 3525 3525 (((unsigned long)powerplay_table) + 3526 3526 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); 3527 + 3527 3528 ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = 3528 3529 (ATOM_Tonga_MCLK_Dependency_Table *) 3529 3530 (((unsigned long)powerplay_table) + ··· 3576 3575 /* Performance levels are arranged from low to high. 
*/ 3577 3576 performance_level->memory_clock = mclk_dep_table->entries 3578 3577 [state_entry->ucMemoryClockIndexLow].ulMclk; 3579 - performance_level->engine_clock = sclk_dep_table->entries 3578 + if (sclk_dep_table->ucRevId == 0) 3579 + performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries 3580 + [state_entry->ucEngineClockIndexLow].ulSclk; 3581 + else if (sclk_dep_table->ucRevId == 1) 3582 + performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries 3580 3583 [state_entry->ucEngineClockIndexLow].ulSclk; 3581 3584 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, 3582 3585 state_entry->ucPCIEGenLow); ··· 3591 3586 [polaris10_power_state->performance_level_count++]); 3592 3587 performance_level->memory_clock = mclk_dep_table->entries 3593 3588 [state_entry->ucMemoryClockIndexHigh].ulMclk; 3594 - performance_level->engine_clock = sclk_dep_table->entries 3589 + 3590 + if (sclk_dep_table->ucRevId == 0) 3591 + performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries 3595 3592 [state_entry->ucEngineClockIndexHigh].ulSclk; 3593 + else if (sclk_dep_table->ucRevId == 1) 3594 + performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries 3595 + [state_entry->ucEngineClockIndexHigh].ulSclk; 3596 + 3596 3597 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, 3597 3598 state_entry->ucPCIEGenHigh); 3598 3599 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, ··· 3656 3645 switch (state->classification.ui_label) { 3657 3646 case PP_StateUILabel_Performance: 3658 3647 data->use_pcie_performance_levels = true; 3659 - 3660 3648 for (i = 0; i < ps->performance_level_count; i++) { 3661 3649 if (data->pcie_gen_performance.max < 3662 3650 ps->performance_levels[i].pcie_gen) ··· 3671 3661 ps->performance_levels[i].pcie_lane) 3672 3662 data->pcie_lane_performance.max = 
3673 3663 ps->performance_levels[i].pcie_lane; 3674 - 3675 3664 if (data->pcie_lane_performance.min > 3676 3665 ps->performance_levels[i].pcie_lane) 3677 3666 data->pcie_lane_performance.min = ··· 4196 4187 { 4197 4188 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); 4198 4189 uint32_t mm_boot_level_offset, mm_boot_level_value; 4199 - struct phm_ppt_v1_information *table_info = 4200 - (struct phm_ppt_v1_information *)(hwmgr->pptable); 4201 4190 4202 4191 if (!bgate) { 4203 - data->smc_state_table.SamuBootLevel = 4204 - (uint8_t) (table_info->mm_dep_table->count - 1); 4192 + data->smc_state_table.SamuBootLevel = 0; 4205 4193 mm_boot_level_offset = data->dpm_table_start + 4206 4194 offsetof(SMU74_Discrete_DpmTable, SamuBootLevel); 4207 4195 mm_boot_level_offset /= 4;
+16
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
··· 197 197 ATOM_Tonga_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ 198 198 } ATOM_Tonga_SCLK_Dependency_Table; 199 199 200 + typedef struct _ATOM_Polaris_SCLK_Dependency_Record { 201 + UCHAR ucVddInd; /* Base voltage */ 202 + USHORT usVddcOffset; /* Offset relative to base voltage */ 203 + ULONG ulSclk; 204 + USHORT usEdcCurrent; 205 + UCHAR ucReliabilityTemperature; 206 + UCHAR ucCKSVOffsetandDisable; /* Bits 0~6: Voltage offset for CKS, Bit 7: Disable/enable for the SCLK level. */ 207 + ULONG ulSclkOffset; 208 + } ATOM_Polaris_SCLK_Dependency_Record; 209 + 210 + typedef struct _ATOM_Polaris_SCLK_Dependency_Table { 211 + UCHAR ucRevId; 212 + UCHAR ucNumEntries; /* Number of entries. */ 213 + ATOM_Polaris_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ 214 + } ATOM_Polaris_SCLK_Dependency_Table; 215 + 200 216 typedef struct _ATOM_Tonga_PCIE_Record { 201 217 UCHAR ucPCIEGenSpeed; 202 218 UCHAR usPCIELaneWidth;
+62 -25
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
··· 408 408 static int get_sclk_voltage_dependency_table( 409 409 struct pp_hwmgr *hwmgr, 410 410 phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table, 411 - const ATOM_Tonga_SCLK_Dependency_Table * sclk_dep_table 411 + const PPTable_Generic_SubTable_Header *sclk_dep_table 412 412 ) 413 413 { 414 414 uint32_t table_size, i; 415 415 phm_ppt_v1_clock_voltage_dependency_table *sclk_table; 416 416 417 - PP_ASSERT_WITH_CODE((0 != sclk_dep_table->ucNumEntries), 418 - "Invalid PowerPlay Table!", return -1); 417 + if (sclk_dep_table->ucRevId < 1) { 418 + const ATOM_Tonga_SCLK_Dependency_Table *tonga_table = 419 + (ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table; 419 420 420 - table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) 421 - * sclk_dep_table->ucNumEntries; 421 + PP_ASSERT_WITH_CODE((0 != tonga_table->ucNumEntries), 422 + "Invalid PowerPlay Table!", return -1); 422 423 423 - sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) 424 - kzalloc(table_size, GFP_KERNEL); 424 + table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) 425 + * tonga_table->ucNumEntries; 425 426 426 - if (NULL == sclk_table) 427 - return -ENOMEM; 427 + sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) 428 + kzalloc(table_size, GFP_KERNEL); 428 429 429 - memset(sclk_table, 0x00, table_size); 430 + if (NULL == sclk_table) 431 + return -ENOMEM; 430 432 431 - sclk_table->count = (uint32_t)sclk_dep_table->ucNumEntries; 433 + memset(sclk_table, 0x00, table_size); 432 434 433 - for (i = 0; i < sclk_dep_table->ucNumEntries; i++) { 434 - sclk_table->entries[i].vddInd = 435 - sclk_dep_table->entries[i].ucVddInd; 436 - sclk_table->entries[i].vdd_offset = 437 - sclk_dep_table->entries[i].usVddcOffset; 438 - sclk_table->entries[i].clk = 439 - sclk_dep_table->entries[i].ulSclk; 440 - sclk_table->entries[i].cks_enable = 441 - (((sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 
1 : 0; 442 - sclk_table->entries[i].cks_voffset = 443 - (sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x7F); 435 + sclk_table->count = (uint32_t)tonga_table->ucNumEntries; 436 + 437 + for (i = 0; i < tonga_table->ucNumEntries; i++) { 438 + sclk_table->entries[i].vddInd = 439 + tonga_table->entries[i].ucVddInd; 440 + sclk_table->entries[i].vdd_offset = 441 + tonga_table->entries[i].usVddcOffset; 442 + sclk_table->entries[i].clk = 443 + tonga_table->entries[i].ulSclk; 444 + sclk_table->entries[i].cks_enable = 445 + (((tonga_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0; 446 + sclk_table->entries[i].cks_voffset = 447 + (tonga_table->entries[i].ucCKSVOffsetandDisable & 0x7F); 448 + } 449 + } else { 450 + const ATOM_Polaris_SCLK_Dependency_Table *polaris_table = 451 + (ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table; 452 + 453 + PP_ASSERT_WITH_CODE((0 != polaris_table->ucNumEntries), 454 + "Invalid PowerPlay Table!", return -1); 455 + 456 + table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) 457 + * polaris_table->ucNumEntries; 458 + 459 + sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) 460 + kzalloc(table_size, GFP_KERNEL); 461 + 462 + if (NULL == sclk_table) 463 + return -ENOMEM; 464 + 465 + memset(sclk_table, 0x00, table_size); 466 + 467 + sclk_table->count = (uint32_t)polaris_table->ucNumEntries; 468 + 469 + for (i = 0; i < polaris_table->ucNumEntries; i++) { 470 + sclk_table->entries[i].vddInd = 471 + polaris_table->entries[i].ucVddInd; 472 + sclk_table->entries[i].vdd_offset = 473 + polaris_table->entries[i].usVddcOffset; 474 + sclk_table->entries[i].clk = 475 + polaris_table->entries[i].ulSclk; 476 + sclk_table->entries[i].cks_enable = 477 + (((polaris_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 
1 : 0; 478 + sclk_table->entries[i].cks_voffset = 479 + (polaris_table->entries[i].ucCKSVOffsetandDisable & 0x7F); 480 + sclk_table->entries[i].sclk_offset = polaris_table->entries[i].ulSclkOffset; 481 + } 444 482 } 445 - 446 483 *pp_tonga_sclk_dep_table = sclk_table; 447 484 448 485 return 0; ··· 745 708 const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = 746 709 (const ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long) powerplay_table) + 747 710 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); 748 - const ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = 749 - (const ATOM_Tonga_SCLK_Dependency_Table *)(((unsigned long) powerplay_table) + 711 + const PPTable_Generic_SubTable_Header *sclk_dep_table = 712 + (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) + 750 713 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); 751 714 const ATOM_Tonga_Hard_Limit_Table *pHardLimits = 752 715 (const ATOM_Tonga_Hard_Limit_Table *)(((unsigned long) powerplay_table) +
+3 -2
drivers/gpu/drm/radeon/atombios_crtc.c
··· 589 589 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev)) 590 590 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; 591 591 /* use frac fb div on RS780/RS880 */ 592 - if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) 592 + if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) 593 + && !radeon_crtc->ss_enabled) 593 594 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; 594 595 if (ASIC_IS_DCE32(rdev) && mode->clock > 165000) 595 596 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; ··· 627 626 if (radeon_crtc->ss.refdiv) { 628 627 radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV; 629 628 radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv; 630 - if (ASIC_IS_AVIVO(rdev)) 629 + if (rdev->family >= CHIP_RV770) 631 630 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; 632 631 } 633 632 }
+22 -1
drivers/gpu/drm/radeon/radeon_device.c
··· 630 630 /* 631 631 * GPU helpers function. 632 632 */ 633 + 634 + /** 635 + * radeon_device_is_virtual - check if we are running in a virtual environment 636 + * 637 + * Check if the asic has been passed through to a VM (all asics). 638 + * Used at driver startup. 639 + * Returns true if virtual or false if not. 640 + */ 641 + static bool radeon_device_is_virtual(void) 642 + { 643 + #ifdef CONFIG_X86 644 + return boot_cpu_has(X86_FEATURE_HYPERVISOR); 645 + #else 646 + return false; 647 + #endif 648 + } 649 + 633 650 /** 634 651 * radeon_card_posted - check if the hw has already been initialized 635 652 * ··· 659 642 bool radeon_card_posted(struct radeon_device *rdev) 660 643 { 661 644 uint32_t reg; 645 + 646 + /* for pass through, always force asic_init */ 647 + if (radeon_device_is_virtual()) 648 + return false; 662 649 663 650 /* required for EFI mode on macbook2,1 which uses an r5xx asic */ 664 651 if (efi_enabled(EFI_BOOT) && ··· 1652 1631 radeon_agp_suspend(rdev); 1653 1632 1654 1633 pci_save_state(dev->pdev); 1655 - if (freeze && rdev->family >= CHIP_R600) { 1634 + if (freeze && rdev->family >= CHIP_CEDAR) { 1656 1635 rdev->asic->asic_reset(rdev, true); 1657 1636 pci_restore_state(dev->pdev); 1658 1637 } else if (suspend) {