Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-fixes-for-v4.7-rc4' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
"The main drm fixes pull for rc4: one regression fix in the connector
refcounting, and an MST fix.

The rest is nouveau, amdkfd, i915, etnaviv, and radeon/amdgpu fixes,
mostly regression or black screen fixes"

* tag 'drm-fixes-for-v4.7-rc4' of git://people.freedesktop.org/~airlied/linux: (23 commits)
drm/etnaviv: initialize iommu domain page size
drm/nouveau/iccsense: fix memory leak
drm/nouveau/Revert "drm/nouveau/device/pci: set as non-CPU-coherent on ARM64"
drm/amd/powerplay: select samu dpm 0 as boot level on polaris.
drm/amd/powerplay: update powerplay table parsing
drm/dp/mst: Always clear proposed vcpi table for port.
drm/crtc: only store the necessary data for set_config rollback
drm/crtc: fix connector reference counting mismatch in drm_crtc_helper_set_config
drm/i915/ilk: Don't disable SSC source if it's in use
Revert "drm/amdgpu: add pipeline sync while vmid switch in same ctx"
drm/amdgpu/gfx7: fix broken condition check
drm/radeon: fix asic initialization for virtualized environments
amdgpu: fix asic initialization for virtualized environments (v2)
drm/radeon: don't use fractional dividers on RS[78]80 if SS is enabled
drm/radeon: do not hard reset GPU while freezing on r600/r700 family
drm/i915: Extract physical display dimensions from VBT
drm/i915: Check VBT for port presence in addition to the strap on VLV/CHV
drm/i915: Only ignore eDP ports that are connected
drm/i915: Silence "unexpected child device config size" for VBT on 845g
drm/i915: Fix NULL pointer deference when out of PLLs in IVB
...

+399 -170
+8 -3
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 799 799 unsigned cond_exe_offs; 800 800 u64 cond_exe_gpu_addr; 801 801 volatile u32 *cond_exe_cpu_addr; 802 - int vmid; 803 802 }; 804 803 805 804 /* ··· 936 937 unsigned vm_id, uint64_t pd_addr, 937 938 uint32_t gds_base, uint32_t gds_size, 938 939 uint32_t gws_base, uint32_t gws_size, 939 - uint32_t oa_base, uint32_t oa_size, 940 - bool vmid_switch); 940 + uint32_t oa_base, uint32_t oa_size); 941 941 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id); 942 942 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr); 943 943 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, ··· 1820 1822 /* MM block clocks */ 1821 1823 int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk); 1822 1824 int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk); 1825 + /* query virtual capabilities */ 1826 + u32 (*get_virtual_caps)(struct amdgpu_device *adev); 1823 1827 }; 1824 1828 1825 1829 /* ··· 1916 1916 1917 1917 1918 1918 /* GPU virtualization */ 1919 + #define AMDGPU_VIRT_CAPS_SRIOV_EN (1 << 0) 1920 + #define AMDGPU_VIRT_CAPS_IS_VF (1 << 1) 1919 1921 struct amdgpu_virtualization { 1920 1922 bool supports_sr_iov; 1923 + bool is_virtual; 1924 + u32 caps; 1921 1925 }; 1922 1926 1923 1927 /* ··· 2210 2206 #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev)) 2211 2207 #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d)) 2212 2208 #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) 2209 + #define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev))) 2213 2210 #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev)) 2214 2211 #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) 2215 2212 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), 
(b), (l))
+16 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 1385 1385 return 0; 1386 1386 } 1387 1387 1388 + static bool amdgpu_device_is_virtual(void) 1389 + { 1390 + #ifdef CONFIG_X86 1391 + return boot_cpu_has(X86_FEATURE_HYPERVISOR); 1392 + #else 1393 + return false; 1394 + #endif 1395 + } 1396 + 1388 1397 /** 1389 1398 * amdgpu_device_init - initialize the driver 1390 1399 * ··· 1528 1519 adev->virtualization.supports_sr_iov = 1529 1520 amdgpu_atombios_has_gpu_virtualization_table(adev); 1530 1521 1522 + /* Check if we are executing in a virtualized environment */ 1523 + adev->virtualization.is_virtual = amdgpu_device_is_virtual(); 1524 + adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev); 1525 + 1531 1526 /* Post card if necessary */ 1532 - if (!amdgpu_card_posted(adev)) { 1527 + if (!amdgpu_card_posted(adev) || 1528 + (adev->virtualization.is_virtual && 1529 + !adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN)) { 1533 1530 if (!adev->bios) { 1534 1531 dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); 1535 1532 return -EINVAL;
+2 -7
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
··· 122 122 bool skip_preamble, need_ctx_switch; 123 123 unsigned patch_offset = ~0; 124 124 struct amdgpu_vm *vm; 125 - int vmid = 0, old_vmid = ring->vmid; 126 125 struct fence *hwf; 127 126 uint64_t ctx; 128 127 ··· 135 136 if (job) { 136 137 vm = job->vm; 137 138 ctx = job->ctx; 138 - vmid = job->vm_id; 139 139 } else { 140 140 vm = NULL; 141 141 ctx = 0; 142 - vmid = 0; 143 142 } 144 143 145 144 if (!ring->ready) { ··· 163 166 r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr, 164 167 job->gds_base, job->gds_size, 165 168 job->gws_base, job->gws_size, 166 - job->oa_base, job->oa_size, 167 - (ring->current_ctx == ctx) && (old_vmid != vmid)); 169 + job->oa_base, job->oa_size); 168 170 if (r) { 169 171 amdgpu_ring_undo(ring); 170 172 return r; ··· 180 184 need_ctx_switch = ring->current_ctx != ctx; 181 185 for (i = 0; i < num_ibs; ++i) { 182 186 ib = &ibs[i]; 187 + 183 188 /* drop preamble IBs if we don't have a context switch */ 184 189 if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble) 185 190 continue; ··· 188 191 amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0, 189 192 need_ctx_switch); 190 193 need_ctx_switch = false; 191 - ring->vmid = vmid; 192 194 } 193 195 194 196 if (ring->funcs->emit_hdp_invalidate) ··· 198 202 dev_err(adev->dev, "failed to emit fence (%d)\n", r); 199 203 if (job && job->vm_id) 200 204 amdgpu_vm_reset_id(adev, job->vm_id); 201 - ring->vmid = old_vmid; 202 205 amdgpu_ring_undo(ring); 203 206 return r; 204 207 }
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 298 298 unsigned vm_id, uint64_t pd_addr, 299 299 uint32_t gds_base, uint32_t gds_size, 300 300 uint32_t gws_base, uint32_t gws_size, 301 - uint32_t oa_base, uint32_t oa_size, 302 - bool vmid_switch) 301 + uint32_t oa_base, uint32_t oa_size) 303 302 { 304 303 struct amdgpu_device *adev = ring->adev; 305 304 struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id]; ··· 312 313 int r; 313 314 314 315 if (ring->funcs->emit_pipeline_sync && ( 315 - pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed || vmid_switch)) 316 + pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed || 317 + ring->type == AMDGPU_RING_TYPE_COMPUTE)) 316 318 amdgpu_ring_emit_pipeline_sync(ring); 317 319 318 320 if (ring->funcs->emit_vm_flush &&
+7
drivers/gpu/drm/amd/amdgpu/cik.c
··· 962 962 return true; 963 963 } 964 964 965 + static u32 cik_get_virtual_caps(struct amdgpu_device *adev) 966 + { 967 + /* CIK does not support SR-IOV */ 968 + return 0; 969 + } 970 + 965 971 static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = { 966 972 {mmGRBM_STATUS, false}, 967 973 {mmGB_ADDR_CONFIG, false}, ··· 2013 2007 .get_xclk = &cik_get_xclk, 2014 2008 .set_uvd_clocks = &cik_set_uvd_clocks, 2015 2009 .set_vce_clocks = &cik_set_vce_clocks, 2010 + .get_virtual_caps = &cik_get_virtual_caps, 2016 2011 /* these should be moved to their own ip modules */ 2017 2012 .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter, 2018 2013 .wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle,
+1 -1
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
··· 4833 4833 case 2: 4834 4834 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4835 4835 ring = &adev->gfx.compute_ring[i]; 4836 - if ((ring->me == me_id) & (ring->pipe == pipe_id)) 4836 + if ((ring->me == me_id) && (ring->pipe == pipe_id)) 4837 4837 amdgpu_fence_process(ring); 4838 4838 } 4839 4839 break;
+15
drivers/gpu/drm/amd/amdgpu/vi.c
··· 421 421 return true; 422 422 } 423 423 424 + static u32 vi_get_virtual_caps(struct amdgpu_device *adev) 425 + { 426 + u32 caps = 0; 427 + u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER); 428 + 429 + if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE)) 430 + caps |= AMDGPU_VIRT_CAPS_SRIOV_EN; 431 + 432 + if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER)) 433 + caps |= AMDGPU_VIRT_CAPS_IS_VF; 434 + 435 + return caps; 436 + } 437 + 424 438 static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = { 425 439 {mmGB_MACROTILE_MODE7, true}, 426 440 }; ··· 1132 1118 .get_xclk = &vi_get_xclk, 1133 1119 .set_uvd_clocks = &vi_set_uvd_clocks, 1134 1120 .set_vce_clocks = &vi_set_vce_clocks, 1121 + .get_virtual_caps = &vi_get_virtual_caps, 1135 1122 /* these should be moved to their own ip modules */ 1136 1123 .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter, 1137 1124 .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
+51 -35
drivers/gpu/drm/amd/amdkfd/kfd_process.c
··· 242 242 pqm_uninit(&p->pqm); 243 243 244 244 /* Iterate over all process device data structure and check 245 - * if we should reset all wavefronts */ 246 - list_for_each_entry(pdd, &p->per_device_data, per_device_list) 245 + * if we should delete debug managers and reset all wavefronts 246 + */ 247 + list_for_each_entry(pdd, &p->per_device_data, per_device_list) { 248 + if ((pdd->dev->dbgmgr) && 249 + (pdd->dev->dbgmgr->pasid == p->pasid)) 250 + kfd_dbgmgr_destroy(pdd->dev->dbgmgr); 251 + 247 252 if (pdd->reset_wavefronts) { 248 253 pr_warn("amdkfd: Resetting all wave fronts\n"); 249 254 dbgdev_wave_reset_wavefronts(pdd->dev, p); 250 255 pdd->reset_wavefronts = false; 251 256 } 257 + } 252 258 253 259 mutex_unlock(&p->mutex); 254 260 ··· 410 404 411 405 idx = srcu_read_lock(&kfd_processes_srcu); 412 406 407 + /* 408 + * Look for the process that matches the pasid. If there is no such 409 + * process, we either released it in amdkfd's own notifier, or there 410 + * is a bug. Unfortunately, there is no way to tell... 411 + */ 413 412 hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes) 414 - if (p->pasid == pasid) 415 - break; 413 + if (p->pasid == pasid) { 414 + 415 + srcu_read_unlock(&kfd_processes_srcu, idx); 416 + 417 + pr_debug("Unbinding process %d from IOMMU\n", pasid); 418 + 419 + mutex_lock(&p->mutex); 420 + 421 + if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid)) 422 + kfd_dbgmgr_destroy(dev->dbgmgr); 423 + 424 + pqm_uninit(&p->pqm); 425 + 426 + pdd = kfd_get_process_device_data(dev, p); 427 + 428 + if (!pdd) { 429 + mutex_unlock(&p->mutex); 430 + return; 431 + } 432 + 433 + if (pdd->reset_wavefronts) { 434 + dbgdev_wave_reset_wavefronts(pdd->dev, p); 435 + pdd->reset_wavefronts = false; 436 + } 437 + 438 + /* 439 + * Just mark pdd as unbound, because we still need it 440 + * to call amd_iommu_unbind_pasid() in when the 441 + * process exits. 442 + * We don't call amd_iommu_unbind_pasid() here 443 + * because the IOMMU called us. 
444 + */ 445 + pdd->bound = false; 446 + 447 + mutex_unlock(&p->mutex); 448 + 449 + return; 450 + } 416 451 417 452 srcu_read_unlock(&kfd_processes_srcu, idx); 418 - 419 - BUG_ON(p->pasid != pasid); 420 - 421 - mutex_lock(&p->mutex); 422 - 423 - if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid)) 424 - kfd_dbgmgr_destroy(dev->dbgmgr); 425 - 426 - pqm_uninit(&p->pqm); 427 - 428 - pdd = kfd_get_process_device_data(dev, p); 429 - 430 - if (!pdd) { 431 - mutex_unlock(&p->mutex); 432 - return; 433 - } 434 - 435 - if (pdd->reset_wavefronts) { 436 - dbgdev_wave_reset_wavefronts(pdd->dev, p); 437 - pdd->reset_wavefronts = false; 438 - } 439 - 440 - /* 441 - * Just mark pdd as unbound, because we still need it to call 442 - * amd_iommu_unbind_pasid() in when the process exits. 443 - * We don't call amd_iommu_unbind_pasid() here 444 - * because the IOMMU called us. 445 - */ 446 - pdd->bound = false; 447 - 448 - mutex_unlock(&p->mutex); 449 453 } 450 454 451 455 struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
··· 666 666 dev->node_props.simd_count); 667 667 668 668 if (dev->mem_bank_count < dev->node_props.mem_banks_count) { 669 - pr_warn("kfd: mem_banks_count truncated from %d to %d\n", 669 + pr_info_once("kfd: mem_banks_count truncated from %d to %d\n", 670 670 dev->node_props.mem_banks_count, 671 671 dev->mem_bank_count); 672 672 sysfs_show_32bit_prop(buffer, "mem_banks_count",
+1
drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h
··· 39 39 uint8_t phases; 40 40 uint8_t cks_enable; 41 41 uint8_t cks_voffset; 42 + uint32_t sclk_offset; 42 43 }; 43 44 44 45 typedef struct phm_ppt_v1_clock_voltage_dependency_record phm_ppt_v1_clock_voltage_dependency_record;
+17 -11
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
··· 999 999 vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), 1000 1000 (dep_table->entries[i].vddc - 1001 1001 (uint16_t)data->vddc_vddci_delta)); 1002 - *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; 1002 + *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; 1003 1003 } 1004 1004 1005 1005 if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) ··· 3520 3520 ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state; 3521 3521 ATOM_Tonga_POWERPLAYTABLE *powerplay_table = 3522 3522 (ATOM_Tonga_POWERPLAYTABLE *)pp_table; 3523 - ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = 3524 - (ATOM_Tonga_SCLK_Dependency_Table *) 3523 + PPTable_Generic_SubTable_Header *sclk_dep_table = 3524 + (PPTable_Generic_SubTable_Header *) 3525 3525 (((unsigned long)powerplay_table) + 3526 3526 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); 3527 + 3527 3528 ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = 3528 3529 (ATOM_Tonga_MCLK_Dependency_Table *) 3529 3530 (((unsigned long)powerplay_table) + ··· 3576 3575 /* Performance levels are arranged from low to high. 
*/ 3577 3576 performance_level->memory_clock = mclk_dep_table->entries 3578 3577 [state_entry->ucMemoryClockIndexLow].ulMclk; 3579 - performance_level->engine_clock = sclk_dep_table->entries 3578 + if (sclk_dep_table->ucRevId == 0) 3579 + performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries 3580 + [state_entry->ucEngineClockIndexLow].ulSclk; 3581 + else if (sclk_dep_table->ucRevId == 1) 3582 + performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries 3580 3583 [state_entry->ucEngineClockIndexLow].ulSclk; 3581 3584 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, 3582 3585 state_entry->ucPCIEGenLow); ··· 3591 3586 [polaris10_power_state->performance_level_count++]); 3592 3587 performance_level->memory_clock = mclk_dep_table->entries 3593 3588 [state_entry->ucMemoryClockIndexHigh].ulMclk; 3594 - performance_level->engine_clock = sclk_dep_table->entries 3589 + 3590 + if (sclk_dep_table->ucRevId == 0) 3591 + performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries 3595 3592 [state_entry->ucEngineClockIndexHigh].ulSclk; 3593 + else if (sclk_dep_table->ucRevId == 1) 3594 + performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries 3595 + [state_entry->ucEngineClockIndexHigh].ulSclk; 3596 + 3596 3597 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, 3597 3598 state_entry->ucPCIEGenHigh); 3598 3599 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, ··· 3656 3645 switch (state->classification.ui_label) { 3657 3646 case PP_StateUILabel_Performance: 3658 3647 data->use_pcie_performance_levels = true; 3659 - 3660 3648 for (i = 0; i < ps->performance_level_count; i++) { 3661 3649 if (data->pcie_gen_performance.max < 3662 3650 ps->performance_levels[i].pcie_gen) ··· 3671 3661 ps->performance_levels[i].pcie_lane) 3672 3662 data->pcie_lane_performance.max = 
3673 3663 ps->performance_levels[i].pcie_lane; 3674 - 3675 3664 if (data->pcie_lane_performance.min > 3676 3665 ps->performance_levels[i].pcie_lane) 3677 3666 data->pcie_lane_performance.min = ··· 4196 4187 { 4197 4188 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); 4198 4189 uint32_t mm_boot_level_offset, mm_boot_level_value; 4199 - struct phm_ppt_v1_information *table_info = 4200 - (struct phm_ppt_v1_information *)(hwmgr->pptable); 4201 4190 4202 4191 if (!bgate) { 4203 - data->smc_state_table.SamuBootLevel = 4204 - (uint8_t) (table_info->mm_dep_table->count - 1); 4192 + data->smc_state_table.SamuBootLevel = 0; 4205 4193 mm_boot_level_offset = data->dpm_table_start + 4206 4194 offsetof(SMU74_Discrete_DpmTable, SamuBootLevel); 4207 4195 mm_boot_level_offset /= 4;
+16
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
··· 197 197 ATOM_Tonga_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ 198 198 } ATOM_Tonga_SCLK_Dependency_Table; 199 199 200 + typedef struct _ATOM_Polaris_SCLK_Dependency_Record { 201 + UCHAR ucVddInd; /* Base voltage */ 202 + USHORT usVddcOffset; /* Offset relative to base voltage */ 203 + ULONG ulSclk; 204 + USHORT usEdcCurrent; 205 + UCHAR ucReliabilityTemperature; 206 + UCHAR ucCKSVOffsetandDisable; /* Bits 0~6: Voltage offset for CKS, Bit 7: Disable/enable for the SCLK level. */ 207 + ULONG ulSclkOffset; 208 + } ATOM_Polaris_SCLK_Dependency_Record; 209 + 210 + typedef struct _ATOM_Polaris_SCLK_Dependency_Table { 211 + UCHAR ucRevId; 212 + UCHAR ucNumEntries; /* Number of entries. */ 213 + ATOM_Polaris_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ 214 + } ATOM_Polaris_SCLK_Dependency_Table; 215 + 200 216 typedef struct _ATOM_Tonga_PCIE_Record { 201 217 UCHAR ucPCIEGenSpeed; 202 218 UCHAR usPCIELaneWidth;
+62 -25
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
··· 408 408 static int get_sclk_voltage_dependency_table( 409 409 struct pp_hwmgr *hwmgr, 410 410 phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table, 411 - const ATOM_Tonga_SCLK_Dependency_Table * sclk_dep_table 411 + const PPTable_Generic_SubTable_Header *sclk_dep_table 412 412 ) 413 413 { 414 414 uint32_t table_size, i; 415 415 phm_ppt_v1_clock_voltage_dependency_table *sclk_table; 416 416 417 - PP_ASSERT_WITH_CODE((0 != sclk_dep_table->ucNumEntries), 418 - "Invalid PowerPlay Table!", return -1); 417 + if (sclk_dep_table->ucRevId < 1) { 418 + const ATOM_Tonga_SCLK_Dependency_Table *tonga_table = 419 + (ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table; 419 420 420 - table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) 421 - * sclk_dep_table->ucNumEntries; 421 + PP_ASSERT_WITH_CODE((0 != tonga_table->ucNumEntries), 422 + "Invalid PowerPlay Table!", return -1); 422 423 423 - sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) 424 - kzalloc(table_size, GFP_KERNEL); 424 + table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) 425 + * tonga_table->ucNumEntries; 425 426 426 - if (NULL == sclk_table) 427 - return -ENOMEM; 427 + sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) 428 + kzalloc(table_size, GFP_KERNEL); 428 429 429 - memset(sclk_table, 0x00, table_size); 430 + if (NULL == sclk_table) 431 + return -ENOMEM; 430 432 431 - sclk_table->count = (uint32_t)sclk_dep_table->ucNumEntries; 433 + memset(sclk_table, 0x00, table_size); 432 434 433 - for (i = 0; i < sclk_dep_table->ucNumEntries; i++) { 434 - sclk_table->entries[i].vddInd = 435 - sclk_dep_table->entries[i].ucVddInd; 436 - sclk_table->entries[i].vdd_offset = 437 - sclk_dep_table->entries[i].usVddcOffset; 438 - sclk_table->entries[i].clk = 439 - sclk_dep_table->entries[i].ulSclk; 440 - sclk_table->entries[i].cks_enable = 441 - (((sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 
1 : 0; 442 - sclk_table->entries[i].cks_voffset = 443 - (sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x7F); 435 + sclk_table->count = (uint32_t)tonga_table->ucNumEntries; 436 + 437 + for (i = 0; i < tonga_table->ucNumEntries; i++) { 438 + sclk_table->entries[i].vddInd = 439 + tonga_table->entries[i].ucVddInd; 440 + sclk_table->entries[i].vdd_offset = 441 + tonga_table->entries[i].usVddcOffset; 442 + sclk_table->entries[i].clk = 443 + tonga_table->entries[i].ulSclk; 444 + sclk_table->entries[i].cks_enable = 445 + (((tonga_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0; 446 + sclk_table->entries[i].cks_voffset = 447 + (tonga_table->entries[i].ucCKSVOffsetandDisable & 0x7F); 448 + } 449 + } else { 450 + const ATOM_Polaris_SCLK_Dependency_Table *polaris_table = 451 + (ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table; 452 + 453 + PP_ASSERT_WITH_CODE((0 != polaris_table->ucNumEntries), 454 + "Invalid PowerPlay Table!", return -1); 455 + 456 + table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) 457 + * polaris_table->ucNumEntries; 458 + 459 + sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) 460 + kzalloc(table_size, GFP_KERNEL); 461 + 462 + if (NULL == sclk_table) 463 + return -ENOMEM; 464 + 465 + memset(sclk_table, 0x00, table_size); 466 + 467 + sclk_table->count = (uint32_t)polaris_table->ucNumEntries; 468 + 469 + for (i = 0; i < polaris_table->ucNumEntries; i++) { 470 + sclk_table->entries[i].vddInd = 471 + polaris_table->entries[i].ucVddInd; 472 + sclk_table->entries[i].vdd_offset = 473 + polaris_table->entries[i].usVddcOffset; 474 + sclk_table->entries[i].clk = 475 + polaris_table->entries[i].ulSclk; 476 + sclk_table->entries[i].cks_enable = 477 + (((polaris_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 
1 : 0; 478 + sclk_table->entries[i].cks_voffset = 479 + (polaris_table->entries[i].ucCKSVOffsetandDisable & 0x7F); 480 + sclk_table->entries[i].sclk_offset = polaris_table->entries[i].ulSclkOffset; 481 + } 444 482 } 445 - 446 483 *pp_tonga_sclk_dep_table = sclk_table; 447 484 448 485 return 0; ··· 745 708 const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = 746 709 (const ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long) powerplay_table) + 747 710 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); 748 - const ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = 749 - (const ATOM_Tonga_SCLK_Dependency_Table *)(((unsigned long) powerplay_table) + 711 + const PPTable_Generic_SubTable_Header *sclk_dep_table = 712 + (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) + 750 713 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); 751 714 const ATOM_Tonga_Hard_Limit_Table *pHardLimits = 752 715 (const ATOM_Tonga_Hard_Limit_Table *)(((unsigned long) powerplay_table) +
+28 -26
drivers/gpu/drm/drm_crtc_helper.c
··· 528 528 int drm_crtc_helper_set_config(struct drm_mode_set *set) 529 529 { 530 530 struct drm_device *dev; 531 - struct drm_crtc *new_crtc; 532 - struct drm_encoder *save_encoders, *new_encoder, *encoder; 531 + struct drm_crtc **save_encoder_crtcs, *new_crtc; 532 + struct drm_encoder **save_connector_encoders, *new_encoder, *encoder; 533 533 bool mode_changed = false; /* if true do a full mode set */ 534 534 bool fb_changed = false; /* if true and !mode_changed just do a flip */ 535 - struct drm_connector *save_connectors, *connector; 535 + struct drm_connector *connector; 536 536 int count = 0, ro, fail = 0; 537 537 const struct drm_crtc_helper_funcs *crtc_funcs; 538 538 struct drm_mode_set save_set; ··· 574 574 * Allocate space for the backup of all (non-pointer) encoder and 575 575 * connector data. 576 576 */ 577 - save_encoders = kzalloc(dev->mode_config.num_encoder * 578 - sizeof(struct drm_encoder), GFP_KERNEL); 579 - if (!save_encoders) 577 + save_encoder_crtcs = kzalloc(dev->mode_config.num_encoder * 578 + sizeof(struct drm_crtc *), GFP_KERNEL); 579 + if (!save_encoder_crtcs) 580 580 return -ENOMEM; 581 581 582 - save_connectors = kzalloc(dev->mode_config.num_connector * 583 - sizeof(struct drm_connector), GFP_KERNEL); 584 - if (!save_connectors) { 585 - kfree(save_encoders); 582 + save_connector_encoders = kzalloc(dev->mode_config.num_connector * 583 + sizeof(struct drm_encoder *), GFP_KERNEL); 584 + if (!save_connector_encoders) { 585 + kfree(save_encoder_crtcs); 586 586 return -ENOMEM; 587 587 } 588 588 ··· 593 593 */ 594 594 count = 0; 595 595 drm_for_each_encoder(encoder, dev) { 596 - save_encoders[count++] = *encoder; 596 + save_encoder_crtcs[count++] = encoder->crtc; 597 597 } 598 598 599 599 count = 0; 600 600 drm_for_each_connector(connector, dev) { 601 - save_connectors[count++] = *connector; 601 + save_connector_encoders[count++] = connector->encoder; 602 602 } 603 603 604 604 save_set.crtc = set->crtc; ··· 631 631 mode_changed = true; 632 
632 } 633 633 634 - /* take a reference on all connectors in set */ 634 + /* take a reference on all unbound connectors in set, reuse the 635 + * already taken reference for bound connectors 636 + */ 635 637 for (ro = 0; ro < set->num_connectors; ro++) { 638 + if (set->connectors[ro]->encoder) 639 + continue; 636 640 drm_connector_reference(set->connectors[ro]); 637 641 } 638 642 ··· 758 754 } 759 755 } 760 756 761 - /* after fail drop reference on all connectors in save set */ 762 - count = 0; 763 - drm_for_each_connector(connector, dev) { 764 - drm_connector_unreference(&save_connectors[count++]); 765 - } 766 - 767 - kfree(save_connectors); 768 - kfree(save_encoders); 757 + kfree(save_connector_encoders); 758 + kfree(save_encoder_crtcs); 769 759 return 0; 770 760 771 761 fail: 772 762 /* Restore all previous data. */ 773 763 count = 0; 774 764 drm_for_each_encoder(encoder, dev) { 775 - *encoder = save_encoders[count++]; 765 + encoder->crtc = save_encoder_crtcs[count++]; 776 766 } 777 767 778 768 count = 0; 779 769 drm_for_each_connector(connector, dev) { 780 - *connector = save_connectors[count++]; 770 + connector->encoder = save_connector_encoders[count++]; 781 771 } 782 772 783 - /* after fail drop reference on all connectors in set */ 773 + /* after fail drop reference on all unbound connectors in set, let 774 + * bound connectors keep their reference 775 + */ 784 776 for (ro = 0; ro < set->num_connectors; ro++) { 777 + if (set->connectors[ro]->encoder) 778 + continue; 785 779 drm_connector_unreference(set->connectors[ro]); 786 780 } 787 781 ··· 789 787 save_set.y, save_set.fb)) 790 788 DRM_ERROR("failed to restore config after modeset failure\n"); 791 789 792 - kfree(save_connectors); 793 - kfree(save_encoders); 790 + kfree(save_connector_encoders); 791 + kfree(save_encoder_crtcs); 794 792 return ret; 795 793 } 796 794 EXPORT_SYMBOL(drm_crtc_helper_set_config);
+3 -5
drivers/gpu/drm/drm_dp_mst_topology.c
··· 2927 2927 drm_dp_port_teardown_pdt(port, port->pdt); 2928 2928 2929 2929 if (!port->input && port->vcpi.vcpi > 0) { 2930 - if (mgr->mst_state) { 2931 - drm_dp_mst_reset_vcpi_slots(mgr, port); 2932 - drm_dp_update_payload_part1(mgr); 2933 - drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); 2934 - } 2930 + drm_dp_mst_reset_vcpi_slots(mgr, port); 2931 + drm_dp_update_payload_part1(mgr); 2932 + drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); 2935 2933 } 2936 2934 2937 2935 kref_put(&port->kref, drm_dp_free_mst_port);
+1
drivers/gpu/drm/etnaviv/etnaviv_iommu.c
··· 225 225 226 226 etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING; 227 227 etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops; 228 + etnaviv_domain->domain.pgsize_bitmap = SZ_4K; 228 229 etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START; 229 230 etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1; 230 231
+1
drivers/gpu/drm/i915/i915_drv.h
··· 3481 3481 bool intel_bios_is_valid_vbt(const void *buf, size_t size); 3482 3482 bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); 3483 3483 bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); 3484 + bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port); 3484 3485 bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); 3485 3486 bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port); 3486 3487 bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
+45 -1
drivers/gpu/drm/i915/intel_bios.c
··· 139 139 else 140 140 panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC; 141 141 142 + panel_fixed_mode->width_mm = (dvo_timing->himage_hi << 8) | 143 + dvo_timing->himage_lo; 144 + panel_fixed_mode->height_mm = (dvo_timing->vimage_hi << 8) | 145 + dvo_timing->vimage_lo; 146 + 142 147 /* Some VBTs have bogus h/vtotal values */ 143 148 if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) 144 149 panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; ··· 1192 1187 } 1193 1188 if (bdb->version < 106) { 1194 1189 expected_size = 22; 1195 - } else if (bdb->version < 109) { 1190 + } else if (bdb->version < 111) { 1196 1191 expected_size = 27; 1197 1192 } else if (bdb->version < 195) { 1198 1193 BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33); ··· 1544 1539 * the OpRegion then they have validated the LVDS's existence. 1545 1540 */ 1546 1541 if (dev_priv->opregion.vbt) 1542 + return true; 1543 + } 1544 + 1545 + return false; 1546 + } 1547 + 1548 + /** 1549 + * intel_bios_is_port_present - is the specified digital port present 1550 + * @dev_priv: i915 device instance 1551 + * @port: port to check 1552 + * 1553 + * Return true if the device in %port is present. 1554 + */ 1555 + bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port) 1556 + { 1557 + static const struct { 1558 + u16 dp, hdmi; 1559 + } port_mapping[] = { 1560 + [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, }, 1561 + [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, }, 1562 + [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, }, 1563 + [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, 1564 + }; 1565 + int i; 1566 + 1567 + /* FIXME maybe deal with port A as well? 
*/ 1568 + if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping)) 1569 + return false; 1570 + 1571 + if (!dev_priv->vbt.child_dev_num) 1572 + return false; 1573 + 1574 + for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { 1575 + const union child_device_config *p_child = 1576 + &dev_priv->vbt.child_dev[i]; 1577 + if ((p_child->common.dvo_port == port_mapping[port].dp || 1578 + p_child->common.dvo_port == port_mapping[port].hdmi) && 1579 + (p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING | 1580 + DEVICE_TYPE_DISPLAYPORT_OUTPUT))) 1547 1581 return true; 1548 1582 } 1549 1583
+60 -28
drivers/gpu/drm/i915/intel_display.c
··· 8275 8275 { 8276 8276 struct drm_i915_private *dev_priv = dev->dev_private; 8277 8277 struct intel_encoder *encoder; 8278 + int i; 8278 8279 u32 val, final; 8279 8280 bool has_lvds = false; 8280 8281 bool has_cpu_edp = false; 8281 8282 bool has_panel = false; 8282 8283 bool has_ck505 = false; 8283 8284 bool can_ssc = false; 8285 + bool using_ssc_source = false; 8284 8286 8285 8287 /* We need to take the global config into account */ 8286 8288 for_each_intel_encoder(dev, encoder) { ··· 8309 8307 can_ssc = true; 8310 8308 } 8311 8309 8312 - DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n", 8313 - has_panel, has_lvds, has_ck505); 8310 + /* Check if any DPLLs are using the SSC source */ 8311 + for (i = 0; i < dev_priv->num_shared_dpll; i++) { 8312 + u32 temp = I915_READ(PCH_DPLL(i)); 8313 + 8314 + if (!(temp & DPLL_VCO_ENABLE)) 8315 + continue; 8316 + 8317 + if ((temp & PLL_REF_INPUT_MASK) == 8318 + PLLB_REF_INPUT_SPREADSPECTRUMIN) { 8319 + using_ssc_source = true; 8320 + break; 8321 + } 8322 + } 8323 + 8324 + DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", 8325 + has_panel, has_lvds, has_ck505, using_ssc_source); 8314 8326 8315 8327 /* Ironlake: try to setup display ref clock before DPLL 8316 8328 * enabling. 
This is only under driver's control after ··· 8361 8345 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 8362 8346 } else 8363 8347 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 8364 - } else { 8365 - final |= DREF_SSC_SOURCE_DISABLE; 8366 - final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 8348 + } else if (using_ssc_source) { 8349 + final |= DREF_SSC_SOURCE_ENABLE; 8350 + final |= DREF_SSC1_ENABLE; 8367 8351 } 8368 8352 8369 8353 if (final == val) ··· 8409 8393 POSTING_READ(PCH_DREF_CONTROL); 8410 8394 udelay(200); 8411 8395 } else { 8412 - DRM_DEBUG_KMS("Disabling SSC entirely\n"); 8396 + DRM_DEBUG_KMS("Disabling CPU source output\n"); 8413 8397 8414 8398 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 8415 8399 ··· 8420 8404 POSTING_READ(PCH_DREF_CONTROL); 8421 8405 udelay(200); 8422 8406 8423 - /* Turn off the SSC source */ 8424 - val &= ~DREF_SSC_SOURCE_MASK; 8425 - val |= DREF_SSC_SOURCE_DISABLE; 8407 + if (!using_ssc_source) { 8408 + DRM_DEBUG_KMS("Disabling SSC source\n"); 8426 8409 8427 - /* Turn off SSC1 */ 8428 - val &= ~DREF_SSC1_ENABLE; 8410 + /* Turn off the SSC source */ 8411 + val &= ~DREF_SSC_SOURCE_MASK; 8412 + val |= DREF_SSC_SOURCE_DISABLE; 8429 8413 8430 - I915_WRITE(PCH_DREF_CONTROL, val); 8431 - POSTING_READ(PCH_DREF_CONTROL); 8432 - udelay(200); 8414 + /* Turn off SSC1 */ 8415 + val &= ~DREF_SSC1_ENABLE; 8416 + 8417 + I915_WRITE(PCH_DREF_CONTROL, val); 8418 + POSTING_READ(PCH_DREF_CONTROL); 8419 + udelay(200); 8420 + } 8433 8421 } 8434 8422 8435 8423 BUG_ON(val != final); ··· 14574 14554 if (I915_READ(PCH_DP_D) & DP_DETECTED) 14575 14555 intel_dp_init(dev, PCH_DP_D, PORT_D); 14576 14556 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 14557 + bool has_edp, has_port; 14558 + 14577 14559 /* 14578 14560 * The DP_DETECTED bit is the latched state of the DDC 14579 14561 * SDA pin at boot. However since eDP doesn't require DDC ··· 14584 14562 * Thus we can't rely on the DP_DETECTED bit alone to detect 14585 14563 * eDP ports. 
Consult the VBT as well as DP_DETECTED to 14586 14564 * detect eDP ports. 14565 + * 14566 + * Sadly the straps seem to be missing sometimes even for HDMI 14567 + * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap 14568 + * and VBT for the presence of the port. Additionally we can't 14569 + * trust the port type the VBT declares as we've seen at least 14570 + * HDMI ports that the VBT claim are DP or eDP. 14587 14571 */ 14588 - if (I915_READ(VLV_HDMIB) & SDVO_DETECTED && 14589 - !intel_dp_is_edp(dev, PORT_B)) 14572 + has_edp = intel_dp_is_edp(dev, PORT_B); 14573 + has_port = intel_bios_is_port_present(dev_priv, PORT_B); 14574 + if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port) 14575 + has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B); 14576 + if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) 14590 14577 intel_hdmi_init(dev, VLV_HDMIB, PORT_B); 14591 - if (I915_READ(VLV_DP_B) & DP_DETECTED || 14592 - intel_dp_is_edp(dev, PORT_B)) 14593 - intel_dp_init(dev, VLV_DP_B, PORT_B); 14594 14578 14595 - if (I915_READ(VLV_HDMIC) & SDVO_DETECTED && 14596 - !intel_dp_is_edp(dev, PORT_C)) 14579 + has_edp = intel_dp_is_edp(dev, PORT_C); 14580 + has_port = intel_bios_is_port_present(dev_priv, PORT_C); 14581 + if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port) 14582 + has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C); 14583 + if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) 14597 14584 intel_hdmi_init(dev, VLV_HDMIC, PORT_C); 14598 - if (I915_READ(VLV_DP_C) & DP_DETECTED || 14599 - intel_dp_is_edp(dev, PORT_C)) 14600 - intel_dp_init(dev, VLV_DP_C, PORT_C); 14601 14585 14602 14586 if (IS_CHERRYVIEW(dev)) { 14603 - /* eDP not supported on port D, so don't check VBT */ 14604 - if (I915_READ(CHV_HDMID) & SDVO_DETECTED) 14605 - intel_hdmi_init(dev, CHV_HDMID, PORT_D); 14606 - if (I915_READ(CHV_DP_D) & DP_DETECTED) 14587 + /* 14588 + * eDP not supported on port D, 14589 + * so no need to worry about it 14590 + */ 14591 + has_port = 
intel_bios_is_port_present(dev_priv, PORT_D); 14592 + if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port) 14607 14593 intel_dp_init(dev, CHV_DP_D, PORT_D); 14594 + if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port) 14595 + intel_hdmi_init(dev, CHV_HDMID, PORT_D); 14608 14596 } 14609 14597 14610 14598 intel_dsi_init(dev);
+10 -8
drivers/gpu/drm/i915/intel_dp.c
··· 5725 5725 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) { 5726 5726 fixed_mode = drm_mode_duplicate(dev, 5727 5727 dev_priv->vbt.lfp_lvds_vbt_mode); 5728 - if (fixed_mode) 5728 + if (fixed_mode) { 5729 5729 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; 5730 + connector->display_info.width_mm = fixed_mode->width_mm; 5731 + connector->display_info.height_mm = fixed_mode->height_mm; 5732 + } 5730 5733 } 5731 5734 mutex_unlock(&dev->mode_config.mutex); 5732 5735 ··· 5926 5923 return false; 5927 5924 } 5928 5925 5929 - void 5930 - intel_dp_init(struct drm_device *dev, 5931 - i915_reg_t output_reg, enum port port) 5926 + bool intel_dp_init(struct drm_device *dev, 5927 + i915_reg_t output_reg, 5928 + enum port port) 5932 5929 { 5933 5930 struct drm_i915_private *dev_priv = dev->dev_private; 5934 5931 struct intel_digital_port *intel_dig_port; ··· 5938 5935 5939 5936 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); 5940 5937 if (!intel_dig_port) 5941 - return; 5938 + return false; 5942 5939 5943 5940 intel_connector = intel_connector_alloc(); 5944 5941 if (!intel_connector) ··· 5995 5992 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) 5996 5993 goto err_init_connector; 5997 5994 5998 - return; 5995 + return true; 5999 5996 6000 5997 err_init_connector: 6001 5998 drm_encoder_cleanup(encoder); ··· 6003 6000 kfree(intel_connector); 6004 6001 err_connector_alloc: 6005 6002 kfree(intel_dig_port); 6006 - 6007 - return; 6003 + return false; 6008 6004 } 6009 6005 6010 6006 void intel_dp_mst_suspend(struct drm_device *dev)
+3
drivers/gpu/drm/i915/intel_dpll_mgr.c
··· 366 366 DPLL_ID_PCH_PLL_B); 367 367 } 368 368 369 + if (!pll) 370 + return NULL; 371 + 369 372 /* reference the pll */ 370 373 intel_reference_shared_dpll(pll, crtc_state); 371 374
+1 -1
drivers/gpu/drm/i915/intel_drv.h
··· 1284 1284 void intel_csr_ucode_resume(struct drm_i915_private *); 1285 1285 1286 1286 /* intel_dp.c */ 1287 - void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port); 1287 + bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port); 1288 1288 bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, 1289 1289 struct intel_connector *intel_connector); 1290 1290 void intel_dp_set_link_params(struct intel_dp *intel_dp,
+3
drivers/gpu/drm/i915/intel_dsi.c
··· 1545 1545 goto err; 1546 1546 } 1547 1547 1548 + connector->display_info.width_mm = fixed_mode->width_mm; 1549 + connector->display_info.height_mm = fixed_mode->height_mm; 1550 + 1548 1551 intel_panel_init(&intel_connector->panel, fixed_mode, NULL); 1549 1552 1550 1553 intel_dsi_add_properties(intel_connector);
+3
drivers/gpu/drm/i915/intel_hdmi.c
··· 2142 2142 enum port port = intel_dig_port->port; 2143 2143 uint8_t alternate_ddc_pin; 2144 2144 2145 + DRM_DEBUG_KMS("Adding HDMI connector on port %c\n", 2146 + port_name(port)); 2147 + 2145 2148 if (WARN(intel_dig_port->max_lanes < 4, 2146 2149 "Not enough lanes (%d) for HDMI on port %c\n", 2147 2150 intel_dig_port->max_lanes, port_name(port)))
+2
drivers/gpu/drm/i915/intel_lvds.c
··· 1082 1082 fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode); 1083 1083 if (fixed_mode) { 1084 1084 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; 1085 + connector->display_info.width_mm = fixed_mode->width_mm; 1086 + connector->display_info.height_mm = fixed_mode->height_mm; 1085 1087 goto out; 1086 1088 } 1087 1089 }
+4 -3
drivers/gpu/drm/i915/intel_vbt_defs.h
··· 403 403 u8 vsync_off:4; 404 404 u8 rsvd0:6; 405 405 u8 hsync_off_hi:2; 406 - u8 h_image; 407 - u8 v_image; 408 - u8 max_hv; 406 + u8 himage_lo; 407 + u8 vimage_lo; 408 + u8 vimage_hi:4; 409 + u8 himage_hi:4; 409 410 u8 h_border; 410 411 u8 v_border; 411 412 u8 rsvd1:3;
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
··· 1614 1614 .fini = nvkm_device_pci_fini, 1615 1615 .resource_addr = nvkm_device_pci_resource_addr, 1616 1616 .resource_size = nvkm_device_pci_resource_size, 1617 - .cpu_coherent = !IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_ARM64), 1617 + .cpu_coherent = !IS_ENABLED(CONFIG_ARM), 1618 1618 }; 1619 1619 1620 1620 int
+9 -7
drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
··· 276 276 struct pwr_rail_t *r = &stbl.rail[i]; 277 277 struct nvkm_iccsense_rail *rail; 278 278 struct nvkm_iccsense_sensor *sensor; 279 + int (*read)(struct nvkm_iccsense *, 280 + struct nvkm_iccsense_rail *); 279 281 280 282 if (!r->mode || r->resistor_mohm == 0) 281 283 continue; ··· 286 284 if (!sensor) 287 285 continue; 288 286 289 - rail = kmalloc(sizeof(*rail), GFP_KERNEL); 290 - if (!rail) 291 - return -ENOMEM; 292 - 293 287 switch (sensor->type) { 294 288 case NVBIOS_EXTDEV_INA209: 295 289 if (r->rail != 0) 296 290 continue; 297 - rail->read = nvkm_iccsense_ina209_read; 291 + read = nvkm_iccsense_ina209_read; 298 292 break; 299 293 case NVBIOS_EXTDEV_INA219: 300 294 if (r->rail != 0) 301 295 continue; 302 - rail->read = nvkm_iccsense_ina219_read; 296 + read = nvkm_iccsense_ina219_read; 303 297 break; 304 298 case NVBIOS_EXTDEV_INA3221: 305 299 if (r->rail >= 3) 306 300 continue; 307 - rail->read = nvkm_iccsense_ina3221_read; 301 + read = nvkm_iccsense_ina3221_read; 308 302 break; 309 303 default: 310 304 continue; 311 305 } 312 306 307 + rail = kmalloc(sizeof(*rail), GFP_KERNEL); 308 + if (!rail) 309 + return -ENOMEM; 313 310 sensor->rail_mask |= 1 << r->rail; 311 + rail->read = read; 314 312 rail->sensor = sensor; 315 313 rail->idx = r->rail; 316 314 rail->mohm = r->resistor_mohm;
+3 -2
drivers/gpu/drm/radeon/atombios_crtc.c
··· 589 589 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev)) 590 590 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; 591 591 /* use frac fb div on RS780/RS880 */ 592 - if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) 592 + if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) 593 + && !radeon_crtc->ss_enabled) 593 594 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; 594 595 if (ASIC_IS_DCE32(rdev) && mode->clock > 165000) 595 596 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; ··· 627 626 if (radeon_crtc->ss.refdiv) { 628 627 radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV; 629 628 radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv; 630 - if (ASIC_IS_AVIVO(rdev)) 629 + if (rdev->family >= CHIP_RV770) 631 630 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; 632 631 } 633 632 }
+22 -1
drivers/gpu/drm/radeon/radeon_device.c
··· 630 630 /* 631 631 * GPU helpers function. 632 632 */ 633 + 634 + /** 635 + * radeon_device_is_virtual - check if we are running is a virtual environment 636 + * 637 + * Check if the asic has been passed through to a VM (all asics). 638 + * Used at driver startup. 639 + * Returns true if virtual or false if not. 640 + */ 641 + static bool radeon_device_is_virtual(void) 642 + { 643 + #ifdef CONFIG_X86 644 + return boot_cpu_has(X86_FEATURE_HYPERVISOR); 645 + #else 646 + return false; 647 + #endif 648 + } 649 + 633 650 /** 634 651 * radeon_card_posted - check if the hw has already been initialized 635 652 * ··· 659 642 bool radeon_card_posted(struct radeon_device *rdev) 660 643 { 661 644 uint32_t reg; 645 + 646 + /* for pass through, always force asic_init */ 647 + if (radeon_device_is_virtual()) 648 + return false; 662 649 663 650 /* required for EFI mode on macbook2,1 which uses an r5xx asic */ 664 651 if (efi_enabled(EFI_BOOT) && ··· 1652 1631 radeon_agp_suspend(rdev); 1653 1632 1654 1633 pci_save_state(dev->pdev); 1655 - if (freeze && rdev->family >= CHIP_R600) { 1634 + if (freeze && rdev->family >= CHIP_CEDAR) { 1656 1635 rdev->asic->asic_reset(rdev, true); 1657 1636 pci_restore_state(dev->pdev); 1658 1637 } else if (suspend) {