Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-next-2020-08-12' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
"This has a few vmwgfx regression fixes we hit from the merge window
(one in TTM), it also has a bunch of amdgpu fixes along with a
scattering of fixes everywhere else.

core:
- Fix drm_dp_mst_port refcount leaks in drm_dp_mst_allocate_vcpi
- Remove null check for kfree in drm_dev_release.
- Fix DRM_FORMAT_MOD_AMLOGIC_FBC definition.
- re-added docs for drm_gem_flink_ioctl()
- add orientation quirk for ASUS T103HAF

ttm:
- ttm: fix page-offset calculation within TTM
- revert patch causing vmwgfx regressions

fbcon:
- Fix an fbcon OOB read in fbdev, found by syzbot.

vga:
- Mark vga_tryget static as it's not used elsewhere.

amdgpu:
- Re-add spelling typo fix
- Sienna Cichlid fixes
- Navy Flounder fixes
- DC fixes
- SMU i2c fix
- Power fixes

vmwgfx:
- regression fixes for modesetting crashes
- misc fixes

xlnx:
- Small fixes to xlnx.

omap:
- Fix mode initialization in omap_connector_mode_valid().
- force runtime PM suspend on system suspend

tidss:
- fix modeset init for DPI panels"

* tag 'drm-next-2020-08-12' of git://anongit.freedesktop.org/drm/drm: (70 commits)
drm/ttm: revert "drm/ttm: make TT creation purely optional v3"
drm/vmwgfx: fix spelling mistake "Cant" -> "Can't"
drm/vmwgfx: fix spelling mistake "Cound" -> "Could"
drm/vmwgfx/ldu: Use drm_mode_config_reset
drm/vmwgfx/sou: Use drm_mode_config_reset
drm/vmwgfx/stdu: Use drm_mode_config_reset
drm/vmwgfx: Fix two list_for_each loop exit tests
drm/vmwgfx: Use correct vmw_legacy_display_unit pointer
drm/vmwgfx: Use struct_size() helper
drm/amdgpu: Fix bug where DPM is not enabled after hibernate and resume
drm/amd/powerplay: put VCN/JPEG into PG ungate state before dpm table setup(V3)
drm/amd/powerplay: update swSMU VCN/JPEG PG logics
drm/amdgpu: use mode1 reset by default for sienna_cichlid
drm/amdgpu/smu: rework i2c adpater registration
drm/amd/display: Display goes blank after inst
drm/amd/display: Change null plane state swizzle mode to 4kb_s
drm/amd/display: Use helper function to check for HDMI signal
drm/amd/display: AMD OUI (DPCD 0x00300) skipped on some sink
drm/amd/display: Fix logger context
drm/amd/display: populate new dml variable
...

+903 -382
+3 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 2574 2574 AMD_IP_BLOCK_TYPE_IH, 2575 2575 }; 2576 2576 2577 + for (i = 0; i < adev->num_ip_blocks; i++) 2578 + adev->ip_blocks[i].status.hw = false; 2579 + 2577 2580 for (i = 0; i < ARRAY_SIZE(ip_order); i++) { 2578 2581 int j; 2579 2582 struct amdgpu_ip_block *block; ··· 2584 2581 for (j = 0; j < adev->num_ip_blocks; j++) { 2585 2582 block = &adev->ip_blocks[j]; 2586 2583 2587 - block->status.hw = false; 2588 2584 if (block->version->type != ip_order[i] || 2589 2585 !block->status.valid) 2590 2586 continue;
+6
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
··· 3212 3212 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) 3213 3213 return 0; 3214 3214 3215 + /* Skip crit temp on APU */ 3216 + if ((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ) && 3217 + (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || 3218 + attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr)) 3219 + return 0; 3220 + 3215 3221 /* Skip limit attributes if DPM is not enabled */ 3216 3222 if (!adev->pm.dpm_enabled && 3217 3223 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
+30 -7
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
··· 193 193 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 194 194 195 195 psp_memory_training_fini(&adev->psp); 196 - release_firmware(adev->psp.sos_fw); 197 - adev->psp.sos_fw = NULL; 198 - release_firmware(adev->psp.asd_fw); 199 - adev->psp.asd_fw = NULL; 200 - release_firmware(adev->psp.ta_fw); 201 - adev->psp.ta_fw = NULL; 196 + if (adev->psp.sos_fw) { 197 + release_firmware(adev->psp.sos_fw); 198 + adev->psp.sos_fw = NULL; 199 + } 200 + if (adev->psp.asd_fw) { 201 + release_firmware(adev->psp.asd_fw); 202 + adev->psp.asd_fw = NULL; 203 + } 204 + if (adev->psp.ta_fw) { 205 + release_firmware(adev->psp.ta_fw); 206 + adev->psp.ta_fw = NULL; 207 + } 202 208 203 209 if (adev->asic_type == CHIP_NAVI10) 204 210 psp_sysfs_fini(adev); ··· 415 409 return ret; 416 410 } 417 411 412 + static bool psp_skip_tmr(struct psp_context *psp) 413 + { 414 + switch (psp->adev->asic_type) { 415 + case CHIP_NAVI12: 416 + case CHIP_SIENNA_CICHLID: 417 + return true; 418 + default: 419 + return false; 420 + } 421 + } 422 + 418 423 static int psp_tmr_load(struct psp_context *psp) 419 424 { 420 425 int ret; 421 426 struct psp_gfx_cmd_resp *cmd; 427 + 428 + /* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR. 429 + * Already set up by host driver. 430 + */ 431 + if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp)) 432 + return 0; 422 433 423 434 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 424 435 if (!cmd) ··· 2010 1987 2011 1988 ret = psp_tmr_terminate(psp); 2012 1989 if (ret) { 2013 - DRM_ERROR("Falied to terminate tmr\n"); 1990 + DRM_ERROR("Failed to terminate tmr\n"); 2014 1991 return ret; 2015 1992 } 2016 1993
+4 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
··· 1618 1618 data = con->eh_data; 1619 1619 save_count = data->count - control->num_recs; 1620 1620 /* only new entries are saved */ 1621 - if (save_count > 0) 1621 + if (save_count > 0) { 1622 1622 if (amdgpu_ras_eeprom_process_recods(control, 1623 1623 &data->bps[control->num_recs], 1624 1624 true, ··· 1626 1626 dev_err(adev->dev, "Failed to save EEPROM table data!"); 1627 1627 return -EIO; 1628 1628 } 1629 + 1630 + dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count); 1631 + } 1629 1632 1630 1633 return 0; 1631 1634 }
+4 -3
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
··· 3082 3082 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100), 3083 3083 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff7f0fff, 0x7e000100), 3084 3084 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000), 3085 - SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200), 3085 + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280), 3086 3086 SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000), 3087 3087 SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000), 3088 3088 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500), ··· 3127 3127 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100), 3128 3128 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff7f0fff, 0x7e000100), 3129 3129 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000), 3130 - SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200), 3130 + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280), 3131 3131 SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000), 3132 3132 SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000), 3133 3133 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500), ··· 3158 3158 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000), 3159 3159 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000), 3160 3160 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000), 3161 - SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xffffffff, 0x010b0000), 3161 + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000), 3162 3162 SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000), 3163 3163 SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff) 3164 3164 }; ··· 7529 7529 case 
CHIP_NAVI14: 7530 7530 case CHIP_NAVI12: 7531 7531 case CHIP_SIENNA_CICHLID: 7532 + case CHIP_NAVY_FLOUNDER: 7532 7533 amdgpu_gfx_off_ctrl(adev, enable); 7533 7534 break; 7534 7535 default:
+4 -5
drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
··· 49 49 static int jpeg_v3_0_early_init(void *handle) 50 50 { 51 51 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 52 - if (adev->asic_type == CHIP_SIENNA_CICHLID) { 53 - u32 harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING); 52 + u32 harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING); 54 53 55 - if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK) 56 - return -ENOENT; 57 - } 54 + if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK) 55 + return -ENOENT; 56 + 58 57 adev->jpeg.num_jpeg_inst = 1; 59 58 60 59 jpeg_v3_0_set_dec_ring_funcs(adev);
+53 -3
drivers/gpu/drm/amd/amdgpu/nv.c
··· 97 97 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); 98 98 } 99 99 100 + static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg) 101 + { 102 + unsigned long flags, address, data; 103 + u64 r; 104 + address = adev->nbio.funcs->get_pcie_index_offset(adev); 105 + data = adev->nbio.funcs->get_pcie_data_offset(adev); 106 + 107 + spin_lock_irqsave(&adev->pcie_idx_lock, flags); 108 + /* read low 32 bit */ 109 + WREG32(address, reg); 110 + (void)RREG32(address); 111 + r = RREG32(data); 112 + 113 + /* read high 32 bit*/ 114 + WREG32(address, reg + 4); 115 + (void)RREG32(address); 116 + r |= ((u64)RREG32(data) << 32); 117 + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); 118 + return r; 119 + } 120 + 121 + static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v) 122 + { 123 + unsigned long flags, address, data; 124 + 125 + address = adev->nbio.funcs->get_pcie_index_offset(adev); 126 + data = adev->nbio.funcs->get_pcie_data_offset(adev); 127 + 128 + spin_lock_irqsave(&adev->pcie_idx_lock, flags); 129 + /* write low 32 bit */ 130 + WREG32(address, reg); 131 + (void)RREG32(address); 132 + WREG32(data, (u32)(v & 0xffffffffULL)); 133 + (void)RREG32(data); 134 + 135 + /* write high 32 bit */ 136 + WREG32(address, reg + 4); 137 + (void)RREG32(address); 138 + WREG32(data, (u32)(v >> 32)); 139 + (void)RREG32(data); 140 + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); 141 + } 142 + 100 143 static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg) 101 144 { 102 145 unsigned long flags, address, data; ··· 362 319 dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n", 363 320 amdgpu_reset_method); 364 321 365 - if (smu_baco_is_support(smu)) 366 - return AMD_RESET_METHOD_BACO; 367 - else 322 + switch (adev->asic_type) { 323 + case CHIP_SIENNA_CICHLID: 368 324 return AMD_RESET_METHOD_MODE1; 325 + default: 326 + if (smu_baco_is_support(smu)) 327 + return AMD_RESET_METHOD_BACO; 328 + else 329 + return 
AMD_RESET_METHOD_MODE1; 330 + } 369 331 } 370 332 371 333 static int nv_asic_reset(struct amdgpu_device *adev) ··· 721 673 adev->smc_wreg = NULL; 722 674 adev->pcie_rreg = &nv_pcie_rreg; 723 675 adev->pcie_wreg = &nv_pcie_wreg; 676 + adev->pcie_rreg64 = &nv_pcie_rreg64; 677 + adev->pcie_wreg64 = &nv_pcie_wreg64; 724 678 725 679 /* TODO: will add them during VCN v2 implementation */ 726 680 adev->uvd_ctx_rreg = NULL;
+1 -1
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
··· 1659 1659 .emit_ib = vcn_v2_0_dec_ring_emit_ib, 1660 1660 .emit_fence = vcn_v2_0_dec_ring_emit_fence, 1661 1661 .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush, 1662 - .test_ring = amdgpu_vcn_dec_ring_test_ring, 1662 + .test_ring = vcn_v2_0_dec_ring_test_ring, 1663 1663 .test_ib = amdgpu_vcn_dec_ring_test_ib, 1664 1664 .insert_nop = vcn_v2_0_dec_ring_insert_nop, 1665 1665 .insert_start = vcn_v2_0_dec_ring_insert_start,
+29 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 97 97 #if defined(CONFIG_DRM_AMD_DC_DCN3_0) 98 98 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin" 99 99 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB); 100 + #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin" 101 + MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB); 100 102 #endif 101 103 102 104 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" ··· 1187 1185 break; 1188 1186 #if defined(CONFIG_DRM_AMD_DC_DCN3_0) 1189 1187 case CHIP_SIENNA_CICHLID: 1190 - case CHIP_NAVY_FLOUNDER: 1191 1188 dmub_asic = DMUB_ASIC_DCN30; 1192 1189 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB; 1190 + break; 1191 + case CHIP_NAVY_FLOUNDER: 1192 + dmub_asic = DMUB_ASIC_DCN30; 1193 + fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB; 1193 1194 break; 1194 1195 #endif 1195 1196 ··· 8548 8543 ret = drm_atomic_helper_check_modeset(dev, state); 8549 8544 if (ret) 8550 8545 goto fail; 8546 + 8547 + /* Check connector changes */ 8548 + for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 8549 + struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); 8550 + struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 8551 + 8552 + /* Skip connectors that are disabled or part of modeset already. */ 8553 + if (!old_con_state->crtc && !new_con_state->crtc) 8554 + continue; 8555 + 8556 + if (!new_con_state->crtc) 8557 + continue; 8558 + 8559 + new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc); 8560 + if (IS_ERR(new_crtc_state)) { 8561 + ret = PTR_ERR(new_crtc_state); 8562 + goto fail; 8563 + } 8564 + 8565 + if (dm_old_con_state->abm_level != 8566 + dm_new_con_state->abm_level) 8567 + new_crtc_state->connectors_changed = true; 8568 + } 8551 8569 8552 8570 #if defined(CONFIG_DRM_AMD_DC_DCN) 8553 8571 if (adev->asic_type >= CHIP_NAVI10) {
+6 -5
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
··· 35 35 #include "dmub/dmub_srv.h" 36 36 #include "resource.h" 37 37 #include "dsc.h" 38 + #include "dc_link_dp.h" 38 39 39 40 struct dmub_debugfs_trace_header { 40 41 uint32_t entry_count; ··· 1151 1150 return result; 1152 1151 } 1153 1152 1154 - static ssize_t dp_dsc_bytes_per_pixel_read(struct file *f, char __user *buf, 1153 + static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, 1155 1154 size_t size, loff_t *pos) 1156 1155 { 1157 1156 char *rd_buf = NULL; ··· 1187 1186 1188 1187 snprintf(rd_buf_ptr, str_len, 1189 1188 "%d\n", 1190 - dsc_state.dsc_bytes_per_pixel); 1189 + dsc_state.dsc_bits_per_pixel); 1191 1190 rd_buf_ptr += str_len; 1192 1191 1193 1192 while (size) { ··· 1461 1460 .llseek = default_llseek 1462 1461 }; 1463 1462 1464 - static const struct file_operations dp_dsc_bytes_per_pixel_debugfs_fops = { 1463 + static const struct file_operations dp_dsc_bits_per_pixel_debugfs_fops = { 1465 1464 .owner = THIS_MODULE, 1466 - .read = dp_dsc_bytes_per_pixel_read, 1465 + .read = dp_dsc_bits_per_pixel_read, 1467 1466 .llseek = default_llseek 1468 1467 }; 1469 1468 ··· 1553 1552 {"dsc_clock_en", &dp_dsc_clock_en_debugfs_fops}, 1554 1553 {"dsc_slice_width", &dp_dsc_slice_width_debugfs_fops}, 1555 1554 {"dsc_slice_height", &dp_dsc_slice_height_debugfs_fops}, 1556 - {"dsc_bytes_per_pixel", &dp_dsc_bytes_per_pixel_debugfs_fops}, 1555 + {"dsc_bits_per_pixel", &dp_dsc_bits_per_pixel_debugfs_fops}, 1557 1556 {"dsc_pic_width", &dp_dsc_pic_width_debugfs_fops}, 1558 1557 {"dsc_pic_height", &dp_dsc_pic_height_debugfs_fops}, 1559 1558 {"dsc_chunk_size", &dp_dsc_chunk_size_debugfs_fops},
+2
drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
··· 2834 2834 .bios_parser_destroy = bios_parser_destroy, 2835 2835 2836 2836 .get_board_layout_info = bios_get_board_layout_info, 2837 + 2838 + .get_atom_dc_golden_table = NULL 2837 2839 }; 2838 2840 2839 2841 static bool bios_parser_construct(
+81
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
··· 2079 2079 return 0; 2080 2080 } 2081 2081 2082 + static struct atom_dc_golden_table_v1 *bios_get_golden_table( 2083 + struct bios_parser *bp, 2084 + uint32_t rev_major, 2085 + uint32_t rev_minor, 2086 + uint16_t *dc_golden_table_ver) 2087 + { 2088 + struct atom_display_controller_info_v4_4 *disp_cntl_tbl_4_4 = NULL; 2089 + uint32_t dc_golden_offset = 0; 2090 + *dc_golden_table_ver = 0; 2091 + 2092 + if (!DATA_TABLES(dce_info)) 2093 + return NULL; 2094 + 2095 + /* ver.4.4 or higher */ 2096 + switch (rev_major) { 2097 + case 4: 2098 + switch (rev_minor) { 2099 + case 4: 2100 + disp_cntl_tbl_4_4 = GET_IMAGE(struct atom_display_controller_info_v4_4, 2101 + DATA_TABLES(dce_info)); 2102 + if (!disp_cntl_tbl_4_4) 2103 + return NULL; 2104 + dc_golden_offset = DATA_TABLES(dce_info) + disp_cntl_tbl_4_4->dc_golden_table_offset; 2105 + *dc_golden_table_ver = disp_cntl_tbl_4_4->dc_golden_table_ver; 2106 + break; 2107 + } 2108 + break; 2109 + } 2110 + 2111 + if (!dc_golden_offset) 2112 + return NULL; 2113 + 2114 + if (*dc_golden_table_ver != 1) 2115 + return NULL; 2116 + 2117 + return GET_IMAGE(struct atom_dc_golden_table_v1, 2118 + dc_golden_offset); 2119 + } 2120 + 2121 + static enum bp_result bios_get_atom_dc_golden_table( 2122 + struct dc_bios *dcb) 2123 + { 2124 + struct bios_parser *bp = BP_FROM_DCB(dcb); 2125 + enum bp_result result = BP_RESULT_OK; 2126 + struct atom_dc_golden_table_v1 *atom_dc_golden_table = NULL; 2127 + struct atom_common_table_header *header; 2128 + struct atom_data_revision tbl_revision; 2129 + uint16_t dc_golden_table_ver = 0; 2130 + 2131 + header = GET_IMAGE(struct atom_common_table_header, 2132 + DATA_TABLES(dce_info)); 2133 + if (!header) 2134 + return BP_RESULT_UNSUPPORTED; 2135 + 2136 + get_atom_data_table_revision(header, &tbl_revision); 2137 + 2138 + atom_dc_golden_table = bios_get_golden_table(bp, 2139 + tbl_revision.major, 2140 + tbl_revision.minor, 2141 + &dc_golden_table_ver); 2142 + 2143 + if (!atom_dc_golden_table) 2144 + return 
BP_RESULT_UNSUPPORTED; 2145 + 2146 + dcb->golden_table.dc_golden_table_ver = dc_golden_table_ver; 2147 + dcb->golden_table.aux_dphy_rx_control0_val = atom_dc_golden_table->aux_dphy_rx_control0_val; 2148 + dcb->golden_table.aux_dphy_rx_control1_val = atom_dc_golden_table->aux_dphy_rx_control1_val; 2149 + dcb->golden_table.aux_dphy_tx_control_val = atom_dc_golden_table->aux_dphy_tx_control_val; 2150 + dcb->golden_table.dc_gpio_aux_ctrl_0_val = atom_dc_golden_table->dc_gpio_aux_ctrl_0_val; 2151 + dcb->golden_table.dc_gpio_aux_ctrl_1_val = atom_dc_golden_table->dc_gpio_aux_ctrl_1_val; 2152 + dcb->golden_table.dc_gpio_aux_ctrl_2_val = atom_dc_golden_table->dc_gpio_aux_ctrl_2_val; 2153 + dcb->golden_table.dc_gpio_aux_ctrl_3_val = atom_dc_golden_table->dc_gpio_aux_ctrl_3_val; 2154 + dcb->golden_table.dc_gpio_aux_ctrl_4_val = atom_dc_golden_table->dc_gpio_aux_ctrl_4_val; 2155 + dcb->golden_table.dc_gpio_aux_ctrl_5_val = atom_dc_golden_table->dc_gpio_aux_ctrl_5_val; 2156 + 2157 + return result; 2158 + } 2159 + 2160 + 2082 2161 static const struct dc_vbios_funcs vbios_funcs = { 2083 2162 .get_connectors_number = bios_parser_get_connectors_number, 2084 2163 ··· 2207 2128 2208 2129 .get_board_layout_info = bios_get_board_layout_info, 2209 2130 .pack_data_tables = bios_parser_pack_data_tables, 2131 + 2132 + .get_atom_dc_golden_table = bios_get_atom_dc_golden_table 2210 2133 }; 2211 2134 2212 2135 static bool bios_parser2_construct(
+67 -2
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
··· 85 85 return disp_clk_threshold; 86 86 } 87 87 88 - static void ramp_up_dispclk_with_dpp(struct clk_mgr_internal *clk_mgr, struct dc *dc, struct dc_clocks *new_clocks) 88 + static void ramp_up_dispclk_with_dpp( 89 + struct clk_mgr_internal *clk_mgr, 90 + struct dc *dc, 91 + struct dc_clocks *new_clocks, 92 + bool safe_to_lower) 89 93 { 90 94 int i; 91 95 int dispclk_to_dpp_threshold = rv1_determine_dppclk_threshold(clk_mgr, new_clocks); 92 96 bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz; 97 + 98 + /* this function is to change dispclk, dppclk and dprefclk according to 99 + * bandwidth requirement. Its call stack is rv1_update_clocks --> 100 + * update_clocks --> dcn10_prepare_bandwidth / dcn10_optimize_bandwidth 101 + * --> prepare_bandwidth / optimize_bandwidth. before change dcn hw, 102 + * prepare_bandwidth will be called first to allow enough clock, 103 + * watermark for change, after end of dcn hw change, optimize_bandwidth 104 + * is executed to lower clock to save power for new dcn hw settings. 105 + * 106 + * below is sequence of commit_planes_for_stream: 107 + * 108 + * step 1: prepare_bandwidth - raise clock to have enough bandwidth 109 + * step 2: lock_doublebuffer_enable 110 + * step 3: pipe_control_lock(true) - make dchubp register change will 111 + * not take effect right way 112 + * step 4: apply_ctx_for_surface - program dchubp 113 + * step 5: pipe_control_lock(false) - dchubp register change take effect 114 + * step 6: optimize_bandwidth --> dc_post_update_surfaces_to_stream 115 + * for full_date, optimize clock to save power 116 + * 117 + * at end of step 1, dcn clocks (dprefclk, dispclk, dppclk) may be 118 + * changed for new dchubp configuration. but real dcn hub dchubps are 119 + * still running with old configuration until end of step 5. this need 120 + * clocks settings at step 1 should not less than that before step 1. 121 + * this is checked by two conditions: 1. 
if (should_set_clock(safe_to_lower 122 + * , new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) || 123 + * new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz) 124 + * 2. request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz 125 + * 126 + * the second condition is based on new dchubp configuration. dppclk 127 + * for new dchubp may be different from dppclk before step 1. 128 + * for example, before step 1, dchubps are as below: 129 + * pipe 0: recout=(0,40,1920,980) viewport=(0,0,1920,979) 130 + * pipe 1: recout=(0,0,1920,1080) viewport=(0,0,1920,1080) 131 + * for dppclk for pipe0 need dppclk = dispclk 132 + * 133 + * new dchubp pipe split configuration: 134 + * pipe 0: recout=(0,0,960,1080) viewport=(0,0,960,1080) 135 + * pipe 1: recout=(960,0,960,1080) viewport=(960,0,960,1080) 136 + * dppclk only needs dppclk = dispclk /2. 137 + * 138 + * dispclk, dppclk are not lock by otg master lock. they take effect 139 + * after step 1. during this transition, dispclk are the same, but 140 + * dppclk is changed to half of previous clock for old dchubp 141 + * configuration between step 1 and step 6. This may cause p-state 142 + * warning intermittently. 143 + * 144 + * for new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz, we 145 + * need make sure dppclk are not changed to less between step 1 and 6. 146 + * for new_clocks->dispclk_khz > clk_mgr_base->clks.dispclk_khz, 147 + * new display clock is raised, but we do not know ratio of 148 + * new_clocks->dispclk_khz and clk_mgr_base->clks.dispclk_khz, 149 + * new_clocks->dispclk_khz /2 does not guarantee equal or higher than 150 + * old dppclk. we could ignore power saving different between 151 + * dppclk = displck and dppclk = dispclk / 2 between step 1 and step 6. 152 + * as long as safe_to_lower = false, set dpclk = dispclk to simplify 153 + * condition check. 154 + * todo: review this change for other asic. 
155 + **/ 156 + if (!safe_to_lower) 157 + request_dpp_div = false; 93 158 94 159 /* set disp clk to dpp clk threshold */ 95 160 ··· 274 209 /* program dispclk on = as a w/a for sleep resume clock ramping issues */ 275 210 if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) 276 211 || new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz) { 277 - ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks); 212 + ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks, safe_to_lower); 278 213 clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; 279 214 send_request_to_lower = true; 280 215 }
+4 -3
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
··· 323 323 /* if clock is being raised, increase refclk before lowering DTO */ 324 324 if (update_dppclk || update_dispclk) 325 325 dcn20_update_clocks_update_dentist(clk_mgr); 326 - /* always update dtos unless clock is lowered and not safe to lower */ 327 - if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz) 328 - dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); 326 + /* There is a check inside dcn20_update_clocks_update_dpp_dto which ensures 327 + * that we do not lower dto when it is not safe to lower. We do not need to 328 + * compare the current and new dppclk before calling this function.*/ 329 + dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); 329 330 } 330 331 } 331 332
+16 -2
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 1250 1250 int i, k, l; 1251 1251 struct dc_stream_state *dc_streams[MAX_STREAMS] = {0}; 1252 1252 1253 + #if defined(CONFIG_DRM_AMD_DC_DCN3_0) 1254 + dc_allow_idle_optimizations(dc, false); 1255 + #endif 1253 1256 1254 1257 for (i = 0; i < context->stream_count; i++) 1255 1258 dc_streams[i] = context->streams[i]; ··· 1841 1838 int i; 1842 1839 enum surface_update_type overall_type = UPDATE_TYPE_FAST; 1843 1840 1841 + #if defined(CONFIG_DRM_AMD_DC_DCN3_0) 1842 + if (dc->idle_optimizations_allowed) 1843 + overall_type = UPDATE_TYPE_FULL; 1844 + 1845 + #endif 1844 1846 if (stream_status == NULL || stream_status->plane_count != surface_count) 1845 1847 overall_type = UPDATE_TYPE_FULL; 1846 1848 ··· 2314 2306 } 2315 2307 } 2316 2308 2317 - if (update_type == UPDATE_TYPE_FULL && dc->optimize_seamless_boot_streams == 0) { 2318 - dc->hwss.prepare_bandwidth(dc, context); 2309 + if (update_type == UPDATE_TYPE_FULL) { 2310 + #if defined(CONFIG_DRM_AMD_DC_DCN3_0) 2311 + dc_allow_idle_optimizations(dc, false); 2312 + 2313 + #endif 2314 + if (dc->optimize_seamless_boot_streams == 0) 2315 + dc->hwss.prepare_bandwidth(dc, context); 2316 + 2319 2317 context_clock_trace(dc, context); 2320 2318 } 2321 2319
+9 -3
drivers/gpu/drm/amd/display/dc/core/dc_link.c
··· 1540 1540 } 1541 1541 } 1542 1542 1543 + if (bios->funcs->get_atom_dc_golden_table) 1544 + bios->funcs->get_atom_dc_golden_table(bios); 1545 + 1543 1546 /* 1544 1547 * TODO check if GPIO programmed correctly 1545 1548 * ··· 3105 3102 struct dc *dc = pipe_ctx->stream->ctx->dc; 3106 3103 struct dc_stream_state *stream = pipe_ctx->stream; 3107 3104 enum dc_status status; 3105 + #if defined(CONFIG_DRM_AMD_DC_DCN3_0) 3106 + enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO; 3107 + #endif 3108 3108 DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); 3109 3109 3110 3110 if (!IS_DIAG_DC(dc->ctx->dce_environment) && ··· 3142 3136 pipe_ctx->stream->link->link_state_valid = true; 3143 3137 3144 3138 #if defined(CONFIG_DRM_AMD_DC_DCN3_0) 3145 - if (pipe_ctx->stream_res.tg->funcs->set_out_mux) 3146 - pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO); 3139 + if (pipe_ctx->stream_res.tg->funcs->set_out_mux) 3140 + pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, otg_out_dest); 3147 3141 #endif 3148 3142 3149 3143 if (dc_is_dvi_signal(pipe_ctx->stream->signal)) ··· 3282 3276 dc_is_virtual_signal(pipe_ctx->stream->signal)) 3283 3277 return; 3284 3278 3285 - if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { 3279 + if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { 3286 3280 core_link_set_avmute(pipe_ctx, true); 3287 3281 } 3288 3282
+8 -10
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
··· 246 246 247 247 #ifndef TRIM_FSFT 248 248 /** 249 - * dc_optimize_timing() - dc to optimize timing 249 + * dc_optimize_timing_for_fsft() - dc to optimize timing 250 250 */ 251 - bool dc_optimize_timing( 252 - struct dc_crtc_timing *timing, 251 + bool dc_optimize_timing_for_fsft( 252 + struct dc_stream_state *pStream, 253 253 unsigned int max_input_rate_in_khz) 254 254 { 255 - //optimization is expected to assing a value to these: 256 - //timing->pix_clk_100hz 257 - //timing->v_front_porch 258 - //timing->v_total 259 - //timing->fast_transport_output_rate_100hz; 260 - timing->fast_transport_output_rate_100hz = timing->pix_clk_100hz; 255 + struct dc *dc; 261 256 262 - return true; 257 + dc = pStream->ctx->dc; 258 + 259 + return (dc->hwss.optimize_timing_for_fsft && 260 + dc->hwss.optimize_timing_for_fsft(dc, &pStream->timing, max_input_rate_in_khz)); 263 261 } 264 262 #endif 265 263
+4
drivers/gpu/drm/amd/display/dc/dc_bios_types.h
··· 133 133 uint16_t (*pack_data_tables)( 134 134 struct dc_bios *dcb, 135 135 void *dst); 136 + 137 + enum bp_result (*get_atom_dc_golden_table)( 138 + struct dc_bios *dcb); 136 139 }; 137 140 138 141 struct bios_registers { ··· 157 154 struct dc_firmware_info fw_info; 158 155 bool fw_info_valid; 159 156 struct dc_vram_info vram_info; 157 + struct dc_golden_table golden_table; 160 158 }; 161 159 162 160 #endif /* DC_BIOS_TYPES_H */
+2 -2
drivers/gpu/drm/amd/display/dc/dc_stream.h
··· 424 424 struct dc_stream_state *dc_stream); 425 425 426 426 #ifndef TRIM_FSFT 427 - bool dc_optimize_timing( 428 - struct dc_crtc_timing *timing, 427 + bool dc_optimize_timing_for_fsft( 428 + struct dc_stream_state *pStream, 429 429 unsigned int max_input_rate_in_khz); 430 430 #endif 431 431
+14
drivers/gpu/drm/amd/display/dc/dc_types.h
··· 890 890 uint32_t branch_max_line_width; 891 891 }; 892 892 893 + struct dc_golden_table { 894 + uint16_t dc_golden_table_ver; 895 + uint32_t aux_dphy_rx_control0_val; 896 + uint32_t aux_dphy_tx_control_val; 897 + uint32_t aux_dphy_rx_control1_val; 898 + uint32_t dc_gpio_aux_ctrl_0_val; 899 + uint32_t dc_gpio_aux_ctrl_1_val; 900 + uint32_t dc_gpio_aux_ctrl_2_val; 901 + uint32_t dc_gpio_aux_ctrl_3_val; 902 + uint32_t dc_gpio_aux_ctrl_4_val; 903 + uint32_t dc_gpio_aux_ctrl_5_val; 904 + }; 905 + 906 + 893 907 #if defined(CONFIG_DRM_AMD_DC_DCN3_0) 894 908 enum dc_gpu_mem_alloc_type { 895 909 DC_MEM_ALLOC_TYPE_GART,
+1 -1
drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
··· 233 233 copy_settings_data->frame_cap_ind = psr_context->psrFrameCaptureIndicationReq; 234 234 copy_settings_data->debug.bitfields.visual_confirm = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR ? 235 235 true : false; 236 + copy_settings_data->debug.bitfields.use_hw_lock_mgr = 1; 236 237 copy_settings_data->init_sdp_deadline = psr_context->sdpTransmitLineNumDeadline; 237 - copy_settings_data->debug.bitfields.use_hw_lock_mgr = 0; 238 238 239 239 dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); 240 240 dc_dmub_srv_cmd_execute(dc->dmub_srv);
+3 -1
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
··· 390 390 } 391 391 DTN_INFO("\n"); 392 392 393 + // dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel 394 + // TODO: Update golden log header to reflect this name change 393 395 DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n"); 394 396 for (i = 0; i < pool->res_cap->num_dsc; i++) { 395 397 struct display_stream_compressor *dsc = pool->dscs[i]; ··· 402 400 dsc->inst, 403 401 s.dsc_clock_en, 404 402 s.dsc_slice_width, 405 - s.dsc_bytes_per_pixel); 403 + s.dsc_bits_per_pixel); 406 404 DTN_INFO("\n"); 407 405 } 408 406 DTN_INFO("\n");
+1 -1
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
··· 156 156 157 157 REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &s->dsc_clock_en); 158 158 REG_GET(DSCC_PPS_CONFIG3, SLICE_WIDTH, &s->dsc_slice_width); 159 - REG_GET(DSCC_PPS_CONFIG1, BITS_PER_PIXEL, &s->dsc_bytes_per_pixel); 159 + REG_GET(DSCC_PPS_CONFIG1, BITS_PER_PIXEL, &s->dsc_bits_per_pixel); 160 160 REG_GET(DSCC_PPS_CONFIG3, SLICE_HEIGHT, &s->dsc_slice_height); 161 161 REG_GET(DSCC_PPS_CONFIG1, CHUNK_SIZE, &s->dsc_chunk_size); 162 162 REG_GET(DSCC_PPS_CONFIG2, PIC_WIDTH, &s->dsc_pic_width);
+27
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
··· 2498 2498 tg->funcs->tg_init(tg); 2499 2499 } 2500 2500 } 2501 + #ifndef TRIM_FSFT 2502 + bool dcn20_optimize_timing_for_fsft(struct dc *dc, 2503 + struct dc_crtc_timing *timing, 2504 + unsigned int max_input_rate_in_khz) 2505 + { 2506 + unsigned int old_v_front_porch; 2507 + unsigned int old_v_total; 2508 + unsigned int max_input_rate_in_100hz; 2509 + unsigned long long new_v_total; 2510 + 2511 + max_input_rate_in_100hz = max_input_rate_in_khz * 10; 2512 + if (max_input_rate_in_100hz < timing->pix_clk_100hz) 2513 + return false; 2514 + 2515 + old_v_total = timing->v_total; 2516 + old_v_front_porch = timing->v_front_porch; 2517 + 2518 + timing->fast_transport_output_rate_100hz = timing->pix_clk_100hz; 2519 + timing->pix_clk_100hz = max_input_rate_in_100hz; 2520 + 2521 + new_v_total = div_u64((unsigned long long)old_v_total * max_input_rate_in_100hz, timing->pix_clk_100hz); 2522 + 2523 + timing->v_total = new_v_total; 2524 + timing->v_front_porch = old_v_front_porch + (timing->v_total - old_v_total); 2525 + return true; 2526 + } 2527 + #endif
+5
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
··· 132 132 struct dc *dc, 133 133 struct dc_phy_addr_space_config *pa_config); 134 134 135 + #ifndef TRIM_FSFT 136 + bool dcn20_optimize_timing_for_fsft(struct dc *dc, 137 + struct dc_crtc_timing *timing, 138 + unsigned int max_input_rate_in_khz); 139 + #endif 135 140 #endif /* __DC_HWSS_DCN20_H__ */ 136 141
+3
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
··· 88 88 .set_backlight_level = dce110_set_backlight_level, 89 89 .set_abm_immediate_disable = dce110_set_abm_immediate_disable, 90 90 .set_pipe = dce110_set_pipe, 91 + #ifndef TRIM_FSFT 92 + .optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft, 93 + #endif 91 94 }; 92 95 93 96 static const struct hwseq_private_funcs dcn20_private_funcs = {
+7 -46
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
··· 2223 2223 if (!res_ctx->pipe_ctx[i].plane_state) { 2224 2224 pipes[pipe_cnt].pipe.src.is_hsplit = pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled; 2225 2225 pipes[pipe_cnt].pipe.src.source_scan = dm_horz; 2226 - pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_linear; 2226 + pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_4kb_s; 2227 2227 pipes[pipe_cnt].pipe.src.macro_tile_size = dm_64k_tile; 2228 2228 pipes[pipe_cnt].pipe.src.viewport_width = timing->h_addressable; 2229 2229 if (pipes[pipe_cnt].pipe.src.viewport_width > 1920) ··· 2235 2235 pipes[pipe_cnt].pipe.src.surface_width_y = pipes[pipe_cnt].pipe.src.viewport_width; 2236 2236 pipes[pipe_cnt].pipe.src.surface_height_c = pipes[pipe_cnt].pipe.src.viewport_height; 2237 2237 pipes[pipe_cnt].pipe.src.surface_width_c = pipes[pipe_cnt].pipe.src.viewport_width; 2238 - pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 63) / 64) * 64; /* linear sw only */ 2238 + pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 255) / 256) * 256; 2239 2239 pipes[pipe_cnt].pipe.src.source_format = dm_444_32; 2240 2240 pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/ 2241 2241 pipes[pipe_cnt].pipe.dest.recout_height = pipes[pipe_cnt].pipe.src.viewport_height; /*vp_height/vratio*/ ··· 3069 3069 int pipe_cnt, 3070 3070 int vlevel) 3071 3071 { 3072 - int i, j, pipe_idx, pipe_idx_unsplit; 3073 - bool visited[MAX_PIPES] = { 0 }; 3072 + int i, pipe_idx; 3074 3073 3075 3074 /* Writeback MCIF_WB arbitration parameters */ 3076 3075 dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt); ··· 3088 3089 if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz) 3089 3090 context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz; 3090 3091 3091 - /* 3092 - * An artifact of dml pipe split/odm is that pipes get merged back together for 3093 - * calculation. 
Therefore we need to only extract for first pipe in ascending index order 3094 - * and copy into the other split half. 3095 - */ 3096 - for (i = 0, pipe_idx = 0, pipe_idx_unsplit = 0; i < dc->res_pool->pipe_count; i++) { 3097 - if (!context->res_ctx.pipe_ctx[i].stream) 3098 - continue; 3099 - 3100 - if (!visited[pipe_idx]) { 3101 - display_pipe_source_params_st *src = &pipes[pipe_idx].pipe.src; 3102 - display_pipe_dest_params_st *dst = &pipes[pipe_idx].pipe.dest; 3103 - 3104 - dst->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit]; 3105 - dst->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit]; 3106 - dst->vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx_unsplit]; 3107 - dst->vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx_unsplit]; 3108 - /* 3109 - * j iterates inside pipes array, unlike i which iterates inside 3110 - * pipe_ctx array 3111 - */ 3112 - if (src->is_hsplit) 3113 - for (j = pipe_idx + 1; j < pipe_cnt; j++) { 3114 - display_pipe_source_params_st *src_j = &pipes[j].pipe.src; 3115 - display_pipe_dest_params_st *dst_j = &pipes[j].pipe.dest; 3116 - 3117 - if (src_j->is_hsplit && !visited[j] 3118 - && src->hsplit_grp == src_j->hsplit_grp) { 3119 - dst_j->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit]; 3120 - dst_j->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit]; 3121 - dst_j->vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx_unsplit]; 3122 - dst_j->vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx_unsplit]; 3123 - visited[j] = true; 3124 - } 3125 - } 3126 - visited[pipe_idx] = true; 3127 - pipe_idx_unsplit++; 3128 - } 3129 - pipe_idx++; 3130 - } 3131 - 3132 3092 for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { 3133 3093 if (!context->res_ctx.pipe_ctx[i].stream) 3134 3094 continue; 3095 + pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, 
pipe_idx); 3096 + pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); 3097 + pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); 3098 + pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); 3135 3099 if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) 3136 3100 context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; 3137 3101 context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 3138 3102 pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; 3139 - ASSERT(visited[pipe_idx]); 3140 3103 context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest; 3141 3104 pipe_idx++; 3142 3105 }
+3
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
··· 92 92 .set_backlight_level = dcn21_set_backlight_level, 93 93 .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, 94 94 .set_pipe = dcn21_set_pipe, 95 + #ifndef TRIM_FSFT 96 + .optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft, 97 + #endif 95 98 }; 96 99 97 100 static const struct hwseq_private_funcs dcn21_private_funcs = {
+3 -2
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
··· 26 26 #include "dce110/dce110_hw_sequencer.h" 27 27 #include "dcn10/dcn10_hw_sequencer.h" 28 28 #include "dcn20/dcn20_hwseq.h" 29 + #include "dcn21/dcn21_hwseq.h" 29 30 #include "dcn30_hwseq.h" 30 31 31 32 static const struct hw_sequencer_funcs dcn30_funcs = { ··· 88 87 .set_flip_control_gsl = dcn20_set_flip_control_gsl, 89 88 .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, 90 89 .apply_idle_power_optimizations = dcn30_apply_idle_power_optimizations, 91 - .set_backlight_level = dce110_set_backlight_level, 92 - .set_abm_immediate_disable = dce110_set_abm_immediate_disable, 90 + .set_backlight_level = dcn21_set_backlight_level, 91 + .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, 93 92 }; 94 93 95 94 static const struct hwseq_private_funcs dcn30_private_funcs = {
+3 -14
drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
··· 154 154 dml_get_pipe_attr_func(refcyc_per_meta_chunk_flip_l_in_us, mode_lib->vba.TimePerMetaChunkFlip); 155 155 dml_get_pipe_attr_func(refcyc_per_meta_chunk_flip_c_in_us, mode_lib->vba.TimePerChromaMetaChunkFlip); 156 156 157 + dml_get_pipe_attr_func(vstartup, mode_lib->vba.VStartup); 157 158 dml_get_pipe_attr_func(vupdate_offset, mode_lib->vba.VUpdateOffsetPix); 158 159 dml_get_pipe_attr_func(vupdate_width, mode_lib->vba.VUpdateWidthPix); 159 160 dml_get_pipe_attr_func(vready_offset, mode_lib->vba.VReadyOffsetPix); 160 - 161 - unsigned int get_vstartup_calculated( 162 - struct display_mode_lib *mode_lib, 163 - const display_e2e_pipe_params_st *pipes, 164 - unsigned int num_pipes, 165 - unsigned int which_pipe) 166 - { 167 - unsigned int which_plane; 168 - 169 - recalculate_params(mode_lib, pipes, num_pipes); 170 - which_plane = mode_lib->vba.pipe_plane[which_pipe]; 171 - return mode_lib->vba.VStartup[which_plane]; 172 - } 173 161 174 162 double get_total_immediate_flip_bytes( 175 163 struct display_mode_lib *mode_lib, ··· 467 479 mode_lib->vba.AudioSampleLayout[mode_lib->vba.NumberOfActivePlanes] = 468 480 1; 469 481 mode_lib->vba.DRAMClockChangeLatencyOverride = 0.0; 470 - mode_lib->vba.DSCEnabled[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable; 482 + mode_lib->vba.DSCEnabled[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable;; 483 + mode_lib->vba.DSCEnable[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable; 471 484 mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] = 472 485 dout->dsc_slices; 473 486 mode_lib->vba.DSCInputBitPerComponent[mode_lib->vba.NumberOfActivePlanes] =
+1 -6
drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
··· 98 98 dml_get_pipe_attr_decl(refcyc_per_meta_chunk_flip_l_in_us); 99 99 dml_get_pipe_attr_decl(refcyc_per_meta_chunk_flip_c_in_us); 100 100 101 + dml_get_pipe_attr_decl(vstartup); 101 102 dml_get_pipe_attr_decl(vupdate_offset); 102 103 dml_get_pipe_attr_decl(vupdate_width); 103 104 dml_get_pipe_attr_decl(vready_offset); 104 - 105 - unsigned int get_vstartup_calculated( 106 - struct display_mode_lib *mode_lib, 107 - const display_e2e_pipe_params_st *pipes, 108 - unsigned int num_pipes, 109 - unsigned int which_pipe); 110 105 111 106 double get_total_immediate_flip_bytes( 112 107 struct display_mode_lib *mode_lib,
+2 -1
drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
··· 71 71 72 72 #define CTX \ 73 73 clk_mgr->base.ctx 74 + 74 75 #define DC_LOGGER \ 75 - clk_mgr->ctx->logger 76 + clk_mgr->base.ctx->logger 76 77 77 78 78 79
+1 -1
drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
··· 55 55 struct dcn_dsc_state { 56 56 uint32_t dsc_clock_en; 57 57 uint32_t dsc_slice_width; 58 - uint32_t dsc_bytes_per_pixel; 58 + uint32_t dsc_bits_per_pixel; 59 59 uint32_t dsc_slice_height; 60 60 uint32_t dsc_pic_width; 61 61 uint32_t dsc_pic_height;
+5
drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
··· 116 116 void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx, 117 117 int num_pipes, 118 118 const struct dc_static_screen_params *events); 119 + #ifndef TRIM_FSFT 120 + bool (*optimize_timing_for_fsft)(struct dc *dc, 121 + struct dc_crtc_timing *timing, 122 + unsigned int max_input_rate_in_khz); 123 + #endif 119 124 120 125 /* Stream Related */ 121 126 void (*enable_stream)(struct pipe_ctx *pipe_ctx);
+4 -1
drivers/gpu/drm/amd/display/modules/freesync/freesync.c
··· 829 829 switch (packet_type) { 830 830 case PACKET_TYPE_FS_V3: 831 831 #ifndef TRIM_FSFT 832 + // always populate with pixel rate. 832 833 build_vrr_infopacket_v3( 833 834 stream->signal, vrr, 834 835 stream->timing.flags.FAST_TRANSPORT, 835 - stream->timing.fast_transport_output_rate_100hz, 836 + (stream->timing.flags.FAST_TRANSPORT) ? 837 + stream->timing.fast_transport_output_rate_100hz : 838 + stream->timing.pix_clk_100hz, 836 839 app_tf, infopacket); 837 840 #else 838 841 build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket);
+53 -1
drivers/gpu/drm/amd/include/atomfirmware.h
··· 941 941 uint8_t reserved3[8]; 942 942 }; 943 943 944 - 945 944 struct atom_display_controller_info_v4_2 946 945 { 947 946 struct atom_common_table_header table_header; ··· 975 976 uint8_t reserved3[8]; 976 977 }; 977 978 979 + struct atom_display_controller_info_v4_4 { 980 + struct atom_common_table_header table_header; 981 + uint32_t display_caps; 982 + uint32_t bootup_dispclk_10khz; 983 + uint16_t dce_refclk_10khz; 984 + uint16_t i2c_engine_refclk_10khz; 985 + uint16_t dvi_ss_percentage; // in unit of 0.001% 986 + uint16_t dvi_ss_rate_10hz; 987 + uint16_t hdmi_ss_percentage; // in unit of 0.001% 988 + uint16_t hdmi_ss_rate_10hz; 989 + uint16_t dp_ss_percentage; // in unit of 0.001% 990 + uint16_t dp_ss_rate_10hz; 991 + uint8_t dvi_ss_mode; // enum of atom_spread_spectrum_mode 992 + uint8_t hdmi_ss_mode; // enum of atom_spread_spectrum_mode 993 + uint8_t dp_ss_mode; // enum of atom_spread_spectrum_mode 994 + uint8_t ss_reserved; 995 + uint8_t dfp_hardcode_mode_num; // DFP hardcode mode number defined in StandardVESA_TimingTable when EDID is not available 996 + uint8_t dfp_hardcode_refreshrate;// DFP hardcode mode refreshrate defined in StandardVESA_TimingTable when EDID is not available 997 + uint8_t vga_hardcode_mode_num; // VGA hardcode mode number defined in StandardVESA_TimingTable when EDID is not avablable 998 + uint8_t vga_hardcode_refreshrate;// VGA hardcode mode number defined in StandardVESA_TimingTable when EDID is not avablable 999 + uint16_t dpphy_refclk_10khz; 1000 + uint16_t hw_chip_id; 1001 + uint8_t dcnip_min_ver; 1002 + uint8_t dcnip_max_ver; 1003 + uint8_t max_disp_pipe_num; 1004 + uint8_t max_vbios_active_disp_pipum; 1005 + uint8_t max_ppll_num; 1006 + uint8_t max_disp_phy_num; 1007 + uint8_t max_aux_pairs; 1008 + uint8_t remotedisplayconfig; 1009 + uint32_t dispclk_pll_vco_freq; 1010 + uint32_t dp_ref_clk_freq; 1011 + uint32_t max_mclk_chg_lat; // Worst case blackout duration for a memory clock frequency (p-state) change, units of 100s of 
ns (0.1 us) 1012 + uint32_t max_sr_exit_lat; // Worst case memory self refresh exit time, units of 100ns of ns (0.1us) 1013 + uint32_t max_sr_enter_exit_lat; // Worst case memory self refresh entry followed by immediate exit time, units of 100ns of ns (0.1us) 1014 + uint16_t dc_golden_table_offset; // point of struct of atom_dc_golden_table_vxx 1015 + uint16_t dc_golden_table_ver; 1016 + uint32_t reserved3[3]; 1017 + }; 1018 + 1019 + struct atom_dc_golden_table_v1 1020 + { 1021 + uint32_t aux_dphy_rx_control0_val; 1022 + uint32_t aux_dphy_tx_control_val; 1023 + uint32_t aux_dphy_rx_control1_val; 1024 + uint32_t dc_gpio_aux_ctrl_0_val; 1025 + uint32_t dc_gpio_aux_ctrl_1_val; 1026 + uint32_t dc_gpio_aux_ctrl_2_val; 1027 + uint32_t dc_gpio_aux_ctrl_3_val; 1028 + uint32_t dc_gpio_aux_ctrl_4_val; 1029 + uint32_t dc_gpio_aux_ctrl_5_val; 1030 + uint32_t reserved[23]; 1031 + }; 978 1032 979 1033 enum dce_info_caps_def 980 1034 {
+134 -10
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
··· 133 133 return ret; 134 134 } 135 135 136 + static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu, 137 + bool enable) 138 + { 139 + struct smu_power_context *smu_power = &smu->smu_power; 140 + struct smu_power_gate *power_gate = &smu_power->power_gate; 141 + int ret = 0; 142 + 143 + if (!smu->ppt_funcs->dpm_set_vcn_enable) 144 + return 0; 145 + 146 + if (atomic_read(&power_gate->vcn_gated) ^ enable) 147 + return 0; 148 + 149 + ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable); 150 + if (!ret) 151 + atomic_set(&power_gate->vcn_gated, !enable); 152 + 153 + return ret; 154 + } 155 + 156 + static int smu_dpm_set_vcn_enable(struct smu_context *smu, 157 + bool enable) 158 + { 159 + struct smu_power_context *smu_power = &smu->smu_power; 160 + struct smu_power_gate *power_gate = &smu_power->power_gate; 161 + int ret = 0; 162 + 163 + mutex_lock(&power_gate->vcn_gate_lock); 164 + 165 + ret = smu_dpm_set_vcn_enable_locked(smu, enable); 166 + 167 + mutex_unlock(&power_gate->vcn_gate_lock); 168 + 169 + return ret; 170 + } 171 + 172 + static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu, 173 + bool enable) 174 + { 175 + struct smu_power_context *smu_power = &smu->smu_power; 176 + struct smu_power_gate *power_gate = &smu_power->power_gate; 177 + int ret = 0; 178 + 179 + if (!smu->ppt_funcs->dpm_set_jpeg_enable) 180 + return 0; 181 + 182 + if (atomic_read(&power_gate->jpeg_gated) ^ enable) 183 + return 0; 184 + 185 + ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable); 186 + if (!ret) 187 + atomic_set(&power_gate->jpeg_gated, !enable); 188 + 189 + return ret; 190 + } 191 + 192 + static int smu_dpm_set_jpeg_enable(struct smu_context *smu, 193 + bool enable) 194 + { 195 + struct smu_power_context *smu_power = &smu->smu_power; 196 + struct smu_power_gate *power_gate = &smu_power->power_gate; 197 + int ret = 0; 198 + 199 + mutex_lock(&power_gate->jpeg_gate_lock); 200 + 201 + ret = smu_dpm_set_jpeg_enable_locked(smu, enable); 202 + 203 + 
mutex_unlock(&power_gate->jpeg_gate_lock); 204 + 205 + return ret; 206 + } 207 + 136 208 /** 137 209 * smu_dpm_set_power_gate - power gate/ungate the specific IP block 138 210 * ··· 423 351 mutex_init(&smu->mutex); 424 352 425 353 return smu_set_funcs(adev); 354 + } 355 + 356 + static int smu_set_default_dpm_table(struct smu_context *smu) 357 + { 358 + struct smu_power_context *smu_power = &smu->smu_power; 359 + struct smu_power_gate *power_gate = &smu_power->power_gate; 360 + int vcn_gate, jpeg_gate; 361 + int ret = 0; 362 + 363 + if (!smu->ppt_funcs->set_default_dpm_table) 364 + return 0; 365 + 366 + mutex_lock(&power_gate->vcn_gate_lock); 367 + mutex_lock(&power_gate->jpeg_gate_lock); 368 + 369 + vcn_gate = atomic_read(&power_gate->vcn_gated); 370 + jpeg_gate = atomic_read(&power_gate->jpeg_gated); 371 + 372 + ret = smu_dpm_set_vcn_enable_locked(smu, true); 373 + if (ret) 374 + goto err0_out; 375 + 376 + ret = smu_dpm_set_jpeg_enable_locked(smu, true); 377 + if (ret) 378 + goto err1_out; 379 + 380 + ret = smu->ppt_funcs->set_default_dpm_table(smu); 381 + if (ret) 382 + dev_err(smu->adev->dev, 383 + "Failed to setup default dpm clock tables!\n"); 384 + 385 + smu_dpm_set_jpeg_enable_locked(smu, !jpeg_gate); 386 + err1_out: 387 + smu_dpm_set_vcn_enable_locked(smu, !vcn_gate); 388 + err0_out: 389 + mutex_unlock(&power_gate->jpeg_gate_lock); 390 + mutex_unlock(&power_gate->vcn_gate_lock); 391 + 392 + return ret; 426 393 } 427 394 428 395 static int smu_late_init(void *handle) ··· 690 579 if (ret) 691 580 return ret; 692 581 582 + ret = smu_i2c_init(smu, &smu->adev->pm.smu_i2c); 583 + if (ret) 584 + return ret; 585 + 693 586 return 0; 694 587 } 695 588 696 589 static int smu_smc_table_sw_fini(struct smu_context *smu) 697 590 { 698 591 int ret; 592 + 593 + smu_i2c_fini(smu, &smu->adev->pm.smu_i2c); 699 594 700 595 ret = smu_free_memory_pool(smu); 701 596 if (ret) ··· 759 642 smu->watermarks_bitmap = 0; 760 643 smu->power_profile_mode = 
PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 761 644 smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 645 + 646 + atomic_set(&smu->smu_power.power_gate.vcn_gated, 1); 647 + atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1); 648 + mutex_init(&smu->smu_power.power_gate.vcn_gate_lock); 649 + mutex_init(&smu->smu_power.power_gate.jpeg_gate_lock); 762 650 763 651 smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; 764 652 smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; ··· 856 734 uint32_t pcie_gen = 0, pcie_width = 0; 857 735 int ret; 858 736 859 - if (smu_is_dpm_running(smu) && adev->in_suspend) { 737 + if (adev->in_suspend && smu_is_dpm_running(smu)) { 860 738 dev_info(adev->dev, "dpm has been enabled\n"); 861 739 return 0; 862 740 } ··· 965 843 dev_err(adev->dev, "Failed to enable thermal alert!\n"); 966 844 return ret; 967 845 } 968 - 969 - ret = smu_i2c_init(smu, &adev->pm.smu_i2c); 970 - if (ret) 971 - return ret; 972 846 973 847 ret = smu_disable_umc_cdr_12gbps_workaround(smu); 974 848 if (ret) { ··· 1163 1045 { 1164 1046 struct amdgpu_device *adev = smu->adev; 1165 1047 int ret = 0; 1166 - 1167 - smu_i2c_fini(smu, &adev->pm.smu_i2c); 1168 1048 1169 1049 cancel_work_sync(&smu->throttling_logging_work); 1170 1050 ··· 1706 1590 } 1707 1591 1708 1592 ret = smu_send_smc_msg(smu, msg, NULL); 1593 + /* some asics may not support those messages */ 1594 + if (ret == -EINVAL) 1595 + ret = 0; 1709 1596 if (ret) 1710 1597 dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n"); 1711 1598 ··· 2063 1944 2064 1945 mutex_lock(&smu->mutex); 2065 1946 1947 + if (smu->ppt_funcs->read_sensor) 1948 + if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size)) 1949 + goto unlock; 1950 + 2066 1951 switch (sensor) { 2067 1952 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK: 2068 1953 *((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100; ··· 2089 1966 *size = 4; 2090 1967 break; 2091 1968 case 
AMDGPU_PP_SENSOR_VCN_POWER_STATE: 2092 - *(uint32_t *)data = smu->smu_power.power_gate.vcn_gated ? 0 : 1; 1969 + *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0: 1; 2093 1970 *size = 4; 2094 1971 break; 2095 1972 case AMDGPU_PP_SENSOR_MIN_FAN_RPM: ··· 2097 1974 *size = 4; 2098 1975 break; 2099 1976 default: 2100 - if (smu->ppt_funcs->read_sensor) 2101 - ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size); 1977 + *size = 0; 1978 + ret = -EOPNOTSUPP; 2102 1979 break; 2103 1980 } 2104 1981 1982 + unlock: 2105 1983 mutex_unlock(&smu->mutex); 2106 1984 2107 1985 return ret;
-18
drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
··· 1849 1849 1850 1850 static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable) 1851 1851 { 1852 - struct smu_power_context *smu_power = &smu->smu_power; 1853 - struct smu_power_gate *power_gate = &smu_power->power_gate; 1854 1852 int ret = 0; 1855 1853 1856 1854 if (enable) { ··· 1859 1861 return ret; 1860 1862 } 1861 1863 } 1862 - power_gate->vcn_gated = false; 1863 1864 } else { 1864 1865 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { 1865 1866 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 0); ··· 1867 1870 return ret; 1868 1871 } 1869 1872 } 1870 - power_gate->vcn_gated = true; 1871 1873 } 1872 1874 1873 1875 return ret; ··· 2076 2080 .functionality = arcturus_i2c_func, 2077 2081 }; 2078 2082 2079 - static bool arcturus_i2c_adapter_is_added(struct i2c_adapter *control) 2080 - { 2081 - struct amdgpu_device *adev = to_amdgpu_device(control); 2082 - 2083 - return control->dev.parent == &adev->pdev->dev; 2084 - } 2085 - 2086 2083 static int arcturus_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control) 2087 2084 { 2088 2085 struct amdgpu_device *adev = to_amdgpu_device(control); 2089 2086 int res; 2090 - 2091 - /* smu_i2c_eeprom_init may be called twice in sriov */ 2092 - if (arcturus_i2c_adapter_is_added(control)) 2093 - return 0; 2094 2087 2095 2088 control->owner = THIS_MODULE; 2096 2089 control->class = I2C_CLASS_SPD; ··· 2096 2111 2097 2112 static void arcturus_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control) 2098 2113 { 2099 - if (!arcturus_i2c_adapter_is_added(control)) 2100 - return; 2101 - 2102 2114 i2c_del_adapter(control); 2103 2115 } 2104 2116
+4 -2
drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
··· 292 292 struct smu_power_gate { 293 293 bool uvd_gated; 294 294 bool vce_gated; 295 - bool vcn_gated; 296 - bool jpeg_gated; 295 + atomic_t vcn_gated; 296 + atomic_t jpeg_gated; 297 + struct mutex vcn_gate_lock; 298 + struct mutex jpeg_gate_lock; 297 299 }; 298 300 299 301 struct smu_power_context {
+15 -6
drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_sienna_cichlid.h
··· 27 27 // *** IMPORTANT *** 28 28 // SMU TEAM: Always increment the interface version if 29 29 // any structure is changed in this file 30 - #define SMU11_DRIVER_IF_VERSION 0x33 30 + #define SMU11_DRIVER_IF_VERSION 0x34 31 31 32 32 #define PPTABLE_Sienna_Cichlid_SMU_VERSION 5 33 33 ··· 968 968 969 969 typedef struct { 970 970 uint32_t CurrClock[PPCLK_COUNT]; 971 - uint16_t AverageGfxclkFrequency; 972 - uint16_t AverageFclkFrequency; 973 - uint16_t AverageUclkFrequency ; 971 + 972 + uint16_t AverageGfxclkFrequencyPreDs; 973 + uint16_t AverageGfxclkFrequencyPostDs; 974 + uint16_t AverageFclkFrequencyPreDs; 975 + uint16_t AverageFclkFrequencyPostDs; 976 + uint16_t AverageUclkFrequencyPreDs ; 977 + uint16_t AverageUclkFrequencyPostDs ; 978 + 979 + 974 980 uint16_t AverageGfxActivity ; 975 981 uint16_t AverageUclkActivity ; 976 982 uint8_t CurrSocVoltageOffset ; ··· 994 988 uint16_t TemperatureLiquid0 ; 995 989 uint16_t TemperatureLiquid1 ; 996 990 uint16_t TemperaturePlx ; 991 + uint16_t Padding16 ; 997 992 uint32_t ThrottlerStatus ; 998 993 999 994 uint8_t LinkDpmLevel; ··· 1013 1006 uint16_t AverageDclk0Frequency ; 1014 1007 uint16_t AverageVclk1Frequency ; 1015 1008 uint16_t AverageDclk1Frequency ; 1016 - uint16_t VcnActivityPercentage ; //place holder, David N. to provide full sequence 1017 - uint16_t padding16_2; 1009 + uint16_t VcnActivityPercentage ; //place holder, David N. to provide full sequence 1010 + uint8_t PcieRate ; 1011 + uint8_t PcieWidth ; 1012 + 1018 1013 } SmuMetrics_t; 1019 1014 1020 1015 typedef struct {
+2 -2
drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
··· 30 30 #define SMU11_DRIVER_IF_VERSION_NV10 0x36 31 31 #define SMU11_DRIVER_IF_VERSION_NV12 0x33 32 32 #define SMU11_DRIVER_IF_VERSION_NV14 0x36 33 - #define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x33 34 - #define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x2 33 + #define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x34 34 + #define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x3 35 35 36 36 /* MP Apertures */ 37 37 #define MP0_Public 0x03800000
-22
drivers/gpu/drm/amd/powerplay/navi10_ppt.c
··· 785 785 786 786 static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable) 787 787 { 788 - struct smu_power_context *smu_power = &smu->smu_power; 789 - struct smu_power_gate *power_gate = &smu_power->power_gate; 790 788 int ret = 0; 791 789 792 790 if (enable) { ··· 794 796 if (ret) 795 797 return ret; 796 798 } 797 - power_gate->vcn_gated = false; 798 799 } else { 799 800 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { 800 801 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL); 801 802 if (ret) 802 803 return ret; 803 804 } 804 - power_gate->vcn_gated = true; 805 805 } 806 806 807 807 return ret; ··· 807 811 808 812 static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) 809 813 { 810 - struct smu_power_context *smu_power = &smu->smu_power; 811 - struct smu_power_gate *power_gate = &smu_power->power_gate; 812 814 int ret = 0; 813 815 814 816 if (enable) { ··· 815 821 if (ret) 816 822 return ret; 817 823 } 818 - power_gate->jpeg_gated = false; 819 824 } else { 820 825 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) { 821 826 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL); 822 827 if (ret) 823 828 return ret; 824 829 } 825 - power_gate->jpeg_gated = true; 826 830 } 827 831 828 832 return ret; ··· 2449 2457 .functionality = navi10_i2c_func, 2450 2458 }; 2451 2459 2452 - static bool navi10_i2c_adapter_is_added(struct i2c_adapter *control) 2453 - { 2454 - struct amdgpu_device *adev = to_amdgpu_device(control); 2455 - 2456 - return control->dev.parent == &adev->pdev->dev; 2457 - } 2458 - 2459 2460 static int navi10_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control) 2460 2461 { 2461 2462 struct amdgpu_device *adev = to_amdgpu_device(control); 2462 2463 int res; 2463 - 2464 - /* smu_i2c_eeprom_init may be called twice in sriov */ 2465 - if (navi10_i2c_adapter_is_added(control)) 2466 - return 0; 2467 2464 2468 2465 control->owner = THIS_MODULE; 2469 2466 
control->class = I2C_CLASS_SPD; ··· 2469 2488 2470 2489 static void navi10_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control) 2471 2490 { 2472 - if (!navi10_i2c_adapter_is_added(control)) 2473 - return; 2474 - 2475 2491 i2c_del_adapter(control); 2476 2492 } 2477 2493
-8
drivers/gpu/drm/amd/powerplay/renoir_ppt.c
··· 459 459 460 460 static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable) 461 461 { 462 - struct smu_power_context *smu_power = &smu->smu_power; 463 - struct smu_power_gate *power_gate = &smu_power->power_gate; 464 462 int ret = 0; 465 463 466 464 if (enable) { ··· 468 470 if (ret) 469 471 return ret; 470 472 } 471 - power_gate->vcn_gated = false; 472 473 } else { 473 474 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { 474 475 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL); 475 476 if (ret) 476 477 return ret; 477 478 } 478 - power_gate->vcn_gated = true; 479 479 } 480 480 481 481 return ret; ··· 481 485 482 486 static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) 483 487 { 484 - struct smu_power_context *smu_power = &smu->smu_power; 485 - struct smu_power_gate *power_gate = &smu_power->power_gate; 486 488 int ret = 0; 487 489 488 490 if (enable) { ··· 489 495 if (ret) 490 496 return ret; 491 497 } 492 - power_gate->jpeg_gated = false; 493 498 } else { 494 499 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) { 495 500 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL); 496 501 if (ret) 497 502 return ret; 498 503 } 499 - power_gate->jpeg_gated = true; 500 504 } 501 505 502 506 return ret;
+43 -60
drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
··· 70 70 FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \ 71 71 FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT)) 72 72 73 + #define SMU_11_0_7_GFX_BUSY_THRESHOLD 15 74 + 73 75 static struct cmn2asic_msg_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT] = { 74 76 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), 75 77 MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), 76 78 MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1), 77 - MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow, 1), 78 - MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 1), 79 - MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 1), 80 - MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 1), 79 + MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow, 0), 80 + MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 0), 81 + MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0), 82 + MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0), 81 83 MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow, 1), 82 84 MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh, 1), 83 85 MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow, 1), ··· 87 85 MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetRunningSmuFeaturesLow, 1), 88 86 MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh, 1), 89 87 MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1), 90 - MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 1), 91 - MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), 92 - MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), 93 - MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 1), 94 - MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 1), 95 - MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1), 96 - MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 1), 97 - 
MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 1), 98 - MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 1), 99 - MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1), 100 - MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1), 88 + MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), 89 + MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 0), 90 + MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 0), 91 + MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0), 92 + MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0), 93 + MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0), 94 + MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), 95 + MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0), 96 + MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0), 97 + MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0), 98 + MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0), 101 99 MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1), 102 - MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 1), 100 + MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0), 103 101 MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1), 104 102 MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1), 105 103 MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1), 106 - MSG_MAP(SetGeminiMode, PPSMC_MSG_SetGeminiMode, 1), 107 - MSG_MAP(SetGeminiApertureHigh, PPSMC_MSG_SetGeminiApertureHigh, 1), 108 - MSG_MAP(SetGeminiApertureLow, PPSMC_MSG_SetGeminiApertureLow, 1), 109 - MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 1), 110 - MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 1), 111 - MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 1), 112 - MSG_MAP(SetUclkFastSwitch, PPSMC_MSG_SetUclkFastSwitch, 1), 113 - MSG_MAP(SetVideoFps, PPSMC_MSG_SetVideoFps, 1), 104 + MSG_MAP(SetGeminiMode, PPSMC_MSG_SetGeminiMode, 0), 105 + MSG_MAP(SetGeminiApertureHigh, 
PPSMC_MSG_SetGeminiApertureHigh, 0), 106 + MSG_MAP(SetGeminiApertureLow, PPSMC_MSG_SetGeminiApertureLow, 0), 107 + MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0), 108 + MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0), 109 + MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0), 110 + MSG_MAP(SetUclkFastSwitch, PPSMC_MSG_SetUclkFastSwitch, 0), 111 + MSG_MAP(SetVideoFps, PPSMC_MSG_SetVideoFps, 0), 114 112 MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 1), 115 - MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 1), 116 - MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 1), 117 - MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1), 113 + MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0), 114 + MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0), 115 + MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0), 118 116 MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1), 119 - MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 1), 120 - MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 1), 121 - MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1), 122 - MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 1), 123 - MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 1), 124 - MSG_MAP(BacoAudioD3PME, PPSMC_MSG_BacoAudioD3PME, 1), 125 - MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 1), 117 + MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0), 118 + MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0), 119 + MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0), 120 + MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0), 121 + MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0), 122 + MSG_MAP(BacoAudioD3PME, PPSMC_MSG_BacoAudioD3PME, 0), 123 + MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0), 124 + MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0), 126 125 }; 127 126 128 127 static struct cmn2asic_mapping sienna_cichlid_clk_map[SMU_CLK_COUNT] = { ··· 445 442 *value = metrics->CurrClock[PPCLK_DCEFCLK]; 446 443 break; 447 444 case METRICS_AVERAGE_GFXCLK: 448 - *value = metrics->AverageGfxclkFrequency; 445 + if 
(metrics->AverageGfxActivity <= SMU_11_0_7_GFX_BUSY_THRESHOLD) 446 + *value = metrics->AverageGfxclkFrequencyPostDs; 447 + else 448 + *value = metrics->AverageGfxclkFrequencyPreDs; 449 449 break; 450 450 case METRICS_AVERAGE_FCLK: 451 - *value = metrics->AverageFclkFrequency; 451 + *value = metrics->AverageFclkFrequencyPostDs; 452 452 break; 453 453 case METRICS_AVERAGE_UCLK: 454 - *value = metrics->AverageUclkFrequency; 454 + *value = metrics->AverageUclkFrequencyPostDs; 455 455 break; 456 456 case METRICS_AVERAGE_GFXACTIVITY: 457 457 *value = metrics->AverageGfxActivity; ··· 766 760 767 761 static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enable) 768 762 { 769 - struct smu_power_context *smu_power = &smu->smu_power; 770 - struct smu_power_gate *power_gate = &smu_power->power_gate; 771 763 struct amdgpu_device *adev = smu->adev; 772 - 773 764 int ret = 0; 774 765 775 766 if (enable) { ··· 782 779 return ret; 783 780 } 784 781 } 785 - power_gate->vcn_gated = false; 786 782 } else { 787 783 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) { 788 784 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL); ··· 794 792 return ret; 795 793 } 796 794 } 797 - power_gate->vcn_gated = true; 798 795 } 799 796 800 797 return ret; ··· 801 800 802 801 static int sienna_cichlid_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) 803 802 { 804 - struct smu_power_context *smu_power = &smu->smu_power; 805 - struct smu_power_gate *power_gate = &smu_power->power_gate; 806 803 int ret = 0; 807 804 808 805 if (enable) { ··· 809 810 if (ret) 810 811 return ret; 811 812 } 812 - power_gate->jpeg_gated = false; 813 813 } else { 814 814 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) { 815 815 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL); 816 816 if (ret) 817 817 return ret; 818 818 } 819 - power_gate->jpeg_gated = true; 820 819 } 821 820 822 821 return ret; ··· 2621 2624 
.functionality = sienna_cichlid_i2c_func, 2622 2625 }; 2623 2626 2624 - static bool sienna_cichlid_i2c_adapter_is_added(struct i2c_adapter *control) 2625 - { 2626 - struct amdgpu_device *adev = to_amdgpu_device(control); 2627 - 2628 - return control->dev.parent == &adev->pdev->dev; 2629 - } 2630 - 2631 2627 static int sienna_cichlid_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control) 2632 2628 { 2633 2629 struct amdgpu_device *adev = to_amdgpu_device(control); 2634 2630 int res; 2635 - 2636 - /* smu_i2c_eeprom_init may be called twice in sriov */ 2637 - if (sienna_cichlid_i2c_adapter_is_added(control)) 2638 - return 0; 2639 2631 2640 2632 control->owner = THIS_MODULE; 2641 2633 control->class = I2C_CLASS_SPD; ··· 2641 2655 2642 2656 static void sienna_cichlid_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control) 2643 2657 { 2644 - if (!sienna_cichlid_i2c_adapter_is_added(control)) 2645 - return; 2646 - 2647 2658 i2c_del_adapter(control); 2648 2659 } 2649 2660
+5 -5
drivers/gpu/drm/amd/powerplay/smu_cmn.c
··· 166 166 167 167 switch (type) { 168 168 case CMN2ASIC_MAPPING_MSG: 169 - if (index > SMU_MSG_MAX_COUNT || 169 + if (index >= SMU_MSG_MAX_COUNT || 170 170 !smu->message_map) 171 171 return -EINVAL; 172 172 ··· 181 181 return msg_mapping.map_to; 182 182 183 183 case CMN2ASIC_MAPPING_CLK: 184 - if (index > SMU_CLK_COUNT || 184 + if (index >= SMU_CLK_COUNT || 185 185 !smu->clock_map) 186 186 return -EINVAL; 187 187 ··· 192 192 return mapping.map_to; 193 193 194 194 case CMN2ASIC_MAPPING_FEATURE: 195 - if (index > SMU_FEATURE_COUNT || 195 + if (index >= SMU_FEATURE_COUNT || 196 196 !smu->feature_map) 197 197 return -EINVAL; 198 198 ··· 203 203 return mapping.map_to; 204 204 205 205 case CMN2ASIC_MAPPING_TABLE: 206 - if (index > SMU_TABLE_COUNT || 206 + if (index >= SMU_TABLE_COUNT || 207 207 !smu->table_map) 208 208 return -EINVAL; 209 209 ··· 214 214 return mapping.map_to; 215 215 216 216 case CMN2ASIC_MAPPING_PWR: 217 - if (index > SMU_POWER_SOURCE_COUNT || 217 + if (index >= SMU_POWER_SOURCE_COUNT || 218 218 !smu->pwr_src_map) 219 219 return -EINVAL; 220 220
-3
drivers/gpu/drm/amd/powerplay/smu_internal.h
··· 60 60 #define smu_disable_all_features_with_exception(smu, mask) smu_ppt_funcs(disable_all_features_with_exception, 0, smu, mask) 61 61 #define smu_is_dpm_running(smu) smu_ppt_funcs(is_dpm_running, 0 , smu) 62 62 #define smu_notify_display_change(smu) smu_ppt_funcs(notify_display_change, 0, smu) 63 - #define smu_set_default_dpm_table(smu) smu_ppt_funcs(set_default_dpm_table, 0, smu) 64 63 #define smu_populate_umd_state_clk(smu) smu_ppt_funcs(populate_umd_state_clk, 0, smu) 65 64 #define smu_set_default_od8_settings(smu) smu_ppt_funcs(set_default_od8_settings, 0, smu) 66 65 #define smu_enable_thermal_alert(smu) smu_ppt_funcs(enable_thermal_alert, 0, smu) ··· 76 77 #define smu_get_dal_power_level(smu, clocks) smu_ppt_funcs(get_dal_power_level, 0, smu, clocks) 77 78 #define smu_get_perf_level(smu, designation, level) smu_ppt_funcs(get_perf_level, 0, smu, designation, level) 78 79 #define smu_get_current_shallow_sleep_clocks(smu, clocks) smu_ppt_funcs(get_current_shallow_sleep_clocks, 0, smu, clocks) 79 - #define smu_dpm_set_vcn_enable(smu, enable) smu_ppt_funcs(dpm_set_vcn_enable, 0, smu, enable) 80 - #define smu_dpm_set_jpeg_enable(smu, enable) smu_ppt_funcs(dpm_set_jpeg_enable, 0, smu, enable) 81 80 #define smu_set_watermarks_table(smu, clock_ranges) smu_ppt_funcs(set_watermarks_table, 0, smu, clock_ranges) 82 81 #define smu_thermal_temperature_range_update(smu, range, rw) smu_ppt_funcs(thermal_temperature_range_update, 0, smu, range, rw) 83 82 #define smu_register_irq_handler(smu) smu_ppt_funcs(register_irq_handler, 0, smu)
+1
drivers/gpu/drm/amd/powerplay/smu_v11_0.c
··· 1029 1029 case CHIP_NAVI14: 1030 1030 case CHIP_NAVI12: 1031 1031 case CHIP_SIENNA_CICHLID: 1032 + case CHIP_NAVY_FLOUNDER: 1032 1033 if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) 1033 1034 return 0; 1034 1035 if (enable)
+4 -1
drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
··· 2725 2725 2726 2726 static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr) 2727 2727 { 2728 - return ci_is_smc_ram_running(hwmgr); 2728 + return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device, 2729 + CGS_IND_REG__SMC, FEATURE_STATUS, 2730 + VOLTAGE_CONTROLLER_ON)) 2731 + ? true : false; 2729 2732 } 2730 2733 2731 2734 static int ci_smu_init(struct pp_hwmgr *hwmgr)
+4 -3
drivers/gpu/drm/drm_dp_mst_topology.c
··· 4308 4308 { 4309 4309 int ret; 4310 4310 4311 - port = drm_dp_mst_topology_get_port_validated(mgr, port); 4312 - if (!port) 4311 + if (slots < 0) 4313 4312 return false; 4314 4313 4315 - if (slots < 0) 4314 + port = drm_dp_mst_topology_get_port_validated(mgr, port); 4315 + if (!port) 4316 4316 return false; 4317 4317 4318 4318 if (port->vcpi.vcpi > 0) { ··· 4328 4328 if (ret) { 4329 4329 DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n", 4330 4330 DIV_ROUND_UP(pbn, mgr->pbn_div), ret); 4331 + drm_dp_mst_topology_put_port(port); 4331 4332 goto out; 4332 4333 } 4333 4334 DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
+1 -2
drivers/gpu/drm/drm_drv.c
··· 815 815 816 816 drm_managed_release(dev); 817 817 818 - if (dev->managed.final_kfree) 819 - kfree(dev->managed.final_kfree); 818 + kfree(dev->managed.final_kfree); 820 819 } 821 820 822 821 /**
+3
drivers/gpu/drm/drm_gem.c
··· 879 879 * @file_priv: drm file-private structure 880 880 * 881 881 * Open an object using the global name, returning a handle and the size. 882 + * 883 + * This handle (of course) holds a reference to the object, so the object 884 + * will not go away until the handle is deleted. 882 885 */ 883 886 int 884 887 drm_gem_open_ioctl(struct drm_device *dev, void *data,
+6
drivers/gpu/drm/drm_panel_orientation_quirks.c
··· 121 121 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T101HA"), 122 122 }, 123 123 .driver_data = (void *)&lcd800x1280_rightside_up, 124 + }, { /* Asus T103HAF */ 125 + .matches = { 126 + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), 127 + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"), 128 + }, 129 + .driver_data = (void *)&lcd800x1280_rightside_up, 124 130 }, { /* GPD MicroPC (generic strings, also match on bios date) */ 125 131 .matches = { 126 132 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
+1
drivers/gpu/drm/omapdrm/dss/dispc.c
··· 4915 4915 static const struct dev_pm_ops dispc_pm_ops = { 4916 4916 .runtime_suspend = dispc_runtime_suspend, 4917 4917 .runtime_resume = dispc_runtime_resume, 4918 + SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) 4918 4919 }; 4919 4920 4920 4921 struct platform_driver omap_dispchw_driver = {
+1
drivers/gpu/drm/omapdrm/dss/dsi.c
··· 5467 5467 static const struct dev_pm_ops dsi_pm_ops = { 5468 5468 .runtime_suspend = dsi_runtime_suspend, 5469 5469 .runtime_resume = dsi_runtime_resume, 5470 + SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) 5470 5471 }; 5471 5472 5472 5473 struct platform_driver omap_dsihw_driver = {
+1
drivers/gpu/drm/omapdrm/dss/dss.c
··· 1614 1614 static const struct dev_pm_ops dss_pm_ops = { 1615 1615 .runtime_suspend = dss_runtime_suspend, 1616 1616 .runtime_resume = dss_runtime_resume, 1617 + SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) 1617 1618 }; 1618 1619 1619 1620 struct platform_driver omap_dsshw_driver = {
+1
drivers/gpu/drm/omapdrm/dss/venc.c
··· 903 903 static const struct dev_pm_ops venc_pm_ops = { 904 904 .runtime_suspend = venc_runtime_suspend, 905 905 .runtime_resume = venc_runtime_resume, 906 + SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) 906 907 }; 907 908 908 909 static const struct of_device_id venc_of_match[] = {
+1 -1
drivers/gpu/drm/omapdrm/omap_connector.c
··· 89 89 struct drm_display_mode *mode) 90 90 { 91 91 struct omap_connector *omap_connector = to_omap_connector(connector); 92 - struct drm_display_mode new_mode = { { 0 } }; 92 + struct drm_display_mode new_mode = {}; 93 93 enum drm_mode_status status; 94 94 95 95 status = omap_connector_mode_fixup(omap_connector->output, mode,
+1 -1
drivers/gpu/drm/tidss/tidss_kms.c
··· 154 154 break; 155 155 case DISPC_VP_DPI: 156 156 enc_type = DRM_MODE_ENCODER_DPI; 157 - conn_type = DRM_MODE_CONNECTOR_LVDS; 157 + conn_type = DRM_MODE_CONNECTOR_DPI; 158 158 break; 159 159 default: 160 160 WARN_ON(1);
+28 -9
drivers/gpu/drm/ttm/ttm_bo.c
··· 287 287 */ 288 288 289 289 if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) { 290 - bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED); 291 - 292 - ret = ttm_tt_create(bo, zero); 293 - if (ret) 294 - goto out_err; 290 + if (bo->ttm == NULL) { 291 + bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED); 292 + ret = ttm_tt_create(bo, zero); 293 + if (ret) 294 + goto out_err; 295 + } 295 296 296 297 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement); 297 298 if (ret) ··· 653 652 placement.num_busy_placement = 0; 654 653 bdev->driver->evict_flags(bo, &placement); 655 654 656 - if (!placement.num_placement && !placement.num_busy_placement) 657 - return ttm_bo_pipeline_gutting(bo); 655 + if (!placement.num_placement && !placement.num_busy_placement) { 656 + ret = ttm_bo_pipeline_gutting(bo); 657 + if (ret) 658 + return ret; 659 + 660 + return ttm_tt_create(bo, false); 661 + } 658 662 659 663 evict_mem = bo->mem; 660 664 evict_mem.mm_node = NULL; ··· 1198 1192 /* 1199 1193 * Remove the backing store if no placement is given. 1200 1194 */ 1201 - if (!placement->num_placement && !placement->num_busy_placement) 1202 - return ttm_bo_pipeline_gutting(bo); 1195 + if (!placement->num_placement && !placement->num_busy_placement) { 1196 + ret = ttm_bo_pipeline_gutting(bo); 1197 + if (ret) 1198 + return ret; 1199 + 1200 + return ttm_tt_create(bo, false); 1201 + } 1203 1202 1204 1203 /* 1205 1204 * Check whether we need to move buffer. ··· 1220 1209 */ 1221 1210 ttm_flag_masked(&bo->mem.placement, new_flags, 1222 1211 ~TTM_PL_MASK_MEMTYPE); 1212 + } 1213 + /* 1214 + * We might need to add a TTM. 1215 + */ 1216 + if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { 1217 + ret = ttm_tt_create(bo, true); 1218 + if (ret) 1219 + return ret; 1223 1220 } 1224 1221 return 0; 1225 1222 }
+2 -5
drivers/gpu/drm/ttm/ttm_bo_util.c
··· 531 531 .interruptible = false, 532 532 .no_wait_gpu = false 533 533 }; 534 - struct ttm_tt *ttm; 534 + struct ttm_tt *ttm = bo->ttm; 535 535 pgprot_t prot; 536 536 int ret; 537 537 538 - ret = ttm_tt_create(bo, true); 539 - if (ret) 540 - return ret; 538 + BUG_ON(!ttm); 541 539 542 - ttm = bo->ttm; 543 540 ret = ttm_tt_populate(ttm, &ctx); 544 541 if (ret) 545 542 return ret;
+3 -6
drivers/gpu/drm/ttm/ttm_bo_vm.c
··· 351 351 352 352 }; 353 353 354 - if (ttm_tt_create(bo, true)) { 355 - ret = VM_FAULT_OOM; 356 - goto out_io_unlock; 357 - } 358 - 359 354 ttm = bo->ttm; 360 355 if (ttm_tt_populate(bo->ttm, &ctx)) { 361 356 ret = VM_FAULT_OOM; ··· 505 510 int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, 506 511 void *buf, int len, int write) 507 512 { 508 - unsigned long offset = (addr) - vma->vm_start; 509 513 struct ttm_buffer_object *bo = vma->vm_private_data; 514 + unsigned long offset = (addr) - vma->vm_start + 515 + ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node)) 516 + << PAGE_SHIFT); 510 517 int ret; 511 518 512 519 if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
+1 -3
drivers/gpu/drm/ttm/ttm_tt.c
··· 50 50 51 51 dma_resv_assert_held(bo->base.resv); 52 52 53 - if (bo->ttm) 54 - return 0; 55 - 56 53 if (bdev->need_dma32) 57 54 page_flags |= TTM_PAGE_FLAG_DMA32; 58 55 ··· 67 70 page_flags |= TTM_PAGE_FLAG_SG; 68 71 break; 69 72 default: 73 + bo->ttm = NULL; 70 74 pr_err("Illegal buffer object type\n"); 71 75 return -EINVAL; 72 76 }
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
··· 3037 3037 res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx), 3038 3038 cmd->body.soid); 3039 3039 if (IS_ERR(res)) { 3040 - DRM_ERROR("Cound not find streamoutput to bind.\n"); 3040 + DRM_ERROR("Could not find streamoutput to bind.\n"); 3041 3041 return PTR_ERR(res); 3042 3042 } 3043 3043
+5 -5
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
··· 186 186 /* TODO handle none page aligned offsets */ 187 187 /* TODO handle more dst & src != 0 */ 188 188 /* TODO handle more then one copy */ 189 - DRM_ERROR("Cant snoop dma request for cursor!\n"); 189 + DRM_ERROR("Can't snoop dma request for cursor!\n"); 190 190 DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n", 191 191 box->srcx, box->srcy, box->srcz, 192 192 box->x, box->y, box->z, ··· 2575 2575 ++i; 2576 2576 } 2577 2577 2578 - if (i != unit) { 2578 + if (&con->head == &dev_priv->dev->mode_config.connector_list) { 2579 2579 DRM_ERROR("Could not find initial display unit.\n"); 2580 2580 ret = -EINVAL; 2581 2581 goto out_unlock; ··· 2599 2599 break; 2600 2600 } 2601 2601 2602 - if (mode->type & DRM_MODE_TYPE_PREFERRED) 2603 - *p_mode = mode; 2604 - else { 2602 + if (&mode->head == &con->modes) { 2605 2603 WARN_ONCE(true, "Could not find initial preferred mode.\n"); 2606 2604 *p_mode = list_first_entry(&con->modes, 2607 2605 struct drm_display_mode, 2608 2606 head); 2607 + } else { 2608 + *p_mode = mode; 2609 2609 } 2610 2610 2611 2611 out_unlock:
+4 -9
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
··· 81 81 struct vmw_legacy_display_unit *entry; 82 82 struct drm_framebuffer *fb = NULL; 83 83 struct drm_crtc *crtc = NULL; 84 - int i = 0; 84 + int i; 85 85 86 86 /* If there is no display topology the host just assumes 87 87 * that the guest will set the same layout as the host. ··· 92 92 crtc = &entry->base.crtc; 93 93 w = max(w, crtc->x + crtc->mode.hdisplay); 94 94 h = max(h, crtc->y + crtc->mode.vdisplay); 95 - i++; 96 95 } 97 96 98 97 if (crtc == NULL) 99 98 return 0; 100 - fb = entry->base.crtc.primary->state->fb; 99 + fb = crtc->primary->state->fb; 101 100 102 101 return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0], 103 102 fb->format->cpp[0] * 8, ··· 387 388 ldu->base.is_implicit = true; 388 389 389 390 /* Initialize primary plane */ 390 - vmw_du_plane_reset(primary); 391 - 392 391 ret = drm_universal_plane_init(dev, &ldu->base.primary, 393 392 0, &vmw_ldu_plane_funcs, 394 393 vmw_primary_plane_formats, ··· 400 403 drm_plane_helper_add(primary, &vmw_ldu_primary_plane_helper_funcs); 401 404 402 405 /* Initialize cursor plane */ 403 - vmw_du_plane_reset(cursor); 404 - 405 406 ret = drm_universal_plane_init(dev, &ldu->base.cursor, 406 407 0, &vmw_ldu_cursor_funcs, 407 408 vmw_cursor_plane_formats, ··· 413 418 414 419 drm_plane_helper_add(cursor, &vmw_ldu_cursor_plane_helper_funcs); 415 420 416 - vmw_du_connector_reset(connector); 417 421 ret = drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, 418 422 DRM_MODE_CONNECTOR_VIRTUAL); 419 423 if (ret) { ··· 440 446 goto err_free_encoder; 441 447 } 442 448 443 - vmw_du_crtc_reset(crtc); 444 449 ret = drm_crtc_init_with_planes(dev, crtc, &ldu->base.primary, 445 450 &ldu->base.cursor, 446 451 &vmw_legacy_crtc_funcs, NULL); ··· 513 520 vmw_ldu_init(dev_priv, 0); 514 521 515 522 dev_priv->active_display_unit = vmw_du_legacy; 523 + 524 + drm_mode_config_reset(dev); 516 525 517 526 DRM_INFO("Legacy Display Unit initialized\n"); 518 527
+2 -7
drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
··· 859 859 sou->base.is_implicit = false; 860 860 861 861 /* Initialize primary plane */ 862 - vmw_du_plane_reset(primary); 863 - 864 862 ret = drm_universal_plane_init(dev, &sou->base.primary, 865 863 0, &vmw_sou_plane_funcs, 866 864 vmw_primary_plane_formats, ··· 873 875 drm_plane_enable_fb_damage_clips(primary); 874 876 875 877 /* Initialize cursor plane */ 876 - vmw_du_plane_reset(cursor); 877 - 878 878 ret = drm_universal_plane_init(dev, &sou->base.cursor, 879 879 0, &vmw_sou_cursor_funcs, 880 880 vmw_cursor_plane_formats, ··· 886 890 887 891 drm_plane_helper_add(cursor, &vmw_sou_cursor_plane_helper_funcs); 888 892 889 - vmw_du_connector_reset(connector); 890 893 ret = drm_connector_init(dev, connector, &vmw_sou_connector_funcs, 891 894 DRM_MODE_CONNECTOR_VIRTUAL); 892 895 if (ret) { ··· 913 918 goto err_free_encoder; 914 919 } 915 920 916 - 917 - vmw_du_crtc_reset(crtc); 918 921 ret = drm_crtc_init_with_planes(dev, crtc, &sou->base.primary, 919 922 &sou->base.cursor, 920 923 &vmw_screen_object_crtc_funcs, NULL); ··· 965 972 vmw_sou_init(dev_priv, i); 966 973 967 974 dev_priv->active_display_unit = vmw_du_screen_object; 975 + 976 + drm_mode_config_reset(dev); 968 977 969 978 DRM_INFO("Screen Objects Display Unit initialized\n"); 970 979
+2 -7
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
··· 1738 1738 stdu->base.is_implicit = false; 1739 1739 1740 1740 /* Initialize primary plane */ 1741 - vmw_du_plane_reset(primary); 1742 - 1743 1741 ret = drm_universal_plane_init(dev, primary, 1744 1742 0, &vmw_stdu_plane_funcs, 1745 1743 vmw_primary_plane_formats, ··· 1752 1754 drm_plane_enable_fb_damage_clips(primary); 1753 1755 1754 1756 /* Initialize cursor plane */ 1755 - vmw_du_plane_reset(cursor); 1756 - 1757 1757 ret = drm_universal_plane_init(dev, cursor, 1758 1758 0, &vmw_stdu_cursor_funcs, 1759 1759 vmw_cursor_plane_formats, ··· 1764 1768 } 1765 1769 1766 1770 drm_plane_helper_add(cursor, &vmw_stdu_cursor_plane_helper_funcs); 1767 - 1768 - vmw_du_connector_reset(connector); 1769 1771 1770 1772 ret = drm_connector_init(dev, connector, &vmw_stdu_connector_funcs, 1771 1773 DRM_MODE_CONNECTOR_VIRTUAL); ··· 1792 1798 goto err_free_encoder; 1793 1799 } 1794 1800 1795 - vmw_du_crtc_reset(crtc); 1796 1801 ret = drm_crtc_init_with_planes(dev, crtc, &stdu->base.primary, 1797 1802 &stdu->base.cursor, 1798 1803 &vmw_stdu_crtc_funcs, NULL); ··· 1886 1893 return ret; 1887 1894 } 1888 1895 } 1896 + 1897 + drm_mode_config_reset(dev); 1889 1898 1890 1899 DRM_INFO("Screen Target Display device initialized\n"); 1891 1900
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
··· 1969 1969 num_mip = 1; 1970 1970 1971 1971 num_subres = num_layers * num_mip; 1972 - dirty_size = sizeof(*dirty) + num_subres * sizeof(dirty->boxes[0]); 1972 + dirty_size = struct_size(dirty, boxes, num_subres); 1973 1973 acc_size = ttm_round_pot(dirty_size); 1974 1974 ret = ttm_mem_global_alloc(vmw_mem_glob(res->dev_priv), 1975 1975 acc_size, &ctx);
+18 -15
drivers/gpu/drm/xlnx/zynqmp_dp.c
··· 44 44 */ 45 45 static uint zynqmp_dp_power_on_delay_ms = 4; 46 46 module_param_named(power_on_delay_ms, zynqmp_dp_power_on_delay_ms, uint, 0444); 47 - MODULE_PARM_DESC(aux_timeout_ms, "DP power on delay in msec (default: 4)"); 47 + MODULE_PARM_DESC(power_on_delay_ms, "DP power on delay in msec (default: 4)"); 48 48 49 49 /* Link configuration registers */ 50 50 #define ZYNQMP_DP_LINK_BW_SET 0x0 ··· 567 567 u8 current_bw) 568 568 { 569 569 int max_rate = dp->link_config.max_rate; 570 - u8 bws[3] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 }; 570 + u8 bw_code; 571 571 u8 max_lanes = dp->link_config.max_lanes; 572 572 u8 max_link_rate_code = drm_dp_link_rate_to_bw_code(max_rate); 573 573 u8 bpp = dp->config.bpp; 574 574 u8 lane_cnt; 575 - s8 i; 576 575 577 - if (current_bw == DP_LINK_BW_1_62) { 576 + /* Downshift from current bandwidth */ 577 + switch (current_bw) { 578 + case DP_LINK_BW_5_4: 579 + bw_code = DP_LINK_BW_2_7; 580 + break; 581 + case DP_LINK_BW_2_7: 582 + bw_code = DP_LINK_BW_1_62; 583 + break; 584 + case DP_LINK_BW_1_62: 578 585 dev_err(dp->dev, "can't downshift. 
already lowest link rate\n"); 579 586 return -EINVAL; 580 - } 581 - 582 - for (i = ARRAY_SIZE(bws) - 1; i >= 0; i--) { 583 - if (current_bw && bws[i] >= current_bw) 584 - continue; 585 - 586 - if (bws[i] <= max_link_rate_code) 587 - break; 587 + default: 588 + /* If not given, start with max supported */ 589 + bw_code = max_link_rate_code; 590 + break; 588 591 } 589 592 590 593 for (lane_cnt = 1; lane_cnt <= max_lanes; lane_cnt <<= 1) { 591 594 int bw; 592 595 u32 rate; 593 596 594 - bw = drm_dp_bw_code_to_link_rate(bws[i]); 597 + bw = drm_dp_bw_code_to_link_rate(bw_code); 595 598 rate = zynqmp_dp_max_rate(bw, lane_cnt, bpp); 596 599 if (pclock <= rate) { 597 - dp->mode.bw_code = bws[i]; 600 + dp->mode.bw_code = bw_code; 598 601 dp->mode.lane_cnt = lane_cnt; 599 602 dp->mode.pclock = pclock; 600 603 return dp->mode.bw_code; ··· 1311 1308 ret = drm_dp_dpcd_read(&dp->aux, 0x0, dp->dpcd, 1312 1309 sizeof(dp->dpcd)); 1313 1310 if (ret < 0) { 1314 - dev_dbg(dp->dev, "DPCD read failes"); 1311 + dev_dbg(dp->dev, "DPCD read failed"); 1315 1312 goto disconnected; 1316 1313 } 1317 1314
+1 -2
drivers/gpu/vga/vgaarb.c
··· 529 529 * 530 530 * 0 on success, negative error code on failure. 531 531 */ 532 - int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) 532 + static int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) 533 533 { 534 534 struct vga_device *vgadev; 535 535 unsigned long flags; ··· 554 554 spin_unlock_irqrestore(&vga_lock, flags); 555 555 return rc; 556 556 } 557 - EXPORT_SYMBOL(vga_tryget); 558 557 559 558 /** 560 559 * vga_put - release lock on legacy VGA resources
+2 -6
drivers/video/fbdev/core/fbmem.c
··· 957 957 int 958 958 fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) 959 959 { 960 - int flags = info->flags; 961 960 int ret = 0; 962 961 u32 activate; 963 962 struct fb_var_screeninfo old_var; ··· 1051 1052 event.data = &mode; 1052 1053 fb_notifier_call_chain(FB_EVENT_MODE_CHANGE, &event); 1053 1054 1054 - if (flags & FBINFO_MISC_USEREVENT) 1055 - fbcon_update_vcs(info, activate & FB_ACTIVATE_ALL); 1056 - 1057 1055 return 0; 1058 1056 } 1059 1057 EXPORT_SYMBOL(fb_set_var); ··· 1101 1105 return -EFAULT; 1102 1106 console_lock(); 1103 1107 lock_fb_info(info); 1104 - info->flags |= FBINFO_MISC_USEREVENT; 1105 1108 ret = fb_set_var(info, &var); 1106 - info->flags &= ~FBINFO_MISC_USEREVENT; 1109 + if (!ret) 1110 + fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL); 1107 1111 unlock_fb_info(info); 1108 1112 console_unlock(); 1109 1113 if (!ret && copy_to_user(argp, &var, sizeof(var)))
+2 -2
drivers/video/fbdev/core/fbsysfs.c
··· 91 91 92 92 var->activate |= FB_ACTIVATE_FORCE; 93 93 console_lock(); 94 - fb_info->flags |= FBINFO_MISC_USEREVENT; 95 94 err = fb_set_var(fb_info, var); 96 - fb_info->flags &= ~FBINFO_MISC_USEREVENT; 95 + if (!err) 96 + fbcon_update_vcs(fb_info, var->activate & FB_ACTIVATE_ALL); 97 97 console_unlock(); 98 98 if (err) 99 99 return err;
+3 -2
drivers/video/fbdev/ps3fb.c
··· 29 29 #include <linux/freezer.h> 30 30 #include <linux/uaccess.h> 31 31 #include <linux/fb.h> 32 + #include <linux/fbcon.h> 32 33 #include <linux/init.h> 33 34 34 35 #include <asm/cell-regs.h> ··· 825 824 var = info->var; 826 825 fb_videomode_to_var(&var, vmode); 827 826 console_lock(); 828 - info->flags |= FBINFO_MISC_USEREVENT; 829 827 /* Force, in case only special bits changed */ 830 828 var.activate |= FB_ACTIVATE_FORCE; 831 829 par->new_mode_id = val; 832 830 retval = fb_set_var(info, &var); 833 - info->flags &= ~FBINFO_MISC_USEREVENT; 831 + if (!retval) 832 + fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL); 834 833 console_unlock(); 835 834 } 836 835 break;
-2
include/linux/fb.h
··· 400 400 #define FBINFO_HWACCEL_YPAN 0x2000 /* optional */ 401 401 #define FBINFO_HWACCEL_YWRAP 0x4000 /* optional */ 402 402 403 - #define FBINFO_MISC_USEREVENT 0x10000 /* event request 404 - from userspace */ 405 403 #define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */ 406 404 407 405 /* A driver may set this flag to indicate that it does want a set_par to be
-6
include/linux/vgaarb.h
··· 110 110 } 111 111 112 112 #if defined(CONFIG_VGA_ARB) 113 - extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc); 114 - #else 115 - static inline int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { return 0; } 116 - #endif 117 - 118 - #if defined(CONFIG_VGA_ARB) 119 113 extern void vga_put(struct pci_dev *pdev, unsigned int rsrc); 120 114 #else 121 115 #define vga_put(pdev, rsrc)
+1 -1
include/uapi/drm/drm_fourcc.h
··· 1004 1004 #define DRM_FORMAT_MOD_AMLOGIC_FBC(__layout, __options) \ 1005 1005 fourcc_mod_code(AMLOGIC, \ 1006 1006 ((__layout) & __fourcc_mod_amlogic_layout_mask) | \ 1007 - ((__options) & __fourcc_mod_amlogic_options_mask \ 1007 + (((__options) & __fourcc_mod_amlogic_options_mask) \ 1008 1008 << __fourcc_mod_amlogic_options_shift)) 1009 1009 1010 1010 /* Amlogic FBC Layouts */