Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-next-2025-12-13' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
"This is the weekly fixes for what is in next tree, mostly amdgpu and
some i915, panthor and a core revert.

core:
- revert dumb bo 8 byte alignment

amdgpu:
- SI fix
- DC reduce stack usage
- HDMI fixes
- VCN 4.0.5 fix
- DP MST fix
- DC memory allocation fix

amdkfd:
- SVM fix
- Trap handler fix
- VGPR fixes for GC 11.5

i915:
- Fix format string truncation warning
- Fix runtime PM reference during fbdev BO creation

panthor:
- fix UAF

renesas:
- fix sync flag handling"

* tag 'drm-next-2025-12-13' of https://gitlab.freedesktop.org/drm/kernel:
Revert "drm/amd/display: Fix pbn to kbps Conversion"
drm/amd: Fix unbind/rebind for VCN 4.0.5
drm/i915: Fix format string truncation warning
drm/i915/fbdev: Hold runtime PM ref during fbdev BO creation
drm/amd/display: Improve HDMI info retrieval
drm/amdkfd: bump minimum vgpr size for gfx1151
drm/amd/display: shrink struct members
drm/amdkfd: Export the cwsr_size and ctl_stack_size to userspace
drm/amd/display: Refactor dml_core_mode_support to reduce stack frame
drm/amdgpu: don't attach the tlb fence for SI
drm/amd/display: Use GFP_ATOMIC in dc_create_plane_state()
drm/amdkfd: Trap handler support for expert scheduling mode
drm/amdkfd: Use huge page size to check split svm range alignment
drm/rcar-du: dsi: Handle both DRM_MODE_FLAG_N.SYNC and !DRM_MODE_FLAG_P.SYNC
drm/gem-shmem: revert the 8-byte alignment constraint
drm/gem-dma: revert the 8-byte alignment constraint
drm/panthor: Prevent potential UAF in group creation

+267 -152
+3 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 1069 1069 } 1070 1070 1071 1071 /* Prepare a TLB flush fence to be attached to PTs */ 1072 - if (!params->unlocked) { 1072 + if (!params->unlocked && 1073 + /* SI doesn't support pasid or KIQ/MES */ 1074 + params->adev->family > AMDGPU_FAMILY_SI) { 1073 1075 amdgpu_vm_tlb_fence_create(params->adev, vm, fence); 1074 1076 1075 1077 /* Makes sure no PD/PT is freed before the flush */
+2
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
··· 265 265 if (amdgpu_sriov_vf(adev)) 266 266 amdgpu_virt_free_mm_table(adev); 267 267 268 + amdgpu_vcn_sysfs_reset_mask_fini(adev); 269 + 268 270 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 269 271 r = amdgpu_vcn_suspend(adev, i); 270 272 if (r)
+36 -26
drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
··· 3644 3644 }; 3645 3645 3646 3646 static const uint32_t cwsr_trap_gfx12_hex[] = { 3647 - 0xbfa00001, 0xbfa002a2, 3648 - 0xb0804009, 0xb8f8f804, 3647 + 0xbfa00001, 0xbfa002b2, 3648 + 0xb0804009, 0xb8eef81a, 3649 + 0xbf880000, 0xb980081a, 3650 + 0x00000000, 0xb8f8f804, 3651 + 0x9177ff77, 0x0c000000, 3652 + 0x846e9a6e, 0x8c776e77, 3649 3653 0x9178ff78, 0x00008c00, 3650 3654 0xb8fbf811, 0x8b6eff78, 3651 3655 0x00004000, 0xbfa10008, 3652 3656 0x8b6eff7b, 0x00000080, 3653 3657 0xbfa20018, 0x8b6ea07b, 3654 - 0xbfa20042, 0xbf830010, 3658 + 0xbfa2004a, 0xbf830010, 3655 3659 0xb8fbf811, 0xbfa0fffb, 3656 3660 0x8b6eff7b, 0x00000bd0, 3657 3661 0xbfa20010, 0xb8eef812, ··· 3666 3662 0xf0000000, 0xbfa20005, 3667 3663 0x8b6fff6f, 0x00000200, 3668 3664 0xbfa20002, 0x8b6ea07b, 3669 - 0xbfa2002c, 0xbefa4d82, 3665 + 0xbfa20034, 0xbefa4d82, 3670 3666 0xbf8a0000, 0x84fa887a, 3671 3667 0xbf0d8f7b, 0xbfa10002, 3672 3668 0x8c7bff7b, 0xffff0000, 3673 - 0xf4601bbd, 0xf8000010, 3674 - 0xbf8a0000, 0x846e976e, 3675 - 0x9177ff77, 0x00800000, 3676 - 0x8c776e77, 0xf4603bbd, 3677 - 0xf8000000, 0xbf8a0000, 3678 - 0xf4603ebd, 0xf8000008, 3679 - 0xbf8a0000, 0x8bee6e6e, 3680 - 0xbfa10001, 0xbe80486e, 3681 - 0x8b6eff6d, 0xf0000000, 3682 - 0xbfa20009, 0xb8eef811, 3683 - 0x8b6eff6e, 0x00000080, 3684 - 0xbfa20007, 0x8c78ff78, 3685 - 0x00004000, 0x80ec886c, 3686 - 0x82ed806d, 0xbfa00002, 3687 - 0x806c846c, 0x826d806d, 3688 - 0x8b6dff6d, 0x0000ffff, 3689 - 0x8bfe7e7e, 0x8bea6a6a, 3690 - 0x85788978, 0xb9783244, 3669 + 0x8b6eff77, 0x0c000000, 3670 + 0x916dff6d, 0x0c000000, 3671 + 0x8c6d6e6d, 0xf4601bbd, 3672 + 0xf8000010, 0xbf8a0000, 3673 + 0x846e976e, 0x9177ff77, 3674 + 0x00800000, 0x8c776e77, 3675 + 0xf4603bbd, 0xf8000000, 3676 + 0xbf8a0000, 0xf4603ebd, 3677 + 0xf8000008, 0xbf8a0000, 3678 + 0x8bee6e6e, 0xbfa10001, 3679 + 0xbe80486e, 0x8b6eff6d, 3680 + 0xf0000000, 0xbfa20009, 3681 + 0xb8eef811, 0x8b6eff6e, 3682 + 0x00000080, 0xbfa20007, 3683 + 0x8c78ff78, 0x00004000, 3684 + 0x80ec886c, 0x82ed806d, 3685 + 
0xbfa00002, 0x806c846c, 3686 + 0x826d806d, 0x8b6dff6d, 3687 + 0x0000ffff, 0x8bfe7e7e, 3688 + 0x8bea6a6a, 0x85788978, 3689 + 0x936eff77, 0x0002001a, 3690 + 0xb96ef81a, 0xb9783244, 3691 3691 0xbe804a6c, 0xb8faf802, 3692 3692 0xbf0d987a, 0xbfa10001, 3693 3693 0xbfb00000, 0x8b6dff6d, ··· 3989 3981 0x008ce800, 0x00000000, 3990 3982 0x807d817d, 0x8070ff70, 3991 3983 0x00000080, 0xbf0a7b7d, 3992 - 0xbfa2fff7, 0xbfa0016e, 3984 + 0xbfa2fff7, 0xbfa00171, 3993 3985 0xbef4007e, 0x8b75ff7f, 3994 3986 0x0000ffff, 0x8c75ff75, 3995 3987 0x00040000, 0xbef60080, ··· 4171 4163 0xf8000074, 0xbf8a0000, 4172 4164 0x8b6dff6d, 0x0000ffff, 4173 4165 0x8bfe7e7e, 0x8bea6a6a, 4174 - 0xb97af804, 0xbe804ec2, 4175 - 0xbf94fffe, 0xbe804a6c, 4166 + 0x936eff77, 0x0002001a, 4167 + 0xb96ef81a, 0xb97af804, 4176 4168 0xbe804ec2, 0xbf94fffe, 4177 - 0xbfb10000, 0xbf9f0000, 4169 + 0xbe804a6c, 0xbe804ec2, 4170 + 0xbf94fffe, 0xbfb10000, 4178 4171 0xbf9f0000, 0xbf9f0000, 4179 4172 0xbf9f0000, 0xbf9f0000, 4173 + 0xbf9f0000, 0x00000000, 4180 4174 }; 4181 4175 4182 4176 static const uint32_t cwsr_trap_gfx9_5_0_hex[] = {
+37
drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
··· 78 78 var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SIZE = SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT - SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT 79 79 var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT 80 80 var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SIZE = 32 - SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT 81 + 82 + var SQ_WAVE_SCHED_MODE_DEP_MODE_SHIFT = 0 83 + var SQ_WAVE_SCHED_MODE_DEP_MODE_SIZE = 2 84 + 81 85 var BARRIER_STATE_SIGNAL_OFFSET = 16 82 86 var BARRIER_STATE_VALID_OFFSET = 0 83 87 88 + var TTMP11_SCHED_MODE_SHIFT = 26 89 + var TTMP11_SCHED_MODE_SIZE = 2 90 + var TTMP11_SCHED_MODE_MASK = 0xC000000 84 91 var TTMP11_DEBUG_TRAP_ENABLED_SHIFT = 23 85 92 var TTMP11_DEBUG_TRAP_ENABLED_MASK = 0x800000 86 93 ··· 167 160 s_branch L_RESTORE 168 161 169 162 L_SKIP_RESTORE: 163 + // Assume most relaxed scheduling mode is set. Save and revert to normal mode. 164 + s_getreg_b32 ttmp2, hwreg(HW_REG_WAVE_SCHED_MODE) 165 + s_wait_alu 0 166 + s_setreg_imm32_b32 hwreg(HW_REG_WAVE_SCHED_MODE, \ 167 + SQ_WAVE_SCHED_MODE_DEP_MODE_SHIFT, SQ_WAVE_SCHED_MODE_DEP_MODE_SIZE), 0 168 + 170 169 s_getreg_b32 s_save_state_priv, hwreg(HW_REG_WAVE_STATE_PRIV) //save STATUS since we will change SCC 170 + 171 + // Save SCHED_MODE[1:0] into ttmp11[27:26]. 172 + s_andn2_b32 ttmp11, ttmp11, TTMP11_SCHED_MODE_MASK 173 + s_lshl_b32 ttmp2, ttmp2, TTMP11_SCHED_MODE_SHIFT 174 + s_or_b32 ttmp11, ttmp11, ttmp2 171 175 172 176 // Clear SPI_PRIO: do not save with elevated priority. 173 177 // Clear ECC_ERR: prevents SQC store and triggers FATAL_HALT if setreg'd. ··· 256 238 s_cbranch_scc0 L_NO_SIGN_EXTEND_TMA 257 239 s_or_b32 ttmp15, ttmp15, 0xFFFF0000 258 240 L_NO_SIGN_EXTEND_TMA: 241 + #if ASIC_FAMILY == CHIP_GFX12 242 + // Move SCHED_MODE[1:0] from ttmp11 to unused bits in ttmp1[27:26] (return PC_HI). 243 + // The second-level trap will restore from ttmp1 for backwards compatibility. 
244 + s_and_b32 ttmp2, ttmp11, TTMP11_SCHED_MODE_MASK 245 + s_andn2_b32 ttmp1, ttmp1, TTMP11_SCHED_MODE_MASK 246 + s_or_b32 ttmp1, ttmp1, ttmp2 247 + #endif 259 248 260 249 s_load_dword ttmp2, [ttmp14, ttmp15], 0x10 scope:SCOPE_SYS // debug trap enabled flag 261 250 s_wait_idle ··· 312 287 // STATE_PRIV.BARRIER_COMPLETE may have changed since we read it. 313 288 // Only restore fields which the trap handler changes. 314 289 s_lshr_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_SCC_SHIFT 290 + 291 + // Assume relaxed scheduling mode after this point. 292 + restore_sched_mode(ttmp2) 293 + 315 294 s_setreg_b32 hwreg(HW_REG_WAVE_STATE_PRIV, SQ_WAVE_STATE_PRIV_SCC_SHIFT, \ 316 295 SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT - SQ_WAVE_STATE_PRIV_SCC_SHIFT + 1), s_save_state_priv 317 296 ··· 1072 1043 s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32 1073 1044 s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32 1074 1045 1046 + // Assume relaxed scheduling mode after this point. 1047 + restore_sched_mode(s_restore_tmp) 1048 + 1075 1049 s_setreg_b32 hwreg(HW_REG_WAVE_STATE_PRIV), s_restore_state_priv // SCC is included, which is changed by previous salu 1076 1050 1077 1051 // Make barrier and LDS state visible to all waves in the group. ··· 1165 1133 ds_nop 1166 1134 end 1167 1135 #endif 1136 + end 1137 + 1138 + function restore_sched_mode(s_tmp) 1139 + s_bfe_u32 s_tmp, ttmp11, (TTMP11_SCHED_MODE_SHIFT | (TTMP11_SCHED_MODE_SIZE << 0x10)) 1140 + s_setreg_b32 hwreg(HW_REG_WAVE_SCHED_MODE), s_tmp 1168 1141 end
+1
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
··· 409 409 vgpr_size = 0x80000; 410 410 else if (gfxv == 110000 || /* GFX_VERSION_PLUM_BONITO */ 411 411 gfxv == 110001 || /* GFX_VERSION_WHEAT_NAS */ 412 + gfxv == 110501 || /* GFX_VERSION_GFX1151 */ 412 413 gfxv == 120000 || /* GFX_VERSION_GFX1200 */ 413 414 gfxv == 120001) /* GFX_VERSION_GFX1201 */ 414 415 vgpr_size = 0x60000;
+32 -14
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
··· 1144 1144 svm_range_split_tail(struct svm_range *prange, uint64_t new_last, 1145 1145 struct list_head *insert_list, struct list_head *remap_list) 1146 1146 { 1147 + unsigned long last_align_down = ALIGN_DOWN(prange->last, 512); 1148 + unsigned long start_align = ALIGN(prange->start, 512); 1149 + bool huge_page_mapping = last_align_down > start_align; 1147 1150 struct svm_range *tail = NULL; 1148 - int r = svm_range_split(prange, prange->start, new_last, &tail); 1151 + int r; 1149 1152 1150 - if (!r) { 1151 - list_add(&tail->list, insert_list); 1152 - if (!IS_ALIGNED(new_last + 1, 1UL << prange->granularity)) 1153 - list_add(&tail->update_list, remap_list); 1154 - } 1155 - return r; 1153 + r = svm_range_split(prange, prange->start, new_last, &tail); 1154 + 1155 + if (r) 1156 + return r; 1157 + 1158 + list_add(&tail->list, insert_list); 1159 + 1160 + if (huge_page_mapping && tail->start > start_align && 1161 + tail->start < last_align_down && (!IS_ALIGNED(tail->start, 512))) 1162 + list_add(&tail->update_list, remap_list); 1163 + 1164 + return 0; 1156 1165 } 1157 1166 1158 1167 static int 1159 1168 svm_range_split_head(struct svm_range *prange, uint64_t new_start, 1160 1169 struct list_head *insert_list, struct list_head *remap_list) 1161 1170 { 1171 + unsigned long last_align_down = ALIGN_DOWN(prange->last, 512); 1172 + unsigned long start_align = ALIGN(prange->start, 512); 1173 + bool huge_page_mapping = last_align_down > start_align; 1162 1174 struct svm_range *head = NULL; 1163 - int r = svm_range_split(prange, new_start, prange->last, &head); 1175 + int r; 1164 1176 1165 - if (!r) { 1166 - list_add(&head->list, insert_list); 1167 - if (!IS_ALIGNED(new_start, 1UL << prange->granularity)) 1168 - list_add(&head->update_list, remap_list); 1169 - } 1170 - return r; 1177 + r = svm_range_split(prange, new_start, prange->last, &head); 1178 + 1179 + if (r) 1180 + return r; 1181 + 1182 + list_add(&head->list, insert_list); 1183 + 1184 + if (huge_page_mapping && 
head->last + 1 > start_align && 1185 + head->last + 1 < last_align_down && (!IS_ALIGNED(head->last, 512))) 1186 + list_add(&head->update_list, remap_list); 1187 + 1188 + return 0; 1171 1189 } 1172 1190 1173 1191 static void
+4
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
··· 491 491 dev->node_props.num_sdma_queues_per_engine); 492 492 sysfs_show_32bit_prop(buffer, offs, "num_cp_queues", 493 493 dev->node_props.num_cp_queues); 494 + sysfs_show_32bit_prop(buffer, offs, "cwsr_size", 495 + dev->node_props.cwsr_size); 496 + sysfs_show_32bit_prop(buffer, offs, "ctl_stack_size", 497 + dev->node_props.ctl_stack_size); 494 498 495 499 if (dev->gpu) { 496 500 log_max_watch_addr =
+3
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
··· 1063 1063 void amdgpu_dm_update_connector_after_detect( 1064 1064 struct amdgpu_dm_connector *aconnector); 1065 1065 1066 + void populate_hdmi_info_from_connector(struct drm_hdmi_info *info, 1067 + struct dc_edid_caps *edid_caps); 1068 + 1066 1069 extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs; 1067 1070 1068 1071 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int link_index,
+8
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
··· 139 139 140 140 edid_caps->edid_hdmi = connector->display_info.is_hdmi; 141 141 142 + if (edid_caps->edid_hdmi) 143 + populate_hdmi_info_from_connector(&connector->display_info.hdmi, edid_caps); 144 + 142 145 apply_edid_quirks(dev, edid_buf, edid_caps); 143 146 144 147 sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads); ··· 991 988 return NULL; 992 989 993 990 return drm_edid_read_custom(connector, dm_helpers_probe_acpi_edid, connector); 991 + } 992 + 993 + void populate_hdmi_info_from_connector(struct drm_hdmi_info *hdmi, struct dc_edid_caps *edid_caps) 994 + { 995 + edid_caps->scdc_present = hdmi->scdc.supported; 994 996 } 995 997 996 998 enum dc_edid_status dm_helpers_read_local_edid(
+36 -23
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
··· 884 884 }; 885 885 886 886 #if defined(CONFIG_DRM_AMD_DC_FP) 887 - static uint64_t kbps_to_pbn(int kbps, bool is_peak_pbn) 887 + static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link) 888 888 { 889 - uint64_t effective_kbps = (uint64_t)kbps; 889 + u8 link_coding_cap; 890 + uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B; 890 891 891 - if (is_peak_pbn) { // add 0.6% (1006/1000) overhead into effective kbps 892 - effective_kbps *= 1006; 893 - effective_kbps = div_u64(effective_kbps, 1000); 894 - } 892 + link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link); 893 + if (link_coding_cap == DP_128b_132b_ENCODING) 894 + fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B; 895 895 896 - return (uint64_t) DIV64_U64_ROUND_UP(effective_kbps * 64, (54 * 8 * 1000)); 896 + return fec_overhead_multiplier_x1000; 897 897 } 898 898 899 - static uint32_t pbn_to_kbps(unsigned int pbn, bool with_margin) 899 + static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000) 900 900 { 901 - uint64_t pbn_effective = (uint64_t)pbn; 901 + u64 peak_kbps = kbps; 902 902 903 - if (with_margin) // deduct 0.6% (994/1000) overhead from effective pbn 904 - pbn_effective *= (1000000 / PEAK_FACTOR_X1000); 905 - else 906 - pbn_effective *= 1000; 907 - 908 - return DIV_U64_ROUND_UP(pbn_effective * 8 * 54, 64); 903 + peak_kbps *= 1006; 904 + peak_kbps *= fec_overhead_multiplier_x1000; 905 + peak_kbps = div_u64(peak_kbps, 1000 * 1000); 906 + return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000)); 909 907 } 910 908 911 909 static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params, ··· 974 976 dc_dsc_get_default_config_option(param.sink->ctx->dc, &dsc_options); 975 977 dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16; 976 978 977 - kbps = pbn_to_kbps(pbn, false); 979 + kbps = div_u64((u64)pbn * 994 * 8 * 54, 64); 978 980 
dc_dsc_compute_config( 979 981 param.sink->ctx->dc->res_pool->dscs[0], 980 982 &param.sink->dsc_caps.dsc_dec_caps, ··· 1003 1005 int link_timeslots_used; 1004 1006 int fair_pbn_alloc; 1005 1007 int ret = 0; 1008 + uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); 1006 1009 1007 1010 for (i = 0; i < count; i++) { 1008 1011 if (vars[i + k].dsc_enabled) { 1009 1012 initial_slack[i] = 1010 - kbps_to_pbn(params[i].bw_range.max_kbps, false) - vars[i + k].pbn; 1013 + kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn; 1011 1014 bpp_increased[i] = false; 1012 1015 remaining_to_increase += 1; 1013 1016 } else { ··· 1104 1105 int next_index; 1105 1106 int remaining_to_try = 0; 1106 1107 int ret; 1108 + uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); 1107 1109 int var_pbn; 1108 1110 1109 1111 for (i = 0; i < count; i++) { ··· 1137 1137 1138 1138 DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index); 1139 1139 var_pbn = vars[next_index].pbn; 1140 - vars[next_index].pbn = kbps_to_pbn(params[next_index].bw_range.stream_kbps, true); 1140 + vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000); 1141 1141 ret = drm_dp_atomic_find_time_slots(state, 1142 1142 params[next_index].port->mgr, 1143 1143 params[next_index].port, ··· 1197 1197 int count = 0; 1198 1198 int i, k, ret; 1199 1199 bool debugfs_overwrite = false; 1200 + uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); 1200 1201 struct drm_connector_state *new_conn_state; 1201 1202 1202 1203 memset(params, 0, sizeof(params)); ··· 1278 1277 DRM_DEBUG_DRIVER("MST_DSC Try no compression\n"); 1279 1278 for (i = 0; i < count; i++) { 1280 1279 vars[i + k].aconnector = params[i].aconnector; 1281 - vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false); 1280 + vars[i + k].pbn = 
kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000); 1282 1281 vars[i + k].dsc_enabled = false; 1283 1282 vars[i + k].bpp_x16 = 0; 1284 1283 ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port, ··· 1300 1299 DRM_DEBUG_DRIVER("MST_DSC Try max compression\n"); 1301 1300 for (i = 0; i < count; i++) { 1302 1301 if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) { 1303 - vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.min_kbps, false); 1302 + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000); 1304 1303 vars[i + k].dsc_enabled = true; 1305 1304 vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16; 1306 1305 ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, ··· 1308 1307 if (ret < 0) 1309 1308 return ret; 1310 1309 } else { 1311 - vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false); 1310 + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000); 1312 1311 vars[i + k].dsc_enabled = false; 1313 1312 vars[i + k].bpp_x16 = 0; 1314 1313 ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, ··· 1763 1762 return ret; 1764 1763 } 1765 1764 1765 + static uint32_t kbps_from_pbn(unsigned int pbn) 1766 + { 1767 + uint64_t kbps = (uint64_t)pbn; 1768 + 1769 + kbps *= (1000000 / PEAK_FACTOR_X1000); 1770 + kbps *= 8; 1771 + kbps *= 54; 1772 + kbps /= 64; 1773 + 1774 + return (uint32_t)kbps; 1775 + } 1776 + 1766 1777 static bool is_dsc_common_config_possible(struct dc_stream_state *stream, 1767 1778 struct dc_dsc_bw_range *bw_range) 1768 1779 { ··· 1873 1860 dc_link_get_highest_encoding_format(stream->link)); 1874 1861 cur_link_settings = stream->link->verified_link_cap; 1875 1862 root_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings); 1876 - virtual_channel_bw_in_kbps = pbn_to_kbps(aconnector->mst_output_port->full_pbn, true); 
1863 + virtual_channel_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn); 1877 1864 1878 1865 /* pick the end to end bw bottleneck */ 1879 1866 end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps); ··· 1926 1913 immediate_upstream_port = aconnector->mst_output_port->parent->port_parent; 1927 1914 1928 1915 if (immediate_upstream_port) { 1929 - virtual_channel_bw_in_kbps = pbn_to_kbps(immediate_upstream_port->full_pbn, true); 1916 + virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn); 1930 1917 virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps); 1931 1918 } else { 1932 1919 /* For topology LCT 1 case - only one mstb*/
+1 -1
drivers/gpu/drm/amd/display/dc/core/dc_surface.c
··· 86 86 struct dc_plane_state *dc_create_plane_state(const struct dc *dc) 87 87 { 88 88 struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state), 89 - GFP_KERNEL); 89 + GFP_ATOMIC); 90 90 91 91 if (NULL == plane_state) 92 92 return NULL;
+71 -63
drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c
··· 6711 6711 } // for j 6712 6712 } 6713 6713 6714 + static noinline_for_stack void set_vm_row_and_swath_parameters(struct display_mode_lib_st *mode_lib) 6715 + { 6716 + struct CalculateVMRowAndSwath_params_st *CalculateVMRowAndSwath_params = &mode_lib->scratch.CalculateVMRowAndSwath_params; 6717 + struct dml_core_mode_support_locals_st *s = &mode_lib->scratch.dml_core_mode_support_locals; 6718 + 6719 + CalculateVMRowAndSwath_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes; 6720 + CalculateVMRowAndSwath_params->myPipe = s->SurfParameters; 6721 + CalculateVMRowAndSwath_params->SurfaceSizeInMALL = mode_lib->ms.SurfaceSizeInMALL; 6722 + CalculateVMRowAndSwath_params->PTEBufferSizeInRequestsLuma = mode_lib->ms.ip.dpte_buffer_size_in_pte_reqs_luma; 6723 + CalculateVMRowAndSwath_params->PTEBufferSizeInRequestsChroma = mode_lib->ms.ip.dpte_buffer_size_in_pte_reqs_chroma; 6724 + CalculateVMRowAndSwath_params->DCCMetaBufferSizeBytes = mode_lib->ms.ip.dcc_meta_buffer_size_bytes; 6725 + CalculateVMRowAndSwath_params->UseMALLForStaticScreen = mode_lib->ms.cache_display_cfg.plane.UseMALLForStaticScreen; 6726 + CalculateVMRowAndSwath_params->UseMALLForPStateChange = mode_lib->ms.cache_display_cfg.plane.UseMALLForPStateChange; 6727 + CalculateVMRowAndSwath_params->MALLAllocatedForDCN = mode_lib->ms.soc.mall_allocated_for_dcn_mbytes; 6728 + CalculateVMRowAndSwath_params->SwathWidthY = mode_lib->ms.SwathWidthYThisState; 6729 + CalculateVMRowAndSwath_params->SwathWidthC = mode_lib->ms.SwathWidthCThisState; 6730 + CalculateVMRowAndSwath_params->GPUVMEnable = mode_lib->ms.cache_display_cfg.plane.GPUVMEnable; 6731 + CalculateVMRowAndSwath_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable; 6732 + CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels; 6733 + CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels; 
6734 + CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes; 6735 + CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024; 6736 + CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn; 6737 + CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode; 6738 + CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = mode_lib->ms.PTEBufferSizeNotExceededPerState; 6739 + CalculateVMRowAndSwath_params->DCCMetaBufferSizeNotExceeded = mode_lib->ms.DCCMetaBufferSizeNotExceededPerState; 6740 + CalculateVMRowAndSwath_params->dpte_row_width_luma_ub = s->dummy_integer_array[0]; 6741 + CalculateVMRowAndSwath_params->dpte_row_width_chroma_ub = s->dummy_integer_array[1]; 6742 + CalculateVMRowAndSwath_params->dpte_row_height_luma = mode_lib->ms.dpte_row_height; 6743 + CalculateVMRowAndSwath_params->dpte_row_height_chroma = mode_lib->ms.dpte_row_height_chroma; 6744 + CalculateVMRowAndSwath_params->dpte_row_height_linear_luma = s->dummy_integer_array[2]; // VBA_DELTA 6745 + CalculateVMRowAndSwath_params->dpte_row_height_linear_chroma = s->dummy_integer_array[3]; // VBA_DELTA 6746 + CalculateVMRowAndSwath_params->meta_req_width = s->dummy_integer_array[4]; 6747 + CalculateVMRowAndSwath_params->meta_req_width_chroma = s->dummy_integer_array[5]; 6748 + CalculateVMRowAndSwath_params->meta_req_height = s->dummy_integer_array[6]; 6749 + CalculateVMRowAndSwath_params->meta_req_height_chroma = s->dummy_integer_array[7]; 6750 + CalculateVMRowAndSwath_params->meta_row_width = s->dummy_integer_array[8]; 6751 + CalculateVMRowAndSwath_params->meta_row_width_chroma = s->dummy_integer_array[9]; 6752 + CalculateVMRowAndSwath_params->meta_row_height = mode_lib->ms.meta_row_height; 6753 + CalculateVMRowAndSwath_params->meta_row_height_chroma = 
mode_lib->ms.meta_row_height_chroma; 6754 + CalculateVMRowAndSwath_params->vm_group_bytes = s->dummy_integer_array[10]; 6755 + CalculateVMRowAndSwath_params->dpte_group_bytes = mode_lib->ms.dpte_group_bytes; 6756 + CalculateVMRowAndSwath_params->PixelPTEReqWidthY = s->dummy_integer_array[11]; 6757 + CalculateVMRowAndSwath_params->PixelPTEReqHeightY = s->dummy_integer_array[12]; 6758 + CalculateVMRowAndSwath_params->PTERequestSizeY = s->dummy_integer_array[13]; 6759 + CalculateVMRowAndSwath_params->PixelPTEReqWidthC = s->dummy_integer_array[14]; 6760 + CalculateVMRowAndSwath_params->PixelPTEReqHeightC = s->dummy_integer_array[15]; 6761 + CalculateVMRowAndSwath_params->PTERequestSizeC = s->dummy_integer_array[16]; 6762 + CalculateVMRowAndSwath_params->dpde0_bytes_per_frame_ub_l = s->dummy_integer_array[17]; 6763 + CalculateVMRowAndSwath_params->meta_pte_bytes_per_frame_ub_l = s->dummy_integer_array[18]; 6764 + CalculateVMRowAndSwath_params->dpde0_bytes_per_frame_ub_c = s->dummy_integer_array[19]; 6765 + CalculateVMRowAndSwath_params->meta_pte_bytes_per_frame_ub_c = s->dummy_integer_array[20]; 6766 + CalculateVMRowAndSwath_params->PrefetchSourceLinesY = mode_lib->ms.PrefetchLinesYThisState; 6767 + CalculateVMRowAndSwath_params->PrefetchSourceLinesC = mode_lib->ms.PrefetchLinesCThisState; 6768 + CalculateVMRowAndSwath_params->VInitPreFillY = mode_lib->ms.PrefillY; 6769 + CalculateVMRowAndSwath_params->VInitPreFillC = mode_lib->ms.PrefillC; 6770 + CalculateVMRowAndSwath_params->MaxNumSwathY = mode_lib->ms.MaxNumSwY; 6771 + CalculateVMRowAndSwath_params->MaxNumSwathC = mode_lib->ms.MaxNumSwC; 6772 + CalculateVMRowAndSwath_params->meta_row_bw = mode_lib->ms.meta_row_bandwidth_this_state; 6773 + CalculateVMRowAndSwath_params->dpte_row_bw = mode_lib->ms.dpte_row_bandwidth_this_state; 6774 + CalculateVMRowAndSwath_params->PixelPTEBytesPerRow = mode_lib->ms.DPTEBytesPerRowThisState; 6775 + CalculateVMRowAndSwath_params->PDEAndMetaPTEBytesFrame = 
mode_lib->ms.PDEAndMetaPTEBytesPerFrameThisState; 6776 + CalculateVMRowAndSwath_params->MetaRowByte = mode_lib->ms.MetaRowBytesThisState; 6777 + CalculateVMRowAndSwath_params->use_one_row_for_frame = mode_lib->ms.use_one_row_for_frame_this_state; 6778 + CalculateVMRowAndSwath_params->use_one_row_for_frame_flip = mode_lib->ms.use_one_row_for_frame_flip_this_state; 6779 + CalculateVMRowAndSwath_params->UsesMALLForStaticScreen = s->dummy_boolean_array[0]; 6780 + CalculateVMRowAndSwath_params->PTE_BUFFER_MODE = s->dummy_boolean_array[1]; 6781 + CalculateVMRowAndSwath_params->BIGK_FRAGMENT_SIZE = s->dummy_integer_array[21]; 6782 + } 6783 + 6714 6784 /// @brief The Mode Support function. 6715 6785 dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib) 6716 6786 { ··· 7753 7683 s->SurfParameters[k].SwathHeightC = mode_lib->ms.SwathHeightCThisState[k]; 7754 7684 } 7755 7685 7756 - CalculateVMRowAndSwath_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes; 7757 - CalculateVMRowAndSwath_params->myPipe = s->SurfParameters; 7758 - CalculateVMRowAndSwath_params->SurfaceSizeInMALL = mode_lib->ms.SurfaceSizeInMALL; 7759 - CalculateVMRowAndSwath_params->PTEBufferSizeInRequestsLuma = mode_lib->ms.ip.dpte_buffer_size_in_pte_reqs_luma; 7760 - CalculateVMRowAndSwath_params->PTEBufferSizeInRequestsChroma = mode_lib->ms.ip.dpte_buffer_size_in_pte_reqs_chroma; 7761 - CalculateVMRowAndSwath_params->DCCMetaBufferSizeBytes = mode_lib->ms.ip.dcc_meta_buffer_size_bytes; 7762 - CalculateVMRowAndSwath_params->UseMALLForStaticScreen = mode_lib->ms.cache_display_cfg.plane.UseMALLForStaticScreen; 7763 - CalculateVMRowAndSwath_params->UseMALLForPStateChange = mode_lib->ms.cache_display_cfg.plane.UseMALLForPStateChange; 7764 - CalculateVMRowAndSwath_params->MALLAllocatedForDCN = mode_lib->ms.soc.mall_allocated_for_dcn_mbytes; 7765 - CalculateVMRowAndSwath_params->SwathWidthY = mode_lib->ms.SwathWidthYThisState; 7766 - CalculateVMRowAndSwath_params->SwathWidthC = 
mode_lib->ms.SwathWidthCThisState; 7767 - CalculateVMRowAndSwath_params->GPUVMEnable = mode_lib->ms.cache_display_cfg.plane.GPUVMEnable; 7768 - CalculateVMRowAndSwath_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable; 7769 - CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels; 7770 - CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels; 7771 - CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes; 7772 - CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024; 7773 - CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn; 7774 - CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode; 7775 - CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = mode_lib->ms.PTEBufferSizeNotExceededPerState; 7776 - CalculateVMRowAndSwath_params->DCCMetaBufferSizeNotExceeded = mode_lib->ms.DCCMetaBufferSizeNotExceededPerState; 7777 - CalculateVMRowAndSwath_params->dpte_row_width_luma_ub = s->dummy_integer_array[0]; 7778 - CalculateVMRowAndSwath_params->dpte_row_width_chroma_ub = s->dummy_integer_array[1]; 7779 - CalculateVMRowAndSwath_params->dpte_row_height_luma = mode_lib->ms.dpte_row_height; 7780 - CalculateVMRowAndSwath_params->dpte_row_height_chroma = mode_lib->ms.dpte_row_height_chroma; 7781 - CalculateVMRowAndSwath_params->dpte_row_height_linear_luma = s->dummy_integer_array[2]; // VBA_DELTA 7782 - CalculateVMRowAndSwath_params->dpte_row_height_linear_chroma = s->dummy_integer_array[3]; // VBA_DELTA 7783 - CalculateVMRowAndSwath_params->meta_req_width = s->dummy_integer_array[4]; 7784 - CalculateVMRowAndSwath_params->meta_req_width_chroma = s->dummy_integer_array[5]; 7785 - 
CalculateVMRowAndSwath_params->meta_req_height = s->dummy_integer_array[6]; 7786 - CalculateVMRowAndSwath_params->meta_req_height_chroma = s->dummy_integer_array[7]; 7787 - CalculateVMRowAndSwath_params->meta_row_width = s->dummy_integer_array[8]; 7788 - CalculateVMRowAndSwath_params->meta_row_width_chroma = s->dummy_integer_array[9]; 7789 - CalculateVMRowAndSwath_params->meta_row_height = mode_lib->ms.meta_row_height; 7790 - CalculateVMRowAndSwath_params->meta_row_height_chroma = mode_lib->ms.meta_row_height_chroma; 7791 - CalculateVMRowAndSwath_params->vm_group_bytes = s->dummy_integer_array[10]; 7792 - CalculateVMRowAndSwath_params->dpte_group_bytes = mode_lib->ms.dpte_group_bytes; 7793 - CalculateVMRowAndSwath_params->PixelPTEReqWidthY = s->dummy_integer_array[11]; 7794 - CalculateVMRowAndSwath_params->PixelPTEReqHeightY = s->dummy_integer_array[12]; 7795 - CalculateVMRowAndSwath_params->PTERequestSizeY = s->dummy_integer_array[13]; 7796 - CalculateVMRowAndSwath_params->PixelPTEReqWidthC = s->dummy_integer_array[14]; 7797 - CalculateVMRowAndSwath_params->PixelPTEReqHeightC = s->dummy_integer_array[15]; 7798 - CalculateVMRowAndSwath_params->PTERequestSizeC = s->dummy_integer_array[16]; 7799 - CalculateVMRowAndSwath_params->dpde0_bytes_per_frame_ub_l = s->dummy_integer_array[17]; 7800 - CalculateVMRowAndSwath_params->meta_pte_bytes_per_frame_ub_l = s->dummy_integer_array[18]; 7801 - CalculateVMRowAndSwath_params->dpde0_bytes_per_frame_ub_c = s->dummy_integer_array[19]; 7802 - CalculateVMRowAndSwath_params->meta_pte_bytes_per_frame_ub_c = s->dummy_integer_array[20]; 7803 - CalculateVMRowAndSwath_params->PrefetchSourceLinesY = mode_lib->ms.PrefetchLinesYThisState; 7804 - CalculateVMRowAndSwath_params->PrefetchSourceLinesC = mode_lib->ms.PrefetchLinesCThisState; 7805 - CalculateVMRowAndSwath_params->VInitPreFillY = mode_lib->ms.PrefillY; 7806 - CalculateVMRowAndSwath_params->VInitPreFillC = mode_lib->ms.PrefillC; 7807 - CalculateVMRowAndSwath_params->MaxNumSwathY = 
mode_lib->ms.MaxNumSwY; 7808 - CalculateVMRowAndSwath_params->MaxNumSwathC = mode_lib->ms.MaxNumSwC; 7809 - CalculateVMRowAndSwath_params->meta_row_bw = mode_lib->ms.meta_row_bandwidth_this_state; 7810 - CalculateVMRowAndSwath_params->dpte_row_bw = mode_lib->ms.dpte_row_bandwidth_this_state; 7811 - CalculateVMRowAndSwath_params->PixelPTEBytesPerRow = mode_lib->ms.DPTEBytesPerRowThisState; 7812 - CalculateVMRowAndSwath_params->PDEAndMetaPTEBytesFrame = mode_lib->ms.PDEAndMetaPTEBytesPerFrameThisState; 7813 - CalculateVMRowAndSwath_params->MetaRowByte = mode_lib->ms.MetaRowBytesThisState; 7814 - CalculateVMRowAndSwath_params->use_one_row_for_frame = mode_lib->ms.use_one_row_for_frame_this_state; 7815 - CalculateVMRowAndSwath_params->use_one_row_for_frame_flip = mode_lib->ms.use_one_row_for_frame_flip_this_state; 7816 - CalculateVMRowAndSwath_params->UsesMALLForStaticScreen = s->dummy_boolean_array[0]; 7817 - CalculateVMRowAndSwath_params->PTE_BUFFER_MODE = s->dummy_boolean_array[1]; 7818 - CalculateVMRowAndSwath_params->BIGK_FRAGMENT_SIZE = s->dummy_integer_array[21]; 7686 + set_vm_row_and_swath_parameters(mode_lib); 7819 7687 7820 7688 CalculateVMRowAndSwath(&mode_lib->scratch, 7821 7689 CalculateVMRowAndSwath_params);
-3
drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
··· 1484 1484 state->clk_mgr); 1485 1485 } 1486 1486 1487 - audio_output->pll_info.feed_back_divider = 1488 - pipe_ctx->pll_settings.feedback_divider; 1489 - 1490 1487 audio_output->pll_info.dto_source = 1491 1488 translate_to_dto_source( 1492 1489 pipe_ctx->stream_res.tg->inst + 1);
+6 -8
drivers/gpu/drm/amd/display/include/audio_types.h
··· 47 47 uint32_t h_total; 48 48 uint32_t h_active; 49 49 uint32_t v_active; 50 - uint32_t pixel_repetition; 51 50 uint32_t requested_pixel_clock_100Hz; /* in 100Hz */ 52 51 uint32_t calculated_pixel_clock_100Hz; /* in 100Hz */ 53 - uint32_t refresh_rate; 54 - enum dc_color_depth color_depth; 55 - enum dc_pixel_encoding pixel_encoding; 56 - bool interlaced; 57 52 uint32_t dsc_bits_per_pixel; 58 53 uint32_t dsc_num_slices; 54 + enum dc_color_depth color_depth; 55 + enum dc_pixel_encoding pixel_encoding; 56 + uint16_t refresh_rate; 57 + uint8_t pixel_repetition; 58 + bool interlaced; 59 59 }; 60 60 struct azalia_clock_info { 61 61 uint32_t pixel_clock_in_10khz; ··· 78 78 79 79 struct audio_pll_info { 80 80 uint32_t audio_dto_source_clock_in_khz; 81 - uint32_t feed_back_divider; 81 + uint32_t ss_percentage; 82 82 enum audio_dto_source dto_source; 83 83 bool ss_enabled; 84 - uint32_t ss_percentage; 85 - uint32_t ss_percentage_divider; 86 84 }; 87 85 88 86 struct audio_channel_associate_info {
+1 -1
drivers/gpu/drm/drm_gem_dma_helper.c
··· 308 308 struct drm_gem_dma_object *dma_obj; 309 309 int ret; 310 310 311 - ret = drm_mode_size_dumb(drm, args, SZ_8, 0); 311 + ret = drm_mode_size_dumb(drm, args, 0, 0); 312 312 if (ret) 313 313 return ret; 314 314
+1 -1
drivers/gpu/drm/drm_gem_shmem_helper.c
··· 559 559 { 560 560 int ret; 561 561 562 - ret = drm_mode_size_dumb(dev, args, SZ_8, 0); 562 + ret = drm_mode_size_dumb(dev, args, 0, 0); 563 563 if (ret) 564 564 return ret; 565 565
+7 -4
drivers/gpu/drm/i915/display/intel_fbdev.c
··· 288 288 drm_framebuffer_put(&fb->base); 289 289 fb = NULL; 290 290 } 291 + 292 + wakeref = intel_display_rpm_get(display); 293 + 291 294 if (!fb || drm_WARN_ON(display->drm, !intel_fb_bo(&fb->base))) { 292 295 drm_dbg_kms(display->drm, 293 296 "no BIOS fb, allocating a new one\n"); 294 297 295 298 fb = __intel_fbdev_fb_alloc(display, sizes); 296 - if (IS_ERR(fb)) 297 - return PTR_ERR(fb); 299 + if (IS_ERR(fb)) { 300 + ret = PTR_ERR(fb); 301 + goto out_unlock; 302 + } 298 303 } else { 299 304 drm_dbg_kms(display->drm, "re-using BIOS fb\n"); 300 305 prealloc = true; 301 306 sizes->fb_width = fb->base.width; 302 307 sizes->fb_height = fb->base.height; 303 308 } 304 - 305 - wakeref = intel_display_rpm_get(display); 306 309 307 310 /* Pin the GGTT vma for our access via info->screen_base. 308 311 * This also validates that any existing fb inherited from the
+1 -1
drivers/gpu/drm/i915/intel_memory_region.h
··· 72 72 u16 instance; 73 73 enum intel_region_id id; 74 74 char name[16]; 75 - char uabi_name[16]; 75 + char uabi_name[20]; 76 76 bool private; /* not for userspace */ 77 77 78 78 struct {
+15 -4
drivers/gpu/drm/panthor/panthor_sched.c
··· 779 779 */ 780 780 #define MAX_GROUPS_PER_POOL 128 781 781 782 + /* 783 + * Mark added on an entry of group pool Xarray to identify if the group has 784 + * been fully initialized and can be accessed elsewhere in the driver code. 785 + */ 786 + #define GROUP_REGISTERED XA_MARK_1 787 + 782 788 /** 783 789 * struct panthor_group_pool - Group pool 784 790 * ··· 3013 3007 return; 3014 3008 3015 3009 xa_lock(&gpool->xa); 3016 - xa_for_each(&gpool->xa, i, group) { 3010 + xa_for_each_marked(&gpool->xa, i, group, GROUP_REGISTERED) { 3017 3011 guard(spinlock)(&group->fdinfo.lock); 3018 3012 pfile->stats.cycles += group->fdinfo.data.cycles; 3019 3013 pfile->stats.time += group->fdinfo.data.time; ··· 3733 3727 3734 3728 group_init_task_info(group); 3735 3729 3730 + xa_set_mark(&gpool->xa, gid, GROUP_REGISTERED); 3731 + 3736 3732 return gid; 3737 3733 3738 3734 err_erase_gid: ··· 3751 3743 struct panthor_device *ptdev = pfile->ptdev; 3752 3744 struct panthor_scheduler *sched = ptdev->scheduler; 3753 3745 struct panthor_group *group; 3746 + 3747 + if (!xa_get_mark(&gpool->xa, group_handle, GROUP_REGISTERED)) 3748 + return -EINVAL; 3754 3749 3755 3750 group = xa_erase(&gpool->xa, group_handle); 3756 3751 if (!group) ··· 3780 3769 } 3781 3770 3782 3771 static struct panthor_group *group_from_handle(struct panthor_group_pool *pool, 3783 - u32 group_handle) 3772 + unsigned long group_handle) 3784 3773 { 3785 3774 struct panthor_group *group; 3786 3775 3787 3776 xa_lock(&pool->xa); 3788 - group = group_get(xa_load(&pool->xa, group_handle)); 3777 + group = group_get(xa_find(&pool->xa, &group_handle, group_handle, GROUP_REGISTERED)); 3789 3778 xa_unlock(&pool->xa); 3790 3779 3791 3780 return group; ··· 3872 3861 return; 3873 3862 3874 3863 xa_lock(&gpool->xa); 3875 - xa_for_each(&gpool->xa, i, group) { 3864 + xa_for_each_marked(&gpool->xa, i, group, GROUP_REGISTERED) { 3876 3865 stats->resident += group->fdinfo.kbo_sizes; 3877 3866 if (group->csg_id >= 0) 3878 3867 stats->active 
+= group->fdinfo.kbo_sizes;
+2 -2
drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
··· 492 492 493 493 /* Configuration for Video Parameters, input is always RGB888 */ 494 494 vprmset0r = TXVMVPRMSET0R_BPP_24; 495 - if (mode->flags & DRM_MODE_FLAG_NVSYNC) 495 + if (!(mode->flags & DRM_MODE_FLAG_PVSYNC)) 496 496 vprmset0r |= TXVMVPRMSET0R_VSPOL_LOW; 497 - if (mode->flags & DRM_MODE_FLAG_NHSYNC) 497 + if (!(mode->flags & DRM_MODE_FLAG_PHSYNC)) 498 498 vprmset0r |= TXVMVPRMSET0R_HSPOL_LOW; 499 499 500 500 vprmset1r = TXVMVPRMSET1R_VACTIVE(mode->vdisplay)