Merge tag 'drm-fixes-2024-05-11' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
"This should be the last set of fixes for 6.9, i915, xe and amdgpu are
the bulk here, one of the previous nouveau fixes turned up an issue,
so reverting it, otherwise one core and a couple of meson fixes.

core:
- fix connector debugging output

i915:
- Automate CCS Mode setting during engine resets
- Fix audio time stamp programming for DP
- Fix parsing backlight BDB data

xe:
- Fix use zero-length element array
- Move more from system wq to ordered private wq
- Do not ignore return for drmm_mutex_init

amdgpu:
- DCN 3.5 fix
- MST DSC fixes
- S0i3 fix
- S4 fix
- HDP MMIO mapping fix
- Fix a regression in visible vram handling

amdkfd:
- Spatial partition fix

meson:
- dw-hdmi: power-up fixes
- dw-hdmi: add bandgap setting for g12

nouveau:
- revert SG_DEBUG fix that has a side effect"

* tag 'drm-fixes-2024-05-11' of https://gitlab.freedesktop.org/drm/kernel:
Revert "drm/nouveau/firmware: Fix SG_DEBUG error with nvkm_firmware_ctor()"
drm/amdgpu: Fix comparison in amdgpu_res_cpu_visible
drm/amdkfd: don't allow mapping the MMIO HDP page with large pages
drm/xe: Use ordered WQ for G2H handler
drm/xe/guc: Check error code when initializing the CT mutex
drm/xe/ads: Use flexible-array
Revert "drm/amdkfd: Add partition id field to location_id"
drm/amd/pm: Fix problems with reboot/shutdown for some SMU 13.0.4/13.0.11 users
drm/amd/display: MST DSC check for older devices
drm/amd/display: Fix idle optimization checks for multi-display and dual eDP
drm/amd/display: Fix DSC-re-computing
drm/amd/display: Enable urgent latency adjustments for DCN35
drm/connector: Add \n to message about demoting connector force-probes
drm/i915/bios: Fix parsing backlight BDB data
drm/i915/audio: Fix audio time stamp programming for DP
drm/i915/gt: Automate CCS Mode setting during engine resets
drm/meson: dw-hdmi: add bandgap setting for g12
drm/meson: dw-hdmi: power up phy on device init

+122 -203
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 427 428 amdgpu_res_first(res, 0, res->size, &cursor); 429 while (cursor.remaining) { 430 - if ((cursor.start + cursor.size) >= adev->gmc.visible_vram_size) 431 return false; 432 amdgpu_res_next(&cursor, cursor.size); 433 }
··· 427 428 amdgpu_res_first(res, 0, res->size, &cursor); 429 while (cursor.remaining) { 430 + if ((cursor.start + cursor.size) > adev->gmc.visible_vram_size) 431 return false; 432 amdgpu_res_next(&cursor, cursor.size); 433 }
+5 -2
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
··· 1139 goto err_unlock; 1140 } 1141 offset = dev->adev->rmmio_remap.bus_addr; 1142 - if (!offset) { 1143 err = -ENOMEM; 1144 goto err_unlock; 1145 } ··· 2307 return -EINVAL; 2308 } 2309 offset = pdd->dev->adev->rmmio_remap.bus_addr; 2310 - if (!offset) { 2311 pr_err("amdgpu_amdkfd_get_mmio_remap_phys_addr failed\n"); 2312 return -ENOMEM; 2313 } ··· 3347 phys_addr_t address; 3348 3349 if (vma->vm_end - vma->vm_start != PAGE_SIZE) 3350 return -EINVAL; 3351 3352 address = dev->adev->rmmio_remap.bus_addr;
··· 1139 goto err_unlock; 1140 } 1141 offset = dev->adev->rmmio_remap.bus_addr; 1142 + if (!offset || (PAGE_SIZE > 4096)) { 1143 err = -ENOMEM; 1144 goto err_unlock; 1145 } ··· 2307 return -EINVAL; 2308 } 2309 offset = pdd->dev->adev->rmmio_remap.bus_addr; 2310 + if (!offset || (PAGE_SIZE > 4096)) { 2311 pr_err("amdgpu_amdkfd_get_mmio_remap_phys_addr failed\n"); 2312 return -ENOMEM; 2313 } ··· 3347 phys_addr_t address; 3348 3349 if (vma->vm_end - vma->vm_start != PAGE_SIZE) 3350 + return -EINVAL; 3351 + 3352 + if (PAGE_SIZE > 4096) 3353 return -EINVAL; 3354 3355 address = dev->adev->rmmio_remap.bus_addr;
+2 -3
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
··· 1997 HSA_CAP_ASIC_REVISION_MASK); 1998 1999 dev->node_props.location_id = pci_dev_id(gpu->adev->pdev); 2000 - /* On multi-partition nodes, node id = location_id[31:28] */ 2001 - if (gpu->kfd->num_nodes > 1) 2002 - dev->node_props.location_id |= (dev->gpu->node_id << 28); 2003 2004 dev->node_props.domain = pci_domain_nr(gpu->adev->pdev->bus); 2005 dev->node_props.max_engine_clk_fcompute =
··· 1997 HSA_CAP_ASIC_REVISION_MASK); 1998 1999 dev->node_props.location_id = pci_dev_id(gpu->adev->pdev); 2000 + if (KFD_GC_VERSION(dev->gpu->kfd) == IP_VERSION(9, 4, 3)) 2001 + dev->node_props.location_id |= dev->gpu->node_id; 2002 2003 dev->node_props.domain = pci_domain_nr(gpu->adev->pdev->bus); 2004 dev->node_props.max_engine_clk_fcompute =
+13 -3
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
··· 1219 if (dc_link->type != dc_connection_mst_branch) 1220 return false; 1221 1222 - if (!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT || 1223 - dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT)) 1224 return false; 1225 1226 for (i = 0; i < MAX_PIPES; i++) ··· 1242 continue; 1243 1244 aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context; 1245 - if (!aconnector) 1246 continue; 1247 1248 stream_on_link[new_stream_on_link_num] = aconnector;
··· 1219 if (dc_link->type != dc_connection_mst_branch) 1220 return false; 1221 1222 + /* add a check for older MST DSC with no virtual DPCDs */ 1223 + if (needs_dsc_aux_workaround(dc_link) && 1224 + (!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT || 1225 + dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))) 1226 return false; 1227 1228 for (i = 0; i < MAX_PIPES; i++) ··· 1240 continue; 1241 1242 aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context; 1243 + if (!aconnector || !aconnector->dsc_aux) 1244 + continue; 1245 + 1246 + /* 1247 + * check if cached virtual MST DSC caps are available and DSC is supported 1248 + * as per specifications in their Virtual DPCD registers. 1249 + */ 1250 + if (!(aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported || 1251 + aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT)) 1252 continue; 1253 1254 stream_on_link[new_stream_on_link_num] = aconnector;
+2 -2
drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
··· 195 .dcn_downspread_percent = 0.5, 196 .gpuvm_min_page_size_bytes = 4096, 197 .hostvm_min_page_size_bytes = 4096, 198 - .do_urgent_latency_adjustment = 0, 199 .urgent_latency_adjustment_fabric_clock_component_us = 0, 200 - .urgent_latency_adjustment_fabric_clock_reference_mhz = 0, 201 }; 202 203 void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr)
··· 195 .dcn_downspread_percent = 0.5, 196 .gpuvm_min_page_size_bytes = 4096, 197 .hostvm_min_page_size_bytes = 4096, 198 + .do_urgent_latency_adjustment = 1, 199 .urgent_latency_adjustment_fabric_clock_component_us = 0, 200 + .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000, 201 }; 202 203 void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr)
+27 -6
drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
··· 638 639 bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable) 640 { 641 - struct dc_link *edp_links[MAX_NUM_EDP]; 642 - int i, edp_num; 643 if (dc->debug.dmcub_emulation) 644 return true; 645 646 if (enable) { 647 - dc_get_edp_links(dc, edp_links, &edp_num); 648 - if (edp_num == 0 || edp_num > 1) 649 - return false; 650 651 for (i = 0; i < dc->current_state->stream_count; ++i) { 652 struct dc_stream_state *stream = dc->current_state->streams[i]; 653 654 - if (!stream->dpms_off && !dc_is_embedded_signal(stream->signal)) 655 return false; 656 } 657 } 658 659 // TODO: review other cases when idle optimization is allowed
··· 638 639 bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable) 640 { 641 if (dc->debug.dmcub_emulation) 642 return true; 643 644 if (enable) { 645 + uint32_t num_active_edp = 0; 646 + int i; 647 648 for (i = 0; i < dc->current_state->stream_count; ++i) { 649 struct dc_stream_state *stream = dc->current_state->streams[i]; 650 + struct dc_link *link = stream->link; 651 + bool is_psr = link && !link->panel_config.psr.disable_psr && 652 + (link->psr_settings.psr_version == DC_PSR_VERSION_1 || 653 + link->psr_settings.psr_version == DC_PSR_VERSION_SU_1); 654 + bool is_replay = link && link->replay_settings.replay_feature_enabled; 655 656 + /* Ignore streams that disabled. */ 657 + if (stream->dpms_off) 658 + continue; 659 + 660 + /* Active external displays block idle optimizations. */ 661 + if (!dc_is_embedded_signal(stream->signal)) 662 return false; 663 + 664 + /* If not PWRSEQ0 can't enter idle optimizations */ 665 + if (link && link->link_index != 0) 666 + return false; 667 + 668 + /* Check for panel power features required for idle optimizations. */ 669 + if (!is_psr && !is_replay) 670 + return false; 671 + 672 + num_active_edp += 1; 673 } 674 + 675 + /* If more than one active eDP then disallow. */ 676 + if (num_active_edp > 1) 677 + return false; 678 } 679 680 // TODO: review other cases when idle optimization is allowed
+1 -1
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
··· 226 struct amdgpu_device *adev = smu->adev; 227 int ret = 0; 228 229 - if (!en && !adev->in_s0ix) { 230 /* Adds a GFX reset as workaround just before sending the 231 * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering 232 * an invalid state.
··· 226 struct amdgpu_device *adev = smu->adev; 227 int ret = 0; 228 229 + if (!en && adev->in_s4) { 230 /* Adds a GFX reset as workaround just before sending the 231 * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering 232 * an invalid state.
+1 -1
drivers/gpu/drm/drm_connector.c
··· 2940 dev->mode_config.max_width, 2941 dev->mode_config.max_height); 2942 else 2943 - drm_dbg_kms(dev, "User-space requested a forced probe on [CONNECTOR:%d:%s] but is not the DRM master, demoting to read-only probe", 2944 connector->base.id, connector->name); 2945 } 2946
··· 2940 dev->mode_config.max_width, 2941 dev->mode_config.max_height); 2942 else 2943 + drm_dbg_kms(dev, "User-space requested a forced probe on [CONNECTOR:%d:%s] but is not the DRM master, demoting to read-only probe\n", 2944 connector->base.id, connector->name); 2945 } 2946
+8 -105
drivers/gpu/drm/i915/display/intel_audio.c
··· 76 struct intel_crtc_state *crtc_state); 77 }; 78 79 - /* DP N/M table */ 80 - #define LC_810M 810000 81 - #define LC_540M 540000 82 - #define LC_270M 270000 83 - #define LC_162M 162000 84 - 85 - struct dp_aud_n_m { 86 - int sample_rate; 87 - int clock; 88 - u16 m; 89 - u16 n; 90 - }; 91 - 92 struct hdmi_aud_ncts { 93 int sample_rate; 94 int clock; 95 int n; 96 int cts; 97 }; 98 - 99 - /* Values according to DP 1.4 Table 2-104 */ 100 - static const struct dp_aud_n_m dp_aud_n_m[] = { 101 - { 32000, LC_162M, 1024, 10125 }, 102 - { 44100, LC_162M, 784, 5625 }, 103 - { 48000, LC_162M, 512, 3375 }, 104 - { 64000, LC_162M, 2048, 10125 }, 105 - { 88200, LC_162M, 1568, 5625 }, 106 - { 96000, LC_162M, 1024, 3375 }, 107 - { 128000, LC_162M, 4096, 10125 }, 108 - { 176400, LC_162M, 3136, 5625 }, 109 - { 192000, LC_162M, 2048, 3375 }, 110 - { 32000, LC_270M, 1024, 16875 }, 111 - { 44100, LC_270M, 784, 9375 }, 112 - { 48000, LC_270M, 512, 5625 }, 113 - { 64000, LC_270M, 2048, 16875 }, 114 - { 88200, LC_270M, 1568, 9375 }, 115 - { 96000, LC_270M, 1024, 5625 }, 116 - { 128000, LC_270M, 4096, 16875 }, 117 - { 176400, LC_270M, 3136, 9375 }, 118 - { 192000, LC_270M, 2048, 5625 }, 119 - { 32000, LC_540M, 1024, 33750 }, 120 - { 44100, LC_540M, 784, 18750 }, 121 - { 48000, LC_540M, 512, 11250 }, 122 - { 64000, LC_540M, 2048, 33750 }, 123 - { 88200, LC_540M, 1568, 18750 }, 124 - { 96000, LC_540M, 1024, 11250 }, 125 - { 128000, LC_540M, 4096, 33750 }, 126 - { 176400, LC_540M, 3136, 18750 }, 127 - { 192000, LC_540M, 2048, 11250 }, 128 - { 32000, LC_810M, 1024, 50625 }, 129 - { 44100, LC_810M, 784, 28125 }, 130 - { 48000, LC_810M, 512, 16875 }, 131 - { 64000, LC_810M, 2048, 50625 }, 132 - { 88200, LC_810M, 1568, 28125 }, 133 - { 96000, LC_810M, 1024, 16875 }, 134 - { 128000, LC_810M, 4096, 50625 }, 135 - { 176400, LC_810M, 3136, 28125 }, 136 - { 192000, LC_810M, 2048, 16875 }, 137 - }; 138 - 139 - static const struct dp_aud_n_m * 140 - audio_config_dp_get_n_m(const struct 
intel_crtc_state *crtc_state, int rate) 141 - { 142 - int i; 143 - 144 - for (i = 0; i < ARRAY_SIZE(dp_aud_n_m); i++) { 145 - if (rate == dp_aud_n_m[i].sample_rate && 146 - crtc_state->port_clock == dp_aud_n_m[i].clock) 147 - return &dp_aud_n_m[i]; 148 - } 149 - 150 - return NULL; 151 - } 152 153 static const struct { 154 int clock; ··· 320 const struct intel_crtc_state *crtc_state) 321 { 322 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 323 - struct i915_audio_component *acomp = i915->display.audio.component; 324 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 325 - enum port port = encoder->port; 326 - const struct dp_aud_n_m *nm; 327 - int rate; 328 - u32 tmp; 329 330 - rate = acomp ? acomp->aud_sample_rate[port] : 0; 331 - nm = audio_config_dp_get_n_m(crtc_state, rate); 332 - if (nm) 333 - drm_dbg_kms(&i915->drm, "using Maud %u, Naud %u\n", nm->m, 334 - nm->n); 335 - else 336 - drm_dbg_kms(&i915->drm, "using automatic Maud, Naud\n"); 337 338 - tmp = intel_de_read(i915, HSW_AUD_CFG(cpu_transcoder)); 339 - tmp &= ~AUD_CONFIG_N_VALUE_INDEX; 340 - tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK; 341 - tmp &= ~AUD_CONFIG_N_PROG_ENABLE; 342 - tmp |= AUD_CONFIG_N_VALUE_INDEX; 343 - 344 - if (nm) { 345 - tmp &= ~AUD_CONFIG_N_MASK; 346 - tmp |= AUD_CONFIG_N(nm->n); 347 - tmp |= AUD_CONFIG_N_PROG_ENABLE; 348 - } 349 - 350 - intel_de_write(i915, HSW_AUD_CFG(cpu_transcoder), tmp); 351 - 352 - tmp = intel_de_read(i915, HSW_AUD_M_CTS_ENABLE(cpu_transcoder)); 353 - tmp &= ~AUD_CONFIG_M_MASK; 354 - tmp &= ~AUD_M_CTS_M_VALUE_INDEX; 355 - tmp &= ~AUD_M_CTS_M_PROG_ENABLE; 356 - 357 - if (nm) { 358 - tmp |= nm->m; 359 - tmp |= AUD_M_CTS_M_VALUE_INDEX; 360 - tmp |= AUD_M_CTS_M_PROG_ENABLE; 361 - } 362 - 363 - intel_de_write(i915, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp); 364 } 365 366 static void
··· 76 struct intel_crtc_state *crtc_state); 77 }; 78 79 struct hdmi_aud_ncts { 80 int sample_rate; 81 int clock; 82 int n; 83 int cts; 84 }; 85 86 static const struct { 87 int clock; ··· 387 const struct intel_crtc_state *crtc_state) 388 { 389 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 390 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 391 392 + /* Enable time stamps. Let HW calculate Maud/Naud values */ 393 + intel_de_rmw(i915, HSW_AUD_CFG(cpu_transcoder), 394 + AUD_CONFIG_N_VALUE_INDEX | 395 + AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK | 396 + AUD_CONFIG_UPPER_N_MASK | 397 + AUD_CONFIG_LOWER_N_MASK | 398 + AUD_CONFIG_N_PROG_ENABLE, 399 + AUD_CONFIG_N_VALUE_INDEX); 400 401 } 402 403 static void
+4 -15
drivers/gpu/drm/i915/display/intel_bios.c
··· 1042 panel->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI; 1043 panel->vbt.backlight.controller = 0; 1044 if (i915->display.vbt.version >= 191) { 1045 - size_t exp_size; 1046 1047 - if (i915->display.vbt.version >= 236) 1048 - exp_size = sizeof(struct bdb_lfp_backlight_data); 1049 - else if (i915->display.vbt.version >= 234) 1050 - exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_234; 1051 - else 1052 - exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_191; 1053 - 1054 - if (get_blocksize(backlight_data) >= exp_size) { 1055 - const struct lfp_backlight_control_method *method; 1056 - 1057 - method = &backlight_data->backlight_control[panel_type]; 1058 - panel->vbt.backlight.type = method->type; 1059 - panel->vbt.backlight.controller = method->controller; 1060 - } 1061 } 1062 1063 panel->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
··· 1042 panel->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI; 1043 panel->vbt.backlight.controller = 0; 1044 if (i915->display.vbt.version >= 191) { 1045 + const struct lfp_backlight_control_method *method; 1046 1047 + method = &backlight_data->backlight_control[panel_type]; 1048 + panel->vbt.backlight.type = method->type; 1049 + panel->vbt.backlight.controller = method->controller; 1050 } 1051 1052 panel->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
-5
drivers/gpu/drm/i915/display/intel_vbt_defs.h
··· 897 u16 reserved; 898 } __packed; 899 900 - #define EXP_BDB_LFP_BL_DATA_SIZE_REV_191 \ 901 - offsetof(struct bdb_lfp_backlight_data, brightness_level) 902 - #define EXP_BDB_LFP_BL_DATA_SIZE_REV_234 \ 903 - offsetof(struct bdb_lfp_backlight_data, brightness_precision_bits) 904 - 905 struct bdb_lfp_backlight_data { 906 u8 entry_size; 907 struct lfp_backlight_data_entry data[16];
··· 897 u16 reserved; 898 } __packed; 899 900 struct bdb_lfp_backlight_data { 901 u8 entry_size; 902 struct lfp_backlight_data_entry data[16];
+3 -3
drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
··· 8 #include "intel_gt_ccs_mode.h" 9 #include "intel_gt_regs.h" 10 11 - void intel_gt_apply_ccs_mode(struct intel_gt *gt) 12 { 13 int cslice; 14 u32 mode = 0; 15 int first_ccs = __ffs(CCS_MASK(gt)); 16 17 if (!IS_DG2(gt->i915)) 18 - return; 19 20 /* Build the value for the fixed CCS load balancing */ 21 for (cslice = 0; cslice < I915_MAX_CCS; cslice++) { ··· 35 XEHP_CCS_MODE_CSLICE_MASK); 36 } 37 38 - intel_uncore_write(gt->uncore, XEHP_CCS_MODE, mode); 39 }
··· 8 #include "intel_gt_ccs_mode.h" 9 #include "intel_gt_regs.h" 10 11 + unsigned int intel_gt_apply_ccs_mode(struct intel_gt *gt) 12 { 13 int cslice; 14 u32 mode = 0; 15 int first_ccs = __ffs(CCS_MASK(gt)); 16 17 if (!IS_DG2(gt->i915)) 18 + return 0; 19 20 /* Build the value for the fixed CCS load balancing */ 21 for (cslice = 0; cslice < I915_MAX_CCS; cslice++) { ··· 35 XEHP_CCS_MODE_CSLICE_MASK); 36 } 37 38 + return mode; 39 }
+1 -1
drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h
··· 8 9 struct intel_gt; 10 11 - void intel_gt_apply_ccs_mode(struct intel_gt *gt); 12 13 #endif /* __INTEL_GT_CCS_MODE_H__ */
··· 8 9 struct intel_gt; 10 11 + unsigned int intel_gt_apply_ccs_mode(struct intel_gt *gt); 12 13 #endif /* __INTEL_GT_CCS_MODE_H__ */
+3 -1
drivers/gpu/drm/i915/gt/intel_workarounds.c
··· 2859 static void ccs_engine_wa_mode(struct intel_engine_cs *engine, struct i915_wa_list *wal) 2860 { 2861 struct intel_gt *gt = engine->gt; 2862 2863 if (!IS_DG2(gt->i915)) 2864 return; ··· 2876 * After having disabled automatic load balancing we need to 2877 * assign all slices to a single CCS. We will call it CCS mode 1 2878 */ 2879 - intel_gt_apply_ccs_mode(gt); 2880 } 2881 2882 /*
··· 2859 static void ccs_engine_wa_mode(struct intel_engine_cs *engine, struct i915_wa_list *wal) 2860 { 2861 struct intel_gt *gt = engine->gt; 2862 + u32 mode; 2863 2864 if (!IS_DG2(gt->i915)) 2865 return; ··· 2875 * After having disabled automatic load balancing we need to 2876 * assign all slices to a single CCS. We will call it CCS mode 1 2877 */ 2878 + mode = intel_gt_apply_ccs_mode(gt); 2879 + wa_masked_en(wal, XEHP_CCS_MODE, mode); 2880 } 2881 2882 /*
+31 -39
drivers/gpu/drm/meson/meson_dw_hdmi.c
··· 106 #define HHI_HDMI_CLK_CNTL 0x1cc /* 0x73 */ 107 #define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 */ 108 #define HHI_HDMI_PHY_CNTL1 0x3a4 /* 0xe9 */ 109 #define HHI_HDMI_PHY_CNTL2 0x3a8 /* 0xea */ 110 #define HHI_HDMI_PHY_CNTL3 0x3ac /* 0xeb */ 111 #define HHI_HDMI_PHY_CNTL4 0x3b0 /* 0xec */ ··· 132 unsigned int addr); 133 void (*dwc_write)(struct meson_dw_hdmi *dw_hdmi, 134 unsigned int addr, unsigned int data); 135 }; 136 137 struct meson_dw_hdmi { ··· 388 dw_hdmi_bus_fmt_is_420(hdmi)) 389 mode_is_420 = true; 390 391 - /* Enable clocks */ 392 - regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100); 393 - 394 - /* Bring HDMITX MEM output of power down */ 395 - regmap_update_bits(priv->hhi, HHI_MEM_PD_REG0, 0xff << 8, 0); 396 - 397 - /* Bring out of reset */ 398 - dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_SW_RESET, 0); 399 - 400 - /* Enable internal pixclk, tmds_clk, spdif_clk, i2s_clk, cecclk */ 401 - dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL, 402 - 0x3, 0x3); 403 - 404 - /* Enable cec_clk and hdcp22_tmdsclk_en */ 405 - dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL, 406 - 0x3 << 4, 0x3 << 4); 407 - 408 - /* Enable normal output to PHY */ 409 - dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12)); 410 - 411 /* TMDS pattern setup */ 412 if (mode->clock > 340000 && !mode_is_420) { 413 dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01, ··· 408 409 /* Setup PHY parameters */ 410 meson_hdmi_phy_setup_mode(dw_hdmi, mode, mode_is_420); 411 - 412 - /* Setup PHY */ 413 - regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1, 414 - 0xffff << 16, 0x0390 << 16); 415 - 416 - /* BIT_INVERT */ 417 - if (dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxl-dw-hdmi") || 418 - dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi") || 419 - dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-g12a-dw-hdmi")) 420 - regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1, 421 - BIT(17), 0); 422 - else 423 - regmap_update_bits(priv->hhi, 
HHI_HDMI_PHY_CNTL1, 424 - BIT(17), BIT(17)); 425 426 /* Disable clock, fifo, fifo_wr */ 427 regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1, 0xf, 0); ··· 462 463 DRM_DEBUG_DRIVER("\n"); 464 465 - regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0); 466 } 467 468 static enum drm_connector_status dw_hdmi_read_hpd(struct dw_hdmi *hdmi, ··· 582 .fast_io = true, 583 }; 584 585 - static const struct meson_dw_hdmi_data meson_dw_hdmi_gx_data = { 586 .top_read = dw_hdmi_top_read, 587 .top_write = dw_hdmi_top_write, 588 .dwc_read = dw_hdmi_dwc_read, 589 .dwc_write = dw_hdmi_dwc_write, 590 }; 591 592 static const struct meson_dw_hdmi_data meson_dw_hdmi_g12a_data = { ··· 605 .top_write = dw_hdmi_g12a_top_write, 606 .dwc_read = dw_hdmi_g12a_dwc_read, 607 .dwc_write = dw_hdmi_g12a_dwc_write, 608 }; 609 610 static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi) ··· 640 641 meson_dw_hdmi->data->top_write(meson_dw_hdmi, 642 HDMITX_TOP_CLK_CNTL, 0xff); 643 644 /* Enable HDMI-TX Interrupt */ 645 meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_STAT_CLR, ··· 857 858 static const struct of_device_id meson_dw_hdmi_of_table[] = { 859 { .compatible = "amlogic,meson-gxbb-dw-hdmi", 860 - .data = &meson_dw_hdmi_gx_data }, 861 { .compatible = "amlogic,meson-gxl-dw-hdmi", 862 - .data = &meson_dw_hdmi_gx_data }, 863 { .compatible = "amlogic,meson-gxm-dw-hdmi", 864 - .data = &meson_dw_hdmi_gx_data }, 865 { .compatible = "amlogic,meson-g12a-dw-hdmi", 866 .data = &meson_dw_hdmi_g12a_data }, 867 { }
··· 106 #define HHI_HDMI_CLK_CNTL 0x1cc /* 0x73 */ 107 #define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 */ 108 #define HHI_HDMI_PHY_CNTL1 0x3a4 /* 0xe9 */ 109 + #define PHY_CNTL1_INIT 0x03900000 110 + #define PHY_INVERT BIT(17) 111 #define HHI_HDMI_PHY_CNTL2 0x3a8 /* 0xea */ 112 #define HHI_HDMI_PHY_CNTL3 0x3ac /* 0xeb */ 113 #define HHI_HDMI_PHY_CNTL4 0x3b0 /* 0xec */ ··· 130 unsigned int addr); 131 void (*dwc_write)(struct meson_dw_hdmi *dw_hdmi, 132 unsigned int addr, unsigned int data); 133 + u32 cntl0_init; 134 + u32 cntl1_init; 135 }; 136 137 struct meson_dw_hdmi { ··· 384 dw_hdmi_bus_fmt_is_420(hdmi)) 385 mode_is_420 = true; 386 387 /* TMDS pattern setup */ 388 if (mode->clock > 340000 && !mode_is_420) { 389 dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01, ··· 424 425 /* Setup PHY parameters */ 426 meson_hdmi_phy_setup_mode(dw_hdmi, mode, mode_is_420); 427 428 /* Disable clock, fifo, fifo_wr */ 429 regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1, 0xf, 0); ··· 492 493 DRM_DEBUG_DRIVER("\n"); 494 495 + /* Fallback to init mode */ 496 + regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL1, dw_hdmi->data->cntl1_init); 497 + regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, dw_hdmi->data->cntl0_init); 498 } 499 500 static enum drm_connector_status dw_hdmi_read_hpd(struct dw_hdmi *hdmi, ··· 610 .fast_io = true, 611 }; 612 613 + static const struct meson_dw_hdmi_data meson_dw_hdmi_gxbb_data = { 614 .top_read = dw_hdmi_top_read, 615 .top_write = dw_hdmi_top_write, 616 .dwc_read = dw_hdmi_dwc_read, 617 .dwc_write = dw_hdmi_dwc_write, 618 + .cntl0_init = 0x0, 619 + .cntl1_init = PHY_CNTL1_INIT | PHY_INVERT, 620 + }; 621 + 622 + static const struct meson_dw_hdmi_data meson_dw_hdmi_gxl_data = { 623 + .top_read = dw_hdmi_top_read, 624 + .top_write = dw_hdmi_top_write, 625 + .dwc_read = dw_hdmi_dwc_read, 626 + .dwc_write = dw_hdmi_dwc_write, 627 + .cntl0_init = 0x0, 628 + .cntl1_init = PHY_CNTL1_INIT, 629 }; 630 631 static const struct meson_dw_hdmi_data meson_dw_hdmi_g12a_data = 
{ ··· 622 .top_write = dw_hdmi_g12a_top_write, 623 .dwc_read = dw_hdmi_g12a_dwc_read, 624 .dwc_write = dw_hdmi_g12a_dwc_write, 625 + .cntl0_init = 0x000b4242, /* Bandgap */ 626 + .cntl1_init = PHY_CNTL1_INIT, 627 }; 628 629 static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi) ··· 655 656 meson_dw_hdmi->data->top_write(meson_dw_hdmi, 657 HDMITX_TOP_CLK_CNTL, 0xff); 658 + 659 + /* Enable normal output to PHY */ 660 + meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12)); 661 + 662 + /* Setup PHY */ 663 + regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL1, meson_dw_hdmi->data->cntl1_init); 664 + regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, meson_dw_hdmi->data->cntl0_init); 665 666 /* Enable HDMI-TX Interrupt */ 667 meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_STAT_CLR, ··· 865 866 static const struct of_device_id meson_dw_hdmi_of_table[] = { 867 { .compatible = "amlogic,meson-gxbb-dw-hdmi", 868 + .data = &meson_dw_hdmi_gxbb_data }, 869 { .compatible = "amlogic,meson-gxl-dw-hdmi", 870 + .data = &meson_dw_hdmi_gxl_data }, 871 { .compatible = "amlogic,meson-gxm-dw-hdmi", 872 + .data = &meson_dw_hdmi_gxl_data }, 873 { .compatible = "amlogic,meson-g12a-dw-hdmi", 874 .data = &meson_dw_hdmi_g12a_data }, 875 { }
+7 -12
drivers/gpu/drm/nouveau/nvkm/core/firmware.c
··· 205 break; 206 case NVKM_FIRMWARE_IMG_DMA: 207 nvkm_memory_unref(&memory); 208 - dma_unmap_single(fw->device->dev, fw->phys, sg_dma_len(&fw->mem.sgl), 209 - DMA_TO_DEVICE); 210 - kfree(fw->img); 211 break; 212 case NVKM_FIRMWARE_IMG_SGT: 213 nvkm_memory_unref(&memory); ··· 235 fw->img = kmemdup(src, fw->len, GFP_KERNEL); 236 break; 237 case NVKM_FIRMWARE_IMG_DMA: { 238 len = ALIGN(fw->len, PAGE_SIZE); 239 240 - fw->img = kmalloc(len, GFP_KERNEL); 241 - if (!fw->img) 242 - return -ENOMEM; 243 - 244 - memcpy(fw->img, src, fw->len); 245 - fw->phys = dma_map_single(fw->device->dev, fw->img, len, DMA_TO_DEVICE); 246 - if (dma_mapping_error(fw->device->dev, fw->phys)) { 247 - kfree(fw->img); 248 - return -EFAULT; 249 } 250 251 sg_init_one(&fw->mem.sgl, fw->img, len);
··· 205 break; 206 case NVKM_FIRMWARE_IMG_DMA: 207 nvkm_memory_unref(&memory); 208 + dma_free_coherent(fw->device->dev, sg_dma_len(&fw->mem.sgl), fw->img, fw->phys); 209 break; 210 case NVKM_FIRMWARE_IMG_SGT: 211 nvkm_memory_unref(&memory); ··· 237 fw->img = kmemdup(src, fw->len, GFP_KERNEL); 238 break; 239 case NVKM_FIRMWARE_IMG_DMA: { 240 + dma_addr_t addr; 241 + 242 len = ALIGN(fw->len, PAGE_SIZE); 243 244 + fw->img = dma_alloc_coherent(fw->device->dev, len, &addr, GFP_KERNEL); 245 + if (fw->img) { 246 + memcpy(fw->img, src, fw->len); 247 + fw->phys = addr; 248 } 249 250 sg_init_one(&fw->mem.sgl, fw->img, len);
+1 -1
drivers/gpu/drm/xe/xe_guc_ads.c
··· 100 struct guc_engine_usage engine_usage; 101 struct guc_um_init_params um_init_params; 102 /* From here on, location is dynamic! Refer to above diagram. */ 103 - struct guc_mmio_reg regset[0]; 104 } __packed; 105 106 #define ads_blob_read(ads_, field_) \
··· 100 struct guc_engine_usage engine_usage; 101 struct guc_um_init_params um_init_params; 102 /* From here on, location is dynamic! Refer to above diagram. */ 103 + struct guc_mmio_reg regset[]; 104 } __packed; 105 106 #define ads_blob_read(ads_, field_) \
+9 -1
drivers/gpu/drm/xe/xe_guc_ct.c
··· 120 { 121 struct xe_guc_ct *ct = arg; 122 123 xa_destroy(&ct->fence_lookup); 124 } 125 ··· 146 147 xe_assert(xe, !(guc_ct_size() % PAGE_SIZE)); 148 149 - drmm_mutex_init(&xe->drm, &ct->lock); 150 spin_lock_init(&ct->fast_lock); 151 xa_init(&ct->fence_lookup); 152 INIT_WORK(&ct->g2h_worker, g2h_worker_func); 153 init_waitqueue_head(&ct->wq); 154 init_waitqueue_head(&ct->g2h_fence_wq); 155 156 primelockdep(ct); 157
··· 120 { 121 struct xe_guc_ct *ct = arg; 122 123 + destroy_workqueue(ct->g2h_wq); 124 xa_destroy(&ct->fence_lookup); 125 } 126 ··· 145 146 xe_assert(xe, !(guc_ct_size() % PAGE_SIZE)); 147 148 + ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", 0); 149 + if (!ct->g2h_wq) 150 + return -ENOMEM; 151 + 152 spin_lock_init(&ct->fast_lock); 153 xa_init(&ct->fence_lookup); 154 INIT_WORK(&ct->g2h_worker, g2h_worker_func); 155 init_waitqueue_head(&ct->wq); 156 init_waitqueue_head(&ct->g2h_fence_wq); 157 + 158 + err = drmm_mutex_init(&xe->drm, &ct->lock); 159 + if (err) 160 + return err; 161 162 primelockdep(ct); 163
+1 -1
drivers/gpu/drm/xe/xe_guc_ct.h
··· 34 return; 35 36 wake_up_all(&ct->wq); 37 - queue_work(system_unbound_wq, &ct->g2h_worker); 38 xe_guc_ct_fast_path(ct); 39 } 40
··· 34 return; 35 36 wake_up_all(&ct->wq); 37 + queue_work(ct->g2h_wq, &ct->g2h_worker); 38 xe_guc_ct_fast_path(ct); 39 } 40
+2
drivers/gpu/drm/xe/xe_guc_ct_types.h
··· 120 wait_queue_head_t wq; 121 /** @g2h_fence_wq: wait queue used for G2H fencing */ 122 wait_queue_head_t g2h_fence_wq; 123 /** @msg: Message buffer */ 124 u32 msg[GUC_CTB_MSG_MAX_LEN]; 125 /** @fast_msg: Message buffer */
··· 120 wait_queue_head_t wq; 121 /** @g2h_fence_wq: wait queue used for G2H fencing */ 122 wait_queue_head_t g2h_fence_wq; 123 + /** @g2h_wq: used to process G2H */ 124 + struct workqueue_struct *g2h_wq; 125 /** @msg: Message buffer */ 126 u32 msg[GUC_CTB_MSG_MAX_LEN]; 127 /** @fast_msg: Message buffer */