Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-next-2024-01-15-1' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
"This is just a wrap-up of fixes from the last few days. It has the
proper fix to the i915/xe collision, we can clean up what you did
later once rc1 lands.

Otherwise there are a few other i915 fixes, a v3d fix, rockchip fixes, and a nouveau fix to
make GSP load on some original Turing GPUs.

i915:
- Fixes for kernel-doc warnings enforced in linux-next
- Another build warning fix for string formatting of intel_wakeref_t
- Display fixes for DP DSC BPC and C20 PLL state verification

v3d:
- register readout fix

rockchip:
- two build warning fixes

nouveau:
- fix GSP loading on Turing with different nvdec configuration"

* tag 'drm-next-2024-01-15-1' of git://anongit.freedesktop.org/drm/drm:
nouveau/gsp: handle engines in runl without nonstall interrupts.
drm/i915/perf: reconcile Excess struct member kernel-doc warnings
drm/i915/guc: reconcile Excess struct member kernel-doc warnings
drm/i915/gt: reconcile Excess struct member kernel-doc warnings
drm/i915/gem: reconcile Excess struct member kernel-doc warnings
drm/i915/dp: Fix the max DSC bpc supported by source
drm/i915: don't make assumptions about intel_wakeref_t type
drm/i915/dp: Fix the PSR debugfs entries wrt. MST connectors
drm/i915/display: Fix C20 pll selection for state verification
drm/v3d: Fix support for register debugging on the RPi 4
drm/rockchip: vop2: Drop unused if_dclk_rate variable
drm/rockchip: vop2: Drop superfluous include

+96 -78
+15 -10
drivers/gpu/drm/i915/display/intel_cx0_phy.c
··· 3067 3067 { 3068 3068 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 3069 3069 const struct intel_c20pll_state *mpll_sw_state = &state->cx0pll_state.c20; 3070 - bool use_mplla; 3070 + bool sw_use_mpllb = mpll_sw_state->tx[0] & C20_PHY_USE_MPLLB; 3071 + bool hw_use_mpllb = mpll_hw_state->tx[0] & C20_PHY_USE_MPLLB; 3071 3072 int i; 3072 3073 3073 - use_mplla = intel_c20_use_mplla(mpll_hw_state->clock); 3074 - if (use_mplla) { 3075 - for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mplla); i++) { 3076 - I915_STATE_WARN(i915, mpll_hw_state->mplla[i] != mpll_sw_state->mplla[i], 3077 - "[CRTC:%d:%s] mismatch in C20MPLLA: Register[%d] (expected 0x%04x, found 0x%04x)", 3078 - crtc->base.base.id, crtc->base.name, i, 3079 - mpll_sw_state->mplla[i], mpll_hw_state->mplla[i]); 3080 - } 3081 - } else { 3074 + I915_STATE_WARN(i915, sw_use_mpllb != hw_use_mpllb, 3075 + "[CRTC:%d:%s] mismatch in C20: Register MPLLB selection (expected %d, found %d)", 3076 + crtc->base.base.id, crtc->base.name, 3077 + sw_use_mpllb, hw_use_mpllb); 3078 + 3079 + if (hw_use_mpllb) { 3082 3080 for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mpllb); i++) { 3083 3081 I915_STATE_WARN(i915, mpll_hw_state->mpllb[i] != mpll_sw_state->mpllb[i], 3084 3082 "[CRTC:%d:%s] mismatch in C20MPLLB: Register[%d] (expected 0x%04x, found 0x%04x)", 3085 3083 crtc->base.base.id, crtc->base.name, i, 3086 3084 mpll_sw_state->mpllb[i], mpll_hw_state->mpllb[i]); 3085 + } 3086 + } else { 3087 + for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mplla); i++) { 3088 + I915_STATE_WARN(i915, mpll_hw_state->mplla[i] != mpll_sw_state->mplla[i], 3089 + "[CRTC:%d:%s] mismatch in C20MPLLA: Register[%d] (expected 0x%04x, found 0x%04x)", 3090 + crtc->base.base.id, crtc->base.name, i, 3091 + mpll_sw_state->mplla[i], mpll_hw_state->mplla[i]); 3087 3092 } 3088 3093 } 3089 3094
+2 -2
drivers/gpu/drm/i915/display/intel_display_power.c
··· 405 405 struct drm_i915_private, 406 406 display.power.domains); 407 407 408 - drm_dbg(&i915->drm, "async_put_wakeref %lu\n", 409 - power_domains->async_put_wakeref); 408 + drm_dbg(&i915->drm, "async_put_wakeref: %s\n", 409 + str_yes_no(power_domains->async_put_wakeref)); 410 410 411 411 print_power_domains(power_domains, "async_put_domains[0]", 412 412 &power_domains->async_put_domains[0]);
+1 -1
drivers/gpu/drm/i915/display/intel_dp.c
··· 2101 2101 } 2102 2102 } 2103 2103 2104 - dsc_max_bpc = intel_dp_dsc_min_src_input_bpc(i915); 2104 + dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(i915); 2105 2105 if (!dsc_max_bpc) 2106 2106 return -EINVAL; 2107 2107
+5 -5
drivers/gpu/drm/i915/display/intel_psr.c
··· 3319 3319 struct drm_i915_private *i915 = to_i915(connector->base.dev); 3320 3320 struct dentry *root = connector->base.debugfs_entry; 3321 3321 3322 - if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) { 3323 - if (!(HAS_DP20(i915) && 3324 - connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort)) 3325 - return; 3326 - } 3322 + /* TODO: Add support for MST connectors as well. */ 3323 + if ((connector->base.connector_type != DRM_MODE_CONNECTOR_eDP && 3324 + connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) || 3325 + connector->mst_port) 3326 + return; 3327 3327 3328 3328 debugfs_create_file("i915_psr_sink_status", 0444, root, 3329 3329 connector, &i915_psr_sink_status_fops);
+2 -2
drivers/gpu/drm/i915/gem/i915_gem_context_types.h
··· 412 412 413 413 /** @stale: tracks stale engines to be destroyed */ 414 414 struct { 415 - /** @lock: guards engines */ 415 + /** @stale.lock: guards engines */ 416 416 spinlock_t lock; 417 - /** @engines: list of stale engines */ 417 + /** @stale.engines: list of stale engines */ 418 418 struct list_head engines; 419 419 } stale; 420 420 };
+5 -2
drivers/gpu/drm/i915/gt/intel_gsc.h
··· 21 21 /** 22 22 * struct intel_gsc - graphics security controller 23 23 * 24 - * @gem_obj: scratch memory GSC operations 25 - * @intf : gsc interface 24 + * @intf: gsc interface 25 + * @intf.adev: MEI aux. device for this @intf 26 + * @intf.gem_obj: scratch memory GSC operations 27 + * @intf.irq: IRQ for this device (%-1 for no IRQ) 28 + * @intf.id: this interface's id number/index 26 29 */ 27 30 struct intel_gsc { 28 31 struct intel_gsc_intf {
+42 -33
drivers/gpu/drm/i915/gt/uc/intel_guc.h
··· 105 105 */ 106 106 struct { 107 107 /** 108 - * @lock: protects everything in submission_state, 109 - * ce->guc_id.id, and ce->guc_id.ref when transitioning in and 110 - * out of zero 108 + * @submission_state.lock: protects everything in 109 + * submission_state, ce->guc_id.id, and ce->guc_id.ref 110 + * when transitioning in and out of zero 111 111 */ 112 112 spinlock_t lock; 113 113 /** 114 - * @guc_ids: used to allocate new guc_ids, single-lrc 114 + * @submission_state.guc_ids: used to allocate new 115 + * guc_ids, single-lrc 115 116 */ 116 117 struct ida guc_ids; 117 118 /** 118 - * @num_guc_ids: Number of guc_ids, selftest feature to be able 119 - * to reduce this number while testing. 119 + * @submission_state.num_guc_ids: Number of guc_ids, selftest 120 + * feature to be able to reduce this number while testing. 120 121 */ 121 122 int num_guc_ids; 122 123 /** 123 - * @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc 124 + * @submission_state.guc_ids_bitmap: used to allocate 125 + * new guc_ids, multi-lrc 124 126 */ 125 127 unsigned long *guc_ids_bitmap; 126 128 /** 127 - * @guc_id_list: list of intel_context with valid guc_ids but no 128 - * refs 129 + * @submission_state.guc_id_list: list of intel_context 130 + * with valid guc_ids but no refs 129 131 */ 130 132 struct list_head guc_id_list; 131 133 /** 132 - * @guc_ids_in_use: Number single-lrc guc_ids in use 134 + * @submission_state.guc_ids_in_use: Number single-lrc 135 + * guc_ids in use 133 136 */ 134 137 unsigned int guc_ids_in_use; 135 138 /** 136 - * @destroyed_contexts: list of contexts waiting to be destroyed 137 - * (deregistered with the GuC) 139 + * @submission_state.destroyed_contexts: list of contexts 140 + * waiting to be destroyed (deregistered with the GuC) 138 141 */ 139 142 struct list_head destroyed_contexts; 140 143 /** 141 - * @destroyed_worker: worker to deregister contexts, need as we 142 - * need to take a GT PM reference and can't from destroy 143 - * function as it 
might be in an atomic context (no sleeping) 144 + * @submission_state.destroyed_worker: worker to deregister 145 + * contexts, need as we need to take a GT PM reference and 146 + * can't from destroy function as it might be in an atomic 147 + * context (no sleeping) 144 148 */ 145 149 struct work_struct destroyed_worker; 146 150 /** 147 - * @reset_fail_worker: worker to trigger a GT reset after an 148 - * engine reset fails 151 + * @submission_state.reset_fail_worker: worker to trigger 152 + * a GT reset after an engine reset fails 149 153 */ 150 154 struct work_struct reset_fail_worker; 151 155 /** 152 - * @reset_fail_mask: mask of engines that failed to reset 156 + * @submission_state.reset_fail_mask: mask of engines that 157 + * failed to reset 153 158 */ 154 159 intel_engine_mask_t reset_fail_mask; 155 160 /** 156 - * @sched_disable_delay_ms: schedule disable delay, in ms, for 157 - * contexts 161 + * @submission_state.sched_disable_delay_ms: schedule 162 + * disable delay, in ms, for contexts 158 163 */ 159 164 unsigned int sched_disable_delay_ms; 160 165 /** 161 - * @sched_disable_gucid_threshold: threshold of min remaining available 162 - * guc_ids before we start bypassing the schedule disable delay 166 + * @submission_state.sched_disable_gucid_threshold: 167 + * threshold of min remaining available guc_ids before 168 + * we start bypassing the schedule disable delay 163 169 */ 164 170 unsigned int sched_disable_gucid_threshold; 165 171 } submission_state; ··· 249 243 */ 250 244 struct { 251 245 /** 252 - * @lock: Lock protecting the below fields and the engine stats. 246 + * @timestamp.lock: Lock protecting the below fields and 247 + * the engine stats. 253 248 */ 254 249 spinlock_t lock; 255 250 256 251 /** 257 - * @gt_stamp: 64 bit extended value of the GT timestamp. 252 + * @timestamp.gt_stamp: 64-bit extended value of the GT 253 + * timestamp. 
258 254 */ 259 255 u64 gt_stamp; 260 256 261 257 /** 262 - * @ping_delay: Period for polling the GT timestamp for 263 - * overflow. 258 + * @timestamp.ping_delay: Period for polling the GT 259 + * timestamp for overflow. 264 260 */ 265 261 unsigned long ping_delay; 266 262 267 263 /** 268 - * @work: Periodic work to adjust GT timestamp, engine and 269 - * context usage for overflows. 264 + * @timestamp.work: Periodic work to adjust GT timestamp, 265 + * engine and context usage for overflows. 270 266 */ 271 267 struct delayed_work work; 272 268 273 269 /** 274 - * @shift: Right shift value for the gpm timestamp 270 + * @timestamp.shift: Right shift value for the gpm timestamp 275 271 */ 276 272 u32 shift; 277 273 278 274 /** 279 - * @last_stat_jiffies: jiffies at last actual stats collection time 280 - * We use this timestamp to ensure we don't oversample the 281 - * stats because runtime power management events can trigger 282 - * stats collection at much higher rates than required. 275 + * @timestamp.last_stat_jiffies: jiffies at last actual 276 + * stats collection time. We use this timestamp to ensure 277 + * we don't oversample the stats because runtime power 278 + * management events can trigger stats collection at much 279 + * higher rates than required. 283 280 */ 284 281 unsigned long last_stat_jiffies; 285 282 } timestamp;
+6 -3
drivers/gpu/drm/i915/i915_perf_types.h
··· 291 291 int size_exponent; 292 292 293 293 /** 294 - * @ptr_lock: Locks reads and writes to all head/tail state 294 + * @oa_buffer.ptr_lock: Locks reads and writes to all 295 + * head/tail state 295 296 * 296 297 * Consider: the head and tail pointer state needs to be read 297 298 * consistently from a hrtimer callback (atomic context) and ··· 314 313 spinlock_t ptr_lock; 315 314 316 315 /** 317 - * @head: Although we can always read back the head pointer register, 316 + * @oa_buffer.head: Although we can always read back 317 + * the head pointer register, 318 318 * we prefer to avoid trusting the HW state, just to avoid any 319 319 * risk that some hardware condition could * somehow bump the 320 320 * head pointer unpredictably and cause us to forward the wrong ··· 324 322 u32 head; 325 323 326 324 /** 327 - * @tail: The last verified tail that can be read by userspace. 325 + * @oa_buffer.tail: The last verified tail that can be 326 + * read by userspace. 328 327 */ 329 328 u32 tail; 330 329 } oa_buffer;
+4
drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
··· 550 550 struct nvkm_engn *engn = list_first_entry(&runl->engns, typeof(*engn), head); 551 551 552 552 runl->nonstall.vector = engn->func->nonstall(engn); 553 + 554 + /* if no nonstall vector just keep going */ 555 + if (runl->nonstall.vector == -1) 556 + continue; 553 557 if (runl->nonstall.vector < 0) { 554 558 RUNL_ERROR(runl, "nonstall %d", runl->nonstall.vector); 555 559 return runl->nonstall.vector;
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
··· 351 351 int ret; 352 352 353 353 ret = nvkm_gsp_intr_nonstall(subdev->device->gsp, subdev->type, subdev->inst); 354 - WARN_ON(ret < 0); 354 + WARN_ON(ret == -ENOENT); 355 355 return ret; 356 356 } 357 357
+2 -6
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
··· 25 25 nvkm_gsp_intr_nonstall(struct nvkm_gsp *gsp, enum nvkm_subdev_type type, int inst) 26 26 { 27 27 for (int i = 0; i < gsp->intr_nr; i++) { 28 - if (gsp->intr[i].type == type && gsp->intr[i].inst == inst) { 29 - if (gsp->intr[i].nonstall != ~0) 30 - return gsp->intr[i].nonstall; 31 - 32 - return -EINVAL; 33 - } 28 + if (gsp->intr[i].type == type && gsp->intr[i].inst == inst) 29 + return gsp->intr[i].nonstall; 34 30 } 35 31 36 32 return -ENOENT;
+1 -3
drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
··· 35 35 36 36 #include "rockchip_drm_drv.h" 37 37 #include "rockchip_drm_gem.h" 38 - #include "rockchip_drm_fb.h" 39 38 #include "rockchip_drm_vop2.h" 40 39 #include "rockchip_rgb.h" 41 40 ··· 1680 1681 unsigned long dclk_core_rate = v_pixclk >> 2; 1681 1682 unsigned long dclk_rate = v_pixclk; 1682 1683 unsigned long dclk_out_rate; 1683 - unsigned long if_dclk_rate; 1684 1684 unsigned long if_pixclk_rate; 1685 1685 int K = 1; 1686 1686 ··· 1694 1696 } 1695 1697 1696 1698 if_pixclk_rate = (dclk_core_rate << 1) / K; 1697 - if_dclk_rate = dclk_core_rate / K; 1698 1699 /* 1700 + * if_dclk_rate = dclk_core_rate / K; 1699 1701 * *if_pixclk_div = dclk_rate / if_pixclk_rate; 1700 1702 * *if_dclk_div = dclk_rate / if_dclk_rate; 1701 1703 */
+10 -10
drivers/gpu/drm/v3d/v3d_debugfs.c
··· 62 62 REGDEF(33, 71, V3D_PTB_BPCA), 63 63 REGDEF(33, 71, V3D_PTB_BPCS), 64 64 65 - REGDEF(33, 41, V3D_GMP_STATUS(33)), 66 - REGDEF(33, 41, V3D_GMP_CFG(33)), 67 - REGDEF(33, 41, V3D_GMP_VIO_ADDR(33)), 65 + REGDEF(33, 42, V3D_GMP_STATUS(33)), 66 + REGDEF(33, 42, V3D_GMP_CFG(33)), 67 + REGDEF(33, 42, V3D_GMP_VIO_ADDR(33)), 68 68 69 69 REGDEF(33, 71, V3D_ERR_FDBGO), 70 70 REGDEF(33, 71, V3D_ERR_FDBGB), ··· 74 74 75 75 static const struct v3d_reg_def v3d_csd_reg_defs[] = { 76 76 REGDEF(41, 71, V3D_CSD_STATUS), 77 - REGDEF(41, 41, V3D_CSD_CURRENT_CFG0(41)), 78 - REGDEF(41, 41, V3D_CSD_CURRENT_CFG1(41)), 79 - REGDEF(41, 41, V3D_CSD_CURRENT_CFG2(41)), 80 - REGDEF(41, 41, V3D_CSD_CURRENT_CFG3(41)), 81 - REGDEF(41, 41, V3D_CSD_CURRENT_CFG4(41)), 82 - REGDEF(41, 41, V3D_CSD_CURRENT_CFG5(41)), 83 - REGDEF(41, 41, V3D_CSD_CURRENT_CFG6(41)), 77 + REGDEF(41, 42, V3D_CSD_CURRENT_CFG0(41)), 78 + REGDEF(41, 42, V3D_CSD_CURRENT_CFG1(41)), 79 + REGDEF(41, 42, V3D_CSD_CURRENT_CFG2(41)), 80 + REGDEF(41, 42, V3D_CSD_CURRENT_CFG3(41)), 81 + REGDEF(41, 42, V3D_CSD_CURRENT_CFG4(41)), 82 + REGDEF(41, 42, V3D_CSD_CURRENT_CFG5(41)), 83 + REGDEF(41, 42, V3D_CSD_CURRENT_CFG6(41)), 84 84 REGDEF(71, 71, V3D_CSD_CURRENT_CFG0(71)), 85 85 REGDEF(71, 71, V3D_CSD_CURRENT_CFG1(71)), 86 86 REGDEF(71, 71, V3D_CSD_CURRENT_CFG2(71)),