Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-intel-next-fixes-2022-12-01' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

- Multicast register fix (Matt)
- Fix workarounds on gen2-3 (Tvrtko)
- Bigjoiner fix (Ville)
- Make GuC default_lists const data (Jani)
- Acquire forcewake before uncore read (Umesh)
- Selftest fix (Umesh)
- HuC-related fixes (Daniele)
- Fix some incorrect return values (Janusz)
- Fix a memory leak in bios related code (Xia)
- Fix VBT send packet port selection (Mikko)
- Bump DG2's DMC firmware version to fix register noclaims and a few restores (Gustavo)

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/Y4jZBRw9KvlKgkr6@intel.com

+113 -65
+1 -1
drivers/gpu/drm/i915/display/intel_bios.c
··· 414 414 ptrs->lvds_entries++; 415 415 416 416 if (size != 0 || ptrs->lvds_entries != 3) { 417 - kfree(ptrs); 417 + kfree(ptrs_block); 418 418 return NULL; 419 419 } 420 420
+7 -3
drivers/gpu/drm/i915/display/intel_display.c
··· 3733 3733 3734 3734 static u8 bigjoiner_pipes(struct drm_i915_private *i915) 3735 3735 { 3736 + u8 pipes; 3737 + 3736 3738 if (DISPLAY_VER(i915) >= 12) 3737 - return BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D); 3739 + pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D); 3738 3740 else if (DISPLAY_VER(i915) >= 11) 3739 - return BIT(PIPE_B) | BIT(PIPE_C); 3741 + pipes = BIT(PIPE_B) | BIT(PIPE_C); 3740 3742 else 3741 - return 0; 3743 + pipes = 0; 3744 + 3745 + return pipes & RUNTIME_INFO(i915)->pipe_mask; 3742 3746 } 3743 3747 3744 3748 static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
+2 -2
drivers/gpu/drm/i915/display/intel_dmc.c
··· 52 52 53 53 #define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE 54 54 55 - #define DG2_DMC_PATH DMC_PATH(dg2, 2, 07) 56 - #define DG2_DMC_VERSION_REQUIRED DMC_VERSION(2, 07) 55 + #define DG2_DMC_PATH DMC_PATH(dg2, 2, 08) 56 + #define DG2_DMC_VERSION_REQUIRED DMC_VERSION(2, 8) 57 57 MODULE_FIRMWARE(DG2_DMC_PATH); 58 58 59 59 #define ADLP_DMC_PATH DMC_PATH(adlp, 2, 16)
+2 -2
drivers/gpu/drm/i915/display/intel_dsi_vbt.c
··· 137 137 return ffs(intel_dsi->ports) - 1; 138 138 139 139 if (seq_port) { 140 - if (intel_dsi->ports & PORT_B) 140 + if (intel_dsi->ports & BIT(PORT_B)) 141 141 return PORT_B; 142 - else if (intel_dsi->ports & PORT_C) 142 + else if (intel_dsi->ports & BIT(PORT_C)) 143 143 return PORT_C; 144 144 } 145 145
+10 -5
drivers/gpu/drm/i915/gt/intel_gt.c
··· 677 677 return -EINTR; 678 678 } 679 679 680 - return timeout ? timeout : intel_uc_wait_for_idle(&gt->uc, 681 - remaining_timeout); 680 + if (timeout) 681 + return timeout; 682 + 683 + if (remaining_timeout < 0) 684 + remaining_timeout = 0; 685 + 686 + return intel_uc_wait_for_idle(&gt->uc, remaining_timeout); 682 687 } 683 688 684 689 int intel_gt_init(struct intel_gt *gt) ··· 1040 1035 static int wait_for_invalidate(struct intel_gt *gt, struct reg_and_bit rb) 1041 1036 { 1042 1037 if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50)) 1043 - return intel_gt_mcr_wait_for_reg_fw(gt, rb.mcr_reg, rb.bit, 0, 1044 - TLB_INVAL_TIMEOUT_US, 1045 - TLB_INVAL_TIMEOUT_MS); 1038 + return intel_gt_mcr_wait_for_reg(gt, rb.mcr_reg, rb.bit, 0, 1039 + TLB_INVAL_TIMEOUT_US, 1040 + TLB_INVAL_TIMEOUT_MS); 1046 1041 else 1047 1042 return __intel_wait_for_register_fw(gt->uncore, rb.reg, rb.bit, 0, 1048 1043 TLB_INVAL_TIMEOUT_US,
+10 -8
drivers/gpu/drm/i915/gt/intel_gt_mcr.c
··· 730 730 * 731 731 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT. 732 732 */ 733 - int intel_gt_mcr_wait_for_reg_fw(struct intel_gt *gt, 734 - i915_mcr_reg_t reg, 735 - u32 mask, 736 - u32 value, 737 - unsigned int fast_timeout_us, 738 - unsigned int slow_timeout_ms) 733 + int intel_gt_mcr_wait_for_reg(struct intel_gt *gt, 734 + i915_mcr_reg_t reg, 735 + u32 mask, 736 + u32 value, 737 + unsigned int fast_timeout_us, 738 + unsigned int slow_timeout_ms) 739 739 { 740 - u32 reg_value = 0; 741 - #define done (((reg_value = intel_gt_mcr_read_any_fw(gt, reg)) & mask) == value) 742 740 int ret; 741 + 742 + lockdep_assert_not_held(&gt->uncore->lock); 743 + 744 + #define done ((intel_gt_mcr_read_any(gt, reg) & mask) == value) 743 745 744 746 /* Catch any overuse of this function */ 745 747 might_sleep_if(slow_timeout_ms);
+6 -6
drivers/gpu/drm/i915/gt/intel_gt_mcr.h
··· 37 37 void intel_gt_mcr_get_ss_steering(struct intel_gt *gt, unsigned int dss, 38 38 unsigned int *group, unsigned int *instance); 39 39 40 - int intel_gt_mcr_wait_for_reg_fw(struct intel_gt *gt, 41 - i915_mcr_reg_t reg, 42 - u32 mask, 43 - u32 value, 44 - unsigned int fast_timeout_us, 45 - unsigned int slow_timeout_ms); 40 + int intel_gt_mcr_wait_for_reg(struct intel_gt *gt, 41 + i915_mcr_reg_t reg, 42 + u32 mask, 43 + u32 value, 44 + unsigned int fast_timeout_us, 45 + unsigned int slow_timeout_ms); 46 46 47 47 /* 48 48 * Helper for for_each_ss_steering loop. On pre-Xe_HP platforms, subslice
+1 -1
drivers/gpu/drm/i915/gt/intel_gt_requests.c
··· 199 199 if (remaining_timeout) 200 200 *remaining_timeout = timeout; 201 201 202 - return active_count ? timeout : 0; 202 + return active_count ? timeout ?: -ETIME : 0; 203 203 } 204 204 205 205 static void retire_work_handler(struct work_struct *work)
+1 -4
drivers/gpu/drm/i915/gt/intel_workarounds.c
··· 3011 3011 static void 3012 3012 engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal) 3013 3013 { 3014 - if (I915_SELFTEST_ONLY(GRAPHICS_VER(engine->i915) < 4)) 3014 + if (GRAPHICS_VER(engine->i915) < 4) 3015 3015 return; 3016 3016 3017 3017 engine_fake_wa_init(engine, wal); ··· 3035 3035 void intel_engine_init_workarounds(struct intel_engine_cs *engine) 3036 3036 { 3037 3037 struct i915_wa_list *wal = &engine->wa_list; 3038 - 3039 - if (GRAPHICS_VER(engine->i915) < 4) 3040 - return; 3041 3038 3042 3039 wa_init_start(wal, engine->gt, "engine", engine->name); 3043 3040 engine_init_workarounds(engine, wal);
+1 -1
drivers/gpu/drm/i915/gt/selftest_engine_pm.c
··· 317 317 ENGINE_TRACE(engine, "measuring busy time\n"); 318 318 preempt_disable(); 319 319 de = intel_engine_get_busy_time(engine, &t[0]); 320 - mdelay(10); 320 + mdelay(100); 321 321 de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de); 322 322 preempt_enable(); 323 323 dt = ktime_sub(t[1], t[0]);
+1 -1
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
··· 170 170 } 171 171 172 172 /* List of lists */ 173 - static struct __guc_mmio_reg_descr_group default_lists[] = { 173 + static const struct __guc_mmio_reg_descr_group default_lists[] = { 174 174 MAKE_REGLIST(default_global_regs, PF, GLOBAL, 0), 175 175 MAKE_REGLIST(default_rc_class_regs, PF, ENGINE_CLASS, GUC_RENDER_CLASS), 176 176 MAKE_REGLIST(xe_lpd_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_RENDER_CLASS),
+40 -17
drivers/gpu/drm/i915/gt/uc/intel_huc.c
··· 211 211 huc->delayed_load.nb.notifier_call = NULL; 212 212 } 213 213 214 + static void delayed_huc_load_init(struct intel_huc *huc) 215 + { 216 + /* 217 + * Initialize fence to be complete as this is expected to be complete 218 + * unless there is a delayed HuC load in progress. 219 + */ 220 + i915_sw_fence_init(&huc->delayed_load.fence, 221 + sw_fence_dummy_notify); 222 + i915_sw_fence_commit(&huc->delayed_load.fence); 223 + 224 + hrtimer_init(&huc->delayed_load.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 225 + huc->delayed_load.timer.function = huc_delayed_load_timer_callback; 226 + } 227 + 228 + static void delayed_huc_load_fini(struct intel_huc *huc) 229 + { 230 + /* 231 + * the fence is initialized in init_early, so we need to clean it up 232 + * even if HuC loading is off. 233 + */ 234 + delayed_huc_load_complete(huc); 235 + i915_sw_fence_fini(&huc->delayed_load.fence); 236 + } 237 + 214 238 static bool vcs_supported(struct intel_gt *gt) 215 239 { 216 240 intel_engine_mask_t mask = gt->info.engine_mask; ··· 265 241 266 242 intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC); 267 243 244 + /* 245 + * we always init the fence as already completed, even if HuC is not 246 + * supported. This way we don't have to distinguish between HuC not 247 + * supported/disabled or already loaded, and can focus on if the load 248 + * is currently in progress (fence not complete) or not, which is what 249 + * we care about for stalling userspace submissions. 250 + */ 251 + delayed_huc_load_init(huc); 252 + 268 253 if (!vcs_supported(gt)) { 269 254 intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_NOT_SUPPORTED); 270 255 return; ··· 288 255 huc->status.mask = HUC_FW_VERIFIED; 289 256 huc->status.value = HUC_FW_VERIFIED; 290 257 } 291 - 292 - /* 293 - * Initialize fence to be complete as this is expected to be complete 294 - * unless there is a delayed HuC reload in progress. 
295 - */ 296 - i915_sw_fence_init(&huc->delayed_load.fence, 297 - sw_fence_dummy_notify); 298 - i915_sw_fence_commit(&huc->delayed_load.fence); 299 - 300 - hrtimer_init(&huc->delayed_load.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 301 - huc->delayed_load.timer.function = huc_delayed_load_timer_callback; 302 258 } 303 259 304 260 #define HUC_LOAD_MODE_STRING(x) (x ? "GSC" : "legacy") ··· 351 329 352 330 void intel_huc_fini(struct intel_huc *huc) 353 331 { 354 - if (!intel_uc_fw_is_loadable(&huc->fw)) 355 - return; 332 + /* 333 + * the fence is initialized in init_early, so we need to clean it up 334 + * even if HuC loading is off. 335 + */ 336 + delayed_huc_load_fini(huc); 356 337 357 - delayed_huc_load_complete(huc); 358 - 359 - i915_sw_fence_fini(&huc->delayed_load.fence); 360 - intel_uc_fw_fini(&huc->fw); 338 + if (intel_uc_fw_is_loadable(&huc->fw)) 339 + intel_uc_fw_fini(&huc->fw); 361 340 } 362 341 363 342 void intel_huc_suspend(struct intel_huc *huc)
+1
drivers/gpu/drm/i915/gt/uc/intel_uc.c
··· 722 722 723 723 static const struct intel_uc_ops uc_ops_off = { 724 724 .init_hw = __uc_check_hw, 725 + .fini = __uc_fini, /* to clean-up the init_early initialization */ 725 726 }; 726 727 727 728 static const struct intel_uc_ops uc_ops_on = {
+30 -14
drivers/gpu/drm/i915/intel_uncore.h
··· 382 382 */ 383 383 __uncore_read(read64, 64, q, true) 384 384 385 - static inline u64 386 - intel_uncore_read64_2x32(struct intel_uncore *uncore, 387 - i915_reg_t lower_reg, i915_reg_t upper_reg) 388 - { 389 - u32 upper, lower, old_upper, loop = 0; 390 - upper = intel_uncore_read(uncore, upper_reg); 391 - do { 392 - old_upper = upper; 393 - lower = intel_uncore_read(uncore, lower_reg); 394 - upper = intel_uncore_read(uncore, upper_reg); 395 - } while (upper != old_upper && loop++ < 2); 396 - return (u64)upper << 32 | lower; 397 - } 398 - 399 385 #define intel_uncore_posting_read(...) ((void)intel_uncore_read_notrace(__VA_ARGS__)) 400 386 #define intel_uncore_posting_read16(...) ((void)intel_uncore_read16_notrace(__VA_ARGS__)) 401 387 ··· 439 453 val = (old & ~clear) | set; 440 454 if (val != old) 441 455 intel_uncore_write_fw(uncore, reg, val); 456 + } 457 + 458 + static inline u64 459 + intel_uncore_read64_2x32(struct intel_uncore *uncore, 460 + i915_reg_t lower_reg, i915_reg_t upper_reg) 461 + { 462 + u32 upper, lower, old_upper, loop = 0; 463 + enum forcewake_domains fw_domains; 464 + unsigned long flags; 465 + 466 + fw_domains = intel_uncore_forcewake_for_reg(uncore, lower_reg, 467 + FW_REG_READ); 468 + 469 + fw_domains |= intel_uncore_forcewake_for_reg(uncore, upper_reg, 470 + FW_REG_READ); 471 + 472 + spin_lock_irqsave(&uncore->lock, flags); 473 + intel_uncore_forcewake_get__locked(uncore, fw_domains); 474 + 475 + upper = intel_uncore_read_fw(uncore, upper_reg); 476 + do { 477 + old_upper = upper; 478 + lower = intel_uncore_read_fw(uncore, lower_reg); 479 + upper = intel_uncore_read_fw(uncore, upper_reg); 480 + } while (upper != old_upper && loop++ < 2); 481 + 482 + intel_uncore_forcewake_put__locked(uncore, fw_domains); 483 + spin_unlock_irqrestore(&uncore->lock, flags); 484 + 485 + return (u64)upper << 32 | lower; 442 486 } 443 487 444 488 static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore,