Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/i915/params: switch to device specific parameters

Start using device specific parameters instead of module parameters for
most things. The module parameters become the immutable initial values
for i915 parameters. The device specific parameters in i915->params
start life as a copy of i915_modparams. Any later changes are only
reflected in the debugfs.

The stragglers are:

* i915.force_probe and i915.modeset. Needed before dev_priv is
available. This is fine because the parameters are read-only and never
modified.

* i915.verbose_state_checks. Passing dev_priv to I915_STATE_WARN and
I915_STATE_WARN_ON would result in massive and ugly churn. This is
handled by not exposing the parameter via debugfs, and leaving the
parameter writable in sysfs. This may be fixed up in follow-up work.

* i915.inject_probe_failure. Only makes sense in terms of the module,
not the device. This is handled by not exposing the parameter via
debugfs.

v2: Fix uc i915 lookup code (Michał Winiarski)

Cc: Juha-Pekka Heikkilä <juha-pekka.heikkila@intel.com>
Cc: Venkata Sandeep Dhanalakota <venkata.s.dhanalakota@intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Acked-by: Michał Winiarski <michal.winiarski@intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20200618150402.14022-1-jani.nikula@intel.com

+120 -109
+3 -3
drivers/gpu/drm/i915/display/intel_bios.c
··· 479 479 struct drm_display_mode *panel_fixed_mode; 480 480 int index; 481 481 482 - index = i915_modparams.vbt_sdvo_panel_type; 482 + index = dev_priv->params.vbt_sdvo_panel_type; 483 483 if (index == -2) { 484 484 drm_dbg_kms(&dev_priv->drm, 485 485 "Ignore SDVO panel mode from BIOS VBT tables.\n"); ··· 829 829 u8 vswing; 830 830 831 831 /* Don't read from VBT if module parameter has valid value*/ 832 - if (i915_modparams.edp_vswing) { 832 + if (dev_priv->params.edp_vswing) { 833 833 dev_priv->vbt.edp.low_vswing = 834 - i915_modparams.edp_vswing == 1; 834 + dev_priv->params.edp_vswing == 1; 835 835 } else { 836 836 vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF; 837 837 dev_priv->vbt.edp.low_vswing = vswing == 0;
+2 -2
drivers/gpu/drm/i915/display/intel_crt.c
··· 833 833 connector->base.id, connector->name, 834 834 force); 835 835 836 - if (i915_modparams.load_detect_test) { 836 + if (dev_priv->params.load_detect_test) { 837 837 wakeref = intel_display_power_get(dev_priv, 838 838 intel_encoder->power_domain); 839 839 goto load_detect; ··· 889 889 else if (INTEL_GEN(dev_priv) < 4) 890 890 status = intel_crt_load_detect(crt, 891 891 to_intel_crtc(connector->state->crtc)->pipe); 892 - else if (i915_modparams.load_detect_test) 892 + else if (dev_priv->params.load_detect_test) 893 893 status = connector_status_disconnected; 894 894 else 895 895 status = connector_status_unknown;
+3 -3
drivers/gpu/drm/i915/display/intel_csr.c
··· 723 723 csr->max_fw_size = BXT_CSR_MAX_FW_SIZE; 724 724 } 725 725 726 - if (i915_modparams.dmc_firmware_path) { 727 - if (strlen(i915_modparams.dmc_firmware_path) == 0) { 726 + if (dev_priv->params.dmc_firmware_path) { 727 + if (strlen(dev_priv->params.dmc_firmware_path) == 0) { 728 728 csr->fw_path = NULL; 729 729 drm_info(&dev_priv->drm, 730 730 "Disabling CSR firmware and runtime PM\n"); 731 731 return; 732 732 } 733 733 734 - csr->fw_path = i915_modparams.dmc_firmware_path; 734 + csr->fw_path = dev_priv->params.dmc_firmware_path; 735 735 /* Bypass version check for firmware override. */ 736 736 csr->required_version = 0; 737 737 }
+6 -6
drivers/gpu/drm/i915/display/intel_display.c
··· 4886 4886 int ret; 4887 4887 4888 4888 /* reset doesn't touch the display */ 4889 - if (!i915_modparams.force_reset_modeset_test && 4889 + if (!dev_priv->params.force_reset_modeset_test && 4890 4890 !gpu_reset_clobbers_display(dev_priv)) 4891 4891 return; 4892 4892 ··· 7882 7882 if (!hsw_crtc_supports_ips(crtc)) 7883 7883 return false; 7884 7884 7885 - if (!i915_modparams.enable_ips) 7885 + if (!dev_priv->params.enable_ips) 7886 7886 return false; 7887 7887 7888 7888 if (crtc_state->pipe_bpp > 24) ··· 8153 8153 8154 8154 static bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 8155 8155 { 8156 - if (i915_modparams.panel_use_ssc >= 0) 8157 - return i915_modparams.panel_use_ssc != 0; 8156 + if (dev_priv->params.panel_use_ssc >= 0) 8157 + return dev_priv->params.panel_use_ssc != 0; 8158 8158 return dev_priv->vbt.lvds_use_ssc 8159 8159 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 8160 8160 } ··· 13585 13585 13586 13586 static bool fastboot_enabled(struct drm_i915_private *dev_priv) 13587 13587 { 13588 - if (i915_modparams.fastboot != -1) 13589 - return i915_modparams.fastboot; 13588 + if (dev_priv->params.fastboot != -1) 13589 + return dev_priv->params.fastboot; 13590 13590 13591 13591 /* Enable fastboot by default on Skylake and newer */ 13592 13592 if (INTEL_GEN(dev_priv) >= 9)
+1 -1
drivers/gpu/drm/i915/display/intel_display_debugfs.c
··· 125 125 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 126 126 127 127 seq_printf(m, "Enabled by kernel parameter: %s\n", 128 - yesno(i915_modparams.enable_ips)); 128 + yesno(dev_priv->params.enable_ips)); 129 129 130 130 if (INTEL_GEN(dev_priv) >= 8) { 131 131 seq_puts(m, "Currently: unknown\n");
+7 -7
drivers/gpu/drm/i915/display/intel_display_power.c
··· 4513 4513 mask = 0; 4514 4514 } 4515 4515 4516 - if (!i915_modparams.disable_power_well) 4516 + if (!dev_priv->params.disable_power_well) 4517 4517 max_dc = 0; 4518 4518 4519 4519 if (enable_dc >= 0 && enable_dc <= max_dc) { ··· 4602 4602 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4603 4603 int err; 4604 4604 4605 - i915_modparams.disable_power_well = 4605 + dev_priv->params.disable_power_well = 4606 4606 sanitize_disable_power_well_option(dev_priv, 4607 - i915_modparams.disable_power_well); 4607 + dev_priv->params.disable_power_well); 4608 4608 dev_priv->csr.allowed_dc_mask = 4609 - get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc); 4609 + get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc); 4610 4610 4611 4611 dev_priv->csr.target_dc_state = 4612 4612 sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); ··· 5568 5568 intel_display_power_get(i915, POWER_DOMAIN_INIT); 5569 5569 5570 5570 /* Disable power support if the user asked so. */ 5571 - if (!i915_modparams.disable_power_well) 5571 + if (!i915->params.disable_power_well) 5572 5572 intel_display_power_get(i915, POWER_DOMAIN_INIT); 5573 5573 intel_power_domains_sync_hw(i915); 5574 5574 ··· 5592 5592 fetch_and_zero(&i915->power_domains.wakeref); 5593 5593 5594 5594 /* Remove the refcount we took to keep power well support disabled. */ 5595 - if (!i915_modparams.disable_power_well) 5595 + if (!i915->params.disable_power_well) 5596 5596 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT); 5597 5597 5598 5598 intel_display_power_flush_work_sync(i915); ··· 5681 5681 * Even if power well support was disabled we still want to disable 5682 5682 * power wells if power domains must be deinitialized for suspend. 5683 5683 */ 5684 - if (!i915_modparams.disable_power_well) 5684 + if (!i915->params.disable_power_well) 5685 5685 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT); 5686 5686 5687 5687 intel_display_power_flush_work(i915);
+5 -3
drivers/gpu/drm/i915/display/intel_dp.c
··· 4707 4707 static bool 4708 4708 intel_dp_can_mst(struct intel_dp *intel_dp) 4709 4709 { 4710 - return i915_modparams.enable_dp_mst && 4710 + struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4711 + 4712 + return i915->params.enable_dp_mst && 4711 4713 intel_dp->can_mst && 4712 4714 intel_dp_sink_can_mst(intel_dp); 4713 4715 } ··· 4726 4724 "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n", 4727 4725 encoder->base.base.id, encoder->base.name, 4728 4726 yesno(intel_dp->can_mst), yesno(sink_can_mst), 4729 - yesno(i915_modparams.enable_dp_mst)); 4727 + yesno(i915->params.enable_dp_mst)); 4730 4728 4731 4729 if (!intel_dp->can_mst) 4732 4730 return; 4733 4731 4734 4732 intel_dp->is_mst = sink_can_mst && 4735 - i915_modparams.enable_dp_mst; 4733 + i915->params.enable_dp_mst; 4736 4734 4737 4735 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 4738 4736 intel_dp->is_mst);
+2 -2
drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
··· 348 348 struct intel_dp *intel_dp = enc_to_intel_dp(intel_connector->encoder); 349 349 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 350 350 351 - if (i915_modparams.enable_dpcd_backlight == 0 || 351 + if (i915->params.enable_dpcd_backlight == 0 || 352 352 !intel_dp_aux_display_control_capable(intel_connector)) 353 353 return -ENODEV; 354 354 ··· 358 358 */ 359 359 if (i915->vbt.backlight.type != 360 360 INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE && 361 - i915_modparams.enable_dpcd_backlight != 1 && 361 + i915->params.enable_dpcd_backlight != 1 && 362 362 !drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks, 363 363 DP_QUIRK_FORCE_DPCD_BACKLIGHT)) { 364 364 drm_info(&i915->drm,
+6 -6
drivers/gpu/drm/i915/display/intel_fbc.c
··· 740 740 return false; 741 741 } 742 742 743 - if (!i915_modparams.enable_fbc) { 743 + if (!dev_priv->params.enable_fbc) { 744 744 fbc->no_fbc_reason = "disabled per module param or by default"; 745 745 return false; 746 746 } ··· 1017 1017 1018 1018 fbc->flip_pending = false; 1019 1019 1020 - if (!i915_modparams.enable_fbc) { 1020 + if (!dev_priv->params.enable_fbc) { 1021 1021 intel_fbc_deactivate(dev_priv, "disabled at runtime per module param"); 1022 1022 __intel_fbc_disable(dev_priv); 1023 1023 ··· 1370 1370 */ 1371 1371 static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) 1372 1372 { 1373 - if (i915_modparams.enable_fbc >= 0) 1374 - return !!i915_modparams.enable_fbc; 1373 + if (dev_priv->params.enable_fbc >= 0) 1374 + return !!dev_priv->params.enable_fbc; 1375 1375 1376 1376 if (!HAS_FBC(dev_priv)) 1377 1377 return 0; ··· 1415 1415 if (need_fbc_vtd_wa(dev_priv)) 1416 1416 mkwrite_device_info(dev_priv)->display.has_fbc = false; 1417 1417 1418 - i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv); 1418 + dev_priv->params.enable_fbc = intel_sanitize_fbc_option(dev_priv); 1419 1419 drm_dbg_kms(&dev_priv->drm, "Sanitized enable_fbc value: %d\n", 1420 - i915_modparams.enable_fbc); 1420 + dev_priv->params.enable_fbc); 1421 1421 1422 1422 if (!HAS_FBC(dev_priv)) { 1423 1423 fbc->no_fbc_reason = "unsupported by this chipset";
+2 -2
drivers/gpu/drm/i915/display/intel_lvds.c
··· 784 784 struct drm_i915_private *dev_priv = to_i915(dev); 785 785 786 786 /* use the module option value if specified */ 787 - if (i915_modparams.lvds_channel_mode > 0) 788 - return i915_modparams.lvds_channel_mode == 2; 787 + if (dev_priv->params.lvds_channel_mode > 0) 788 + return dev_priv->params.lvds_channel_mode == 2; 789 789 790 790 /* single channel LVDS is limited to 112 MHz */ 791 791 if (lvds_encoder->attached_connector->panel.fixed_mode->clock > 112999)
+1 -1
drivers/gpu/drm/i915/display/intel_opregion.c
··· 801 801 { 802 802 struct intel_opregion *opregion = &dev_priv->opregion; 803 803 const struct firmware *fw = NULL; 804 - const char *name = i915_modparams.vbt_firmware; 804 + const char *name = dev_priv->params.vbt_firmware; 805 805 int ret; 806 806 807 807 if (!name || !*name)
+2 -2
drivers/gpu/drm/i915/display/intel_panel.c
··· 521 521 522 522 drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0); 523 523 524 - if (i915_modparams.invert_brightness < 0) 524 + if (dev_priv->params.invert_brightness < 0) 525 525 return val; 526 526 527 - if (i915_modparams.invert_brightness > 0 || 527 + if (dev_priv->params.invert_brightness > 0 || 528 528 dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) { 529 529 return panel->backlight.max - val + panel->backlight.min; 530 530 }
+5 -5
drivers/gpu/drm/i915/display/intel_psr.c
··· 83 83 { 84 84 switch (i915->psr.debug & I915_PSR_DEBUG_MODE_MASK) { 85 85 case I915_PSR_DEBUG_DEFAULT: 86 - return i915_modparams.enable_psr; 86 + return i915->params.enable_psr; 87 87 case I915_PSR_DEBUG_DISABLE: 88 88 return false; 89 89 default: ··· 426 426 if (INTEL_GEN(dev_priv) >= 11) 427 427 val |= EDP_PSR_TP4_TIME_0US; 428 428 429 - if (i915_modparams.psr_safest_params) { 429 + if (dev_priv->params.psr_safest_params) { 430 430 val |= EDP_PSR_TP1_TIME_2500us; 431 431 val |= EDP_PSR_TP2_TP3_TIME_2500us; 432 432 goto check_tp3_sel; ··· 507 507 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 508 508 u32 val = 0; 509 509 510 - if (i915_modparams.psr_safest_params) 510 + if (dev_priv->params.psr_safest_params) 511 511 return EDP_PSR2_TP2_TIME_2500us; 512 512 513 513 if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 && ··· 1500 1500 */ 1501 1501 dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE; 1502 1502 1503 - if (i915_modparams.enable_psr == -1) 1503 + if (dev_priv->params.enable_psr == -1) 1504 1504 if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable) 1505 - i915_modparams.enable_psr = 0; 1505 + dev_priv->params.enable_psr = 0; 1506 1506 1507 1507 /* Set link_standby x link_off defaults */ 1508 1508 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+2 -2
drivers/gpu/drm/i915/gem/i915_gem_context.c
··· 650 650 * context close. 651 651 */ 652 652 if (!i915_gem_context_is_persistent(ctx) || 653 - !i915_modparams.enable_hangcheck) 653 + !ctx->i915->params.enable_hangcheck) 654 654 kill_context(ctx); 655 655 656 656 i915_gem_context_put(ctx); ··· 667 667 * reset] are allowed to survive past termination. We require 668 668 * hangcheck to ensure that the persistent requests are healthy. 669 669 */ 670 - if (!i915_modparams.enable_hangcheck) 670 + if (!ctx->i915->params.enable_hangcheck) 671 671 return -EINVAL; 672 672 673 673 i915_gem_context_set_persistence(ctx);
+2 -1
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
··· 4 4 * Copyright © 2019 Intel Corporation 5 5 */ 6 6 7 + #include "i915_drv.h" 7 8 #include "i915_request.h" 8 9 9 10 #include "intel_context.h" ··· 134 133 goto unlock; 135 134 136 135 idle_pulse(engine, rq); 137 - if (i915_modparams.enable_hangcheck) 136 + if (engine->i915->params.enable_hangcheck) 138 137 engine->heartbeat.systole = i915_request_get(rq); 139 138 140 139 __i915_request_commit(rq);
+3 -3
drivers/gpu/drm/i915/gt/intel_reset.c
··· 638 638 639 639 bool intel_has_gpu_reset(const struct intel_gt *gt) 640 640 { 641 - if (!i915_modparams.reset) 641 + if (!gt->i915->params.reset) 642 642 return NULL; 643 643 644 644 return intel_get_gpu_reset(gt); ··· 646 646 647 647 bool intel_has_reset_engine(const struct intel_gt *gt) 648 648 { 649 - if (i915_modparams.reset < 2) 649 + if (gt->i915->params.reset < 2) 650 650 return false; 651 651 652 652 return INTEL_INFO(gt->i915)->has_reset_engine; ··· 1038 1038 awake = reset_prepare(gt); 1039 1039 1040 1040 if (!intel_has_gpu_reset(gt)) { 1041 - if (i915_modparams.reset) 1041 + if (gt->i915->params.reset) 1042 1042 drm_err(&gt->i915->drm, "GPU reset not supported\n"); 1043 1043 else 1044 1044 drm_dbg(&gt->i915->drm, "GPU reset disabled\n");
+3 -3
drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
··· 365 365 if (intel_gt_is_wedged(&i915->gt)) 366 366 return 0; 367 367 368 - saved_hangcheck = i915_modparams.enable_hangcheck; 369 - i915_modparams.enable_hangcheck = INT_MAX; 368 + saved_hangcheck = i915->params.enable_hangcheck; 369 + i915->params.enable_hangcheck = INT_MAX; 370 370 371 371 err = intel_gt_live_subtests(tests, &i915->gt); 372 372 373 - i915_modparams.enable_hangcheck = saved_hangcheck; 373 + i915->params.enable_hangcheck = saved_hangcheck; 374 374 return err; 375 375 } 376 376
+9 -6
drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
··· 424 424 425 425 static u32 __get_default_log_level(struct intel_guc_log *log) 426 426 { 427 + struct intel_guc *guc = log_to_guc(log); 428 + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; 429 + 427 430 /* A negative value means "use platform/config default" */ 428 - if (i915_modparams.guc_log_level < 0) { 431 + if (i915->params.guc_log_level < 0) { 429 432 return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) || 430 433 IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ? 431 434 GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_NON_VERBOSE; 432 435 } 433 436 434 - if (i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX) { 437 + if (i915->params.guc_log_level > GUC_LOG_LEVEL_MAX) { 435 438 DRM_WARN("Incompatible option detected: %s=%d, %s!\n", 436 - "guc_log_level", i915_modparams.guc_log_level, 439 + "guc_log_level", i915->params.guc_log_level, 437 440 "verbosity too high"); 438 441 return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) || 439 442 IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ? 440 443 GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_DISABLED; 441 444 } 442 445 443 - GEM_BUG_ON(i915_modparams.guc_log_level < GUC_LOG_LEVEL_DISABLED); 444 - GEM_BUG_ON(i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX); 445 - return i915_modparams.guc_log_level; 446 + GEM_BUG_ON(i915->params.guc_log_level < GUC_LOG_LEVEL_DISABLED); 447 + GEM_BUG_ON(i915->params.guc_log_level > GUC_LOG_LEVEL_MAX); 448 + return i915->params.guc_log_level; 446 449 } 447 450 448 451 int intel_guc_log_create(struct intel_guc_log *log)
+3 -1
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
··· 660 660 661 661 static bool __guc_submission_selected(struct intel_guc *guc) 662 662 { 663 + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; 664 + 663 665 if (!intel_guc_submission_is_supported(guc)) 664 666 return false; 665 667 666 - return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION; 668 + return i915->params.enable_guc & ENABLE_GUC_SUBMISSION; 667 669 } 668 670 669 671 void intel_guc_submission_init_early(struct intel_guc *guc)
+10 -10
drivers/gpu/drm/i915/gt/uc/intel_uc.c
··· 47 47 48 48 drm_dbg(&i915->drm, 49 49 "enable_guc=%d (guc:%s submission:%s huc:%s)\n", 50 - i915_modparams.enable_guc, 50 + i915->params.enable_guc, 51 51 yesno(intel_uc_wants_guc(uc)), 52 52 yesno(intel_uc_wants_guc_submission(uc)), 53 53 yesno(intel_uc_wants_huc(uc))); 54 54 55 - if (i915_modparams.enable_guc == -1) 55 + if (i915->params.enable_guc == -1) 56 56 return; 57 57 58 - if (i915_modparams.enable_guc == 0) { 58 + if (i915->params.enable_guc == 0) { 59 59 GEM_BUG_ON(intel_uc_wants_guc(uc)); 60 60 GEM_BUG_ON(intel_uc_wants_guc_submission(uc)); 61 61 GEM_BUG_ON(intel_uc_wants_huc(uc)); ··· 65 65 if (!intel_uc_supports_guc(uc)) 66 66 drm_info(&i915->drm, 67 67 "Incompatible option enable_guc=%d - %s\n", 68 - i915_modparams.enable_guc, "GuC is not supported!"); 68 + i915->params.enable_guc, "GuC is not supported!"); 69 69 70 - if (i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC && 70 + if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC && 71 71 !intel_uc_supports_huc(uc)) 72 72 drm_info(&i915->drm, 73 73 "Incompatible option enable_guc=%d - %s\n", 74 - i915_modparams.enable_guc, "HuC is not supported!"); 74 + i915->params.enable_guc, "HuC is not supported!"); 75 75 76 - if (i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION && 76 + if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION && 77 77 !intel_uc_supports_guc_submission(uc)) 78 78 drm_info(&i915->drm, 79 79 "Incompatible option enable_guc=%d - %s\n", 80 - i915_modparams.enable_guc, "GuC submission is N/A"); 80 + i915->params.enable_guc, "GuC submission is N/A"); 81 81 82 - if (i915_modparams.enable_guc & ~(ENABLE_GUC_SUBMISSION | 82 + if (i915->params.enable_guc & ~(ENABLE_GUC_SUBMISSION | 83 83 ENABLE_GUC_LOAD_HUC)) 84 84 drm_info(&i915->drm, 85 85 "Incompatible option enable_guc=%d - %s\n", 86 - i915_modparams.enable_guc, "undocumented flag"); 86 + i915->params.enable_guc, "undocumented flag"); 87 87 } 88 88 89 89 void intel_uc_init_early(struct intel_uc *uc)
+16 -16
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
··· 115 115 }, 116 116 117 117 static void 118 - __uc_fw_auto_select(struct intel_uc_fw *uc_fw, enum intel_platform p, u8 rev) 118 + __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw) 119 119 { 120 120 static const struct uc_fw_platform_requirement fw_blobs[] = { 121 121 INTEL_UC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB, HUC_FW_BLOB) 122 122 }; 123 + enum intel_platform p = INTEL_INFO(i915)->platform; 124 + u8 rev = INTEL_REVID(i915); 123 125 int i; 124 126 125 127 for (i = 0; i < ARRAY_SIZE(fw_blobs) && p <= fw_blobs[i].p; i++) { ··· 156 154 } 157 155 158 156 /* We don't want to enable GuC/HuC on pre-Gen11 by default */ 159 - if (i915_modparams.enable_guc == -1 && p < INTEL_ICELAKE) 157 + if (i915->params.enable_guc == -1 && p < INTEL_ICELAKE) 160 158 uc_fw->path = NULL; 161 159 } 162 160 163 - static const char *__override_guc_firmware_path(void) 161 + static const char *__override_guc_firmware_path(struct drm_i915_private *i915) 164 162 { 165 - if (i915_modparams.enable_guc & (ENABLE_GUC_SUBMISSION | 166 - ENABLE_GUC_LOAD_HUC)) 167 - return i915_modparams.guc_firmware_path; 163 + if (i915->params.enable_guc & (ENABLE_GUC_SUBMISSION | 164 + ENABLE_GUC_LOAD_HUC)) 165 + return i915->params.guc_firmware_path; 168 166 return ""; 169 167 } 170 168 171 - static const char *__override_huc_firmware_path(void) 169 + static const char *__override_huc_firmware_path(struct drm_i915_private *i915) 172 170 { 173 - if (i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC) 174 - return i915_modparams.huc_firmware_path; 171 + if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC) 172 + return i915->params.huc_firmware_path; 175 173 return ""; 176 174 } 177 175 178 - static void __uc_fw_user_override(struct intel_uc_fw *uc_fw) 176 + static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw) 179 177 { 180 178 const char *path = NULL; 181 179 182 180 switch (uc_fw->type) { 183 181 case INTEL_UC_FW_TYPE_GUC: 184 - path = __override_guc_firmware_path(); 182 + path = __override_guc_firmware_path(i915); 185 183 break; 186 184 case INTEL_UC_FW_TYPE_HUC: 187 - path = __override_huc_firmware_path(); 185 + path = __override_huc_firmware_path(i915); 188 186 break; 189 187 } 190 188 ··· 218 216 uc_fw->type = type; 219 217 220 218 if (HAS_GT_UC(i915)) { 221 - __uc_fw_auto_select(uc_fw, 222 - INTEL_INFO(i915)->platform, 223 - INTEL_REVID(i915)); 224 - __uc_fw_user_override(uc_fw); 219 + __uc_fw_auto_select(i915, uc_fw); 220 + __uc_fw_user_override(i915, uc_fw); 225 221 } 226 222 227 223 intel_uc_fw_change_status(uc_fw, uc_fw->path ? *uc_fw->path ?
+1 -1
drivers/gpu/drm/i915/i915_debugfs.c
··· 64 64 intel_driver_caps_print(&i915->caps, &p); 65 65 66 66 kernel_param_lock(THIS_MODULE); 67 - i915_params_dump(&i915_modparams, &p); 67 + i915_params_dump(&i915->params, &p); 68 68 kernel_param_unlock(THIS_MODULE); 69 69 70 70 return 0;
+1 -6
drivers/gpu/drm/i915/i915_debugfs_params.c
··· 138 138 char **s = m->private; 139 139 char *new, *old; 140 140 141 - /* FIXME: remove locking after params aren't the module params */ 142 - kernel_param_lock(THIS_MODULE); 143 - 144 141 old = *s; 145 142 new = strndup_user(ubuf, PAGE_SIZE); 146 143 if (IS_ERR(new)) { ··· 149 152 150 153 kfree(old); 151 154 out: 152 - kernel_param_unlock(THIS_MODULE); 153 - 154 155 return len; 155 156 } 156 157 ··· 224 229 struct dentry *i915_debugfs_params(struct drm_i915_private *i915) 225 230 { 226 231 struct drm_minor *minor = i915->drm.primary; 227 - struct i915_params *params = &i915_modparams; 232 + struct i915_params *params = &i915->params; 228 233 struct dentry *dir; 229 234 230 235 dir = debugfs_create_dir("i915_params", minor->debugfs_root);
+7 -2
drivers/gpu/drm/i915/i915_drv.c
··· 500 500 501 501 cpu_latency_qos_remove_request(&dev_priv->sb_qos); 502 502 mutex_destroy(&dev_priv->sb_lock); 503 + 504 + i915_params_free(&dev_priv->params); 503 505 } 504 506 505 507 /** ··· 922 920 i915->drm.pdev = pdev; 923 921 pci_set_drvdata(pdev, i915); 924 922 923 + /* Device parameters start as a copy of module parameters. */ 924 + i915_params_copy(&i915->params, &i915_modparams); 925 + 925 926 /* Setup the write-once "constant" device info */ 926 927 device_info = mkwrite_device_info(i915); 927 928 memcpy(device_info, match_info, sizeof(*device_info)); ··· 969 964 return PTR_ERR(i915); 970 965 971 966 /* Disable nuclear pageflip by default on pre-ILK */ 972 - if (!i915_modparams.nuclear_pageflip && match_info->gen < 5) 967 + if (!i915->params.nuclear_pageflip && match_info->gen < 5) 973 968 i915->drm.driver_features &= ~DRIVER_ATOMIC; 974 969 975 970 /* ··· 979 974 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 980 975 if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) { 981 976 if (INTEL_GEN(i915) >= 9 && i915_selftest.live < 0 && 982 - i915_modparams.fake_lmem_start) { 977 + i915->params.fake_lmem_start) { 983 978 mkwrite_device_info(i915)->memory_regions = 984 979 REGION_SMEM | REGION_LMEM | REGION_STOLEN; 985 980 mkwrite_device_info(i915)->is_dgfx = true;
+4 -1
drivers/gpu/drm/i915/i915_drv.h
··· 827 827 struct drm_i915_private { 828 828 struct drm_device drm; 829 829 830 + /* i915 device parameters */ 831 + struct i915_params params; 832 + 830 833 const struct intel_device_info __info; /* Use INTEL_INFO() to access. */ 831 834 struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */ 832 835 struct intel_driver_caps caps; ··· 1691 1688 1692 1689 /* Only valid when HAS_DISPLAY() is true */ 1693 1690 #define INTEL_DISPLAY_ENABLED(dev_priv) \ 1694 - (drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !i915_modparams.disable_display) 1691 + (drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !(dev_priv)->params.disable_display) 1695 1692 1696 1693 static inline bool intel_vtd_active(void) 1697 1694 {
+1 -1
drivers/gpu/drm/i915/i915_getparam.c
··· 80 80 return -ENODEV; 81 81 break; 82 82 case I915_PARAM_HAS_GPU_RESET: 83 - value = i915_modparams.enable_hangcheck && 83 + value = i915->params.enable_hangcheck && 84 84 intel_has_gpu_reset(&i915->gt); 85 85 if (value && intel_has_reset_engine(&i915->gt)) 86 86 value = 2;
+2 -2
drivers/gpu/drm/i915/i915_gpu_error.c
··· 1698 1698 error->reset_count = i915_reset_count(&i915->gpu_error); 1699 1699 error->suspend_count = i915->suspend_count; 1700 1700 1701 - i915_params_copy(&error->params, &i915_modparams); 1701 + i915_params_copy(&error->params, &i915->params); 1702 1702 memcpy(&error->device_info, 1703 1703 INTEL_INFO(i915), 1704 1704 sizeof(error->device_info)); ··· 1713 1713 { 1714 1714 struct i915_gpu_coredump *error; 1715 1715 1716 - if (!i915_modparams.error_capture) 1716 + if (!i915->params.error_capture) 1717 1717 return NULL; 1718 1718 1719 1719 error = kzalloc(sizeof(*error), gfp);
+4 -4
drivers/gpu/drm/i915/intel_gvt.c
··· 66 66 */ 67 67 void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv) 68 68 { 69 - if (!i915_modparams.enable_gvt) 69 + if (!dev_priv->params.enable_gvt) 70 70 return; 71 71 72 72 if (intel_vgpu_active(dev_priv)) { ··· 82 82 83 83 return; 84 84 bail: 85 - i915_modparams.enable_gvt = 0; 85 + dev_priv->params.enable_gvt = 0; 86 86 } 87 87 88 88 /** ··· 102 102 if (i915_inject_probe_failure(dev_priv)) 103 103 return -ENODEV; 104 104 105 - if (!i915_modparams.enable_gvt) { 105 + if (!dev_priv->params.enable_gvt) { 106 106 drm_dbg(&dev_priv->drm, 107 107 "GVT-g is disabled by kernel params\n"); 108 108 return 0; ··· 123 123 return 0; 124 124 125 125 bail: 126 - i915_modparams.enable_gvt = 0; 126 + dev_priv->params.enable_gvt = 0; 127 127 return 0; 128 128 } 129 129
+3 -3
drivers/gpu/drm/i915/intel_region_lmem.c
··· 76 76 { 77 77 int ret; 78 78 79 - if (i915_modparams.fake_lmem_start) { 79 + if (mem->i915->params.fake_lmem_start) { 80 80 ret = init_fake_lmem_bar(mem); 81 81 GEM_BUG_ON(ret); 82 82 } ··· 111 111 resource_size_t start; 112 112 113 113 GEM_BUG_ON(i915_ggtt_has_aperture(&i915->ggtt)); 114 - GEM_BUG_ON(!i915_modparams.fake_lmem_start); 114 + GEM_BUG_ON(!i915->params.fake_lmem_start); 115 115 116 116 /* Your mappable aperture belongs to me now! */ 117 117 mappable_end = pci_resource_len(pdev, 2); 118 118 io_start = pci_resource_start(pdev, 2), 119 - start = i915_modparams.fake_lmem_start; 119 + start = i915->params.fake_lmem_start; 120 120 121 121 mem = intel_memory_region_create(i915, 122 122 start,
+4 -4
drivers/gpu/drm/i915/intel_uncore.c
··· 1185 1185 read ? "read from" : "write to", 1186 1186 i915_mmio_reg_offset(reg))) 1187 1187 /* Only report the first N failures */ 1188 - i915_modparams.mmio_debug--; 1188 + uncore->i915->params.mmio_debug--; 1189 1189 } 1190 1190 1191 1191 static inline void ··· 1194 1194 const bool read, 1195 1195 const bool before) 1196 1196 { 1197 - if (likely(!i915_modparams.mmio_debug)) 1197 + if (likely(!uncore->i915->params.mmio_debug)) 1198 1198 return; 1199 1199 1200 1200 /* interrupts are disabled and re-enabled around uncore->lock usage */ ··· 2093 2093 goto out; 2094 2094 2095 2095 if (unlikely(check_for_unclaimed_mmio(uncore))) { 2096 - if (!i915_modparams.mmio_debug) { 2096 + if (!uncore->i915->params.mmio_debug) { 2097 2097 drm_dbg(&uncore->i915->drm, 2098 2098 "Unclaimed register detected, " 2099 2099 "enabling oneshot unclaimed register reporting. " 2100 2100 "Please use i915.mmio_debug=N for more information.\n"); 2101 - i915_modparams.mmio_debug++; 2101 + uncore->i915->params.mmio_debug++; 2102 2102 } 2103 2103 uncore->debug->unclaimed_mmio_check--; 2104 2104 ret = true;