Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/i915/gt: Use to_gt() helper

Use to_gt() helper consistently throughout the codebase.
Pure mechanical s/i915->gt/to_gt(i915)/. No functional changes.

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211214193346.21231-5-andi.shyti@linux.intel.com

Authored by Michał Winiarski and committed by Matt Roper.
c14adcbd 62e94f92

+46 -46
+1 -1
drivers/gpu/drm/i915/gt/intel_engine_user.c
··· 116 116 disabled |= (I915_SCHEDULER_CAP_ENABLED | 117 117 I915_SCHEDULER_CAP_PRIORITY); 118 118 119 - if (intel_uc_uses_guc_submission(&i915->gt.uc)) 119 + if (intel_uc_uses_guc_submission(&to_gt(i915)->uc)) 120 120 enabled |= I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP; 121 121 122 122 for (i = 0; i < ARRAY_SIZE(map); i++) {
+1 -1
drivers/gpu/drm/i915/gt/intel_ggtt.c
··· 1215 1215 { 1216 1216 int ret; 1217 1217 1218 - ret = ggtt_probe_hw(&i915->ggtt, &i915->gt); 1218 + ret = ggtt_probe_hw(&i915->ggtt, to_gt(i915)); 1219 1219 if (ret) 1220 1220 return ret; 1221 1221
+6 -6
drivers/gpu/drm/i915/gt/intel_rps.c
··· 2302 2302 return 0; 2303 2303 2304 2304 with_intel_runtime_pm(&i915->runtime_pm, wakeref) { 2305 - struct intel_ips *ips = &i915->gt.rps.ips; 2305 + struct intel_ips *ips = &to_gt(i915)->rps.ips; 2306 2306 2307 2307 spin_lock_irq(&mchdev_lock); 2308 2308 chipset_val = __ips_chipset_val(ips); ··· 2329 2329 if (!i915) 2330 2330 return false; 2331 2331 2332 - rps = &i915->gt.rps; 2332 + rps = &to_gt(i915)->rps; 2333 2333 2334 2334 spin_lock_irq(&mchdev_lock); 2335 2335 if (rps->max_freq_softlimit < rps->max_freq) ··· 2356 2356 if (!i915) 2357 2357 return false; 2358 2358 2359 - rps = &i915->gt.rps; 2359 + rps = &to_gt(i915)->rps; 2360 2360 2361 2361 spin_lock_irq(&mchdev_lock); 2362 2362 if (rps->max_freq_softlimit > rps->min_freq) ··· 2382 2382 if (!i915) 2383 2383 return false; 2384 2384 2385 - ret = i915->gt.awake; 2385 + ret = to_gt(i915)->awake; 2386 2386 2387 2387 drm_dev_put(&i915->drm); 2388 2388 return ret; ··· 2405 2405 if (!i915) 2406 2406 return false; 2407 2407 2408 - rps = &i915->gt.rps; 2408 + rps = &to_gt(i915)->rps; 2409 2409 2410 2410 spin_lock_irq(&mchdev_lock); 2411 2411 rps->max_freq_softlimit = rps->min_freq; 2412 - ret = !__gen5_rps_set(&i915->gt.rps, rps->min_freq); 2412 + ret = !__gen5_rps_set(&to_gt(i915)->rps, rps->min_freq); 2413 2413 spin_unlock_irq(&mchdev_lock); 2414 2414 2415 2415 drm_dev_put(&i915->drm);
+1 -1
drivers/gpu/drm/i915/gt/intel_workarounds.c
··· 929 929 static void 930 930 gen9_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) 931 931 { 932 - const struct sseu_dev_info *sseu = &i915->gt.info.sseu; 932 + const struct sseu_dev_info *sseu = &to_gt(i915)->info.sseu; 933 933 unsigned int slice, subslice; 934 934 u32 mcr, mcr_mask; 935 935
+5 -5
drivers/gpu/drm/i915/gt/mock_engine.c
··· 345 345 struct mock_engine *engine; 346 346 347 347 GEM_BUG_ON(id >= I915_NUM_ENGINES); 348 - GEM_BUG_ON(!i915->gt.uncore); 348 + GEM_BUG_ON(!to_gt(i915)->uncore); 349 349 350 350 engine = kzalloc(sizeof(*engine) + PAGE_SIZE, GFP_KERNEL); 351 351 if (!engine) ··· 353 353 354 354 /* minimal engine setup for requests */ 355 355 engine->base.i915 = i915; 356 - engine->base.gt = &i915->gt; 357 - engine->base.uncore = i915->gt.uncore; 356 + engine->base.gt = to_gt(i915); 357 + engine->base.uncore = to_gt(i915)->uncore; 358 358 snprintf(engine->base.name, sizeof(engine->base.name), "%s", name); 359 359 engine->base.id = id; 360 360 engine->base.mask = BIT(id); ··· 377 377 378 378 engine->base.release = mock_engine_release; 379 379 380 - i915->gt.engine[id] = &engine->base; 381 - i915->gt.engine_class[0][id] = &engine->base; 380 + to_gt(i915)->engine[id] = &engine->base; 381 + to_gt(i915)->engine_class[0][id] = &engine->base; 382 382 383 383 /* fake hw queue */ 384 384 spin_lock_init(&engine->hw_lock);
+1 -1
drivers/gpu/drm/i915/gt/selftest_context.c
··· 442 442 SUBTEST(live_active_context), 443 443 SUBTEST(live_remote_context), 444 444 }; 445 - struct intel_gt *gt = &i915->gt; 445 + struct intel_gt *gt = to_gt(i915); 446 446 447 447 if (intel_gt_is_wedged(gt)) 448 448 return 0;
+1 -1
drivers/gpu/drm/i915/gt/selftest_engine.c
··· 12 12 live_engine_pm_selftests, 13 13 NULL, 14 14 }; 15 - struct intel_gt *gt = &i915->gt; 15 + struct intel_gt *gt = to_gt(i915); 16 16 typeof(*tests) *fn; 17 17 18 18 for (fn = tests; *fn; fn++) {
+2 -2
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
··· 361 361 SUBTEST(perf_mi_noop), 362 362 }; 363 363 364 - if (intel_gt_is_wedged(&i915->gt)) 364 + if (intel_gt_is_wedged(to_gt(i915))) 365 365 return 0; 366 366 367 - return intel_gt_live_subtests(tests, &i915->gt); 367 + return intel_gt_live_subtests(tests, to_gt(i915)); 368 368 } 369 369 370 370 static int intel_mmio_bases_check(void *arg)
+2 -2
drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
··· 378 378 int saved_hangcheck; 379 379 int err; 380 380 381 - if (intel_gt_is_wedged(&i915->gt)) 381 + if (intel_gt_is_wedged(to_gt(i915))) 382 382 return 0; 383 383 384 384 saved_hangcheck = i915->params.enable_hangcheck; 385 385 i915->params.enable_hangcheck = INT_MAX; 386 386 387 - err = intel_gt_live_subtests(tests, &i915->gt); 387 + err = intel_gt_live_subtests(tests, to_gt(i915)); 388 388 389 389 i915->params.enable_hangcheck = saved_hangcheck; 390 390 return err;
+3 -3
drivers/gpu/drm/i915/gt/selftest_execlists.c
··· 4502 4502 SUBTEST(live_virtual_reset), 4503 4503 }; 4504 4504 4505 - if (i915->gt.submission_method != INTEL_SUBMISSION_ELSP) 4505 + if (to_gt(i915)->submission_method != INTEL_SUBMISSION_ELSP) 4506 4506 return 0; 4507 4507 4508 - if (intel_gt_is_wedged(&i915->gt)) 4508 + if (intel_gt_is_wedged(to_gt(i915))) 4509 4509 return 0; 4510 4510 4511 - return intel_gt_live_subtests(tests, &i915->gt); 4511 + return intel_gt_live_subtests(tests, to_gt(i915)); 4512 4512 }
+4 -4
drivers/gpu/drm/i915/gt/selftest_gt_pm.c
··· 193 193 SUBTEST(live_gt_resume), 194 194 }; 195 195 196 - if (intel_gt_is_wedged(&i915->gt)) 196 + if (intel_gt_is_wedged(to_gt(i915))) 197 197 return 0; 198 198 199 - return intel_gt_live_subtests(tests, &i915->gt); 199 + return intel_gt_live_subtests(tests, to_gt(i915)); 200 200 } 201 201 202 202 int intel_gt_pm_late_selftests(struct drm_i915_private *i915) ··· 210 210 SUBTEST(live_rc6_ctx_wa), 211 211 }; 212 212 213 - if (intel_gt_is_wedged(&i915->gt)) 213 + if (intel_gt_is_wedged(to_gt(i915))) 214 214 return 0; 215 215 216 - return intel_gt_live_subtests(tests, &i915->gt); 216 + return intel_gt_live_subtests(tests, to_gt(i915)); 217 217 }
+1 -1
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
··· 2018 2018 SUBTEST(igt_reset_evict_fence), 2019 2019 SUBTEST(igt_handle_error), 2020 2020 }; 2021 - struct intel_gt *gt = &i915->gt; 2021 + struct intel_gt *gt = to_gt(i915); 2022 2022 intel_wakeref_t wakeref; 2023 2023 int err; 2024 2024
+1 -1
drivers/gpu/drm/i915/gt/selftest_lrc.c
··· 1847 1847 if (!HAS_LOGICAL_RING_CONTEXTS(i915)) 1848 1848 return 0; 1849 1849 1850 - return intel_gt_live_subtests(tests, &i915->gt); 1850 + return intel_gt_live_subtests(tests, to_gt(i915)); 1851 1851 }
+2 -2
drivers/gpu/drm/i915/gt/selftest_migrate.c
··· 442 442 SUBTEST(thread_global_copy), 443 443 SUBTEST(thread_global_clear), 444 444 }; 445 - struct intel_gt *gt = &i915->gt; 445 + struct intel_gt *gt = to_gt(i915); 446 446 447 447 if (!gt->migrate.context) 448 448 return 0; ··· 658 658 SUBTEST(perf_clear_blt), 659 659 SUBTEST(perf_copy_blt), 660 660 }; 661 - struct intel_gt *gt = &i915->gt; 661 + struct intel_gt *gt = to_gt(i915); 662 662 663 663 if (intel_gt_is_wedged(gt)) 664 664 return 0;
+1 -1
drivers/gpu/drm/i915/gt/selftest_mocs.c
··· 451 451 if (!get_mocs_settings(i915, &table)) 452 452 return 0; 453 453 454 - return intel_gt_live_subtests(tests, &i915->gt); 454 + return intel_gt_live_subtests(tests, to_gt(i915)); 455 455 }
+1 -1
drivers/gpu/drm/i915/gt/selftest_reset.c
··· 376 376 SUBTEST(igt_atomic_reset), 377 377 SUBTEST(igt_atomic_engine_reset), 378 378 }; 379 - struct intel_gt *gt = &i915->gt; 379 + struct intel_gt *gt = to_gt(i915); 380 380 381 381 if (!intel_has_gpu_reset(gt)) 382 382 return 0;
+2 -2
drivers/gpu/drm/i915/gt/selftest_ring_submission.c
··· 291 291 SUBTEST(live_ctx_switch_wa), 292 292 }; 293 293 294 - if (i915->gt.submission_method > INTEL_SUBMISSION_RING) 294 + if (to_gt(i915)->submission_method > INTEL_SUBMISSION_RING) 295 295 return 0; 296 296 297 - return intel_gt_live_subtests(tests, &i915->gt); 297 + return intel_gt_live_subtests(tests, to_gt(i915)); 298 298 }
+3 -3
drivers/gpu/drm/i915/gt/selftest_slpc.c
··· 39 39 static int live_slpc_clamp_min(void *arg) 40 40 { 41 41 struct drm_i915_private *i915 = arg; 42 - struct intel_gt *gt = &i915->gt; 42 + struct intel_gt *gt = to_gt(i915); 43 43 struct intel_guc_slpc *slpc = &gt->uc.guc.slpc; 44 44 struct intel_rps *rps = &gt->rps; 45 45 struct intel_engine_cs *engine; ··· 166 166 static int live_slpc_clamp_max(void *arg) 167 167 { 168 168 struct drm_i915_private *i915 = arg; 169 - struct intel_gt *gt = &i915->gt; 169 + struct intel_gt *gt = to_gt(i915); 170 170 struct intel_guc_slpc *slpc; 171 171 struct intel_rps *rps; 172 172 struct intel_engine_cs *engine; ··· 304 304 SUBTEST(live_slpc_clamp_min), 305 305 }; 306 306 307 - if (intel_gt_is_wedged(&i915->gt)) 307 + if (intel_gt_is_wedged(to_gt(i915))) 308 308 return 0; 309 309 310 310 return i915_live_subtests(tests, i915);
+3 -3
drivers/gpu/drm/i915/gt/selftest_timeline.c
··· 159 159 INIT_RADIX_TREE(&state.cachelines, GFP_KERNEL); 160 160 state.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed); 161 161 162 - state.gt = &i915->gt; 162 + state.gt = to_gt(i915); 163 163 164 164 /* 165 165 * Create a bunch of timelines and check that their HWSP do not overlap. ··· 1416 1416 SUBTEST(live_hwsp_rollover_user), 1417 1417 }; 1418 1418 1419 - if (intel_gt_is_wedged(&i915->gt)) 1419 + if (intel_gt_is_wedged(to_gt(i915))) 1420 1420 return 0; 1421 1421 1422 - return intel_gt_live_subtests(tests, &i915->gt); 1422 + return intel_gt_live_subtests(tests, to_gt(i915)); 1423 1423 }
+2 -2
drivers/gpu/drm/i915/gt/selftest_workarounds.c
··· 1387 1387 SUBTEST(live_engine_reset_workarounds), 1388 1388 }; 1389 1389 1390 - if (intel_gt_is_wedged(&i915->gt)) 1390 + if (intel_gt_is_wedged(to_gt(i915))) 1391 1391 return 0; 1392 1392 1393 - return intel_gt_live_subtests(tests, &i915->gt); 1393 + return intel_gt_live_subtests(tests, to_gt(i915)); 1394 1394 }
+1 -1
drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
··· 623 623 if (unlikely(ret < 0)) 624 624 return ret; 625 625 626 - intel_guc_pm_intrmsk_enable(&i915->gt); 626 + intel_guc_pm_intrmsk_enable(to_gt(i915)); 627 627 628 628 slpc_get_rp_values(slpc); 629 629
+1 -1
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
··· 288 288 SUBTEST(intel_guc_scrub_ctbs), 289 289 SUBTEST(intel_guc_steal_guc_ids), 290 290 }; 291 - struct intel_gt *gt = &i915->gt; 291 + struct intel_gt *gt = to_gt(i915); 292 292 293 293 if (intel_gt_is_wedged(gt)) 294 294 return 0;
+1 -1
drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
··· 167 167 static const struct i915_subtest tests[] = { 168 168 SUBTEST(intel_guc_multi_lrc_basic), 169 169 }; 170 - struct intel_gt *gt = &i915->gt; 170 + struct intel_gt *gt = to_gt(i915); 171 171 172 172 if (intel_gt_is_wedged(gt)) 173 173 return 0;