Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/i915: use direct alias for i915 in requests

i915_request contains a direct alias to i915, so there is no point in going
via rq->engine->i915.

v2: added missing rq.i915 initialization in measure_breadcrumb_dw.

Signed-off-by: Andrzej Hajda <andrzej.hajda@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Acked-by: Nirmoy Das <nirmoy.das@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230720113002.1541572-1-andrzej.hajda@intel.com

+43 -42
+2 -2
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
··· 2230 2230 u32 *cs; 2231 2231 int i; 2232 2232 2233 - if (GRAPHICS_VER(rq->engine->i915) != 7 || rq->engine->id != RCS0) { 2234 - drm_dbg(&rq->engine->i915->drm, "sol reset is gen7/rcs only\n"); 2233 + if (GRAPHICS_VER(rq->i915) != 7 || rq->engine->id != RCS0) { 2234 + drm_dbg(&rq->i915->drm, "sol reset is gen7/rcs only\n"); 2235 2235 return -EINVAL; 2236 2236 } 2237 2237
+1 -1
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
··· 76 76 cmd = MI_FLUSH; 77 77 if (mode & EMIT_INVALIDATE) { 78 78 cmd |= MI_EXE_FLUSH; 79 - if (IS_G4X(rq->engine->i915) || GRAPHICS_VER(rq->engine->i915) == 5) 79 + if (IS_G4X(rq->i915) || GRAPHICS_VER(rq->i915) == 5) 80 80 cmd |= MI_INVALIDATE_ISP; 81 81 } 82 82
+9 -9
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
··· 39 39 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL 40 40 * pipe control. 41 41 */ 42 - if (GRAPHICS_VER(rq->engine->i915) == 9) 42 + if (GRAPHICS_VER(rq->i915) == 9) 43 43 vf_flush_wa = true; 44 44 45 45 /* WaForGAMHang:kbl */ 46 - if (IS_KBL_GRAPHICS_STEP(rq->engine->i915, 0, STEP_C0)) 46 + if (IS_KBL_GRAPHICS_STEP(rq->i915, 0, STEP_C0)) 47 47 dc_flush_wa = true; 48 48 } 49 49 ··· 180 180 static int mtl_dummy_pipe_control(struct i915_request *rq) 181 181 { 182 182 /* Wa_14016712196 */ 183 - if (IS_MTL_GRAPHICS_STEP(rq->engine->i915, M, STEP_A0, STEP_B0) || 184 - IS_MTL_GRAPHICS_STEP(rq->engine->i915, P, STEP_A0, STEP_B0)) { 183 + if (IS_MTL_GRAPHICS_STEP(rq->i915, M, STEP_A0, STEP_B0) || 184 + IS_MTL_GRAPHICS_STEP(rq->i915, P, STEP_A0, STEP_B0)) { 185 185 u32 *cs; 186 186 187 187 /* dummy PIPE_CONTROL + depth flush */ ··· 267 267 else if (engine->class == COMPUTE_CLASS) 268 268 flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS; 269 269 270 - if (!HAS_FLAT_CCS(rq->engine->i915)) 270 + if (!HAS_FLAT_CCS(rq->i915)) 271 271 count = 8 + 4; 272 272 else 273 273 count = 8; ··· 285 285 286 286 cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); 287 287 288 - if (!HAS_FLAT_CCS(rq->engine->i915)) { 288 + if (!HAS_FLAT_CCS(rq->i915)) { 289 289 /* hsdes: 1809175790 */ 290 290 cs = gen12_emit_aux_table_inv(rq->engine->gt, 291 291 cs, GEN12_GFX_CCS_AUX_NV); ··· 307 307 if (mode & EMIT_INVALIDATE) { 308 308 cmd += 2; 309 309 310 - if (!HAS_FLAT_CCS(rq->engine->i915) && 310 + if (!HAS_FLAT_CCS(rq->i915) && 311 311 (rq->engine->class == VIDEO_DECODE_CLASS || 312 312 rq->engine->class == VIDEO_ENHANCEMENT_CLASS)) { 313 313 aux_inv = rq->engine->mask & ··· 754 754 755 755 u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs) 756 756 { 757 - struct drm_i915_private *i915 = rq->engine->i915; 757 + struct drm_i915_private *i915 = rq->i915; 758 758 u32 flags = (PIPE_CONTROL_CS_STALL | 759 759 PIPE_CONTROL_TLB_INVALIDATE | 760 760 
PIPE_CONTROL_TILE_CACHE_FLUSH | ··· 775 775 /* Wa_1409600907 */ 776 776 flags |= PIPE_CONTROL_DEPTH_STALL; 777 777 778 - if (!HAS_3D_PIPELINE(rq->engine->i915)) 778 + if (!HAS_3D_PIPELINE(rq->i915)) 779 779 flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS; 780 780 else if (rq->engine->class == COMPUTE_CLASS) 781 781 flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
+1
drivers/gpu/drm/i915/gt/intel_engine_cs.c
··· 1333 1333 if (!frame) 1334 1334 return -ENOMEM; 1335 1335 1336 + frame->rq.i915 = engine->i915; 1336 1337 frame->rq.engine = engine; 1337 1338 frame->rq.context = ce; 1338 1339 rcu_assign_pointer(frame->rq.timeline, ce->timeline);
+1 -1
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
··· 2717 2717 int err, i; 2718 2718 u32 *cs; 2719 2719 2720 - GEM_BUG_ON(intel_vgpu_active(rq->engine->i915)); 2720 + GEM_BUG_ON(intel_vgpu_active(rq->i915)); 2721 2721 2722 2722 /* 2723 2723 * Beware ye of the dragons, this sequence is magic!
+5 -5
drivers/gpu/drm/i915/gt/intel_migrate.c
··· 366 366 u64 offset, 367 367 int length) 368 368 { 369 - bool has_64K_pages = HAS_64K_PAGES(rq->engine->i915); 369 + bool has_64K_pages = HAS_64K_PAGES(rq->i915); 370 370 const u64 encode = rq->context->vm->pte_encode(0, pat_index, 371 371 is_lmem ? PTE_LM : 0); 372 372 struct intel_ring *ring = rq->ring; ··· 375 375 u32 page_size; 376 376 u32 *hdr, *cs; 377 377 378 - GEM_BUG_ON(GRAPHICS_VER(rq->engine->i915) < 8); 378 + GEM_BUG_ON(GRAPHICS_VER(rq->i915) < 8); 379 379 380 380 page_size = I915_GTT_PAGE_SIZE; 381 381 dword_length = 0x400; ··· 531 531 u32 dst_offset, u8 dst_access, 532 532 u32 src_offset, u8 src_access, int size) 533 533 { 534 - struct drm_i915_private *i915 = rq->engine->i915; 534 + struct drm_i915_private *i915 = rq->i915; 535 535 int mocs = rq->engine->gt->mocs.uc_index << 1; 536 536 u32 num_ccs_blks; 537 537 u32 *cs; ··· 581 581 static int emit_copy(struct i915_request *rq, 582 582 u32 dst_offset, u32 src_offset, int size) 583 583 { 584 - const int ver = GRAPHICS_VER(rq->engine->i915); 584 + const int ver = GRAPHICS_VER(rq->i915); 585 585 u32 instance = rq->engine->instance; 586 586 u32 *cs; 587 587 ··· 917 917 static int emit_clear(struct i915_request *rq, u32 offset, int size, 918 918 u32 value, bool is_lmem) 919 919 { 920 - struct drm_i915_private *i915 = rq->engine->i915; 920 + struct drm_i915_private *i915 = rq->i915; 921 921 int mocs = rq->engine->gt->mocs.uc_index << 1; 922 922 const int ver = GRAPHICS_VER(i915); 923 923 int ring_sz;
+1 -1
drivers/gpu/drm/i915/gt/intel_ring_submission.c
··· 805 805 static int remap_l3_slice(struct i915_request *rq, int slice) 806 806 { 807 807 #define L3LOG_DW (GEN7_L3LOG_SIZE / sizeof(u32)) 808 - u32 *cs, *remap_info = rq->engine->i915->l3_parity.remap_info[slice]; 808 + u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice]; 809 809 int i; 810 810 811 811 if (!remap_info)
+2 -2
drivers/gpu/drm/i915/gt/intel_workarounds.c
··· 3249 3249 const struct i915_wa_list *wal, 3250 3250 struct i915_vma *vma) 3251 3251 { 3252 - struct drm_i915_private *i915 = rq->engine->i915; 3252 + struct drm_i915_private *i915 = rq->i915; 3253 3253 unsigned int i, count = 0; 3254 3254 const struct i915_wa *wa; 3255 3255 u32 srm, *cs; ··· 3348 3348 3349 3349 err = 0; 3350 3350 for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { 3351 - if (mcr_range(rq->engine->i915, i915_mmio_reg_offset(wa->reg))) 3351 + if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg))) 3352 3352 continue; 3353 3353 3354 3354 if (!wa_verify(wal->gt, wa, results[i], wal->name, from))
+1 -1
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
··· 62 62 return PTR_ERR(cs); 63 63 64 64 cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT; 65 - if (GRAPHICS_VER(rq->engine->i915) >= 8) 65 + if (GRAPHICS_VER(rq->i915) >= 8) 66 66 cmd++; 67 67 *cs++ = cmd; 68 68 *cs++ = i915_mmio_reg_offset(timestamp_reg(rq->engine));
+1 -1
drivers/gpu/drm/i915/gt/selftest_mocs.c
··· 137 137 if (!table) 138 138 return 0; 139 139 140 - if (HAS_GLOBAL_MOCS_REGISTERS(rq->engine->i915)) 140 + if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915)) 141 141 addr = global_mocs_offset() + gt->uncore->gsi_offset; 142 142 else 143 143 addr = mocs_offset(rq->engine);
+1 -1
drivers/gpu/drm/i915/gt/selftest_rc6.c
··· 140 140 } 141 141 142 142 cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT; 143 - if (GRAPHICS_VER(rq->engine->i915) >= 8) 143 + if (GRAPHICS_VER(rq->i915) >= 8) 144 144 cmd++; 145 145 146 146 *cs++ = cmd;
+2 -2
drivers/gpu/drm/i915/gt/selftest_timeline.c
··· 459 459 if (IS_ERR(cs)) 460 460 return PTR_ERR(cs); 461 461 462 - if (GRAPHICS_VER(rq->engine->i915) >= 8) { 462 + if (GRAPHICS_VER(rq->i915) >= 8) { 463 463 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; 464 464 *cs++ = addr; 465 465 *cs++ = 0; 466 466 *cs++ = value; 467 - } else if (GRAPHICS_VER(rq->engine->i915) >= 4) { 467 + } else if (GRAPHICS_VER(rq->i915) >= 4) { 468 468 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; 469 469 *cs++ = 0; 470 470 *cs++ = addr;
+1 -1
drivers/gpu/drm/i915/gvt/scheduler.c
··· 974 974 context_page_num = rq->engine->context_size; 975 975 context_page_num = context_page_num >> PAGE_SHIFT; 976 976 977 - if (IS_BROADWELL(rq->engine->i915) && rq->engine->id == RCS0) 977 + if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0) 978 978 context_page_num = 19; 979 979 980 980 context_base = (void *) ctx->lrc_reg_state -
+1 -1
drivers/gpu/drm/i915/i915_perf.c
··· 1319 1319 u32 *cs, cmd; 1320 1320 1321 1321 cmd = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; 1322 - if (GRAPHICS_VER(rq->engine->i915) >= 8) 1322 + if (GRAPHICS_VER(rq->i915) >= 8) 1323 1323 cmd++; 1324 1324 1325 1325 cs = intel_ring_begin(rq, 4);
+1 -1
drivers/gpu/drm/i915/i915_request.c
··· 1353 1353 { 1354 1354 mark_external(rq); 1355 1355 return i915_sw_fence_await_dma_fence(&rq->submit, fence, 1356 - i915_fence_context_timeout(rq->engine->i915, 1356 + i915_fence_context_timeout(rq->i915, 1357 1357 fence->context), 1358 1358 I915_FENCE_GFP); 1359 1359 }
+5 -5
drivers/gpu/drm/i915/i915_trace.h
··· 277 277 ), 278 278 279 279 TP_fast_assign( 280 - __entry->dev = rq->engine->i915->drm.primary->index; 280 + __entry->dev = rq->i915->drm.primary->index; 281 281 __entry->class = rq->engine->uabi_class; 282 282 __entry->instance = rq->engine->uabi_instance; 283 283 __entry->ctx = rq->fence.context; ··· 304 304 ), 305 305 306 306 TP_fast_assign( 307 - __entry->dev = rq->engine->i915->drm.primary->index; 307 + __entry->dev = rq->i915->drm.primary->index; 308 308 __entry->class = rq->engine->uabi_class; 309 309 __entry->instance = rq->engine->uabi_instance; 310 310 __entry->ctx = rq->fence.context; ··· 353 353 ), 354 354 355 355 TP_fast_assign( 356 - __entry->dev = rq->engine->i915->drm.primary->index; 356 + __entry->dev = rq->i915->drm.primary->index; 357 357 __entry->class = rq->engine->uabi_class; 358 358 __entry->instance = rq->engine->uabi_instance; 359 359 __entry->ctx = rq->fence.context; ··· 382 382 ), 383 383 384 384 TP_fast_assign( 385 - __entry->dev = rq->engine->i915->drm.primary->index; 385 + __entry->dev = rq->i915->drm.primary->index; 386 386 __entry->class = rq->engine->uabi_class; 387 387 __entry->instance = rq->engine->uabi_instance; 388 388 __entry->ctx = rq->fence.context; ··· 623 623 * less desirable. 624 624 */ 625 625 TP_fast_assign( 626 - __entry->dev = rq->engine->i915->drm.primary->index; 626 + __entry->dev = rq->i915->drm.primary->index; 627 627 __entry->class = rq->engine->uabi_class; 628 628 __entry->instance = rq->engine->uabi_instance; 629 629 __entry->ctx = rq->fence.context;
+1 -1
drivers/gpu/drm/i915/selftests/i915_perf.c
··· 168 168 return PTR_ERR(cs); 169 169 170 170 len = 5; 171 - if (GRAPHICS_VER(rq->engine->i915) >= 8) 171 + if (GRAPHICS_VER(rq->i915) >= 8) 172 172 len++; 173 173 174 174 *cs++ = GFX_OP_PIPE_CONTROL(len);
+7 -7
drivers/gpu/drm/i915/selftests/igt_spinner.c
··· 159 159 160 160 batch = spin->batch; 161 161 162 - if (GRAPHICS_VER(rq->engine->i915) >= 8) { 162 + if (GRAPHICS_VER(rq->i915) >= 8) { 163 163 *batch++ = MI_STORE_DWORD_IMM_GEN4; 164 164 *batch++ = lower_32_bits(hws_address(hws, rq)); 165 165 *batch++ = upper_32_bits(hws_address(hws, rq)); 166 - } else if (GRAPHICS_VER(rq->engine->i915) >= 6) { 166 + } else if (GRAPHICS_VER(rq->i915) >= 6) { 167 167 *batch++ = MI_STORE_DWORD_IMM_GEN4; 168 168 *batch++ = 0; 169 169 *batch++ = hws_address(hws, rq); 170 - } else if (GRAPHICS_VER(rq->engine->i915) >= 4) { 170 + } else if (GRAPHICS_VER(rq->i915) >= 4) { 171 171 *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; 172 172 *batch++ = 0; 173 173 *batch++ = hws_address(hws, rq); ··· 179 179 180 180 *batch++ = arbitration_command; 181 181 182 - if (GRAPHICS_VER(rq->engine->i915) >= 8) 182 + if (GRAPHICS_VER(rq->i915) >= 8) 183 183 *batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1; 184 - else if (IS_HASWELL(rq->engine->i915)) 184 + else if (IS_HASWELL(rq->i915)) 185 185 *batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW; 186 - else if (GRAPHICS_VER(rq->engine->i915) >= 6) 186 + else if (GRAPHICS_VER(rq->i915) >= 6) 187 187 *batch++ = MI_BATCH_BUFFER_START; 188 188 else 189 189 *batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT; ··· 201 201 } 202 202 203 203 flags = 0; 204 - if (GRAPHICS_VER(rq->engine->i915) <= 5) 204 + if (GRAPHICS_VER(rq->i915) <= 5) 205 205 flags |= I915_DISPATCH_SECURE; 206 206 err = engine->emit_bb_start(rq, i915_vma_offset(vma), PAGE_SIZE, flags); 207 207