Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-intel-gt-next-2023-08-11' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

Cross-subsystem Changes:

- Backmerge of drm-next

Driver Changes:

- Apply Wa_22016122933 correctly (Jonathan, Matt R)

- Simplify shmem_create_from_object map_type selection (Jonathan,
Tvrtko)
- Make i915_coherent_map_type GT-centric (Jonathan, Matt R)

- Selftest improvements (John)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ZNYR3bKFquGc7u9w@jlahtine-mobl.ger.corp.intel.com

+75 -69
+2 -1
drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
··· 6 6 #include <drm/i915_hdcp_interface.h> 7 7 8 8 #include "gem/i915_gem_region.h" 9 + #include "gt/intel_gt.h" 9 10 #include "gt/uc/intel_gsc_uc_heci_cmd_submit.h" 10 11 #include "i915_drv.h" 11 12 #include "i915_utils.h" ··· 633 632 return PTR_ERR(obj); 634 633 } 635 634 636 - cmd_in = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(i915, obj, true)); 635 + cmd_in = i915_gem_object_pin_map_unlocked(obj, intel_gt_coherent_map_type(gt, obj, true)); 637 636 if (IS_ERR(cmd_in)) { 638 637 drm_err(&i915->drm, "Failed to map gsc message page!\n"); 639 638 err = PTR_ERR(cmd_in);
-4
drivers/gpu/drm/i915/gem/i915_gem_object.h
··· 716 716 void *__must_check i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj, 717 717 enum i915_map_type type); 718 718 719 - enum i915_map_type i915_coherent_map_type(struct drm_i915_private *i915, 720 - struct drm_i915_gem_object *obj, 721 - bool always_coherent); 722 - 723 719 void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj, 724 720 unsigned long offset, 725 721 unsigned long size);
-15
drivers/gpu/drm/i915/gem/i915_gem_pages.c
··· 468 468 return ret; 469 469 } 470 470 471 - enum i915_map_type i915_coherent_map_type(struct drm_i915_private *i915, 472 - struct drm_i915_gem_object *obj, 473 - bool always_coherent) 474 - { 475 - /* 476 - * Wa_22016122933: always return I915_MAP_WC for MTL 477 - */ 478 - if (i915_gem_object_is_lmem(obj) || IS_METEORLAKE(i915)) 479 - return I915_MAP_WC; 480 - if (HAS_LLC(i915) || always_coherent) 481 - return I915_MAP_WB; 482 - else 483 - return I915_MAP_WC; 484 - } 485 - 486 471 void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj, 487 472 unsigned long offset, 488 473 unsigned long size)
+6 -6
drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
··· 13 13 #include "selftests/igt_spinner.h" 14 14 15 15 static int igt_fill_check_buffer(struct drm_i915_gem_object *obj, 16 + struct intel_gt *gt, 16 17 bool fill) 17 18 { 18 - struct drm_i915_private *i915 = to_i915(obj->base.dev); 19 19 unsigned int i, count = obj->base.size / sizeof(u32); 20 20 enum i915_map_type map_type = 21 - i915_coherent_map_type(i915, obj, false); 21 + intel_gt_coherent_map_type(gt, obj, false); 22 22 u32 *cur; 23 23 int err = 0; 24 24 ··· 66 66 if (err) 67 67 continue; 68 68 69 - err = igt_fill_check_buffer(obj, true); 69 + err = igt_fill_check_buffer(obj, gt, true); 70 70 if (err) 71 71 continue; 72 72 ··· 86 86 if (err) 87 87 continue; 88 88 89 - err = igt_fill_check_buffer(obj, false); 89 + err = igt_fill_check_buffer(obj, gt, false); 90 90 } 91 91 i915_gem_object_put(obj); 92 92 ··· 233 233 continue; 234 234 235 235 if (!vma) { 236 - err = igt_fill_check_buffer(obj, true); 236 + err = igt_fill_check_buffer(obj, gt, true); 237 237 if (err) 238 238 continue; 239 239 } ··· 276 276 if (err) 277 277 goto out_unlock; 278 278 } else { 279 - err = igt_fill_check_buffer(obj, false); 279 + err = igt_fill_check_buffer(obj, gt, false); 280 280 } 281 281 282 282 out_unlock:
+1 -1
drivers/gpu/drm/i915/gt/intel_engine_pm.c
··· 39 39 40 40 if (ce->state) { 41 41 struct drm_i915_gem_object *obj = ce->state->obj; 42 - int type = i915_coherent_map_type(ce->engine->i915, obj, true); 42 + int type = intel_gt_coherent_map_type(ce->engine->gt, obj, true); 43 43 void *map; 44 44 45 45 if (!i915_gem_object_trylock(obj, NULL))
+16
drivers/gpu/drm/i915/gt/intel_gt.c
··· 1003 1003 1004 1004 intel_sseu_dump(&info->sseu, p); 1005 1005 } 1006 + 1007 + enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt, 1008 + struct drm_i915_gem_object *obj, 1009 + bool always_coherent) 1010 + { 1011 + /* 1012 + * Wa_22016122933: always return I915_MAP_WC for Media 1013 + * version 13.0 when the object is on the Media GT 1014 + */ 1015 + if (i915_gem_object_is_lmem(obj) || intel_gt_needs_wa_22016122933(gt)) 1016 + return I915_MAP_WC; 1017 + if (HAS_LLC(gt->i915) || always_coherent) 1018 + return I915_MAP_WB; 1019 + else 1020 + return I915_MAP_WC; 1021 + }
+10
drivers/gpu/drm/i915/gt/intel_gt.h
··· 6 6 #ifndef __INTEL_GT__ 7 7 #define __INTEL_GT__ 8 8 9 + #include "i915_drv.h" 9 10 #include "intel_engine_types.h" 10 11 #include "intel_gt_types.h" 11 12 #include "intel_reset.h" ··· 23 22 static inline bool gt_is_root(struct intel_gt *gt) 24 23 { 25 24 return !gt->info.id; 25 + } 26 + 27 + static inline bool intel_gt_needs_wa_22016122933(struct intel_gt *gt) 28 + { 29 + return MEDIA_VER_FULL(gt->i915) == IP_VER(13, 0) && gt->type == GT_MEDIA; 26 30 } 27 31 28 32 static inline struct intel_gt *uc_to_gt(struct intel_uc *uc) ··· 112 106 struct drm_printer *p); 113 107 114 108 void intel_gt_watchdog_work(struct work_struct *work); 109 + 110 + enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt, 111 + struct drm_i915_gem_object *obj, 112 + bool always_coherent); 115 113 116 114 #endif /* __INTEL_GT_H__ */
+2 -2
drivers/gpu/drm/i915/gt/intel_gtt.c
··· 89 89 enum i915_map_type type; 90 90 void *vaddr; 91 91 92 - type = i915_coherent_map_type(vm->i915, obj, true); 92 + type = intel_gt_coherent_map_type(vm->gt, obj, true); 93 93 vaddr = i915_gem_object_pin_map_unlocked(obj, type); 94 94 if (IS_ERR(vaddr)) 95 95 return PTR_ERR(vaddr); ··· 103 103 enum i915_map_type type; 104 104 void *vaddr; 105 105 106 - type = i915_coherent_map_type(vm->i915, obj, true); 106 + type = intel_gt_coherent_map_type(vm->gt, obj, true); 107 107 vaddr = i915_gem_object_pin_map(obj, type); 108 108 if (IS_ERR(vaddr)) 109 109 return PTR_ERR(vaddr);
+7 -6
drivers/gpu/drm/i915/gt/intel_lrc.c
··· 1095 1095 if (IS_ERR(obj)) { 1096 1096 obj = i915_gem_object_create_shmem(engine->i915, context_size); 1097 1097 /* 1098 - * Wa_22016122933: For MTL the shared memory needs to be mapped 1099 - * as WC on CPU side and UC (PAT index 2) on GPU side 1098 + * Wa_22016122933: For Media version 13.0, all Media GT shared 1099 + * memory needs to be mapped as WC on CPU side and UC (PAT 1100 + * index 2) on GPU side. 1100 1101 */ 1101 - if (IS_METEORLAKE(engine->i915)) 1102 + if (intel_gt_needs_wa_22016122933(engine->gt)) 1102 1103 i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE); 1103 1104 } 1104 1105 if (IS_ERR(obj)) ··· 1192 1191 GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); 1193 1192 1194 1193 *vaddr = i915_gem_object_pin_map(ce->state->obj, 1195 - i915_coherent_map_type(ce->engine->i915, 1196 - ce->state->obj, 1197 - false) | 1194 + intel_gt_coherent_map_type(ce->engine->gt, 1195 + ce->state->obj, 1196 + false) | 1198 1197 I915_MAP_OVERRIDE); 1199 1198 1200 1199 return PTR_ERR_OR_ZERO(*vaddr);
+2 -1
drivers/gpu/drm/i915/gt/intel_ring.c
··· 13 13 #include "intel_engine_regs.h" 14 14 #include "intel_gpu_commands.h" 15 15 #include "intel_ring.h" 16 + #include "intel_gt.h" 16 17 #include "intel_timeline.h" 17 18 18 19 unsigned int intel_ring_update_space(struct intel_ring *ring) ··· 57 56 if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915)) { 58 57 addr = (void __force *)i915_vma_pin_iomap(vma); 59 58 } else { 60 - int type = i915_coherent_map_type(vma->vm->i915, vma->obj, false); 59 + int type = intel_gt_coherent_map_type(vma->vm->gt, vma->obj, false); 61 60 62 61 addr = i915_gem_object_pin_map(vma->obj, type); 63 62 }
+3 -2
drivers/gpu/drm/i915/gt/selftest_context.c
··· 88 88 goto err; 89 89 90 90 vaddr = i915_gem_object_pin_map_unlocked(ce->state->obj, 91 - i915_coherent_map_type(engine->i915, 92 - ce->state->obj, false)); 91 + intel_gt_coherent_map_type(engine->gt, 92 + ce->state->obj, 93 + false)); 93 94 if (IS_ERR(vaddr)) { 94 95 err = PTR_ERR(vaddr); 95 96 intel_context_unpin(ce);
+2 -2
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
··· 73 73 h->seqno = memset(vaddr, 0xff, PAGE_SIZE); 74 74 75 75 vaddr = i915_gem_object_pin_map_unlocked(h->obj, 76 - i915_coherent_map_type(gt->i915, h->obj, false)); 76 + intel_gt_coherent_map_type(gt, h->obj, false)); 77 77 if (IS_ERR(vaddr)) { 78 78 err = PTR_ERR(vaddr); 79 79 goto err_unpin_hws; ··· 119 119 return ERR_CAST(obj); 120 120 } 121 121 122 - vaddr = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(gt->i915, obj, false)); 122 + vaddr = i915_gem_object_pin_map_unlocked(obj, intel_gt_coherent_map_type(gt, obj, false)); 123 123 if (IS_ERR(vaddr)) { 124 124 i915_gem_object_put(obj); 125 125 i915_vm_put(vm);
+3 -3
drivers/gpu/drm/i915/gt/selftest_lrc.c
··· 1292 1292 } 1293 1293 1294 1294 lrc = i915_gem_object_pin_map_unlocked(ce->state->obj, 1295 - i915_coherent_map_type(engine->i915, 1296 - ce->state->obj, 1297 - false)); 1295 + intel_gt_coherent_map_type(engine->gt, 1296 + ce->state->obj, 1297 + false)); 1298 1298 if (IS_ERR(lrc)) { 1299 1299 err = PTR_ERR(lrc); 1300 1300 goto err_B1;
+1 -2
drivers/gpu/drm/i915/gt/shmem_utils.c
··· 33 33 34 34 struct file *shmem_create_from_object(struct drm_i915_gem_object *obj) 35 35 { 36 - struct drm_i915_private *i915 = to_i915(obj->base.dev); 37 36 enum i915_map_type map_type; 38 37 struct file *file; 39 38 void *ptr; ··· 43 44 return file; 44 45 } 45 46 46 - map_type = i915_coherent_map_type(i915, obj, true); 47 + map_type = i915_gem_object_is_lmem(obj) ? I915_MAP_WC : I915_MAP_WB; 47 48 ptr = i915_gem_object_pin_map_unlocked(obj, map_type); 48 49 if (IS_ERR(ptr)) 49 50 return ERR_CAST(ptr);
+1 -6
drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
··· 282 282 static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc) 283 283 { 284 284 struct intel_gt *gt = gsc_uc_to_gt(gsc); 285 - struct drm_i915_private *i915 = gt->i915; 286 285 void *src; 287 286 288 287 if (!gsc->local) ··· 291 292 return -ENOSPC; 292 293 293 294 src = i915_gem_object_pin_map_unlocked(gsc->fw.obj, 294 - i915_coherent_map_type(i915, gsc->fw.obj, true)); 295 + intel_gt_coherent_map_type(gt, gsc->fw.obj, true)); 295 296 if (IS_ERR(src)) 296 297 return PTR_ERR(src); 297 298 298 299 memcpy_toio(gsc->local_vaddr, src, gsc->fw.size); 299 300 memset_io(gsc->local_vaddr + gsc->fw.size, 0, gsc->local->size - gsc->fw.size); 300 301 301 - /* 302 - * Wa_22016122933: Making sure the data in dst is 303 - * visible to GSC right away 304 - */ 305 302 intel_guc_write_barrier(&gt->uc.guc); 306 303 307 304 i915_gem_object_unpin_map(gsc->fw.obj);
+6 -5
drivers/gpu/drm/i915/gt/uc/intel_guc.c
··· 745 745 return ERR_CAST(obj); 746 746 747 747 /* 748 - * Wa_22016122933: For MTL the shared memory needs to be mapped 749 - * as WC on CPU side and UC (PAT index 2) on GPU side 748 + * Wa_22016122933: For Media version 13.0, all Media GT shared 749 + * memory needs to be mapped as WC on CPU side and UC (PAT 750 + * index 2) on GPU side. 750 751 */ 751 - if (IS_METEORLAKE(gt->i915)) 752 + if (intel_gt_needs_wa_22016122933(gt)) 752 753 i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE); 753 754 754 755 vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL); ··· 793 792 return PTR_ERR(vma); 794 793 795 794 vaddr = i915_gem_object_pin_map_unlocked(vma->obj, 796 - i915_coherent_map_type(guc_to_gt(guc)->i915, 797 - vma->obj, true)); 795 + intel_gt_coherent_map_type(guc_to_gt(guc), 796 + vma->obj, true)); 798 797 if (IS_ERR(vaddr)) { 799 798 i915_vma_unpin_and_release(&vma, 0); 800 799 return PTR_ERR(vaddr);
-4
drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
··· 960 960 /* now update descriptor */ 961 961 WRITE_ONCE(desc->head, head); 962 962 963 - /* 964 - * Wa_22016122933: Making sure the head update is 965 - * visible to GuC right away 966 - */ 967 963 intel_guc_write_barrier(ct_to_guc(ct)); 968 964 969 965 return available - len;
+1 -2
drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
··· 27 27 int intel_huc_fw_auth_via_gsccs(struct intel_huc *huc) 28 28 { 29 29 struct intel_gt *gt = huc_to_gt(huc); 30 - struct drm_i915_private *i915 = gt->i915; 31 30 struct drm_i915_gem_object *obj; 32 31 struct mtl_huc_auth_msg_in *msg_in; 33 32 struct mtl_huc_auth_msg_out *msg_out; ··· 42 43 pkt_offset = i915_ggtt_offset(huc->heci_pkt); 43 44 44 45 pkt_vaddr = i915_gem_object_pin_map_unlocked(obj, 45 - i915_coherent_map_type(i915, obj, true)); 46 + intel_gt_coherent_map_type(gt, obj, true)); 46 47 if (IS_ERR(pkt_vaddr)) 47 48 return PTR_ERR(pkt_vaddr); 48 49
+2 -1
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
··· 11 11 #include <drm/drm_print.h> 12 12 13 13 #include "gem/i915_gem_lmem.h" 14 + #include "gt/intel_gt.h" 14 15 #include "gt/intel_gt_print.h" 15 16 #include "intel_gsc_binary_headers.h" 16 17 #include "intel_gsc_fw.h" ··· 1198 1197 return PTR_ERR(vma); 1199 1198 1200 1199 vaddr = i915_gem_object_pin_map_unlocked(vma->obj, 1201 - i915_coherent_map_type(gt->i915, vma->obj, true)); 1200 + intel_gt_coherent_map_type(gt, vma->obj, true)); 1202 1201 if (IS_ERR(vaddr)) { 1203 1202 i915_vma_unpin_and_release(&vma, 0); 1204 1203 err = PTR_ERR(vaddr);
+3 -3
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
··· 204 204 if (IS_ERR(rq)) { 205 205 ret = PTR_ERR(rq); 206 206 rq = NULL; 207 - if (ret != -EAGAIN) { 208 - guc_err(guc, "Failed to create request %d: %pe\n", 209 - context_index, ERR_PTR(ret)); 207 + if ((ret != -EAGAIN) || !last) { 208 + guc_err(guc, "Failed to create %srequest %d: %pe\n", 209 + last ? "" : "first ", context_index, ERR_PTR(ret)); 210 210 goto err_spin_rq; 211 211 } 212 212 } else {
+2 -1
drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c
··· 6 6 #include "gem/i915_gem_internal.h" 7 7 8 8 #include "gt/intel_context.h" 9 + #include "gt/intel_gt.h" 9 10 #include "gt/uc/intel_gsc_fw.h" 10 11 #include "gt/uc/intel_gsc_uc_heci_cmd_submit.h" 11 12 ··· 337 336 } 338 337 339 338 /* return a virtual pointer */ 340 - *map = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(i915, obj, true)); 339 + *map = i915_gem_object_pin_map_unlocked(obj, intel_gt_coherent_map_type(gt, obj, true)); 341 340 if (IS_ERR(*map)) { 342 341 drm_err(&i915->drm, "Failed to map gsccs backend %s.\n", bufname); 343 342 err = PTR_ERR(*map);
+4 -1
drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
··· 11 11 #include "gem/i915_gem_lmem.h" 12 12 13 13 #include "i915_drv.h" 14 + #include "gt/intel_gt.h" 14 15 15 16 #include "intel_pxp.h" 16 17 #include "intel_pxp_cmd_interface_42.h" ··· 246 245 } 247 246 248 247 /* map the lmem into the virtual memory pointer */ 249 - cmd = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(i915, obj, true)); 248 + cmd = i915_gem_object_pin_map_unlocked(obj, 249 + intel_gt_coherent_map_type(pxp->ctrl_gt, 250 + obj, true)); 250 251 if (IS_ERR(cmd)) { 251 252 drm_err(&i915->drm, "Failed to map gsc message page!\n"); 252 253 err = PTR_ERR(cmd);
+1 -1
drivers/gpu/drm/i915/selftests/igt_spinner.c
··· 97 97 if (!spin->batch) { 98 98 unsigned int mode; 99 99 100 - mode = i915_coherent_map_type(spin->gt->i915, spin->obj, false); 100 + mode = intel_gt_coherent_map_type(spin->gt, spin->obj, false); 101 101 vaddr = igt_spinner_pin_obj(ce, ww, spin->obj, mode, &spin->batch_vma); 102 102 if (IS_ERR(vaddr)) 103 103 return PTR_ERR(vaddr);