Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/xe/mocs: Add debugfs node to dump mocs

This is useful to check mocs configuration. Tests/Tools can use
this debugfs entry to get mocs info.

v2: Address review comments. Change debugfs output style similar
to pat debugfs. (Lucas De Marchi)

v3: rebase.

v4: Address review comments. Use function pointer inside ops
struct. Update Test-with links. Remove usage of flags wherever
not required. (Lucas De Marchi)

v5: Address review comments. Move register defines. Modify mocs
info struct to avoid holes. (Lucas De Marchi)

Cc: Matt Roper <matthew.d.roper@intel.com>
Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Signed-off-by: Janga Rahul Kumar <janga.rahul.kumar@intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240503193902.2056202-3-janga.rahul.kumar@intel.com
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>

authored by

Janga Rahul Kumar and committed by
Lucas De Marchi
9fbd0adb 72c7163f

+304 -32
+37 -1
drivers/gpu/drm/xe/regs/xe_gt_regs.h
··· 59 59 60 60 #define XELP_GLOBAL_MOCS(i) XE_REG(0x4000 + (i) * 4) 61 61 #define XEHP_GLOBAL_MOCS(i) XE_REG_MCR(0x4000 + (i) * 4) 62 + #define LE_SSE_MASK REG_GENMASK(18, 17) 63 + #define LE_SSE(value) REG_FIELD_PREP(LE_SSE_MASK, value) 64 + #define LE_COS_MASK REG_GENMASK(16, 15) 65 + #define LE_COS(value) REG_FIELD_PREP(LE_COS_MASK) 66 + #define LE_SCF_MASK REG_BIT(14) 67 + #define LE_SCF(value) REG_FIELD_PREP(LE_SCF_MASK, value) 68 + #define LE_PFM_MASK REG_GENMASK(13, 11) 69 + #define LE_PFM(value) REG_FIELD_PREP(LE_PFM_MASK, value) 70 + #define LE_SCC_MASK REG_GENMASK(10, 8) 71 + #define LE_SCC(value) REG_FIELD_PREP(LE_SCC_MASK, value) 72 + #define LE_RSC_MASK REG_BIT(7) 73 + #define LE_RSC(value) REG_FIELD_PREP(LE_RSC_MASK, value) 74 + #define LE_AOM_MASK REG_BIT(6) 75 + #define LE_AOM(value) REG_FIELD_PREP(LE_AOM_MASK, value) 76 + #define LE_LRUM_MASK REG_GENMASK(5, 4) 77 + #define LE_LRUM(value) REG_FIELD_PREP(LE_LRUM_MASK, value) 78 + #define LE_TGT_CACHE_MASK REG_GENMASK(3, 2) 79 + #define LE_TGT_CACHE(value) REG_FIELD_PREP(LE_TGT_CACHE_MASK, value) 80 + #define LE_CACHEABILITY_MASK REG_GENMASK(1, 0) 81 + #define LE_CACHEABILITY(value) REG_FIELD_PREP(LE_CACHEABILITY_MASK, value) 82 + 62 83 #define CCS_AUX_INV XE_REG(0x4208) 63 84 64 85 #define VD0_AUX_INV XE_REG(0x4218) ··· 335 314 #define XEHPC_OVRLSCCC REG_BIT(0) 336 315 337 316 /* L3 Cache Control */ 317 + #define LNCFCMOCS_REG_COUNT 32 338 318 #define XELP_LNCFCMOCS(i) XE_REG(0xb020 + (i) * 4) 339 319 #define XEHP_LNCFCMOCS(i) XE_REG_MCR(0xb020 + (i) * 4) 340 - #define LNCFCMOCS_REG_COUNT 32 320 + #define L3_UPPER_LKUP_MASK REG_BIT(23) 321 + #define L3_UPPER_GLBGO_MASK REG_BIT(22) 322 + #define L3_UPPER_IDX_CACHEABILITY_MASK REG_GENMASK(21, 20) 323 + #define L3_UPPER_IDX_SCC_MASK REG_GENMASK(19, 17) 324 + #define L3_UPPER_IDX_ESC_MASK REG_BIT(16) 325 + #define L3_LKUP_MASK REG_BIT(7) 326 + #define L3_LKUP(value) REG_FIELD_PREP(L3_LKUP_MASK, value) 327 + #define L3_GLBGO_MASK REG_BIT(6) 328 + #define 
L3_GLBGO(value) REG_FIELD_PREP(L3_GLBGO_MASK, value) 329 + #define L3_CACHEABILITY_MASK REG_GENMASK(5, 4) 330 + #define L3_CACHEABILITY(value) REG_FIELD_PREP(L3_CACHEABILITY_MASK, value) 331 + #define L3_SCC_MASK REG_GENMASK(3, 1) 332 + #define L3_SCC(value) REG_FIELD_PREP(L3_SCC_MASK, value) 333 + #define L3_ESC_MASK REG_BIT(0) 334 + #define L3_ESC(value) REG_FIELD_PREP(L3_ESC_MASK, value) 341 335 342 336 #define XEHP_L3NODEARBCFG XE_REG_MCR(0xb0b4) 343 337 #define XEHP_LNESPARE REG_BIT(19)
+11
drivers/gpu/drm/xe/xe_gt_debugfs.c
··· 20 20 #include "xe_hw_engine.h" 21 21 #include "xe_lrc.h" 22 22 #include "xe_macros.h" 23 + #include "xe_mocs.h" 23 24 #include "xe_pat.h" 24 25 #include "xe_pm.h" 25 26 #include "xe_reg_sr.h" ··· 203 202 return 0; 204 203 } 205 204 205 + static int mocs(struct xe_gt *gt, struct drm_printer *p) 206 + { 207 + xe_pm_runtime_get(gt_to_xe(gt)); 208 + xe_mocs_dump(gt, p); 209 + xe_pm_runtime_put(gt_to_xe(gt)); 210 + 211 + return 0; 212 + } 213 + 206 214 static int rcs_default_lrc(struct xe_gt *gt, struct drm_printer *p) 207 215 { 208 216 xe_pm_runtime_get(gt_to_xe(gt)); ··· 267 257 {"register-save-restore", .show = xe_gt_debugfs_simple_show, .data = register_save_restore}, 268 258 {"workarounds", .show = xe_gt_debugfs_simple_show, .data = workarounds}, 269 259 {"pat", .show = xe_gt_debugfs_simple_show, .data = pat}, 260 + {"mocs", .show = xe_gt_debugfs_simple_show, .data = mocs}, 270 261 {"default_lrc_rcs", .show = xe_gt_debugfs_simple_show, .data = rcs_default_lrc}, 271 262 {"default_lrc_ccs", .show = xe_gt_debugfs_simple_show, .data = ccs_default_lrc}, 272 263 {"default_lrc_bcs", .show = xe_gt_debugfs_simple_show, .data = bcs_default_lrc},
+248 -31
drivers/gpu/drm/xe/xe_mocs.c
··· 13 13 #include "xe_gt_mcr.h" 14 14 #include "xe_mmio.h" 15 15 #include "xe_platform_types.h" 16 + #include "xe_pm.h" 16 17 #include "xe_sriov.h" 17 18 #include "xe_step_types.h" 18 19 ··· 37 36 u16 used; 38 37 }; 39 38 39 + struct xe_mocs_info; 40 + 41 + struct xe_mocs_ops { 42 + void (*dump)(struct xe_mocs_info *mocs, unsigned int flags, 43 + struct xe_gt *gt, struct drm_printer *p); 44 + }; 45 + 40 46 struct xe_mocs_info { 41 47 unsigned int size; 42 48 unsigned int n_entries; 43 49 const struct xe_mocs_entry *table; 50 + const struct xe_mocs_ops *ops; 44 51 u8 uc_index; 45 52 u8 wb_index; 46 53 u8 unused_entries_index; 47 54 }; 48 - 49 - /* Defines for the tables (XXX_MOCS_0 - XXX_MOCS_63) */ 50 - #define _LE_CACHEABILITY(value) ((value) << 0) 51 - #define _LE_TGT_CACHE(value) ((value) << 2) 52 - #define LE_LRUM(value) ((value) << 4) 53 - #define LE_AOM(value) ((value) << 6) 54 - #define LE_RSC(value) ((value) << 7) 55 - #define LE_SCC(value) ((value) << 8) 56 - #define LE_PFM(value) ((value) << 11) 57 - #define LE_SCF(value) ((value) << 14) 58 - #define LE_COS(value) ((value) << 15) 59 - #define LE_SSE(value) ((value) << 17) 60 - 61 - /* Defines for the tables (LNCFMOCS0 - LNCFMOCS31) - two entries per word */ 62 - #define L3_ESC(value) ((value) << 0) 63 - #define L3_SCC(value) ((value) << 1) 64 - #define _L3_CACHEABILITY(value) ((value) << 4) 65 - #define L3_GLBGO(value) ((value) << 6) 66 - #define L3_LKUP(value) ((value) << 7) 67 55 68 56 /* Defines for the tables (GLOB_MOCS_0 - GLOB_MOCS_16) */ 69 57 #define IG_PAT REG_BIT(8) ··· 70 80 * Note: LE_0_PAGETABLE works only up to Gen11; for newer gens it means 71 81 * the same as LE_UC 72 82 */ 73 - #define LE_0_PAGETABLE _LE_CACHEABILITY(0) 74 - #define LE_1_UC _LE_CACHEABILITY(1) 75 - #define LE_2_WT _LE_CACHEABILITY(2) 76 - #define LE_3_WB _LE_CACHEABILITY(3) 83 + #define LE_0_PAGETABLE LE_CACHEABILITY(0) 84 + #define LE_1_UC LE_CACHEABILITY(1) 85 + #define LE_2_WT LE_CACHEABILITY(2) 86 + #define LE_3_WB 
LE_CACHEABILITY(3) 77 87 78 88 /* Target cache */ 79 - #define LE_TC_0_PAGETABLE _LE_TGT_CACHE(0) 80 - #define LE_TC_1_LLC _LE_TGT_CACHE(1) 81 - #define LE_TC_2_LLC_ELLC _LE_TGT_CACHE(2) 82 - #define LE_TC_3_LLC_ELLC_ALT _LE_TGT_CACHE(3) 89 + #define LE_TC_0_PAGETABLE LE_TGT_CACHE(0) 90 + #define LE_TC_1_LLC LE_TGT_CACHE(1) 91 + #define LE_TC_2_LLC_ELLC LE_TGT_CACHE(2) 92 + #define LE_TC_3_LLC_ELLC_ALT LE_TGT_CACHE(3) 83 93 84 94 /* L3 caching options */ 85 - #define L3_0_DIRECT _L3_CACHEABILITY(0) 86 - #define L3_1_UC _L3_CACHEABILITY(1) 87 - #define L3_2_RESERVED _L3_CACHEABILITY(2) 88 - #define L3_3_WB _L3_CACHEABILITY(3) 95 + #define L3_0_DIRECT L3_CACHEABILITY(0) 96 + #define L3_1_UC L3_CACHEABILITY(1) 97 + #define L3_2_RESERVED L3_CACHEABILITY(2) 98 + #define L3_3_WB L3_CACHEABILITY(3) 89 99 90 100 /* L4 caching options */ 91 101 #define L4_0_WB REG_FIELD_PREP(L4_CACHE_POLICY_MASK, 0) ··· 96 106 /* XD: WB Transient Display */ 97 107 #define XE2_L3_1_XD REG_FIELD_PREP(L3_CACHE_POLICY_MASK, 1) 98 108 #define XE2_L3_3_UC REG_FIELD_PREP(L3_CACHE_POLICY_MASK, 3) 109 + 110 + #define XE2_L3_CLOS_MASK REG_GENMASK(7, 6) 99 111 100 112 #define MOCS_ENTRY(__idx, __control_value, __l3cc_value) \ 101 113 [__idx] = { \ ··· 257 265 return GRAPHICS_VERx100(xe) >= 1250; 258 266 } 259 267 268 + static void xelp_lncf_dump(struct xe_mocs_info *info, struct xe_gt *gt, struct drm_printer *p) 269 + { 270 + unsigned int i, j; 271 + u32 reg_val; 272 + 273 + drm_printf(p, "LNCFCMOCS[idx] = [ESC, SCC, L3CC] (value)\n\n"); 274 + 275 + for (i = 0, j = 0; i < (info->n_entries + 1) / 2; i++, j++) { 276 + if (regs_are_mcr(gt)) 277 + reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i)); 278 + else 279 + reg_val = xe_mmio_read32(gt, XELP_LNCFCMOCS(i)); 280 + 281 + drm_printf(p, "LNCFCMOCS[%2d] = [%u, %u, %u] (%#8x)\n", 282 + j++, 283 + !!(reg_val & L3_ESC_MASK), 284 + REG_FIELD_GET(L3_SCC_MASK, reg_val), 285 + REG_FIELD_GET(L3_CACHEABILITY_MASK, reg_val), 286 + reg_val); 287 + 288 + 
drm_printf(p, "LNCFCMOCS[%2d] = [%u, %u, %u] (%#8x)\n", 289 + j, 290 + !!(reg_val & L3_UPPER_IDX_ESC_MASK), 291 + REG_FIELD_GET(L3_UPPER_IDX_SCC_MASK, reg_val), 292 + REG_FIELD_GET(L3_UPPER_IDX_CACHEABILITY_MASK, reg_val), 293 + reg_val); 294 + } 295 + } 296 + 297 + static void xelp_mocs_dump(struct xe_mocs_info *info, unsigned int flags, 298 + struct xe_gt *gt, struct drm_printer *p) 299 + { 300 + unsigned int i; 301 + u32 reg_val; 302 + 303 + if (flags & HAS_GLOBAL_MOCS) { 304 + drm_printf(p, "Global mocs table configuration:\n"); 305 + drm_printf(p, "GLOB_MOCS[idx] = [LeCC, TC, LRUM, AOM, RSC, SCC, PFM, SCF, CoS, SSE] (value)\n\n"); 306 + 307 + for (i = 0; i < info->n_entries; i++) { 308 + if (regs_are_mcr(gt)) 309 + reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_GLOBAL_MOCS(i)); 310 + else 311 + reg_val = xe_mmio_read32(gt, XELP_GLOBAL_MOCS(i)); 312 + 313 + drm_printf(p, "GLOB_MOCS[%2d] = [%u, %u, %u, %u, %u, %u, %u, %u, %u, %u ] (%#8x)\n", 314 + i, 315 + REG_FIELD_GET(LE_CACHEABILITY_MASK, reg_val), 316 + REG_FIELD_GET(LE_TGT_CACHE_MASK, reg_val), 317 + REG_FIELD_GET(LE_LRUM_MASK, reg_val), 318 + !!(reg_val & LE_AOM_MASK), 319 + !!(reg_val & LE_RSC_MASK), 320 + REG_FIELD_GET(LE_SCC_MASK, reg_val), 321 + REG_FIELD_GET(LE_PFM_MASK, reg_val), 322 + !!(reg_val & LE_SCF_MASK), 323 + REG_FIELD_GET(LE_COS_MASK, reg_val), 324 + REG_FIELD_GET(LE_SSE_MASK, reg_val), 325 + reg_val); 326 + } 327 + } 328 + 329 + xelp_lncf_dump(info, gt, p); 330 + } 331 + 332 + static const struct xe_mocs_ops xelp_mocs_ops = { 333 + .dump = xelp_mocs_dump, 334 + }; 335 + 260 336 static const struct xe_mocs_entry dg1_mocs_desc[] = { 261 337 /* UC */ 262 338 MOCS_ENTRY(1, 0, L3_1_UC), ··· 361 301 MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)), 362 302 }; 363 303 304 + static void xehp_lncf_dump(struct xe_mocs_info *info, unsigned int flags, 305 + struct xe_gt *gt, struct drm_printer *p) 306 + { 307 + unsigned int i, j; 308 + u32 reg_val; 309 + 310 + drm_printf(p, "LNCFCMOCS[idx] = [UCL3LOOKUP, GLBGO, 
L3CC] (value)\n\n"); 311 + 312 + for (i = 0, j = 0; i < (info->n_entries + 1) / 2; i++, j++) { 313 + if (regs_are_mcr(gt)) 314 + reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i)); 315 + else 316 + reg_val = xe_mmio_read32(gt, XELP_LNCFCMOCS(i)); 317 + 318 + drm_printf(p, "LNCFCMOCS[%2d] = [%u, %u, %u] (%#8x)\n", 319 + j++, 320 + !!(reg_val & L3_LKUP_MASK), 321 + !!(reg_val & L3_GLBGO_MASK), 322 + REG_FIELD_GET(L3_CACHEABILITY_MASK, reg_val), 323 + reg_val); 324 + 325 + drm_printf(p, "LNCFCMOCS[%2d] = [%u, %u, %u] (%#8x)\n", 326 + j, 327 + !!(reg_val & L3_UPPER_LKUP_MASK), 328 + !!(reg_val & L3_UPPER_GLBGO_MASK), 329 + REG_FIELD_GET(L3_UPPER_IDX_CACHEABILITY_MASK, reg_val), 330 + reg_val); 331 + } 332 + } 333 + 334 + static const struct xe_mocs_ops xehp_mocs_ops = { 335 + .dump = xehp_lncf_dump, 336 + }; 337 + 364 338 static const struct xe_mocs_entry pvc_mocs_desc[] = { 365 339 /* Error */ 366 340 MOCS_ENTRY(0, 0, L3_3_WB), ··· 404 310 405 311 /* WB */ 406 312 MOCS_ENTRY(2, 0, L3_3_WB), 313 + }; 314 + 315 + static void pvc_mocs_dump(struct xe_mocs_info *info, unsigned int flags, struct xe_gt *gt, 316 + struct drm_printer *p) 317 + { 318 + unsigned int i, j; 319 + u32 reg_val; 320 + 321 + drm_printf(p, "LNCFCMOCS[idx] = [ L3CC ] (value)\n\n"); 322 + 323 + for (i = 0, j = 0; i < (info->n_entries + 1) / 2; i++, j++) { 324 + if (regs_are_mcr(gt)) 325 + reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i)); 326 + else 327 + reg_val = xe_mmio_read32(gt, XELP_LNCFCMOCS(i)); 328 + 329 + drm_printf(p, "LNCFCMOCS[%2d] = [ %u ] (%#8x)\n", 330 + j++, 331 + REG_FIELD_GET(L3_CACHEABILITY_MASK, reg_val), 332 + reg_val); 333 + 334 + drm_printf(p, "LNCFCMOCS[%2d] = [ %u ] (%#8x)\n", 335 + j, 336 + REG_FIELD_GET(L3_UPPER_IDX_CACHEABILITY_MASK, reg_val), 337 + reg_val); 338 + } 339 + } 340 + 341 + static const struct xe_mocs_ops pvc_mocs_ops = { 342 + .dump = pvc_mocs_dump, 407 343 }; 408 344 409 345 static const struct xe_mocs_entry mtl_mocs_desc[] = { ··· 487 363 
L3_GLBGO(1) | L3_1_UC), 488 364 }; 489 365 366 + static void mtl_mocs_dump(struct xe_mocs_info *info, unsigned int flags, 367 + struct xe_gt *gt, struct drm_printer *p) 368 + { 369 + unsigned int i; 370 + u32 reg_val; 371 + 372 + drm_printf(p, "Global mocs table configuration:\n"); 373 + drm_printf(p, "GLOB_MOCS[idx] = [IG_PAT, L4_CACHE_POLICY] (value)\n\n"); 374 + 375 + for (i = 0; i < info->n_entries; i++) { 376 + if (regs_are_mcr(gt)) 377 + reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_GLOBAL_MOCS(i)); 378 + else 379 + reg_val = xe_mmio_read32(gt, XELP_GLOBAL_MOCS(i)); 380 + 381 + drm_printf(p, "GLOB_MOCS[%2d] = [%u, %u] (%#8x)\n", 382 + i, 383 + !!(reg_val & IG_PAT), 384 + REG_FIELD_GET(L4_CACHE_POLICY_MASK, reg_val), 385 + reg_val); 386 + } 387 + 388 + /* MTL lncf mocs table pattern is similar to that of xehp */ 389 + xehp_lncf_dump(info, flags, gt, p); 390 + } 391 + 392 + static const struct xe_mocs_ops mtl_mocs_ops = { 393 + .dump = mtl_mocs_dump, 394 + }; 395 + 490 396 static const struct xe_mocs_entry xe2_mocs_table[] = { 491 397 /* Defer to PAT */ 492 398 MOCS_ENTRY(0, XE2_L3_0_WB | L4_3_UC, 0), ··· 530 376 MOCS_ENTRY(4, IG_PAT | XE2_L3_0_WB | L4_0_WB, 0), 531 377 }; 532 378 379 + static void xe2_mocs_dump(struct xe_mocs_info *info, unsigned int flags, 380 + struct xe_gt *gt, struct drm_printer *p) 381 + { 382 + unsigned int i; 383 + u32 reg_val; 384 + 385 + drm_printf(p, "Global mocs table configuration:\n"); 386 + drm_printf(p, "GLOB_MOCS[idx] = [IG_PAT, L3_CLOS, L3_CACHE_POLICY, L4_CACHE_POLICY] (value)\n\n"); 387 + 388 + for (i = 0; i < info->n_entries; i++) { 389 + if (regs_are_mcr(gt)) 390 + reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_GLOBAL_MOCS(i)); 391 + else 392 + reg_val = xe_mmio_read32(gt, XELP_GLOBAL_MOCS(i)); 393 + 394 + drm_printf(p, "GLOB_MOCS[%2d] = [%u, %u, %u] (%#8x)\n", 395 + i, 396 + !!(reg_val & IG_PAT), 397 + REG_FIELD_GET(XE2_L3_CLOS_MASK, reg_val), 398 + REG_FIELD_GET(L4_CACHE_POLICY_MASK, reg_val), 399 + reg_val); 400 + } 401 
+ } 402 + 403 + static const struct xe_mocs_ops xe2_mocs_ops = { 404 + .dump = xe2_mocs_dump, 405 + }; 406 + 533 407 static unsigned int get_mocs_settings(struct xe_device *xe, 534 408 struct xe_mocs_info *info) 535 409 { ··· 568 386 switch (xe->info.platform) { 569 387 case XE_LUNARLAKE: 570 388 case XE_BATTLEMAGE: 389 + info->ops = &xe2_mocs_ops; 571 390 info->size = ARRAY_SIZE(xe2_mocs_table); 572 391 info->table = xe2_mocs_table; 573 392 info->n_entries = XE2_NUM_MOCS_ENTRIES; ··· 577 394 info->unused_entries_index = 4; 578 395 break; 579 396 case XE_PVC: 397 + info->ops = &pvc_mocs_ops; 580 398 info->size = ARRAY_SIZE(pvc_mocs_desc); 581 399 info->table = pvc_mocs_desc; 582 400 info->n_entries = PVC_NUM_MOCS_ENTRIES; ··· 586 402 info->unused_entries_index = 2; 587 403 break; 588 404 case XE_METEORLAKE: 405 + info->ops = &mtl_mocs_ops; 589 406 info->size = ARRAY_SIZE(mtl_mocs_desc); 590 407 info->table = mtl_mocs_desc; 591 408 info->n_entries = MTL_NUM_MOCS_ENTRIES; ··· 594 409 info->unused_entries_index = 1; 595 410 break; 596 411 case XE_DG2: 412 + info->ops = &xehp_mocs_ops; 597 413 info->size = ARRAY_SIZE(dg2_mocs_desc); 598 414 info->table = dg2_mocs_desc; 599 415 info->uc_index = 1; ··· 606 420 info->unused_entries_index = 3; 607 421 break; 608 422 case XE_DG1: 423 + info->ops = &xelp_mocs_ops; 609 424 info->size = ARRAY_SIZE(dg1_mocs_desc); 610 425 info->table = dg1_mocs_desc; 611 426 info->uc_index = 1; ··· 618 431 case XE_ALDERLAKE_S: 619 432 case XE_ALDERLAKE_P: 620 433 case XE_ALDERLAKE_N: 434 + info->ops = &xelp_mocs_ops; 621 435 info->size = ARRAY_SIZE(gen12_mocs_desc); 622 436 info->table = gen12_mocs_desc; 623 437 info->n_entries = XELP_NUM_MOCS_ENTRIES; ··· 639 451 * mistake in the switch statement above. 
640 452 */ 641 453 xe_assert(xe, info->unused_entries_index != 0); 454 + 455 + xe_assert(xe, !info->ops || info->ops->dump); 642 456 643 457 if (XE_WARN_ON(info->size > info->n_entries)) { 644 458 info->table = NULL; ··· 766 576 __init_mocs_table(gt, &table); 767 577 if (flags & HAS_LNCF_MOCS) 768 578 init_l3cc_table(gt, &table); 579 + } 580 + 581 + void xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p) 582 + { 583 + struct xe_mocs_info table; 584 + unsigned int flags; 585 + u32 ret; 586 + struct xe_device *xe = gt_to_xe(gt); 587 + 588 + flags = get_mocs_settings(xe, &table); 589 + 590 + if (!table.ops->dump) 591 + return; 592 + 593 + xe_pm_runtime_get_noresume(xe); 594 + ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); 595 + 596 + if (ret) 597 + goto err_fw; 598 + 599 + table.ops->dump(&table, flags, gt, p); 600 + 601 + xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); 602 + 603 + err_fw: 604 + xe_assert(xe, !ret); 605 + xe_pm_runtime_put(xe); 769 606 } 770 607 771 608 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+8
drivers/gpu/drm/xe/xe_mocs.h
··· 10 10 11 11 struct xe_exec_queue; 12 12 struct xe_gt; 13 + struct drm_printer; 13 14 14 15 void xe_mocs_init_early(struct xe_gt *gt); 15 16 void xe_mocs_init(struct xe_gt *gt); 17 + 18 + /** 19 + * xe_mocs_dump - Dump mocs table 20 + * @gt: GT structure 21 + * @p: Printer to dump info to 22 + */ 23 + void xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p); 16 24 17 25 #endif