Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

drm/i915/sseu: Move sseu detection and dump to intel_sseu

Keep all the SSEU code in the relevant file. The code has also been
updated to use intel_gt instead of dev_priv.

Based on an original patch by Sandeep.

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: Andi Shyti <andi.shyti@intel.com>
Cc: Venkata Sandeep Dhanalakota <venkata.s.dhanalakota@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20200708003952.21831-7-daniele.ceraolospurio@intel.com
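
The dev_priv to intel_gt conversion called out above is mechanical and repeats across every *_sseu_info_init() function: the device-pointer argument becomes the GT, and both i915 and uncore are reached through it. Excerpted from the gen11 hunks of this commit, removed form first, added form second:

static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	struct intel_uncore *uncore = &dev_priv->uncore;
	...

static void gen11_sseu_info_init(struct intel_gt *gt)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(gt->i915)->sseu;
	struct intel_uncore *uncore = gt->uncore;
	...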

Authored by Daniele Ceraolo Spurio, committed by Chris Wilson
9b413f01 d0eb6866

+599 -586
+1
drivers/gpu/drm/i915/gt/intel_gt.c
···
 int intel_gt_init_mmio(struct intel_gt *gt)
 {
 	intel_uc_init_mmio(&gt->uc);
+	intel_sseu_info_init(gt);
 
 	return intel_engines_init_mmio(gt);
 }
+586
drivers/gpu/drm/i915/gt/intel_sseu.c
···
 	return hweight32(intel_sseu_get_subslices(sseu, slice));
 }
 
+static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
+		       int subslice)
+{
+	int slice_stride = sseu->max_subslices * sseu->eu_stride;
+
+	return slice * slice_stride + subslice * sseu->eu_stride;
+}
+
+static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
+			int subslice)
+{
+	int i, offset = sseu_eu_idx(sseu, slice, subslice);
+	u16 eu_mask = 0;
+
+	for (i = 0; i < sseu->eu_stride; i++)
+		eu_mask |=
+			((u16)sseu->eu_mask[offset + i]) << (i * BITS_PER_BYTE);
+
+	return eu_mask;
+}
+
+static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
+			 u16 eu_mask)
+{
+	int i, offset = sseu_eu_idx(sseu, slice, subslice);
+
+	for (i = 0; i < sseu->eu_stride; i++)
+		sseu->eu_mask[offset + i] =
+			(eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
+}
+
+static u16 compute_eu_total(const struct sseu_dev_info *sseu)
+{
+	u16 i, total = 0;
+
+	for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++)
+		total += hweight8(sseu->eu_mask[i]);
+
+	return total;
+}
+
+static void gen11_compute_sseu_info(struct sseu_dev_info *sseu,
+				    u8 s_en, u32 ss_en, u16 eu_en)
+{
+	int s, ss;
+
+	/* ss_en represents entire subslice mask across all slices */
+	GEM_BUG_ON(sseu->max_slices * sseu->max_subslices >
+		   sizeof(ss_en) * BITS_PER_BYTE);
+
+	for (s = 0; s < sseu->max_slices; s++) {
+		if ((s_en & BIT(s)) == 0)
+			continue;
+
+		sseu->slice_mask |= BIT(s);
+
+		intel_sseu_set_subslices(sseu, s, ss_en);
+
+		for (ss = 0; ss < sseu->max_subslices; ss++)
+			if (intel_sseu_has_subslice(sseu, s, ss))
+				sseu_set_eus(sseu, s, ss, eu_en);
+	}
+	sseu->eu_per_subslice = hweight16(eu_en);
+	sseu->eu_total = compute_eu_total(sseu);
+}
+
+static void gen12_sseu_info_init(struct intel_gt *gt)
+{
+	struct sseu_dev_info *sseu = &RUNTIME_INFO(gt->i915)->sseu;
+	struct intel_uncore *uncore = gt->uncore;
+	u32 dss_en;
+	u16 eu_en = 0;
+	u8 eu_en_fuse;
+	u8 s_en;
+	int eu;
+
+	/*
+	 * Gen12 has Dual-Subslices, which behave similarly to 2 gen11 SS.
+	 * Instead of splitting these, provide userspace with an array
+	 * of DSS to more closely represent the hardware resource.
+	 */
+	intel_sseu_set_info(sseu, 1, 6, 16);
+
+	s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
+		GEN11_GT_S_ENA_MASK;
+
+	dss_en = intel_uncore_read(uncore, GEN12_GT_DSS_ENABLE);
+
+	/* one bit per pair of EUs */
+	eu_en_fuse = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
+		       GEN11_EU_DIS_MASK);
+	for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++)
+		if (eu_en_fuse & BIT(eu))
+			eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);
+
+	gen11_compute_sseu_info(sseu, s_en, dss_en, eu_en);
+
+	/* TGL only supports slice-level power gating */
+	sseu->has_slice_pg = 1;
+}
+
+static void gen11_sseu_info_init(struct intel_gt *gt)
+{
+	struct sseu_dev_info *sseu = &RUNTIME_INFO(gt->i915)->sseu;
+	struct intel_uncore *uncore = gt->uncore;
+	u32 ss_en;
+	u8 eu_en;
+	u8 s_en;
+
+	if (IS_ELKHARTLAKE(gt->i915))
+		intel_sseu_set_info(sseu, 1, 4, 8);
+	else
+		intel_sseu_set_info(sseu, 1, 8, 8);
+
+	s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
+		GEN11_GT_S_ENA_MASK;
+	ss_en = ~intel_uncore_read(uncore, GEN11_GT_SUBSLICE_DISABLE);
+
+	eu_en = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
+		  GEN11_EU_DIS_MASK);
+
+	gen11_compute_sseu_info(sseu, s_en, ss_en, eu_en);
+
+	/* ICL has no power gating restrictions. */
+	sseu->has_slice_pg = 1;
+	sseu->has_subslice_pg = 1;
+	sseu->has_eu_pg = 1;
+}
+
+static void gen10_sseu_info_init(struct intel_gt *gt)
+{
+	struct intel_uncore *uncore = gt->uncore;
+	struct sseu_dev_info *sseu = &RUNTIME_INFO(gt->i915)->sseu;
+	const u32 fuse2 = intel_uncore_read(uncore, GEN8_FUSE2);
+	const int eu_mask = 0xff;
+	u32 subslice_mask, eu_en;
+	int s, ss;
+
+	intel_sseu_set_info(sseu, 6, 4, 8);
+
+	sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
+		GEN10_F2_S_ENA_SHIFT;
+
+	/* Slice0 */
+	eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE0);
+	for (ss = 0; ss < sseu->max_subslices; ss++)
+		sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask);
+	/* Slice1 */
+	sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask);
+	eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE1);
+	sseu_set_eus(sseu, 1, 1, eu_en & eu_mask);
+	/* Slice2 */
+	sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask);
+	sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask);
+	/* Slice3 */
+	sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask);
+	eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE2);
+	sseu_set_eus(sseu, 3, 1, eu_en & eu_mask);
+	/* Slice4 */
+	sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask);
+	sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask);
+	/* Slice5 */
+	sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask);
+	eu_en = ~intel_uncore_read(uncore, GEN10_EU_DISABLE3);
+	sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);
+
+	subslice_mask = (1 << 4) - 1;
+	subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
+			   GEN10_F2_SS_DIS_SHIFT);
+
+	for (s = 0; s < sseu->max_slices; s++) {
+		u32 subslice_mask_with_eus = subslice_mask;
+
+		for (ss = 0; ss < sseu->max_subslices; ss++) {
+			if (sseu_get_eus(sseu, s, ss) == 0)
+				subslice_mask_with_eus &= ~BIT(ss);
+		}
+
+		/*
+		 * Slice0 can have up to 3 subslices, but there are only 2 in
+		 * slice1/2.
+		 */
+		intel_sseu_set_subslices(sseu, s, s == 0 ?
+					 subslice_mask_with_eus :
+					 subslice_mask_with_eus & 0x3);
+	}
+
+	sseu->eu_total = compute_eu_total(sseu);
+
+	/*
+	 * CNL is expected to always have a uniform distribution
+	 * of EU across subslices with the exception that any one
+	 * EU in any one subslice may be fused off for die
+	 * recovery.
+	 */
+	sseu->eu_per_subslice =
+		intel_sseu_subslice_total(sseu) ?
+		DIV_ROUND_UP(sseu->eu_total, intel_sseu_subslice_total(sseu)) :
+		0;
+
+	/* No restrictions on Power Gating */
+	sseu->has_slice_pg = 1;
+	sseu->has_subslice_pg = 1;
+	sseu->has_eu_pg = 1;
+}
+
+static void cherryview_sseu_info_init(struct intel_gt *gt)
+{
+	struct sseu_dev_info *sseu = &RUNTIME_INFO(gt->i915)->sseu;
+	u32 fuse;
+	u8 subslice_mask = 0;
+
+	fuse = intel_uncore_read(gt->uncore, CHV_FUSE_GT);
+
+	sseu->slice_mask = BIT(0);
+	intel_sseu_set_info(sseu, 1, 2, 8);
+
+	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
+		u8 disabled_mask =
+			((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >>
+			 CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
+			(((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
+			  CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);
+
+		subslice_mask |= BIT(0);
+		sseu_set_eus(sseu, 0, 0, ~disabled_mask);
+	}
+
+	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
+		u8 disabled_mask =
+			((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >>
+			 CHV_FGT_EU_DIS_SS1_R0_SHIFT) |
+			(((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
+			  CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);
+
+		subslice_mask |= BIT(1);
+		sseu_set_eus(sseu, 0, 1, ~disabled_mask);
+	}
+
+	intel_sseu_set_subslices(sseu, 0, subslice_mask);
+
+	sseu->eu_total = compute_eu_total(sseu);
+
+	/*
+	 * CHV expected to always have a uniform distribution of EU
+	 * across subslices.
+	 */
+	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
+		sseu->eu_total /
+			intel_sseu_subslice_total(sseu) :
+		0;
+	/*
+	 * CHV supports subslice power gating on devices with more than
+	 * one subslice, and supports EU power gating on devices with
+	 * more than one EU pair per subslice.
+	 */
+	sseu->has_slice_pg = 0;
+	sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1;
+	sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
+}
+
+static void gen9_sseu_info_init(struct intel_gt *gt)
+{
+	struct drm_i915_private *i915 = gt->i915;
+	struct intel_device_info *info = mkwrite_device_info(i915);
+	struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
+	struct intel_uncore *uncore = gt->uncore;
+	u32 fuse2, eu_disable, subslice_mask;
+	const u8 eu_mask = 0xff;
+	int s, ss;
+
+	fuse2 = intel_uncore_read(uncore, GEN8_FUSE2);
+	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+
+	/* BXT has a single slice and at most 3 subslices. */
+	intel_sseu_set_info(sseu, IS_GEN9_LP(i915) ? 1 : 3,
+			    IS_GEN9_LP(i915) ? 3 : 4, 8);
+
+	/*
+	 * The subslice disable field is global, i.e. it applies
+	 * to each of the enabled slices.
+	 */
+	subslice_mask = (1 << sseu->max_subslices) - 1;
+	subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
+			   GEN9_F2_SS_DIS_SHIFT);
+
+	/*
+	 * Iterate through enabled slices and subslices to
+	 * count the total enabled EU.
+	 */
+	for (s = 0; s < sseu->max_slices; s++) {
+		if (!(sseu->slice_mask & BIT(s)))
+			/* skip disabled slice */
+			continue;
+
+		intel_sseu_set_subslices(sseu, s, subslice_mask);
+
+		eu_disable = intel_uncore_read(uncore, GEN9_EU_DISABLE(s));
+		for (ss = 0; ss < sseu->max_subslices; ss++) {
+			int eu_per_ss;
+			u8 eu_disabled_mask;
+
+			if (!intel_sseu_has_subslice(sseu, s, ss))
+				/* skip disabled subslice */
+				continue;
+
+			eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;
+
+			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
+
+			eu_per_ss = sseu->max_eus_per_subslice -
+				hweight8(eu_disabled_mask);
+
+			/*
+			 * Record which subslice(s) has(have) 7 EUs. we
+			 * can tune the hash used to spread work among
+			 * subslices if they are unbalanced.
+			 */
+			if (eu_per_ss == 7)
+				sseu->subslice_7eu[s] |= BIT(ss);
+		}
+	}
+
+	sseu->eu_total = compute_eu_total(sseu);
+
+	/*
+	 * SKL is expected to always have a uniform distribution
+	 * of EU across subslices with the exception that any one
+	 * EU in any one subslice may be fused off for die
+	 * recovery. BXT is expected to be perfectly uniform in EU
+	 * distribution.
+	 */
+	sseu->eu_per_subslice =
+		intel_sseu_subslice_total(sseu) ?
+		DIV_ROUND_UP(sseu->eu_total, intel_sseu_subslice_total(sseu)) :
+		0;
+
+	/*
+	 * SKL+ supports slice power gating on devices with more than
+	 * one slice, and supports EU power gating on devices with
+	 * more than one EU pair per subslice. BXT+ supports subslice
+	 * power gating on devices with more than one subslice, and
+	 * supports EU power gating on devices with more than one EU
+	 * pair per subslice.
+	 */
+	sseu->has_slice_pg =
+		!IS_GEN9_LP(i915) && hweight8(sseu->slice_mask) > 1;
+	sseu->has_subslice_pg =
+		IS_GEN9_LP(i915) && intel_sseu_subslice_total(sseu) > 1;
+	sseu->has_eu_pg = sseu->eu_per_subslice > 2;
+
+	if (IS_GEN9_LP(i915)) {
+#define IS_SS_DISABLED(ss)	(!(sseu->subslice_mask[0] & BIT(ss)))
+		info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3;
+
+		sseu->min_eu_in_pool = 0;
+		if (info->has_pooled_eu) {
+			if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
+				sseu->min_eu_in_pool = 3;
+			else if (IS_SS_DISABLED(1))
+				sseu->min_eu_in_pool = 6;
+			else
+				sseu->min_eu_in_pool = 9;
+		}
+#undef IS_SS_DISABLED
+	}
+}
+
+static void bdw_sseu_info_init(struct intel_gt *gt)
+{
+	struct sseu_dev_info *sseu = &RUNTIME_INFO(gt->i915)->sseu;
+	struct intel_uncore *uncore = gt->uncore;
+	int s, ss;
+	u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */
+	u32 eu_disable0, eu_disable1, eu_disable2;
+
+	fuse2 = intel_uncore_read(uncore, GEN8_FUSE2);
+	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+	intel_sseu_set_info(sseu, 3, 3, 8);
+
+	/*
+	 * The subslice disable field is global, i.e. it applies
+	 * to each of the enabled slices.
+	 */
+	subslice_mask = GENMASK(sseu->max_subslices - 1, 0);
+	subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
+			   GEN8_F2_SS_DIS_SHIFT);
+	eu_disable0 = intel_uncore_read(uncore, GEN8_EU_DISABLE0);
+	eu_disable1 = intel_uncore_read(uncore, GEN8_EU_DISABLE1);
+	eu_disable2 = intel_uncore_read(uncore, GEN8_EU_DISABLE2);
+	eu_disable[0] = eu_disable0 & GEN8_EU_DIS0_S0_MASK;
+	eu_disable[1] = (eu_disable0 >> GEN8_EU_DIS0_S1_SHIFT) |
+		((eu_disable1 & GEN8_EU_DIS1_S1_MASK) <<
+		 (32 - GEN8_EU_DIS0_S1_SHIFT));
+	eu_disable[2] = (eu_disable1 >> GEN8_EU_DIS1_S2_SHIFT) |
+		((eu_disable2 & GEN8_EU_DIS2_S2_MASK) <<
+		 (32 - GEN8_EU_DIS1_S2_SHIFT));
+
+	/*
+	 * Iterate through enabled slices and subslices to
+	 * count the total enabled EU.
+	 */
+	for (s = 0; s < sseu->max_slices; s++) {
+		if (!(sseu->slice_mask & BIT(s)))
+			/* skip disabled slice */
+			continue;
+
+		intel_sseu_set_subslices(sseu, s, subslice_mask);
+
+		for (ss = 0; ss < sseu->max_subslices; ss++) {
+			u8 eu_disabled_mask;
+			u32 n_disabled;
+
+			if (!intel_sseu_has_subslice(sseu, s, ss))
+				/* skip disabled subslice */
+				continue;
+
+			eu_disabled_mask =
+				eu_disable[s] >> (ss * sseu->max_eus_per_subslice);
+
+			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
+
+			n_disabled = hweight8(eu_disabled_mask);
+
+			/*
+			 * Record which subslices have 7 EUs.
+			 */
+			if (sseu->max_eus_per_subslice - n_disabled == 7)
+				sseu->subslice_7eu[s] |= 1 << ss;
+		}
+	}
+
+	sseu->eu_total = compute_eu_total(sseu);
+
+	/*
+	 * BDW is expected to always have a uniform distribution of EU across
+	 * subslices with the exception that any one EU in any one subslice may
+	 * be fused off for die recovery.
+	 */
+	sseu->eu_per_subslice =
+		intel_sseu_subslice_total(sseu) ?
+		DIV_ROUND_UP(sseu->eu_total, intel_sseu_subslice_total(sseu)) :
+		0;
+
+	/*
+	 * BDW supports slice power gating on devices with more than
+	 * one slice.
+	 */
+	sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
+	sseu->has_subslice_pg = 0;
+	sseu->has_eu_pg = 0;
+}
+
+static void hsw_sseu_info_init(struct intel_gt *gt)
+{
+	struct drm_i915_private *i915 = gt->i915;
+	struct sseu_dev_info *sseu = &RUNTIME_INFO(gt->i915)->sseu;
+	u32 fuse1;
+	u8 subslice_mask = 0;
+	int s, ss;
+
+	/*
+	 * There isn't a register to tell us how many slices/subslices. We
+	 * work off the PCI-ids here.
+	 */
+	switch (INTEL_INFO(i915)->gt) {
+	default:
+		MISSING_CASE(INTEL_INFO(i915)->gt);
+		fallthrough;
+	case 1:
+		sseu->slice_mask = BIT(0);
+		subslice_mask = BIT(0);
+		break;
+	case 2:
+		sseu->slice_mask = BIT(0);
+		subslice_mask = BIT(0) | BIT(1);
+		break;
+	case 3:
+		sseu->slice_mask = BIT(0) | BIT(1);
+		subslice_mask = BIT(0) | BIT(1);
+		break;
+	}
+
+	fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1);
+	switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
+	default:
+		MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >>
+			     HSW_F1_EU_DIS_SHIFT);
+		fallthrough;
+	case HSW_F1_EU_DIS_10EUS:
+		sseu->eu_per_subslice = 10;
+		break;
+	case HSW_F1_EU_DIS_8EUS:
+		sseu->eu_per_subslice = 8;
+		break;
+	case HSW_F1_EU_DIS_6EUS:
+		sseu->eu_per_subslice = 6;
+		break;
+	}
+
+	intel_sseu_set_info(sseu, hweight8(sseu->slice_mask),
+			    hweight8(subslice_mask),
+			    sseu->eu_per_subslice);
+
+	for (s = 0; s < sseu->max_slices; s++) {
+		intel_sseu_set_subslices(sseu, s, subslice_mask);
+
+		for (ss = 0; ss < sseu->max_subslices; ss++) {
+			sseu_set_eus(sseu, s, ss,
+				     (1UL << sseu->eu_per_subslice) - 1);
+		}
+	}
+
+	sseu->eu_total = compute_eu_total(sseu);
+
+	/* No powergating for you. */
+	sseu->has_slice_pg = 0;
+	sseu->has_subslice_pg = 0;
+	sseu->has_eu_pg = 0;
+}
+
+void intel_sseu_info_init(struct intel_gt *gt)
+{
+	struct drm_i915_private *i915 = gt->i915;
+
+	if (IS_HASWELL(i915))
+		hsw_sseu_info_init(gt);
+	else if (IS_CHERRYVIEW(i915))
+		cherryview_sseu_info_init(gt);
+	else if (IS_BROADWELL(i915))
+		bdw_sseu_info_init(gt);
+	else if (IS_GEN(i915, 9))
+		gen9_sseu_info_init(gt);
+	else if (IS_GEN(i915, 10))
+		gen10_sseu_info_init(gt);
+	else if (IS_GEN(i915, 11))
+		gen11_sseu_info_init(gt);
+	else if (INTEL_GEN(i915) >= 12)
+		gen12_sseu_info_init(gt);
+}
+
 u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
 			 const struct intel_sseu *req_sseu)
 {
···
 	}
 
 	return rpcs;
+}
+
+void intel_sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
+{
+	int s;
+
+	drm_printf(p, "slice total: %u, mask=%04x\n",
+		   hweight8(sseu->slice_mask), sseu->slice_mask);
+	drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
+	for (s = 0; s < sseu->max_slices; s++) {
+		drm_printf(p, "slice%d: %u subslices, mask=%08x\n",
+			   s, intel_sseu_subslices_per_slice(sseu, s),
+			   intel_sseu_get_subslices(sseu, s));
+	}
+	drm_printf(p, "EU total: %u\n", sseu->eu_total);
+	drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
+	drm_printf(p, "has slice power gating: %s\n",
+		   yesno(sseu->has_slice_pg));
+	drm_printf(p, "has subslice power gating: %s\n",
+		   yesno(sseu->has_subslice_pg));
+	drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
+}
+
+void intel_sseu_print_topology(const struct sseu_dev_info *sseu,
+			       struct drm_printer *p)
+{
+	int s, ss;
+
+	if (sseu->max_slices == 0) {
+		drm_printf(p, "Unavailable\n");
+		return;
+	}
+
+	for (s = 0; s < sseu->max_slices; s++) {
+		drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n",
+			   s, intel_sseu_subslices_per_slice(sseu, s),
+			   intel_sseu_get_subslices(sseu, s));
+
+		for (ss = 0; ss < sseu->max_subslices; ss++) {
+			u16 enabled_eus = sseu_get_eus(sseu, s, ss);
+
+			drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n",
+				   ss, hweight16(enabled_eus), enabled_eus);
+		}
+	}
 }
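
The eu_mask bookkeeping moved above is the one subtle piece of this file: EU enable bits are serialized into a flat byte array, eu_stride bytes per subslice, at offset slice * (max_subslices * eu_stride) + subslice * eu_stride. The following is a self-contained user-space model of that layout plus the gen12 one-bit-per-EU-pair fuse expansion; struct sseu_model and the fuse value are illustrative stand-ins, not the kernel's sseu_dev_info or fuse registers.

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8

/* Hypothetical, simplified stand-in for the kernel's sseu_dev_info. */
struct sseu_model {
	int max_subslices;
	int eu_stride;			/* bytes of EU mask per subslice */
	uint8_t eu_mask[6 * 2];		/* 1 slice x 6 subslices x 2 bytes */
};

/* Mirrors sseu_eu_idx(): flat offset of a subslice's EU mask bytes. */
static int eu_idx(const struct sseu_model *s, int slice, int subslice)
{
	int slice_stride = s->max_subslices * s->eu_stride;

	return slice * slice_stride + subslice * s->eu_stride;
}

/* Mirrors sseu_set_eus(): serialize a mask byte by byte. */
static void set_eus(struct sseu_model *s, int slice, int subslice, uint16_t m)
{
	int i, offset = eu_idx(s, slice, subslice);

	for (i = 0; i < s->eu_stride; i++)
		s->eu_mask[offset + i] = (m >> (i * BITS_PER_BYTE)) & 0xff;
}

/* Mirrors sseu_get_eus(): reassemble the mask from the byte array. */
static uint16_t get_eus(const struct sseu_model *s, int slice, int subslice)
{
	int i, offset = eu_idx(s, slice, subslice);
	uint16_t m = 0;

	for (i = 0; i < s->eu_stride; i++)
		m |= (uint16_t)s->eu_mask[offset + i] << (i * BITS_PER_BYTE);
	return m;
}

int main(void)
{
	/* Gen12-like shape from intel_sseu_set_info(sseu, 1, 6, 16). */
	struct sseu_model s = { .max_subslices = 6, .eu_stride = 2 };
	uint8_t eu_en_fuse = 0x5a;	/* made-up fuse, one bit per EU pair */
	uint16_t eu_en = 0;
	int eu;

	/* Mirrors the gen12 loop above: expand pair bits to per-EU bits. */
	for (eu = 0; eu < 16 / 2; eu++)
		if (eu_en_fuse & (1u << eu))
			eu_en |= 3u << (eu * 2);

	set_eus(&s, 0, 3, eu_en);
	printf("pair fuse 0x%02x -> EU mask 0x%04x (round trip 0x%04x)\n",
	       eu_en_fuse, eu_en, get_eus(&s, 0, 3));
	/* prints: pair fuse 0x5a -> EU mask 0x33cc (round trip 0x33cc) */
	return 0;
}

The round trip also shows why eu_stride works out to 2 bytes on gen12: each Dual-Subslice carries 16 EU bits, i.e. the u16 masks the commit stores per subslice.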
+8
drivers/gpu/drm/i915/gt/intel_sseu.h
···
 #include "i915_gem.h"
 
 struct drm_i915_private;
+struct intel_gt;
+struct drm_printer;
 
 #define GEN_MAX_SLICES		(6) /* CNL upper bound */
 #define GEN_MAX_SUBSLICES	(8) /* ICL upper bound */
···
 void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice,
 			      u32 ss_mask);
 
+void intel_sseu_info_init(struct intel_gt *gt);
+
 u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
 			 const struct intel_sseu *req_sseu);
+
+void intel_sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p);
+void intel_sseu_print_topology(const struct sseu_dev_info *sseu,
+			       struct drm_printer *p);
 
 #endif /* __INTEL_SSEU_H__ */
+1 -1
drivers/gpu/drm/i915/i915_debugfs.c
···
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct drm_printer p = drm_seq_file_printer(m);
 
-	intel_device_info_print_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
+	intel_sseu_print_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
 
 	return 0;
 }
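
The debugfs output format is unchanged by the redirection above. Going by the drm_printf() format strings in the moved intel_sseu_print_topology(), a single-slice part with two fully enabled 8-EU subslices (illustrative values, not taken from real hardware) would print:

    slice0: 2 subslice(s) (0x00000003):
    	subslice0: 8 EUs (0xff)
    	subslice1: 8 EUs (0xff)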
+1 -1
drivers/gpu/drm/i915/i915_gpu_error.c
···
 
 	intel_device_info_print_static(&error->device_info, &p);
 	intel_device_info_print_runtime(&error->runtime_info, &p);
-	intel_device_info_print_topology(&error->runtime_info.sseu, &p);
+	intel_sseu_print_topology(&error->runtime_info.sseu, &p);
 	intel_gt_info_print(&error->gt->info, &p);
 	intel_driver_caps_print(&error->driver_caps, &p);
 }
+2 -582
drivers/gpu/drm/i915/intel_device_info.c
···
 #include "display/intel_de.h"
 #include "intel_device_info.h"
 #include "i915_drv.h"
+#include "gt/intel_sseu.h"
 
 #define PLATFORM_NAME(x) [INTEL_##x] = #x
 static const char * const platform_names[] = {
···
 #undef PRINT_FLAG
 }
 
-static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
-{
-	int s;
-
-	drm_printf(p, "slice total: %u, mask=%04x\n",
-		   hweight8(sseu->slice_mask), sseu->slice_mask);
-	drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
-	for (s = 0; s < sseu->max_slices; s++) {
-		drm_printf(p, "slice%d: %u subslices, mask=%08x\n",
-			   s, intel_sseu_subslices_per_slice(sseu, s),
-			   intel_sseu_get_subslices(sseu, s));
-	}
-	drm_printf(p, "EU total: %u\n", sseu->eu_total);
-	drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
-	drm_printf(p, "has slice power gating: %s\n",
-		   yesno(sseu->has_slice_pg));
-	drm_printf(p, "has subslice power gating: %s\n",
-		   yesno(sseu->has_subslice_pg));
-	drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
-}
-
 void intel_device_info_print_runtime(const struct intel_runtime_info *info,
 				     struct drm_printer *p)
 {
-	sseu_dump(&info->sseu, p);
+	intel_sseu_dump(&info->sseu, p);
 
 	drm_printf(p, "rawclk rate: %u kHz\n", info->rawclk_freq);
 	drm_printf(p, "CS timestamp frequency: %u Hz\n",
 		   info->cs_timestamp_frequency_hz);
-}
-
-static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
-		       int subslice)
-{
-	int slice_stride = sseu->max_subslices * sseu->eu_stride;
-
-	return slice * slice_stride + subslice * sseu->eu_stride;
-}
-
-static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
-			int subslice)
-{
-	int i, offset = sseu_eu_idx(sseu, slice, subslice);
-	u16 eu_mask = 0;
-
-	for (i = 0; i < sseu->eu_stride; i++) {
-		eu_mask |= ((u16)sseu->eu_mask[offset + i]) <<
-			(i * BITS_PER_BYTE);
-	}
-
-	return eu_mask;
-}
-
-static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
-			 u16 eu_mask)
-{
-	int i, offset = sseu_eu_idx(sseu, slice, subslice);
-
-	for (i = 0; i < sseu->eu_stride; i++) {
-		sseu->eu_mask[offset + i] =
-			(eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
-	}
-}
-
-void intel_device_info_print_topology(const struct sseu_dev_info *sseu,
-				      struct drm_printer *p)
-{
-	int s, ss;
-
-	if (sseu->max_slices == 0) {
-		drm_printf(p, "Unavailable\n");
-		return;
-	}
-
-	for (s = 0; s < sseu->max_slices; s++) {
-		drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n",
-			   s, intel_sseu_subslices_per_slice(sseu, s),
-			   intel_sseu_get_subslices(sseu, s));
-
-		for (ss = 0; ss < sseu->max_subslices; ss++) {
-			u16 enabled_eus = sseu_get_eus(sseu, s, ss);
-
-			drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n",
-				   ss, hweight16(enabled_eus), enabled_eus);
-		}
-	}
-}
-
-static u16 compute_eu_total(const struct sseu_dev_info *sseu)
-{
-	u16 i, total = 0;
-
-	for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++)
-		total += hweight8(sseu->eu_mask[i]);
-
-	return total;
-}
-
-static void gen11_compute_sseu_info(struct sseu_dev_info *sseu,
-				    u8 s_en, u32 ss_en, u16 eu_en)
-{
-	int s, ss;
-
-	/* ss_en represents entire subslice mask across all slices */
-	GEM_BUG_ON(sseu->max_slices * sseu->max_subslices >
-		   sizeof(ss_en) * BITS_PER_BYTE);
-
-	for (s = 0; s < sseu->max_slices; s++) {
-		if ((s_en & BIT(s)) == 0)
-			continue;
-
-		sseu->slice_mask |= BIT(s);
-
-		intel_sseu_set_subslices(sseu, s, ss_en);
-
-		for (ss = 0; ss < sseu->max_subslices; ss++)
-			if (intel_sseu_has_subslice(sseu, s, ss))
-				sseu_set_eus(sseu, s, ss, eu_en);
-	}
-	sseu->eu_per_subslice = hweight16(eu_en);
-	sseu->eu_total = compute_eu_total(sseu);
-}
-
-static void gen12_sseu_info_init(struct drm_i915_private *dev_priv)
-{
-	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
-	struct intel_uncore *uncore = &dev_priv->uncore;
-	u8 s_en;
-	u32 dss_en;
-	u16 eu_en = 0;
-	u8 eu_en_fuse;
-	int eu;
-
-	/*
-	 * Gen12 has Dual-Subslices, which behave similarly to 2 gen11 SS.
-	 * Instead of splitting these, provide userspace with an array
-	 * of DSS to more closely represent the hardware resource.
-	 */
-	intel_sseu_set_info(sseu, 1, 6, 16);
-
-	s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
-		GEN11_GT_S_ENA_MASK;
-
-	dss_en = intel_uncore_read(uncore, GEN12_GT_DSS_ENABLE);
-
-	/* one bit per pair of EUs */
-	eu_en_fuse = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
-		       GEN11_EU_DIS_MASK);
-	for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++)
-		if (eu_en_fuse & BIT(eu))
-			eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);
-
-	gen11_compute_sseu_info(sseu, s_en, dss_en, eu_en);
-
-	/* TGL only supports slice-level power gating */
-	sseu->has_slice_pg = 1;
-}
-
-static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
-{
-	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
-	struct intel_uncore *uncore = &dev_priv->uncore;
-	u8 s_en;
-	u32 ss_en;
-	u8 eu_en;
-
-	if (IS_ELKHARTLAKE(dev_priv))
-		intel_sseu_set_info(sseu, 1, 4, 8);
-	else
-		intel_sseu_set_info(sseu, 1, 8, 8);
-
-	s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
-		GEN11_GT_S_ENA_MASK;
-	ss_en = ~intel_uncore_read(uncore, GEN11_GT_SUBSLICE_DISABLE);
-
-	eu_en = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
-		  GEN11_EU_DIS_MASK);
-
-	gen11_compute_sseu_info(sseu, s_en, ss_en, eu_en);
-
-	/* ICL has no power gating restrictions. */
-	sseu->has_slice_pg = 1;
-	sseu->has_subslice_pg = 1;
-	sseu->has_eu_pg = 1;
-}
-
-static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
-{
-	struct intel_uncore *uncore = &dev_priv->uncore;
-	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
-	const u32 fuse2 = intel_uncore_read(uncore, GEN8_FUSE2);
-	int s, ss;
-	const int eu_mask = 0xff;
-	u32 subslice_mask, eu_en;
-
-	intel_sseu_set_info(sseu, 6, 4, 8);
-
-	sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
-		GEN10_F2_S_ENA_SHIFT;
-
-	/* Slice0 */
-	eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE0);
-	for (ss = 0; ss < sseu->max_subslices; ss++)
-		sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask);
-	/* Slice1 */
-	sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask);
-	eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE1);
-	sseu_set_eus(sseu, 1, 1, eu_en & eu_mask);
-	/* Slice2 */
-	sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask);
-	sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask);
-	/* Slice3 */
-	sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask);
-	eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE2);
-	sseu_set_eus(sseu, 3, 1, eu_en & eu_mask);
-	/* Slice4 */
-	sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask);
-	sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask);
-	/* Slice5 */
-	sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask);
-	eu_en = ~intel_uncore_read(uncore, GEN10_EU_DISABLE3);
-	sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);
-
-	subslice_mask = (1 << 4) - 1;
-	subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
-			   GEN10_F2_SS_DIS_SHIFT);
-
-	for (s = 0; s < sseu->max_slices; s++) {
-		u32 subslice_mask_with_eus = subslice_mask;
-
-		for (ss = 0; ss < sseu->max_subslices; ss++) {
-			if (sseu_get_eus(sseu, s, ss) == 0)
-				subslice_mask_with_eus &= ~BIT(ss);
-		}
-
-		/*
-		 * Slice0 can have up to 3 subslices, but there are only 2 in
-		 * slice1/2.
-		 */
-		intel_sseu_set_subslices(sseu, s, s == 0 ?
-					 subslice_mask_with_eus :
-					 subslice_mask_with_eus & 0x3);
-	}
-
-	sseu->eu_total = compute_eu_total(sseu);
-
-	/*
-	 * CNL is expected to always have a uniform distribution
-	 * of EU across subslices with the exception that any one
-	 * EU in any one subslice may be fused off for die
-	 * recovery.
-	 */
-	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
-				DIV_ROUND_UP(sseu->eu_total,
-					     intel_sseu_subslice_total(sseu)) :
-				0;
-
-	/* No restrictions on Power Gating */
-	sseu->has_slice_pg = 1;
-	sseu->has_subslice_pg = 1;
-	sseu->has_eu_pg = 1;
-}
-
-static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
-{
-	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
-	u32 fuse;
-	u8 subslice_mask = 0;
-
-	fuse = intel_uncore_read(&dev_priv->uncore, CHV_FUSE_GT);
-
-	sseu->slice_mask = BIT(0);
-	intel_sseu_set_info(sseu, 1, 2, 8);
-
-	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
-		u8 disabled_mask =
-			((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >>
-			 CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
-			(((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
-			  CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);
-
-		subslice_mask |= BIT(0);
-		sseu_set_eus(sseu, 0, 0, ~disabled_mask);
-	}
-
-	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
-		u8 disabled_mask =
-			((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >>
-			 CHV_FGT_EU_DIS_SS1_R0_SHIFT) |
-			(((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
-			  CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);
-
-		subslice_mask |= BIT(1);
-		sseu_set_eus(sseu, 0, 1, ~disabled_mask);
-	}
-
-	intel_sseu_set_subslices(sseu, 0, subslice_mask);
-
-	sseu->eu_total = compute_eu_total(sseu);
-
-	/*
-	 * CHV expected to always have a uniform distribution of EU
-	 * across subslices.
-	 */
-	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
-				sseu->eu_total /
-					intel_sseu_subslice_total(sseu) :
-				0;
-	/*
-	 * CHV supports subslice power gating on devices with more than
-	 * one subslice, and supports EU power gating on devices with
-	 * more than one EU pair per subslice.
-	 */
-	sseu->has_slice_pg = 0;
-	sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1;
-	sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
-}
-
-static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
-{
-	struct intel_device_info *info = mkwrite_device_info(dev_priv);
-	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
-	struct intel_uncore *uncore = &dev_priv->uncore;
-	int s, ss;
-	u32 fuse2, eu_disable, subslice_mask;
-	const u8 eu_mask = 0xff;
-
-	fuse2 = intel_uncore_read(uncore, GEN8_FUSE2);
-	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
-
-	/* BXT has a single slice and at most 3 subslices. */
-	intel_sseu_set_info(sseu, IS_GEN9_LP(dev_priv) ? 1 : 3,
-			    IS_GEN9_LP(dev_priv) ? 3 : 4, 8);
-
-	/*
-	 * The subslice disable field is global, i.e. it applies
-	 * to each of the enabled slices.
-	 */
-	subslice_mask = (1 << sseu->max_subslices) - 1;
-	subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
-			   GEN9_F2_SS_DIS_SHIFT);
-
-	/*
-	 * Iterate through enabled slices and subslices to
-	 * count the total enabled EU.
-	 */
-	for (s = 0; s < sseu->max_slices; s++) {
-		if (!(sseu->slice_mask & BIT(s)))
-			/* skip disabled slice */
-			continue;
-
-		intel_sseu_set_subslices(sseu, s, subslice_mask);
-
-		eu_disable = intel_uncore_read(uncore, GEN9_EU_DISABLE(s));
-		for (ss = 0; ss < sseu->max_subslices; ss++) {
-			int eu_per_ss;
-			u8 eu_disabled_mask;
-
-			if (!intel_sseu_has_subslice(sseu, s, ss))
-				/* skip disabled subslice */
-				continue;
-
-			eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;
-
-			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
-
-			eu_per_ss = sseu->max_eus_per_subslice -
-				hweight8(eu_disabled_mask);
-
-			/*
-			 * Record which subslice(s) has(have) 7 EUs. we
-			 * can tune the hash used to spread work among
-			 * subslices if they are unbalanced.
-			 */
-			if (eu_per_ss == 7)
-				sseu->subslice_7eu[s] |= BIT(ss);
-		}
-	}
-
-	sseu->eu_total = compute_eu_total(sseu);
-
-	/*
-	 * SKL is expected to always have a uniform distribution
-	 * of EU across subslices with the exception that any one
-	 * EU in any one subslice may be fused off for die
-	 * recovery. BXT is expected to be perfectly uniform in EU
-	 * distribution.
-	 */
-	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
-				DIV_ROUND_UP(sseu->eu_total,
-					     intel_sseu_subslice_total(sseu)) :
-				0;
-	/*
-	 * SKL+ supports slice power gating on devices with more than
-	 * one slice, and supports EU power gating on devices with
-	 * more than one EU pair per subslice. BXT+ supports subslice
-	 * power gating on devices with more than one subslice, and
-	 * supports EU power gating on devices with more than one EU
-	 * pair per subslice.
-	 */
-	sseu->has_slice_pg =
-		!IS_GEN9_LP(dev_priv) && hweight8(sseu->slice_mask) > 1;
-	sseu->has_subslice_pg =
-		IS_GEN9_LP(dev_priv) && intel_sseu_subslice_total(sseu) > 1;
-	sseu->has_eu_pg = sseu->eu_per_subslice > 2;
-
-	if (IS_GEN9_LP(dev_priv)) {
-#define IS_SS_DISABLED(ss)	(!(sseu->subslice_mask[0] & BIT(ss)))
-		info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3;
-
-		sseu->min_eu_in_pool = 0;
-		if (info->has_pooled_eu) {
-			if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
-				sseu->min_eu_in_pool = 3;
-			else if (IS_SS_DISABLED(1))
-				sseu->min_eu_in_pool = 6;
-			else
-				sseu->min_eu_in_pool = 9;
-		}
-#undef IS_SS_DISABLED
-	}
-}
-
-static void bdw_sseu_info_init(struct drm_i915_private *dev_priv)
-{
-	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
-	struct intel_uncore *uncore = &dev_priv->uncore;
-	int s, ss;
-	u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */
-	u32 eu_disable0, eu_disable1, eu_disable2;
-
-	fuse2 = intel_uncore_read(uncore, GEN8_FUSE2);
-	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
-	intel_sseu_set_info(sseu, 3, 3, 8);
-
-	/*
-	 * The subslice disable field is global, i.e. it applies
-	 * to each of the enabled slices.
-	 */
-	subslice_mask = GENMASK(sseu->max_subslices - 1, 0);
-	subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
-			   GEN8_F2_SS_DIS_SHIFT);
-	eu_disable0 = intel_uncore_read(uncore, GEN8_EU_DISABLE0);
-	eu_disable1 = intel_uncore_read(uncore, GEN8_EU_DISABLE1);
-	eu_disable2 = intel_uncore_read(uncore, GEN8_EU_DISABLE2);
-	eu_disable[0] = eu_disable0 & GEN8_EU_DIS0_S0_MASK;
-	eu_disable[1] = (eu_disable0 >> GEN8_EU_DIS0_S1_SHIFT) |
-		((eu_disable1 & GEN8_EU_DIS1_S1_MASK) <<
-		 (32 - GEN8_EU_DIS0_S1_SHIFT));
-	eu_disable[2] = (eu_disable1 >> GEN8_EU_DIS1_S2_SHIFT) |
-		((eu_disable2 & GEN8_EU_DIS2_S2_MASK) <<
-		 (32 - GEN8_EU_DIS1_S2_SHIFT));
-
-	/*
-	 * Iterate through enabled slices and subslices to
-	 * count the total enabled EU.
-	 */
-	for (s = 0; s < sseu->max_slices; s++) {
-		if (!(sseu->slice_mask & BIT(s)))
-			/* skip disabled slice */
-			continue;
-
-		intel_sseu_set_subslices(sseu, s, subslice_mask);
-
-		for (ss = 0; ss < sseu->max_subslices; ss++) {
-			u8 eu_disabled_mask;
-			u32 n_disabled;
-
-			if (!intel_sseu_has_subslice(sseu, s, ss))
-				/* skip disabled subslice */
-				continue;
-
-			eu_disabled_mask =
-				eu_disable[s] >> (ss * sseu->max_eus_per_subslice);
-
-			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
-
-			n_disabled = hweight8(eu_disabled_mask);
-
-			/*
-			 * Record which subslices have 7 EUs.
-			 */
-			if (sseu->max_eus_per_subslice - n_disabled == 7)
-				sseu->subslice_7eu[s] |= 1 << ss;
-		}
-	}
-
-	sseu->eu_total = compute_eu_total(sseu);
-
-	/*
-	 * BDW is expected to always have a uniform distribution of EU across
-	 * subslices with the exception that any one EU in any one subslice may
-	 * be fused off for die recovery.
-	 */
-	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
-				DIV_ROUND_UP(sseu->eu_total,
-					     intel_sseu_subslice_total(sseu)) :
-				0;
-
-	/*
-	 * BDW supports slice power gating on devices with more than
-	 * one slice.
-	 */
-	sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
-	sseu->has_subslice_pg = 0;
-	sseu->has_eu_pg = 0;
-}
-
-static void hsw_sseu_info_init(struct drm_i915_private *dev_priv)
-{
-	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
-	u32 fuse1;
-	u8 subslice_mask = 0;
-	int s, ss;
-
-	/*
-	 * There isn't a register to tell us how many slices/subslices. We
-	 * work off the PCI-ids here.
-	 */
-	switch (INTEL_INFO(dev_priv)->gt) {
-	default:
-		MISSING_CASE(INTEL_INFO(dev_priv)->gt);
-		/* fall through */
-	case 1:
-		sseu->slice_mask = BIT(0);
-		subslice_mask = BIT(0);
-		break;
-	case 2:
-		sseu->slice_mask = BIT(0);
-		subslice_mask = BIT(0) | BIT(1);
-		break;
-	case 3:
-		sseu->slice_mask = BIT(0) | BIT(1);
-		subslice_mask = BIT(0) | BIT(1);
-		break;
-	}
-
-	fuse1 = intel_uncore_read(&dev_priv->uncore, HSW_PAVP_FUSE1);
-	switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
-	default:
-		MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >>
-			     HSW_F1_EU_DIS_SHIFT);
-		/* fall through */
-	case HSW_F1_EU_DIS_10EUS:
-		sseu->eu_per_subslice = 10;
-		break;
-	case HSW_F1_EU_DIS_8EUS:
-		sseu->eu_per_subslice = 8;
-		break;
-	case HSW_F1_EU_DIS_6EUS:
-		sseu->eu_per_subslice = 6;
-		break;
-	}
-
-	intel_sseu_set_info(sseu, hweight8(sseu->slice_mask),
-			    hweight8(subslice_mask),
-			    sseu->eu_per_subslice);
-
-	for (s = 0; s < sseu->max_slices; s++) {
-		intel_sseu_set_subslices(sseu, s, subslice_mask);
-
-		for (ss = 0; ss < sseu->max_subslices; ss++) {
-			sseu_set_eus(sseu, s, ss,
-				     (1UL << sseu->eu_per_subslice) - 1);
-		}
-	}
-
-	sseu->eu_total = compute_eu_total(sseu);
-
-	/* No powergating for you. */
-	sseu->has_slice_pg = 0;
-	sseu->has_subslice_pg = 0;
-	sseu->has_eu_pg = 0;
 }
 
 static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
···
 		    (dfsm & CNL_DFSM_DISPLAY_DSC_DISABLE))
 			info->display.has_dsc = 0;
 	}
-
-	/* Initialize slice/subslice/EU info */
-	if (IS_HASWELL(dev_priv))
-		hsw_sseu_info_init(dev_priv);
-	else if (IS_CHERRYVIEW(dev_priv))
-		cherryview_sseu_info_init(dev_priv);
-	else if (IS_BROADWELL(dev_priv))
-		bdw_sseu_info_init(dev_priv);
-	else if (IS_GEN(dev_priv, 9))
-		gen9_sseu_info_init(dev_priv);
-	else if (IS_GEN(dev_priv, 10))
-		gen10_sseu_info_init(dev_priv);
-	else if (IS_GEN(dev_priv, 11))
-		gen11_sseu_info_init(dev_priv);
-	else if (INTEL_GEN(dev_priv) >= 12)
-		gen12_sseu_info_init(dev_priv);
 
 	if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
 		drm_info(&dev_priv->drm,
-2
drivers/gpu/drm/i915/intel_device_info.h
···
 				     struct drm_printer *p);
 void intel_device_info_print_runtime(const struct intel_runtime_info *info,
 				     struct drm_printer *p);
-void intel_device_info_print_topology(const struct sseu_dev_info *sseu,
-				      struct drm_printer *p);
 
 void intel_driver_caps_print(const struct intel_driver_caps *caps,
 			     struct drm_printer *p);