Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/i915/wm: convert skl_watermarks.c internally to struct intel_display

Going forward, struct intel_display is the main display device data
pointer. Convert as much as possible of skl_watermarks.c to struct
intel_display.

Reviewed-by: Suraj Kandpal <suraj.kandpal@intel.com>
Link: https://lore.kernel.org/r/61ae2013c5db962e90e072be7d37d630cb7dfc34.1744119460.git.jani.nikula@intel.com
Signed-off-by: Jani Nikula <jani.nikula@intel.com>

+250 -286
drivers/gpu/drm/i915/display/skl_watermark.c
··· 35 35 */ 36 36 #define DSB_EXE_TIME 100 37 37 38 - static void skl_sagv_disable(struct drm_i915_private *i915); 38 + static void skl_sagv_disable(struct intel_display *display); 39 39 40 40 /* Stores plane specific WM parameters */ 41 41 struct skl_wm_params { ··· 70 70 * FIXME: We still don't have the proper code detect if we need to apply the WA, 71 71 * so assume we'll always need it in order to avoid underruns. 72 72 */ 73 - static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915) 73 + static bool skl_needs_memory_bw_wa(struct intel_display *display) 74 74 { 75 - return DISPLAY_VER(i915) == 9; 75 + return DISPLAY_VER(display) == 9; 76 76 } 77 77 78 78 bool ··· 82 82 } 83 83 84 84 static u32 85 - intel_sagv_block_time(struct drm_i915_private *i915) 85 + intel_sagv_block_time(struct intel_display *display) 86 86 { 87 - struct intel_display *display = &i915->display; 87 + struct drm_i915_private *i915 = to_i915(display->drm); 88 88 89 89 if (DISPLAY_VER(display) >= 14) { 90 90 u32 val; ··· 114 114 } 115 115 } 116 116 117 - static void intel_sagv_init(struct drm_i915_private *i915) 117 + static void intel_sagv_init(struct intel_display *display) 118 118 { 119 - struct intel_display *display = &i915->display; 120 - 121 119 if (!HAS_SAGV(display)) 122 120 display->sagv.status = I915_SAGV_NOT_CONTROLLED; 123 121 ··· 124 126 * For icl+ this was already determined by intel_bw_init_hw(). 
125 127 */ 126 128 if (DISPLAY_VER(display) < 11) 127 - skl_sagv_disable(i915); 129 + skl_sagv_disable(display); 128 130 129 131 drm_WARN_ON(display->drm, display->sagv.status == I915_SAGV_UNKNOWN); 130 132 131 - display->sagv.block_time_us = intel_sagv_block_time(i915); 133 + display->sagv.block_time_us = intel_sagv_block_time(display); 132 134 133 135 drm_dbg_kms(display->drm, "SAGV supported: %s, original SAGV block time: %u us\n", 134 136 str_yes_no(intel_has_sagv(display)), display->sagv.block_time_us); ··· 154 156 * - All planes can enable watermarks for latencies >= SAGV engine block time 155 157 * - We're not using an interlaced display configuration 156 158 */ 157 - static void skl_sagv_enable(struct drm_i915_private *i915) 159 + static void skl_sagv_enable(struct intel_display *display) 158 160 { 159 - struct intel_display *display = &i915->display; 161 + struct drm_i915_private *i915 = to_i915(display->drm); 160 162 int ret; 161 163 162 164 if (!intel_has_sagv(display)) 163 165 return; 164 166 165 - if (i915->display.sagv.status == I915_SAGV_ENABLED) 167 + if (display->sagv.status == I915_SAGV_ENABLED) 166 168 return; 167 169 168 - drm_dbg_kms(&i915->drm, "Enabling SAGV\n"); 170 + drm_dbg_kms(display->drm, "Enabling SAGV\n"); 169 171 ret = snb_pcode_write(&i915->uncore, GEN9_PCODE_SAGV_CONTROL, 170 172 GEN9_SAGV_ENABLE); 171 173 ··· 175 177 * Some skl systems, pre-release machines in particular, 176 178 * don't actually have SAGV. 
177 179 */ 178 - if (IS_SKYLAKE(i915) && ret == -ENXIO) { 179 - drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n"); 180 - i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED; 180 + if (display->platform.skylake && ret == -ENXIO) { 181 + drm_dbg(display->drm, "No SAGV found on system, ignoring\n"); 182 + display->sagv.status = I915_SAGV_NOT_CONTROLLED; 181 183 return; 182 184 } else if (ret < 0) { 183 - drm_err(&i915->drm, "Failed to enable SAGV\n"); 185 + drm_err(display->drm, "Failed to enable SAGV\n"); 184 186 return; 185 187 } 186 188 187 - i915->display.sagv.status = I915_SAGV_ENABLED; 189 + display->sagv.status = I915_SAGV_ENABLED; 188 190 } 189 191 190 - static void skl_sagv_disable(struct drm_i915_private *i915) 192 + static void skl_sagv_disable(struct intel_display *display) 191 193 { 192 - struct intel_display *display = &i915->display; 194 + struct drm_i915_private *i915 = to_i915(display->drm); 193 195 int ret; 194 196 195 197 if (!intel_has_sagv(display)) 196 198 return; 197 199 198 - if (i915->display.sagv.status == I915_SAGV_DISABLED) 200 + if (display->sagv.status == I915_SAGV_DISABLED) 199 201 return; 200 202 201 - drm_dbg_kms(&i915->drm, "Disabling SAGV\n"); 203 + drm_dbg_kms(display->drm, "Disabling SAGV\n"); 202 204 /* bspec says to keep retrying for at least 1 ms */ 203 205 ret = skl_pcode_request(&i915->uncore, GEN9_PCODE_SAGV_CONTROL, 204 206 GEN9_SAGV_DISABLE, ··· 208 210 * Some skl systems, pre-release machines in particular, 209 211 * don't actually have SAGV. 
210 212 */ 211 - if (IS_SKYLAKE(i915) && ret == -ENXIO) { 212 - drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n"); 213 - i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED; 213 + if (display->platform.skylake && ret == -ENXIO) { 214 + drm_dbg(display->drm, "No SAGV found on system, ignoring\n"); 215 + display->sagv.status = I915_SAGV_NOT_CONTROLLED; 214 216 return; 215 217 } else if (ret < 0) { 216 - drm_err(&i915->drm, "Failed to disable SAGV (%d)\n", ret); 218 + drm_err(display->drm, "Failed to disable SAGV (%d)\n", ret); 217 219 return; 218 220 } 219 221 220 - i915->display.sagv.status = I915_SAGV_DISABLED; 222 + display->sagv.status = I915_SAGV_DISABLED; 221 223 } 222 224 223 225 static void skl_sagv_pre_plane_update(struct intel_atomic_state *state) 224 226 { 225 227 struct intel_display *display = to_intel_display(state); 226 - struct drm_i915_private *i915 = to_i915(state->base.dev); 227 228 const struct intel_bw_state *new_bw_state = 228 229 intel_atomic_get_new_bw_state(state); 229 230 ··· 230 233 return; 231 234 232 235 if (!intel_can_enable_sagv(display, new_bw_state)) 233 - skl_sagv_disable(i915); 236 + skl_sagv_disable(display); 234 237 } 235 238 236 239 static void skl_sagv_post_plane_update(struct intel_atomic_state *state) 237 240 { 238 241 struct intel_display *display = to_intel_display(state); 239 - struct drm_i915_private *i915 = to_i915(state->base.dev); 240 242 const struct intel_bw_state *new_bw_state = 241 243 intel_atomic_get_new_bw_state(state); 242 244 ··· 243 247 return; 244 248 245 249 if (intel_can_enable_sagv(display, new_bw_state)) 246 - skl_sagv_enable(i915); 250 + skl_sagv_enable(display); 247 251 } 248 252 249 253 static void icl_sagv_pre_plane_update(struct intel_atomic_state *state) 250 254 { 251 255 struct intel_display *display = to_intel_display(state); 252 - struct drm_i915_private *i915 = to_i915(display->drm); 253 256 const struct intel_bw_state *old_bw_state = 254 257 intel_atomic_get_old_bw_state(state); 255 
258 const struct intel_bw_state *new_bw_state = ··· 266 271 267 272 WARN_ON(!new_bw_state->base.changed); 268 273 269 - drm_dbg_kms(&i915->drm, "Restricting QGV points: 0x%x -> 0x%x\n", 274 + drm_dbg_kms(display->drm, "Restricting QGV points: 0x%x -> 0x%x\n", 270 275 old_mask, new_mask); 271 276 272 277 /* ··· 281 286 static void icl_sagv_post_plane_update(struct intel_atomic_state *state) 282 287 { 283 288 struct intel_display *display = to_intel_display(state); 284 - struct drm_i915_private *i915 = to_i915(display->drm); 285 289 const struct intel_bw_state *old_bw_state = 286 290 intel_atomic_get_old_bw_state(state); 287 291 const struct intel_bw_state *new_bw_state = ··· 298 304 299 305 WARN_ON(!new_bw_state->base.changed); 300 306 301 - drm_dbg_kms(&i915->drm, "Relaxing QGV points: 0x%x -> 0x%x\n", 307 + drm_dbg_kms(display->drm, "Relaxing QGV points: 0x%x -> 0x%x\n", 302 308 old_mask, new_mask); 303 309 304 310 /* ··· 313 319 void intel_sagv_pre_plane_update(struct intel_atomic_state *state) 314 320 { 315 321 struct intel_display *display = to_intel_display(state); 316 - struct drm_i915_private *i915 = to_i915(state->base.dev); 317 322 318 323 /* 319 324 * Just return if we can't control SAGV or don't have it. ··· 324 331 if (!intel_has_sagv(display)) 325 332 return; 326 333 327 - if (DISPLAY_VER(i915) >= 11) 334 + if (DISPLAY_VER(display) >= 11) 328 335 icl_sagv_pre_plane_update(state); 329 336 else 330 337 skl_sagv_pre_plane_update(state); ··· 333 340 void intel_sagv_post_plane_update(struct intel_atomic_state *state) 334 341 { 335 342 struct intel_display *display = to_intel_display(state); 336 - struct drm_i915_private *i915 = to_i915(state->base.dev); 337 343 338 344 /* 339 345 * Just return if we can't control SAGV or don't have it. 
··· 344 352 if (!intel_has_sagv(display)) 345 353 return; 346 354 347 - if (DISPLAY_VER(i915) >= 11) 355 + if (DISPLAY_VER(display) >= 11) 348 356 icl_sagv_post_plane_update(state); 349 357 else 350 358 skl_sagv_post_plane_update(state); ··· 354 362 { 355 363 struct intel_display *display = to_intel_display(crtc_state); 356 364 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 357 - struct drm_i915_private *i915 = to_i915(crtc->base.dev); 358 365 enum plane_id plane_id; 359 366 int max_level = INT_MAX; 360 367 ··· 376 385 continue; 377 386 378 387 /* Find the highest enabled wm level for this plane */ 379 - for (level = i915->display.wm.num_levels - 1; 388 + for (level = display->wm.num_levels - 1; 380 389 !wm->wm[level].enable; --level) 381 390 { } 382 391 ··· 424 433 425 434 bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) 426 435 { 427 - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 428 - struct drm_i915_private *i915 = to_i915(crtc->base.dev); 436 + struct intel_display *display = to_intel_display(crtc_state); 429 437 430 - if (!i915->display.params.enable_sagv) 438 + if (!display->params.enable_sagv) 431 439 return false; 432 440 433 441 /* ··· 437 447 if (crtc_state->inherited) 438 448 return false; 439 449 440 - if (DISPLAY_VER(i915) >= 12) 450 + if (DISPLAY_VER(display) >= 12) 441 451 return tgl_crtc_can_enable_sagv(crtc_state); 442 452 else 443 453 return skl_crtc_can_enable_sagv(crtc_state); ··· 462 472 return end; 463 473 } 464 474 465 - static int intel_dbuf_slice_size(struct drm_i915_private *i915) 475 + static int intel_dbuf_slice_size(struct intel_display *display) 466 476 { 467 - return DISPLAY_INFO(i915)->dbuf.size / 468 - hweight8(DISPLAY_INFO(i915)->dbuf.slice_mask); 477 + return DISPLAY_INFO(display)->dbuf.size / 478 + hweight8(DISPLAY_INFO(display)->dbuf.slice_mask); 469 479 } 470 480 471 481 static void 472 - skl_ddb_entry_for_slices(struct drm_i915_private *i915, u8 slice_mask, 482 + 
skl_ddb_entry_for_slices(struct intel_display *display, u8 slice_mask, 473 483 struct skl_ddb_entry *ddb) 474 484 { 475 - int slice_size = intel_dbuf_slice_size(i915); 485 + int slice_size = intel_dbuf_slice_size(display); 476 486 477 487 if (!slice_mask) { 478 488 ddb->start = 0; ··· 484 494 ddb->end = fls(slice_mask) * slice_size; 485 495 486 496 WARN_ON(ddb->start >= ddb->end); 487 - WARN_ON(ddb->end > DISPLAY_INFO(i915)->dbuf.size); 497 + WARN_ON(ddb->end > DISPLAY_INFO(display)->dbuf.size); 488 498 } 489 499 490 - static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask) 500 + static unsigned int mbus_ddb_offset(struct intel_display *display, u8 slice_mask) 491 501 { 492 502 struct skl_ddb_entry ddb; 493 503 ··· 496 506 else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4))) 497 507 slice_mask = BIT(DBUF_S3); 498 508 499 - skl_ddb_entry_for_slices(i915, slice_mask, &ddb); 509 + skl_ddb_entry_for_slices(display, slice_mask, &ddb); 500 510 501 511 return ddb.start; 502 512 } ··· 504 514 u32 skl_ddb_dbuf_slice_mask(struct intel_display *display, 505 515 const struct skl_ddb_entry *entry) 506 516 { 507 - struct drm_i915_private *i915 = to_i915(display->drm); 508 - int slice_size = intel_dbuf_slice_size(i915); 517 + int slice_size = intel_dbuf_slice_size(display); 509 518 enum dbuf_slice start_slice, end_slice; 510 519 u8 slice_mask = 0; 511 520 ··· 550 561 unsigned int *weight_end, 551 562 unsigned int *weight_total) 552 563 { 553 - struct drm_i915_private *i915 = 554 - to_i915(dbuf_state->base.state->base.dev); 564 + struct intel_display *display = to_intel_display(dbuf_state->base.state->base.dev); 555 565 enum pipe pipe; 556 566 557 567 *weight_start = 0; 558 568 *weight_end = 0; 559 569 *weight_total = 0; 560 570 561 - for_each_pipe(i915, pipe) { 571 + for_each_pipe(display, pipe) { 562 572 int weight = dbuf_state->weight[pipe]; 563 573 564 574 /* ··· 583 595 static int 584 596 skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct 
intel_crtc *crtc) 585 597 { 586 - struct drm_i915_private *i915 = to_i915(crtc->base.dev); 598 + struct intel_display *display = to_intel_display(crtc); 587 599 unsigned int weight_total, weight_start, weight_end; 588 600 const struct intel_dbuf_state *old_dbuf_state = 589 601 intel_atomic_get_old_dbuf_state(state); ··· 605 617 606 618 dbuf_slice_mask = new_dbuf_state->slices[pipe]; 607 619 608 - skl_ddb_entry_for_slices(i915, dbuf_slice_mask, &ddb_slices); 609 - mbus_offset = mbus_ddb_offset(i915, dbuf_slice_mask); 620 + skl_ddb_entry_for_slices(display, dbuf_slice_mask, &ddb_slices); 621 + mbus_offset = mbus_ddb_offset(display, dbuf_slice_mask); 610 622 ddb_range_size = skl_ddb_entry_size(&ddb_slices); 611 623 612 624 intel_crtc_dbuf_weights(new_dbuf_state, pipe, ··· 640 652 crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start; 641 653 crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end; 642 654 643 - drm_dbg_kms(&i915->drm, 655 + drm_dbg_kms(display->drm, 644 656 "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n", 645 657 crtc->base.base.id, crtc->base.name, 646 658 old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe], ··· 665 677 const struct skl_wm_level *result_prev, 666 678 struct skl_wm_level *result /* out */); 667 679 668 - static unsigned int skl_wm_latency(struct drm_i915_private *i915, int level, 680 + static unsigned int skl_wm_latency(struct intel_display *display, int level, 669 681 const struct skl_wm_params *wp) 670 682 { 671 - struct intel_display *display = &i915->display; 672 - unsigned int latency = i915->display.wm.skl_latency[level]; 683 + unsigned int latency = display->wm.skl_latency[level]; 673 684 674 685 if (latency == 0) 675 686 return 0; ··· 677 690 * WaIncreaseLatencyIPCEnabled: kbl,cfl 678 691 * Display WA #1141: kbl,cfl 679 692 */ 680 - if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) && 681 - 
skl_watermark_ipc_enabled(display)) 693 + if ((display->platform.kabylake || display->platform.coffeelake || 694 + display->platform.cometlake) && skl_watermark_ipc_enabled(display)) 682 695 latency += 4; 683 696 684 - if (skl_needs_memory_bw_wa(i915) && wp && wp->x_tiled) 697 + if (skl_needs_memory_bw_wa(display) && wp && wp->x_tiled) 685 698 latency += 15; 686 699 687 700 return latency; ··· 691 704 skl_cursor_allocation(const struct intel_crtc_state *crtc_state, 692 705 int num_active) 693 706 { 707 + struct intel_display *display = to_intel_display(crtc_state); 694 708 struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor); 695 - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 696 709 struct skl_wm_level wm = {}; 697 710 int ret, min_ddb_alloc = 0; 698 711 struct skl_wm_params wp; ··· 703 716 DRM_FORMAT_MOD_LINEAR, 704 717 DRM_MODE_ROTATE_0, 705 718 crtc_state->pixel_rate, &wp, 0, 0); 706 - drm_WARN_ON(&i915->drm, ret); 719 + drm_WARN_ON(display->drm, ret); 707 720 708 - for (level = 0; level < i915->display.wm.num_levels; level++) { 709 - unsigned int latency = skl_wm_latency(i915, level, &wp); 721 + for (level = 0; level < display->wm.num_levels; level++) { 722 + unsigned int latency = skl_wm_latency(display, level, &wp); 710 723 711 724 skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm); 712 725 if (wm.min_ddb_alloc == U16_MAX) ··· 728 741 } 729 742 730 743 static void 731 - skl_ddb_get_hw_plane_state(struct drm_i915_private *i915, 744 + skl_ddb_get_hw_plane_state(struct intel_display *display, 732 745 const enum pipe pipe, 733 746 const enum plane_id plane_id, 734 747 struct skl_ddb_entry *ddb, 735 748 struct skl_ddb_entry *ddb_y, 736 749 u16 *min_ddb, u16 *interim_ddb) 737 750 { 738 - struct intel_display *display = &i915->display; 739 751 u32 val; 740 752 741 753 /* Cursor doesn't support NV12/planar, so no extra calculation needed */ ··· 767 781 u16 *min_ddb, u16 *interim_ddb) 768 782 { 769 783 
struct intel_display *display = to_intel_display(crtc); 770 - struct drm_i915_private *i915 = to_i915(crtc->base.dev); 771 784 enum intel_display_power_domain power_domain; 772 785 enum pipe pipe = crtc->pipe; 773 786 intel_wakeref_t wakeref; ··· 778 793 return; 779 794 780 795 for_each_plane_id_on_crtc(crtc, plane_id) 781 - skl_ddb_get_hw_plane_state(i915, pipe, 796 + skl_ddb_get_hw_plane_state(display, pipe, 782 797 plane_id, 783 798 &ddb[plane_id], 784 799 &ddb_y[plane_id], ··· 1296 1311 1297 1312 static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus) 1298 1313 { 1299 - struct drm_i915_private *i915 = to_i915(crtc->base.dev); 1314 + struct intel_display *display = to_intel_display(crtc); 1300 1315 enum pipe pipe = crtc->pipe; 1301 1316 1302 - if (IS_DG2(i915)) 1317 + if (display->platform.dg2) 1303 1318 return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus); 1304 - else if (DISPLAY_VER(i915) >= 13) 1319 + else if (DISPLAY_VER(display) >= 13) 1305 1320 return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus); 1306 - else if (DISPLAY_VER(i915) == 12) 1321 + else if (DISPLAY_VER(display) == 12) 1307 1322 return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus); 1308 - else if (DISPLAY_VER(i915) == 11) 1323 + else if (DISPLAY_VER(display) == 11) 1309 1324 return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus); 1310 1325 /* 1311 1326 * For anything else just return one slice yet. 
··· 1345 1360 static u64 1346 1361 skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state) 1347 1362 { 1363 + struct intel_display *display = to_intel_display(crtc_state); 1348 1364 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1349 - struct drm_i915_private *i915 = to_i915(crtc->base.dev); 1350 1365 enum plane_id plane_id; 1351 1366 u64 data_rate = 0; 1352 1367 ··· 1356 1371 1357 1372 data_rate += crtc_state->rel_data_rate[plane_id]; 1358 1373 1359 - if (DISPLAY_VER(i915) < 11) 1374 + if (DISPLAY_VER(display) < 11) 1360 1375 data_rate += crtc_state->rel_data_rate_y[plane_id]; 1361 1376 } 1362 1377 ··· 1418 1433 } 1419 1434 } 1420 1435 1421 - static bool skl_need_wm_copy_wa(struct drm_i915_private *i915, int level, 1436 + static bool skl_need_wm_copy_wa(struct intel_display *display, int level, 1422 1437 const struct skl_plane_wm *wm) 1423 1438 { 1424 1439 /* ··· 1472 1487 skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state, 1473 1488 struct intel_crtc *crtc) 1474 1489 { 1475 - struct drm_i915_private *i915 = to_i915(crtc->base.dev); 1476 1490 struct intel_crtc_state *crtc_state = 1477 1491 intel_atomic_get_new_crtc_state(state, crtc); 1478 1492 const struct intel_dbuf_state *dbuf_state = ··· 1513 1529 * Find the highest watermark level for which we can satisfy the block 1514 1530 * requirement of active planes. 
1515 1531 */ 1516 - for (level = i915->display.wm.num_levels - 1; level >= 0; level--) { 1532 + for (level = display->wm.num_levels - 1; level >= 0; level--) { 1517 1533 blocks = 0; 1518 1534 for_each_plane_id_on_crtc(crtc, plane_id) { 1519 1535 const struct skl_plane_wm *wm = ··· 1524 1540 &crtc_state->wm.skl.plane_ddb[plane_id]; 1525 1541 1526 1542 if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) { 1527 - drm_WARN_ON(&i915->drm, 1543 + drm_WARN_ON(display->drm, 1528 1544 wm->wm[level].min_ddb_alloc != U16_MAX); 1529 1545 blocks = U32_MAX; 1530 1546 break; ··· 1543 1559 } 1544 1560 1545 1561 if (level < 0) { 1546 - drm_dbg_kms(&i915->drm, 1562 + drm_dbg_kms(display->drm, 1547 1563 "Requested display configuration exceeds system DDB limitations"); 1548 - drm_dbg_kms(&i915->drm, "minimum required %d/%d\n", 1564 + drm_dbg_kms(display->drm, "minimum required %d/%d\n", 1549 1565 blocks, iter.size); 1550 1566 return -EINVAL; 1551 1567 } ··· 1573 1589 if (plane_id == PLANE_CURSOR) 1574 1590 continue; 1575 1591 1576 - if (DISPLAY_VER(i915) < 11 && 1592 + if (DISPLAY_VER(display) < 11 && 1577 1593 crtc_state->nv12_planes & BIT(plane_id)) { 1578 1594 skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level], 1579 1595 crtc_state->rel_data_rate_y[plane_id]); ··· 1589 1605 *interim_ddb = wm->sagv.wm0.min_ddb_alloc; 1590 1606 } 1591 1607 } 1592 - drm_WARN_ON(&i915->drm, iter.size != 0 || iter.data_rate != 0); 1608 + drm_WARN_ON(display->drm, iter.size != 0 || iter.data_rate != 0); 1593 1609 1594 1610 /* 1595 1611 * When we calculated watermark values we didn't know how high ··· 1597 1613 * all levels as "enabled." Go back now and disable the ones 1598 1614 * that aren't actually possible. 
1599 1615 */ 1600 - for (level++; level < i915->display.wm.num_levels; level++) { 1616 + for (level++; level < display->wm.num_levels; level++) { 1601 1617 for_each_plane_id_on_crtc(crtc, plane_id) { 1602 1618 const struct skl_ddb_entry *ddb = 1603 1619 &crtc_state->wm.skl.plane_ddb[plane_id]; ··· 1606 1622 struct skl_plane_wm *wm = 1607 1623 &crtc_state->wm.skl.optimal.planes[plane_id]; 1608 1624 1609 - if (DISPLAY_VER(i915) < 11 && 1625 + if (DISPLAY_VER(display) < 11 && 1610 1626 crtc_state->nv12_planes & BIT(plane_id)) 1611 1627 skl_check_nv12_wm_level(&wm->wm[level], 1612 1628 &wm->uv_wm[level], ··· 1614 1630 else 1615 1631 skl_check_wm_level(&wm->wm[level], ddb); 1616 1632 1617 - if (skl_need_wm_copy_wa(i915, level, wm)) { 1633 + if (skl_need_wm_copy_wa(display, level, wm)) { 1618 1634 wm->wm[level].blocks = wm->wm[level - 1].blocks; 1619 1635 wm->wm[level].lines = wm->wm[level - 1].lines; 1620 1636 wm->wm[level].ignore_lines = wm->wm[level - 1].ignore_lines; ··· 1636 1652 struct skl_plane_wm *wm = 1637 1653 &crtc_state->wm.skl.optimal.planes[plane_id]; 1638 1654 1639 - if (DISPLAY_VER(i915) < 11 && 1655 + if (DISPLAY_VER(display) < 11 && 1640 1656 crtc_state->nv12_planes & BIT(plane_id)) { 1641 1657 skl_check_wm_level(&wm->trans_wm, ddb_y); 1642 1658 } else { ··· 1662 1678 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that. 
1663 1679 */ 1664 1680 static uint_fixed_16_16_t 1665 - skl_wm_method1(const struct drm_i915_private *i915, u32 pixel_rate, 1681 + skl_wm_method1(struct intel_display *display, u32 pixel_rate, 1666 1682 u8 cpp, u32 latency, u32 dbuf_block_size) 1667 1683 { 1668 1684 u32 wm_intermediate_val; ··· 1674 1690 wm_intermediate_val = latency * pixel_rate * cpp; 1675 1691 ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size); 1676 1692 1677 - if (DISPLAY_VER(i915) >= 10) 1693 + if (DISPLAY_VER(display) >= 10) 1678 1694 ret = add_fixed16_u32(ret, 1); 1679 1695 1680 1696 return ret; ··· 1700 1716 static uint_fixed_16_16_t 1701 1717 intel_get_linetime_us(const struct intel_crtc_state *crtc_state) 1702 1718 { 1703 - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 1719 + struct intel_display *display = to_intel_display(crtc_state); 1704 1720 u32 pixel_rate; 1705 1721 u32 crtc_htotal; 1706 1722 uint_fixed_16_16_t linetime_us; ··· 1710 1726 1711 1727 pixel_rate = crtc_state->pixel_rate; 1712 1728 1713 - if (drm_WARN_ON(&i915->drm, pixel_rate == 0)) 1729 + if (drm_WARN_ON(display->drm, pixel_rate == 0)) 1714 1730 return u32_to_fixed16(0); 1715 1731 1716 1732 crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal; ··· 1726 1742 u32 plane_pixel_rate, struct skl_wm_params *wp, 1727 1743 int color_plane, unsigned int pan_x) 1728 1744 { 1729 - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1730 1745 struct intel_display *display = to_intel_display(crtc_state); 1731 - struct drm_i915_private *i915 = to_i915(crtc->base.dev); 1732 1746 u32 interm_pbpl; 1733 1747 1734 1748 /* only planar format has two planes */ 1735 1749 if (color_plane == 1 && 1736 1750 !intel_format_info_is_yuv_semiplanar(format, modifier)) { 1737 - drm_dbg_kms(&i915->drm, 1751 + drm_dbg_kms(display->drm, 1738 1752 "Non planar format have single plane\n"); 1739 1753 return -EINVAL; 1740 1754 } ··· 1750 1768 wp->cpp = format->cpp[color_plane]; 1751 1769 wp->plane_pixel_rate = 
plane_pixel_rate; 1752 1770 1753 - if (DISPLAY_VER(i915) >= 11 && 1771 + if (DISPLAY_VER(display) >= 11 && 1754 1772 modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1) 1755 1773 wp->dbuf_block_size = 256; 1756 1774 else ··· 1775 1793 wp->y_min_scanlines = 4; 1776 1794 } 1777 1795 1778 - if (skl_needs_memory_bw_wa(i915)) 1796 + if (skl_needs_memory_bw_wa(display)) 1779 1797 wp->y_min_scanlines *= 2; 1780 1798 1781 1799 wp->plane_bytes_per_line = wp->width * wp->cpp; ··· 1786 1804 1787 1805 if (DISPLAY_VER(display) >= 30) 1788 1806 interm_pbpl += (pan_x != 0); 1789 - else if (DISPLAY_VER(i915) >= 10) 1807 + else if (DISPLAY_VER(display) >= 10) 1790 1808 interm_pbpl++; 1791 1809 1792 1810 wp->plane_blocks_per_line = div_fixed16(interm_pbpl, ··· 1795 1813 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line, 1796 1814 wp->dbuf_block_size); 1797 1815 1798 - if (!wp->x_tiled || DISPLAY_VER(i915) >= 10) 1816 + if (!wp->x_tiled || DISPLAY_VER(display) >= 10) 1799 1817 interm_pbpl++; 1800 1818 1801 1819 wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl); ··· 1832 1850 plane_state->uapi.src.x1); 1833 1851 } 1834 1852 1835 - static bool skl_wm_has_lines(struct drm_i915_private *i915, int level) 1853 + static bool skl_wm_has_lines(struct intel_display *display, int level) 1836 1854 { 1837 - if (DISPLAY_VER(i915) >= 10) 1855 + if (DISPLAY_VER(display) >= 10) 1838 1856 return true; 1839 1857 1840 1858 /* The number of lines are ignored for the level 0 watermark. 
*/ 1841 1859 return level > 0; 1842 1860 } 1843 1861 1844 - static int skl_wm_max_lines(struct drm_i915_private *i915) 1862 + static int skl_wm_max_lines(struct intel_display *display) 1845 1863 { 1846 - if (DISPLAY_VER(i915) >= 13) 1864 + if (DISPLAY_VER(display) >= 13) 1847 1865 return 255; 1848 1866 else 1849 1867 return 31; ··· 1864 1882 const struct skl_wm_level *result_prev, 1865 1883 struct skl_wm_level *result /* out */) 1866 1884 { 1867 - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 1885 + struct intel_display *display = to_intel_display(crtc_state); 1868 1886 uint_fixed_16_16_t method1, method2; 1869 1887 uint_fixed_16_16_t selected_result; 1870 1888 u32 blocks, lines, min_ddb_alloc = 0; ··· 1876 1894 return; 1877 1895 } 1878 1896 1879 - method1 = skl_wm_method1(i915, wp->plane_pixel_rate, 1897 + method1 = skl_wm_method1(display, wp->plane_pixel_rate, 1880 1898 wp->cpp, latency, wp->dbuf_block_size); 1881 1899 method2 = skl_wm_method2(wp->plane_pixel_rate, 1882 1900 crtc_state->hw.pipe_mode.crtc_htotal, ··· 1891 1909 (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) { 1892 1910 selected_result = method2; 1893 1911 } else if (latency >= wp->linetime_us) { 1894 - if (DISPLAY_VER(i915) == 9) 1912 + if (DISPLAY_VER(display) == 9) 1895 1913 selected_result = min_fixed16(method1, method2); 1896 1914 else 1897 1915 selected_result = method2; ··· 1901 1919 } 1902 1920 1903 1921 blocks = fixed16_to_u32_round_up(selected_result); 1904 - if (DISPLAY_VER(i915) < 30) 1922 + if (DISPLAY_VER(display) < 30) 1905 1923 blocks++; 1906 1924 1907 1925 /* ··· 1920 1938 * channels' impact on the level 0 memory latency and the relevant 1921 1939 * wm calculations. 
1922 1940 */ 1923 - if (skl_wm_has_lines(i915, level)) 1941 + if (skl_wm_has_lines(display, level)) 1924 1942 blocks = max(blocks, 1925 1943 fixed16_to_u32_round_up(wp->plane_blocks_per_line)); 1926 1944 lines = div_round_up_fixed16(selected_result, 1927 1945 wp->plane_blocks_per_line); 1928 1946 1929 - if (DISPLAY_VER(i915) == 9) { 1947 + if (DISPLAY_VER(display) == 9) { 1930 1948 /* Display WA #1125: skl,bxt,kbl */ 1931 1949 if (level == 0 && wp->rc_surface) 1932 1950 blocks += fixed16_to_u32_round_up(wp->y_tile_minimum); ··· 1951 1969 } 1952 1970 } 1953 1971 1954 - if (DISPLAY_VER(i915) >= 11) { 1972 + if (DISPLAY_VER(display) >= 11) { 1955 1973 if (wp->y_tiled) { 1956 1974 int extra_lines; 1957 1975 ··· 1968 1986 } 1969 1987 } 1970 1988 1971 - if (!skl_wm_has_lines(i915, level)) 1989 + if (!skl_wm_has_lines(display, level)) 1972 1990 lines = 0; 1973 1991 1974 - if (lines > skl_wm_max_lines(i915)) { 1992 + if (lines > skl_wm_max_lines(display)) { 1975 1993 /* reject it */ 1976 1994 result->min_ddb_alloc = U16_MAX; 1977 1995 return; ··· 1990 2008 result->enable = true; 1991 2009 result->auto_min_alloc_wm_enable = xe3_auto_min_alloc_capable(plane, level); 1992 2010 1993 - if (DISPLAY_VER(i915) < 12 && i915->display.sagv.block_time_us) 1994 - result->can_sagv = latency >= i915->display.sagv.block_time_us; 2011 + if (DISPLAY_VER(display) < 12 && display->sagv.block_time_us) 2012 + result->can_sagv = latency >= display->sagv.block_time_us; 1995 2013 } 1996 2014 1997 2015 static void ··· 2000 2018 const struct skl_wm_params *wm_params, 2001 2019 struct skl_wm_level *levels) 2002 2020 { 2003 - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 2021 + struct intel_display *display = to_intel_display(crtc_state); 2004 2022 struct skl_wm_level *result_prev = &levels[0]; 2005 2023 int level; 2006 2024 2007 - for (level = 0; level < i915->display.wm.num_levels; level++) { 2025 + for (level = 0; level < display->wm.num_levels; level++) { 2008 2026 struct 
skl_wm_level *result = &levels[level]; 2009 - unsigned int latency = skl_wm_latency(i915, level, wm_params); 2027 + unsigned int latency = skl_wm_latency(display, level, wm_params); 2010 2028 2011 2029 skl_compute_plane_wm(crtc_state, plane, level, latency, 2012 2030 wm_params, result_prev, result); ··· 2020 2038 const struct skl_wm_params *wm_params, 2021 2039 struct skl_plane_wm *plane_wm) 2022 2040 { 2023 - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 2041 + struct intel_display *display = to_intel_display(crtc_state); 2024 2042 struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0; 2025 2043 struct skl_wm_level *levels = plane_wm->wm; 2026 2044 unsigned int latency = 0; 2027 2045 2028 - if (i915->display.sagv.block_time_us) 2029 - latency = i915->display.sagv.block_time_us + 2030 - skl_wm_latency(i915, 0, wm_params); 2046 + if (display->sagv.block_time_us) 2047 + latency = display->sagv.block_time_us + 2048 + skl_wm_latency(display, 0, wm_params); 2031 2049 2032 2050 skl_compute_plane_wm(crtc_state, plane, 0, latency, 2033 2051 wm_params, &levels[0], 2034 2052 sagv_wm); 2035 2053 } 2036 2054 2037 - static void skl_compute_transition_wm(struct drm_i915_private *i915, 2055 + static void skl_compute_transition_wm(struct intel_display *display, 2038 2056 struct skl_wm_level *trans_wm, 2039 2057 const struct skl_wm_level *wm0, 2040 2058 const struct skl_wm_params *wp) 2041 2059 { 2042 - struct intel_display *display = &i915->display; 2043 2060 u16 trans_min, trans_amount, trans_y_tile_min; 2044 2061 u16 wm0_blocks, trans_offset, blocks; 2045 2062 ··· 2050 2069 * WaDisableTWM:skl,kbl,cfl,bxt 2051 2070 * Transition WM are not recommended by HW team for GEN9 2052 2071 */ 2053 - if (DISPLAY_VER(i915) == 9) 2072 + if (DISPLAY_VER(display) == 9) 2054 2073 return; 2055 2074 2056 - if (DISPLAY_VER(i915) >= 11) 2075 + if (DISPLAY_VER(display) >= 11) 2057 2076 trans_min = 4; 2058 2077 else 2059 2078 trans_min = 14; 2060 2079 2061 2080 /* Display WA #1140: 
glk,cnl */ 2062 - if (DISPLAY_VER(i915) == 10) 2081 + if (DISPLAY_VER(display) == 10) 2063 2082 trans_amount = 0; 2064 2083 else 2065 2084 trans_amount = 10; /* This is configurable amount */ ··· 2101 2120 const struct intel_plane_state *plane_state, 2102 2121 struct intel_plane *plane, int color_plane) 2103 2122 { 2104 - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2105 - struct drm_i915_private *i915 = to_i915(crtc->base.dev); 2123 + struct intel_display *display = to_intel_display(crtc_state); 2106 2124 struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id]; 2107 2125 struct skl_wm_params wm_params; 2108 2126 int ret; ··· 2113 2133 2114 2134 skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm); 2115 2135 2116 - skl_compute_transition_wm(i915, &wm->trans_wm, 2136 + skl_compute_transition_wm(display, &wm->trans_wm, 2117 2137 &wm->wm[0], &wm_params); 2118 2138 2119 - if (DISPLAY_VER(i915) >= 12) { 2139 + if (DISPLAY_VER(display) >= 12) { 2120 2140 tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm); 2121 2141 2122 - skl_compute_transition_wm(i915, &wm->sagv.trans_wm, 2142 + skl_compute_transition_wm(display, &wm->sagv.trans_wm, 2123 2143 &wm->sagv.wm0, &wm_params); 2124 2144 } 2125 2145 ··· 2179 2199 static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, 2180 2200 const struct intel_plane_state *plane_state) 2181 2201 { 2202 + struct intel_display *display = to_intel_display(plane_state); 2182 2203 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 2183 - struct drm_i915_private *i915 = to_i915(plane->base.dev); 2184 2204 enum plane_id plane_id = plane->id; 2185 2205 struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id]; 2186 2206 int ret; ··· 2194 2214 if (plane_state->planar_linked_plane) { 2195 2215 const struct drm_framebuffer *fb = plane_state->hw.fb; 2196 2216 2197 - drm_WARN_ON(&i915->drm, 2217 + drm_WARN_ON(display->drm, 2198 2218 !intel_wm_plane_visible(crtc_state, 
plane_state)); 2199 - drm_WARN_ON(&i915->drm, !fb->format->is_yuv || 2219 + drm_WARN_ON(display->drm, !fb->format->is_yuv || 2200 2220 fb->format->num_planes == 1); 2201 2221 2202 2222 ret = skl_build_plane_wm_single(crtc_state, plane_state, ··· 2336 2356 static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state, 2337 2357 int wm0_lines) 2338 2358 { 2339 - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2340 - struct drm_i915_private *i915 = to_i915(crtc->base.dev); 2359 + struct intel_display *display = to_intel_display(crtc_state); 2341 2360 int level; 2342 2361 2343 - for (level = i915->display.wm.num_levels - 1; level >= 0; level--) { 2362 + for (level = display->wm.num_levels - 1; level >= 0; level--) { 2344 2363 int latency; 2345 2364 2346 2365 /* FIXME should we care about the latency w/a's? */ 2347 - latency = skl_wm_latency(i915, level, NULL); 2366 + latency = skl_wm_latency(display, level, NULL); 2348 2367 if (latency == 0) 2349 2368 continue; 2350 2369 ··· 2360 2381 2361 2382 static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state) 2362 2383 { 2384 + struct intel_display *display = to_intel_display(crtc_state); 2363 2385 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2364 - struct drm_i915_private *i915 = to_i915(crtc->base.dev); 2365 2386 int wm0_lines, level; 2366 2387 2367 2388 if (!crtc_state->hw.active) ··· 2377 2398 * PSR needs to toggle LATENCY_REPORTING_REMOVED_PIPE_* 2378 2399 * based on whether we're limited by the vblank duration. 
2379 2400 */ 2380 - crtc_state->wm_level_disabled = level < i915->display.wm.num_levels - 1; 2401 + crtc_state->wm_level_disabled = level < display->wm.num_levels - 1; 2381 2402 2382 - for (level++; level < i915->display.wm.num_levels; level++) { 2403 + for (level++; level < display->wm.num_levels; level++) { 2383 2404 enum plane_id plane_id; 2384 2405 2385 2406 for_each_plane_id_on_crtc(crtc, plane_id) { ··· 2395 2416 } 2396 2417 } 2397 2418 2398 - if (DISPLAY_VER(i915) >= 12 && 2399 - i915->display.sagv.block_time_us && 2419 + if (DISPLAY_VER(display) >= 12 && 2420 + display->sagv.block_time_us && 2400 2421 skl_is_vblank_too_short(crtc_state, wm0_lines, 2401 - i915->display.sagv.block_time_us)) { 2422 + display->sagv.block_time_us)) { 2402 2423 enum plane_id plane_id; 2403 2424 2404 2425 for_each_plane_id_on_crtc(crtc, plane_id) { ··· 2416 2437 static int skl_build_pipe_wm(struct intel_atomic_state *state, 2417 2438 struct intel_crtc *crtc) 2418 2439 { 2419 - struct drm_i915_private *i915 = to_i915(crtc->base.dev); 2440 + struct intel_display *display = to_intel_display(crtc); 2420 2441 struct intel_crtc_state *crtc_state = 2421 2442 intel_atomic_get_new_crtc_state(state, crtc); 2422 2443 const struct intel_plane_state *plane_state; ··· 2432 2453 if (plane->pipe != crtc->pipe) 2433 2454 continue; 2434 2455 2435 - if (DISPLAY_VER(i915) >= 11) 2456 + if (DISPLAY_VER(display) >= 11) 2436 2457 ret = icl_build_plane_wm(crtc_state, plane_state); 2437 2458 else 2438 2459 ret = skl_build_plane_wm(crtc_state, plane_state); ··· 2455 2476 l1->auto_min_alloc_wm_enable == l2->auto_min_alloc_wm_enable; 2456 2477 } 2457 2478 2458 - static bool skl_plane_wm_equals(struct drm_i915_private *i915, 2479 + static bool skl_plane_wm_equals(struct intel_display *display, 2459 2480 const struct skl_plane_wm *wm1, 2460 2481 const struct skl_plane_wm *wm2) 2461 2482 { 2462 - struct intel_display *display = &i915->display; 2463 2483 int level; 2464 2484 2465 2485 for (level = 0; level < 
display->wm.num_levels; level++) { ··· 2513 2535 skl_ddb_add_affected_planes(struct intel_atomic_state *state, 2514 2536 struct intel_crtc *crtc) 2515 2537 { 2516 - struct drm_i915_private *i915 = to_i915(state->base.dev); 2538 + struct intel_display *display = to_intel_display(state); 2517 2539 const struct intel_crtc_state *old_crtc_state = 2518 2540 intel_atomic_get_old_crtc_state(state, crtc); 2519 2541 struct intel_crtc_state *new_crtc_state = 2520 2542 intel_atomic_get_new_crtc_state(state, crtc); 2521 2543 struct intel_plane *plane; 2522 2544 2523 - for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { 2545 + for_each_intel_plane_on_crtc(display->drm, crtc, plane) { 2524 2546 struct intel_plane_state *plane_state; 2525 2547 enum plane_id plane_id = plane->id; 2526 2548 ··· 2531 2553 continue; 2532 2554 2533 2555 if (new_crtc_state->do_async_flip) { 2534 - drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Can't change DDB during async flip\n", 2556 + drm_dbg_kms(display->drm, "[PLANE:%d:%s] Can't change DDB during async flip\n", 2535 2557 plane->base.base.id, plane->base.name); 2536 2558 return -EINVAL; 2537 2559 } ··· 2550 2572 2551 2573 static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state) 2552 2574 { 2553 - struct drm_i915_private *i915 = to_i915(dbuf_state->base.state->base.dev); 2575 + struct intel_display *display = to_intel_display(dbuf_state->base.state->base.dev); 2554 2576 u8 enabled_slices; 2555 2577 enum pipe pipe; 2556 2578 ··· 2560 2582 */ 2561 2583 enabled_slices = BIT(DBUF_S1); 2562 2584 2563 - for_each_pipe(i915, pipe) 2585 + for_each_pipe(display, pipe) 2564 2586 enabled_slices |= dbuf_state->slices[pipe]; 2565 2587 2566 2588 return enabled_slices; ··· 2570 2592 skl_compute_ddb(struct intel_atomic_state *state) 2571 2593 { 2572 2594 struct intel_display *display = to_intel_display(state); 2573 - struct drm_i915_private *i915 = to_i915(state->base.dev); 2574 2595 const struct intel_dbuf_state *old_dbuf_state; 2575 2596 struct 
intel_dbuf_state *new_dbuf_state = NULL; 2576 2597 struct intel_crtc_state *new_crtc_state; ··· 2608 2631 } 2609 2632 } 2610 2633 2611 - for_each_intel_crtc(&i915->drm, crtc) { 2634 + for_each_intel_crtc(display->drm, crtc) { 2612 2635 enum pipe pipe = crtc->pipe; 2613 2636 2614 2637 new_dbuf_state->slices[pipe] = ··· 2631 2654 if (ret) 2632 2655 return ret; 2633 2656 2634 - drm_dbg_kms(&i915->drm, 2657 + drm_dbg_kms(display->drm, 2635 2658 "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n", 2636 2659 old_dbuf_state->enabled_slices, 2637 2660 new_dbuf_state->enabled_slices, 2638 - DISPLAY_INFO(i915)->dbuf.slice_mask, 2661 + DISPLAY_INFO(display)->dbuf.slice_mask, 2639 2662 str_yes_no(old_dbuf_state->joined_mbus), 2640 2663 str_yes_no(new_dbuf_state->joined_mbus)); 2641 2664 } ··· 2653 2676 return ret; 2654 2677 } 2655 2678 2656 - for_each_intel_crtc(&i915->drm, crtc) { 2679 + for_each_intel_crtc(display->drm, crtc) { 2657 2680 ret = skl_crtc_allocate_ddb(state, crtc); 2658 2681 if (ret) 2659 2682 return ret; ··· 2680 2703 static void 2681 2704 skl_print_wm_changes(struct intel_atomic_state *state) 2682 2705 { 2683 - struct drm_i915_private *i915 = to_i915(state->base.dev); 2706 + struct intel_display *display = to_intel_display(state); 2684 2707 const struct intel_crtc_state *old_crtc_state; 2685 2708 const struct intel_crtc_state *new_crtc_state; 2686 2709 struct intel_plane *plane; ··· 2697 2720 old_pipe_wm = &old_crtc_state->wm.skl.optimal; 2698 2721 new_pipe_wm = &new_crtc_state->wm.skl.optimal; 2699 2722 2700 - for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { 2723 + for_each_intel_plane_on_crtc(display->drm, crtc, plane) { 2701 2724 enum plane_id plane_id = plane->id; 2702 2725 const struct skl_ddb_entry *old, *new; 2703 2726 ··· 2707 2730 if (skl_ddb_entry_equal(old, new)) 2708 2731 continue; 2709 2732 2710 - drm_dbg_kms(&i915->drm, 2733 + drm_dbg_kms(display->drm, 2711 2734 "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), 
size %4d -> %4d\n", 2712 2735 plane->base.base.id, plane->base.name, 2713 2736 old->start, old->end, new->start, new->end, 2714 2737 skl_ddb_entry_size(old), skl_ddb_entry_size(new)); 2715 2738 } 2716 2739 2717 - for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { 2740 + for_each_intel_plane_on_crtc(display->drm, crtc, plane) { 2718 2741 enum plane_id plane_id = plane->id; 2719 2742 const struct skl_plane_wm *old_wm, *new_wm; 2720 2743 2721 2744 old_wm = &old_pipe_wm->planes[plane_id]; 2722 2745 new_wm = &new_pipe_wm->planes[plane_id]; 2723 2746 2724 - if (skl_plane_wm_equals(i915, old_wm, new_wm)) 2747 + if (skl_plane_wm_equals(display, old_wm, new_wm)) 2725 2748 continue; 2726 2749 2727 - drm_dbg_kms(&i915->drm, 2750 + drm_dbg_kms(display->drm, 2728 2751 "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm" 2729 2752 " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n", 2730 2753 plane->base.base.id, plane->base.name, ··· 2743 2766 enast(new_wm->sagv.wm0.enable), 2744 2767 enast(new_wm->sagv.trans_wm.enable)); 2745 2768 2746 - drm_dbg_kms(&i915->drm, 2769 + drm_dbg_kms(display->drm, 2747 2770 "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" 2748 2771 " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n", 2749 2772 plane->base.base.id, plane->base.name, ··· 2770 2793 enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines, 2771 2794 enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines); 2772 2795 2773 - drm_dbg_kms(&i915->drm, 2796 + drm_dbg_kms(display->drm, 2774 2797 "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" 2775 2798 " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", 2776 2799 plane->base.base.id, plane->base.name, ··· 2789 2812 new_wm->sagv.wm0.blocks, 2790 2813 new_wm->sagv.trans_wm.blocks); 2791 2814 2792 - drm_dbg_kms(&i915->drm, 2815 + drm_dbg_kms(display->drm, 2793 2816 "[PLANE:%d:%s] 
min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" 2794 2817 " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", 2795 2818 plane->base.base.id, plane->base.name, ··· 2867 2890 static int skl_wm_add_affected_planes(struct intel_atomic_state *state, 2868 2891 struct intel_crtc *crtc) 2869 2892 { 2870 - struct drm_i915_private *i915 = to_i915(crtc->base.dev); 2893 + struct intel_display *display = to_intel_display(state); 2871 2894 const struct intel_crtc_state *old_crtc_state = 2872 2895 intel_atomic_get_old_crtc_state(state, crtc); 2873 2896 struct intel_crtc_state *new_crtc_state = 2874 2897 intel_atomic_get_new_crtc_state(state, crtc); 2875 2898 struct intel_plane *plane; 2876 2899 2877 - for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { 2900 + for_each_intel_plane_on_crtc(display->drm, crtc, plane) { 2878 2901 struct intel_plane_state *plane_state; 2879 2902 enum plane_id plane_id = plane->id; 2880 2903 ··· 2893 2916 continue; 2894 2917 2895 2918 if (new_crtc_state->do_async_flip) { 2896 - drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Can't change watermarks during async flip\n", 2919 + drm_dbg_kms(display->drm, "[PLANE:%d:%s] Can't change watermarks during async flip\n", 2897 2920 plane->base.base.id, plane->base.name); 2898 2921 return -EINVAL; 2899 2922 } ··· 3092 3115 3093 3116 static void skl_wm_get_hw_state(struct intel_display *display) 3094 3117 { 3095 - struct drm_i915_private *i915 = to_i915(display->drm); 3096 3118 struct intel_dbuf_state *dbuf_state = 3097 - to_intel_dbuf_state(i915->display.dbuf.obj.state); 3119 + to_intel_dbuf_state(display->dbuf.obj.state); 3098 3120 struct intel_crtc *crtc; 3099 3121 3100 3122 if (HAS_MBUS_JOINING(display)) ··· 3133 3157 if (!crtc_state->hw.active) 3134 3158 continue; 3135 3159 3136 - skl_ddb_get_hw_plane_state(i915, crtc->pipe, 3160 + skl_ddb_get_hw_plane_state(display, crtc->pipe, 3137 3161 plane_id, ddb, ddb_y, 3138 3162 min_ddb, interim_ddb); 3139 3163 ··· 3149 3173 */ 3150 3174 slices = 
skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes, 3151 3175 dbuf_state->joined_mbus); 3152 - mbus_offset = mbus_ddb_offset(i915, slices); 3176 + mbus_offset = mbus_ddb_offset(display, slices); 3153 3177 crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start; 3154 3178 crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end; 3155 3179 ··· 3182 3206 skl_watermark_ipc_enabled(display) ? DISP_IPC_ENABLE : 0); 3183 3207 } 3184 3208 3185 - static bool skl_watermark_ipc_can_enable(struct drm_i915_private *i915) 3209 + static bool skl_watermark_ipc_can_enable(struct intel_display *display) 3186 3210 { 3211 + struct drm_i915_private *i915 = to_i915(display->drm); 3212 + 3187 3213 /* Display WA #0477 WaDisableIPC: skl */ 3188 - if (IS_SKYLAKE(i915)) 3214 + if (display->platform.skylake) 3189 3215 return false; 3190 3216 3191 3217 /* Display WA #1141: SKL:all KBL:all CFL */ 3192 - if (IS_KABYLAKE(i915) || 3193 - IS_COFFEELAKE(i915) || 3194 - IS_COMETLAKE(i915)) 3218 + if (display->platform.kabylake || 3219 + display->platform.coffeelake || 3220 + display->platform.cometlake) 3195 3221 return i915->dram_info.symmetric_memory; 3196 3222 3197 3223 return true; ··· 3201 3223 3202 3224 void skl_watermark_ipc_init(struct intel_display *display) 3203 3225 { 3204 - struct drm_i915_private *i915 = to_i915(display->drm); 3205 - 3206 3226 if (!HAS_IPC(display)) 3207 3227 return; 3208 3228 3209 - display->wm.ipc_enabled = skl_watermark_ipc_can_enable(i915); 3229 + display->wm.ipc_enabled = skl_watermark_ipc_can_enable(display); 3210 3230 3211 3231 skl_watermark_ipc_update(display); 3212 3232 } 3213 3233 3214 3234 static void 3215 - adjust_wm_latency(struct drm_i915_private *i915, 3235 + adjust_wm_latency(struct intel_display *display, 3216 3236 u16 wm[], int num_levels, int read_latency) 3217 3237 { 3238 + struct drm_i915_private *i915 = to_i915(display->drm); 3218 3239 bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed; 3219 3240 int 
i, level; 3220 3241 ··· 3254 3277 wm[0] += 1; 3255 3278 } 3256 3279 3257 - static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) 3280 + static void mtl_read_wm_latency(struct intel_display *display, u16 wm[]) 3258 3281 { 3259 - int num_levels = i915->display.wm.num_levels; 3282 + int num_levels = display->wm.num_levels; 3260 3283 u32 val; 3261 3284 3262 - val = intel_de_read(i915, MTL_LATENCY_LP0_LP1); 3285 + val = intel_de_read(display, MTL_LATENCY_LP0_LP1); 3263 3286 wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val); 3264 3287 wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val); 3265 3288 3266 - val = intel_de_read(i915, MTL_LATENCY_LP2_LP3); 3289 + val = intel_de_read(display, MTL_LATENCY_LP2_LP3); 3267 3290 wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val); 3268 3291 wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val); 3269 3292 3270 - val = intel_de_read(i915, MTL_LATENCY_LP4_LP5); 3293 + val = intel_de_read(display, MTL_LATENCY_LP4_LP5); 3271 3294 wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val); 3272 3295 wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val); 3273 3296 3274 - adjust_wm_latency(i915, wm, num_levels, 6); 3297 + adjust_wm_latency(display, wm, num_levels, 6); 3275 3298 } 3276 3299 3277 - static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) 3300 + static void skl_read_wm_latency(struct intel_display *display, u16 wm[]) 3278 3301 { 3279 - int num_levels = i915->display.wm.num_levels; 3280 - int read_latency = DISPLAY_VER(i915) >= 12 ? 3 : 2; 3281 - int mult = IS_DG2(i915) ? 2 : 1; 3302 + struct drm_i915_private *i915 = to_i915(display->drm); 3303 + int num_levels = display->wm.num_levels; 3304 + int read_latency = DISPLAY_VER(display) >= 12 ? 3 : 2; 3305 + int mult = display->platform.dg2 ? 
2 : 1; 3282 3306 u32 val; 3283 3307 int ret; 3284 3308 ··· 3287 3309 val = 0; /* data0 to be programmed to 0 for first set */ 3288 3310 ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL); 3289 3311 if (ret) { 3290 - drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret); 3312 + drm_err(display->drm, "SKL Mailbox read error = %d\n", ret); 3291 3313 return; 3292 3314 } 3293 3315 ··· 3300 3322 val = 1; /* data0 to be programmed to 1 for second set */ 3301 3323 ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL); 3302 3324 if (ret) { 3303 - drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret); 3325 + drm_err(display->drm, "SKL Mailbox read error = %d\n", ret); 3304 3326 return; 3305 3327 } 3306 3328 ··· 3309 3331 wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult; 3310 3332 wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult; 3311 3333 3312 - adjust_wm_latency(i915, wm, num_levels, read_latency); 3334 + adjust_wm_latency(display, wm, num_levels, read_latency); 3313 3335 } 3314 3336 3315 - static void skl_setup_wm_latency(struct drm_i915_private *i915) 3337 + static void skl_setup_wm_latency(struct intel_display *display) 3316 3338 { 3317 - struct intel_display *display = &i915->display; 3318 - 3319 3339 if (HAS_HW_SAGV_WM(display)) 3320 3340 display->wm.num_levels = 6; 3321 3341 else 3322 3342 display->wm.num_levels = 8; 3323 3343 3324 3344 if (DISPLAY_VER(display) >= 14) 3325 - mtl_read_wm_latency(i915, display->wm.skl_latency); 3345 + mtl_read_wm_latency(display, display->wm.skl_latency); 3326 3346 else 3327 - skl_read_wm_latency(i915, display->wm.skl_latency); 3347 + skl_read_wm_latency(display, display->wm.skl_latency); 3328 3348 3329 3349 intel_print_wm_latency(display, "Gen9 Plane", display->wm.skl_latency); 3330 3350 } ··· 3352 3376 struct intel_dbuf_state * 3353 3377 intel_atomic_get_dbuf_state(struct intel_atomic_state *state) 3354 3378 { 3355 - struct drm_i915_private 
*i915 = to_i915(state->base.dev); 3379 + struct intel_display *display = to_intel_display(state); 3356 3380 struct intel_global_state *dbuf_state; 3357 3381 3358 - dbuf_state = intel_atomic_get_global_obj_state(state, &i915->display.dbuf.obj); 3382 + dbuf_state = intel_atomic_get_global_obj_state(state, &display->dbuf.obj); 3359 3383 if (IS_ERR(dbuf_state)) 3360 3384 return ERR_CAST(dbuf_state); 3361 3385 ··· 3398 3422 static u32 pipe_mbus_dbox_ctl(const struct intel_crtc *crtc, 3399 3423 const struct intel_dbuf_state *dbuf_state) 3400 3424 { 3401 - struct drm_i915_private *i915 = to_i915(crtc->base.dev); 3425 + struct intel_display *display = to_intel_display(crtc); 3402 3426 u32 val = 0; 3403 3427 3404 - if (DISPLAY_VER(i915) >= 14) 3428 + if (DISPLAY_VER(display) >= 14) 3405 3429 val |= MBUS_DBOX_I_CREDIT(2); 3406 3430 3407 - if (DISPLAY_VER(i915) >= 12) { 3431 + if (DISPLAY_VER(display) >= 12) { 3408 3432 val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16); 3409 3433 val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1); 3410 3434 val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN; 3411 3435 } 3412 3436 3413 - if (DISPLAY_VER(i915) >= 14) 3437 + if (DISPLAY_VER(display) >= 14) 3414 3438 val |= dbuf_state->joined_mbus ? 3415 3439 MBUS_DBOX_A_CREDIT(12) : MBUS_DBOX_A_CREDIT(8); 3416 - else if (IS_ALDERLAKE_P(i915)) 3440 + else if (display->platform.alderlake_p) 3417 3441 /* Wa_22010947358:adl-p */ 3418 3442 val |= dbuf_state->joined_mbus ? 
3419 3443 MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4); 3420 3444 else 3421 3445 val |= MBUS_DBOX_A_CREDIT(2); 3422 3446 3423 - if (DISPLAY_VER(i915) >= 14) { 3447 + if (DISPLAY_VER(display) >= 14) { 3424 3448 val |= MBUS_DBOX_B_CREDIT(0xA); 3425 - } else if (IS_ALDERLAKE_P(i915)) { 3449 + } else if (display->platform.alderlake_p) { 3426 3450 val |= MBUS_DBOX_BW_CREDIT(2); 3427 3451 val |= MBUS_DBOX_B_CREDIT(8); 3428 - } else if (DISPLAY_VER(i915) >= 12) { 3452 + } else if (DISPLAY_VER(display) >= 12) { 3429 3453 val |= MBUS_DBOX_BW_CREDIT(2); 3430 3454 val |= MBUS_DBOX_B_CREDIT(12); 3431 3455 } else { ··· 3433 3457 val |= MBUS_DBOX_B_CREDIT(8); 3434 3458 } 3435 3459 3436 - if (DISPLAY_VERx100(i915) == 1400) { 3460 + if (DISPLAY_VERx100(display) == 1400) { 3437 3461 if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe, dbuf_state->active_pipes)) 3438 3462 val |= MBUS_DBOX_BW_8CREDITS_MTL; 3439 3463 else ··· 3443 3467 return val; 3444 3468 } 3445 3469 3446 - static void pipe_mbus_dbox_ctl_update(struct drm_i915_private *i915, 3470 + static void pipe_mbus_dbox_ctl_update(struct intel_display *display, 3447 3471 const struct intel_dbuf_state *dbuf_state) 3448 3472 { 3449 3473 struct intel_crtc *crtc; 3450 3474 3451 - for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, dbuf_state->active_pipes) 3452 - intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe), 3475 + for_each_intel_crtc_in_pipe_mask(display->drm, crtc, dbuf_state->active_pipes) 3476 + intel_de_write(display, PIPE_MBUS_DBOX_CTL(crtc->pipe), 3453 3477 pipe_mbus_dbox_ctl(crtc, dbuf_state)); 3454 3478 } 3455 3479 3456 3480 static void intel_mbus_dbox_update(struct intel_atomic_state *state) 3457 3481 { 3458 - struct drm_i915_private *i915 = to_i915(state->base.dev); 3482 + struct intel_display *display = to_intel_display(state); 3459 3483 const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state; 3460 3484 3461 - if (DISPLAY_VER(i915) < 11) 3485 + if (DISPLAY_VER(display) < 11) 3462 3486 return; 3463 3487 3464 
3488 new_dbuf_state = intel_atomic_get_new_dbuf_state(state); ··· 3468 3492 new_dbuf_state->active_pipes == old_dbuf_state->active_pipes)) 3469 3493 return; 3470 3494 3471 - pipe_mbus_dbox_ctl_update(i915, new_dbuf_state); 3495 + pipe_mbus_dbox_ctl_update(display, new_dbuf_state); 3472 3496 } 3473 3497 3474 3498 int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state, ··· 3534 3558 const struct intel_dbuf_state *dbuf_state) 3535 3559 { 3536 3560 struct intel_display *display = to_intel_display(state); 3537 - struct drm_i915_private *i915 = to_i915(state->base.dev); 3538 3561 enum pipe pipe = ffs(dbuf_state->active_pipes) - 1; 3539 3562 const struct intel_crtc_state *new_crtc_state; 3540 3563 struct intel_crtc *crtc; 3541 3564 3542 - drm_WARN_ON(&i915->drm, !dbuf_state->joined_mbus); 3543 - drm_WARN_ON(&i915->drm, !is_power_of_2(dbuf_state->active_pipes)); 3565 + drm_WARN_ON(display->drm, !dbuf_state->joined_mbus); 3566 + drm_WARN_ON(display->drm, !is_power_of_2(dbuf_state->active_pipes)); 3544 3567 3545 3568 crtc = intel_crtc_for_pipe(display, pipe); 3546 3569 new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); ··· 3550 3575 return INVALID_PIPE; 3551 3576 } 3552 3577 3553 - static void mbus_ctl_join_update(struct drm_i915_private *i915, 3578 + static void mbus_ctl_join_update(struct intel_display *display, 3554 3579 const struct intel_dbuf_state *dbuf_state, 3555 3580 enum pipe pipe) 3556 3581 { ··· 3566 3591 else 3567 3592 mbus_ctl |= MBUS_JOIN_PIPE_SELECT_NONE; 3568 3593 3569 - intel_de_rmw(i915, MBUS_CTL, 3594 + intel_de_rmw(display, MBUS_CTL, 3570 3595 MBUS_HASHING_MODE_MASK | MBUS_JOIN | 3571 3596 MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl); 3572 3597 } ··· 3574 3599 static void intel_dbuf_mbus_join_update(struct intel_atomic_state *state, 3575 3600 enum pipe pipe) 3576 3601 { 3577 - struct drm_i915_private *i915 = to_i915(state->base.dev); 3602 + struct intel_display *display = to_intel_display(state); 3578 3603 const struct 
intel_dbuf_state *old_dbuf_state = 3579 3604 intel_atomic_get_old_dbuf_state(state); 3580 3605 const struct intel_dbuf_state *new_dbuf_state = 3581 3606 intel_atomic_get_new_dbuf_state(state); 3582 3607 3583 - drm_dbg_kms(&i915->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n", 3608 + drm_dbg_kms(display->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n", 3584 3609 str_yes_no(old_dbuf_state->joined_mbus), 3585 3610 str_yes_no(new_dbuf_state->joined_mbus), 3586 3611 pipe != INVALID_PIPE ? pipe_name(pipe) : '*'); 3587 3612 3588 - mbus_ctl_join_update(i915, new_dbuf_state, pipe); 3613 + mbus_ctl_join_update(display, new_dbuf_state, pipe); 3589 3614 } 3590 3615 3591 3616 void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state) ··· 3690 3715 gen9_dbuf_slices_update(display, new_slices); 3691 3716 } 3692 3717 3693 - static void skl_mbus_sanitize(struct drm_i915_private *i915) 3718 + static void skl_mbus_sanitize(struct intel_display *display) 3694 3719 { 3695 - struct intel_display *display = &i915->display; 3696 3720 struct intel_dbuf_state *dbuf_state = 3697 3721 to_intel_dbuf_state(display->dbuf.obj.state); 3698 3722 ··· 3709 3735 intel_dbuf_mdclk_cdclk_ratio_update(display, 3710 3736 dbuf_state->mdclk_cdclk_ratio, 3711 3737 dbuf_state->joined_mbus); 3712 - pipe_mbus_dbox_ctl_update(i915, dbuf_state); 3713 - mbus_ctl_join_update(i915, dbuf_state, INVALID_PIPE); 3738 + pipe_mbus_dbox_ctl_update(display, dbuf_state); 3739 + mbus_ctl_join_update(display, dbuf_state, INVALID_PIPE); 3714 3740 } 3715 3741 3716 - static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915) 3742 + static bool skl_dbuf_is_misconfigured(struct intel_display *display) 3717 3743 { 3718 3744 const struct intel_dbuf_state *dbuf_state = 3719 - to_intel_dbuf_state(i915->display.dbuf.obj.state); 3745 + to_intel_dbuf_state(display->dbuf.obj.state); 3720 3746 struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; 3721 3747 struct intel_crtc *crtc; 3722 3748 3723 - 
for_each_intel_crtc(&i915->drm, crtc) { 3749 + for_each_intel_crtc(display->drm, crtc) { 3724 3750 const struct intel_crtc_state *crtc_state = 3725 3751 to_intel_crtc_state(crtc->base.state); 3726 3752 3727 3753 entries[crtc->pipe] = crtc_state->wm.skl.ddb; 3728 3754 } 3729 3755 3730 - for_each_intel_crtc(&i915->drm, crtc) { 3756 + for_each_intel_crtc(display->drm, crtc) { 3731 3757 const struct intel_crtc_state *crtc_state = 3732 3758 to_intel_crtc_state(crtc->base.state); 3733 3759 u8 slices; ··· 3745 3771 return false; 3746 3772 } 3747 3773 3748 - static void skl_dbuf_sanitize(struct drm_i915_private *i915) 3774 + static void skl_dbuf_sanitize(struct intel_display *display) 3749 3775 { 3750 3776 struct intel_crtc *crtc; 3751 3777 ··· 3760 3786 * all the planes so that skl_commit_modeset_enables() can 3761 3787 * simply ignore them. 3762 3788 */ 3763 - if (!skl_dbuf_is_misconfigured(i915)) 3789 + if (!skl_dbuf_is_misconfigured(display)) 3764 3790 return; 3765 3791 3766 - drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n"); 3792 + drm_dbg_kms(display->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n"); 3767 3793 3768 - for_each_intel_crtc(&i915->drm, crtc) { 3794 + for_each_intel_crtc(display->drm, crtc) { 3769 3795 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 3770 3796 const struct intel_plane_state *plane_state = 3771 3797 to_intel_plane_state(plane->base.state); ··· 3775 3801 if (plane_state->uapi.visible) 3776 3802 intel_plane_disable_noatomic(crtc, plane); 3777 3803 3778 - drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0); 3804 + drm_WARN_ON(display->drm, crtc_state->active_planes != 0); 3779 3805 3780 3806 memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb)); 3781 3807 } ··· 3783 3809 3784 3810 static void skl_wm_sanitize(struct intel_display *display) 3785 3811 { 3786 - struct drm_i915_private *i915 = to_i915(display->drm); 3787 - 3788 - skl_mbus_sanitize(i915); 3789 - 
skl_dbuf_sanitize(i915); 3812 + skl_mbus_sanitize(display); 3813 + skl_dbuf_sanitize(display); 3790 3814 } 3791 3815 3792 3816 void skl_wm_crtc_disable_noatomic(struct intel_crtc *crtc) ··· 3835 3863 struct intel_crtc *crtc) 3836 3864 { 3837 3865 struct intel_display *display = to_intel_display(state); 3838 - struct drm_i915_private *i915 = to_i915(state->base.dev); 3839 3866 const struct intel_crtc_state *new_crtc_state = 3840 3867 intel_atomic_get_new_crtc_state(state, crtc); 3841 3868 struct skl_hw_state { ··· 3849 3878 u8 hw_enabled_slices; 3850 3879 int level; 3851 3880 3852 - if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active) 3881 + if (DISPLAY_VER(display) < 9 || !new_crtc_state->hw.active) 3853 3882 return; 3854 3883 3855 3884 hw = kzalloc(sizeof(*hw), GFP_KERNEL); ··· 3862 3891 3863 3892 hw_enabled_slices = intel_enabled_dbuf_slices_mask(display); 3864 3893 3865 - if (DISPLAY_VER(i915) >= 11 && 3866 - hw_enabled_slices != i915->display.dbuf.enabled_slices) 3867 - drm_err(&i915->drm, 3894 + if (DISPLAY_VER(display) >= 11 && 3895 + hw_enabled_slices != display->dbuf.enabled_slices) 3896 + drm_err(display->drm, 3868 3897 "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n", 3869 - i915->display.dbuf.enabled_slices, 3898 + display->dbuf.enabled_slices, 3870 3899 hw_enabled_slices); 3871 3900 3872 - for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { 3901 + for_each_intel_plane_on_crtc(display->drm, crtc, plane) { 3873 3902 const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; 3874 3903 const struct skl_wm_level *hw_wm_level, *sw_wm_level; 3875 3904 3876 3905 /* Watermarks */ 3877 - for (level = 0; level < i915->display.wm.num_levels; level++) { 3906 + for (level = 0; level < display->wm.num_levels; level++) { 3878 3907 hw_wm_level = &hw->wm.planes[plane->id].wm[level]; 3879 3908 sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level); 3880 3909 3881 3910 if (skl_wm_level_equals(hw_wm_level, sw_wm_level)) 3882 3911 continue; 3883 3912 3884 - 
drm_err(&i915->drm, 3913 + drm_err(display->drm, 3885 3914 "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 3886 3915 plane->base.base.id, plane->base.name, level, 3887 3916 sw_wm_level->enable, ··· 3896 3925 sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id); 3897 3926 3898 3927 if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) { 3899 - drm_err(&i915->drm, 3928 + drm_err(display->drm, 3900 3929 "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 3901 3930 plane->base.base.id, plane->base.name, 3902 3931 sw_wm_level->enable, ··· 3912 3941 3913 3942 if (HAS_HW_SAGV_WM(display) && 3914 3943 !skl_wm_level_equals(hw_wm_level, sw_wm_level)) { 3915 - drm_err(&i915->drm, 3944 + drm_err(display->drm, 3916 3945 "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 3917 3946 plane->base.base.id, plane->base.name, 3918 3947 sw_wm_level->enable, ··· 3928 3957 3929 3958 if (HAS_HW_SAGV_WM(display) && 3930 3959 !skl_wm_level_equals(hw_wm_level, sw_wm_level)) { 3931 - drm_err(&i915->drm, 3960 + drm_err(display->drm, 3932 3961 "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 3933 3962 plane->base.base.id, plane->base.name, 3934 3963 sw_wm_level->enable, ··· 3944 3973 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR]; 3945 3974 3946 3975 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { 3947 - drm_err(&i915->drm, 3976 + drm_err(display->drm, 3948 3977 "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n", 3949 3978 plane->base.base.id, plane->base.name, 3950 3979 sw_ddb_entry->start, sw_ddb_entry->end, ··· 3963 3992 3964 3993 void skl_wm_init(struct intel_display *display) 3965 3994 { 3966 - struct drm_i915_private *i915 = to_i915(display->drm); 3995 + intel_sagv_init(display); 3967 3996 3968 - intel_sagv_init(i915); 3969 - 3970 - skl_setup_wm_latency(i915); 3997 + skl_setup_wm_latency(display); 3971 3998 3972 
3999 display->funcs.wm = &skl_wm_funcs; 3973 4000 } 3974 4001 3975 4002 static int skl_watermark_ipc_status_show(struct seq_file *m, void *data) 3976 4003 { 3977 - struct drm_i915_private *i915 = m->private; 3978 - struct intel_display *display = &i915->display; 4004 + struct intel_display *display = m->private; 3979 4005 3980 4006 seq_printf(m, "Isochronous Priority Control: %s\n", 3981 4007 str_yes_no(skl_watermark_ipc_enabled(display))); ··· 3981 4013 3982 4014 static int skl_watermark_ipc_status_open(struct inode *inode, struct file *file) 3983 4015 { 3984 - struct drm_i915_private *i915 = inode->i_private; 4016 + struct intel_display *display = inode->i_private; 3985 4017 3986 - return single_open(file, skl_watermark_ipc_status_show, i915); 4018 + return single_open(file, skl_watermark_ipc_status_show, display); 3987 4019 } 3988 4020 3989 4021 static ssize_t skl_watermark_ipc_status_write(struct file *file, ··· 3991 4023 size_t len, loff_t *offp) 3992 4024 { 3993 4025 struct seq_file *m = file->private_data; 3994 - struct drm_i915_private *i915 = m->private; 3995 - struct intel_display *display = &i915->display; 4026 + struct intel_display *display = m->private; 3996 4027 bool enable; 3997 4028 int ret; 3998 4029 ··· 4021 4054 4022 4055 static int intel_sagv_status_show(struct seq_file *m, void *unused) 4023 4056 { 4024 - struct drm_i915_private *i915 = m->private; 4025 - struct intel_display *display = &i915->display; 4057 + struct intel_display *display = m->private; 4026 4058 static const char * const sagv_status[] = { 4027 4059 [I915_SAGV_UNKNOWN] = "unknown", 4028 4060 [I915_SAGV_DISABLED] = "disabled", ··· 4031 4065 4032 4066 seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(display))); 4033 4067 seq_printf(m, "SAGV modparam: %s\n", 4034 - str_enabled_disabled(i915->display.params.enable_sagv)); 4035 - seq_printf(m, "SAGV status: %s\n", sagv_status[i915->display.sagv.status]); 4036 - seq_printf(m, "SAGV block time: %d usec\n", 
i915->display.sagv.block_time_us); 4068 + str_enabled_disabled(display->params.enable_sagv)); 4069 + seq_printf(m, "SAGV status: %s\n", sagv_status[display->sagv.status]); 4070 + seq_printf(m, "SAGV block time: %d usec\n", display->sagv.block_time_us); 4037 4071 4038 4072 return 0; 4039 4073 } ··· 4042 4076 4043 4077 void skl_watermark_debugfs_register(struct intel_display *display) 4044 4078 { 4045 - struct drm_i915_private *i915 = to_i915(display->drm); 4046 4079 struct drm_minor *minor = display->drm->primary; 4047 4080 4048 4081 if (HAS_IPC(display)) 4049 - debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915, 4050 - &skl_watermark_ipc_status_fops); 4082 + debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, 4083 + display, &skl_watermark_ipc_status_fops); 4051 4084 4052 4085 if (HAS_SAGV(display)) 4053 - debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root, i915, 4054 - &intel_sagv_status_fops); 4086 + debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root, 4087 + display, &intel_sagv_status_fops); 4055 4088 } 4056 4089 4057 4090 unsigned int skl_watermark_max_latency(struct intel_display *display, int initial_wm_level) 4058 4091 { 4059 - struct drm_i915_private *i915 = to_i915(display->drm); 4060 4092 int level; 4061 4093 4062 4094 for (level = display->wm.num_levels - 1; level >= initial_wm_level; level--) { 4063 - unsigned int latency = skl_wm_latency(i915, level, NULL); 4095 + unsigned int latency = skl_wm_latency(display, level, NULL); 4064 4096 4065 4097 if (latency) 4066 4098 return latency;