Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'drm-intel-next-2018-03-08' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

UAPI Changes:

- Query uAPI interface (used for GPU topology information currently)
* Mesa: https://patchwork.freedesktop.org/series/38795/
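
  For illustration, the new query uAPI is used in a two-pass pattern: userspace first
  submits a query item with length 0 to learn the buffer size the kernel needs, then
  allocates that buffer and repeats the call to receive the topology data. The sketch
  below assumes an already-open DRM fd and trims error handling; the exact struct
  layout is in include/uapi/drm/i915_drm.h as added by this series.

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Two-pass DRM_IOCTL_I915_QUERY: size probe first, then data fetch. */
static void *query_topology(int drm_fd)
{
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
		/* .length = 0 asks the kernel for the required size */
	};
	struct drm_i915_query query = {
		.num_items = 1,
		.items_ptr = (uintptr_t)&item,
	};
	void *buf;

	if (ioctl(drm_fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0)
		return NULL;

	buf = calloc(1, item.length);
	item.data_ptr = (uintptr_t)buf;

	if (ioctl(drm_fd, DRM_IOCTL_I915_QUERY, &query)) {	/* second pass */
		free(buf);
		return NULL;
	}
	return buf;	/* starts with struct drm_i915_query_topology_info */
}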

Driver Changes:

- Increase PSR2 size for CNL (DK)
- Avoid retraining LSPCON link unnecessarily (Ville)
- Decrease request signaling latency (Chris)
- GuC error capture fix (Daniele)

* tag 'drm-intel-next-2018-03-08' of git://anongit.freedesktop.org/drm/drm-intel: (127 commits)
drm/i915: Update DRIVER_DATE to 20180308
drm/i915: add schedule out notification of preempted but completed request
drm/i915: expose rcs topology through query uAPI
drm/i915: add query uAPI
drm/i915: add rcs topology to error state
drm/i915/debugfs: add rcs topology entry
drm/i915/debugfs: reuse max slice/subslices already stored in sseu
drm/i915: store all subslice masks
drm/i915/guc: work around gcc-4.4.4 union initializer issue
drm/i915/cnl: Add Wa_2201832410
drm/i915/icl: Gen11 forcewake support
drm/i915/icl: Add Indirect Context Offset for Gen11
drm/i915/icl: Enhanced execution list support
drm/i915/icl: new context descriptor support
drm/i915/icl: Correctly initialize the Gen11 engines
drm/i915: Assert that the request is indeed complete when signaled from irq
drm/i915: Handle changing enable_fbc parameter at runtime better.
drm/i915: Track whether the DP link is trained or not
drm/i915: Nuke intel_dp->channel_eq_status
drm/i915: Move SST DP link retraining into the ->post_hotplug() hook
...

+5552 -3662
+7
Documentation/gpu/todo.rst
···
 
 Contact: Harry Wentland, Alex Deucher
 
+i915
+----
+
+- Our early/late pm callbacks could be removed in favour of using
+  device_link_add to model the dependency between i915 and snd_had. See
+  https://dri.freedesktop.org/docs/drm/driver-api/device_link.html
+
 Outside DRM
 ===========
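
  The TODO entry added above points at device_link_add(). As a rough illustration
  only (not code from this pull), a consumer/supplier link with runtime-PM flags
  could look like the following; hda_dev and i915_dev are placeholder struct device
  pointers for the audio and graphics devices.

#include <linux/device.h>
#include <linux/errno.h>

/*
 * Hypothetical sketch: model "audio depends on i915" with a device link so
 * the driver core orders runtime PM, instead of the early/late callbacks.
 */
static int hda_link_to_i915(struct device *hda_dev, struct device *i915_dev)
{
	struct device_link *link;

	link = device_link_add(hda_dev /* consumer */, i915_dev /* supplier */,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
	if (!link)
		return -ENODEV;

	return 0;
}
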
+14 -8
drivers/gpu/drm/drm_vblank.c
···
 EXPORT_SYMBOL(drm_crtc_vblank_on);
 
 /**
- * drm_vblank_restore - estimated vblanks using timestamps and update it.
+ * drm_vblank_restore - estimate missed vblanks and update vblank count.
+ * @dev: DRM device
+ * @pipe: CRTC index
  *
  * Power manamement features can cause frame counter resets between vblank
- * disable and enable. Drivers can then use this function in their
- * &drm_crtc_funcs.enable_vblank implementation to estimate the vblanks since
- * the last &drm_crtc_funcs.disable_vblank.
+ * disable and enable. Drivers can use this function in their
+ * &drm_crtc_funcs.enable_vblank implementation to estimate missed vblanks since
+ * the last &drm_crtc_funcs.disable_vblank using timestamps and update the
+ * vblank counter.
  *
  * This function is the legacy version of drm_crtc_vblank_restore().
  */
···
 EXPORT_SYMBOL(drm_vblank_restore);
 
 /**
- * drm_crtc_vblank_restore - estimate vblanks using timestamps and update it.
+ * drm_crtc_vblank_restore - estimate missed vblanks and update vblank count.
+ * @crtc: CRTC in question
+ *
  * Power manamement features can cause frame counter resets between vblank
- * disable and enable. Drivers can then use this function in their
- * &drm_crtc_funcs.enable_vblank implementation to estimate the vblanks since
- * the last &drm_crtc_funcs.disable_vblank.
+ * disable and enable. Drivers can use this function in their
+ * &drm_crtc_funcs.enable_vblank implementation to estimate missed vblanks since
+ * the last &drm_crtc_funcs.disable_vblank using timestamps and update the
+ * vblank counter.
  */
 void drm_crtc_vblank_restore(struct drm_crtc *crtc)
 {
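
  The reworded kerneldoc describes calling this helper from a driver's
  &drm_crtc_funcs.enable_vblank hook. A minimal sketch of that pattern follows;
  the foo_* names are hypothetical stand-ins for real driver code.

#include <drm/drm_crtc.h>
#include <drm/drm_vblank.h>

/*
 * Hypothetical driver hook: re-arm the vblank interrupt, then let the core
 * estimate how many vblanks elapsed while the counter may have been reset.
 */
static int foo_crtc_enable_vblank(struct drm_crtc *crtc)
{
	foo_hw_unmask_vblank_irq(crtc);	/* placeholder hardware helper */
	drm_crtc_vblank_restore(crtc);
	return 0;
}
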
+4 -2
drivers/gpu/drm/i915/Makefile
···
 	  i915_gem.o \
 	  i915_gem_object.o \
 	  i915_gem_render_state.o \
-	  i915_gem_request.o \
 	  i915_gem_shrinker.o \
 	  i915_gem_stolen.o \
 	  i915_gem_tiling.o \
 	  i915_gem_timeline.o \
 	  i915_gem_userptr.o \
 	  i915_gemfs.o \
+	  i915_query.o \
+	  i915_request.o \
 	  i915_trace_points.o \
 	  i915_vma.o \
 	  intel_breadcrumbs.o \
···
 	  intel_guc_fw.o \
 	  intel_guc_log.o \
 	  intel_guc_submission.o \
-	  intel_huc.o
+	  intel_huc.o \
+	  intel_huc_fw.o
 
 # autogenerated null render state
 i915-y += intel_renderstate_gen6.o \
+1 -1
drivers/gpu/drm/i915/gvt/Makefile
···
 GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
 	interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
 	execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \
-	fb_decoder.o dmabuf.o
+	fb_decoder.o dmabuf.o page_track.o
 
 ccflags-y += -I$(src) -I$(src)/$(GVT_DIR)
 i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
+1 -1
drivers/gpu/drm/i915/gvt/dmabuf.c
···
 
 	obj = vgpu_create_gem(dev, dmabuf_obj->info);
 	if (obj == NULL) {
-		gvt_vgpu_err("create gvt gem obj failed:%d\n", vgpu->id);
+		gvt_vgpu_err("create gvt gem obj failed\n");
 		ret = -ENOMEM;
 		goto out;
 	}
+667 -808
drivers/gpu/drm/i915/gvt/gtt.c
··· 38 38 #include "i915_pvinfo.h" 39 39 #include "trace.h" 40 40 41 + #if defined(VERBOSE_DEBUG) 42 + #define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args) 43 + #else 44 + #define gvt_vdbg_mm(fmt, args...) 45 + #endif 46 + 41 47 static bool enable_out_of_sync = false; 42 48 static int preallocated_oos_pages = 8192; 43 49 ··· 270 264 return readq(addr); 271 265 } 272 266 273 - static void gtt_invalidate(struct drm_i915_private *dev_priv) 267 + static void ggtt_invalidate(struct drm_i915_private *dev_priv) 274 268 { 275 269 mmio_hw_access_pre(dev_priv); 276 270 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); ··· 337 331 338 332 #define GTT_HAW 46 339 333 340 - #define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30) 341 - #define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21) 342 - #define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12) 334 + #define ADDR_1G_MASK GENMASK_ULL(GTT_HAW - 1, 30) 335 + #define ADDR_2M_MASK GENMASK_ULL(GTT_HAW - 1, 21) 336 + #define ADDR_4K_MASK GENMASK_ULL(GTT_HAW - 1, 12) 343 337 344 338 static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e) 345 339 { 346 340 unsigned long pfn; 347 341 348 342 if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) 349 - pfn = (e->val64 & ADDR_1G_MASK) >> 12; 343 + pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT; 350 344 else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) 351 - pfn = (e->val64 & ADDR_2M_MASK) >> 12; 345 + pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT; 352 346 else 353 - pfn = (e->val64 & ADDR_4K_MASK) >> 12; 347 + pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT; 354 348 return pfn; 355 349 } 356 350 ··· 358 352 { 359 353 if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) { 360 354 e->val64 &= ~ADDR_1G_MASK; 361 - pfn &= (ADDR_1G_MASK >> 12); 355 + pfn &= (ADDR_1G_MASK >> PAGE_SHIFT); 362 356 } else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) { 363 357 e->val64 &= ~ADDR_2M_MASK; 364 - pfn &= (ADDR_2M_MASK >> 12); 358 + pfn &= (ADDR_2M_MASK >> PAGE_SHIFT); 365 359 } else { 366 360 e->val64 &= ~ADDR_4K_MASK; 367 - pfn &= (ADDR_4K_MASK >> 12); 361 + pfn &= (ADDR_4K_MASK >> PAGE_SHIFT); 368 362 } 369 363 370 - e->val64 |= (pfn << 12); 364 + e->val64 |= (pfn << PAGE_SHIFT); 371 365 } 372 366 373 367 static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e) ··· 377 371 return false; 378 372 379 373 e->type = get_entry_type(e->type); 380 - if (!(e->val64 & BIT(7))) 374 + if (!(e->val64 & _PAGE_PSE)) 381 375 return false; 382 376 383 377 e->type = get_pse_type(e->type); ··· 395 389 || e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) 396 390 return (e->val64 != 0); 397 391 else 398 - return (e->val64 & BIT(0)); 392 + return (e->val64 & _PAGE_PRESENT); 399 393 } 400 394 401 395 static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e) 402 396 { 403 - e->val64 &= ~BIT(0); 397 + e->val64 &= ~_PAGE_PRESENT; 404 398 } 405 399 406 400 static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e) 407 401 { 408 - e->val64 |= BIT(0); 402 + e->val64 |= _PAGE_PRESENT; 409 403 } 410 404 411 405 /* ··· 453 447 .gma_to_pml4_index = gen8_gma_to_pml4_index, 454 448 }; 455 449 456 - static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p, 457 - struct intel_gvt_gtt_entry *m) 458 - { 459 - struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 460 - unsigned long gfn, mfn; 461 - 462 - *m = *p; 463 - 464 - if (!ops->test_present(p)) 465 - return 0; 466 - 467 - gfn = ops->get_pfn(p); 468 - 469 - mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn); 470 - if (mfn == INTEL_GVT_INVALID_ADDR) { 471 - 
gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn); 472 - return -ENXIO; 473 - } 474 - 475 - ops->set_pfn(m, mfn); 476 - return 0; 477 - } 478 - 479 450 /* 480 451 * MM helpers. 481 452 */ 482 - int intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm, 483 - void *page_table, struct intel_gvt_gtt_entry *e, 484 - unsigned long index) 453 + static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm, 454 + struct intel_gvt_gtt_entry *entry, unsigned long index, 455 + bool guest) 485 456 { 486 - struct intel_gvt *gvt = mm->vgpu->gvt; 487 - struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; 488 - int ret; 457 + struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; 489 458 490 - e->type = mm->page_table_entry_type; 459 + GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT); 491 460 492 - ret = ops->get_entry(page_table, e, index, false, 0, mm->vgpu); 493 - if (ret) 494 - return ret; 461 + entry->type = mm->ppgtt_mm.root_entry_type; 462 + pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps : 463 + mm->ppgtt_mm.shadow_pdps, 464 + entry, index, false, 0, mm->vgpu); 495 465 496 - ops->test_pse(e); 497 - return 0; 466 + pte_ops->test_pse(entry); 498 467 } 499 468 500 - int intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm, 501 - void *page_table, struct intel_gvt_gtt_entry *e, 502 - unsigned long index) 469 + static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm, 470 + struct intel_gvt_gtt_entry *entry, unsigned long index) 503 471 { 504 - struct intel_gvt *gvt = mm->vgpu->gvt; 505 - struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; 472 + _ppgtt_get_root_entry(mm, entry, index, true); 473 + } 506 474 507 - return ops->set_entry(page_table, e, index, false, 0, mm->vgpu); 475 + static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm, 476 + struct intel_gvt_gtt_entry *entry, unsigned long index) 477 + { 478 + _ppgtt_get_root_entry(mm, entry, index, false); 479 + } 480 + 481 + static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm, 482 + struct intel_gvt_gtt_entry *entry, unsigned long index, 483 + bool guest) 484 + { 485 + struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; 486 + 487 + pte_ops->set_entry(guest ? 
mm->ppgtt_mm.guest_pdps : 488 + mm->ppgtt_mm.shadow_pdps, 489 + entry, index, false, 0, mm->vgpu); 490 + } 491 + 492 + static inline void ppgtt_set_guest_root_entry(struct intel_vgpu_mm *mm, 493 + struct intel_gvt_gtt_entry *entry, unsigned long index) 494 + { 495 + _ppgtt_set_root_entry(mm, entry, index, true); 496 + } 497 + 498 + static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm, 499 + struct intel_gvt_gtt_entry *entry, unsigned long index) 500 + { 501 + _ppgtt_set_root_entry(mm, entry, index, false); 502 + } 503 + 504 + static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm, 505 + struct intel_gvt_gtt_entry *entry, unsigned long index) 506 + { 507 + struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; 508 + 509 + GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); 510 + 511 + entry->type = GTT_TYPE_GGTT_PTE; 512 + pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index, 513 + false, 0, mm->vgpu); 514 + } 515 + 516 + static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm, 517 + struct intel_gvt_gtt_entry *entry, unsigned long index) 518 + { 519 + struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; 520 + 521 + GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); 522 + 523 + pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index, 524 + false, 0, mm->vgpu); 525 + } 526 + 527 + static void ggtt_set_host_entry(struct intel_vgpu_mm *mm, 528 + struct intel_gvt_gtt_entry *entry, unsigned long index) 529 + { 530 + struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; 531 + 532 + GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); 533 + 534 + pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu); 508 535 } 509 536 510 537 /* ··· 559 520 return -EINVAL; 560 521 561 522 ret = ops->get_entry(page_table, e, index, guest, 562 - spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT, 523 + spt->guest_page.gfn << I915_GTT_PAGE_SHIFT, 563 524 spt->vgpu); 564 525 if (ret) 565 526 return ret; 566 527 567 528 ops->test_pse(e); 529 + 530 + gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n", 531 + type, e->type, index, e->val64); 568 532 return 0; 569 533 } 570 534 ··· 583 541 if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n")) 584 542 return -EINVAL; 585 543 544 + gvt_vdbg_mm("set ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n", 545 + type, e->type, index, e->val64); 546 + 586 547 return ops->set_entry(page_table, e, index, guest, 587 - spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT, 548 + spt->guest_page.gfn << I915_GTT_PAGE_SHIFT, 588 549 spt->vgpu); 589 550 } 590 551 591 552 #define ppgtt_get_guest_entry(spt, e, index) \ 592 553 ppgtt_spt_get_entry(spt, NULL, \ 593 - spt->guest_page_type, e, index, true) 554 + spt->guest_page.type, e, index, true) 594 555 595 556 #define ppgtt_set_guest_entry(spt, e, index) \ 596 557 ppgtt_spt_set_entry(spt, NULL, \ 597 - spt->guest_page_type, e, index, true) 558 + spt->guest_page.type, e, index, true) 598 559 599 560 #define ppgtt_get_shadow_entry(spt, e, index) \ 600 561 ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \ ··· 606 561 #define ppgtt_set_shadow_entry(spt, e, index) \ 607 562 ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \ 608 563 spt->shadow_page.type, e, index, false) 609 - 610 - /** 611 - * intel_vgpu_init_page_track - init a page track data structure 612 - * @vgpu: a vGPU 613 - * @t: a page track data structure 614 - * @gfn: guest memory page frame number 615 - * @handler: the function will be called when target guest memory page has 616 - * 
been modified. 617 - * 618 - * This function is called when a user wants to prepare a page track data 619 - * structure to track a guest memory page. 620 - * 621 - * Returns: 622 - * Zero on success, negative error code if failed. 623 - */ 624 - int intel_vgpu_init_page_track(struct intel_vgpu *vgpu, 625 - struct intel_vgpu_page_track *t, 626 - unsigned long gfn, 627 - int (*handler)(void *, u64, void *, int), 628 - void *data) 629 - { 630 - INIT_HLIST_NODE(&t->node); 631 - 632 - t->tracked = false; 633 - t->gfn = gfn; 634 - t->handler = handler; 635 - t->data = data; 636 - 637 - hash_add(vgpu->gtt.tracked_guest_page_hash_table, &t->node, t->gfn); 638 - return 0; 639 - } 640 - 641 - /** 642 - * intel_vgpu_clean_page_track - release a page track data structure 643 - * @vgpu: a vGPU 644 - * @t: a page track data structure 645 - * 646 - * This function is called before a user frees a page track data structure. 647 - */ 648 - void intel_vgpu_clean_page_track(struct intel_vgpu *vgpu, 649 - struct intel_vgpu_page_track *t) 650 - { 651 - if (!hlist_unhashed(&t->node)) 652 - hash_del(&t->node); 653 - 654 - if (t->tracked) 655 - intel_gvt_hypervisor_disable_page_track(vgpu, t); 656 - } 657 - 658 - /** 659 - * intel_vgpu_find_tracked_page - find a tracked guest page 660 - * @vgpu: a vGPU 661 - * @gfn: guest memory page frame number 662 - * 663 - * This function is called when the emulation layer wants to figure out if a 664 - * trapped GFN is a tracked guest page. 665 - * 666 - * Returns: 667 - * Pointer to page track data structure, NULL if not found. 668 - */ 669 - struct intel_vgpu_page_track *intel_vgpu_find_tracked_page( 670 - struct intel_vgpu *vgpu, unsigned long gfn) 671 - { 672 - struct intel_vgpu_page_track *t; 673 - 674 - hash_for_each_possible(vgpu->gtt.tracked_guest_page_hash_table, 675 - t, node, gfn) { 676 - if (t->gfn == gfn) 677 - return t; 678 - } 679 - return NULL; 680 - } 681 - 682 - static int init_guest_page(struct intel_vgpu *vgpu, 683 - struct intel_vgpu_guest_page *p, 684 - unsigned long gfn, 685 - int (*handler)(void *, u64, void *, int), 686 - void *data) 687 - { 688 - p->oos_page = NULL; 689 - p->write_cnt = 0; 690 - 691 - return intel_vgpu_init_page_track(vgpu, &p->track, gfn, handler, data); 692 - } 693 - 694 - static int detach_oos_page(struct intel_vgpu *vgpu, 695 - struct intel_vgpu_oos_page *oos_page); 696 - 697 - static void clean_guest_page(struct intel_vgpu *vgpu, 698 - struct intel_vgpu_guest_page *p) 699 - { 700 - if (p->oos_page) 701 - detach_oos_page(vgpu, p->oos_page); 702 - 703 - intel_vgpu_clean_page_track(vgpu, &p->track); 704 - } 705 - 706 - static inline int init_shadow_page(struct intel_vgpu *vgpu, 707 - struct intel_vgpu_shadow_page *p, int type, bool hash) 708 - { 709 - struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev; 710 - dma_addr_t daddr; 711 - 712 - daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL); 713 - if (dma_mapping_error(kdev, daddr)) { 714 - gvt_vgpu_err("fail to map dma addr\n"); 715 - return -EINVAL; 716 - } 717 - 718 - p->vaddr = page_address(p->page); 719 - p->type = type; 720 - 721 - INIT_HLIST_NODE(&p->node); 722 - 723 - p->mfn = daddr >> I915_GTT_PAGE_SHIFT; 724 - if (hash) 725 - hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn); 726 - return 0; 727 - } 728 - 729 - static inline void clean_shadow_page(struct intel_vgpu *vgpu, 730 - struct intel_vgpu_shadow_page *p) 731 - { 732 - struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev; 733 - 734 - dma_unmap_page(kdev, p->mfn << 
I915_GTT_PAGE_SHIFT, 4096, 735 - PCI_DMA_BIDIRECTIONAL); 736 - 737 - if (!hlist_unhashed(&p->node)) 738 - hash_del(&p->node); 739 - } 740 - 741 - static inline struct intel_vgpu_shadow_page *find_shadow_page( 742 - struct intel_vgpu *vgpu, unsigned long mfn) 743 - { 744 - struct intel_vgpu_shadow_page *p; 745 - 746 - hash_for_each_possible(vgpu->gtt.shadow_page_hash_table, 747 - p, node, mfn) { 748 - if (p->mfn == mfn) 749 - return p; 750 - } 751 - return NULL; 752 - } 753 - 754 - #define page_track_to_guest_page(ptr) \ 755 - container_of(ptr, struct intel_vgpu_guest_page, track) 756 - 757 - #define guest_page_to_ppgtt_spt(ptr) \ 758 - container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page) 759 - 760 - #define shadow_page_to_ppgtt_spt(ptr) \ 761 - container_of(ptr, struct intel_vgpu_ppgtt_spt, shadow_page) 762 564 763 565 static void *alloc_spt(gfp_t gfp_mask) 764 566 { ··· 629 737 kfree(spt); 630 738 } 631 739 632 - static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt) 740 + static int detach_oos_page(struct intel_vgpu *vgpu, 741 + struct intel_vgpu_oos_page *oos_page); 742 + 743 + static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt) 633 744 { 634 - trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type); 745 + struct device *kdev = &spt->vgpu->gvt->dev_priv->drm.pdev->dev; 635 746 636 - clean_shadow_page(spt->vgpu, &spt->shadow_page); 637 - clean_guest_page(spt->vgpu, &spt->guest_page); 747 + trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type); 748 + 749 + dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096, 750 + PCI_DMA_BIDIRECTIONAL); 751 + 752 + radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn); 753 + 754 + if (spt->guest_page.oos_page) 755 + detach_oos_page(spt->vgpu, spt->guest_page.oos_page); 756 + 757 + intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn); 758 + 638 759 list_del_init(&spt->post_shadow_list); 639 - 640 760 free_spt(spt); 641 761 } 642 762 643 - static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu) 763 + static void ppgtt_free_all_spt(struct intel_vgpu *vgpu) 644 764 { 645 - struct hlist_node *n; 646 - struct intel_vgpu_shadow_page *sp; 647 - int i; 765 + struct intel_vgpu_ppgtt_spt *spt; 766 + struct radix_tree_iter iter; 767 + void **slot; 648 768 649 - hash_for_each_safe(vgpu->gtt.shadow_page_hash_table, i, n, sp, node) 650 - ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp)); 769 + radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) { 770 + spt = radix_tree_deref_slot(slot); 771 + ppgtt_free_spt(spt); 772 + } 651 773 } 652 774 653 775 static int ppgtt_handle_guest_write_page_table_bytes( 654 - struct intel_vgpu_guest_page *gpt, 776 + struct intel_vgpu_ppgtt_spt *spt, 655 777 u64 pa, void *p_data, int bytes); 656 778 657 - static int ppgtt_write_protection_handler(void *data, u64 pa, 658 - void *p_data, int bytes) 779 + static int ppgtt_write_protection_handler( 780 + struct intel_vgpu_page_track *page_track, 781 + u64 gpa, void *data, int bytes) 659 782 { 660 - struct intel_vgpu_page_track *t = data; 661 - struct intel_vgpu_guest_page *p = page_track_to_guest_page(t); 783 + struct intel_vgpu_ppgtt_spt *spt = page_track->priv_data; 784 + 662 785 int ret; 663 786 664 787 if (bytes != 4 && bytes != 8) 665 788 return -EINVAL; 666 789 667 - if (!t->tracked) 668 - return -EINVAL; 669 - 670 - ret = ppgtt_handle_guest_write_page_table_bytes(p, 671 - pa, p_data, bytes); 790 + ret = ppgtt_handle_guest_write_page_table_bytes(spt, gpa, data, bytes); 672 791 if (ret) 673 
792 return ret; 674 793 return ret; 675 794 } 676 795 677 - static int reclaim_one_mm(struct intel_gvt *gvt); 796 + /* Find a spt by guest gfn. */ 797 + static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_gfn( 798 + struct intel_vgpu *vgpu, unsigned long gfn) 799 + { 800 + struct intel_vgpu_page_track *track; 678 801 679 - static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page( 802 + track = intel_vgpu_find_page_track(vgpu, gfn); 803 + if (track && track->handler == ppgtt_write_protection_handler) 804 + return track->priv_data; 805 + 806 + return NULL; 807 + } 808 + 809 + /* Find the spt by shadow page mfn. */ 810 + static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn( 811 + struct intel_vgpu *vgpu, unsigned long mfn) 812 + { 813 + return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn); 814 + } 815 + 816 + static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt); 817 + 818 + static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt( 680 819 struct intel_vgpu *vgpu, int type, unsigned long gfn) 681 820 { 821 + struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev; 682 822 struct intel_vgpu_ppgtt_spt *spt = NULL; 823 + dma_addr_t daddr; 683 824 int ret; 684 825 685 826 retry: 686 827 spt = alloc_spt(GFP_KERNEL | __GFP_ZERO); 687 828 if (!spt) { 688 - if (reclaim_one_mm(vgpu->gvt)) 829 + if (reclaim_one_ppgtt_mm(vgpu->gvt)) 689 830 goto retry; 690 831 691 832 gvt_vgpu_err("fail to allocate ppgtt shadow page\n"); ··· 726 801 } 727 802 728 803 spt->vgpu = vgpu; 729 - spt->guest_page_type = type; 730 804 atomic_set(&spt->refcount, 1); 731 805 INIT_LIST_HEAD(&spt->post_shadow_list); 732 806 733 807 /* 734 - * TODO: guest page type may be different with shadow page type, 735 - * when we support PSE page in future. 808 + * Init shadow_page. 736 809 */ 737 - ret = init_shadow_page(vgpu, &spt->shadow_page, type, true); 738 - if (ret) { 739 - gvt_vgpu_err("fail to initialize shadow page for spt\n"); 740 - goto err; 810 + spt->shadow_page.type = type; 811 + daddr = dma_map_page(kdev, spt->shadow_page.page, 812 + 0, 4096, PCI_DMA_BIDIRECTIONAL); 813 + if (dma_mapping_error(kdev, daddr)) { 814 + gvt_vgpu_err("fail to map dma addr\n"); 815 + ret = -EINVAL; 816 + goto err_free_spt; 741 817 } 818 + spt->shadow_page.vaddr = page_address(spt->shadow_page.page); 819 + spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT; 742 820 743 - ret = init_guest_page(vgpu, &spt->guest_page, 744 - gfn, ppgtt_write_protection_handler, NULL); 745 - if (ret) { 746 - gvt_vgpu_err("fail to initialize guest page for spt\n"); 747 - goto err; 748 - } 821 + /* 822 + * Init guest_page. 
823 + */ 824 + spt->guest_page.type = type; 825 + spt->guest_page.gfn = gfn; 826 + 827 + ret = intel_vgpu_register_page_track(vgpu, spt->guest_page.gfn, 828 + ppgtt_write_protection_handler, spt); 829 + if (ret) 830 + goto err_unmap_dma; 831 + 832 + ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt); 833 + if (ret) 834 + goto err_unreg_page_track; 749 835 750 836 trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn); 751 837 return spt; 752 - err: 753 - ppgtt_free_shadow_page(spt); 838 + 839 + err_unreg_page_track: 840 + intel_vgpu_unregister_page_track(vgpu, spt->guest_page.gfn); 841 + err_unmap_dma: 842 + dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 843 + err_free_spt: 844 + free_spt(spt); 754 845 return ERR_PTR(ret); 755 - } 756 - 757 - static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page( 758 - struct intel_vgpu *vgpu, unsigned long mfn) 759 - { 760 - struct intel_vgpu_shadow_page *p = find_shadow_page(vgpu, mfn); 761 - 762 - if (p) 763 - return shadow_page_to_ppgtt_spt(p); 764 - 765 - gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn); 766 - return NULL; 767 846 } 768 847 769 848 #define pt_entry_size_shift(spt) \ ··· 786 857 if (!ppgtt_get_shadow_entry(spt, e, i) && \ 787 858 spt->vgpu->gvt->gtt.pte_ops->test_present(e)) 788 859 789 - static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt) 860 + static void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt) 790 861 { 791 862 int v = atomic_read(&spt->refcount); 792 863 ··· 795 866 atomic_inc(&spt->refcount); 796 867 } 797 868 798 - static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt); 869 + static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt); 799 870 800 - static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu, 871 + static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu, 801 872 struct intel_gvt_gtt_entry *e) 802 873 { 803 874 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 804 875 struct intel_vgpu_ppgtt_spt *s; 805 876 intel_gvt_gtt_type_t cur_pt_type; 806 877 807 - if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type)))) 808 - return -EINVAL; 878 + GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type))); 809 879 810 880 if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY 811 881 && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { ··· 813 885 vgpu->gtt.scratch_pt[cur_pt_type].page_mfn) 814 886 return 0; 815 887 } 816 - s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e)); 888 + s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e)); 817 889 if (!s) { 818 890 gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n", 819 891 ops->get_pfn(e)); 820 892 return -ENXIO; 821 893 } 822 - return ppgtt_invalidate_shadow_page(s); 894 + return ppgtt_invalidate_spt(s); 823 895 } 824 896 825 - static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) 897 + static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt, 898 + struct intel_gvt_gtt_entry *entry) 899 + { 900 + struct intel_vgpu *vgpu = spt->vgpu; 901 + struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 902 + unsigned long pfn; 903 + int type; 904 + 905 + pfn = ops->get_pfn(entry); 906 + type = spt->shadow_page.type; 907 + 908 + if (pfn == vgpu->gtt.scratch_pt[type].page_mfn) 909 + return; 910 + 911 + intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT); 912 + } 913 + 914 + static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt) 826 915 { 827 916 struct intel_vgpu *vgpu = spt->vgpu; 828 
917 struct intel_gvt_gtt_entry e; ··· 848 903 int v = atomic_read(&spt->refcount); 849 904 850 905 trace_spt_change(spt->vgpu->id, "die", spt, 851 - spt->guest_page.track.gfn, spt->shadow_page.type); 906 + spt->guest_page.gfn, spt->shadow_page.type); 852 907 853 908 trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1)); 854 909 855 910 if (atomic_dec_return(&spt->refcount) > 0) 856 911 return 0; 857 912 858 - if (gtt_type_is_pte_pt(spt->shadow_page.type)) 859 - goto release; 860 - 861 913 for_each_present_shadow_entry(spt, &e, index) { 862 - if (!gtt_type_is_pt(get_next_pt_type(e.type))) { 863 - gvt_vgpu_err("GVT doesn't support pse bit for now\n"); 864 - return -EINVAL; 914 + switch (e.type) { 915 + case GTT_TYPE_PPGTT_PTE_4K_ENTRY: 916 + gvt_vdbg_mm("invalidate 4K entry\n"); 917 + ppgtt_invalidate_pte(spt, &e); 918 + break; 919 + case GTT_TYPE_PPGTT_PTE_2M_ENTRY: 920 + case GTT_TYPE_PPGTT_PTE_1G_ENTRY: 921 + WARN(1, "GVT doesn't support 2M/1GB page\n"); 922 + continue; 923 + case GTT_TYPE_PPGTT_PML4_ENTRY: 924 + case GTT_TYPE_PPGTT_PDP_ENTRY: 925 + case GTT_TYPE_PPGTT_PDE_ENTRY: 926 + gvt_vdbg_mm("invalidate PMUL4/PDP/PDE entry\n"); 927 + ret = ppgtt_invalidate_spt_by_shadow_entry( 928 + spt->vgpu, &e); 929 + if (ret) 930 + goto fail; 931 + break; 932 + default: 933 + GEM_BUG_ON(1); 865 934 } 866 - ret = ppgtt_invalidate_shadow_page_by_shadow_entry( 867 - spt->vgpu, &e); 868 - if (ret) 869 - goto fail; 870 935 } 871 - release: 936 + 872 937 trace_spt_change(spt->vgpu->id, "release", spt, 873 - spt->guest_page.track.gfn, spt->shadow_page.type); 874 - ppgtt_free_shadow_page(spt); 938 + spt->guest_page.gfn, spt->shadow_page.type); 939 + ppgtt_free_spt(spt); 875 940 return 0; 876 941 fail: 877 942 gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n", ··· 889 934 return ret; 890 935 } 891 936 892 - static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt); 937 + static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt); 893 938 894 - static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry( 939 + static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry( 895 940 struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we) 896 941 { 897 942 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 898 - struct intel_vgpu_ppgtt_spt *s = NULL; 899 - struct intel_vgpu_guest_page *g; 900 - struct intel_vgpu_page_track *t; 943 + struct intel_vgpu_ppgtt_spt *spt = NULL; 901 944 int ret; 902 945 903 - if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) { 904 - ret = -EINVAL; 905 - goto fail; 906 - } 946 + GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type))); 907 947 908 - t = intel_vgpu_find_tracked_page(vgpu, ops->get_pfn(we)); 909 - if (t) { 910 - g = page_track_to_guest_page(t); 911 - s = guest_page_to_ppgtt_spt(g); 912 - ppgtt_get_shadow_page(s); 913 - } else { 948 + spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we)); 949 + if (spt) 950 + ppgtt_get_spt(spt); 951 + else { 914 952 int type = get_next_pt_type(we->type); 915 953 916 - s = ppgtt_alloc_shadow_page(vgpu, type, ops->get_pfn(we)); 917 - if (IS_ERR(s)) { 918 - ret = PTR_ERR(s); 954 + spt = ppgtt_alloc_spt(vgpu, type, ops->get_pfn(we)); 955 + if (IS_ERR(spt)) { 956 + ret = PTR_ERR(spt); 919 957 goto fail; 920 958 } 921 959 922 - ret = intel_gvt_hypervisor_enable_page_track(vgpu, 923 - &s->guest_page.track); 960 + ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn); 924 961 if (ret) 925 962 goto fail; 926 963 927 - ret = 
ppgtt_populate_shadow_page(s); 964 + ret = ppgtt_populate_spt(spt); 928 965 if (ret) 929 966 goto fail; 930 967 931 - trace_spt_change(vgpu->id, "new", s, s->guest_page.track.gfn, 932 - s->shadow_page.type); 968 + trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn, 969 + spt->shadow_page.type); 933 970 } 934 - return s; 971 + return spt; 935 972 fail: 936 973 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", 937 - s, we->val64, we->type); 974 + spt, we->val64, we->type); 938 975 return ERR_PTR(ret); 939 976 } 940 977 ··· 941 994 ops->set_pfn(se, s->shadow_page.mfn); 942 995 } 943 996 944 - static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) 997 + static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu, 998 + struct intel_vgpu_ppgtt_spt *spt, unsigned long index, 999 + struct intel_gvt_gtt_entry *ge) 1000 + { 1001 + struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; 1002 + struct intel_gvt_gtt_entry se = *ge; 1003 + unsigned long gfn; 1004 + dma_addr_t dma_addr; 1005 + int ret; 1006 + 1007 + if (!pte_ops->test_present(ge)) 1008 + return 0; 1009 + 1010 + gfn = pte_ops->get_pfn(ge); 1011 + 1012 + switch (ge->type) { 1013 + case GTT_TYPE_PPGTT_PTE_4K_ENTRY: 1014 + gvt_vdbg_mm("shadow 4K gtt entry\n"); 1015 + break; 1016 + case GTT_TYPE_PPGTT_PTE_2M_ENTRY: 1017 + case GTT_TYPE_PPGTT_PTE_1G_ENTRY: 1018 + gvt_vgpu_err("GVT doesn't support 2M/1GB entry\n"); 1019 + return -EINVAL; 1020 + default: 1021 + GEM_BUG_ON(1); 1022 + }; 1023 + 1024 + /* direct shadow */ 1025 + ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, &dma_addr); 1026 + if (ret) 1027 + return -ENXIO; 1028 + 1029 + pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT); 1030 + ppgtt_set_shadow_entry(spt, &se, index); 1031 + return 0; 1032 + } 1033 + 1034 + static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt) 945 1035 { 946 1036 struct intel_vgpu *vgpu = spt->vgpu; 947 1037 struct intel_gvt *gvt = vgpu->gvt; ··· 989 1005 int ret; 990 1006 991 1007 trace_spt_change(spt->vgpu->id, "born", spt, 992 - spt->guest_page.track.gfn, spt->shadow_page.type); 993 - 994 - if (gtt_type_is_pte_pt(spt->shadow_page.type)) { 995 - for_each_present_guest_entry(spt, &ge, i) { 996 - gfn = ops->get_pfn(&ge); 997 - if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn) || 998 - gtt_entry_p2m(vgpu, &ge, &se)) 999 - ops->set_pfn(&se, gvt->gtt.scratch_mfn); 1000 - ppgtt_set_shadow_entry(spt, &se, i); 1001 - } 1002 - return 0; 1003 - } 1008 + spt->guest_page.gfn, spt->shadow_page.type); 1004 1009 1005 1010 for_each_present_guest_entry(spt, &ge, i) { 1006 - if (!gtt_type_is_pt(get_next_pt_type(ge.type))) { 1007 - gvt_vgpu_err("GVT doesn't support pse bit now\n"); 1008 - ret = -EINVAL; 1009 - goto fail; 1010 - } 1011 + if (gtt_type_is_pt(get_next_pt_type(ge.type))) { 1012 + s = ppgtt_populate_spt_by_guest_entry(vgpu, &ge); 1013 + if (IS_ERR(s)) { 1014 + ret = PTR_ERR(s); 1015 + goto fail; 1016 + } 1017 + ppgtt_get_shadow_entry(spt, &se, i); 1018 + ppgtt_generate_shadow_entry(&se, s, &ge); 1019 + ppgtt_set_shadow_entry(spt, &se, i); 1020 + } else { 1021 + gfn = ops->get_pfn(&ge); 1022 + if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) { 1023 + ops->set_pfn(&se, gvt->gtt.scratch_mfn); 1024 + ppgtt_set_shadow_entry(spt, &se, i); 1025 + continue; 1026 + } 1011 1027 1012 - s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge); 1013 - if (IS_ERR(s)) { 1014 - ret = PTR_ERR(s); 1015 - goto fail; 1028 + ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge); 1029 + if (ret) 1030 + goto fail; 1016 1031 
} 1017 - ppgtt_get_shadow_entry(spt, &se, i); 1018 - ppgtt_generate_shadow_entry(&se, s, &ge); 1019 - ppgtt_set_shadow_entry(spt, &se, i); 1020 1032 } 1021 1033 return 0; 1022 1034 fail: ··· 1021 1041 return ret; 1022 1042 } 1023 1043 1024 - static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt, 1044 + static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt, 1025 1045 struct intel_gvt_gtt_entry *se, unsigned long index) 1026 1046 { 1027 - struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt); 1028 - struct intel_vgpu_shadow_page *sp = &spt->shadow_page; 1029 1047 struct intel_vgpu *vgpu = spt->vgpu; 1030 1048 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 1031 1049 int ret; 1032 1050 1033 - trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, se->val64, 1034 - index); 1051 + trace_spt_guest_change(spt->vgpu->id, "remove", spt, 1052 + spt->shadow_page.type, se->val64, index); 1053 + 1054 + gvt_vdbg_mm("destroy old shadow entry, type %d, index %lu, value %llx\n", 1055 + se->type, index, se->val64); 1035 1056 1036 1057 if (!ops->test_present(se)) 1037 1058 return 0; 1038 1059 1039 - if (ops->get_pfn(se) == vgpu->gtt.scratch_pt[sp->type].page_mfn) 1060 + if (ops->get_pfn(se) == 1061 + vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn) 1040 1062 return 0; 1041 1063 1042 1064 if (gtt_type_is_pt(get_next_pt_type(se->type))) { 1043 1065 struct intel_vgpu_ppgtt_spt *s = 1044 - ppgtt_find_shadow_page(vgpu, ops->get_pfn(se)); 1066 + intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se)); 1045 1067 if (!s) { 1046 1068 gvt_vgpu_err("fail to find guest page\n"); 1047 1069 ret = -ENXIO; 1048 1070 goto fail; 1049 1071 } 1050 - ret = ppgtt_invalidate_shadow_page(s); 1072 + ret = ppgtt_invalidate_spt(s); 1051 1073 if (ret) 1052 1074 goto fail; 1053 - } 1075 + } else 1076 + ppgtt_invalidate_pte(spt, se); 1077 + 1054 1078 return 0; 1055 1079 fail: 1056 1080 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", ··· 1062 1078 return ret; 1063 1079 } 1064 1080 1065 - static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt, 1081 + static int ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt, 1066 1082 struct intel_gvt_gtt_entry *we, unsigned long index) 1067 1083 { 1068 - struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt); 1069 - struct intel_vgpu_shadow_page *sp = &spt->shadow_page; 1070 1084 struct intel_vgpu *vgpu = spt->vgpu; 1071 1085 struct intel_gvt_gtt_entry m; 1072 1086 struct intel_vgpu_ppgtt_spt *s; 1073 1087 int ret; 1074 1088 1075 - trace_gpt_change(spt->vgpu->id, "add", spt, sp->type, 1076 - we->val64, index); 1089 + trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type, 1090 + we->val64, index); 1091 + 1092 + gvt_vdbg_mm("add shadow entry: type %d, index %lu, value %llx\n", 1093 + we->type, index, we->val64); 1077 1094 1078 1095 if (gtt_type_is_pt(get_next_pt_type(we->type))) { 1079 - s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, we); 1096 + s = ppgtt_populate_spt_by_guest_entry(vgpu, we); 1080 1097 if (IS_ERR(s)) { 1081 1098 ret = PTR_ERR(s); 1082 1099 goto fail; ··· 1086 1101 ppgtt_generate_shadow_entry(&m, s, we); 1087 1102 ppgtt_set_shadow_entry(spt, &m, index); 1088 1103 } else { 1089 - ret = gtt_entry_p2m(vgpu, we, &m); 1104 + ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we); 1090 1105 if (ret) 1091 1106 goto fail; 1092 - ppgtt_set_shadow_entry(spt, &m, index); 1093 1107 } 1094 1108 return 0; 1095 1109 fail: ··· 1103 1119 const struct 
intel_gvt_device_info *info = &vgpu->gvt->device_info; 1104 1120 struct intel_gvt *gvt = vgpu->gvt; 1105 1121 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; 1106 - struct intel_vgpu_ppgtt_spt *spt = 1107 - guest_page_to_ppgtt_spt(oos_page->guest_page); 1108 - struct intel_gvt_gtt_entry old, new, m; 1122 + struct intel_vgpu_ppgtt_spt *spt = oos_page->spt; 1123 + struct intel_gvt_gtt_entry old, new; 1109 1124 int index; 1110 1125 int ret; 1111 1126 1112 1127 trace_oos_change(vgpu->id, "sync", oos_page->id, 1113 - oos_page->guest_page, spt->guest_page_type); 1128 + spt, spt->guest_page.type); 1114 1129 1115 - old.type = new.type = get_entry_type(spt->guest_page_type); 1130 + old.type = new.type = get_entry_type(spt->guest_page.type); 1116 1131 old.val64 = new.val64 = 0; 1117 1132 1118 1133 for (index = 0; index < (I915_GTT_PAGE_SIZE >> 1119 1134 info->gtt_entry_size_shift); index++) { 1120 1135 ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu); 1121 1136 ops->get_entry(NULL, &new, index, true, 1122 - oos_page->guest_page->track.gfn << PAGE_SHIFT, vgpu); 1137 + spt->guest_page.gfn << PAGE_SHIFT, vgpu); 1123 1138 1124 1139 if (old.val64 == new.val64 1125 1140 && !test_and_clear_bit(index, spt->post_shadow_bitmap)) 1126 1141 continue; 1127 1142 1128 1143 trace_oos_sync(vgpu->id, oos_page->id, 1129 - oos_page->guest_page, spt->guest_page_type, 1144 + spt, spt->guest_page.type, 1130 1145 new.val64, index); 1131 1146 1132 - ret = gtt_entry_p2m(vgpu, &new, &m); 1147 + ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new); 1133 1148 if (ret) 1134 1149 return ret; 1135 1150 1136 1151 ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu); 1137 - ppgtt_set_shadow_entry(spt, &m, index); 1138 1152 } 1139 1153 1140 - oos_page->guest_page->write_cnt = 0; 1154 + spt->guest_page.write_cnt = 0; 1141 1155 list_del_init(&spt->post_shadow_list); 1142 1156 return 0; 1143 1157 } ··· 1144 1162 struct intel_vgpu_oos_page *oos_page) 1145 1163 { 1146 1164 struct intel_gvt *gvt = vgpu->gvt; 1147 - struct intel_vgpu_ppgtt_spt *spt = 1148 - guest_page_to_ppgtt_spt(oos_page->guest_page); 1165 + struct intel_vgpu_ppgtt_spt *spt = oos_page->spt; 1149 1166 1150 1167 trace_oos_change(vgpu->id, "detach", oos_page->id, 1151 - oos_page->guest_page, spt->guest_page_type); 1168 + spt, spt->guest_page.type); 1152 1169 1153 - oos_page->guest_page->write_cnt = 0; 1154 - oos_page->guest_page->oos_page = NULL; 1155 - oos_page->guest_page = NULL; 1170 + spt->guest_page.write_cnt = 0; 1171 + spt->guest_page.oos_page = NULL; 1172 + oos_page->spt = NULL; 1156 1173 1157 1174 list_del_init(&oos_page->vm_list); 1158 1175 list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head); ··· 1159 1178 return 0; 1160 1179 } 1161 1180 1162 - static int attach_oos_page(struct intel_vgpu *vgpu, 1163 - struct intel_vgpu_oos_page *oos_page, 1164 - struct intel_vgpu_guest_page *gpt) 1181 + static int attach_oos_page(struct intel_vgpu_oos_page *oos_page, 1182 + struct intel_vgpu_ppgtt_spt *spt) 1165 1183 { 1166 - struct intel_gvt *gvt = vgpu->gvt; 1184 + struct intel_gvt *gvt = spt->vgpu->gvt; 1167 1185 int ret; 1168 1186 1169 - ret = intel_gvt_hypervisor_read_gpa(vgpu, 1170 - gpt->track.gfn << I915_GTT_PAGE_SHIFT, 1187 + ret = intel_gvt_hypervisor_read_gpa(spt->vgpu, 1188 + spt->guest_page.gfn << I915_GTT_PAGE_SHIFT, 1171 1189 oos_page->mem, I915_GTT_PAGE_SIZE); 1172 1190 if (ret) 1173 1191 return ret; 1174 1192 1175 - oos_page->guest_page = gpt; 1176 - gpt->oos_page = oos_page; 1193 + oos_page->spt = spt; 1194 + 
spt->guest_page.oos_page = oos_page; 1177 1195 1178 1196 list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head); 1179 1197 1180 - trace_oos_change(vgpu->id, "attach", gpt->oos_page->id, 1181 - gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type); 1198 + trace_oos_change(spt->vgpu->id, "attach", oos_page->id, 1199 + spt, spt->guest_page.type); 1182 1200 return 0; 1183 1201 } 1184 1202 1185 - static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu, 1186 - struct intel_vgpu_guest_page *gpt) 1203 + static int ppgtt_set_guest_page_sync(struct intel_vgpu_ppgtt_spt *spt) 1187 1204 { 1205 + struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page; 1188 1206 int ret; 1189 1207 1190 - ret = intel_gvt_hypervisor_enable_page_track(vgpu, &gpt->track); 1208 + ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn); 1191 1209 if (ret) 1192 1210 return ret; 1193 1211 1194 - trace_oos_change(vgpu->id, "set page sync", gpt->oos_page->id, 1195 - gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type); 1212 + trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id, 1213 + spt, spt->guest_page.type); 1196 1214 1197 - list_del_init(&gpt->oos_page->vm_list); 1198 - return sync_oos_page(vgpu, gpt->oos_page); 1215 + list_del_init(&oos_page->vm_list); 1216 + return sync_oos_page(spt->vgpu, oos_page); 1199 1217 } 1200 1218 1201 - static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu, 1202 - struct intel_vgpu_guest_page *gpt) 1219 + static int ppgtt_allocate_oos_page(struct intel_vgpu_ppgtt_spt *spt) 1203 1220 { 1204 - struct intel_gvt *gvt = vgpu->gvt; 1221 + struct intel_gvt *gvt = spt->vgpu->gvt; 1205 1222 struct intel_gvt_gtt *gtt = &gvt->gtt; 1206 - struct intel_vgpu_oos_page *oos_page = gpt->oos_page; 1223 + struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page; 1207 1224 int ret; 1208 1225 1209 1226 WARN(oos_page, "shadow PPGTT page has already has a oos page\n"); ··· 1209 1230 if (list_empty(&gtt->oos_page_free_list_head)) { 1210 1231 oos_page = container_of(gtt->oos_page_use_list_head.next, 1211 1232 struct intel_vgpu_oos_page, list); 1212 - ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page); 1233 + ret = ppgtt_set_guest_page_sync(oos_page->spt); 1213 1234 if (ret) 1214 1235 return ret; 1215 - ret = detach_oos_page(vgpu, oos_page); 1236 + ret = detach_oos_page(spt->vgpu, oos_page); 1216 1237 if (ret) 1217 1238 return ret; 1218 1239 } else 1219 1240 oos_page = container_of(gtt->oos_page_free_list_head.next, 1220 1241 struct intel_vgpu_oos_page, list); 1221 - return attach_oos_page(vgpu, oos_page, gpt); 1242 + return attach_oos_page(oos_page, spt); 1222 1243 } 1223 1244 1224 - static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu, 1225 - struct intel_vgpu_guest_page *gpt) 1245 + static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt) 1226 1246 { 1227 - struct intel_vgpu_oos_page *oos_page = gpt->oos_page; 1247 + struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page; 1228 1248 1229 1249 if (WARN(!oos_page, "shadow PPGTT page should have a oos page\n")) 1230 1250 return -EINVAL; 1231 1251 1232 - trace_oos_change(vgpu->id, "set page out of sync", gpt->oos_page->id, 1233 - gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type); 1252 + trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id, 1253 + spt, spt->guest_page.type); 1234 1254 1235 - list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head); 1236 - return intel_gvt_hypervisor_disable_page_track(vgpu, &gpt->track); 1255 + 
list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head); 1256 + return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn); 1237 1257 } 1238 1258 1239 1259 /** ··· 1257 1279 list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) { 1258 1280 oos_page = container_of(pos, 1259 1281 struct intel_vgpu_oos_page, vm_list); 1260 - ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page); 1282 + ret = ppgtt_set_guest_page_sync(oos_page->spt); 1261 1283 if (ret) 1262 1284 return ret; 1263 1285 } ··· 1268 1290 * The heart of PPGTT shadow page table. 1269 1291 */ 1270 1292 static int ppgtt_handle_guest_write_page_table( 1271 - struct intel_vgpu_guest_page *gpt, 1293 + struct intel_vgpu_ppgtt_spt *spt, 1272 1294 struct intel_gvt_gtt_entry *we, unsigned long index) 1273 1295 { 1274 - struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt); 1275 1296 struct intel_vgpu *vgpu = spt->vgpu; 1276 1297 int type = spt->shadow_page.type; 1277 1298 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 1278 - struct intel_gvt_gtt_entry se; 1279 - 1280 - int ret; 1299 + struct intel_gvt_gtt_entry old_se; 1281 1300 int new_present; 1301 + int ret; 1282 1302 1283 1303 new_present = ops->test_present(we); 1284 1304 ··· 1285 1309 * guarantee the ppgtt table is validated during the window between 1286 1310 * adding and removal. 1287 1311 */ 1288 - ppgtt_get_shadow_entry(spt, &se, index); 1312 + ppgtt_get_shadow_entry(spt, &old_se, index); 1289 1313 1290 1314 if (new_present) { 1291 - ret = ppgtt_handle_guest_entry_add(gpt, we, index); 1315 + ret = ppgtt_handle_guest_entry_add(spt, we, index); 1292 1316 if (ret) 1293 1317 goto fail; 1294 1318 } 1295 1319 1296 - ret = ppgtt_handle_guest_entry_removal(gpt, &se, index); 1320 + ret = ppgtt_handle_guest_entry_removal(spt, &old_se, index); 1297 1321 if (ret) 1298 1322 goto fail; 1299 1323 1300 1324 if (!new_present) { 1301 - ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn); 1302 - ppgtt_set_shadow_entry(spt, &se, index); 1325 + ops->set_pfn(&old_se, vgpu->gtt.scratch_pt[type].page_mfn); 1326 + ppgtt_set_shadow_entry(spt, &old_se, index); 1303 1327 } 1304 1328 1305 1329 return 0; ··· 1309 1333 return ret; 1310 1334 } 1311 1335 1312 - static inline bool can_do_out_of_sync(struct intel_vgpu_guest_page *gpt) 1336 + 1337 + 1338 + static inline bool can_do_out_of_sync(struct intel_vgpu_ppgtt_spt *spt) 1313 1339 { 1314 1340 return enable_out_of_sync 1315 - && gtt_type_is_pte_pt( 1316 - guest_page_to_ppgtt_spt(gpt)->guest_page_type) 1317 - && gpt->write_cnt >= 2; 1341 + && gtt_type_is_pte_pt(spt->guest_page.type) 1342 + && spt->guest_page.write_cnt >= 2; 1318 1343 } 1319 1344 1320 1345 static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt, ··· 1355 1378 GTT_ENTRY_NUM_IN_ONE_PAGE) { 1356 1379 ppgtt_get_guest_entry(spt, &ge, index); 1357 1380 1358 - ret = ppgtt_handle_guest_write_page_table( 1359 - &spt->guest_page, &ge, index); 1381 + ret = ppgtt_handle_guest_write_page_table(spt, 1382 + &ge, index); 1360 1383 if (ret) 1361 1384 return ret; 1362 1385 clear_bit(index, spt->post_shadow_bitmap); ··· 1367 1390 } 1368 1391 1369 1392 static int ppgtt_handle_guest_write_page_table_bytes( 1370 - struct intel_vgpu_guest_page *gpt, 1393 + struct intel_vgpu_ppgtt_spt *spt, 1371 1394 u64 pa, void *p_data, int bytes) 1372 1395 { 1373 - struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt); 1374 1396 struct intel_vgpu *vgpu = spt->vgpu; 1375 1397 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 1376 1398 const struct 
intel_gvt_device_info *info = &vgpu->gvt->device_info; ··· 1384 1408 ops->test_pse(&we); 1385 1409 1386 1410 if (bytes == info->gtt_entry_size) { 1387 - ret = ppgtt_handle_guest_write_page_table(gpt, &we, index); 1411 + ret = ppgtt_handle_guest_write_page_table(spt, &we, index); 1388 1412 if (ret) 1389 1413 return ret; 1390 1414 } else { ··· 1392 1416 int type = spt->shadow_page.type; 1393 1417 1394 1418 ppgtt_get_shadow_entry(spt, &se, index); 1395 - ret = ppgtt_handle_guest_entry_removal(gpt, &se, index); 1419 + ret = ppgtt_handle_guest_entry_removal(spt, &se, index); 1396 1420 if (ret) 1397 1421 return ret; 1398 1422 ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn); ··· 1404 1428 if (!enable_out_of_sync) 1405 1429 return 0; 1406 1430 1407 - gpt->write_cnt++; 1431 + spt->guest_page.write_cnt++; 1408 1432 1409 - if (gpt->oos_page) 1410 - ops->set_entry(gpt->oos_page->mem, &we, index, 1433 + if (spt->guest_page.oos_page) 1434 + ops->set_entry(spt->guest_page.oos_page->mem, &we, index, 1411 1435 false, 0, vgpu); 1412 1436 1413 - if (can_do_out_of_sync(gpt)) { 1414 - if (!gpt->oos_page) 1415 - ppgtt_allocate_oos_page(vgpu, gpt); 1437 + if (can_do_out_of_sync(spt)) { 1438 + if (!spt->guest_page.oos_page) 1439 + ppgtt_allocate_oos_page(spt); 1416 1440 1417 - ret = ppgtt_set_guest_page_oos(vgpu, gpt); 1441 + ret = ppgtt_set_guest_page_oos(spt); 1418 1442 if (ret < 0) 1419 1443 return ret; 1420 1444 } 1421 1445 return 0; 1422 1446 } 1423 1447 1424 - /* 1425 - * mm page table allocation policy for bdw+ 1426 - * - for ggtt, only virtual page table will be allocated. 1427 - * - for ppgtt, dedicated virtual/shadow page table will be allocated. 1428 - */ 1429 - static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm) 1430 - { 1431 - struct intel_vgpu *vgpu = mm->vgpu; 1432 - struct intel_gvt *gvt = vgpu->gvt; 1433 - const struct intel_gvt_device_info *info = &gvt->device_info; 1434 - void *mem; 1435 - 1436 - if (mm->type == INTEL_GVT_MM_PPGTT) { 1437 - mm->page_table_entry_cnt = 4; 1438 - mm->page_table_entry_size = mm->page_table_entry_cnt * 1439 - info->gtt_entry_size; 1440 - mem = kzalloc(mm->has_shadow_page_table ? 
1441 - mm->page_table_entry_size * 2 1442 - : mm->page_table_entry_size, GFP_KERNEL); 1443 - if (!mem) 1444 - return -ENOMEM; 1445 - mm->virtual_page_table = mem; 1446 - if (!mm->has_shadow_page_table) 1447 - return 0; 1448 - mm->shadow_page_table = mem + mm->page_table_entry_size; 1449 - } else if (mm->type == INTEL_GVT_MM_GGTT) { 1450 - mm->page_table_entry_cnt = 1451 - (gvt_ggtt_gm_sz(gvt) >> I915_GTT_PAGE_SHIFT); 1452 - mm->page_table_entry_size = mm->page_table_entry_cnt * 1453 - info->gtt_entry_size; 1454 - mem = vzalloc(mm->page_table_entry_size); 1455 - if (!mem) 1456 - return -ENOMEM; 1457 - mm->virtual_page_table = mem; 1458 - } 1459 - return 0; 1460 - } 1461 - 1462 - static void gen8_mm_free_page_table(struct intel_vgpu_mm *mm) 1463 - { 1464 - if (mm->type == INTEL_GVT_MM_PPGTT) { 1465 - kfree(mm->virtual_page_table); 1466 - } else if (mm->type == INTEL_GVT_MM_GGTT) { 1467 - if (mm->virtual_page_table) 1468 - vfree(mm->virtual_page_table); 1469 - } 1470 - mm->virtual_page_table = mm->shadow_page_table = NULL; 1471 - } 1472 - 1473 - static void invalidate_mm(struct intel_vgpu_mm *mm) 1448 + static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm) 1474 1449 { 1475 1450 struct intel_vgpu *vgpu = mm->vgpu; 1476 1451 struct intel_gvt *gvt = vgpu->gvt; 1477 1452 struct intel_gvt_gtt *gtt = &gvt->gtt; 1478 1453 struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops; 1479 1454 struct intel_gvt_gtt_entry se; 1480 - int i; 1455 + int index; 1481 1456 1482 - if (WARN_ON(!mm->has_shadow_page_table || !mm->shadowed)) 1457 + if (!mm->ppgtt_mm.shadowed) 1483 1458 return; 1484 1459 1485 - for (i = 0; i < mm->page_table_entry_cnt; i++) { 1486 - ppgtt_get_shadow_root_entry(mm, &se, i); 1460 + for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) { 1461 + ppgtt_get_shadow_root_entry(mm, &se, index); 1462 + 1487 1463 if (!ops->test_present(&se)) 1488 1464 continue; 1489 - ppgtt_invalidate_shadow_page_by_shadow_entry( 1490 - vgpu, &se); 1465 + 1466 + ppgtt_invalidate_spt_by_shadow_entry(vgpu, &se); 1491 1467 se.val64 = 0; 1492 - ppgtt_set_shadow_root_entry(mm, &se, i); 1468 + ppgtt_set_shadow_root_entry(mm, &se, index); 1493 1469 1494 - trace_gpt_change(vgpu->id, "destroy root pointer", 1495 - NULL, se.type, se.val64, i); 1470 + trace_spt_guest_change(vgpu->id, "destroy root pointer", 1471 + NULL, se.type, se.val64, index); 1496 1472 } 1497 - mm->shadowed = false; 1473 + 1474 + mm->ppgtt_mm.shadowed = false; 1498 1475 } 1499 1476 1500 - /** 1501 - * intel_vgpu_destroy_mm - destroy a mm object 1502 - * @mm: a kref object 1503 - * 1504 - * This function is used to destroy a mm object for vGPU 1505 - * 1506 - */ 1507 - void intel_vgpu_destroy_mm(struct kref *mm_ref) 1508 - { 1509 - struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref); 1510 - struct intel_vgpu *vgpu = mm->vgpu; 1511 - struct intel_gvt *gvt = vgpu->gvt; 1512 - struct intel_gvt_gtt *gtt = &gvt->gtt; 1513 1477 1514 - if (!mm->initialized) 1515 - goto out; 1516 - 1517 - list_del(&mm->list); 1518 - list_del(&mm->lru_list); 1519 - 1520 - if (mm->has_shadow_page_table) 1521 - invalidate_mm(mm); 1522 - 1523 - gtt->mm_free_page_table(mm); 1524 - out: 1525 - kfree(mm); 1526 - } 1527 - 1528 - static int shadow_mm(struct intel_vgpu_mm *mm) 1478 + static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm) 1529 1479 { 1530 1480 struct intel_vgpu *vgpu = mm->vgpu; 1531 1481 struct intel_gvt *gvt = vgpu->gvt; ··· 1459 1557 struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops; 1460 1558 struct intel_vgpu_ppgtt_spt *spt; 1461 1559 struct 
intel_gvt_gtt_entry ge, se; 1462 - int i; 1463 - int ret; 1560 + int index, ret; 1464 1561 1465 - if (WARN_ON(!mm->has_shadow_page_table || mm->shadowed)) 1562 + if (mm->ppgtt_mm.shadowed) 1466 1563 return 0; 1467 1564 1468 - mm->shadowed = true; 1565 + mm->ppgtt_mm.shadowed = true; 1469 1566 1470 - for (i = 0; i < mm->page_table_entry_cnt; i++) { 1471 - ppgtt_get_guest_root_entry(mm, &ge, i); 1567 + for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) { 1568 + ppgtt_get_guest_root_entry(mm, &ge, index); 1569 + 1472 1570 if (!ops->test_present(&ge)) 1473 1571 continue; 1474 1572 1475 - trace_gpt_change(vgpu->id, __func__, NULL, 1476 - ge.type, ge.val64, i); 1573 + trace_spt_guest_change(vgpu->id, __func__, NULL, 1574 + ge.type, ge.val64, index); 1477 1575 1478 - spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge); 1576 + spt = ppgtt_populate_spt_by_guest_entry(vgpu, &ge); 1479 1577 if (IS_ERR(spt)) { 1480 1578 gvt_vgpu_err("fail to populate guest root pointer\n"); 1481 1579 ret = PTR_ERR(spt); 1482 1580 goto fail; 1483 1581 } 1484 1582 ppgtt_generate_shadow_entry(&se, spt, &ge); 1485 - ppgtt_set_shadow_root_entry(mm, &se, i); 1583 + ppgtt_set_shadow_root_entry(mm, &se, index); 1486 1584 1487 - trace_gpt_change(vgpu->id, "populate root pointer", 1488 - NULL, se.type, se.val64, i); 1585 + trace_spt_guest_change(vgpu->id, "populate root pointer", 1586 + NULL, se.type, se.val64, index); 1489 1587 } 1588 + 1490 1589 return 0; 1491 1590 fail: 1492 - invalidate_mm(mm); 1591 + invalidate_ppgtt_mm(mm); 1493 1592 return ret; 1494 1593 } 1495 1594 1595 + static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu) 1596 + { 1597 + struct intel_vgpu_mm *mm; 1598 + 1599 + mm = kzalloc(sizeof(*mm), GFP_KERNEL); 1600 + if (!mm) 1601 + return NULL; 1602 + 1603 + mm->vgpu = vgpu; 1604 + kref_init(&mm->ref); 1605 + atomic_set(&mm->pincount, 0); 1606 + 1607 + return mm; 1608 + } 1609 + 1610 + static void vgpu_free_mm(struct intel_vgpu_mm *mm) 1611 + { 1612 + kfree(mm); 1613 + } 1614 + 1496 1615 /** 1497 - * intel_vgpu_create_mm - create a mm object for a vGPU 1616 + * intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU 1498 1617 * @vgpu: a vGPU 1499 - * @mm_type: mm object type, should be PPGTT or GGTT 1500 - * @virtual_page_table: page table root pointers. Could be NULL if user wants 1501 - * to populate shadow later. 1502 - * @page_table_level: describe the page table level of the mm object 1503 - * @pde_base_index: pde root pointer base in GGTT MMIO. 1618 + * @root_entry_type: ppgtt root entry type 1619 + * @pdps: guest pdps. 1504 1620 * 1505 - * This function is used to create a mm object for a vGPU. 1621 + * This function is used to create a ppgtt mm object for a vGPU. 1506 1622 * 1507 1623 * Returns: 1508 1624 * Zero on success, negative error code in pointer if failed. 
1509 1625 */ 1510 - struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu, 1511 - int mm_type, void *virtual_page_table, int page_table_level, 1512 - u32 pde_base_index) 1626 + struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu, 1627 + intel_gvt_gtt_type_t root_entry_type, u64 pdps[]) 1513 1628 { 1514 1629 struct intel_gvt *gvt = vgpu->gvt; 1515 - struct intel_gvt_gtt *gtt = &gvt->gtt; 1516 1630 struct intel_vgpu_mm *mm; 1517 1631 int ret; 1518 1632 1519 - mm = kzalloc(sizeof(*mm), GFP_KERNEL); 1520 - if (!mm) { 1521 - ret = -ENOMEM; 1522 - goto fail; 1523 - } 1633 + mm = vgpu_alloc_mm(vgpu); 1634 + if (!mm) 1635 + return ERR_PTR(-ENOMEM); 1524 1636 1525 - mm->type = mm_type; 1637 + mm->type = INTEL_GVT_MM_PPGTT; 1526 1638 1527 - if (page_table_level == 1) 1528 - mm->page_table_entry_type = GTT_TYPE_GGTT_PTE; 1529 - else if (page_table_level == 3) 1530 - mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY; 1531 - else if (page_table_level == 4) 1532 - mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY; 1533 - else { 1534 - WARN_ON(1); 1535 - ret = -EINVAL; 1536 - goto fail; 1537 - } 1639 + GEM_BUG_ON(root_entry_type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY && 1640 + root_entry_type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY); 1641 + mm->ppgtt_mm.root_entry_type = root_entry_type; 1538 1642 1539 - mm->page_table_level = page_table_level; 1540 - mm->pde_base_index = pde_base_index; 1643 + INIT_LIST_HEAD(&mm->ppgtt_mm.list); 1644 + INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list); 1541 1645 1542 - mm->vgpu = vgpu; 1543 - mm->has_shadow_page_table = !!(mm_type == INTEL_GVT_MM_PPGTT); 1646 + if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) 1647 + mm->ppgtt_mm.guest_pdps[0] = pdps[0]; 1648 + else 1649 + memcpy(mm->ppgtt_mm.guest_pdps, pdps, 1650 + sizeof(mm->ppgtt_mm.guest_pdps)); 1544 1651 1545 - kref_init(&mm->ref); 1546 - atomic_set(&mm->pincount, 0); 1547 - INIT_LIST_HEAD(&mm->list); 1548 - INIT_LIST_HEAD(&mm->lru_list); 1549 - list_add_tail(&mm->list, &vgpu->gtt.mm_list_head); 1550 - 1551 - ret = gtt->mm_alloc_page_table(mm); 1652 + ret = shadow_ppgtt_mm(mm); 1552 1653 if (ret) { 1553 - gvt_vgpu_err("fail to allocate page table for mm\n"); 1554 - goto fail; 1654 + gvt_vgpu_err("failed to shadow ppgtt mm\n"); 1655 + vgpu_free_mm(mm); 1656 + return ERR_PTR(ret); 1555 1657 } 1556 1658 1557 - mm->initialized = true; 1558 - 1559 - if (virtual_page_table) 1560 - memcpy(mm->virtual_page_table, virtual_page_table, 1561 - mm->page_table_entry_size); 1562 - 1563 - if (mm->has_shadow_page_table) { 1564 - ret = shadow_mm(mm); 1565 - if (ret) 1566 - goto fail; 1567 - list_add_tail(&mm->lru_list, &gvt->gtt.mm_lru_list_head); 1568 - } 1659 + list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head); 1660 + list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head); 1569 1661 return mm; 1570 - fail: 1571 - gvt_vgpu_err("fail to create mm\n"); 1572 - if (mm) 1573 - intel_gvt_mm_unreference(mm); 1574 - return ERR_PTR(ret); 1662 + } 1663 + 1664 + static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu) 1665 + { 1666 + struct intel_vgpu_mm *mm; 1667 + unsigned long nr_entries; 1668 + 1669 + mm = vgpu_alloc_mm(vgpu); 1670 + if (!mm) 1671 + return ERR_PTR(-ENOMEM); 1672 + 1673 + mm->type = INTEL_GVT_MM_GGTT; 1674 + 1675 + nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT; 1676 + mm->ggtt_mm.virtual_ggtt = vzalloc(nr_entries * 1677 + vgpu->gvt->device_info.gtt_entry_size); 1678 + if (!mm->ggtt_mm.virtual_ggtt) { 1679 + vgpu_free_mm(mm); 
1680 + return ERR_PTR(-ENOMEM); 1681 + } 1682 + 1683 + return mm; 1684 + } 1685 + 1686 + /** 1687 + * _intel_vgpu_mm_release - destroy a mm object 1688 + * @mm_ref: a kref object 1689 + * 1690 + * This function is used to destroy a mm object for vGPU 1691 + * 1692 + */ 1693 + void _intel_vgpu_mm_release(struct kref *mm_ref) 1694 + { 1695 + struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref); 1696 + 1697 + if (GEM_WARN_ON(atomic_read(&mm->pincount))) 1698 + gvt_err("vgpu mm pin count bug detected\n"); 1699 + 1700 + if (mm->type == INTEL_GVT_MM_PPGTT) { 1701 + list_del(&mm->ppgtt_mm.list); 1702 + list_del(&mm->ppgtt_mm.lru_list); 1703 + invalidate_ppgtt_mm(mm); 1704 + } else { 1705 + vfree(mm->ggtt_mm.virtual_ggtt); 1706 + } 1707 + 1708 + vgpu_free_mm(mm); 1575 1709 } 1576 1710 1577 1711 /** ··· 1618 1680 */ 1619 1681 void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm) 1620 1682 { 1621 - if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT)) 1622 - return; 1623 - 1624 1683 atomic_dec(&mm->pincount); 1625 1684 } 1626 1685 ··· 1636 1701 { 1637 1702 int ret; 1638 1703 1639 - if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT)) 1640 - return 0; 1704 + atomic_inc(&mm->pincount); 1641 1705 1642 - if (!mm->shadowed) { 1643 - ret = shadow_mm(mm); 1706 + if (mm->type == INTEL_GVT_MM_PPGTT) { 1707 + ret = shadow_ppgtt_mm(mm); 1644 1708 if (ret) 1645 1709 return ret; 1710 + 1711 + list_move_tail(&mm->ppgtt_mm.lru_list, 1712 + &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head); 1713 + 1646 1714 } 1647 1715 1648 - atomic_inc(&mm->pincount); 1649 - list_del_init(&mm->lru_list); 1650 - list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head); 1651 1716 return 0; 1652 1717 } 1653 1718 1654 - static int reclaim_one_mm(struct intel_gvt *gvt) 1719 + static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt) 1655 1720 { 1656 1721 struct intel_vgpu_mm *mm; 1657 1722 struct list_head *pos, *n; 1658 1723 1659 - list_for_each_safe(pos, n, &gvt->gtt.mm_lru_list_head) { 1660 - mm = container_of(pos, struct intel_vgpu_mm, lru_list); 1724 + list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) { 1725 + mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list); 1661 1726 1662 - if (mm->type != INTEL_GVT_MM_PPGTT) 1663 - continue; 1664 1727 if (atomic_read(&mm->pincount)) 1665 1728 continue; 1666 1729 1667 - list_del_init(&mm->lru_list); 1668 - invalidate_mm(mm); 1730 + list_del_init(&mm->ppgtt_mm.lru_list); 1731 + invalidate_ppgtt_mm(mm); 1669 1732 return 1; 1670 1733 } 1671 1734 return 0; ··· 1679 1746 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 1680 1747 struct intel_vgpu_ppgtt_spt *s; 1681 1748 1682 - if (WARN_ON(!mm->has_shadow_page_table)) 1683 - return -EINVAL; 1684 - 1685 - s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e)); 1749 + s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e)); 1686 1750 if (!s) 1687 1751 return -ENXIO; 1688 1752 ··· 1710 1780 unsigned long gpa = INTEL_GVT_INVALID_ADDR; 1711 1781 unsigned long gma_index[4]; 1712 1782 struct intel_gvt_gtt_entry e; 1713 - int i, index; 1783 + int i, levels = 0; 1714 1784 int ret; 1715 1785 1716 - if (mm->type != INTEL_GVT_MM_GGTT && mm->type != INTEL_GVT_MM_PPGTT) 1717 - return INTEL_GVT_INVALID_ADDR; 1786 + GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT && 1787 + mm->type != INTEL_GVT_MM_PPGTT); 1718 1788 1719 1789 if (mm->type == INTEL_GVT_MM_GGTT) { 1720 1790 if (!vgpu_gmadr_is_valid(vgpu, gma)) 1721 1791 goto err; 1722 1792 1723 - ret = ggtt_get_guest_entry(mm, &e, 1724 - gma_ops->gma_to_ggtt_pte_index(gma)); 1725 - if (ret) 1726 - goto 
err; 1793 + ggtt_get_guest_entry(mm, &e, 1794 + gma_ops->gma_to_ggtt_pte_index(gma)); 1795 + 1727 1796 gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) 1728 1797 + (gma & ~I915_GTT_PAGE_MASK); 1729 1798 1730 1799 trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa); 1731 - return gpa; 1732 - } 1800 + } else { 1801 + switch (mm->ppgtt_mm.root_entry_type) { 1802 + case GTT_TYPE_PPGTT_ROOT_L4_ENTRY: 1803 + ppgtt_get_shadow_root_entry(mm, &e, 0); 1733 1804 1734 - switch (mm->page_table_level) { 1735 - case 4: 1736 - ret = ppgtt_get_shadow_root_entry(mm, &e, 0); 1737 - if (ret) 1738 - goto err; 1739 - gma_index[0] = gma_ops->gma_to_pml4_index(gma); 1740 - gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma); 1741 - gma_index[2] = gma_ops->gma_to_pde_index(gma); 1742 - gma_index[3] = gma_ops->gma_to_pte_index(gma); 1743 - index = 4; 1744 - break; 1745 - case 3: 1746 - ret = ppgtt_get_shadow_root_entry(mm, &e, 1747 - gma_ops->gma_to_l3_pdp_index(gma)); 1748 - if (ret) 1749 - goto err; 1750 - gma_index[0] = gma_ops->gma_to_pde_index(gma); 1751 - gma_index[1] = gma_ops->gma_to_pte_index(gma); 1752 - index = 2; 1753 - break; 1754 - case 2: 1755 - ret = ppgtt_get_shadow_root_entry(mm, &e, 1756 - gma_ops->gma_to_pde_index(gma)); 1757 - if (ret) 1758 - goto err; 1759 - gma_index[0] = gma_ops->gma_to_pte_index(gma); 1760 - index = 1; 1761 - break; 1762 - default: 1763 - WARN_ON(1); 1764 - goto err; 1765 - } 1805 + gma_index[0] = gma_ops->gma_to_pml4_index(gma); 1806 + gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma); 1807 + gma_index[2] = gma_ops->gma_to_pde_index(gma); 1808 + gma_index[3] = gma_ops->gma_to_pte_index(gma); 1809 + levels = 4; 1810 + break; 1811 + case GTT_TYPE_PPGTT_ROOT_L3_ENTRY: 1812 + ppgtt_get_shadow_root_entry(mm, &e, 1813 + gma_ops->gma_to_l3_pdp_index(gma)); 1766 1814 1767 - /* walk into the shadow page table and get gpa from guest entry */ 1768 - for (i = 0; i < index; i++) { 1769 - ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i], 1770 - (i == index - 1)); 1771 - if (ret) 1772 - goto err; 1773 - 1774 - if (!pte_ops->test_present(&e)) { 1775 - gvt_dbg_core("GMA 0x%lx is not present\n", gma); 1776 - goto err; 1815 + gma_index[0] = gma_ops->gma_to_pde_index(gma); 1816 + gma_index[1] = gma_ops->gma_to_pte_index(gma); 1817 + levels = 2; 1818 + break; 1819 + default: 1820 + GEM_BUG_ON(1); 1777 1821 } 1822 + 1823 + /* walk the shadow page table and get gpa from guest entry */ 1824 + for (i = 0; i < levels; i++) { 1825 + ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i], 1826 + (i == levels - 1)); 1827 + if (ret) 1828 + goto err; 1829 + 1830 + if (!pte_ops->test_present(&e)) { 1831 + gvt_dbg_core("GMA 0x%lx is not present\n", gma); 1832 + goto err; 1833 + } 1834 + } 1835 + 1836 + gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) + 1837 + (gma & ~I915_GTT_PAGE_MASK); 1838 + trace_gma_translate(vgpu->id, "ppgtt", 0, 1839 + mm->ppgtt_mm.root_entry_type, gma, gpa); 1778 1840 } 1779 1841 1780 - gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) 1781 - + (gma & ~I915_GTT_PAGE_MASK); 1782 - 1783 - trace_gma_translate(vgpu->id, "ppgtt", 0, 1784 - mm->page_table_level, gma, gpa); 1785 1842 return gpa; 1786 1843 err: 1787 1844 gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma); 1788 1845 return INTEL_GVT_INVALID_ADDR; 1789 1846 } 1790 1847 1791 - static int emulate_gtt_mmio_read(struct intel_vgpu *vgpu, 1848 + static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, 1792 1849 unsigned int off, void *p_data, unsigned int bytes) 1793 1850 { 1794 1851 struct intel_vgpu_mm 
*ggtt_mm = vgpu->gtt.ggtt_mm; ··· 1804 1887 * Returns: 1805 1888 * Zero on success, error code if failed. 1806 1889 */ 1807 - int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off, 1890 + int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off, 1808 1891 void *p_data, unsigned int bytes) 1809 1892 { 1810 1893 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; ··· 1814 1897 return -EINVAL; 1815 1898 1816 1899 off -= info->gtt_start_offset; 1817 - ret = emulate_gtt_mmio_read(vgpu, off, p_data, bytes); 1900 + ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes); 1818 1901 return ret; 1819 1902 } 1820 1903 1821 - static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, 1904 + static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, 1822 1905 void *p_data, unsigned int bytes) 1823 1906 { 1824 1907 struct intel_gvt *gvt = vgpu->gvt; ··· 1828 1911 unsigned long g_gtt_index = off >> info->gtt_entry_size_shift; 1829 1912 unsigned long gma, gfn; 1830 1913 struct intel_gvt_gtt_entry e, m; 1914 + dma_addr_t dma_addr; 1831 1915 int ret; 1832 1916 1833 1917 if (bytes != 4 && bytes != 8) ··· 1844 1926 1845 1927 memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data, 1846 1928 bytes); 1929 + m = e; 1847 1930 1848 1931 if (ops->test_present(&e)) { 1849 1932 gfn = ops->get_pfn(&e); ··· 1857 1938 goto out; 1858 1939 } 1859 1940 1860 - ret = gtt_entry_p2m(vgpu, &e, &m); 1941 + ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, 1942 + &dma_addr); 1861 1943 if (ret) { 1862 - gvt_vgpu_err("fail to translate guest gtt entry\n"); 1944 + gvt_vgpu_err("fail to populate guest ggtt entry\n"); 1863 1945 /* guest driver may read/write the entry when partial 1864 1946 * update the entry in this situation p2m will fail 1865 1947 * settting the shadow entry to point to a scratch page 1866 1948 */ 1867 1949 ops->set_pfn(&m, gvt->gtt.scratch_mfn); 1868 - } 1869 - } else { 1870 - m = e; 1950 + } else 1951 + ops->set_pfn(&m, dma_addr >> PAGE_SHIFT); 1952 + } else 1871 1953 ops->set_pfn(&m, gvt->gtt.scratch_mfn); 1872 - } 1873 1954 1874 1955 out: 1875 - ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index); 1876 - gtt_invalidate(gvt->dev_priv); 1956 + ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index); 1957 + ggtt_invalidate(gvt->dev_priv); 1877 1958 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index); 1878 1959 return 0; 1879 1960 } 1880 1961 1881 1962 /* 1882 - * intel_vgpu_emulate_gtt_mmio_write - emulate GTT MMIO register write 1963 + * intel_vgpu_emulate_ggtt_mmio_write - emulate GTT MMIO register write 1883 1964 * @vgpu: a vGPU 1884 1965 * @off: register offset 1885 1966 * @p_data: data from guest write ··· 1890 1971 * Returns: 1891 1972 * Zero on success, error code if failed. 
1892 1973 */ 1893 - int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, 1894 - void *p_data, unsigned int bytes) 1974 + int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, 1975 + unsigned int off, void *p_data, unsigned int bytes) 1895 1976 { 1896 1977 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; 1897 1978 int ret; ··· 1900 1981 return -EINVAL; 1901 1982 1902 1983 off -= info->gtt_start_offset; 1903 - ret = emulate_gtt_mmio_write(vgpu, off, p_data, bytes); 1984 + ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes); 1904 1985 return ret; 1905 1986 } 1906 - 1907 - int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa, 1908 - void *p_data, unsigned int bytes) 1909 - { 1910 - struct intel_gvt *gvt = vgpu->gvt; 1911 - int ret = 0; 1912 - 1913 - if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) { 1914 - struct intel_vgpu_page_track *t; 1915 - 1916 - mutex_lock(&gvt->lock); 1917 - 1918 - t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT); 1919 - if (t) { 1920 - if (unlikely(vgpu->failsafe)) { 1921 - /* remove write protection to prevent furture traps */ 1922 - intel_vgpu_clean_page_track(vgpu, t); 1923 - } else { 1924 - ret = t->handler(t, pa, p_data, bytes); 1925 - if (ret) { 1926 - gvt_err("guest page write error %d, " 1927 - "gfn 0x%lx, pa 0x%llx, " 1928 - "var 0x%x, len %d\n", 1929 - ret, t->gfn, pa, 1930 - *(u32 *)p_data, bytes); 1931 - } 1932 - } 1933 - } 1934 - mutex_unlock(&gvt->lock); 1935 - } 1936 - return ret; 1937 - } 1938 - 1939 1987 1940 1988 static int alloc_scratch_pages(struct intel_vgpu *vgpu, 1941 1989 intel_gvt_gtt_type_t type) ··· 2017 2131 int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) 2018 2132 { 2019 2133 struct intel_vgpu_gtt *gtt = &vgpu->gtt; 2020 - struct intel_vgpu_mm *ggtt_mm; 2021 2134 2022 - hash_init(gtt->tracked_guest_page_hash_table); 2023 - hash_init(gtt->shadow_page_hash_table); 2135 + INIT_RADIX_TREE(&gtt->spt_tree, GFP_KERNEL); 2024 2136 2025 - INIT_LIST_HEAD(&gtt->mm_list_head); 2137 + INIT_LIST_HEAD(&gtt->ppgtt_mm_list_head); 2026 2138 INIT_LIST_HEAD(&gtt->oos_page_list_head); 2027 2139 INIT_LIST_HEAD(&gtt->post_shadow_list_head); 2028 2140 2029 - intel_vgpu_reset_ggtt(vgpu); 2030 - 2031 - ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT, 2032 - NULL, 1, 0); 2033 - if (IS_ERR(ggtt_mm)) { 2141 + gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu); 2142 + if (IS_ERR(gtt->ggtt_mm)) { 2034 2143 gvt_vgpu_err("fail to create mm for ggtt.\n"); 2035 - return PTR_ERR(ggtt_mm); 2144 + return PTR_ERR(gtt->ggtt_mm); 2036 2145 } 2037 2146 2038 - gtt->ggtt_mm = ggtt_mm; 2147 + intel_vgpu_reset_ggtt(vgpu); 2039 2148 2040 2149 return create_scratch_page_tree(vgpu); 2041 2150 } 2042 2151 2043 - static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type) 2152 + static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu) 2044 2153 { 2045 2154 struct list_head *pos, *n; 2046 2155 struct intel_vgpu_mm *mm; 2047 2156 2048 - list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) { 2049 - mm = container_of(pos, struct intel_vgpu_mm, list); 2050 - if (mm->type == type) { 2051 - vgpu->gvt->gtt.mm_free_page_table(mm); 2052 - list_del(&mm->list); 2053 - list_del(&mm->lru_list); 2054 - kfree(mm); 2055 - } 2157 + list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) { 2158 + mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list); 2159 + intel_vgpu_destroy_mm(mm); 2056 2160 } 2161 + 2162 + if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head))) 2163 + gvt_err("vgpu ppgtt 
mm is not fully destoried\n"); 2164 + 2165 + if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) { 2166 + gvt_err("Why we still has spt not freed?\n"); 2167 + ppgtt_free_all_spt(vgpu); 2168 + } 2169 + } 2170 + 2171 + static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu) 2172 + { 2173 + intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm); 2174 + vgpu->gtt.ggtt_mm = NULL; 2057 2175 } 2058 2176 2059 2177 /** ··· 2072 2182 */ 2073 2183 void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu) 2074 2184 { 2075 - ppgtt_free_all_shadow_page(vgpu); 2185 + intel_vgpu_destroy_all_ppgtt_mm(vgpu); 2186 + intel_vgpu_destroy_ggtt_mm(vgpu); 2076 2187 release_scratch_page_tree(vgpu); 2077 - 2078 - intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT); 2079 - intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_GGTT); 2080 2188 } 2081 2189 2082 2190 static void clean_spt_oos(struct intel_gvt *gvt) ··· 2136 2248 * pointer to mm object on success, NULL if failed. 2137 2249 */ 2138 2250 struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu, 2139 - int page_table_level, void *root_entry) 2251 + u64 pdps[]) 2140 2252 { 2141 - struct list_head *pos; 2142 2253 struct intel_vgpu_mm *mm; 2143 - u64 *src, *dst; 2254 + struct list_head *pos; 2144 2255 2145 - list_for_each(pos, &vgpu->gtt.mm_list_head) { 2146 - mm = container_of(pos, struct intel_vgpu_mm, list); 2147 - if (mm->type != INTEL_GVT_MM_PPGTT) 2148 - continue; 2256 + list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) { 2257 + mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list); 2149 2258 2150 - if (mm->page_table_level != page_table_level) 2151 - continue; 2152 - 2153 - src = root_entry; 2154 - dst = mm->virtual_page_table; 2155 - 2156 - if (page_table_level == 3) { 2157 - if (src[0] == dst[0] 2158 - && src[1] == dst[1] 2159 - && src[2] == dst[2] 2160 - && src[3] == dst[3]) 2259 + switch (mm->ppgtt_mm.root_entry_type) { 2260 + case GTT_TYPE_PPGTT_ROOT_L4_ENTRY: 2261 + if (pdps[0] == mm->ppgtt_mm.guest_pdps[0]) 2161 2262 return mm; 2162 - } else { 2163 - if (src[0] == dst[0]) 2263 + break; 2264 + case GTT_TYPE_PPGTT_ROOT_L3_ENTRY: 2265 + if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps, 2266 + sizeof(mm->ppgtt_mm.guest_pdps))) 2164 2267 return mm; 2268 + break; 2269 + default: 2270 + GEM_BUG_ON(1); 2165 2271 } 2166 2272 } 2167 2273 return NULL; 2168 2274 } 2169 2275 2170 2276 /** 2171 - * intel_vgpu_g2v_create_ppgtt_mm - create a PPGTT mm object from 2172 - * g2v notification 2277 + * intel_vgpu_get_ppgtt_mm - get or create a PPGTT mm object. 2173 2278 * @vgpu: a vGPU 2174 - * @page_table_level: PPGTT page table level 2279 + * @root_entry_type: ppgtt root entry type 2280 + * @pdps: guest pdps 2175 2281 * 2176 - * This function is used to create a PPGTT mm object from a guest to GVT-g 2177 - * notification. 2282 + * This function is used to find or create a PPGTT mm object from a guest. 2178 2283 * 2179 2284 * Returns: 2180 2285 * Zero on success, negative error code if failed. 
2181 2286 */ 2182 - int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu, 2183 - int page_table_level) 2287 + struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu, 2288 + intel_gvt_gtt_type_t root_entry_type, u64 pdps[]) 2184 2289 { 2185 - u64 *pdp = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0])); 2186 2290 struct intel_vgpu_mm *mm; 2187 2291 2188 - if (WARN_ON((page_table_level != 4) && (page_table_level != 3))) 2189 - return -EINVAL; 2190 - 2191 - mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp); 2292 + mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps); 2192 2293 if (mm) { 2193 - intel_gvt_mm_reference(mm); 2294 + intel_vgpu_mm_get(mm); 2194 2295 } else { 2195 - mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT, 2196 - pdp, page_table_level, 0); 2197 - if (IS_ERR(mm)) { 2296 + mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps); 2297 + if (IS_ERR(mm)) 2198 2298 gvt_vgpu_err("fail to create mm\n"); 2199 - return PTR_ERR(mm); 2200 - } 2201 2299 } 2202 - return 0; 2300 + return mm; 2203 2301 } 2204 2302 2205 2303 /** 2206 - * intel_vgpu_g2v_destroy_ppgtt_mm - destroy a PPGTT mm object from 2207 - * g2v notification 2304 + * intel_vgpu_put_ppgtt_mm - find and put a PPGTT mm object. 2208 2305 * @vgpu: a vGPU 2209 - * @page_table_level: PPGTT page table level 2306 + * @pdps: guest pdps 2210 2307 * 2211 - * This function is used to create a PPGTT mm object from a guest to GVT-g 2212 - * notification. 2308 + * This function is used to find a PPGTT mm object from a guest and destroy it. 2213 2309 * 2214 2310 * Returns: 2215 2311 * Zero on success, negative error code if failed. 2216 2312 */ 2217 - int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu, 2218 - int page_table_level) 2313 + int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[]) 2219 2314 { 2220 - u64 *pdp = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0])); 2221 2315 struct intel_vgpu_mm *mm; 2222 2316 2223 - if (WARN_ON((page_table_level != 4) && (page_table_level != 3))) 2224 - return -EINVAL; 2225 - 2226 - mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp); 2317 + mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps); 2227 2318 if (!mm) { 2228 2319 gvt_vgpu_err("fail to find ppgtt instance.\n"); 2229 2320 return -EINVAL; 2230 2321 } 2231 - intel_gvt_mm_unreference(mm); 2322 + intel_vgpu_mm_put(mm); 2232 2323 return 0; 2233 2324 } 2234 2325 ··· 2234 2367 || IS_KABYLAKE(gvt->dev_priv)) { 2235 2368 gvt->gtt.pte_ops = &gen8_gtt_pte_ops; 2236 2369 gvt->gtt.gma_ops = &gen8_gtt_gma_ops; 2237 - gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table; 2238 - gvt->gtt.mm_free_page_table = gen8_mm_free_page_table; 2239 2370 } else { 2240 2371 return -ENODEV; 2241 2372 } ··· 2264 2399 return ret; 2265 2400 } 2266 2401 } 2267 - INIT_LIST_HEAD(&gvt->gtt.mm_lru_list_head); 2402 + INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head); 2268 2403 return 0; 2269 2404 } 2270 2405 ··· 2302 2437 { 2303 2438 struct intel_gvt *gvt = vgpu->gvt; 2304 2439 struct drm_i915_private *dev_priv = gvt->dev_priv; 2305 - struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 2440 + struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; 2441 + struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE}; 2306 2442 u32 index; 2307 - u32 offset; 2308 2443 u32 num_entries; 2309 - struct intel_gvt_gtt_entry e; 2310 2444 2311 - memset(&e, 0, sizeof(struct intel_gvt_gtt_entry)); 2312 - e.type = GTT_TYPE_GGTT_PTE; 2313 - ops->set_pfn(&e, gvt->gtt.scratch_mfn); 2314 - e.val64 |= _PAGE_PRESENT; 2445 + 
pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn); 2446 + pte_ops->set_present(&entry); 2315 2447 2316 2448 index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT; 2317 2449 num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT; 2318 - for (offset = 0; offset < num_entries; offset++) 2319 - ops->set_entry(NULL, &e, index + offset, false, 0, vgpu); 2450 + while (num_entries--) 2451 + ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); 2320 2452 2321 2453 index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT; 2322 2454 num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT; 2323 - for (offset = 0; offset < num_entries; offset++) 2324 - ops->set_entry(NULL, &e, index + offset, false, 0, vgpu); 2455 + while (num_entries--) 2456 + ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); 2325 2457 2326 - gtt_invalidate(dev_priv); 2458 + ggtt_invalidate(dev_priv); 2327 2459 } 2328 2460 2329 2461 /** ··· 2333 2471 */ 2334 2472 void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu) 2335 2473 { 2336 - ppgtt_free_all_shadow_page(vgpu); 2337 - 2338 2474 /* Shadow pages are only created when there is no page 2339 2475 * table tracking data, so remove page tracking data after 2340 2476 * removing the shadow pages. 2341 2477 */ 2342 - intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT); 2343 - 2478 + intel_vgpu_destroy_all_ppgtt_mm(vgpu); 2344 2479 intel_vgpu_reset_ggtt(vgpu); 2345 2480 }
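Editor's note on the gtt.c rework above: PPGTT mm objects are now looked up by their guest PDP roots and reference counted, replacing the old page_table_level bookkeeping. Below is a minimal userspace sketch of that find-or-create / put pattern, with a plain linked list and an int refcount standing in for the kernel's list_head and kref; names are made up and this is not the driver code itself.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_PDPS 4

struct ppgtt_mm {
	uint64_t guest_pdps[NR_PDPS];
	int refcount;
	struct ppgtt_mm *next;
};

static struct ppgtt_mm *mm_list;

/* Look up an existing mm by comparing all guest PDP roots. */
static struct ppgtt_mm *find_ppgtt_mm(const uint64_t pdps[NR_PDPS])
{
	struct ppgtt_mm *mm;

	for (mm = mm_list; mm; mm = mm->next)
		if (!memcmp(mm->guest_pdps, pdps, sizeof(mm->guest_pdps)))
			return mm;
	return NULL;
}

/* Get-or-create: bump the refcount if found, otherwise allocate. */
static struct ppgtt_mm *get_ppgtt_mm(const uint64_t pdps[NR_PDPS])
{
	struct ppgtt_mm *mm = find_ppgtt_mm(pdps);

	if (mm) {
		mm->refcount++;
		return mm;
	}
	mm = calloc(1, sizeof(*mm));
	if (!mm)
		return NULL;
	memcpy(mm->guest_pdps, pdps, sizeof(mm->guest_pdps));
	mm->refcount = 1;
	mm->next = mm_list;
	mm_list = mm;
	return mm;
}

/* Put: unlink and free on the last reference (where the driver would
 * also invalidate the shadow page tables). */
static void put_ppgtt_mm(struct ppgtt_mm *mm)
{
	struct ppgtt_mm **p;

	if (--mm->refcount)
		return;
	for (p = &mm_list; *p; p = &(*p)->next) {
		if (*p == mm) {
			*p = mm->next;
			break;
		}
	}
	free(mm);
}

int main(void)
{
	uint64_t pdps[NR_PDPS] = { 0x1000, 0x2000, 0x3000, 0x4000 };
	struct ppgtt_mm *a = get_ppgtt_mm(pdps);
	struct ppgtt_mm *b = get_ppgtt_mm(pdps);	/* same object, refcount 2 */

	printf("same object: %d, refcount: %d\n", a == b, a->refcount);
	put_ppgtt_mm(b);
	put_ppgtt_mm(a);
	return 0;
}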
+69 -120
drivers/gpu/drm/i915/gvt/gtt.h
··· 39 39 40 40 struct intel_vgpu_mm; 41 41 42 - #define INTEL_GVT_GTT_HASH_BITS 8 43 42 #define INTEL_GVT_INVALID_ADDR (~0UL) 44 43 45 44 struct intel_gvt_gtt_entry { ··· 83 84 void (*mm_free_page_table)(struct intel_vgpu_mm *mm); 84 85 struct list_head oos_page_use_list_head; 85 86 struct list_head oos_page_free_list_head; 86 - struct list_head mm_lru_list_head; 87 + struct list_head ppgtt_mm_lru_list_head; 87 88 88 89 struct page *scratch_page; 89 90 unsigned long scratch_mfn; 90 - }; 91 - 92 - enum { 93 - INTEL_GVT_MM_GGTT = 0, 94 - INTEL_GVT_MM_PPGTT, 95 91 }; 96 92 97 93 typedef enum { ··· 119 125 GTT_TYPE_MAX, 120 126 } intel_gvt_gtt_type_t; 121 127 122 - struct intel_vgpu_mm { 123 - int type; 124 - bool initialized; 125 - bool shadowed; 126 - 127 - int page_table_entry_type; 128 - u32 page_table_entry_size; 129 - u32 page_table_entry_cnt; 130 - void *virtual_page_table; 131 - void *shadow_page_table; 132 - 133 - int page_table_level; 134 - bool has_shadow_page_table; 135 - u32 pde_base_index; 136 - 137 - struct list_head list; 138 - struct kref ref; 139 - atomic_t pincount; 140 - struct list_head lru_list; 141 - struct intel_vgpu *vgpu; 128 + enum intel_gvt_mm_type { 129 + INTEL_GVT_MM_GGTT, 130 + INTEL_GVT_MM_PPGTT, 142 131 }; 143 132 144 - extern int intel_vgpu_mm_get_entry( 145 - struct intel_vgpu_mm *mm, 146 - void *page_table, struct intel_gvt_gtt_entry *e, 147 - unsigned long index); 133 + #define GVT_RING_CTX_NR_PDPS GEN8_3LVL_PDPES 148 134 149 - extern int intel_vgpu_mm_set_entry( 150 - struct intel_vgpu_mm *mm, 151 - void *page_table, struct intel_gvt_gtt_entry *e, 152 - unsigned long index); 135 + struct intel_vgpu_mm { 136 + enum intel_gvt_mm_type type; 137 + struct intel_vgpu *vgpu; 153 138 154 - #define ggtt_get_guest_entry(mm, e, index) \ 155 - intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index) 139 + struct kref ref; 140 + atomic_t pincount; 156 141 157 - #define ggtt_set_guest_entry(mm, e, index) \ 158 - intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index) 142 + union { 143 + struct { 144 + intel_gvt_gtt_type_t root_entry_type; 145 + /* 146 + * The 4 PDPs in ring context. For 48bit addressing, 147 + * only PDP0 is valid and point to PML4. For 32it 148 + * addressing, all 4 are used as true PDPs. 
149 + */ 150 + u64 guest_pdps[GVT_RING_CTX_NR_PDPS]; 151 + u64 shadow_pdps[GVT_RING_CTX_NR_PDPS]; 152 + bool shadowed; 159 153 160 - #define ggtt_get_shadow_entry(mm, e, index) \ 161 - intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index) 154 + struct list_head list; 155 + struct list_head lru_list; 156 + } ppgtt_mm; 157 + struct { 158 + void *virtual_ggtt; 159 + } ggtt_mm; 160 + }; 161 + }; 162 162 163 - #define ggtt_set_shadow_entry(mm, e, index) \ 164 - intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index) 163 + struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu, 164 + intel_gvt_gtt_type_t root_entry_type, u64 pdps[]); 165 165 166 - #define ppgtt_get_guest_root_entry(mm, e, index) \ 167 - intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index) 166 + static inline void intel_vgpu_mm_get(struct intel_vgpu_mm *mm) 167 + { 168 + kref_get(&mm->ref); 169 + } 168 170 169 - #define ppgtt_set_guest_root_entry(mm, e, index) \ 170 - intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index) 171 + void _intel_vgpu_mm_release(struct kref *mm_ref); 171 172 172 - #define ppgtt_get_shadow_root_entry(mm, e, index) \ 173 - intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index) 173 + static inline void intel_vgpu_mm_put(struct intel_vgpu_mm *mm) 174 + { 175 + kref_put(&mm->ref, _intel_vgpu_mm_release); 176 + } 174 177 175 - #define ppgtt_set_shadow_root_entry(mm, e, index) \ 176 - intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index) 177 - 178 - extern struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu, 179 - int mm_type, void *virtual_page_table, int page_table_level, 180 - u32 pde_base_index); 181 - extern void intel_vgpu_destroy_mm(struct kref *mm_ref); 178 + static inline void intel_vgpu_destroy_mm(struct intel_vgpu_mm *mm) 179 + { 180 + intel_vgpu_mm_put(mm); 181 + } 182 182 183 183 struct intel_vgpu_guest_page; 184 184 ··· 184 196 struct intel_vgpu_gtt { 185 197 struct intel_vgpu_mm *ggtt_mm; 186 198 unsigned long active_ppgtt_mm_bitmap; 187 - struct list_head mm_list_head; 188 - DECLARE_HASHTABLE(shadow_page_hash_table, INTEL_GVT_GTT_HASH_BITS); 189 - DECLARE_HASHTABLE(tracked_guest_page_hash_table, INTEL_GVT_GTT_HASH_BITS); 190 - atomic_t n_tracked_guest_page; 199 + struct list_head ppgtt_mm_list_head; 200 + struct radix_tree_root spt_tree; 191 201 struct list_head oos_page_list_head; 192 202 struct list_head post_shadow_list_head; 193 203 struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX]; ··· 202 216 extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu, 203 217 int page_table_level, void *root_entry); 204 218 205 - struct intel_vgpu_oos_page; 206 - 207 - struct intel_vgpu_shadow_page { 208 - void *vaddr; 209 - struct page *page; 210 - int type; 211 - struct hlist_node node; 212 - unsigned long mfn; 213 - }; 214 - 215 - struct intel_vgpu_page_track { 216 - struct hlist_node node; 217 - bool tracked; 218 - unsigned long gfn; 219 - int (*handler)(void *, u64, void *, int); 220 - void *data; 221 - }; 222 - 223 - struct intel_vgpu_guest_page { 224 - struct intel_vgpu_page_track track; 225 - unsigned long write_cnt; 226 - struct intel_vgpu_oos_page *oos_page; 227 - }; 228 - 229 219 struct intel_vgpu_oos_page { 230 - struct intel_vgpu_guest_page *guest_page; 220 + struct intel_vgpu_ppgtt_spt *spt; 231 221 struct list_head list; 232 222 struct list_head vm_list; 233 223 int id; ··· 212 250 213 251 #define GTT_ENTRY_NUM_IN_ONE_PAGE 512 214 252 253 + /* Represent a vgpu shadow page table. 
*/ 215 254 struct intel_vgpu_ppgtt_spt { 216 - struct intel_vgpu_shadow_page shadow_page; 217 - struct intel_vgpu_guest_page guest_page; 218 - int guest_page_type; 219 255 atomic_t refcount; 220 256 struct intel_vgpu *vgpu; 257 + 258 + struct { 259 + intel_gvt_gtt_type_t type; 260 + void *vaddr; 261 + struct page *page; 262 + unsigned long mfn; 263 + } shadow_page; 264 + 265 + struct { 266 + intel_gvt_gtt_type_t type; 267 + unsigned long gfn; 268 + unsigned long write_cnt; 269 + struct intel_vgpu_oos_page *oos_page; 270 + } guest_page; 271 + 221 272 DECLARE_BITMAP(post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE); 222 273 struct list_head post_shadow_list; 223 274 }; 224 275 225 - int intel_vgpu_init_page_track(struct intel_vgpu *vgpu, 226 - struct intel_vgpu_page_track *t, 227 - unsigned long gfn, 228 - int (*handler)(void *gp, u64, void *, int), 229 - void *data); 230 - 231 - void intel_vgpu_clean_page_track(struct intel_vgpu *vgpu, 232 - struct intel_vgpu_page_track *t); 233 - 234 - struct intel_vgpu_page_track *intel_vgpu_find_tracked_page( 235 - struct intel_vgpu *vgpu, unsigned long gfn); 236 - 237 276 int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu); 238 277 239 278 int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu); 240 - 241 - static inline void intel_gvt_mm_reference(struct intel_vgpu_mm *mm) 242 - { 243 - kref_get(&mm->ref); 244 - } 245 - 246 - static inline void intel_gvt_mm_unreference(struct intel_vgpu_mm *mm) 247 - { 248 - kref_put(&mm->ref, intel_vgpu_destroy_mm); 249 - } 250 279 251 280 int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm); 252 281 ··· 247 294 unsigned long gma); 248 295 249 296 struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu, 250 - int page_table_level, void *root_entry); 297 + u64 pdps[]); 251 298 252 - int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu, 253 - int page_table_level); 299 + struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu, 300 + intel_gvt_gtt_type_t root_entry_type, u64 pdps[]); 254 301 255 - int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu, 256 - int page_table_level); 302 + int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[]); 257 303 258 - int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu, 304 + int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, 259 305 unsigned int off, void *p_data, unsigned int bytes); 260 306 261 - int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, 307 + int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, 262 308 unsigned int off, void *p_data, unsigned int bytes); 263 - 264 - int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa, 265 - void *p_data, unsigned int bytes); 266 309 267 310 #endif /* _GVT_GTT_H_ */
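Editor's note on the gtt.h change above: the old flat intel_vgpu_mm fields collapse into a type-tagged union, so an mm only carries the state relevant to its GGTT or PPGTT role. A small standalone sketch of the same tagged-union pattern follows; field names are hypothetical and simplified, not the kernel struct.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

enum mm_type { MM_GGTT, MM_PPGTT };

struct vgpu_mm {
	enum mm_type type;
	union {
		struct {
			uint64_t guest_pdps[4];
			int shadowed;
		} ppgtt;
		struct {
			void *virtual_ggtt;	/* backing array for GGTT entries */
		} ggtt;
	};
};

/* Only touch the union member that matches the tag. */
static void describe(const struct vgpu_mm *mm)
{
	if (mm->type == MM_PPGTT)
		printf("ppgtt mm, pdp0=0x%llx, shadowed=%d\n",
		       (unsigned long long)mm->ppgtt.guest_pdps[0],
		       mm->ppgtt.shadowed);
	else
		printf("ggtt mm, table at %p\n", mm->ggtt.virtual_ggtt);
}

int main(void)
{
	struct vgpu_mm ppgtt = { .type = MM_PPGTT };
	struct vgpu_mm ggtt = { .type = MM_GGTT };

	ppgtt.ppgtt.guest_pdps[0] = 0xdead000;
	ggtt.ggtt.virtual_ggtt = calloc(512, 8);

	describe(&ppgtt);
	describe(&ggtt);
	free(ggtt.ggtt.virtual_ggtt);
	return 0;
}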
+1 -1
drivers/gpu/drm/i915/gvt/gvt.c
··· 183 183 .get_gvt_attrs = intel_get_gvt_attrs, 184 184 .vgpu_query_plane = intel_vgpu_query_plane, 185 185 .vgpu_get_dmabuf = intel_vgpu_get_dmabuf, 186 - .write_protect_handler = intel_vgpu_write_protect_handler, 186 + .write_protect_handler = intel_vgpu_page_track_handler, 187 187 }; 188 188 189 189 /**
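Editor's note: only the ops-table wiring changes in gvt.c, pointing the write-protect slot at the new page-track handler. The trivial sketch below shows the function-pointer indirection that makes such a rename a one-line change for callers; all names are hypothetical.

#include <stdio.h>

/* Callback-style ops table: callers only know the slot, not the symbol. */
struct gvt_ops_sketch {
	int (*write_protect_handler)(unsigned long pa, void *data, unsigned int bytes);
};

static int page_track_handler(unsigned long pa, void *data, unsigned int bytes)
{
	printf("handling write-protected write at 0x%lx, %u bytes\n", pa, bytes);
	return 0;
}

static const struct gvt_ops_sketch ops = {
	.write_protect_handler = page_track_handler,	/* the only line that changes */
};

int main(void)
{
	char buf[4] = { 0 };

	return ops.write_protect_handler(0x1000, buf, sizeof(buf));
}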
+16 -5
drivers/gpu/drm/i915/gvt/gvt.h
··· 48 48 #include "cmd_parser.h" 49 49 #include "fb_decoder.h" 50 50 #include "dmabuf.h" 51 + #include "page_track.h" 51 52 52 53 #define GVT_MAX_VGPU 8 53 54 ··· 132 131 133 132 #define vgpu_opregion(vgpu) (&(vgpu->opregion)) 134 133 135 - #define INTEL_GVT_MAX_PORT 5 136 - 137 134 struct intel_vgpu_display { 138 135 struct intel_vgpu_i2c_edid i2c_edid; 139 - struct intel_vgpu_port ports[INTEL_GVT_MAX_PORT]; 136 + struct intel_vgpu_port ports[I915_MAX_PORTS]; 140 137 struct intel_vgpu_sbi sbi; 141 138 }; 142 139 ··· 189 190 struct intel_vgpu_opregion opregion; 190 191 struct intel_vgpu_display display; 191 192 struct intel_vgpu_submission submission; 193 + struct radix_tree_root page_track_tree; 192 194 u32 hws_pga[I915_NUM_ENGINES]; 193 195 194 196 struct dentry *debugfs; ··· 201 201 int num_regions; 202 202 struct eventfd_ctx *intx_trigger; 203 203 struct eventfd_ctx *msi_trigger; 204 - struct rb_root cache; 204 + 205 + /* 206 + * Two caches are used to avoid mapping duplicated pages (eg. 207 + * scratch pages). This help to reduce dma setup overhead. 208 + */ 209 + struct rb_root gfn_cache; 210 + struct rb_root dma_addr_cache; 211 + unsigned long nr_cache_entries; 205 212 struct mutex cache_lock; 213 + 206 214 struct notifier_block iommu_notifier; 207 215 struct notifier_block group_notifier; 208 216 struct kvm *kvm; ··· 316 308 wait_queue_head_t service_thread_wq; 317 309 unsigned long service_request; 318 310 319 - struct engine_mmio *engine_mmio_list; 311 + struct { 312 + struct engine_mmio *mmio; 313 + int ctx_mmio_count[I915_NUM_ENGINES]; 314 + } engine_mmio_list; 320 315 321 316 struct dentry *debugfs_root; 322 317 };
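Editor's note on the gvt.h changes above: each vGPU now keeps a gfn-keyed tree (page_track_tree) for page-track state instead of the old hash tables, alongside the split gfn/dma-address caches. As a rough illustration of gfn-keyed lookup, here is a tiny userspace stand-in using an open-addressing hash table in place of the kernel radix tree; all names are hypothetical.

#include <stddef.h>
#include <stdio.h>

#define TRACK_SLOTS 64

struct page_track {
	unsigned long gfn;
	int in_use;
	int write_cnt;	/* stand-in for per-page tracking state */
};

static struct page_track track_table[TRACK_SLOTS];

/* Find the slot for a gfn; optionally claim a free one. */
static struct page_track *track_lookup(unsigned long gfn, int create)
{
	size_t i, idx = gfn % TRACK_SLOTS;

	for (i = 0; i < TRACK_SLOTS; i++) {
		struct page_track *t = &track_table[(idx + i) % TRACK_SLOTS];

		if (t->in_use && t->gfn == gfn)
			return t;
		if (!t->in_use && create) {
			t->in_use = 1;
			t->gfn = gfn;
			return t;
		}
	}
	return NULL;	/* table full or gfn not tracked */
}

int main(void)
{
	struct page_track *t = track_lookup(0x1234, 1);

	t->write_cnt++;
	printf("gfn 0x%lx tracked, writes=%d\n", t->gfn, t->write_cnt);
	printf("untracked gfn found: %p\n", (void *)track_lookup(0x9999, 0));
	return 0;
}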
+20 -18
drivers/gpu/drm/i915/gvt/handlers.c
··· 188 188 static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu, 189 189 unsigned int fence_num, void *p_data, unsigned int bytes) 190 190 { 191 - if (fence_num >= vgpu_fence_sz(vgpu)) { 191 + unsigned int max_fence = vgpu_fence_sz(vgpu); 192 + 193 + if (fence_num >= max_fence) { 192 194 193 195 /* When guest access oob fence regs without access 194 196 * pv_info first, we treat guest not supporting GVT, ··· 203 201 if (!vgpu->mmio.disable_warn_untrack) { 204 202 gvt_vgpu_err("found oob fence register access\n"); 205 203 gvt_vgpu_err("total fence %d, access fence %d\n", 206 - vgpu_fence_sz(vgpu), fence_num); 204 + max_fence, fence_num); 207 205 } 208 206 memset(p_data, 0, bytes); 209 207 return -EINVAL; ··· 322 320 intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask); 323 321 324 322 /* sw will wait for the device to ack the reset request */ 325 - vgpu_vreg(vgpu, offset) = 0; 323 + vgpu_vreg(vgpu, offset) = 0; 326 324 327 325 return 0; 328 326 } ··· 1141 1139 1142 1140 static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification) 1143 1141 { 1144 - int ret = 0; 1142 + intel_gvt_gtt_type_t root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY; 1143 + struct intel_vgpu_mm *mm; 1144 + u64 *pdps; 1145 + 1146 + pdps = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0])); 1145 1147 1146 1148 switch (notification) { 1147 1149 case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE: 1148 - ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 3); 1149 - break; 1150 - case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY: 1151 - ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 3); 1152 - break; 1150 + root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY; 1153 1151 case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE: 1154 - ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 4); 1155 - break; 1152 + mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps); 1153 + return PTR_ERR_OR_ZERO(mm); 1154 + case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY: 1156 1155 case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY: 1157 - ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 4); 1158 - break; 1156 + return intel_vgpu_put_ppgtt_mm(vgpu, pdps); 1159 1157 case VGT_G2V_EXECLIST_CONTEXT_CREATE: 1160 1158 case VGT_G2V_EXECLIST_CONTEXT_DESTROY: 1161 1159 case 1: /* Remove this in guest driver. */ ··· 1163 1161 default: 1164 1162 gvt_vgpu_err("Invalid PV notification %d\n", notification); 1165 1163 } 1166 - return ret; 1164 + return 0; 1167 1165 } 1168 1166 1169 1167 static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready) ··· 1391 1389 int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset); 1392 1390 1393 1391 if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) { 1394 - gvt_vgpu_err("VM(%d) write invalid HWSP address, reg:0x%x, value:0x%x\n", 1395 - vgpu->id, offset, value); 1392 + gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n", 1393 + offset, value); 1396 1394 return -EINVAL; 1397 1395 } 1398 1396 /* ··· 1401 1399 * support BDW, SKL or other platforms with same HWSP registers. 1402 1400 */ 1403 1401 if (unlikely(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) { 1404 - gvt_vgpu_err("VM(%d) access unknown hardware status page register:0x%x\n", 1405 - vgpu->id, offset); 1402 + gvt_vgpu_err("access unknown hardware status page register:0x%x\n", 1403 + offset); 1406 1404 return -EINVAL; 1407 1405 } 1408 1406 vgpu->hws_pga[ring_id] = value;
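Editor's note on the handlers.c rework above: handle_g2v_notification now relies on a deliberate switch fallthrough, where the L3 case only overrides the default root entry type before falling into the shared create path, and both destroy notifications share one case body. A compact userspace sketch of that control flow (constants and helpers are made up):

#include <stdio.h>

enum { CREATE_L3, CREATE_L4, DESTROY_L3, DESTROY_L4 };
enum { ROOT_L3, ROOT_L4 };

static int create_mm(int root_type) { printf("create root type %d\n", root_type); return 0; }
static int destroy_mm(void)         { printf("destroy\n"); return 0; }

static int handle_notification(int notification)
{
	int root_entry_type = ROOT_L4;	/* default, overridden by the L3 case */

	switch (notification) {
	case CREATE_L3:
		root_entry_type = ROOT_L3;
		/* fall through: creation is shared with the L4 case */
	case CREATE_L4:
		return create_mm(root_entry_type);
	case DESTROY_L3:
	case DESTROY_L4:
		return destroy_mm();
	default:
		fprintf(stderr, "invalid notification %d\n", notification);
	}
	return 0;
}

int main(void)
{
	handle_notification(CREATE_L3);	/* creates with the L3 root type */
	handle_notification(CREATE_L4);	/* creates with the default L4 root type */
	handle_notification(DESTROY_L4);
	return 0;
}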
+7 -2
drivers/gpu/drm/i915/gvt/hypercall.h
··· 44 44 void (*detach_vgpu)(unsigned long handle); 45 45 int (*inject_msi)(unsigned long handle, u32 addr, u16 data); 46 46 unsigned long (*from_virt_to_mfn)(void *p); 47 - int (*set_wp_page)(unsigned long handle, u64 gfn); 48 - int (*unset_wp_page)(unsigned long handle, u64 gfn); 47 + int (*enable_page_track)(unsigned long handle, u64 gfn); 48 + int (*disable_page_track)(unsigned long handle, u64 gfn); 49 49 int (*read_gpa)(unsigned long handle, unsigned long gpa, void *buf, 50 50 unsigned long len); 51 51 int (*write_gpa)(unsigned long handle, unsigned long gpa, void *buf, 52 52 unsigned long len); 53 53 unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn); 54 + 55 + int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn, 56 + dma_addr_t *dma_addr); 57 + void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr); 58 + 54 59 int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn, 55 60 unsigned long mfn, unsigned int nr, bool map); 56 61 int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
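Editor's note on the hypercall.h change above: the MPT ops gain a paired map/unmap callback for guest-page DMA mappings. A minimal sketch of how a caller balances the pair (dummy implementations, not the KVMGT ones):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

struct mpt_ops_sketch {
	int (*dma_map_guest_page)(unsigned long gfn, dma_addr_t *dma_addr);
	void (*dma_unmap_guest_page)(dma_addr_t dma_addr);
};

/* Dummy backend: pretend the DMA address is just the gfn shifted. */
static int dummy_map(unsigned long gfn, dma_addr_t *dma_addr)
{
	*dma_addr = (dma_addr_t)gfn << 12;
	return 0;
}

static void dummy_unmap(dma_addr_t dma_addr)
{
	printf("unmapped dma addr 0x%llx\n", (unsigned long long)dma_addr);
}

static const struct mpt_ops_sketch mpt = {
	.dma_map_guest_page   = dummy_map,
	.dma_unmap_guest_page = dummy_unmap,
};

int main(void)
{
	dma_addr_t dma_addr;

	if (mpt.dma_map_guest_page(0x1234, &dma_addr))
		return 1;
	printf("gfn 0x1234 mapped at 0x%llx\n", (unsigned long long)dma_addr);
	mpt.dma_unmap_guest_page(dma_addr);	/* always balance map with unmap */
	return 0;
}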
+191 -126
drivers/gpu/drm/i915/gvt/kvmgt.c
··· 41 41 #include <linux/kvm_host.h> 42 42 #include <linux/vfio.h> 43 43 #include <linux/mdev.h> 44 + #include <linux/debugfs.h> 44 45 45 46 #include "i915_drv.h" 46 47 #include "gvt.h" ··· 85 84 #define NR_BKT (1 << 18) 86 85 struct hlist_head ptable[NR_BKT]; 87 86 #undef NR_BKT 87 + struct dentry *debugfs_cache_entries; 88 88 }; 89 89 90 90 struct gvt_dma { 91 - struct rb_node node; 91 + struct intel_vgpu *vgpu; 92 + struct rb_node gfn_node; 93 + struct rb_node dma_addr_node; 92 94 gfn_t gfn; 93 - unsigned long iova; 95 + dma_addr_t dma_addr; 96 + struct kref ref; 94 97 }; 95 98 96 99 static inline bool handle_valid(unsigned long handle) ··· 106 101 static void intel_vgpu_release_work(struct work_struct *work); 107 102 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info); 108 103 109 - static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn, 110 - unsigned long *iova) 104 + static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn, 105 + dma_addr_t *dma_addr) 111 106 { 112 - struct page *page; 113 107 struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; 114 - dma_addr_t daddr; 108 + struct page *page; 109 + unsigned long pfn; 110 + int ret; 115 111 116 - if (unlikely(!pfn_valid(pfn))) 117 - return -EFAULT; 112 + /* Pin the page first. */ 113 + ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1, 114 + IOMMU_READ | IOMMU_WRITE, &pfn); 115 + if (ret != 1) { 116 + gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", 117 + gfn, ret); 118 + return -EINVAL; 119 + } 118 120 121 + /* Setup DMA mapping. */ 119 122 page = pfn_to_page(pfn); 120 - daddr = dma_map_page(dev, page, 0, PAGE_SIZE, 121 - PCI_DMA_BIDIRECTIONAL); 122 - if (dma_mapping_error(dev, daddr)) 123 + *dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE, 124 + PCI_DMA_BIDIRECTIONAL); 125 + if (dma_mapping_error(dev, *dma_addr)) { 126 + gvt_vgpu_err("DMA mapping failed for gfn 0x%lx\n", gfn); 127 + vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1); 123 128 return -ENOMEM; 129 + } 124 130 125 - *iova = (unsigned long)(daddr >> PAGE_SHIFT); 126 131 return 0; 127 132 } 128 133 129 - static void gvt_dma_unmap_iova(struct intel_vgpu *vgpu, unsigned long iova) 134 + static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn, 135 + dma_addr_t dma_addr) 130 136 { 131 137 struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; 132 - dma_addr_t daddr; 138 + int ret; 133 139 134 - daddr = (dma_addr_t)(iova << PAGE_SHIFT); 135 - dma_unmap_page(dev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 140 + dma_unmap_page(dev, dma_addr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 141 + ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1); 142 + WARN_ON(ret != 1); 136 143 } 137 144 138 - static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn) 145 + static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu, 146 + dma_addr_t dma_addr) 139 147 { 140 - struct rb_node *node = vgpu->vdev.cache.rb_node; 141 - struct gvt_dma *ret = NULL; 148 + struct rb_node *node = vgpu->vdev.dma_addr_cache.rb_node; 149 + struct gvt_dma *itr; 142 150 143 151 while (node) { 144 - struct gvt_dma *itr = rb_entry(node, struct gvt_dma, node); 152 + itr = rb_entry(node, struct gvt_dma, dma_addr_node); 153 + 154 + if (dma_addr < itr->dma_addr) 155 + node = node->rb_left; 156 + else if (dma_addr > itr->dma_addr) 157 + node = node->rb_right; 158 + else 159 + return itr; 160 + } 161 + return NULL; 162 + } 163 + 164 + static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn) 165 
+ { 166 + struct rb_node *node = vgpu->vdev.gfn_cache.rb_node; 167 + struct gvt_dma *itr; 168 + 169 + while (node) { 170 + itr = rb_entry(node, struct gvt_dma, gfn_node); 145 171 146 172 if (gfn < itr->gfn) 147 173 node = node->rb_left; 148 174 else if (gfn > itr->gfn) 149 175 node = node->rb_right; 150 - else { 151 - ret = itr; 152 - goto out; 153 - } 176 + else 177 + return itr; 154 178 } 155 - 156 - out: 157 - return ret; 179 + return NULL; 158 180 } 159 181 160 - static unsigned long gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn) 161 - { 162 - struct gvt_dma *entry; 163 - unsigned long iova; 164 - 165 - mutex_lock(&vgpu->vdev.cache_lock); 166 - 167 - entry = __gvt_cache_find(vgpu, gfn); 168 - iova = (entry == NULL) ? INTEL_GVT_INVALID_ADDR : entry->iova; 169 - 170 - mutex_unlock(&vgpu->vdev.cache_lock); 171 - return iova; 172 - } 173 - 174 - static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, 175 - unsigned long iova) 182 + static void __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, 183 + dma_addr_t dma_addr) 176 184 { 177 185 struct gvt_dma *new, *itr; 178 - struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL; 186 + struct rb_node **link, *parent = NULL; 179 187 180 188 new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL); 181 189 if (!new) 182 190 return; 183 191 192 + new->vgpu = vgpu; 184 193 new->gfn = gfn; 185 - new->iova = iova; 194 + new->dma_addr = dma_addr; 195 + kref_init(&new->ref); 186 196 187 - mutex_lock(&vgpu->vdev.cache_lock); 197 + /* gfn_cache maps gfn to struct gvt_dma. */ 198 + link = &vgpu->vdev.gfn_cache.rb_node; 188 199 while (*link) { 189 200 parent = *link; 190 - itr = rb_entry(parent, struct gvt_dma, node); 201 + itr = rb_entry(parent, struct gvt_dma, gfn_node); 191 202 192 - if (gfn == itr->gfn) 193 - goto out; 194 - else if (gfn < itr->gfn) 203 + if (gfn < itr->gfn) 195 204 link = &parent->rb_left; 196 205 else 197 206 link = &parent->rb_right; 198 207 } 208 + rb_link_node(&new->gfn_node, parent, link); 209 + rb_insert_color(&new->gfn_node, &vgpu->vdev.gfn_cache); 199 210 200 - rb_link_node(&new->node, parent, link); 201 - rb_insert_color(&new->node, &vgpu->vdev.cache); 202 - mutex_unlock(&vgpu->vdev.cache_lock); 203 - return; 211 + /* dma_addr_cache maps dma addr to struct gvt_dma. 
*/ 212 + parent = NULL; 213 + link = &vgpu->vdev.dma_addr_cache.rb_node; 214 + while (*link) { 215 + parent = *link; 216 + itr = rb_entry(parent, struct gvt_dma, dma_addr_node); 204 217 205 - out: 206 - mutex_unlock(&vgpu->vdev.cache_lock); 207 - kfree(new); 218 + if (dma_addr < itr->dma_addr) 219 + link = &parent->rb_left; 220 + else 221 + link = &parent->rb_right; 222 + } 223 + rb_link_node(&new->dma_addr_node, parent, link); 224 + rb_insert_color(&new->dma_addr_node, &vgpu->vdev.dma_addr_cache); 225 + 226 + vgpu->vdev.nr_cache_entries++; 208 227 } 209 228 210 229 static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu, 211 230 struct gvt_dma *entry) 212 231 { 213 - rb_erase(&entry->node, &vgpu->vdev.cache); 232 + rb_erase(&entry->gfn_node, &vgpu->vdev.gfn_cache); 233 + rb_erase(&entry->dma_addr_node, &vgpu->vdev.dma_addr_cache); 214 234 kfree(entry); 215 - } 216 - 217 - static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn) 218 - { 219 - struct device *dev = mdev_dev(vgpu->vdev.mdev); 220 - struct gvt_dma *this; 221 - unsigned long g1; 222 - int rc; 223 - 224 - mutex_lock(&vgpu->vdev.cache_lock); 225 - this = __gvt_cache_find(vgpu, gfn); 226 - if (!this) { 227 - mutex_unlock(&vgpu->vdev.cache_lock); 228 - return; 229 - } 230 - 231 - g1 = gfn; 232 - gvt_dma_unmap_iova(vgpu, this->iova); 233 - rc = vfio_unpin_pages(dev, &g1, 1); 234 - WARN_ON(rc != 1); 235 - __gvt_cache_remove_entry(vgpu, this); 236 - mutex_unlock(&vgpu->vdev.cache_lock); 237 - } 238 - 239 - static void gvt_cache_init(struct intel_vgpu *vgpu) 240 - { 241 - vgpu->vdev.cache = RB_ROOT; 242 - mutex_init(&vgpu->vdev.cache_lock); 235 + vgpu->vdev.nr_cache_entries--; 243 236 } 244 237 245 238 static void gvt_cache_destroy(struct intel_vgpu *vgpu) 246 239 { 247 240 struct gvt_dma *dma; 248 241 struct rb_node *node = NULL; 249 - struct device *dev = mdev_dev(vgpu->vdev.mdev); 250 - unsigned long gfn; 251 242 252 243 for (;;) { 253 244 mutex_lock(&vgpu->vdev.cache_lock); 254 - node = rb_first(&vgpu->vdev.cache); 245 + node = rb_first(&vgpu->vdev.gfn_cache); 255 246 if (!node) { 256 247 mutex_unlock(&vgpu->vdev.cache_lock); 257 248 break; 258 249 } 259 - dma = rb_entry(node, struct gvt_dma, node); 260 - gvt_dma_unmap_iova(vgpu, dma->iova); 261 - gfn = dma->gfn; 250 + dma = rb_entry(node, struct gvt_dma, gfn_node); 251 + gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr); 262 252 __gvt_cache_remove_entry(vgpu, dma); 263 253 mutex_unlock(&vgpu->vdev.cache_lock); 264 - vfio_unpin_pages(dev, &gfn, 1); 265 254 } 255 + } 256 + 257 + static void gvt_cache_init(struct intel_vgpu *vgpu) 258 + { 259 + vgpu->vdev.gfn_cache = RB_ROOT; 260 + vgpu->vdev.dma_addr_cache = RB_ROOT; 261 + vgpu->vdev.nr_cache_entries = 0; 262 + mutex_init(&vgpu->vdev.cache_lock); 266 263 } 267 264 268 265 static void kvmgt_protect_table_init(struct kvmgt_guest_info *info) ··· 459 452 vgpu = intel_gvt_ops->vgpu_create(gvt, type); 460 453 if (IS_ERR_OR_NULL(vgpu)) { 461 454 ret = vgpu == NULL ? 
-EFAULT : PTR_ERR(vgpu); 462 - gvt_vgpu_err("failed to create intel vgpu: %d\n", ret); 455 + gvt_err("failed to create intel vgpu: %d\n", ret); 463 456 goto out; 464 457 } 465 458 ··· 496 489 497 490 if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) { 498 491 struct vfio_iommu_type1_dma_unmap *unmap = data; 499 - unsigned long gfn, end_gfn; 492 + struct gvt_dma *entry; 493 + unsigned long iov_pfn, end_iov_pfn; 500 494 501 - gfn = unmap->iova >> PAGE_SHIFT; 502 - end_gfn = gfn + unmap->size / PAGE_SIZE; 495 + iov_pfn = unmap->iova >> PAGE_SHIFT; 496 + end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE; 503 497 504 - while (gfn < end_gfn) 505 - gvt_cache_remove(vgpu, gfn++); 498 + mutex_lock(&vgpu->vdev.cache_lock); 499 + for (; iov_pfn < end_iov_pfn; iov_pfn++) { 500 + entry = __gvt_cache_find_gfn(vgpu, iov_pfn); 501 + if (!entry) 502 + continue; 503 + 504 + gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr); 505 + __gvt_cache_remove_entry(vgpu, entry); 506 + } 507 + mutex_unlock(&vgpu->vdev.cache_lock); 506 508 } 507 509 508 510 return NOTIFY_OK; ··· 1337 1321 mdev_unregister_device(dev); 1338 1322 } 1339 1323 1340 - static int kvmgt_write_protect_add(unsigned long handle, u64 gfn) 1324 + static int kvmgt_page_track_add(unsigned long handle, u64 gfn) 1341 1325 { 1342 1326 struct kvmgt_guest_info *info; 1343 1327 struct kvm *kvm; ··· 1371 1355 return 0; 1372 1356 } 1373 1357 1374 - static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn) 1358 + static int kvmgt_page_track_remove(unsigned long handle, u64 gfn) 1375 1359 { 1376 1360 struct kvmgt_guest_info *info; 1377 1361 struct kvm *kvm; ··· 1499 1483 info->track_node.track_flush_slot = kvmgt_page_track_flush_slot; 1500 1484 kvm_page_track_register_notifier(kvm, &info->track_node); 1501 1485 1486 + info->debugfs_cache_entries = debugfs_create_ulong( 1487 + "kvmgt_nr_cache_entries", 1488 + 0444, vgpu->debugfs, 1489 + &vgpu->vdev.nr_cache_entries); 1490 + if (!info->debugfs_cache_entries) 1491 + gvt_vgpu_err("Cannot create kvmgt debugfs entry\n"); 1492 + 1502 1493 return 0; 1503 1494 } 1504 1495 1505 1496 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) 1506 1497 { 1498 + debugfs_remove(info->debugfs_cache_entries); 1499 + 1507 1500 kvm_page_track_unregister_notifier(info->kvm, &info->track_node); 1508 1501 kvm_put_kvm(info->kvm); 1509 1502 kvmgt_protect_table_destroy(info); ··· 1552 1527 1553 1528 static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) 1554 1529 { 1555 - unsigned long iova, pfn; 1556 1530 struct kvmgt_guest_info *info; 1557 - struct device *dev; 1558 - struct intel_vgpu *vgpu; 1559 - int rc; 1531 + kvm_pfn_t pfn; 1560 1532 1561 1533 if (!handle_valid(handle)) 1562 1534 return INTEL_GVT_INVALID_ADDR; 1563 1535 1564 1536 info = (struct kvmgt_guest_info *)handle; 1537 + 1538 + pfn = gfn_to_pfn(info->kvm, gfn); 1539 + if (is_error_noslot_pfn(pfn)) 1540 + return INTEL_GVT_INVALID_ADDR; 1541 + 1542 + return pfn; 1543 + } 1544 + 1545 + int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, 1546 + dma_addr_t *dma_addr) 1547 + { 1548 + struct kvmgt_guest_info *info; 1549 + struct intel_vgpu *vgpu; 1550 + struct gvt_dma *entry; 1551 + int ret; 1552 + 1553 + if (!handle_valid(handle)) 1554 + return -EINVAL; 1555 + 1556 + info = (struct kvmgt_guest_info *)handle; 1565 1557 vgpu = info->vgpu; 1566 - iova = gvt_cache_find(info->vgpu, gfn); 1567 - if (iova != INTEL_GVT_INVALID_ADDR) 1568 - return iova; 1569 1558 1570 - pfn = INTEL_GVT_INVALID_ADDR; 1571 - dev = 
mdev_dev(info->vgpu->vdev.mdev); 1572 - rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn); 1573 - if (rc != 1) { 1574 - gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", 1575 - gfn, rc); 1576 - return INTEL_GVT_INVALID_ADDR; 1577 - } 1578 - /* transfer to host iova for GFX to use DMA */ 1579 - rc = gvt_dma_map_iova(info->vgpu, pfn, &iova); 1580 - if (rc) { 1581 - gvt_vgpu_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn); 1582 - vfio_unpin_pages(dev, &gfn, 1); 1583 - return INTEL_GVT_INVALID_ADDR; 1559 + mutex_lock(&info->vgpu->vdev.cache_lock); 1560 + 1561 + entry = __gvt_cache_find_gfn(info->vgpu, gfn); 1562 + if (!entry) { 1563 + ret = gvt_dma_map_page(vgpu, gfn, dma_addr); 1564 + if (ret) { 1565 + mutex_unlock(&info->vgpu->vdev.cache_lock); 1566 + return ret; 1567 + } 1568 + __gvt_cache_add(info->vgpu, gfn, *dma_addr); 1569 + } else { 1570 + kref_get(&entry->ref); 1571 + *dma_addr = entry->dma_addr; 1584 1572 } 1585 1573 1586 - gvt_cache_add(info->vgpu, gfn, iova); 1587 - return iova; 1574 + mutex_unlock(&info->vgpu->vdev.cache_lock); 1575 + return 0; 1576 + } 1577 + 1578 + static void __gvt_dma_release(struct kref *ref) 1579 + { 1580 + struct gvt_dma *entry = container_of(ref, typeof(*entry), ref); 1581 + 1582 + gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr); 1583 + __gvt_cache_remove_entry(entry->vgpu, entry); 1584 + } 1585 + 1586 + void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr) 1587 + { 1588 + struct kvmgt_guest_info *info; 1589 + struct gvt_dma *entry; 1590 + 1591 + if (!handle_valid(handle)) 1592 + return; 1593 + 1594 + info = (struct kvmgt_guest_info *)handle; 1595 + 1596 + mutex_lock(&info->vgpu->vdev.cache_lock); 1597 + entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr); 1598 + if (entry) 1599 + kref_put(&entry->ref, __gvt_dma_release); 1600 + mutex_unlock(&info->vgpu->vdev.cache_lock); 1588 1601 } 1589 1602 1590 1603 static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, ··· 1692 1629 .detach_vgpu = kvmgt_detach_vgpu, 1693 1630 .inject_msi = kvmgt_inject_msi, 1694 1631 .from_virt_to_mfn = kvmgt_virt_to_pfn, 1695 - .set_wp_page = kvmgt_write_protect_add, 1696 - .unset_wp_page = kvmgt_write_protect_remove, 1632 + .enable_page_track = kvmgt_page_track_add, 1633 + .disable_page_track = kvmgt_page_track_remove, 1697 1634 .read_gpa = kvmgt_read_gpa, 1698 1635 .write_gpa = kvmgt_write_gpa, 1699 1636 .gfn_to_mfn = kvmgt_gfn_to_pfn, 1637 + .dma_map_guest_page = kvmgt_dma_map_guest_page, 1638 + .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page, 1700 1639 .set_opregion = kvmgt_set_opregion, 1701 1640 .get_vfio_device = kvmgt_get_vfio_device, 1702 1641 .put_vfio_device = kvmgt_put_vfio_device,
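Editor's note on the kvmgt.c rework above: each pinned guest page is now cached in one entry indexed twice, by gfn and by DMA address, and reference counted so repeated map requests for the same gfn reuse the existing mapping. A simplified userspace sketch of that get-or-create / put-by-dma-address flow follows, with a linked list and an int refcount standing in for the kernel's rb trees and kref:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint64_t dma_addr_t;

struct gvt_dma_sketch {
	unsigned long gfn;
	dma_addr_t dma_addr;
	int ref;
	struct gvt_dma_sketch *next;
};

static struct gvt_dma_sketch *cache;
static unsigned long nr_cache_entries;

static struct gvt_dma_sketch *find_by_gfn(unsigned long gfn)
{
	struct gvt_dma_sketch *e;

	for (e = cache; e; e = e->next)
		if (e->gfn == gfn)
			return e;
	return NULL;
}

static struct gvt_dma_sketch *find_by_dma(dma_addr_t dma_addr)
{
	struct gvt_dma_sketch *e;

	for (e = cache; e; e = e->next)
		if (e->dma_addr == dma_addr)
			return e;
	return NULL;
}

/* Map: reuse an existing entry for the gfn, otherwise create one. */
static int dma_map_guest_page(unsigned long gfn, dma_addr_t *dma_addr)
{
	struct gvt_dma_sketch *e = find_by_gfn(gfn);

	if (e) {
		e->ref++;
		*dma_addr = e->dma_addr;
		return 0;
	}
	e = calloc(1, sizeof(*e));
	if (!e)
		return -1;
	e->gfn = gfn;
	e->dma_addr = (dma_addr_t)gfn << 12;	/* pretend mapping */
	e->ref = 1;
	e->next = cache;
	cache = e;
	nr_cache_entries++;
	*dma_addr = e->dma_addr;
	return 0;
}

/* Unmap: drop one reference, tear the entry down on the last put. */
static void dma_unmap_guest_page(dma_addr_t dma_addr)
{
	struct gvt_dma_sketch **p, *e = find_by_dma(dma_addr);

	if (!e || --e->ref)
		return;
	for (p = &cache; *p; p = &(*p)->next) {
		if (*p == e) {
			*p = e->next;
			break;
		}
	}
	nr_cache_entries--;
	free(e);
}

int main(void)
{
	dma_addr_t a, b;

	dma_map_guest_page(0x42, &a);
	dma_map_guest_page(0x42, &b);	/* reuses the cached mapping */
	printf("entries=%lu, same=%d\n", nr_cache_entries, a == b);
	dma_unmap_guest_page(a);
	dma_unmap_guest_page(b);
	printf("entries=%lu\n", nr_cache_entries);
	return 0;
}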
+4 -5
drivers/gpu/drm/i915/gvt/mmio.c
··· 76 76 else 77 77 intel_vgpu_default_mmio_write(vgpu, offset, p_data, 78 78 bytes); 79 - } else if (reg_is_gtt(gvt, offset) && 80 - vgpu->gtt.ggtt_mm->virtual_page_table) { 79 + } else if (reg_is_gtt(gvt, offset)) { 81 80 offset -= gvt->device_info.gtt_start_offset; 82 - pt = vgpu->gtt.ggtt_mm->virtual_page_table + offset; 81 + pt = vgpu->gtt.ggtt_mm->ggtt_mm.virtual_ggtt + offset; 83 82 if (read) 84 83 memcpy(p_data, pt, bytes); 85 84 else ··· 124 125 if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1))) 125 126 goto err; 126 127 127 - ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset, 128 + ret = intel_vgpu_emulate_ggtt_mmio_read(vgpu, offset, 128 129 p_data, bytes); 129 130 if (ret) 130 131 goto err; ··· 197 198 if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1))) 198 199 goto err; 199 200 200 - ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset, 201 + ret = intel_vgpu_emulate_ggtt_mmio_write(vgpu, offset, 201 202 p_data, bytes); 202 203 if (ret) 203 204 goto err;
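Editor's note on the mmio.c change above: in the failsafe path the GTT range is serviced straight from the mm's virtual_ggtt backing store, with the register offset rebased against the GTT start before copying bytes in or out. A small sketch of that read/write-through-a-backing-array idea (sizes and names are made up):

#include <stdio.h>
#include <string.h>

#define GTT_START_OFFSET 0x800000u
#define GTT_SIZE         0x1000u	/* toy GGTT backing store */

static unsigned char virtual_ggtt[GTT_SIZE];

/* Emulate an MMIO access that falls inside the GTT aperture. */
static int gtt_mmio_rw(unsigned int offset, void *data, unsigned int bytes, int read)
{
	if (offset < GTT_START_OFFSET ||
	    offset - GTT_START_OFFSET + bytes > GTT_SIZE)
		return -1;

	offset -= GTT_START_OFFSET;	/* rebase into the backing array */
	if (read)
		memcpy(data, virtual_ggtt + offset, bytes);
	else
		memcpy(virtual_ggtt + offset, data, bytes);
	return 0;
}

int main(void)
{
	unsigned long long pte = 0xabcd0001, readback = 0;

	gtt_mmio_rw(GTT_START_OFFSET + 8, &pte, sizeof(pte), 0);           /* write */
	gtt_mmio_rw(GTT_START_OFFSET + 8, &readback, sizeof(readback), 1); /* read */
	printf("readback 0x%llx\n", readback);
	return 0;
}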
+191 -19
drivers/gpu/drm/i915/gvt/mmio_context.c
··· 50 50 #define RING_GFX_MODE(base) _MMIO((base) + 0x29c) 51 51 #define VF_GUARDBAND _MMIO(0x83a4) 52 52 53 + #define GEN9_MOCS_SIZE 64 54 + 53 55 /* Raw offset is appened to each line for convenience. */ 54 56 static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = { 55 57 {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */ ··· 153 151 154 152 static struct { 155 153 bool initialized; 156 - u32 control_table[I915_NUM_ENGINES][64]; 157 - u32 l3cc_table[32]; 154 + u32 control_table[I915_NUM_ENGINES][GEN9_MOCS_SIZE]; 155 + u32 l3cc_table[GEN9_MOCS_SIZE / 2]; 158 156 } gen9_render_mocs; 159 157 160 158 static void load_render_mocs(struct drm_i915_private *dev_priv) ··· 171 169 172 170 for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) { 173 171 offset.reg = regs[ring_id]; 174 - for (i = 0; i < 64; i++) { 172 + for (i = 0; i < GEN9_MOCS_SIZE; i++) { 175 173 gen9_render_mocs.control_table[ring_id][i] = 176 174 I915_READ_FW(offset); 177 175 offset.reg += 4; ··· 179 177 } 180 178 181 179 offset.reg = 0xb020; 182 - for (i = 0; i < 32; i++) { 180 + for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) { 183 181 gen9_render_mocs.l3cc_table[i] = 184 182 I915_READ_FW(offset); 185 183 offset.reg += 4; 186 184 } 187 185 gen9_render_mocs.initialized = true; 186 + } 187 + 188 + static int 189 + restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu, 190 + struct i915_request *req) 191 + { 192 + u32 *cs; 193 + int ret; 194 + struct engine_mmio *mmio; 195 + struct intel_gvt *gvt = vgpu->gvt; 196 + int ring_id = req->engine->id; 197 + int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id]; 198 + 199 + if (count == 0) 200 + return 0; 201 + 202 + ret = req->engine->emit_flush(req, EMIT_BARRIER); 203 + if (ret) 204 + return ret; 205 + 206 + cs = intel_ring_begin(req, count * 2 + 2); 207 + if (IS_ERR(cs)) 208 + return PTR_ERR(cs); 209 + 210 + *cs++ = MI_LOAD_REGISTER_IMM(count); 211 + for (mmio = gvt->engine_mmio_list.mmio; 212 + i915_mmio_reg_valid(mmio->reg); mmio++) { 213 + if (mmio->ring_id != ring_id || 214 + !mmio->in_context) 215 + continue; 216 + 217 + *cs++ = i915_mmio_reg_offset(mmio->reg); 218 + *cs++ = vgpu_vreg_t(vgpu, mmio->reg) | 219 + (mmio->mask << 16); 220 + gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n", 221 + *(cs-2), *(cs-1), vgpu->id, ring_id); 222 + } 223 + 224 + *cs++ = MI_NOOP; 225 + intel_ring_advance(req, cs); 226 + 227 + ret = req->engine->emit_flush(req, EMIT_BARRIER); 228 + if (ret) 229 + return ret; 230 + 231 + return 0; 232 + } 233 + 234 + static int 235 + restore_render_mocs_control_for_inhibit(struct intel_vgpu *vgpu, 236 + struct i915_request *req) 237 + { 238 + unsigned int index; 239 + u32 *cs; 240 + 241 + cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE + 2); 242 + if (IS_ERR(cs)) 243 + return PTR_ERR(cs); 244 + 245 + *cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE); 246 + 247 + for (index = 0; index < GEN9_MOCS_SIZE; index++) { 248 + *cs++ = i915_mmio_reg_offset(GEN9_GFX_MOCS(index)); 249 + *cs++ = vgpu_vreg_t(vgpu, GEN9_GFX_MOCS(index)); 250 + gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n", 251 + *(cs-2), *(cs-1), vgpu->id, req->engine->id); 252 + 253 + } 254 + 255 + *cs++ = MI_NOOP; 256 + intel_ring_advance(req, cs); 257 + 258 + return 0; 259 + } 260 + 261 + static int 262 + restore_render_mocs_l3cc_for_inhibit(struct intel_vgpu *vgpu, 263 + struct i915_request *req) 264 + { 265 + unsigned int index; 266 + u32 *cs; 267 + 268 + cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE / 2 + 2); 269 + if 
(IS_ERR(cs)) 270 + return PTR_ERR(cs); 271 + 272 + *cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE / 2); 273 + 274 + for (index = 0; index < GEN9_MOCS_SIZE / 2; index++) { 275 + *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(index)); 276 + *cs++ = vgpu_vreg_t(vgpu, GEN9_LNCFCMOCS(index)); 277 + gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n", 278 + *(cs-2), *(cs-1), vgpu->id, req->engine->id); 279 + 280 + } 281 + 282 + *cs++ = MI_NOOP; 283 + intel_ring_advance(req, cs); 284 + 285 + return 0; 286 + } 287 + 288 + /* 289 + * Use lri command to initialize the mmio which is in context state image for 290 + * inhibit context, it contains tracked engine mmio, render_mocs and 291 + * render_mocs_l3cc. 292 + */ 293 + int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu, 294 + struct i915_request *req) 295 + { 296 + int ret; 297 + u32 *cs; 298 + 299 + cs = intel_ring_begin(req, 2); 300 + if (IS_ERR(cs)) 301 + return PTR_ERR(cs); 302 + 303 + *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; 304 + *cs++ = MI_NOOP; 305 + intel_ring_advance(req, cs); 306 + 307 + ret = restore_context_mmio_for_inhibit(vgpu, req); 308 + if (ret) 309 + goto out; 310 + 311 + /* no MOCS register in context except render engine */ 312 + if (req->engine->id != RCS) 313 + goto out; 314 + 315 + ret = restore_render_mocs_control_for_inhibit(vgpu, req); 316 + if (ret) 317 + goto out; 318 + 319 + ret = restore_render_mocs_l3cc_for_inhibit(vgpu, req); 320 + if (ret) 321 + goto out; 322 + 323 + out: 324 + cs = intel_ring_begin(req, 2); 325 + if (IS_ERR(cs)) 326 + return PTR_ERR(cs); 327 + 328 + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; 329 + *cs++ = MI_NOOP; 330 + intel_ring_advance(req, cs); 331 + 332 + return ret; 188 333 } 189 334 190 335 static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) ··· 400 251 if (WARN_ON(ring_id >= ARRAY_SIZE(regs))) 401 252 return; 402 253 254 + if (IS_KABYLAKE(dev_priv) && ring_id == RCS) 255 + return; 256 + 403 257 if (!pre && !gen9_render_mocs.initialized) 404 258 load_render_mocs(dev_priv); 405 259 406 260 offset.reg = regs[ring_id]; 407 - for (i = 0; i < 64; i++) { 261 + for (i = 0; i < GEN9_MOCS_SIZE; i++) { 408 262 if (pre) 409 263 old_v = vgpu_vreg_t(pre, offset); 410 264 else ··· 425 273 426 274 if (ring_id == RCS) { 427 275 l3_offset.reg = 0xb020; 428 - for (i = 0; i < 32; i++) { 276 + for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) { 429 277 if (pre) 430 278 old_v = vgpu_vreg_t(pre, l3_offset); 431 279 else ··· 445 293 446 294 #define CTX_CONTEXT_CONTROL_VAL 0x03 447 295 296 + bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id) 297 + { 298 + u32 *reg_state = ctx->engine[ring_id].lrc_reg_state; 299 + u32 inhibit_mask = 300 + _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); 301 + 302 + return inhibit_mask == 303 + (reg_state[CTX_CONTEXT_CONTROL_VAL] & inhibit_mask); 304 + } 305 + 448 306 /* Switch ring mmio values (context). 
*/ 449 307 static void switch_mmio(struct intel_vgpu *pre, 450 308 struct intel_vgpu *next, ··· 462 300 { 463 301 struct drm_i915_private *dev_priv; 464 302 struct intel_vgpu_submission *s; 465 - u32 *reg_state, ctx_ctrl; 466 - u32 inhibit_mask = 467 - _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); 468 303 struct engine_mmio *mmio; 469 304 u32 old_v, new_v; 470 305 ··· 469 310 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) 470 311 switch_mocs(pre, next, ring_id); 471 312 472 - for (mmio = dev_priv->gvt->engine_mmio_list; 313 + for (mmio = dev_priv->gvt->engine_mmio_list.mmio; 473 314 i915_mmio_reg_valid(mmio->reg); mmio++) { 474 315 if (mmio->ring_id != ring_id) 475 316 continue; 317 + /* 318 + * No need to do save or restore of the mmio which is in context 319 + * state image on kabylake, it's initialized by lri command and 320 + * save or restore with context together. 321 + */ 322 + if (IS_KABYLAKE(dev_priv) && mmio->in_context) 323 + continue; 324 + 476 325 // save 477 326 if (pre) { 478 327 vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg); ··· 494 327 // restore 495 328 if (next) { 496 329 s = &next->submission; 497 - reg_state = 498 - s->shadow_ctx->engine[ring_id].lrc_reg_state; 499 - ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL]; 500 330 /* 501 - * if it is an inhibit context, load in_context mmio 502 - * into HW by mmio write. If it is not, skip this mmio 503 - * write. 331 + * No need to restore the mmio which is in context state 332 + * image if it's not inhibit context, it will restore 333 + * itself. 504 334 */ 505 335 if (mmio->in_context && 506 - (ctx_ctrl & inhibit_mask) != inhibit_mask) 336 + !is_inhibit_context(s->shadow_ctx, ring_id)) 507 337 continue; 508 338 509 339 if (mmio->mask) ··· 569 405 */ 570 406 void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt) 571 407 { 408 + struct engine_mmio *mmio; 409 + 572 410 if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) 573 - gvt->engine_mmio_list = gen9_engine_mmio_list; 411 + gvt->engine_mmio_list.mmio = gen9_engine_mmio_list; 574 412 else 575 - gvt->engine_mmio_list = gen8_engine_mmio_list; 413 + gvt->engine_mmio_list.mmio = gen8_engine_mmio_list; 414 + 415 + for (mmio = gvt->engine_mmio_list.mmio; 416 + i915_mmio_reg_valid(mmio->reg); mmio++) { 417 + if (mmio->in_context) 418 + gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++; 419 + } 576 420 }
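The inhibit-context restore above is built out of MI_LOAD_REGISTER_IMM (LRI) packets, which is why each intel_ring_begin() call reserves one header dword, two dwords per register, and one trailing MI_NOOP (count * 2 + 2). A small stand-alone model of that dword budget; the 64 MOCS control entries and 32 l3cc entries are the GEN9_MOCS_SIZE values from the patch, and the reading of the MI_NOOP as an even-length pad is an assumption, not stated in the hunk:

#include <stdio.h>

/* dwords for one LRI packet: header + offset/value pair per register
 * + one MI_NOOP pad (assumed to keep the packet an even length) */
static unsigned int lri_dwords(unsigned int nregs)
{
	return 1 + 2 * nregs + 1;
}

int main(void)
{
	unsigned int gen9_mocs_size = 64;	/* GEN9_MOCS_SIZE from the patch */

	printf("5 in-context regs : %u dwords\n", lri_dwords(5));
	printf("MOCS control (64) : %u dwords\n", lri_dwords(gen9_mocs_size));
	printf("MOCS l3cc (32)    : %u dwords\n", lri_dwords(gen9_mocs_size / 2));
	return 0;
}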
+5
drivers/gpu/drm/i915/gvt/mmio_context.h
··· 49 49 50 50 void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt); 51 51 52 + bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id); 53 + 54 + int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu, 55 + struct i915_request *req); 56 + 52 57 #endif
+36 -31
drivers/gpu/drm/i915/gvt/mpt.h
··· 154 154 } 155 155 156 156 /** 157 - * intel_gvt_hypervisor_enable - set a guest page to write-protected 157 + * intel_gvt_hypervisor_enable_page_track - track a guest page 158 158 * @vgpu: a vGPU 159 - * @t: page track data structure 159 + * @gfn: the gfn of guest 160 160 * 161 161 * Returns: 162 162 * Zero on success, negative error code if failed. 163 163 */ 164 164 static inline int intel_gvt_hypervisor_enable_page_track( 165 - struct intel_vgpu *vgpu, 166 - struct intel_vgpu_page_track *t) 165 + struct intel_vgpu *vgpu, unsigned long gfn) 167 166 { 168 - int ret; 169 - 170 - if (t->tracked) 171 - return 0; 172 - 173 - ret = intel_gvt_host.mpt->set_wp_page(vgpu->handle, t->gfn); 174 - if (ret) 175 - return ret; 176 - t->tracked = true; 177 - atomic_inc(&vgpu->gtt.n_tracked_guest_page); 178 - return 0; 167 + return intel_gvt_host.mpt->enable_page_track(vgpu->handle, gfn); 179 168 } 180 169 181 170 /** 182 - * intel_gvt_hypervisor_disable_page_track - remove the write-protection of a 183 - * guest page 171 + * intel_gvt_hypervisor_disable_page_track - untrack a guest page 184 172 * @vgpu: a vGPU 185 - * @t: page track data structure 173 + * @gfn: the gfn of guest 186 174 * 187 175 * Returns: 188 176 * Zero on success, negative error code if failed. 189 177 */ 190 178 static inline int intel_gvt_hypervisor_disable_page_track( 191 - struct intel_vgpu *vgpu, 192 - struct intel_vgpu_page_track *t) 179 + struct intel_vgpu *vgpu, unsigned long gfn) 193 180 { 194 - int ret; 195 - 196 - if (!t->tracked) 197 - return 0; 198 - 199 - ret = intel_gvt_host.mpt->unset_wp_page(vgpu->handle, t->gfn); 200 - if (ret) 201 - return ret; 202 - t->tracked = false; 203 - atomic_dec(&vgpu->gtt.n_tracked_guest_page); 204 - return 0; 181 + return intel_gvt_host.mpt->disable_page_track(vgpu->handle, gfn); 205 182 } 206 183 207 184 /** ··· 225 248 struct intel_vgpu *vgpu, unsigned long gfn) 226 249 { 227 250 return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn); 251 + } 252 + 253 + /** 254 + * intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page 255 + * @vgpu: a vGPU 256 + * @gpfn: guest pfn 257 + * @dma_addr: retrieve allocated dma addr 258 + * 259 + * Returns: 260 + * 0 on success, negative error code if failed. 261 + */ 262 + static inline int intel_gvt_hypervisor_dma_map_guest_page( 263 + struct intel_vgpu *vgpu, unsigned long gfn, 264 + dma_addr_t *dma_addr) 265 + { 266 + return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, 267 + dma_addr); 268 + } 269 + 270 + /** 271 + * intel_gvt_hypervisor_dma_unmap_guest_page - cancel dma map for guest page 272 + * @vgpu: a vGPU 273 + * @dma_addr: the mapped dma addr 274 + */ 275 + static inline void intel_gvt_hypervisor_dma_unmap_guest_page( 276 + struct intel_vgpu *vgpu, dma_addr_t dma_addr) 277 + { 278 + intel_gvt_host.mpt->dma_unmap_guest_page(vgpu->handle, dma_addr); 228 279 } 229 280 230 281 /**
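The reworked wrappers above forward straight to the hypervisor backend, so the shape of the MPT hooks can be read off the call sites. The sketch below is only that: signatures inferred from the calls in this hunk; the real struct intel_gvt_mpt lives in hypercall.h, is not shown here, and may differ in member types and ordering.

#include <linux/types.h>	/* dma_addr_t */

/* Inferred from the call sites above; illustrative only. */
struct intel_gvt_mpt_page_hooks {
	int  (*enable_page_track)(unsigned long handle, unsigned long gfn);
	int  (*disable_page_track)(unsigned long handle, unsigned long gfn);
	int  (*dma_map_guest_page)(unsigned long handle, unsigned long gfn,
				   dma_addr_t *dma_addr);
	void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr);
};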
+184
drivers/gpu/drm/i915/gvt/page_track.c
··· 1 + /* 2 + * Copyright(c) 2011-2017 Intel Corporation. All rights reserved. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice (including the next 12 + * paragraph) shall be included in all copies or substantial portions of the 13 + * Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 + * SOFTWARE. 22 + */ 23 + #include "i915_drv.h" 24 + #include "gvt.h" 25 + 26 + /** 27 + * intel_vgpu_find_page_track - find page track rcord of guest page 28 + * @vgpu: a vGPU 29 + * @gfn: the gfn of guest page 30 + * 31 + * Returns: 32 + * A pointer to struct intel_vgpu_page_track if found, else NULL returned. 33 + */ 34 + struct intel_vgpu_page_track *intel_vgpu_find_page_track( 35 + struct intel_vgpu *vgpu, unsigned long gfn) 36 + { 37 + return radix_tree_lookup(&vgpu->page_track_tree, gfn); 38 + } 39 + 40 + /** 41 + * intel_vgpu_register_page_track - register a guest page to be tacked 42 + * @vgpu: a vGPU 43 + * @gfn: the gfn of guest page 44 + * 45 + * Returns: 46 + * zero on success, negative error code if failed. 47 + */ 48 + int intel_vgpu_register_page_track(struct intel_vgpu *vgpu, unsigned long gfn, 49 + gvt_page_track_handler_t handler, void *priv) 50 + { 51 + struct intel_vgpu_page_track *track; 52 + int ret; 53 + 54 + track = intel_vgpu_find_page_track(vgpu, gfn); 55 + if (track) 56 + return -EEXIST; 57 + 58 + track = kzalloc(sizeof(*track), GFP_KERNEL); 59 + if (!track) 60 + return -ENOMEM; 61 + 62 + track->handler = handler; 63 + track->priv_data = priv; 64 + 65 + ret = radix_tree_insert(&vgpu->page_track_tree, gfn, track); 66 + if (ret) { 67 + kfree(track); 68 + return ret; 69 + } 70 + 71 + return 0; 72 + } 73 + 74 + /** 75 + * intel_vgpu_unregister_page_track - unregister the tracked guest page 76 + * @vgpu: a vGPU 77 + * @gfn: the gfn of guest page 78 + * 79 + */ 80 + void intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu, 81 + unsigned long gfn) 82 + { 83 + struct intel_vgpu_page_track *track; 84 + 85 + track = radix_tree_delete(&vgpu->page_track_tree, gfn); 86 + if (track) { 87 + if (track->tracked) 88 + intel_gvt_hypervisor_disable_page_track(vgpu, gfn); 89 + kfree(track); 90 + } 91 + } 92 + 93 + /** 94 + * intel_vgpu_enable_page_track - set write-protection on guest page 95 + * @vgpu: a vGPU 96 + * @gfn: the gfn of guest page 97 + * 98 + * Returns: 99 + * zero on success, negative error code if failed. 
100 + */ 101 + int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn) 102 + { 103 + struct intel_vgpu_page_track *track; 104 + int ret; 105 + 106 + track = intel_vgpu_find_page_track(vgpu, gfn); 107 + if (!track) 108 + return -ENXIO; 109 + 110 + if (track->tracked) 111 + return 0; 112 + 113 + ret = intel_gvt_hypervisor_enable_page_track(vgpu, gfn); 114 + if (ret) 115 + return ret; 116 + track->tracked = true; 117 + return 0; 118 + } 119 + 120 + /** 121 + * intel_vgpu_enable_page_track - cancel write-protection on guest page 122 + * @vgpu: a vGPU 123 + * @gfn: the gfn of guest page 124 + * 125 + * Returns: 126 + * zero on success, negative error code if failed. 127 + */ 128 + int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn) 129 + { 130 + struct intel_vgpu_page_track *track; 131 + int ret; 132 + 133 + track = intel_vgpu_find_page_track(vgpu, gfn); 134 + if (!track) 135 + return -ENXIO; 136 + 137 + if (!track->tracked) 138 + return 0; 139 + 140 + ret = intel_gvt_hypervisor_disable_page_track(vgpu, gfn); 141 + if (ret) 142 + return ret; 143 + track->tracked = false; 144 + return 0; 145 + } 146 + 147 + /** 148 + * intel_vgpu_page_track_handler - called when write to write-protected page 149 + * @vgpu: a vGPU 150 + * @gpa: the gpa of this write 151 + * @data: the writed data 152 + * @bytes: the length of this write 153 + * 154 + * Returns: 155 + * zero on success, negative error code if failed. 156 + */ 157 + int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa, 158 + void *data, unsigned int bytes) 159 + { 160 + struct intel_gvt *gvt = vgpu->gvt; 161 + struct intel_vgpu_page_track *page_track; 162 + int ret = 0; 163 + 164 + mutex_lock(&gvt->lock); 165 + 166 + page_track = intel_vgpu_find_page_track(vgpu, gpa >> PAGE_SHIFT); 167 + if (!page_track) { 168 + ret = -ENXIO; 169 + goto out; 170 + } 171 + 172 + if (unlikely(vgpu->failsafe)) { 173 + /* Remove write protection to prevent furture traps. */ 174 + intel_vgpu_disable_page_track(vgpu, gpa >> PAGE_SHIFT); 175 + } else { 176 + ret = page_track->handler(page_track, gpa, data, bytes); 177 + if (ret) 178 + gvt_err("guest page write error, gpa %llx\n", gpa); 179 + } 180 + 181 + out: 182 + mutex_unlock(&gvt->lock); 183 + return ret; 184 + }
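Putting the new API together, a caller such as the GTT shadow-page code would register a handler for a guest page-table page, arm write protection, and then rely on intel_vgpu_page_track_handler() to dispatch trapped writes. The sketch below only uses the functions added in this file; ppgtt_write_protect_handler() and track_guest_pt() are hypothetical names for illustration, not the real GTT code.

/* Hypothetical caller of the page-track API added above. */
static int ppgtt_write_protect_handler(struct intel_vgpu_page_track *page_track,
				       u64 gpa, void *data, int bytes)
{
	/* re-shadow whatever guest PTE this write touched (not shown) */
	return 0;
}

static int track_guest_pt(struct intel_vgpu *vgpu, unsigned long gfn)
{
	int ret;

	/* one record per gfn; -EEXIST if the page is already registered */
	ret = intel_vgpu_register_page_track(vgpu, gfn,
					     ppgtt_write_protect_handler, NULL);
	if (ret)
		return ret;

	/* arm write protection through the hypervisor backend */
	ret = intel_vgpu_enable_page_track(vgpu, gfn);
	if (ret)
		intel_vgpu_unregister_page_track(vgpu, gfn);

	return ret;
}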
+56
drivers/gpu/drm/i915/gvt/page_track.h
··· 1 + /* 2 + * Copyright(c) 2011-2017 Intel Corporation. All rights reserved. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice (including the next 12 + * paragraph) shall be included in all copies or substantial portions of the 13 + * Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 + * SOFTWARE. 22 + * 23 + */ 24 + 25 + #ifndef _GVT_PAGE_TRACK_H_ 26 + #define _GVT_PAGE_TRACK_H_ 27 + 28 + struct intel_vgpu_page_track; 29 + 30 + typedef int (*gvt_page_track_handler_t)( 31 + struct intel_vgpu_page_track *page_track, 32 + u64 gpa, void *data, int bytes); 33 + 34 + /* Track record for a write-protected guest page. */ 35 + struct intel_vgpu_page_track { 36 + gvt_page_track_handler_t handler; 37 + bool tracked; 38 + void *priv_data; 39 + }; 40 + 41 + struct intel_vgpu_page_track *intel_vgpu_find_page_track( 42 + struct intel_vgpu *vgpu, unsigned long gfn); 43 + 44 + int intel_vgpu_register_page_track(struct intel_vgpu *vgpu, 45 + unsigned long gfn, gvt_page_track_handler_t handler, 46 + void *priv); 47 + void intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu, 48 + unsigned long gfn); 49 + 50 + int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn); 51 + int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn); 52 + 53 + int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa, 54 + void *data, unsigned int bytes); 55 + 56 + #endif
+2 -3
drivers/gpu/drm/i915/gvt/sched_policy.c
··· 103 103 104 104 list_for_each(pos, &sched_data->lru_runq_head) { 105 105 vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); 106 - fair_timeslice = ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS) * 107 - vgpu_data->sched_ctl.weight / 108 - total_weight; 106 + fair_timeslice = ktime_divns(ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS), 107 + total_weight) * vgpu_data->sched_ctl.weight; 109 108 110 109 vgpu_data->allocated_ts = fair_timeslice; 111 110 vgpu_data->left_ts = vgpu_data->allocated_ts;
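The rewritten expression divides the balance period by total_weight through ktime_divns() before scaling by each vGPU's weight, instead of multiplying a ktime_t and then dividing with a plain '/' (a 64-bit division that the compiler would otherwise have to open-code on 32-bit builds; that reading is an inference, not stated in the hunk). A stand-alone model of the arithmetic, assuming a 100 ms balance period since GVT_TS_BALANCE_PERIOD_MS is not shown here:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const int64_t period_ns = 100 * 1000000LL;	/* assumed 100 ms period */
	const int64_t weight[] = { 2, 4, 8 };		/* made-up vGPU weights */
	int64_t total = 0;
	int i;

	for (i = 0; i < 3; i++)
		total += weight[i];

	/* divide first (as ktime_divns() does), then scale per vGPU */
	for (i = 0; i < 3; i++)
		printf("vGPU%d: %lld ns of each period\n",
		       i, (long long)(period_ns / total * weight[i]));
	return 0;
}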
+29 -29
drivers/gpu/drm/i915/gvt/scheduler.c
··· 113 113 #undef COPY_REG 114 114 115 115 set_context_pdp_root_pointer(shadow_ring_context, 116 - workload->shadow_mm->shadow_page_table); 116 + (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps); 117 117 118 118 intel_gvt_hypervisor_read_gpa(vgpu, 119 119 workload->ring_context_gpa + ··· 126 126 return 0; 127 127 } 128 128 129 - static inline bool is_gvt_request(struct drm_i915_gem_request *req) 129 + static inline bool is_gvt_request(struct i915_request *req) 130 130 { 131 131 return i915_gem_context_force_single_submission(req->ctx); 132 132 } ··· 148 148 static int shadow_context_status_change(struct notifier_block *nb, 149 149 unsigned long action, void *data) 150 150 { 151 - struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data; 151 + struct i915_request *req = data; 152 152 struct intel_gvt *gvt = container_of(nb, struct intel_gvt, 153 153 shadow_ctx_notifier_block[req->engine->id]); 154 154 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; ··· 225 225 struct intel_vgpu *vgpu = workload->vgpu; 226 226 void *shadow_ring_buffer_va; 227 227 u32 *cs; 228 + struct i915_request *req = workload->req; 229 + 230 + if (IS_KABYLAKE(req->i915) && 231 + is_inhibit_context(req->ctx, req->engine->id)) 232 + intel_vgpu_restore_inhibit_context(vgpu, req); 228 233 229 234 /* allocate shadow ring buffer */ 230 235 cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32)); ··· 338 333 int ring_id = workload->ring_id; 339 334 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; 340 335 struct intel_engine_cs *engine = dev_priv->engine[ring_id]; 341 - struct drm_i915_gem_request *rq; 336 + struct i915_request *rq; 342 337 struct intel_vgpu *vgpu = workload->vgpu; 343 338 struct intel_vgpu_submission *s = &vgpu->submission; 344 339 struct i915_gem_context *shadow_ctx = s->shadow_ctx; 345 340 int ret; 346 341 347 - rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx); 342 + rq = i915_request_alloc(dev_priv->engine[ring_id], shadow_ctx); 348 343 if (IS_ERR(rq)) { 349 344 gvt_vgpu_err("fail to allocate gem request\n"); 350 345 ret = PTR_ERR(rq); ··· 353 348 354 349 gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq); 355 350 356 - workload->req = i915_gem_request_get(rq); 351 + workload->req = i915_request_get(rq); 357 352 ret = copy_workload_to_ring_buffer(workload); 358 353 if (ret) 359 354 goto err_unpin; ··· 587 582 if (!IS_ERR_OR_NULL(workload->req)) { 588 583 gvt_dbg_sched("ring id %d submit workload to i915 %p\n", 589 584 ring_id, workload->req); 590 - i915_add_request(workload->req); 585 + i915_request_add(workload->req); 591 586 workload->dispatched = true; 592 587 } 593 588 ··· 774 769 workload->status = 0; 775 770 } 776 771 777 - i915_gem_request_put(fetch_and_zero(&workload->req)); 772 + i915_request_put(fetch_and_zero(&workload->req)); 778 773 779 774 if (!workload->status && !(vgpu->resetting_eng & 780 775 ENGINE_MASK(ring_id))) { ··· 891 886 892 887 gvt_dbg_sched("ring id %d wait workload %p\n", 893 888 workload->ring_id, workload); 894 - i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT); 889 + i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT); 895 890 896 891 complete: 897 892 gvt_dbg_sched("will complete workload %p, status: %d\n", ··· 1137 1132 struct intel_vgpu_submission *s = &workload->vgpu->submission; 1138 1133 1139 1134 if (workload->shadow_mm) 1140 - intel_gvt_mm_unreference(workload->shadow_mm); 1135 + intel_vgpu_mm_put(workload->shadow_mm); 1141 1136 1142 1137 
kmem_cache_free(s->workloads, workload); 1143 1138 } ··· 1186 1181 struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc; 1187 1182 struct intel_vgpu_mm *mm; 1188 1183 struct intel_vgpu *vgpu = workload->vgpu; 1189 - int page_table_level; 1190 - u32 pdp[8]; 1184 + intel_gvt_gtt_type_t root_entry_type; 1185 + u64 pdps[GVT_RING_CTX_NR_PDPS]; 1191 1186 1192 - if (desc->addressing_mode == 1) { /* legacy 32-bit */ 1193 - page_table_level = 3; 1194 - } else if (desc->addressing_mode == 3) { /* legacy 64 bit */ 1195 - page_table_level = 4; 1196 - } else { 1187 + switch (desc->addressing_mode) { 1188 + case 1: /* legacy 32-bit */ 1189 + root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY; 1190 + break; 1191 + case 3: /* legacy 64-bit */ 1192 + root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY; 1193 + break; 1194 + default: 1197 1195 gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n"); 1198 1196 return -EINVAL; 1199 1197 } 1200 1198 1201 - read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp); 1199 + read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps); 1202 1200 1203 - mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp); 1204 - if (mm) { 1205 - intel_gvt_mm_reference(mm); 1206 - } else { 1201 + mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps); 1202 + if (IS_ERR(mm)) 1203 + return PTR_ERR(mm); 1207 1204 1208 - mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT, 1209 - pdp, page_table_level, 0); 1210 - if (IS_ERR(mm)) { 1211 - gvt_vgpu_err("fail to create mm object.\n"); 1212 - return PTR_ERR(mm); 1213 - } 1214 - } 1215 1205 workload->shadow_mm = mm; 1216 1206 return 0; 1217 1207 }
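The descriptor's addressing_mode now selects a root entry type instead of a raw page-table level: mode 1 (legacy 32-bit) becomes a 3-level PPGTT rooted in PDP entries, mode 3 (legacy 64-bit) a 4-level PPGTT rooted in a PML4 entry. As a rough sanity check on why the two modes map that way, the model below works out the address space each layout covers for the usual GEN8 layout of 4 KiB pages, 512 entries per level and 4 PDP roots in the 3-level case; those geometry numbers are background assumptions, not taken from this hunk.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t page = 4096;	/* 4 KiB pages */
	const uint64_t fanout = 512;	/* entries per table level (assumed) */

	/* 3-level (L3 root): 4 PDP roots -> PD -> PT -> page */
	uint64_t l3 = 4 * fanout * fanout * page;
	/* 4-level (L4 root): PML4 -> PDP -> PD -> PT -> page */
	uint64_t l4 = fanout * fanout * fanout * fanout * page;

	printf("legacy 32-bit (L3 root): %llu GiB\n",
	       (unsigned long long)(l3 >> 30));
	printf("legacy 64-bit (L4 root): %llu TiB\n",
	       (unsigned long long)(l4 >> 40));
	return 0;
}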
+1 -1
drivers/gpu/drm/i915/gvt/scheduler.h
··· 80 80 struct intel_vgpu_workload { 81 81 struct intel_vgpu *vgpu; 82 82 int ring_id; 83 - struct drm_i915_gem_request *req; 83 + struct i915_request *req; 84 84 /* if this workload has been dispatched to i915? */ 85 85 bool dispatched; 86 86 bool shadowed;
+5 -5
drivers/gpu/drm/i915/gvt/trace.h
··· 113 113 ); 114 114 115 115 TRACE_EVENT(gma_translate, 116 - TP_PROTO(int id, char *type, int ring_id, int pt_level, 116 + TP_PROTO(int id, char *type, int ring_id, int root_entry_type, 117 117 unsigned long gma, unsigned long gpa), 118 118 119 - TP_ARGS(id, type, ring_id, pt_level, gma, gpa), 119 + TP_ARGS(id, type, ring_id, root_entry_type, gma, gpa), 120 120 121 121 TP_STRUCT__entry( 122 122 __array(char, buf, MAX_BUF_LEN) ··· 124 124 125 125 TP_fast_assign( 126 126 snprintf(__entry->buf, MAX_BUF_LEN, 127 - "VM%d %s ring %d pt_level %d gma 0x%lx -> gpa 0x%lx\n", 128 - id, type, ring_id, pt_level, gma, gpa); 127 + "VM%d %s ring %d root_entry_type %d gma 0x%lx -> gpa 0x%lx\n", 128 + id, type, ring_id, root_entry_type, gma, gpa); 129 129 ), 130 130 131 131 TP_printk("%s", __entry->buf) ··· 168 168 TP_printk("%s", __entry->buf) 169 169 ); 170 170 171 - TRACE_EVENT(gpt_change, 171 + TRACE_EVENT(spt_guest_change, 172 172 TP_PROTO(int id, const char *tag, void *spt, int type, u64 v, 173 173 unsigned long index), 174 174
+1
drivers/gpu/drm/i915/gvt/vgpu.c
··· 354 354 vgpu->gvt = gvt; 355 355 vgpu->sched_ctl.weight = param->weight; 356 356 INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head); 357 + INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL); 357 358 idr_init(&vgpu->object_idr); 358 359 intel_vgpu_init_cfg_space(vgpu, param->primary); 359 360
+140 -74
drivers/gpu/drm/i915/i915_debugfs.c
··· 519 519 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 520 520 struct file_stats stats; 521 521 struct drm_i915_file_private *file_priv = file->driver_priv; 522 - struct drm_i915_gem_request *request; 522 + struct i915_request *request; 523 523 struct task_struct *task; 524 524 525 525 mutex_lock(&dev->struct_mutex); ··· 536 536 * Therefore, we need to protect this ->comm access using RCU. 537 537 */ 538 538 request = list_first_entry_or_null(&file_priv->mm.request_list, 539 - struct drm_i915_gem_request, 539 + struct i915_request, 540 540 client_link); 541 541 rcu_read_lock(); 542 542 task = pid_task(request && request->ctx->pid ? ··· 646 646 return 0; 647 647 } 648 648 649 + static void gen8_display_interrupt_info(struct seq_file *m) 650 + { 651 + struct drm_i915_private *dev_priv = node_to_i915(m->private); 652 + int pipe; 653 + 654 + for_each_pipe(dev_priv, pipe) { 655 + enum intel_display_power_domain power_domain; 656 + 657 + power_domain = POWER_DOMAIN_PIPE(pipe); 658 + if (!intel_display_power_get_if_enabled(dev_priv, 659 + power_domain)) { 660 + seq_printf(m, "Pipe %c power disabled\n", 661 + pipe_name(pipe)); 662 + continue; 663 + } 664 + seq_printf(m, "Pipe %c IMR:\t%08x\n", 665 + pipe_name(pipe), 666 + I915_READ(GEN8_DE_PIPE_IMR(pipe))); 667 + seq_printf(m, "Pipe %c IIR:\t%08x\n", 668 + pipe_name(pipe), 669 + I915_READ(GEN8_DE_PIPE_IIR(pipe))); 670 + seq_printf(m, "Pipe %c IER:\t%08x\n", 671 + pipe_name(pipe), 672 + I915_READ(GEN8_DE_PIPE_IER(pipe))); 673 + 674 + intel_display_power_put(dev_priv, power_domain); 675 + } 676 + 677 + seq_printf(m, "Display Engine port interrupt mask:\t%08x\n", 678 + I915_READ(GEN8_DE_PORT_IMR)); 679 + seq_printf(m, "Display Engine port interrupt identity:\t%08x\n", 680 + I915_READ(GEN8_DE_PORT_IIR)); 681 + seq_printf(m, "Display Engine port interrupt enable:\t%08x\n", 682 + I915_READ(GEN8_DE_PORT_IER)); 683 + 684 + seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n", 685 + I915_READ(GEN8_DE_MISC_IMR)); 686 + seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n", 687 + I915_READ(GEN8_DE_MISC_IIR)); 688 + seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n", 689 + I915_READ(GEN8_DE_MISC_IER)); 690 + 691 + seq_printf(m, "PCU interrupt mask:\t%08x\n", 692 + I915_READ(GEN8_PCU_IMR)); 693 + seq_printf(m, "PCU interrupt identity:\t%08x\n", 694 + I915_READ(GEN8_PCU_IIR)); 695 + seq_printf(m, "PCU interrupt enable:\t%08x\n", 696 + I915_READ(GEN8_PCU_IER)); 697 + } 698 + 649 699 static int i915_interrupt_info(struct seq_file *m, void *data) 650 700 { 651 701 struct drm_i915_private *dev_priv = node_to_i915(m->private); ··· 759 709 I915_READ(GEN8_PCU_IIR)); 760 710 seq_printf(m, "PCU interrupt enable:\t%08x\n", 761 711 I915_READ(GEN8_PCU_IER)); 712 + } else if (INTEL_GEN(dev_priv) >= 11) { 713 + seq_printf(m, "Master Interrupt Control: %08x\n", 714 + I915_READ(GEN11_GFX_MSTR_IRQ)); 715 + 716 + seq_printf(m, "Render/Copy Intr Enable: %08x\n", 717 + I915_READ(GEN11_RENDER_COPY_INTR_ENABLE)); 718 + seq_printf(m, "VCS/VECS Intr Enable: %08x\n", 719 + I915_READ(GEN11_VCS_VECS_INTR_ENABLE)); 720 + seq_printf(m, "GUC/SG Intr Enable:\t %08x\n", 721 + I915_READ(GEN11_GUC_SG_INTR_ENABLE)); 722 + seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n", 723 + I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE)); 724 + seq_printf(m, "Crypto Intr Enable:\t %08x\n", 725 + I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE)); 726 + seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n", 727 + I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE)); 728 + 729 + seq_printf(m, 
"Display Interrupt Control:\t%08x\n", 730 + I915_READ(GEN11_DISPLAY_INT_CTL)); 731 + 732 + gen8_display_interrupt_info(m); 762 733 } else if (INTEL_GEN(dev_priv) >= 8) { 763 734 seq_printf(m, "Master Interrupt Control:\t%08x\n", 764 735 I915_READ(GEN8_MASTER_IRQ)); ··· 793 722 i, I915_READ(GEN8_GT_IER(i))); 794 723 } 795 724 796 - for_each_pipe(dev_priv, pipe) { 797 - enum intel_display_power_domain power_domain; 798 - 799 - power_domain = POWER_DOMAIN_PIPE(pipe); 800 - if (!intel_display_power_get_if_enabled(dev_priv, 801 - power_domain)) { 802 - seq_printf(m, "Pipe %c power disabled\n", 803 - pipe_name(pipe)); 804 - continue; 805 - } 806 - seq_printf(m, "Pipe %c IMR:\t%08x\n", 807 - pipe_name(pipe), 808 - I915_READ(GEN8_DE_PIPE_IMR(pipe))); 809 - seq_printf(m, "Pipe %c IIR:\t%08x\n", 810 - pipe_name(pipe), 811 - I915_READ(GEN8_DE_PIPE_IIR(pipe))); 812 - seq_printf(m, "Pipe %c IER:\t%08x\n", 813 - pipe_name(pipe), 814 - I915_READ(GEN8_DE_PIPE_IER(pipe))); 815 - 816 - intel_display_power_put(dev_priv, power_domain); 817 - } 818 - 819 - seq_printf(m, "Display Engine port interrupt mask:\t%08x\n", 820 - I915_READ(GEN8_DE_PORT_IMR)); 821 - seq_printf(m, "Display Engine port interrupt identity:\t%08x\n", 822 - I915_READ(GEN8_DE_PORT_IIR)); 823 - seq_printf(m, "Display Engine port interrupt enable:\t%08x\n", 824 - I915_READ(GEN8_DE_PORT_IER)); 825 - 826 - seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n", 827 - I915_READ(GEN8_DE_MISC_IMR)); 828 - seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n", 829 - I915_READ(GEN8_DE_MISC_IIR)); 830 - seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n", 831 - I915_READ(GEN8_DE_MISC_IER)); 832 - 833 - seq_printf(m, "PCU interrupt mask:\t%08x\n", 834 - I915_READ(GEN8_PCU_IMR)); 835 - seq_printf(m, "PCU interrupt identity:\t%08x\n", 836 - I915_READ(GEN8_PCU_IIR)); 837 - seq_printf(m, "PCU interrupt enable:\t%08x\n", 838 - I915_READ(GEN8_PCU_IER)); 725 + gen8_display_interrupt_info(m); 839 726 } else if (IS_VALLEYVIEW(dev_priv)) { 840 727 seq_printf(m, "Display IER:\t%08x\n", 841 728 I915_READ(VLV_IER)); ··· 875 846 seq_printf(m, "Graphics Interrupt mask: %08x\n", 876 847 I915_READ(GTIMR)); 877 848 } 878 - if (INTEL_GEN(dev_priv) >= 6) { 849 + 850 + if (INTEL_GEN(dev_priv) >= 11) { 851 + seq_printf(m, "RCS Intr Mask:\t %08x\n", 852 + I915_READ(GEN11_RCS0_RSVD_INTR_MASK)); 853 + seq_printf(m, "BCS Intr Mask:\t %08x\n", 854 + I915_READ(GEN11_BCS_RSVD_INTR_MASK)); 855 + seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n", 856 + I915_READ(GEN11_VCS0_VCS1_INTR_MASK)); 857 + seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n", 858 + I915_READ(GEN11_VCS2_VCS3_INTR_MASK)); 859 + seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n", 860 + I915_READ(GEN11_VECS0_VECS1_INTR_MASK)); 861 + seq_printf(m, "GUC/SG Intr Mask:\t %08x\n", 862 + I915_READ(GEN11_GUC_SG_INTR_MASK)); 863 + seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n", 864 + I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK)); 865 + seq_printf(m, "Crypto Intr Mask:\t %08x\n", 866 + I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK)); 867 + seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n", 868 + I915_READ(GEN11_GUNIT_CSME_INTR_MASK)); 869 + 870 + } else if (INTEL_GEN(dev_priv) >= 6) { 879 871 for_each_engine(engine, dev_priv, id) { 880 872 seq_printf(m, 881 873 "Graphics Interrupt mask (%s): %08x\n", 882 874 engine->name, I915_READ_IMR(engine)); 883 875 } 884 876 } 877 + 885 878 intel_runtime_pm_put(dev_priv); 886 879 887 880 return 0; ··· 3201 3150 return 0; 3202 3151 } 3203 3152 3153 + static int i915_rcs_topology(struct 
seq_file *m, void *unused) 3154 + { 3155 + struct drm_i915_private *dev_priv = node_to_i915(m->private); 3156 + struct drm_printer p = drm_seq_file_printer(m); 3157 + 3158 + intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p); 3159 + 3160 + return 0; 3161 + } 3162 + 3204 3163 static int i915_shrinker_info(struct seq_file *m, void *unused) 3205 3164 { 3206 3165 struct drm_i915_private *i915 = node_to_i915(m->private); ··· 3987 3926 engine->hangcheck.stalled = true; 3988 3927 } 3989 3928 3990 - i915_handle_error(i915, val, "Manually setting wedged to %llu", val); 3929 + i915_handle_error(i915, val, "Manually set wedged engine mask = %llx", 3930 + val); 3991 3931 3992 3932 wait_on_bit(&i915->gpu_error.flags, 3993 3933 I915_RESET_HANDOFF, ··· 4122 4060 I915_WAIT_LOCKED); 4123 4061 4124 4062 if (val & DROP_RETIRE) 4125 - i915_gem_retire_requests(dev_priv); 4063 + i915_retire_requests(dev_priv); 4126 4064 4127 4065 mutex_unlock(&dev->struct_mutex); 4128 4066 } ··· 4333 4271 continue; 4334 4272 4335 4273 sseu->slice_mask = BIT(0); 4336 - sseu->subslice_mask |= BIT(ss); 4274 + sseu->subslice_mask[0] |= BIT(ss); 4337 4275 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) + 4338 4276 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) + 4339 4277 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) + ··· 4348 4286 struct sseu_dev_info *sseu) 4349 4287 { 4350 4288 const struct intel_device_info *info = INTEL_INFO(dev_priv); 4351 - int s_max = 6, ss_max = 4; 4352 4289 int s, ss; 4353 - u32 s_reg[s_max], eu_reg[2 * s_max], eu_mask[2]; 4290 + u32 s_reg[info->sseu.max_slices]; 4291 + u32 eu_reg[2 * info->sseu.max_subslices], eu_mask[2]; 4354 4292 4355 - for (s = 0; s < s_max; s++) { 4293 + for (s = 0; s < info->sseu.max_slices; s++) { 4356 4294 /* 4357 4295 * FIXME: Valid SS Mask respects the spec and read 4358 4296 * only valid bits for those registers, excluding reserverd ··· 4374 4312 GEN9_PGCTL_SSB_EU210_ACK | 4375 4313 GEN9_PGCTL_SSB_EU311_ACK; 4376 4314 4377 - for (s = 0; s < s_max; s++) { 4315 + for (s = 0; s < info->sseu.max_slices; s++) { 4378 4316 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0) 4379 4317 /* skip disabled slice */ 4380 4318 continue; 4381 4319 4382 4320 sseu->slice_mask |= BIT(s); 4383 - sseu->subslice_mask = info->sseu.subslice_mask; 4321 + sseu->subslice_mask[s] = info->sseu.subslice_mask[s]; 4384 4322 4385 - for (ss = 0; ss < ss_max; ss++) { 4323 + for (ss = 0; ss < info->sseu.max_subslices; ss++) { 4386 4324 unsigned int eu_cnt; 4387 4325 4388 4326 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) ··· 4402 4340 static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, 4403 4341 struct sseu_dev_info *sseu) 4404 4342 { 4405 - int s_max = 3, ss_max = 4; 4343 + const struct intel_device_info *info = INTEL_INFO(dev_priv); 4406 4344 int s, ss; 4407 - u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2]; 4345 + u32 s_reg[info->sseu.max_slices]; 4346 + u32 eu_reg[2 * info->sseu.max_subslices], eu_mask[2]; 4408 4347 4409 - /* BXT has a single slice and at most 3 subslices. 
*/ 4410 - if (IS_GEN9_LP(dev_priv)) { 4411 - s_max = 1; 4412 - ss_max = 3; 4413 - } 4414 - 4415 - for (s = 0; s < s_max; s++) { 4348 + for (s = 0; s < info->sseu.max_slices; s++) { 4416 4349 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s)); 4417 4350 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s)); 4418 4351 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s)); ··· 4422 4365 GEN9_PGCTL_SSB_EU210_ACK | 4423 4366 GEN9_PGCTL_SSB_EU311_ACK; 4424 4367 4425 - for (s = 0; s < s_max; s++) { 4368 + for (s = 0; s < info->sseu.max_slices; s++) { 4426 4369 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0) 4427 4370 /* skip disabled slice */ 4428 4371 continue; ··· 4430 4373 sseu->slice_mask |= BIT(s); 4431 4374 4432 4375 if (IS_GEN9_BC(dev_priv)) 4433 - sseu->subslice_mask = 4434 - INTEL_INFO(dev_priv)->sseu.subslice_mask; 4376 + sseu->subslice_mask[s] = 4377 + INTEL_INFO(dev_priv)->sseu.subslice_mask[s]; 4435 4378 4436 - for (ss = 0; ss < ss_max; ss++) { 4379 + for (ss = 0; ss < info->sseu.max_subslices; ss++) { 4437 4380 unsigned int eu_cnt; 4438 4381 4439 4382 if (IS_GEN9_LP(dev_priv)) { ··· 4441 4384 /* skip disabled subslice */ 4442 4385 continue; 4443 4386 4444 - sseu->subslice_mask |= BIT(ss); 4387 + sseu->subslice_mask[s] |= BIT(ss); 4445 4388 } 4446 4389 4447 4390 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] & ··· 4463 4406 sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK; 4464 4407 4465 4408 if (sseu->slice_mask) { 4466 - sseu->subslice_mask = INTEL_INFO(dev_priv)->sseu.subslice_mask; 4467 4409 sseu->eu_per_subslice = 4468 4410 INTEL_INFO(dev_priv)->sseu.eu_per_subslice; 4411 + for (s = 0; s < fls(sseu->slice_mask); s++) { 4412 + sseu->subslice_mask[s] = 4413 + INTEL_INFO(dev_priv)->sseu.subslice_mask[s]; 4414 + } 4469 4415 sseu->eu_total = sseu->eu_per_subslice * 4470 4416 sseu_subslice_total(sseu); 4471 4417 ··· 4487 4427 { 4488 4428 struct drm_i915_private *dev_priv = node_to_i915(m->private); 4489 4429 const char *type = is_available_info ? "Available" : "Enabled"; 4430 + int s; 4490 4431 4491 4432 seq_printf(m, " %s Slice Mask: %04x\n", type, 4492 4433 sseu->slice_mask); ··· 4495 4434 hweight8(sseu->slice_mask)); 4496 4435 seq_printf(m, " %s Subslice Total: %u\n", type, 4497 4436 sseu_subslice_total(sseu)); 4498 - seq_printf(m, " %s Subslice Mask: %04x\n", type, 4499 - sseu->subslice_mask); 4500 - seq_printf(m, " %s Subslice Per Slice: %u\n", type, 4501 - hweight8(sseu->subslice_mask)); 4437 + for (s = 0; s < fls(sseu->slice_mask); s++) { 4438 + seq_printf(m, " %s Slice%i subslices: %u\n", type, 4439 + s, hweight8(sseu->subslice_mask[s])); 4440 + } 4502 4441 seq_printf(m, " %s EU Total: %u\n", type, 4503 4442 sseu->eu_total); 4504 4443 seq_printf(m, " %s EU Per Subslice: %u\n", type, ··· 4532 4471 4533 4472 seq_puts(m, "SSEU Device Status\n"); 4534 4473 memset(&sseu, 0, sizeof(sseu)); 4474 + sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices; 4475 + sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices; 4476 + sseu.max_eus_per_subslice = 4477 + INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice; 4535 4478 4536 4479 intel_runtime_pm_get(dev_priv); 4537 4480 ··· 4743 4678 {"i915_dmc_info", i915_dmc_info, 0}, 4744 4679 {"i915_display_info", i915_display_info, 0}, 4745 4680 {"i915_engine_info", i915_engine_info, 0}, 4681 + {"i915_rcs_topology", i915_rcs_topology, 0}, 4746 4682 {"i915_shrinker_info", i915_shrinker_info, 0}, 4747 4683 {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, 4748 4684 {"i915_dp_mst_info", i915_dp_mst_info, 0},
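With subslice_mask now an array indexed by slice, the debugfs printer walks slices up to fls(slice_mask) and reports a subslice count per slice instead of one global mask. A user-space model of that walk, with made-up masks standing in for the hardware readout; fls() and hweight8() are re-implemented here only so the model compiles outside the kernel.

#include <stdio.h>

static int fls_u32(unsigned int v)
{
	int n = 0;

	while (v) {
		n++;
		v >>= 1;
	}
	return n;
}

static int hweight8(unsigned int v)
{
	int n = 0;

	while (v) {
		n += v & 1;
		v >>= 1;
	}
	return n;
}

int main(void)
{
	unsigned int slice_mask = 0x3;			/* example: slices 0-1 enabled */
	unsigned int subslice_mask[3] = { 0x7, 0x3, 0 };	/* example per-slice masks */
	int s;

	for (s = 0; s < fls_u32(slice_mask); s++)
		printf("Slice%d subslices: %d\n", s, hweight8(subslice_mask[s]));
	return 0;
}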
+9 -7
drivers/gpu/drm/i915/i915_drv.c
··· 49 49 #include "i915_drv.h" 50 50 #include "i915_trace.h" 51 51 #include "i915_pmu.h" 52 + #include "i915_query.h" 52 53 #include "i915_vgpu.h" 53 54 #include "intel_drv.h" 54 55 #include "intel_uc.h" ··· 429 428 return -ENODEV; 430 429 break; 431 430 case I915_PARAM_SUBSLICE_MASK: 432 - value = INTEL_INFO(dev_priv)->sseu.subslice_mask; 431 + value = INTEL_INFO(dev_priv)->sseu.subslice_mask[0]; 433 432 if (!value) 434 433 return -ENODEV; 435 434 break; ··· 809 808 /* 810 809 * The i915 workqueue is primarily used for batched retirement of 811 810 * requests (and thus managing bo) once the task has been completed 812 - * by the GPU. i915_gem_retire_requests() is called directly when we 811 + * by the GPU. i915_retire_requests() is called directly when we 813 812 * need high-priority retirement, such as waiting for an explicit 814 813 * bo. 815 814 * ··· 1993 1992 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 1994 1993 error: 1995 1994 i915_gem_set_wedged(i915); 1996 - i915_gem_retire_requests(i915); 1995 + i915_retire_requests(i915); 1997 1996 intel_gpu_reset(i915, ALL_ENGINES); 1998 1997 goto finish; 1999 1998 } ··· 2020 2019 int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags) 2021 2020 { 2022 2021 struct i915_gpu_error *error = &engine->i915->gpu_error; 2023 - struct drm_i915_gem_request *active_request; 2022 + struct i915_request *active_request; 2024 2023 int ret; 2025 2024 2026 2025 GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags)); ··· 2576 2575 */ 2577 2576 i915_gem_runtime_suspend(dev_priv); 2578 2577 2579 - intel_guc_suspend(dev_priv); 2578 + intel_uc_suspend(dev_priv); 2580 2579 2581 2580 intel_runtime_pm_disable_interrupts(dev_priv); 2582 2581 ··· 2598 2597 2599 2598 intel_runtime_pm_enable_interrupts(dev_priv); 2600 2599 2601 - intel_guc_resume(dev_priv); 2600 + intel_uc_resume(dev_priv); 2602 2601 2603 2602 i915_gem_init_swizzling(dev_priv); 2604 2603 i915_gem_restore_fences(dev_priv); ··· 2684 2683 2685 2684 intel_runtime_pm_enable_interrupts(dev_priv); 2686 2685 2687 - intel_guc_resume(dev_priv); 2686 + intel_uc_resume(dev_priv); 2688 2687 2689 2688 /* 2690 2689 * No point of rolling back things in case of an error, as the best ··· 2833 2832 DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW), 2834 2833 DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 2835 2834 DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 2835 + DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 2836 2836 }; 2837 2837 2838 2838 static struct drm_driver driver = {
+23 -18
drivers/gpu/drm/i915/i915_drv.h
··· 71 71 #include "i915_gem_fence_reg.h" 72 72 #include "i915_gem_object.h" 73 73 #include "i915_gem_gtt.h" 74 - #include "i915_gem_request.h" 75 74 #include "i915_gem_timeline.h" 76 75 76 + #include "i915_request.h" 77 77 #include "i915_vma.h" 78 78 79 79 #include "intel_gvt.h" ··· 83 83 84 84 #define DRIVER_NAME "i915" 85 85 #define DRIVER_DESC "Intel Graphics" 86 - #define DRIVER_DATE "20180221" 87 - #define DRIVER_TIMESTAMP 1519219289 86 + #define DRIVER_DATE "20180308" 87 + #define DRIVER_TIMESTAMP 1520513379 88 88 89 89 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and 90 90 * WARN_ON()) for hw state sanity checks to check for unexpected conditions ··· 1231 1231 * 1232 1232 * #I915_WEDGED - If reset fails and we can no longer use the GPU, 1233 1233 * we set the #I915_WEDGED bit. Prior to command submission, e.g. 1234 - * i915_gem_request_alloc(), this bit is checked and the sequence 1234 + * i915_request_alloc(), this bit is checked and the sequence 1235 1235 * aborted (with -EIO reported to userspace) if set. 1236 1236 */ 1237 1237 unsigned long flags; ··· 2103 2103 */ 2104 2104 struct ida hw_ida; 2105 2105 #define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */ 2106 + #define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */ 2106 2107 } contexts; 2107 2108 2108 2109 u32 fdi_rx_config; ··· 2747 2746 #define BLT_RING ENGINE_MASK(BCS) 2748 2747 #define VEBOX_RING ENGINE_MASK(VECS) 2749 2748 #define BSD2_RING ENGINE_MASK(VCS2) 2749 + #define BSD3_RING ENGINE_MASK(VCS3) 2750 + #define BSD4_RING ENGINE_MASK(VCS4) 2751 + #define VEBOX2_RING ENGINE_MASK(VECS2) 2750 2752 #define ALL_ENGINES (~0) 2751 2753 2752 2754 #define HAS_ENGINE(dev_priv, id) \ ··· 2772 2768 2773 2769 #define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \ 2774 2770 ((dev_priv)->info.has_logical_ring_contexts) 2771 + #define HAS_LOGICAL_RING_ELSQ(dev_priv) \ 2772 + ((dev_priv)->info.has_logical_ring_elsq) 2775 2773 #define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \ 2776 2774 ((dev_priv)->info.has_logical_ring_preemption) 2777 2775 ··· 2794 2788 /* Early gen2 have a totally busted CS tlb and require pinned batches. 
*/ 2795 2789 #define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv)) 2796 2790 2797 - /* WaRsDisableCoarsePowerGating:skl,bxt */ 2791 + /* WaRsDisableCoarsePowerGating:skl,cnl */ 2798 2792 #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ 2799 - (IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv)) 2793 + (IS_CANNONLAKE(dev_priv) || \ 2794 + IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv)) 2800 2795 2801 2796 /* 2802 2797 * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts ··· 3336 3329 3337 3330 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); 3338 3331 void i915_vma_move_to_active(struct i915_vma *vma, 3339 - struct drm_i915_gem_request *req, 3332 + struct i915_request *rq, 3340 3333 unsigned int flags); 3341 3334 int i915_gem_dumb_create(struct drm_file *file_priv, 3342 3335 struct drm_device *dev, ··· 3351 3344 3352 3345 int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno); 3353 3346 3354 - struct drm_i915_gem_request * 3347 + struct i915_request * 3355 3348 i915_gem_find_active_request(struct intel_engine_cs *engine); 3356 - 3357 - void i915_gem_retire_requests(struct drm_i915_private *dev_priv); 3358 3349 3359 3350 static inline bool i915_reset_backoff(struct i915_gpu_error *error) 3360 3351 { ··· 3385 3380 return READ_ONCE(error->reset_engine_count[engine->id]); 3386 3381 } 3387 3382 3388 - struct drm_i915_gem_request * 3383 + struct i915_request * 3389 3384 i915_gem_reset_prepare_engine(struct intel_engine_cs *engine); 3390 3385 int i915_gem_reset_prepare(struct drm_i915_private *dev_priv); 3391 3386 void i915_gem_reset(struct drm_i915_private *dev_priv); ··· 3394 3389 void i915_gem_set_wedged(struct drm_i915_private *dev_priv); 3395 3390 bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv); 3396 3391 void i915_gem_reset_engine(struct intel_engine_cs *engine, 3397 - struct drm_i915_gem_request *request); 3392 + struct i915_request *request); 3398 3393 3399 3394 void i915_gem_init_mmio(struct drm_i915_private *i915); 3400 3395 int __must_check i915_gem_init(struct drm_i915_private *dev_priv); ··· 4013 4008 } 4014 4009 4015 4010 static inline bool 4016 - __i915_request_irq_complete(const struct drm_i915_gem_request *req) 4011 + __i915_request_irq_complete(const struct i915_request *rq) 4017 4012 { 4018 - struct intel_engine_cs *engine = req->engine; 4013 + struct intel_engine_cs *engine = rq->engine; 4019 4014 u32 seqno; 4020 4015 4021 4016 /* Note that the engine may have wrapped around the seqno, and ··· 4024 4019 * this by kicking all the waiters before resetting the seqno 4025 4020 * in hardware, and also signal the fence. 4026 4021 */ 4027 - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &req->fence.flags)) 4022 + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)) 4028 4023 return true; 4029 4024 4030 4025 /* The request was dequeued before we were awoken. We check after ··· 4033 4028 * the request execution are sufficient to ensure that a check 4034 4029 * after reading the value from hw matches this request. 4035 4030 */ 4036 - seqno = i915_gem_request_global_seqno(req); 4031 + seqno = i915_request_global_seqno(rq); 4037 4032 if (!seqno) 4038 4033 return false; 4039 4034 4040 4035 /* Before we do the heavier coherent read of the seqno, 4041 4036 * check the value (hopefully) in the CPU cacheline. 
4042 4037 */ 4043 - if (__i915_gem_request_completed(req, seqno)) 4038 + if (__i915_request_completed(rq, seqno)) 4044 4039 return true; 4045 4040 4046 4041 /* Ensure our read of the seqno is coherent so that we ··· 4089 4084 wake_up_process(b->irq_wait->tsk); 4090 4085 spin_unlock_irq(&b->irq_lock); 4091 4086 4092 - if (__i915_gem_request_completed(req, seqno)) 4087 + if (__i915_request_completed(rq, seqno)) 4093 4088 return true; 4094 4089 } 4095 4090
+59 -60
drivers/gpu/drm/i915/i915_gem.c
··· 353 353 long timeout, 354 354 struct intel_rps_client *rps_client) 355 355 { 356 - struct drm_i915_gem_request *rq; 356 + struct i915_request *rq; 357 357 358 358 BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1); 359 359 ··· 366 366 timeout); 367 367 368 368 rq = to_request(fence); 369 - if (i915_gem_request_completed(rq)) 369 + if (i915_request_completed(rq)) 370 370 goto out; 371 371 372 372 /* ··· 385 385 * forcing the clocks too high for the whole system, we only allow 386 386 * each client to waitboost once in a busy period. 387 387 */ 388 - if (rps_client && !i915_gem_request_started(rq)) { 388 + if (rps_client && !i915_request_started(rq)) { 389 389 if (INTEL_GEN(rq->i915) >= 6) 390 390 gen6_rps_boost(rq, rps_client); 391 391 } 392 392 393 - timeout = i915_wait_request(rq, flags, timeout); 393 + timeout = i915_request_wait(rq, flags, timeout); 394 394 395 395 out: 396 - if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq)) 397 - i915_gem_request_retire_upto(rq); 396 + if (flags & I915_WAIT_LOCKED && i915_request_completed(rq)) 397 + i915_request_retire_upto(rq); 398 398 399 399 return timeout; 400 400 } ··· 463 463 464 464 static void __fence_set_priority(struct dma_fence *fence, int prio) 465 465 { 466 - struct drm_i915_gem_request *rq; 466 + struct i915_request *rq; 467 467 struct intel_engine_cs *engine; 468 468 469 469 if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence)) ··· 2856 2856 atomic_inc(&ctx->active_count); 2857 2857 } 2858 2858 2859 - struct drm_i915_gem_request * 2859 + struct i915_request * 2860 2860 i915_gem_find_active_request(struct intel_engine_cs *engine) 2861 2861 { 2862 - struct drm_i915_gem_request *request, *active = NULL; 2862 + struct i915_request *request, *active = NULL; 2863 2863 unsigned long flags; 2864 2864 2865 2865 /* We are called by the error capture and reset at a random ··· 2872 2872 */ 2873 2873 spin_lock_irqsave(&engine->timeline->lock, flags); 2874 2874 list_for_each_entry(request, &engine->timeline->requests, link) { 2875 - if (__i915_gem_request_completed(request, 2876 - request->global_seqno)) 2875 + if (__i915_request_completed(request, request->global_seqno)) 2877 2876 continue; 2878 2877 2879 2878 GEM_BUG_ON(request->engine != engine); ··· 2905 2906 * Ensure irq handler finishes, and not run again. 2906 2907 * Also return the active request so that we only search for it once. 
2907 2908 */ 2908 - struct drm_i915_gem_request * 2909 + struct i915_request * 2909 2910 i915_gem_reset_prepare_engine(struct intel_engine_cs *engine) 2910 2911 { 2911 - struct drm_i915_gem_request *request = NULL; 2912 + struct i915_request *request = NULL; 2912 2913 2913 2914 /* 2914 2915 * During the reset sequence, we must prevent the engine from ··· 2966 2967 int i915_gem_reset_prepare(struct drm_i915_private *dev_priv) 2967 2968 { 2968 2969 struct intel_engine_cs *engine; 2969 - struct drm_i915_gem_request *request; 2970 + struct i915_request *request; 2970 2971 enum intel_engine_id id; 2971 2972 int err = 0; 2972 2973 ··· 2985 2986 return err; 2986 2987 } 2987 2988 2988 - static void skip_request(struct drm_i915_gem_request *request) 2989 + static void skip_request(struct i915_request *request) 2989 2990 { 2990 2991 void *vaddr = request->ring->vaddr; 2991 2992 u32 head; ··· 3004 3005 dma_fence_set_error(&request->fence, -EIO); 3005 3006 } 3006 3007 3007 - static void engine_skip_context(struct drm_i915_gem_request *request) 3008 + static void engine_skip_context(struct i915_request *request) 3008 3009 { 3009 3010 struct intel_engine_cs *engine = request->engine; 3010 3011 struct i915_gem_context *hung_ctx = request->ctx; ··· 3028 3029 } 3029 3030 3030 3031 /* Returns the request if it was guilty of the hang */ 3031 - static struct drm_i915_gem_request * 3032 + static struct i915_request * 3032 3033 i915_gem_reset_request(struct intel_engine_cs *engine, 3033 - struct drm_i915_gem_request *request) 3034 + struct i915_request *request) 3034 3035 { 3035 3036 /* The guilty request will get skipped on a hung engine. 3036 3037 * ··· 3084 3085 } 3085 3086 3086 3087 void i915_gem_reset_engine(struct intel_engine_cs *engine, 3087 - struct drm_i915_gem_request *request) 3088 + struct i915_request *request) 3088 3089 { 3089 3090 /* 3090 3091 * Make sure this write is visible before we re-enable the interrupt ··· 3112 3113 3113 3114 lockdep_assert_held(&dev_priv->drm.struct_mutex); 3114 3115 3115 - i915_gem_retire_requests(dev_priv); 3116 + i915_retire_requests(dev_priv); 3116 3117 3117 3118 for_each_engine(engine, dev_priv, id) { 3118 3119 struct i915_gem_context *ctx; ··· 3133 3134 * empty request appears sufficient to paper over the glitch. 
3134 3135 */ 3135 3136 if (intel_engine_is_idle(engine)) { 3136 - struct drm_i915_gem_request *rq; 3137 + struct i915_request *rq; 3137 3138 3138 - rq = i915_gem_request_alloc(engine, 3139 - dev_priv->kernel_context); 3139 + rq = i915_request_alloc(engine, 3140 + dev_priv->kernel_context); 3140 3141 if (!IS_ERR(rq)) 3141 - __i915_add_request(rq, false); 3142 + __i915_request_add(rq, false); 3142 3143 } 3143 3144 } 3144 3145 ··· 3173 3174 } 3174 3175 } 3175 3176 3176 - static void nop_submit_request(struct drm_i915_gem_request *request) 3177 + static void nop_submit_request(struct i915_request *request) 3177 3178 { 3178 3179 dma_fence_set_error(&request->fence, -EIO); 3179 3180 3180 - i915_gem_request_submit(request); 3181 + i915_request_submit(request); 3181 3182 } 3182 3183 3183 - static void nop_complete_submit_request(struct drm_i915_gem_request *request) 3184 + static void nop_complete_submit_request(struct i915_request *request) 3184 3185 { 3185 3186 unsigned long flags; 3186 3187 3187 3188 dma_fence_set_error(&request->fence, -EIO); 3188 3189 3189 3190 spin_lock_irqsave(&request->engine->timeline->lock, flags); 3190 - __i915_gem_request_submit(request); 3191 + __i915_request_submit(request); 3191 3192 intel_engine_init_global_seqno(request->engine, request->global_seqno); 3192 3193 spin_unlock_irqrestore(&request->engine->timeline->lock, flags); 3193 3194 } ··· 3212 3213 * rolling the global seqno forward (since this would complete requests 3213 3214 * for which we haven't set the fence error to EIO yet). 3214 3215 */ 3215 - for_each_engine(engine, i915, id) 3216 + for_each_engine(engine, i915, id) { 3217 + i915_gem_reset_prepare_engine(engine); 3216 3218 engine->submit_request = nop_submit_request; 3219 + } 3217 3220 3218 3221 /* 3219 3222 * Make sure no one is running the old callback before we proceed with ··· 3257 3256 intel_engine_init_global_seqno(engine, 3258 3257 intel_engine_last_submit(engine)); 3259 3258 spin_unlock_irqrestore(&engine->timeline->lock, flags); 3259 + 3260 + i915_gem_reset_finish_engine(engine); 3260 3261 } 3261 3262 3262 3263 wake_up_all(&i915->gpu_error.reset_queue); ··· 3284 3281 */ 3285 3282 list_for_each_entry(tl, &i915->gt.timelines, link) { 3286 3283 for (i = 0; i < ARRAY_SIZE(tl->engine); i++) { 3287 - struct drm_i915_gem_request *rq; 3284 + struct i915_request *rq; 3288 3285 3289 3286 rq = i915_gem_active_peek(&tl->engine[i].last_request, 3290 3287 &i915->drm.struct_mutex); ··· 3333 3330 3334 3331 /* Come back later if the device is busy... */ 3335 3332 if (mutex_trylock(&dev->struct_mutex)) { 3336 - i915_gem_retire_requests(dev_priv); 3333 + i915_retire_requests(dev_priv); 3337 3334 mutex_unlock(&dev->struct_mutex); 3338 3335 } 3339 3336 ··· 3421 3418 container_of(work, typeof(*dev_priv), gt.idle_work.work); 3422 3419 unsigned int epoch = I915_EPOCH_INVALID; 3423 3420 bool rearm_hangcheck; 3424 - ktime_t end; 3425 3421 3426 3422 if (!READ_ONCE(dev_priv->gt.awake)) 3427 3423 return; 3428 3424 3429 3425 /* 3430 3426 * Wait for last execlists context complete, but bail out in case a 3431 - * new request is submitted. 3427 + * new request is submitted. As we don't trust the hardware, we 3428 + * continue on if the wait times out. This is necessary to allow 3429 + * the machine to suspend even if the hardware dies, and we will 3430 + * try to recover in resume (after depriving the hardware of power, 3431 + * it may be in a better mmod). 
3432 3432 */ 3433 - end = ktime_add_ms(ktime_get(), I915_IDLE_ENGINES_TIMEOUT); 3434 - do { 3435 - if (new_requests_since_last_retire(dev_priv)) 3436 - return; 3437 - 3438 - if (intel_engines_are_idle(dev_priv)) 3439 - break; 3440 - 3441 - usleep_range(100, 500); 3442 - } while (ktime_before(ktime_get(), end)); 3433 + __wait_for(if (new_requests_since_last_retire(dev_priv)) return, 3434 + intel_engines_are_idle(dev_priv), 3435 + I915_IDLE_ENGINES_TIMEOUT * 1000, 3436 + 10, 500); 3443 3437 3444 3438 rearm_hangcheck = 3445 3439 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); ··· 3684 3684 if (ret) 3685 3685 return ret; 3686 3686 } 3687 - i915_gem_retire_requests(i915); 3687 + i915_retire_requests(i915); 3688 3688 3689 3689 ret = wait_for_engines(i915); 3690 3690 } else { ··· 4224 4224 struct drm_i915_private *dev_priv = to_i915(dev); 4225 4225 struct drm_i915_file_private *file_priv = file->driver_priv; 4226 4226 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES; 4227 - struct drm_i915_gem_request *request, *target = NULL; 4227 + struct i915_request *request, *target = NULL; 4228 4228 long ret; 4229 4229 4230 4230 /* ABI: return -EIO if already wedged */ ··· 4244 4244 target = request; 4245 4245 } 4246 4246 if (target) 4247 - i915_gem_request_get(target); 4247 + i915_request_get(target); 4248 4248 spin_unlock(&file_priv->mm.lock); 4249 4249 4250 4250 if (target == NULL) 4251 4251 return 0; 4252 4252 4253 - ret = i915_wait_request(target, 4253 + ret = i915_request_wait(target, 4254 4254 I915_WAIT_INTERRUPTIBLE, 4255 4255 MAX_SCHEDULE_TIMEOUT); 4256 - i915_gem_request_put(target); 4256 + i915_request_put(target); 4257 4257 4258 4258 return ret < 0 ? ret : 0; 4259 4259 } ··· 4367 4367 __busy_set_if_active(const struct dma_fence *fence, 4368 4368 unsigned int (*flag)(unsigned int id)) 4369 4369 { 4370 - struct drm_i915_gem_request *rq; 4370 + struct i915_request *rq; 4371 4371 4372 4372 /* We have to check the current hw status of the fence as the uABI 4373 4373 * guarantees forward progress. We could rely on the idle worker ··· 4380 4380 return 0; 4381 4381 4382 4382 /* opencode to_request() in order to avoid const warnings */ 4383 - rq = container_of(fence, struct drm_i915_gem_request, fence); 4384 - if (i915_gem_request_completed(rq)) 4383 + rq = container_of(fence, struct i915_request, fence); 4384 + if (i915_request_completed(rq)) 4385 4385 return 0; 4386 4386 4387 4387 return flag(rq->engine->uabi_id); ··· 4526 4526 } 4527 4527 4528 4528 static void 4529 - frontbuffer_retire(struct i915_gem_active *active, 4530 - struct drm_i915_gem_request *request) 4529 + frontbuffer_retire(struct i915_gem_active *active, struct i915_request *request) 4531 4530 { 4532 4531 struct drm_i915_gem_object *obj = 4533 4532 container_of(active, typeof(*obj), frontbuffer_write); ··· 4920 4921 i915_gem_contexts_lost(dev_priv); 4921 4922 mutex_unlock(&dev->struct_mutex); 4922 4923 4923 - intel_guc_suspend(dev_priv); 4924 + intel_uc_suspend(dev_priv); 4924 4925 4925 4926 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); 4926 4927 cancel_delayed_work_sync(&dev_priv->gt.retire_work); ··· 4987 4988 if (i915_gem_init_hw(i915)) 4988 4989 goto err_wedged; 4989 4990 4990 - intel_guc_resume(i915); 4991 + intel_uc_resume(i915); 4991 4992 4992 4993 /* Always reload a context for powersaving. 
*/ 4993 4994 if (i915_gem_switch_to_kernel_context(i915)) ··· 5160 5161 return PTR_ERR(ctx); 5161 5162 5162 5163 for_each_engine(engine, i915, id) { 5163 - struct drm_i915_gem_request *rq; 5164 + struct i915_request *rq; 5164 5165 5165 - rq = i915_gem_request_alloc(engine, ctx); 5166 + rq = i915_request_alloc(engine, ctx); 5166 5167 if (IS_ERR(rq)) { 5167 5168 err = PTR_ERR(rq); 5168 5169 goto out_ctx; ··· 5172 5173 if (engine->init_context) 5173 5174 err = engine->init_context(rq); 5174 5175 5175 - __i915_add_request(rq, true); 5176 + __i915_request_add(rq, true); 5176 5177 if (err) 5177 5178 goto err_active; 5178 5179 } ··· 5478 5479 if (!dev_priv->luts) 5479 5480 goto err_vmas; 5480 5481 5481 - dev_priv->requests = KMEM_CACHE(drm_i915_gem_request, 5482 + dev_priv->requests = KMEM_CACHE(i915_request, 5482 5483 SLAB_HWCACHE_ALIGN | 5483 5484 SLAB_RECLAIM_ACCOUNT | 5484 5485 SLAB_TYPESAFE_BY_RCU); ··· 5611 5612 void i915_gem_release(struct drm_device *dev, struct drm_file *file) 5612 5613 { 5613 5614 struct drm_i915_file_private *file_priv = file->driver_priv; 5614 - struct drm_i915_gem_request *request; 5615 + struct i915_request *request; 5615 5616 5616 5617 /* Clean up our request list when the client is going away, so that 5617 5618 * later retire_requests won't dereference our soon-to-be-gone
+5 -2
drivers/gpu/drm/i915/i915_gem.h
··· 29 29 30 30 #ifdef CONFIG_DRM_I915_DEBUG_GEM 31 31 #define GEM_BUG_ON(condition) do { if (unlikely((condition))) { \ 32 - printk(KERN_ERR "GEM_BUG_ON(%s)\n", __stringify(condition)); \ 32 + pr_err("%s:%d GEM_BUG_ON(%s)\n", \ 33 + __func__, __LINE__, __stringify(condition)); \ 34 + GEM_TRACE("%s:%d GEM_BUG_ON(%s)\n", \ 35 + __func__, __LINE__, __stringify(condition)); \ 33 36 BUG(); \ 34 37 } \ 35 38 } while(0) ··· 57 54 #define GEM_TRACE(...) do { } while (0) 58 55 #endif 59 56 60 - #define I915_NUM_ENGINES 5 57 + #define I915_NUM_ENGINES 8 61 58 62 59 #endif /* __I915_GEM_H__ */
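A note on the GEM_BUG_ON() rework above: with CONFIG_DRM_I915_DEBUG_GEM enabled, a failing assertion now reports the calling function and line to both the kernel log (pr_err) and the GEM trace buffer before BUG(). A hypothetical caller, only to show the shape of the resulting message (the helper name is made up for illustration):

    /* hypothetical helper, purely illustrative */
    static void assert_vma_bound(struct i915_vma *vma)
    {
    	/*
    	 * On failure this now logs, e.g.
    	 *   assert_vma_bound:42 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node))
    	 * to both dmesg and the GEM trace, then calls BUG().
    	 */
    	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
    }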
+1 -1
drivers/gpu/drm/i915/i915_gem_batch_pool.c
··· 119 119 if (!reservation_object_test_signaled_rcu(resv, true)) 120 120 break; 121 121 122 - i915_gem_retire_requests(pool->engine->i915); 122 + i915_retire_requests(pool->engine->i915); 123 123 GEM_BUG_ON(i915_gem_object_is_active(obj)); 124 124 125 125 /*
+18 -11
drivers/gpu/drm/i915/i915_gem_context.c
··· 211 211 static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out) 212 212 { 213 213 int ret; 214 + unsigned int max; 215 + 216 + if (INTEL_GEN(dev_priv) >= 11) 217 + max = GEN11_MAX_CONTEXT_HW_ID; 218 + else 219 + max = MAX_CONTEXT_HW_ID; 214 220 215 221 ret = ida_simple_get(&dev_priv->contexts.hw_ida, 216 - 0, MAX_CONTEXT_HW_ID, GFP_KERNEL); 222 + 0, max, GFP_KERNEL); 217 223 if (ret < 0) { 218 224 /* Contexts are only released when no longer active. 219 225 * Flush any pending retires to hopefully release some 220 226 * stale contexts and try again. 221 227 */ 222 - i915_gem_retire_requests(dev_priv); 228 + i915_retire_requests(dev_priv); 223 229 ret = ida_simple_get(&dev_priv->contexts.hw_ida, 224 - 0, MAX_CONTEXT_HW_ID, GFP_KERNEL); 230 + 0, max, GFP_KERNEL); 225 231 if (ret < 0) 226 232 return ret; 227 233 } ··· 469 463 470 464 /* Using the simple ida interface, the max is limited by sizeof(int) */ 471 465 BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX); 466 + BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX); 472 467 ida_init(&dev_priv->contexts.hw_ida); 473 468 474 469 /* lowest priority; idle task */ ··· 597 590 598 591 lockdep_assert_held(&dev_priv->drm.struct_mutex); 599 592 600 - i915_gem_retire_requests(dev_priv); 593 + i915_retire_requests(dev_priv); 601 594 602 595 for_each_engine(engine, dev_priv, id) { 603 - struct drm_i915_gem_request *req; 596 + struct i915_request *rq; 604 597 605 598 if (engine_has_idle_kernel_context(engine)) 606 599 continue; 607 600 608 - req = i915_gem_request_alloc(engine, dev_priv->kernel_context); 609 - if (IS_ERR(req)) 610 - return PTR_ERR(req); 601 + rq = i915_request_alloc(engine, dev_priv->kernel_context); 602 + if (IS_ERR(rq)) 603 + return PTR_ERR(rq); 611 604 612 605 /* Queue this switch after all other activity */ 613 606 list_for_each_entry(timeline, &dev_priv->gt.timelines, link) { 614 - struct drm_i915_gem_request *prev; 607 + struct i915_request *prev; 615 608 struct intel_timeline *tl; 616 609 617 610 tl = &timeline->engine[engine->id]; 618 611 prev = i915_gem_active_raw(&tl->last_request, 619 612 &dev_priv->drm.struct_mutex); 620 613 if (prev) 621 - i915_sw_fence_await_sw_fence_gfp(&req->submit, 614 + i915_sw_fence_await_sw_fence_gfp(&rq->submit, 622 615 &prev->submit, 623 616 I915_FENCE_GFP); 624 617 } ··· 630 623 * but an extra layer of paranoia before we declare the system 631 624 * idle (on suspend etc) is advisable! 632 625 */ 633 - __i915_add_request(req, true); 626 + __i915_request_add(rq, true); 634 627 } 635 628 636 629 return 0;
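The switch-to-kernel-context path above now deals exclusively in struct i915_request. Distilled to its core, the per-engine loop reduces to the sketch below (the idle-context check and the cross-timeline ordering shown in the full hunk are elided):

    for_each_engine(engine, dev_priv, id) {
    	struct i915_request *rq;

    	rq = i915_request_alloc(engine, dev_priv->kernel_context);
    	if (IS_ERR(rq))
    		return PTR_ERR(rq);

    	/* ordering behind earlier requests elided; see the hunk above */
    	__i915_request_add(rq, true);
    }

    return 0;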
+2 -2
drivers/gpu/drm/i915/i915_gem_context.h
··· 38 38 39 39 struct drm_i915_private; 40 40 struct drm_i915_file_private; 41 - struct drm_i915_gem_request; 42 41 struct i915_hw_ppgtt; 42 + struct i915_request; 43 43 struct i915_vma; 44 44 struct intel_ring; 45 45 ··· 276 276 struct drm_file *file); 277 277 void i915_gem_context_close(struct drm_file *file); 278 278 279 - int i915_switch_context(struct drm_i915_gem_request *req); 279 + int i915_switch_context(struct i915_request *rq); 280 280 int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv); 281 281 282 282 void i915_gem_context_release(struct kref *ctx_ref);
+2 -2
drivers/gpu/drm/i915/i915_gem_evict.c
··· 168 168 * retiring. 169 169 */ 170 170 if (!(flags & PIN_NONBLOCK)) 171 - i915_gem_retire_requests(dev_priv); 171 + i915_retire_requests(dev_priv); 172 172 else 173 173 phases[1] = NULL; 174 174 ··· 293 293 * retiring. 294 294 */ 295 295 if (!(flags & PIN_NONBLOCK)) 296 - i915_gem_retire_requests(vm->i915); 296 + i915_retire_requests(vm->i915); 297 297 298 298 check_color = vm->mm.color_adjust; 299 299 if (check_color) {
+30 -30
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 200 200 struct i915_gem_context *ctx; /** context for building the request */ 201 201 struct i915_address_space *vm; /** GTT and vma for the request */ 202 202 203 - struct drm_i915_gem_request *request; /** our request to build */ 203 + struct i915_request *request; /** our request to build */ 204 204 struct i915_vma *batch; /** identity of the batch obj/vma */ 205 205 206 206 /** actual size of execobj[] as we may extend it for the cmdparser */ ··· 227 227 bool has_fence : 1; 228 228 bool needs_unfenced : 1; 229 229 230 - struct drm_i915_gem_request *rq; 230 + struct i915_request *rq; 231 231 u32 *rq_cmd; 232 232 unsigned int rq_size; 233 233 } reloc_cache; ··· 886 886 i915_gem_object_unpin_map(cache->rq->batch->obj); 887 887 i915_gem_chipset_flush(cache->rq->i915); 888 888 889 - __i915_add_request(cache->rq, true); 889 + __i915_request_add(cache->rq, true); 890 890 cache->rq = NULL; 891 891 } 892 892 ··· 1070 1070 { 1071 1071 struct reloc_cache *cache = &eb->reloc_cache; 1072 1072 struct drm_i915_gem_object *obj; 1073 - struct drm_i915_gem_request *rq; 1073 + struct i915_request *rq; 1074 1074 struct i915_vma *batch; 1075 1075 u32 *cmd; 1076 1076 int err; ··· 1103 1103 if (err) 1104 1104 goto err_unmap; 1105 1105 1106 - rq = i915_gem_request_alloc(eb->engine, eb->ctx); 1106 + rq = i915_request_alloc(eb->engine, eb->ctx); 1107 1107 if (IS_ERR(rq)) { 1108 1108 err = PTR_ERR(rq); 1109 1109 goto err_unpin; 1110 1110 } 1111 1111 1112 - err = i915_gem_request_await_object(rq, vma->obj, true); 1112 + err = i915_request_await_object(rq, vma->obj, true); 1113 1113 if (err) 1114 1114 goto err_request; 1115 1115 ··· 1141 1141 return 0; 1142 1142 1143 1143 err_request: 1144 - i915_add_request(rq); 1144 + i915_request_add(rq); 1145 1145 err_unpin: 1146 1146 i915_vma_unpin(batch); 1147 1147 err_unmap: ··· 1727 1727 } 1728 1728 1729 1729 static void eb_export_fence(struct i915_vma *vma, 1730 - struct drm_i915_gem_request *req, 1730 + struct i915_request *rq, 1731 1731 unsigned int flags) 1732 1732 { 1733 1733 struct reservation_object *resv = vma->resv; ··· 1739 1739 */ 1740 1740 reservation_object_lock(resv, NULL); 1741 1741 if (flags & EXEC_OBJECT_WRITE) 1742 - reservation_object_add_excl_fence(resv, &req->fence); 1742 + reservation_object_add_excl_fence(resv, &rq->fence); 1743 1743 else if (reservation_object_reserve_shared(resv) == 0) 1744 - reservation_object_add_shared_fence(resv, &req->fence); 1744 + reservation_object_add_shared_fence(resv, &rq->fence); 1745 1745 reservation_object_unlock(resv); 1746 1746 } 1747 1747 ··· 1757 1757 struct drm_i915_gem_object *obj = vma->obj; 1758 1758 1759 1759 if (flags & EXEC_OBJECT_CAPTURE) { 1760 - struct i915_gem_capture_list *capture; 1760 + struct i915_capture_list *capture; 1761 1761 1762 1762 capture = kmalloc(sizeof(*capture), GFP_KERNEL); 1763 1763 if (unlikely(!capture)) ··· 1788 1788 if (flags & EXEC_OBJECT_ASYNC) 1789 1789 continue; 1790 1790 1791 - err = i915_gem_request_await_object 1791 + err = i915_request_await_object 1792 1792 (eb->request, obj, flags & EXEC_OBJECT_WRITE); 1793 1793 if (err) 1794 1794 return err; ··· 1840 1840 } 1841 1841 1842 1842 void i915_vma_move_to_active(struct i915_vma *vma, 1843 - struct drm_i915_gem_request *req, 1843 + struct i915_request *rq, 1844 1844 unsigned int flags) 1845 1845 { 1846 1846 struct drm_i915_gem_object *obj = vma->obj; 1847 - const unsigned int idx = req->engine->id; 1847 + const unsigned int idx = rq->engine->id; 1848 1848 1849 - lockdep_assert_held(&req->i915->drm.struct_mutex); 1849 + 
lockdep_assert_held(&rq->i915->drm.struct_mutex); 1850 1850 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 1851 1851 1852 1852 /* ··· 1860 1860 if (!i915_vma_is_active(vma)) 1861 1861 obj->active_count++; 1862 1862 i915_vma_set_active(vma, idx); 1863 - i915_gem_active_set(&vma->last_read[idx], req); 1863 + i915_gem_active_set(&vma->last_read[idx], rq); 1864 1864 list_move_tail(&vma->vm_link, &vma->vm->active_list); 1865 1865 1866 1866 obj->write_domain = 0; ··· 1868 1868 obj->write_domain = I915_GEM_DOMAIN_RENDER; 1869 1869 1870 1870 if (intel_fb_obj_invalidate(obj, ORIGIN_CS)) 1871 - i915_gem_active_set(&obj->frontbuffer_write, req); 1871 + i915_gem_active_set(&obj->frontbuffer_write, rq); 1872 1872 1873 1873 obj->read_domains = 0; 1874 1874 } 1875 1875 obj->read_domains |= I915_GEM_GPU_DOMAINS; 1876 1876 1877 1877 if (flags & EXEC_OBJECT_NEEDS_FENCE) 1878 - i915_gem_active_set(&vma->last_fence, req); 1878 + i915_gem_active_set(&vma->last_fence, rq); 1879 1879 } 1880 1880 1881 - static int i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req) 1881 + static int i915_reset_gen7_sol_offsets(struct i915_request *rq) 1882 1882 { 1883 1883 u32 *cs; 1884 1884 int i; 1885 1885 1886 - if (!IS_GEN7(req->i915) || req->engine->id != RCS) { 1886 + if (!IS_GEN7(rq->i915) || rq->engine->id != RCS) { 1887 1887 DRM_DEBUG("sol reset is gen7/rcs only\n"); 1888 1888 return -EINVAL; 1889 1889 } 1890 1890 1891 - cs = intel_ring_begin(req, 4 * 2 + 2); 1891 + cs = intel_ring_begin(rq, 4 * 2 + 2); 1892 1892 if (IS_ERR(cs)) 1893 1893 return PTR_ERR(cs); 1894 1894 ··· 1898 1898 *cs++ = 0; 1899 1899 } 1900 1900 *cs++ = MI_NOOP; 1901 - intel_ring_advance(req, cs); 1901 + intel_ring_advance(rq, cs); 1902 1902 1903 1903 return 0; 1904 1904 } ··· 1944 1944 } 1945 1945 1946 1946 static void 1947 - add_to_client(struct drm_i915_gem_request *req, struct drm_file *file) 1947 + add_to_client(struct i915_request *rq, struct drm_file *file) 1948 1948 { 1949 - req->file_priv = file->driver_priv; 1950 - list_add_tail(&req->client_link, &req->file_priv->mm.request_list); 1949 + rq->file_priv = file->driver_priv; 1950 + list_add_tail(&rq->client_link, &rq->file_priv->mm.request_list); 1951 1951 } 1952 1952 1953 1953 static int eb_submit(struct i915_execbuffer *eb) ··· 2151 2151 if (!fence) 2152 2152 return -EINVAL; 2153 2153 2154 - err = i915_gem_request_await_dma_fence(eb->request, fence); 2154 + err = i915_request_await_dma_fence(eb->request, fence); 2155 2155 dma_fence_put(fence); 2156 2156 if (err < 0) 2157 2157 return err; ··· 2365 2365 GEM_BUG_ON(eb.reloc_cache.rq); 2366 2366 2367 2367 /* Allocate a request for this batch buffer nice and early. 
*/ 2368 - eb.request = i915_gem_request_alloc(eb.engine, eb.ctx); 2368 + eb.request = i915_request_alloc(eb.engine, eb.ctx); 2369 2369 if (IS_ERR(eb.request)) { 2370 2370 err = PTR_ERR(eb.request); 2371 2371 goto err_batch_unpin; 2372 2372 } 2373 2373 2374 2374 if (in_fence) { 2375 - err = i915_gem_request_await_dma_fence(eb.request, in_fence); 2375 + err = i915_request_await_dma_fence(eb.request, in_fence); 2376 2376 if (err < 0) 2377 2377 goto err_request; 2378 2378 } ··· 2400 2400 */ 2401 2401 eb.request->batch = eb.batch; 2402 2402 2403 - trace_i915_gem_request_queue(eb.request, eb.batch_flags); 2403 + trace_i915_request_queue(eb.request, eb.batch_flags); 2404 2404 err = eb_submit(&eb); 2405 2405 err_request: 2406 - __i915_add_request(eb.request, err == 0); 2406 + __i915_request_add(eb.request, err == 0); 2407 2407 add_to_client(eb.request, file); 2408 2408 2409 2409 if (fences)
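Taken together, the execbuffer changes above exercise the renamed request lifecycle end to end. A minimal sketch of that flow under the new names, distilled from the calls visible in this hunk (error handling, relocation and batch setup are trimmed; illustrative only, not a drop-in function):

    struct i915_request *rq;
    int err;

    rq = i915_request_alloc(engine, ctx);	/* was i915_gem_request_alloc() */
    if (IS_ERR(rq))
    	return PTR_ERR(rq);

    /* serialise against other users of the batch object */
    err = i915_request_await_object(rq, obj, true /* write */);
    if (err)
    	goto out_add;

    /* optionally order behind an explicit in-fence */
    err = i915_request_await_dma_fence(rq, in_fence);
    if (err < 0)
    	goto out_add;
    err = 0;

    /* ... emit the batchbuffer start into rq's ring ... */

    out_add:
    __i915_request_add(rq, err == 0);		/* was __i915_add_request() */
    return err;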
+19 -19
drivers/gpu/drm/i915/i915_gem_gtt.c
··· 765 765 } 766 766 767 767 /* Broadwell Page Directory Pointer Descriptors */ 768 - static int gen8_write_pdp(struct drm_i915_gem_request *req, 768 + static int gen8_write_pdp(struct i915_request *rq, 769 769 unsigned entry, 770 770 dma_addr_t addr) 771 771 { 772 - struct intel_engine_cs *engine = req->engine; 772 + struct intel_engine_cs *engine = rq->engine; 773 773 u32 *cs; 774 774 775 775 BUG_ON(entry >= 4); 776 776 777 - cs = intel_ring_begin(req, 6); 777 + cs = intel_ring_begin(rq, 6); 778 778 if (IS_ERR(cs)) 779 779 return PTR_ERR(cs); 780 780 ··· 784 784 *cs++ = MI_LOAD_REGISTER_IMM(1); 785 785 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry)); 786 786 *cs++ = lower_32_bits(addr); 787 - intel_ring_advance(req, cs); 787 + intel_ring_advance(rq, cs); 788 788 789 789 return 0; 790 790 } 791 791 792 792 static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt, 793 - struct drm_i915_gem_request *req) 793 + struct i915_request *rq) 794 794 { 795 795 int i, ret; 796 796 797 797 for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) { 798 798 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); 799 799 800 - ret = gen8_write_pdp(req, i, pd_daddr); 800 + ret = gen8_write_pdp(rq, i, pd_daddr); 801 801 if (ret) 802 802 return ret; 803 803 } ··· 806 806 } 807 807 808 808 static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt, 809 - struct drm_i915_gem_request *req) 809 + struct i915_request *rq) 810 810 { 811 - return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4)); 811 + return gen8_write_pdp(rq, 0, px_dma(&ppgtt->pml4)); 812 812 } 813 813 814 814 /* PDE TLBs are a pain to invalidate on GEN8+. When we modify ··· 1732 1732 } 1733 1733 1734 1734 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, 1735 - struct drm_i915_gem_request *req) 1735 + struct i915_request *rq) 1736 1736 { 1737 - struct intel_engine_cs *engine = req->engine; 1737 + struct intel_engine_cs *engine = rq->engine; 1738 1738 u32 *cs; 1739 1739 1740 1740 /* NB: TLBs must be flushed and invalidated before a switch */ 1741 - cs = intel_ring_begin(req, 6); 1741 + cs = intel_ring_begin(rq, 6); 1742 1742 if (IS_ERR(cs)) 1743 1743 return PTR_ERR(cs); 1744 1744 ··· 1748 1748 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine)); 1749 1749 *cs++ = get_pd_offset(ppgtt); 1750 1750 *cs++ = MI_NOOP; 1751 - intel_ring_advance(req, cs); 1751 + intel_ring_advance(rq, cs); 1752 1752 1753 1753 return 0; 1754 1754 } 1755 1755 1756 1756 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, 1757 - struct drm_i915_gem_request *req) 1757 + struct i915_request *rq) 1758 1758 { 1759 - struct intel_engine_cs *engine = req->engine; 1759 + struct intel_engine_cs *engine = rq->engine; 1760 1760 u32 *cs; 1761 1761 1762 1762 /* NB: TLBs must be flushed and invalidated before a switch */ 1763 - cs = intel_ring_begin(req, 6); 1763 + cs = intel_ring_begin(rq, 6); 1764 1764 if (IS_ERR(cs)) 1765 1765 return PTR_ERR(cs); 1766 1766 ··· 1770 1770 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine)); 1771 1771 *cs++ = get_pd_offset(ppgtt); 1772 1772 *cs++ = MI_NOOP; 1773 - intel_ring_advance(req, cs); 1773 + intel_ring_advance(rq, cs); 1774 1774 1775 1775 return 0; 1776 1776 } 1777 1777 1778 1778 static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt, 1779 - struct drm_i915_gem_request *req) 1779 + struct i915_request *rq) 1780 1780 { 1781 - struct intel_engine_cs *engine = req->engine; 1782 - struct drm_i915_private *dev_priv = req->i915; 1781 + struct intel_engine_cs *engine = rq->engine; 1782 + struct drm_i915_private *dev_priv = rq->i915; 
1783 1783 1784 1784 I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G); 1785 1785 I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
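All of the switch_mm() implementations above follow the same command-emission pattern, now keyed off the request instead of a request/ring pair. A condensed sketch of that pattern (the register and value are placeholders):

    u32 *cs;

    cs = intel_ring_begin(rq, 4);		/* reserve 4 dwords, may return ERR_PTR */
    if (IS_ERR(cs))
    	return PTR_ERR(cs);

    *cs++ = MI_LOAD_REGISTER_IMM(1);
    *cs++ = i915_mmio_reg_offset(reg);	/* placeholder register */
    *cs++ = value;				/* placeholder value */
    *cs++ = MI_NOOP;

    intel_ring_advance(rq, cs);		/* commit the reserved dwords */
    return 0;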
+3 -2
drivers/gpu/drm/i915/i915_gem_gtt.h
··· 39 39 #include <linux/pagevec.h> 40 40 41 41 #include "i915_gem_timeline.h" 42 - #include "i915_gem_request.h" 42 + 43 + #include "i915_request.h" 43 44 #include "i915_selftest.h" 44 45 45 46 #define I915_GTT_PAGE_SIZE_4K BIT(12) ··· 399 398 gen6_pte_t __iomem *pd_addr; 400 399 401 400 int (*switch_mm)(struct i915_hw_ppgtt *ppgtt, 402 - struct drm_i915_gem_request *req); 401 + struct i915_request *rq); 403 402 void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m); 404 403 }; 405 404
+1 -1
drivers/gpu/drm/i915/i915_gem_object.h
··· 33 33 34 34 #include <drm/i915_drm.h> 35 35 36 - #include "i915_gem_request.h" 36 + #include "i915_request.h" 37 37 #include "i915_selftest.h" 38 38 39 39 struct drm_i915_gem_object;
+1 -1
drivers/gpu/drm/i915/i915_gem_render_state.c
··· 177 177 178 178 #undef OUT_BATCH 179 179 180 - int i915_gem_render_state_emit(struct drm_i915_gem_request *rq) 180 + int i915_gem_render_state_emit(struct i915_request *rq) 181 181 { 182 182 struct intel_engine_cs *engine = rq->engine; 183 183 struct intel_render_state so = {}; /* keep the compiler happy */
+2 -2
drivers/gpu/drm/i915/i915_gem_render_state.h
··· 24 24 #ifndef _I915_GEM_RENDER_STATE_H_ 25 25 #define _I915_GEM_RENDER_STATE_H_ 26 26 27 - struct drm_i915_gem_request; 27 + struct i915_request; 28 28 29 - int i915_gem_render_state_emit(struct drm_i915_gem_request *rq); 29 + int i915_gem_render_state_emit(struct i915_request *rq); 30 30 31 31 #endif /* _I915_GEM_RENDER_STATE_H_ */
+202 -188
drivers/gpu/drm/i915/i915_gem_request.c → drivers/gpu/drm/i915/i915_request.c
··· 37 37 38 38 static const char *i915_fence_get_timeline_name(struct dma_fence *fence) 39 39 { 40 - /* The timeline struct (as part of the ppgtt underneath a context) 40 + /* 41 + * The timeline struct (as part of the ppgtt underneath a context) 41 42 * may be freed when the request is no longer in use by the GPU. 42 43 * We could extend the life of a context to beyond that of all 43 44 * fences, possibly keeping the hw resource around indefinitely, ··· 54 53 55 54 static bool i915_fence_signaled(struct dma_fence *fence) 56 55 { 57 - return i915_gem_request_completed(to_request(fence)); 56 + return i915_request_completed(to_request(fence)); 58 57 } 59 58 60 59 static bool i915_fence_enable_signaling(struct dma_fence *fence) ··· 70 69 bool interruptible, 71 70 signed long timeout) 72 71 { 73 - return i915_wait_request(to_request(fence), interruptible, timeout); 72 + return i915_request_wait(to_request(fence), interruptible, timeout); 74 73 } 75 74 76 75 static void i915_fence_release(struct dma_fence *fence) 77 76 { 78 - struct drm_i915_gem_request *req = to_request(fence); 77 + struct i915_request *rq = to_request(fence); 79 78 80 - /* The request is put onto a RCU freelist (i.e. the address 79 + /* 80 + * The request is put onto a RCU freelist (i.e. the address 81 81 * is immediately reused), mark the fences as being freed now. 82 82 * Otherwise the debugobjects for the fences are only marked as 83 83 * freed when the slab cache itself is freed, and so we would get 84 84 * caught trying to reuse dead objects. 85 85 */ 86 - i915_sw_fence_fini(&req->submit); 86 + i915_sw_fence_fini(&rq->submit); 87 87 88 - kmem_cache_free(req->i915->requests, req); 88 + kmem_cache_free(rq->i915->requests, rq); 89 89 } 90 90 91 91 const struct dma_fence_ops i915_fence_ops = { ··· 99 97 }; 100 98 101 99 static inline void 102 - i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) 100 + i915_request_remove_from_client(struct i915_request *request) 103 101 { 104 102 struct drm_i915_file_private *file_priv; 105 103 ··· 217 215 struct intel_timeline *tl = engine->timeline; 218 216 219 217 if (!i915_seqno_passed(seqno, tl->seqno)) { 220 - /* spin until threads are complete */ 221 - while (intel_breadcrumbs_busy(engine)) 222 - cond_resched(); 218 + /* Flush any waiters before we reuse the seqno */ 219 + intel_engine_disarm_breadcrumbs(engine); 220 + GEM_BUG_ON(!list_empty(&engine->breadcrumbs.signals)); 223 221 } 224 222 225 223 /* Check we are idle before we fiddle with hw state! 
*/ ··· 240 238 241 239 int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno) 242 240 { 243 - struct drm_i915_private *dev_priv = to_i915(dev); 241 + struct drm_i915_private *i915 = to_i915(dev); 244 242 245 - lockdep_assert_held(&dev_priv->drm.struct_mutex); 243 + lockdep_assert_held(&i915->drm.struct_mutex); 246 244 247 245 if (seqno == 0) 248 246 return -EINVAL; 249 247 250 - /* HWS page needs to be set less than what we 251 - * will inject to ring 252 - */ 253 - return reset_all_global_seqno(dev_priv, seqno - 1); 248 + /* HWS page needs to be set less than what we will inject to ring */ 249 + return reset_all_global_seqno(i915, seqno - 1); 254 250 } 255 251 256 252 static void mark_busy(struct drm_i915_private *i915) ··· 331 331 } 332 332 333 333 void i915_gem_retire_noop(struct i915_gem_active *active, 334 - struct drm_i915_gem_request *request) 334 + struct i915_request *request) 335 335 { 336 336 /* Space left intentionally blank */ 337 337 } 338 338 339 - static void advance_ring(struct drm_i915_gem_request *request) 339 + static void advance_ring(struct i915_request *request) 340 340 { 341 341 unsigned int tail; 342 342 343 - /* We know the GPU must have read the request to have 343 + /* 344 + * We know the GPU must have read the request to have 344 345 * sent us the seqno + interrupt, so use the position 345 346 * of tail of the request to update the last known position 346 347 * of the GPU head. ··· 350 349 * completion order. 351 350 */ 352 351 if (list_is_last(&request->ring_link, &request->ring->request_list)) { 353 - /* We may race here with execlists resubmitting this request 352 + /* 353 + * We may race here with execlists resubmitting this request 354 354 * as we retire it. The resubmission will move the ring->tail 355 355 * forwards (to request->wa_tail). We either read the 356 356 * current value that was written to hw, or the value that ··· 367 365 request->ring->head = tail; 368 366 } 369 367 370 - static void free_capture_list(struct drm_i915_gem_request *request) 368 + static void free_capture_list(struct i915_request *request) 371 369 { 372 - struct i915_gem_capture_list *capture; 370 + struct i915_capture_list *capture; 373 371 374 372 capture = request->capture_list; 375 373 while (capture) { 376 - struct i915_gem_capture_list *next = capture->next; 374 + struct i915_capture_list *next = capture->next; 377 375 378 376 kfree(capture); 379 377 capture = next; 380 378 } 381 379 } 382 380 383 - static void i915_gem_request_retire(struct drm_i915_gem_request *request) 381 + static void i915_request_retire(struct i915_request *request) 384 382 { 385 383 struct intel_engine_cs *engine = request->engine; 386 384 struct i915_gem_active *active, *next; 387 385 388 386 lockdep_assert_held(&request->i915->drm.struct_mutex); 389 387 GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit)); 390 - GEM_BUG_ON(!i915_gem_request_completed(request)); 388 + GEM_BUG_ON(!i915_request_completed(request)); 391 389 GEM_BUG_ON(!request->i915->gt.active_requests); 392 390 393 - trace_i915_gem_request_retire(request); 391 + trace_i915_request_retire(request); 394 392 395 393 spin_lock_irq(&engine->timeline->lock); 396 394 list_del_init(&request->link); ··· 401 399 402 400 free_capture_list(request); 403 401 404 - /* Walk through the active list, calling retire on each. This allows 402 + /* 403 + * Walk through the active list, calling retire on each. 
This allows 405 404 * objects to track their GPU activity and mark themselves as idle 406 405 * when their *last* active request is completed (updating state 407 406 * tracking lists for eviction, active references for GEM, etc). ··· 412 409 * the node after the callback). 413 410 */ 414 411 list_for_each_entry_safe(active, next, &request->active_list, link) { 415 - /* In microbenchmarks or focusing upon time inside the kernel, 412 + /* 413 + * In microbenchmarks or focusing upon time inside the kernel, 416 414 * we may spend an inordinate amount of time simply handling 417 415 * the retirement of requests and processing their callbacks. 418 416 * Of which, this loop itself is particularly hot due to the ··· 430 426 active->retire(active, request); 431 427 } 432 428 433 - i915_gem_request_remove_from_client(request); 429 + i915_request_remove_from_client(request); 434 430 435 431 /* Retirement decays the ban score as it is a sign of ctx progress */ 436 432 atomic_dec_if_positive(&request->ctx->ban_score); 437 433 438 - /* The backing object for the context is done after switching to the 434 + /* 435 + * The backing object for the context is done after switching to the 439 436 * *next* context. Therefore we cannot retire the previous context until 440 437 * the next context has already started running. However, since we 441 - * cannot take the required locks at i915_gem_request_submit() we 438 + * cannot take the required locks at i915_request_submit() we 442 439 * defer the unpinning of the active context to now, retirement of 443 440 * the subsequent request. 444 441 */ ··· 459 454 spin_unlock_irq(&request->lock); 460 455 461 456 i915_priotree_fini(request->i915, &request->priotree); 462 - i915_gem_request_put(request); 457 + i915_request_put(request); 463 458 } 464 459 465 - void i915_gem_request_retire_upto(struct drm_i915_gem_request *req) 460 + void i915_request_retire_upto(struct i915_request *rq) 466 461 { 467 - struct intel_engine_cs *engine = req->engine; 468 - struct drm_i915_gem_request *tmp; 462 + struct intel_engine_cs *engine = rq->engine; 463 + struct i915_request *tmp; 469 464 470 - lockdep_assert_held(&req->i915->drm.struct_mutex); 471 - GEM_BUG_ON(!i915_gem_request_completed(req)); 465 + lockdep_assert_held(&rq->i915->drm.struct_mutex); 466 + GEM_BUG_ON(!i915_request_completed(rq)); 472 467 473 - if (list_empty(&req->link)) 468 + if (list_empty(&rq->link)) 474 469 return; 475 470 476 471 do { 477 472 tmp = list_first_entry(&engine->timeline->requests, 478 473 typeof(*tmp), link); 479 474 480 - i915_gem_request_retire(tmp); 481 - } while (tmp != req); 475 + i915_request_retire(tmp); 476 + } while (tmp != rq); 482 477 } 483 478 484 479 static u32 timeline_get_seqno(struct intel_timeline *tl) ··· 486 481 return ++tl->seqno; 487 482 } 488 483 489 - void __i915_gem_request_submit(struct drm_i915_gem_request *request) 484 + void __i915_request_submit(struct i915_request *request) 490 485 { 491 486 struct intel_engine_cs *engine = request->engine; 492 487 struct intel_timeline *timeline; ··· 494 489 495 490 GEM_BUG_ON(!irqs_disabled()); 496 491 lockdep_assert_held(&engine->timeline->lock); 497 - 498 - trace_i915_gem_request_execute(request); 499 492 500 493 /* Transfer from per-context onto the global per-engine timeline */ 501 494 timeline = engine->timeline; ··· 518 515 list_move_tail(&request->link, &timeline->requests); 519 516 spin_unlock(&request->timeline->lock); 520 517 518 + trace_i915_request_execute(request); 519 + 521 520 wake_up_all(&request->execute); 522 521 } 523 
522 524 - void i915_gem_request_submit(struct drm_i915_gem_request *request) 523 + void i915_request_submit(struct i915_request *request) 525 524 { 526 525 struct intel_engine_cs *engine = request->engine; 527 526 unsigned long flags; ··· 531 526 /* Will be called from irq-context when using foreign fences. */ 532 527 spin_lock_irqsave(&engine->timeline->lock, flags); 533 528 534 - __i915_gem_request_submit(request); 529 + __i915_request_submit(request); 535 530 536 531 spin_unlock_irqrestore(&engine->timeline->lock, flags); 537 532 } 538 533 539 - void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request) 534 + void __i915_request_unsubmit(struct i915_request *request) 540 535 { 541 536 struct intel_engine_cs *engine = request->engine; 542 537 struct intel_timeline *timeline; ··· 544 539 GEM_BUG_ON(!irqs_disabled()); 545 540 lockdep_assert_held(&engine->timeline->lock); 546 541 547 - /* Only unwind in reverse order, required so that the per-context list 542 + /* 543 + * Only unwind in reverse order, required so that the per-context list 548 544 * is kept in seqno/ring order. 549 545 */ 550 546 GEM_BUG_ON(!request->global_seqno); ··· 569 563 list_move(&request->link, &timeline->requests); 570 564 spin_unlock(&timeline->lock); 571 565 572 - /* We don't need to wake_up any waiters on request->execute, they 566 + /* 567 + * We don't need to wake_up any waiters on request->execute, they 573 568 * will get woken by any other event or us re-adding this request 574 - * to the engine timeline (__i915_gem_request_submit()). The waiters 569 + * to the engine timeline (__i915_request_submit()). The waiters 575 570 * should be quite adapt at finding that the request now has a new 576 571 * global_seqno to the one they went to sleep on. 577 572 */ 578 573 } 579 574 580 - void i915_gem_request_unsubmit(struct drm_i915_gem_request *request) 575 + void i915_request_unsubmit(struct i915_request *request) 581 576 { 582 577 struct intel_engine_cs *engine = request->engine; 583 578 unsigned long flags; ··· 586 579 /* Will be called from irq-context when using foreign fences. */ 587 580 spin_lock_irqsave(&engine->timeline->lock, flags); 588 581 589 - __i915_gem_request_unsubmit(request); 582 + __i915_request_unsubmit(request); 590 583 591 584 spin_unlock_irqrestore(&engine->timeline->lock, flags); 592 585 } ··· 594 587 static int __i915_sw_fence_call 595 588 submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) 596 589 { 597 - struct drm_i915_gem_request *request = 590 + struct i915_request *request = 598 591 container_of(fence, typeof(*request), submit); 599 592 600 593 switch (state) { 601 594 case FENCE_COMPLETE: 602 - trace_i915_gem_request_submit(request); 595 + trace_i915_request_submit(request); 603 596 /* 604 - * We need to serialize use of the submit_request() callback with its 605 - * hotplugging performed during an emergency i915_gem_set_wedged(). 606 - * We use the RCU mechanism to mark the critical section in order to 607 - * force i915_gem_set_wedged() to wait until the submit_request() is 608 - * completed before proceeding. 597 + * We need to serialize use of the submit_request() callback 598 + * with its hotplugging performed during an emergency 599 + * i915_gem_set_wedged(). We use the RCU mechanism to mark the 600 + * critical section in order to force i915_gem_set_wedged() to 601 + * wait until the submit_request() is completed before 602 + * proceeding. 
609 603 */ 610 604 rcu_read_lock(); 611 605 request->engine->submit_request(request); ··· 614 606 break; 615 607 616 608 case FENCE_FREE: 617 - i915_gem_request_put(request); 609 + i915_request_put(request); 618 610 break; 619 611 } 620 612 ··· 622 614 } 623 615 624 616 /** 625 - * i915_gem_request_alloc - allocate a request structure 617 + * i915_request_alloc - allocate a request structure 626 618 * 627 619 * @engine: engine that we wish to issue the request on. 628 620 * @ctx: context that the request will be associated with. ··· 630 622 * Returns a pointer to the allocated request if successful, 631 623 * or an error code if not. 632 624 */ 633 - struct drm_i915_gem_request * 634 - i915_gem_request_alloc(struct intel_engine_cs *engine, 635 - struct i915_gem_context *ctx) 625 + struct i915_request * 626 + i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) 636 627 { 637 - struct drm_i915_private *dev_priv = engine->i915; 638 - struct drm_i915_gem_request *req; 628 + struct drm_i915_private *i915 = engine->i915; 629 + struct i915_request *rq; 639 630 struct intel_ring *ring; 640 631 int ret; 641 632 642 - lockdep_assert_held(&dev_priv->drm.struct_mutex); 633 + lockdep_assert_held(&i915->drm.struct_mutex); 643 634 644 635 /* 645 636 * Preempt contexts are reserved for exclusive use to inject a 646 637 * preemption context switch. They are never to be used for any trivial 647 638 * request! 648 639 */ 649 - GEM_BUG_ON(ctx == dev_priv->preempt_context); 640 + GEM_BUG_ON(ctx == i915->preempt_context); 650 641 651 - /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report 642 + /* 643 + * ABI: Before userspace accesses the GPU (e.g. execbuffer), report 652 644 * EIO if the GPU is already wedged. 653 645 */ 654 - if (i915_terminally_wedged(&dev_priv->gpu_error)) 646 + if (i915_terminally_wedged(&i915->gpu_error)) 655 647 return ERR_PTR(-EIO); 656 648 657 - /* Pinning the contexts may generate requests in order to acquire 649 + /* 650 + * Pinning the contexts may generate requests in order to acquire 658 651 * GGTT space, so do this first before we reserve a seqno for 659 652 * ourselves. 660 653 */ ··· 673 664 goto err_unreserve; 674 665 675 666 /* Move the oldest request to the slab-cache (if not in use!) */ 676 - req = list_first_entry_or_null(&engine->timeline->requests, 677 - typeof(*req), link); 678 - if (req && i915_gem_request_completed(req)) 679 - i915_gem_request_retire(req); 667 + rq = list_first_entry_or_null(&engine->timeline->requests, 668 + typeof(*rq), link); 669 + if (rq && i915_request_completed(rq)) 670 + i915_request_retire(rq); 680 671 681 - /* Beware: Dragons be flying overhead. 672 + /* 673 + * Beware: Dragons be flying overhead. 682 674 * 683 675 * We use RCU to look up requests in flight. The lookups may 684 676 * race with the request being allocated from the slab freelist. ··· 707 697 * 708 698 * Do not use kmem_cache_zalloc() here! 
709 699 */ 710 - req = kmem_cache_alloc(dev_priv->requests, 711 - GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN); 712 - if (unlikely(!req)) { 700 + rq = kmem_cache_alloc(i915->requests, 701 + GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN); 702 + if (unlikely(!rq)) { 713 703 /* Ratelimit ourselves to prevent oom from malicious clients */ 714 - ret = i915_gem_wait_for_idle(dev_priv, 704 + ret = i915_gem_wait_for_idle(i915, 715 705 I915_WAIT_LOCKED | 716 706 I915_WAIT_INTERRUPTIBLE); 717 707 if (ret) ··· 725 715 * Having already penalized the client to stall, we spend 726 716 * a little extra time to re-optimise page allocation. 727 717 */ 728 - kmem_cache_shrink(dev_priv->requests); 718 + kmem_cache_shrink(i915->requests); 729 719 rcu_barrier(); /* Recover the TYPESAFE_BY_RCU pages */ 730 720 731 - req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL); 732 - if (!req) { 721 + rq = kmem_cache_alloc(i915->requests, GFP_KERNEL); 722 + if (!rq) { 733 723 ret = -ENOMEM; 734 724 goto err_unreserve; 735 725 } 736 726 } 737 727 738 - req->timeline = i915_gem_context_lookup_timeline(ctx, engine); 739 - GEM_BUG_ON(req->timeline == engine->timeline); 728 + rq->timeline = i915_gem_context_lookup_timeline(ctx, engine); 729 + GEM_BUG_ON(rq->timeline == engine->timeline); 740 730 741 - spin_lock_init(&req->lock); 742 - dma_fence_init(&req->fence, 731 + spin_lock_init(&rq->lock); 732 + dma_fence_init(&rq->fence, 743 733 &i915_fence_ops, 744 - &req->lock, 745 - req->timeline->fence_context, 746 - timeline_get_seqno(req->timeline)); 734 + &rq->lock, 735 + rq->timeline->fence_context, 736 + timeline_get_seqno(rq->timeline)); 747 737 748 738 /* We bump the ref for the fence chain */ 749 - i915_sw_fence_init(&i915_gem_request_get(req)->submit, submit_notify); 750 - init_waitqueue_head(&req->execute); 739 + i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify); 740 + init_waitqueue_head(&rq->execute); 751 741 752 - i915_priotree_init(&req->priotree); 742 + i915_priotree_init(&rq->priotree); 753 743 754 - INIT_LIST_HEAD(&req->active_list); 755 - req->i915 = dev_priv; 756 - req->engine = engine; 757 - req->ctx = ctx; 758 - req->ring = ring; 744 + INIT_LIST_HEAD(&rq->active_list); 745 + rq->i915 = i915; 746 + rq->engine = engine; 747 + rq->ctx = ctx; 748 + rq->ring = ring; 759 749 760 750 /* No zalloc, must clear what we need by hand */ 761 - req->global_seqno = 0; 762 - req->signaling.wait.seqno = 0; 763 - req->file_priv = NULL; 764 - req->batch = NULL; 765 - req->capture_list = NULL; 766 - req->waitboost = false; 751 + rq->global_seqno = 0; 752 + rq->signaling.wait.seqno = 0; 753 + rq->file_priv = NULL; 754 + rq->batch = NULL; 755 + rq->capture_list = NULL; 756 + rq->waitboost = false; 767 757 768 758 /* 769 759 * Reserve space in the ring buffer for all the commands required to 770 760 * eventually emit this request. This is to guarantee that the 771 - * i915_add_request() call can't fail. Note that the reserve may need 761 + * i915_request_add() call can't fail. Note that the reserve may need 772 762 * to be redone if the request is not actually submitted straight 773 763 * away, e.g. because a GPU scheduler has deferred it. 
774 764 */ 775 - req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST; 776 - GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz); 765 + rq->reserved_space = MIN_SPACE_FOR_ADD_REQUEST; 766 + GEM_BUG_ON(rq->reserved_space < engine->emit_breadcrumb_sz); 777 767 778 768 /* 779 769 * Record the position of the start of the request so that ··· 781 771 * GPU processing the request, we never over-estimate the 782 772 * position of the head. 783 773 */ 784 - req->head = req->ring->emit; 774 + rq->head = rq->ring->emit; 785 775 786 776 /* Unconditionally invalidate GPU caches and TLBs. */ 787 - ret = engine->emit_flush(req, EMIT_INVALIDATE); 777 + ret = engine->emit_flush(rq, EMIT_INVALIDATE); 788 778 if (ret) 789 779 goto err_unwind; 790 780 791 - ret = engine->request_alloc(req); 781 + ret = engine->request_alloc(rq); 792 782 if (ret) 793 783 goto err_unwind; 794 784 795 785 /* Check that we didn't interrupt ourselves with a new request */ 796 - GEM_BUG_ON(req->timeline->seqno != req->fence.seqno); 797 - return req; 786 + GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno); 787 + return rq; 798 788 799 789 err_unwind: 800 - req->ring->emit = req->head; 790 + rq->ring->emit = rq->head; 801 791 802 792 /* Make sure we didn't add ourselves to external state before freeing */ 803 - GEM_BUG_ON(!list_empty(&req->active_list)); 804 - GEM_BUG_ON(!list_empty(&req->priotree.signalers_list)); 805 - GEM_BUG_ON(!list_empty(&req->priotree.waiters_list)); 793 + GEM_BUG_ON(!list_empty(&rq->active_list)); 794 + GEM_BUG_ON(!list_empty(&rq->priotree.signalers_list)); 795 + GEM_BUG_ON(!list_empty(&rq->priotree.waiters_list)); 806 796 807 - kmem_cache_free(dev_priv->requests, req); 797 + kmem_cache_free(i915->requests, rq); 808 798 err_unreserve: 809 799 unreserve_engine(engine); 810 800 err_unpin: ··· 813 803 } 814 804 815 805 static int 816 - i915_gem_request_await_request(struct drm_i915_gem_request *to, 817 - struct drm_i915_gem_request *from) 806 + i915_request_await_request(struct i915_request *to, struct i915_request *from) 818 807 { 819 808 int ret; 820 809 821 810 GEM_BUG_ON(to == from); 822 811 GEM_BUG_ON(to->timeline == from->timeline); 823 812 824 - if (i915_gem_request_completed(from)) 813 + if (i915_request_completed(from)) 825 814 return 0; 826 815 827 816 if (to->engine->schedule) { ··· 843 834 844 835 GEM_BUG_ON(!from->engine->semaphore.signal); 845 836 846 - seqno = i915_gem_request_global_seqno(from); 837 + seqno = i915_request_global_seqno(from); 847 838 if (!seqno) 848 839 goto await_dma_fence; 849 840 ··· 867 858 } 868 859 869 860 int 870 - i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req, 871 - struct dma_fence *fence) 861 + i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) 872 862 { 873 863 struct dma_fence **child = &fence; 874 864 unsigned int nchild = 1; 875 865 int ret; 876 866 877 - /* Note that if the fence-array was created in signal-on-any mode, 867 + /* 868 + * Note that if the fence-array was created in signal-on-any mode, 878 869 * we should *not* decompose it into its individual fences. However, 879 870 * we don't currently store which mode the fence-array is operating 880 871 * in. Fortunately, the only user of signal-on-any is private to ··· 896 887 897 888 /* 898 889 * Requests on the same timeline are explicitly ordered, along 899 - * with their dependencies, by i915_add_request() which ensures 890 + * with their dependencies, by i915_request_add() which ensures 900 891 * that requests are submitted in-order through each ring. 
901 892 */ 902 - if (fence->context == req->fence.context) 893 + if (fence->context == rq->fence.context) 903 894 continue; 904 895 905 896 /* Squash repeated waits to the same timelines */ 906 - if (fence->context != req->i915->mm.unordered_timeline && 907 - intel_timeline_sync_is_later(req->timeline, fence)) 897 + if (fence->context != rq->i915->mm.unordered_timeline && 898 + intel_timeline_sync_is_later(rq->timeline, fence)) 908 899 continue; 909 900 910 901 if (dma_fence_is_i915(fence)) 911 - ret = i915_gem_request_await_request(req, 912 - to_request(fence)); 902 + ret = i915_request_await_request(rq, to_request(fence)); 913 903 else 914 - ret = i915_sw_fence_await_dma_fence(&req->submit, fence, 904 + ret = i915_sw_fence_await_dma_fence(&rq->submit, fence, 915 905 I915_FENCE_TIMEOUT, 916 906 I915_FENCE_GFP); 917 907 if (ret < 0) 918 908 return ret; 919 909 920 910 /* Record the latest fence used against each timeline */ 921 - if (fence->context != req->i915->mm.unordered_timeline) 922 - intel_timeline_sync_set(req->timeline, fence); 911 + if (fence->context != rq->i915->mm.unordered_timeline) 912 + intel_timeline_sync_set(rq->timeline, fence); 923 913 } while (--nchild); 924 914 925 915 return 0; 926 916 } 927 917 928 918 /** 929 - * i915_gem_request_await_object - set this request to (async) wait upon a bo 919 + * i915_request_await_object - set this request to (async) wait upon a bo 930 920 * @to: request we are wishing to use 931 921 * @obj: object which may be in use on another ring. 932 922 * @write: whether the wait is on behalf of a writer ··· 945 937 * Returns 0 if successful, else propagates up the lower layer error. 946 938 */ 947 939 int 948 - i915_gem_request_await_object(struct drm_i915_gem_request *to, 949 - struct drm_i915_gem_object *obj, 950 - bool write) 940 + i915_request_await_object(struct i915_request *to, 941 + struct drm_i915_gem_object *obj, 942 + bool write) 951 943 { 952 944 struct dma_fence *excl; 953 945 int ret = 0; ··· 962 954 return ret; 963 955 964 956 for (i = 0; i < count; i++) { 965 - ret = i915_gem_request_await_dma_fence(to, shared[i]); 957 + ret = i915_request_await_dma_fence(to, shared[i]); 966 958 if (ret) 967 959 break; 968 960 ··· 978 970 979 971 if (excl) { 980 972 if (ret == 0) 981 - ret = i915_gem_request_await_dma_fence(to, excl); 973 + ret = i915_request_await_dma_fence(to, excl); 982 974 983 975 dma_fence_put(excl); 984 976 } ··· 991 983 * request is not being tracked for completion but the work itself is 992 984 * going to happen on the hardware. This would be a Bad Thing(tm). 993 985 */ 994 - void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches) 986 + void __i915_request_add(struct i915_request *request, bool flush_caches) 995 987 { 996 988 struct intel_engine_cs *engine = request->engine; 997 989 struct intel_ring *ring = request->ring; 998 990 struct intel_timeline *timeline = request->timeline; 999 - struct drm_i915_gem_request *prev; 991 + struct i915_request *prev; 1000 992 u32 *cs; 1001 993 int err; 1002 994 1003 995 lockdep_assert_held(&request->i915->drm.struct_mutex); 1004 - trace_i915_gem_request_add(request); 996 + trace_i915_request_add(request); 1005 997 1006 998 /* 1007 999 * Make sure that no request gazumped us - if it was allocated after 1008 - * our i915_gem_request_alloc() and called __i915_add_request() before 1000 + * our i915_request_alloc() and called __i915_request_add() before 1009 1001 * us, the timeline will hold its seqno which is later than ours. 
1010 1002 */ 1011 1003 GEM_BUG_ON(timeline->seqno != request->fence.seqno); ··· 1050 1042 1051 1043 prev = i915_gem_active_raw(&timeline->last_request, 1052 1044 &request->i915->drm.struct_mutex); 1053 - if (prev && !i915_gem_request_completed(prev)) { 1045 + if (prev && !i915_request_completed(prev)) { 1054 1046 i915_sw_fence_await_sw_fence(&request->submit, &prev->submit, 1055 1047 &request->submitq); 1056 1048 if (engine->schedule) ··· 1105 1097 * work on behalf of others -- but instead we should benefit from 1106 1098 * improved resource management. (Well, that's the theory at least.) 1107 1099 */ 1108 - if (prev && i915_gem_request_completed(prev)) 1109 - i915_gem_request_retire_upto(prev); 1100 + if (prev && i915_request_completed(prev)) 1101 + i915_request_retire_upto(prev); 1110 1102 } 1111 1103 1112 1104 static unsigned long local_clock_us(unsigned int *cpu) 1113 1105 { 1114 1106 unsigned long t; 1115 1107 1116 - /* Cheaply and approximately convert from nanoseconds to microseconds. 1108 + /* 1109 + * Cheaply and approximately convert from nanoseconds to microseconds. 1117 1110 * The result and subsequent calculations are also defined in the same 1118 1111 * approximate microseconds units. The principal source of timing 1119 1112 * error here is from the simple truncation. ··· 1142 1133 return this_cpu != cpu; 1143 1134 } 1144 1135 1145 - static bool __i915_spin_request(const struct drm_i915_gem_request *req, 1136 + static bool __i915_spin_request(const struct i915_request *rq, 1146 1137 u32 seqno, int state, unsigned long timeout_us) 1147 1138 { 1148 - struct intel_engine_cs *engine = req->engine; 1139 + struct intel_engine_cs *engine = rq->engine; 1149 1140 unsigned int irq, cpu; 1150 1141 1151 1142 GEM_BUG_ON(!seqno); ··· 1164 1155 if (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1)) 1165 1156 return false; 1166 1157 1167 - /* When waiting for high frequency requests, e.g. during synchronous 1158 + /* 1159 + * When waiting for high frequency requests, e.g. during synchronous 1168 1160 * rendering split between the CPU and GPU, the finite amount of time 1169 1161 * required to set up the irq and wait upon it limits the response 1170 1162 * rate. By busywaiting on the request completion for a short while we ··· 1179 1169 timeout_us += local_clock_us(&cpu); 1180 1170 do { 1181 1171 if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno)) 1182 - return seqno == i915_gem_request_global_seqno(req); 1172 + return seqno == i915_request_global_seqno(rq); 1183 1173 1184 - /* Seqno are meant to be ordered *before* the interrupt. If 1174 + /* 1175 + * Seqno are meant to be ordered *before* the interrupt. If 1185 1176 * we see an interrupt without a corresponding seqno advance, 1186 1177 * assume we won't see one in the near future but require 1187 1178 * the engine->seqno_barrier() to fixup coherency. 
··· 1202 1191 return false; 1203 1192 } 1204 1193 1205 - static bool __i915_wait_request_check_and_reset(struct drm_i915_gem_request *request) 1194 + static bool __i915_wait_request_check_and_reset(struct i915_request *request) 1206 1195 { 1207 1196 if (likely(!i915_reset_handoff(&request->i915->gpu_error))) 1208 1197 return false; ··· 1213 1202 } 1214 1203 1215 1204 /** 1216 - * i915_wait_request - wait until execution of request has finished 1217 - * @req: the request to wait upon 1205 + * i915_request_wait - wait until execution of request has finished 1206 + * @rq: the request to wait upon 1218 1207 * @flags: how to wait 1219 1208 * @timeout: how long to wait in jiffies 1220 1209 * 1221 - * i915_wait_request() waits for the request to be completed, for a 1210 + * i915_request_wait() waits for the request to be completed, for a 1222 1211 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an 1223 1212 * unbounded wait). 1224 1213 * ··· 1231 1220 * May return -EINTR is called with I915_WAIT_INTERRUPTIBLE and a signal is 1232 1221 * pending before the request completes. 1233 1222 */ 1234 - long i915_wait_request(struct drm_i915_gem_request *req, 1223 + long i915_request_wait(struct i915_request *rq, 1235 1224 unsigned int flags, 1236 1225 long timeout) 1237 1226 { 1238 1227 const int state = flags & I915_WAIT_INTERRUPTIBLE ? 1239 1228 TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; 1240 - wait_queue_head_t *errq = &req->i915->gpu_error.wait_queue; 1229 + wait_queue_head_t *errq = &rq->i915->gpu_error.wait_queue; 1241 1230 DEFINE_WAIT_FUNC(reset, default_wake_function); 1242 1231 DEFINE_WAIT_FUNC(exec, default_wake_function); 1243 1232 struct intel_wait wait; ··· 1245 1234 might_sleep(); 1246 1235 #if IS_ENABLED(CONFIG_LOCKDEP) 1247 1236 GEM_BUG_ON(debug_locks && 1248 - !!lockdep_is_held(&req->i915->drm.struct_mutex) != 1237 + !!lockdep_is_held(&rq->i915->drm.struct_mutex) != 1249 1238 !!(flags & I915_WAIT_LOCKED)); 1250 1239 #endif 1251 1240 GEM_BUG_ON(timeout < 0); 1252 1241 1253 - if (i915_gem_request_completed(req)) 1242 + if (i915_request_completed(rq)) 1254 1243 return timeout; 1255 1244 1256 1245 if (!timeout) 1257 1246 return -ETIME; 1258 1247 1259 - trace_i915_gem_request_wait_begin(req, flags); 1248 + trace_i915_request_wait_begin(rq, flags); 1260 1249 1261 - add_wait_queue(&req->execute, &exec); 1250 + add_wait_queue(&rq->execute, &exec); 1262 1251 if (flags & I915_WAIT_LOCKED) 1263 1252 add_wait_queue(errq, &reset); 1264 1253 1265 - intel_wait_init(&wait, req); 1254 + intel_wait_init(&wait, rq); 1266 1255 1267 1256 restart: 1268 1257 do { 1269 1258 set_current_state(state); 1270 - if (intel_wait_update_request(&wait, req)) 1259 + if (intel_wait_update_request(&wait, rq)) 1271 1260 break; 1272 1261 1273 1262 if (flags & I915_WAIT_LOCKED && 1274 - __i915_wait_request_check_and_reset(req)) 1263 + __i915_wait_request_check_and_reset(rq)) 1275 1264 continue; 1276 1265 1277 1266 if (signal_pending_state(state, current)) { ··· 1288 1277 } while (1); 1289 1278 1290 1279 GEM_BUG_ON(!intel_wait_has_seqno(&wait)); 1291 - GEM_BUG_ON(!i915_sw_fence_signaled(&req->submit)); 1280 + GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit)); 1292 1281 1293 1282 /* Optimistic short spin before touching IRQs */ 1294 - if (__i915_spin_request(req, wait.seqno, state, 5)) 1283 + if (__i915_spin_request(rq, wait.seqno, state, 5)) 1295 1284 goto complete; 1296 1285 1297 1286 set_current_state(state); 1298 - if (intel_engine_add_wait(req->engine, &wait)) 1299 - /* In order to check that we haven't 
missed the interrupt 1287 + if (intel_engine_add_wait(rq->engine, &wait)) 1288 + /* 1289 + * In order to check that we haven't missed the interrupt 1300 1290 * as we enabled it, we need to kick ourselves to do a 1301 1291 * coherent check on the seqno before we sleep. 1302 1292 */ 1303 1293 goto wakeup; 1304 1294 1305 1295 if (flags & I915_WAIT_LOCKED) 1306 - __i915_wait_request_check_and_reset(req); 1296 + __i915_wait_request_check_and_reset(rq); 1307 1297 1308 1298 for (;;) { 1309 1299 if (signal_pending_state(state, current)) { ··· 1320 1308 timeout = io_schedule_timeout(timeout); 1321 1309 1322 1310 if (intel_wait_complete(&wait) && 1323 - intel_wait_check_request(&wait, req)) 1311 + intel_wait_check_request(&wait, rq)) 1324 1312 break; 1325 1313 1326 1314 set_current_state(state); 1327 1315 1328 1316 wakeup: 1329 - /* Carefully check if the request is complete, giving time 1317 + /* 1318 + * Carefully check if the request is complete, giving time 1330 1319 * for the seqno to be visible following the interrupt. 1331 1320 * We also have to check in case we are kicked by the GPU 1332 1321 * reset in order to drop the struct_mutex. 1333 1322 */ 1334 - if (__i915_request_irq_complete(req)) 1323 + if (__i915_request_irq_complete(rq)) 1335 1324 break; 1336 1325 1337 - /* If the GPU is hung, and we hold the lock, reset the GPU 1326 + /* 1327 + * If the GPU is hung, and we hold the lock, reset the GPU 1338 1328 * and then check for completion. On a full reset, the engine's 1339 1329 * HW seqno will be advanced passed us and we are complete. 1340 1330 * If we do a partial reset, we have to wait for the GPU to ··· 1347 1333 * itself, or indirectly by recovering the GPU). 1348 1334 */ 1349 1335 if (flags & I915_WAIT_LOCKED && 1350 - __i915_wait_request_check_and_reset(req)) 1336 + __i915_wait_request_check_and_reset(rq)) 1351 1337 continue; 1352 1338 1353 1339 /* Only spin if we know the GPU is processing this request */ 1354 - if (__i915_spin_request(req, wait.seqno, state, 2)) 1340 + if (__i915_spin_request(rq, wait.seqno, state, 2)) 1355 1341 break; 1356 1342 1357 - if (!intel_wait_check_request(&wait, req)) { 1358 - intel_engine_remove_wait(req->engine, &wait); 1343 + if (!intel_wait_check_request(&wait, rq)) { 1344 + intel_engine_remove_wait(rq->engine, &wait); 1359 1345 goto restart; 1360 1346 } 1361 1347 } 1362 1348 1363 - intel_engine_remove_wait(req->engine, &wait); 1349 + intel_engine_remove_wait(rq->engine, &wait); 1364 1350 complete: 1365 1351 __set_current_state(TASK_RUNNING); 1366 1352 if (flags & I915_WAIT_LOCKED) 1367 1353 remove_wait_queue(errq, &reset); 1368 - remove_wait_queue(&req->execute, &exec); 1369 - trace_i915_gem_request_wait_end(req); 1354 + remove_wait_queue(&rq->execute, &exec); 1355 + trace_i915_request_wait_end(rq); 1370 1356 1371 1357 return timeout; 1372 1358 } 1373 1359 1374 1360 static void engine_retire_requests(struct intel_engine_cs *engine) 1375 1361 { 1376 - struct drm_i915_gem_request *request, *next; 1362 + struct i915_request *request, *next; 1377 1363 u32 seqno = intel_engine_get_seqno(engine); 1378 1364 LIST_HEAD(retire); 1379 1365 ··· 1388 1374 spin_unlock_irq(&engine->timeline->lock); 1389 1375 1390 1376 list_for_each_entry_safe(request, next, &retire, link) 1391 - i915_gem_request_retire(request); 1377 + i915_request_retire(request); 1392 1378 } 1393 1379 1394 - void i915_gem_retire_requests(struct drm_i915_private *dev_priv) 1380 + void i915_retire_requests(struct drm_i915_private *i915) 1395 1381 { 1396 1382 struct intel_engine_cs *engine; 1397 
1383 enum intel_engine_id id; 1398 1384 1399 - lockdep_assert_held(&dev_priv->drm.struct_mutex); 1385 + lockdep_assert_held(&i915->drm.struct_mutex); 1400 1386 1401 - if (!dev_priv->gt.active_requests) 1387 + if (!i915->gt.active_requests) 1402 1388 return; 1403 1389 1404 - for_each_engine(engine, dev_priv, id) 1390 + for_each_engine(engine, i915, id) 1405 1391 engine_retire_requests(engine); 1406 1392 } 1407 1393 1408 1394 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 1409 1395 #include "selftests/mock_request.c" 1410 - #include "selftests/i915_gem_request.c" 1396 + #include "selftests/i915_request.c" 1411 1397 #endif
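For callers, the renamed wait entry point keeps the old semantics: take a reference, wait (optionally interruptible, optionally bounded in jiffies), then drop the reference. A minimal sketch mirroring the throttle-ioctl usage earlier in this patch:

    long ret;

    i915_request_get(rq);			/* was i915_gem_request_get() */
    ret = i915_request_wait(rq,			/* was i915_wait_request() */
    			    I915_WAIT_INTERRUPTIBLE,
    			    MAX_SCHEDULE_TIMEOUT);
    i915_request_put(rq);			/* was i915_gem_request_put() */

    return ret < 0 ? ret : 0;			/* 0 on success, negative error code otherwise */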
+115 -107
drivers/gpu/drm/i915/i915_gem_request.h → drivers/gpu/drm/i915/i915_request.h
··· 1 1 /* 2 - * Copyright © 2008-2015 Intel Corporation 2 + * Copyright © 2008-2018 Intel Corporation 3 3 * 4 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 5 * copy of this software and associated documentation files (the "Software"), ··· 22 22 * 23 23 */ 24 24 25 - #ifndef I915_GEM_REQUEST_H 26 - #define I915_GEM_REQUEST_H 25 + #ifndef I915_REQUEST_H 26 + #define I915_REQUEST_H 27 27 28 28 #include <linux/dma-fence.h> 29 29 ··· 34 34 35 35 struct drm_file; 36 36 struct drm_i915_gem_object; 37 - struct drm_i915_gem_request; 37 + struct i915_request; 38 38 39 39 struct intel_wait { 40 40 struct rb_node node; 41 41 struct task_struct *tsk; 42 - struct drm_i915_gem_request *request; 42 + struct i915_request *request; 43 43 u32 seqno; 44 44 }; 45 45 46 46 struct intel_signal_node { 47 - struct rb_node node; 48 47 struct intel_wait wait; 48 + struct list_head link; 49 49 }; 50 50 51 51 struct i915_dependency { ··· 57 57 #define I915_DEPENDENCY_ALLOC BIT(0) 58 58 }; 59 59 60 - /* Requests exist in a complex web of interdependencies. Each request 60 + /* 61 + * "People assume that time is a strict progression of cause to effect, but 62 + * actually, from a nonlinear, non-subjective viewpoint, it's more like a big 63 + * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015 64 + * 65 + * Requests exist in a complex web of interdependencies. Each request 61 66 * has to wait for some other request to complete before it is ready to be run 62 67 * (e.g. we have to wait until the pixels have been rendering into a texture 63 68 * before we can copy from it). We track the readiness of a request in terms ··· 86 81 I915_PRIORITY_INVALID = INT_MIN 87 82 }; 88 83 89 - struct i915_gem_capture_list { 90 - struct i915_gem_capture_list *next; 84 + struct i915_capture_list { 85 + struct i915_capture_list *next; 91 86 struct i915_vma *vma; 92 87 }; 93 88 ··· 111 106 * 112 107 * The requests are reference counted. 113 108 */ 114 - struct drm_i915_gem_request { 109 + struct i915_request { 115 110 struct dma_fence fence; 116 111 spinlock_t lock; 117 112 ··· 125 120 * it persists while any request is linked to it. Requests themselves 126 121 * are also refcounted, so the request will only be freed when the last 127 122 * reference to it is dismissed, and the code in 128 - * i915_gem_request_free() will then decrement the refcount on the 123 + * i915_request_free() will then decrement the refcount on the 129 124 * context. 130 125 */ 131 126 struct i915_gem_context *ctx; ··· 134 129 struct intel_timeline *timeline; 135 130 struct intel_signal_node signaling; 136 131 137 - /* Fences for the various phases in the request's lifetime. 132 + /* 133 + * Fences for the various phases in the request's lifetime. 138 134 * 139 135 * The submit fence is used to await upon all of the request's 140 136 * dependencies. When it is signaled, the request is ready to run. ··· 145 139 wait_queue_entry_t submitq; 146 140 wait_queue_head_t execute; 147 141 148 - /* A list of everyone we wait upon, and everyone who waits upon us. 142 + /* 143 + * A list of everyone we wait upon, and everyone who waits upon us. 
149 144 * Even though we will not be submitted to the hardware before the 150 145 * submit fence is signaled (it waits for all external events as well 151 146 * as our own requests), the scheduler still needs to know the ··· 157 150 struct i915_priotree priotree; 158 151 struct i915_dependency dep; 159 152 160 - /** GEM sequence number associated with this request on the 153 + /** 154 + * GEM sequence number associated with this request on the 161 155 * global execution timeline. It is zero when the request is not 162 156 * on the HW queue (i.e. not on the engine timeline list). 163 157 * Its value is guarded by the timeline spinlock. ··· 188 180 * error state dump only). 189 181 */ 190 182 struct i915_vma *batch; 191 - /** Additional buffers requested by userspace to be captured upon 183 + /** 184 + * Additional buffers requested by userspace to be captured upon 192 185 * a GPU hang. The vma/obj on this list are protected by their 193 186 * active reference - all objects on this list must also be 194 187 * on the active_list (of their final request). 195 188 */ 196 - struct i915_gem_capture_list *capture_list; 189 + struct i915_capture_list *capture_list; 197 190 struct list_head active_list; 198 191 199 192 /** Time at which this request was emitted, in jiffies. */ ··· 222 213 return fence->ops == &i915_fence_ops; 223 214 } 224 215 225 - struct drm_i915_gem_request * __must_check 226 - i915_gem_request_alloc(struct intel_engine_cs *engine, 227 - struct i915_gem_context *ctx); 228 - void i915_gem_request_retire_upto(struct drm_i915_gem_request *req); 216 + struct i915_request * __must_check 217 + i915_request_alloc(struct intel_engine_cs *engine, 218 + struct i915_gem_context *ctx); 219 + void i915_request_retire_upto(struct i915_request *rq); 229 220 230 - static inline struct drm_i915_gem_request * 221 + static inline struct i915_request * 231 222 to_request(struct dma_fence *fence) 232 223 { 233 224 /* We assume that NULL fence/request are interoperable */ 234 - BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0); 225 + BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0); 235 226 GEM_BUG_ON(fence && !dma_fence_is_i915(fence)); 236 - return container_of(fence, struct drm_i915_gem_request, fence); 227 + return container_of(fence, struct i915_request, fence); 237 228 } 238 229 239 - static inline struct drm_i915_gem_request * 240 - i915_gem_request_get(struct drm_i915_gem_request *req) 230 + static inline struct i915_request * 231 + i915_request_get(struct i915_request *rq) 241 232 { 242 - return to_request(dma_fence_get(&req->fence)); 233 + return to_request(dma_fence_get(&rq->fence)); 243 234 } 244 235 245 - static inline struct drm_i915_gem_request * 246 - i915_gem_request_get_rcu(struct drm_i915_gem_request *req) 236 + static inline struct i915_request * 237 + i915_request_get_rcu(struct i915_request *rq) 247 238 { 248 - return to_request(dma_fence_get_rcu(&req->fence)); 239 + return to_request(dma_fence_get_rcu(&rq->fence)); 249 240 } 250 241 251 242 static inline void 252 - i915_gem_request_put(struct drm_i915_gem_request *req) 243 + i915_request_put(struct i915_request *rq) 253 244 { 254 - dma_fence_put(&req->fence); 245 + dma_fence_put(&rq->fence); 255 246 } 256 247 257 248 /** 258 - * i915_gem_request_global_seqno - report the current global seqno 249 + * i915_request_global_seqno - report the current global seqno 259 250 * @request - the request 260 251 * 261 252 * A request is assigned a global seqno only when it is on the hardware ··· 273 264 * after the read, 
it is indeed complete). 274 265 */ 275 266 static u32 276 - i915_gem_request_global_seqno(const struct drm_i915_gem_request *request) 267 + i915_request_global_seqno(const struct i915_request *request) 277 268 { 278 269 return READ_ONCE(request->global_seqno); 279 270 } 280 271 281 - int 282 - i915_gem_request_await_object(struct drm_i915_gem_request *to, 272 + int i915_request_await_object(struct i915_request *to, 283 273 struct drm_i915_gem_object *obj, 284 274 bool write); 285 - int i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req, 286 - struct dma_fence *fence); 275 + int i915_request_await_dma_fence(struct i915_request *rq, 276 + struct dma_fence *fence); 287 277 288 - void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches); 289 - #define i915_add_request(req) \ 290 - __i915_add_request(req, false) 278 + void __i915_request_add(struct i915_request *rq, bool flush_caches); 279 + #define i915_request_add(rq) \ 280 + __i915_request_add(rq, false) 291 281 292 - void __i915_gem_request_submit(struct drm_i915_gem_request *request); 293 - void i915_gem_request_submit(struct drm_i915_gem_request *request); 282 + void __i915_request_submit(struct i915_request *request); 283 + void i915_request_submit(struct i915_request *request); 294 284 295 - void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request); 296 - void i915_gem_request_unsubmit(struct drm_i915_gem_request *request); 285 + void __i915_request_unsubmit(struct i915_request *request); 286 + void i915_request_unsubmit(struct i915_request *request); 297 287 298 - struct intel_rps_client; 299 - #define NO_WAITBOOST ERR_PTR(-1) 300 - #define IS_RPS_CLIENT(p) (!IS_ERR(p)) 301 - #define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p)) 302 - 303 - long i915_wait_request(struct drm_i915_gem_request *req, 288 + long i915_request_wait(struct i915_request *rq, 304 289 unsigned int flags, 305 290 long timeout) 306 291 __attribute__((nonnull(1))); ··· 313 310 } 314 311 315 312 static inline bool 316 - __i915_gem_request_completed(const struct drm_i915_gem_request *req, u32 seqno) 313 + __i915_request_completed(const struct i915_request *rq, u32 seqno) 317 314 { 318 315 GEM_BUG_ON(!seqno); 319 - return i915_seqno_passed(intel_engine_get_seqno(req->engine), seqno) && 320 - seqno == i915_gem_request_global_seqno(req); 316 + return i915_seqno_passed(intel_engine_get_seqno(rq->engine), seqno) && 317 + seqno == i915_request_global_seqno(rq); 321 318 } 322 319 323 - static inline bool 324 - i915_gem_request_completed(const struct drm_i915_gem_request *req) 320 + static inline bool i915_request_completed(const struct i915_request *rq) 325 321 { 326 322 u32 seqno; 327 323 328 - seqno = i915_gem_request_global_seqno(req); 324 + seqno = i915_request_global_seqno(rq); 329 325 if (!seqno) 330 326 return false; 331 327 332 - return __i915_gem_request_completed(req, seqno); 328 + return __i915_request_completed(rq, seqno); 333 329 } 334 330 335 - static inline bool 336 - i915_gem_request_started(const struct drm_i915_gem_request *req) 331 + static inline bool i915_request_started(const struct i915_request *rq) 337 332 { 338 333 u32 seqno; 339 334 340 - seqno = i915_gem_request_global_seqno(req); 335 + seqno = i915_request_global_seqno(rq); 341 336 if (!seqno) 342 337 return false; 343 338 344 - return i915_seqno_passed(intel_engine_get_seqno(req->engine), 339 + return i915_seqno_passed(intel_engine_get_seqno(rq->engine), 345 340 seqno - 1); 346 341 } 347 342 348 343 static inline bool i915_priotree_signaled(const struct 
i915_priotree *pt) 349 344 { 350 - const struct drm_i915_gem_request *rq = 351 - container_of(pt, const struct drm_i915_gem_request, priotree); 345 + const struct i915_request *rq = 346 + container_of(pt, const struct i915_request, priotree); 352 347 353 - return i915_gem_request_completed(rq); 348 + return i915_request_completed(rq); 354 349 } 355 350 356 - /* We treat requests as fences. This is not be to confused with our 351 + void i915_retire_requests(struct drm_i915_private *i915); 352 + 353 + /* 354 + * We treat requests as fences. This is not be to confused with our 357 355 * "fence registers" but pipeline synchronisation objects ala GL_ARB_sync. 358 356 * We use the fences to synchronize access from the CPU with activity on the 359 357 * GPU, for example, we should not rewrite an object's PTE whilst the GPU ··· 384 380 struct i915_gem_active; 385 381 386 382 typedef void (*i915_gem_retire_fn)(struct i915_gem_active *, 387 - struct drm_i915_gem_request *); 383 + struct i915_request *); 388 384 389 385 struct i915_gem_active { 390 - struct drm_i915_gem_request __rcu *request; 386 + struct i915_request __rcu *request; 391 387 struct list_head link; 392 388 i915_gem_retire_fn retire; 393 389 }; 394 390 395 391 void i915_gem_retire_noop(struct i915_gem_active *, 396 - struct drm_i915_gem_request *request); 392 + struct i915_request *request); 397 393 398 394 /** 399 395 * init_request_active - prepares the activity tracker for use ··· 425 421 */ 426 422 static inline void 427 423 i915_gem_active_set(struct i915_gem_active *active, 428 - struct drm_i915_gem_request *request) 424 + struct i915_request *request) 429 425 { 430 426 list_move(&active->link, &request->active_list); 431 427 rcu_assign_pointer(active->request, request); ··· 450 446 active->retire = fn ?: i915_gem_retire_noop; 451 447 } 452 448 453 - static inline struct drm_i915_gem_request * 449 + static inline struct i915_request * 454 450 __i915_gem_active_peek(const struct i915_gem_active *active) 455 451 { 456 - /* Inside the error capture (running with the driver in an unknown 452 + /* 453 + * Inside the error capture (running with the driver in an unknown 457 454 * state), we want to bend the rules slightly (a lot). 458 455 * 459 456 * Work is in progress to make it safer, in the meantime this keeps ··· 471 466 * It does not obtain a reference on the request for the caller, so the caller 472 467 * must hold struct_mutex. 473 468 */ 474 - static inline struct drm_i915_gem_request * 469 + static inline struct i915_request * 475 470 i915_gem_active_raw(const struct i915_gem_active *active, struct mutex *mutex) 476 471 { 477 472 return rcu_dereference_protected(active->request, ··· 486 481 * still active, or NULL. It does not obtain a reference on the request 487 482 * for the caller, so the caller must hold struct_mutex. 488 483 */ 489 - static inline struct drm_i915_gem_request * 484 + static inline struct i915_request * 490 485 i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex) 491 486 { 492 - struct drm_i915_gem_request *request; 487 + struct i915_request *request; 493 488 494 489 request = i915_gem_active_raw(active, mutex); 495 - if (!request || i915_gem_request_completed(request)) 490 + if (!request || i915_request_completed(request)) 496 491 return NULL; 497 492 498 493 return request; ··· 505 500 * i915_gem_active_get() returns a reference to the active request, or NULL 506 501 * if the active tracker is idle. The caller must hold struct_mutex. 
507 502 */ 508 - static inline struct drm_i915_gem_request * 503 + static inline struct i915_request * 509 504 i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex) 510 505 { 511 - return i915_gem_request_get(i915_gem_active_peek(active, mutex)); 506 + return i915_request_get(i915_gem_active_peek(active, mutex)); 512 507 } 513 508 514 509 /** ··· 519 514 * if the active tracker is idle. The caller must hold the RCU read lock, but 520 515 * the returned pointer is safe to use outside of RCU. 521 516 */ 522 - static inline struct drm_i915_gem_request * 517 + static inline struct i915_request * 523 518 __i915_gem_active_get_rcu(const struct i915_gem_active *active) 524 519 { 525 - /* Performing a lockless retrieval of the active request is super 520 + /* 521 + * Performing a lockless retrieval of the active request is super 526 522 * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing 527 523 * slab of request objects will not be freed whilst we hold the 528 524 * RCU read lock. It does not guarantee that the request itself ··· 531 525 * 532 526 * Thread A Thread B 533 527 * 534 - * req = active.request 535 - * retire(req) -> free(req); 536 - * (req is now first on the slab freelist) 528 + * rq = active.request 529 + * retire(rq) -> free(rq); 530 + * (rq is now first on the slab freelist) 537 531 * active.request = NULL 538 532 * 539 - * req = new submission on a new object 540 - * ref(req) 533 + * rq = new submission on a new object 534 + * ref(rq) 541 535 * 542 536 * To prevent the request from being reused whilst the caller 543 537 * uses it, we take a reference like normal. Whilst acquiring ··· 566 560 * 567 561 * It is then imperative that we do not zero the request on 568 562 * reallocation, so that we can chase the dangling pointers! 569 - * See i915_gem_request_alloc(). 563 + * See i915_request_alloc(). 570 564 */ 571 565 do { 572 - struct drm_i915_gem_request *request; 566 + struct i915_request *request; 573 567 574 568 request = rcu_dereference(active->request); 575 - if (!request || i915_gem_request_completed(request)) 569 + if (!request || i915_request_completed(request)) 576 570 return NULL; 577 571 578 - /* An especially silly compiler could decide to recompute the 579 - * result of i915_gem_request_completed, more specifically 572 + /* 573 + * An especially silly compiler could decide to recompute the 574 + * result of i915_request_completed, more specifically 580 575 * re-emit the load for request->fence.seqno. A race would catch 581 576 * a later seqno value, which could flip the result from true to 582 577 * false. Which means part of the instructions below might not 583 578 * be executed, while later on instructions are executed. Due to 584 579 * barriers within the refcounting the inconsistency can't reach 585 - * past the call to i915_gem_request_get_rcu, but not executing 586 - * that while still executing i915_gem_request_put() creates 580 + * past the call to i915_request_get_rcu, but not executing 581 + * that while still executing i915_request_put() creates 587 582 * havoc enough. Prevent this with a compiler barrier. 588 583 */ 589 584 barrier(); 590 585 591 - request = i915_gem_request_get_rcu(request); 586 + request = i915_request_get_rcu(request); 592 587 593 - /* What stops the following rcu_access_pointer() from occurring 594 - * before the above i915_gem_request_get_rcu()? If we were 588 + /* 589 + * What stops the following rcu_access_pointer() from occurring 590 + * before the above i915_request_get_rcu()? 
If we were 595 591 * to read the value before pausing to get the reference to 596 592 * the request, we may not notice a change in the active 597 593 * tracker. ··· 607 599 * compiler. 608 600 * 609 601 * The atomic operation at the heart of 610 - * i915_gem_request_get_rcu(), see dma_fence_get_rcu(), is 602 + * i915_request_get_rcu(), see dma_fence_get_rcu(), is 611 603 * atomic_inc_not_zero() which is only a full memory barrier 612 - * when successful. That is, if i915_gem_request_get_rcu() 604 + * when successful. That is, if i915_request_get_rcu() 613 605 * returns the request (and so with the reference counted 614 606 * incremented) then the following read for rcu_access_pointer() 615 607 * must occur after the atomic operation and so confirm ··· 621 613 if (!request || request == rcu_access_pointer(active->request)) 622 614 return rcu_pointer_handoff(request); 623 615 624 - i915_gem_request_put(request); 616 + i915_request_put(request); 625 617 } while (1); 626 618 } 627 619 ··· 633 625 * or NULL if the active tracker is idle. The reference is obtained under RCU, 634 626 * so no locking is required by the caller. 635 627 * 636 - * The reference should be freed with i915_gem_request_put(). 628 + * The reference should be freed with i915_request_put(). 637 629 */ 638 - static inline struct drm_i915_gem_request * 630 + static inline struct i915_request * 639 631 i915_gem_active_get_unlocked(const struct i915_gem_active *active) 640 632 { 641 - struct drm_i915_gem_request *request; 633 + struct i915_request *request; 642 634 643 635 rcu_read_lock(); 644 636 request = __i915_gem_active_get_rcu(active); ··· 678 670 * can then wait upon the request, and afterwards release our reference, 679 671 * free of any locking. 680 672 * 681 - * This function wraps i915_wait_request(), see it for the full details on 673 + * This function wraps i915_request_wait(), see it for the full details on 682 674 * the arguments. 683 675 * 684 676 * Returns 0 if successful, or a negative error code. ··· 686 678 static inline int 687 679 i915_gem_active_wait(const struct i915_gem_active *active, unsigned int flags) 688 680 { 689 - struct drm_i915_gem_request *request; 681 + struct i915_request *request; 690 682 long ret = 0; 691 683 692 684 request = i915_gem_active_get_unlocked(active); 693 685 if (request) { 694 - ret = i915_wait_request(request, flags, MAX_SCHEDULE_TIMEOUT); 695 - i915_gem_request_put(request); 686 + ret = i915_request_wait(request, flags, MAX_SCHEDULE_TIMEOUT); 687 + i915_request_put(request); 696 688 } 697 689 698 690 return ret < 0 ? ret : 0; ··· 711 703 i915_gem_active_retire(struct i915_gem_active *active, 712 704 struct mutex *mutex) 713 705 { 714 - struct drm_i915_gem_request *request; 706 + struct i915_request *request; 715 707 long ret; 716 708 717 709 request = i915_gem_active_raw(active, mutex); 718 710 if (!request) 719 711 return 0; 720 712 721 - ret = i915_wait_request(request, 713 + ret = i915_request_wait(request, 722 714 I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED, 723 715 MAX_SCHEDULE_TIMEOUT); 724 716 if (ret < 0) ··· 735 727 #define for_each_active(mask, idx) \ 736 728 for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx)) 737 729 738 - #endif /* I915_GEM_REQUEST_H */ 730 + #endif /* I915_REQUEST_H */
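The header above is the core of the rename: struct drm_i915_gem_request becomes struct i915_request and every i915_gem_request_* entry point loses the gem_ infix. A minimal sketch of how the renamed API composes for an in-kernel caller, using only the declarations visible in this hunk; the engine and context arguments are placeholders, and the caller is assumed to hold dev_priv->drm.struct_mutex as the request API requires:

/*
 * Sketch only: allocate a request on @engine for @ctx, submit it and wait.
 * Assumes struct_mutex is held by the caller, as i915_request_alloc() expects.
 */
static long submit_and_wait(struct intel_engine_cs *engine,
			    struct i915_gem_context *ctx)
{
	struct i915_request *rq;
	long ret;

	rq = i915_request_alloc(engine, ctx);	/* was i915_gem_request_alloc() */
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* ... emit commands into the request's ring here ... */

	i915_request_get(rq);			/* hold a reference across the wait */
	i915_request_add(rq);			/* was i915_add_request() */

	ret = i915_request_wait(rq,		/* was i915_wait_request() */
				I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
				MAX_SCHEDULE_TIMEOUT);
	i915_request_put(rq);

	return ret < 0 ? ret : 0;
}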
+2 -2
drivers/gpu/drm/i915/i915_gem_shrinker.c
··· 175 175 i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED); 176 176 177 177 trace_i915_gem_shrink(i915, target, flags); 178 - i915_gem_retire_requests(i915); 178 + i915_retire_requests(i915); 179 179 180 180 /* 181 181 * Unbinding of objects will require HW access; Let us not wake the ··· 267 267 if (flags & I915_SHRINK_BOUND) 268 268 intel_runtime_pm_put(i915); 269 269 270 - i915_gem_retire_requests(i915); 270 + i915_retire_requests(i915); 271 271 272 272 shrinker_unlock(i915, unlock); 273 273
+2 -2
drivers/gpu/drm/i915/i915_gem_timeline.h
··· 27 27 28 28 #include <linux/list.h> 29 29 30 - #include "i915_utils.h" 31 - #include "i915_gem_request.h" 30 + #include "i915_request.h" 32 31 #include "i915_syncmap.h" 32 + #include "i915_utils.h" 33 33 34 34 struct i915_gem_timeline; 35 35
+49 -75
drivers/gpu/drm/i915/i915_gpu_error.c
··· 586 586 587 587 intel_device_info_dump_flags(info, &p); 588 588 intel_driver_caps_print(caps, &p); 589 + intel_device_info_dump_topology(&info->sseu, &p); 589 590 } 590 591 591 592 static void err_print_params(struct drm_i915_error_state_buf *m, ··· 992 991 static inline uint32_t 993 992 __active_get_seqno(struct i915_gem_active *active) 994 993 { 995 - struct drm_i915_gem_request *request; 994 + struct i915_request *request; 996 995 997 996 request = __i915_gem_active_peek(active); 998 997 return request ? request->global_seqno : 0; ··· 1001 1000 static inline int 1002 1001 __active_get_engine_id(struct i915_gem_active *active) 1003 1002 { 1004 - struct drm_i915_gem_request *request; 1003 + struct i915_request *request; 1005 1004 1006 1005 request = __i915_gem_active_peek(active); 1007 1006 return request ? request->engine->id : -1; ··· 1085 1084 return error_code; 1086 1085 } 1087 1086 1088 - static void i915_gem_record_fences(struct drm_i915_private *dev_priv, 1089 - struct i915_gpu_state *error) 1087 + static void gem_record_fences(struct i915_gpu_state *error) 1090 1088 { 1089 + struct drm_i915_private *dev_priv = error->i915; 1091 1090 int i; 1092 1091 1093 1092 if (INTEL_GEN(dev_priv) >= 6) { ··· 1101 1100 error->fence[i] = I915_READ(FENCE_REG(i)); 1102 1101 } 1103 1102 error->nfence = i; 1104 - } 1105 - 1106 - static inline u32 1107 - gen8_engine_sync_index(struct intel_engine_cs *engine, 1108 - struct intel_engine_cs *other) 1109 - { 1110 - int idx; 1111 - 1112 - /* 1113 - * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2; 1114 - * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs; 1115 - * bcs -> 0 = vecs, 1 = vcs2. 2 = rcs, 3 = vcs; 1116 - * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs; 1117 - * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs; 1118 - */ 1119 - 1120 - idx = (other - engine) - 1; 1121 - if (idx < 0) 1122 - idx += I915_NUM_ENGINES; 1123 - 1124 - return idx; 1125 1103 } 1126 1104 1127 1105 static void gen6_record_semaphore_state(struct intel_engine_cs *engine, ··· 1273 1293 } 1274 1294 } 1275 1295 1276 - static void record_request(struct drm_i915_gem_request *request, 1296 + static void record_request(struct i915_request *request, 1277 1297 struct drm_i915_error_request *erq) 1278 1298 { 1279 1299 erq->context = request->ctx->hw_id; ··· 1290 1310 } 1291 1311 1292 1312 static void engine_record_requests(struct intel_engine_cs *engine, 1293 - struct drm_i915_gem_request *first, 1313 + struct i915_request *first, 1294 1314 struct drm_i915_error_engine *ee) 1295 1315 { 1296 - struct drm_i915_gem_request *request; 1316 + struct i915_request *request; 1297 1317 int count; 1298 1318 1299 1319 count = 0; ··· 1343 1363 unsigned int n; 1344 1364 1345 1365 for (n = 0; n < execlists_num_ports(execlists); n++) { 1346 - struct drm_i915_gem_request *rq = port_request(&execlists->port[n]); 1366 + struct i915_request *rq = port_request(&execlists->port[n]); 1347 1367 1348 1368 if (!rq) 1349 1369 break; ··· 1378 1398 e->active = atomic_read(&ctx->active_count); 1379 1399 } 1380 1400 1381 - static void request_record_user_bo(struct drm_i915_gem_request *request, 1401 + static void request_record_user_bo(struct i915_request *request, 1382 1402 struct drm_i915_error_engine *ee) 1383 1403 { 1384 - struct i915_gem_capture_list *c; 1404 + struct i915_capture_list *c; 1385 1405 struct drm_i915_error_object **bo; 1386 1406 long count; 1387 1407 ··· 1425 1445 } 1426 1446 } 1427 1447 1428 - static void i915_gem_record_rings(struct drm_i915_private *dev_priv, 1429 - struct i915_gpu_state *error) 1448 + 
static void gem_record_rings(struct i915_gpu_state *error) 1430 1449 { 1431 - struct i915_ggtt *ggtt = &dev_priv->ggtt; 1450 + struct drm_i915_private *i915 = error->i915; 1451 + struct i915_ggtt *ggtt = &i915->ggtt; 1432 1452 int i; 1433 1453 1434 1454 for (i = 0; i < I915_NUM_ENGINES; i++) { 1435 - struct intel_engine_cs *engine = dev_priv->engine[i]; 1455 + struct intel_engine_cs *engine = i915->engine[i]; 1436 1456 struct drm_i915_error_engine *ee = &error->engine[i]; 1437 - struct drm_i915_gem_request *request; 1457 + struct i915_request *request; 1438 1458 1439 1459 ee->engine_id = -1; 1440 1460 ··· 1461 1481 * by userspace. 1462 1482 */ 1463 1483 ee->batchbuffer = 1464 - i915_error_object_create(dev_priv, 1465 - request->batch); 1484 + i915_error_object_create(i915, request->batch); 1466 1485 1467 - if (HAS_BROKEN_CS_TLB(dev_priv)) 1486 + if (HAS_BROKEN_CS_TLB(i915)) 1468 1487 ee->wa_batchbuffer = 1469 - i915_error_object_create(dev_priv, 1488 + i915_error_object_create(i915, 1470 1489 engine->scratch); 1471 1490 request_record_user_bo(request, ee); 1472 1491 1473 1492 ee->ctx = 1474 - i915_error_object_create(dev_priv, 1493 + i915_error_object_create(i915, 1475 1494 request->ctx->engine[i].state); 1476 1495 1477 1496 error->simulated |= ··· 1484 1505 ee->cpu_ring_head = ring->head; 1485 1506 ee->cpu_ring_tail = ring->tail; 1486 1507 ee->ringbuffer = 1487 - i915_error_object_create(dev_priv, ring->vma); 1508 + i915_error_object_create(i915, ring->vma); 1488 1509 1489 1510 engine_record_requests(engine, request, ee); 1490 1511 } 1491 1512 1492 1513 ee->hws_page = 1493 - i915_error_object_create(dev_priv, 1514 + i915_error_object_create(i915, 1494 1515 engine->status_page.vma); 1495 1516 1496 - ee->wa_ctx = 1497 - i915_error_object_create(dev_priv, engine->wa_ctx.vma); 1517 + ee->wa_ctx = i915_error_object_create(i915, engine->wa_ctx.vma); 1498 1518 1499 - ee->default_state = 1500 - capture_object(dev_priv, engine->default_state); 1519 + ee->default_state = capture_object(i915, engine->default_state); 1501 1520 } 1502 1521 } 1503 1522 1504 - static void i915_gem_capture_vm(struct drm_i915_private *dev_priv, 1505 - struct i915_gpu_state *error, 1506 - struct i915_address_space *vm, 1507 - int idx) 1523 + static void gem_capture_vm(struct i915_gpu_state *error, 1524 + struct i915_address_space *vm, 1525 + int idx) 1508 1526 { 1509 1527 struct drm_i915_error_buffer *active_bo; 1510 1528 struct i915_vma *vma; ··· 1524 1548 error->active_bo_count[idx] = count; 1525 1549 } 1526 1550 1527 - static void i915_capture_active_buffers(struct drm_i915_private *dev_priv, 1528 - struct i915_gpu_state *error) 1551 + static void capture_active_buffers(struct i915_gpu_state *error) 1529 1552 { 1530 1553 int cnt = 0, i, j; 1531 1554 ··· 1544 1569 for (j = 0; j < i && !found; j++) 1545 1570 found = error->engine[j].vm == ee->vm; 1546 1571 if (!found) 1547 - i915_gem_capture_vm(dev_priv, error, ee->vm, cnt++); 1572 + gem_capture_vm(error, ee->vm, cnt++); 1548 1573 } 1549 1574 } 1550 1575 1551 - static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv, 1552 - struct i915_gpu_state *error) 1576 + static void capture_pinned_buffers(struct i915_gpu_state *error) 1553 1577 { 1554 - struct i915_address_space *vm = &dev_priv->ggtt.base; 1578 + struct i915_address_space *vm = &error->i915->ggtt.base; 1555 1579 struct drm_i915_error_buffer *bo; 1556 1580 struct i915_vma *vma; 1557 1581 int count_inactive, count_active; ··· 1600 1626 } 1601 1627 1602 1628 /* Capture all registers which don't fit 
into another category. */ 1603 - static void i915_capture_reg_state(struct drm_i915_private *dev_priv, 1604 - struct i915_gpu_state *error) 1629 + static void capture_reg_state(struct i915_gpu_state *error) 1605 1630 { 1631 + struct drm_i915_private *dev_priv = error->i915; 1606 1632 int i; 1607 1633 1608 1634 /* General organization ··· 1699 1725 engine_mask ? "reset" : "continue"); 1700 1726 } 1701 1727 1702 - static void i915_capture_gen_state(struct drm_i915_private *dev_priv, 1703 - struct i915_gpu_state *error) 1728 + static void capture_gen_state(struct i915_gpu_state *error) 1704 1729 { 1705 - error->awake = dev_priv->gt.awake; 1706 - error->wakelock = atomic_read(&dev_priv->runtime_pm.wakeref_count); 1707 - error->suspended = dev_priv->runtime_pm.suspended; 1730 + struct drm_i915_private *i915 = error->i915; 1731 + 1732 + error->awake = i915->gt.awake; 1733 + error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count); 1734 + error->suspended = i915->runtime_pm.suspended; 1708 1735 1709 1736 error->iommu = -1; 1710 1737 #ifdef CONFIG_INTEL_IOMMU 1711 1738 error->iommu = intel_iommu_gfx_mapped; 1712 1739 #endif 1713 - error->reset_count = i915_reset_count(&dev_priv->gpu_error); 1714 - error->suspend_count = dev_priv->suspend_count; 1740 + error->reset_count = i915_reset_count(&i915->gpu_error); 1741 + error->suspend_count = i915->suspend_count; 1715 1742 1716 1743 memcpy(&error->device_info, 1717 - INTEL_INFO(dev_priv), 1744 + INTEL_INFO(i915), 1718 1745 sizeof(error->device_info)); 1719 - error->driver_caps = dev_priv->caps; 1746 + error->driver_caps = i915->caps; 1720 1747 } 1721 1748 1722 1749 static __always_inline void dup_param(const char *type, void *x) ··· 1744 1769 error->i915->gt.last_init_time); 1745 1770 1746 1771 capture_params(error); 1772 + capture_gen_state(error); 1747 1773 capture_uc_state(error); 1748 - 1749 - i915_capture_gen_state(error->i915, error); 1750 - i915_capture_reg_state(error->i915, error); 1751 - i915_gem_record_fences(error->i915, error); 1752 - i915_gem_record_rings(error->i915, error); 1753 - i915_capture_active_buffers(error->i915, error); 1754 - i915_capture_pinned_buffers(error->i915, error); 1774 + capture_reg_state(error); 1775 + gem_record_fences(error); 1776 + gem_record_rings(error); 1777 + capture_active_buffers(error); 1778 + capture_pinned_buffers(error); 1755 1779 1756 1780 error->overlay = intel_overlay_capture_error_state(error->i915); 1757 1781 error->display = intel_display_capture_error_state(error->i915);
+240 -4
drivers/gpu/drm/i915/i915_irq.c
··· 415 415 if (READ_ONCE(rps->interrupts_enabled)) 416 416 return; 417 417 418 + if (WARN_ON_ONCE(IS_GEN11(dev_priv))) 419 + return; 420 + 418 421 spin_lock_irq(&dev_priv->irq_lock); 419 422 WARN_ON_ONCE(rps->pm_iir); 420 423 WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); ··· 432 429 struct intel_rps *rps = &dev_priv->gt_pm.rps; 433 430 434 431 if (!READ_ONCE(rps->interrupts_enabled)) 432 + return; 433 + 434 + if (WARN_ON_ONCE(IS_GEN11(dev_priv))) 435 435 return; 436 436 437 437 spin_lock_irq(&dev_priv->irq_lock); ··· 1077 1071 1078 1072 static void notify_ring(struct intel_engine_cs *engine) 1079 1073 { 1080 - struct drm_i915_gem_request *rq = NULL; 1074 + struct i915_request *rq = NULL; 1081 1075 struct intel_wait *wait; 1082 1076 1083 1077 if (!engine->breadcrumbs.irq_armed) ··· 1104 1098 */ 1105 1099 if (i915_seqno_passed(intel_engine_get_seqno(engine), 1106 1100 wait->seqno)) { 1107 - struct drm_i915_gem_request *waiter = wait->request; 1101 + struct i915_request *waiter = wait->request; 1108 1102 1109 1103 wakeup = true; 1110 1104 if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 1111 1105 &waiter->fence.flags) && 1112 1106 intel_wait_check_request(wait, waiter)) 1113 - rq = i915_gem_request_get(waiter); 1107 + rq = i915_request_get(waiter); 1114 1108 } 1115 1109 1116 1110 if (wakeup) ··· 1123 1117 1124 1118 if (rq) { 1125 1119 dma_fence_signal(&rq->fence); 1126 - i915_gem_request_put(rq); 1120 + GEM_BUG_ON(!i915_request_completed(rq)); 1121 + i915_request_put(rq); 1127 1122 } 1128 1123 1129 1124 trace_intel_engine_notify(engine, wait); ··· 2762 2755 (W)->i915; \ 2763 2756 __fini_wedge((W))) 2764 2757 2758 + static __always_inline void 2759 + gen11_cs_irq_handler(struct intel_engine_cs * const engine, const u32 iir) 2760 + { 2761 + gen8_cs_irq_handler(engine, iir, 0); 2762 + } 2763 + 2764 + static void 2765 + gen11_gt_engine_irq_handler(struct drm_i915_private * const i915, 2766 + const unsigned int bank, 2767 + const unsigned int engine_n, 2768 + const u16 iir) 2769 + { 2770 + struct intel_engine_cs ** const engine = i915->engine; 2771 + 2772 + switch (bank) { 2773 + case 0: 2774 + switch (engine_n) { 2775 + 2776 + case GEN11_RCS0: 2777 + return gen11_cs_irq_handler(engine[RCS], iir); 2778 + 2779 + case GEN11_BCS: 2780 + return gen11_cs_irq_handler(engine[BCS], iir); 2781 + } 2782 + case 1: 2783 + switch (engine_n) { 2784 + 2785 + case GEN11_VCS(0): 2786 + return gen11_cs_irq_handler(engine[_VCS(0)], iir); 2787 + case GEN11_VCS(1): 2788 + return gen11_cs_irq_handler(engine[_VCS(1)], iir); 2789 + case GEN11_VCS(2): 2790 + return gen11_cs_irq_handler(engine[_VCS(2)], iir); 2791 + case GEN11_VCS(3): 2792 + return gen11_cs_irq_handler(engine[_VCS(3)], iir); 2793 + 2794 + case GEN11_VECS(0): 2795 + return gen11_cs_irq_handler(engine[_VECS(0)], iir); 2796 + case GEN11_VECS(1): 2797 + return gen11_cs_irq_handler(engine[_VECS(1)], iir); 2798 + } 2799 + } 2800 + } 2801 + 2802 + static u32 2803 + gen11_gt_engine_intr(struct drm_i915_private * const i915, 2804 + const unsigned int bank, const unsigned int bit) 2805 + { 2806 + void __iomem * const regs = i915->regs; 2807 + u32 timeout_ts; 2808 + u32 ident; 2809 + 2810 + raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit)); 2811 + 2812 + /* 2813 + * NB: Specs do not specify how long to spin wait, 2814 + * so we do ~100us as an educated guess. 
2815 + */ 2816 + timeout_ts = (local_clock() >> 10) + 100; 2817 + do { 2818 + ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank)); 2819 + } while (!(ident & GEN11_INTR_DATA_VALID) && 2820 + !time_after32(local_clock() >> 10, timeout_ts)); 2821 + 2822 + if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) { 2823 + DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n", 2824 + bank, bit, ident); 2825 + return 0; 2826 + } 2827 + 2828 + raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank), 2829 + GEN11_INTR_DATA_VALID); 2830 + 2831 + return ident & GEN11_INTR_ENGINE_MASK; 2832 + } 2833 + 2834 + static void 2835 + gen11_gt_irq_handler(struct drm_i915_private * const i915, 2836 + const u32 master_ctl) 2837 + { 2838 + void __iomem * const regs = i915->regs; 2839 + unsigned int bank; 2840 + 2841 + for (bank = 0; bank < 2; bank++) { 2842 + unsigned long intr_dw; 2843 + unsigned int bit; 2844 + 2845 + if (!(master_ctl & GEN11_GT_DW_IRQ(bank))) 2846 + continue; 2847 + 2848 + intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); 2849 + 2850 + if (unlikely(!intr_dw)) { 2851 + DRM_ERROR("GT_INTR_DW%u blank!\n", bank); 2852 + continue; 2853 + } 2854 + 2855 + for_each_set_bit(bit, &intr_dw, 32) { 2856 + const u16 iir = gen11_gt_engine_intr(i915, bank, bit); 2857 + 2858 + if (unlikely(!iir)) 2859 + continue; 2860 + 2861 + gen11_gt_engine_irq_handler(i915, bank, bit, iir); 2862 + } 2863 + 2864 + /* Clear must be after shared has been served for engine */ 2865 + raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw); 2866 + } 2867 + } 2868 + 2869 + static irqreturn_t gen11_irq_handler(int irq, void *arg) 2870 + { 2871 + struct drm_i915_private * const i915 = to_i915(arg); 2872 + void __iomem * const regs = i915->regs; 2873 + u32 master_ctl; 2874 + 2875 + if (!intel_irqs_enabled(i915)) 2876 + return IRQ_NONE; 2877 + 2878 + master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); 2879 + master_ctl &= ~GEN11_MASTER_IRQ; 2880 + if (!master_ctl) 2881 + return IRQ_NONE; 2882 + 2883 + /* Disable interrupts. */ 2884 + raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0); 2885 + 2886 + /* Find, clear, then process each source of interrupt. */ 2887 + gen11_gt_irq_handler(i915, master_ctl); 2888 + 2889 + /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2890 + if (master_ctl & GEN11_DISPLAY_IRQ) { 2891 + const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); 2892 + 2893 + disable_rpm_wakeref_asserts(i915); 2894 + /* 2895 + * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ 2896 + * for the display related bits. 2897 + */ 2898 + gen8_de_irq_handler(i915, disp_ctl); 2899 + enable_rpm_wakeref_asserts(i915); 2900 + } 2901 + 2902 + /* Acknowledge and enable interrupts. */ 2903 + raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl); 2904 + 2905 + return IRQ_HANDLED; 2906 + } 2907 + 2765 2908 /** 2766 2909 * i915_reset_device - do process context error handling work 2767 2910 * @dev_priv: i915 device private ··· 3335 3178 3336 3179 if (HAS_PCH_SPLIT(dev_priv)) 3337 3180 ibx_irq_reset(dev_priv); 3181 + } 3182 + 3183 + static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv) 3184 + { 3185 + /* Disable RCS, BCS, VCS and VECS class engines. */ 3186 + I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0); 3187 + I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, 0); 3188 + 3189 + /* Restore masks irqs on RCS, BCS, VCS and VECS engines. 
*/ 3190 + I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~0); 3191 + I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~0); 3192 + I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~0); 3193 + I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~0); 3194 + I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0); 3195 + } 3196 + 3197 + static void gen11_irq_reset(struct drm_device *dev) 3198 + { 3199 + struct drm_i915_private *dev_priv = dev->dev_private; 3200 + int pipe; 3201 + 3202 + I915_WRITE(GEN11_GFX_MSTR_IRQ, 0); 3203 + POSTING_READ(GEN11_GFX_MSTR_IRQ); 3204 + 3205 + gen11_gt_irq_reset(dev_priv); 3206 + 3207 + I915_WRITE(GEN11_DISPLAY_INT_CTL, 0); 3208 + 3209 + for_each_pipe(dev_priv, pipe) 3210 + if (intel_display_power_is_enabled(dev_priv, 3211 + POWER_DOMAIN_PIPE(pipe))) 3212 + GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3213 + 3214 + GEN3_IRQ_RESET(GEN8_DE_PORT_); 3215 + GEN3_IRQ_RESET(GEN8_DE_MISC_); 3216 + GEN3_IRQ_RESET(GEN8_PCU_); 3338 3217 } 3339 3218 3340 3219 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, ··· 3870 3677 return 0; 3871 3678 } 3872 3679 3680 + static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3681 + { 3682 + const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT; 3683 + 3684 + BUILD_BUG_ON(irqs & 0xffff0000); 3685 + 3686 + /* Enable RCS, BCS, VCS and VECS class interrupts. */ 3687 + I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs); 3688 + I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, irqs << 16 | irqs); 3689 + 3690 + /* Unmask irqs on RCS, BCS, VCS and VECS engines. */ 3691 + I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~(irqs << 16)); 3692 + I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~(irqs << 16)); 3693 + I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~(irqs | irqs << 16)); 3694 + I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~(irqs | irqs << 16)); 3695 + I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16)); 3696 + 3697 + dev_priv->pm_imr = 0xffffffff; /* TODO */ 3698 + } 3699 + 3700 + static int gen11_irq_postinstall(struct drm_device *dev) 3701 + { 3702 + struct drm_i915_private *dev_priv = dev->dev_private; 3703 + 3704 + gen11_gt_irq_postinstall(dev_priv); 3705 + gen8_de_irq_postinstall(dev_priv); 3706 + 3707 + I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); 3708 + 3709 + I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); 3710 + POSTING_READ(GEN11_GFX_MSTR_IRQ); 3711 + 3712 + return 0; 3713 + } 3714 + 3873 3715 static int cherryview_irq_postinstall(struct drm_device *dev) 3874 3716 { 3875 3717 struct drm_i915_private *dev_priv = to_i915(dev); ··· 4353 4125 dev->driver->enable_vblank = i965_enable_vblank; 4354 4126 dev->driver->disable_vblank = i965_disable_vblank; 4355 4127 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4128 + } else if (INTEL_GEN(dev_priv) >= 11) { 4129 + dev->driver->irq_handler = gen11_irq_handler; 4130 + dev->driver->irq_preinstall = gen11_irq_reset; 4131 + dev->driver->irq_postinstall = gen11_irq_postinstall; 4132 + dev->driver->irq_uninstall = gen11_irq_reset; 4133 + dev->driver->enable_vblank = gen8_enable_vblank; 4134 + dev->driver->disable_vblank = gen8_disable_vblank; 4135 + dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; 4356 4136 } else if (INTEL_GEN(dev_priv) >= 8) { 4357 4137 dev->driver->irq_handler = gen8_irq_handler; 4358 4138 dev->driver->irq_preinstall = gen8_irq_reset;
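One detail worth calling out in gen11_gt_engine_intr() above: the identity-register poll runs against a soft time budget rather than a fixed iteration count. local_clock() returns nanoseconds, so shifting it right by 10 gives a cheap, roughly-microsecond timestamp, and time_after32() keeps the comparison correct across 32-bit wraparound. A minimal sketch of that idiom in isolation (spin_until() and its condition callback are illustrative names, not driver code):

#include <linux/jiffies.h>	/* time_after32() */
#include <linux/sched/clock.h>	/* local_clock() */
#include <linux/types.h>

/* Poll @cond for roughly @budget_us microseconds, as the Gen11 handler does. */
static bool spin_until(bool (*cond)(void *data), void *data,
		       unsigned int budget_us)
{
	/* local_clock() is in ns; >> 10 is a cheap approximation of us. */
	u32 timeout_ts = (local_clock() >> 10) + budget_us;
	bool done;

	do {
		done = cond(data);
	} while (!done && !time_after32(local_clock() >> 10, timeout_ts));

	return done;
}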
+3 -1
drivers/gpu/drm/i915/i915_pci.c
··· 594 594 GEN10_FEATURES, \ 595 595 GEN(11), \ 596 596 .ddb_size = 2048, \ 597 - .has_csr = 0 597 + .has_csr = 0, \ 598 + .has_logical_ring_elsq = 1 598 599 599 600 static const struct intel_device_info intel_icelake_11_info = { 600 601 GEN11_FEATURES, ··· 665 664 INTEL_CFL_U_GT2_IDS(&intel_coffeelake_gt2_info), 666 665 INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info), 667 666 INTEL_CNL_IDS(&intel_cannonlake_info), 667 + INTEL_ICL_11_IDS(&intel_icelake_11_info), 668 668 {0, 0, 0} 669 669 }; 670 670 MODULE_DEVICE_TABLE(pci, pciidlist);
+28 -42
drivers/gpu/drm/i915/i915_perf.c
··· 1303 1303 */ 1304 1304 mutex_lock(&dev_priv->drm.struct_mutex); 1305 1305 dev_priv->perf.oa.exclusive_stream = NULL; 1306 - mutex_unlock(&dev_priv->drm.struct_mutex); 1307 - 1308 1306 dev_priv->perf.oa.ops.disable_metric_set(dev_priv); 1307 + mutex_unlock(&dev_priv->drm.struct_mutex); 1309 1308 1310 1309 free_oa_buffer(dev_priv); 1311 1310 ··· 1629 1630 * Same as gen8_update_reg_state_unlocked only through the batchbuffer. This 1630 1631 * is only used by the kernel context. 1631 1632 */ 1632 - static int gen8_emit_oa_config(struct drm_i915_gem_request *req, 1633 + static int gen8_emit_oa_config(struct i915_request *rq, 1633 1634 const struct i915_oa_config *oa_config) 1634 1635 { 1635 - struct drm_i915_private *dev_priv = req->i915; 1636 + struct drm_i915_private *dev_priv = rq->i915; 1636 1637 /* The MMIO offsets for Flex EU registers aren't contiguous */ 1637 1638 u32 flex_mmio[] = { 1638 1639 i915_mmio_reg_offset(EU_PERF_CNTL0), ··· 1646 1647 u32 *cs; 1647 1648 int i; 1648 1649 1649 - cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4); 1650 + cs = intel_ring_begin(rq, ARRAY_SIZE(flex_mmio) * 2 + 4); 1650 1651 if (IS_ERR(cs)) 1651 1652 return PTR_ERR(cs); 1652 1653 ··· 1684 1685 } 1685 1686 1686 1687 *cs++ = MI_NOOP; 1687 - intel_ring_advance(req, cs); 1688 + intel_ring_advance(rq, cs); 1688 1689 1689 1690 return 0; 1690 1691 } ··· 1694 1695 { 1695 1696 struct intel_engine_cs *engine = dev_priv->engine[RCS]; 1696 1697 struct i915_gem_timeline *timeline; 1697 - struct drm_i915_gem_request *req; 1698 + struct i915_request *rq; 1698 1699 int ret; 1699 1700 1700 1701 lockdep_assert_held(&dev_priv->drm.struct_mutex); 1701 1702 1702 - i915_gem_retire_requests(dev_priv); 1703 + i915_retire_requests(dev_priv); 1703 1704 1704 - req = i915_gem_request_alloc(engine, dev_priv->kernel_context); 1705 - if (IS_ERR(req)) 1706 - return PTR_ERR(req); 1705 + rq = i915_request_alloc(engine, dev_priv->kernel_context); 1706 + if (IS_ERR(rq)) 1707 + return PTR_ERR(rq); 1707 1708 1708 - ret = gen8_emit_oa_config(req, oa_config); 1709 + ret = gen8_emit_oa_config(rq, oa_config); 1709 1710 if (ret) { 1710 - i915_add_request(req); 1711 + i915_request_add(rq); 1711 1712 return ret; 1712 1713 } 1713 1714 1714 1715 /* Queue this switch after all other activity */ 1715 1716 list_for_each_entry(timeline, &dev_priv->gt.timelines, link) { 1716 - struct drm_i915_gem_request *prev; 1717 + struct i915_request *prev; 1717 1718 struct intel_timeline *tl; 1718 1719 1719 1720 tl = &timeline->engine[engine->id]; 1720 1721 prev = i915_gem_active_raw(&tl->last_request, 1721 1722 &dev_priv->drm.struct_mutex); 1722 1723 if (prev) 1723 - i915_sw_fence_await_sw_fence_gfp(&req->submit, 1724 + i915_sw_fence_await_sw_fence_gfp(&rq->submit, 1724 1725 &prev->submit, 1725 1726 GFP_KERNEL); 1726 1727 } 1727 1728 1728 - i915_add_request(req); 1729 + i915_request_add(rq); 1729 1730 1730 1731 return 0; 1731 1732 } ··· 1755 1756 * Note: it's only the RCS/Render context that has any OA state. 
1756 1757 */ 1757 1758 static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, 1758 - const struct i915_oa_config *oa_config, 1759 - bool interruptible) 1759 + const struct i915_oa_config *oa_config) 1760 1760 { 1761 1761 struct i915_gem_context *ctx; 1762 1762 int ret; 1763 1763 unsigned int wait_flags = I915_WAIT_LOCKED; 1764 1764 1765 - if (interruptible) { 1766 - ret = i915_mutex_lock_interruptible(&dev_priv->drm); 1767 - if (ret) 1768 - return ret; 1769 - 1770 - wait_flags |= I915_WAIT_INTERRUPTIBLE; 1771 - } else { 1772 - mutex_lock(&dev_priv->drm.struct_mutex); 1773 - } 1765 + lockdep_assert_held(&dev_priv->drm.struct_mutex); 1774 1766 1775 1767 /* Switch away from any user context. */ 1776 1768 ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config); ··· 1809 1819 } 1810 1820 1811 1821 out: 1812 - mutex_unlock(&dev_priv->drm.struct_mutex); 1813 - 1814 1822 return ret; 1815 1823 } 1816 1824 ··· 1851 1863 * to make sure all slices/subslices are ON before writing to NOA 1852 1864 * registers. 1853 1865 */ 1854 - ret = gen8_configure_all_contexts(dev_priv, oa_config, true); 1866 + ret = gen8_configure_all_contexts(dev_priv, oa_config); 1855 1867 if (ret) 1856 1868 return ret; 1857 1869 ··· 1866 1878 static void gen8_disable_metric_set(struct drm_i915_private *dev_priv) 1867 1879 { 1868 1880 /* Reset all contexts' slices/subslices configurations. */ 1869 - gen8_configure_all_contexts(dev_priv, NULL, false); 1881 + gen8_configure_all_contexts(dev_priv, NULL); 1870 1882 1871 1883 I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) & 1872 1884 ~GT_NOA_ENABLE)); ··· 1876 1888 static void gen10_disable_metric_set(struct drm_i915_private *dev_priv) 1877 1889 { 1878 1890 /* Reset all contexts' slices/subslices configurations. */ 1879 - gen8_configure_all_contexts(dev_priv, NULL, false); 1891 + gen8_configure_all_contexts(dev_priv, NULL); 1880 1892 1881 1893 /* Make sure we disable noa to save power. */ 1882 1894 I915_WRITE(RPM_CONFIG1, ··· 2126 2138 if (ret) 2127 2139 goto err_oa_buf_alloc; 2128 2140 2141 + ret = i915_mutex_lock_interruptible(&dev_priv->drm); 2142 + if (ret) 2143 + goto err_lock; 2144 + 2129 2145 ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv, 2130 2146 stream->oa_config); 2131 2147 if (ret) ··· 2137 2145 2138 2146 stream->ops = &i915_oa_stream_ops; 2139 2147 2140 - /* Lock device for exclusive_stream access late because 2141 - * enable_metric_set() might lock as well on gen8+. 2142 - */ 2143 - ret = i915_mutex_lock_interruptible(&dev_priv->drm); 2144 - if (ret) 2145 - goto err_lock; 2146 - 2147 2148 dev_priv->perf.oa.exclusive_stream = stream; 2148 2149 2149 2150 mutex_unlock(&dev_priv->drm.struct_mutex); 2150 2151 2151 2152 return 0; 2152 2153 2153 - err_lock: 2154 - dev_priv->perf.oa.ops.disable_metric_set(dev_priv); 2155 - 2156 2154 err_enable: 2155 + dev_priv->perf.oa.ops.disable_metric_set(dev_priv); 2156 + mutex_unlock(&dev_priv->drm.struct_mutex); 2157 + 2158 + err_lock: 2157 2159 free_oa_buffer(dev_priv); 2158 2160 2159 2161 err_oa_buf_alloc:
+125
drivers/gpu/drm/i915/i915_query.c
··· 1 + /* 2 + * SPDX-License-Identifier: MIT 3 + * 4 + * Copyright © 2018 Intel Corporation 5 + */ 6 + 7 + #include "i915_drv.h" 8 + #include "i915_query.h" 9 + #include <uapi/drm/i915_drm.h> 10 + 11 + static int query_topology_info(struct drm_i915_private *dev_priv, 12 + struct drm_i915_query_item *query_item) 13 + { 14 + const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu; 15 + struct drm_i915_query_topology_info topo; 16 + u32 slice_length, subslice_length, eu_length, total_length; 17 + 18 + if (query_item->flags != 0) 19 + return -EINVAL; 20 + 21 + if (sseu->max_slices == 0) 22 + return -ENODEV; 23 + 24 + BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask)); 25 + 26 + slice_length = sizeof(sseu->slice_mask); 27 + subslice_length = sseu->max_slices * 28 + DIV_ROUND_UP(sseu->max_subslices, 29 + sizeof(sseu->subslice_mask[0]) * BITS_PER_BYTE); 30 + eu_length = sseu->max_slices * sseu->max_subslices * 31 + DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE); 32 + 33 + total_length = sizeof(topo) + slice_length + subslice_length + eu_length; 34 + 35 + if (query_item->length == 0) 36 + return total_length; 37 + 38 + if (query_item->length < total_length) 39 + return -EINVAL; 40 + 41 + if (copy_from_user(&topo, u64_to_user_ptr(query_item->data_ptr), 42 + sizeof(topo))) 43 + return -EFAULT; 44 + 45 + if (topo.flags != 0) 46 + return -EINVAL; 47 + 48 + if (!access_ok(VERIFY_WRITE, u64_to_user_ptr(query_item->data_ptr), 49 + total_length)) 50 + return -EFAULT; 51 + 52 + memset(&topo, 0, sizeof(topo)); 53 + topo.max_slices = sseu->max_slices; 54 + topo.max_subslices = sseu->max_subslices; 55 + topo.max_eus_per_subslice = sseu->max_eus_per_subslice; 56 + 57 + topo.subslice_offset = slice_length; 58 + topo.subslice_stride = DIV_ROUND_UP(sseu->max_subslices, BITS_PER_BYTE); 59 + topo.eu_offset = slice_length + subslice_length; 60 + topo.eu_stride = 61 + DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE); 62 + 63 + if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr), 64 + &topo, sizeof(topo))) 65 + return -EFAULT; 66 + 67 + if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)), 68 + &sseu->slice_mask, slice_length)) 69 + return -EFAULT; 70 + 71 + if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr + 72 + sizeof(topo) + slice_length), 73 + sseu->subslice_mask, subslice_length)) 74 + return -EFAULT; 75 + 76 + if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr + 77 + sizeof(topo) + 78 + slice_length + subslice_length), 79 + sseu->eu_mask, eu_length)) 80 + return -EFAULT; 81 + 82 + return total_length; 83 + } 84 + 85 + static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv, 86 + struct drm_i915_query_item *query_item) = { 87 + query_topology_info, 88 + }; 89 + 90 + int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file) 91 + { 92 + struct drm_i915_private *dev_priv = to_i915(dev); 93 + struct drm_i915_query *args = data; 94 + struct drm_i915_query_item __user *user_item_ptr = 95 + u64_to_user_ptr(args->items_ptr); 96 + u32 i; 97 + 98 + if (args->flags != 0) 99 + return -EINVAL; 100 + 101 + for (i = 0; i < args->num_items; i++, user_item_ptr++) { 102 + struct drm_i915_query_item item; 103 + u64 func_idx; 104 + int ret; 105 + 106 + if (copy_from_user(&item, user_item_ptr, sizeof(item))) 107 + return -EFAULT; 108 + 109 + if (item.query_id == 0) 110 + return -EINVAL; 111 + 112 + func_idx = item.query_id - 1; 113 + 114 + if (func_idx < ARRAY_SIZE(i915_query_funcs)) 115 + ret = i915_query_funcs[func_idx](dev_priv, 
&item); 116 + else 117 + ret = -EINVAL; 118 + 119 + /* Only write the length back to userspace if they differ. */ 120 + if (ret != item.length && put_user(ret, &user_item_ptr->length)) 121 + return -EFAULT; 122 + } 123 + 124 + return 0; 125 + }
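i915_query_ioctl() above is written for a two-pass calling convention: an item with length == 0 makes query_topology_info() return the required size, and a second call with a buffer of at least that size fills it in (a negative length is written back on error). A hedged userspace sketch of that flow; DRM_IOCTL_I915_QUERY and DRM_I915_QUERY_TOPOLOGY_INFO come from the uAPI header added elsewhere in this series, and drmIoctl() is the libdrm wrapper:

#include <stdint.h>
#include <stdlib.h>
#include <xf86drm.h>		/* drmIoctl() */
#include <i915_drm.h>		/* DRM_IOCTL_I915_QUERY, struct drm_i915_query* */

/* Returns a malloc'ed drm_i915_query_topology_info plus masks, or NULL. */
static void *query_topology(int fd, int32_t *len_out)
{
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,	/* query_id 1 above */
	};
	struct drm_i915_query q = {
		.num_items = 1,
		.items_ptr = (uintptr_t)&item,
	};
	void *buf;

	/* First pass: item.length == 0, the kernel writes back the size needed. */
	if (drmIoctl(fd, DRM_IOCTL_I915_QUERY, &q) || item.length <= 0)
		return NULL;

	buf = calloc(1, item.length);
	if (!buf)
		return NULL;

	/* Second pass: same item, now pointing at a buffer of that size. */
	item.data_ptr = (uintptr_t)buf;
	if (drmIoctl(fd, DRM_IOCTL_I915_QUERY, &q) || item.length <= 0) {
		free(buf);
		return NULL;
	}

	*len_out = item.length;
	return buf;
}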
+15
drivers/gpu/drm/i915/i915_query.h
··· 1 + /* 2 + * SPDX-License-Identifier: MIT 3 + * 4 + * Copyright © 2018 Intel Corporation 5 + */ 6 + 7 + #ifndef _I915_QUERY_H_ 8 + #define _I915_QUERY_H_ 9 + 10 + struct drm_device; 11 + struct drm_file; 12 + 13 + int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file); 14 + 15 + #endif
+34 -5
drivers/gpu/drm/i915/i915_reg.h
··· 178 178 #define BCS_HW 2 179 179 #define VECS_HW 3 180 180 #define VCS2_HW 4 181 + #define VCS3_HW 6 182 + #define VCS4_HW 7 183 + #define VECS2_HW 12 181 184 182 185 /* Engine class */ 183 186 ··· 191 188 #define OTHER_CLASS 4 192 189 #define MAX_ENGINE_CLASS 4 193 190 194 - #define MAX_ENGINE_INSTANCE 1 191 + #define MAX_ENGINE_INSTANCE 3 195 192 196 193 /* PCI config space */ 197 194 ··· 2345 2342 #define BSD_RING_BASE 0x04000 2346 2343 #define GEN6_BSD_RING_BASE 0x12000 2347 2344 #define GEN8_BSD2_RING_BASE 0x1c000 2345 + #define GEN11_BSD_RING_BASE 0x1c0000 2346 + #define GEN11_BSD2_RING_BASE 0x1c4000 2347 + #define GEN11_BSD3_RING_BASE 0x1d0000 2348 + #define GEN11_BSD4_RING_BASE 0x1d4000 2348 2349 #define VEBOX_RING_BASE 0x1a000 2350 + #define GEN11_VEBOX_RING_BASE 0x1c8000 2351 + #define GEN11_VEBOX2_RING_BASE 0x1d8000 2349 2352 #define BLT_RING_BASE 0x22000 2350 2353 #define RING_TAIL(base) _MMIO((base)+0x30) 2351 2354 #define RING_HEAD(base) _MMIO((base)+0x34) ··· 2816 2807 #define GEN9_RCS_FE_FSM2 _MMIO(0x22a4) 2817 2808 2818 2809 /* Fuse readout registers for GT */ 2810 + #define HSW_PAVP_FUSE1 _MMIO(0x911C) 2811 + #define HSW_F1_EU_DIS_SHIFT 16 2812 + #define HSW_F1_EU_DIS_MASK (0x3 << HSW_F1_EU_DIS_SHIFT) 2813 + #define HSW_F1_EU_DIS_10EUS 0 2814 + #define HSW_F1_EU_DIS_8EUS 1 2815 + #define HSW_F1_EU_DIS_6EUS 2 2816 + 2819 2817 #define CHV_FUSE_GT _MMIO(VLV_DISPLAY_BASE + 0x2168) 2820 2818 #define CHV_FGT_DISABLE_SS0 (1 << 10) 2821 2819 #define CHV_FGT_DISABLE_SS1 (1 << 11) ··· 3912 3896 3913 3897 #define GEN8_CTX_ID_SHIFT 32 3914 3898 #define GEN8_CTX_ID_WIDTH 21 3899 + #define GEN11_SW_CTX_ID_SHIFT 37 3900 + #define GEN11_SW_CTX_ID_WIDTH 11 3901 + #define GEN11_ENGINE_CLASS_SHIFT 61 3902 + #define GEN11_ENGINE_CLASS_WIDTH 3 3903 + #define GEN11_ENGINE_INSTANCE_SHIFT 48 3904 + #define GEN11_ENGINE_INSTANCE_WIDTH 6 3915 3905 3916 3906 #define CHV_CLK_CTL1 _MMIO(0x101100) 3917 3907 #define VLV_CLK_CTL2 _MMIO(0x101104) ··· 3964 3942 #define SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4) 3965 3943 #define SARBUNIT_CLKGATE_DIS (1 << 5) 3966 3944 #define RCCUNIT_CLKGATE_DIS (1 << 7) 3945 + 3946 + #define SUBSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9524) 3947 + #define GWUNIT_CLKGATE_DIS (1 << 16) 3967 3948 3968 3949 #define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434) 3969 3950 #define VFUNIT_CLKGATE_DIS (1 << 20) ··· 5372 5347 #define _DPF_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64520) 5373 5348 #define _DPF_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64524) 5374 5349 5375 - #define DP_AUX_CH_CTL(port) _MMIO_PORT(port, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL) 5376 - #define DP_AUX_CH_DATA(port, i) _MMIO(_PORT(port, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */ 5350 + #define DP_AUX_CH_CTL(aux_ch) _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL) 5351 + #define DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */ 5377 5352 5378 5353 #define DP_AUX_CH_CTL_SEND_BUSY (1 << 31) 5379 5354 #define DP_AUX_CH_CTL_DONE (1 << 30) ··· 7922 7897 #define _PCH_DPD_AUX_CH_DATA4 0xe4320 7923 7898 #define _PCH_DPD_AUX_CH_DATA5 0xe4324 7924 7899 7925 - #define PCH_DP_AUX_CH_CTL(port) _MMIO_PORT((port) - PORT_B, _PCH_DPB_AUX_CH_CTL, _PCH_DPC_AUX_CH_CTL) 7926 - #define PCH_DP_AUX_CH_DATA(port, i) _MMIO(_PORT((port) - PORT_B, _PCH_DPB_AUX_CH_DATA1, _PCH_DPC_AUX_CH_DATA1) + (i) * 4) /* 5 registers */ 7900 + #define PCH_DP_AUX_CH_CTL(aux_ch) _MMIO_PORT((aux_ch) - AUX_CH_B, _PCH_DPB_AUX_CH_CTL, _PCH_DPC_AUX_CH_CTL) 7901 + 
#define PCH_DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT((aux_ch) - AUX_CH_B, _PCH_DPB_AUX_CH_DATA1, _PCH_DPC_AUX_CH_DATA1) + (i) * 4) /* 5 registers */ 7927 7902 7928 7903 /* CPT */ 7929 7904 #define PORT_TRANS_A_SEL_CPT 0 ··· 8023 7998 #define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7) 8024 7999 #define FORCEWAKE_MT _MMIO(0xa188) /* multi-threaded */ 8025 8000 #define FORCEWAKE_MEDIA_GEN9 _MMIO(0xa270) 8001 + #define FORCEWAKE_MEDIA_VDBOX_GEN11(n) _MMIO(0xa540 + (n) * 4) 8002 + #define FORCEWAKE_MEDIA_VEBOX_GEN11(n) _MMIO(0xa560 + (n) * 4) 8026 8003 #define FORCEWAKE_RENDER_GEN9 _MMIO(0xa278) 8027 8004 #define FORCEWAKE_BLITTER_GEN9 _MMIO(0xa188) 8028 8005 #define FORCEWAKE_ACK_MEDIA_GEN9 _MMIO(0x0D88) 8006 + #define FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(n) _MMIO(0x0D50 + (n) * 4) 8007 + #define FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(n) _MMIO(0x0D70 + (n) * 4) 8029 8008 #define FORCEWAKE_ACK_RENDER_GEN9 _MMIO(0x0D84) 8030 8009 #define FORCEWAKE_ACK_BLITTER_GEN9 _MMIO(0x130044) 8031 8010 #define FORCEWAKE_KERNEL BIT(0)
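The new GEN11_SW_CTX_ID_*, GEN11_ENGINE_CLASS_* and GEN11_ENGINE_INSTANCE_* defines describe how the Gen11 context descriptor packs a software context ID, engine class and engine instance into the upper 32 bits, replacing the single 21-bit GEN8 context ID field. A sketch of how those fields would be assembled from the shifts and widths above (the helper itself is illustrative, not the driver's descriptor code; GENMASK_ULL() is the standard kernel macro):

#include <linux/bitops.h>	/* GENMASK_ULL() */
#include <linux/types.h>

static u64 gen11_descriptor_fields(u32 sw_ctx_id, u8 class, u8 instance)
{
	u64 desc = 0;

	/* bits 37-47: software context ID (11 bits wide) */
	desc |= ((u64)sw_ctx_id << GEN11_SW_CTX_ID_SHIFT) &
		GENMASK_ULL(GEN11_SW_CTX_ID_SHIFT + GEN11_SW_CTX_ID_WIDTH - 1,
			    GEN11_SW_CTX_ID_SHIFT);

	/* bits 48-53: engine instance (6 bits wide) */
	desc |= ((u64)instance << GEN11_ENGINE_INSTANCE_SHIFT) &
		GENMASK_ULL(GEN11_ENGINE_INSTANCE_SHIFT +
			    GEN11_ENGINE_INSTANCE_WIDTH - 1,
			    GEN11_ENGINE_INSTANCE_SHIFT);

	/* bits 61-63: engine class (3 bits wide) */
	desc |= ((u64)class << GEN11_ENGINE_CLASS_SHIFT) &
		GENMASK_ULL(GEN11_ENGINE_CLASS_SHIFT + GEN11_ENGINE_CLASS_WIDTH - 1,
			    GEN11_ENGINE_CLASS_SHIFT);

	return desc;
}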
+63 -65
drivers/gpu/drm/i915/i915_trace.h
··· 586 586 ); 587 587 588 588 TRACE_EVENT(i915_gem_ring_sync_to, 589 - TP_PROTO(struct drm_i915_gem_request *to, 590 - struct drm_i915_gem_request *from), 589 + TP_PROTO(struct i915_request *to, struct i915_request *from), 591 590 TP_ARGS(to, from), 592 591 593 592 TP_STRUCT__entry( ··· 609 610 __entry->seqno) 610 611 ); 611 612 612 - TRACE_EVENT(i915_gem_request_queue, 613 - TP_PROTO(struct drm_i915_gem_request *req, u32 flags), 614 - TP_ARGS(req, flags), 613 + TRACE_EVENT(i915_request_queue, 614 + TP_PROTO(struct i915_request *rq, u32 flags), 615 + TP_ARGS(rq, flags), 615 616 616 617 TP_STRUCT__entry( 617 618 __field(u32, dev) ··· 623 624 ), 624 625 625 626 TP_fast_assign( 626 - __entry->dev = req->i915->drm.primary->index; 627 - __entry->hw_id = req->ctx->hw_id; 628 - __entry->ring = req->engine->id; 629 - __entry->ctx = req->fence.context; 630 - __entry->seqno = req->fence.seqno; 627 + __entry->dev = rq->i915->drm.primary->index; 628 + __entry->hw_id = rq->ctx->hw_id; 629 + __entry->ring = rq->engine->id; 630 + __entry->ctx = rq->fence.context; 631 + __entry->seqno = rq->fence.seqno; 631 632 __entry->flags = flags; 632 633 ), 633 634 ··· 636 637 __entry->seqno, __entry->flags) 637 638 ); 638 639 639 - DECLARE_EVENT_CLASS(i915_gem_request, 640 - TP_PROTO(struct drm_i915_gem_request *req), 641 - TP_ARGS(req), 640 + DECLARE_EVENT_CLASS(i915_request, 641 + TP_PROTO(struct i915_request *rq), 642 + TP_ARGS(rq), 642 643 643 644 TP_STRUCT__entry( 644 645 __field(u32, dev) ··· 650 651 ), 651 652 652 653 TP_fast_assign( 653 - __entry->dev = req->i915->drm.primary->index; 654 - __entry->hw_id = req->ctx->hw_id; 655 - __entry->ring = req->engine->id; 656 - __entry->ctx = req->fence.context; 657 - __entry->seqno = req->fence.seqno; 658 - __entry->global = req->global_seqno; 654 + __entry->dev = rq->i915->drm.primary->index; 655 + __entry->hw_id = rq->ctx->hw_id; 656 + __entry->ring = rq->engine->id; 657 + __entry->ctx = rq->fence.context; 658 + __entry->seqno = rq->fence.seqno; 659 + __entry->global = rq->global_seqno; 659 660 ), 660 661 661 662 TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u", ··· 663 664 __entry->seqno, __entry->global) 664 665 ); 665 666 666 - DEFINE_EVENT(i915_gem_request, i915_gem_request_add, 667 - TP_PROTO(struct drm_i915_gem_request *req), 668 - TP_ARGS(req) 667 + DEFINE_EVENT(i915_request, i915_request_add, 668 + TP_PROTO(struct i915_request *rq), 669 + TP_ARGS(rq) 669 670 ); 670 671 671 672 #if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS) 672 - DEFINE_EVENT(i915_gem_request, i915_gem_request_submit, 673 - TP_PROTO(struct drm_i915_gem_request *req), 674 - TP_ARGS(req) 673 + DEFINE_EVENT(i915_request, i915_request_submit, 674 + TP_PROTO(struct i915_request *rq), 675 + TP_ARGS(rq) 675 676 ); 676 677 677 - DEFINE_EVENT(i915_gem_request, i915_gem_request_execute, 678 - TP_PROTO(struct drm_i915_gem_request *req), 679 - TP_ARGS(req) 678 + DEFINE_EVENT(i915_request, i915_request_execute, 679 + TP_PROTO(struct i915_request *rq), 680 + TP_ARGS(rq) 680 681 ); 681 682 682 - DECLARE_EVENT_CLASS(i915_gem_request_hw, 683 - TP_PROTO(struct drm_i915_gem_request *req, 684 - unsigned int port), 685 - TP_ARGS(req, port), 683 + DECLARE_EVENT_CLASS(i915_request_hw, 684 + TP_PROTO(struct i915_request *rq, unsigned int port), 685 + TP_ARGS(rq, port), 686 686 687 687 TP_STRUCT__entry( 688 688 __field(u32, dev) ··· 694 696 ), 695 697 696 698 TP_fast_assign( 697 - __entry->dev = req->i915->drm.primary->index; 698 - __entry->hw_id = req->ctx->hw_id; 699 - __entry->ring = 
req->engine->id; 700 - __entry->ctx = req->fence.context; 701 - __entry->seqno = req->fence.seqno; 702 - __entry->global_seqno = req->global_seqno; 703 - __entry->port = port; 704 - ), 699 + __entry->dev = rq->i915->drm.primary->index; 700 + __entry->hw_id = rq->ctx->hw_id; 701 + __entry->ring = rq->engine->id; 702 + __entry->ctx = rq->fence.context; 703 + __entry->seqno = rq->fence.seqno; 704 + __entry->global_seqno = rq->global_seqno; 705 + __entry->port = port; 706 + ), 705 707 706 708 TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, port=%u", 707 709 __entry->dev, __entry->hw_id, __entry->ring, ··· 709 711 __entry->global_seqno, __entry->port) 710 712 ); 711 713 712 - DEFINE_EVENT(i915_gem_request_hw, i915_gem_request_in, 713 - TP_PROTO(struct drm_i915_gem_request *req, unsigned int port), 714 - TP_ARGS(req, port) 714 + DEFINE_EVENT(i915_request_hw, i915_request_in, 715 + TP_PROTO(struct i915_request *rq, unsigned int port), 716 + TP_ARGS(rq, port) 715 717 ); 716 718 717 - DEFINE_EVENT(i915_gem_request, i915_gem_request_out, 718 - TP_PROTO(struct drm_i915_gem_request *req), 719 - TP_ARGS(req) 719 + DEFINE_EVENT(i915_request, i915_request_out, 720 + TP_PROTO(struct i915_request *rq), 721 + TP_ARGS(rq) 720 722 ); 721 723 #else 722 724 #if !defined(TRACE_HEADER_MULTI_READ) 723 725 static inline void 724 - trace_i915_gem_request_submit(struct drm_i915_gem_request *req) 726 + trace_i915_request_submit(struct i915_request *rq) 725 727 { 726 728 } 727 729 728 730 static inline void 729 - trace_i915_gem_request_execute(struct drm_i915_gem_request *req) 731 + trace_i915_request_execute(struct i915_request *rq) 730 732 { 731 733 } 732 734 733 735 static inline void 734 - trace_i915_gem_request_in(struct drm_i915_gem_request *req, unsigned int port) 736 + trace_i915_request_in(struct i915_request *rq, unsigned int port) 735 737 { 736 738 } 737 739 738 740 static inline void 739 - trace_i915_gem_request_out(struct drm_i915_gem_request *req) 741 + trace_i915_request_out(struct i915_request *rq) 740 742 { 741 743 } 742 744 #endif ··· 765 767 __entry->waiters) 766 768 ); 767 769 768 - DEFINE_EVENT(i915_gem_request, i915_gem_request_retire, 769 - TP_PROTO(struct drm_i915_gem_request *req), 770 - TP_ARGS(req) 770 + DEFINE_EVENT(i915_request, i915_request_retire, 771 + TP_PROTO(struct i915_request *rq), 772 + TP_ARGS(rq) 771 773 ); 772 774 773 - TRACE_EVENT(i915_gem_request_wait_begin, 774 - TP_PROTO(struct drm_i915_gem_request *req, unsigned int flags), 775 - TP_ARGS(req, flags), 775 + TRACE_EVENT(i915_request_wait_begin, 776 + TP_PROTO(struct i915_request *rq, unsigned int flags), 777 + TP_ARGS(rq, flags), 776 778 777 779 TP_STRUCT__entry( 778 780 __field(u32, dev) ··· 791 793 * less desirable. 
792 794 */ 793 795 TP_fast_assign( 794 - __entry->dev = req->i915->drm.primary->index; 795 - __entry->hw_id = req->ctx->hw_id; 796 - __entry->ring = req->engine->id; 797 - __entry->ctx = req->fence.context; 798 - __entry->seqno = req->fence.seqno; 799 - __entry->global = req->global_seqno; 796 + __entry->dev = rq->i915->drm.primary->index; 797 + __entry->hw_id = rq->ctx->hw_id; 798 + __entry->ring = rq->engine->id; 799 + __entry->ctx = rq->fence.context; 800 + __entry->seqno = rq->fence.seqno; 801 + __entry->global = rq->global_seqno; 800 802 __entry->flags = flags; 801 803 ), 802 804 ··· 806 808 !!(__entry->flags & I915_WAIT_LOCKED), __entry->flags) 807 809 ); 808 810 809 - DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end, 810 - TP_PROTO(struct drm_i915_gem_request *req), 811 - TP_ARGS(req) 811 + DEFINE_EVENT(i915_request, i915_request_wait_end, 812 + TP_PROTO(struct i915_request *rq), 813 + TP_ARGS(rq) 812 814 ); 813 815 814 816 TRACE_EVENT(i915_flip_request,
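When CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS is disabled, the renamed low-level events above collapse into empty static inlines so that call sites compile unchanged. A stand-alone sketch of that stub pattern, using simplified names and types rather than the driver's own:

#include <stdio.h>

struct request { unsigned int seqno; };

#ifdef ENABLE_LOW_LEVEL_TRACE
static inline void trace_request_submit(const struct request *rq)
{
        printf("submit seqno=%u\n", rq->seqno);
}
#else
static inline void trace_request_submit(const struct request *rq)
{
        /* compiled out: the call site below stays unchanged, cost is zero */
        (void)rq;
}
#endif

int main(void)
{
        struct request rq = { .seqno = 42 };

        trace_request_submit(&rq);      /* no-op unless built with -DENABLE_LOW_LEVEL_TRACE */
        return 0;
}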
+1 -2
drivers/gpu/drm/i915/i915_vma.c
··· 31 31 #include <drm/drm_gem.h> 32 32 33 33 static void 34 - i915_vma_retire(struct i915_gem_active *active, 35 - struct drm_i915_gem_request *rq) 34 + i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq) 36 35 { 37 36 const unsigned int idx = rq->engine->id; 38 37 struct i915_vma *vma =
+1 -1
drivers/gpu/drm/i915/i915_vma.h
··· 32 32 #include "i915_gem_gtt.h" 33 33 #include "i915_gem_fence_reg.h" 34 34 #include "i915_gem_object.h" 35 - #include "i915_gem_request.h" 36 35 36 + #include "i915_request.h" 37 37 38 38 enum i915_cache_level; 39 39
+120 -174
drivers/gpu/drm/i915/intel_breadcrumbs.c
··· 168 168 set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted); 169 169 170 170 /* Caller disables interrupts */ 171 - spin_lock(&engine->i915->irq_lock); 172 - engine->irq_enable(engine); 173 - spin_unlock(&engine->i915->irq_lock); 171 + if (engine->irq_enable) { 172 + spin_lock(&engine->i915->irq_lock); 173 + engine->irq_enable(engine); 174 + spin_unlock(&engine->i915->irq_lock); 175 + } 174 176 } 175 177 176 178 static void irq_disable(struct intel_engine_cs *engine) 177 179 { 178 180 /* Caller disables interrupts */ 179 - spin_lock(&engine->i915->irq_lock); 180 - engine->irq_disable(engine); 181 - spin_unlock(&engine->i915->irq_lock); 181 + if (engine->irq_disable) { 182 + spin_lock(&engine->i915->irq_lock); 183 + engine->irq_disable(engine); 184 + spin_unlock(&engine->i915->irq_lock); 185 + } 182 186 } 183 187 184 188 void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine) ··· 247 243 spin_unlock(&b->irq_lock); 248 244 249 245 rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) { 246 + GEM_BUG_ON(!i915_seqno_passed(intel_engine_get_seqno(engine), 247 + wait->seqno)); 250 248 RB_CLEAR_NODE(&wait->node); 251 249 wake_up_process(wait->tsk); 252 250 } ··· 342 336 lockdep_assert_held(&b->rb_lock); 343 337 GEM_BUG_ON(b->irq_wait == wait); 344 338 345 - /* This request is completed, so remove it from the tree, mark it as 339 + /* 340 + * This request is completed, so remove it from the tree, mark it as 346 341 * complete, and *then* wake up the associated task. N.B. when the 347 342 * task wakes up, it will find the empty rb_node, discern that it 348 343 * has already been removed from the tree and skip the serialisation ··· 354 347 rb_erase(&wait->node, &b->waiters); 355 348 RB_CLEAR_NODE(&wait->node); 356 349 357 - wake_up_process(wait->tsk); /* implicit smp_wmb() */ 350 + if (wait->tsk->state != TASK_RUNNING) 351 + wake_up_process(wait->tsk); /* implicit smp_wmb() */ 358 352 } 359 353 360 354 static inline void __intel_breadcrumbs_next(struct intel_engine_cs *engine, ··· 596 588 spin_unlock_irq(&b->rb_lock); 597 589 } 598 590 599 - static bool signal_complete(const struct drm_i915_gem_request *request) 600 - { 601 - if (!request) 602 - return false; 603 - 604 - /* 605 - * Carefully check if the request is complete, giving time for the 606 - * seqno to be visible or if the GPU hung. 607 - */ 608 - return __i915_request_irq_complete(request); 609 - } 610 - 611 - static struct drm_i915_gem_request *to_signaler(struct rb_node *rb) 612 - { 613 - return rb_entry(rb, struct drm_i915_gem_request, signaling.node); 614 - } 615 - 616 591 static void signaler_set_rtpriority(void) 617 592 { 618 593 struct sched_param param = { .sched_priority = 1 }; ··· 603 612 sched_setscheduler_nocheck(current, SCHED_FIFO, &param); 604 613 } 605 614 606 - static void __intel_engine_remove_signal(struct intel_engine_cs *engine, 607 - struct drm_i915_gem_request *request) 608 - { 609 - struct intel_breadcrumbs *b = &engine->breadcrumbs; 610 - 611 - lockdep_assert_held(&b->rb_lock); 612 - 613 - /* 614 - * Wake up all other completed waiters and select the 615 - * next bottom-half for the next user interrupt. 616 - */ 617 - __intel_engine_remove_wait(engine, &request->signaling.wait); 618 - 619 - /* 620 - * Find the next oldest signal. Note that as we have 621 - * not been holding the lock, another client may 622 - * have installed an even older signal than the one 623 - * we just completed - so double check we are still 624 - * the oldest before picking the next one. 
625 - */ 626 - if (request->signaling.wait.seqno) { 627 - if (request == rcu_access_pointer(b->first_signal)) { 628 - struct rb_node *rb = rb_next(&request->signaling.node); 629 - rcu_assign_pointer(b->first_signal, 630 - rb ? to_signaler(rb) : NULL); 631 - } 632 - 633 - rb_erase(&request->signaling.node, &b->signals); 634 - request->signaling.wait.seqno = 0; 635 - } 636 - } 637 - 638 - static struct drm_i915_gem_request * 639 - get_first_signal_rcu(struct intel_breadcrumbs *b) 640 - { 641 - /* 642 - * See the big warnings for i915_gem_active_get_rcu() and similarly 643 - * for dma_fence_get_rcu_safe() that explain the intricacies involved 644 - * here with defeating CPU/compiler speculation and enforcing 645 - * the required memory barriers. 646 - */ 647 - do { 648 - struct drm_i915_gem_request *request; 649 - 650 - request = rcu_dereference(b->first_signal); 651 - if (request) 652 - request = i915_gem_request_get_rcu(request); 653 - 654 - barrier(); 655 - 656 - if (!request || request == rcu_access_pointer(b->first_signal)) 657 - return rcu_pointer_handoff(request); 658 - 659 - i915_gem_request_put(request); 660 - } while (1); 661 - } 662 - 663 615 static int intel_breadcrumbs_signaler(void *arg) 664 616 { 665 617 struct intel_engine_cs *engine = arg; 666 618 struct intel_breadcrumbs *b = &engine->breadcrumbs; 667 - struct drm_i915_gem_request *request; 619 + struct i915_request *rq, *n; 668 620 669 621 /* Install ourselves with high priority to reduce signalling latency */ 670 622 signaler_set_rtpriority(); 671 623 672 624 do { 673 625 bool do_schedule = true; 626 + LIST_HEAD(list); 627 + u32 seqno; 674 628 675 629 set_current_state(TASK_INTERRUPTIBLE); 630 + if (list_empty(&b->signals)) 631 + goto sleep; 676 632 677 - /* We are either woken up by the interrupt bottom-half, 633 + /* 634 + * We are either woken up by the interrupt bottom-half, 678 635 * or by a client adding a new signaller. In both cases, 679 636 * the GPU seqno may have advanced beyond our oldest signal. 680 637 * If it has, propagate the signal, remove the waiter and ··· 630 691 * need to wait for a new interrupt from the GPU or for 631 692 * a new client. 
632 693 */ 633 - rcu_read_lock(); 634 - request = get_first_signal_rcu(b); 635 - rcu_read_unlock(); 636 - if (signal_complete(request)) { 637 - if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 638 - &request->fence.flags)) { 639 - local_bh_disable(); 640 - dma_fence_signal(&request->fence); 641 - GEM_BUG_ON(!i915_gem_request_completed(request)); 642 - local_bh_enable(); /* kick start the tasklets */ 643 - } 694 + seqno = intel_engine_get_seqno(engine); 644 695 645 - if (READ_ONCE(request->signaling.wait.seqno)) { 646 - spin_lock_irq(&b->rb_lock); 647 - __intel_engine_remove_signal(engine, request); 648 - spin_unlock_irq(&b->rb_lock); 649 - } 696 + spin_lock_irq(&b->rb_lock); 697 + list_for_each_entry_safe(rq, n, &b->signals, signaling.link) { 698 + u32 this = rq->signaling.wait.seqno; 650 699 651 - /* If the engine is saturated we may be continually 700 + GEM_BUG_ON(!rq->signaling.wait.seqno); 701 + 702 + if (!i915_seqno_passed(seqno, this)) 703 + break; 704 + 705 + if (likely(this == i915_request_global_seqno(rq))) { 706 + __intel_engine_remove_wait(engine, 707 + &rq->signaling.wait); 708 + 709 + rq->signaling.wait.seqno = 0; 710 + __list_del_entry(&rq->signaling.link); 711 + 712 + if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 713 + &rq->fence.flags)) { 714 + list_add_tail(&rq->signaling.link, 715 + &list); 716 + i915_request_get(rq); 717 + } 718 + } 719 + } 720 + spin_unlock_irq(&b->rb_lock); 721 + 722 + if (!list_empty(&list)) { 723 + local_bh_disable(); 724 + list_for_each_entry_safe(rq, n, &list, signaling.link) { 725 + dma_fence_signal(&rq->fence); 726 + GEM_BUG_ON(!i915_request_completed(rq)); 727 + i915_request_put(rq); 728 + } 729 + local_bh_enable(); /* kick start the tasklets */ 730 + 731 + /* 732 + * If the engine is saturated we may be continually 652 733 * processing completed requests. This angers the 653 734 * NMI watchdog if we never let anything else 654 735 * have access to the CPU. Let's pretend to be nice ··· 677 718 */ 678 719 do_schedule = need_resched(); 679 720 } 680 - i915_gem_request_put(request); 681 721 682 722 if (unlikely(do_schedule)) { 723 + /* Before we sleep, check for a missed seqno */ 724 + if (current->state & TASK_NORMAL && 725 + !list_empty(&b->signals) && 726 + engine->irq_seqno_barrier && 727 + test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, 728 + &engine->irq_posted)) { 729 + engine->irq_seqno_barrier(engine); 730 + intel_engine_wakeup(engine); 731 + } 732 + 733 + sleep: 683 734 if (kthread_should_park()) 684 735 kthread_parkme(); 685 736 ··· 704 735 return 0; 705 736 } 706 737 707 - void intel_engine_enable_signaling(struct drm_i915_gem_request *request, 708 - bool wakeup) 738 + static void insert_signal(struct intel_breadcrumbs *b, 739 + struct i915_request *request, 740 + const u32 seqno) 741 + { 742 + struct i915_request *iter; 743 + 744 + lockdep_assert_held(&b->rb_lock); 745 + 746 + /* 747 + * A reasonable assumption is that we are called to add signals 748 + * in sequence, as the requests are submitted for execution and 749 + * assigned a global_seqno. This will be the case for the majority 750 + * of internally generated signals (inter-engine signaling). 751 + * 752 + * Out of order waiters triggering random signaling enabling will 753 + * be more problematic, but hopefully rare enough and the list 754 + * small enough that the O(N) insertion sort is not an issue. 
755 + */ 756 + 757 + list_for_each_entry_reverse(iter, &b->signals, signaling.link) 758 + if (i915_seqno_passed(seqno, iter->signaling.wait.seqno)) 759 + break; 760 + 761 + list_add(&request->signaling.link, &iter->signaling.link); 762 + } 763 + 764 + void intel_engine_enable_signaling(struct i915_request *request, bool wakeup) 709 765 { 710 766 struct intel_engine_cs *engine = request->engine; 711 767 struct intel_breadcrumbs *b = &engine->breadcrumbs; 712 768 u32 seqno; 713 769 714 - /* Note that we may be called from an interrupt handler on another 770 + /* 771 + * Note that we may be called from an interrupt handler on another 715 772 * device (e.g. nouveau signaling a fence completion causing us 716 773 * to submit a request, and so enable signaling). As such, 717 774 * we need to make sure that all other users of b->rb_lock protect ··· 748 753 GEM_BUG_ON(!irqs_disabled()); 749 754 lockdep_assert_held(&request->lock); 750 755 751 - seqno = i915_gem_request_global_seqno(request); 752 - if (!seqno) 756 + seqno = i915_request_global_seqno(request); 757 + if (!seqno) /* will be enabled later upon execution */ 753 758 return; 754 - 755 - spin_lock(&b->rb_lock); 756 759 757 760 GEM_BUG_ON(request->signaling.wait.seqno); 758 761 request->signaling.wait.tsk = b->signaler; 759 762 request->signaling.wait.request = request; 760 763 request->signaling.wait.seqno = seqno; 761 764 762 - /* First add ourselves into the list of waiters, but register our 765 + /* 766 + * Add ourselves into the list of waiters, but registering our 763 767 * bottom-half as the signaller thread. As per usual, only the oldest 764 768 * waiter (not just signaller) is tasked as the bottom-half waking 765 769 * up all completed waiters after the user interrupt. ··· 766 772 * If we are the oldest waiter, enable the irq (after which we 767 773 * must double check that the seqno did not complete). 768 774 */ 775 + spin_lock(&b->rb_lock); 776 + insert_signal(b, request, seqno); 769 777 wakeup &= __intel_engine_add_wait(engine, &request->signaling.wait); 770 - 771 - if (!__i915_gem_request_completed(request, seqno)) { 772 - struct rb_node *parent, **p; 773 - bool first; 774 - 775 - /* Now insert ourselves into the retirement ordered list of 776 - * signals on this engine. We track the oldest seqno as that 777 - * will be the first signal to complete. 
778 - */ 779 - parent = NULL; 780 - first = true; 781 - p = &b->signals.rb_node; 782 - while (*p) { 783 - parent = *p; 784 - if (i915_seqno_passed(seqno, 785 - to_signaler(parent)->signaling.wait.seqno)) { 786 - p = &parent->rb_right; 787 - first = false; 788 - } else { 789 - p = &parent->rb_left; 790 - } 791 - } 792 - rb_link_node(&request->signaling.node, parent, p); 793 - rb_insert_color(&request->signaling.node, &b->signals); 794 - if (first) 795 - rcu_assign_pointer(b->first_signal, request); 796 - } else { 797 - __intel_engine_remove_wait(engine, &request->signaling.wait); 798 - request->signaling.wait.seqno = 0; 799 - wakeup = false; 800 - } 801 - 802 778 spin_unlock(&b->rb_lock); 803 779 804 780 if (wakeup) 805 781 wake_up_process(b->signaler); 806 782 } 807 783 808 - void intel_engine_cancel_signaling(struct drm_i915_gem_request *request) 784 + void intel_engine_cancel_signaling(struct i915_request *request) 809 785 { 786 + struct intel_engine_cs *engine = request->engine; 787 + struct intel_breadcrumbs *b = &engine->breadcrumbs; 788 + 810 789 GEM_BUG_ON(!irqs_disabled()); 811 790 lockdep_assert_held(&request->lock); 812 791 813 - if (READ_ONCE(request->signaling.wait.seqno)) { 814 - struct intel_engine_cs *engine = request->engine; 815 - struct intel_breadcrumbs *b = &engine->breadcrumbs; 792 + if (!READ_ONCE(request->signaling.wait.seqno)) 793 + return; 816 794 817 - spin_lock(&b->rb_lock); 818 - __intel_engine_remove_signal(engine, request); 819 - spin_unlock(&b->rb_lock); 820 - } 795 + spin_lock(&b->rb_lock); 796 + __intel_engine_remove_wait(engine, &request->signaling.wait); 797 + if (fetch_and_zero(&request->signaling.wait.seqno)) 798 + __list_del_entry(&request->signaling.link); 799 + spin_unlock(&b->rb_lock); 821 800 } 822 801 823 802 int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine) ··· 803 836 804 837 timer_setup(&b->fake_irq, intel_breadcrumbs_fake_irq, 0); 805 838 timer_setup(&b->hangcheck, intel_breadcrumbs_hangcheck, 0); 839 + 840 + INIT_LIST_HEAD(&b->signals); 806 841 807 842 /* Spawn a thread to provide a common bottom-half for all signals. 808 843 * As this is an asynchronous interface we cannot steal the current ··· 865 896 /* The engines should be idle and all requests accounted for! */ 866 897 WARN_ON(READ_ONCE(b->irq_wait)); 867 898 WARN_ON(!RB_EMPTY_ROOT(&b->waiters)); 868 - WARN_ON(rcu_access_pointer(b->first_signal)); 869 - WARN_ON(!RB_EMPTY_ROOT(&b->signals)); 899 + WARN_ON(!list_empty(&b->signals)); 870 900 871 901 if (!IS_ERR_OR_NULL(b->signaler)) 872 902 kthread_stop(b->signaler); 873 903 874 904 cancel_fake_irq(engine); 875 - } 876 - 877 - bool intel_breadcrumbs_busy(struct intel_engine_cs *engine) 878 - { 879 - struct intel_breadcrumbs *b = &engine->breadcrumbs; 880 - bool busy = false; 881 - 882 - spin_lock_irq(&b->rb_lock); 883 - 884 - if (b->irq_wait) { 885 - wake_up_process(b->irq_wait->tsk); 886 - busy = true; 887 - } 888 - 889 - if (rcu_access_pointer(b->first_signal)) { 890 - wake_up_process(b->signaler); 891 - busy = true; 892 - } 893 - 894 - spin_unlock_irq(&b->rb_lock); 895 - 896 - return busy; 897 905 } 898 906 899 907 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
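The rewritten signaler drops the rbtree of pending signals in favour of a list kept ordered by global seqno, with insert_signal() doing a reverse-walking O(N) insertion. A minimal user-space sketch of the two ingredients — the wrap-safe seqno comparison and the ordered insert — with simplified types that stand in for the driver's request and list machinery:

#include <stdint.h>
#include <stdio.h>

static int seqno_passed(uint32_t seqno, uint32_t target)
{
        /* true if seqno is at or after target, tolerating u32 wrap-around */
        return (int32_t)(seqno - target) >= 0;
}

struct signal { uint32_t seqno; struct signal *prev, *next; };

/* insert rq so the circular list stays ordered oldest -> newest seqno */
static void insert_ordered(struct signal *head, struct signal *rq)
{
        struct signal *it;

        /* walk from the newest entry backwards until rq is the newer one */
        for (it = head->prev; it != head; it = it->prev)
                if (seqno_passed(rq->seqno, it->seqno))
                        break;

        /* link rq right after it (after the sentinel if the list is empty) */
        rq->prev = it;
        rq->next = it->next;
        it->next->prev = rq;
        it->next = rq;
}

int main(void)
{
        struct signal head = { .seqno = 0, .prev = &head, .next = &head };
        struct signal a = { .seqno = 10 }, b = { .seqno = 5 }, c = { .seqno = 20 };
        struct signal *it;

        insert_ordered(&head, &a);
        insert_ordered(&head, &b);
        insert_ordered(&head, &c);

        for (it = head.next; it != &head; it = it->next)
                printf("%u\n", it->seqno);      /* 5, 10, 20: oldest first */
        return 0;
}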
+52 -45
drivers/gpu/drm/i915/intel_color.c
··· 66 66 * of the CTM coefficient and we write the value from bit 3. We also round the 67 67 * value. 68 68 */ 69 - #define I9XX_CSC_COEFF_FP(coeff, fbits) \ 69 + #define ILK_CSC_COEFF_FP(coeff, fbits) \ 70 70 (clamp_val(((coeff) >> (32 - (fbits) - 3)) + 4, 0, 0xfff) & 0xff8) 71 71 72 - #define I9XX_CSC_COEFF_LIMITED_RANGE \ 73 - I9XX_CSC_COEFF_FP(CTM_COEFF_LIMITED_RANGE, 9) 74 - #define I9XX_CSC_COEFF_1_0 \ 75 - ((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8)) 72 + #define ILK_CSC_COEFF_LIMITED_RANGE \ 73 + ILK_CSC_COEFF_FP(CTM_COEFF_LIMITED_RANGE, 9) 74 + #define ILK_CSC_COEFF_1_0 \ 75 + ((7 << 12) | ILK_CSC_COEFF_FP(CTM_COEFF_1_0, 8)) 76 76 77 77 static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state) 78 78 { ··· 84 84 85 85 /* 86 86 * When using limited range, multiply the matrix given by userspace by 87 - * the matrix that we would use for the limited range. We do the 88 - * multiplication in U2.30 format. 87 + * the matrix that we would use for the limited range. 89 88 */ 90 - static void ctm_mult_by_limited(uint64_t *result, int64_t *input) 89 + static u64 *ctm_mult_by_limited(u64 *result, const u64 *input) 91 90 { 92 91 int i; 93 92 94 - for (i = 0; i < 9; i++) 95 - result[i] = 0; 93 + for (i = 0; i < 9; i++) { 94 + u64 user_coeff = input[i]; 95 + u32 limited_coeff = CTM_COEFF_LIMITED_RANGE; 96 + u32 abs_coeff = clamp_val(CTM_COEFF_ABS(user_coeff), 0, 97 + CTM_COEFF_4_0 - 1) >> 2; 96 98 97 - for (i = 0; i < 3; i++) { 98 - int64_t user_coeff = input[i * 3 + i]; 99 - uint64_t limited_coeff = CTM_COEFF_LIMITED_RANGE >> 2; 100 - uint64_t abs_coeff = clamp_val(CTM_COEFF_ABS(user_coeff), 101 - 0, 102 - CTM_COEFF_4_0 - 1) >> 2; 103 - 104 - result[i * 3 + i] = (limited_coeff * abs_coeff) >> 27; 105 - if (CTM_COEFF_NEGATIVE(user_coeff)) 106 - result[i * 3 + i] |= CTM_COEFF_SIGN; 99 + /* 100 + * By scaling every co-efficient with limited range (16-235) 101 + * vs full range (0-255) the final o/p will be scaled down to 102 + * fit in the limited range supported by the panel. 103 + */ 104 + result[i] = mul_u32_u32(limited_coeff, abs_coeff) >> 30; 105 + result[i] |= user_coeff & CTM_COEFF_SIGN; 107 106 } 107 + 108 + return result; 108 109 } 109 110 110 - static void i9xx_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc) 111 + static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc) 111 112 { 112 113 int pipe = intel_crtc->pipe; 113 114 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); ··· 132 131 I915_WRITE(PIPE_CSC_MODE(pipe), 0); 133 132 } 134 133 135 - /* Set up the pipe CSC unit. */ 136 - static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state) 134 + static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state) 137 135 { 138 136 struct drm_crtc *crtc = crtc_state->crtc; 139 137 struct drm_i915_private *dev_priv = to_i915(crtc->dev); ··· 140 140 int i, pipe = intel_crtc->pipe; 141 141 uint16_t coeffs[9] = { 0, }; 142 142 struct intel_crtc_state *intel_crtc_state = to_intel_crtc_state(crtc_state); 143 + bool limited_color_range = false; 144 + 145 + /* 146 + * FIXME if there's a gamma LUT after the CSC, we should 147 + * do the range compression using the gamma LUT instead. 
148 + */ 149 + if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) 150 + limited_color_range = intel_crtc_state->limited_color_range; 143 151 144 152 if (intel_crtc_state->ycbcr420) { 145 - i9xx_load_ycbcr_conversion_matrix(intel_crtc); 153 + ilk_load_ycbcr_conversion_matrix(intel_crtc); 146 154 return; 147 155 } else if (crtc_state->ctm) { 148 156 struct drm_color_ctm *ctm = 149 157 (struct drm_color_ctm *)crtc_state->ctm->data; 150 - uint64_t input[9] = { 0, }; 158 + const u64 *input; 159 + u64 temp[9]; 151 160 152 - if (intel_crtc_state->limited_color_range) { 153 - ctm_mult_by_limited(input, ctm->matrix); 154 - } else { 155 - for (i = 0; i < ARRAY_SIZE(input); i++) 156 - input[i] = ctm->matrix[i]; 157 - } 161 + if (limited_color_range) 162 + input = ctm_mult_by_limited(temp, ctm->matrix); 163 + else 164 + input = ctm->matrix; 158 165 159 166 /* 160 167 * Convert fixed point S31.32 input to format supported by the ··· 182 175 183 176 if (abs_coeff < CTM_COEFF_0_125) 184 177 coeffs[i] |= (3 << 12) | 185 - I9XX_CSC_COEFF_FP(abs_coeff, 12); 178 + ILK_CSC_COEFF_FP(abs_coeff, 12); 186 179 else if (abs_coeff < CTM_COEFF_0_25) 187 180 coeffs[i] |= (2 << 12) | 188 - I9XX_CSC_COEFF_FP(abs_coeff, 11); 181 + ILK_CSC_COEFF_FP(abs_coeff, 11); 189 182 else if (abs_coeff < CTM_COEFF_0_5) 190 183 coeffs[i] |= (1 << 12) | 191 - I9XX_CSC_COEFF_FP(abs_coeff, 10); 184 + ILK_CSC_COEFF_FP(abs_coeff, 10); 192 185 else if (abs_coeff < CTM_COEFF_1_0) 193 - coeffs[i] |= I9XX_CSC_COEFF_FP(abs_coeff, 9); 186 + coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9); 194 187 else if (abs_coeff < CTM_COEFF_2_0) 195 188 coeffs[i] |= (7 << 12) | 196 - I9XX_CSC_COEFF_FP(abs_coeff, 8); 189 + ILK_CSC_COEFF_FP(abs_coeff, 8); 197 190 else 198 191 coeffs[i] |= (6 << 12) | 199 - I9XX_CSC_COEFF_FP(abs_coeff, 7); 192 + ILK_CSC_COEFF_FP(abs_coeff, 7); 200 193 } 201 194 } else { 202 195 /* ··· 208 201 * into consideration. 
209 202 */ 210 203 for (i = 0; i < 3; i++) { 211 - if (intel_crtc_state->limited_color_range) 204 + if (limited_color_range) 212 205 coeffs[i * 3 + i] = 213 - I9XX_CSC_COEFF_LIMITED_RANGE; 206 + ILK_CSC_COEFF_LIMITED_RANGE; 214 207 else 215 - coeffs[i * 3 + i] = I9XX_CSC_COEFF_1_0; 208 + coeffs[i * 3 + i] = ILK_CSC_COEFF_1_0; 216 209 } 217 210 } 218 211 ··· 232 225 if (INTEL_GEN(dev_priv) > 6) { 233 226 uint16_t postoff = 0; 234 227 235 - if (intel_crtc_state->limited_color_range) 228 + if (limited_color_range) 236 229 postoff = (16 * (1 << 12) / 255) & 0x1fff; 237 230 238 231 I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff); ··· 243 236 } else { 244 237 uint32_t mode = CSC_MODE_YUV_TO_RGB; 245 238 246 - if (intel_crtc_state->limited_color_range) 239 + if (limited_color_range) 247 240 mode |= CSC_BLACK_SCREEN_OFFSET; 248 241 249 242 I915_WRITE(PIPE_CSC_MODE(pipe), mode); ··· 658 651 dev_priv->display.load_csc_matrix = cherryview_load_csc_matrix; 659 652 dev_priv->display.load_luts = cherryview_load_luts; 660 653 } else if (IS_HASWELL(dev_priv)) { 661 - dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix; 654 + dev_priv->display.load_csc_matrix = ilk_load_csc_matrix; 662 655 dev_priv->display.load_luts = haswell_load_luts; 663 656 } else if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv) || 664 657 IS_BROXTON(dev_priv)) { 665 - dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix; 658 + dev_priv->display.load_csc_matrix = ilk_load_csc_matrix; 666 659 dev_priv->display.load_luts = broadwell_load_luts; 667 660 } else if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) { 668 - dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix; 661 + dev_priv->display.load_csc_matrix = ilk_load_csc_matrix; 669 662 dev_priv->display.load_luts = glk_load_luts; 670 663 } else { 671 664 dev_priv->display.load_luts = i9xx_load_luts;
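ctm_mult_by_limited() now scales every S31.32 coefficient by the limited-range factor with a 64-bit multiply and a 30-bit shift, rather than only touching the diagonal. A small user-space sketch of that fixed-point step; the macro values and names here are assumptions for illustration (see the DRM CTM definitions for the real ones), and the sign-bit handling is omitted:

#include <stdint.h>
#include <stdio.h>

#define COEFF_1_0           (1ULL << 32)                   /* S31.32 "1.0"   */
#define COEFF_LIMITED_RANGE ((235 - 16) * COEFF_1_0 / 255)  /* roughly 0.859 */

int main(void)
{
        uint64_t user = COEFF_1_0;                  /* an identity diagonal entry */
        uint32_t limited = (uint32_t)COEFF_LIMITED_RANGE;
        uint32_t abs_coeff = (uint32_t)(user >> 2); /* real code clamps to <4.0 first */
        uint64_t scaled = ((uint64_t)limited * abs_coeff) >> 30;

        /* back in S31.32: 1.0 compressed into limited range, ~0.8588 */
        printf("%.4f\n", scaled / (double)COEFF_1_0);
        return 0;
}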
+3 -1
drivers/gpu/drm/i915/intel_crt.c
··· 956 956 crt->base.power_domain = POWER_DOMAIN_PORT_CRT; 957 957 958 958 if (I915_HAS_HOTPLUG(dev_priv) && 959 - !dmi_check_system(intel_spurious_crt_detect)) 959 + !dmi_check_system(intel_spurious_crt_detect)) { 960 960 crt->base.hpd_pin = HPD_CRT; 961 + crt->base.hotplug = intel_encoder_hotplug; 962 + } 961 963 962 964 if (HAS_DDI(dev_priv)) { 963 965 crt->base.port = PORT_E;
+187 -46
drivers/gpu/drm/i915/intel_ddi.c
··· 25 25 * 26 26 */ 27 27 28 + #include <drm/drm_scdc_helper.h> 28 29 #include "i915_drv.h" 29 30 #include "intel_drv.h" 30 31 ··· 2508 2507 { 2509 2508 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2510 2509 2510 + intel_dp->link_trained = false; 2511 + 2511 2512 if (old_crtc_state->has_audio) 2512 2513 intel_audio_codec_disable(encoder, 2513 2514 old_crtc_state, old_conn_state); ··· 2801 2798 return connector; 2802 2799 } 2803 2800 2801 + static int modeset_pipe(struct drm_crtc *crtc, 2802 + struct drm_modeset_acquire_ctx *ctx) 2803 + { 2804 + struct drm_atomic_state *state; 2805 + struct drm_crtc_state *crtc_state; 2806 + int ret; 2807 + 2808 + state = drm_atomic_state_alloc(crtc->dev); 2809 + if (!state) 2810 + return -ENOMEM; 2811 + 2812 + state->acquire_ctx = ctx; 2813 + 2814 + crtc_state = drm_atomic_get_crtc_state(state, crtc); 2815 + if (IS_ERR(crtc_state)) { 2816 + ret = PTR_ERR(crtc_state); 2817 + goto out; 2818 + } 2819 + 2820 + crtc_state->mode_changed = true; 2821 + 2822 + ret = drm_atomic_add_affected_connectors(state, crtc); 2823 + if (ret) 2824 + goto out; 2825 + 2826 + ret = drm_atomic_add_affected_planes(state, crtc); 2827 + if (ret) 2828 + goto out; 2829 + 2830 + ret = drm_atomic_commit(state); 2831 + if (ret) 2832 + goto out; 2833 + 2834 + return 0; 2835 + 2836 + out: 2837 + drm_atomic_state_put(state); 2838 + 2839 + return ret; 2840 + } 2841 + 2842 + static int intel_hdmi_reset_link(struct intel_encoder *encoder, 2843 + struct drm_modeset_acquire_ctx *ctx) 2844 + { 2845 + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2846 + struct intel_hdmi *hdmi = enc_to_intel_hdmi(&encoder->base); 2847 + struct intel_connector *connector = hdmi->attached_connector; 2848 + struct i2c_adapter *adapter = 2849 + intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus); 2850 + struct drm_connector_state *conn_state; 2851 + struct intel_crtc_state *crtc_state; 2852 + struct intel_crtc *crtc; 2853 + u8 config; 2854 + int ret; 2855 + 2856 + if (!connector || connector->base.status != connector_status_connected) 2857 + return 0; 2858 + 2859 + ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, 2860 + ctx); 2861 + if (ret) 2862 + return ret; 2863 + 2864 + conn_state = connector->base.state; 2865 + 2866 + crtc = to_intel_crtc(conn_state->crtc); 2867 + if (!crtc) 2868 + return 0; 2869 + 2870 + ret = drm_modeset_lock(&crtc->base.mutex, ctx); 2871 + if (ret) 2872 + return ret; 2873 + 2874 + crtc_state = to_intel_crtc_state(crtc->base.state); 2875 + 2876 + WARN_ON(!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)); 2877 + 2878 + if (!crtc_state->base.active) 2879 + return 0; 2880 + 2881 + if (!crtc_state->hdmi_high_tmds_clock_ratio && 2882 + !crtc_state->hdmi_scrambling) 2883 + return 0; 2884 + 2885 + if (conn_state->commit && 2886 + !try_wait_for_completion(&conn_state->commit->hw_done)) 2887 + return 0; 2888 + 2889 + ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config); 2890 + if (ret < 0) { 2891 + DRM_ERROR("Failed to read TMDS config: %d\n", ret); 2892 + return 0; 2893 + } 2894 + 2895 + if (!!(config & SCDC_TMDS_BIT_CLOCK_RATIO_BY_40) == 2896 + crtc_state->hdmi_high_tmds_clock_ratio && 2897 + !!(config & SCDC_SCRAMBLING_ENABLE) == 2898 + crtc_state->hdmi_scrambling) 2899 + return 0; 2900 + 2901 + /* 2902 + * HDMI 2.0 says that one should not send scrambled data 2903 + * prior to configuring the sink scrambling, and that 2904 + * TMDS clock/data transmission should be suspended when 2905 + * changing the TMDS clock rate in the sink. 
So let's 2906 + * just do a full modeset here, even though some sinks 2907 + * would be perfectly happy if were to just reconfigure 2908 + * the SCDC settings on the fly. 2909 + */ 2910 + return modeset_pipe(&crtc->base, ctx); 2911 + } 2912 + 2913 + static bool intel_ddi_hotplug(struct intel_encoder *encoder, 2914 + struct intel_connector *connector) 2915 + { 2916 + struct drm_modeset_acquire_ctx ctx; 2917 + bool changed; 2918 + int ret; 2919 + 2920 + changed = intel_encoder_hotplug(encoder, connector); 2921 + 2922 + drm_modeset_acquire_init(&ctx, 0); 2923 + 2924 + for (;;) { 2925 + if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA) 2926 + ret = intel_hdmi_reset_link(encoder, &ctx); 2927 + else 2928 + ret = intel_dp_retrain_link(encoder, &ctx); 2929 + 2930 + if (ret == -EDEADLK) { 2931 + drm_modeset_backoff(&ctx); 2932 + continue; 2933 + } 2934 + 2935 + break; 2936 + } 2937 + 2938 + drm_modeset_drop_locks(&ctx); 2939 + drm_modeset_acquire_fini(&ctx); 2940 + WARN(ret, "Acquiring modeset locks failed with %i\n", ret); 2941 + 2942 + return changed; 2943 + } 2944 + 2804 2945 static struct intel_connector * 2805 2946 intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port) 2806 2947 { ··· 2989 2842 return false; 2990 2843 } 2991 2844 2845 + static int 2846 + intel_ddi_max_lanes(struct intel_digital_port *intel_dport) 2847 + { 2848 + struct drm_i915_private *dev_priv = to_i915(intel_dport->base.base.dev); 2849 + enum port port = intel_dport->base.port; 2850 + int max_lanes = 4; 2851 + 2852 + if (INTEL_GEN(dev_priv) >= 11) 2853 + return max_lanes; 2854 + 2855 + if (port == PORT_A || port == PORT_E) { 2856 + if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) 2857 + max_lanes = port == PORT_A ? 4 : 0; 2858 + else 2859 + /* Both A and E share 2 lanes */ 2860 + max_lanes = 2; 2861 + } 2862 + 2863 + /* 2864 + * Some BIOS might fail to set this bit on port A if eDP 2865 + * wasn't lit up at boot. Force this bit set when needed 2866 + * so we use the proper lane count for our calculations. 
2867 + */ 2868 + if (intel_ddi_a_force_4_lanes(intel_dport)) { 2869 + DRM_DEBUG_KMS("Forcing DDI_A_4_LANES for port A\n"); 2870 + intel_dport->saved_port_bits |= DDI_A_4_LANES; 2871 + max_lanes = 4; 2872 + } 2873 + 2874 + return max_lanes; 2875 + } 2876 + 2992 2877 void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) 2993 2878 { 2994 2879 struct intel_digital_port *intel_dig_port; 2995 2880 struct intel_encoder *intel_encoder; 2996 2881 struct drm_encoder *encoder; 2997 2882 bool init_hdmi, init_dp, init_lspcon = false; 2998 - int max_lanes; 2999 2883 3000 - if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) { 3001 - switch (port) { 3002 - case PORT_A: 3003 - max_lanes = 4; 3004 - break; 3005 - case PORT_E: 3006 - max_lanes = 0; 3007 - break; 3008 - default: 3009 - max_lanes = 4; 3010 - break; 3011 - } 3012 - } else { 3013 - switch (port) { 3014 - case PORT_A: 3015 - max_lanes = 2; 3016 - break; 3017 - case PORT_E: 3018 - max_lanes = 2; 3019 - break; 3020 - default: 3021 - max_lanes = 4; 3022 - break; 3023 - } 3024 - } 3025 2884 3026 2885 init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi || 3027 2886 dev_priv->vbt.ddi_port_info[port].supports_hdmi); ··· 3061 2908 drm_encoder_init(&dev_priv->drm, encoder, &intel_ddi_funcs, 3062 2909 DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port)); 3063 2910 2911 + intel_encoder->hotplug = intel_ddi_hotplug; 3064 2912 intel_encoder->compute_output_type = intel_ddi_compute_output_type; 3065 2913 intel_encoder->compute_config = intel_ddi_compute_config; 3066 2914 intel_encoder->enable = intel_enable_ddi; ··· 3074 2920 intel_encoder->get_config = intel_ddi_get_config; 3075 2921 intel_encoder->suspend = intel_dp_encoder_suspend; 3076 2922 intel_encoder->get_power_domains = intel_ddi_get_power_domains; 2923 + intel_encoder->type = INTEL_OUTPUT_DDI; 2924 + intel_encoder->power_domain = intel_port_to_power_domain(port); 2925 + intel_encoder->port = port; 2926 + intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 2927 + intel_encoder->cloneable = 0; 3077 2928 3078 2929 intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & 3079 2930 (DDI_BUF_PORT_REVERSAL | 3080 2931 DDI_A_4_LANES); 2932 + intel_dig_port->dp.output_reg = INVALID_MMIO_REG; 2933 + intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port); 3081 2934 3082 2935 switch (port) { 3083 2936 case PORT_A: ··· 3114 2953 default: 3115 2954 MISSING_CASE(port); 3116 2955 } 3117 - 3118 - /* 3119 - * Some BIOS might fail to set this bit on port A if eDP 3120 - * wasn't lit up at boot. Force this bit set when needed 3121 - * so we use the proper lane count for our calculations. 3122 - */ 3123 - if (intel_ddi_a_force_4_lanes(intel_dig_port)) { 3124 - DRM_DEBUG_KMS("Forcing DDI_A_4_LANES for port A\n"); 3125 - intel_dig_port->saved_port_bits |= DDI_A_4_LANES; 3126 - max_lanes = 4; 3127 - } 3128 - 3129 - intel_dig_port->dp.output_reg = INVALID_MMIO_REG; 3130 - intel_dig_port->max_lanes = max_lanes; 3131 - 3132 - intel_encoder->type = INTEL_OUTPUT_DDI; 3133 - intel_encoder->power_domain = intel_port_to_power_domain(port); 3134 - intel_encoder->port = port; 3135 - intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 3136 - intel_encoder->cloneable = 0; 3137 2956 3138 2957 intel_infoframe_init(intel_dig_port); 3139 2958
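intel_ddi_max_lanes() consolidates the old open-coded switch: DDI A and E share four lanes, so either A takes all four (DDI_A_4_LANES set, E gets none) or the two split them two and two, while B/C/D always have four and gen11+ always reports four. A stand-alone sketch of just that rule, with the BIOS DDI_A_4_LANES workaround left out:

#include <stdbool.h>
#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E };

static int ddi_max_lanes(enum port port, bool ddi_a_4_lanes)
{
        if (port != PORT_A && port != PORT_E)
                return 4;                       /* B/C/D always have 4 lanes */
        if (ddi_a_4_lanes)
                return port == PORT_A ? 4 : 0;  /* A takes all, E gets none  */
        return 2;                               /* A and E split 2 + 2       */
}

int main(void)
{
        printf("A=%d E=%d (shared)\n",
               ddi_max_lanes(PORT_A, false), ddi_max_lanes(PORT_E, false)); /* 2, 2 */
        printf("A=%d E=%d (DDI_A_4_LANES)\n",
               ddi_max_lanes(PORT_A, true), ddi_max_lanes(PORT_E, true));   /* 4, 0 */
        return 0;
}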
+238 -50
drivers/gpu/drm/i915/intel_device_info.c
··· 81 81 82 82 static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p) 83 83 { 84 + int s; 85 + 84 86 drm_printf(p, "slice mask: %04x\n", sseu->slice_mask); 85 87 drm_printf(p, "slice total: %u\n", hweight8(sseu->slice_mask)); 86 88 drm_printf(p, "subslice total: %u\n", sseu_subslice_total(sseu)); 87 - drm_printf(p, "subslice mask %04x\n", sseu->subslice_mask); 88 - drm_printf(p, "subslice per slice: %u\n", 89 - hweight8(sseu->subslice_mask)); 89 + for (s = 0; s < ARRAY_SIZE(sseu->subslice_mask); s++) { 90 + drm_printf(p, "slice%d %u subslices mask=%04x\n", 91 + s, hweight8(sseu->subslice_mask[s]), 92 + sseu->subslice_mask[s]); 93 + } 90 94 drm_printf(p, "EU total: %u\n", sseu->eu_total); 91 95 drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice); 92 96 drm_printf(p, "has slice power gating: %s\n", ··· 124 120 intel_device_info_dump_flags(info, p); 125 121 } 126 122 123 + void intel_device_info_dump_topology(const struct sseu_dev_info *sseu, 124 + struct drm_printer *p) 125 + { 126 + int s, ss; 127 + 128 + if (sseu->max_slices == 0) { 129 + drm_printf(p, "Unavailable\n"); 130 + return; 131 + } 132 + 133 + for (s = 0; s < sseu->max_slices; s++) { 134 + drm_printf(p, "slice%d: %u subslice(s) (0x%hhx):\n", 135 + s, hweight8(sseu->subslice_mask[s]), 136 + sseu->subslice_mask[s]); 137 + 138 + for (ss = 0; ss < sseu->max_subslices; ss++) { 139 + u16 enabled_eus = sseu_get_eus(sseu, s, ss); 140 + 141 + drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n", 142 + ss, hweight16(enabled_eus), enabled_eus); 143 + } 144 + } 145 + } 146 + 147 + static u16 compute_eu_total(const struct sseu_dev_info *sseu) 148 + { 149 + u16 i, total = 0; 150 + 151 + for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++) 152 + total += hweight8(sseu->eu_mask[i]); 153 + 154 + return total; 155 + } 156 + 127 157 static void gen10_sseu_info_init(struct drm_i915_private *dev_priv) 128 158 { 129 159 struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu; 130 160 const u32 fuse2 = I915_READ(GEN8_FUSE2); 161 + int s, ss; 162 + const int eu_mask = 0xff; 163 + u32 subslice_mask, eu_en; 131 164 132 165 sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >> 133 166 GEN10_F2_S_ENA_SHIFT; 134 - sseu->subslice_mask = (1 << 4) - 1; 135 - sseu->subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >> 136 - GEN10_F2_SS_DIS_SHIFT); 167 + sseu->max_slices = 6; 168 + sseu->max_subslices = 4; 169 + sseu->max_eus_per_subslice = 8; 137 170 138 - sseu->eu_total = hweight32(~I915_READ(GEN8_EU_DISABLE0)); 139 - sseu->eu_total += hweight32(~I915_READ(GEN8_EU_DISABLE1)); 140 - sseu->eu_total += hweight32(~I915_READ(GEN8_EU_DISABLE2)); 141 - sseu->eu_total += hweight8(~(I915_READ(GEN10_EU_DISABLE3) & 142 - GEN10_EU_DIS_SS_MASK)); 171 + subslice_mask = (1 << 4) - 1; 172 + subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >> 173 + GEN10_F2_SS_DIS_SHIFT); 174 + 175 + /* 176 + * Slice0 can have up to 3 subslices, but there are only 2 in 177 + * slice1/2. 
178 + */ 179 + sseu->subslice_mask[0] = subslice_mask; 180 + for (s = 1; s < sseu->max_slices; s++) 181 + sseu->subslice_mask[s] = subslice_mask & 0x3; 182 + 183 + /* Slice0 */ 184 + eu_en = ~I915_READ(GEN8_EU_DISABLE0); 185 + for (ss = 0; ss < sseu->max_subslices; ss++) 186 + sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask); 187 + /* Slice1 */ 188 + sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask); 189 + eu_en = ~I915_READ(GEN8_EU_DISABLE1); 190 + sseu_set_eus(sseu, 1, 1, eu_en & eu_mask); 191 + /* Slice2 */ 192 + sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask); 193 + sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask); 194 + /* Slice3 */ 195 + sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask); 196 + eu_en = ~I915_READ(GEN8_EU_DISABLE2); 197 + sseu_set_eus(sseu, 3, 1, eu_en & eu_mask); 198 + /* Slice4 */ 199 + sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask); 200 + sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask); 201 + /* Slice5 */ 202 + sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask); 203 + eu_en = ~I915_READ(GEN10_EU_DISABLE3); 204 + sseu_set_eus(sseu, 5, 1, eu_en & eu_mask); 205 + 206 + /* Do a second pass where we mark the subslices disabled if all their 207 + * eus are off. 208 + */ 209 + for (s = 0; s < sseu->max_slices; s++) { 210 + for (ss = 0; ss < sseu->max_subslices; ss++) { 211 + if (sseu_get_eus(sseu, s, ss) == 0) 212 + sseu->subslice_mask[s] &= ~BIT(ss); 213 + } 214 + } 215 + 216 + sseu->eu_total = compute_eu_total(sseu); 143 217 144 218 /* 145 219 * CNL is expected to always have a uniform distribution ··· 238 156 static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) 239 157 { 240 158 struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu; 241 - u32 fuse, eu_dis; 159 + u32 fuse; 242 160 243 161 fuse = I915_READ(CHV_FUSE_GT); 244 162 245 163 sseu->slice_mask = BIT(0); 164 + sseu->max_slices = 1; 165 + sseu->max_subslices = 2; 166 + sseu->max_eus_per_subslice = 8; 246 167 247 168 if (!(fuse & CHV_FGT_DISABLE_SS0)) { 248 - sseu->subslice_mask |= BIT(0); 249 - eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK | 250 - CHV_FGT_EU_DIS_SS0_R1_MASK); 251 - sseu->eu_total += 8 - hweight32(eu_dis); 169 + u8 disabled_mask = 170 + ((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >> 171 + CHV_FGT_EU_DIS_SS0_R0_SHIFT) | 172 + (((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >> 173 + CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4); 174 + 175 + sseu->subslice_mask[0] |= BIT(0); 176 + sseu_set_eus(sseu, 0, 0, ~disabled_mask); 252 177 } 253 178 254 179 if (!(fuse & CHV_FGT_DISABLE_SS1)) { 255 - sseu->subslice_mask |= BIT(1); 256 - eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK | 257 - CHV_FGT_EU_DIS_SS1_R1_MASK); 258 - sseu->eu_total += 8 - hweight32(eu_dis); 180 + u8 disabled_mask = 181 + ((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >> 182 + CHV_FGT_EU_DIS_SS1_R0_SHIFT) | 183 + (((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >> 184 + CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4); 185 + 186 + sseu->subslice_mask[0] |= BIT(1); 187 + sseu_set_eus(sseu, 0, 1, ~disabled_mask); 259 188 } 189 + 190 + sseu->eu_total = compute_eu_total(sseu); 260 191 261 192 /* 262 193 * CHV expected to always have a uniform distribution of EU ··· 292 197 { 293 198 struct intel_device_info *info = mkwrite_device_info(dev_priv); 294 199 struct sseu_dev_info *sseu = &info->sseu; 295 - int s_max = 3, ss_max = 4, eu_max = 8; 296 200 int s, ss; 297 - u32 fuse2, eu_disable; 298 - u8 eu_mask = 0xff; 201 + u32 fuse2, eu_disable, subslice_mask; 202 + const u8 eu_mask = 0xff; 299 203 300 204 fuse2 = I915_READ(GEN8_FUSE2); 301 205 sseu->slice_mask = (fuse2 & 
GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; 206 + 207 + /* BXT has a single slice and at most 3 subslices. */ 208 + sseu->max_slices = IS_GEN9_LP(dev_priv) ? 1 : 3; 209 + sseu->max_subslices = IS_GEN9_LP(dev_priv) ? 3 : 4; 210 + sseu->max_eus_per_subslice = 8; 302 211 303 212 /* 304 213 * The subslice disable field is global, i.e. it applies 305 214 * to each of the enabled slices. 306 215 */ 307 - sseu->subslice_mask = (1 << ss_max) - 1; 308 - sseu->subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >> 309 - GEN9_F2_SS_DIS_SHIFT); 216 + subslice_mask = (1 << sseu->max_subslices) - 1; 217 + subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >> 218 + GEN9_F2_SS_DIS_SHIFT); 310 219 311 220 /* 312 221 * Iterate through enabled slices and subslices to 313 222 * count the total enabled EU. 314 223 */ 315 - for (s = 0; s < s_max; s++) { 224 + for (s = 0; s < sseu->max_slices; s++) { 316 225 if (!(sseu->slice_mask & BIT(s))) 317 226 /* skip disabled slice */ 318 227 continue; 319 228 320 - eu_disable = I915_READ(GEN9_EU_DISABLE(s)); 321 - for (ss = 0; ss < ss_max; ss++) { 322 - int eu_per_ss; 229 + sseu->subslice_mask[s] = subslice_mask; 323 230 324 - if (!(sseu->subslice_mask & BIT(ss))) 231 + eu_disable = I915_READ(GEN9_EU_DISABLE(s)); 232 + for (ss = 0; ss < sseu->max_subslices; ss++) { 233 + int eu_per_ss; 234 + u8 eu_disabled_mask; 235 + 236 + if (!(sseu->subslice_mask[s] & BIT(ss))) 325 237 /* skip disabled subslice */ 326 238 continue; 327 239 328 - eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) & 329 - eu_mask); 240 + eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask; 241 + 242 + sseu_set_eus(sseu, s, ss, ~eu_disabled_mask); 243 + 244 + eu_per_ss = sseu->max_eus_per_subslice - 245 + hweight8(eu_disabled_mask); 330 246 331 247 /* 332 248 * Record which subslice(s) has(have) 7 EUs. we ··· 346 240 */ 347 241 if (eu_per_ss == 7) 348 242 sseu->subslice_7eu[s] |= BIT(ss); 349 - 350 - sseu->eu_total += eu_per_ss; 351 243 } 352 244 } 245 + 246 + sseu->eu_total = compute_eu_total(sseu); 353 247 354 248 /* 355 249 * SKL is expected to always have a uniform distribution ··· 376 270 sseu->has_eu_pg = sseu->eu_per_subslice > 2; 377 271 378 272 if (IS_GEN9_LP(dev_priv)) { 379 - #define IS_SS_DISABLED(ss) (!(sseu->subslice_mask & BIT(ss))) 380 - info->has_pooled_eu = hweight8(sseu->subslice_mask) == 3; 273 + #define IS_SS_DISABLED(ss) (!(sseu->subslice_mask[0] & BIT(ss))) 274 + info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3; 381 275 382 276 sseu->min_eu_in_pool = 0; 383 277 if (info->has_pooled_eu) { ··· 395 289 static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) 396 290 { 397 291 struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu; 398 - const int s_max = 3, ss_max = 3, eu_max = 8; 399 292 int s, ss; 400 - u32 fuse2, eu_disable[3]; /* s_max */ 293 + u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */ 401 294 402 295 fuse2 = I915_READ(GEN8_FUSE2); 403 296 sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; 297 + sseu->max_slices = 3; 298 + sseu->max_subslices = 3; 299 + sseu->max_eus_per_subslice = 8; 300 + 404 301 /* 405 302 * The subslice disable field is global, i.e. it applies 406 303 * to each of the enabled slices. 
407 304 */ 408 - sseu->subslice_mask = GENMASK(ss_max - 1, 0); 409 - sseu->subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >> 410 - GEN8_F2_SS_DIS_SHIFT); 305 + subslice_mask = GENMASK(sseu->max_subslices - 1, 0); 306 + subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >> 307 + GEN8_F2_SS_DIS_SHIFT); 411 308 412 309 eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK; 413 310 eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) | ··· 424 315 * Iterate through enabled slices and subslices to 425 316 * count the total enabled EU. 426 317 */ 427 - for (s = 0; s < s_max; s++) { 318 + for (s = 0; s < sseu->max_slices; s++) { 428 319 if (!(sseu->slice_mask & BIT(s))) 429 320 /* skip disabled slice */ 430 321 continue; 431 322 432 - for (ss = 0; ss < ss_max; ss++) { 323 + sseu->subslice_mask[s] = subslice_mask; 324 + 325 + for (ss = 0; ss < sseu->max_subslices; ss++) { 326 + u8 eu_disabled_mask; 433 327 u32 n_disabled; 434 328 435 - if (!(sseu->subslice_mask & BIT(ss))) 329 + if (!(sseu->subslice_mask[ss] & BIT(ss))) 436 330 /* skip disabled subslice */ 437 331 continue; 438 332 439 - n_disabled = hweight8(eu_disable[s] >> (ss * eu_max)); 333 + eu_disabled_mask = 334 + eu_disable[s] >> (ss * sseu->max_eus_per_subslice); 335 + 336 + sseu_set_eus(sseu, s, ss, ~eu_disabled_mask); 337 + 338 + n_disabled = hweight8(eu_disabled_mask); 440 339 441 340 /* 442 341 * Record which subslices have 7 EUs. 443 342 */ 444 - if (eu_max - n_disabled == 7) 343 + if (sseu->max_eus_per_subslice - n_disabled == 7) 445 344 sseu->subslice_7eu[s] |= 1 << ss; 446 - 447 - sseu->eu_total += eu_max - n_disabled; 448 345 } 449 346 } 347 + 348 + sseu->eu_total = compute_eu_total(sseu); 450 349 451 350 /* 452 351 * BDW is expected to always have a uniform distribution of EU across ··· 470 353 * one slice. 471 354 */ 472 355 sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1; 356 + sseu->has_subslice_pg = 0; 357 + sseu->has_eu_pg = 0; 358 + } 359 + 360 + static void haswell_sseu_info_init(struct drm_i915_private *dev_priv) 361 + { 362 + struct intel_device_info *info = mkwrite_device_info(dev_priv); 363 + struct sseu_dev_info *sseu = &info->sseu; 364 + u32 fuse1; 365 + int s, ss; 366 + 367 + /* 368 + * There isn't a register to tell us how many slices/subslices. We 369 + * work off the PCI-ids here. 
370 + */ 371 + switch (info->gt) { 372 + default: 373 + MISSING_CASE(info->gt); 374 + /* fall through */ 375 + case 1: 376 + sseu->slice_mask = BIT(0); 377 + sseu->subslice_mask[0] = BIT(0); 378 + break; 379 + case 2: 380 + sseu->slice_mask = BIT(0); 381 + sseu->subslice_mask[0] = BIT(0) | BIT(1); 382 + break; 383 + case 3: 384 + sseu->slice_mask = BIT(0) | BIT(1); 385 + sseu->subslice_mask[0] = BIT(0) | BIT(1); 386 + sseu->subslice_mask[1] = BIT(0) | BIT(1); 387 + break; 388 + } 389 + 390 + sseu->max_slices = hweight8(sseu->slice_mask); 391 + sseu->max_subslices = hweight8(sseu->subslice_mask[0]); 392 + 393 + fuse1 = I915_READ(HSW_PAVP_FUSE1); 394 + switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) { 395 + default: 396 + MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >> 397 + HSW_F1_EU_DIS_SHIFT); 398 + /* fall through */ 399 + case HSW_F1_EU_DIS_10EUS: 400 + sseu->eu_per_subslice = 10; 401 + break; 402 + case HSW_F1_EU_DIS_8EUS: 403 + sseu->eu_per_subslice = 8; 404 + break; 405 + case HSW_F1_EU_DIS_6EUS: 406 + sseu->eu_per_subslice = 6; 407 + break; 408 + } 409 + sseu->max_eus_per_subslice = sseu->eu_per_subslice; 410 + 411 + for (s = 0; s < sseu->max_slices; s++) { 412 + for (ss = 0; ss < sseu->max_subslices; ss++) { 413 + sseu_set_eus(sseu, s, ss, 414 + (1UL << sseu->eu_per_subslice) - 1); 415 + } 416 + } 417 + 418 + sseu->eu_total = compute_eu_total(sseu); 419 + 420 + /* No powergating for you. */ 421 + sseu->has_slice_pg = 0; 473 422 sseu->has_subslice_pg = 0; 474 423 sseu->has_eu_pg = 0; 475 424 } ··· 672 489 info->num_scalers[PIPE_C] = 1; 673 490 } 674 491 492 + BUILD_BUG_ON(I915_NUM_ENGINES > 493 + sizeof(intel_ring_mask_t) * BITS_PER_BYTE); 494 + 675 495 /* 676 496 * Skylake and Broxton currently don't expose the topmost plane as its 677 497 * use is exclusive with the legacy cursor and we only want to expose ··· 760 574 } 761 575 762 576 /* Initialize slice/subslice/EU info */ 763 - if (IS_CHERRYVIEW(dev_priv)) 577 + if (IS_HASWELL(dev_priv)) 578 + haswell_sseu_info_init(dev_priv); 579 + else if (IS_CHERRYVIEW(dev_priv)) 764 580 cherryview_sseu_info_init(dev_priv); 765 581 else if (IS_BROADWELL(dev_priv)) 766 582 broadwell_sseu_info_init(dev_priv);
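With the per-subslice EU masks stored explicitly, compute_eu_total() reduces to a population count over the mask bytes. A user-space sketch of the same idea, using the compiler builtin where the kernel uses hweight8():

#include <stdint.h>
#include <stdio.h>

static unsigned int eu_total(const uint8_t *eu_mask, unsigned int len)
{
        unsigned int i, total = 0;

        for (i = 0; i < len; i++)
                total += __builtin_popcount(eu_mask[i]);

        return total;
}

int main(void)
{
        /* two subslices with all 8 EUs enabled, one with 2 fused off */
        uint8_t mask[] = { 0xff, 0xff, 0x3f };

        printf("%u EUs\n", eu_total(mask, sizeof(mask)));   /* 22 */
        return 0;
}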
+67 -6
drivers/gpu/drm/i915/intel_device_info.h
··· 96 96 func(has_l3_dpf); \ 97 97 func(has_llc); \ 98 98 func(has_logical_ring_contexts); \ 99 + func(has_logical_ring_elsq); \ 99 100 func(has_logical_ring_preemption); \ 100 101 func(has_overlay); \ 101 102 func(has_pooled_eu); \ ··· 113 112 func(supports_tv); \ 114 113 func(has_ipc); 115 114 115 + #define GEN_MAX_SLICES (6) /* CNL upper bound */ 116 + #define GEN_MAX_SUBSLICES (7) 117 + 116 118 struct sseu_dev_info { 117 119 u8 slice_mask; 118 - u8 subslice_mask; 119 - u8 eu_total; 120 + u8 subslice_mask[GEN_MAX_SUBSLICES]; 121 + u16 eu_total; 120 122 u8 eu_per_subslice; 121 123 u8 min_eu_in_pool; 122 124 /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */ ··· 127 123 u8 has_slice_pg:1; 128 124 u8 has_subslice_pg:1; 129 125 u8 has_eu_pg:1; 126 + 127 + /* Topology fields */ 128 + u8 max_slices; 129 + u8 max_subslices; 130 + u8 max_eus_per_subslice; 131 + 132 + /* We don't have more than 8 eus per subslice at the moment and as we 133 + * store eus enabled using bits, no need to multiply by eus per 134 + * subslice. 135 + */ 136 + u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES]; 130 137 }; 138 + 139 + typedef u8 intel_ring_mask_t; 131 140 132 141 struct intel_device_info { 133 142 u16 device_id; ··· 149 132 u8 gen; 150 133 u8 gt; /* GT number, 0 if undefined */ 151 134 u8 num_rings; 152 - u8 ring_mask; /* Rings supported by the HW */ 135 + intel_ring_mask_t ring_mask; /* Rings supported by the HW */ 153 136 154 137 enum intel_platform platform; 155 138 u32 platform_mask; 139 + 140 + unsigned int page_sizes; /* page sizes supported by the HW */ 156 141 157 142 u32 display_mmio_offset; 158 143 159 144 u8 num_pipes; 160 145 u8 num_sprites[I915_MAX_PIPES]; 161 146 u8 num_scalers[I915_MAX_PIPES]; 162 - 163 - unsigned int page_sizes; /* page sizes supported by the HW */ 164 147 165 148 #define DEFINE_FLAG(name) u8 name:1 166 149 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG); ··· 190 173 191 174 static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu) 192 175 { 193 - return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask); 176 + unsigned int i, total = 0; 177 + 178 + for (i = 0; i < ARRAY_SIZE(sseu->subslice_mask); i++) 179 + total += hweight8(sseu->subslice_mask[i]); 180 + 181 + return total; 182 + } 183 + 184 + static inline int sseu_eu_idx(const struct sseu_dev_info *sseu, 185 + int slice, int subslice) 186 + { 187 + int subslice_stride = DIV_ROUND_UP(sseu->max_eus_per_subslice, 188 + BITS_PER_BYTE); 189 + int slice_stride = sseu->max_subslices * subslice_stride; 190 + 191 + return slice * slice_stride + subslice * subslice_stride; 192 + } 193 + 194 + static inline u16 sseu_get_eus(const struct sseu_dev_info *sseu, 195 + int slice, int subslice) 196 + { 197 + int i, offset = sseu_eu_idx(sseu, slice, subslice); 198 + u16 eu_mask = 0; 199 + 200 + for (i = 0; 201 + i < DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE); i++) { 202 + eu_mask |= ((u16) sseu->eu_mask[offset + i]) << 203 + (i * BITS_PER_BYTE); 204 + } 205 + 206 + return eu_mask; 207 + } 208 + 209 + static inline void sseu_set_eus(struct sseu_dev_info *sseu, 210 + int slice, int subslice, u16 eu_mask) 211 + { 212 + int i, offset = sseu_eu_idx(sseu, slice, subslice); 213 + 214 + for (i = 0; 215 + i < DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE); i++) { 216 + sseu->eu_mask[offset + i] = 217 + (eu_mask >> (BITS_PER_BYTE * i)) & 0xff; 218 + } 194 219 } 195 220 196 221 const char *intel_platform_name(enum intel_platform platform); ··· 244 185 struct drm_printer *p); 245 186 void 
intel_device_info_dump_runtime(const struct intel_device_info *info, 246 187 struct drm_printer *p); 188 + void intel_device_info_dump_topology(const struct sseu_dev_info *sseu, 189 + struct drm_printer *p); 247 190 248 191 void intel_driver_caps_print(const struct intel_driver_caps *caps, 249 192 struct drm_printer *p);
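sseu_get_eus()/sseu_set_eus() address eu_mask[] with one stride per subslice (the per-subslice EU count rounded up to whole bytes) and max_subslices strides per slice. A small sketch of that index calculation, with example limits assumed purely for illustration:

#include <stdio.h>

#define BITS_PER_BYTE 8
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int eu_idx(int max_subslices, int max_eus_per_subslice,
                  int slice, int subslice)
{
        int subslice_stride = DIV_ROUND_UP(max_eus_per_subslice, BITS_PER_BYTE);
        int slice_stride = max_subslices * subslice_stride;

        return slice * slice_stride + subslice * subslice_stride;
}

int main(void)
{
        /* e.g. up to 4 subslices of 8 EUs -> one byte per subslice */
        printf("slice2/subslice3 -> byte %d\n", eu_idx(4, 8, 2, 3));  /* 11 */
        return 0;
}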
+119 -70
drivers/gpu/drm/i915/intel_display.c
··· 2067 2067 } 2068 2068 } 2069 2069 2070 + static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state) 2071 + { 2072 + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 2073 + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 2074 + 2075 + return INTEL_GEN(dev_priv) < 4 || plane->has_fbc; 2076 + } 2077 + 2070 2078 struct i915_vma * 2071 2079 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, 2072 2080 unsigned int rotation, 2081 + bool uses_fence, 2073 2082 unsigned long *out_flags) 2074 2083 { 2075 2084 struct drm_device *dev = fb->dev; ··· 2131 2122 if (IS_ERR(vma)) 2132 2123 goto err; 2133 2124 2134 - if (i915_vma_is_map_and_fenceable(vma)) { 2125 + if (uses_fence && i915_vma_is_map_and_fenceable(vma)) { 2126 + int ret; 2127 + 2135 2128 /* Install a fence for tiled scan-out. Pre-i965 always needs a 2136 2129 * fence, whereas 965+ only requires a fence if using 2137 2130 * framebuffer compression. For simplicity, we always, when ··· 2150 2139 * something and try to run the system in a "less than optimal" 2151 2140 * mode that matches the user configuration. 2152 2141 */ 2153 - if (i915_vma_pin_fence(vma) == 0 && vma->fence) 2142 + ret = i915_vma_pin_fence(vma); 2143 + if (ret != 0 && INTEL_GEN(dev_priv) < 4) { 2144 + i915_gem_object_unpin_from_display_plane(vma); 2145 + vma = ERR_PTR(ret); 2146 + goto err; 2147 + } 2148 + 2149 + if (ret == 0 && vma->fence) 2154 2150 *out_flags |= PLANE_HAS_FENCE; 2155 2151 } 2156 2152 ··· 2846 2828 intel_state->vma = 2847 2829 intel_pin_and_fence_fb_obj(fb, 2848 2830 primary->state->rotation, 2831 + intel_plane_uses_fence(intel_state), 2849 2832 &intel_state->flags); 2850 2833 mutex_unlock(&dev->struct_mutex); 2851 2834 if (IS_ERR(intel_state->vma)) { ··· 12053 12034 int ret, i; 12054 12035 bool any_ms = false; 12055 12036 12037 + /* Catch I915_MODE_FLAG_INHERITED */ 12038 + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, 12039 + crtc_state, i) { 12040 + if (crtc_state->mode.private_flags != 12041 + old_crtc_state->mode.private_flags) 12042 + crtc_state->mode_changed = true; 12043 + } 12044 + 12056 12045 ret = drm_atomic_helper_check_modeset(dev, state); 12057 12046 if (ret) 12058 12047 return ret; ··· 12069 12042 struct intel_crtc_state *pipe_config = 12070 12043 to_intel_crtc_state(crtc_state); 12071 12044 12072 - /* Catch I915_MODE_FLAG_INHERITED */ 12073 - if (crtc_state->mode.private_flags != old_crtc_state->mode.private_flags) 12074 - crtc_state->mode_changed = true; 12075 - 12076 12045 if (!needs_modeset(crtc_state)) 12077 12046 continue; 12078 12047 ··· 12076 12053 any_ms = true; 12077 12054 continue; 12078 12055 } 12079 - 12080 - /* FIXME: For only active_changed we shouldn't need to do any 12081 - * state recomputation at all. */ 12082 - 12083 - ret = drm_atomic_add_affected_connectors(state, crtc); 12084 - if (ret) 12085 - return ret; 12086 12056 12087 12057 ret = intel_modeset_pipe_config(crtc, pipe_config); 12088 12058 if (ret) { ··· 12094 12078 12095 12079 if (needs_modeset(crtc_state)) 12096 12080 any_ms = true; 12097 - 12098 - ret = drm_atomic_add_affected_planes(state, crtc); 12099 - if (ret) 12100 - return ret; 12101 12081 12102 12082 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, 12103 12083 needs_modeset(crtc_state) ? 
··· 12612 12600 struct wait_queue_entry wait; 12613 12601 12614 12602 struct drm_crtc *crtc; 12615 - struct drm_i915_gem_request *request; 12603 + struct i915_request *request; 12616 12604 }; 12617 12605 12618 12606 static int do_rps_boost(struct wait_queue_entry *_wait, 12619 12607 unsigned mode, int sync, void *key) 12620 12608 { 12621 12609 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait); 12622 - struct drm_i915_gem_request *rq = wait->request; 12610 + struct i915_request *rq = wait->request; 12623 12611 12624 12612 /* 12625 12613 * If we missed the vblank, but the request is already running it 12626 12614 * is reasonable to assume that it will complete before the next 12627 12615 * vblank without our intervention, so leave RPS alone. 12628 12616 */ 12629 - if (!i915_gem_request_started(rq)) 12617 + if (!i915_request_started(rq)) 12630 12618 gen6_rps_boost(rq, NULL); 12631 - i915_gem_request_put(rq); 12619 + i915_request_put(rq); 12632 12620 12633 12621 drm_crtc_vblank_put(wait->crtc); 12634 12622 ··· 12664 12652 wait->wait.flags = 0; 12665 12653 12666 12654 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait); 12655 + } 12656 + 12657 + static int intel_plane_pin_fb(struct intel_plane_state *plane_state) 12658 + { 12659 + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 12660 + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 12661 + struct drm_framebuffer *fb = plane_state->base.fb; 12662 + struct i915_vma *vma; 12663 + 12664 + if (plane->id == PLANE_CURSOR && 12665 + INTEL_INFO(dev_priv)->cursor_needs_physical) { 12666 + struct drm_i915_gem_object *obj = intel_fb_obj(fb); 12667 + const int align = intel_cursor_alignment(dev_priv); 12668 + 12669 + return i915_gem_object_attach_phys(obj, align); 12670 + } 12671 + 12672 + vma = intel_pin_and_fence_fb_obj(fb, 12673 + plane_state->base.rotation, 12674 + intel_plane_uses_fence(plane_state), 12675 + &plane_state->flags); 12676 + if (IS_ERR(vma)) 12677 + return PTR_ERR(vma); 12678 + 12679 + plane_state->vma = vma; 12680 + 12681 + return 0; 12682 + } 12683 + 12684 + static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state) 12685 + { 12686 + struct i915_vma *vma; 12687 + 12688 + vma = fetch_and_zero(&old_plane_state->vma); 12689 + if (vma) 12690 + intel_unpin_fb_vma(vma, old_plane_state->flags); 12667 12691 } 12668 12692 12669 12693 /** ··· 12776 12728 return ret; 12777 12729 } 12778 12730 12779 - if (plane->type == DRM_PLANE_TYPE_CURSOR && 12780 - INTEL_INFO(dev_priv)->cursor_needs_physical) { 12781 - const int align = intel_cursor_alignment(dev_priv); 12782 - 12783 - ret = i915_gem_object_attach_phys(obj, align); 12784 - } else { 12785 - struct i915_vma *vma; 12786 - 12787 - vma = intel_pin_and_fence_fb_obj(fb, 12788 - new_state->rotation, 12789 - &to_intel_plane_state(new_state)->flags); 12790 - if (!IS_ERR(vma)) 12791 - to_intel_plane_state(new_state)->vma = vma; 12792 - else 12793 - ret = PTR_ERR(vma); 12794 - } 12731 + ret = intel_plane_pin_fb(to_intel_plane_state(new_state)); 12795 12732 12796 12733 i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY); 12797 12734 ··· 12820 12787 intel_cleanup_plane_fb(struct drm_plane *plane, 12821 12788 struct drm_plane_state *old_state) 12822 12789 { 12823 - struct i915_vma *vma; 12790 + struct drm_i915_private *dev_priv = to_i915(plane->dev); 12824 12791 12825 12792 /* Should only be called after a successful intel_prepare_plane_fb()! 
*/ 12826 - vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma); 12827 - if (vma) { 12828 - mutex_lock(&plane->dev->struct_mutex); 12829 - intel_unpin_fb_vma(vma, to_intel_plane_state(old_state)->flags); 12830 - mutex_unlock(&plane->dev->struct_mutex); 12831 - } 12793 + mutex_lock(&dev_priv->drm.struct_mutex); 12794 + intel_plane_unpin_fb(to_intel_plane_state(old_state)); 12795 + mutex_unlock(&dev_priv->drm.struct_mutex); 12832 12796 } 12833 12797 12834 12798 int ··· 13110 13080 struct intel_plane *intel_plane = to_intel_plane(plane); 13111 13081 struct drm_framebuffer *old_fb; 13112 13082 struct drm_crtc_state *crtc_state = crtc->state; 13113 - struct i915_vma *old_vma, *vma; 13114 13083 13115 13084 /* 13116 13085 * When crtc is inactive or there is a modeset pending, ··· 13168 13139 if (ret) 13169 13140 goto out_free; 13170 13141 13171 - if (INTEL_INFO(dev_priv)->cursor_needs_physical) { 13172 - int align = intel_cursor_alignment(dev_priv); 13173 - 13174 - ret = i915_gem_object_attach_phys(intel_fb_obj(fb), align); 13175 - if (ret) { 13176 - DRM_DEBUG_KMS("failed to attach phys object\n"); 13177 - goto out_unlock; 13178 - } 13179 - } else { 13180 - vma = intel_pin_and_fence_fb_obj(fb, 13181 - new_plane_state->rotation, 13182 - &to_intel_plane_state(new_plane_state)->flags); 13183 - if (IS_ERR(vma)) { 13184 - DRM_DEBUG_KMS("failed to pin object\n"); 13185 - 13186 - ret = PTR_ERR(vma); 13187 - goto out_unlock; 13188 - } 13189 - 13190 - to_intel_plane_state(new_plane_state)->vma = vma; 13191 - } 13142 + ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state)); 13143 + if (ret) 13144 + goto out_unlock; 13192 13145 13193 13146 old_fb = old_plane_state->fb; 13194 13147 ··· 13190 13179 intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc)); 13191 13180 } 13192 13181 13193 - old_vma = fetch_and_zero(&to_intel_plane_state(old_plane_state)->vma); 13194 - if (old_vma) 13195 - intel_unpin_fb_vma(old_vma, 13196 - to_intel_plane_state(old_plane_state)->flags); 13182 + intel_plane_unpin_fb(to_intel_plane_state(old_plane_state)); 13197 13183 13198 13184 out_unlock: 13199 13185 mutex_unlock(&dev_priv->drm.struct_mutex); ··· 13217 13209 .atomic_destroy_state = intel_plane_destroy_state, 13218 13210 .format_mod_supported = intel_cursor_plane_format_mod_supported, 13219 13211 }; 13212 + 13213 + static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv, 13214 + enum i9xx_plane_id i9xx_plane) 13215 + { 13216 + if (!HAS_FBC(dev_priv)) 13217 + return false; 13218 + 13219 + if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 13220 + return i9xx_plane == PLANE_A; /* tied to pipe A */ 13221 + else if (IS_IVYBRIDGE(dev_priv)) 13222 + return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B || 13223 + i9xx_plane == PLANE_C; 13224 + else if (INTEL_GEN(dev_priv) >= 4) 13225 + return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B; 13226 + else 13227 + return i9xx_plane == PLANE_A; 13228 + } 13229 + 13230 + static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv, 13231 + enum pipe pipe, enum plane_id plane_id) 13232 + { 13233 + if (!HAS_FBC(dev_priv)) 13234 + return false; 13235 + 13236 + return pipe == PIPE_A && plane_id == PLANE_PRIMARY; 13237 + } 13220 13238 13221 13239 static struct intel_plane * 13222 13240 intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) ··· 13286 13252 primary->i9xx_plane = (enum i9xx_plane_id) pipe; 13287 13253 primary->id = PLANE_PRIMARY; 13288 13254 primary->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, primary->id); 13255 + 13256 + if 
(INTEL_GEN(dev_priv) >= 9) 13257 + primary->has_fbc = skl_plane_has_fbc(dev_priv, 13258 + primary->pipe, 13259 + primary->id); 13260 + else 13261 + primary->has_fbc = i9xx_plane_has_fbc(dev_priv, 13262 + primary->i9xx_plane); 13263 + 13264 + if (primary->has_fbc) { 13265 + struct intel_fbc *fbc = &dev_priv->fbc; 13266 + 13267 + fbc->possible_framebuffer_bits |= primary->frontbuffer_bit; 13268 + } 13269 + 13289 13270 primary->check_plane = intel_check_primary_plane; 13290 13271 13291 13272 if (INTEL_GEN(dev_priv) >= 9) {
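The intel_display.c changes above add intel_plane_uses_fence() and thread an explicit uses_fence argument through intel_pin_and_fence_fb_obj(): a fence is requested only on pre-gen4 hardware (which always needs one for tiled scan-out) or for planes that can feed FBC, per the new per-plane has_fbc flag. The following is a minimal standalone sketch of that predicate for illustration only; it is plain userspace C, not kernel code, and the struct and generation numbers are stand-ins.

/* Sketch of the fence-usage decision: only pre-gen4 hardware or
 * FBC-capable planes ask for a fence when pinning the framebuffer. */
#include <stdbool.h>
#include <stdio.h>

struct fake_plane {
        bool has_fbc;
};

static bool plane_uses_fence(int gen, const struct fake_plane *plane)
{
        return gen < 4 || plane->has_fbc;
}

int main(void)
{
        struct fake_plane sprite  = { .has_fbc = false };
        struct fake_plane primary = { .has_fbc = true };

        printf("gen9 sprite:  %d\n", plane_uses_fence(9, &sprite));  /* 0: no fence needed */
        printf("gen9 primary: %d\n", plane_uses_fence(9, &primary)); /* 1: fence for FBC */
        printf("gen3 sprite:  %d\n", plane_uses_fence(3, &sprite));  /* 1: pre-gen4 always */
        return 0;
}

Skipping the fence on planes that can never use FBC keeps the limited fence registers free for scan-out that actually needs them, which is what the new argument is for.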
+12
drivers/gpu/drm/i915/intel_display.h
··· 139 139 140 140 #define I915_NUM_PHYS_VLV 2 141 141 142 + enum aux_ch { 143 + AUX_CH_A, 144 + AUX_CH_B, 145 + AUX_CH_C, 146 + AUX_CH_D, 147 + _AUX_CH_E, /* does not exist */ 148 + AUX_CH_F, 149 + }; 150 + 151 + #define aux_ch_name(a) ((a) + 'A') 152 + 142 153 enum intel_display_power_domain { 143 154 POWER_DOMAIN_PIPE_A, 144 155 POWER_DOMAIN_PIPE_B, ··· 186 175 POWER_DOMAIN_AUX_C, 187 176 POWER_DOMAIN_AUX_D, 188 177 POWER_DOMAIN_AUX_F, 178 + POWER_DOMAIN_AUX_IO_A, 189 179 POWER_DOMAIN_GMBUS, 190 180 POWER_DOMAIN_MODESET, 191 181 POWER_DOMAIN_GT_IRQ,
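The new enum aux_ch above deliberately keeps a placeholder for the nonexistent AUX E channel so the enumerators stay aligned with the channel letters, which is what makes the aux_ch_name() macro work. A quick illustrative check of that mapping, as plain userspace C rather than part of the patch:

/* aux_ch_name() maps an enum value to its letter; the _AUX_CH_E gap
 * keeps AUX_CH_F printing as 'F'. Mirrors the enum added above. */
#include <stdio.h>

enum aux_ch { AUX_CH_A, AUX_CH_B, AUX_CH_C, AUX_CH_D, _AUX_CH_E, AUX_CH_F };
#define aux_ch_name(a) ((a) + 'A')

int main(void)
{
        printf("%c %c %c\n",
               aux_ch_name(AUX_CH_A),   /* A */
               aux_ch_name(AUX_CH_D),   /* D */
               aux_ch_name(AUX_CH_F));  /* F, thanks to the placeholder for AUX E */
        return 0;
}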
+371 -426
drivers/gpu/drm/i915/intel_dp.c
··· 96 96 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } } 97 97 }; 98 98 99 - static const int bxt_rates[] = { 162000, 216000, 243000, 270000, 100 - 324000, 432000, 540000 }; 101 - static const int skl_rates[] = { 162000, 216000, 270000, 102 - 324000, 432000, 540000 }; 103 - static const int cnl_rates[] = { 162000, 216000, 270000, 104 - 324000, 432000, 540000, 105 - 648000, 810000 }; 106 - static const int default_rates[] = { 162000, 270000, 540000 }; 107 - 108 99 /** 109 100 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH) 110 101 * @intel_dp: DP struct ··· 135 144 /* update sink rates from dpcd */ 136 145 static void intel_dp_set_sink_rates(struct intel_dp *intel_dp) 137 146 { 147 + static const int dp_rates[] = { 148 + 162000, 270000, 540000, 810000 149 + }; 138 150 int i, max_rate; 139 151 140 152 max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]); 141 153 142 - for (i = 0; i < ARRAY_SIZE(default_rates); i++) { 143 - if (default_rates[i] > max_rate) 154 + for (i = 0; i < ARRAY_SIZE(dp_rates); i++) { 155 + if (dp_rates[i] > max_rate) 144 156 break; 145 - intel_dp->sink_rates[i] = default_rates[i]; 157 + intel_dp->sink_rates[i] = dp_rates[i]; 146 158 } 147 159 148 160 intel_dp->num_sink_rates = i; ··· 262 268 static void 263 269 intel_dp_set_source_rates(struct intel_dp *intel_dp) 264 270 { 271 + /* The values must be in increasing order */ 272 + static const int cnl_rates[] = { 273 + 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000 274 + }; 275 + static const int bxt_rates[] = { 276 + 162000, 216000, 243000, 270000, 324000, 432000, 540000 277 + }; 278 + static const int skl_rates[] = { 279 + 162000, 216000, 270000, 324000, 432000, 540000 280 + }; 281 + static const int hsw_rates[] = { 282 + 162000, 270000, 540000 283 + }; 284 + static const int g4x_rates[] = { 285 + 162000, 270000 286 + }; 265 287 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 266 288 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 267 289 const struct ddi_vbt_port_info *info = ··· 288 278 /* This should only be done once */ 289 279 WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates); 290 280 291 - if (IS_GEN9_LP(dev_priv)) { 292 - source_rates = bxt_rates; 293 - size = ARRAY_SIZE(bxt_rates); 294 - } else if (IS_CANNONLAKE(dev_priv)) { 281 + if (IS_CANNONLAKE(dev_priv)) { 295 282 source_rates = cnl_rates; 296 283 size = ARRAY_SIZE(cnl_rates); 297 284 max_rate = cnl_max_source_rate(intel_dp); 285 + } else if (IS_GEN9_LP(dev_priv)) { 286 + source_rates = bxt_rates; 287 + size = ARRAY_SIZE(bxt_rates); 298 288 } else if (IS_GEN9_BC(dev_priv)) { 299 289 source_rates = skl_rates; 300 290 size = ARRAY_SIZE(skl_rates); 301 291 } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) || 302 292 IS_BROADWELL(dev_priv)) { 303 - source_rates = default_rates; 304 - size = ARRAY_SIZE(default_rates); 293 + source_rates = hsw_rates; 294 + size = ARRAY_SIZE(hsw_rates); 305 295 } else { 306 - source_rates = default_rates; 307 - size = ARRAY_SIZE(default_rates) - 1; 296 + source_rates = g4x_rates; 297 + size = ARRAY_SIZE(g4x_rates); 308 298 } 309 299 310 300 if (max_rate && vbt_max_rate) ··· 366 356 367 357 /* Paranoia, there should always be something in common. 
*/ 368 358 if (WARN_ON(intel_dp->num_common_rates == 0)) { 369 - intel_dp->common_rates[0] = default_rates[0]; 359 + intel_dp->common_rates[0] = 162000; 370 360 intel_dp->num_common_rates = 1; 371 361 } 372 362 } ··· 666 656 bxt_power_sequencer_idx(struct intel_dp *intel_dp) 667 657 { 668 658 struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); 659 + int backlight_controller = dev_priv->vbt.backlight.controller; 669 660 670 661 lockdep_assert_held(&dev_priv->pps_mutex); 671 662 672 663 /* We should never land here with regular DP ports */ 673 664 WARN_ON(!intel_dp_is_edp(intel_dp)); 674 665 675 - /* 676 - * TODO: BXT has 2 PPS instances. The correct port->PPS instance 677 - * mapping needs to be retrieved from VBT, for now just hard-code to 678 - * use instance #0 always. 679 - */ 680 666 if (!intel_dp->pps_reset) 681 - return 0; 667 + return backlight_controller; 682 668 683 669 intel_dp->pps_reset = false; 684 670 ··· 684 678 */ 685 679 intel_dp_init_panel_power_sequencer_registers(intel_dp, false); 686 680 687 - return 0; 681 + return backlight_controller; 688 682 } 689 683 690 684 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv, ··· 942 936 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) 943 937 { 944 938 struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); 945 - i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg; 939 + i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp); 946 940 uint32_t status; 947 941 bool done; 948 942 ··· 962 956 963 957 static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 964 958 { 965 - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 966 - struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); 959 + struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); 967 960 968 961 if (index) 969 962 return 0; ··· 976 971 977 972 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 978 973 { 979 - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 980 - struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); 974 + struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); 981 975 982 976 if (index) 983 977 return 0; ··· 986 982 * like to run at 2MHz. 
So, take the cdclk or PCH rawclk value and 987 983 * divide by 2000 and use that 988 984 */ 989 - if (intel_dig_port->base.port == PORT_A) 985 + if (intel_dp->aux_ch == AUX_CH_A) 990 986 return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000); 991 987 else 992 988 return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000); ··· 994 990 995 991 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 996 992 { 997 - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 998 - struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); 993 + struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); 999 994 1000 - if (intel_dig_port->base.port != PORT_A && HAS_PCH_LPT_H(dev_priv)) { 995 + if (intel_dp->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) { 1001 996 /* Workaround for non-ULT HSW */ 1002 997 switch (index) { 1003 998 case 0: return 63; ··· 1065 1062 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); 1066 1063 } 1067 1064 1068 - static uint32_t intel_dp_get_aux_send_ctl(struct intel_dp *intel_dp, 1069 - bool has_aux_irq, 1070 - int send_bytes, 1071 - uint32_t aux_clock_divider, 1072 - bool aksv_write) 1073 - { 1074 - uint32_t val = 0; 1075 - 1076 - if (aksv_write) { 1077 - send_bytes += 5; 1078 - val |= DP_AUX_CH_CTL_AUX_AKSV_SELECT; 1079 - } 1080 - 1081 - return val | intel_dp->get_aux_send_ctl(intel_dp, 1082 - has_aux_irq, 1083 - send_bytes, 1084 - aux_clock_divider); 1085 - } 1086 - 1087 1065 static int 1088 - intel_dp_aux_ch(struct intel_dp *intel_dp, 1089 - const uint8_t *send, int send_bytes, 1090 - uint8_t *recv, int recv_size, bool aksv_write) 1066 + intel_dp_aux_xfer(struct intel_dp *intel_dp, 1067 + const uint8_t *send, int send_bytes, 1068 + uint8_t *recv, int recv_size, 1069 + u32 aux_send_ctl_flags) 1091 1070 { 1092 1071 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1093 1072 struct drm_i915_private *dev_priv = 1094 1073 to_i915(intel_dig_port->base.base.dev); 1095 - i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg; 1074 + i915_reg_t ch_ctl, ch_data[5]; 1096 1075 uint32_t aux_clock_divider; 1097 1076 int i, ret, recv_bytes; 1098 1077 uint32_t status; 1099 1078 int try, clock = 0; 1100 1079 bool has_aux_irq = HAS_AUX_IRQ(dev_priv); 1101 1080 bool vdd; 1081 + 1082 + ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp); 1083 + for (i = 0; i < ARRAY_SIZE(ch_data); i++) 1084 + ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i); 1102 1085 1103 1086 pps_lock(intel_dp); 1104 1087 ··· 1133 1144 } 1134 1145 1135 1146 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) { 1136 - u32 send_ctl = intel_dp_get_aux_send_ctl(intel_dp, 1137 - has_aux_irq, 1138 - send_bytes, 1139 - aux_clock_divider, 1140 - aksv_write); 1147 + u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp, 1148 + has_aux_irq, 1149 + send_bytes, 1150 + aux_clock_divider); 1151 + 1152 + send_ctl |= aux_send_ctl_flags; 1141 1153 1142 1154 /* Must try at least 3 times according to DP spec */ 1143 1155 for (try = 0; try < 5; try++) { 1144 1156 /* Load the send data into the aux channel data registers */ 1145 1157 for (i = 0; i < send_bytes; i += 4) 1146 - I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2], 1158 + I915_WRITE(ch_data[i >> 2], 1147 1159 intel_dp_pack_aux(send + i, 1148 1160 send_bytes - i)); 1149 1161 ··· 1160 1170 DP_AUX_CH_CTL_TIME_OUT_ERROR | 1161 1171 DP_AUX_CH_CTL_RECEIVE_ERROR); 1162 1172 1163 - if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) 1164 - continue; 1165 - 1166 1173 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2 1167 1174 
* 400us delay required for errors and timeouts 1168 1175 * Timeout errors from the HW already meet this 1169 1176 * requirement so skip to next iteration 1170 1177 */ 1178 + if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) 1179 + continue; 1180 + 1171 1181 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { 1172 1182 usleep_range(400, 500); 1173 1183 continue; ··· 1213 1223 if (recv_bytes == 0 || recv_bytes > 20) { 1214 1224 DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n", 1215 1225 recv_bytes); 1216 - /* 1217 - * FIXME: This patch was created on top of a series that 1218 - * organize the retries at drm level. There EBUSY should 1219 - * also take care for 1ms wait before retrying. 1220 - * That aux retries re-org is still needed and after that is 1221 - * merged we remove this sleep from here. 1222 - */ 1223 - usleep_range(1000, 1500); 1224 1226 ret = -EBUSY; 1225 1227 goto out; 1226 1228 } ··· 1221 1239 recv_bytes = recv_size; 1222 1240 1223 1241 for (i = 0; i < recv_bytes; i += 4) 1224 - intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]), 1242 + intel_dp_unpack_aux(I915_READ(ch_data[i >> 2]), 1225 1243 recv + i, recv_bytes - i); 1226 1244 1227 1245 ret = recv_bytes; ··· 1238 1256 1239 1257 #define BARE_ADDRESS_SIZE 3 1240 1258 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1) 1259 + 1260 + static void 1261 + intel_dp_aux_header(u8 txbuf[HEADER_SIZE], 1262 + const struct drm_dp_aux_msg *msg) 1263 + { 1264 + txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf); 1265 + txbuf[1] = (msg->address >> 8) & 0xff; 1266 + txbuf[2] = msg->address & 0xff; 1267 + txbuf[3] = msg->size - 1; 1268 + } 1269 + 1241 1270 static ssize_t 1242 1271 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) 1243 1272 { ··· 1257 1264 size_t txsize, rxsize; 1258 1265 int ret; 1259 1266 1260 - txbuf[0] = (msg->request << 4) | 1261 - ((msg->address >> 16) & 0xf); 1262 - txbuf[1] = (msg->address >> 8) & 0xff; 1263 - txbuf[2] = msg->address & 0xff; 1264 - txbuf[3] = msg->size - 1; 1267 + intel_dp_aux_header(txbuf, msg); 1265 1268 1266 1269 switch (msg->request & ~DP_AUX_I2C_MOT) { 1267 1270 case DP_AUX_NATIVE_WRITE: ··· 1274 1285 if (msg->buffer) 1275 1286 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size); 1276 1287 1277 - ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize, 1278 - false); 1288 + ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize, 1289 + rxbuf, rxsize, 0); 1279 1290 if (ret > 0) { 1280 1291 msg->reply = rxbuf[0] >> 4; 1281 1292 ··· 1297 1308 if (WARN_ON(rxsize > 20)) 1298 1309 return -E2BIG; 1299 1310 1300 - ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize, 1301 - false); 1311 + ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize, 1312 + rxbuf, rxsize, 0); 1302 1313 if (ret > 0) { 1303 1314 msg->reply = rxbuf[0] >> 4; 1304 1315 /* ··· 1320 1331 return ret; 1321 1332 } 1322 1333 1323 - static enum port intel_aux_port(struct drm_i915_private *dev_priv, 1324 - enum port port) 1334 + static enum aux_ch intel_aux_ch(struct intel_dp *intel_dp) 1325 1335 { 1336 + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 1337 + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1338 + enum port port = encoder->port; 1326 1339 const struct ddi_vbt_port_info *info = 1327 1340 &dev_priv->vbt.ddi_port_info[port]; 1328 - enum port aux_port; 1341 + enum aux_ch aux_ch; 1329 1342 1330 1343 if (!info->alternate_aux_channel) { 1344 + aux_ch = (enum aux_ch) port; 1345 + 1331 1346 DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n", 1332 - 
port_name(port), port_name(port)); 1333 - return port; 1347 + aux_ch_name(aux_ch), port_name(port)); 1348 + return aux_ch; 1334 1349 } 1335 1350 1336 1351 switch (info->alternate_aux_channel) { 1337 1352 case DP_AUX_A: 1338 - aux_port = PORT_A; 1353 + aux_ch = AUX_CH_A; 1339 1354 break; 1340 1355 case DP_AUX_B: 1341 - aux_port = PORT_B; 1356 + aux_ch = AUX_CH_B; 1342 1357 break; 1343 1358 case DP_AUX_C: 1344 - aux_port = PORT_C; 1359 + aux_ch = AUX_CH_C; 1345 1360 break; 1346 1361 case DP_AUX_D: 1347 - aux_port = PORT_D; 1362 + aux_ch = AUX_CH_D; 1348 1363 break; 1349 1364 case DP_AUX_F: 1350 - aux_port = PORT_F; 1365 + aux_ch = AUX_CH_F; 1351 1366 break; 1352 1367 default: 1353 1368 MISSING_CASE(info->alternate_aux_channel); 1354 - aux_port = PORT_A; 1369 + aux_ch = AUX_CH_A; 1355 1370 break; 1356 1371 } 1357 1372 1358 1373 DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n", 1359 - port_name(aux_port), port_name(port)); 1374 + aux_ch_name(aux_ch), port_name(port)); 1360 1375 1361 - return aux_port; 1376 + return aux_ch; 1362 1377 } 1363 1378 1364 - static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv, 1365 - enum port port) 1379 + static enum intel_display_power_domain 1380 + intel_aux_power_domain(struct intel_dp *intel_dp) 1366 1381 { 1367 - switch (port) { 1368 - case PORT_B: 1369 - case PORT_C: 1370 - case PORT_D: 1371 - return DP_AUX_CH_CTL(port); 1382 + switch (intel_dp->aux_ch) { 1383 + case AUX_CH_A: 1384 + return POWER_DOMAIN_AUX_A; 1385 + case AUX_CH_B: 1386 + return POWER_DOMAIN_AUX_B; 1387 + case AUX_CH_C: 1388 + return POWER_DOMAIN_AUX_C; 1389 + case AUX_CH_D: 1390 + return POWER_DOMAIN_AUX_D; 1391 + case AUX_CH_F: 1392 + return POWER_DOMAIN_AUX_F; 1372 1393 default: 1373 - MISSING_CASE(port); 1374 - return DP_AUX_CH_CTL(PORT_B); 1394 + MISSING_CASE(intel_dp->aux_ch); 1395 + return POWER_DOMAIN_AUX_A; 1375 1396 } 1376 1397 } 1377 1398 1378 - static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv, 1379 - enum port port, int index) 1380 - { 1381 - switch (port) { 1382 - case PORT_B: 1383 - case PORT_C: 1384 - case PORT_D: 1385 - return DP_AUX_CH_DATA(port, index); 1386 - default: 1387 - MISSING_CASE(port); 1388 - return DP_AUX_CH_DATA(PORT_B, index); 1389 - } 1390 - } 1391 - 1392 - static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv, 1393 - enum port port) 1394 - { 1395 - switch (port) { 1396 - case PORT_A: 1397 - return DP_AUX_CH_CTL(port); 1398 - case PORT_B: 1399 - case PORT_C: 1400 - case PORT_D: 1401 - return PCH_DP_AUX_CH_CTL(port); 1402 - default: 1403 - MISSING_CASE(port); 1404 - return DP_AUX_CH_CTL(PORT_A); 1405 - } 1406 - } 1407 - 1408 - static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv, 1409 - enum port port, int index) 1410 - { 1411 - switch (port) { 1412 - case PORT_A: 1413 - return DP_AUX_CH_DATA(port, index); 1414 - case PORT_B: 1415 - case PORT_C: 1416 - case PORT_D: 1417 - return PCH_DP_AUX_CH_DATA(port, index); 1418 - default: 1419 - MISSING_CASE(port); 1420 - return DP_AUX_CH_DATA(PORT_A, index); 1421 - } 1422 - } 1423 - 1424 - static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv, 1425 - enum port port) 1426 - { 1427 - switch (port) { 1428 - case PORT_A: 1429 - case PORT_B: 1430 - case PORT_C: 1431 - case PORT_D: 1432 - case PORT_F: 1433 - return DP_AUX_CH_CTL(port); 1434 - default: 1435 - MISSING_CASE(port); 1436 - return DP_AUX_CH_CTL(PORT_A); 1437 - } 1438 - } 1439 - 1440 - static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv, 1441 - enum port port, int index) 
1442 - { 1443 - switch (port) { 1444 - case PORT_A: 1445 - case PORT_B: 1446 - case PORT_C: 1447 - case PORT_D: 1448 - case PORT_F: 1449 - return DP_AUX_CH_DATA(port, index); 1450 - default: 1451 - MISSING_CASE(port); 1452 - return DP_AUX_CH_DATA(PORT_A, index); 1453 - } 1454 - } 1455 - 1456 - static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv, 1457 - enum port port) 1458 - { 1459 - if (INTEL_GEN(dev_priv) >= 9) 1460 - return skl_aux_ctl_reg(dev_priv, port); 1461 - else if (HAS_PCH_SPLIT(dev_priv)) 1462 - return ilk_aux_ctl_reg(dev_priv, port); 1463 - else 1464 - return g4x_aux_ctl_reg(dev_priv, port); 1465 - } 1466 - 1467 - static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv, 1468 - enum port port, int index) 1469 - { 1470 - if (INTEL_GEN(dev_priv) >= 9) 1471 - return skl_aux_data_reg(dev_priv, port, index); 1472 - else if (HAS_PCH_SPLIT(dev_priv)) 1473 - return ilk_aux_data_reg(dev_priv, port, index); 1474 - else 1475 - return g4x_aux_data_reg(dev_priv, port, index); 1476 - } 1477 - 1478 - static void intel_aux_reg_init(struct intel_dp *intel_dp) 1399 + static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp) 1479 1400 { 1480 1401 struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); 1481 - enum port port = intel_aux_port(dev_priv, 1482 - dp_to_dig_port(intel_dp)->base.port); 1483 - int i; 1402 + enum aux_ch aux_ch = intel_dp->aux_ch; 1484 1403 1485 - intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port); 1486 - for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++) 1487 - intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i); 1404 + switch (aux_ch) { 1405 + case AUX_CH_B: 1406 + case AUX_CH_C: 1407 + case AUX_CH_D: 1408 + return DP_AUX_CH_CTL(aux_ch); 1409 + default: 1410 + MISSING_CASE(aux_ch); 1411 + return DP_AUX_CH_CTL(AUX_CH_B); 1412 + } 1413 + } 1414 + 1415 + static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index) 1416 + { 1417 + struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); 1418 + enum aux_ch aux_ch = intel_dp->aux_ch; 1419 + 1420 + switch (aux_ch) { 1421 + case AUX_CH_B: 1422 + case AUX_CH_C: 1423 + case AUX_CH_D: 1424 + return DP_AUX_CH_DATA(aux_ch, index); 1425 + default: 1426 + MISSING_CASE(aux_ch); 1427 + return DP_AUX_CH_DATA(AUX_CH_B, index); 1428 + } 1429 + } 1430 + 1431 + static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp) 1432 + { 1433 + struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); 1434 + enum aux_ch aux_ch = intel_dp->aux_ch; 1435 + 1436 + switch (aux_ch) { 1437 + case AUX_CH_A: 1438 + return DP_AUX_CH_CTL(aux_ch); 1439 + case AUX_CH_B: 1440 + case AUX_CH_C: 1441 + case AUX_CH_D: 1442 + return PCH_DP_AUX_CH_CTL(aux_ch); 1443 + default: 1444 + MISSING_CASE(aux_ch); 1445 + return DP_AUX_CH_CTL(AUX_CH_A); 1446 + } 1447 + } 1448 + 1449 + static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index) 1450 + { 1451 + struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); 1452 + enum aux_ch aux_ch = intel_dp->aux_ch; 1453 + 1454 + switch (aux_ch) { 1455 + case AUX_CH_A: 1456 + return DP_AUX_CH_DATA(aux_ch, index); 1457 + case AUX_CH_B: 1458 + case AUX_CH_C: 1459 + case AUX_CH_D: 1460 + return PCH_DP_AUX_CH_DATA(aux_ch, index); 1461 + default: 1462 + MISSING_CASE(aux_ch); 1463 + return DP_AUX_CH_DATA(AUX_CH_A, index); 1464 + } 1465 + } 1466 + 1467 + static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp) 1468 + { 1469 + struct drm_i915_private *dev_priv = 
to_i915(intel_dp_to_dev(intel_dp)); 1470 + enum aux_ch aux_ch = intel_dp->aux_ch; 1471 + 1472 + switch (aux_ch) { 1473 + case AUX_CH_A: 1474 + case AUX_CH_B: 1475 + case AUX_CH_C: 1476 + case AUX_CH_D: 1477 + case AUX_CH_F: 1478 + return DP_AUX_CH_CTL(aux_ch); 1479 + default: 1480 + MISSING_CASE(aux_ch); 1481 + return DP_AUX_CH_CTL(AUX_CH_A); 1482 + } 1483 + } 1484 + 1485 + static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index) 1486 + { 1487 + struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); 1488 + enum aux_ch aux_ch = intel_dp->aux_ch; 1489 + 1490 + switch (aux_ch) { 1491 + case AUX_CH_A: 1492 + case AUX_CH_B: 1493 + case AUX_CH_C: 1494 + case AUX_CH_D: 1495 + case AUX_CH_F: 1496 + return DP_AUX_CH_DATA(aux_ch, index); 1497 + default: 1498 + MISSING_CASE(aux_ch); 1499 + return DP_AUX_CH_DATA(AUX_CH_A, index); 1500 + } 1488 1501 } 1489 1502 1490 1503 static void ··· 1498 1507 static void 1499 1508 intel_dp_aux_init(struct intel_dp *intel_dp) 1500 1509 { 1501 - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1502 - enum port port = intel_dig_port->base.port; 1510 + struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); 1511 + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 1503 1512 1504 - intel_aux_reg_init(intel_dp); 1513 + intel_dp->aux_ch = intel_aux_ch(intel_dp); 1514 + intel_dp->aux_power_domain = intel_aux_power_domain(intel_dp); 1515 + 1516 + if (INTEL_GEN(dev_priv) >= 9) { 1517 + intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg; 1518 + intel_dp->aux_ch_data_reg = skl_aux_data_reg; 1519 + } else if (HAS_PCH_SPLIT(dev_priv)) { 1520 + intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg; 1521 + intel_dp->aux_ch_data_reg = ilk_aux_data_reg; 1522 + } else { 1523 + intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg; 1524 + intel_dp->aux_ch_data_reg = g4x_aux_data_reg; 1525 + } 1526 + 1527 + if (INTEL_GEN(dev_priv) >= 9) 1528 + intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider; 1529 + else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 1530 + intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider; 1531 + else if (HAS_PCH_SPLIT(dev_priv)) 1532 + intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider; 1533 + else 1534 + intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider; 1535 + 1536 + if (INTEL_GEN(dev_priv) >= 9) 1537 + intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl; 1538 + else 1539 + intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl; 1540 + 1505 1541 drm_dp_aux_init(&intel_dp->aux); 1506 1542 1507 1543 /* Failure to allocate our preferred name is not critical */ 1508 - intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port)); 1544 + intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", 1545 + port_name(encoder->port)); 1509 1546 intel_dp->aux.transfer = intel_dp_aux_transfer; 1510 1547 } 1511 1548 ··· 1913 1894 int link_rate, uint8_t lane_count, 1914 1895 bool link_mst) 1915 1896 { 1897 + intel_dp->link_trained = false; 1916 1898 intel_dp->link_rate = link_rate; 1917 1899 intel_dp->lane_count = lane_count; 1918 1900 intel_dp->link_mst = link_mst; ··· 2762 2742 { 2763 2743 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2764 2744 2745 + intel_dp->link_trained = false; 2746 + 2765 2747 if (old_crtc_state->has_audio) 2766 2748 intel_audio_codec_disable(encoder, 2767 2749 old_crtc_state, old_conn_state); ··· 3192 3170 { 3193 3171 return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status, 3194 3172 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE; 
3195 - } 3196 - 3197 - static bool intel_dp_get_y_cord_status(struct intel_dp *intel_dp) 3198 - { 3199 - uint8_t psr_caps = 0; 3200 - 3201 - if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps) != 1) 3202 - return false; 3203 - return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED; 3204 - } 3205 - 3206 - static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) 3207 - { 3208 - uint8_t dprx = 0; 3209 - 3210 - if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST, 3211 - &dprx) != 1) 3212 - return false; 3213 - return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED; 3214 - } 3215 - 3216 - static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp) 3217 - { 3218 - uint8_t alpm_caps = 0; 3219 - 3220 - if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, 3221 - &alpm_caps) != 1) 3222 - return false; 3223 - return alpm_caps & DP_ALPM_CAP; 3224 3173 } 3225 3174 3226 3175 /* These are source-specific values. */ ··· 3744 3751 dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] & 3745 3752 DP_NO_AUX_HANDSHAKE_LINK_TRAINING; 3746 3753 3747 - /* Check if the panel supports PSR */ 3748 - drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, 3749 - intel_dp->psr_dpcd, 3750 - sizeof(intel_dp->psr_dpcd)); 3751 - if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) { 3752 - dev_priv->psr.sink_support = true; 3753 - DRM_DEBUG_KMS("Detected EDP PSR Panel.\n"); 3754 - } 3755 - 3756 - if (INTEL_GEN(dev_priv) >= 9 && 3757 - (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) { 3758 - uint8_t frame_sync_cap; 3759 - 3760 - dev_priv->psr.sink_support = true; 3761 - if (drm_dp_dpcd_readb(&intel_dp->aux, 3762 - DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP, 3763 - &frame_sync_cap) != 1) 3764 - frame_sync_cap = 0; 3765 - dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false; 3766 - /* PSR2 needs frame sync as well */ 3767 - dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync; 3768 - DRM_DEBUG_KMS("PSR2 %s on sink", 3769 - dev_priv->psr.psr2_support ? "supported" : "not supported"); 3770 - 3771 - if (dev_priv->psr.psr2_support) { 3772 - dev_priv->psr.y_cord_support = 3773 - intel_dp_get_y_cord_status(intel_dp); 3774 - dev_priv->psr.colorimetry_support = 3775 - intel_dp_get_colorimetry_status(intel_dp); 3776 - dev_priv->psr.alpm = 3777 - intel_dp_get_alpm_status(intel_dp); 3778 - } 3779 - 3780 - } 3754 + intel_psr_init_dpcd(intel_dp); 3781 3755 3782 3756 /* 3783 3757 * Read the eDP display control registers. ··· 4275 4315 return -EINVAL; 4276 4316 } 4277 4317 4278 - static void 4279 - intel_dp_retrain_link(struct intel_dp *intel_dp) 4318 + static bool 4319 + intel_dp_needs_link_retrain(struct intel_dp *intel_dp) 4280 4320 { 4281 - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 4321 + u8 link_status[DP_LINK_STATUS_SIZE]; 4322 + 4323 + if (!intel_dp->link_trained) 4324 + return false; 4325 + 4326 + if (!intel_dp_get_link_status(intel_dp, link_status)) 4327 + return false; 4328 + 4329 + /* 4330 + * Validate the cached values of intel_dp->link_rate and 4331 + * intel_dp->lane_count before attempting to retrain. 4332 + */ 4333 + if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate, 4334 + intel_dp->lane_count)) 4335 + return false; 4336 + 4337 + /* Retrain if Channel EQ or CR not ok */ 4338 + return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); 4339 + } 4340 + 4341 + /* 4342 + * If display is now connected check links status, 4343 + * there has been known issues of link loss triggering 4344 + * long pulse. 4345 + * 4346 + * Some sinks (eg. 
ASUS PB287Q) seem to perform some 4347 + * weird HPD ping pong during modesets. So we can apparently 4348 + * end up with HPD going low during a modeset, and then 4349 + * going back up soon after. And once that happens we must 4350 + * retrain the link to get a picture. That's in case no 4351 + * userspace component reacted to intermittent HPD dip. 4352 + */ 4353 + int intel_dp_retrain_link(struct intel_encoder *encoder, 4354 + struct drm_modeset_acquire_ctx *ctx) 4355 + { 4282 4356 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4283 - struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 4357 + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 4358 + struct intel_connector *connector = intel_dp->attached_connector; 4359 + struct drm_connector_state *conn_state; 4360 + struct intel_crtc_state *crtc_state; 4361 + struct intel_crtc *crtc; 4362 + int ret; 4363 + 4364 + /* FIXME handle the MST connectors as well */ 4365 + 4366 + if (!connector || connector->base.status != connector_status_connected) 4367 + return 0; 4368 + 4369 + ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, 4370 + ctx); 4371 + if (ret) 4372 + return ret; 4373 + 4374 + conn_state = connector->base.state; 4375 + 4376 + crtc = to_intel_crtc(conn_state->crtc); 4377 + if (!crtc) 4378 + return 0; 4379 + 4380 + ret = drm_modeset_lock(&crtc->base.mutex, ctx); 4381 + if (ret) 4382 + return ret; 4383 + 4384 + crtc_state = to_intel_crtc_state(crtc->base.state); 4385 + 4386 + WARN_ON(!intel_crtc_has_dp_encoder(crtc_state)); 4387 + 4388 + if (!crtc_state->base.active) 4389 + return 0; 4390 + 4391 + if (conn_state->commit && 4392 + !try_wait_for_completion(&conn_state->commit->hw_done)) 4393 + return 0; 4394 + 4395 + if (!intel_dp_needs_link_retrain(intel_dp)) 4396 + return 0; 4284 4397 4285 4398 /* Suppress underruns caused by re-training */ 4286 4399 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); ··· 4371 4338 if (crtc->config->has_pch_encoder) 4372 4339 intel_set_pch_fifo_underrun_reporting(dev_priv, 4373 4340 intel_crtc_pch_transcoder(crtc), true); 4341 + 4342 + return 0; 4374 4343 } 4375 4344 4376 - static void 4377 - intel_dp_check_link_status(struct intel_dp *intel_dp) 4345 + /* 4346 + * If display is now connected check links status, 4347 + * there has been known issues of link loss triggering 4348 + * long pulse. 4349 + * 4350 + * Some sinks (eg. ASUS PB287Q) seem to perform some 4351 + * weird HPD ping pong during modesets. So we can apparently 4352 + * end up with HPD going low during a modeset, and then 4353 + * going back up soon after. And once that happens we must 4354 + * retrain the link to get a picture. That's in case no 4355 + * userspace component reacted to intermittent HPD dip. 
4356 + */ 4357 + static bool intel_dp_hotplug(struct intel_encoder *encoder, 4358 + struct intel_connector *connector) 4378 4359 { 4379 - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); 4380 - struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; 4381 - struct drm_connector_state *conn_state = 4382 - intel_dp->attached_connector->base.state; 4383 - u8 link_status[DP_LINK_STATUS_SIZE]; 4360 + struct drm_modeset_acquire_ctx ctx; 4361 + bool changed; 4362 + int ret; 4384 4363 4385 - WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); 4364 + changed = intel_encoder_hotplug(encoder, connector); 4386 4365 4387 - if (!intel_dp_get_link_status(intel_dp, link_status)) { 4388 - DRM_ERROR("Failed to get link status\n"); 4389 - return; 4366 + drm_modeset_acquire_init(&ctx, 0); 4367 + 4368 + for (;;) { 4369 + ret = intel_dp_retrain_link(encoder, &ctx); 4370 + 4371 + if (ret == -EDEADLK) { 4372 + drm_modeset_backoff(&ctx); 4373 + continue; 4374 + } 4375 + 4376 + break; 4390 4377 } 4391 4378 4392 - if (!conn_state->crtc) 4393 - return; 4379 + drm_modeset_drop_locks(&ctx); 4380 + drm_modeset_acquire_fini(&ctx); 4381 + WARN(ret, "Acquiring modeset locks failed with %i\n", ret); 4394 4382 4395 - WARN_ON(!drm_modeset_is_locked(&conn_state->crtc->mutex)); 4396 - 4397 - if (!conn_state->crtc->state->active) 4398 - return; 4399 - 4400 - if (conn_state->commit && 4401 - !try_wait_for_completion(&conn_state->commit->hw_done)) 4402 - return; 4403 - 4404 - /* 4405 - * Validate the cached values of intel_dp->link_rate and 4406 - * intel_dp->lane_count before attempting to retrain. 4407 - */ 4408 - if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate, 4409 - intel_dp->lane_count)) 4410 - return; 4411 - 4412 - /* Retrain if Channel EQ or CR not ok */ 4413 - if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { 4414 - DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", 4415 - intel_encoder->base.name); 4416 - 4417 - intel_dp_retrain_link(intel_dp); 4418 - } 4383 + return changed; 4419 4384 } 4420 4385 4421 4386 /* ··· 4471 4440 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); 4472 4441 } 4473 4442 4474 - intel_dp_check_link_status(intel_dp); 4443 + /* defer to the hotplug work for link retraining if needed */ 4444 + if (intel_dp_needs_link_retrain(intel_dp)) 4445 + return false; 4475 4446 4476 4447 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { 4477 4448 DRM_DEBUG_KMS("Link Training Compliance Test requested\n"); ··· 4858 4825 */ 4859 4826 status = connector_status_disconnected; 4860 4827 goto out; 4861 - } else { 4862 - /* 4863 - * If display is now connected check links status, 4864 - * there has been known issues of link loss triggerring 4865 - * long pulse. 4866 - * 4867 - * Some sinks (eg. ASUS PB287Q) seem to perform some 4868 - * weird HPD ping pong during modesets. So we can apparently 4869 - * end up with HPD going low during a modeset, and then 4870 - * going back up soon after. And once that happens we must 4871 - * retrain the link to get a picture. That's in case no 4872 - * userspace component reacted to intermittent HPD dip. 
4873 - */ 4874 - intel_dp_check_link_status(intel_dp); 4875 4828 } 4876 4829 4877 4830 /* ··· 5073 5054 u8 *an) 5074 5055 { 5075 5056 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base); 5076 - uint8_t txbuf[4], rxbuf[2], reply = 0; 5057 + static const struct drm_dp_aux_msg msg = { 5058 + .request = DP_AUX_NATIVE_WRITE, 5059 + .address = DP_AUX_HDCP_AKSV, 5060 + .size = DRM_HDCP_KSV_LEN, 5061 + }; 5062 + uint8_t txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0; 5077 5063 ssize_t dpcd_ret; 5078 5064 int ret; 5079 5065 ··· 5096 5072 * we were writing the data, and then tickle the hardware to output the 5097 5073 * data once the header is sent out. 5098 5074 */ 5099 - txbuf[0] = (DP_AUX_NATIVE_WRITE << 4) | 5100 - ((DP_AUX_HDCP_AKSV >> 16) & 0xf); 5101 - txbuf[1] = (DP_AUX_HDCP_AKSV >> 8) & 0xff; 5102 - txbuf[2] = DP_AUX_HDCP_AKSV & 0xff; 5103 - txbuf[3] = DRM_HDCP_KSV_LEN - 1; 5075 + intel_dp_aux_header(txbuf, &msg); 5104 5076 5105 - ret = intel_dp_aux_ch(intel_dp, txbuf, sizeof(txbuf), rxbuf, 5106 - sizeof(rxbuf), true); 5077 + ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size, 5078 + rxbuf, sizeof(rxbuf), 5079 + DP_AUX_CH_CTL_AUX_AKSV_SELECT); 5107 5080 if (ret < 0) { 5108 5081 DRM_ERROR("Write Aksv over DP/AUX failed (%d)\n", ret); 5109 5082 return ret; ··· 5434 5413 } 5435 5414 5436 5415 if (!intel_dp->is_mst) { 5437 - struct drm_modeset_acquire_ctx ctx; 5438 - struct drm_connector *connector = &intel_dp->attached_connector->base; 5439 - struct drm_crtc *crtc; 5440 - int iret; 5441 - bool handled = false; 5442 - 5443 - drm_modeset_acquire_init(&ctx, 0); 5444 - retry: 5445 - iret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, &ctx); 5446 - if (iret) 5447 - goto err; 5448 - 5449 - crtc = connector->state->crtc; 5450 - if (crtc) { 5451 - iret = drm_modeset_lock(&crtc->mutex, &ctx); 5452 - if (iret) 5453 - goto err; 5454 - } 5416 + bool handled; 5455 5417 5456 5418 handled = intel_dp_short_pulse(intel_dp); 5457 - 5458 - err: 5459 - if (iret == -EDEADLK) { 5460 - drm_modeset_backoff(&ctx); 5461 - goto retry; 5462 - } 5463 - 5464 - drm_modeset_drop_locks(&ctx); 5465 - drm_modeset_acquire_fini(&ctx); 5466 - WARN(iret, "Acquiring modeset locks failed with %i\n", iret); 5467 5419 5468 5420 /* Short pulse can signify loss of hdcp authentication */ 5469 5421 intel_hdcp_check_link(intel_dp->attached_connector); ··· 6260 6266 return false; 6261 6267 } 6262 6268 6263 - /* Set up the hotplug pin and aux power domain. 
*/ 6264 - static void 6265 - intel_dp_init_connector_port_info(struct intel_digital_port *intel_dig_port) 6266 - { 6267 - struct intel_encoder *encoder = &intel_dig_port->base; 6268 - struct intel_dp *intel_dp = &intel_dig_port->dp; 6269 - struct intel_encoder *intel_encoder = &intel_dig_port->base; 6270 - struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); 6271 - 6272 - encoder->hpd_pin = intel_hpd_pin_default(dev_priv, encoder->port); 6273 - 6274 - switch (encoder->port) { 6275 - case PORT_A: 6276 - intel_dp->aux_power_domain = POWER_DOMAIN_AUX_A; 6277 - break; 6278 - case PORT_B: 6279 - intel_dp->aux_power_domain = POWER_DOMAIN_AUX_B; 6280 - break; 6281 - case PORT_C: 6282 - intel_dp->aux_power_domain = POWER_DOMAIN_AUX_C; 6283 - break; 6284 - case PORT_D: 6285 - intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D; 6286 - break; 6287 - case PORT_E: 6288 - /* FIXME: Check VBT for actual wiring of PORT E */ 6289 - intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D; 6290 - break; 6291 - case PORT_F: 6292 - intel_dp->aux_power_domain = POWER_DOMAIN_AUX_F; 6293 - break; 6294 - default: 6295 - MISSING_CASE(encoder->port); 6296 - } 6297 - } 6298 - 6299 6269 static void intel_dp_modeset_retry_work_fn(struct work_struct *work) 6300 6270 { 6301 6271 struct intel_connector *intel_connector; ··· 6311 6353 intel_dp->active_pipe = INVALID_PIPE; 6312 6354 6313 6355 /* intel_dp vfuncs */ 6314 - if (INTEL_GEN(dev_priv) >= 9) 6315 - intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider; 6316 - else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 6317 - intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider; 6318 - else if (HAS_PCH_SPLIT(dev_priv)) 6319 - intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider; 6320 - else 6321 - intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider; 6322 - 6323 - if (INTEL_GEN(dev_priv) >= 9) 6324 - intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl; 6325 - else 6326 - intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl; 6327 - 6328 6356 if (HAS_DDI(dev_priv)) 6329 6357 intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain; 6330 6358 ··· 6351 6407 connector->interlace_allowed = true; 6352 6408 connector->doublescan_allowed = 0; 6353 6409 6354 - intel_dp_init_connector_port_info(intel_dig_port); 6410 + intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); 6355 6411 6356 6412 intel_dp_aux_init(intel_dp); 6357 6413 ··· 6428 6484 "DP %c", port_name(port))) 6429 6485 goto err_encoder_init; 6430 6486 6487 + intel_encoder->hotplug = intel_dp_hotplug; 6431 6488 intel_encoder->compute_config = intel_dp_compute_config; 6432 6489 intel_encoder->get_hw_state = intel_dp_get_hw_state; 6433 6490 intel_encoder->get_config = intel_dp_get_config;
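The intel_dp.c rework above moves the per-platform source rate tables into intel_dp_set_source_rates(), builds sink rates from a fixed dp_rates[] table capped by the DPCD max link rate, turns the AUX register lookups into per-platform callbacks, and folds link retraining into the hotplug path behind intel_dp_needs_link_retrain(). As a small illustration of the sink-rate logic, here is a standalone sketch, not kernel code; the max_rate value is a made-up example of what drm_dp_bw_code_to_link_rate() might report for a sink.

/* Sketch of intel_dp_set_sink_rates(): keep entries from the fixed DP
 * rate table up to the maximum rate the sink advertises. */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
        static const int dp_rates[] = { 162000, 270000, 540000, 810000 };
        int sink_rates[ARRAY_SIZE(dp_rates)];
        int max_rate = 540000;  /* example: an HBR2-only sink */
        int i, num_sink_rates = 0;

        for (i = 0; i < (int)ARRAY_SIZE(dp_rates); i++) {
                if (dp_rates[i] > max_rate)
                        break;
                sink_rates[num_sink_rates++] = dp_rates[i];
        }

        for (i = 0; i < num_sink_rates; i++)
                printf("sink rate %d kHz\n", sink_rates[i]); /* 162000, 270000, 540000 */
        return 0;
}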
+15 -7
drivers/gpu/drm/i915/intel_drv.h
··· 215 215 enum intel_output_type type; 216 216 enum port port; 217 217 unsigned int cloneable; 218 - void (*hot_plug)(struct intel_encoder *); 218 + bool (*hotplug)(struct intel_encoder *encoder, 219 + struct intel_connector *connector); 219 220 enum intel_output_type (*compute_output_type)(struct intel_encoder *, 220 221 struct intel_crtc_state *, 221 222 struct drm_connector_state *); ··· 936 935 enum plane_id id; 937 936 enum pipe pipe; 938 937 bool can_scale; 938 + bool has_fbc; 939 939 int max_downscale; 940 940 uint32_t frontbuffer_bit; 941 941 ··· 1043 1041 1044 1042 struct intel_dp { 1045 1043 i915_reg_t output_reg; 1046 - i915_reg_t aux_ch_ctl_reg; 1047 - i915_reg_t aux_ch_data_reg[5]; 1048 1044 uint32_t DP; 1049 1045 int link_rate; 1050 1046 uint8_t lane_count; 1051 1047 uint8_t sink_count; 1052 1048 bool link_mst; 1049 + bool link_trained; 1053 1050 bool has_audio; 1054 1051 bool detect_done; 1055 - bool channel_eq_status; 1056 1052 bool reset_link_params; 1053 + enum aux_ch aux_ch; 1057 1054 uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; 1058 1055 uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; 1059 1056 uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; ··· 1126 1125 bool has_aux_irq, 1127 1126 int send_bytes, 1128 1127 uint32_t aux_clock_divider); 1128 + 1129 + i915_reg_t (*aux_ch_ctl_reg)(struct intel_dp *dp); 1130 + i915_reg_t (*aux_ch_data_reg)(struct intel_dp *dp, int index); 1129 1131 1130 1132 /* This is called before a link training is starterd */ 1131 1133 void (*prepare_link_retrain)(struct intel_dp *intel_dp); ··· 1512 1508 struct i915_vma * 1513 1509 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, 1514 1510 unsigned int rotation, 1511 + bool uses_fence, 1515 1512 unsigned long *out_flags); 1516 1513 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags); 1517 1514 struct drm_framebuffer * ··· 1627 1622 int link_rate, uint8_t lane_count); 1628 1623 void intel_dp_start_link_train(struct intel_dp *intel_dp); 1629 1624 void intel_dp_stop_link_train(struct intel_dp *intel_dp); 1625 + int intel_dp_retrain_link(struct intel_encoder *encoder, 1626 + struct drm_modeset_acquire_ctx *ctx); 1630 1627 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); 1631 1628 void intel_dp_encoder_reset(struct drm_encoder *encoder); 1632 1629 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder); ··· 1708 1701 void intel_dvo_init(struct drm_i915_private *dev_priv); 1709 1702 /* intel_hotplug.c */ 1710 1703 void intel_hpd_poll_init(struct drm_i915_private *dev_priv); 1711 - 1704 + bool intel_encoder_hotplug(struct intel_encoder *encoder, 1705 + struct intel_connector *connector); 1712 1706 1713 1707 /* legacy fbdev emulation in intel_fbdev.c */ 1714 1708 #ifdef CONFIG_DRM_FBDEV_EMULATION ··· 1871 1863 1872 1864 /* intel_psr.c */ 1873 1865 #define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support) 1866 + void intel_psr_init_dpcd(struct intel_dp *intel_dp); 1874 1867 void intel_psr_enable(struct intel_dp *intel_dp, 1875 1868 const struct intel_crtc_state *crtc_state); 1876 1869 void intel_psr_disable(struct intel_dp *intel_dp, ··· 1998 1989 void gen6_rps_busy(struct drm_i915_private *dev_priv); 1999 1990 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv); 2000 1991 void gen6_rps_idle(struct drm_i915_private *dev_priv); 2001 - void gen6_rps_boost(struct drm_i915_gem_request *rq, 2002 - struct intel_rps_client *rps); 1992 + void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *rps); 2003 1993 void g4x_wm_get_hw_state(struct 
drm_device *dev); 2004 1994 void vlv_wm_get_hw_state(struct drm_device *dev); 2005 1995 void ilk_wm_get_hw_state(struct drm_device *dev);
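The intel_drv.h changes above replace the cached aux_ch_ctl_reg/aux_ch_data_reg values with function pointers on struct intel_dp, alongside the new aux_ch, link_trained, and per-plane has_fbc fields. A minimal sketch of the pattern this enables, choosing a per-platform register lookup once at init, follows; the types and offsets are made up for the example and are not the driver's real register map.

/* Sketch of the callback-based AUX register lookup: the DP struct
 * carries a lookup function selected once per platform generation. */
#include <stdio.h>

struct fake_dp;
typedef unsigned int (*reg_lookup_t)(const struct fake_dp *dp, int index);

struct fake_dp {
        int aux_ch;             /* channel index, as in enum aux_ch */
        reg_lookup_t data_reg;  /* set once at init, per platform */
};

static unsigned int fake_skl_data_reg(const struct fake_dp *dp, int index)
{
        /* made-up offsets: base + per-channel stride + per-dword step */
        return 0x1000u + (unsigned int)dp->aux_ch * 0x100u + (unsigned int)index * 4u;
}

int main(void)
{
        struct fake_dp dp = { .aux_ch = 1, .data_reg = fake_skl_data_reg };

        printf("data reg 0: 0x%x\n", dp.data_reg(&dp, 0));
        printf("data reg 3: 0x%x\n", dp.data_reg(&dp, 3));
        return 0;
}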
+63 -16
drivers/gpu/drm/i915/intel_engine_cs.c
··· 123 123 .mmio_base = GEN8_BSD2_RING_BASE, 124 124 .irq_shift = GEN8_VCS2_IRQ_SHIFT, 125 125 }, 126 + [VCS3] = { 127 + .hw_id = VCS3_HW, 128 + .uabi_id = I915_EXEC_BSD, 129 + .class = VIDEO_DECODE_CLASS, 130 + .instance = 2, 131 + .mmio_base = GEN11_BSD3_RING_BASE, 132 + .irq_shift = 0, /* not used */ 133 + }, 134 + [VCS4] = { 135 + .hw_id = VCS4_HW, 136 + .uabi_id = I915_EXEC_BSD, 137 + .class = VIDEO_DECODE_CLASS, 138 + .instance = 3, 139 + .mmio_base = GEN11_BSD4_RING_BASE, 140 + .irq_shift = 0, /* not used */ 141 + }, 126 142 [VECS] = { 127 143 .hw_id = VECS_HW, 128 144 .uabi_id = I915_EXEC_VEBOX, ··· 146 130 .instance = 0, 147 131 .mmio_base = VEBOX_RING_BASE, 148 132 .irq_shift = GEN8_VECS_IRQ_SHIFT, 133 + }, 134 + [VECS2] = { 135 + .hw_id = VECS2_HW, 136 + .uabi_id = I915_EXEC_VEBOX, 137 + .class = VIDEO_ENHANCEMENT_CLASS, 138 + .instance = 1, 139 + .mmio_base = GEN11_VEBOX2_RING_BASE, 140 + .irq_shift = 0, /* not used */ 149 141 }, 150 142 }; 151 143 ··· 234 210 GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes)); 235 211 class_info = &intel_engine_classes[info->class]; 236 212 213 + BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH)); 214 + BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH)); 215 + 237 216 if (GEM_WARN_ON(info->class > MAX_ENGINE_CLASS)) 238 217 return -EINVAL; 239 218 ··· 257 230 class_info->name, info->instance) >= 258 231 sizeof(engine->name)); 259 232 engine->hw_id = engine->guc_id = info->hw_id; 260 - engine->mmio_base = info->mmio_base; 233 + if (INTEL_GEN(dev_priv) >= 11) { 234 + switch (engine->id) { 235 + case VCS: 236 + engine->mmio_base = GEN11_BSD_RING_BASE; 237 + break; 238 + case VCS2: 239 + engine->mmio_base = GEN11_BSD2_RING_BASE; 240 + break; 241 + case VECS: 242 + engine->mmio_base = GEN11_VEBOX_RING_BASE; 243 + break; 244 + default: 245 + /* take the original value for all other engines */ 246 + engine->mmio_base = info->mmio_base; 247 + break; 248 + } 249 + } else { 250 + engine->mmio_base = info->mmio_base; 251 + } 261 252 engine->irq_shift = info->irq_shift; 262 253 engine->class = info->class; 263 254 engine->instance = info->instance; ··· 468 423 BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists)); 469 424 GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS); 470 425 426 + execlists->queue_priority = INT_MIN; 471 427 execlists->queue = RB_ROOT; 472 428 execlists->first = NULL; 473 429 } ··· 1472 1426 return 0; 1473 1427 } 1474 1428 1475 - int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) 1429 + int intel_ring_workarounds_emit(struct i915_request *rq) 1476 1430 { 1477 - struct i915_workarounds *w = &req->i915->workarounds; 1431 + struct i915_workarounds *w = &rq->i915->workarounds; 1478 1432 u32 *cs; 1479 1433 int ret, i; 1480 1434 1481 1435 if (w->count == 0) 1482 1436 return 0; 1483 1437 1484 - ret = req->engine->emit_flush(req, EMIT_BARRIER); 1438 + ret = rq->engine->emit_flush(rq, EMIT_BARRIER); 1485 1439 if (ret) 1486 1440 return ret; 1487 1441 1488 - cs = intel_ring_begin(req, (w->count * 2 + 2)); 1442 + cs = intel_ring_begin(rq, w->count * 2 + 2); 1489 1443 if (IS_ERR(cs)) 1490 1444 return PTR_ERR(cs); 1491 1445 ··· 1496 1450 } 1497 1451 *cs++ = MI_NOOP; 1498 1452 1499 - intel_ring_advance(req, cs); 1453 + intel_ring_advance(rq, cs); 1500 1454 1501 - ret = req->engine->emit_flush(req, EMIT_BARRIER); 1455 + ret = rq->engine->emit_flush(rq, EMIT_BARRIER); 1502 1456 if (ret) 1503 1457 return ret; 1504 1458 ··· 1598 1552 { 1599 1553 const struct i915_gem_context * 
const kernel_context = 1600 1554 engine->i915->kernel_context; 1601 - struct drm_i915_gem_request *rq; 1555 + struct i915_request *rq; 1602 1556 1603 1557 lockdep_assert_held(&engine->i915->drm.struct_mutex); 1604 1558 ··· 1710 1664 } 1711 1665 1712 1666 static void print_request(struct drm_printer *m, 1713 - struct drm_i915_gem_request *rq, 1667 + struct i915_request *rq, 1714 1668 const char *prefix) 1715 1669 { 1716 - drm_printf(m, "%s%x%s [%x:%x] prio=%d @ %dms: %s\n", prefix, 1670 + drm_printf(m, "%s%x%s [%llx:%x] prio=%d @ %dms: %s\n", prefix, 1717 1671 rq->global_seqno, 1718 - i915_gem_request_completed(rq) ? "!" : "", 1719 - rq->ctx->hw_id, rq->fence.seqno, 1672 + i915_request_completed(rq) ? "!" : "", 1673 + rq->fence.context, rq->fence.seqno, 1720 1674 rq->priotree.priority, 1721 1675 jiffies_to_msecs(jiffies - rq->emitted_jiffies), 1722 1676 rq->timeline->common->name); ··· 1849 1803 1850 1804 rcu_read_lock(); 1851 1805 for (idx = 0; idx < execlists_num_ports(execlists); idx++) { 1852 - struct drm_i915_gem_request *rq; 1806 + struct i915_request *rq; 1853 1807 unsigned int count; 1854 1808 1855 1809 rq = port_unpack(&execlists->port[idx], &count); ··· 1883 1837 struct intel_breadcrumbs * const b = &engine->breadcrumbs; 1884 1838 const struct intel_engine_execlists * const execlists = &engine->execlists; 1885 1839 struct i915_gpu_error * const error = &engine->i915->gpu_error; 1886 - struct drm_i915_gem_request *rq; 1840 + struct i915_request *rq; 1887 1841 struct rb_node *rb; 1888 1842 1889 1843 if (header) { ··· 1912 1866 drm_printf(m, "\tRequests:\n"); 1913 1867 1914 1868 rq = list_first_entry(&engine->timeline->requests, 1915 - struct drm_i915_gem_request, link); 1869 + struct i915_request, link); 1916 1870 if (&rq->link != &engine->timeline->requests) 1917 1871 print_request(m, rq, "\t\tfirst "); 1918 1872 1919 1873 rq = list_last_entry(&engine->timeline->requests, 1920 - struct drm_i915_gem_request, link); 1874 + struct i915_request, link); 1921 1875 if (&rq->link != &engine->timeline->requests) 1922 1876 print_request(m, rq, "\t\tlast "); 1923 1877 ··· 1949 1903 spin_lock_irq(&engine->timeline->lock); 1950 1904 list_for_each_entry(rq, &engine->timeline->requests, link) 1951 1905 print_request(m, rq, "\t\tE "); 1906 + drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority); 1952 1907 for (rb = execlists->first; rb; rb = rb_next(rb)) { 1953 1908 struct i915_priolist *p = 1954 1909 rb_entry(rb, typeof(*p), node);
+56 -51
drivers/gpu/drm/i915/intel_fbc.c
··· 46 46 return HAS_FBC(dev_priv); 47 47 } 48 48 49 - static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv) 50 - { 51 - return IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8; 52 - } 53 - 54 - static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv) 55 - { 56 - return INTEL_GEN(dev_priv) < 4; 57 - } 58 - 59 49 static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv) 60 50 { 61 51 return INTEL_GEN(dev_priv) <= 3; ··· 809 819 * Note that is possible for a tiled surface to be unmappable (and 810 820 * so have no fence associated with it) due to aperture constaints 811 821 * at the time of pinning. 822 + * 823 + * FIXME with 90/270 degree rotation we should use the fence on 824 + * the normal GTT view (the rotated view doesn't even have a 825 + * fence). Would need changes to the FBC fence Y offset as well. 826 + * For now this will effecively disable FBC with 90/270 degree 827 + * rotation. 812 828 */ 813 829 if (!(cache->flags & PLANE_HAS_FENCE)) { 814 830 fbc->no_fbc_reason = "framebuffer not tiled or fenced"; ··· 856 860 if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) > 857 861 fbc->compressed_fb.size * fbc->threshold) { 858 862 fbc->no_fbc_reason = "CFB requirements changed"; 863 + return false; 864 + } 865 + 866 + /* 867 + * Work around a problem on GEN9+ HW, where enabling FBC on a plane 868 + * having a Y offset that isn't divisible by 4 causes FIFO underrun 869 + * and screen flicker. 870 + */ 871 + if (IS_GEN(dev_priv, 9, 10) && 872 + (fbc->state_cache.plane.adjusted_y & 3)) { 873 + fbc->no_fbc_reason = "plane Y offset is misaligned"; 859 874 return false; 860 875 } 861 876 ··· 960 953 mutex_unlock(&fbc->lock); 961 954 } 962 955 956 + /** 957 + * __intel_fbc_disable - disable FBC 958 + * @dev_priv: i915 device instance 959 + * 960 + * This is the low level function that actually disables FBC. Callers should 961 + * grab the FBC lock. 
962 + */ 963 + static void __intel_fbc_disable(struct drm_i915_private *dev_priv) 964 + { 965 + struct intel_fbc *fbc = &dev_priv->fbc; 966 + struct intel_crtc *crtc = fbc->crtc; 967 + 968 + WARN_ON(!mutex_is_locked(&fbc->lock)); 969 + WARN_ON(!fbc->enabled); 970 + WARN_ON(fbc->active); 971 + 972 + DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe)); 973 + 974 + __intel_fbc_cleanup_cfb(dev_priv); 975 + 976 + fbc->enabled = false; 977 + fbc->crtc = NULL; 978 + } 979 + 963 980 static void __intel_fbc_post_update(struct intel_crtc *crtc) 964 981 { 965 982 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); ··· 994 963 995 964 if (!fbc->enabled || fbc->crtc != crtc) 996 965 return; 966 + 967 + if (!i915_modparams.enable_fbc) { 968 + intel_fbc_deactivate(dev_priv, "disabled at runtime per module param"); 969 + __intel_fbc_disable(dev_priv); 970 + 971 + return; 972 + } 997 973 998 974 if (!intel_fbc_can_activate(crtc)) { 999 975 WARN_ON(fbc->active); ··· 1132 1094 struct intel_crtc_state *crtc_state; 1133 1095 struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc); 1134 1096 1097 + if (!plane->has_fbc) 1098 + continue; 1099 + 1135 1100 if (!plane_state->base.visible) 1136 - continue; 1137 - 1138 - if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) 1139 - continue; 1140 - 1141 - if (fbc_on_plane_a_only(dev_priv) && plane->i9xx_plane != PLANE_A) 1142 1101 continue; 1143 1102 1144 1103 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); ··· 1206 1171 } 1207 1172 1208 1173 /** 1209 - * __intel_fbc_disable - disable FBC 1210 - * @dev_priv: i915 device instance 1211 - * 1212 - * This is the low level function that actually disables FBC. Callers should 1213 - * grab the FBC lock. 1214 - */ 1215 - static void __intel_fbc_disable(struct drm_i915_private *dev_priv) 1216 - { 1217 - struct intel_fbc *fbc = &dev_priv->fbc; 1218 - struct intel_crtc *crtc = fbc->crtc; 1219 - 1220 - WARN_ON(!mutex_is_locked(&fbc->lock)); 1221 - WARN_ON(!fbc->enabled); 1222 - WARN_ON(fbc->active); 1223 - WARN_ON(crtc->active); 1224 - 1225 - DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe)); 1226 - 1227 - __intel_fbc_cleanup_cfb(dev_priv); 1228 - 1229 - fbc->enabled = false; 1230 - fbc->crtc = NULL; 1231 - } 1232 - 1233 - /** 1234 1174 * intel_fbc_disable - disable FBC if it's associated with crtc 1235 1175 * @crtc: the CRTC 1236 1176 * ··· 1218 1208 1219 1209 if (!fbc_supported(dev_priv)) 1220 1210 return; 1211 + 1212 + WARN_ON(crtc->active); 1221 1213 1222 1214 mutex_lock(&fbc->lock); 1223 1215 if (fbc->crtc == crtc) ··· 1243 1231 return; 1244 1232 1245 1233 mutex_lock(&fbc->lock); 1246 - if (fbc->enabled) 1234 + if (fbc->enabled) { 1235 + WARN_ON(fbc->crtc->active); 1247 1236 __intel_fbc_disable(dev_priv); 1237 + } 1248 1238 mutex_unlock(&fbc->lock); 1249 1239 1250 1240 cancel_work_sync(&fbc->work.work); ··· 1371 1357 void intel_fbc_init(struct drm_i915_private *dev_priv) 1372 1358 { 1373 1359 struct intel_fbc *fbc = &dev_priv->fbc; 1374 - enum pipe pipe; 1375 1360 1376 1361 INIT_WORK(&fbc->work.work, intel_fbc_work_fn); 1377 1362 INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn); ··· 1389 1376 if (!HAS_FBC(dev_priv)) { 1390 1377 fbc->no_fbc_reason = "unsupported by this chipset"; 1391 1378 return; 1392 - } 1393 - 1394 - for_each_pipe(dev_priv, pipe) { 1395 - fbc->possible_framebuffer_bits |= 1396 - INTEL_FRONTBUFFER(pipe, PLANE_PRIMARY); 1397 - 1398 - if (fbc_on_pipe_a_only(dev_priv)) 1399 - break; 1400 1379 } 1401 1380 1402 1381 /* This value was pulled 
out of someone's hat */
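The new GEN9+ check above refuses FBC whenever the plane's adjusted Y offset is not a multiple of 4, since such offsets have been seen to cause FIFO underruns and flicker. The test itself is a plain power-of-two alignment check; a standalone sketch of the predicate (the function name is illustrative):

#include <stdbool.h>

/* True when the GEN9+ Y-offset erratum allows FBC: the adjusted Y offset
 * must be divisible by 4, i.e. its two low bits must be clear. */
static bool fbc_y_offset_aligned(unsigned int adjusted_y)
{
	return (adjusted_y & 3) == 0;	/* same as adjusted_y % 4 == 0 */
}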
+1 -1
drivers/gpu/drm/i915/intel_fbdev.c
··· 215 215 */ 216 216 vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, 217 217 DRM_MODE_ROTATE_0, 218 - &flags); 218 + false, &flags); 219 219 if (IS_ERR(vma)) { 220 220 ret = PTR_ERR(vma); 221 221 goto out_unlock;
+2
drivers/gpu/drm/i915/intel_frontbuffer.c
··· 79 79 spin_unlock(&dev_priv->fb_tracking.lock); 80 80 } 81 81 82 + might_sleep(); 82 83 intel_psr_invalidate(dev_priv, frontbuffer_bits); 83 84 intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits); 84 85 intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin); ··· 109 108 if (!frontbuffer_bits) 110 109 return; 111 110 111 + might_sleep(); 112 112 intel_edp_drrs_flush(dev_priv, frontbuffer_bits); 113 113 intel_psr_flush(dev_priv, frontbuffer_bits, origin); 114 114 intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
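The two might_sleep() calls added above document, and with CONFIG_DEBUG_ATOMIC_SLEEP actively check, that frontbuffer invalidate/flush may now sleep, e.g. because the PSR and DRRS paths take mutexes. A minimal sketch of the annotation pattern; the function and lock names here are hypothetical:

#include <linux/kernel.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);	/* hypothetical lock */

static void example_may_sleep_path(void)
{
	/*
	 * Declares that callers must not be in atomic context; with
	 * CONFIG_DEBUG_ATOMIC_SLEEP this warns even when the sleeping
	 * call below happens not to block.
	 */
	might_sleep();

	mutex_lock(&example_lock);	/* may sleep */
	mutex_unlock(&example_lock);
}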
+15 -29
drivers/gpu/drm/i915/intel_guc.c
··· 370 370 u32 action[2]; 371 371 372 372 action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE; 373 - /* WaRsDisableCoarsePowerGating:skl,bxt */ 373 + /* WaRsDisableCoarsePowerGating:skl,cnl */ 374 374 if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv)) 375 375 action[1] = 0; 376 376 else ··· 403 403 404 404 /** 405 405 * intel_guc_suspend() - notify GuC entering suspend state 406 - * @dev_priv: i915 device private 406 + * @guc: the guc 407 407 */ 408 - int intel_guc_suspend(struct drm_i915_private *dev_priv) 408 + int intel_guc_suspend(struct intel_guc *guc) 409 409 { 410 - struct intel_guc *guc = &dev_priv->guc; 411 - u32 data[3]; 412 - 413 - if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS) 414 - return 0; 415 - 416 - gen9_disable_guc_interrupts(dev_priv); 417 - 418 - data[0] = INTEL_GUC_ACTION_ENTER_S_STATE; 419 - /* any value greater than GUC_POWER_D0 */ 420 - data[1] = GUC_POWER_D1; 421 - data[2] = guc_ggtt_offset(guc->shared_data); 410 + u32 data[] = { 411 + INTEL_GUC_ACTION_ENTER_S_STATE, 412 + GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */ 413 + guc_ggtt_offset(guc->shared_data) 414 + }; 422 415 423 416 return intel_guc_send(guc, data, ARRAY_SIZE(data)); 424 417 } ··· 441 448 442 449 /** 443 450 * intel_guc_resume() - notify GuC resuming from suspend state 444 - * @dev_priv: i915 device private 451 + * @guc: the guc 445 452 */ 446 - int intel_guc_resume(struct drm_i915_private *dev_priv) 453 + int intel_guc_resume(struct intel_guc *guc) 447 454 { 448 - struct intel_guc *guc = &dev_priv->guc; 449 - u32 data[3]; 450 - 451 - if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS) 452 - return 0; 453 - 454 - if (i915_modparams.guc_log_level) 455 - gen9_enable_guc_interrupts(dev_priv); 456 - 457 - data[0] = INTEL_GUC_ACTION_EXIT_S_STATE; 458 - data[1] = GUC_POWER_D0; 459 - data[2] = guc_ggtt_offset(guc->shared_data); 455 + u32 data[] = { 456 + INTEL_GUC_ACTION_EXIT_S_STATE, 457 + GUC_POWER_D0, 458 + guc_ggtt_offset(guc->shared_data) 459 + }; 460 460 461 461 return intel_guc_send(guc, data, ARRAY_SIZE(data)); 462 462 }
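As the slimmed-down suspend/resume helpers above show, a host-to-GuC request is simply an action code followed by its parameters in a u32 array handed to intel_guc_send(). A sketch of the same shape for a hypothetical action; INTEL_GUC_ACTION_EXAMPLE and its parameter are made up for illustration:

/* Hypothetical action code, for illustration only. */
#define INTEL_GUC_ACTION_EXAMPLE	0x123

static int intel_guc_example_request(struct intel_guc *guc, u32 param)
{
	u32 data[] = {
		INTEL_GUC_ACTION_EXAMPLE,
		param,
	};

	return intel_guc_send(guc, data, ARRAY_SIZE(data));
}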
+2 -2
drivers/gpu/drm/i915/intel_guc.h
··· 127 127 int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len); 128 128 int intel_guc_sample_forcewake(struct intel_guc *guc); 129 129 int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset); 130 - int intel_guc_suspend(struct drm_i915_private *dev_priv); 131 - int intel_guc_resume(struct drm_i915_private *dev_priv); 130 + int intel_guc_suspend(struct intel_guc *guc); 131 + int intel_guc_resume(struct intel_guc *guc); 132 132 struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size); 133 133 u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv); 134 134
+5 -5
drivers/gpu/drm/i915/intel_guc_fw.c
··· 269 269 } 270 270 271 271 /** 272 - * intel_guc_fw_upload() - finish preparing the GuC for activity 272 + * intel_guc_fw_upload() - load GuC uCode to device 273 273 * @guc: intel_guc structure 274 274 * 275 - * Called during driver loading and also after a GPU reset. 275 + * Called from intel_uc_init_hw() during driver load, resume from sleep and 276 + * after a GPU reset. 276 277 * 277 - * The main action required here it to load the GuC uCode into the device. 278 278 * The firmware image should have already been fetched into memory by the 279 - * earlier call to intel_guc_init(), so here we need only check that 280 - * worked, and then transfer the image to the h/w. 279 + * earlier call to intel_uc_init_fw(), so here we need to only check that 280 + * fetch succeeded, and then transfer the image to the h/w. 281 281 * 282 282 * Return: non-zero code on error 283 283 */
+4 -2
drivers/gpu/drm/i915/intel_guc_log.c
··· 61 61 static int guc_log_control(struct intel_guc *guc, bool enable, u32 verbosity) 62 62 { 63 63 union guc_log_control control_val = { 64 - .logging_enabled = enable, 65 - .verbosity = verbosity, 64 + { 65 + .logging_enabled = enable, 66 + .verbosity = verbosity, 67 + }, 66 68 }; 67 69 u32 action[] = { 68 70 INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
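The extra pair of braces added above is the "gcc-4.4.4 union initializer" workaround from the shortlog: that compiler rejects designated initializers that reach into an anonymous struct inside a union unless the struct's braces are written out. A simplified standalone illustration (this union is a stand-in, not the real guc_log_control layout):

/* Stand-in for a union whose first member is an anonymous struct. */
union example_log_control {
	struct {
		unsigned int logging_enabled : 1;
		unsigned int verbosity       : 4;
	};
	unsigned int value;
};

/* gcc 4.4.4 rejects this form:
 *   union example_log_control c = { .logging_enabled = 1, .verbosity = 2 };
 * Writing the anonymous struct's braces explicitly works everywhere: */
union example_log_control c = {
	{
		.logging_enabled = 1,
		.verbosity = 2,
	},
};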
+91 -49
drivers/gpu/drm/i915/intel_guc_submission.c
··· 26 26 #include <trace/events/dma_fence.h> 27 27 28 28 #include "intel_guc_submission.h" 29 + #include "intel_lrc_reg.h" 29 30 #include "i915_drv.h" 31 + 32 + #define GUC_PREEMPT_FINISHED 0x1 33 + #define GUC_PREEMPT_BREADCRUMB_DWORDS 0x8 34 + #define GUC_PREEMPT_BREADCRUMB_BYTES \ 35 + (sizeof(u32) * GUC_PREEMPT_BREADCRUMB_DWORDS) 30 36 31 37 /** 32 38 * DOC: GuC-based command submission ··· 80 74 * See guc_add_request() 81 75 * 82 76 */ 77 + 78 + static inline struct i915_priolist *to_priolist(struct rb_node *rb) 79 + { 80 + return rb_entry(rb, struct i915_priolist, node); 81 + } 83 82 84 83 static inline bool is_high_priority(struct intel_guc_client *client) 85 84 { ··· 507 496 GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED); 508 497 } 509 498 510 - static void guc_add_request(struct intel_guc *guc, 511 - struct drm_i915_gem_request *rq) 499 + static void guc_add_request(struct intel_guc *guc, struct i915_request *rq) 512 500 { 513 501 struct intel_guc_client *client = guc->execbuf_client; 514 502 struct intel_engine_cs *engine = rq->engine; ··· 541 531 POSTING_READ_FW(GUC_STATUS); 542 532 } 543 533 544 - #define GUC_PREEMPT_FINISHED 0x1 545 - #define GUC_PREEMPT_BREADCRUMB_DWORDS 0x8 546 534 static void inject_preempt_context(struct work_struct *work) 547 535 { 548 536 struct guc_preempt_work *preempt_work = ··· 550 542 preempt_work[engine->id]); 551 543 struct intel_guc_client *client = guc->preempt_client; 552 544 struct guc_stage_desc *stage_desc = __get_stage_desc(client); 553 - struct intel_ring *ring = client->owner->engine[engine->id].ring; 554 545 u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(client->owner, 555 546 engine)); 556 - u32 *cs = ring->vaddr + ring->tail; 557 547 u32 data[7]; 558 548 559 - if (engine->id == RCS) { 560 - cs = gen8_emit_ggtt_write_rcs(cs, GUC_PREEMPT_FINISHED, 561 - intel_hws_preempt_done_address(engine)); 562 - } else { 563 - cs = gen8_emit_ggtt_write(cs, GUC_PREEMPT_FINISHED, 564 - intel_hws_preempt_done_address(engine)); 565 - *cs++ = MI_NOOP; 566 - *cs++ = MI_NOOP; 567 - } 568 - *cs++ = MI_USER_INTERRUPT; 569 - *cs++ = MI_NOOP; 570 - 571 - GEM_BUG_ON(!IS_ALIGNED(ring->size, 572 - GUC_PREEMPT_BREADCRUMB_DWORDS * sizeof(u32))); 573 - GEM_BUG_ON((void *)cs - (ring->vaddr + ring->tail) != 574 - GUC_PREEMPT_BREADCRUMB_DWORDS * sizeof(u32)); 575 - 576 - ring->tail += GUC_PREEMPT_BREADCRUMB_DWORDS * sizeof(u32); 577 - ring->tail &= (ring->size - 1); 578 - 579 - flush_ggtt_writes(ring->vma); 580 - 549 + /* 550 + * The ring contains commands to write GUC_PREEMPT_FINISHED into HWSP. 551 + * See guc_fill_preempt_context(). 
552 + */ 581 553 spin_lock_irq(&client->wq_lock); 582 554 guc_wq_item_append(client, engine->guc_id, ctx_desc, 583 - ring->tail / sizeof(u64), 0); 555 + GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(u64), 0); 584 556 spin_unlock_irq(&client->wq_lock); 585 557 586 558 /* ··· 636 648 unsigned int n; 637 649 638 650 for (n = 0; n < execlists_num_ports(execlists); n++) { 639 - struct drm_i915_gem_request *rq; 651 + struct i915_request *rq; 640 652 unsigned int count; 641 653 642 654 rq = port_unpack(&port[n], &count); ··· 650 662 } 651 663 } 652 664 653 - static void port_assign(struct execlist_port *port, 654 - struct drm_i915_gem_request *rq) 665 + static void port_assign(struct execlist_port *port, struct i915_request *rq) 655 666 { 656 667 GEM_BUG_ON(port_isset(port)); 657 668 658 - port_set(port, i915_gem_request_get(rq)); 669 + port_set(port, i915_request_get(rq)); 659 670 } 660 671 661 672 static void guc_dequeue(struct intel_engine_cs *engine) 662 673 { 663 674 struct intel_engine_execlists * const execlists = &engine->execlists; 664 675 struct execlist_port *port = execlists->port; 665 - struct drm_i915_gem_request *last = NULL; 676 + struct i915_request *last = NULL; 666 677 const struct execlist_port * const last_port = 667 678 &execlists->port[execlists->port_mask]; 668 679 bool submit = false; ··· 671 684 rb = execlists->first; 672 685 GEM_BUG_ON(rb_first(&execlists->queue) != rb); 673 686 674 - if (!rb) 675 - goto unlock; 676 - 677 687 if (port_isset(port)) { 678 688 if (engine->i915->preempt_context) { 679 689 struct guc_preempt_work *preempt_work = 680 690 &engine->i915->guc.preempt_work[engine->id]; 681 691 682 - if (rb_entry(rb, struct i915_priolist, node)->priority > 692 + if (execlists->queue_priority > 683 693 max(port_request(port)->priotree.priority, 0)) { 684 694 execlists_set_active(execlists, 685 695 EXECLISTS_ACTIVE_PREEMPT); ··· 692 708 } 693 709 GEM_BUG_ON(port_isset(port)); 694 710 695 - do { 696 - struct i915_priolist *p = rb_entry(rb, typeof(*p), node); 697 - struct drm_i915_gem_request *rq, *rn; 711 + while (rb) { 712 + struct i915_priolist *p = to_priolist(rb); 713 + struct i915_request *rq, *rn; 698 714 699 715 list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) { 700 716 if (last && rq->ctx != last->ctx) { ··· 711 727 712 728 INIT_LIST_HEAD(&rq->priotree.link); 713 729 714 - __i915_gem_request_submit(rq); 715 - trace_i915_gem_request_in(rq, 716 - port_index(port, execlists)); 730 + __i915_request_submit(rq); 731 + trace_i915_request_in(rq, port_index(port, execlists)); 717 732 last = rq; 718 733 submit = true; 719 734 } ··· 722 739 INIT_LIST_HEAD(&p->requests); 723 740 if (p->priority != I915_PRIORITY_NORMAL) 724 741 kmem_cache_free(engine->i915->priorities, p); 725 - } while (rb); 742 + } 726 743 done: 744 + execlists->queue_priority = rb ? 
to_priolist(rb)->priority : INT_MIN; 727 745 execlists->first = rb; 728 746 if (submit) { 729 747 port_assign(port, last); ··· 746 762 struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; 747 763 struct intel_engine_execlists * const execlists = &engine->execlists; 748 764 struct execlist_port *port = execlists->port; 749 - struct drm_i915_gem_request *rq; 765 + struct i915_request *rq; 750 766 751 767 rq = port_request(&port[0]); 752 - while (rq && i915_gem_request_completed(rq)) { 753 - trace_i915_gem_request_out(rq); 754 - i915_gem_request_put(rq); 768 + while (rq && i915_request_completed(rq)) { 769 + trace_i915_request_out(rq); 770 + i915_request_put(rq); 755 771 756 772 execlists_port_complete(execlists, port); 757 773 ··· 956 972 kfree(client); 957 973 } 958 974 975 + static inline bool ctx_save_restore_disabled(struct intel_context *ce) 976 + { 977 + u32 sr = ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1]; 978 + 979 + #define SR_DISABLED \ 980 + _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | \ 981 + CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT) 982 + 983 + return (sr & SR_DISABLED) == SR_DISABLED; 984 + 985 + #undef SR_DISABLED 986 + } 987 + 988 + static void guc_fill_preempt_context(struct intel_guc *guc) 989 + { 990 + struct drm_i915_private *dev_priv = guc_to_i915(guc); 991 + struct intel_guc_client *client = guc->preempt_client; 992 + struct intel_engine_cs *engine; 993 + enum intel_engine_id id; 994 + 995 + for_each_engine(engine, dev_priv, id) { 996 + struct intel_context *ce = &client->owner->engine[id]; 997 + u32 addr = intel_hws_preempt_done_address(engine); 998 + u32 *cs; 999 + 1000 + GEM_BUG_ON(!ce->pin_count); 1001 + 1002 + /* 1003 + * We rely on this context image *not* being saved after 1004 + * preemption. This ensures that the RING_HEAD / RING_TAIL 1005 + * remain pointing at initial values forever. 1006 + */ 1007 + GEM_BUG_ON(!ctx_save_restore_disabled(ce)); 1008 + 1009 + cs = ce->ring->vaddr; 1010 + if (id == RCS) { 1011 + cs = gen8_emit_ggtt_write_rcs(cs, 1012 + GUC_PREEMPT_FINISHED, 1013 + addr); 1014 + } else { 1015 + cs = gen8_emit_ggtt_write(cs, 1016 + GUC_PREEMPT_FINISHED, 1017 + addr); 1018 + *cs++ = MI_NOOP; 1019 + *cs++ = MI_NOOP; 1020 + } 1021 + *cs++ = MI_USER_INTERRUPT; 1022 + *cs++ = MI_NOOP; 1023 + 1024 + GEM_BUG_ON((void *)cs - ce->ring->vaddr != 1025 + GUC_PREEMPT_BREADCRUMB_BYTES); 1026 + 1027 + flush_ggtt_writes(ce->ring->vma); 1028 + } 1029 + } 1030 + 959 1031 static int guc_clients_create(struct intel_guc *guc) 960 1032 { 961 1033 struct drm_i915_private *dev_priv = guc_to_i915(guc); ··· 1042 1002 return PTR_ERR(client); 1043 1003 } 1044 1004 guc->preempt_client = client; 1005 + 1006 + guc_fill_preempt_context(guc); 1045 1007 } 1046 1008 1047 1009 return 0;
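The queue_priority field introduced above caches the priority of the best queued request (INT_MIN when the queue is empty), so the dequeue path no longer has to peek at the first rb-tree node to decide about preemption: preemption is injected only when the queued priority beats the currently executing one, clamped at 0 (the default priority). A minimal standalone sketch of that decision:

#include <limits.h>
#include <stdbool.h>

/*
 * queue_priority: best queued priority, or INT_MIN when nothing is queued.
 * active_priority: priority of the request occupying the submission port.
 */
static bool should_preempt(int queue_priority, int active_priority)
{
	/* Clamp at 0 so only above-default-priority work triggers preemption. */
	return queue_priority > (active_priority > 0 ? active_priority : 0);
}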
+1
drivers/gpu/drm/i915/intel_hdmi.c
··· 2383 2383 &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS, 2384 2384 "HDMI %c", port_name(port)); 2385 2385 2386 + intel_encoder->hotplug = intel_encoder_hotplug; 2386 2387 intel_encoder->compute_config = intel_hdmi_compute_config; 2387 2388 if (HAS_PCH_SPLIT(dev_priv)) { 2388 2389 intel_encoder->disable = pch_disable_hdmi;
+13 -12
drivers/gpu/drm/i915/intel_hotplug.c
··· 274 274 intel_runtime_pm_put(dev_priv); 275 275 } 276 276 277 - static bool intel_hpd_irq_event(struct drm_device *dev, 278 - struct drm_connector *connector) 277 + bool intel_encoder_hotplug(struct intel_encoder *encoder, 278 + struct intel_connector *connector) 279 279 { 280 + struct drm_device *dev = connector->base.dev; 280 281 enum drm_connector_status old_status; 281 282 282 283 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 283 - old_status = connector->status; 284 + old_status = connector->base.status; 284 285 285 - connector->status = drm_helper_probe_detect(connector, NULL, false); 286 + connector->base.status = 287 + drm_helper_probe_detect(&connector->base, NULL, false); 286 288 287 - if (old_status == connector->status) 289 + if (old_status == connector->base.status) 288 290 return false; 289 291 290 292 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", 291 - connector->base.id, 292 - connector->name, 293 + connector->base.base.id, 294 + connector->base.name, 293 295 drm_get_connector_status_name(old_status), 294 - drm_get_connector_status_name(connector->status)); 296 + drm_get_connector_status_name(connector->base.status)); 295 297 296 298 return true; 297 299 } ··· 383 381 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { 384 382 DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n", 385 383 connector->name, intel_encoder->hpd_pin); 386 - if (intel_encoder->hot_plug) 387 - intel_encoder->hot_plug(intel_encoder); 388 - if (intel_hpd_irq_event(dev, connector)) 389 - changed = true; 384 + 385 + changed |= intel_encoder->hotplug(intel_encoder, 386 + intel_connector); 390 387 } 391 388 } 392 389 drm_connector_list_iter_end(&conn_iter);
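With the hunk above the per-encoder ->hotplug() hook subsumes the old ->hot_plug() callback plus intel_hpd_irq_event(): the hook reprobes the connector and returns whether its status changed, so the caller just ORs the results into changed. A sketch of how an encoder could wrap the generic helper with its own work; the encoder function name is hypothetical:

/* Hypothetical encoder hook: do encoder-specific work around the generic
 * reprobe while still reporting whether the connector status changed. */
static bool example_encoder_hotplug(struct intel_encoder *encoder,
				    struct intel_connector *connector)
{
	/* encoder-specific handling (e.g. link checks) would go here */

	return intel_encoder_hotplug(encoder, connector);
}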
+19 -163
drivers/gpu/drm/i915/intel_huc.c
··· 27 27 #include "intel_huc.h" 28 28 #include "i915_drv.h" 29 29 30 - /** 31 - * DOC: HuC Firmware 32 - * 33 - * Motivation: 34 - * GEN9 introduces a new dedicated firmware for usage in media HEVC (High 35 - * Efficiency Video Coding) operations. Userspace can use the firmware 36 - * capabilities by adding HuC specific commands to batch buffers. 37 - * 38 - * Implementation: 39 - * The same firmware loader is used as the GuC. However, the actual 40 - * loading to HW is deferred until GEM initialization is done. 41 - * 42 - * Note that HuC firmware loading must be done before GuC loading. 43 - */ 44 - 45 - #define BXT_HUC_FW_MAJOR 01 46 - #define BXT_HUC_FW_MINOR 07 47 - #define BXT_BLD_NUM 1398 48 - 49 - #define SKL_HUC_FW_MAJOR 01 50 - #define SKL_HUC_FW_MINOR 07 51 - #define SKL_BLD_NUM 1398 52 - 53 - #define KBL_HUC_FW_MAJOR 02 54 - #define KBL_HUC_FW_MINOR 00 55 - #define KBL_BLD_NUM 1810 56 - 57 - #define HUC_FW_PATH(platform, major, minor, bld_num) \ 58 - "i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \ 59 - __stringify(minor) "_" __stringify(bld_num) ".bin" 60 - 61 - #define I915_SKL_HUC_UCODE HUC_FW_PATH(skl, SKL_HUC_FW_MAJOR, \ 62 - SKL_HUC_FW_MINOR, SKL_BLD_NUM) 63 - MODULE_FIRMWARE(I915_SKL_HUC_UCODE); 64 - 65 - #define I915_BXT_HUC_UCODE HUC_FW_PATH(bxt, BXT_HUC_FW_MAJOR, \ 66 - BXT_HUC_FW_MINOR, BXT_BLD_NUM) 67 - MODULE_FIRMWARE(I915_BXT_HUC_UCODE); 68 - 69 - #define I915_KBL_HUC_UCODE HUC_FW_PATH(kbl, KBL_HUC_FW_MAJOR, \ 70 - KBL_HUC_FW_MINOR, KBL_BLD_NUM) 71 - MODULE_FIRMWARE(I915_KBL_HUC_UCODE); 72 - 73 - static void huc_fw_select(struct intel_uc_fw *huc_fw) 74 - { 75 - struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw); 76 - struct drm_i915_private *dev_priv = huc_to_i915(huc); 77 - 78 - GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC); 79 - 80 - if (!HAS_HUC(dev_priv)) 81 - return; 82 - 83 - if (i915_modparams.huc_firmware_path) { 84 - huc_fw->path = i915_modparams.huc_firmware_path; 85 - huc_fw->major_ver_wanted = 0; 86 - huc_fw->minor_ver_wanted = 0; 87 - } else if (IS_SKYLAKE(dev_priv)) { 88 - huc_fw->path = I915_SKL_HUC_UCODE; 89 - huc_fw->major_ver_wanted = SKL_HUC_FW_MAJOR; 90 - huc_fw->minor_ver_wanted = SKL_HUC_FW_MINOR; 91 - } else if (IS_BROXTON(dev_priv)) { 92 - huc_fw->path = I915_BXT_HUC_UCODE; 93 - huc_fw->major_ver_wanted = BXT_HUC_FW_MAJOR; 94 - huc_fw->minor_ver_wanted = BXT_HUC_FW_MINOR; 95 - } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) { 96 - huc_fw->path = I915_KBL_HUC_UCODE; 97 - huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR; 98 - huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR; 99 - } else { 100 - DRM_WARN("%s: No firmware known for this platform!\n", 101 - intel_uc_fw_type_repr(huc_fw->type)); 102 - } 103 - } 104 - 105 - /** 106 - * intel_huc_init_early() - initializes HuC struct 107 - * @huc: intel_huc struct 108 - * 109 - * On platforms with HuC selects firmware for uploading 110 - */ 111 30 void intel_huc_init_early(struct intel_huc *huc) 112 31 { 113 - struct intel_uc_fw *huc_fw = &huc->fw; 114 - 115 - intel_uc_fw_init(huc_fw, INTEL_UC_FW_TYPE_HUC); 116 - huc_fw_select(huc_fw); 117 - } 118 - 119 - /** 120 - * huc_ucode_xfer() - DMA's the firmware 121 - * @huc_fw: the firmware descriptor 122 - * @vma: the firmware image (bound into the GGTT) 123 - * 124 - * Transfer the firmware image to RAM for execution by the microcontroller. 
125 - * 126 - * Return: 0 on success, non-zero on failure 127 - */ 128 - static int huc_ucode_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma) 129 - { 130 - struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw); 131 - struct drm_i915_private *dev_priv = huc_to_i915(huc); 132 - unsigned long offset = 0; 133 - u32 size; 134 - int ret; 135 - 136 - GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC); 137 - 138 - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 139 - 140 - /* Set the source address for the uCode */ 141 - offset = guc_ggtt_offset(vma) + huc_fw->header_offset; 142 - I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset)); 143 - I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF); 144 - 145 - /* Hardware doesn't look at destination address for HuC. Set it to 0, 146 - * but still program the correct address space. 147 - */ 148 - I915_WRITE(DMA_ADDR_1_LOW, 0); 149 - I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); 150 - 151 - size = huc_fw->header_size + huc_fw->ucode_size; 152 - I915_WRITE(DMA_COPY_SIZE, size); 153 - 154 - /* Start the DMA */ 155 - I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA)); 156 - 157 - /* Wait for DMA to finish */ 158 - ret = intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0, 100); 159 - 160 - DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret); 161 - 162 - /* Disable the bits once DMA is over */ 163 - I915_WRITE(DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL)); 164 - 165 - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 166 - 167 - return ret; 168 - } 169 - 170 - /** 171 - * intel_huc_init_hw() - load HuC uCode to device 172 - * @huc: intel_huc structure 173 - * 174 - * Called from intel_uc_init_hw() during driver loading and also after a GPU 175 - * reset. Be note that HuC loading must be done before GuC loading. 176 - * 177 - * The firmware image should have already been fetched into memory by the 178 - * earlier call to intel_uc_init_fw(), so here we need only check that 179 - * is succeeded, and then transfer the image to the h/w. 
180 - * 181 - */ 182 - int intel_huc_init_hw(struct intel_huc *huc) 183 - { 184 - return intel_uc_fw_upload(&huc->fw, huc_ucode_xfer); 32 + intel_huc_fw_init_early(huc); 185 33 } 186 34 187 35 /** ··· 48 200 struct drm_i915_private *i915 = huc_to_i915(huc); 49 201 struct intel_guc *guc = &i915->guc; 50 202 struct i915_vma *vma; 203 + u32 status; 51 204 int ret; 52 205 53 206 if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS) ··· 59 210 if (IS_ERR(vma)) { 60 211 ret = PTR_ERR(vma); 61 212 DRM_ERROR("HuC: Failed to pin huc fw object %d\n", ret); 62 - return ret; 213 + goto fail; 63 214 } 64 215 65 216 ret = intel_guc_auth_huc(guc, 66 217 guc_ggtt_offset(vma) + huc->fw.rsa_offset); 67 218 if (ret) { 68 219 DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret); 69 - goto out; 220 + goto fail_unpin; 70 221 } 71 222 72 223 /* Check authentication status, it should be done by now */ 73 - ret = intel_wait_for_register(i915, 74 - HUC_STATUS2, 75 - HUC_FW_VERIFIED, 76 - HUC_FW_VERIFIED, 77 - 50); 224 + ret = __intel_wait_for_register(i915, 225 + HUC_STATUS2, 226 + HUC_FW_VERIFIED, 227 + HUC_FW_VERIFIED, 228 + 2, 50, &status); 78 229 if (ret) { 79 - DRM_ERROR("HuC: Authentication failed %d\n", ret); 80 - goto out; 230 + DRM_ERROR("HuC: Firmware not verified %#x\n", status); 231 + goto fail_unpin; 81 232 } 82 233 83 - out: 84 234 i915_vma_unpin(vma); 235 + return 0; 236 + 237 + fail_unpin: 238 + i915_vma_unpin(vma); 239 + fail: 240 + huc->fw.load_status = INTEL_UC_FIRMWARE_FAIL; 241 + 242 + DRM_ERROR("HuC: Authentication failed %d\n", ret); 85 243 return ret; 86 244 }
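The authentication path above now polls HUC_STATUS2 through __intel_wait_for_register() and keeps the last observed value for the error message. Assuming the usual i915 parameter order for that helper, the arguments in the call read as follows (a comment-only sketch, not new code):

/*
 * __intel_wait_for_register(i915,
 *			     HUC_STATUS2,	register to poll
 *			     HUC_FW_VERIFIED,	mask
 *			     HUC_FW_VERIFIED,	value to wait for (bit set)
 *			     2,			fast timeout, microseconds
 *			     50,		slow timeout, milliseconds
 *			     &status);		last value read, for the error
 */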
+1 -1
drivers/gpu/drm/i915/intel_huc.h
··· 26 26 #define _INTEL_HUC_H_ 27 27 28 28 #include "intel_uc_fw.h" 29 + #include "intel_huc_fw.h" 29 30 30 31 struct intel_huc { 31 32 /* Generic uC firmware management */ ··· 36 35 }; 37 36 38 37 void intel_huc_init_early(struct intel_huc *huc); 39 - int intel_huc_init_hw(struct intel_huc *huc); 40 38 int intel_huc_auth(struct intel_huc *huc); 41 39 42 40 #endif
+166
drivers/gpu/drm/i915/intel_huc_fw.c
··· 1 + /* 2 + * SPDX-License-Identifier: MIT 3 + * 4 + * Copyright © 2014-2018 Intel Corporation 5 + */ 6 + 7 + #include "intel_huc_fw.h" 8 + #include "i915_drv.h" 9 + 10 + /** 11 + * DOC: HuC Firmware 12 + * 13 + * Motivation: 14 + * GEN9 introduces a new dedicated firmware for usage in media HEVC (High 15 + * Efficiency Video Coding) operations. Userspace can use the firmware 16 + * capabilities by adding HuC specific commands to batch buffers. 17 + * 18 + * Implementation: 19 + * The same firmware loader is used as the GuC. However, the actual 20 + * loading to HW is deferred until GEM initialization is done. 21 + * 22 + * Note that HuC firmware loading must be done before GuC loading. 23 + */ 24 + 25 + #define BXT_HUC_FW_MAJOR 01 26 + #define BXT_HUC_FW_MINOR 07 27 + #define BXT_BLD_NUM 1398 28 + 29 + #define SKL_HUC_FW_MAJOR 01 30 + #define SKL_HUC_FW_MINOR 07 31 + #define SKL_BLD_NUM 1398 32 + 33 + #define KBL_HUC_FW_MAJOR 02 34 + #define KBL_HUC_FW_MINOR 00 35 + #define KBL_BLD_NUM 1810 36 + 37 + #define HUC_FW_PATH(platform, major, minor, bld_num) \ 38 + "i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \ 39 + __stringify(minor) "_" __stringify(bld_num) ".bin" 40 + 41 + #define I915_SKL_HUC_UCODE HUC_FW_PATH(skl, SKL_HUC_FW_MAJOR, \ 42 + SKL_HUC_FW_MINOR, SKL_BLD_NUM) 43 + MODULE_FIRMWARE(I915_SKL_HUC_UCODE); 44 + 45 + #define I915_BXT_HUC_UCODE HUC_FW_PATH(bxt, BXT_HUC_FW_MAJOR, \ 46 + BXT_HUC_FW_MINOR, BXT_BLD_NUM) 47 + MODULE_FIRMWARE(I915_BXT_HUC_UCODE); 48 + 49 + #define I915_KBL_HUC_UCODE HUC_FW_PATH(kbl, KBL_HUC_FW_MAJOR, \ 50 + KBL_HUC_FW_MINOR, KBL_BLD_NUM) 51 + MODULE_FIRMWARE(I915_KBL_HUC_UCODE); 52 + 53 + static void huc_fw_select(struct intel_uc_fw *huc_fw) 54 + { 55 + struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw); 56 + struct drm_i915_private *dev_priv = huc_to_i915(huc); 57 + 58 + GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC); 59 + 60 + if (!HAS_HUC(dev_priv)) 61 + return; 62 + 63 + if (i915_modparams.huc_firmware_path) { 64 + huc_fw->path = i915_modparams.huc_firmware_path; 65 + huc_fw->major_ver_wanted = 0; 66 + huc_fw->minor_ver_wanted = 0; 67 + } else if (IS_SKYLAKE(dev_priv)) { 68 + huc_fw->path = I915_SKL_HUC_UCODE; 69 + huc_fw->major_ver_wanted = SKL_HUC_FW_MAJOR; 70 + huc_fw->minor_ver_wanted = SKL_HUC_FW_MINOR; 71 + } else if (IS_BROXTON(dev_priv)) { 72 + huc_fw->path = I915_BXT_HUC_UCODE; 73 + huc_fw->major_ver_wanted = BXT_HUC_FW_MAJOR; 74 + huc_fw->minor_ver_wanted = BXT_HUC_FW_MINOR; 75 + } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) { 76 + huc_fw->path = I915_KBL_HUC_UCODE; 77 + huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR; 78 + huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR; 79 + } else { 80 + DRM_WARN("%s: No firmware known for this platform!\n", 81 + intel_uc_fw_type_repr(huc_fw->type)); 82 + } 83 + } 84 + 85 + /** 86 + * intel_huc_fw_init_early() - initializes HuC firmware struct 87 + * @huc: intel_huc struct 88 + * 89 + * On platforms with HuC selects firmware for uploading 90 + */ 91 + void intel_huc_fw_init_early(struct intel_huc *huc) 92 + { 93 + struct intel_uc_fw *huc_fw = &huc->fw; 94 + 95 + intel_uc_fw_init(huc_fw, INTEL_UC_FW_TYPE_HUC); 96 + huc_fw_select(huc_fw); 97 + } 98 + 99 + /** 100 + * huc_fw_xfer() - DMA's the firmware 101 + * @huc_fw: the firmware descriptor 102 + * @vma: the firmware image (bound into the GGTT) 103 + * 104 + * Transfer the firmware image to RAM for execution by the microcontroller. 
105 + * 106 + * Return: 0 on success, non-zero on failure 107 + */ 108 + static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma) 109 + { 110 + struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw); 111 + struct drm_i915_private *dev_priv = huc_to_i915(huc); 112 + unsigned long offset = 0; 113 + u32 size; 114 + int ret; 115 + 116 + GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC); 117 + 118 + intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 119 + 120 + /* Set the source address for the uCode */ 121 + offset = guc_ggtt_offset(vma) + huc_fw->header_offset; 122 + I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset)); 123 + I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF); 124 + 125 + /* Hardware doesn't look at destination address for HuC. Set it to 0, 126 + * but still program the correct address space. 127 + */ 128 + I915_WRITE(DMA_ADDR_1_LOW, 0); 129 + I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); 130 + 131 + size = huc_fw->header_size + huc_fw->ucode_size; 132 + I915_WRITE(DMA_COPY_SIZE, size); 133 + 134 + /* Start the DMA */ 135 + I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA)); 136 + 137 + /* Wait for DMA to finish */ 138 + ret = intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0, 100); 139 + 140 + DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret); 141 + 142 + /* Disable the bits once DMA is over */ 143 + I915_WRITE(DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL)); 144 + 145 + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 146 + 147 + return ret; 148 + } 149 + 150 + /** 151 + * intel_huc_fw_upload() - load HuC uCode to device 152 + * @huc: intel_huc structure 153 + * 154 + * Called from intel_uc_init_hw() during driver load, resume from sleep and 155 + * after a GPU reset. Note that HuC must be loaded before GuC. 156 + * 157 + * The firmware image should have already been fetched into memory by the 158 + * earlier call to intel_uc_init_fw(), so here we need to only check that 159 + * fetch succeeded, and then transfer the image to the h/w. 160 + * 161 + * Return: non-zero code on error 162 + */ 163 + int intel_huc_fw_upload(struct intel_huc *huc) 164 + { 165 + return intel_uc_fw_upload(&huc->fw, huc_fw_xfer); 166 + }
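HUC_FW_PATH() in the new file above assembles the firmware filename from stringified version numbers, so the Skylake definition resolves to "i915/skl_huc_ver01_07_1398.bin". A standalone sketch of the same two-level stringification trick, mirroring the kernel's __stringify() from <linux/stringify.h>:

#include <stdio.h>

/* Two levels so macro arguments are expanded before being stringified. */
#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

#define HUC_FW_PATH(platform, major, minor, bld_num) \
	"i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \
	__stringify(minor) "_" __stringify(bld_num) ".bin"

int main(void)
{
	/* Prints: i915/skl_huc_ver01_07_1398.bin */
	puts(HUC_FW_PATH(skl, 01, 07, 1398));
	return 0;
}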
+15
drivers/gpu/drm/i915/intel_huc_fw.h
··· 1 + /* 2 + * SPDX-License-Identifier: MIT 3 + * 4 + * Copyright © 2014-2018 Intel Corporation 5 + */ 6 + 7 + #ifndef _INTEL_HUC_FW_H_ 8 + #define _INTEL_HUC_FW_H_ 9 + 10 + struct intel_huc; 11 + 12 + void intel_huc_fw_init_early(struct intel_huc *huc); 13 + int intel_huc_fw_upload(struct intel_huc *huc); 14 + 15 + #endif
+290 -169
drivers/gpu/drm/i915/intel_lrc.c
··· 169 169 struct intel_engine_cs *engine, 170 170 struct intel_ring *ring); 171 171 172 + static inline struct i915_priolist *to_priolist(struct rb_node *rb) 173 + { 174 + return rb_entry(rb, struct i915_priolist, node); 175 + } 176 + 177 + static inline int rq_prio(const struct i915_request *rq) 178 + { 179 + return rq->priotree.priority; 180 + } 181 + 182 + static inline bool need_preempt(const struct intel_engine_cs *engine, 183 + const struct i915_request *last, 184 + int prio) 185 + { 186 + return engine->i915->preempt_context && prio > max(rq_prio(last), 0); 187 + } 188 + 172 189 /** 173 190 * intel_lr_context_descriptor_update() - calculate & cache the descriptor 174 191 * descriptor for a pinned context ··· 204 187 * bits 32-52: ctx ID, a globally unique tag 205 188 * bits 53-54: mbz, reserved for use by hardware 206 189 * bits 55-63: group ID, currently unused and set to 0 190 + * 191 + * Starting from Gen11, the upper dword of the descriptor has a new format: 192 + * 193 + * bits 32-36: reserved 194 + * bits 37-47: SW context ID 195 + * bits 48:53: engine instance 196 + * bit 54: mbz, reserved for use by hardware 197 + * bits 55-60: SW counter 198 + * bits 61-63: engine class 199 + * 200 + * engine info, SW context ID and SW counter need to form a unique number 201 + * (Context ID) per lrc. 207 202 */ 208 203 static void 209 204 intel_lr_context_descriptor_update(struct i915_gem_context *ctx, ··· 224 195 struct intel_context *ce = &ctx->engine[engine->id]; 225 196 u64 desc; 226 197 227 - BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH)); 198 + BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH))); 199 + BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > (BIT(GEN11_SW_CTX_ID_WIDTH))); 228 200 229 201 desc = ctx->desc_template; /* bits 0-11 */ 202 + GEM_BUG_ON(desc & GENMASK_ULL(63, 12)); 203 + 230 204 desc |= i915_ggtt_offset(ce->state) + LRC_HEADER_PAGES * PAGE_SIZE; 231 205 /* bits 12-31 */ 232 - desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */ 206 + GEM_BUG_ON(desc & GENMASK_ULL(63, 32)); 207 + 208 + if (INTEL_GEN(ctx->i915) >= 11) { 209 + GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH)); 210 + desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT; 211 + /* bits 37-47 */ 212 + 213 + desc |= (u64)engine->instance << GEN11_ENGINE_INSTANCE_SHIFT; 214 + /* bits 48-53 */ 215 + 216 + /* TODO: decide what to do with SW counter (bits 55-60) */ 217 + 218 + desc |= (u64)engine->class << GEN11_ENGINE_CLASS_SHIFT; 219 + /* bits 61-63 */ 220 + } else { 221 + GEM_BUG_ON(ctx->hw_id >= BIT(GEN8_CTX_ID_WIDTH)); 222 + desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */ 223 + } 233 224 234 225 ce->lrc_desc = desc; 235 226 } ··· 273 224 parent = &execlists->queue.rb_node; 274 225 while (*parent) { 275 226 rb = *parent; 276 - p = rb_entry(rb, typeof(*p), node); 227 + p = to_priolist(rb); 277 228 if (prio > p->priority) { 278 229 parent = &rb->rb_left; 279 230 } else if (prio < p->priority) { ··· 313 264 if (first) 314 265 execlists->first = &p->node; 315 266 316 - return ptr_pack_bits(p, first, 1); 267 + return p; 317 268 } 318 269 319 - static void unwind_wa_tail(struct drm_i915_gem_request *rq) 270 + static void unwind_wa_tail(struct i915_request *rq) 320 271 { 321 272 rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES); 322 273 assert_ring_tail_valid(rq->ring, rq->tail); ··· 324 275 325 276 static void __unwind_incomplete_requests(struct intel_engine_cs *engine) 326 277 { 327 - struct drm_i915_gem_request *rq, *rn; 278 + struct i915_request *rq, *rn; 328 
279 struct i915_priolist *uninitialized_var(p); 329 280 int last_prio = I915_PRIORITY_INVALID; 330 281 ··· 333 284 list_for_each_entry_safe_reverse(rq, rn, 334 285 &engine->timeline->requests, 335 286 link) { 336 - if (i915_gem_request_completed(rq)) 287 + if (i915_request_completed(rq)) 337 288 return; 338 289 339 - __i915_gem_request_unsubmit(rq); 290 + __i915_request_unsubmit(rq); 340 291 unwind_wa_tail(rq); 341 292 342 - GEM_BUG_ON(rq->priotree.priority == I915_PRIORITY_INVALID); 343 - if (rq->priotree.priority != last_prio) { 344 - p = lookup_priolist(engine, 345 - &rq->priotree, 346 - rq->priotree.priority); 347 - p = ptr_mask_bits(p, 1); 348 - 349 - last_prio = rq->priotree.priority; 293 + GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID); 294 + if (rq_prio(rq) != last_prio) { 295 + last_prio = rq_prio(rq); 296 + p = lookup_priolist(engine, &rq->priotree, last_prio); 350 297 } 351 298 352 299 list_add(&rq->priotree.link, &p->requests); ··· 361 316 } 362 317 363 318 static inline void 364 - execlists_context_status_change(struct drm_i915_gem_request *rq, 365 - unsigned long status) 319 + execlists_context_status_change(struct i915_request *rq, unsigned long status) 366 320 { 367 321 /* 368 322 * Only used when GVT-g is enabled now. When GVT-g is disabled, ··· 375 331 } 376 332 377 333 static inline void 378 - execlists_context_schedule_in(struct drm_i915_gem_request *rq) 334 + execlists_context_schedule_in(struct i915_request *rq) 379 335 { 380 336 execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN); 381 337 intel_engine_context_in(rq->engine); 382 338 } 383 339 384 340 static inline void 385 - execlists_context_schedule_out(struct drm_i915_gem_request *rq) 341 + execlists_context_schedule_out(struct i915_request *rq) 386 342 { 387 343 intel_engine_context_out(rq->engine); 388 344 execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT); ··· 397 353 ASSIGN_CTX_PDP(ppgtt, reg_state, 0); 398 354 } 399 355 400 - static u64 execlists_update_context(struct drm_i915_gem_request *rq) 356 + static u64 execlists_update_context(struct i915_request *rq) 401 357 { 402 358 struct intel_context *ce = &rq->ctx->engine[rq->engine->id]; 403 359 struct i915_hw_ppgtt *ppgtt = ··· 417 373 return ce->lrc_desc; 418 374 } 419 375 420 - static inline void elsp_write(u64 desc, u32 __iomem *elsp) 376 + static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port) 421 377 { 422 - writel(upper_32_bits(desc), elsp); 423 - writel(lower_32_bits(desc), elsp); 378 + if (execlists->ctrl_reg) { 379 + writel(lower_32_bits(desc), execlists->submit_reg + port * 2); 380 + writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1); 381 + } else { 382 + writel(upper_32_bits(desc), execlists->submit_reg); 383 + writel(lower_32_bits(desc), execlists->submit_reg); 384 + } 424 385 } 425 386 426 387 static void execlists_submit_ports(struct intel_engine_cs *engine) 427 388 { 428 - struct execlist_port *port = engine->execlists.port; 389 + struct intel_engine_execlists *execlists = &engine->execlists; 390 + struct execlist_port *port = execlists->port; 429 391 unsigned int n; 430 392 431 - for (n = execlists_num_ports(&engine->execlists); n--; ) { 432 - struct drm_i915_gem_request *rq; 393 + /* 394 + * ELSQ note: the submit queue is not cleared after being submitted 395 + * to the HW so we need to make sure we always clean it up. 
This is 396 + * currently ensured by the fact that we always write the same number 397 + * of elsq entries, keep this in mind before changing the loop below. 398 + */ 399 + for (n = execlists_num_ports(execlists); n--; ) { 400 + struct i915_request *rq; 433 401 unsigned int count; 434 402 u64 desc; 435 403 ··· 454 398 desc = execlists_update_context(rq); 455 399 GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc)); 456 400 457 - GEM_TRACE("%s in[%d]: ctx=%d.%d, seqno=%x\n", 401 + GEM_TRACE("%s in[%d]: ctx=%d.%d, seqno=%x, prio=%d\n", 458 402 engine->name, n, 459 403 port[n].context_id, count, 460 - rq->global_seqno); 404 + rq->global_seqno, 405 + rq_prio(rq)); 461 406 } else { 462 407 GEM_BUG_ON(!n); 463 408 desc = 0; 464 409 } 465 410 466 - elsp_write(desc, engine->execlists.elsp); 411 + write_desc(execlists, desc, n); 467 412 } 468 - execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_HWACK); 413 + 414 + /* we need to manually load the submit queue */ 415 + if (execlists->ctrl_reg) 416 + writel(EL_CTRL_LOAD, execlists->ctrl_reg); 417 + 418 + execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK); 469 419 } 470 420 471 421 static bool ctx_single_port_submission(const struct i915_gem_context *ctx) ··· 492 430 return true; 493 431 } 494 432 495 - static void port_assign(struct execlist_port *port, 496 - struct drm_i915_gem_request *rq) 433 + static void port_assign(struct execlist_port *port, struct i915_request *rq) 497 434 { 498 435 GEM_BUG_ON(rq == port_request(port)); 499 436 500 437 if (port_isset(port)) 501 - i915_gem_request_put(port_request(port)); 438 + i915_request_put(port_request(port)); 502 439 503 - port_set(port, port_pack(i915_gem_request_get(rq), port_count(port))); 440 + port_set(port, port_pack(i915_request_get(rq), port_count(port))); 504 441 } 505 442 506 443 static void inject_preempt_context(struct intel_engine_cs *engine) 507 444 { 445 + struct intel_engine_execlists *execlists = &engine->execlists; 508 446 struct intel_context *ce = 509 447 &engine->i915->preempt_context->engine[engine->id]; 510 448 unsigned int n; 511 449 512 - GEM_BUG_ON(engine->execlists.preempt_complete_status != 450 + GEM_BUG_ON(execlists->preempt_complete_status != 513 451 upper_32_bits(ce->lrc_desc)); 514 - GEM_BUG_ON(!IS_ALIGNED(ce->ring->size, WA_TAIL_BYTES)); 515 - 516 - memset(ce->ring->vaddr + ce->ring->tail, 0, WA_TAIL_BYTES); 517 - ce->ring->tail += WA_TAIL_BYTES; 518 - ce->ring->tail &= (ce->ring->size - 1); 519 - ce->lrc_reg_state[CTX_RING_TAIL+1] = ce->ring->tail; 520 - 521 452 GEM_BUG_ON((ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1] & 522 453 _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | 523 454 CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)) != 524 455 _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | 525 456 CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)); 526 457 458 + /* 459 + * Switch to our empty preempt context so 460 + * the state of the GPU is known (idle). 
461 + */ 527 462 GEM_TRACE("%s\n", engine->name); 528 - for (n = execlists_num_ports(&engine->execlists); --n; ) 529 - elsp_write(0, engine->execlists.elsp); 463 + for (n = execlists_num_ports(execlists); --n; ) 464 + write_desc(execlists, 0, n); 530 465 531 - elsp_write(ce->lrc_desc, engine->execlists.elsp); 466 + write_desc(execlists, ce->lrc_desc, n); 467 + 468 + /* we need to manually load the submit queue */ 469 + if (execlists->ctrl_reg) 470 + writel(EL_CTRL_LOAD, execlists->ctrl_reg); 471 + 532 472 execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_HWACK); 473 + execlists_set_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT); 533 474 } 534 475 535 476 static void execlists_dequeue(struct intel_engine_cs *engine) ··· 541 476 struct execlist_port *port = execlists->port; 542 477 const struct execlist_port * const last_port = 543 478 &execlists->port[execlists->port_mask]; 544 - struct drm_i915_gem_request *last = port_request(port); 479 + struct i915_request *last = port_request(port); 545 480 struct rb_node *rb; 546 481 bool submit = false; 547 482 ··· 569 504 spin_lock_irq(&engine->timeline->lock); 570 505 rb = execlists->first; 571 506 GEM_BUG_ON(rb_first(&execlists->queue) != rb); 572 - if (!rb) 573 - goto unlock; 574 507 575 508 if (last) { 576 509 /* ··· 591 528 if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK)) 592 529 goto unlock; 593 530 594 - if (engine->i915->preempt_context && 595 - rb_entry(rb, struct i915_priolist, node)->priority > 596 - max(last->priotree.priority, 0)) { 597 - /* 598 - * Switch to our empty preempt context so 599 - * the state of the GPU is known (idle). 600 - */ 531 + if (need_preempt(engine, last, execlists->queue_priority)) { 601 532 inject_preempt_context(engine); 602 - execlists_set_active(execlists, 603 - EXECLISTS_ACTIVE_PREEMPT); 604 533 goto unlock; 605 - } else { 606 - /* 607 - * In theory, we could coalesce more requests onto 608 - * the second port (the first port is active, with 609 - * no preemptions pending). However, that means we 610 - * then have to deal with the possible lite-restore 611 - * of the second port (as we submit the ELSP, there 612 - * may be a context-switch) but also we may complete 613 - * the resubmission before the context-switch. Ergo, 614 - * coalescing onto the second port will cause a 615 - * preemption event, but we cannot predict whether 616 - * that will affect port[0] or port[1]. 617 - * 618 - * If the second port is already active, we can wait 619 - * until the next context-switch before contemplating 620 - * new requests. The GPU will be busy and we should be 621 - * able to resubmit the new ELSP before it idles, 622 - * avoiding pipeline bubbles (momentary pauses where 623 - * the driver is unable to keep up the supply of new 624 - * work). 625 - */ 626 - if (port_count(&port[1])) 627 - goto unlock; 628 - 629 - /* WaIdleLiteRestore:bdw,skl 630 - * Apply the wa NOOPs to prevent 631 - * ring:HEAD == req:TAIL as we resubmit the 632 - * request. See gen8_emit_breadcrumb() for 633 - * where we prepare the padding after the 634 - * end of the request. 635 - */ 636 - last->tail = last->wa_tail; 637 534 } 535 + 536 + /* 537 + * In theory, we could coalesce more requests onto 538 + * the second port (the first port is active, with 539 + * no preemptions pending). 
However, that means we 540 + * then have to deal with the possible lite-restore 541 + * of the second port (as we submit the ELSP, there 542 + * may be a context-switch) but also we may complete 543 + * the resubmission before the context-switch. Ergo, 544 + * coalescing onto the second port will cause a 545 + * preemption event, but we cannot predict whether 546 + * that will affect port[0] or port[1]. 547 + * 548 + * If the second port is already active, we can wait 549 + * until the next context-switch before contemplating 550 + * new requests. The GPU will be busy and we should be 551 + * able to resubmit the new ELSP before it idles, 552 + * avoiding pipeline bubbles (momentary pauses where 553 + * the driver is unable to keep up the supply of new 554 + * work). However, we have to double check that the 555 + * priorities of the ports haven't been switch. 556 + */ 557 + if (port_count(&port[1])) 558 + goto unlock; 559 + 560 + /* 561 + * WaIdleLiteRestore:bdw,skl 562 + * Apply the wa NOOPs to prevent 563 + * ring:HEAD == rq:TAIL as we resubmit the 564 + * request. See gen8_emit_breadcrumb() for 565 + * where we prepare the padding after the 566 + * end of the request. 567 + */ 568 + last->tail = last->wa_tail; 638 569 } 639 570 640 - do { 641 - struct i915_priolist *p = rb_entry(rb, typeof(*p), node); 642 - struct drm_i915_gem_request *rq, *rn; 571 + while (rb) { 572 + struct i915_priolist *p = to_priolist(rb); 573 + struct i915_request *rq, *rn; 643 574 644 575 list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) { 645 576 /* ··· 683 626 } 684 627 685 628 INIT_LIST_HEAD(&rq->priotree.link); 686 - __i915_gem_request_submit(rq); 687 - trace_i915_gem_request_in(rq, port_index(port, execlists)); 629 + __i915_request_submit(rq); 630 + trace_i915_request_in(rq, port_index(port, execlists)); 688 631 last = rq; 689 632 submit = true; 690 633 } ··· 694 637 INIT_LIST_HEAD(&p->requests); 695 638 if (p->priority != I915_PRIORITY_NORMAL) 696 639 kmem_cache_free(engine->i915->priorities, p); 697 - } while (rb); 640 + } 698 641 done: 642 + execlists->queue_priority = rb ? to_priolist(rb)->priority : INT_MIN; 699 643 execlists->first = rb; 700 644 if (submit) 701 645 port_assign(port, last); ··· 723 665 unsigned int num_ports = execlists_num_ports(execlists); 724 666 725 667 while (num_ports-- && port_isset(port)) { 726 - struct drm_i915_gem_request *rq = port_request(port); 668 + struct i915_request *rq = port_request(port); 727 669 728 670 GEM_BUG_ON(!execlists->active); 729 671 intel_engine_context_out(rq->engine); 730 - execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_PREEMPTED); 731 - i915_gem_request_put(rq); 672 + 673 + execlists_context_status_change(rq, 674 + i915_request_completed(rq) ? 675 + INTEL_CONTEXT_SCHEDULE_OUT : 676 + INTEL_CONTEXT_SCHEDULE_PREEMPTED); 677 + 678 + i915_request_put(rq); 732 679 733 680 memset(port, 0, sizeof(*port)); 734 681 port++; ··· 743 680 static void execlists_cancel_requests(struct intel_engine_cs *engine) 744 681 { 745 682 struct intel_engine_execlists * const execlists = &engine->execlists; 746 - struct drm_i915_gem_request *rq, *rn; 683 + struct i915_request *rq, *rn; 747 684 struct rb_node *rb; 748 685 unsigned long flags; 749 686 750 - spin_lock_irqsave(&engine->timeline->lock, flags); 687 + GEM_TRACE("%s\n", engine->name); 688 + 689 + /* 690 + * Before we call engine->cancel_requests(), we should have exclusive 691 + * access to the submission state. 
This is arranged for us by the 692 + * caller disabling the interrupt generation, the tasklet and other 693 + * threads that may then access the same state, giving us a free hand 694 + * to reset state. However, we still need to let lockdep be aware that 695 + * we know this state may be accessed in hardirq context, so we 696 + * disable the irq around this manipulation and we want to keep 697 + * the spinlock focused on its duties and not accidentally conflate 698 + * coverage to the submission's irq state. (Similarly, although we 699 + * shouldn't need to disable irq around the manipulation of the 700 + * submission's irq state, we also wish to remind ourselves that 701 + * it is irq state.) 702 + */ 703 + local_irq_save(flags); 751 704 752 705 /* Cancel the requests on the HW and clear the ELSP tracker. */ 753 706 execlists_cancel_port_requests(execlists); 754 707 708 + spin_lock(&engine->timeline->lock); 709 + 755 710 /* Mark all executing requests as skipped. */ 756 711 list_for_each_entry(rq, &engine->timeline->requests, link) { 757 712 GEM_BUG_ON(!rq->global_seqno); 758 - if (!i915_gem_request_completed(rq)) 713 + if (!i915_request_completed(rq)) 759 714 dma_fence_set_error(&rq->fence, -EIO); 760 715 } 761 716 762 717 /* Flush the queued requests to the timeline list (for retiring). */ 763 718 rb = execlists->first; 764 719 while (rb) { 765 - struct i915_priolist *p = rb_entry(rb, typeof(*p), node); 720 + struct i915_priolist *p = to_priolist(rb); 766 721 767 722 list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) { 768 723 INIT_LIST_HEAD(&rq->priotree.link); 769 724 770 725 dma_fence_set_error(&rq->fence, -EIO); 771 - __i915_gem_request_submit(rq); 726 + __i915_request_submit(rq); 772 727 } 773 728 774 729 rb = rb_next(rb); ··· 798 717 799 718 /* Remaining _unready_ requests will be nop'ed when submitted */ 800 719 801 - 720 + execlists->queue_priority = INT_MIN; 802 721 execlists->queue = RB_ROOT; 803 722 execlists->first = NULL; 804 723 GEM_BUG_ON(port_isset(execlists->port)); 724 + 725 + spin_unlock(&engine->timeline->lock); 805 726 806 727 /* 807 728 * The port is checked prior to scheduling a tasklet, but ··· 813 730 */ 814 731 clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); 815 732 816 - spin_unlock_irqrestore(&engine->timeline->lock, flags); 733 + /* Mark all CS interrupts as complete */ 734 + execlists->active = 0; 735 + 736 + local_irq_restore(flags); 817 737 } 818 738 819 739 /* ··· 892 806 tail, GEN8_CSB_WRITE_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?"); 893 807 894 808 while (head != tail) { 895 - struct drm_i915_gem_request *rq; 809 + struct i915_request *rq; 896 810 unsigned int status; 897 811 unsigned int count; 898 812 ··· 958 872 GEM_BUG_ON(!execlists_is_active(execlists, 959 873 EXECLISTS_ACTIVE_USER)); 960 874 875 + rq = port_unpack(port, &count); 876 + GEM_TRACE("%s out[0]: ctx=%d.%d, seqno=%x, prio=%d\n", 877 + engine->name, 878 + port->context_id, count, 879 + rq ? rq->global_seqno : 0, 880 + rq ? rq_prio(rq) : 0); 881 + 961 882 /* Check the context/desc id for this event matches */ 962 883 GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id); 963 884 964 - rq = port_unpack(port, &count); 965 - GEM_TRACE("%s out[0]: ctx=%d.%d, seqno=%x\n", 966 - engine->name, 967 - port->context_id, count, 968 - rq ? 
rq->global_seqno : 0); 969 885 GEM_BUG_ON(count == 0); 970 886 if (--count == 0) { 971 887 GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED); 972 888 GEM_BUG_ON(port_isset(&port[1]) && 973 889 !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH)); 974 - GEM_BUG_ON(!i915_gem_request_completed(rq)); 890 + GEM_BUG_ON(!i915_request_completed(rq)); 975 891 execlists_context_schedule_out(rq); 976 - trace_i915_gem_request_out(rq); 977 - i915_gem_request_put(rq); 892 + trace_i915_request_out(rq); 893 + i915_request_put(rq); 894 + 895 + GEM_TRACE("%s completed ctx=%d\n", 896 + engine->name, port->context_id); 978 897 979 898 execlists_port_complete(execlists, port); 980 899 } else { ··· 1008 917 intel_uncore_forcewake_put(dev_priv, execlists->fw_domains); 1009 918 } 1010 919 1011 - static void insert_request(struct intel_engine_cs *engine, 1012 - struct i915_priotree *pt, 1013 - int prio) 920 + static void queue_request(struct intel_engine_cs *engine, 921 + struct i915_priotree *pt, 922 + int prio) 1014 923 { 1015 - struct i915_priolist *p = lookup_priolist(engine, pt, prio); 1016 - 1017 - list_add_tail(&pt->link, &ptr_mask_bits(p, 1)->requests); 1018 - if (ptr_unmask_bits(p, 1)) 1019 - tasklet_hi_schedule(&engine->execlists.tasklet); 924 + list_add_tail(&pt->link, &lookup_priolist(engine, pt, prio)->requests); 1020 925 } 1021 926 1022 - static void execlists_submit_request(struct drm_i915_gem_request *request) 927 + static void submit_queue(struct intel_engine_cs *engine, int prio) 928 + { 929 + if (prio > engine->execlists.queue_priority) { 930 + engine->execlists.queue_priority = prio; 931 + tasklet_hi_schedule(&engine->execlists.tasklet); 932 + } 933 + } 934 + 935 + static void execlists_submit_request(struct i915_request *request) 1023 936 { 1024 937 struct intel_engine_cs *engine = request->engine; 1025 938 unsigned long flags; ··· 1031 936 /* Will be called from irq-context when using foreign fences. 
*/ 1032 937 spin_lock_irqsave(&engine->timeline->lock, flags); 1033 938 1034 - insert_request(engine, &request->priotree, request->priotree.priority); 939 + queue_request(engine, &request->priotree, rq_prio(request)); 940 + submit_queue(engine, rq_prio(request)); 1035 941 1036 942 GEM_BUG_ON(!engine->execlists.first); 1037 943 GEM_BUG_ON(list_empty(&request->priotree.link)); ··· 1040 944 spin_unlock_irqrestore(&engine->timeline->lock, flags); 1041 945 } 1042 946 1043 - static struct drm_i915_gem_request *pt_to_request(struct i915_priotree *pt) 947 + static struct i915_request *pt_to_request(struct i915_priotree *pt) 1044 948 { 1045 - return container_of(pt, struct drm_i915_gem_request, priotree); 949 + return container_of(pt, struct i915_request, priotree); 1046 950 } 1047 951 1048 952 static struct intel_engine_cs * ··· 1060 964 return engine; 1061 965 } 1062 966 1063 - static void execlists_schedule(struct drm_i915_gem_request *request, int prio) 967 + static void execlists_schedule(struct i915_request *request, int prio) 1064 968 { 1065 969 struct intel_engine_cs *engine; 1066 970 struct i915_dependency *dep, *p; ··· 1069 973 1070 974 GEM_BUG_ON(prio == I915_PRIORITY_INVALID); 1071 975 1072 - if (i915_gem_request_completed(request)) 976 + if (i915_request_completed(request)) 1073 977 return; 1074 978 1075 979 if (prio <= READ_ONCE(request->priotree.priority)) ··· 1088 992 * static void update_priorities(struct i915_priotree *pt, prio) { 1089 993 * list_for_each_entry(dep, &pt->signalers_list, signal_link) 1090 994 * update_priorities(dep->signal, prio) 1091 - * insert_request(pt); 995 + * queue_request(pt); 1092 996 * } 1093 997 * but that may have unlimited recursion depth and so runs a very 1094 998 * real risk of overunning the kernel stack. Instead, we build ··· 1151 1055 pt->priority = prio; 1152 1056 if (!list_empty(&pt->link)) { 1153 1057 __list_del_entry(&pt->link); 1154 - insert_request(engine, pt, prio); 1058 + queue_request(engine, pt, prio); 1155 1059 } 1060 + submit_queue(engine, prio); 1156 1061 } 1157 1062 1158 1063 spin_unlock_irq(&engine->timeline->lock); ··· 1255 1158 i915_gem_context_put(ctx); 1256 1159 } 1257 1160 1258 - static int execlists_request_alloc(struct drm_i915_gem_request *request) 1161 + static int execlists_request_alloc(struct i915_request *request) 1259 1162 { 1260 1163 struct intel_engine_cs *engine = request->engine; 1261 1164 struct intel_context *ce = &request->ctx->engine[engine->id]; ··· 1687 1590 } 1688 1591 1689 1592 static void reset_common_ring(struct intel_engine_cs *engine, 1690 - struct drm_i915_gem_request *request) 1593 + struct i915_request *request) 1691 1594 { 1692 1595 struct intel_engine_execlists * const execlists = &engine->execlists; 1693 1596 struct intel_context *ce; ··· 1696 1599 GEM_TRACE("%s seqno=%x\n", 1697 1600 engine->name, request ? request->global_seqno : 0); 1698 1601 1699 - reset_irq(engine); 1602 + /* See execlists_cancel_requests() for the irq/spinlock split. */ 1603 + local_irq_save(flags); 1700 1604 1701 - spin_lock_irqsave(&engine->timeline->lock, flags); 1605 + reset_irq(engine); 1702 1606 1703 1607 /* 1704 1608 * Catch up with any missed context-switch interrupts. ··· 1713 1615 execlists_cancel_port_requests(execlists); 1714 1616 1715 1617 /* Push back any incomplete requests for replay after the reset. 
*/ 1618 + spin_lock(&engine->timeline->lock); 1716 1619 __unwind_incomplete_requests(engine); 1717 - 1718 - spin_unlock_irqrestore(&engine->timeline->lock, flags); 1620 + spin_unlock(&engine->timeline->lock); 1719 1621 1720 1622 /* Mark all CS interrupts as complete */ 1721 1623 execlists->active = 0; 1722 1624 1723 - /* If the request was innocent, we leave the request in the ELSP 1625 + local_irq_restore(flags); 1626 + 1627 + /* 1628 + * If the request was innocent, we leave the request in the ELSP 1724 1629 * and will try to replay it on restarting. The context image may 1725 1630 * have been corrupted by the reset, in which case we may have 1726 1631 * to service a new GPU hang, but more likely we can continue on ··· 1736 1635 if (!request || request->fence.error != -EIO) 1737 1636 return; 1738 1637 1739 - /* We want a simple context + ring to execute the breadcrumb update. 1638 + /* 1639 + * We want a simple context + ring to execute the breadcrumb update. 1740 1640 * We cannot rely on the context being intact across the GPU hang, 1741 1641 * so clear it and rebuild just what we need for the breadcrumb. 1742 1642 * All pending requests for this context will be zapped, and any ··· 1760 1658 unwind_wa_tail(request); 1761 1659 } 1762 1660 1763 - static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) 1661 + static int intel_logical_ring_emit_pdps(struct i915_request *rq) 1764 1662 { 1765 - struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt; 1766 - struct intel_engine_cs *engine = req->engine; 1663 + struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt; 1664 + struct intel_engine_cs *engine = rq->engine; 1767 1665 const int num_lri_cmds = GEN8_3LVL_PDPES * 2; 1768 1666 u32 *cs; 1769 1667 int i; 1770 1668 1771 - cs = intel_ring_begin(req, num_lri_cmds * 2 + 2); 1669 + cs = intel_ring_begin(rq, num_lri_cmds * 2 + 2); 1772 1670 if (IS_ERR(cs)) 1773 1671 return PTR_ERR(cs); 1774 1672 ··· 1783 1681 } 1784 1682 1785 1683 *cs++ = MI_NOOP; 1786 - intel_ring_advance(req, cs); 1684 + intel_ring_advance(rq, cs); 1787 1685 1788 1686 return 0; 1789 1687 } 1790 1688 1791 - static int gen8_emit_bb_start(struct drm_i915_gem_request *req, 1689 + static int gen8_emit_bb_start(struct i915_request *rq, 1792 1690 u64 offset, u32 len, 1793 1691 const unsigned int flags) 1794 1692 { ··· 1801 1699 * it is unsafe in case of lite-restore (because the ctx is 1802 1700 * not idle). PML4 is allocated during ppgtt init so this is 1803 1701 * not needed in 48-bit.*/ 1804 - if (req->ctx->ppgtt && 1805 - (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings) && 1806 - !i915_vm_is_48bit(&req->ctx->ppgtt->base) && 1807 - !intel_vgpu_active(req->i915)) { 1808 - ret = intel_logical_ring_emit_pdps(req); 1702 + if (rq->ctx->ppgtt && 1703 + (intel_engine_flag(rq->engine) & rq->ctx->ppgtt->pd_dirty_rings) && 1704 + !i915_vm_is_48bit(&rq->ctx->ppgtt->base) && 1705 + !intel_vgpu_active(rq->i915)) { 1706 + ret = intel_logical_ring_emit_pdps(rq); 1809 1707 if (ret) 1810 1708 return ret; 1811 1709 1812 - req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine); 1710 + rq->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine); 1813 1711 } 1814 1712 1815 - cs = intel_ring_begin(req, 4); 1713 + cs = intel_ring_begin(rq, 4); 1816 1714 if (IS_ERR(cs)) 1817 1715 return PTR_ERR(cs); 1818 1716 ··· 1841 1739 (flags & I915_DISPATCH_RS ? 
MI_BATCH_RESOURCE_STREAMER : 0); 1842 1740 *cs++ = lower_32_bits(offset); 1843 1741 *cs++ = upper_32_bits(offset); 1844 - intel_ring_advance(req, cs); 1742 + intel_ring_advance(rq, cs); 1845 1743 1846 1744 return 0; 1847 1745 } ··· 1860 1758 I915_WRITE_IMR(engine, ~engine->irq_keep_mask); 1861 1759 } 1862 1760 1863 - static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode) 1761 + static int gen8_emit_flush(struct i915_request *request, u32 mode) 1864 1762 { 1865 1763 u32 cmd, *cs; 1866 1764 ··· 1892 1790 return 0; 1893 1791 } 1894 1792 1895 - static int gen8_emit_flush_render(struct drm_i915_gem_request *request, 1793 + static int gen8_emit_flush_render(struct i915_request *request, 1896 1794 u32 mode) 1897 1795 { 1898 1796 struct intel_engine_cs *engine = request->engine; ··· 1967 1865 * used as a workaround for not being allowed to do lite 1968 1866 * restore with HEAD==TAIL (WaIdleLiteRestore). 1969 1867 */ 1970 - static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *cs) 1868 + static void gen8_emit_wa_tail(struct i915_request *request, u32 *cs) 1971 1869 { 1972 1870 /* Ensure there's always at least one preemption point per-request. */ 1973 1871 *cs++ = MI_ARB_CHECK; ··· 1975 1873 request->wa_tail = intel_ring_offset(request, cs); 1976 1874 } 1977 1875 1978 - static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs) 1876 + static void gen8_emit_breadcrumb(struct i915_request *request, u32 *cs) 1979 1877 { 1980 1878 /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */ 1981 1879 BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5)); ··· 1991 1889 } 1992 1890 static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS; 1993 1891 1994 - static void gen8_emit_breadcrumb_rcs(struct drm_i915_gem_request *request, 1995 - u32 *cs) 1892 + static void gen8_emit_breadcrumb_rcs(struct i915_request *request, u32 *cs) 1996 1893 { 1997 1894 /* We're using qword write, seqno should be aligned to 8 bytes. */ 1998 1895 BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1); ··· 2007 1906 } 2008 1907 static const int gen8_emit_breadcrumb_rcs_sz = 8 + WA_TAIL_DWORDS; 2009 1908 2010 - static int gen8_init_rcs_context(struct drm_i915_gem_request *req) 1909 + static int gen8_init_rcs_context(struct i915_request *rq) 2011 1910 { 2012 1911 int ret; 2013 1912 2014 - ret = intel_ring_workarounds_emit(req); 1913 + ret = intel_ring_workarounds_emit(rq); 2015 1914 if (ret) 2016 1915 return ret; 2017 1916 2018 - ret = intel_rcs_context_init_mocs(req); 1917 + ret = intel_rcs_context_init_mocs(rq); 2019 1918 /* 2020 1919 * Failing to program the MOCS is non-fatal.The system will not 2021 1920 * run at peak performance. So generate an error and carry on. ··· 2023 1922 if (ret) 2024 1923 DRM_ERROR("MOCS failed to program: expect performance issues.\n"); 2025 1924 2026 - return i915_gem_render_state_emit(req); 1925 + return i915_gem_render_state_emit(rq); 2027 1926 } 2028 1927 2029 1928 /** ··· 2097 1996 2098 1997 engine->set_default_submission = execlists_set_default_submission; 2099 1998 2100 - engine->irq_enable = gen8_logical_ring_enable_irq; 2101 - engine->irq_disable = gen8_logical_ring_disable_irq; 1999 + if (INTEL_GEN(engine->i915) < 11) { 2000 + engine->irq_enable = gen8_logical_ring_enable_irq; 2001 + engine->irq_disable = gen8_logical_ring_disable_irq; 2002 + } else { 2003 + /* 2004 + * TODO: On Gen11 interrupt masks need to be clear 2005 + * to allow C6 entry. 
Keep interrupts enabled at 2006 + * and take the hit of generating extra interrupts 2007 + * until a more refined solution exists. 2008 + */ 2009 + } 2102 2010 engine->emit_bb_start = gen8_emit_bb_start; 2103 2011 } 2104 2012 ··· 2159 2049 if (ret) 2160 2050 goto error; 2161 2051 2162 - engine->execlists.elsp = 2163 - engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine)); 2052 + if (HAS_LOGICAL_RING_ELSQ(engine->i915)) { 2053 + engine->execlists.submit_reg = engine->i915->regs + 2054 + i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine)); 2055 + engine->execlists.ctrl_reg = engine->i915->regs + 2056 + i915_mmio_reg_offset(RING_EXECLIST_CONTROL(engine)); 2057 + } else { 2058 + engine->execlists.submit_reg = engine->i915->regs + 2059 + i915_mmio_reg_offset(RING_ELSP(engine)); 2060 + } 2164 2061 2165 2062 engine->execlists.preempt_complete_status = ~0u; 2166 2063 if (engine->i915->preempt_context) ··· 2253 2136 2254 2137 if (INTEL_INFO(dev_priv)->sseu.has_subslice_pg) { 2255 2138 rpcs |= GEN8_RPCS_SS_CNT_ENABLE; 2256 - rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask) << 2139 + rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask[0]) << 2257 2140 GEN8_RPCS_SS_CNT_SHIFT; 2258 2141 rpcs |= GEN8_RPCS_ENABLE; 2259 2142 } ··· 2277 2160 default: 2278 2161 MISSING_CASE(INTEL_GEN(engine->i915)); 2279 2162 /* fall through */ 2163 + case 11: 2164 + indirect_ctx_offset = 2165 + GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; 2166 + break; 2280 2167 case 10: 2281 2168 indirect_ctx_offset = 2282 2169 GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; ··· 2440 2319 if (!engine->default_state) 2441 2320 regs[CTX_CONTEXT_CONTROL + 1] |= 2442 2321 _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); 2443 - if (ctx == ctx->i915->preempt_context) 2322 + if (ctx == ctx->i915->preempt_context && INTEL_GEN(engine->i915) < 11) 2444 2323 regs[CTX_CONTEXT_CONTROL + 1] |= 2445 2324 _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | 2446 2325 CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
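The intel_lrc.c hunks above split request submission in two: queue_request() places the request on its priority list, while submit_queue() only kicks the execlists tasklet when the new priority beats execlists.queue_priority. A minimal userspace sketch of that gating, with hypothetical names and none of the real locking or dequeue-side bookkeeping:

#include <limits.h>
#include <stdio.h>

struct engine_model {
	int queue_priority;	/* highest pending priority; INT_MIN when idle */
	int tasklet_kicks;	/* stands in for tasklet_hi_schedule() calls   */
};

static void submit_queue(struct engine_model *e, int prio)
{
	/* Only wake the scheduler if this request can change its decision. */
	if (prio > e->queue_priority) {
		e->queue_priority = prio;
		e->tasklet_kicks++;
	}
}

int main(void)
{
	struct engine_model e = { .queue_priority = INT_MIN, .tasklet_kicks = 0 };

	submit_queue(&e, 0);	/* first request: kick                        */
	submit_queue(&e, -5);	/* lower priority: queued, no kick            */
	submit_queue(&e, 10);	/* higher priority: kick, possible preemption */

	printf("kicks=%d, highest pending=%d\n", e.tasklet_kicks, e.queue_priority);
	return 0;
}

The real driver resets queue_priority from the dequeue path; that part is omitted here.
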
+3
drivers/gpu/drm/i915/intel_lrc.h
··· 42 42 #define RING_CONTEXT_STATUS_BUF_LO(engine, i) _MMIO((engine)->mmio_base + 0x370 + (i) * 8) 43 43 #define RING_CONTEXT_STATUS_BUF_HI(engine, i) _MMIO((engine)->mmio_base + 0x370 + (i) * 8 + 4) 44 44 #define RING_CONTEXT_STATUS_PTR(engine) _MMIO((engine)->mmio_base + 0x3a0) 45 + #define RING_EXECLIST_SQ_CONTENTS(engine) _MMIO((engine)->mmio_base + 0x510) 46 + #define RING_EXECLIST_CONTROL(engine) _MMIO((engine)->mmio_base + 0x550) 47 + #define EL_CTRL_LOAD (1 << 0) 45 48 46 49 /* The docs specify that the write pointer wraps around after 5h, "After status 47 50 * is written out to the last available status QW at offset 5h, this pointer
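The two registers added above sit at 0x510 and 0x550 from the engine's mmio base and back the submit_reg/ctrl_reg split in intel_lrc.c: pre-Gen11 descriptors go straight to the ELSP port, while Gen11+ fills the ExecList Submission Queue and then sets EL_CTRL_LOAD in the control register. A rough userspace sketch of the Gen11+ flow; the per-port dword layout is assumed for illustration only:

#include <stdint.h>
#include <stdio.h>

#define EL_CTRL_LOAD (1 << 0)

static uint32_t fake_mmio[0x600 / 4];	/* stand-in for the engine MMIO range */

static void reg_write(uint32_t offset, uint32_t val)
{
	fake_mmio[offset / 4] = val;
	printf("write 0x%03x <- 0x%08x\n", offset, val);
}

/* Gen11+ style: fill the submit queue, then ask the HW to latch it. */
static void submit_elsq(const uint64_t *descs, int count)
{
	for (int i = 0; i < count; i++) {
		reg_write(0x510 + 8 * i,     (uint32_t)descs[i]);         /* low  */
		reg_write(0x510 + 8 * i + 4, (uint32_t)(descs[i] >> 32)); /* high */
	}
	reg_write(0x550, EL_CTRL_LOAD);
}

int main(void)
{
	const uint64_t descs[2] = { 0x1111222233334444ull, 0x5555666677778888ull };

	submit_elsq(descs, 2);
	return 0;
}
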
+1
drivers/gpu/drm/i915/intel_lrc_reg.h
··· 63 63 #define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17 64 64 #define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26 65 65 #define GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x19 66 + #define GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x1A 66 67 67 68 #endif /* _INTEL_LRC_REG_H_ */
+14 -14
drivers/gpu/drm/i915/intel_mocs.c
··· 265 265 266 266 /** 267 267 * emit_mocs_control_table() - emit the mocs control table 268 - * @req: Request to set up the MOCS table for. 268 + * @rq: Request to set up the MOCS table for. 269 269 * @table: The values to program into the control regs. 270 270 * 271 271 * This function simply emits a MI_LOAD_REGISTER_IMM command for the ··· 273 273 * 274 274 * Return: 0 on success, otherwise the error status. 275 275 */ 276 - static int emit_mocs_control_table(struct drm_i915_gem_request *req, 276 + static int emit_mocs_control_table(struct i915_request *rq, 277 277 const struct drm_i915_mocs_table *table) 278 278 { 279 - enum intel_engine_id engine = req->engine->id; 279 + enum intel_engine_id engine = rq->engine->id; 280 280 unsigned int index; 281 281 u32 *cs; 282 282 283 283 if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES)) 284 284 return -ENODEV; 285 285 286 - cs = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES); 286 + cs = intel_ring_begin(rq, 2 + 2 * GEN9_NUM_MOCS_ENTRIES); 287 287 if (IS_ERR(cs)) 288 288 return PTR_ERR(cs); 289 289 ··· 308 308 } 309 309 310 310 *cs++ = MI_NOOP; 311 - intel_ring_advance(req, cs); 311 + intel_ring_advance(rq, cs); 312 312 313 313 return 0; 314 314 } ··· 323 323 324 324 /** 325 325 * emit_mocs_l3cc_table() - emit the mocs control table 326 - * @req: Request to set up the MOCS table for. 326 + * @rq: Request to set up the MOCS table for. 327 327 * @table: The values to program into the control regs. 328 328 * 329 329 * This function simply emits a MI_LOAD_REGISTER_IMM command for the ··· 332 332 * 333 333 * Return: 0 on success, otherwise the error status. 334 334 */ 335 - static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req, 335 + static int emit_mocs_l3cc_table(struct i915_request *rq, 336 336 const struct drm_i915_mocs_table *table) 337 337 { 338 338 unsigned int i; ··· 341 341 if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES)) 342 342 return -ENODEV; 343 343 344 - cs = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES); 344 + cs = intel_ring_begin(rq, 2 + GEN9_NUM_MOCS_ENTRIES); 345 345 if (IS_ERR(cs)) 346 346 return PTR_ERR(cs); 347 347 ··· 370 370 } 371 371 372 372 *cs++ = MI_NOOP; 373 - intel_ring_advance(req, cs); 373 + intel_ring_advance(rq, cs); 374 374 375 375 return 0; 376 376 } ··· 417 417 418 418 /** 419 419 * intel_rcs_context_init_mocs() - program the MOCS register. 420 - * @req: Request to set up the MOCS tables for. 420 + * @rq: Request to set up the MOCS tables for. 421 421 * 422 422 * This function will emit a batch buffer with the values required for 423 423 * programming the MOCS register values for all the currently supported ··· 431 431 * 432 432 * Return: 0 on success, otherwise the error status. 433 433 */ 434 - int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req) 434 + int intel_rcs_context_init_mocs(struct i915_request *rq) 435 435 { 436 436 struct drm_i915_mocs_table t; 437 437 int ret; 438 438 439 - if (get_mocs_settings(req->i915, &t)) { 439 + if (get_mocs_settings(rq->i915, &t)) { 440 440 /* Program the RCS control registers */ 441 - ret = emit_mocs_control_table(req, &t); 441 + ret = emit_mocs_control_table(rq, &t); 442 442 if (ret) 443 443 return ret; 444 444 445 445 /* Now program the l3cc registers */ 446 - ret = emit_mocs_l3cc_table(req, &t); 446 + ret = emit_mocs_l3cc_table(rq, &t); 447 447 if (ret) 448 448 return ret; 449 449 }
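emit_mocs_control_table() above is a plain MI_LOAD_REGISTER_IMM batch: one header dword whose count covers all register/value pairs, followed by the payload, with an MI_NOOP to keep the packet qword aligned (hence the 2 + 2 * GEN9_NUM_MOCS_ENTRIES ring reservation). A standalone sketch of that packet shape; the register offsets are hypothetical and only the usual opcode<<23 MI encoding is assumed:

#include <stdint.h>
#include <stdio.h>

#define MI_NOOP			0u
#define MI_INSTR(op, flags)	(((uint32_t)(op) << 23) | (flags))
#define MI_LOAD_REGISTER_IMM(n)	MI_INSTR(0x22, 2 * (n) - 1)

/* Build an LRI packet programming `count` registers, NOOP-padded to an
 * even number of dwords, the same shape emit_mocs_control_table() uses. */
static int build_lri(uint32_t *cs, const uint32_t *regs,
		     const uint32_t *values, int count)
{
	int n = 0;

	cs[n++] = MI_LOAD_REGISTER_IMM(count);
	for (int i = 0; i < count; i++) {
		cs[n++] = regs[i];	/* register offset */
		cs[n++] = values[i];	/* value to load   */
	}
	cs[n++] = MI_NOOP;		/* keep the packet qword aligned */
	return n;
}

int main(void)
{
	uint32_t cs[8];
	const uint32_t regs[2] = { 0xc800, 0xc804 };	/* hypothetical offsets */
	const uint32_t vals[2] = { 0x0000003b, 0x0000003b };
	int n = build_lri(cs, regs, vals, 2);

	for (int i = 0; i < n; i++)
		printf("dword[%d] = 0x%08x\n", i, cs[i]);
	return 0;
}
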
+1 -1
drivers/gpu/drm/i915/intel_mocs.h
··· 52 52 #include <drm/drmP.h> 53 53 #include "i915_drv.h" 54 54 55 - int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req); 55 + int intel_rcs_context_init_mocs(struct i915_request *rq); 56 56 void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv); 57 57 int intel_mocs_init_engine(struct intel_engine_cs *engine); 58 58
+41 -41
drivers/gpu/drm/i915/intel_overlay.c
··· 234 234 } 235 235 236 236 static void intel_overlay_submit_request(struct intel_overlay *overlay, 237 - struct drm_i915_gem_request *req, 237 + struct i915_request *rq, 238 238 i915_gem_retire_fn retire) 239 239 { 240 240 GEM_BUG_ON(i915_gem_active_peek(&overlay->last_flip, 241 241 &overlay->i915->drm.struct_mutex)); 242 242 i915_gem_active_set_retire_fn(&overlay->last_flip, retire, 243 243 &overlay->i915->drm.struct_mutex); 244 - i915_gem_active_set(&overlay->last_flip, req); 245 - i915_add_request(req); 244 + i915_gem_active_set(&overlay->last_flip, rq); 245 + i915_request_add(rq); 246 246 } 247 247 248 248 static int intel_overlay_do_wait_request(struct intel_overlay *overlay, 249 - struct drm_i915_gem_request *req, 249 + struct i915_request *rq, 250 250 i915_gem_retire_fn retire) 251 251 { 252 - intel_overlay_submit_request(overlay, req, retire); 252 + intel_overlay_submit_request(overlay, rq, retire); 253 253 return i915_gem_active_retire(&overlay->last_flip, 254 254 &overlay->i915->drm.struct_mutex); 255 255 } 256 256 257 - static struct drm_i915_gem_request *alloc_request(struct intel_overlay *overlay) 257 + static struct i915_request *alloc_request(struct intel_overlay *overlay) 258 258 { 259 259 struct drm_i915_private *dev_priv = overlay->i915; 260 260 struct intel_engine_cs *engine = dev_priv->engine[RCS]; 261 261 262 - return i915_gem_request_alloc(engine, dev_priv->kernel_context); 262 + return i915_request_alloc(engine, dev_priv->kernel_context); 263 263 } 264 264 265 265 /* overlay needs to be disable in OCMD reg */ 266 266 static int intel_overlay_on(struct intel_overlay *overlay) 267 267 { 268 268 struct drm_i915_private *dev_priv = overlay->i915; 269 - struct drm_i915_gem_request *req; 269 + struct i915_request *rq; 270 270 u32 *cs; 271 271 272 272 WARN_ON(overlay->active); 273 273 274 - req = alloc_request(overlay); 275 - if (IS_ERR(req)) 276 - return PTR_ERR(req); 274 + rq = alloc_request(overlay); 275 + if (IS_ERR(rq)) 276 + return PTR_ERR(rq); 277 277 278 - cs = intel_ring_begin(req, 4); 278 + cs = intel_ring_begin(rq, 4); 279 279 if (IS_ERR(cs)) { 280 - i915_add_request(req); 280 + i915_request_add(rq); 281 281 return PTR_ERR(cs); 282 282 } 283 283 ··· 290 290 *cs++ = overlay->flip_addr | OFC_UPDATE; 291 291 *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP; 292 292 *cs++ = MI_NOOP; 293 - intel_ring_advance(req, cs); 293 + intel_ring_advance(rq, cs); 294 294 295 - return intel_overlay_do_wait_request(overlay, req, NULL); 295 + return intel_overlay_do_wait_request(overlay, rq, NULL); 296 296 } 297 297 298 298 static void intel_overlay_flip_prepare(struct intel_overlay *overlay, ··· 322 322 bool load_polyphase_filter) 323 323 { 324 324 struct drm_i915_private *dev_priv = overlay->i915; 325 - struct drm_i915_gem_request *req; 325 + struct i915_request *rq; 326 326 u32 flip_addr = overlay->flip_addr; 327 327 u32 tmp, *cs; 328 328 ··· 336 336 if (tmp & (1 << 17)) 337 337 DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); 338 338 339 - req = alloc_request(overlay); 340 - if (IS_ERR(req)) 341 - return PTR_ERR(req); 339 + rq = alloc_request(overlay); 340 + if (IS_ERR(rq)) 341 + return PTR_ERR(rq); 342 342 343 - cs = intel_ring_begin(req, 2); 343 + cs = intel_ring_begin(rq, 2); 344 344 if (IS_ERR(cs)) { 345 - i915_add_request(req); 345 + i915_request_add(rq); 346 346 return PTR_ERR(cs); 347 347 } 348 348 349 349 *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE; 350 350 *cs++ = flip_addr; 351 - intel_ring_advance(req, cs); 351 + intel_ring_advance(rq, cs); 352 352 353 353 
intel_overlay_flip_prepare(overlay, vma); 354 354 355 - intel_overlay_submit_request(overlay, req, NULL); 355 + intel_overlay_submit_request(overlay, rq, NULL); 356 356 357 357 return 0; 358 358 } ··· 373 373 } 374 374 375 375 static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active, 376 - struct drm_i915_gem_request *req) 376 + struct i915_request *rq) 377 377 { 378 378 struct intel_overlay *overlay = 379 379 container_of(active, typeof(*overlay), last_flip); ··· 382 382 } 383 383 384 384 static void intel_overlay_off_tail(struct i915_gem_active *active, 385 - struct drm_i915_gem_request *req) 385 + struct i915_request *rq) 386 386 { 387 387 struct intel_overlay *overlay = 388 388 container_of(active, typeof(*overlay), last_flip); ··· 401 401 /* overlay needs to be disabled in OCMD reg */ 402 402 static int intel_overlay_off(struct intel_overlay *overlay) 403 403 { 404 - struct drm_i915_gem_request *req; 404 + struct i915_request *rq; 405 405 u32 *cs, flip_addr = overlay->flip_addr; 406 406 407 407 WARN_ON(!overlay->active); ··· 412 412 * of the hw. Do it in both cases */ 413 413 flip_addr |= OFC_UPDATE; 414 414 415 - req = alloc_request(overlay); 416 - if (IS_ERR(req)) 417 - return PTR_ERR(req); 415 + rq = alloc_request(overlay); 416 + if (IS_ERR(rq)) 417 + return PTR_ERR(rq); 418 418 419 - cs = intel_ring_begin(req, 6); 419 + cs = intel_ring_begin(rq, 6); 420 420 if (IS_ERR(cs)) { 421 - i915_add_request(req); 421 + i915_request_add(rq); 422 422 return PTR_ERR(cs); 423 423 } 424 424 ··· 432 432 *cs++ = flip_addr; 433 433 *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP; 434 434 435 - intel_ring_advance(req, cs); 435 + intel_ring_advance(rq, cs); 436 436 437 437 intel_overlay_flip_prepare(overlay, NULL); 438 438 439 - return intel_overlay_do_wait_request(overlay, req, 439 + return intel_overlay_do_wait_request(overlay, rq, 440 440 intel_overlay_off_tail); 441 441 } 442 442 ··· 468 468 469 469 if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) { 470 470 /* synchronous slowpath */ 471 - struct drm_i915_gem_request *req; 471 + struct i915_request *rq; 472 472 473 - req = alloc_request(overlay); 474 - if (IS_ERR(req)) 475 - return PTR_ERR(req); 473 + rq = alloc_request(overlay); 474 + if (IS_ERR(rq)) 475 + return PTR_ERR(rq); 476 476 477 - cs = intel_ring_begin(req, 2); 477 + cs = intel_ring_begin(rq, 2); 478 478 if (IS_ERR(cs)) { 479 - i915_add_request(req); 479 + i915_request_add(rq); 480 480 return PTR_ERR(cs); 481 481 } 482 482 483 483 *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP; 484 484 *cs++ = MI_NOOP; 485 - intel_ring_advance(req, cs); 485 + intel_ring_advance(rq, cs); 486 486 487 - ret = intel_overlay_do_wait_request(overlay, req, 487 + ret = intel_overlay_do_wait_request(overlay, rq, 488 488 intel_overlay_release_old_vid_tail); 489 489 if (ret) 490 490 return ret;
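One convention visible throughout the overlay paths above: once alloc_request() has returned a request, every exit path, including the IS_ERR(cs) branches, still calls i915_request_add() so the request is never leaked half-constructed. A toy model of that shape, with all names hypothetical:

#include <stdbool.h>
#include <stdio.h>

struct fake_request { bool added; };

/* Pretend only small packets fit, so larger ones hit the error path. */
static bool ring_begin(struct fake_request *rq, int dwords)
{
	(void)rq;
	return dwords <= 4;
}

static void request_add(struct fake_request *rq)
{
	rq->added = true;	/* hand the request back to the engine */
}

static int emit_overlay_op(struct fake_request *rq, int dwords)
{
	if (!ring_begin(rq, dwords)) {
		request_add(rq);	/* error path still submits the request */
		return -1;
	}
	/* ...emit the dwords here... */
	request_add(rq);
	return 0;
}

int main(void)
{
	struct fake_request rq = { .added = false };
	int ret = emit_overlay_op(&rq, 8);

	printf("ret=%d added=%d\n", ret, rq.added);	/* ret=-1 added=1 */
	return 0;
}
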
+16 -5
drivers/gpu/drm/i915/intel_pm.c
··· 6360 6360 mutex_unlock(&dev_priv->pcu_lock); 6361 6361 } 6362 6362 6363 - void gen6_rps_boost(struct drm_i915_gem_request *rq, 6363 + void gen6_rps_boost(struct i915_request *rq, 6364 6364 struct intel_rps_client *rps_client) 6365 6365 { 6366 6366 struct intel_rps *rps = &rq->i915->gt_pm.rps; ··· 6376 6376 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)) 6377 6377 return; 6378 6378 6379 - /* Serializes with i915_gem_request_retire() */ 6379 + /* Serializes with i915_request_retire() */ 6380 6380 boost = false; 6381 6381 spin_lock_irqsave(&rq->lock, flags); 6382 6382 if (!rq->waitboost && !dma_fence_is_signaled_locked(&rq->fence)) { ··· 6715 6715 6716 6716 /* 6717 6717 * 3b: Enable Coarse Power Gating only when RC6 is enabled. 6718 - * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6. 6718 + * WaRsDisableCoarsePowerGating:skl,cnl - Render/Media PG need to be disabled with RC6. 6719 6719 */ 6720 6720 if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv)) 6721 6721 I915_WRITE(GEN9_PG_ENABLE, 0); ··· 8026 8026 dev_priv->gt_pm.rc6.enabled = true; /* force RC6 disabling */ 8027 8027 intel_disable_gt_powersave(dev_priv); 8028 8028 8029 - gen6_reset_rps_interrupts(dev_priv); 8029 + if (INTEL_GEN(dev_priv) < 11) 8030 + gen6_reset_rps_interrupts(dev_priv); 8031 + else 8032 + WARN_ON_ONCE(1); 8030 8033 } 8031 8034 8032 8035 static inline void intel_disable_llc_pstate(struct drm_i915_private *i915) ··· 8142 8139 cherryview_enable_rps(dev_priv); 8143 8140 } else if (IS_VALLEYVIEW(dev_priv)) { 8144 8141 valleyview_enable_rps(dev_priv); 8142 + } else if (WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11)) { 8143 + /* TODO */ 8145 8144 } else if (INTEL_GEN(dev_priv) >= 9) { 8146 8145 gen9_enable_rps(dev_priv); 8147 8146 } else if (IS_BROADWELL(dev_priv)) { ··· 8492 8487 if (!HAS_PCH_CNP(dev_priv)) 8493 8488 return; 8494 8489 8495 - /* Display WA #1181: cnp */ 8490 + /* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */ 8496 8491 I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) | 8497 8492 CNP_PWM_CGE_GATING_DISABLE); 8498 8493 } ··· 8522 8517 val |= SARBUNIT_CLKGATE_DIS; 8523 8518 I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val); 8524 8519 8520 + /* Wa_2201832410:cnl */ 8521 + val = I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE); 8522 + val |= GWUNIT_CLKGATE_DIS; 8523 + I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE, val); 8524 + 8525 8525 /* WaDisableVFclkgate:cnl */ 8526 + /* WaVFUnitClockGatingDisable:cnl */ 8526 8527 val = I915_READ(UNSLICE_UNIT_LEVEL_CLKGATE); 8527 8528 val |= VFUNIT_CLKGATE_DIS; 8528 8529 I915_WRITE(UNSLICE_UNIT_LEVEL_CLKGATE, val);
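The gen6_rps_boost() hunk above keeps its existing shape: a cheap unlocked check that the fence has not signalled, then a re-check of both rq->waitboost and the fence under rq->lock, so each request is boosted at most once and never after retirement. A small userspace model of that check/lock/re-check pattern, with a pthread mutex standing in for the request spinlock and illustrative names throughout:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct request_model {
	pthread_mutex_t lock;
	bool signaled;
	bool waitboost;
};

static bool try_boost(struct request_model *rq)
{
	bool boost = false;

	if (rq->signaled)		/* unlocked fast path */
		return false;

	pthread_mutex_lock(&rq->lock);
	if (!rq->waitboost && !rq->signaled) {
		rq->waitboost = true;	/* claim the boost exactly once */
		boost = true;
	}
	pthread_mutex_unlock(&rq->lock);

	return boost;
}

int main(void)
{
	struct request_model rq = { PTHREAD_MUTEX_INITIALIZER, false, false };

	printf("first:  %d\n", try_boost(&rq));	/* 1: boost granted   */
	printf("second: %d\n", try_boost(&rq));	/* 0: already boosted */
	return 0;
}
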
+157 -24
drivers/gpu/drm/i915/intel_psr.c
··· 56 56 #include "intel_drv.h" 57 57 #include "i915_drv.h" 58 58 59 + static inline enum intel_display_power_domain 60 + psr_aux_domain(struct intel_dp *intel_dp) 61 + { 62 + /* CNL HW requires corresponding AUX IOs to be powered up for PSR. 63 + * However, for non-A AUX ports the corresponding non-EDP transcoders 64 + * would have already enabled power well 2 and DC_OFF. This means we can 65 + * acquire a wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a 66 + * specific AUX_IO reference without powering up any extra wells. 67 + * Note that PSR is enabled only on Port A even though this function 68 + * returns the correct domain for other ports too. 69 + */ 70 + return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A : 71 + intel_dp->aux_power_domain; 72 + } 73 + 74 + static void psr_aux_io_power_get(struct intel_dp *intel_dp) 75 + { 76 + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 77 + struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); 78 + 79 + if (INTEL_GEN(dev_priv) < 10) 80 + return; 81 + 82 + intel_display_power_get(dev_priv, psr_aux_domain(intel_dp)); 83 + } 84 + 85 + static void psr_aux_io_power_put(struct intel_dp *intel_dp) 86 + { 87 + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 88 + struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); 89 + 90 + if (INTEL_GEN(dev_priv) < 10) 91 + return; 92 + 93 + intel_display_power_put(dev_priv, psr_aux_domain(intel_dp)); 94 + } 95 + 96 + static bool intel_dp_get_y_cord_status(struct intel_dp *intel_dp) 97 + { 98 + uint8_t psr_caps = 0; 99 + 100 + if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps) != 1) 101 + return false; 102 + return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED; 103 + } 104 + 105 + static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) 106 + { 107 + uint8_t dprx = 0; 108 + 109 + if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST, 110 + &dprx) != 1) 111 + return false; 112 + return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED; 113 + } 114 + 115 + static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp) 116 + { 117 + uint8_t alpm_caps = 0; 118 + 119 + if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, 120 + &alpm_caps) != 1) 121 + return false; 122 + return alpm_caps & DP_ALPM_CAP; 123 + } 124 + 125 + void intel_psr_init_dpcd(struct intel_dp *intel_dp) 126 + { 127 + struct drm_i915_private *dev_priv = 128 + to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 129 + 130 + drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd, 131 + sizeof(intel_dp->psr_dpcd)); 132 + 133 + if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) { 134 + dev_priv->psr.sink_support = true; 135 + DRM_DEBUG_KMS("Detected EDP PSR Panel.\n"); 136 + } 137 + 138 + if (INTEL_GEN(dev_priv) >= 9 && 139 + (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) { 140 + uint8_t frame_sync_cap; 141 + 142 + dev_priv->psr.sink_support = true; 143 + if (drm_dp_dpcd_readb(&intel_dp->aux, 144 + DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP, 145 + &frame_sync_cap) != 1) 146 + frame_sync_cap = 0; 147 + dev_priv->psr.aux_frame_sync = frame_sync_cap & DP_AUX_FRAME_SYNC_CAP; 148 + /* PSR2 needs frame sync as well */ 149 + dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync; 150 + DRM_DEBUG_KMS("PSR2 %s on sink", 151 + dev_priv->psr.psr2_support ? 
"supported" : "not supported"); 152 + 153 + if (dev_priv->psr.psr2_support) { 154 + dev_priv->psr.y_cord_support = 155 + intel_dp_get_y_cord_status(intel_dp); 156 + dev_priv->psr.colorimetry_support = 157 + intel_dp_get_colorimetry_status(intel_dp); 158 + dev_priv->psr.alpm = 159 + intel_dp_get_alpm_status(intel_dp); 160 + } 161 + } 162 + } 163 + 59 164 static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe) 60 165 { 61 166 struct drm_i915_private *dev_priv = to_i915(dev); ··· 446 341 hsw_activate_psr1(intel_dp); 447 342 } 448 343 344 + static bool intel_psr2_config_valid(struct intel_dp *intel_dp, 345 + struct intel_crtc_state *crtc_state) 346 + { 347 + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 348 + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 349 + int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay; 350 + int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay; 351 + int psr_max_h = 0, psr_max_v = 0; 352 + 353 + /* 354 + * FIXME psr2_support is messed up. It's both computed 355 + * dynamically during PSR enable, and extracted from sink 356 + * caps during eDP detection. 357 + */ 358 + if (!dev_priv->psr.psr2_support) 359 + return false; 360 + 361 + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { 362 + psr_max_h = 4096; 363 + psr_max_v = 2304; 364 + } else if (IS_GEN9(dev_priv)) { 365 + psr_max_h = 3640; 366 + psr_max_v = 2304; 367 + } 368 + 369 + if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) { 370 + DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n", 371 + crtc_hdisplay, crtc_vdisplay, 372 + psr_max_h, psr_max_v); 373 + return false; 374 + } 375 + 376 + /* 377 + * FIXME:enable psr2 only for y-cordinate psr2 panels 378 + * After gtc implementation , remove this restriction. 379 + */ 380 + if (!dev_priv->psr.y_cord_support) { 381 + DRM_DEBUG_KMS("PSR2 not enabled, panel does not support Y coordinate\n"); 382 + return false; 383 + } 384 + 385 + return true; 386 + } 387 + 449 388 void intel_psr_compute_config(struct intel_dp *intel_dp, 450 389 struct intel_crtc_state *crtc_state) 451 390 { ··· 552 403 return; 553 404 } 554 405 555 - /* 556 - * FIXME psr2_support is messed up. It's both computed 557 - * dynamically during PSR enable, and extracted from sink 558 - * caps during eDP detection. 559 - */ 560 - if (!dev_priv->psr.psr2_support) { 561 - crtc_state->has_psr = true; 562 - return; 563 - } 564 - 565 - /* PSR2 is restricted to work with panel resolutions upto 3200x2000 */ 566 - if (adjusted_mode->crtc_hdisplay > 3200 || 567 - adjusted_mode->crtc_vdisplay > 2000) { 568 - DRM_DEBUG_KMS("PSR2 disabled, panel resolution too big\n"); 569 - return; 570 - } 571 - 572 - /* 573 - * FIXME:enable psr2 only for y-cordinate psr2 panels 574 - * After gtc implementation , remove this restriction. 575 - */ 576 - if (!dev_priv->psr.y_cord_support) { 577 - DRM_DEBUG_KMS("PSR2 disabled, panel does not support Y coordinate\n"); 406 + if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) { 407 + DRM_DEBUG_KMS("PSR condition failed: panel lacks power state control\n"); 578 408 return; 579 409 } 580 410 581 411 crtc_state->has_psr = true; 582 - crtc_state->has_psr2 = true; 412 + crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state); 413 + DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? 
"2" : ""); 583 414 } 584 415 585 416 static void intel_psr_activate(struct intel_dp *intel_dp) ··· 587 458 struct drm_i915_private *dev_priv = to_i915(dev); 588 459 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 589 460 u32 chicken; 461 + 462 + psr_aux_io_power_get(intel_dp); 590 463 591 464 if (dev_priv->psr.psr2_support) { 592 465 chicken = PSR2_VSC_ENABLE_PROG_HEADER; ··· 748 617 else 749 618 WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE); 750 619 } 620 + 621 + psr_aux_io_power_put(intel_dp); 751 622 } 752 623 753 624 /**
+96 -100
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 66 66 } 67 67 68 68 static int 69 - gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode) 69 + gen2_render_ring_flush(struct i915_request *rq, u32 mode) 70 70 { 71 71 u32 cmd, *cs; 72 72 ··· 75 75 if (mode & EMIT_INVALIDATE) 76 76 cmd |= MI_READ_FLUSH; 77 77 78 - cs = intel_ring_begin(req, 2); 78 + cs = intel_ring_begin(rq, 2); 79 79 if (IS_ERR(cs)) 80 80 return PTR_ERR(cs); 81 81 82 82 *cs++ = cmd; 83 83 *cs++ = MI_NOOP; 84 - intel_ring_advance(req, cs); 84 + intel_ring_advance(rq, cs); 85 85 86 86 return 0; 87 87 } 88 88 89 89 static int 90 - gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode) 90 + gen4_render_ring_flush(struct i915_request *rq, u32 mode) 91 91 { 92 92 u32 cmd, *cs; 93 93 ··· 122 122 cmd = MI_FLUSH; 123 123 if (mode & EMIT_INVALIDATE) { 124 124 cmd |= MI_EXE_FLUSH; 125 - if (IS_G4X(req->i915) || IS_GEN5(req->i915)) 125 + if (IS_G4X(rq->i915) || IS_GEN5(rq->i915)) 126 126 cmd |= MI_INVALIDATE_ISP; 127 127 } 128 128 129 - cs = intel_ring_begin(req, 2); 129 + cs = intel_ring_begin(rq, 2); 130 130 if (IS_ERR(cs)) 131 131 return PTR_ERR(cs); 132 132 133 133 *cs++ = cmd; 134 134 *cs++ = MI_NOOP; 135 - intel_ring_advance(req, cs); 135 + intel_ring_advance(rq, cs); 136 136 137 137 return 0; 138 138 } ··· 175 175 * really our business. That leaves only stall at scoreboard. 176 176 */ 177 177 static int 178 - intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req) 178 + intel_emit_post_sync_nonzero_flush(struct i915_request *rq) 179 179 { 180 180 u32 scratch_addr = 181 - i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES; 181 + i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES; 182 182 u32 *cs; 183 183 184 - cs = intel_ring_begin(req, 6); 184 + cs = intel_ring_begin(rq, 6); 185 185 if (IS_ERR(cs)) 186 186 return PTR_ERR(cs); 187 187 ··· 191 191 *cs++ = 0; /* low dword */ 192 192 *cs++ = 0; /* high dword */ 193 193 *cs++ = MI_NOOP; 194 - intel_ring_advance(req, cs); 194 + intel_ring_advance(rq, cs); 195 195 196 - cs = intel_ring_begin(req, 6); 196 + cs = intel_ring_begin(rq, 6); 197 197 if (IS_ERR(cs)) 198 198 return PTR_ERR(cs); 199 199 ··· 203 203 *cs++ = 0; 204 204 *cs++ = 0; 205 205 *cs++ = MI_NOOP; 206 - intel_ring_advance(req, cs); 206 + intel_ring_advance(rq, cs); 207 207 208 208 return 0; 209 209 } 210 210 211 211 static int 212 - gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode) 212 + gen6_render_ring_flush(struct i915_request *rq, u32 mode) 213 213 { 214 214 u32 scratch_addr = 215 - i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES; 215 + i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES; 216 216 u32 *cs, flags = 0; 217 217 int ret; 218 218 219 219 /* Force SNB workarounds for PIPE_CONTROL flushes */ 220 - ret = intel_emit_post_sync_nonzero_flush(req); 220 + ret = intel_emit_post_sync_nonzero_flush(rq); 221 221 if (ret) 222 222 return ret; 223 223 ··· 247 247 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL; 248 248 } 249 249 250 - cs = intel_ring_begin(req, 4); 250 + cs = intel_ring_begin(rq, 4); 251 251 if (IS_ERR(cs)) 252 252 return PTR_ERR(cs); 253 253 ··· 255 255 *cs++ = flags; 256 256 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT; 257 257 *cs++ = 0; 258 - intel_ring_advance(req, cs); 258 + intel_ring_advance(rq, cs); 259 259 260 260 return 0; 261 261 } 262 262 263 263 static int 264 - gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req) 264 + gen7_render_ring_cs_stall_wa(struct i915_request *rq) 265 265 { 266 266 u32 *cs; 267 267 268 - cs = 
intel_ring_begin(req, 4); 268 + cs = intel_ring_begin(rq, 4); 269 269 if (IS_ERR(cs)) 270 270 return PTR_ERR(cs); 271 271 ··· 273 273 *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD; 274 274 *cs++ = 0; 275 275 *cs++ = 0; 276 - intel_ring_advance(req, cs); 276 + intel_ring_advance(rq, cs); 277 277 278 278 return 0; 279 279 } 280 280 281 281 static int 282 - gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode) 282 + gen7_render_ring_flush(struct i915_request *rq, u32 mode) 283 283 { 284 284 u32 scratch_addr = 285 - i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES; 285 + i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES; 286 286 u32 *cs, flags = 0; 287 287 288 288 /* ··· 324 324 /* Workaround: we must issue a pipe_control with CS-stall bit 325 325 * set before a pipe_control command that has the state cache 326 326 * invalidate bit set. */ 327 - gen7_render_ring_cs_stall_wa(req); 327 + gen7_render_ring_cs_stall_wa(rq); 328 328 } 329 329 330 - cs = intel_ring_begin(req, 4); 330 + cs = intel_ring_begin(rq, 4); 331 331 if (IS_ERR(cs)) 332 332 return PTR_ERR(cs); 333 333 ··· 335 335 *cs++ = flags; 336 336 *cs++ = scratch_addr; 337 337 *cs++ = 0; 338 - intel_ring_advance(req, cs); 338 + intel_ring_advance(rq, cs); 339 339 340 340 return 0; 341 341 } ··· 531 531 } 532 532 533 533 static void reset_ring_common(struct intel_engine_cs *engine, 534 - struct drm_i915_gem_request *request) 534 + struct i915_request *request) 535 535 { 536 536 /* 537 537 * RC6 must be prevented until the reset is complete and the engine ··· 595 595 } 596 596 } 597 597 598 - static int intel_rcs_ctx_init(struct drm_i915_gem_request *req) 598 + static int intel_rcs_ctx_init(struct i915_request *rq) 599 599 { 600 600 int ret; 601 601 602 - ret = intel_ring_workarounds_emit(req); 602 + ret = intel_ring_workarounds_emit(rq); 603 603 if (ret != 0) 604 604 return ret; 605 605 606 - ret = i915_gem_render_state_emit(req); 606 + ret = i915_gem_render_state_emit(rq); 607 607 if (ret) 608 608 return ret; 609 609 ··· 661 661 return init_workarounds_ring(engine); 662 662 } 663 663 664 - static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs) 664 + static u32 *gen6_signal(struct i915_request *rq, u32 *cs) 665 665 { 666 - struct drm_i915_private *dev_priv = req->i915; 666 + struct drm_i915_private *dev_priv = rq->i915; 667 667 struct intel_engine_cs *engine; 668 668 enum intel_engine_id id; 669 669 int num_rings = 0; ··· 674 674 if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK)) 675 675 continue; 676 676 677 - mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id]; 677 + mbox_reg = rq->engine->semaphore.mbox.signal[engine->hw_id]; 678 678 if (i915_mmio_reg_valid(mbox_reg)) { 679 679 *cs++ = MI_LOAD_REGISTER_IMM(1); 680 680 *cs++ = i915_mmio_reg_offset(mbox_reg); 681 - *cs++ = req->global_seqno; 681 + *cs++ = rq->global_seqno; 682 682 num_rings++; 683 683 } 684 684 } ··· 690 690 691 691 static void cancel_requests(struct intel_engine_cs *engine) 692 692 { 693 - struct drm_i915_gem_request *request; 693 + struct i915_request *request; 694 694 unsigned long flags; 695 695 696 696 spin_lock_irqsave(&engine->timeline->lock, flags); ··· 698 698 /* Mark all submitted requests as skipped. 
*/ 699 699 list_for_each_entry(request, &engine->timeline->requests, link) { 700 700 GEM_BUG_ON(!request->global_seqno); 701 - if (!i915_gem_request_completed(request)) 701 + if (!i915_request_completed(request)) 702 702 dma_fence_set_error(&request->fence, -EIO); 703 703 } 704 704 /* Remaining _unready_ requests will be nop'ed when submitted */ ··· 706 706 spin_unlock_irqrestore(&engine->timeline->lock, flags); 707 707 } 708 708 709 - static void i9xx_submit_request(struct drm_i915_gem_request *request) 709 + static void i9xx_submit_request(struct i915_request *request) 710 710 { 711 711 struct drm_i915_private *dev_priv = request->i915; 712 712 713 - i915_gem_request_submit(request); 713 + i915_request_submit(request); 714 714 715 715 I915_WRITE_TAIL(request->engine, 716 716 intel_ring_set_tail(request->ring, request->tail)); 717 717 } 718 718 719 - static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs) 719 + static void i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs) 720 720 { 721 721 *cs++ = MI_STORE_DWORD_INDEX; 722 722 *cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT; 723 - *cs++ = req->global_seqno; 723 + *cs++ = rq->global_seqno; 724 724 *cs++ = MI_USER_INTERRUPT; 725 725 726 - req->tail = intel_ring_offset(req, cs); 727 - assert_ring_tail_valid(req->ring, req->tail); 726 + rq->tail = intel_ring_offset(rq, cs); 727 + assert_ring_tail_valid(rq->ring, rq->tail); 728 728 } 729 729 730 730 static const int i9xx_emit_breadcrumb_sz = 4; 731 731 732 - static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs) 732 + static void gen6_sema_emit_breadcrumb(struct i915_request *rq, u32 *cs) 733 733 { 734 - return i9xx_emit_breadcrumb(req, 735 - req->engine->semaphore.signal(req, cs)); 734 + return i9xx_emit_breadcrumb(rq, rq->engine->semaphore.signal(rq, cs)); 736 735 } 737 736 738 737 static int 739 - gen6_ring_sync_to(struct drm_i915_gem_request *req, 740 - struct drm_i915_gem_request *signal) 738 + gen6_ring_sync_to(struct i915_request *rq, struct i915_request *signal) 741 739 { 742 740 u32 dw1 = MI_SEMAPHORE_MBOX | 743 741 MI_SEMAPHORE_COMPARE | 744 742 MI_SEMAPHORE_REGISTER; 745 - u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id]; 743 + u32 wait_mbox = signal->engine->semaphore.mbox.wait[rq->engine->hw_id]; 746 744 u32 *cs; 747 745 748 746 WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID); 749 747 750 - cs = intel_ring_begin(req, 4); 748 + cs = intel_ring_begin(rq, 4); 751 749 if (IS_ERR(cs)) 752 750 return PTR_ERR(cs); 753 751 ··· 757 759 *cs++ = signal->global_seqno - 1; 758 760 *cs++ = 0; 759 761 *cs++ = MI_NOOP; 760 - intel_ring_advance(req, cs); 762 + intel_ring_advance(rq, cs); 761 763 762 764 return 0; 763 765 } ··· 856 858 } 857 859 858 860 static int 859 - bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode) 861 + bsd_ring_flush(struct i915_request *rq, u32 mode) 860 862 { 861 863 u32 *cs; 862 864 863 - cs = intel_ring_begin(req, 2); 865 + cs = intel_ring_begin(rq, 2); 864 866 if (IS_ERR(cs)) 865 867 return PTR_ERR(cs); 866 868 867 869 *cs++ = MI_FLUSH; 868 870 *cs++ = MI_NOOP; 869 - intel_ring_advance(req, cs); 871 + intel_ring_advance(rq, cs); 870 872 return 0; 871 873 } 872 874 ··· 909 911 } 910 912 911 913 static int 912 - i965_emit_bb_start(struct drm_i915_gem_request *req, 914 + i965_emit_bb_start(struct i915_request *rq, 913 915 u64 offset, u32 length, 914 916 unsigned int dispatch_flags) 915 917 { 916 918 u32 *cs; 917 919 918 - cs = intel_ring_begin(req, 2); 920 + cs = intel_ring_begin(rq, 
2); 919 921 if (IS_ERR(cs)) 920 922 return PTR_ERR(cs); 921 923 922 924 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags & 923 925 I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965); 924 926 *cs++ = offset; 925 - intel_ring_advance(req, cs); 927 + intel_ring_advance(rq, cs); 926 928 927 929 return 0; 928 930 } ··· 932 934 #define I830_TLB_ENTRIES (2) 933 935 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT) 934 936 static int 935 - i830_emit_bb_start(struct drm_i915_gem_request *req, 937 + i830_emit_bb_start(struct i915_request *rq, 936 938 u64 offset, u32 len, 937 939 unsigned int dispatch_flags) 938 940 { 939 - u32 *cs, cs_offset = i915_ggtt_offset(req->engine->scratch); 941 + u32 *cs, cs_offset = i915_ggtt_offset(rq->engine->scratch); 940 942 941 - cs = intel_ring_begin(req, 6); 943 + cs = intel_ring_begin(rq, 6); 942 944 if (IS_ERR(cs)) 943 945 return PTR_ERR(cs); 944 946 ··· 949 951 *cs++ = cs_offset; 950 952 *cs++ = 0xdeadbeef; 951 953 *cs++ = MI_NOOP; 952 - intel_ring_advance(req, cs); 954 + intel_ring_advance(rq, cs); 953 955 954 956 if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) { 955 957 if (len > I830_BATCH_LIMIT) 956 958 return -ENOSPC; 957 959 958 - cs = intel_ring_begin(req, 6 + 2); 960 + cs = intel_ring_begin(rq, 6 + 2); 959 961 if (IS_ERR(cs)) 960 962 return PTR_ERR(cs); 961 963 ··· 972 974 973 975 *cs++ = MI_FLUSH; 974 976 *cs++ = MI_NOOP; 975 - intel_ring_advance(req, cs); 977 + intel_ring_advance(rq, cs); 976 978 977 979 /* ... and execute it. */ 978 980 offset = cs_offset; 979 981 } 980 982 981 - cs = intel_ring_begin(req, 2); 983 + cs = intel_ring_begin(rq, 2); 982 984 if (IS_ERR(cs)) 983 985 return PTR_ERR(cs); 984 986 985 987 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT; 986 988 *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 : 987 989 MI_BATCH_NON_SECURE); 988 - intel_ring_advance(req, cs); 990 + intel_ring_advance(rq, cs); 989 991 990 992 return 0; 991 993 } 992 994 993 995 static int 994 - i915_emit_bb_start(struct drm_i915_gem_request *req, 996 + i915_emit_bb_start(struct i915_request *rq, 995 997 u64 offset, u32 len, 996 998 unsigned int dispatch_flags) 997 999 { 998 1000 u32 *cs; 999 1001 1000 - cs = intel_ring_begin(req, 2); 1002 + cs = intel_ring_begin(rq, 2); 1001 1003 if (IS_ERR(cs)) 1002 1004 return PTR_ERR(cs); 1003 1005 1004 1006 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT; 1005 1007 *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 
0 : 1006 1008 MI_BATCH_NON_SECURE); 1007 - intel_ring_advance(req, cs); 1009 + intel_ring_advance(rq, cs); 1008 1010 1009 1011 return 0; 1010 1012 } ··· 1375 1377 intel_ring_reset(engine->buffer, 0); 1376 1378 } 1377 1379 1378 - static inline int mi_set_context(struct drm_i915_gem_request *rq, u32 flags) 1380 + static inline int mi_set_context(struct i915_request *rq, u32 flags) 1379 1381 { 1380 1382 struct drm_i915_private *i915 = rq->i915; 1381 1383 struct intel_engine_cs *engine = rq->engine; ··· 1461 1463 return 0; 1462 1464 } 1463 1465 1464 - static int remap_l3(struct drm_i915_gem_request *rq, int slice) 1466 + static int remap_l3(struct i915_request *rq, int slice) 1465 1467 { 1466 1468 u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice]; 1467 1469 int i; ··· 1489 1491 return 0; 1490 1492 } 1491 1493 1492 - static int switch_context(struct drm_i915_gem_request *rq) 1494 + static int switch_context(struct i915_request *rq) 1493 1495 { 1494 1496 struct intel_engine_cs *engine = rq->engine; 1495 1497 struct i915_gem_context *to_ctx = rq->ctx; ··· 1559 1561 return ret; 1560 1562 } 1561 1563 1562 - static int ring_request_alloc(struct drm_i915_gem_request *request) 1564 + static int ring_request_alloc(struct i915_request *request) 1563 1565 { 1564 1566 int ret; 1565 1567 ··· 1585 1587 1586 1588 static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes) 1587 1589 { 1588 - struct drm_i915_gem_request *target; 1590 + struct i915_request *target; 1589 1591 long timeout; 1590 1592 1591 1593 lockdep_assert_held(&ring->vma->vm->i915->drm.struct_mutex); ··· 1603 1605 if (WARN_ON(&target->ring_link == &ring->request_list)) 1604 1606 return -ENOSPC; 1605 1607 1606 - timeout = i915_wait_request(target, 1608 + timeout = i915_request_wait(target, 1607 1609 I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED, 1608 1610 MAX_SCHEDULE_TIMEOUT); 1609 1611 if (timeout < 0) 1610 1612 return timeout; 1611 1613 1612 - i915_gem_request_retire_upto(target); 1614 + i915_request_retire_upto(target); 1613 1615 1614 1616 intel_ring_update_space(ring); 1615 1617 GEM_BUG_ON(ring->space < bytes); ··· 1632 1634 return 0; 1633 1635 } 1634 1636 1635 - u32 *intel_ring_begin(struct drm_i915_gem_request *req, 1636 - unsigned int num_dwords) 1637 + u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords) 1637 1638 { 1638 - struct intel_ring *ring = req->ring; 1639 + struct intel_ring *ring = rq->ring; 1639 1640 const unsigned int remain_usable = ring->effective_size - ring->emit; 1640 1641 const unsigned int bytes = num_dwords * sizeof(u32); 1641 1642 unsigned int need_wrap = 0; ··· 1644 1647 /* Packets must be qword aligned. */ 1645 1648 GEM_BUG_ON(num_dwords & 1); 1646 1649 1647 - total_bytes = bytes + req->reserved_space; 1650 + total_bytes = bytes + rq->reserved_space; 1648 1651 GEM_BUG_ON(total_bytes > ring->effective_size); 1649 1652 1650 1653 if (unlikely(total_bytes > remain_usable)) { ··· 1665 1668 * wrap and only need to effectively wait for the 1666 1669 * reserved size from the start of ringbuffer. 1667 1670 */ 1668 - total_bytes = req->reserved_space + remain_actual; 1671 + total_bytes = rq->reserved_space + remain_actual; 1669 1672 } 1670 1673 } 1671 1674 ··· 1679 1682 * overallocation and the assumption is that then we never need 1680 1683 * to wait (which has the risk of failing with EINTR). 1681 1684 * 1682 - * See also i915_gem_request_alloc() and i915_add_request(). 1685 + * See also i915_request_alloc() and i915_request_add(). 
1683 1686 */ 1684 - GEM_BUG_ON(!req->reserved_space); 1687 + GEM_BUG_ON(!rq->reserved_space); 1685 1688 1686 1689 ret = wait_for_space(ring, total_bytes); 1687 1690 if (unlikely(ret)) ··· 1710 1713 } 1711 1714 1712 1715 /* Align the ring tail to a cacheline boundary */ 1713 - int intel_ring_cacheline_align(struct drm_i915_gem_request *req) 1716 + int intel_ring_cacheline_align(struct i915_request *rq) 1714 1717 { 1715 - int num_dwords = 1716 - (req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); 1718 + int num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32); 1717 1719 u32 *cs; 1718 1720 1719 1721 if (num_dwords == 0) 1720 1722 return 0; 1721 1723 1722 - num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords; 1723 - cs = intel_ring_begin(req, num_dwords); 1724 + num_dwords = CACHELINE_BYTES / sizeof(u32) - num_dwords; 1725 + cs = intel_ring_begin(rq, num_dwords); 1724 1726 if (IS_ERR(cs)) 1725 1727 return PTR_ERR(cs); 1726 1728 1727 1729 while (num_dwords--) 1728 1730 *cs++ = MI_NOOP; 1729 1731 1730 - intel_ring_advance(req, cs); 1732 + intel_ring_advance(rq, cs); 1731 1733 1732 1734 return 0; 1733 1735 } 1734 1736 1735 - static void gen6_bsd_submit_request(struct drm_i915_gem_request *request) 1737 + static void gen6_bsd_submit_request(struct i915_request *request) 1736 1738 { 1737 1739 struct drm_i915_private *dev_priv = request->i915; 1738 1740 ··· 1768 1772 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 1769 1773 } 1770 1774 1771 - static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode) 1775 + static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode) 1772 1776 { 1773 1777 u32 cmd, *cs; 1774 1778 1775 - cs = intel_ring_begin(req, 4); 1779 + cs = intel_ring_begin(rq, 4); 1776 1780 if (IS_ERR(cs)) 1777 1781 return PTR_ERR(cs); 1778 1782 ··· 1798 1802 *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT; 1799 1803 *cs++ = 0; 1800 1804 *cs++ = MI_NOOP; 1801 - intel_ring_advance(req, cs); 1805 + intel_ring_advance(rq, cs); 1802 1806 return 0; 1803 1807 } 1804 1808 1805 1809 static int 1806 - hsw_emit_bb_start(struct drm_i915_gem_request *req, 1810 + hsw_emit_bb_start(struct i915_request *rq, 1807 1811 u64 offset, u32 len, 1808 1812 unsigned int dispatch_flags) 1809 1813 { 1810 1814 u32 *cs; 1811 1815 1812 - cs = intel_ring_begin(req, 2); 1816 + cs = intel_ring_begin(rq, 2); 1813 1817 if (IS_ERR(cs)) 1814 1818 return PTR_ERR(cs); 1815 1819 ··· 1819 1823 MI_BATCH_RESOURCE_STREAMER : 0); 1820 1824 /* bit0-7 is the length on GEN6+ */ 1821 1825 *cs++ = offset; 1822 - intel_ring_advance(req, cs); 1826 + intel_ring_advance(rq, cs); 1823 1827 1824 1828 return 0; 1825 1829 } 1826 1830 1827 1831 static int 1828 - gen6_emit_bb_start(struct drm_i915_gem_request *req, 1832 + gen6_emit_bb_start(struct i915_request *rq, 1829 1833 u64 offset, u32 len, 1830 1834 unsigned int dispatch_flags) 1831 1835 { 1832 1836 u32 *cs; 1833 1837 1834 - cs = intel_ring_begin(req, 2); 1838 + cs = intel_ring_begin(rq, 2); 1835 1839 if (IS_ERR(cs)) 1836 1840 return PTR_ERR(cs); 1837 1841 ··· 1839 1843 0 : MI_BATCH_NON_SECURE_I965); 1840 1844 /* bit0-7 is the length on GEN6+ */ 1841 1845 *cs++ = offset; 1842 - intel_ring_advance(req, cs); 1846 + intel_ring_advance(rq, cs); 1843 1847 1844 1848 return 0; 1845 1849 } 1846 1850 1847 1851 /* Blitter support (SandyBridge+) */ 1848 1852 1849 - static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode) 1853 + static int gen6_ring_flush(struct i915_request *rq, u32 mode) 1850 1854 { 1851 1855 u32 cmd, *cs; 
1852 1856 1853 - cs = intel_ring_begin(req, 4); 1857 + cs = intel_ring_begin(rq, 4); 1854 1858 if (IS_ERR(cs)) 1855 1859 return PTR_ERR(cs); 1856 1860 ··· 1875 1879 *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT; 1876 1880 *cs++ = 0; 1877 1881 *cs++ = MI_NOOP; 1878 - intel_ring_advance(req, cs); 1882 + intel_ring_advance(rq, cs); 1879 1883 1880 1884 return 0; 1881 1885 }
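intel_ring_cacheline_align() above only changed variable names, but the arithmetic is worth spelling out: it computes how many dwords of the current 64-byte cacheline are already used and, if any, emits enough MI_NOOPs to reach the next boundary. The same computation in isolation:

#include <stdint.h>
#include <stdio.h>

#define CACHELINE_BYTES	64

/* Given the current emit offset in bytes, how many NOOP dwords pad the
 * tail of the ring to the next cacheline boundary? */
static int cacheline_pad_dwords(uint32_t emit)
{
	int partial = (emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);

	return partial ? CACHELINE_BYTES / (int)sizeof(uint32_t) - partial : 0;
}

int main(void)
{
	printf("emit=0x40 -> pad %d dwords\n", cacheline_pad_dwords(0x40)); /* 0  */
	printf("emit=0x44 -> pad %d dwords\n", cacheline_pad_dwords(0x44)); /* 15 */
	printf("emit=0x7c -> pad %d dwords\n", cacheline_pad_dwords(0x7c)); /* 1  */
	return 0;
}
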
+66 -48
drivers/gpu/drm/i915/intel_ringbuffer.h
··· 3 3 #define _INTEL_RINGBUFFER_H_ 4 4 5 5 #include <linux/hashtable.h> 6 + 6 7 #include "i915_gem_batch_pool.h" 7 - #include "i915_gem_request.h" 8 8 #include "i915_gem_timeline.h" 9 + 9 10 #include "i915_pmu.h" 11 + #include "i915_request.h" 10 12 #include "i915_selftest.h" 11 13 12 14 struct drm_printer; ··· 92 90 93 91 #define instdone_subslice_mask(dev_priv__) \ 94 92 (INTEL_GEN(dev_priv__) == 7 ? \ 95 - 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask) 93 + 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask[0]) 96 94 97 95 #define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \ 98 96 for ((slice__) = 0, (subslice__) = 0; \ ··· 117 115 unsigned long action_timestamp; 118 116 int deadlock; 119 117 struct intel_instdone instdone; 120 - struct drm_i915_gem_request *active_request; 118 + struct i915_request *active_request; 121 119 bool stalled; 122 120 }; 123 121 ··· 158 156 struct i915_vma *vma; 159 157 }; 160 158 161 - struct drm_i915_gem_request; 159 + struct i915_request; 160 + 161 + #define I915_MAX_VCS 4 162 + #define I915_MAX_VECS 2 162 163 163 164 /* 164 165 * Engine IDs definitions. ··· 172 167 BCS, 173 168 VCS, 174 169 VCS2, 170 + VCS3, 171 + VCS4, 175 172 #define _VCS(n) (VCS + (n)) 176 - VECS 173 + VECS, 174 + VECS2 175 + #define _VECS(n) (VECS + (n)) 177 176 }; 178 177 179 178 struct i915_priolist { ··· 209 200 bool no_priolist; 210 201 211 202 /** 212 - * @elsp: the ExecList Submission Port register 203 + * @submit_reg: gen-specific execlist submission register 204 + * set to the ExecList Submission Port (elsp) register pre-Gen11 and to 205 + * the ExecList Submission Queue Contents register array for Gen11+ 213 206 */ 214 - u32 __iomem *elsp; 207 + u32 __iomem *submit_reg; 208 + 209 + /** 210 + * @ctrl_reg: the enhanced execlists control register, used to load the 211 + * submit queue on the HW and to request preemptions to idle 212 + */ 213 + u32 __iomem *ctrl_reg; 215 214 216 215 /** 217 216 * @port: execlist port states ··· 235 218 /** 236 219 * @request_count: combined request and submission count 237 220 */ 238 - struct drm_i915_gem_request *request_count; 221 + struct i915_request *request_count; 239 222 #define EXECLIST_COUNT_BITS 2 240 223 #define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS) 241 224 #define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS) ··· 271 254 * @port_mask: number of execlist ports - 1 272 255 */ 273 256 unsigned int port_mask; 257 + 258 + /** 259 + * @queue_priority: Highest pending priority. 260 + * 261 + * When we add requests into the queue, or adjust the priority of 262 + * executing requests, we compute the maximum priority of those 263 + * pending requests. We can then use this value to determine if 264 + * we need to preempt the executing requests to service the queue. 
265 + */ 266 + int queue_priority; 274 267 275 268 /** 276 269 * @queue: queue of requests, in priority lists ··· 364 337 365 338 spinlock_t rb_lock; /* protects the rb and wraps irq_lock */ 366 339 struct rb_root waiters; /* sorted by retirement, priority */ 367 - struct rb_root signals; /* sorted by retirement */ 340 + struct list_head signals; /* sorted by retirement */ 368 341 struct task_struct *signaler; /* used for fence signalling */ 369 - struct drm_i915_gem_request __rcu *first_signal; 342 + 370 343 struct timer_list fake_irq; /* used after a missed interrupt */ 371 344 struct timer_list hangcheck; /* detect missed interrupts */ 372 345 ··· 418 391 419 392 int (*init_hw)(struct intel_engine_cs *engine); 420 393 void (*reset_hw)(struct intel_engine_cs *engine, 421 - struct drm_i915_gem_request *req); 394 + struct i915_request *rq); 422 395 423 396 void (*park)(struct intel_engine_cs *engine); 424 397 void (*unpark)(struct intel_engine_cs *engine); ··· 429 402 struct i915_gem_context *ctx); 430 403 void (*context_unpin)(struct intel_engine_cs *engine, 431 404 struct i915_gem_context *ctx); 432 - int (*request_alloc)(struct drm_i915_gem_request *req); 433 - int (*init_context)(struct drm_i915_gem_request *req); 405 + int (*request_alloc)(struct i915_request *rq); 406 + int (*init_context)(struct i915_request *rq); 434 407 435 - int (*emit_flush)(struct drm_i915_gem_request *request, 436 - u32 mode); 408 + int (*emit_flush)(struct i915_request *request, u32 mode); 437 409 #define EMIT_INVALIDATE BIT(0) 438 410 #define EMIT_FLUSH BIT(1) 439 411 #define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH) 440 - int (*emit_bb_start)(struct drm_i915_gem_request *req, 412 + int (*emit_bb_start)(struct i915_request *rq, 441 413 u64 offset, u32 length, 442 414 unsigned int dispatch_flags); 443 415 #define I915_DISPATCH_SECURE BIT(0) 444 416 #define I915_DISPATCH_PINNED BIT(1) 445 417 #define I915_DISPATCH_RS BIT(2) 446 - void (*emit_breadcrumb)(struct drm_i915_gem_request *req, 447 - u32 *cs); 418 + void (*emit_breadcrumb)(struct i915_request *rq, u32 *cs); 448 419 int emit_breadcrumb_sz; 449 420 450 421 /* Pass the request to the hardware queue (e.g. directly into ··· 451 426 * This is called from an atomic context with irqs disabled; must 452 427 * be irq safe. 453 428 */ 454 - void (*submit_request)(struct drm_i915_gem_request *req); 429 + void (*submit_request)(struct i915_request *rq); 455 430 456 431 /* Call when the priority on a request has changed and it and its 457 432 * dependencies may need rescheduling. Note the request itself may ··· 459 434 * 460 435 * Called under the struct_mutex. 461 436 */ 462 - void (*schedule)(struct drm_i915_gem_request *request, 463 - int priority); 437 + void (*schedule)(struct i915_request *request, int priority); 464 438 465 439 /* 466 440 * Cancel all requests on the hardware, or queued for execution. 
··· 527 503 } mbox; 528 504 529 505 /* AKA wait() */ 530 - int (*sync_to)(struct drm_i915_gem_request *req, 531 - struct drm_i915_gem_request *signal); 532 - u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs); 506 + int (*sync_to)(struct i915_request *rq, 507 + struct i915_request *signal); 508 + u32 *(*signal)(struct i915_request *rq, u32 *cs); 533 509 } semaphore; 534 510 535 511 struct intel_engine_execlists execlists; ··· 750 726 751 727 void intel_legacy_submission_resume(struct drm_i915_private *dev_priv); 752 728 753 - int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req); 729 + int __must_check intel_ring_cacheline_align(struct i915_request *rq); 754 730 755 731 int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes); 756 - u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req, 757 - unsigned int n); 732 + u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n); 758 733 759 - static inline void 760 - intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs) 734 + static inline void intel_ring_advance(struct i915_request *rq, u32 *cs) 761 735 { 762 736 /* Dummy function. 763 737 * ··· 765 743 * reserved for the command packet (i.e. the value passed to 766 744 * intel_ring_begin()). 767 745 */ 768 - GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs); 746 + GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs); 769 747 } 770 748 771 - static inline u32 772 - intel_ring_wrap(const struct intel_ring *ring, u32 pos) 749 + static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos) 773 750 { 774 751 return pos & (ring->size - 1); 775 752 } 776 753 777 - static inline u32 778 - intel_ring_offset(const struct drm_i915_gem_request *req, void *addr) 754 + static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr) 779 755 { 780 756 /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */ 781 - u32 offset = addr - req->ring->vaddr; 782 - GEM_BUG_ON(offset > req->ring->size); 783 - return intel_ring_wrap(req->ring, offset); 757 + u32 offset = addr - rq->ring->vaddr; 758 + GEM_BUG_ON(offset > rq->ring->size); 759 + return intel_ring_wrap(rq->ring, offset); 784 760 } 785 761 786 762 static inline void ··· 816 796 { 817 797 /* Whilst writes to the tail are strictly order, there is no 818 798 * serialisation between readers and the writers. The tail may be 819 - * read by i915_gem_request_retire() just as it is being updated 799 + * read by i915_request_retire() just as it is being updated 820 800 * by execlists, as although the breadcrumb is complete, the context 821 801 * switch hasn't been seen. 
822 802 */ ··· 858 838 } 859 839 860 840 int init_workarounds_ring(struct intel_engine_cs *engine); 861 - int intel_ring_workarounds_emit(struct drm_i915_gem_request *req); 841 + int intel_ring_workarounds_emit(struct i915_request *rq); 862 842 863 843 void intel_engine_get_instdone(struct intel_engine_cs *engine, 864 844 struct intel_instdone *instdone); ··· 886 866 int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine); 887 867 888 868 static inline void intel_wait_init(struct intel_wait *wait, 889 - struct drm_i915_gem_request *rq) 869 + struct i915_request *rq) 890 870 { 891 871 wait->tsk = current; 892 872 wait->request = rq; ··· 912 892 913 893 static inline bool 914 894 intel_wait_update_request(struct intel_wait *wait, 915 - const struct drm_i915_gem_request *rq) 895 + const struct i915_request *rq) 916 896 { 917 - return intel_wait_update_seqno(wait, i915_gem_request_global_seqno(rq)); 897 + return intel_wait_update_seqno(wait, i915_request_global_seqno(rq)); 918 898 } 919 899 920 900 static inline bool ··· 925 905 926 906 static inline bool 927 907 intel_wait_check_request(const struct intel_wait *wait, 928 - const struct drm_i915_gem_request *rq) 908 + const struct i915_request *rq) 929 909 { 930 - return intel_wait_check_seqno(wait, i915_gem_request_global_seqno(rq)); 910 + return intel_wait_check_seqno(wait, i915_request_global_seqno(rq)); 931 911 } 932 912 933 913 static inline bool intel_wait_complete(const struct intel_wait *wait) ··· 939 919 struct intel_wait *wait); 940 920 void intel_engine_remove_wait(struct intel_engine_cs *engine, 941 921 struct intel_wait *wait); 942 - void intel_engine_enable_signaling(struct drm_i915_gem_request *request, 943 - bool wakeup); 944 - void intel_engine_cancel_signaling(struct drm_i915_gem_request *request); 922 + void intel_engine_enable_signaling(struct i915_request *request, bool wakeup); 923 + void intel_engine_cancel_signaling(struct i915_request *request); 945 924 946 925 static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine) 947 926 { ··· 959 940 960 941 void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine); 961 942 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine); 962 - bool intel_breadcrumbs_busy(struct intel_engine_cs *engine); 963 943 964 944 static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset) 965 945 {
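For orientation, a minimal sketch of how the waiter helpers above compose with the renamed i915_request type. The function name is invented for this note, locking, timeouts and signal handling are omitted, and the scheduling calls are the ordinary kernel primitives; the real wait loop lives in i915_request_wait(), not in driver code like this.

static void example_wait_for_breadcrumb(struct intel_engine_cs *engine,
                                        struct i915_request *rq)
{
        struct intel_wait wait;

        /* Record the current task and the request we are waiting on. */
        intel_wait_init(&wait, rq);

        /* Queue ourselves on the engine; the breadcrumb interrupt wakes us
         * once the request's seqno has passed.
         */
        intel_engine_add_wait(engine, &wait);

        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (intel_wait_complete(&wait))
                        break;
                schedule();
        }
        __set_current_state(TASK_RUNNING);

        intel_engine_remove_wait(engine, &wait);
}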
+3
drivers/gpu/drm/i915/intel_runtime_pm.c
··· 130 130 return "AUX_D"; 131 131 case POWER_DOMAIN_AUX_F: 132 132 return "AUX_F"; 133 + case POWER_DOMAIN_AUX_IO_A: 134 + return "AUX_IO_A"; 133 135 case POWER_DOMAIN_GMBUS: 134 136 return "GMBUS"; 135 137 case POWER_DOMAIN_INIT: ··· 1855 1853 BIT_ULL(POWER_DOMAIN_INIT)) 1856 1854 #define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \ 1857 1855 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 1856 + BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 1858 1857 BIT_ULL(POWER_DOMAIN_INIT)) 1859 1858 #define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \ 1860 1859 BIT_ULL(POWER_DOMAIN_AUX_B) | \
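The new POWER_DOMAIN_AUX_IO_A bit simply joins the CNL AUX-A power-well mask, so that well now also covers the AUX IO domain. As a small illustration (the helper name is made up for this note), membership in one of these u64 masks is a single BIT_ULL test:

static inline bool example_mask_has_domain(u64 well_mask,
                                           enum intel_display_power_domain domain)
{
        /* e.g. example_mask_has_domain(CNL_DISPLAY_AUX_A_POWER_DOMAINS,
         *                              POWER_DOMAIN_AUX_IO_A) is now true.
         */
        return well_mask & BIT_ULL(domain);
}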
+10 -2
drivers/gpu/drm/i915/intel_sdvo.c
··· 1705 1705 struct intel_sdvo *intel_sdvo = to_sdvo(encoder); 1706 1706 1707 1707 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, 1708 - &intel_sdvo->hotplug_active, 2); 1708 + &intel_sdvo->hotplug_active, 2); 1709 + } 1710 + 1711 + static bool intel_sdvo_hotplug(struct intel_encoder *encoder, 1712 + struct intel_connector *connector) 1713 + { 1714 + intel_sdvo_enable_hotplug(encoder); 1715 + 1716 + return intel_encoder_hotplug(encoder, connector); 1709 1717 } 1710 1718 1711 1719 static bool ··· 2524 2516 * Some SDVO devices have one-shot hotplug interrupts. 2525 2517 * Ensure that they get re-enabled when an interrupt happens. 2526 2518 */ 2527 - intel_encoder->hot_plug = intel_sdvo_enable_hotplug; 2519 + intel_encoder->hotplug = intel_sdvo_hotplug; 2528 2520 intel_sdvo_enable_hotplug(intel_encoder); 2529 2521 } else { 2530 2522 intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+46 -1
drivers/gpu/drm/i915/intel_uc.c
··· 361 361 goto err_out; 362 362 363 363 if (USES_HUC(dev_priv)) { 364 - ret = intel_huc_init_hw(huc); 364 + ret = intel_huc_fw_upload(huc); 365 365 if (ret) 366 366 goto err_out; 367 367 } ··· 444 444 445 445 if (USES_GUC_SUBMISSION(dev_priv)) 446 446 gen9_disable_guc_interrupts(dev_priv); 447 + } 448 + 449 + int intel_uc_suspend(struct drm_i915_private *i915) 450 + { 451 + struct intel_guc *guc = &i915->guc; 452 + int err; 453 + 454 + if (!USES_GUC(i915)) 455 + return 0; 456 + 457 + if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS) 458 + return 0; 459 + 460 + err = intel_guc_suspend(guc); 461 + if (err) { 462 + DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err); 463 + return err; 464 + } 465 + 466 + gen9_disable_guc_interrupts(i915); 467 + 468 + return 0; 469 + } 470 + 471 + int intel_uc_resume(struct drm_i915_private *i915) 472 + { 473 + struct intel_guc *guc = &i915->guc; 474 + int err; 475 + 476 + if (!USES_GUC(i915)) 477 + return 0; 478 + 479 + if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS) 480 + return 0; 481 + 482 + if (i915_modparams.guc_log_level) 483 + gen9_enable_guc_interrupts(i915); 484 + 485 + err = intel_guc_resume(guc); 486 + if (err) { 487 + DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err); 488 + return err; 489 + } 490 + 491 + return 0; 447 492 }
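intel_uc_suspend()/intel_uc_resume() are written to be safe to call unconditionally: they return 0 early when GuC is not in use or its firmware never loaded successfully. A hypothetical caller sketch (the function name below is illustrative, not part of this patch):

static int example_device_suspend(struct drm_i915_private *i915)
{
        int ret;

        /* No-op unless GuC is enabled and its firmware loaded successfully. */
        ret = intel_uc_suspend(i915);
        if (ret)
                return ret;

        /* ... park engines, save display state, and so on ... */
        return 0;
}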
+2
drivers/gpu/drm/i915/intel_uc.h
··· 39 39 void intel_uc_fini_hw(struct drm_i915_private *dev_priv); 40 40 int intel_uc_init(struct drm_i915_private *dev_priv); 41 41 void intel_uc_fini(struct drm_i915_private *dev_priv); 42 + int intel_uc_suspend(struct drm_i915_private *dev_priv); 43 + int intel_uc_resume(struct drm_i915_private *dev_priv); 42 44 43 45 static inline bool intel_uc_is_using_guc(void) 44 46 {
+147 -10
drivers/gpu/drm/i915/intel_uncore.c
··· 37 37 "render", 38 38 "blitter", 39 39 "media", 40 + "vdbox0", 41 + "vdbox1", 42 + "vdbox2", 43 + "vdbox3", 44 + "vebox0", 45 + "vebox1", 40 46 }; 41 47 42 48 const char * ··· 780 774 /* We give fast paths for the really cool registers */ 781 775 #define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000) 782 776 777 + #define GEN11_NEEDS_FORCE_WAKE(reg) \ 778 + ((reg) < 0x40000 || ((reg) >= 0x1c0000 && (reg) < 0x1dc000)) 779 + 783 780 #define __gen6_reg_read_fw_domains(offset) \ 784 781 ({ \ 785 782 enum forcewake_domains __fwd; \ ··· 835 826 if (!entry) 836 827 return 0; 837 828 829 + /* 830 + * The list of FW domains depends on the SKU in gen11+ so we 831 + * can't determine it statically. We use FORCEWAKE_ALL and 832 + * translate it here to the list of available domains. 833 + */ 834 + if (entry->domains == FORCEWAKE_ALL) 835 + return dev_priv->uncore.fw_domains; 836 + 838 837 WARN(entry->domains & ~dev_priv->uncore.fw_domains, 839 838 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n", 840 839 entry->domains & ~dev_priv->uncore.fw_domains, offset); ··· 877 860 __fwd; \ 878 861 }) 879 862 863 + #define __gen11_fwtable_reg_read_fw_domains(offset) \ 864 + ({ \ 865 + enum forcewake_domains __fwd = 0; \ 866 + if (GEN11_NEEDS_FORCE_WAKE((offset))) \ 867 + __fwd = find_fw_domain(dev_priv, offset); \ 868 + __fwd; \ 869 + }) 870 + 880 871 /* *Must* be sorted by offset! See intel_shadow_table_check(). */ 881 872 static const i915_reg_t gen8_shadowed_regs[] = { 882 873 RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */ ··· 893 868 RING_TAIL(GEN6_BSD_RING_BASE), /* 0x12000 (base) */ 894 869 RING_TAIL(VEBOX_RING_BASE), /* 0x1a000 (base) */ 895 870 RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */ 871 + /* TODO: Other registers are not yet used */ 872 + }; 873 + 874 + static const i915_reg_t gen11_shadowed_regs[] = { 875 + RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */ 876 + GEN6_RPNSWREQ, /* 0xA008 */ 877 + GEN6_RC_VIDEO_FREQ, /* 0xA00C */ 878 + RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */ 879 + RING_TAIL(GEN11_BSD_RING_BASE), /* 0x1C0000 (base) */ 880 + RING_TAIL(GEN11_BSD2_RING_BASE), /* 0x1C4000 (base) */ 881 + RING_TAIL(GEN11_VEBOX_RING_BASE), /* 0x1C8000 (base) */ 882 + RING_TAIL(GEN11_BSD3_RING_BASE), /* 0x1D0000 (base) */ 883 + RING_TAIL(GEN11_BSD4_RING_BASE), /* 0x1D4000 (base) */ 884 + RING_TAIL(GEN11_VEBOX2_RING_BASE), /* 0x1D8000 (base) */ 896 885 /* TODO: Other registers are not yet used */ 897 886 }; 898 887 ··· 922 883 return 0; 923 884 } 924 885 925 - static bool is_gen8_shadowed(u32 offset) 926 - { 927 - const i915_reg_t *regs = gen8_shadowed_regs; 928 - 929 - return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs), 930 - mmio_reg_cmp); 886 + #define __is_genX_shadowed(x) \ 887 + static bool is_gen##x##_shadowed(u32 offset) \ 888 + { \ 889 + const i915_reg_t *regs = gen##x##_shadowed_regs; \ 890 + return BSEARCH(offset, regs, ARRAY_SIZE(gen##x##_shadowed_regs), \ 891 + mmio_reg_cmp); \ 931 892 } 893 + 894 + __is_genX_shadowed(8) 895 + __is_genX_shadowed(11) 932 896 933 897 #define __gen8_reg_write_fw_domains(offset) \ 934 898 ({ \ ··· 971 929 __fwd; \ 972 930 }) 973 931 932 + #define __gen11_fwtable_reg_write_fw_domains(offset) \ 933 + ({ \ 934 + enum forcewake_domains __fwd = 0; \ 935 + if (GEN11_NEEDS_FORCE_WAKE((offset)) && !is_gen11_shadowed(offset)) \ 936 + __fwd = find_fw_domain(dev_priv, offset); \ 937 + __fwd; \ 938 + }) 939 + 974 940 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). 
*/ 975 941 static const struct intel_forcewake_range __gen9_fw_ranges[] = { 976 942 GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER), ··· 1013 963 GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER), 1014 964 GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER), 1015 965 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA), 966 + }; 967 + 968 + /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */ 969 + static const struct intel_forcewake_range __gen11_fw_ranges[] = { 970 + GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER), 971 + GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */ 972 + GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), 973 + GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER), 974 + GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER), 975 + GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER), 976 + GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), 977 + GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER), 978 + GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), 979 + GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER), 980 + GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), 981 + GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER), 982 + GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER), 983 + GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER), 984 + GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL), 985 + GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER), 986 + GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER), 987 + GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER), 988 + GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER), 989 + GEN_FW_RANGE(0xe900, 0x243ff, FORCEWAKE_BLITTER), 990 + GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER), 991 + GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER), 992 + GEN_FW_RANGE(0x40000, 0x1bffff, 0), 993 + GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), 994 + GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), 995 + GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), 996 + GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER), 997 + GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), 998 + GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), 999 + GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1) 1016 1000 }; 1017 1001 1018 1002 static void ··· 1179 1095 } 1180 1096 #define __gen6_read(x) __gen_read(gen6, x) 1181 1097 #define __fwtable_read(x) __gen_read(fwtable, x) 1098 + #define __gen11_fwtable_read(x) __gen_read(gen11_fwtable, x) 1182 1099 1100 + __gen11_fwtable_read(8) 1101 + __gen11_fwtable_read(16) 1102 + __gen11_fwtable_read(32) 1103 + __gen11_fwtable_read(64) 1183 1104 __fwtable_read(8) 1184 1105 __fwtable_read(16) 1185 1106 __fwtable_read(32) ··· 1194 1105 __gen6_read(32) 1195 1106 __gen6_read(64) 1196 1107 1108 + #undef __gen11_fwtable_read 1197 1109 #undef __fwtable_read 1198 1110 #undef __gen6_read 1199 1111 #undef GEN6_READ_FOOTER ··· 1271 1181 } 1272 1182 #define __gen8_write(x) __gen_write(gen8, x) 1273 1183 #define __fwtable_write(x) __gen_write(fwtable, x) 1184 + #define __gen11_fwtable_write(x) __gen_write(gen11_fwtable, x) 1274 1185 1186 + __gen11_fwtable_write(8) 1187 + __gen11_fwtable_write(16) 1188 + __gen11_fwtable_write(32) 1275 1189 __fwtable_write(8) 1276 1190 __fwtable_write(16) 1277 1191 __fwtable_write(32) ··· 1286 1192 __gen6_write(16) 1287 1193 __gen6_write(32) 1288 1194 1195 + #undef __gen11_fwtable_write 1289 1196 #undef __fwtable_write 1290 1197 #undef __gen8_write 1291 1198 #undef __gen6_write ··· 1335 1240 BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER)); 1336 1241 BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << 
FW_DOMAIN_ID_BLITTER)); 1337 1242 BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA)); 1243 + BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0)); 1244 + BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1)); 1245 + BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2)); 1246 + BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3)); 1247 + BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0)); 1248 + BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1)); 1249 + 1338 1250 1339 1251 d->mask = BIT(domain_id); 1340 1252 ··· 1369 1267 dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL); 1370 1268 } 1371 1269 1372 - if (INTEL_GEN(dev_priv) >= 9) { 1270 + if (INTEL_GEN(dev_priv) >= 11) { 1271 + int i; 1272 + 1273 + dev_priv->uncore.funcs.force_wake_get = fw_domains_get; 1274 + dev_priv->uncore.funcs.force_wake_put = fw_domains_put; 1275 + fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1276 + FORCEWAKE_RENDER_GEN9, 1277 + FORCEWAKE_ACK_RENDER_GEN9); 1278 + fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER, 1279 + FORCEWAKE_BLITTER_GEN9, 1280 + FORCEWAKE_ACK_BLITTER_GEN9); 1281 + for (i = 0; i < I915_MAX_VCS; i++) { 1282 + if (!HAS_ENGINE(dev_priv, _VCS(i))) 1283 + continue; 1284 + 1285 + fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VDBOX0 + i, 1286 + FORCEWAKE_MEDIA_VDBOX_GEN11(i), 1287 + FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i)); 1288 + } 1289 + for (i = 0; i < I915_MAX_VECS; i++) { 1290 + if (!HAS_ENGINE(dev_priv, _VECS(i))) 1291 + continue; 1292 + 1293 + fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VEBOX0 + i, 1294 + FORCEWAKE_MEDIA_VEBOX_GEN11(i), 1295 + FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i)); 1296 + } 1297 + } else if (IS_GEN9(dev_priv) || IS_GEN10(dev_priv)) { 1373 1298 dev_priv->uncore.funcs.force_wake_get = 1374 1299 fw_domains_get_with_fallback; 1375 1300 dev_priv->uncore.funcs.force_wake_put = fw_domains_put; ··· 1551 1422 ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8); 1552 1423 ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6); 1553 1424 } 1554 - } else { 1425 + } else if (IS_GEN(dev_priv, 9, 10)) { 1555 1426 ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges); 1556 1427 ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable); 1557 1428 ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable); 1429 + } else { 1430 + ASSIGN_FW_DOMAINS_TABLE(__gen11_fw_ranges); 1431 + ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen11_fwtable); 1432 + ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen11_fwtable); 1558 1433 } 1559 1434 1560 1435 iosf_mbi_register_pmic_bus_access_notifier( ··· 2127 1994 u32 offset = i915_mmio_reg_offset(reg); 2128 1995 enum forcewake_domains fw_domains; 2129 1996 2130 - if (HAS_FWTABLE(dev_priv)) { 1997 + if (INTEL_GEN(dev_priv) >= 11) { 1998 + fw_domains = __gen11_fwtable_reg_read_fw_domains(offset); 1999 + } else if (HAS_FWTABLE(dev_priv)) { 2131 2000 fw_domains = __fwtable_reg_read_fw_domains(offset); 2132 2001 } else if (INTEL_GEN(dev_priv) >= 6) { 2133 2002 fw_domains = __gen6_reg_read_fw_domains(offset); ··· 2150 2015 u32 offset = i915_mmio_reg_offset(reg); 2151 2016 enum forcewake_domains fw_domains; 2152 2017 2153 - if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) { 2018 + if (INTEL_GEN(dev_priv) >= 11) { 2019 + fw_domains = __gen11_fwtable_reg_write_fw_domains(offset); 2020 + } else if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) { 2154 2021 fw_domains = __fwtable_reg_write_fw_domains(offset); 2155 2022 } else if (IS_GEN8(dev_priv)) { 2156 2023 fw_domains = __gen8_reg_write_fw_domains(offset);
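Gen11 extends the forcewake-table idea: GEN11_NEEDS_FORCE_WAKE() adds the 0x1c0000-0x1dbfff window, and __gen11_fw_ranges maps sub-ranges of it to the new per-VDBOX/VEBOX domains (with FORCEWAKE_ALL entries translated at lookup time into whatever domains the SKU actually has). Below is a freestanding sketch of the range-table lookup; the constants are made up and only loosely mirror the real domain bits, and it uses a linear scan where the driver binary-searches the sorted table.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fw_range { uint32_t start, end, domains; };

enum { FW_RENDER = 1 << 0, FW_BLITTER = 1 << 1, FW_VDBOX1 = 1 << 4 };

static const struct fw_range gen11_ranges[] = {
        { 0x002000, 0x0026ff, FW_RENDER  },
        { 0x002700, 0x002fff, FW_BLITTER },
        { 0x1c4000, 0x1c7fff, FW_VDBOX1  },     /* second BSD engine's window */
};

static uint32_t fw_domains_for(uint32_t offset)
{
        for (size_t i = 0; i < sizeof(gen11_ranges) / sizeof(gen11_ranges[0]); i++)
                if (offset >= gen11_ranges[i].start && offset <= gen11_ranges[i].end)
                        return gen11_ranges[i].domains;
        return 0;       /* offset needs no forcewake */
}

int main(void)
{
        /* An offset in the 0x1c4000-0x1c7fff window resolves to the VDBOX1 bit. */
        printf("0x1c4100 -> domains 0x%x\n", fw_domains_for(0x1c4100));
        return 0;
}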
+17 -6
drivers/gpu/drm/i915/intel_uncore.h
··· 37 37 FW_DOMAIN_ID_RENDER = 0, 38 38 FW_DOMAIN_ID_BLITTER, 39 39 FW_DOMAIN_ID_MEDIA, 40 + FW_DOMAIN_ID_MEDIA_VDBOX0, 41 + FW_DOMAIN_ID_MEDIA_VDBOX1, 42 + FW_DOMAIN_ID_MEDIA_VDBOX2, 43 + FW_DOMAIN_ID_MEDIA_VDBOX3, 44 + FW_DOMAIN_ID_MEDIA_VEBOX0, 45 + FW_DOMAIN_ID_MEDIA_VEBOX1, 40 46 41 47 FW_DOMAIN_ID_COUNT 42 48 }; 43 49 44 50 enum forcewake_domains { 45 - FORCEWAKE_RENDER = BIT(FW_DOMAIN_ID_RENDER), 46 - FORCEWAKE_BLITTER = BIT(FW_DOMAIN_ID_BLITTER), 47 - FORCEWAKE_MEDIA = BIT(FW_DOMAIN_ID_MEDIA), 48 - FORCEWAKE_ALL = (FORCEWAKE_RENDER | 49 - FORCEWAKE_BLITTER | 50 - FORCEWAKE_MEDIA) 51 + FORCEWAKE_RENDER = BIT(FW_DOMAIN_ID_RENDER), 52 + FORCEWAKE_BLITTER = BIT(FW_DOMAIN_ID_BLITTER), 53 + FORCEWAKE_MEDIA = BIT(FW_DOMAIN_ID_MEDIA), 54 + FORCEWAKE_MEDIA_VDBOX0 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX0), 55 + FORCEWAKE_MEDIA_VDBOX1 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX1), 56 + FORCEWAKE_MEDIA_VDBOX2 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX2), 57 + FORCEWAKE_MEDIA_VDBOX3 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX3), 58 + FORCEWAKE_MEDIA_VEBOX0 = BIT(FW_DOMAIN_ID_MEDIA_VEBOX0), 59 + FORCEWAKE_MEDIA_VEBOX1 = BIT(FW_DOMAIN_ID_MEDIA_VEBOX1), 60 + 61 + FORCEWAKE_ALL = BIT(FW_DOMAIN_ID_COUNT) - 1 51 62 }; 52 63 53 64 struct intel_uncore_funcs {
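Worth noting about the reworked FORCEWAKE_ALL: with the six media domains added, FW_DOMAIN_ID_COUNT is 9, so BIT(FW_DOMAIN_ID_COUNT) - 1 evaluates to 0x1ff, i.e. all nine domain bits set. Deriving it from the count keeps FORCEWAKE_ALL correct automatically as further domain IDs are appended, instead of OR-ing every domain together by hand as the old definition did.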
+3 -3
drivers/gpu/drm/i915/selftests/huge_pages.c
··· 964 964 u32 dword, 965 965 u32 value) 966 966 { 967 - struct drm_i915_gem_request *rq; 967 + struct i915_request *rq; 968 968 struct i915_vma *batch; 969 969 int flags = 0; 970 970 int err; ··· 975 975 if (err) 976 976 return err; 977 977 978 - rq = i915_gem_request_alloc(engine, ctx); 978 + rq = i915_request_alloc(engine, ctx); 979 979 if (IS_ERR(rq)) 980 980 return PTR_ERR(rq); 981 981 ··· 1003 1003 reservation_object_unlock(vma->resv); 1004 1004 1005 1005 err_request: 1006 - __i915_add_request(rq, err == 0); 1006 + __i915_request_add(rq, err == 0); 1007 1007 1008 1008 return err; 1009 1009 }
+4 -4
drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
··· 178 178 u32 v) 179 179 { 180 180 struct drm_i915_private *i915 = to_i915(obj->base.dev); 181 - struct drm_i915_gem_request *rq; 181 + struct i915_request *rq; 182 182 struct i915_vma *vma; 183 183 u32 *cs; 184 184 int err; ··· 191 191 if (IS_ERR(vma)) 192 192 return PTR_ERR(vma); 193 193 194 - rq = i915_gem_request_alloc(i915->engine[RCS], i915->kernel_context); 194 + rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context); 195 195 if (IS_ERR(rq)) { 196 196 i915_vma_unpin(vma); 197 197 return PTR_ERR(rq); ··· 199 199 200 200 cs = intel_ring_begin(rq, 4); 201 201 if (IS_ERR(cs)) { 202 - __i915_add_request(rq, false); 202 + __i915_request_add(rq, false); 203 203 i915_vma_unpin(vma); 204 204 return PTR_ERR(cs); 205 205 } ··· 229 229 reservation_object_add_excl_fence(obj->resv, &rq->fence); 230 230 reservation_object_unlock(obj->resv); 231 231 232 - __i915_add_request(rq, true); 232 + __i915_request_add(rq, true); 233 233 234 234 return 0; 235 235 }
+4 -4
drivers/gpu/drm/i915/selftests/i915_gem_context.c
··· 114 114 struct drm_i915_private *i915 = to_i915(obj->base.dev); 115 115 struct i915_address_space *vm = 116 116 ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base; 117 - struct drm_i915_gem_request *rq; 117 + struct i915_request *rq; 118 118 struct i915_vma *vma; 119 119 struct i915_vma *batch; 120 120 unsigned int flags; ··· 152 152 goto err_vma; 153 153 } 154 154 155 - rq = i915_gem_request_alloc(engine, ctx); 155 + rq = i915_request_alloc(engine, ctx); 156 156 if (IS_ERR(rq)) { 157 157 err = PTR_ERR(rq); 158 158 goto err_batch; ··· 180 180 reservation_object_add_excl_fence(obj->resv, &rq->fence); 181 181 reservation_object_unlock(obj->resv); 182 182 183 - __i915_add_request(rq, true); 183 + __i915_request_add(rq, true); 184 184 185 185 return 0; 186 186 187 187 err_request: 188 - __i915_add_request(rq, false); 188 + __i915_request_add(rq, false); 189 189 err_batch: 190 190 i915_vma_unpin(batch); 191 191 err_vma:
+3 -3
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
··· 407 407 mutex_lock(&i915->drm.struct_mutex); 408 408 onstack_fence_init(&fence); 409 409 do { 410 - struct drm_i915_gem_request *rq; 410 + struct i915_request *rq; 411 411 struct i915_gem_context *ctx; 412 412 413 413 ctx = live_context(i915, file); ··· 416 416 417 417 /* We will need some GGTT space for the rq's context */ 418 418 igt_evict_ctl.fail_if_busy = true; 419 - rq = i915_gem_request_alloc(engine, ctx); 419 + rq = i915_request_alloc(engine, ctx); 420 420 igt_evict_ctl.fail_if_busy = false; 421 421 422 422 if (IS_ERR(rq)) { ··· 437 437 if (err < 0) 438 438 break; 439 439 440 - i915_add_request(rq); 440 + i915_request_add(rq); 441 441 count++; 442 442 err = 0; 443 443 } while(1);
+3 -3
drivers/gpu/drm/i915/selftests/i915_gem_object.c
··· 436 436 static int make_obj_busy(struct drm_i915_gem_object *obj) 437 437 { 438 438 struct drm_i915_private *i915 = to_i915(obj->base.dev); 439 - struct drm_i915_gem_request *rq; 439 + struct i915_request *rq; 440 440 struct i915_vma *vma; 441 441 int err; 442 442 ··· 448 448 if (err) 449 449 return err; 450 450 451 - rq = i915_gem_request_alloc(i915->engine[RCS], i915->kernel_context); 451 + rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context); 452 452 if (IS_ERR(rq)) { 453 453 i915_vma_unpin(vma); 454 454 return PTR_ERR(rq); 455 455 } 456 456 457 457 i915_vma_move_to_active(vma, rq, 0); 458 - i915_add_request(rq); 458 + i915_request_add(rq); 459 459 460 460 i915_gem_object_set_active_reference(obj); 461 461 i915_vma_unpin(vma);
+61 -64
drivers/gpu/drm/i915/selftests/i915_gem_request.c drivers/gpu/drm/i915/selftests/i915_request.c
··· 32 32 static int igt_add_request(void *arg) 33 33 { 34 34 struct drm_i915_private *i915 = arg; 35 - struct drm_i915_gem_request *request; 35 + struct i915_request *request; 36 36 int err = -ENOMEM; 37 37 38 38 /* Basic preliminary test to create a request and let it loose! */ ··· 44 44 if (!request) 45 45 goto out_unlock; 46 46 47 - i915_add_request(request); 47 + i915_request_add(request); 48 48 49 49 err = 0; 50 50 out_unlock: ··· 56 56 { 57 57 const long T = HZ / 4; 58 58 struct drm_i915_private *i915 = arg; 59 - struct drm_i915_gem_request *request; 59 + struct i915_request *request; 60 60 int err = -EINVAL; 61 61 62 62 /* Submit a request, then wait upon it */ ··· 68 68 goto out_unlock; 69 69 } 70 70 71 - if (i915_wait_request(request, I915_WAIT_LOCKED, 0) != -ETIME) { 71 + if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) { 72 72 pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n"); 73 73 goto out_unlock; 74 74 } 75 75 76 - if (i915_wait_request(request, I915_WAIT_LOCKED, T) != -ETIME) { 76 + if (i915_request_wait(request, I915_WAIT_LOCKED, T) != -ETIME) { 77 77 pr_err("request wait succeeded (expected timeout before submit!)\n"); 78 78 goto out_unlock; 79 79 } 80 80 81 - if (i915_gem_request_completed(request)) { 81 + if (i915_request_completed(request)) { 82 82 pr_err("request completed before submit!!\n"); 83 83 goto out_unlock; 84 84 } 85 85 86 - i915_add_request(request); 86 + i915_request_add(request); 87 87 88 - if (i915_wait_request(request, I915_WAIT_LOCKED, 0) != -ETIME) { 88 + if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) { 89 89 pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n"); 90 90 goto out_unlock; 91 91 } 92 92 93 - if (i915_gem_request_completed(request)) { 93 + if (i915_request_completed(request)) { 94 94 pr_err("request completed immediately!\n"); 95 95 goto out_unlock; 96 96 } 97 97 98 - if (i915_wait_request(request, I915_WAIT_LOCKED, T / 2) != -ETIME) { 98 + if (i915_request_wait(request, I915_WAIT_LOCKED, T / 2) != -ETIME) { 99 99 pr_err("request wait succeeded (expected timeout!)\n"); 100 100 goto out_unlock; 101 101 } 102 102 103 - if (i915_wait_request(request, I915_WAIT_LOCKED, T) == -ETIME) { 103 + if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) { 104 104 pr_err("request wait timed out!\n"); 105 105 goto out_unlock; 106 106 } 107 107 108 - if (!i915_gem_request_completed(request)) { 108 + if (!i915_request_completed(request)) { 109 109 pr_err("request not complete after waiting!\n"); 110 110 goto out_unlock; 111 111 } 112 112 113 - if (i915_wait_request(request, I915_WAIT_LOCKED, T) == -ETIME) { 113 + if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) { 114 114 pr_err("request wait timed out when already complete!\n"); 115 115 goto out_unlock; 116 116 } ··· 126 126 { 127 127 const long T = HZ / 4; 128 128 struct drm_i915_private *i915 = arg; 129 - struct drm_i915_gem_request *request; 129 + struct i915_request *request; 130 130 int err = -EINVAL; 131 131 132 132 /* Submit a request, treat it as a fence and wait upon it */ ··· 145 145 } 146 146 147 147 mutex_lock(&i915->drm.struct_mutex); 148 - i915_add_request(request); 148 + i915_request_add(request); 149 149 mutex_unlock(&i915->drm.struct_mutex); 150 150 151 151 if (dma_fence_is_signaled(&request->fence)) { ··· 185 185 static int igt_request_rewind(void *arg) 186 186 { 187 187 struct drm_i915_private *i915 = arg; 188 - struct drm_i915_gem_request *request, *vip; 188 + struct 
i915_request *request, *vip; 189 189 struct i915_gem_context *ctx[2]; 190 190 int err = -EINVAL; 191 191 ··· 197 197 goto err_context_0; 198 198 } 199 199 200 - i915_gem_request_get(request); 201 - i915_add_request(request); 200 + i915_request_get(request); 201 + i915_request_add(request); 202 202 203 203 ctx[1] = mock_context(i915, "B"); 204 204 vip = mock_request(i915->engine[RCS], ctx[1], 0); ··· 210 210 /* Simulate preemption by manual reordering */ 211 211 if (!mock_cancel_request(request)) { 212 212 pr_err("failed to cancel request (already executed)!\n"); 213 - i915_add_request(vip); 213 + i915_request_add(vip); 214 214 goto err_context_1; 215 215 } 216 - i915_gem_request_get(vip); 217 - i915_add_request(vip); 216 + i915_request_get(vip); 217 + i915_request_add(vip); 218 218 rcu_read_lock(); 219 219 request->engine->submit_request(request); 220 220 rcu_read_unlock(); 221 221 222 222 mutex_unlock(&i915->drm.struct_mutex); 223 223 224 - if (i915_wait_request(vip, 0, HZ) == -ETIME) { 224 + if (i915_request_wait(vip, 0, HZ) == -ETIME) { 225 225 pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n", 226 226 vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS])); 227 227 goto err; 228 228 } 229 229 230 - if (i915_gem_request_completed(request)) { 230 + if (i915_request_completed(request)) { 231 231 pr_err("low priority request already completed\n"); 232 232 goto err; 233 233 } 234 234 235 235 err = 0; 236 236 err: 237 - i915_gem_request_put(vip); 237 + i915_request_put(vip); 238 238 mutex_lock(&i915->drm.struct_mutex); 239 239 err_context_1: 240 240 mock_context_close(ctx[1]); 241 - i915_gem_request_put(request); 241 + i915_request_put(request); 242 242 err_context_0: 243 243 mock_context_close(ctx[0]); 244 244 mock_device_flush(i915); ··· 246 246 return err; 247 247 } 248 248 249 - int i915_gem_request_mock_selftests(void) 249 + int i915_request_mock_selftests(void) 250 250 { 251 251 static const struct i915_subtest tests[] = { 252 252 SUBTEST(igt_add_request), ··· 303 303 { 304 304 struct drm_i915_private *i915 = t->i915; 305 305 306 - i915_gem_retire_requests(i915); 306 + i915_retire_requests(i915); 307 307 308 308 if (wait_for(intel_engines_are_idle(i915), 10)) { 309 309 pr_err("%s(%s): GPU not idle\n", t->func, t->name); ··· 343 343 344 344 for_each_engine(engine, i915, id) { 345 345 IGT_TIMEOUT(end_time); 346 - struct drm_i915_gem_request *request; 346 + struct i915_request *request; 347 347 unsigned long n, prime; 348 348 ktime_t times[2] = {}; 349 349 ··· 355 355 times[1] = ktime_get_raw(); 356 356 357 357 for (n = 0; n < prime; n++) { 358 - request = i915_gem_request_alloc(engine, 359 - i915->kernel_context); 358 + request = i915_request_alloc(engine, 359 + i915->kernel_context); 360 360 if (IS_ERR(request)) { 361 361 err = PTR_ERR(request); 362 362 goto out_unlock; ··· 375 375 * for latency. 
376 376 */ 377 377 378 - i915_add_request(request); 378 + i915_request_add(request); 379 379 } 380 - i915_wait_request(request, 380 + i915_request_wait(request, 381 381 I915_WAIT_LOCKED, 382 382 MAX_SCHEDULE_TIMEOUT); 383 383 ··· 447 447 return ERR_PTR(err); 448 448 } 449 449 450 - static struct drm_i915_gem_request * 450 + static struct i915_request * 451 451 empty_request(struct intel_engine_cs *engine, 452 452 struct i915_vma *batch) 453 453 { 454 - struct drm_i915_gem_request *request; 454 + struct i915_request *request; 455 455 int err; 456 456 457 - request = i915_gem_request_alloc(engine, 458 - engine->i915->kernel_context); 457 + request = i915_request_alloc(engine, engine->i915->kernel_context); 459 458 if (IS_ERR(request)) 460 459 return request; 461 460 ··· 466 467 goto out_request; 467 468 468 469 out_request: 469 - __i915_add_request(request, err == 0); 470 + __i915_request_add(request, err == 0); 470 471 return err ? ERR_PTR(err) : request; 471 472 } 472 473 ··· 494 495 495 496 for_each_engine(engine, i915, id) { 496 497 IGT_TIMEOUT(end_time); 497 - struct drm_i915_gem_request *request; 498 + struct i915_request *request; 498 499 unsigned long n, prime; 499 500 ktime_t times[2] = {}; 500 501 ··· 508 509 err = PTR_ERR(request); 509 510 goto out_batch; 510 511 } 511 - i915_wait_request(request, 512 + i915_request_wait(request, 512 513 I915_WAIT_LOCKED, 513 514 MAX_SCHEDULE_TIMEOUT); 514 515 ··· 522 523 goto out_batch; 523 524 } 524 525 } 525 - i915_wait_request(request, 526 + i915_request_wait(request, 526 527 I915_WAIT_LOCKED, 527 528 MAX_SCHEDULE_TIMEOUT); 528 529 ··· 632 633 { 633 634 struct drm_i915_private *i915 = arg; 634 635 struct intel_engine_cs *engine; 635 - struct drm_i915_gem_request *request[I915_NUM_ENGINES]; 636 + struct i915_request *request[I915_NUM_ENGINES]; 636 637 struct i915_vma *batch; 637 638 struct live_test t; 638 639 unsigned int id; ··· 657 658 } 658 659 659 660 for_each_engine(engine, i915, id) { 660 - request[id] = i915_gem_request_alloc(engine, 661 - i915->kernel_context); 661 + request[id] = i915_request_alloc(engine, i915->kernel_context); 662 662 if (IS_ERR(request[id])) { 663 663 err = PTR_ERR(request[id]); 664 664 pr_err("%s: Request allocation failed with err=%d\n", ··· 678 680 } 679 681 680 682 i915_vma_move_to_active(batch, request[id], 0); 681 - i915_gem_request_get(request[id]); 682 - i915_add_request(request[id]); 683 + i915_request_get(request[id]); 684 + i915_request_add(request[id]); 683 685 } 684 686 685 687 for_each_engine(engine, i915, id) { 686 - if (i915_gem_request_completed(request[id])) { 688 + if (i915_request_completed(request[id])) { 687 689 pr_err("%s(%s): request completed too early!\n", 688 690 __func__, engine->name); 689 691 err = -EINVAL; ··· 700 702 for_each_engine(engine, i915, id) { 701 703 long timeout; 702 704 703 - timeout = i915_wait_request(request[id], 705 + timeout = i915_request_wait(request[id], 704 706 I915_WAIT_LOCKED, 705 707 MAX_SCHEDULE_TIMEOUT); 706 708 if (timeout < 0) { ··· 710 712 goto out_request; 711 713 } 712 714 713 - GEM_BUG_ON(!i915_gem_request_completed(request[id])); 714 - i915_gem_request_put(request[id]); 715 + GEM_BUG_ON(!i915_request_completed(request[id])); 716 + i915_request_put(request[id]); 715 717 request[id] = NULL; 716 718 } 717 719 ··· 720 722 out_request: 721 723 for_each_engine(engine, i915, id) 722 724 if (request[id]) 723 - i915_gem_request_put(request[id]); 725 + i915_request_put(request[id]); 724 726 i915_vma_unpin(batch); 725 727 i915_vma_put(batch); 726 728 out_unlock: 
··· 731 733 static int live_sequential_engines(void *arg) 732 734 { 733 735 struct drm_i915_private *i915 = arg; 734 - struct drm_i915_gem_request *request[I915_NUM_ENGINES] = {}; 735 - struct drm_i915_gem_request *prev = NULL; 736 + struct i915_request *request[I915_NUM_ENGINES] = {}; 737 + struct i915_request *prev = NULL; 736 738 struct intel_engine_cs *engine; 737 739 struct live_test t; 738 740 unsigned int id; ··· 761 763 goto out_unlock; 762 764 } 763 765 764 - request[id] = i915_gem_request_alloc(engine, 765 - i915->kernel_context); 766 + request[id] = i915_request_alloc(engine, i915->kernel_context); 766 767 if (IS_ERR(request[id])) { 767 768 err = PTR_ERR(request[id]); 768 769 pr_err("%s: Request allocation failed for %s with err=%d\n", ··· 770 773 } 771 774 772 775 if (prev) { 773 - err = i915_gem_request_await_dma_fence(request[id], 774 - &prev->fence); 776 + err = i915_request_await_dma_fence(request[id], 777 + &prev->fence); 775 778 if (err) { 776 - i915_add_request(request[id]); 779 + i915_request_add(request[id]); 777 780 pr_err("%s: Request await failed for %s with err=%d\n", 778 781 __func__, engine->name, err); 779 782 goto out_request; ··· 791 794 i915_gem_object_set_active_reference(batch->obj); 792 795 i915_vma_get(batch); 793 796 794 - i915_gem_request_get(request[id]); 795 - i915_add_request(request[id]); 797 + i915_request_get(request[id]); 798 + i915_request_add(request[id]); 796 799 797 800 prev = request[id]; 798 801 } ··· 800 803 for_each_engine(engine, i915, id) { 801 804 long timeout; 802 805 803 - if (i915_gem_request_completed(request[id])) { 806 + if (i915_request_completed(request[id])) { 804 807 pr_err("%s(%s): request completed too early!\n", 805 808 __func__, engine->name); 806 809 err = -EINVAL; ··· 814 817 goto out_request; 815 818 } 816 819 817 - timeout = i915_wait_request(request[id], 820 + timeout = i915_request_wait(request[id], 818 821 I915_WAIT_LOCKED, 819 822 MAX_SCHEDULE_TIMEOUT); 820 823 if (timeout < 0) { ··· 824 827 goto out_request; 825 828 } 826 829 827 - GEM_BUG_ON(!i915_gem_request_completed(request[id])); 830 + GEM_BUG_ON(!i915_request_completed(request[id])); 828 831 } 829 832 830 833 err = end_live_test(&t); ··· 846 849 } 847 850 848 851 i915_vma_put(request[id]->batch); 849 - i915_gem_request_put(request[id]); 852 + i915_request_put(request[id]); 850 853 } 851 854 out_unlock: 852 855 mutex_unlock(&i915->drm.struct_mutex); 853 856 return err; 854 857 } 855 858 856 - int i915_gem_request_live_selftests(struct drm_i915_private *i915) 859 + int i915_request_live_selftests(struct drm_i915_private *i915) 857 860 { 858 861 static const struct i915_subtest tests[] = { 859 862 SUBTEST(live_nop_request),
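The selftest changes above are mechanical renames, but taken together they show the renamed request API end to end. A condensed sketch of the common pattern (allocate, submit, synchronously wait), with error handling trimmed, the function name invented for this note, and the caller assumed to hold struct_mutex as these selftests do:

static int example_submit_and_wait(struct drm_i915_private *i915,
                                   struct intel_engine_cs *engine)
{
        struct i915_request *rq;
        long timeout;

        rq = i915_request_alloc(engine, i915->kernel_context); /* was i915_gem_request_alloc() */
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        i915_request_get(rq);           /* keep a reference across the wait */
        i915_request_add(rq);           /* was i915_add_request() */

        timeout = i915_request_wait(rq, /* was i915_wait_request() */
                                    I915_WAIT_LOCKED,
                                    MAX_SCHEDULE_TIMEOUT);
        i915_request_put(rq);

        return timeout < 0 ? timeout : 0;
}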
+1 -1
drivers/gpu/drm/i915/selftests/i915_live_selftests.h
··· 11 11 */ 12 12 selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */ 13 13 selftest(uncore, intel_uncore_live_selftests) 14 - selftest(requests, i915_gem_request_live_selftests) 14 + selftest(requests, i915_request_live_selftests) 15 15 selftest(objects, i915_gem_object_live_selftests) 16 16 selftest(dmabuf, i915_gem_dmabuf_live_selftests) 17 17 selftest(coherency, i915_gem_coherency_live_selftests)
+1 -1
drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
··· 16 16 selftest(uncore, intel_uncore_mock_selftests) 17 17 selftest(breadcrumbs, intel_breadcrumbs_mock_selftests) 18 18 selftest(timelines, i915_gem_timeline_mock_selftests) 19 - selftest(requests, i915_gem_request_mock_selftests) 19 + selftest(requests, i915_request_mock_selftests) 20 20 selftest(objects, i915_gem_object_mock_selftests) 21 21 selftest(dmabuf, i915_gem_dmabuf_mock_selftests) 22 22 selftest(vma, i915_vma_mock_selftests)
+59 -60
drivers/gpu/drm/i915/selftests/intel_hangcheck.c
··· 92 92 } 93 93 94 94 static u64 hws_address(const struct i915_vma *hws, 95 - const struct drm_i915_gem_request *rq) 95 + const struct i915_request *rq) 96 96 { 97 97 return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context); 98 98 } 99 99 100 100 static int emit_recurse_batch(struct hang *h, 101 - struct drm_i915_gem_request *rq) 101 + struct i915_request *rq) 102 102 { 103 103 struct drm_i915_private *i915 = h->i915; 104 104 struct i915_address_space *vm = rq->ctx->ppgtt ? &rq->ctx->ppgtt->base : &i915->ggtt.base; ··· 204 204 return err; 205 205 } 206 206 207 - static struct drm_i915_gem_request * 207 + static struct i915_request * 208 208 hang_create_request(struct hang *h, struct intel_engine_cs *engine) 209 209 { 210 - struct drm_i915_gem_request *rq; 210 + struct i915_request *rq; 211 211 int err; 212 212 213 213 if (i915_gem_object_is_active(h->obj)) { ··· 232 232 h->batch = vaddr; 233 233 } 234 234 235 - rq = i915_gem_request_alloc(engine, h->ctx); 235 + rq = i915_request_alloc(engine, h->ctx); 236 236 if (IS_ERR(rq)) 237 237 return rq; 238 238 239 239 err = emit_recurse_batch(h, rq); 240 240 if (err) { 241 - __i915_add_request(rq, false); 241 + __i915_request_add(rq, false); 242 242 return ERR_PTR(err); 243 243 } 244 244 245 245 return rq; 246 246 } 247 247 248 - static u32 hws_seqno(const struct hang *h, 249 - const struct drm_i915_gem_request *rq) 248 + static u32 hws_seqno(const struct hang *h, const struct i915_request *rq) 250 249 { 251 250 return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE/sizeof(u32))]); 252 251 } ··· 318 319 flush_test(h->i915, I915_WAIT_LOCKED); 319 320 } 320 321 321 - static bool wait_for_hang(struct hang *h, struct drm_i915_gem_request *rq) 322 + static bool wait_for_hang(struct hang *h, struct i915_request *rq) 322 323 { 323 324 return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq), 324 325 rq->fence.seqno), ··· 331 332 static int igt_hang_sanitycheck(void *arg) 332 333 { 333 334 struct drm_i915_private *i915 = arg; 334 - struct drm_i915_gem_request *rq; 335 + struct i915_request *rq; 335 336 struct intel_engine_cs *engine; 336 337 enum intel_engine_id id; 337 338 struct hang h; ··· 358 359 goto fini; 359 360 } 360 361 361 - i915_gem_request_get(rq); 362 + i915_request_get(rq); 362 363 363 364 *h.batch = MI_BATCH_BUFFER_END; 364 365 i915_gem_chipset_flush(i915); 365 366 366 - __i915_add_request(rq, true); 367 + __i915_request_add(rq, true); 367 368 368 - timeout = i915_wait_request(rq, 369 + timeout = i915_request_wait(rq, 369 370 I915_WAIT_LOCKED, 370 371 MAX_SCHEDULE_TIMEOUT); 371 - i915_gem_request_put(rq); 372 + i915_request_put(rq); 372 373 373 374 if (timeout < 0) { 374 375 err = timeout; ··· 484 485 set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); 485 486 do { 486 487 if (active) { 487 - struct drm_i915_gem_request *rq; 488 + struct i915_request *rq; 488 489 489 490 mutex_lock(&i915->drm.struct_mutex); 490 491 rq = hang_create_request(&h, engine); ··· 494 495 break; 495 496 } 496 497 497 - i915_gem_request_get(rq); 498 - __i915_add_request(rq, true); 498 + i915_request_get(rq); 499 + __i915_request_add(rq, true); 499 500 mutex_unlock(&i915->drm.struct_mutex); 500 501 501 502 if (!wait_for_hang(&h, rq)) { ··· 506 507 intel_engine_dump(engine, &p, 507 508 "%s\n", engine->name); 508 509 509 - i915_gem_request_put(rq); 510 + i915_request_put(rq); 510 511 err = -EIO; 511 512 break; 512 513 } 513 514 514 - i915_gem_request_put(rq); 515 + i915_request_put(rq); 515 516 } 516 517 517 518 engine->hangcheck.stalled = true; 
··· 576 577 static int active_engine(void *data) 577 578 { 578 579 struct intel_engine_cs *engine = data; 579 - struct drm_i915_gem_request *rq[2] = {}; 580 + struct i915_request *rq[2] = {}; 580 581 struct i915_gem_context *ctx[2]; 581 582 struct drm_file *file; 582 583 unsigned long count = 0; ··· 605 606 606 607 while (!kthread_should_stop()) { 607 608 unsigned int idx = count++ & 1; 608 - struct drm_i915_gem_request *old = rq[idx]; 609 - struct drm_i915_gem_request *new; 609 + struct i915_request *old = rq[idx]; 610 + struct i915_request *new; 610 611 611 612 mutex_lock(&engine->i915->drm.struct_mutex); 612 - new = i915_gem_request_alloc(engine, ctx[idx]); 613 + new = i915_request_alloc(engine, ctx[idx]); 613 614 if (IS_ERR(new)) { 614 615 mutex_unlock(&engine->i915->drm.struct_mutex); 615 616 err = PTR_ERR(new); 616 617 break; 617 618 } 618 619 619 - rq[idx] = i915_gem_request_get(new); 620 - i915_add_request(new); 620 + rq[idx] = i915_request_get(new); 621 + i915_request_add(new); 621 622 mutex_unlock(&engine->i915->drm.struct_mutex); 622 623 623 624 if (old) { 624 - i915_wait_request(old, 0, MAX_SCHEDULE_TIMEOUT); 625 - i915_gem_request_put(old); 625 + i915_request_wait(old, 0, MAX_SCHEDULE_TIMEOUT); 626 + i915_request_put(old); 626 627 } 627 628 } 628 629 629 630 for (count = 0; count < ARRAY_SIZE(rq); count++) 630 - i915_gem_request_put(rq[count]); 631 + i915_request_put(rq[count]); 631 632 632 633 err_file: 633 634 mock_file_free(engine->i915, file); ··· 691 692 set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); 692 693 do { 693 694 if (active) { 694 - struct drm_i915_gem_request *rq; 695 + struct i915_request *rq; 695 696 696 697 mutex_lock(&i915->drm.struct_mutex); 697 698 rq = hang_create_request(&h, engine); ··· 701 702 break; 702 703 } 703 704 704 - i915_gem_request_get(rq); 705 - __i915_add_request(rq, true); 705 + i915_request_get(rq); 706 + __i915_request_add(rq, true); 706 707 mutex_unlock(&i915->drm.struct_mutex); 707 708 708 709 if (!wait_for_hang(&h, rq)) { ··· 713 714 intel_engine_dump(engine, &p, 714 715 "%s\n", engine->name); 715 716 716 - i915_gem_request_put(rq); 717 + i915_request_put(rq); 717 718 err = -EIO; 718 719 break; 719 720 } 720 721 721 - i915_gem_request_put(rq); 722 + i915_request_put(rq); 722 723 } 723 724 724 725 engine->hangcheck.stalled = true; ··· 813 814 return __igt_reset_engine_others(arg, true); 814 815 } 815 816 816 - static u32 fake_hangcheck(struct drm_i915_gem_request *rq) 817 + static u32 fake_hangcheck(struct i915_request *rq) 817 818 { 818 819 u32 reset_count; 819 820 ··· 831 832 static int igt_wait_reset(void *arg) 832 833 { 833 834 struct drm_i915_private *i915 = arg; 834 - struct drm_i915_gem_request *rq; 835 + struct i915_request *rq; 835 836 unsigned int reset_count; 836 837 struct hang h; 837 838 long timeout; ··· 855 856 goto fini; 856 857 } 857 858 858 - i915_gem_request_get(rq); 859 - __i915_add_request(rq, true); 859 + i915_request_get(rq); 860 + __i915_request_add(rq, true); 860 861 861 862 if (!wait_for_hang(&h, rq)) { 862 863 struct drm_printer p = drm_info_printer(i915->drm.dev); ··· 874 875 875 876 reset_count = fake_hangcheck(rq); 876 877 877 - timeout = i915_wait_request(rq, I915_WAIT_LOCKED, 10); 878 + timeout = i915_request_wait(rq, I915_WAIT_LOCKED, 10); 878 879 if (timeout < 0) { 879 - pr_err("i915_wait_request failed on a stuck request: err=%ld\n", 880 + pr_err("i915_request_wait failed on a stuck request: err=%ld\n", 880 881 timeout); 881 882 err = timeout; 882 883 goto out_rq; ··· 890 891 } 891 892 892 893 
out_rq: 893 - i915_gem_request_put(rq); 894 + i915_request_put(rq); 894 895 fini: 895 896 hang_fini(&h); 896 897 unlock: ··· 921 922 goto unlock; 922 923 923 924 for_each_engine(engine, i915, id) { 924 - struct drm_i915_gem_request *prev; 925 + struct i915_request *prev; 925 926 IGT_TIMEOUT(end_time); 926 927 unsigned int count; 927 928 ··· 934 935 goto fini; 935 936 } 936 937 937 - i915_gem_request_get(prev); 938 - __i915_add_request(prev, true); 938 + i915_request_get(prev); 939 + __i915_request_add(prev, true); 939 940 940 941 count = 0; 941 942 do { 942 - struct drm_i915_gem_request *rq; 943 + struct i915_request *rq; 943 944 unsigned int reset_count; 944 945 945 946 rq = hang_create_request(&h, engine); ··· 948 949 goto fini; 949 950 } 950 951 951 - i915_gem_request_get(rq); 952 - __i915_add_request(rq, true); 952 + i915_request_get(rq); 953 + __i915_request_add(rq, true); 953 954 954 955 if (!wait_for_hang(&h, prev)) { 955 956 struct drm_printer p = drm_info_printer(i915->drm.dev); ··· 959 960 intel_engine_dump(prev->engine, &p, 960 961 "%s\n", prev->engine->name); 961 962 962 - i915_gem_request_put(rq); 963 - i915_gem_request_put(prev); 963 + i915_request_put(rq); 964 + i915_request_put(prev); 964 965 965 966 i915_reset(i915, 0); 966 967 i915_gem_set_wedged(i915); ··· 979 980 if (prev->fence.error != -EIO) { 980 981 pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n", 981 982 prev->fence.error); 982 - i915_gem_request_put(rq); 983 - i915_gem_request_put(prev); 983 + i915_request_put(rq); 984 + i915_request_put(prev); 984 985 err = -EINVAL; 985 986 goto fini; 986 987 } ··· 988 989 if (rq->fence.error) { 989 990 pr_err("Fence error status not zero [%d] after unrelated reset\n", 990 991 rq->fence.error); 991 - i915_gem_request_put(rq); 992 - i915_gem_request_put(prev); 992 + i915_request_put(rq); 993 + i915_request_put(prev); 993 994 err = -EINVAL; 994 995 goto fini; 995 996 } 996 997 997 998 if (i915_reset_count(&i915->gpu_error) == reset_count) { 998 999 pr_err("No GPU reset recorded!\n"); 999 - i915_gem_request_put(rq); 1000 - i915_gem_request_put(prev); 1000 + i915_request_put(rq); 1001 + i915_request_put(prev); 1001 1002 err = -EINVAL; 1002 1003 goto fini; 1003 1004 } 1004 1005 1005 - i915_gem_request_put(prev); 1006 + i915_request_put(prev); 1006 1007 prev = rq; 1007 1008 count++; 1008 1009 } while (time_before(jiffies, end_time)); ··· 1011 1012 *h.batch = MI_BATCH_BUFFER_END; 1012 1013 i915_gem_chipset_flush(i915); 1013 1014 1014 - i915_gem_request_put(prev); 1015 + i915_request_put(prev); 1015 1016 1016 1017 err = flush_test(i915, I915_WAIT_LOCKED); 1017 1018 if (err) ··· 1035 1036 struct drm_i915_private *i915 = arg; 1036 1037 struct intel_engine_cs *engine = i915->engine[RCS]; 1037 1038 struct hang h; 1038 - struct drm_i915_gem_request *rq; 1039 + struct i915_request *rq; 1039 1040 struct i915_gpu_state *error; 1040 1041 int err; 1041 1042 ··· 1059 1060 goto err_fini; 1060 1061 } 1061 1062 1062 - i915_gem_request_get(rq); 1063 - __i915_add_request(rq, true); 1063 + i915_request_get(rq); 1064 + __i915_request_add(rq, true); 1064 1065 1065 1066 if (!wait_for_hang(&h, rq)) { 1066 1067 struct drm_printer p = drm_info_printer(i915->drm.dev); ··· 1097 1098 } 1098 1099 1099 1100 err_request: 1100 - i915_gem_request_put(rq); 1101 + i915_request_put(rq); 1101 1102 err_fini: 1102 1103 hang_fini(&h); 1103 1104 err_unlock:
+21 -10
drivers/gpu/drm/i915/selftests/intel_uncore.c
··· 61 61 62 62 static int intel_shadow_table_check(void) 63 63 { 64 - const i915_reg_t *reg = gen8_shadowed_regs; 65 - unsigned int i; 64 + struct { 65 + const i915_reg_t *regs; 66 + unsigned int size; 67 + } reg_lists[] = { 68 + { gen8_shadowed_regs, ARRAY_SIZE(gen8_shadowed_regs) }, 69 + { gen11_shadowed_regs, ARRAY_SIZE(gen11_shadowed_regs) }, 70 + }; 71 + const i915_reg_t *reg; 72 + unsigned int i, j; 66 73 s32 prev; 67 74 68 - for (i = 0, prev = -1; i < ARRAY_SIZE(gen8_shadowed_regs); i++, reg++) { 69 - u32 offset = i915_mmio_reg_offset(*reg); 75 + for (j = 0; j < ARRAY_SIZE(reg_lists); ++j) { 76 + reg = reg_lists[j].regs; 77 + for (i = 0, prev = -1; i < reg_lists[j].size; i++, reg++) { 78 + u32 offset = i915_mmio_reg_offset(*reg); 70 79 71 - if (prev >= (s32)offset) { 72 - pr_err("%s: entry[%d]:(%x) is before previous (%x)\n", 73 - __func__, i, offset, prev); 74 - return -EINVAL; 80 + if (prev >= (s32)offset) { 81 + pr_err("%s: entry[%d]:(%x) is before previous (%x)\n", 82 + __func__, i, offset, prev); 83 + return -EINVAL; 84 + } 85 + 86 + prev = offset; 75 87 } 76 - 77 - prev = offset; 78 88 } 79 89 80 90 return 0; ··· 100 90 { __vlv_fw_ranges, ARRAY_SIZE(__vlv_fw_ranges), false }, 101 91 { __chv_fw_ranges, ARRAY_SIZE(__chv_fw_ranges), false }, 102 92 { __gen9_fw_ranges, ARRAY_SIZE(__gen9_fw_ranges), true }, 93 + { __gen11_fw_ranges, ARRAY_SIZE(__gen11_fw_ranges), true }, 103 94 }; 104 95 int err, i; 105 96
+5 -5
drivers/gpu/drm/i915/selftests/mock_engine.c
··· 81 81 i915_gem_context_put(ctx); 82 82 } 83 83 84 - static int mock_request_alloc(struct drm_i915_gem_request *request) 84 + static int mock_request_alloc(struct i915_request *request) 85 85 { 86 86 struct mock_request *mock = container_of(request, typeof(*mock), base); 87 87 ··· 91 91 return 0; 92 92 } 93 93 94 - static int mock_emit_flush(struct drm_i915_gem_request *request, 94 + static int mock_emit_flush(struct i915_request *request, 95 95 unsigned int flags) 96 96 { 97 97 return 0; 98 98 } 99 99 100 - static void mock_emit_breadcrumb(struct drm_i915_gem_request *request, 100 + static void mock_emit_breadcrumb(struct i915_request *request, 101 101 u32 *flags) 102 102 { 103 103 } 104 104 105 - static void mock_submit_request(struct drm_i915_gem_request *request) 105 + static void mock_submit_request(struct i915_request *request) 106 106 { 107 107 struct mock_request *mock = container_of(request, typeof(*mock), base); 108 108 struct mock_engine *engine = 109 109 container_of(request->engine, typeof(*engine), base); 110 110 111 - i915_gem_request_submit(request); 111 + i915_request_submit(request); 112 112 GEM_BUG_ON(!request->global_seqno); 113 113 114 114 spin_lock_irq(&engine->hw_lock);
+1 -1
drivers/gpu/drm/i915/selftests/mock_gem_device.c
··· 43 43 for_each_engine(engine, i915, id) 44 44 mock_engine_flush(engine); 45 45 46 - i915_gem_retire_requests(i915); 46 + i915_retire_requests(i915); 47 47 } 48 48 49 49 static void mock_device_release(struct drm_device *dev)
+5 -5
drivers/gpu/drm/i915/selftests/mock_request.c
··· 25 25 #include "mock_engine.h" 26 26 #include "mock_request.h" 27 27 28 - struct drm_i915_gem_request * 28 + struct i915_request * 29 29 mock_request(struct intel_engine_cs *engine, 30 30 struct i915_gem_context *context, 31 31 unsigned long delay) 32 32 { 33 - struct drm_i915_gem_request *request; 33 + struct i915_request *request; 34 34 struct mock_request *mock; 35 35 36 36 /* NB the i915->requests slab cache is enlarged to fit mock_request */ 37 - request = i915_gem_request_alloc(engine, context); 37 + request = i915_request_alloc(engine, context); 38 38 if (IS_ERR(request)) 39 39 return NULL; 40 40 ··· 44 44 return &mock->base; 45 45 } 46 46 47 - bool mock_cancel_request(struct drm_i915_gem_request *request) 47 + bool mock_cancel_request(struct i915_request *request) 48 48 { 49 49 struct mock_request *mock = container_of(request, typeof(*mock), base); 50 50 struct mock_engine *engine = ··· 57 57 spin_unlock_irq(&engine->hw_lock); 58 58 59 59 if (was_queued) 60 - i915_gem_request_unsubmit(request); 60 + i915_request_unsubmit(request); 61 61 62 62 return was_queued; 63 63 }
+4 -4
drivers/gpu/drm/i915/selftests/mock_request.h
··· 27 27 28 28 #include <linux/list.h> 29 29 30 - #include "../i915_gem_request.h" 30 + #include "../i915_request.h" 31 31 32 32 struct mock_request { 33 - struct drm_i915_gem_request base; 33 + struct i915_request base; 34 34 35 35 struct list_head link; 36 36 unsigned long delay; 37 37 }; 38 38 39 - struct drm_i915_gem_request * 39 + struct i915_request * 40 40 mock_request(struct intel_engine_cs *engine, 41 41 struct i915_gem_context *context, 42 42 unsigned long delay); 43 43 44 - bool mock_cancel_request(struct drm_i915_gem_request *request); 44 + bool mock_cancel_request(struct i915_request *request); 45 45 46 46 #endif /* !__MOCK_REQUEST__ */
+12
include/drm/i915_pciids.h
··· 431 431 INTEL_VGA_DEVICE(0x5A44, info), \ 432 432 INTEL_VGA_DEVICE(0x5A4C, info) 433 433 434 + /* ICL */ 435 + #define INTEL_ICL_11_IDS(info) \ 436 + INTEL_VGA_DEVICE(0x8A50, info), \ 437 + INTEL_VGA_DEVICE(0x8A51, info), \ 438 + INTEL_VGA_DEVICE(0x8A5C, info), \ 439 + INTEL_VGA_DEVICE(0x8A5D, info), \ 440 + INTEL_VGA_DEVICE(0x8A52, info), \ 441 + INTEL_VGA_DEVICE(0x8A5A, info), \ 442 + INTEL_VGA_DEVICE(0x8A5B, info), \ 443 + INTEL_VGA_DEVICE(0x8A71, info), \ 444 + INTEL_VGA_DEVICE(0x8A70, info) 445 + 434 446 #endif /* _I915_PCIIDS_H */
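INTEL_ICL_11_IDS() is consumed like the other platform macros in this header: it expands to INTEL_VGA_DEVICE() entries inside a struct pci_device_id table. A hedged sketch, with the table and device-info names invented for this note (the real binding lives in the driver's PCI id list, not in this diff):

static const struct pci_device_id example_pciidlist[] = {
        /* ... earlier platforms ... */
        INTEL_ICL_11_IDS(&example_icelake_11_info),
        { }     /* terminator */
};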
+105 -3
include/uapi/drm/i915_drm.h
··· 318 318 #define DRM_I915_PERF_OPEN 0x36 319 319 #define DRM_I915_PERF_ADD_CONFIG 0x37 320 320 #define DRM_I915_PERF_REMOVE_CONFIG 0x38 321 + #define DRM_I915_QUERY 0x39 321 322 322 323 #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 323 324 #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) ··· 376 375 #define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param) 377 376 #define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config) 378 377 #define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64) 378 + #define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query) 379 379 380 380 /* Allow drivers to submit batchbuffers directly to hardware, relying 381 381 * on the security mechanisms provided by hardware. ··· 1608 1606 __u32 n_flex_regs; 1609 1607 1610 1608 /* 1611 - * These fields are pointers to tuples of u32 values (register 1612 - * address, value). For example the expected length of the buffer 1613 - * pointed by mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs). 1609 + * These fields are pointers to tuples of u32 values (register address, 1610 + * value). For example the expected length of the buffer pointed by 1611 + * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs). 1614 1612 */ 1615 1613 __u64 mux_regs_ptr; 1616 1614 __u64 boolean_regs_ptr; 1617 1615 __u64 flex_regs_ptr; 1616 + }; 1617 + 1618 + struct drm_i915_query_item { 1619 + __u64 query_id; 1620 + #define DRM_I915_QUERY_TOPOLOGY_INFO 1 1621 + 1622 + /* 1623 + * When set to zero by userspace, this is filled with the size of the 1624 + * data to be written at the data_ptr pointer. The kernel sets this 1625 + * value to a negative value to signal an error on a particular query 1626 + * item. 1627 + */ 1628 + __s32 length; 1629 + 1630 + /* 1631 + * Unused for now. Must be cleared to zero. 1632 + */ 1633 + __u32 flags; 1634 + 1635 + /* 1636 + * Data will be written at the location pointed by data_ptr when the 1637 + * value of length matches the length of the data to be written by the 1638 + * kernel. 1639 + */ 1640 + __u64 data_ptr; 1641 + }; 1642 + 1643 + struct drm_i915_query { 1644 + __u32 num_items; 1645 + 1646 + /* 1647 + * Unused for now. Must be cleared to zero. 1648 + */ 1649 + __u32 flags; 1650 + 1651 + /* 1652 + * This points to an array of num_items drm_i915_query_item structures. 1653 + */ 1654 + __u64 items_ptr; 1655 + }; 1656 + 1657 + /* 1658 + * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO : 1659 + * 1660 + * data: contains the 3 pieces of information : 1661 + * 1662 + * - the slice mask with one bit per slice telling whether a slice is 1663 + * available. The availability of slice X can be queried with the following 1664 + * formula : 1665 + * 1666 + * (data[X / 8] >> (X % 8)) & 1 1667 + * 1668 + * - the subslice mask for each slice with one bit per subslice telling 1669 + * whether a subslice is available. The availability of subslice Y in slice 1670 + * X can be queried with the following formula : 1671 + * 1672 + * (data[subslice_offset + 1673 + * X * subslice_stride + 1674 + * Y / 8] >> (Y % 8)) & 1 1675 + * 1676 + * - the EU mask for each subslice in each slice with one bit per EU telling 1677 + * whether an EU is available. 
The availability of EU Z in subslice Y in 1678 + * slice X can be queried with the following formula : 1679 + * 1680 + * (data[eu_offset + 1681 + * (X * max_subslices + Y) * eu_stride + 1682 + * Z / 8] >> (Z % 8)) & 1 1683 + */ 1684 + struct drm_i915_query_topology_info { 1685 + /* 1686 + * Unused for now. Must be cleared to zero. 1687 + */ 1688 + __u16 flags; 1689 + 1690 + __u16 max_slices; 1691 + __u16 max_subslices; 1692 + __u16 max_eus_per_subslice; 1693 + 1694 + /* 1695 + * Offset in data[] at which the subslice masks are stored. 1696 + */ 1697 + __u16 subslice_offset; 1698 + 1699 + /* 1700 + * Stride at which each of the subslice masks for each slice are 1701 + * stored. 1702 + */ 1703 + __u16 subslice_stride; 1704 + 1705 + /* 1706 + * Offset in data[] at which the EU masks are stored. 1707 + */ 1708 + __u16 eu_offset; 1709 + 1710 + /* 1711 + * Stride at which each of the EU masks for each subslice are stored. 1712 + */ 1713 + __u16 eu_stride; 1714 + 1715 + __u8 data[]; 1618 1716 }; 1619 1717 1620 1718 #if defined(__cplusplus)
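To make the new query interface concrete, here is a hedged userspace sketch of the two-pass DRM_IOCTL_I915_QUERY flow for DRM_I915_QUERY_TOPOLOGY_INFO, applying the slice and subslice mask formulas documented in the comment above. The helper names are invented for this example, the include path for the uAPI header may differ per setup, and error handling is kept minimal.

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static struct drm_i915_query_topology_info *example_query_topology(int drm_fd)
{
        struct drm_i915_query_item item = {
                .query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
        };
        struct drm_i915_query query = {
                .num_items = 1,
                .items_ptr = (uintptr_t)&item,
        };
        struct drm_i915_query_topology_info *info;

        /* Pass 1: item.length == 0, so the kernel writes back the size needed. */
        if (ioctl(drm_fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0)
                return NULL;

        info = calloc(1, item.length);
        if (!info)
                return NULL;
        item.data_ptr = (uintptr_t)info;

        /* Pass 2: length now matches, so the kernel fills data_ptr. */
        if (ioctl(drm_fd, DRM_IOCTL_I915_QUERY, &query)) {
                free(info);
                return NULL;
        }
        return info;
}

/* Mask decoding, taken directly from the formulas in the comment above. */
static int example_has_slice(const struct drm_i915_query_topology_info *t, int x)
{
        return (t->data[x / 8] >> (x % 8)) & 1;
}

static int example_has_subslice(const struct drm_i915_query_topology_info *t,
                                int x, int y)
{
        return (t->data[t->subslice_offset + x * t->subslice_stride + y / 8]
                >> (y % 8)) & 1;
}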