Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'vmwgfx-fixes-5.0-2' of git://people.freedesktop.org/~thomash/linux into drm-fixes

A patch set from Christoph fixing the vmwgfx dma mode detection breakage
caused by the new dma code restructuring in 5.0

A couple of fixes also CC'd to stable

Finally, an improved IOMMU detection that automatically enables dma mapping
with vIOMMUs other than the Intel one, if present and enabled.
Currently, trying to start a VM in that case would fail catastrophically.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Hellstrom <thellstrom@vmware.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190206194735.4663-1-thellstrom@vmware.com

+40 -57
+36 -53
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 26 26 **************************************************************************/ 27 27 #include <linux/module.h> 28 28 #include <linux/console.h> 29 + #include <linux/dma-mapping.h> 29 30 30 31 #include <drm/drmP.h> 31 32 #include "vmwgfx_drv.h" ··· 35 34 #include <drm/ttm/ttm_placement.h> 36 35 #include <drm/ttm/ttm_bo_driver.h> 37 36 #include <drm/ttm/ttm_module.h> 38 - #include <linux/intel-iommu.h> 39 37 40 38 #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" 41 39 #define VMWGFX_CHIP_SVGAII 0 ··· 546 546 } 547 547 548 548 /** 549 + * vmw_assume_iommu - Figure out whether coherent dma-remapping might be 550 + * taking place. 551 + * @dev: Pointer to the struct drm_device. 552 + * 553 + * Return: true if iommu present, false otherwise. 554 + */ 555 + static bool vmw_assume_iommu(struct drm_device *dev) 556 + { 557 + const struct dma_map_ops *ops = get_dma_ops(dev->dev); 558 + 559 + return !dma_is_direct(ops) && ops && 560 + ops->map_page != dma_direct_map_page; 561 + } 562 + 563 + /** 549 564 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this 550 565 * system. 
551 566 * ··· 580 565 [vmw_dma_alloc_coherent] = "Using coherent TTM pages.", 581 566 [vmw_dma_map_populate] = "Keeping DMA mappings.", 582 567 [vmw_dma_map_bind] = "Giving up DMA mappings early."}; 583 - #ifdef CONFIG_X86 584 - const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev); 585 - 586 - #ifdef CONFIG_INTEL_IOMMU 587 - if (intel_iommu_enabled) { 588 - dev_priv->map_mode = vmw_dma_map_populate; 589 - goto out_fixup; 590 - } 591 - #endif 592 - 593 - if (!(vmw_force_iommu || vmw_force_coherent)) { 594 - dev_priv->map_mode = vmw_dma_phys; 595 - DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); 596 - return 0; 597 - } 598 - 599 - dev_priv->map_mode = vmw_dma_map_populate; 600 - 601 - if (dma_ops && dma_ops->sync_single_for_cpu) 602 - dev_priv->map_mode = vmw_dma_alloc_coherent; 603 - #ifdef CONFIG_SWIOTLB 604 - if (swiotlb_nr_tbl() == 0) 605 - dev_priv->map_mode = vmw_dma_map_populate; 606 - #endif 607 - 608 - #ifdef CONFIG_INTEL_IOMMU 609 - out_fixup: 610 - #endif 611 - if (dev_priv->map_mode == vmw_dma_map_populate && 612 - vmw_restrict_iommu) 613 - dev_priv->map_mode = vmw_dma_map_bind; 614 568 615 569 if (vmw_force_coherent) 616 570 dev_priv->map_mode = vmw_dma_alloc_coherent; 571 + else if (vmw_assume_iommu(dev_priv->dev)) 572 + dev_priv->map_mode = vmw_dma_map_populate; 573 + else if (!vmw_force_iommu) 574 + dev_priv->map_mode = vmw_dma_phys; 575 + else if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl()) 576 + dev_priv->map_mode = vmw_dma_alloc_coherent; 577 + else 578 + dev_priv->map_mode = vmw_dma_map_populate; 617 579 618 - #if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU) 619 - /* 620 - * No coherent page pool 621 - */ 622 - if (dev_priv->map_mode == vmw_dma_alloc_coherent) 580 + if (dev_priv->map_mode == vmw_dma_map_populate && vmw_restrict_iommu) 581 + dev_priv->map_mode = vmw_dma_map_bind; 582 + 583 + /* No TTM coherent page pool? FIXME: Ask TTM instead! 
*/ 584 + if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) && 585 + (dev_priv->map_mode == vmw_dma_alloc_coherent)) 623 586 return -EINVAL; 624 - #endif 625 - 626 - #else /* CONFIG_X86 */ 627 - dev_priv->map_mode = vmw_dma_map_populate; 628 - #endif /* CONFIG_X86 */ 629 587 630 588 DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); 631 - 632 589 return 0; 633 590 } 634 591 ··· 612 625 * With 32-bit we can only handle 32 bit PFNs. Optionally set that 613 626 * restriction also for 64-bit systems. 614 627 */ 615 - #ifdef CONFIG_INTEL_IOMMU 616 628 static int vmw_dma_masks(struct vmw_private *dev_priv) 617 629 { 618 630 struct drm_device *dev = dev_priv->dev; 631 + int ret = 0; 619 632 620 - if (intel_iommu_enabled && 633 + ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)); 634 + if (dev_priv->map_mode != vmw_dma_phys && 621 635 (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { 622 636 DRM_INFO("Restricting DMA addresses to 44 bits.\n"); 623 - return dma_set_mask(dev->dev, DMA_BIT_MASK(44)); 637 + return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44)); 624 638 } 625 - return 0; 639 + 640 + return ret; 626 641 } 627 - #else 628 - static int vmw_dma_masks(struct vmw_private *dev_priv) 629 - { 630 - return 0; 631 - } 632 - #endif 633 642 634 643 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) 635 644 {
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
··· 3570 3570 *p_fence = NULL; 3571 3571 } 3572 3572 3573 - return 0; 3573 + return ret; 3574 3574 } 3575 3575 3576 3576 /**
+3 -3
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
··· 1646 1646 struct drm_connector_state *conn_state; 1647 1647 struct vmw_connector_state *vmw_conn_state; 1648 1648 1649 - if (!du->pref_active) { 1649 + if (!du->pref_active && new_crtc_state->enable) { 1650 1650 ret = -EINVAL; 1651 1651 goto clean; 1652 1652 } ··· 2554 2554 user_fence_rep) 2555 2555 { 2556 2556 struct vmw_fence_obj *fence = NULL; 2557 - uint32_t handle; 2558 - int ret; 2557 + uint32_t handle = 0; 2558 + int ret = 0; 2559 2559 2560 2560 if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) || 2561 2561 out_fence)