Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-fixes-for-v4.12-rc4' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
"This is the main set of fixes for rc4, one amdgpu fix, some exynos
regression fixes, some msm fixes and some i915 and GVT fixes.

I've got a second regression fix for some DP chips that might be a
bit large, but I think we'd like to land it now, I'll send it along
tomorrow, once you are happy with this set"

* tag 'drm-fixes-for-v4.12-rc4' of git://people.freedesktop.org/~airlied/linux: (24 commits)
drm/amdgpu: Program ring for vce instance 1 at its register space
drm/exynos: clean up description of exynos_drm_crtc
drm/exynos: dsi: Remove bridge node reference in removal
drm/exynos: dsi: Fix the parse_dt function
drm/exynos: Merge pre/postclose hooks
drm/msm: Fix the check for the command size
drm/msm: Take the mutex before calling msm_gem_new_impl
drm/msm: for array in-fences, check if all backing fences are from our own context before waiting
drm/msm: constify irq_domain_ops
drm/msm/mdp5: release hwpipe(s) for unused planes
drm/msm: Reuse dma_fence_release.
drm/msm: Expose our reservation object when exporting a dmabuf.
drm/msm/gpu: check legacy clk names in get_clocks()
drm/msm/mdp5: use __drm_atomic_helper_plane_duplicate_state()
drm/msm: select PM_OPP
drm/i915: Stop pretending to mask/unmask LPE audio interrupts
drm/i915/selftests: Silence compiler warning in igt_ctx_exec
Revert "drm/i915: Restore lost "Initialized i915" welcome message"
drm/i915/gvt: clean up unsubmited workloads before destroying kmem cache
drm/i915/gvt: Disable compression workaround for Gen9
...

+169 -154
+68 -27
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
··· 77 77 static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring) 78 78 { 79 79 struct amdgpu_device *adev = ring->adev; 80 + u32 v; 81 + 82 + mutex_lock(&adev->grbm_idx_mutex); 83 + if (adev->vce.harvest_config == 0 || 84 + adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1) 85 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0)); 86 + else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) 87 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1)); 80 88 81 89 if (ring == &adev->vce.ring[0]) 82 - return RREG32(mmVCE_RB_RPTR); 90 + v = RREG32(mmVCE_RB_RPTR); 83 91 else if (ring == &adev->vce.ring[1]) 84 - return RREG32(mmVCE_RB_RPTR2); 92 + v = RREG32(mmVCE_RB_RPTR2); 85 93 else 86 - return RREG32(mmVCE_RB_RPTR3); 94 + v = RREG32(mmVCE_RB_RPTR3); 95 + 96 + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); 97 + mutex_unlock(&adev->grbm_idx_mutex); 98 + 99 + return v; 87 100 } 88 101 89 102 /** ··· 109 96 static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring) 110 97 { 111 98 struct amdgpu_device *adev = ring->adev; 99 + u32 v; 100 + 101 + mutex_lock(&adev->grbm_idx_mutex); 102 + if (adev->vce.harvest_config == 0 || 103 + adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1) 104 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0)); 105 + else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) 106 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1)); 112 107 113 108 if (ring == &adev->vce.ring[0]) 114 - return RREG32(mmVCE_RB_WPTR); 109 + v = RREG32(mmVCE_RB_WPTR); 115 110 else if (ring == &adev->vce.ring[1]) 116 - return RREG32(mmVCE_RB_WPTR2); 111 + v = RREG32(mmVCE_RB_WPTR2); 117 112 else 118 - return RREG32(mmVCE_RB_WPTR3); 113 + v = RREG32(mmVCE_RB_WPTR3); 114 + 115 + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); 116 + mutex_unlock(&adev->grbm_idx_mutex); 117 + 118 + return v; 119 119 } 120 120 121 121 /** ··· 142 116 { 143 117 struct amdgpu_device *adev = ring->adev; 144 118 119 + mutex_lock(&adev->grbm_idx_mutex); 120 + if (adev->vce.harvest_config == 0 || 121 + adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1) 122 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0)); 123 + else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) 124 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1)); 125 + 145 126 if (ring == &adev->vce.ring[0]) 146 127 WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr)); 147 128 else if (ring == &adev->vce.ring[1]) 148 129 WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr)); 149 130 else 150 131 WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr)); 132 + 133 + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); 134 + mutex_unlock(&adev->grbm_idx_mutex); 151 135 } 152 136 153 137 static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override) ··· 267 231 struct amdgpu_ring *ring; 268 232 int idx, r; 269 233 270 - ring = &adev->vce.ring[0]; 271 - WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr)); 272 - WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr)); 273 - WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr); 274 - WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); 275 - WREG32(mmVCE_RB_SIZE, ring->ring_size / 4); 276 - 277 - ring = &adev->vce.ring[1]; 278 - WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr)); 279 - WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr)); 280 - WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr); 281 - WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); 282 - WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4); 283 - 284 - ring = &adev->vce.ring[2]; 285 - WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr)); 286 - WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr)); 287 - WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr); 288 - WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr)); 289 - WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4); 290 - 291 234 mutex_lock(&adev->grbm_idx_mutex); 292 235 for (idx = 0; idx < 2; ++idx) { 293 236 if (adev->vce.harvest_config & (1 << idx)) 294 237 continue; 295 238 296 239 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx)); 240 + 241 + /* Program instance 0 reg space for two instances or instance 0 case 242 + program instance 1 reg space for only instance 1 available case */ 243 + if (idx != 1 || adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) { 244 + ring = &adev->vce.ring[0]; 245 + WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr)); 246 + WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr)); 247 + WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr); 248 + WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); 249 + WREG32(mmVCE_RB_SIZE, ring->ring_size / 4); 250 + 251 + ring = &adev->vce.ring[1]; 252 + WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr)); 253 + WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr)); 254 + WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr); 255 + WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); 256 + WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4); 257 + 258 + ring = &adev->vce.ring[2]; 259 + WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr)); 260 + WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr)); 261 + WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr); 262 + WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr)); 263 + WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4); 264 + } 265 + 297 266 vce_v3_0_mc_resume(adev, idx); 298 267 WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1); 299 268
+1 -7
drivers/gpu/drm/exynos/exynos_drm_drv.c
··· 82 82 return ret; 83 83 } 84 84 85 - static void exynos_drm_preclose(struct drm_device *dev, 86 - struct drm_file *file) 87 - { 88 - exynos_drm_subdrv_close(dev, file); 89 - } 90 - 91 85 static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) 92 86 { 87 + exynos_drm_subdrv_close(dev, file); 93 88 kfree(file->driver_priv); 94 89 file->driver_priv = NULL; 95 90 } ··· 140 145 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME 141 146 | DRIVER_ATOMIC | DRIVER_RENDER, 142 147 .open = exynos_drm_open, 143 - .preclose = exynos_drm_preclose, 144 148 .lastclose = exynos_drm_lastclose, 145 149 .postclose = exynos_drm_postclose, 146 150 .gem_free_object_unlocked = exynos_drm_gem_free_object,
+1 -4
drivers/gpu/drm/exynos/exynos_drm_drv.h
··· 160 160 * drm framework doesn't support multiple irq yet. 161 161 * we can refer to the crtc to current hardware interrupt occurred through 162 162 * this pipe value. 163 - * @enabled: if the crtc is enabled or not 164 - * @event: vblank event that is currently queued for flip 165 - * @wait_update: wait all pending planes updates to finish 166 - * @pending_update: number of pending plane updates in this crtc 167 163 * @ops: pointer to callbacks for exynos drm specific functionality 168 164 * @ctx: A pointer to the crtc's implementation specific context 165 + * @pipe_clk: A pointer to the crtc's pipeline clock. 169 166 */ 170 167 struct exynos_drm_crtc { 171 168 struct drm_crtc base;
+9 -17
drivers/gpu/drm/exynos/exynos_drm_dsi.c
··· 1633 1633 { 1634 1634 struct device *dev = dsi->dev; 1635 1635 struct device_node *node = dev->of_node; 1636 - struct device_node *ep; 1637 1636 int ret; 1638 1637 1639 1638 ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency", ··· 1640 1641 if (ret < 0) 1641 1642 return ret; 1642 1643 1643 - ep = of_graph_get_endpoint_by_regs(node, DSI_PORT_OUT, 0); 1644 - if (!ep) { 1645 - dev_err(dev, "no output port with endpoint specified\n"); 1646 - return -EINVAL; 1647 - } 1648 - 1649 - ret = exynos_dsi_of_read_u32(ep, "samsung,burst-clock-frequency", 1644 + ret = exynos_dsi_of_read_u32(node, "samsung,burst-clock-frequency", 1650 1645 &dsi->burst_clk_rate); 1651 1646 if (ret < 0) 1652 - goto end; 1647 + return ret; 1653 1648 1654 - ret = exynos_dsi_of_read_u32(ep, "samsung,esc-clock-frequency", 1649 + ret = exynos_dsi_of_read_u32(node, "samsung,esc-clock-frequency", 1655 1650 &dsi->esc_clk_rate); 1656 1651 if (ret < 0) 1657 - goto end; 1658 - 1659 - of_node_put(ep); 1652 + return ret; 1660 1653 1661 1654 dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_OUT, 0); 1662 1655 if (!dsi->bridge_node) 1663 1656 return -EINVAL; 1664 1657 1665 - end: 1666 - of_node_put(ep); 1667 - 1668 - return ret; 1658 + return 0; 1669 1659 } 1670 1660 1671 1661 static int exynos_dsi_bind(struct device *dev, struct device *master, ··· 1805 1817 1806 1818 static int exynos_dsi_remove(struct platform_device *pdev) 1807 1819 { 1820 + struct exynos_dsi *dsi = platform_get_drvdata(pdev); 1821 + 1822 + of_node_put(dsi->bridge_node); 1823 + 1808 1824 pm_runtime_disable(&pdev->dev); 1809 1825 1810 1826 component_del(&pdev->dev, &exynos_dsi_component_ops);
+20 -10
drivers/gpu/drm/i915/gvt/execlist.c
··· 779 779 vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw; 780 780 } 781 781 782 + static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask) 783 + { 784 + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 785 + struct intel_engine_cs *engine; 786 + struct intel_vgpu_workload *pos, *n; 787 + unsigned int tmp; 788 + 789 + /* free the unsubmited workloads in the queues. */ 790 + for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { 791 + list_for_each_entry_safe(pos, n, 792 + &vgpu->workload_q_head[engine->id], list) { 793 + list_del_init(&pos->list); 794 + free_workload(pos); 795 + } 796 + } 797 + } 798 + 782 799 void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu) 783 800 { 801 + clean_workloads(vgpu, ALL_ENGINES); 784 802 kmem_cache_destroy(vgpu->workloads); 785 803 } 786 804 ··· 829 811 { 830 812 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 831 813 struct intel_engine_cs *engine; 832 - struct intel_vgpu_workload *pos, *n; 833 814 unsigned int tmp; 834 815 835 - for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { 836 - /* free the unsubmited workload in the queue */ 837 - list_for_each_entry_safe(pos, n, 838 - &vgpu->workload_q_head[engine->id], list) { 839 - list_del_init(&pos->list); 840 - free_workload(pos); 841 - } 842 - 816 + clean_workloads(vgpu, engine_mask); 817 + for_each_engine_masked(engine, dev_priv, engine_mask, tmp) 843 818 init_vgpu_execlist(vgpu, engine->id); 844 - } 845 819 }
+21 -9
drivers/gpu/drm/i915/gvt/handlers.c
··· 1366 1366 void *p_data, unsigned int bytes) 1367 1367 { 1368 1368 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 1369 - i915_reg_t reg = {.reg = offset}; 1369 + u32 v = *(u32 *)p_data; 1370 + 1371 + if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv)) 1372 + return intel_vgpu_default_mmio_write(vgpu, 1373 + offset, p_data, bytes); 1370 1374 1371 1375 switch (offset) { 1372 1376 case 0x4ddc: 1373 - vgpu_vreg(vgpu, offset) = 0x8000003c; 1374 - /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl */ 1375 - I915_WRITE(reg, vgpu_vreg(vgpu, offset)); 1377 + /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */ 1378 + vgpu_vreg(vgpu, offset) = v & ~(1 << 31); 1376 1379 break; 1377 1380 case 0x42080: 1378 - vgpu_vreg(vgpu, offset) = 0x8000; 1379 - /* WaCompressedResourceDisplayNewHashMode:skl */ 1380 - I915_WRITE(reg, vgpu_vreg(vgpu, offset)); 1381 + /* bypass WaCompressedResourceDisplayNewHashMode */ 1382 + vgpu_vreg(vgpu, offset) = v & ~(1 << 15); 1383 + break; 1384 + case 0xe194: 1385 + /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */ 1386 + vgpu_vreg(vgpu, offset) = v & ~(1 << 8); 1387 + break; 1388 + case 0x7014: 1389 + /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */ 1390 + vgpu_vreg(vgpu, offset) = v & ~(1 << 13); 1381 1391 break; 1382 1392 default: 1383 1393 return -EINVAL; ··· 1644 1634 MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL); 1645 1635 MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, 1646 1636 NULL, NULL); 1647 - MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 1637 + MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, 1638 + skl_misc_ctl_write); 1648 1639 MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL); 1649 1640 MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL); 1650 1641 MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL); ··· 2579 2568 MMIO_D(0x6e570, D_BDW_PLUS); 2580 2569 MMIO_D(0x65f10, D_BDW_PLUS); 2581 2570 2582 - MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2571 + MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, 2572 + skl_misc_ctl_write); 2583 2573 MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2584 2574 MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2585 2575 MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
-4
drivers/gpu/drm/i915/i915_drv.c
··· 1272 1272 1273 1273 dev_priv->ipc_enabled = false; 1274 1274 1275 - /* Everything is in place, we can now relax! */ 1276 - DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", 1277 - driver.name, driver.major, driver.minor, driver.patchlevel, 1278 - driver.date, pci_name(pdev), dev_priv->drm.primary->index); 1279 1275 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG)) 1280 1276 DRM_INFO("DRM_I915_DEBUG enabled\n"); 1281 1277 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+1 -1
drivers/gpu/drm/i915/i915_gem_gtt.c
··· 2313 2313 appgtt->base.allocate_va_range) { 2314 2314 ret = appgtt->base.allocate_va_range(&appgtt->base, 2315 2315 vma->node.start, 2316 - vma->node.size); 2316 + vma->size); 2317 2317 if (ret) 2318 2318 goto err_pages; 2319 2319 }
-5
drivers/gpu/drm/i915/i915_gem_shrinker.c
··· 59 59 return; 60 60 61 61 mutex_unlock(&dev->struct_mutex); 62 - 63 - /* expedite the RCU grace period to free some request slabs */ 64 - synchronize_rcu_expedited(); 65 62 } 66 63 67 64 static bool any_vma_pinned(struct drm_i915_gem_object *obj) ··· 270 273 I915_SHRINK_UNBOUND | 271 274 I915_SHRINK_ACTIVE); 272 275 intel_runtime_pm_put(dev_priv); 273 - 274 - synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */ 275 276 276 277 return freed; 277 278 }
+6 -9
drivers/gpu/drm/i915/i915_irq.c
··· 2953 2953 u32 pipestat_mask; 2954 2954 u32 enable_mask; 2955 2955 enum pipe pipe; 2956 - u32 val; 2957 2956 2958 2957 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 2959 2958 PIPE_CRC_DONE_INTERRUPT_STATUS; ··· 2963 2964 2964 2965 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 2965 2966 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2966 - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 2967 + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2968 + I915_LPE_PIPE_A_INTERRUPT | 2969 + I915_LPE_PIPE_B_INTERRUPT; 2970 + 2967 2971 if (IS_CHERRYVIEW(dev_priv)) 2968 - enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 2972 + enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 2973 + I915_LPE_PIPE_C_INTERRUPT; 2969 2974 2970 2975 WARN_ON(dev_priv->irq_mask != ~0); 2971 - 2972 - val = (I915_LPE_PIPE_A_INTERRUPT | 2973 - I915_LPE_PIPE_B_INTERRUPT | 2974 - I915_LPE_PIPE_C_INTERRUPT); 2975 - 2976 - enable_mask |= val; 2977 2976 2978 2977 dev_priv->irq_mask = ~enable_mask; 2979 2978
+1 -1
drivers/gpu/drm/i915/i915_reg.h
··· 8280 8280 8281 8281 /* MIPI DSI registers */ 8282 8282 8283 - #define _MIPI_PORT(port, a, c) ((port) ? c : a) /* ports A and C only */ 8283 + #define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c) /* ports A and C only */ 8284 8284 #define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c)) 8285 8285 8286 8286 #define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004)
-36
drivers/gpu/drm/i915/intel_lpe_audio.c
··· 149 149 150 150 static void lpe_audio_irq_unmask(struct irq_data *d) 151 151 { 152 - struct drm_i915_private *dev_priv = d->chip_data; 153 - unsigned long irqflags; 154 - u32 val = (I915_LPE_PIPE_A_INTERRUPT | 155 - I915_LPE_PIPE_B_INTERRUPT); 156 - 157 - if (IS_CHERRYVIEW(dev_priv)) 158 - val |= I915_LPE_PIPE_C_INTERRUPT; 159 - 160 - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 161 - 162 - dev_priv->irq_mask &= ~val; 163 - I915_WRITE(VLV_IIR, val); 164 - I915_WRITE(VLV_IIR, val); 165 - I915_WRITE(VLV_IMR, dev_priv->irq_mask); 166 - POSTING_READ(VLV_IMR); 167 - 168 - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 169 152 } 170 153 171 154 static void lpe_audio_irq_mask(struct irq_data *d) 172 155 { 173 - struct drm_i915_private *dev_priv = d->chip_data; 174 - unsigned long irqflags; 175 - u32 val = (I915_LPE_PIPE_A_INTERRUPT | 176 - I915_LPE_PIPE_B_INTERRUPT); 177 - 178 - if (IS_CHERRYVIEW(dev_priv)) 179 - val |= I915_LPE_PIPE_C_INTERRUPT; 180 - 181 - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 182 - 183 - dev_priv->irq_mask |= val; 184 - I915_WRITE(VLV_IMR, dev_priv->irq_mask); 185 - I915_WRITE(VLV_IIR, val); 186 - I915_WRITE(VLV_IIR, val); 187 - POSTING_READ(VLV_IIR); 188 - 189 - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 190 156 } 191 157 192 158 static struct irq_chip lpe_audio_irqchip = { ··· 295 329 return; 296 330 297 331 desc = irq_to_desc(dev_priv->lpe_audio.irq); 298 - 299 - lpe_audio_irq_mask(&desc->irq_data); 300 332 301 333 lpe_audio_platdev_destroy(dev_priv); 302 334
+1 -1
drivers/gpu/drm/i915/intel_lrc.c
··· 1989 1989 1990 1990 ce->ring = ring; 1991 1991 ce->state = vma; 1992 - ce->initialised = engine->init_context == NULL; 1992 + ce->initialised |= engine->init_context == NULL; 1993 1993 1994 1994 return 0; 1995 1995
+5 -3
drivers/gpu/drm/i915/selftests/i915_gem_context.c
··· 320 320 static int igt_ctx_exec(void *arg) 321 321 { 322 322 struct drm_i915_private *i915 = arg; 323 - struct drm_i915_gem_object *obj; 323 + struct drm_i915_gem_object *obj = NULL; 324 324 struct drm_file *file; 325 325 IGT_TIMEOUT(end_time); 326 326 LIST_HEAD(objects); ··· 359 359 } 360 360 361 361 for_each_engine(engine, i915, id) { 362 - if (dw == 0) { 362 + if (!obj) { 363 363 obj = create_test_object(ctx, file, &objects); 364 364 if (IS_ERR(obj)) { 365 365 err = PTR_ERR(obj); ··· 376 376 goto out_unlock; 377 377 } 378 378 379 - if (++dw == max_dwords(obj)) 379 + if (++dw == max_dwords(obj)) { 380 + obj = NULL; 380 381 dw = 0; 382 + } 381 383 ndwords++; 382 384 } 383 385 ncontexts++;
+1
drivers/gpu/drm/msm/Kconfig
··· 13 13 select QCOM_SCM 14 14 select SND_SOC_HDMI_CODEC if SND_SOC 15 15 select SYNC_FILE 16 + select PM_OPP 16 17 default y 17 18 help 18 19 DRM/KMS driver for MSM/snapdragon.
+1 -1
drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
··· 116 116 return 0; 117 117 } 118 118 119 - static struct irq_domain_ops mdss_hw_irqdomain_ops = { 119 + static const struct irq_domain_ops mdss_hw_irqdomain_ops = { 120 120 .map = mdss_hw_irqdomain_map, 121 121 .xlate = irq_domain_xlate_onecell, 122 122 };
+7 -2
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
··· 225 225 226 226 mdp5_state = kmemdup(to_mdp5_plane_state(plane->state), 227 227 sizeof(*mdp5_state), GFP_KERNEL); 228 + if (!mdp5_state) 229 + return NULL; 228 230 229 - if (mdp5_state && mdp5_state->base.fb) 230 - drm_framebuffer_reference(mdp5_state->base.fb); 231 + __drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base); 231 232 232 233 return &mdp5_state->base; 233 234 } ··· 445 444 mdp5_pipe_release(state->state, old_hwpipe); 446 445 mdp5_pipe_release(state->state, old_right_hwpipe); 447 446 } 447 + } else { 448 + mdp5_pipe_release(state->state, mdp5_state->hwpipe); 449 + mdp5_pipe_release(state->state, mdp5_state->r_hwpipe); 450 + mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL; 448 451 } 449 452 450 453 return 0;
+1
drivers/gpu/drm/msm/msm_drv.c
··· 830 830 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 831 831 .gem_prime_export = drm_gem_prime_export, 832 832 .gem_prime_import = drm_gem_prime_import, 833 + .gem_prime_res_obj = msm_gem_prime_res_obj, 833 834 .gem_prime_pin = msm_gem_prime_pin, 834 835 .gem_prime_unpin = msm_gem_prime_unpin, 835 836 .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
+1
drivers/gpu/drm/msm/msm_drv.h
··· 224 224 void *msm_gem_prime_vmap(struct drm_gem_object *obj); 225 225 void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); 226 226 int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); 227 + struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj); 227 228 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, 228 229 struct dma_buf_attachment *attach, struct sg_table *sg); 229 230 int msm_gem_prime_pin(struct drm_gem_object *obj);
+2 -8
drivers/gpu/drm/msm/msm_fence.c
··· 99 99 } 100 100 101 101 struct msm_fence { 102 - struct msm_fence_context *fctx; 103 102 struct dma_fence base; 103 + struct msm_fence_context *fctx; 104 104 }; 105 105 106 106 static inline struct msm_fence *to_msm_fence(struct dma_fence *fence) ··· 130 130 return fence_completed(f->fctx, f->base.seqno); 131 131 } 132 132 133 - static void msm_fence_release(struct dma_fence *fence) 134 - { 135 - struct msm_fence *f = to_msm_fence(fence); 136 - kfree_rcu(f, base.rcu); 137 - } 138 - 139 133 static const struct dma_fence_ops msm_fence_ops = { 140 134 .get_driver_name = msm_fence_get_driver_name, 141 135 .get_timeline_name = msm_fence_get_timeline_name, 142 136 .enable_signaling = msm_fence_enable_signaling, 143 137 .signaled = msm_fence_signaled, 144 138 .wait = dma_fence_default_wait, 145 - .release = msm_fence_release, 139 + .release = dma_fence_free, 146 140 }; 147 141 148 142 struct dma_fence *
+6
drivers/gpu/drm/msm/msm_gem.c
··· 758 758 struct msm_gem_object *msm_obj; 759 759 bool use_vram = false; 760 760 761 + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 762 + 761 763 switch (flags & MSM_BO_CACHE_MASK) { 762 764 case MSM_BO_UNCACHED: 763 765 case MSM_BO_CACHED: ··· 855 853 856 854 size = PAGE_ALIGN(dmabuf->size); 857 855 856 + /* Take mutex so we can modify the inactive list in msm_gem_new_impl */ 857 + mutex_lock(&dev->struct_mutex); 858 858 ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj); 859 + mutex_unlock(&dev->struct_mutex); 860 + 859 861 if (ret) 860 862 goto fail; 861 863
+7
drivers/gpu/drm/msm/msm_gem_prime.c
··· 70 70 if (!obj->import_attach) 71 71 msm_gem_put_pages(obj); 72 72 } 73 + 74 + struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj) 75 + { 76 + struct msm_gem_object *msm_obj = to_msm_bo(obj); 77 + 78 + return msm_obj->resv; 79 + }
+7 -7
drivers/gpu/drm/msm/msm_gem_submit.c
··· 410 410 if (!in_fence) 411 411 return -EINVAL; 412 412 413 - /* TODO if we get an array-fence due to userspace merging multiple 414 - * fences, we need a way to determine if all the backing fences 415 - * are from our own context.. 413 + /* 414 + * Wait if the fence is from a foreign context, or if the fence 415 + * array contains any fence from a foreign context. 416 416 */ 417 - 418 - if (in_fence->context != gpu->fctx->context) { 417 + if (!dma_fence_match_context(in_fence, gpu->fctx->context)) { 419 418 ret = dma_fence_wait(in_fence, true); 420 419 if (ret) 421 420 return ret; ··· 495 496 goto out; 496 497 } 497 498 498 - if ((submit_cmd.size + submit_cmd.submit_offset) >= 499 - msm_obj->base.size) { 499 + if (!submit_cmd.size || 500 + ((submit_cmd.size + submit_cmd.submit_offset) > 501 + msm_obj->base.size)) { 500 502 DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size); 501 503 ret = -EINVAL; 502 504 goto out;
+2 -2
drivers/gpu/drm/msm/msm_gpu.c
··· 549 549 gpu->grp_clks[i] = get_clock(dev, name); 550 550 551 551 /* Remember the key clocks that we need to control later */ 552 - if (!strcmp(name, "core")) 552 + if (!strcmp(name, "core") || !strcmp(name, "core_clk")) 553 553 gpu->core_clk = gpu->grp_clks[i]; 554 - else if (!strcmp(name, "rbbmtimer")) 554 + else if (!strcmp(name, "rbbmtimer") || !strcmp(name, "rbbmtimer_clk")) 555 555 gpu->rbbmtimer_clk = gpu->grp_clks[i]; 556 556 557 557 ++i;